--- /dev/null
+diff -Nurp linux-2.6.22-590/arch/i386/Kconfig.debug linux-2.6.22-600/arch/i386/Kconfig.debug
+--- linux-2.6.22-590/arch/i386/Kconfig.debug 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/arch/i386/Kconfig.debug 2008-04-09 18:16:14.000000000 +0200
+@@ -85,4 +85,82 @@ config DOUBLEFAULT
+ option saves about 4k and might cause you much additional grey
+ hair.
+
++config KDB
++ bool "Built-in Kernel Debugger support"
++ depends on DEBUG_KERNEL
++ select KALLSYMS
++ select KALLSYMS_ALL
++ help
++ This option provides a built-in kernel debugger. The built-in
++ kernel debugger contains commands which allow memory to be examined,
++ instructions to be disassembled and breakpoints to be set. For details,
++ see Documentation/kdb/kdb.mm and the manual pages kdb_bt, kdb_ss, etc.
++ Kdb can also be used via the serial port. Set up the system to
++ have a serial console (see Documentation/serial-console.txt).
++ The key sequence <escape>KDB on the serial port will cause the
++ kernel debugger to be entered with input from the serial port and
++ output to the serial console. If unsure, say N.
++
++config KDB_MODULES
++ tristate "KDB modules"
++ depends on KDB
++ help
++ KDB can be extended by adding your own modules, in directory
++ kdb/modules. This option selects the way that these modules should
++ be compiled, as free standing modules (select M) or built into the
++ kernel (select Y). If unsure say M.
++
++config KDB_OFF
++ bool "KDB off by default"
++ depends on KDB
++ help
++ Normally kdb is activated by default, as long as CONFIG_KDB is set.
++ If you want to ship a kernel with kdb support but only have kdb
++ turned on when the user requests it then select this option. When
++ compiled with CONFIG_KDB_OFF, kdb ignores all events unless you boot
++ with kdb=on or you echo "1" > /proc/sys/kernel/kdb. This option also
++ works in reverse, if kdb is normally activated, you can boot with
++ kdb=off or echo "0" > /proc/sys/kernel/kdb to deactivate kdb. If
++ unsure, say N.
++
++config KDB_CONTINUE_CATASTROPHIC
++ int "KDB continues after catastrophic errors"
++ depends on KDB
++ default "0"
++ help
++ This integer controls the behaviour of kdb when the kernel gets a
++ catastrophic error, i.e. for a panic, oops, NMI or other watchdog
++ tripping. CONFIG_KDB_CONTINUE_CATASTROPHIC interacts with
++ /proc/sys/kernel/kdb and CONFIG_LKCD_DUMP (if your kernel has the
++ LKCD patch).
++ When KDB is active (/proc/sys/kernel/kdb == 1) and a catastrophic
++ error occurs, nothing extra happens until you type 'go'.
++ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time
++ you type 'go', kdb warns you. The second time you type 'go', KDB
++ tries to continue - no guarantees that the kernel is still usable.
++ CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue - no
++ guarantees that the kernel is still usable.
++ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. If your kernel has the LKCD
++ patch and LKCD is configured to take a dump then KDB forces a dump.
++ Whether or not a dump is taken, KDB forces a reboot.
++ When KDB is not active (/proc/sys/kernel/kdb == 0) and a catastrophic
++ error occurs, the following steps are automatic, no human
++ intervention is required.
++ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default) or 1. KDB attempts
++ to continue - no guarantees that the kernel is still usable.
++ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. If your kernel has the LKCD
++ patch and LKCD is configured to take a dump then KDB automatically
++ forces a dump. Whether or not a dump is taken, KDB forces a
++ reboot.
++ If you are not sure, say 0. Read Documentation/kdb/dump.txt before
++ setting to 2.
++
++config KDB_USB
++ bool "Support for USB Keyboard in KDB (OHCI only)"
++ depends on KDB && USB_OHCI_HCD
++ help
++ If you want to use kdb from an OHCI USB keyboard then say Y here. If you
++ say N then kdb can only be used from a PC (AT) keyboard or a serial
++ console.
++
+ endmenu
+diff -Nurp linux-2.6.22-590/arch/i386/kdb/ChangeLog linux-2.6.22-600/arch/i386/kdb/ChangeLog
+--- linux-2.6.22-590/arch/i386/kdb/ChangeLog 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/i386/kdb/ChangeLog 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,805 @@
++2007-07-26 Keith Owens <kaos@sgi.com>
++
++ * New x86 backtrace code.
++ * kdb v4.4-2.6.22-i386-2.
++
++2007-07-09 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-i386-1.
++
++2007-07-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc7-i386-1.
++
++2007-06-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc5-i386-1.
++
++2007-06-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc4-i386-1.
++
++2007-05-28 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc3-i386-1.
++
++2007-05-22 Keith Owens <kaos@sgi.com>
++
++ * Register KDBENTER_VECTOR early on the boot cpu.
++ * kdb v4.4-2.6.22-rc2-i386-2.
++
++2007-05-22 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc2-i386-1.
++
++2007-05-22 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc1-i386-1.
++
++2007-05-17 Keith Owens <kaos@sgi.com>
++
++ * Update dumpregs comments for rdmsr and wrmsr commands.
++ Bernardo Innocenti.
++ * kdb v4.4-2.6.21-i386-3.
++
++2007-05-15 Keith Owens <kaos@sgi.com>
++
++ * Change kdba_late_init to kdba_arch_init so KDB_ENTER() can be used
++ earlier.
++ * kdb v4.4-2.6.21-i386-2.
++
++2007-04-29 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-i386-1.
++
++2007-04-16 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc7-i386-1.
++
++2007-04-10 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc6-i386-1.
++
++2007-04-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc5-i386-1.
++
++2007-03-19 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc4-i386-1.
++
++2007-03-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc3-i386-1.
++
++2007-03-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc2-i386-1.
++
++2007-03-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc1-i386-1.
++
++2007-03-01 Keith Owens <kaos@sgi.com>
++
++ * Remove sparse warnings.
++ * kdb v4.4-2.6.20-i386-3.
++
++2007-02-16 Keith Owens <kaos@sgi.com>
++
++ * Initialise variable bits of struct disassemble_info each time.
++ * kdb v4.4-2.6.20-i386-2.
++
++2007-02-06 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-i386-1.
++
++2007-02-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc7-i386-1.
++
++2007-01-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc4-i386-1.
++
++2007-01-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc3-i386-1.
++
++2006-12-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc1-i386-1.
++
++2006-11-30 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-i386-1.
++
++2006-11-27 Keith Owens <kaos@sgi.com>
++
++ * Only use VT keyboard if the command line allows it and ACPI indicates
++ that there is an i8042.
++ * kdb v4.4-2.6.19-rc6-i386-2.
++
++2006-11-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc6-i386-1.
++
++2006-11-09 Keith Owens <kaos@sgi.com>
++
++ * Change kdb() to fastcall.
++ * Add unwind info to kdb_call(). Steve Lord.
++ * Only use VT console if the command line allows it.
++ * kdb v4.4-2.6.19-rc5-i386-2.
++
++2006-11-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc5-i386-1.
++
++2006-11-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc4-i386-1.
++
++2006-10-24 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc3-i386-1.
++
++2006-10-24 Keith Owens <kaos@sgi.com>
++
++ * Remove redundant regs and envp parameters.
++ * kdb v4.4-2.6.19-rc2-i386-2.
++
++2006-10-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc2-i386-1.
++
++2006-10-11 Keith Owens <kaos@sgi.com>
++
++ * Move kdbm_x86.c from the i386 to the common KDB patch.
++ * Make the KDBENTER_VECTOR an interrupt gate instead of a trap gate, it
++ simplifies the code and disables interrupts on KDBENTER().
++ * Exclude the KDBENTER_VECTOR from irq assignment.
++ * kdb v4.4-2.6.19-rc1-i386-2.
++
++2006-10-09 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc1-i386-1.
++
++2006-10-06 Keith Owens <kaos@sgi.com>
++
++ * Remove #include <linux/config.h>
++ * kdb v4.4-2.6.18-i386-2.
++
++2006-09-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-i386-1.
++
++2006-09-15 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc7-i386-1.
++
++2006-08-30 Keith Owens <kaos@sgi.com>
++
++ * Add warning for problems when following alternate stacks.
++ * kdb v4.4-2.6.18-rc5-i386-3.
++
++2006-08-29 Keith Owens <kaos@sgi.com>
++
++ * Rewrite all backtrace code.
++ * kdb v4.4-2.6.18-rc5-i386-2.
++
++2006-08-28 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc5-i386-1.
++
++2006-08-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc4-i386-1.
++
++2006-08-04 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc3-i386-1.
++
++2006-07-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc2-i386-1.
++
++2006-07-12 Keith Owens <kaos@sgi.com>
++
++ * Remove dead KDB_REASON codes.
++ * sparse cleanups.
++ * kdb v4.4-2.6.18-rc1-i386-2.
++
++2006-07-07 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc1-i386-1.
++
++2006-07-04 Keith Owens <kaos@sgi.com>
++
++ * Make KDB rendezvous on i386 a two stage approach.
++ * Clean up generation of KDB interrupt code.
++ * Move smp_kdb_stop() and smp_kdb_interrupt() to kdbasupport.c.
++ * Move setting of interrupt traps to kdbasupport.c.
++ * Remove KDB hooks from arch/i386/kernel smp.c, smpboot.c, i8259.c,
++ io_apic.c.
++ * Add KDB_REASON_CPU_UP support.
++ * Move per cpu setup to kdba_cpu_up().
++ * Rework support for 4K stacks to make backtrace more accurate.
++ * Add BTSP option to get the full backtrace, including kdb routines.
++ * Delete kdba_enable_mce, architectures now do their own setup.
++ * Delete kdba_enable_lbr, kdba_disable_lbr, kdba_print_lbr,
++ page_fault_mca. Only ever implemented on x86, difficult to maintain
++ and rarely used in the field.
++ * Replace #ifdef KDB_HAVE_LONGJMP with #ifdef kdba_setjmp.
++ * kdb v4.4-2.6.17-i386-2.
++
++2006-06-19 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-i386-1.
++
++2006-05-25 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc5-i386-1.
++
++2006-05-15 Keith Owens <kaos@sgi.com>
++
++ * Refresh bfd related files from binutils 2.16.91.0.2.
++ * kdb v4.4-2.6.17-rc4-i386-2.
++
++2006-05-12 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc4-i386-1.
++
++2006-04-28 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc3-i386-1.
++
++2006-04-22 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc2-i386-1.
++
++2006-04-11 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc1-i386-1.
++
++2006-03-30 Keith Owens <kaos@sgi.com>
++
++ * Change CONFIG_LKCD to CONFIG_LKCD_DUMP.
++ * kdb v4.4-2.6.16-i386-3.
++
++2006-03-24 Keith Owens <kaos@sgi.com>
++
++ * Define a dummy kdba_wait_for_cpus().
++ * kdb v4.4-2.6.16-i386-2.
++
++2006-03-21 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.16-i386-1.
++
++2006-03-14 Nathan Scott <nathans@sgi.com>
++
++ * kdb v4.4-2.6.16-rc6-i386-1.
++
++2006-02-28 Nathan Scott <nathans@sgi.com>
++
++ * kdb v4.4-2.6.16-rc5-i386-1.
++
++2006-02-20 Nathan Scott <nathans@sgi.com>
++
++ * kdb v4.4-2.6.16-rc4-i386-1.
++
++2006-02-06 Keith Owens <kaos@sgi.com>
++
++ * Change CONFIG_CRASH_DUMP to CONFIG_LKCD.
++ * kdb v4.4-2.6.16-rc2-i386-2.
++
++2006-02-06 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.16-rc2-i386-1.
++
++2006-01-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.16-rc1-i386-1.
++
++2006-01-08 Keith Owens <kaos@sgi.com>
++
++ * Add DIE_KDEBUG_ENTER and DIE_KDEBUG_LEAVE to notify_die.
++ * kdb v4.4-2.6.15-i386-2.
++
++2006-01-04 Keith Owens <kaos@sgi.com>
++
++ * Remove some inlines and the last vestige of CONFIG_NUMA_REPLICATE.
++ * Read the keyboard acknowledgment after sending a character. SuSE
++ Bugzilla 60240.
++ * kdb v4.4-2.6.15-i386-1.
++
++2005-12-25 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc7-i386-1.
++
++2005-12-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc6-i386-1.
++
++2005-12-05 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc5-i386-1.
++
++2005-12-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc4-i386-1.
++
++2005-11-30 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc3-i386-1.
++
++2005-11-21 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc2-i386-1.
++
++2005-11-15 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc1-i386-1.
++
++2005-10-28 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.14-i386-1.
++
++2005-10-21 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.14-rc5-i386-1.
++
++2005-10-11 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.14-rc4-i386-1.
++
++2005-10-04 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.14-rc3-i386-1.
++
++2005-09-21 Keith Owens <kaos@sgi.com>
++
++ * Support kdb_current_task in register display and modify commands.
++ * kdb v4.4-2.6.14-rc2-i386-1.
++
++2005-09-20 Keith Owens <kaos@sgi.com>
++
++ * Remove use of __STDC_VERSION__ in ansidecl.h.
++ * kdb v4.4-2.6.14-rc1-i386-1.
++
++2005-08-29 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-i386-1.
++
++2005-08-24 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-rc7-i386-1.
++
++2005-08-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-rc6-i386-1.
++
++2005-08-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-rc5-i386-1.
++
++2005-07-30 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-rc4-i386-1.
++
++2005-07-22 Keith Owens <kaos@sgi.com>
++
++ * Compile fix for kprobes.
++ * kdb v4.4-2.6.13-rc3-i386-2.
++
++2005-07-19 Keith Owens <kaos@sgi.com>
++
++ * Add support for USB keyboard (OHCI only). Aaron Young, SGI.
++ * kdb v4.4-2.6.13-rc3-i386-1.
++
++2005-07-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-rc2-i386-1.
++
++2005-07-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-rc1-i386-1.
++
++2005-06-19 Keith Owens <kaos@sgi.com>
++
++ * gcc 4 compile fix, remove extern kdb_hardbreaks. Steve Lord.
++ * kdb v4.4-2.6.12-i386-2.
++
++2005-06-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.12-i386-1.
++
++2005-06-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.12-rc6-i386-1.
++
++2005-05-25 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.12-rc5-i386-1.
++
++2005-05-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.12-rc4-i386-1.
++
++2005-04-21 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.12-rc3-i386-1.
++
++2005-04-06 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.12-rc2-i386-1.
++
++2005-03-29 Keith Owens <kaos@sgi.com>
++
++ * Replace __copy_to_user with __copy_to_user_inatomic.
++ * kdb v4.4-2.6.12-rc1-i386-1.
++
++2005-03-08 Keith Owens <kaos@sgi.com>
++
++ * Coexistence patches for lkcd.
++ * kdb v4.4-2.6.11-i386-2.
++
++2005-03-03 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.11-i386-1.
++
++2005-02-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.11-rc4-i386-1.
++
++2005-02-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.11-rc3-bk4-i386-1.
++
++2005-02-03 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.11-rc3-i386-1.
++
++2005-01-27 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.11-rc2-i386-1.
++
++2005-01-12 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.11-rc1-i386-1.
++
++2004-12-25 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.10-i386-1.
++
++2004-12-07 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.10-rc3-i386-1.
++
++2004-11-23 Keith Owens <kaos@sgi.com>
++
++ * Coexist with asmlinkage/fastcall changes.
++ * kdb v4.4-2.6.10-rc2-i386-1.
++
++2004-10-29 Keith Owens <kaos@sgi.com>
++
++ * Handle change definitions for hard and soft irq context.
++ * Make stack switch in kdb backtrace look more like the oops output.
++ * kdb v4.4-2.6.10-rc1-i386-1.
++
++2004-10-19 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.9-i386-1.
++
++2004-10-12 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.9-rc4-i386-1.
++
++2004-10-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.9-rc3-i386-1.
++
++2004-09-30 Keith Owens <kaos@sgi.com>
++
++ * Add stackdepth command.
++ * Handle backtrace with separate soft and hard irq stacks
++ (CONFIG_4KSTACKS).
++ * Work around RESTORE_ALL macro, which can only be used once.
++ * Export kdba_dumpregs. Bryan Cardillo, UPenn.
++ * kdb v4.4-2.6.9-rc2-i386-2.
++
++2004-09-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.9-rc2-i386-1.
++
++2004-08-27 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.9-rc1-i386-1.
++
++2004-08-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.8-i386-1.
++
++2004-08-12 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.8-rc4-i386-1.
++
++2004-08-04 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.8-rc3-i386-1.
++
++2004-07-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.8-rc2-i386-1.
++
++2004-07-12 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.8-rc1-i386-1.
++
++2004-06-16 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.7-i386-1.
++
++2004-06-10 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.7-rc3-i386-1.
++
++2004-06-09 Keith Owens <kaos@sgi.com>
++
++ * Namespace clean up. Mark code/variables as static when it is only
++ used in one file, delete dead code/variables.
++ * kdb v4.4-2.6.7-rc2-i386-3.
++
++2004-06-08 Keith Owens <kaos@sgi.com>
++
++ * Whitespace clean up, no code changes.
++ * kdb v4.4-2.6.7-rc2-i386-2.
++
++2004-06-07 Keith Owens <kaos@sgi.com>
++
++ * Force KALLSYMS and KALLSYMS_ALL for CONFIG_KDB.
++ * kdb v4.4-2.6.7-rc2-i386-1.
++
++2004-06-06 Keith Owens <kaos@sgi.com>
++
++ * Correct Kconfig help text.
++ * Coexist with CONFIG_REGPARM.
++ * Add standard archkdb commands.
++ * Move kdb_{get,put}userarea_size definitions to linux/kdb.h.
++ * kdb v4.4-2.6.6-i386-2.
++
++2004-05-23 Keith Owens <kaos@sgi.com>
++
++ * Move bfd.h and ansidecl.h from arch/$(ARCH)/kdb to include/asm-$(ARCH).
++ * Update copyright notices.
++ * kdb v4.4-2.6.6-i386-1.
++
++2004-05-10 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.6-i386-1.
++
++2004-05-06 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.6-rc3-i386-1.
++
++2004-05-06 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.6-rc2-i386-1.
++
++2004-04-30 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.6-rc1-i386-1.
++
++2004-04-05 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6-5-i386-1.
++
++2004-02-29 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6-4-rc1-i386-1.
++
++2004-02-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6-3-i386-1.
++
++2004-02-17 Keith Owens <kaos@sgi.com>
++
++ * Pick up changes from Jim Houston for 2.6.
++ * Sync with kdb v4.3-2.4.25-rc1-i386-1.
++ * Adjust for LDT changes in i386 mainline.
++ * Convert longjmp buffers from static to dynamic allocation, for large
++ cpu counts.
++ * Do not use USB keyboard if it has not been probed.
++ * Do not print section data, 2.6 kallsyms does not support sections :(.
++ * kdb v4.3-2.6-3-rc3-i386-1.
++
++2003-08-29 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.4.22-i386-1.
++
++2003-08-05 Keith Owens <kaos@sgi.com>
++
++ * Remove duplicate setting of trap for machine_check.
++ * Only reset keyboard when CONFIG_VT_CONSOLE is defined.
++
++2003-07-27 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.4.22-pre8-i386-5.
++
++2003-07-20 Keith Owens <kaos@sgi.com>
++
++ * Remove compile warning on x86 commands.
++ * kdb v4.3-2.4.21-i386-5.
++
++2003-07-08 Keith Owens <kaos@sgi.com>
++
++ * Add new x86 commands - rdv, gdt, idt, ldt, ldtp, ptex.
++ Vamsi Krishna S., IBM.
++ * kdb v4.3-2.4.21-i386-4.
++
++2003-07-01 Keith Owens <kaos@sgi.com>
++
++ * Convert kdba_find_return() to two passes to reduce false positives.
++ * Correct jmp disp8 offset calculation for out of line lock code.
++ * Use NMI for kdb IPI in clustered APIC mode. Sachin Sant, IBM.
++ * kdb v4.3-2.4.21-i386-3.
++
++2003-06-23 Keith Owens <kaos@sgi.com>
++
++ * Sync with XFS 2.4.21 tree.
++ * kdb v4.3-2.4.21-i386-2.
++
++2003-06-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.4.21-i386-1.
++
++2003-06-20 Keith Owens <kaos@sgi.com>
++
++ * Add CONFIG_KDB_CONTINUE_CATASTROPHIC.
++ * Correct KDB_ENTER() definition.
++ * kdb v4.3-2.4.20-i386-1.
++
++2003-05-02 Keith Owens <kaos@sgi.com>
++
++ * Add kdba_fp_value().
++ * Limit backtrace size to catch loops.
++ * Add read/write access to user pages. Vamsi Krishna S., IBM
++ * Clean up USB keyboard support. Steven Dake.
++ * kdb v4.2-2.4.20-i386-1.
++
++2003-04-04 Keith Owens <kaos@sgi.com>
++
++ * Workarounds for scheduler bugs.
++ * kdb v4.1-2.4.20-i386-1.
++
++2003-03-16 Keith Owens <kaos@sgi.com>
++
++ * Each cpu saves its state as it enters kdb or before it enters code
++ which cannot call kdb, converting kdb from a pull to a push model.
++ * Clean up kdb interaction with CONFIG_SERIAL_CONSOLE.
++ * Removal of special cases for i386 backtrace from common code
++ simplifies the architecture code.
++ * Add command to dump i386 struct pt_regs.
++ * kdb v4.0-2.4.20-i386-1.
++
++2003-02-03 Keith Owens <kaos@sgi.com>
++
++ * Register kdb commands early.
++ * Handle KDB_ENTER() when kdb=off.
++ * Optimize __kdba_getarea_size when width is a constant.
++ * Decode oops via kallsyms if it is available.
++ * Update copyright notices to 2003.
++ * Handle call *disp32(%reg) in backtrace.
++ * Correct keyboard freeze. Ashish Kalra.
++ * Add command history and editing. Sonic Zhang.
++ * kdb_toggleled is conditional on KDB_BLINK_LED. Bernhard Fischer.
++ * Allow tab on serial line for symbol completion.
++ * Ignore KDB_ENTER() when kdb is already running.
++ * kdb v3.0-2.4.20-i386-1.
++
++2002-11-29 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.20.
++ * kdb v2.5-2.4.20-i386-1.
++
++2002-11-14 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.20-rc1.
++ * kdb v2.5-2.4.20-rc1-i386-1.
++
++2002-11-14 Keith Owens <kaos@sgi.com>
++
++ * General clean up of handling for breakpoints and single stepping over
++ software breakpoints.
++ * Accept ff 1x as well as ff dx for call *(%reg) in backtrace.
++ * kdb v2.5-2.4.19-i386-1.
++
++2002-11-01 Keith Owens <kaos@sgi.com>
++
++ * Prevent SMP IRQ overwriting KDB_ENTER().
++ * kdb v2.4-2.4.19-i386-2.
++
++2002-10-31 Keith Owens <kaos@sgi.com>
++
++ * Avoid KDB_VECTOR conflict with DUMP_VECTOR.
++ * Remove kdb_eframe_t.
++ * Sanity check if we have pt_regs.
++ * Remove kdba_getcurrentframe().
++ * Reinstate missing nmi_watchdog/kdb hook.
++ * kdb v2.4-2.4.19-i386-1.
++
++2002-10-17 Keith Owens <kaos@sgi.com>
++
++ * Correct compile with CONFIG_VT_CONSOLE=n.
++ * kdb v2.3-2.4.19-i386-5.
++
++2002-10-04 Keith Owens <kaos@sgi.com>
++
++ * Add USB keyboard option.
++ * Minimize differences between patches for 2.4 and 2.5 kernels.
++ * kdb v2.3-2.4.19-i386-4.
++
++2002-08-10 Keith Owens <kaos@sgi.com>
++
++ * Replace kdb_port with kdb_serial to support memory mapped I/O.
++ Note: This needs kdb v2.3-2.4.19-common-2 or later.
++ * kdb v2.3-2.4.19-i386-3.
++
++2002-08-09 Keith Owens <kaos@sgi.com>
++
++ * Use -fno-optimize-sibling-calls for kdb if gcc supports it.
++ * .text.lock does not consume an activation frame.
++ * kdb v2.3-2.4.19-i386-2.
++
++2002-08-07 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19.
++ * Remove individual SGI copyrights, the general SGI copyright applies.
++ * New .text.lock name. Hugh Dickins.
++ * Set KERNEL_CS in kdba_getcurrentframe. Hugh Dickins.
++ * Clean up disassembly layout. Hugh Dickins, Keith Owens.
++ * Replace hard coded stack size with THREAD_SIZE. Hugh Dickins.
++ * Better stack layout on bt with no frame pointers. Hugh Dickins.
++ * Make i386 IO breakpoints (bpha <address> IO) work again.
++ Martin Wilck, Keith Owens.
++ * Remove fixed KDB_MAX_COMMANDS size.
++ * Add set_fs() around __copy_to_user on kernel addresses.
++ Randolph Chung.
++ * Position i386 for CONFIG_NUMA_REPLICATE.
++ * kdb v2.3-2.4.19-i386-1.
++
++2002-07-09 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19-rc1.
++
++2002-06-14 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19-pre10.
++ * kdb v2.1-2.4.19-pre10-i386-1.
++
++2002-04-09 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19-pre6.
++ * kdb v2.1-2.4.19-pre6-i386-1.
++
++2002-02-26 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.18.
++ * kdb v2.1-2.4.18-i386-1.
++
++2002-01-18 Keith Owens <kaos@sgi.com>
++
++ * Use new kdb_get/put functions.
++ * Define kdba_{get,put}area_size functions for i386.
++ * Remove over-engineered dblist callback functions.
++ * Correctly handle failing call disp32 in backtrace.
++ * Remove bp_instvalid flag, redundant code.
++ * Remove dead code.
++ * kdb v2.1-2.4.17-i386-1.
++
++2002-01-04 Keith Owens <kaos@sgi.com>
++
++ * Sync xfs <-> kdb i386 code.
++
++2001-12-22 Keith Owens <kaos@sgi.com>
++
++ * Split kdb for i386 as kdb v2.0-2.4.17-i386-1.
+diff -Nurp linux-2.6.22-590/arch/i386/kdb/i386-dis.c linux-2.6.22-600/arch/i386/kdb/i386-dis.c
+--- linux-2.6.22-590/arch/i386/kdb/i386-dis.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/i386/kdb/i386-dis.c 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,4686 @@
++/* Print i386 instructions for GDB, the GNU debugger.
++ Copyright 1988, 1989, 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
++ 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
++
++/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
++ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as
++ * required.
++ * Keith Owens <kaos@sgi.com> 15 May 2006
++ */
++
++/* 80386 instruction printer by Pace Willisson (pace@prep.ai.mit.edu)
++ July 1988
++ modified by John Hassey (hassey@dg-rtp.dg.com)
++ x86-64 support added by Jan Hubicka (jh@suse.cz)
++ VIA PadLock support by Michal Ludvig (mludvig@suse.cz). */
++
++/* The main tables describing the instructions is essentially a copy
++ of the "Opcode Map" chapter (Appendix A) of the Intel 80386
++ Programmers Manual. Usually, there is a capital letter, followed
++ by a small letter. The capital letter tell the addressing mode,
++ and the small letter tells about the operand size. Refer to
++ the Intel manual for details. */
++
++#ifdef __KERNEL__
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/dis-asm.h>
++#include <linux/kdb.h>
++#define abort() BUG()
++#else /* __KERNEL__ */
++#include "dis-asm.h"
++#include "sysdep.h"
++#include "opintl.h"
++#endif /* __KERNEL__ */
++
++#define MAXLEN 20
++
++#ifndef __KERNEL__
++#include <setjmp.h>
++#endif /* __KERNEL__ */
++
++#ifndef UNIXWARE_COMPAT
++/* Set non-zero for broken, compatible instructions. Set to zero for
++ non-broken opcodes. */
++#define UNIXWARE_COMPAT 1
++#endif
++
++static int fetch_data (struct disassemble_info *, bfd_byte *);
++static void ckprefix (void);
++static const char *prefix_name (int, int);
++static int print_insn (bfd_vma, disassemble_info *);
++static void dofloat (int);
++static void OP_ST (int, int);
++static void OP_STi (int, int);
++static int putop (const char *, int);
++static void oappend (const char *);
++static void append_seg (void);
++static void OP_indirE (int, int);
++static void print_operand_value (char *, int, bfd_vma);
++static void OP_E (int, int);
++static void OP_G (int, int);
++static bfd_vma get64 (void);
++static bfd_signed_vma get32 (void);
++static bfd_signed_vma get32s (void);
++static int get16 (void);
++static void set_op (bfd_vma, int);
++static void OP_REG (int, int);
++static void OP_IMREG (int, int);
++static void OP_I (int, int);
++static void OP_I64 (int, int);
++static void OP_sI (int, int);
++static void OP_J (int, int);
++static void OP_SEG (int, int);
++static void OP_DIR (int, int);
++static void OP_OFF (int, int);
++static void OP_OFF64 (int, int);
++static void ptr_reg (int, int);
++static void OP_ESreg (int, int);
++static void OP_DSreg (int, int);
++static void OP_C (int, int);
++static void OP_D (int, int);
++static void OP_T (int, int);
++static void OP_Rd (int, int);
++static void OP_MMX (int, int);
++static void OP_XMM (int, int);
++static void OP_EM (int, int);
++static void OP_EX (int, int);
++static void OP_MS (int, int);
++static void OP_XS (int, int);
++static void OP_M (int, int);
++static void OP_VMX (int, int);
++static void OP_0fae (int, int);
++static void OP_0f07 (int, int);
++static void NOP_Fixup (int, int);
++static void OP_3DNowSuffix (int, int);
++static void OP_SIMD_Suffix (int, int);
++static void SIMD_Fixup (int, int);
++static void PNI_Fixup (int, int);
++static void SVME_Fixup (int, int);
++static void INVLPG_Fixup (int, int);
++static void BadOp (void);
++static void SEG_Fixup (int, int);
++static void VMX_Fixup (int, int);
++
++struct dis_private {
++ /* Points to first byte not fetched. */
++ bfd_byte *max_fetched;
++ bfd_byte the_buffer[MAXLEN];
++ bfd_vma insn_start;
++ int orig_sizeflag;
++#ifndef __KERNEL__
++ jmp_buf bailout;
++#endif /* __KERNEL__ */
++};
++
++/* The opcode for the fwait instruction, which we treat as a prefix
++ when we can. */
++#define FWAIT_OPCODE (0x9b)
++
++/* Set to 1 for 64bit mode disassembly. */
++static int mode_64bit;
++
++/* Flags for the prefixes for the current instruction. See below. */
++static int prefixes;
++
++/* REX prefix the current instruction. See below. */
++static int rex;
++/* Bits of REX we've already used. */
++static int rex_used;
++#define REX_MODE64 8
++#define REX_EXTX 4
++#define REX_EXTY 2
++#define REX_EXTZ 1
++/* Mark parts used in the REX prefix. When we are testing for
++ empty prefix (for 8bit register REX extension), just mask it
++ out. Otherwise test for REX bit is excuse for existence of REX
++ only in case value is nonzero. */
++#define USED_REX(value) \
++ { \
++ if (value) \
++ rex_used |= (rex & value) ? (value) | 0x40 : 0; \
++ else \
++ rex_used |= 0x40; \
++ }
++
++/* Flags for prefixes which we somehow handled when printing the
++ current instruction. */
++static int used_prefixes;
++
++/* Flags stored in PREFIXES. */
++#define PREFIX_REPZ 1
++#define PREFIX_REPNZ 2
++#define PREFIX_LOCK 4
++#define PREFIX_CS 8
++#define PREFIX_SS 0x10
++#define PREFIX_DS 0x20
++#define PREFIX_ES 0x40
++#define PREFIX_FS 0x80
++#define PREFIX_GS 0x100
++#define PREFIX_DATA 0x200
++#define PREFIX_ADDR 0x400
++#define PREFIX_FWAIT 0x800
++
++/* Make sure that bytes from INFO->PRIVATE_DATA->BUFFER (inclusive)
++ to ADDR (exclusive) are valid. Returns 1 for success, longjmps
++ on error. */
++#define FETCH_DATA(info, addr) \
++ ((addr) <= ((struct dis_private *) (info->private_data))->max_fetched \
++ ? 1 : fetch_data ((info), (addr)))
++
++static int
++fetch_data (struct disassemble_info *info, bfd_byte *addr)
++{
++ int status;
++ struct dis_private *priv = (struct dis_private *) info->private_data;
++ bfd_vma start = priv->insn_start + (priv->max_fetched - priv->the_buffer);
++
++ status = (*info->read_memory_func) (start,
++ priv->max_fetched,
++ addr - priv->max_fetched,
++ info);
++ if (status != 0)
++ {
++ /* If we did manage to read at least one byte, then
++ print_insn_i386 will do something sensible. Otherwise, print
++ an error. We do that here because this is where we know
++ STATUS. */
++ if (priv->max_fetched == priv->the_buffer)
++ (*info->memory_error_func) (status, start, info);
++#ifndef __KERNEL__
++ longjmp (priv->bailout, 1);
++#else /* __KERNEL__ */
++ /* XXX - what to do? */
++ kdb_printf("Hmm. longjmp.\n");
++#endif /* __KERNEL__ */
++ }
++ else
++ priv->max_fetched = addr;
++ return 1;
++}
++
++#define XX NULL, 0
++
++#define Eb OP_E, b_mode
++#define Ev OP_E, v_mode
++#define Ed OP_E, d_mode
++#define Eq OP_E, q_mode
++#define Edq OP_E, dq_mode
++#define Edqw OP_E, dqw_mode
++#define indirEv OP_indirE, branch_v_mode
++#define indirEp OP_indirE, f_mode
++#define Em OP_E, m_mode
++#define Ew OP_E, w_mode
++#define Ma OP_E, v_mode
++#define M OP_M, 0 /* lea, lgdt, etc. */
++#define Mp OP_M, f_mode /* 32 or 48 bit memory operand for LDS, LES etc */
++#define Gb OP_G, b_mode
++#define Gv OP_G, v_mode
++#define Gd OP_G, d_mode
++#define Gdq OP_G, dq_mode
++#define Gm OP_G, m_mode
++#define Gw OP_G, w_mode
++#define Rd OP_Rd, d_mode
++#define Rm OP_Rd, m_mode
++#define Ib OP_I, b_mode
++#define sIb OP_sI, b_mode /* sign extened byte */
++#define Iv OP_I, v_mode
++#define Iq OP_I, q_mode
++#define Iv64 OP_I64, v_mode
++#define Iw OP_I, w_mode
++#define I1 OP_I, const_1_mode
++#define Jb OP_J, b_mode
++#define Jv OP_J, v_mode
++#define Cm OP_C, m_mode
++#define Dm OP_D, m_mode
++#define Td OP_T, d_mode
++#define Sv SEG_Fixup, v_mode
++
++#define RMeAX OP_REG, eAX_reg
++#define RMeBX OP_REG, eBX_reg
++#define RMeCX OP_REG, eCX_reg
++#define RMeDX OP_REG, eDX_reg
++#define RMeSP OP_REG, eSP_reg
++#define RMeBP OP_REG, eBP_reg
++#define RMeSI OP_REG, eSI_reg
++#define RMeDI OP_REG, eDI_reg
++#define RMrAX OP_REG, rAX_reg
++#define RMrBX OP_REG, rBX_reg
++#define RMrCX OP_REG, rCX_reg
++#define RMrDX OP_REG, rDX_reg
++#define RMrSP OP_REG, rSP_reg
++#define RMrBP OP_REG, rBP_reg
++#define RMrSI OP_REG, rSI_reg
++#define RMrDI OP_REG, rDI_reg
++#define RMAL OP_REG, al_reg
++#define RMAL OP_REG, al_reg
++#define RMCL OP_REG, cl_reg
++#define RMDL OP_REG, dl_reg
++#define RMBL OP_REG, bl_reg
++#define RMAH OP_REG, ah_reg
++#define RMCH OP_REG, ch_reg
++#define RMDH OP_REG, dh_reg
++#define RMBH OP_REG, bh_reg
++#define RMAX OP_REG, ax_reg
++#define RMDX OP_REG, dx_reg
++
++#define eAX OP_IMREG, eAX_reg
++#define eBX OP_IMREG, eBX_reg
++#define eCX OP_IMREG, eCX_reg
++#define eDX OP_IMREG, eDX_reg
++#define eSP OP_IMREG, eSP_reg
++#define eBP OP_IMREG, eBP_reg
++#define eSI OP_IMREG, eSI_reg
++#define eDI OP_IMREG, eDI_reg
++#define AL OP_IMREG, al_reg
++#define AL OP_IMREG, al_reg
++#define CL OP_IMREG, cl_reg
++#define DL OP_IMREG, dl_reg
++#define BL OP_IMREG, bl_reg
++#define AH OP_IMREG, ah_reg
++#define CH OP_IMREG, ch_reg
++#define DH OP_IMREG, dh_reg
++#define BH OP_IMREG, bh_reg
++#define AX OP_IMREG, ax_reg
++#define DX OP_IMREG, dx_reg
++#define indirDX OP_IMREG, indir_dx_reg
++
++#define Sw OP_SEG, w_mode
++#define Ap OP_DIR, 0
++#define Ob OP_OFF, b_mode
++#define Ob64 OP_OFF64, b_mode
++#define Ov OP_OFF, v_mode
++#define Ov64 OP_OFF64, v_mode
++#define Xb OP_DSreg, eSI_reg
++#define Xv OP_DSreg, eSI_reg
++#define Yb OP_ESreg, eDI_reg
++#define Yv OP_ESreg, eDI_reg
++#define DSBX OP_DSreg, eBX_reg
++
++#define es OP_REG, es_reg
++#define ss OP_REG, ss_reg
++#define cs OP_REG, cs_reg
++#define ds OP_REG, ds_reg
++#define fs OP_REG, fs_reg
++#define gs OP_REG, gs_reg
++
++#define MX OP_MMX, 0
++#define XM OP_XMM, 0
++#define EM OP_EM, v_mode
++#define EX OP_EX, v_mode
++#define MS OP_MS, v_mode
++#define XS OP_XS, v_mode
++#define VM OP_VMX, q_mode
++#define OPSUF OP_3DNowSuffix, 0
++#define OPSIMD OP_SIMD_Suffix, 0
++
++#define cond_jump_flag NULL, cond_jump_mode
++#define loop_jcxz_flag NULL, loop_jcxz_mode
++
++/* bits in sizeflag */
++#define SUFFIX_ALWAYS 4
++#define AFLAG 2
++#define DFLAG 1
++
++#define b_mode 1 /* byte operand */
++#define v_mode 2 /* operand size depends on prefixes */
++#define w_mode 3 /* word operand */
++#define d_mode 4 /* double word operand */
++#define q_mode 5 /* quad word operand */
++#define t_mode 6 /* ten-byte operand */
++#define x_mode 7 /* 16-byte XMM operand */
++#define m_mode 8 /* d_mode in 32bit, q_mode in 64bit mode. */
++#define cond_jump_mode 9
++#define loop_jcxz_mode 10
++#define dq_mode 11 /* operand size depends on REX prefixes. */
++#define dqw_mode 12 /* registers like dq_mode, memory like w_mode. */
++#define f_mode 13 /* 4- or 6-byte pointer operand */
++#define const_1_mode 14
++#define branch_v_mode 15 /* v_mode for branch. */
++
++#define es_reg 100
++#define cs_reg 101
++#define ss_reg 102
++#define ds_reg 103
++#define fs_reg 104
++#define gs_reg 105
++
++#define eAX_reg 108
++#define eCX_reg 109
++#define eDX_reg 110
++#define eBX_reg 111
++#define eSP_reg 112
++#define eBP_reg 113
++#define eSI_reg 114
++#define eDI_reg 115
++
++#define al_reg 116
++#define cl_reg 117
++#define dl_reg 118
++#define bl_reg 119
++#define ah_reg 120
++#define ch_reg 121
++#define dh_reg 122
++#define bh_reg 123
++
++#define ax_reg 124
++#define cx_reg 125
++#define dx_reg 126
++#define bx_reg 127
++#define sp_reg 128
++#define bp_reg 129
++#define si_reg 130
++#define di_reg 131
++
++#define rAX_reg 132
++#define rCX_reg 133
++#define rDX_reg 134
++#define rBX_reg 135
++#define rSP_reg 136
++#define rBP_reg 137
++#define rSI_reg 138
++#define rDI_reg 139
++
++#define indir_dx_reg 150
++
++#define FLOATCODE 1
++#define USE_GROUPS 2
++#define USE_PREFIX_USER_TABLE 3
++#define X86_64_SPECIAL 4
++
++#define FLOAT NULL, NULL, FLOATCODE, NULL, 0, NULL, 0
++
++#define GRP1b NULL, NULL, USE_GROUPS, NULL, 0, NULL, 0
++#define GRP1S NULL, NULL, USE_GROUPS, NULL, 1, NULL, 0
++#define GRP1Ss NULL, NULL, USE_GROUPS, NULL, 2, NULL, 0
++#define GRP2b NULL, NULL, USE_GROUPS, NULL, 3, NULL, 0
++#define GRP2S NULL, NULL, USE_GROUPS, NULL, 4, NULL, 0
++#define GRP2b_one NULL, NULL, USE_GROUPS, NULL, 5, NULL, 0
++#define GRP2S_one NULL, NULL, USE_GROUPS, NULL, 6, NULL, 0
++#define GRP2b_cl NULL, NULL, USE_GROUPS, NULL, 7, NULL, 0
++#define GRP2S_cl NULL, NULL, USE_GROUPS, NULL, 8, NULL, 0
++#define GRP3b NULL, NULL, USE_GROUPS, NULL, 9, NULL, 0
++#define GRP3S NULL, NULL, USE_GROUPS, NULL, 10, NULL, 0
++#define GRP4 NULL, NULL, USE_GROUPS, NULL, 11, NULL, 0
++#define GRP5 NULL, NULL, USE_GROUPS, NULL, 12, NULL, 0
++#define GRP6 NULL, NULL, USE_GROUPS, NULL, 13, NULL, 0
++#define GRP7 NULL, NULL, USE_GROUPS, NULL, 14, NULL, 0
++#define GRP8 NULL, NULL, USE_GROUPS, NULL, 15, NULL, 0
++#define GRP9 NULL, NULL, USE_GROUPS, NULL, 16, NULL, 0
++#define GRP10 NULL, NULL, USE_GROUPS, NULL, 17, NULL, 0
++#define GRP11 NULL, NULL, USE_GROUPS, NULL, 18, NULL, 0
++#define GRP12 NULL, NULL, USE_GROUPS, NULL, 19, NULL, 0
++#define GRP13 NULL, NULL, USE_GROUPS, NULL, 20, NULL, 0
++#define GRP14 NULL, NULL, USE_GROUPS, NULL, 21, NULL, 0
++#define GRPAMD NULL, NULL, USE_GROUPS, NULL, 22, NULL, 0
++#define GRPPADLCK1 NULL, NULL, USE_GROUPS, NULL, 23, NULL, 0
++#define GRPPADLCK2 NULL, NULL, USE_GROUPS, NULL, 24, NULL, 0
++
++#define PREGRP0 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 0, NULL, 0
++#define PREGRP1 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 1, NULL, 0
++#define PREGRP2 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 2, NULL, 0
++#define PREGRP3 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 3, NULL, 0
++#define PREGRP4 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 4, NULL, 0
++#define PREGRP5 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 5, NULL, 0
++#define PREGRP6 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 6, NULL, 0
++#define PREGRP7 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 7, NULL, 0
++#define PREGRP8 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 8, NULL, 0
++#define PREGRP9 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 9, NULL, 0
++#define PREGRP10 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 10, NULL, 0
++#define PREGRP11 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 11, NULL, 0
++#define PREGRP12 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 12, NULL, 0
++#define PREGRP13 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 13, NULL, 0
++#define PREGRP14 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 14, NULL, 0
++#define PREGRP15 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 15, NULL, 0
++#define PREGRP16 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 16, NULL, 0
++#define PREGRP17 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 17, NULL, 0
++#define PREGRP18 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 18, NULL, 0
++#define PREGRP19 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 19, NULL, 0
++#define PREGRP20 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 20, NULL, 0
++#define PREGRP21 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 21, NULL, 0
++#define PREGRP22 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 22, NULL, 0
++#define PREGRP23 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 23, NULL, 0
++#define PREGRP24 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 24, NULL, 0
++#define PREGRP25 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 25, NULL, 0
++#define PREGRP26 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 26, NULL, 0
++#define PREGRP27 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 27, NULL, 0
++#define PREGRP28 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 28, NULL, 0
++#define PREGRP29 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 29, NULL, 0
++#define PREGRP30 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 30, NULL, 0
++#define PREGRP31 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 31, NULL, 0
++#define PREGRP32 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 32, NULL, 0
++
++#define X86_64_0 NULL, NULL, X86_64_SPECIAL, NULL, 0, NULL, 0
++
++typedef void (*op_rtn) (int bytemode, int sizeflag);
++
++struct dis386 {
++ const char *name;
++ op_rtn op1;
++ int bytemode1;
++ op_rtn op2;
++ int bytemode2;
++ op_rtn op3;
++ int bytemode3;
++};
++
++/* Upper case letters in the instruction names here are macros.
++ 'A' => print 'b' if no register operands or suffix_always is true
++ 'B' => print 'b' if suffix_always is true
++ 'C' => print 's' or 'l' ('w' or 'd' in Intel mode) depending on operand
++ . size prefix
++ 'E' => print 'e' if 32-bit form of jcxz
++ 'F' => print 'w' or 'l' depending on address size prefix (loop insns)
++ 'H' => print ",pt" or ",pn" branch hint
++ 'I' => honor following macro letter even in Intel mode (implemented only
++ . for some of the macro letters)
++ 'J' => print 'l'
++ 'L' => print 'l' if suffix_always is true
++ 'N' => print 'n' if instruction has no wait "prefix"
++ 'O' => print 'd', or 'o'
++ 'P' => print 'w', 'l' or 'q' if instruction has an operand size prefix,
++ . or suffix_always is true. print 'q' if rex prefix is present.
++ 'Q' => print 'w', 'l' or 'q' if no register operands or suffix_always
++ . is true
++ 'R' => print 'w', 'l' or 'q' ("wd" or "dq" in intel mode)
++ 'S' => print 'w', 'l' or 'q' if suffix_always is true
++ 'T' => print 'q' in 64bit mode and behave as 'P' otherwise
++ 'U' => print 'q' in 64bit mode and behave as 'Q' otherwise
++ 'W' => print 'b' or 'w' ("w" or "de" in intel mode)
++ 'X' => print 's', 'd' depending on data16 prefix (for XMM)
++ 'Y' => 'q' if instruction has an REX 64bit overwrite prefix
++
++ Many of the above letters print nothing in Intel mode. See "putop"
++ for the details.
++
++ Braces '{' and '}', and vertical bars '|', indicate alternative
++ mnemonic strings for AT&T, Intel, X86_64 AT&T, and X86_64 Intel
++ modes. In cases where there are only two alternatives, the X86_64
++ instruction is reserved, and "(bad)" is printed.
++*/
++
++static const struct dis386 dis386[] = {
++ /* 00 */
++ { "addB", Eb, Gb, XX },
++ { "addS", Ev, Gv, XX },
++ { "addB", Gb, Eb, XX },
++ { "addS", Gv, Ev, XX },
++ { "addB", AL, Ib, XX },
++ { "addS", eAX, Iv, XX },
++ { "push{T|}", es, XX, XX },
++ { "pop{T|}", es, XX, XX },
++ /* 08 */
++ { "orB", Eb, Gb, XX },
++ { "orS", Ev, Gv, XX },
++ { "orB", Gb, Eb, XX },
++ { "orS", Gv, Ev, XX },
++ { "orB", AL, Ib, XX },
++ { "orS", eAX, Iv, XX },
++ { "push{T|}", cs, XX, XX },
++ { "(bad)", XX, XX, XX }, /* 0x0f extended opcode escape */
++ /* 10 */
++ { "adcB", Eb, Gb, XX },
++ { "adcS", Ev, Gv, XX },
++ { "adcB", Gb, Eb, XX },
++ { "adcS", Gv, Ev, XX },
++ { "adcB", AL, Ib, XX },
++ { "adcS", eAX, Iv, XX },
++ { "push{T|}", ss, XX, XX },
++ { "pop{T|}", ss, XX, XX },
++ /* 18 */
++ { "sbbB", Eb, Gb, XX },
++ { "sbbS", Ev, Gv, XX },
++ { "sbbB", Gb, Eb, XX },
++ { "sbbS", Gv, Ev, XX },
++ { "sbbB", AL, Ib, XX },
++ { "sbbS", eAX, Iv, XX },
++ { "push{T|}", ds, XX, XX },
++ { "pop{T|}", ds, XX, XX },
++ /* 20 */
++ { "andB", Eb, Gb, XX },
++ { "andS", Ev, Gv, XX },
++ { "andB", Gb, Eb, XX },
++ { "andS", Gv, Ev, XX },
++ { "andB", AL, Ib, XX },
++ { "andS", eAX, Iv, XX },
++ { "(bad)", XX, XX, XX }, /* SEG ES prefix */
++ { "daa{|}", XX, XX, XX },
++ /* 28 */
++ { "subB", Eb, Gb, XX },
++ { "subS", Ev, Gv, XX },
++ { "subB", Gb, Eb, XX },
++ { "subS", Gv, Ev, XX },
++ { "subB", AL, Ib, XX },
++ { "subS", eAX, Iv, XX },
++ { "(bad)", XX, XX, XX }, /* SEG CS prefix */
++ { "das{|}", XX, XX, XX },
++ /* 30 */
++ { "xorB", Eb, Gb, XX },
++ { "xorS", Ev, Gv, XX },
++ { "xorB", Gb, Eb, XX },
++ { "xorS", Gv, Ev, XX },
++ { "xorB", AL, Ib, XX },
++ { "xorS", eAX, Iv, XX },
++ { "(bad)", XX, XX, XX }, /* SEG SS prefix */
++ { "aaa{|}", XX, XX, XX },
++ /* 38 */
++ { "cmpB", Eb, Gb, XX },
++ { "cmpS", Ev, Gv, XX },
++ { "cmpB", Gb, Eb, XX },
++ { "cmpS", Gv, Ev, XX },
++ { "cmpB", AL, Ib, XX },
++ { "cmpS", eAX, Iv, XX },
++ { "(bad)", XX, XX, XX }, /* SEG DS prefix */
++ { "aas{|}", XX, XX, XX },
++ /* 40 */
++ { "inc{S|}", RMeAX, XX, XX },
++ { "inc{S|}", RMeCX, XX, XX },
++ { "inc{S|}", RMeDX, XX, XX },
++ { "inc{S|}", RMeBX, XX, XX },
++ { "inc{S|}", RMeSP, XX, XX },
++ { "inc{S|}", RMeBP, XX, XX },
++ { "inc{S|}", RMeSI, XX, XX },
++ { "inc{S|}", RMeDI, XX, XX },
++ /* 48 */
++ { "dec{S|}", RMeAX, XX, XX },
++ { "dec{S|}", RMeCX, XX, XX },
++ { "dec{S|}", RMeDX, XX, XX },
++ { "dec{S|}", RMeBX, XX, XX },
++ { "dec{S|}", RMeSP, XX, XX },
++ { "dec{S|}", RMeBP, XX, XX },
++ { "dec{S|}", RMeSI, XX, XX },
++ { "dec{S|}", RMeDI, XX, XX },
++ /* 50 */
++ { "pushS", RMrAX, XX, XX },
++ { "pushS", RMrCX, XX, XX },
++ { "pushS", RMrDX, XX, XX },
++ { "pushS", RMrBX, XX, XX },
++ { "pushS", RMrSP, XX, XX },
++ { "pushS", RMrBP, XX, XX },
++ { "pushS", RMrSI, XX, XX },
++ { "pushS", RMrDI, XX, XX },
++ /* 58 */
++ { "popS", RMrAX, XX, XX },
++ { "popS", RMrCX, XX, XX },
++ { "popS", RMrDX, XX, XX },
++ { "popS", RMrBX, XX, XX },
++ { "popS", RMrSP, XX, XX },
++ { "popS", RMrBP, XX, XX },
++ { "popS", RMrSI, XX, XX },
++ { "popS", RMrDI, XX, XX },
++ /* 60 */
++ { "pusha{P|}", XX, XX, XX },
++ { "popa{P|}", XX, XX, XX },
++ { "bound{S|}", Gv, Ma, XX },
++ { X86_64_0 },
++ { "(bad)", XX, XX, XX }, /* seg fs */
++ { "(bad)", XX, XX, XX }, /* seg gs */
++ { "(bad)", XX, XX, XX }, /* op size prefix */
++ { "(bad)", XX, XX, XX }, /* adr size prefix */
++ /* 68 */
++ { "pushT", Iq, XX, XX },
++ { "imulS", Gv, Ev, Iv },
++ { "pushT", sIb, XX, XX },
++ { "imulS", Gv, Ev, sIb },
++ { "ins{b||b|}", Yb, indirDX, XX },
++ { "ins{R||R|}", Yv, indirDX, XX },
++ { "outs{b||b|}", indirDX, Xb, XX },
++ { "outs{R||R|}", indirDX, Xv, XX },
++ /* 70 */
++ { "joH", Jb, XX, cond_jump_flag },
++ { "jnoH", Jb, XX, cond_jump_flag },
++ { "jbH", Jb, XX, cond_jump_flag },
++ { "jaeH", Jb, XX, cond_jump_flag },
++ { "jeH", Jb, XX, cond_jump_flag },
++ { "jneH", Jb, XX, cond_jump_flag },
++ { "jbeH", Jb, XX, cond_jump_flag },
++ { "jaH", Jb, XX, cond_jump_flag },
++ /* 78 */
++ { "jsH", Jb, XX, cond_jump_flag },
++ { "jnsH", Jb, XX, cond_jump_flag },
++ { "jpH", Jb, XX, cond_jump_flag },
++ { "jnpH", Jb, XX, cond_jump_flag },
++ { "jlH", Jb, XX, cond_jump_flag },
++ { "jgeH", Jb, XX, cond_jump_flag },
++ { "jleH", Jb, XX, cond_jump_flag },
++ { "jgH", Jb, XX, cond_jump_flag },
++ /* 80 */
++ { GRP1b },
++ { GRP1S },
++ { "(bad)", XX, XX, XX },
++ { GRP1Ss },
++ { "testB", Eb, Gb, XX },
++ { "testS", Ev, Gv, XX },
++ { "xchgB", Eb, Gb, XX },
++ { "xchgS", Ev, Gv, XX },
++ /* 88 */
++ { "movB", Eb, Gb, XX },
++ { "movS", Ev, Gv, XX },
++ { "movB", Gb, Eb, XX },
++ { "movS", Gv, Ev, XX },
++ { "movQ", Sv, Sw, XX },
++ { "leaS", Gv, M, XX },
++ { "movQ", Sw, Sv, XX },
++ { "popU", Ev, XX, XX },
++ /* 90 */
++ { "nop", NOP_Fixup, 0, XX, XX },
++ { "xchgS", RMeCX, eAX, XX },
++ { "xchgS", RMeDX, eAX, XX },
++ { "xchgS", RMeBX, eAX, XX },
++ { "xchgS", RMeSP, eAX, XX },
++ { "xchgS", RMeBP, eAX, XX },
++ { "xchgS", RMeSI, eAX, XX },
++ { "xchgS", RMeDI, eAX, XX },
++ /* 98 */
++ { "cW{tR||tR|}", XX, XX, XX },
++ { "cR{tO||tO|}", XX, XX, XX },
++ { "Jcall{T|}", Ap, XX, XX },
++ { "(bad)", XX, XX, XX }, /* fwait */
++ { "pushfT", XX, XX, XX },
++ { "popfT", XX, XX, XX },
++ { "sahf{|}", XX, XX, XX },
++ { "lahf{|}", XX, XX, XX },
++ /* a0 */
++ { "movB", AL, Ob64, XX },
++ { "movS", eAX, Ov64, XX },
++ { "movB", Ob64, AL, XX },
++ { "movS", Ov64, eAX, XX },
++ { "movs{b||b|}", Yb, Xb, XX },
++ { "movs{R||R|}", Yv, Xv, XX },
++ { "cmps{b||b|}", Xb, Yb, XX },
++ { "cmps{R||R|}", Xv, Yv, XX },
++ /* a8 */
++ { "testB", AL, Ib, XX },
++ { "testS", eAX, Iv, XX },
++ { "stosB", Yb, AL, XX },
++ { "stosS", Yv, eAX, XX },
++ { "lodsB", AL, Xb, XX },
++ { "lodsS", eAX, Xv, XX },
++ { "scasB", AL, Yb, XX },
++ { "scasS", eAX, Yv, XX },
++ /* b0 */
++ { "movB", RMAL, Ib, XX },
++ { "movB", RMCL, Ib, XX },
++ { "movB", RMDL, Ib, XX },
++ { "movB", RMBL, Ib, XX },
++ { "movB", RMAH, Ib, XX },
++ { "movB", RMCH, Ib, XX },
++ { "movB", RMDH, Ib, XX },
++ { "movB", RMBH, Ib, XX },
++ /* b8 */
++ { "movS", RMeAX, Iv64, XX },
++ { "movS", RMeCX, Iv64, XX },
++ { "movS", RMeDX, Iv64, XX },
++ { "movS", RMeBX, Iv64, XX },
++ { "movS", RMeSP, Iv64, XX },
++ { "movS", RMeBP, Iv64, XX },
++ { "movS", RMeSI, Iv64, XX },
++ { "movS", RMeDI, Iv64, XX },
++ /* c0 */
++ { GRP2b },
++ { GRP2S },
++ { "retT", Iw, XX, XX },
++ { "retT", XX, XX, XX },
++ { "les{S|}", Gv, Mp, XX },
++ { "ldsS", Gv, Mp, XX },
++ { "movA", Eb, Ib, XX },
++ { "movQ", Ev, Iv, XX },
++ /* c8 */
++ { "enterT", Iw, Ib, XX },
++ { "leaveT", XX, XX, XX },
++ { "lretP", Iw, XX, XX },
++ { "lretP", XX, XX, XX },
++ { "int3", XX, XX, XX },
++ { "int", Ib, XX, XX },
++ { "into{|}", XX, XX, XX },
++ { "iretP", XX, XX, XX },
++ /* d0 */
++ { GRP2b_one },
++ { GRP2S_one },
++ { GRP2b_cl },
++ { GRP2S_cl },
++ { "aam{|}", sIb, XX, XX },
++ { "aad{|}", sIb, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "xlat", DSBX, XX, XX },
++ /* d8 */
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ /* e0 */
++ { "loopneFH", Jb, XX, loop_jcxz_flag },
++ { "loopeFH", Jb, XX, loop_jcxz_flag },
++ { "loopFH", Jb, XX, loop_jcxz_flag },
++ { "jEcxzH", Jb, XX, loop_jcxz_flag },
++ { "inB", AL, Ib, XX },
++ { "inS", eAX, Ib, XX },
++ { "outB", Ib, AL, XX },
++ { "outS", Ib, eAX, XX },
++ /* e8 */
++ { "callT", Jv, XX, XX },
++ { "jmpT", Jv, XX, XX },
++ { "Jjmp{T|}", Ap, XX, XX },
++ { "jmp", Jb, XX, XX },
++ { "inB", AL, indirDX, XX },
++ { "inS", eAX, indirDX, XX },
++ { "outB", indirDX, AL, XX },
++ { "outS", indirDX, eAX, XX },
++ /* f0 */
++ { "(bad)", XX, XX, XX }, /* lock prefix */
++ { "icebp", XX, XX, XX },
++ { "(bad)", XX, XX, XX }, /* repne */
++ { "(bad)", XX, XX, XX }, /* repz */
++ { "hlt", XX, XX, XX },
++ { "cmc", XX, XX, XX },
++ { GRP3b },
++ { GRP3S },
++ /* f8 */
++ { "clc", XX, XX, XX },
++ { "stc", XX, XX, XX },
++ { "cli", XX, XX, XX },
++ { "sti", XX, XX, XX },
++ { "cld", XX, XX, XX },
++ { "std", XX, XX, XX },
++ { GRP4 },
++ { GRP5 },
++};
++
++static const struct dis386 dis386_twobyte[] = {
++ /* 00 */
++ { GRP6 },
++ { GRP7 },
++ { "larS", Gv, Ew, XX },
++ { "lslS", Gv, Ew, XX },
++ { "(bad)", XX, XX, XX },
++ { "syscall", XX, XX, XX },
++ { "clts", XX, XX, XX },
++ { "sysretP", XX, XX, XX },
++ /* 08 */
++ { "invd", XX, XX, XX },
++ { "wbinvd", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "ud2a", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { GRPAMD },
++ { "femms", XX, XX, XX },
++ { "", MX, EM, OPSUF }, /* See OP_3DNowSuffix. */
++ /* 10 */
++ { PREGRP8 },
++ { PREGRP9 },
++ { PREGRP30 },
++ { "movlpX", EX, XM, SIMD_Fixup, 'h' },
++ { "unpcklpX", XM, EX, XX },
++ { "unpckhpX", XM, EX, XX },
++ { PREGRP31 },
++ { "movhpX", EX, XM, SIMD_Fixup, 'l' },
++ /* 18 */
++ { GRP14 },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ /* 20 */
++ { "movL", Rm, Cm, XX },
++ { "movL", Rm, Dm, XX },
++ { "movL", Cm, Rm, XX },
++ { "movL", Dm, Rm, XX },
++ { "movL", Rd, Td, XX },
++ { "(bad)", XX, XX, XX },
++ { "movL", Td, Rd, XX },
++ { "(bad)", XX, XX, XX },
++ /* 28 */
++ { "movapX", XM, EX, XX },
++ { "movapX", EX, XM, XX },
++ { PREGRP2 },
++ { "movntpX", Ev, XM, XX },
++ { PREGRP4 },
++ { PREGRP3 },
++ { "ucomisX", XM,EX, XX },
++ { "comisX", XM,EX, XX },
++ /* 30 */
++ { "wrmsr", XX, XX, XX },
++ { "rdtsc", XX, XX, XX },
++ { "rdmsr", XX, XX, XX },
++ { "rdpmc", XX, XX, XX },
++ { "sysenter", XX, XX, XX },
++ { "sysexit", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ /* 38 */
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ /* 40 */
++ { "cmovo", Gv, Ev, XX },
++ { "cmovno", Gv, Ev, XX },
++ { "cmovb", Gv, Ev, XX },
++ { "cmovae", Gv, Ev, XX },
++ { "cmove", Gv, Ev, XX },
++ { "cmovne", Gv, Ev, XX },
++ { "cmovbe", Gv, Ev, XX },
++ { "cmova", Gv, Ev, XX },
++ /* 48 */
++ { "cmovs", Gv, Ev, XX },
++ { "cmovns", Gv, Ev, XX },
++ { "cmovp", Gv, Ev, XX },
++ { "cmovnp", Gv, Ev, XX },
++ { "cmovl", Gv, Ev, XX },
++ { "cmovge", Gv, Ev, XX },
++ { "cmovle", Gv, Ev, XX },
++ { "cmovg", Gv, Ev, XX },
++ /* 50 */
++ { "movmskpX", Gdq, XS, XX },
++ { PREGRP13 },
++ { PREGRP12 },
++ { PREGRP11 },
++ { "andpX", XM, EX, XX },
++ { "andnpX", XM, EX, XX },
++ { "orpX", XM, EX, XX },
++ { "xorpX", XM, EX, XX },
++ /* 58 */
++ { PREGRP0 },
++ { PREGRP10 },
++ { PREGRP17 },
++ { PREGRP16 },
++ { PREGRP14 },
++ { PREGRP7 },
++ { PREGRP5 },
++ { PREGRP6 },
++ /* 60 */
++ { "punpcklbw", MX, EM, XX },
++ { "punpcklwd", MX, EM, XX },
++ { "punpckldq", MX, EM, XX },
++ { "packsswb", MX, EM, XX },
++ { "pcmpgtb", MX, EM, XX },
++ { "pcmpgtw", MX, EM, XX },
++ { "pcmpgtd", MX, EM, XX },
++ { "packuswb", MX, EM, XX },
++ /* 68 */
++ { "punpckhbw", MX, EM, XX },
++ { "punpckhwd", MX, EM, XX },
++ { "punpckhdq", MX, EM, XX },
++ { "packssdw", MX, EM, XX },
++ { PREGRP26 },
++ { PREGRP24 },
++ { "movd", MX, Edq, XX },
++ { PREGRP19 },
++ /* 70 */
++ { PREGRP22 },
++ { GRP10 },
++ { GRP11 },
++ { GRP12 },
++ { "pcmpeqb", MX, EM, XX },
++ { "pcmpeqw", MX, EM, XX },
++ { "pcmpeqd", MX, EM, XX },
++ { "emms", XX, XX, XX },
++ /* 78 */
++ { "vmread", Em, Gm, XX },
++ { "vmwrite", Gm, Em, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { PREGRP28 },
++ { PREGRP29 },
++ { PREGRP23 },
++ { PREGRP20 },
++ /* 80 */
++ { "joH", Jv, XX, cond_jump_flag },
++ { "jnoH", Jv, XX, cond_jump_flag },
++ { "jbH", Jv, XX, cond_jump_flag },
++ { "jaeH", Jv, XX, cond_jump_flag },
++ { "jeH", Jv, XX, cond_jump_flag },
++ { "jneH", Jv, XX, cond_jump_flag },
++ { "jbeH", Jv, XX, cond_jump_flag },
++ { "jaH", Jv, XX, cond_jump_flag },
++ /* 88 */
++ { "jsH", Jv, XX, cond_jump_flag },
++ { "jnsH", Jv, XX, cond_jump_flag },
++ { "jpH", Jv, XX, cond_jump_flag },
++ { "jnpH", Jv, XX, cond_jump_flag },
++ { "jlH", Jv, XX, cond_jump_flag },
++ { "jgeH", Jv, XX, cond_jump_flag },
++ { "jleH", Jv, XX, cond_jump_flag },
++ { "jgH", Jv, XX, cond_jump_flag },
++ /* 90 */
++ { "seto", Eb, XX, XX },
++ { "setno", Eb, XX, XX },
++ { "setb", Eb, XX, XX },
++ { "setae", Eb, XX, XX },
++ { "sete", Eb, XX, XX },
++ { "setne", Eb, XX, XX },
++ { "setbe", Eb, XX, XX },
++ { "seta", Eb, XX, XX },
++ /* 98 */
++ { "sets", Eb, XX, XX },
++ { "setns", Eb, XX, XX },
++ { "setp", Eb, XX, XX },
++ { "setnp", Eb, XX, XX },
++ { "setl", Eb, XX, XX },
++ { "setge", Eb, XX, XX },
++ { "setle", Eb, XX, XX },
++ { "setg", Eb, XX, XX },
++ /* a0 */
++ { "pushT", fs, XX, XX },
++ { "popT", fs, XX, XX },
++ { "cpuid", XX, XX, XX },
++ { "btS", Ev, Gv, XX },
++ { "shldS", Ev, Gv, Ib },
++ { "shldS", Ev, Gv, CL },
++ { GRPPADLCK2 },
++ { GRPPADLCK1 },
++ /* a8 */
++ { "pushT", gs, XX, XX },
++ { "popT", gs, XX, XX },
++ { "rsm", XX, XX, XX },
++ { "btsS", Ev, Gv, XX },
++ { "shrdS", Ev, Gv, Ib },
++ { "shrdS", Ev, Gv, CL },
++ { GRP13 },
++ { "imulS", Gv, Ev, XX },
++ /* b0 */
++ { "cmpxchgB", Eb, Gb, XX },
++ { "cmpxchgS", Ev, Gv, XX },
++ { "lssS", Gv, Mp, XX },
++ { "btrS", Ev, Gv, XX },
++ { "lfsS", Gv, Mp, XX },
++ { "lgsS", Gv, Mp, XX },
++ { "movz{bR|x|bR|x}", Gv, Eb, XX },
++ { "movz{wR|x|wR|x}", Gv, Ew, XX }, /* yes, there really is movzww ! */
++ /* b8 */
++ { "(bad)", XX, XX, XX },
++ { "ud2b", XX, XX, XX },
++ { GRP8 },
++ { "btcS", Ev, Gv, XX },
++ { "bsfS", Gv, Ev, XX },
++ { "bsrS", Gv, Ev, XX },
++ { "movs{bR|x|bR|x}", Gv, Eb, XX },
++ { "movs{wR|x|wR|x}", Gv, Ew, XX }, /* yes, there really is movsww ! */
++ /* c0 */
++ { "xaddB", Eb, Gb, XX },
++ { "xaddS", Ev, Gv, XX },
++ { PREGRP1 },
++ { "movntiS", Ev, Gv, XX },
++ { "pinsrw", MX, Edqw, Ib },
++ { "pextrw", Gdq, MS, Ib },
++ { "shufpX", XM, EX, Ib },
++ { GRP9 },
++ /* c8 */
++ { "bswap", RMeAX, XX, XX },
++ { "bswap", RMeCX, XX, XX },
++ { "bswap", RMeDX, XX, XX },
++ { "bswap", RMeBX, XX, XX },
++ { "bswap", RMeSP, XX, XX },
++ { "bswap", RMeBP, XX, XX },
++ { "bswap", RMeSI, XX, XX },
++ { "bswap", RMeDI, XX, XX },
++ /* d0 */
++ { PREGRP27 },
++ { "psrlw", MX, EM, XX },
++ { "psrld", MX, EM, XX },
++ { "psrlq", MX, EM, XX },
++ { "paddq", MX, EM, XX },
++ { "pmullw", MX, EM, XX },
++ { PREGRP21 },
++ { "pmovmskb", Gdq, MS, XX },
++ /* d8 */
++ { "psubusb", MX, EM, XX },
++ { "psubusw", MX, EM, XX },
++ { "pminub", MX, EM, XX },
++ { "pand", MX, EM, XX },
++ { "paddusb", MX, EM, XX },
++ { "paddusw", MX, EM, XX },
++ { "pmaxub", MX, EM, XX },
++ { "pandn", MX, EM, XX },
++ /* e0 */
++ { "pavgb", MX, EM, XX },
++ { "psraw", MX, EM, XX },
++ { "psrad", MX, EM, XX },
++ { "pavgw", MX, EM, XX },
++ { "pmulhuw", MX, EM, XX },
++ { "pmulhw", MX, EM, XX },
++ { PREGRP15 },
++ { PREGRP25 },
++ /* e8 */
++ { "psubsb", MX, EM, XX },
++ { "psubsw", MX, EM, XX },
++ { "pminsw", MX, EM, XX },
++ { "por", MX, EM, XX },
++ { "paddsb", MX, EM, XX },
++ { "paddsw", MX, EM, XX },
++ { "pmaxsw", MX, EM, XX },
++ { "pxor", MX, EM, XX },
++ /* f0 */
++ { PREGRP32 },
++ { "psllw", MX, EM, XX },
++ { "pslld", MX, EM, XX },
++ { "psllq", MX, EM, XX },
++ { "pmuludq", MX, EM, XX },
++ { "pmaddwd", MX, EM, XX },
++ { "psadbw", MX, EM, XX },
++ { PREGRP18 },
++ /* f8 */
++ { "psubb", MX, EM, XX },
++ { "psubw", MX, EM, XX },
++ { "psubd", MX, EM, XX },
++ { "psubq", MX, EM, XX },
++ { "paddb", MX, EM, XX },
++ { "paddw", MX, EM, XX },
++ { "paddd", MX, EM, XX },
++ { "(bad)", XX, XX, XX }
++};
++
++static const unsigned char onebyte_has_modrm[256] = {
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++ /* ------------------------------- */
++ /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
++ /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
++ /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
++ /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
++ /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
++ /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
++ /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
++ /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
++ /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
++ /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
++ /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
++ /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
++ /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
++ /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
++ /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
++ /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
++ /* ------------------------------- */
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++};
++
++static const unsigned char twobyte_has_modrm[256] = {
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++ /* ------------------------------- */
++ /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
++ /* 10 */ 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0, /* 1f */
++ /* 20 */ 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1, /* 2f */
++ /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
++ /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
++ /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
++ /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
++ /* 70 */ 1,1,1,1,1,1,1,0,1,1,0,0,1,1,1,1, /* 7f */
++ /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
++ /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
++ /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
++ /* b0 */ 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1, /* bf */
++ /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
++ /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
++ /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
++ /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
++ /* ------------------------------- */
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++};
++
++static const unsigned char twobyte_uses_SSE_prefix[256] = {
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++ /* ------------------------------- */
++ /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */
++ /* 10 */ 1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0, /* 1f */
++ /* 20 */ 0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0, /* 2f */
++ /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
++ /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
++ /* 50 */ 0,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* 5f */
++ /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1, /* 6f */
++ /* 70 */ 1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, /* 7f */
++ /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
++ /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
++ /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
++ /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
++ /* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
++ /* d0 */ 1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */
++ /* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */
++ /* f0 */ 1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0 /* ff */
++ /* ------------------------------- */
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++};
++
++static char obuf[100];
++static char *obufp;
++static char scratchbuf[100];
++static unsigned char *start_codep;
++static unsigned char *insn_codep;
++static unsigned char *codep;
++static disassemble_info *the_info;
++static int mod;
++static int rm;
++static int reg;
++static unsigned char need_modrm;
++
++/* If we are accessing mod/rm/reg without need_modrm set, then the
++ values are stale. Hitting this abort likely indicates that you
++ need to update onebyte_has_modrm or twobyte_has_modrm. */
++#define MODRM_CHECK if (!need_modrm) abort ()
++
++static const char **names64;
++static const char **names32;
++static const char **names16;
++static const char **names8;
++static const char **names8rex;
++static const char **names_seg;
++static const char **index16;
++
++static const char *intel_names64[] = {
++ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
++ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
++};
++static const char *intel_names32[] = {
++ "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
++ "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d"
++};
++static const char *intel_names16[] = {
++ "ax", "cx", "dx", "bx", "sp", "bp", "si", "di",
++ "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
++};
++static const char *intel_names8[] = {
++ "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh",
++};
++static const char *intel_names8rex[] = {
++ "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
++ "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"
++};
++static const char *intel_names_seg[] = {
++ "es", "cs", "ss", "ds", "fs", "gs", "?", "?",
++};
++static const char *intel_index16[] = {
++ "bx+si", "bx+di", "bp+si", "bp+di", "si", "di", "bp", "bx"
++};
++
++static const char *att_names64[] = {
++ "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
++ "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
++};
++static const char *att_names32[] = {
++ "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
++ "%r8d", "%r9d", "%r10d", "%r11d", "%r12d", "%r13d", "%r14d", "%r15d"
++};
++static const char *att_names16[] = {
++ "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di",
++ "%r8w", "%r9w", "%r10w", "%r11w", "%r12w", "%r13w", "%r14w", "%r15w"
++};
++static const char *att_names8[] = {
++ "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh",
++};
++static const char *att_names8rex[] = {
++ "%al", "%cl", "%dl", "%bl", "%spl", "%bpl", "%sil", "%dil",
++ "%r8b", "%r9b", "%r10b", "%r11b", "%r12b", "%r13b", "%r14b", "%r15b"
++};
++static const char *att_names_seg[] = {
++ "%es", "%cs", "%ss", "%ds", "%fs", "%gs", "%?", "%?",
++};
++static const char *att_index16[] = {
++ "%bx,%si", "%bx,%di", "%bp,%si", "%bp,%di", "%si", "%di", "%bp", "%bx"
++};
++
++static const struct dis386 grps[][8] = {
++ /* GRP1b */
++ {
++ { "addA", Eb, Ib, XX },
++ { "orA", Eb, Ib, XX },
++ { "adcA", Eb, Ib, XX },
++ { "sbbA", Eb, Ib, XX },
++ { "andA", Eb, Ib, XX },
++ { "subA", Eb, Ib, XX },
++ { "xorA", Eb, Ib, XX },
++ { "cmpA", Eb, Ib, XX }
++ },
++ /* GRP1S */
++ {
++ { "addQ", Ev, Iv, XX },
++ { "orQ", Ev, Iv, XX },
++ { "adcQ", Ev, Iv, XX },
++ { "sbbQ", Ev, Iv, XX },
++ { "andQ", Ev, Iv, XX },
++ { "subQ", Ev, Iv, XX },
++ { "xorQ", Ev, Iv, XX },
++ { "cmpQ", Ev, Iv, XX }
++ },
++ /* GRP1Ss */
++ {
++ { "addQ", Ev, sIb, XX },
++ { "orQ", Ev, sIb, XX },
++ { "adcQ", Ev, sIb, XX },
++ { "sbbQ", Ev, sIb, XX },
++ { "andQ", Ev, sIb, XX },
++ { "subQ", Ev, sIb, XX },
++ { "xorQ", Ev, sIb, XX },
++ { "cmpQ", Ev, sIb, XX }
++ },
++ /* GRP2b */
++ {
++ { "rolA", Eb, Ib, XX },
++ { "rorA", Eb, Ib, XX },
++ { "rclA", Eb, Ib, XX },
++ { "rcrA", Eb, Ib, XX },
++ { "shlA", Eb, Ib, XX },
++ { "shrA", Eb, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "sarA", Eb, Ib, XX },
++ },
++ /* GRP2S */
++ {
++ { "rolQ", Ev, Ib, XX },
++ { "rorQ", Ev, Ib, XX },
++ { "rclQ", Ev, Ib, XX },
++ { "rcrQ", Ev, Ib, XX },
++ { "shlQ", Ev, Ib, XX },
++ { "shrQ", Ev, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "sarQ", Ev, Ib, XX },
++ },
++ /* GRP2b_one */
++ {
++ { "rolA", Eb, I1, XX },
++ { "rorA", Eb, I1, XX },
++ { "rclA", Eb, I1, XX },
++ { "rcrA", Eb, I1, XX },
++ { "shlA", Eb, I1, XX },
++ { "shrA", Eb, I1, XX },
++ { "(bad)", XX, XX, XX },
++ { "sarA", Eb, I1, XX },
++ },
++ /* GRP2S_one */
++ {
++ { "rolQ", Ev, I1, XX },
++ { "rorQ", Ev, I1, XX },
++ { "rclQ", Ev, I1, XX },
++ { "rcrQ", Ev, I1, XX },
++ { "shlQ", Ev, I1, XX },
++ { "shrQ", Ev, I1, XX },
++ { "(bad)", XX, XX, XX},
++ { "sarQ", Ev, I1, XX },
++ },
++ /* GRP2b_cl */
++ {
++ { "rolA", Eb, CL, XX },
++ { "rorA", Eb, CL, XX },
++ { "rclA", Eb, CL, XX },
++ { "rcrA", Eb, CL, XX },
++ { "shlA", Eb, CL, XX },
++ { "shrA", Eb, CL, XX },
++ { "(bad)", XX, XX, XX },
++ { "sarA", Eb, CL, XX },
++ },
++ /* GRP2S_cl */
++ {
++ { "rolQ", Ev, CL, XX },
++ { "rorQ", Ev, CL, XX },
++ { "rclQ", Ev, CL, XX },
++ { "rcrQ", Ev, CL, XX },
++ { "shlQ", Ev, CL, XX },
++ { "shrQ", Ev, CL, XX },
++ { "(bad)", XX, XX, XX },
++ { "sarQ", Ev, CL, XX }
++ },
++ /* GRP3b */
++ {
++ { "testA", Eb, Ib, XX },
++ { "(bad)", Eb, XX, XX },
++ { "notA", Eb, XX, XX },
++ { "negA", Eb, XX, XX },
++ { "mulA", Eb, XX, XX }, /* Don't print the implicit %al register, */
++ { "imulA", Eb, XX, XX }, /* to distinguish these opcodes from other */
++ { "divA", Eb, XX, XX }, /* mul/imul opcodes. Do the same for div */
++ { "idivA", Eb, XX, XX } /* and idiv for consistency. */
++ },
++ /* GRP3S */
++ {
++ { "testQ", Ev, Iv, XX },
++ { "(bad)", XX, XX, XX },
++ { "notQ", Ev, XX, XX },
++ { "negQ", Ev, XX, XX },
++ { "mulQ", Ev, XX, XX }, /* Don't print the implicit register. */
++ { "imulQ", Ev, XX, XX },
++ { "divQ", Ev, XX, XX },
++ { "idivQ", Ev, XX, XX },
++ },
++ /* GRP4 */
++ {
++ { "incA", Eb, XX, XX },
++ { "decA", Eb, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRP5 */
++ {
++ { "incQ", Ev, XX, XX },
++ { "decQ", Ev, XX, XX },
++ { "callT", indirEv, XX, XX },
++ { "JcallT", indirEp, XX, XX },
++ { "jmpT", indirEv, XX, XX },
++ { "JjmpT", indirEp, XX, XX },
++ { "pushU", Ev, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRP6 */
++ {
++ { "sldtQ", Ev, XX, XX },
++ { "strQ", Ev, XX, XX },
++ { "lldt", Ew, XX, XX },
++ { "ltr", Ew, XX, XX },
++ { "verr", Ew, XX, XX },
++ { "verw", Ew, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX }
++ },
++ /* GRP7 */
++ {
++ { "sgdtIQ", VMX_Fixup, 0, XX, XX },
++ { "sidtIQ", PNI_Fixup, 0, XX, XX },
++ { "lgdt{Q|Q||}", M, XX, XX },
++ { "lidt{Q|Q||}", SVME_Fixup, 0, XX, XX },
++ { "smswQ", Ev, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "lmsw", Ew, XX, XX },
++ { "invlpg", INVLPG_Fixup, w_mode, XX, XX },
++ },
++ /* GRP8 */
++ {
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "btQ", Ev, Ib, XX },
++ { "btsQ", Ev, Ib, XX },
++ { "btrQ", Ev, Ib, XX },
++ { "btcQ", Ev, Ib, XX },
++ },
++ /* GRP9 */
++ {
++ { "(bad)", XX, XX, XX },
++ { "cmpxchg8b", Eq, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "", VM, XX, XX }, /* See OP_VMX. */
++ { "vmptrst", Eq, XX, XX },
++ },
++ /* GRP10 */
++ {
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "psrlw", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "psraw", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "psllw", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRP11 */
++ {
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "psrld", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "psrad", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "pslld", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRP12 */
++ {
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "psrlq", MS, Ib, XX },
++ { "psrldq", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "psllq", MS, Ib, XX },
++ { "pslldq", MS, Ib, XX },
++ },
++ /* GRP13 */
++ {
++ { "fxsave", Ev, XX, XX },
++ { "fxrstor", Ev, XX, XX },
++ { "ldmxcsr", Ev, XX, XX },
++ { "stmxcsr", Ev, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "lfence", OP_0fae, 0, XX, XX },
++ { "mfence", OP_0fae, 0, XX, XX },
++ { "clflush", OP_0fae, 0, XX, XX },
++ },
++ /* GRP14 */
++ {
++ { "prefetchnta", Ev, XX, XX },
++ { "prefetcht0", Ev, XX, XX },
++ { "prefetcht1", Ev, XX, XX },
++ { "prefetcht2", Ev, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRPAMD */
++ {
++ { "prefetch", Eb, XX, XX },
++ { "prefetchw", Eb, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRPPADLCK1 */
++ {
++ { "xstore-rng", OP_0f07, 0, XX, XX },
++ { "xcrypt-ecb", OP_0f07, 0, XX, XX },
++ { "xcrypt-cbc", OP_0f07, 0, XX, XX },
++ { "xcrypt-ctr", OP_0f07, 0, XX, XX },
++ { "xcrypt-cfb", OP_0f07, 0, XX, XX },
++ { "xcrypt-ofb", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ },
++ /* GRPPADLCK2 */
++ {
++ { "montmul", OP_0f07, 0, XX, XX },
++ { "xsha1", OP_0f07, 0, XX, XX },
++ { "xsha256", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ }
++};
++
++static const struct dis386 prefix_user_table[][4] = {
++ /* PREGRP0 */
++ {
++ { "addps", XM, EX, XX },
++ { "addss", XM, EX, XX },
++ { "addpd", XM, EX, XX },
++ { "addsd", XM, EX, XX },
++ },
++ /* PREGRP1 */
++ {
++ { "", XM, EX, OPSIMD }, /* See OP_SIMD_SUFFIX. */
++ { "", XM, EX, OPSIMD },
++ { "", XM, EX, OPSIMD },
++ { "", XM, EX, OPSIMD },
++ },
++ /* PREGRP2 */
++ {
++ { "cvtpi2ps", XM, EM, XX },
++ { "cvtsi2ssY", XM, Ev, XX },
++ { "cvtpi2pd", XM, EM, XX },
++ { "cvtsi2sdY", XM, Ev, XX },
++ },
++ /* PREGRP3 */
++ {
++ { "cvtps2pi", MX, EX, XX },
++ { "cvtss2siY", Gv, EX, XX },
++ { "cvtpd2pi", MX, EX, XX },
++ { "cvtsd2siY", Gv, EX, XX },
++ },
++ /* PREGRP4 */
++ {
++ { "cvttps2pi", MX, EX, XX },
++ { "cvttss2siY", Gv, EX, XX },
++ { "cvttpd2pi", MX, EX, XX },
++ { "cvttsd2siY", Gv, EX, XX },
++ },
++ /* PREGRP5 */
++ {
++ { "divps", XM, EX, XX },
++ { "divss", XM, EX, XX },
++ { "divpd", XM, EX, XX },
++ { "divsd", XM, EX, XX },
++ },
++ /* PREGRP6 */
++ {
++ { "maxps", XM, EX, XX },
++ { "maxss", XM, EX, XX },
++ { "maxpd", XM, EX, XX },
++ { "maxsd", XM, EX, XX },
++ },
++ /* PREGRP7 */
++ {
++ { "minps", XM, EX, XX },
++ { "minss", XM, EX, XX },
++ { "minpd", XM, EX, XX },
++ { "minsd", XM, EX, XX },
++ },
++ /* PREGRP8 */
++ {
++ { "movups", XM, EX, XX },
++ { "movss", XM, EX, XX },
++ { "movupd", XM, EX, XX },
++ { "movsd", XM, EX, XX },
++ },
++ /* PREGRP9 */
++ {
++ { "movups", EX, XM, XX },
++ { "movss", EX, XM, XX },
++ { "movupd", EX, XM, XX },
++ { "movsd", EX, XM, XX },
++ },
++ /* PREGRP10 */
++ {
++ { "mulps", XM, EX, XX },
++ { "mulss", XM, EX, XX },
++ { "mulpd", XM, EX, XX },
++ { "mulsd", XM, EX, XX },
++ },
++ /* PREGRP11 */
++ {
++ { "rcpps", XM, EX, XX },
++ { "rcpss", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP12 */
++ {
++ { "rsqrtps", XM, EX, XX },
++ { "rsqrtss", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP13 */
++ {
++ { "sqrtps", XM, EX, XX },
++ { "sqrtss", XM, EX, XX },
++ { "sqrtpd", XM, EX, XX },
++ { "sqrtsd", XM, EX, XX },
++ },
++ /* PREGRP14 */
++ {
++ { "subps", XM, EX, XX },
++ { "subss", XM, EX, XX },
++ { "subpd", XM, EX, XX },
++ { "subsd", XM, EX, XX },
++ },
++ /* PREGRP15 */
++ {
++ { "(bad)", XM, EX, XX },
++ { "cvtdq2pd", XM, EX, XX },
++ { "cvttpd2dq", XM, EX, XX },
++ { "cvtpd2dq", XM, EX, XX },
++ },
++ /* PREGRP16 */
++ {
++ { "cvtdq2ps", XM, EX, XX },
++ { "cvttps2dq",XM, EX, XX },
++ { "cvtps2dq",XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP17 */
++ {
++ { "cvtps2pd", XM, EX, XX },
++ { "cvtss2sd", XM, EX, XX },
++ { "cvtpd2ps", XM, EX, XX },
++ { "cvtsd2ss", XM, EX, XX },
++ },
++ /* PREGRP18 */
++ {
++ { "maskmovq", MX, MS, XX },
++ { "(bad)", XM, EX, XX },
++ { "maskmovdqu", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP19 */
++ {
++ { "movq", MX, EM, XX },
++ { "movdqu", XM, EX, XX },
++ { "movdqa", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP20 */
++ {
++ { "movq", EM, MX, XX },
++ { "movdqu", EX, XM, XX },
++ { "movdqa", EX, XM, XX },
++ { "(bad)", EX, XM, XX },
++ },
++ /* PREGRP21 */
++ {
++ { "(bad)", EX, XM, XX },
++ { "movq2dq", XM, MS, XX },
++ { "movq", EX, XM, XX },
++ { "movdq2q", MX, XS, XX },
++ },
++ /* PREGRP22 */
++ {
++ { "pshufw", MX, EM, Ib },
++ { "pshufhw", XM, EX, Ib },
++ { "pshufd", XM, EX, Ib },
++ { "pshuflw", XM, EX, Ib },
++ },
++ /* PREGRP23 */
++ {
++ { "movd", Edq, MX, XX },
++ { "movq", XM, EX, XX },
++ { "movd", Edq, XM, XX },
++ { "(bad)", Ed, XM, XX },
++ },
++ /* PREGRP24 */
++ {
++ { "(bad)", MX, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "punpckhqdq", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP25 */
++ {
++ { "movntq", EM, MX, XX },
++ { "(bad)", EM, XM, XX },
++ { "movntdq", EM, XM, XX },
++ { "(bad)", EM, XM, XX },
++ },
++ /* PREGRP26 */
++ {
++ { "(bad)", MX, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "punpcklqdq", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP27 */
++ {
++ { "(bad)", MX, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "addsubpd", XM, EX, XX },
++ { "addsubps", XM, EX, XX },
++ },
++ /* PREGRP28 */
++ {
++ { "(bad)", MX, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "haddpd", XM, EX, XX },
++ { "haddps", XM, EX, XX },
++ },
++ /* PREGRP29 */
++ {
++ { "(bad)", MX, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "hsubpd", XM, EX, XX },
++ { "hsubps", XM, EX, XX },
++ },
++ /* PREGRP30 */
++ {
++ { "movlpX", XM, EX, SIMD_Fixup, 'h' }, /* really only 2 operands */
++ { "movsldup", XM, EX, XX },
++ { "movlpd", XM, EX, XX },
++ { "movddup", XM, EX, XX },
++ },
++ /* PREGRP31 */
++ {
++ { "movhpX", XM, EX, SIMD_Fixup, 'l' },
++ { "movshdup", XM, EX, XX },
++ { "movhpd", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP32 */
++ {
++ { "(bad)", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "lddqu", XM, M, XX },
++ },
++};
++
++static const struct dis386 x86_64_table[][2] = {
++ {
++ { "arpl", Ew, Gw, XX },
++ { "movs{||lq|xd}", Gv, Ed, XX },
++ },
++};
++
++#ifdef __KERNEL__
++#define INTERNAL_DISASSEMBLER_ERROR "<internal disassembler error>"
++#else /* __KERNEL__ */
++#define INTERNAL_DISASSEMBLER_ERROR _("<internal disassembler error>")
++#endif /* __KERNEL__ */
++
++static void
++ckprefix (void)
++{
++ int newrex;
++ rex = 0;
++ prefixes = 0;
++ used_prefixes = 0;
++ rex_used = 0;
++ while (1)
++ {
++ FETCH_DATA (the_info, codep + 1);
++ newrex = 0;
++ switch (*codep)
++ {
++ /* REX prefixes family. */
++ case 0x40:
++ case 0x41:
++ case 0x42:
++ case 0x43:
++ case 0x44:
++ case 0x45:
++ case 0x46:
++ case 0x47:
++ case 0x48:
++ case 0x49:
++ case 0x4a:
++ case 0x4b:
++ case 0x4c:
++ case 0x4d:
++ case 0x4e:
++ case 0x4f:
++ if (mode_64bit)
++ newrex = *codep;
++ else
++ return;
++ break;
++ case 0xf3:
++ prefixes |= PREFIX_REPZ;
++ break;
++ case 0xf2:
++ prefixes |= PREFIX_REPNZ;
++ break;
++ case 0xf0:
++ prefixes |= PREFIX_LOCK;
++ break;
++ case 0x2e:
++ prefixes |= PREFIX_CS;
++ break;
++ case 0x36:
++ prefixes |= PREFIX_SS;
++ break;
++ case 0x3e:
++ prefixes |= PREFIX_DS;
++ break;
++ case 0x26:
++ prefixes |= PREFIX_ES;
++ break;
++ case 0x64:
++ prefixes |= PREFIX_FS;
++ break;
++ case 0x65:
++ prefixes |= PREFIX_GS;
++ break;
++ case 0x66:
++ prefixes |= PREFIX_DATA;
++ break;
++ case 0x67:
++ prefixes |= PREFIX_ADDR;
++ break;
++ case FWAIT_OPCODE:
++ /* fwait is really an instruction. If there are prefixes
++ before the fwait, they belong to the fwait, *not* to the
++ following instruction. */
++ if (prefixes)
++ {
++ prefixes |= PREFIX_FWAIT;
++ codep++;
++ return;
++ }
++ prefixes = PREFIX_FWAIT;
++ break;
++ default:
++ return;
++ }
++ /* Rex is ignored when followed by another prefix. */
++ if (rex)
++ {
++ oappend (prefix_name (rex, 0));
++ oappend (" ");
++ }
++ rex = newrex;
++ codep++;
++ }
++}
++
++/* Return the name of the prefix byte PREF, or NULL if PREF is not a
++ prefix byte. */
++
++static const char *
++prefix_name (int pref, int sizeflag)
++{
++ switch (pref)
++ {
++ /* REX prefixes family. */
++ case 0x40:
++ return "rex";
++ case 0x41:
++ return "rexZ";
++ case 0x42:
++ return "rexY";
++ case 0x43:
++ return "rexYZ";
++ case 0x44:
++ return "rexX";
++ case 0x45:
++ return "rexXZ";
++ case 0x46:
++ return "rexXY";
++ case 0x47:
++ return "rexXYZ";
++ case 0x48:
++ return "rex64";
++ case 0x49:
++ return "rex64Z";
++ case 0x4a:
++ return "rex64Y";
++ case 0x4b:
++ return "rex64YZ";
++ case 0x4c:
++ return "rex64X";
++ case 0x4d:
++ return "rex64XZ";
++ case 0x4e:
++ return "rex64XY";
++ case 0x4f:
++ return "rex64XYZ";
++ case 0xf3:
++ return "repz";
++ case 0xf2:
++ return "repnz";
++ case 0xf0:
++ return "lock";
++ case 0x2e:
++ return "cs";
++ case 0x36:
++ return "ss";
++ case 0x3e:
++ return "ds";
++ case 0x26:
++ return "es";
++ case 0x64:
++ return "fs";
++ case 0x65:
++ return "gs";
++ case 0x66:
++ return (sizeflag & DFLAG) ? "data16" : "data32";
++ case 0x67:
++ if (mode_64bit)
++ return (sizeflag & AFLAG) ? "addr32" : "addr64";
++ else
++ return (sizeflag & AFLAG) ? "addr16" : "addr32";
++ case FWAIT_OPCODE:
++ return "fwait";
++ default:
++ return NULL;
++ }
++}
++
++static char op1out[100], op2out[100], op3out[100];
++static int op_ad, op_index[3];
++static int two_source_ops;
++static bfd_vma op_address[3];
++static bfd_vma op_riprel[3];
++static bfd_vma start_pc;
++\f
++/*
++ * On the 386's of 1988, the maximum length of an instruction is 15 bytes.
++ * (see topic "Redundant prefixes" in the "Differences from 8086"
++ * section of the "Virtual 8086 Mode" chapter.)
++ * 'pc' should be the address of this instruction, it will
++ * be used to print the target address if this is a relative jump or call
++ * The function returns the length of this instruction in bytes.
++ */
++
++static char intel_syntax;
++static char open_char;
++static char close_char;
++static char separator_char;
++static char scale_char;
++
++/* Here for backwards compatibility. When gdb stops using
++ print_insn_i386_att and print_insn_i386_intel these functions can
++ disappear, and print_insn_i386 be merged into print_insn. */
++int
++print_insn_i386_att (bfd_vma pc, disassemble_info *info)
++{
++ intel_syntax = 0;
++
++ return print_insn (pc, info);
++}
++
++int
++print_insn_i386_intel (bfd_vma pc, disassemble_info *info)
++{
++ intel_syntax = 1;
++
++ return print_insn (pc, info);
++}
++
++int
++print_insn_i386 (bfd_vma pc, disassemble_info *info)
++{
++ intel_syntax = -1;
++
++ return print_insn (pc, info);
++}
++
++static int
++print_insn (bfd_vma pc, disassemble_info *info)
++{
++ const struct dis386 *dp;
++ int i;
++ char *first, *second, *third;
++ int needcomma;
++ unsigned char uses_SSE_prefix, uses_LOCK_prefix;
++ int sizeflag;
++ const char *p;
++ struct dis_private priv;
++
++ mode_64bit = (info->mach == bfd_mach_x86_64_intel_syntax
++ || info->mach == bfd_mach_x86_64);
++
++ if (intel_syntax == (char) -1)
++ intel_syntax = (info->mach == bfd_mach_i386_i386_intel_syntax
++ || info->mach == bfd_mach_x86_64_intel_syntax);
++
++ if (info->mach == bfd_mach_i386_i386
++ || info->mach == bfd_mach_x86_64
++ || info->mach == bfd_mach_i386_i386_intel_syntax
++ || info->mach == bfd_mach_x86_64_intel_syntax)
++ priv.orig_sizeflag = AFLAG | DFLAG;
++ else if (info->mach == bfd_mach_i386_i8086)
++ priv.orig_sizeflag = 0;
++ else
++ abort ();
++
++ for (p = info->disassembler_options; p != NULL; )
++ {
++ if (strncmp (p, "x86-64", 6) == 0)
++ {
++ mode_64bit = 1;
++ priv.orig_sizeflag = AFLAG | DFLAG;
++ }
++ else if (strncmp (p, "i386", 4) == 0)
++ {
++ mode_64bit = 0;
++ priv.orig_sizeflag = AFLAG | DFLAG;
++ }
++ else if (strncmp (p, "i8086", 5) == 0)
++ {
++ mode_64bit = 0;
++ priv.orig_sizeflag = 0;
++ }
++ else if (strncmp (p, "intel", 5) == 0)
++ {
++ intel_syntax = 1;
++ }
++ else if (strncmp (p, "att", 3) == 0)
++ {
++ intel_syntax = 0;
++ }
++ else if (strncmp (p, "addr", 4) == 0)
++ {
++ if (p[4] == '1' && p[5] == '6')
++ priv.orig_sizeflag &= ~AFLAG;
++ else if (p[4] == '3' && p[5] == '2')
++ priv.orig_sizeflag |= AFLAG;
++ }
++ else if (strncmp (p, "data", 4) == 0)
++ {
++ if (p[4] == '1' && p[5] == '6')
++ priv.orig_sizeflag &= ~DFLAG;
++ else if (p[4] == '3' && p[5] == '2')
++ priv.orig_sizeflag |= DFLAG;
++ }
++ else if (strncmp (p, "suffix", 6) == 0)
++ priv.orig_sizeflag |= SUFFIX_ALWAYS;
++
++ p = strchr (p, ',');
++ if (p != NULL)
++ p++;
++ }
++
++ if (intel_syntax)
++ {
++ names64 = intel_names64;
++ names32 = intel_names32;
++ names16 = intel_names16;
++ names8 = intel_names8;
++ names8rex = intel_names8rex;
++ names_seg = intel_names_seg;
++ index16 = intel_index16;
++ open_char = '[';
++ close_char = ']';
++ separator_char = '+';
++ scale_char = '*';
++ }
++ else
++ {
++ names64 = att_names64;
++ names32 = att_names32;
++ names16 = att_names16;
++ names8 = att_names8;
++ names8rex = att_names8rex;
++ names_seg = att_names_seg;
++ index16 = att_index16;
++ open_char = '(';
++ close_char = ')';
++ separator_char = ',';
++ scale_char = ',';
++ }
++
++ /* The output looks better if we put 7 bytes on a line, since that
++ puts most long word instructions on a single line. */
++ info->bytes_per_line = 7;
++
++ info->private_data = &priv;
++ priv.max_fetched = priv.the_buffer;
++ priv.insn_start = pc;
++
++ obuf[0] = 0;
++ op1out[0] = 0;
++ op2out[0] = 0;
++ op3out[0] = 0;
++
++ op_index[0] = op_index[1] = op_index[2] = -1;
++
++ the_info = info;
++ start_pc = pc;
++ start_codep = priv.the_buffer;
++ codep = priv.the_buffer;
++
++#ifndef __KERNEL__
++ if (setjmp (priv.bailout) != 0)
++ {
++ const char *name;
++
++ /* Getting here means we tried for data but didn't get it. That
++ means we have an incomplete instruction of some sort. Just
++ print the first byte as a prefix or a .byte pseudo-op. */
++ if (codep > priv.the_buffer)
++ {
++ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
++ if (name != NULL)
++ (*info->fprintf_func) (info->stream, "%s", name);
++ else
++ {
++ /* Just print the first byte as a .byte instruction. */
++ (*info->fprintf_func) (info->stream, ".byte 0x%x",
++ (unsigned int) priv.the_buffer[0]);
++ }
++
++ return 1;
++ }
++
++ return -1;
++ }
++#endif /* __KERNEL__ */
++
++ obufp = obuf;
++ ckprefix ();
++
++ insn_codep = codep;
++ sizeflag = priv.orig_sizeflag;
++
++ FETCH_DATA (info, codep + 1);
++ two_source_ops = (*codep == 0x62) || (*codep == 0xc8);
++
++ if ((prefixes & PREFIX_FWAIT)
++ && ((*codep < 0xd8) || (*codep > 0xdf)))
++ {
++ const char *name;
++
++ /* fwait not followed by floating point instruction. Print the
++ first prefix, which is probably fwait itself. */
++ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
++ if (name == NULL)
++ name = INTERNAL_DISASSEMBLER_ERROR;
++ (*info->fprintf_func) (info->stream, "%s", name);
++ return 1;
++ }
++
++ if (*codep == 0x0f)
++ {
++ FETCH_DATA (info, codep + 2);
++ dp = &dis386_twobyte[*++codep];
++ need_modrm = twobyte_has_modrm[*codep];
++ uses_SSE_prefix = twobyte_uses_SSE_prefix[*codep];
++ uses_LOCK_prefix = (*codep & ~0x02) == 0x20;
++ }
++ else
++ {
++ dp = &dis386[*codep];
++ need_modrm = onebyte_has_modrm[*codep];
++ uses_SSE_prefix = 0;
++ uses_LOCK_prefix = 0;
++ }
++ codep++;
++
++ if (!uses_SSE_prefix && (prefixes & PREFIX_REPZ))
++ {
++ oappend ("repz ");
++ used_prefixes |= PREFIX_REPZ;
++ }
++ if (!uses_SSE_prefix && (prefixes & PREFIX_REPNZ))
++ {
++ oappend ("repnz ");
++ used_prefixes |= PREFIX_REPNZ;
++ }
++ if (!uses_LOCK_prefix && (prefixes & PREFIX_LOCK))
++ {
++ oappend ("lock ");
++ used_prefixes |= PREFIX_LOCK;
++ }
++
++ if (prefixes & PREFIX_ADDR)
++ {
++ sizeflag ^= AFLAG;
++ if (dp->bytemode3 != loop_jcxz_mode || intel_syntax)
++ {
++ if ((sizeflag & AFLAG) || mode_64bit)
++ oappend ("addr32 ");
++ else
++ oappend ("addr16 ");
++ used_prefixes |= PREFIX_ADDR;
++ }
++ }
++
++ if (!uses_SSE_prefix && (prefixes & PREFIX_DATA))
++ {
++ sizeflag ^= DFLAG;
++ if (dp->bytemode3 == cond_jump_mode
++ && dp->bytemode1 == v_mode
++ && !intel_syntax)
++ {
++ if (sizeflag & DFLAG)
++ oappend ("data32 ");
++ else
++ oappend ("data16 ");
++ used_prefixes |= PREFIX_DATA;
++ }
++ }
++
++ if (need_modrm)
++ {
++ FETCH_DATA (info, codep + 1);
++ mod = (*codep >> 6) & 3;
++ reg = (*codep >> 3) & 7;
++ rm = *codep & 7;
++ }
++
++ if (dp->name == NULL && dp->bytemode1 == FLOATCODE)
++ {
++ dofloat (sizeflag);
++ }
++ else
++ {
++ int index;
++ if (dp->name == NULL)
++ {
++ switch (dp->bytemode1)
++ {
++ case USE_GROUPS:
++ dp = &grps[dp->bytemode2][reg];
++ break;
++
++ case USE_PREFIX_USER_TABLE:
++ index = 0;
++ used_prefixes |= (prefixes & PREFIX_REPZ);
++ if (prefixes & PREFIX_REPZ)
++ index = 1;
++ else
++ {
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (prefixes & PREFIX_DATA)
++ index = 2;
++ else
++ {
++ used_prefixes |= (prefixes & PREFIX_REPNZ);
++ if (prefixes & PREFIX_REPNZ)
++ index = 3;
++ }
++ }
++ dp = &prefix_user_table[dp->bytemode2][index];
++ break;
++
++ case X86_64_SPECIAL:
++ dp = &x86_64_table[dp->bytemode2][mode_64bit];
++ break;
++
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ break;
++ }
++ }
++
++ if (putop (dp->name, sizeflag) == 0)
++ {
++ obufp = op1out;
++ op_ad = 2;
++ if (dp->op1)
++ (*dp->op1) (dp->bytemode1, sizeflag);
++
++ obufp = op2out;
++ op_ad = 1;
++ if (dp->op2)
++ (*dp->op2) (dp->bytemode2, sizeflag);
++
++ obufp = op3out;
++ op_ad = 0;
++ if (dp->op3)
++ (*dp->op3) (dp->bytemode3, sizeflag);
++ }
++ }
++
++ /* See if any prefixes were not used. If so, print the first one
++ separately. If we don't do this, we'll wind up printing an
++ instruction stream which does not precisely correspond to the
++ bytes we are disassembling. */
++ if ((prefixes & ~used_prefixes) != 0)
++ {
++ const char *name;
++
++ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
++ if (name == NULL)
++ name = INTERNAL_DISASSEMBLER_ERROR;
++ (*info->fprintf_func) (info->stream, "%s", name);
++ return 1;
++ }
++ if (rex & ~rex_used)
++ {
++ const char *name;
++ name = prefix_name (rex | 0x40, priv.orig_sizeflag);
++ if (name == NULL)
++ name = INTERNAL_DISASSEMBLER_ERROR;
++ (*info->fprintf_func) (info->stream, "%s ", name);
++ }
++
++ obufp = obuf + strlen (obuf);
++ for (i = strlen (obuf); i < 6; i++)
++ oappend (" ");
++ oappend (" ");
++ (*info->fprintf_func) (info->stream, "%s", obuf);
++
++ /* The enter and bound instructions are printed with operands in the same
++ order as the intel book; everything else is printed in reverse order. */
++ if (intel_syntax || two_source_ops)
++ {
++ first = op1out;
++ second = op2out;
++ third = op3out;
++ op_ad = op_index[0];
++ op_index[0] = op_index[2];
++ op_index[2] = op_ad;
++ }
++ else
++ {
++ first = op3out;
++ second = op2out;
++ third = op1out;
++ }
++ needcomma = 0;
++ if (*first)
++ {
++ if (op_index[0] != -1 && !op_riprel[0])
++ (*info->print_address_func) ((bfd_vma) op_address[op_index[0]], info);
++ else
++ (*info->fprintf_func) (info->stream, "%s", first);
++ needcomma = 1;
++ }
++ if (*second)
++ {
++ if (needcomma)
++ (*info->fprintf_func) (info->stream, ",");
++ if (op_index[1] != -1 && !op_riprel[1])
++ (*info->print_address_func) ((bfd_vma) op_address[op_index[1]], info);
++ else
++ (*info->fprintf_func) (info->stream, "%s", second);
++ needcomma = 1;
++ }
++ if (*third)
++ {
++ if (needcomma)
++ (*info->fprintf_func) (info->stream, ",");
++ if (op_index[2] != -1 && !op_riprel[2])
++ (*info->print_address_func) ((bfd_vma) op_address[op_index[2]], info);
++ else
++ (*info->fprintf_func) (info->stream, "%s", third);
++ }
++ for (i = 0; i < 3; i++)
++ if (op_index[i] != -1 && op_riprel[i])
++ {
++ (*info->fprintf_func) (info->stream, " # ");
++ (*info->print_address_func) ((bfd_vma) (start_pc + codep - start_codep
++ + op_address[op_index[i]]), info);
++ }
++ return codep - priv.the_buffer;
++}
++
++static const char *float_mem[] = {
++ /* d8 */
++ "fadd{s||s|}",
++ "fmul{s||s|}",
++ "fcom{s||s|}",
++ "fcomp{s||s|}",
++ "fsub{s||s|}",
++ "fsubr{s||s|}",
++ "fdiv{s||s|}",
++ "fdivr{s||s|}",
++ /* d9 */
++ "fld{s||s|}",
++ "(bad)",
++ "fst{s||s|}",
++ "fstp{s||s|}",
++ "fldenvIC",
++ "fldcw",
++ "fNstenvIC",
++ "fNstcw",
++ /* da */
++ "fiadd{l||l|}",
++ "fimul{l||l|}",
++ "ficom{l||l|}",
++ "ficomp{l||l|}",
++ "fisub{l||l|}",
++ "fisubr{l||l|}",
++ "fidiv{l||l|}",
++ "fidivr{l||l|}",
++ /* db */
++ "fild{l||l|}",
++ "fisttp{l||l|}",
++ "fist{l||l|}",
++ "fistp{l||l|}",
++ "(bad)",
++ "fld{t||t|}",
++ "(bad)",
++ "fstp{t||t|}",
++ /* dc */
++ "fadd{l||l|}",
++ "fmul{l||l|}",
++ "fcom{l||l|}",
++ "fcomp{l||l|}",
++ "fsub{l||l|}",
++ "fsubr{l||l|}",
++ "fdiv{l||l|}",
++ "fdivr{l||l|}",
++ /* dd */
++ "fld{l||l|}",
++ "fisttp{ll||ll|}",
++ "fst{l||l|}",
++ "fstp{l||l|}",
++ "frstorIC",
++ "(bad)",
++ "fNsaveIC",
++ "fNstsw",
++ /* de */
++ "fiadd",
++ "fimul",
++ "ficom",
++ "ficomp",
++ "fisub",
++ "fisubr",
++ "fidiv",
++ "fidivr",
++ /* df */
++ "fild",
++ "fisttp",
++ "fist",
++ "fistp",
++ "fbld",
++ "fild{ll||ll|}",
++ "fbstp",
++ "fistp{ll||ll|}",
++};
++
++static const unsigned char float_mem_mode[] = {
++ /* d8 */
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ /* d9 */
++ d_mode,
++ 0,
++ d_mode,
++ d_mode,
++ 0,
++ w_mode,
++ 0,
++ w_mode,
++ /* da */
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ /* db */
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ 0,
++ t_mode,
++ 0,
++ t_mode,
++ /* dc */
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ /* dd */
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ 0,
++ 0,
++ 0,
++ w_mode,
++ /* de */
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ /* df */
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ t_mode,
++ q_mode,
++ t_mode,
++ q_mode
++};
++
++#define ST OP_ST, 0
++#define STi OP_STi, 0
++
++#define FGRPd9_2 NULL, NULL, 0, NULL, 0, NULL, 0
++#define FGRPd9_4 NULL, NULL, 1, NULL, 0, NULL, 0
++#define FGRPd9_5 NULL, NULL, 2, NULL, 0, NULL, 0
++#define FGRPd9_6 NULL, NULL, 3, NULL, 0, NULL, 0
++#define FGRPd9_7 NULL, NULL, 4, NULL, 0, NULL, 0
++#define FGRPda_5 NULL, NULL, 5, NULL, 0, NULL, 0
++#define FGRPdb_4 NULL, NULL, 6, NULL, 0, NULL, 0
++#define FGRPde_3 NULL, NULL, 7, NULL, 0, NULL, 0
++#define FGRPdf_4 NULL, NULL, 8, NULL, 0, NULL, 0
++
++static const struct dis386 float_reg[][8] = {
++ /* d8 */
++ {
++ { "fadd", ST, STi, XX },
++ { "fmul", ST, STi, XX },
++ { "fcom", STi, XX, XX },
++ { "fcomp", STi, XX, XX },
++ { "fsub", ST, STi, XX },
++ { "fsubr", ST, STi, XX },
++ { "fdiv", ST, STi, XX },
++ { "fdivr", ST, STi, XX },
++ },
++ /* d9 */
++ {
++ { "fld", STi, XX, XX },
++ { "fxch", STi, XX, XX },
++ { FGRPd9_2 },
++ { "(bad)", XX, XX, XX },
++ { FGRPd9_4 },
++ { FGRPd9_5 },
++ { FGRPd9_6 },
++ { FGRPd9_7 },
++ },
++ /* da */
++ {
++ { "fcmovb", ST, STi, XX },
++ { "fcmove", ST, STi, XX },
++ { "fcmovbe",ST, STi, XX },
++ { "fcmovu", ST, STi, XX },
++ { "(bad)", XX, XX, XX },
++ { FGRPda_5 },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* db */
++ {
++ { "fcmovnb",ST, STi, XX },
++ { "fcmovne",ST, STi, XX },
++ { "fcmovnbe",ST, STi, XX },
++ { "fcmovnu",ST, STi, XX },
++ { FGRPdb_4 },
++ { "fucomi", ST, STi, XX },
++ { "fcomi", ST, STi, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* dc */
++ {
++ { "fadd", STi, ST, XX },
++ { "fmul", STi, ST, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++#if UNIXWARE_COMPAT
++ { "fsub", STi, ST, XX },
++ { "fsubr", STi, ST, XX },
++ { "fdiv", STi, ST, XX },
++ { "fdivr", STi, ST, XX },
++#else
++ { "fsubr", STi, ST, XX },
++ { "fsub", STi, ST, XX },
++ { "fdivr", STi, ST, XX },
++ { "fdiv", STi, ST, XX },
++#endif
++ },
++ /* dd */
++ {
++ { "ffree", STi, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "fst", STi, XX, XX },
++ { "fstp", STi, XX, XX },
++ { "fucom", STi, XX, XX },
++ { "fucomp", STi, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* de */
++ {
++ { "faddp", STi, ST, XX },
++ { "fmulp", STi, ST, XX },
++ { "(bad)", XX, XX, XX },
++ { FGRPde_3 },
++#if UNIXWARE_COMPAT
++ { "fsubp", STi, ST, XX },
++ { "fsubrp", STi, ST, XX },
++ { "fdivp", STi, ST, XX },
++ { "fdivrp", STi, ST, XX },
++#else
++ { "fsubrp", STi, ST, XX },
++ { "fsubp", STi, ST, XX },
++ { "fdivrp", STi, ST, XX },
++ { "fdivp", STi, ST, XX },
++#endif
++ },
++ /* df */
++ {
++ { "ffreep", STi, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { FGRPdf_4 },
++ { "fucomip",ST, STi, XX },
++ { "fcomip", ST, STi, XX },
++ { "(bad)", XX, XX, XX },
++ },
++};
++
++static char *fgrps[][8] = {
++ /* d9_2 0 */
++ {
++ "fnop","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
++ },
++
++ /* d9_4 1 */
++ {
++ "fchs","fabs","(bad)","(bad)","ftst","fxam","(bad)","(bad)",
++ },
++
++ /* d9_5 2 */
++ {
++ "fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz","(bad)",
++ },
++
++ /* d9_6 3 */
++ {
++ "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp",
++ },
++
++ /* d9_7 4 */
++ {
++ "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos",
++ },
++
++ /* da_5 5 */
++ {
++ "(bad)","fucompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
++ },
++
++ /* db_4 6 */
++ {
++ "feni(287 only)","fdisi(287 only)","fNclex","fNinit",
++ "fNsetpm(287 only)","(bad)","(bad)","(bad)",
++ },
++
++ /* de_3 7 */
++ {
++ "(bad)","fcompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
++ },
++
++ /* df_4 8 */
++ {
++ "fNstsw","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
++ },
++};
++
++static void
++dofloat (int sizeflag)
++{
++ const struct dis386 *dp;
++ unsigned char floatop;
++
++ floatop = codep[-1];
++
++ if (mod != 3)
++ {
++ int fp_indx = (floatop - 0xd8) * 8 + reg;
++
++ putop (float_mem[fp_indx], sizeflag);
++ obufp = op1out;
++ OP_E (float_mem_mode[fp_indx], sizeflag);
++ return;
++ }
++ /* Skip mod/rm byte. */
++ MODRM_CHECK;
++ codep++;
++
++ dp = &float_reg[floatop - 0xd8][reg];
++ if (dp->name == NULL)
++ {
++ putop (fgrps[dp->bytemode1][rm], sizeflag);
++
++ /* Instruction fnstsw is only one with strange arg. */
++ if (floatop == 0xdf && codep[-1] == 0xe0)
++ strcpy (op1out, names16[0]);
++ }
++ else
++ {
++ putop (dp->name, sizeflag);
++
++ obufp = op1out;
++ if (dp->op1)
++ (*dp->op1) (dp->bytemode1, sizeflag);
++ obufp = op2out;
++ if (dp->op2)
++ (*dp->op2) (dp->bytemode2, sizeflag);
++ }
++}
++
++static void
++OP_ST (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ oappend ("%st");
++}
++
++static void
++OP_STi (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ sprintf (scratchbuf, "%%st(%d)", rm);
++ oappend (scratchbuf + intel_syntax);
++}
++
++/* Capital letters in template are macros. */
++static int
++putop (const char *template, int sizeflag)
++{
++ const char *p;
++ int alt = 0;
++
++ for (p = template; *p; p++)
++ {
++ switch (*p)
++ {
++ default:
++ *obufp++ = *p;
++ break;
++ case '{':
++ alt = 0;
++ if (intel_syntax)
++ alt += 1;
++ if (mode_64bit)
++ alt += 2;
++ while (alt != 0)
++ {
++ while (*++p != '|')
++ {
++ if (*p == '}')
++ {
++ /* Alternative not valid. */
++ strcpy (obuf, "(bad)");
++ obufp = obuf + 5;
++ return 1;
++ }
++ else if (*p == '\0')
++ abort ();
++ }
++ alt--;
++ }
++ /* Fall through. */
++ case 'I':
++ alt = 1;
++ continue;
++ case '|':
++ while (*++p != '}')
++ {
++ if (*p == '\0')
++ abort ();
++ }
++ break;
++ case '}':
++ break;
++ case 'A':
++ if (intel_syntax)
++ break;
++ if (mod != 3 || (sizeflag & SUFFIX_ALWAYS))
++ *obufp++ = 'b';
++ break;
++ case 'B':
++ if (intel_syntax)
++ break;
++ if (sizeflag & SUFFIX_ALWAYS)
++ *obufp++ = 'b';
++ break;
++ case 'C':
++ if (intel_syntax && !alt)
++ break;
++ if ((prefixes & PREFIX_DATA) || (sizeflag & SUFFIX_ALWAYS))
++ {
++ if (sizeflag & DFLAG)
++ *obufp++ = intel_syntax ? 'd' : 'l';
++ else
++ *obufp++ = intel_syntax ? 'w' : 's';
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ break;
++ case 'E': /* For jcxz/jecxz */
++ if (mode_64bit)
++ {
++ if (sizeflag & AFLAG)
++ *obufp++ = 'r';
++ else
++ *obufp++ = 'e';
++ }
++ else
++ if (sizeflag & AFLAG)
++ *obufp++ = 'e';
++ used_prefixes |= (prefixes & PREFIX_ADDR);
++ break;
++ case 'F':
++ if (intel_syntax)
++ break;
++ if ((prefixes & PREFIX_ADDR) || (sizeflag & SUFFIX_ALWAYS))
++ {
++ if (sizeflag & AFLAG)
++ *obufp++ = mode_64bit ? 'q' : 'l';
++ else
++ *obufp++ = mode_64bit ? 'l' : 'w';
++ used_prefixes |= (prefixes & PREFIX_ADDR);
++ }
++ break;
++ case 'H':
++ if (intel_syntax)
++ break;
++ if ((prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_CS
++ || (prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_DS)
++ {
++ used_prefixes |= prefixes & (PREFIX_CS | PREFIX_DS);
++ *obufp++ = ',';
++ *obufp++ = 'p';
++ if (prefixes & PREFIX_DS)
++ *obufp++ = 't';
++ else
++ *obufp++ = 'n';
++ }
++ break;
++ case 'J':
++ if (intel_syntax)
++ break;
++ *obufp++ = 'l';
++ break;
++ case 'L':
++ if (intel_syntax)
++ break;
++ if (sizeflag & SUFFIX_ALWAYS)
++ *obufp++ = 'l';
++ break;
++ case 'N':
++ if ((prefixes & PREFIX_FWAIT) == 0)
++ *obufp++ = 'n';
++ else
++ used_prefixes |= PREFIX_FWAIT;
++ break;
++ case 'O':
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ *obufp++ = 'o';
++ else
++ *obufp++ = 'd';
++ break;
++ case 'T':
++ if (intel_syntax)
++ break;
++ if (mode_64bit)
++ {
++ *obufp++ = 'q';
++ break;
++ }
++ /* Fall through. */
++ case 'P':
++ if (intel_syntax)
++ break;
++ if ((prefixes & PREFIX_DATA)
++ || (rex & REX_MODE64)
++ || (sizeflag & SUFFIX_ALWAYS))
++ {
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ *obufp++ = 'q';
++ else
++ {
++ if (sizeflag & DFLAG)
++ *obufp++ = 'l';
++ else
++ *obufp++ = 'w';
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ }
++ break;
++ case 'U':
++ if (intel_syntax)
++ break;
++ if (mode_64bit)
++ {
++ *obufp++ = 'q';
++ break;
++ }
++ /* Fall through. */
++ case 'Q':
++ if (intel_syntax && !alt)
++ break;
++ USED_REX (REX_MODE64);
++ if (mod != 3 || (sizeflag & SUFFIX_ALWAYS))
++ {
++ if (rex & REX_MODE64)
++ *obufp++ = 'q';
++ else
++ {
++ if (sizeflag & DFLAG)
++ *obufp++ = intel_syntax ? 'd' : 'l';
++ else
++ *obufp++ = 'w';
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ }
++ break;
++ case 'R':
++ USED_REX (REX_MODE64);
++ if (intel_syntax)
++ {
++ if (rex & REX_MODE64)
++ {
++ *obufp++ = 'q';
++ *obufp++ = 't';
++ }
++ else if (sizeflag & DFLAG)
++ {
++ *obufp++ = 'd';
++ *obufp++ = 'q';
++ }
++ else
++ {
++ *obufp++ = 'w';
++ *obufp++ = 'd';
++ }
++ }
++ else
++ {
++ if (rex & REX_MODE64)
++ *obufp++ = 'q';
++ else if (sizeflag & DFLAG)
++ *obufp++ = 'l';
++ else
++ *obufp++ = 'w';
++ }
++ if (!(rex & REX_MODE64))
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case 'S':
++ if (intel_syntax)
++ break;
++ if (sizeflag & SUFFIX_ALWAYS)
++ {
++ if (rex & REX_MODE64)
++ *obufp++ = 'q';
++ else
++ {
++ if (sizeflag & DFLAG)
++ *obufp++ = 'l';
++ else
++ *obufp++ = 'w';
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ }
++ break;
++ case 'X':
++ if (prefixes & PREFIX_DATA)
++ *obufp++ = 'd';
++ else
++ *obufp++ = 's';
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case 'Y':
++ if (intel_syntax)
++ break;
++ if (rex & REX_MODE64)
++ {
++ USED_REX (REX_MODE64);
++ *obufp++ = 'q';
++ }
++ break;
++ /* implicit operand size 'l' for i386 or 'q' for x86-64 */
++ case 'W':
++ /* operand size flag for cwtl, cbtw */
++ USED_REX (0);
++ if (rex)
++ *obufp++ = 'l';
++ else if (sizeflag & DFLAG)
++ *obufp++ = 'w';
++ else
++ *obufp++ = 'b';
++ if (intel_syntax)
++ {
++ if (rex)
++ {
++ *obufp++ = 'q';
++ *obufp++ = 'e';
++ }
++ if (sizeflag & DFLAG)
++ {
++ *obufp++ = 'd';
++ *obufp++ = 'e';
++ }
++ else
++ {
++ *obufp++ = 'w';
++ }
++ }
++ if (!rex)
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ }
++ alt = 0;
++ }
++ *obufp = 0;
++ return 0;
++}
++
++static void
++oappend (const char *s)
++{
++ strcpy (obufp, s);
++ obufp += strlen (s);
++}
++
++static void
++append_seg (void)
++{
++ if (prefixes & PREFIX_CS)
++ {
++ used_prefixes |= PREFIX_CS;
++ oappend ("%cs:" + intel_syntax);
++ }
++ if (prefixes & PREFIX_DS)
++ {
++ used_prefixes |= PREFIX_DS;
++ oappend ("%ds:" + intel_syntax);
++ }
++ if (prefixes & PREFIX_SS)
++ {
++ used_prefixes |= PREFIX_SS;
++ oappend ("%ss:" + intel_syntax);
++ }
++ if (prefixes & PREFIX_ES)
++ {
++ used_prefixes |= PREFIX_ES;
++ oappend ("%es:" + intel_syntax);
++ }
++ if (prefixes & PREFIX_FS)
++ {
++ used_prefixes |= PREFIX_FS;
++ oappend ("%fs:" + intel_syntax);
++ }
++ if (prefixes & PREFIX_GS)
++ {
++ used_prefixes |= PREFIX_GS;
++ oappend ("%gs:" + intel_syntax);
++ }
++}
++
++static void
++OP_indirE (int bytemode, int sizeflag)
++{
++ if (!intel_syntax)
++ oappend ("*");
++ OP_E (bytemode, sizeflag);
++}
++
++static void
++print_operand_value (char *buf, int hex, bfd_vma disp)
++{
++ if (mode_64bit)
++ {
++ if (hex)
++ {
++ char tmp[30];
++ int i;
++ buf[0] = '0';
++ buf[1] = 'x';
++ sprintf_vma (tmp, disp);
++ for (i = 0; tmp[i] == '0' && tmp[i + 1]; i++);
++ strcpy (buf + 2, tmp + i);
++ }
++ else
++ {
++ bfd_signed_vma v = disp;
++ char tmp[30];
++ int i;
++ if (v < 0)
++ {
++ *(buf++) = '-';
++ v = -disp;
++ /* Check for possible overflow on 0x8000000000000000. */
++ if (v < 0)
++ {
++ strcpy (buf, "9223372036854775808");
++ return;
++ }
++ }
++ if (!v)
++ {
++ strcpy (buf, "0");
++ return;
++ }
++
++ i = 0;
++ tmp[29] = 0;
++ while (v)
++ {
++ tmp[28 - i] = (v % 10) + '0';
++ v /= 10;
++ i++;
++ }
++ strcpy (buf, tmp + 29 - i);
++ }
++ }
++ else
++ {
++ if (hex)
++ sprintf (buf, "0x%x", (unsigned int) disp);
++ else
++ sprintf (buf, "%d", (int) disp);
++ }
++}
++
++static void
++OP_E (int bytemode, int sizeflag)
++{
++ bfd_vma disp;
++ int add = 0;
++ int riprel = 0;
++ USED_REX (REX_EXTZ);
++ if (rex & REX_EXTZ)
++ add += 8;
++
++ /* Skip mod/rm byte. */
++ MODRM_CHECK;
++ codep++;
++
++ if (mod == 3)
++ {
++ switch (bytemode)
++ {
++ case b_mode:
++ USED_REX (0);
++ if (rex)
++ oappend (names8rex[rm + add]);
++ else
++ oappend (names8[rm + add]);
++ break;
++ case w_mode:
++ oappend (names16[rm + add]);
++ break;
++ case d_mode:
++ oappend (names32[rm + add]);
++ break;
++ case q_mode:
++ oappend (names64[rm + add]);
++ break;
++ case m_mode:
++ if (mode_64bit)
++ oappend (names64[rm + add]);
++ else
++ oappend (names32[rm + add]);
++ break;
++ case branch_v_mode:
++ if (mode_64bit)
++ oappend (names64[rm + add]);
++ else
++ {
++ if ((sizeflag & DFLAG) || bytemode != branch_v_mode)
++ oappend (names32[rm + add]);
++ else
++ oappend (names16[rm + add]);
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ break;
++ case v_mode:
++ case dq_mode:
++ case dqw_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ oappend (names64[rm + add]);
++ else if ((sizeflag & DFLAG) || bytemode != v_mode)
++ oappend (names32[rm + add]);
++ else
++ oappend (names16[rm + add]);
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case 0:
++ break;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ break;
++ }
++ return;
++ }
++
++ disp = 0;
++ append_seg ();
++
++ if ((sizeflag & AFLAG) || mode_64bit) /* 32 bit address mode */
++ {
++ int havesib;
++ int havebase;
++ int base;
++ int index = 0;
++ int scale = 0;
++
++ havesib = 0;
++ havebase = 1;
++ base = rm;
++
++ if (base == 4)
++ {
++ havesib = 1;
++ FETCH_DATA (the_info, codep + 1);
++ index = (*codep >> 3) & 7;
++ if (mode_64bit || index != 0x4)
++ /* When INDEX == 0x4 in 32 bit mode, SCALE is ignored. */
++ scale = (*codep >> 6) & 3;
++ base = *codep & 7;
++ USED_REX (REX_EXTY);
++ if (rex & REX_EXTY)
++ index += 8;
++ codep++;
++ }
++ base += add;
++
++ switch (mod)
++ {
++ case 0:
++ if ((base & 7) == 5)
++ {
++ havebase = 0;
++ if (mode_64bit && !havesib)
++ riprel = 1;
++ disp = get32s ();
++ }
++ break;
++ case 1:
++ FETCH_DATA (the_info, codep + 1);
++ disp = *codep++;
++ if ((disp & 0x80) != 0)
++ disp -= 0x100;
++ break;
++ case 2:
++ disp = get32s ();
++ break;
++ }
++
++ if (!intel_syntax)
++ if (mod != 0 || (base & 7) == 5)
++ {
++ print_operand_value (scratchbuf, !riprel, disp);
++ oappend (scratchbuf);
++ if (riprel)
++ {
++ set_op (disp, 1);
++ oappend ("(%rip)");
++ }
++ }
++
++ if (havebase || (havesib && (index != 4 || scale != 0)))
++ {
++ if (intel_syntax)
++ {
++ switch (bytemode)
++ {
++ case b_mode:
++ oappend ("BYTE PTR ");
++ break;
++ case w_mode:
++ case dqw_mode:
++ oappend ("WORD PTR ");
++ break;
++ case branch_v_mode:
++ case v_mode:
++ case dq_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ oappend ("QWORD PTR ");
++ else if ((sizeflag & DFLAG) || bytemode == dq_mode)
++ oappend ("DWORD PTR ");
++ else
++ oappend ("WORD PTR ");
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case d_mode:
++ oappend ("DWORD PTR ");
++ break;
++ case q_mode:
++ oappend ("QWORD PTR ");
++ break;
++ case m_mode:
++ if (mode_64bit)
++ oappend ("QWORD PTR ");
++ else
++ oappend ("DWORD PTR ");
++ break;
++ case f_mode:
++ if (sizeflag & DFLAG)
++ {
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ oappend ("FWORD PTR ");
++ }
++ else
++ oappend ("DWORD PTR ");
++ break;
++ case t_mode:
++ oappend ("TBYTE PTR ");
++ break;
++ case x_mode:
++ oappend ("XMMWORD PTR ");
++ break;
++ default:
++ break;
++ }
++ }
++ *obufp++ = open_char;
++ if (intel_syntax && riprel)
++ oappend ("rip + ");
++ *obufp = '\0';
++ if (havebase)
++ oappend (mode_64bit && (sizeflag & AFLAG)
++ ? names64[base] : names32[base]);
++ if (havesib)
++ {
++ if (index != 4)
++ {
++ if (!intel_syntax || havebase)
++ {
++ *obufp++ = separator_char;
++ *obufp = '\0';
++ }
++ oappend (mode_64bit && (sizeflag & AFLAG)
++ ? names64[index] : names32[index]);
++ }
++ if (scale != 0 || (!intel_syntax && index != 4))
++ {
++ *obufp++ = scale_char;
++ *obufp = '\0';
++ sprintf (scratchbuf, "%d", 1 << scale);
++ oappend (scratchbuf);
++ }
++ }
++ if (intel_syntax && disp)
++ {
++ if ((bfd_signed_vma) disp > 0)
++ {
++ *obufp++ = '+';
++ *obufp = '\0';
++ }
++ else if (mod != 1)
++ {
++ *obufp++ = '-';
++ *obufp = '\0';
++ disp = - (bfd_signed_vma) disp;
++ }
++
++ print_operand_value (scratchbuf, mod != 1, disp);
++ oappend (scratchbuf);
++ }
++
++ *obufp++ = close_char;
++ *obufp = '\0';
++ }
++ else if (intel_syntax)
++ {
++ if (mod != 0 || (base & 7) == 5)
++ {
++ if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
++ | PREFIX_ES | PREFIX_FS | PREFIX_GS))
++ ;
++ else
++ {
++ oappend (names_seg[ds_reg - es_reg]);
++ oappend (":");
++ }
++ print_operand_value (scratchbuf, 1, disp);
++ oappend (scratchbuf);
++ }
++ }
++ }
++ else
++ { /* 16 bit address mode */
++ switch (mod)
++ {
++ case 0:
++ if (rm == 6)
++ {
++ disp = get16 ();
++ if ((disp & 0x8000) != 0)
++ disp -= 0x10000;
++ }
++ break;
++ case 1:
++ FETCH_DATA (the_info, codep + 1);
++ disp = *codep++;
++ if ((disp & 0x80) != 0)
++ disp -= 0x100;
++ break;
++ case 2:
++ disp = get16 ();
++ if ((disp & 0x8000) != 0)
++ disp -= 0x10000;
++ break;
++ }
++
++ if (!intel_syntax)
++ if (mod != 0 || rm == 6)
++ {
++ print_operand_value (scratchbuf, 0, disp);
++ oappend (scratchbuf);
++ }
++
++ if (mod != 0 || rm != 6)
++ {
++ *obufp++ = open_char;
++ *obufp = '\0';
++ oappend (index16[rm]);
++ if (intel_syntax && disp)
++ {
++ if ((bfd_signed_vma) disp > 0)
++ {
++ *obufp++ = '+';
++ *obufp = '\0';
++ }
++ else if (mod != 1)
++ {
++ *obufp++ = '-';
++ *obufp = '\0';
++ disp = - (bfd_signed_vma) disp;
++ }
++
++ print_operand_value (scratchbuf, mod != 1, disp);
++ oappend (scratchbuf);
++ }
++
++ *obufp++ = close_char;
++ *obufp = '\0';
++ }
++ else if (intel_syntax)
++ {
++ if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
++ | PREFIX_ES | PREFIX_FS | PREFIX_GS))
++ ;
++ else
++ {
++ oappend (names_seg[ds_reg - es_reg]);
++ oappend (":");
++ }
++ print_operand_value (scratchbuf, 1, disp & 0xffff);
++ oappend (scratchbuf);
++ }
++ }
++}
++
++static void
++OP_G (int bytemode, int sizeflag)
++{
++ int add = 0;
++ USED_REX (REX_EXTX);
++ if (rex & REX_EXTX)
++ add += 8;
++ switch (bytemode)
++ {
++ case b_mode:
++ USED_REX (0);
++ if (rex)
++ oappend (names8rex[reg + add]);
++ else
++ oappend (names8[reg + add]);
++ break;
++ case w_mode:
++ oappend (names16[reg + add]);
++ break;
++ case d_mode:
++ oappend (names32[reg + add]);
++ break;
++ case q_mode:
++ oappend (names64[reg + add]);
++ break;
++ case v_mode:
++ case dq_mode:
++ case dqw_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ oappend (names64[reg + add]);
++ else if ((sizeflag & DFLAG) || bytemode != v_mode)
++ oappend (names32[reg + add]);
++ else
++ oappend (names16[reg + add]);
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case m_mode:
++ if (mode_64bit)
++ oappend (names64[reg + add]);
++ else
++ oappend (names32[reg + add]);
++ break;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ break;
++ }
++}
++
++static bfd_vma
++get64 (void)
++{
++ bfd_vma x;
++#ifdef BFD64
++ unsigned int a;
++ unsigned int b;
++
++ FETCH_DATA (the_info, codep + 8);
++ a = *codep++ & 0xff;
++ a |= (*codep++ & 0xff) << 8;
++ a |= (*codep++ & 0xff) << 16;
++ a |= (*codep++ & 0xff) << 24;
++ b = *codep++ & 0xff;
++ b |= (*codep++ & 0xff) << 8;
++ b |= (*codep++ & 0xff) << 16;
++ b |= (*codep++ & 0xff) << 24;
++ x = a + ((bfd_vma) b << 32);
++#else
++ abort ();
++ x = 0;
++#endif
++ return x;
++}
++
++static bfd_signed_vma
++get32 (void)
++{
++ bfd_signed_vma x = 0;
++
++ FETCH_DATA (the_info, codep + 4);
++ x = *codep++ & (bfd_signed_vma) 0xff;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 8;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 16;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 24;
++ return x;
++}
++
++static bfd_signed_vma
++get32s (void)
++{
++ bfd_signed_vma x = 0;
++
++ FETCH_DATA (the_info, codep + 4);
++ x = *codep++ & (bfd_signed_vma) 0xff;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 8;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 16;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 24;
++
++ x = (x ^ ((bfd_signed_vma) 1 << 31)) - ((bfd_signed_vma) 1 << 31);
++
++ return x;
++}
++
++static int
++get16 (void)
++{
++ int x = 0;
++
++ FETCH_DATA (the_info, codep + 2);
++ x = *codep++ & 0xff;
++ x |= (*codep++ & 0xff) << 8;
++ return x;
++}
++
++static void
++set_op (bfd_vma op, int riprel)
++{
++ op_index[op_ad] = op_ad;
++ if (mode_64bit)
++ {
++ op_address[op_ad] = op;
++ op_riprel[op_ad] = riprel;
++ }
++ else
++ {
++ /* Mask to get a 32-bit address. */
++ op_address[op_ad] = op & 0xffffffff;
++ op_riprel[op_ad] = riprel & 0xffffffff;
++ }
++}
++
++static void
++OP_REG (int code, int sizeflag)
++{
++ const char *s;
++ int add = 0;
++ USED_REX (REX_EXTZ);
++ if (rex & REX_EXTZ)
++ add = 8;
++
++ switch (code)
++ {
++ case indir_dx_reg:
++ if (intel_syntax)
++ s = "[dx]";
++ else
++ s = "(%dx)";
++ break;
++ case ax_reg: case cx_reg: case dx_reg: case bx_reg:
++ case sp_reg: case bp_reg: case si_reg: case di_reg:
++ s = names16[code - ax_reg + add];
++ break;
++ case es_reg: case ss_reg: case cs_reg:
++ case ds_reg: case fs_reg: case gs_reg:
++ s = names_seg[code - es_reg + add];
++ break;
++ case al_reg: case ah_reg: case cl_reg: case ch_reg:
++ case dl_reg: case dh_reg: case bl_reg: case bh_reg:
++ USED_REX (0);
++ if (rex)
++ s = names8rex[code - al_reg + add];
++ else
++ s = names8[code - al_reg];
++ break;
++ case rAX_reg: case rCX_reg: case rDX_reg: case rBX_reg:
++ case rSP_reg: case rBP_reg: case rSI_reg: case rDI_reg:
++ if (mode_64bit)
++ {
++ s = names64[code - rAX_reg + add];
++ break;
++ }
++ code += eAX_reg - rAX_reg;
++ /* Fall through. */
++ case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg:
++ case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ s = names64[code - eAX_reg + add];
++ else if (sizeflag & DFLAG)
++ s = names32[code - eAX_reg + add];
++ else
++ s = names16[code - eAX_reg + add];
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ default:
++ s = INTERNAL_DISASSEMBLER_ERROR;
++ break;
++ }
++ oappend (s);
++}
++
++static void
++OP_IMREG (int code, int sizeflag)
++{
++ const char *s;
++
++ switch (code)
++ {
++ case indir_dx_reg:
++ if (intel_syntax)
++ s = "[dx]";
++ else
++ s = "(%dx)";
++ break;
++ case ax_reg: case cx_reg: case dx_reg: case bx_reg:
++ case sp_reg: case bp_reg: case si_reg: case di_reg:
++ s = names16[code - ax_reg];
++ break;
++ case es_reg: case ss_reg: case cs_reg:
++ case ds_reg: case fs_reg: case gs_reg:
++ s = names_seg[code - es_reg];
++ break;
++ case al_reg: case ah_reg: case cl_reg: case ch_reg:
++ case dl_reg: case dh_reg: case bl_reg: case bh_reg:
++ USED_REX (0);
++ if (rex)
++ s = names8rex[code - al_reg];
++ else
++ s = names8[code - al_reg];
++ break;
++ case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg:
++ case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ s = names64[code - eAX_reg];
++ else if (sizeflag & DFLAG)
++ s = names32[code - eAX_reg];
++ else
++ s = names16[code - eAX_reg];
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ default:
++ s = INTERNAL_DISASSEMBLER_ERROR;
++ break;
++ }
++ oappend (s);
++}
++
++static void
++OP_I (int bytemode, int sizeflag)
++{
++ bfd_signed_vma op;
++ bfd_signed_vma mask = -1;
++
++ switch (bytemode)
++ {
++ case b_mode:
++ FETCH_DATA (the_info, codep + 1);
++ op = *codep++;
++ mask = 0xff;
++ break;
++ case q_mode:
++ if (mode_64bit)
++ {
++ op = get32s ();
++ break;
++ }
++ /* Fall through. */
++ case v_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ op = get32s ();
++ else if (sizeflag & DFLAG)
++ {
++ op = get32 ();
++ mask = 0xffffffff;
++ }
++ else
++ {
++ op = get16 ();
++ mask = 0xfffff;
++ }
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case w_mode:
++ mask = 0xfffff;
++ op = get16 ();
++ break;
++ case const_1_mode:
++ if (intel_syntax)
++ oappend ("1");
++ return;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ return;
++ }
++
++ op &= mask;
++ scratchbuf[0] = '$';
++ print_operand_value (scratchbuf + 1, 1, op);
++ oappend (scratchbuf + intel_syntax);
++ scratchbuf[0] = '\0';
++}
++
++static void
++OP_I64 (int bytemode, int sizeflag)
++{
++ bfd_signed_vma op;
++ bfd_signed_vma mask = -1;
++
++ if (!mode_64bit)
++ {
++ OP_I (bytemode, sizeflag);
++ return;
++ }
++
++ switch (bytemode)
++ {
++ case b_mode:
++ FETCH_DATA (the_info, codep + 1);
++ op = *codep++;
++ mask = 0xff;
++ break;
++ case v_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ op = get64 ();
++ else if (sizeflag & DFLAG)
++ {
++ op = get32 ();
++ mask = 0xffffffff;
++ }
++ else
++ {
++ op = get16 ();
++ mask = 0xfffff;
++ }
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case w_mode:
++ mask = 0xfffff;
++ op = get16 ();
++ break;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ return;
++ }
++
++ op &= mask;
++ scratchbuf[0] = '$';
++ print_operand_value (scratchbuf + 1, 1, op);
++ oappend (scratchbuf + intel_syntax);
++ scratchbuf[0] = '\0';
++}
++
++static void
++OP_sI (int bytemode, int sizeflag)
++{
++ bfd_signed_vma op;
++ bfd_signed_vma mask = -1;
++
++ switch (bytemode)
++ {
++ case b_mode:
++ FETCH_DATA (the_info, codep + 1);
++ op = *codep++;
++ if ((op & 0x80) != 0)
++ op -= 0x100;
++ mask = 0xffffffff;
++ break;
++ case v_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ op = get32s ();
++ else if (sizeflag & DFLAG)
++ {
++ op = get32s ();
++ mask = 0xffffffff;
++ }
++ else
++ {
++ mask = 0xffffffff;
++ op = get16 ();
++ if ((op & 0x8000) != 0)
++ op -= 0x10000;
++ }
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case w_mode:
++ op = get16 ();
++ mask = 0xffffffff;
++ if ((op & 0x8000) != 0)
++ op -= 0x10000;
++ break;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ return;
++ }
++
++ scratchbuf[0] = '$';
++ print_operand_value (scratchbuf + 1, 1, op);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_J (int bytemode, int sizeflag)
++{
++ bfd_vma disp;
++ bfd_vma mask = -1;
++
++ switch (bytemode)
++ {
++ case b_mode:
++ FETCH_DATA (the_info, codep + 1);
++ disp = *codep++;
++ if ((disp & 0x80) != 0)
++ disp -= 0x100;
++ break;
++ case v_mode:
++ if (sizeflag & DFLAG)
++ disp = get32s ();
++ else
++ {
++ disp = get16 ();
++ /* For some reason, a data16 prefix on a jump instruction
++ means that the pc is masked to 16 bits after the
++ displacement is added! */
++ mask = 0xffff;
++ }
++ break;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ return;
++ }
++ disp = (start_pc + codep - start_codep + disp) & mask;
++ set_op (disp, 0);
++ print_operand_value (scratchbuf, 1, disp);
++ oappend (scratchbuf);
++}
++
++static void
++OP_SEG (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ oappend (names_seg[reg]);
++}
++
++static void
++OP_DIR (int dummy ATTRIBUTE_UNUSED, int sizeflag)
++{
++ int seg, offset;
++
++ if (sizeflag & DFLAG)
++ {
++ offset = get32 ();
++ seg = get16 ();
++ }
++ else
++ {
++ offset = get16 ();
++ seg = get16 ();
++ }
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (intel_syntax)
++ sprintf (scratchbuf, "0x%x,0x%x", seg, offset);
++ else
++ sprintf (scratchbuf, "$0x%x,$0x%x", seg, offset);
++ oappend (scratchbuf);
++}
++
++static void
++OP_OFF (int bytemode ATTRIBUTE_UNUSED, int sizeflag)
++{
++ bfd_vma off;
++
++ append_seg ();
++
++ if ((sizeflag & AFLAG) || mode_64bit)
++ off = get32 ();
++ else
++ off = get16 ();
++
++ if (intel_syntax)
++ {
++ if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
++ | PREFIX_ES | PREFIX_FS | PREFIX_GS)))
++ {
++ oappend (names_seg[ds_reg - es_reg]);
++ oappend (":");
++ }
++ }
++ print_operand_value (scratchbuf, 1, off);
++ oappend (scratchbuf);
++}
++
++static void
++OP_OFF64 (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ bfd_vma off;
++
++ if (!mode_64bit)
++ {
++ OP_OFF (bytemode, sizeflag);
++ return;
++ }
++
++ append_seg ();
++
++ off = get64 ();
++
++ if (intel_syntax)
++ {
++ if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
++ | PREFIX_ES | PREFIX_FS | PREFIX_GS)))
++ {
++ oappend (names_seg[ds_reg - es_reg]);
++ oappend (":");
++ }
++ }
++ print_operand_value (scratchbuf, 1, off);
++ oappend (scratchbuf);
++}
++
++static void
++ptr_reg (int code, int sizeflag)
++{
++ const char *s;
++
++ *obufp++ = open_char;
++ used_prefixes |= (prefixes & PREFIX_ADDR);
++ if (mode_64bit)
++ {
++ if (!(sizeflag & AFLAG))
++ s = names32[code - eAX_reg];
++ else
++ s = names64[code - eAX_reg];
++ }
++ else if (sizeflag & AFLAG)
++ s = names32[code - eAX_reg];
++ else
++ s = names16[code - eAX_reg];
++ oappend (s);
++ *obufp++ = close_char;
++ *obufp = 0;
++}
++
++static void
++OP_ESreg (int code, int sizeflag)
++{
++ if (intel_syntax)
++ {
++ if (codep[-1] & 1)
++ {
++ USED_REX (REX_MODE64);
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (rex & REX_MODE64)
++ oappend ("QWORD PTR ");
++ else if ((sizeflag & DFLAG))
++ oappend ("DWORD PTR ");
++ else
++ oappend ("WORD PTR ");
++ }
++ else
++ oappend ("BYTE PTR ");
++ }
++
++ oappend ("%es:" + intel_syntax);
++ ptr_reg (code, sizeflag);
++}
++
++static void
++OP_DSreg (int code, int sizeflag)
++{
++ if (intel_syntax)
++ {
++ if (codep[-1] != 0xd7 && (codep[-1] & 1))
++ {
++ USED_REX (REX_MODE64);
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (rex & REX_MODE64)
++ oappend ("QWORD PTR ");
++ else if ((sizeflag & DFLAG))
++ oappend ("DWORD PTR ");
++ else
++ oappend ("WORD PTR ");
++ }
++ else
++ oappend ("BYTE PTR ");
++ }
++
++ if ((prefixes
++ & (PREFIX_CS
++ | PREFIX_DS
++ | PREFIX_SS
++ | PREFIX_ES
++ | PREFIX_FS
++ | PREFIX_GS)) == 0)
++ prefixes |= PREFIX_DS;
++ append_seg ();
++ ptr_reg (code, sizeflag);
++}
++
++static void
++OP_C (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ int add = 0;
++ if (rex & REX_EXTX)
++ {
++ USED_REX (REX_EXTX);
++ add = 8;
++ }
++ else if (!mode_64bit && (prefixes & PREFIX_LOCK))
++ {
++ used_prefixes |= PREFIX_LOCK;
++ add = 8;
++ }
++ sprintf (scratchbuf, "%%cr%d", reg + add);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_D (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ int add = 0;
++ USED_REX (REX_EXTX);
++ if (rex & REX_EXTX)
++ add = 8;
++ if (intel_syntax)
++ sprintf (scratchbuf, "db%d", reg + add);
++ else
++ sprintf (scratchbuf, "%%db%d", reg + add);
++ oappend (scratchbuf);
++}
++
++static void
++OP_T (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ sprintf (scratchbuf, "%%tr%d", reg);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_Rd (int bytemode, int sizeflag)
++{
++ if (mod == 3)
++ OP_E (bytemode, sizeflag);
++ else
++ BadOp ();
++}
++
++static void
++OP_MMX (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (prefixes & PREFIX_DATA)
++ {
++ int add = 0;
++ USED_REX (REX_EXTX);
++ if (rex & REX_EXTX)
++ add = 8;
++ sprintf (scratchbuf, "%%xmm%d", reg + add);
++ }
++ else
++ sprintf (scratchbuf, "%%mm%d", reg);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_XMM (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ int add = 0;
++ USED_REX (REX_EXTX);
++ if (rex & REX_EXTX)
++ add = 8;
++ sprintf (scratchbuf, "%%xmm%d", reg + add);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_EM (int bytemode, int sizeflag)
++{
++ if (mod != 3)
++ {
++ if (intel_syntax && bytemode == v_mode)
++ {
++ bytemode = (prefixes & PREFIX_DATA) ? x_mode : q_mode;
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ OP_E (bytemode, sizeflag);
++ return;
++ }
++
++ /* Skip mod/rm byte. */
++ MODRM_CHECK;
++ codep++;
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (prefixes & PREFIX_DATA)
++ {
++ int add = 0;
++
++ USED_REX (REX_EXTZ);
++ if (rex & REX_EXTZ)
++ add = 8;
++ sprintf (scratchbuf, "%%xmm%d", rm + add);
++ }
++ else
++ sprintf (scratchbuf, "%%mm%d", rm);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_EX (int bytemode, int sizeflag)
++{
++ int add = 0;
++ if (mod != 3)
++ {
++ if (intel_syntax && bytemode == v_mode)
++ {
++ switch (prefixes & (PREFIX_DATA|PREFIX_REPZ|PREFIX_REPNZ))
++ {
++ case 0: bytemode = x_mode; break;
++ case PREFIX_REPZ: bytemode = d_mode; used_prefixes |= PREFIX_REPZ; break;
++ case PREFIX_DATA: bytemode = x_mode; used_prefixes |= PREFIX_DATA; break;
++ case PREFIX_REPNZ: bytemode = q_mode; used_prefixes |= PREFIX_REPNZ; break;
++ default: bytemode = 0; break;
++ }
++ }
++ OP_E (bytemode, sizeflag);
++ return;
++ }
++ USED_REX (REX_EXTZ);
++ if (rex & REX_EXTZ)
++ add = 8;
++
++ /* Skip mod/rm byte. */
++ MODRM_CHECK;
++ codep++;
++ sprintf (scratchbuf, "%%xmm%d", rm + add);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_MS (int bytemode, int sizeflag)
++{
++ if (mod == 3)
++ OP_EM (bytemode, sizeflag);
++ else
++ BadOp ();
++}
++
++static void
++OP_XS (int bytemode, int sizeflag)
++{
++ if (mod == 3)
++ OP_EX (bytemode, sizeflag);
++ else
++ BadOp ();
++}
++
++static void
++OP_M (int bytemode, int sizeflag)
++{
++ if (mod == 3)
++ BadOp (); /* bad lea,lds,les,lfs,lgs,lss modrm */
++ else
++ OP_E (bytemode, sizeflag);
++}
++
++static void
++OP_0f07 (int bytemode, int sizeflag)
++{
++ if (mod != 3 || rm != 0)
++ BadOp ();
++ else
++ OP_E (bytemode, sizeflag);
++}
++
++static void
++OP_0fae (int bytemode, int sizeflag)
++{
++ if (mod == 3)
++ {
++ if (reg == 7)
++ strcpy (obuf + strlen (obuf) - sizeof ("clflush") + 1, "sfence");
++
++ if (reg < 5 || rm != 0)
++ {
++ BadOp (); /* bad sfence, mfence, or lfence */
++ return;
++ }
++ }
++ else if (reg != 7)
++ {
++ BadOp (); /* bad clflush */
++ return;
++ }
++
++ OP_E (bytemode, sizeflag);
++}
++
++static void
++NOP_Fixup (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ /* NOP with REPZ prefix is called PAUSE. */
++ if (prefixes == PREFIX_REPZ)
++ strcpy (obuf, "pause");
++}
++
++static const char *const Suffix3DNow[] = {
++/* 00 */ NULL, NULL, NULL, NULL,
++/* 04 */ NULL, NULL, NULL, NULL,
++/* 08 */ NULL, NULL, NULL, NULL,
++/* 0C */ "pi2fw", "pi2fd", NULL, NULL,
++/* 10 */ NULL, NULL, NULL, NULL,
++/* 14 */ NULL, NULL, NULL, NULL,
++/* 18 */ NULL, NULL, NULL, NULL,
++/* 1C */ "pf2iw", "pf2id", NULL, NULL,
++/* 20 */ NULL, NULL, NULL, NULL,
++/* 24 */ NULL, NULL, NULL, NULL,
++/* 28 */ NULL, NULL, NULL, NULL,
++/* 2C */ NULL, NULL, NULL, NULL,
++/* 30 */ NULL, NULL, NULL, NULL,
++/* 34 */ NULL, NULL, NULL, NULL,
++/* 38 */ NULL, NULL, NULL, NULL,
++/* 3C */ NULL, NULL, NULL, NULL,
++/* 40 */ NULL, NULL, NULL, NULL,
++/* 44 */ NULL, NULL, NULL, NULL,
++/* 48 */ NULL, NULL, NULL, NULL,
++/* 4C */ NULL, NULL, NULL, NULL,
++/* 50 */ NULL, NULL, NULL, NULL,
++/* 54 */ NULL, NULL, NULL, NULL,
++/* 58 */ NULL, NULL, NULL, NULL,
++/* 5C */ NULL, NULL, NULL, NULL,
++/* 60 */ NULL, NULL, NULL, NULL,
++/* 64 */ NULL, NULL, NULL, NULL,
++/* 68 */ NULL, NULL, NULL, NULL,
++/* 6C */ NULL, NULL, NULL, NULL,
++/* 70 */ NULL, NULL, NULL, NULL,
++/* 74 */ NULL, NULL, NULL, NULL,
++/* 78 */ NULL, NULL, NULL, NULL,
++/* 7C */ NULL, NULL, NULL, NULL,
++/* 80 */ NULL, NULL, NULL, NULL,
++/* 84 */ NULL, NULL, NULL, NULL,
++/* 88 */ NULL, NULL, "pfnacc", NULL,
++/* 8C */ NULL, NULL, "pfpnacc", NULL,
++/* 90 */ "pfcmpge", NULL, NULL, NULL,
++/* 94 */ "pfmin", NULL, "pfrcp", "pfrsqrt",
++/* 98 */ NULL, NULL, "pfsub", NULL,
++/* 9C */ NULL, NULL, "pfadd", NULL,
++/* A0 */ "pfcmpgt", NULL, NULL, NULL,
++/* A4 */ "pfmax", NULL, "pfrcpit1", "pfrsqit1",
++/* A8 */ NULL, NULL, "pfsubr", NULL,
++/* AC */ NULL, NULL, "pfacc", NULL,
++/* B0 */ "pfcmpeq", NULL, NULL, NULL,
++/* B4 */ "pfmul", NULL, "pfrcpit2", "pfmulhrw",
++/* B8 */ NULL, NULL, NULL, "pswapd",
++/* BC */ NULL, NULL, NULL, "pavgusb",
++/* C0 */ NULL, NULL, NULL, NULL,
++/* C4 */ NULL, NULL, NULL, NULL,
++/* C8 */ NULL, NULL, NULL, NULL,
++/* CC */ NULL, NULL, NULL, NULL,
++/* D0 */ NULL, NULL, NULL, NULL,
++/* D4 */ NULL, NULL, NULL, NULL,
++/* D8 */ NULL, NULL, NULL, NULL,
++/* DC */ NULL, NULL, NULL, NULL,
++/* E0 */ NULL, NULL, NULL, NULL,
++/* E4 */ NULL, NULL, NULL, NULL,
++/* E8 */ NULL, NULL, NULL, NULL,
++/* EC */ NULL, NULL, NULL, NULL,
++/* F0 */ NULL, NULL, NULL, NULL,
++/* F4 */ NULL, NULL, NULL, NULL,
++/* F8 */ NULL, NULL, NULL, NULL,
++/* FC */ NULL, NULL, NULL, NULL,
++};
++
++static void
++OP_3DNowSuffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ const char *mnemonic;
++
++ FETCH_DATA (the_info, codep + 1);
++ /* AMD 3DNow! instructions are specified by an opcode suffix in the
++ place where an 8-bit immediate would normally go. ie. the last
++ byte of the instruction. */
++ obufp = obuf + strlen (obuf);
++ mnemonic = Suffix3DNow[*codep++ & 0xff];
++ if (mnemonic)
++ oappend (mnemonic);
++ else
++ {
++ /* Since a variable sized modrm/sib chunk is between the start
++ of the opcode (0x0f0f) and the opcode suffix, we need to do
++ all the modrm processing first, and don't know until now that
++ we have a bad opcode. This necessitates some cleaning up. */
++ op1out[0] = '\0';
++ op2out[0] = '\0';
++ BadOp ();
++ }
++}
++
++static const char *simd_cmp_op[] = {
++ "eq",
++ "lt",
++ "le",
++ "unord",
++ "neq",
++ "nlt",
++ "nle",
++ "ord"
++};
++
++static void
++OP_SIMD_Suffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ unsigned int cmp_type;
++
++ FETCH_DATA (the_info, codep + 1);
++ obufp = obuf + strlen (obuf);
++ cmp_type = *codep++ & 0xff;
++ if (cmp_type < 8)
++ {
++ char suffix1 = 'p', suffix2 = 's';
++ used_prefixes |= (prefixes & PREFIX_REPZ);
++ if (prefixes & PREFIX_REPZ)
++ suffix1 = 's';
++ else
++ {
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (prefixes & PREFIX_DATA)
++ suffix2 = 'd';
++ else
++ {
++ used_prefixes |= (prefixes & PREFIX_REPNZ);
++ if (prefixes & PREFIX_REPNZ)
++ suffix1 = 's', suffix2 = 'd';
++ }
++ }
++ sprintf (scratchbuf, "cmp%s%c%c",
++ simd_cmp_op[cmp_type], suffix1, suffix2);
++ used_prefixes |= (prefixes & PREFIX_REPZ);
++ oappend (scratchbuf);
++ }
++ else
++ {
++ /* We have a bad extension byte. Clean up. */
++ op1out[0] = '\0';
++ op2out[0] = '\0';
++ BadOp ();
++ }
++}
++
++static void
++SIMD_Fixup (int extrachar, int sizeflag ATTRIBUTE_UNUSED)
++{
++ /* Change movlps/movhps to movhlps/movlhps for 2 register operand
++ forms of these instructions. */
++ if (mod == 3)
++ {
++ char *p = obuf + strlen (obuf);
++ *(p + 1) = '\0';
++ *p = *(p - 1);
++ *(p - 1) = *(p - 2);
++ *(p - 2) = *(p - 3);
++ *(p - 3) = extrachar;
++ }
++}
++
++static void
++PNI_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag)
++{
++ if (mod == 3 && reg == 1 && rm <= 1)
++ {
++ /* Override "sidt". */
++ char *p = obuf + strlen (obuf) - 4;
++
++ /* We might have a suffix when disassembling with -Msuffix. */
++ if (*p == 'i')
++ --p;
++
++ if (rm)
++ {
++ /* mwait %eax,%ecx */
++ strcpy (p, "mwait");
++ if (!intel_syntax)
++ strcpy (op1out, names32[0]);
++ }
++ else
++ {
++ /* monitor %eax,%ecx,%edx" */
++ strcpy (p, "monitor");
++ if (!intel_syntax)
++ {
++ if (!mode_64bit)
++ strcpy (op1out, names32[0]);
++ else if (!(prefixes & PREFIX_ADDR))
++ strcpy (op1out, names64[0]);
++ else
++ {
++ strcpy (op1out, names32[0]);
++ used_prefixes |= PREFIX_ADDR;
++ }
++ strcpy (op3out, names32[2]);
++ }
++ }
++ if (!intel_syntax)
++ {
++ strcpy (op2out, names32[1]);
++ two_source_ops = 1;
++ }
++
++ codep++;
++ }
++ else
++ OP_M (0, sizeflag);
++}
++
++static void
++SVME_Fixup (int bytemode, int sizeflag)
++{
++ const char *alt;
++ char *p;
++
++ switch (*codep)
++ {
++ case 0xd8:
++ alt = "vmrun";
++ break;
++ case 0xd9:
++ alt = "vmmcall";
++ break;
++ case 0xda:
++ alt = "vmload";
++ break;
++ case 0xdb:
++ alt = "vmsave";
++ break;
++ case 0xdc:
++ alt = "stgi";
++ break;
++ case 0xdd:
++ alt = "clgi";
++ break;
++ case 0xde:
++ alt = "skinit";
++ break;
++ case 0xdf:
++ alt = "invlpga";
++ break;
++ default:
++ OP_M (bytemode, sizeflag);
++ return;
++ }
++ /* Override "lidt". */
++ p = obuf + strlen (obuf) - 4;
++ /* We might have a suffix. */
++ if (*p == 'i')
++ --p;
++ strcpy (p, alt);
++ if (!(prefixes & PREFIX_ADDR))
++ {
++ ++codep;
++ return;
++ }
++ used_prefixes |= PREFIX_ADDR;
++ switch (*codep++)
++ {
++ case 0xdf:
++ strcpy (op2out, names32[1]);
++ two_source_ops = 1;
++ /* Fall through. */
++ case 0xd8:
++ case 0xda:
++ case 0xdb:
++ *obufp++ = open_char;
++ if (mode_64bit || (sizeflag & AFLAG))
++ alt = names32[0];
++ else
++ alt = names16[0];
++ strcpy (obufp, alt);
++ obufp += strlen (alt);
++ *obufp++ = close_char;
++ *obufp = '\0';
++ break;
++ }
++}
++
++static void
++INVLPG_Fixup (int bytemode, int sizeflag)
++{
++ const char *alt;
++
++ switch (*codep)
++ {
++ case 0xf8:
++ alt = "swapgs";
++ break;
++ case 0xf9:
++ alt = "rdtscp";
++ break;
++ default:
++ OP_M (bytemode, sizeflag);
++ return;
++ }
++ /* Override "invlpg". */
++ strcpy (obuf + strlen (obuf) - 6, alt);
++ codep++;
++}
++
++static void
++BadOp (void)
++{
++ /* Throw away prefixes and 1st. opcode byte. */
++ codep = insn_codep + 1;
++ oappend ("(bad)");
++}
++
++static void
++SEG_Fixup (int extrachar, int sizeflag)
++{
++ if (mod == 3)
++ {
++ /* We need to add a proper suffix with
++
++ movw %ds,%ax
++ movl %ds,%eax
++ movq %ds,%rax
++ movw %ax,%ds
++ movl %eax,%ds
++ movq %rax,%ds
++ */
++ const char *suffix;
++
++ if (prefixes & PREFIX_DATA)
++ suffix = "w";
++ else
++ {
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ suffix = "q";
++ else
++ suffix = "l";
++ }
++ strcat (obuf, suffix);
++ }
++ else
++ {
++ /* We need to fix the suffix for
++
++ movw %ds,(%eax)
++ movw %ds,(%rax)
++ movw (%eax),%ds
++ movw (%rax),%ds
++
++ Override "mov[l|q]". */
++ char *p = obuf + strlen (obuf) - 1;
++
++ /* We might not have a suffix. */
++ if (*p == 'v')
++ ++p;
++ *p = 'w';
++ }
++
++ OP_E (extrachar, sizeflag);
++}
++
++static void
++VMX_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag)
++{
++ if (mod == 3 && reg == 0 && rm >=1 && rm <= 4)
++ {
++ /* Override "sgdt". */
++ char *p = obuf + strlen (obuf) - 4;
++
++ /* We might have a suffix when disassembling with -Msuffix. */
++ if (*p == 'g')
++ --p;
++
++ switch (rm)
++ {
++ case 1:
++ strcpy (p, "vmcall");
++ break;
++ case 2:
++ strcpy (p, "vmlaunch");
++ break;
++ case 3:
++ strcpy (p, "vmresume");
++ break;
++ case 4:
++ strcpy (p, "vmxoff");
++ break;
++ }
++
++ codep++;
++ }
++ else
++ OP_E (0, sizeflag);
++}
++
++static void
++OP_VMX (int bytemode, int sizeflag)
++{
++ used_prefixes |= (prefixes & (PREFIX_DATA | PREFIX_REPZ));
++ if (prefixes & PREFIX_DATA)
++ strcpy (obuf, "vmclear");
++ else if (prefixes & PREFIX_REPZ)
++ strcpy (obuf, "vmxon");
++ else
++ strcpy (obuf, "vmptrld");
++ OP_E (bytemode, sizeflag);
++}
+diff -Nurp linux-2.6.22-590/arch/i386/kdb/kdba_bp.c linux-2.6.22-600/arch/i386/kdb/kdba_bp.c
+--- linux-2.6.22-590/arch/i386/kdb/kdba_bp.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/i386/kdb/kdba_bp.c 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,780 @@
++/*
++ * Kernel Debugger Architecture Dependent Breakpoint Handling
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/smp.h>
++#include <linux/ptrace.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++
++
++static char *kdba_rwtypes[] = { "Instruction(Register)", "Data Write",
++ "I/O", "Data Access"};
++
++/*
++ * Table describing processor architecture hardware
++ * breakpoint registers.
++ */
++
++static kdbhard_bp_t kdb_hardbreaks[KDB_MAXHARDBPT];
++
++/*
++ * kdba_db_trap
++ *
++ * Perform breakpoint processing upon entry to the
++ * processor debugger fault. Determine and print
++ * the active breakpoint.
++ *
++ * Parameters:
++ * regs Exception frame containing machine register state
++ * error Error number passed to kdb.
++ * Outputs:
++ * None.
++ * Returns:
++ * KDB_DB_BPT Standard instruction or data breakpoint encountered
++ * KDB_DB_SS Single Step fault ('ss' command or end of 'ssb' command)
++ * KDB_DB_SSB Single Step fault, caller should continue ('ssb' command)
++ * KDB_DB_SSBPT Single step over breakpoint
++ * KDB_DB_NOBPT No existing kdb breakpoint matches this debug exception
++ * Locking:
++ * None.
++ * Remarks:
++ * Yup, there be goto's here.
++ *
++ * If multiple processors receive debug exceptions simultaneously,
++ * one may be waiting at the kdb fence in kdb() while the user
++ * issues a 'bc' command to clear the breakpoint the processor
++ * which is waiting has already encountered. If this is the case,
++ * the debug registers will no longer match any entry in the
++ * breakpoint table, and we'll return the value KDB_DB_NOBPT.
++ * This can cause a panic in die_if_kernel(). It is safer to
++ * disable the breakpoint (bd), go until all processors are past
++ * the breakpoint then clear the breakpoint (bc). This code
++ * recognises a breakpoint even when disabled but not when it has
++ * been cleared.
++ *
++ * WARNING: This routine clears the debug state. It should be called
++ * once per debug and the result cached.
++ */
++
++kdb_dbtrap_t
++kdba_db_trap(struct pt_regs *regs, int error_unused)
++{
++ kdb_machreg_t dr6;
++ kdb_machreg_t dr7;
++ int rw, reg;
++ int i;
++ kdb_dbtrap_t rv = KDB_DB_BPT;
++ kdb_bp_t *bp;
++
++ if (KDB_NULL_REGS(regs))
++ return KDB_DB_NOBPT;
++
++ dr6 = kdba_getdr6();
++ dr7 = kdba_getdr7();
++
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdb: dr6 0x%lx dr7 0x%lx\n", dr6, dr7);
++ if (dr6 & DR6_BS) {
++ if (KDB_STATE(SSBPT)) {
++ if (KDB_DEBUG(BP))
++ kdb_printf("ssbpt\n");
++ KDB_STATE_CLEAR(SSBPT);
++ for(i=0,bp=kdb_breakpoints;
++ i < KDB_MAXBPT;
++ i++, bp++) {
++ if (KDB_DEBUG(BP))
++ kdb_printf("bp 0x%p enabled %d delayed %d global %d cpu %d\n",
++ bp, bp->bp_enabled, bp->bp_delayed, bp->bp_global, bp->bp_cpu);
++ if (!bp->bp_enabled)
++ continue;
++ if (!bp->bp_global && bp->bp_cpu != smp_processor_id())
++ continue;
++ if (KDB_DEBUG(BP))
++ kdb_printf("bp for this cpu\n");
++ if (bp->bp_delayed) {
++ bp->bp_delayed = 0;
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdba_installbp\n");
++ kdba_installbp(regs, bp);
++ if (!KDB_STATE(DOING_SS)) {
++ regs->eflags &= ~EF_TF;
++ return(KDB_DB_SSBPT);
++ }
++ break;
++ }
++ }
++ if (i == KDB_MAXBPT) {
++ kdb_printf("kdb: Unable to find delayed breakpoint\n");
++ }
++ if (!KDB_STATE(DOING_SS)) {
++ regs->eflags &= ~EF_TF;
++ return(KDB_DB_NOBPT);
++ }
++ /* FALLTHROUGH */
++ }
++
++ /*
++ * KDB_STATE_DOING_SS is set when the kernel debugger is using
++ * the processor trap flag to single-step a processor. If a
++ * single step trap occurs and this flag is clear, the SS trap
++ * will be ignored by KDB and the kernel will be allowed to deal
++ * with it as necessary (e.g. for ptrace).
++ */
++ if (!KDB_STATE(DOING_SS))
++ goto unknown;
++
++ /* single step */
++ rv = KDB_DB_SS; /* Indicate single step */
++ if (KDB_STATE(DOING_SSB)) {
++ unsigned char instruction[2];
++
++ kdb_id1(regs->eip);
++ if (kdb_getarea(instruction, regs->eip) ||
++ (instruction[0]&0xf0) == 0xe0 || /* short disp jumps */
++ (instruction[0]&0xf0) == 0x70 || /* Misc. jumps */
++ instruction[0] == 0xc2 || /* ret */
++ instruction[0] == 0x9a || /* call */
++ (instruction[0]&0xf8) == 0xc8 || /* enter, leave, iret, int, */
++ ((instruction[0] == 0x0f) &&
++ ((instruction[1]&0xf0)== 0x80))
++ ) {
++ /*
++ * End the ssb command here.
++ */
++ KDB_STATE_CLEAR(DOING_SSB);
++ KDB_STATE_CLEAR(DOING_SS);
++ } else {
++ rv = KDB_DB_SSB; /* Indicate ssb - dismiss immediately */
++ }
++ } else {
++ /*
++ * Print current insn
++ */
++ kdb_printf("SS trap at ");
++ kdb_symbol_print(regs->eip, NULL, KDB_SP_DEFAULT|KDB_SP_NEWLINE);
++ kdb_id1(regs->eip);
++ KDB_STATE_CLEAR(DOING_SS);
++ }
++
++ if (rv != KDB_DB_SSB)
++ regs->eflags &= ~EF_TF;
++ }
++
++ if (dr6 & DR6_B0) {
++ rw = DR7_RW0(dr7);
++ reg = 0;
++ goto handle;
++ }
++
++ if (dr6 & DR6_B1) {
++ rw = DR7_RW1(dr7);
++ reg = 1;
++ goto handle;
++ }
++
++ if (dr6 & DR6_B2) {
++ rw = DR7_RW2(dr7);
++ reg = 2;
++ goto handle;
++ }
++
++ if (dr6 & DR6_B3) {
++ rw = DR7_RW3(dr7);
++ reg = 3;
++ goto handle;
++ }
++
++ if (rv > 0)
++ goto handled;
++
++ goto unknown; /* dismiss */
++
++handle:
++ /*
++ * Set Resume Flag
++ */
++ regs->eflags |= EF_RF;
++
++ /*
++ * Determine which breakpoint was encountered.
++ */
++ for(i=0, bp=kdb_breakpoints; i<KDB_MAXBPT; i++, bp++) {
++ if (!(bp->bp_free)
++ && (bp->bp_global || bp->bp_cpu == smp_processor_id())
++ && (bp->bp_hard)
++ && (bp->bp_hard->bph_reg == reg)) {
++ /*
++ * Hit this breakpoint.
++ */
++ kdb_printf("%s breakpoint #%d at " kdb_bfd_vma_fmt "\n",
++ kdba_rwtypes[rw],
++ i, bp->bp_addr);
++
++ /*
++ * For an instruction breakpoint, disassemble
++ * the current instruction.
++ */
++ if (rw == 0) {
++ kdb_id1(regs->eip);
++ }
++
++ goto handled;
++ }
++ }
++
++unknown:
++ regs->eflags |= EF_RF; /* Suppress further faults */
++ rv = KDB_DB_NOBPT; /* Cause kdb() to return */
++
++handled:
++
++ /*
++ * Clear the pending exceptions.
++ */
++ kdba_putdr6(0);
++
++ return rv;
++}
++
++/*
++ * kdba_bp_trap
++ *
++ * Perform breakpoint processing upon entry to the
++ * processor breakpoint instruction fault. Determine and print
++ * the active breakpoint.
++ *
++ * Parameters:
++ * regs Exception frame containing machine register state
++ * error Error number passed to kdb.
++ * Outputs:
++ * None.
++ * Returns:
++ * 0 Standard instruction or data breakpoint encountered
++ * 1 Single Step fault ('ss' command)
++ * 2 Single Step fault, caller should continue ('ssb' command)
++ * 3 No existing kdb breakpoint matches this debug exception
++ * Locking:
++ * None.
++ * Remarks:
++ *
++ * If multiple processors receive debug exceptions simultaneously,
++ * one may be waiting at the kdb fence in kdb() while the user
++ * issues a 'bc' command to clear the breakpoint the processor which
++ * is waiting has already encountered. If this is the case, the
++ * debug registers will no longer match any entry in the breakpoint
++ * table, and we'll return the value '3'. This can cause a panic
++ * in die_if_kernel(). It is safer to disable the breakpoint (bd),
++ * 'go' until all processors are past the breakpoint then clear the
++ * breakpoint (bc). This code recognises a breakpoint even when
++ * disabled but not when it has been cleared.
++ *
++ * WARNING: This routine resets the eip. It should be called
++ * once per breakpoint and the result cached.
++ */
++
++kdb_dbtrap_t
++kdba_bp_trap(struct pt_regs *regs, int error_unused)
++{
++ int i;
++ kdb_dbtrap_t rv;
++ kdb_bp_t *bp;
++
++ if (KDB_NULL_REGS(regs))
++ return KDB_DB_NOBPT;
++
++ /*
++ * Determine which breakpoint was encountered.
++ */
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdba_bp_trap: eip=0x%lx (not adjusted) "
++ "eflags=0x%lx regs=0x%p esp=0x%lx\n",
++ regs->eip, regs->eflags, regs, regs->esp);
++
++ rv = KDB_DB_NOBPT; /* Cause kdb() to return */
++
++ for(i=0, bp=kdb_breakpoints; i<KDB_MAXBPT; i++, bp++) {
++ if (bp->bp_free)
++ continue;
++ if (!bp->bp_global && bp->bp_cpu != smp_processor_id())
++ continue;
++ if ((void *)bp->bp_addr == (void *)(regs->eip - bp->bp_adjust)) {
++ /* Hit this breakpoint. */
++ regs->eip -= bp->bp_adjust;
++ kdb_printf("Instruction(i) breakpoint #%d at 0x%lx (adjusted)\n",
++ i, regs->eip);
++ kdb_id1(regs->eip);
++ rv = KDB_DB_BPT;
++ bp->bp_delay = 1;
++ /* SSBPT is set when the kernel debugger must single
++ * step a task in order to re-establish an instruction
++ * breakpoint which uses the instruction replacement
++ * mechanism. It is cleared by any action that removes
++ * the need to single-step the breakpoint.
++ */
++ KDB_STATE_SET(SSBPT);
++ break;
++ }
++ }
++
++ return rv;
++}
++
++/*
++ * kdba_handle_bp
++ *
++ * Handle an instruction-breakpoint trap. Called when re-installing
++ * an enabled breakpoint which has the bp_delay bit set.
++ *
++ * Parameters:
++ * Returns:
++ * Locking:
++ * Remarks:
++ *
++ * Ok, we really need to:
++ * 1) Restore the original instruction byte
++ * 2) Single Step
++ * 3) Restore breakpoint instruction
++ * 4) Continue.
++ *
++ *
++ */
++
++static void
++kdba_handle_bp(struct pt_regs *regs, kdb_bp_t *bp)
++{
++ if (KDB_NULL_REGS(regs))
++ return;
++
++ if (KDB_DEBUG(BP))
++ kdb_printf("regs->eip = 0x%lx\n", regs->eip);
++
++ /*
++ * Setup single step
++ */
++ kdba_setsinglestep(regs);
++
++ /*
++ * Reset delay attribute
++ */
++ bp->bp_delay = 0;
++ bp->bp_delayed = 1;
++}
++
++
++/*
++ * kdba_bptype
++ *
++ * Return a string describing type of breakpoint.
++ *
++ * Parameters:
++ * bph Pointer to hardware breakpoint description
++ * Outputs:
++ * None.
++ * Returns:
++ * Character string.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++char *
++kdba_bptype(kdbhard_bp_t *bph)
++{
++ char *mode;
++
++ mode = kdba_rwtypes[bph->bph_mode];
++
++ return mode;
++}
++
++/*
++ * kdba_printbpreg
++ *
++ * Print register name assigned to breakpoint
++ *
++ * Parameters:
++ * bph Pointer hardware breakpoint structure
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++static void
++kdba_printbpreg(kdbhard_bp_t *bph)
++{
++ kdb_printf(" in dr%ld", bph->bph_reg);
++}
++
++/*
++ * kdba_printbp
++ *
++ * Print string describing hardware breakpoint.
++ *
++ * Parameters:
++ * bph Pointer to hardware breakpoint description
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void
++kdba_printbp(kdb_bp_t *bp)
++{
++ kdb_printf("\n is enabled");
++ if (bp->bp_hardtype) {
++ kdba_printbpreg(bp->bp_hard);
++ if (bp->bp_hard->bph_mode != 0) {
++ kdb_printf(" for %d bytes",
++ bp->bp_hard->bph_length+1);
++ }
++ }
++}
++
++/*
++ * kdba_parsebp
++ *
++ * Parse architecture dependent portion of the
++ * breakpoint command.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero for success, a kdb diagnostic for failure
++ * Locking:
++ * None.
++ * Remarks:
++ * for the IA32 architecture, data access, data write and
++ * I/O breakpoints are supported in addition to instruction
++ * breakpoints.
++ *
++ * {datar|dataw|io|inst} [length]
++ */
++
++int
++kdba_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp)
++{
++ int nextarg = *nextargp;
++ int diag;
++ kdbhard_bp_t *bph = &bp->bp_template;
++
++ bph->bph_mode = 0; /* Default to instruction breakpoint */
++ bph->bph_length = 0; /* Length must be zero for insn bp */
++ if ((argc + 1) != nextarg) {
++ if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0) {
++ bph->bph_mode = 3;
++ } else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0) {
++ bph->bph_mode = 1;
++ } else if (strnicmp(argv[nextarg], "io", sizeof("io")) == 0) {
++ bph->bph_mode = 2;
++ } else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0) {
++ bph->bph_mode = 0;
++ } else {
++ return KDB_ARGCOUNT;
++ }
++
++ bph->bph_length = 3; /* Default to 4 byte */
++
++ nextarg++;
++
++ if ((argc + 1) != nextarg) {
++ unsigned long len;
++
++ diag = kdbgetularg((char *)argv[nextarg],
++ &len);
++ if (diag)
++ return diag;
++
++
++ if ((len > 4) || (len == 3))
++ return KDB_BADLENGTH;
++
++ bph->bph_length = len;
++ bph->bph_length--; /* Normalize for debug register */
++ nextarg++;
++ }
++
++ if ((argc + 1) != nextarg)
++ return KDB_ARGCOUNT;
++
++ /*
++ * Indicate to architecture independent level that
++ * a hardware register assignment is required to enable
++ * this breakpoint.
++ */
++
++ bph->bph_free = 0;
++ } else {
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdba_bp: no args, forcehw is %d\n", bp->bp_forcehw);
++ if (bp->bp_forcehw) {
++ /*
++ * We are forced to use a hardware register for this
++ * breakpoint because either the bph or bpha
++ * commands were used to establish this breakpoint.
++ */
++ bph->bph_free = 0;
++ } else {
++ /*
++ * Indicate to architecture dependent level that
++ * the instruction replacement breakpoint technique
++ * should be used for this breakpoint.
++ */
++ bph->bph_free = 1;
++ bp->bp_adjust = 1; /* software, int 3 is one byte */
++ }
++ }
++
++ if (bph->bph_mode != 2 && kdba_verify_rw(bp->bp_addr, bph->bph_length+1)) {
++ kdb_printf("Invalid address for breakpoint, ignoring bp command\n");
++ return KDB_BADADDR;
++ }
++
++ *nextargp = nextarg;
++ return 0;
++}
++
++/*
++ * kdba_allocbp
++ *
++ * Associate a hardware register with a breakpoint.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * A pointer to the allocated register kdbhard_bp_t structure for
++ * success, Null and a non-zero diagnostic for failure.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++kdbhard_bp_t *
++kdba_allocbp(kdbhard_bp_t *bph, int *diagp)
++{
++ int i;
++ kdbhard_bp_t *newbph;
++
++ for(i=0,newbph=kdb_hardbreaks; i < KDB_MAXHARDBPT; i++, newbph++) {
++ if (newbph->bph_free) {
++ break;
++ }
++ }
++
++ if (i == KDB_MAXHARDBPT) {
++ *diagp = KDB_TOOMANYDBREGS;
++ return NULL;
++ }
++
++ *diagp = 0;
++
++ /*
++ * Copy data from template. Can't just copy the entire template
++ * here because the register number in kdb_hardbreaks must be
++ * preserved.
++ */
++ newbph->bph_data = bph->bph_data;
++ newbph->bph_write = bph->bph_write;
++ newbph->bph_mode = bph->bph_mode;
++ newbph->bph_length = bph->bph_length;
++
++ /*
++ * Mark entry allocated.
++ */
++ newbph->bph_free = 0;
++
++ return newbph;
++}
++
++/*
++ * kdba_freebp
++ *
++ * Deallocate a hardware breakpoint
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero for success, a kdb diagnostic for failure
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void
++kdba_freebp(kdbhard_bp_t *bph)
++{
++ bph->bph_free = 1;
++}
++
++/*
++ * kdba_initbp
++ *
++ * Initialize the breakpoint table for the hardware breakpoint
++ * register.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero for success, a kdb diagnostic for failure
++ * Locking:
++ * None.
++ * Remarks:
++ *
++ * There is one entry per register. On the ia32 architecture
++ * all the registers are interchangeable, so no special allocation
++ * criteria are required.
++ */
++
++void
++kdba_initbp(void)
++{
++ int i;
++ kdbhard_bp_t *bph;
++
++ /*
++ * Clear the hardware breakpoint table
++ */
++
++ memset(kdb_hardbreaks, '\0', sizeof(kdb_hardbreaks));
++
++ for(i=0,bph=kdb_hardbreaks; i<KDB_MAXHARDBPT; i++, bph++) {
++ bph->bph_reg = i;
++ bph->bph_free = 1;
++ }
++}
++
++/*
++ * kdba_installbp
++ *
++ * Install a breakpoint
++ *
++ * Parameters:
++ * regs Exception frame
++ * bp Breakpoint structure for the breakpoint to be installed
++ * Outputs:
++ * None.
++ * Returns:
++ * 0 if breakpoint installed.
++ * Locking:
++ * None.
++ * Remarks:
++ * For hardware breakpoints, a debug register is allocated
++ * and assigned to the breakpoint. If no debug register is
++ * available, a warning message is printed and the breakpoint
++ * is disabled.
++ *
++ * For instruction replacement breakpoints, we must single-step
++ * over the replaced instruction at this point so we can re-install
++ * the breakpoint instruction after the single-step. SSBPT is set
++ * when the breakpoint is initially hit and is cleared by any action
++ * that removes the need for single-step over the breakpoint.
++ */
++
++int
++kdba_installbp(struct pt_regs *regs, kdb_bp_t *bp)
++{
++ /*
++ * Install the breakpoint, if it is not already installed.
++ */
++
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdba_installbp bp_installed %d\n", bp->bp_installed);
++ }
++ if (!KDB_STATE(SSBPT))
++ bp->bp_delay = 0;
++ if (!bp->bp_installed) {
++ if (bp->bp_hardtype) {
++ kdba_installdbreg(bp);
++ bp->bp_installed = 1;
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdba_installbp hardware reg %ld at " kdb_bfd_vma_fmt "\n",
++ bp->bp_hard->bph_reg, bp->bp_addr);
++ }
++ } else if (bp->bp_delay) {
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdba_installbp delayed bp\n");
++ kdba_handle_bp(regs, bp);
++ } else {
++ if (kdb_getarea_size(&(bp->bp_inst), bp->bp_addr, 1) ||
++ kdb_putword(bp->bp_addr, IA32_BREAKPOINT_INSTRUCTION, 1)) {
++ kdb_printf("kdba_installbp failed to set software breakpoint at 0x%lx\n", bp->bp_addr);
++ return(1);
++ }
++ bp->bp_installed = 1;
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdba_installbp instruction 0x%x at " kdb_bfd_vma_fmt "\n",
++ IA32_BREAKPOINT_INSTRUCTION, bp->bp_addr);
++ }
++ }
++ return(0);
++}
++
++/*
++ * kdba_removebp
++ *
++ * Make a breakpoint ineffective.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++int
++kdba_removebp(kdb_bp_t *bp)
++{
++ /*
++ * For hardware breakpoints, remove it from the active register,
++ * for software breakpoints, restore the instruction stream.
++ */
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdba_removebp bp_installed %d\n", bp->bp_installed);
++ }
++ if (bp->bp_installed) {
++ if (bp->bp_hardtype) {
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdb: removing hardware reg %ld at " kdb_bfd_vma_fmt "\n",
++ bp->bp_hard->bph_reg, bp->bp_addr);
++ }
++ kdba_removedbreg(bp);
++ } else {
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdb: restoring instruction 0x%x at " kdb_bfd_vma_fmt "\n",
++ bp->bp_inst, bp->bp_addr);
++ if (kdb_putword(bp->bp_addr, bp->bp_inst, 1))
++ return(1);
++ }
++ bp->bp_installed = 0;
++ }
++ return(0);
++}
+diff -Nurp linux-2.6.22-590/arch/i386/kdb/kdba_bt.c linux-2.6.22-600/arch/i386/kdb/kdba_bt.c
+--- linux-2.6.22-590/arch/i386/kdb/kdba_bt.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/i386/kdb/kdba_bt.c 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,473 @@
++/*
++ * Kernel Debugger Architecture Dependent Stack Traceback
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/kallsyms.h>
++#include <linux/irq.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <asm/system.h>
++
++/* On a 4K stack kernel, hardirq_ctx and softirq_ctx are [NR_CPUS] arrays. The
++ * first element of each per-cpu stack is a struct thread_info.
++ */
++void
++kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
++ struct kdb_activation_record *ar)
++{
++#ifdef CONFIG_4KSTACKS
++ struct thread_info *tinfo;
++ static int first_time = 1;
++ static struct thread_info **kdba_hardirq_ctx, **kdba_softirq_ctx;
++ if (first_time) {
++ kdb_symtab_t symtab;
++ kdbgetsymval("hardirq_ctx", &symtab);
++ kdba_hardirq_ctx = (struct thread_info **)symtab.sym_start;
++ kdbgetsymval("softirq_ctx", &symtab);
++ kdba_softirq_ctx = (struct thread_info **)symtab.sym_start;
++ first_time = 0;
++ }
++ tinfo = (struct thread_info *)(addr & -THREAD_SIZE);
++ if (cpu < 0) {
++ /* Arbitrary address, see if it falls within any of the irq
++ * stacks
++ */
++ int found = 0;
++ for_each_online_cpu(cpu) {
++ if (tinfo == kdba_hardirq_ctx[cpu] ||
++ tinfo == kdba_softirq_ctx[cpu]) {
++ found = 1;
++ break;
++ }
++ }
++ if (!found)
++ return;
++ }
++ if (tinfo == kdba_hardirq_ctx[cpu] ||
++ tinfo == kdba_softirq_ctx[cpu]) {
++ ar->stack.physical_start = (kdb_machreg_t)tinfo;
++ ar->stack.physical_end = ar->stack.physical_start + THREAD_SIZE;
++ ar->stack.logical_start = ar->stack.physical_start +
++ sizeof(struct thread_info);
++ ar->stack.logical_end = ar->stack.physical_end;
++ ar->stack.next = tinfo->previous_esp;
++ if (tinfo == kdba_hardirq_ctx[cpu])
++ ar->stack.id = "hardirq_ctx";
++ else
++ ar->stack.id = "softirq_ctx";
++ }
++#endif /* CONFIG_4KSTACKS */
++}
++
++/* Given an address which claims to be on a stack, an optional cpu number and
++ * an optional task address, get information about the stack.
++ *
++ * t == NULL, cpu < 0 indicates an arbitrary stack address with no associated
++ * struct task, the address can be in an alternate stack or any task's normal
++ * stack.
++ *
++ * t != NULL, cpu >= 0 indicates a running task, the address can be in an
++ * alternate stack or that task's normal stack.
++ *
++ * t != NULL, cpu < 0 indicates a blocked task, the address can only be in that
++ * task's normal stack.
++ *
++ * t == NULL, cpu >= 0 is not a valid combination.
++ */
++
++static void
++kdba_get_stack_info(kdb_machreg_t esp, int cpu,
++ struct kdb_activation_record *ar,
++ const struct task_struct *t)
++{
++ struct thread_info *tinfo;
++ struct task_struct *g, *p;
++ memset(&ar->stack, 0, sizeof(ar->stack));
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: esp=0x%lx cpu=%d task=%p\n",
++ __FUNCTION__, esp, cpu, t);
++ if (t == NULL || cpu >= 0) {
++ kdba_get_stack_info_alternate(esp, cpu, ar);
++ if (ar->stack.logical_start)
++ goto out;
++ }
++ esp &= -THREAD_SIZE;
++ tinfo = (struct thread_info *)esp;
++ if (t == NULL) {
++ /* Arbitrary stack address without an associated task, see if
++ * it falls within any normal process stack, including the idle
++ * tasks.
++ */
++ kdb_do_each_thread(g, p) {
++ if (tinfo == task_thread_info(p)) {
++ t = p;
++ goto found;
++ }
++ } kdb_while_each_thread(g, p);
++ for_each_online_cpu(cpu) {
++ p = idle_task(cpu);
++ if (tinfo == task_thread_info(p)) {
++ t = p;
++ goto found;
++ }
++ }
++ found:
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: found task %p\n", __FUNCTION__, t);
++ } else if (cpu >= 0) {
++ /* running task */
++ struct kdb_running_process *krp = kdb_running_process + cpu;
++ if (krp->p != t || tinfo != task_thread_info(t))
++ t = NULL;
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: running task %p\n", __FUNCTION__, t);
++ } else {
++ /* blocked task */
++ if (tinfo != task_thread_info(t))
++ t = NULL;
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: blocked task %p\n", __FUNCTION__, t);
++ }
++ if (t) {
++ ar->stack.physical_start = esp;
++ ar->stack.physical_end = esp + THREAD_SIZE;
++ ar->stack.logical_start = esp + sizeof(struct thread_info);
++ ar->stack.logical_end = ar->stack.physical_end;
++ ar->stack.next = 0;
++ ar->stack.id = "normal";
++ }
++out:
++ if (ar->stack.physical_start && KDB_DEBUG(ARA)) {
++ kdb_printf("%s: ar->stack\n", __FUNCTION__);
++ kdb_printf(" physical_start=0x%lx\n", ar->stack.physical_start);
++ kdb_printf(" physical_end=0x%lx\n", ar->stack.physical_end);
++ kdb_printf(" logical_start=0x%lx\n", ar->stack.logical_start);
++ kdb_printf(" logical_end=0x%lx\n", ar->stack.logical_end);
++ kdb_printf(" next=0x%lx\n", ar->stack.next);
++ kdb_printf(" id=%s\n", ar->stack.id);
++ }
++}
++
++/*
++ * bt_print_one
++ *
++ * Print one back trace entry.
++ *
++ * Inputs:
++ * eip Current program counter, or return address.
++ * esp Stack pointer esp when at eip.
++ * ar Activation record for this frame.
++ * symtab Information about symbol that eip falls within.
++ * argcount Maximum number of arguments to print.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ * None.
++ */
++
++static void
++bt_print_one(kdb_machreg_t eip, kdb_machreg_t esp,
++ const struct kdb_activation_record *ar,
++ const kdb_symtab_t *symtab, int argcount)
++{
++ int btsymarg = 0;
++ int nosect = 0;
++ kdb_machreg_t word;
++
++ kdbgetintenv("BTSYMARG", &btsymarg);
++ kdbgetintenv("NOSECT", &nosect);
++
++ kdb_printf(kdb_machreg_fmt0, esp);
++ kdb_symbol_print(eip, symtab, KDB_SP_SPACEB|KDB_SP_VALUE);
++ if (argcount && ar->args) {
++ int i, argc = ar->args;
++ kdb_printf(" (");
++ if (argc > argcount)
++ argc = argcount;
++ for (i = 0; i < argc; i++) {
++ kdb_machreg_t argp = ar->arg[i];
++ if (i)
++ kdb_printf(", ");
++ kdb_getword(&word, argp, sizeof(word));
++ kdb_printf("0x%lx", word);
++ }
++ kdb_printf(")");
++ }
++ if (symtab->sym_name) {
++ if (!nosect) {
++ kdb_printf("\n");
++ kdb_printf(" %s",
++ symtab->mod_name);
++ if (symtab->sec_name && symtab->sec_start)
++ kdb_printf(" 0x%lx 0x%lx",
++ symtab->sec_start, symtab->sec_end);
++ kdb_printf(" 0x%lx 0x%lx",
++ symtab->sym_start, symtab->sym_end);
++ }
++ }
++ kdb_printf("\n");
++ if (argcount && ar->args && btsymarg) {
++ int i, argc = ar->args;
++ kdb_symtab_t arg_symtab;
++ for (i = 0; i < argc; i++) {
++ kdb_machreg_t argp = ar->arg[i];
++ kdb_getword(&word, argp, sizeof(word));
++ if (kdbnearsym(word, &arg_symtab)) {
++ kdb_printf(" ");
++ kdb_symbol_print(word, &arg_symtab,
++ KDB_SP_DEFAULT|KDB_SP_NEWLINE);
++ }
++ }
++ }
++}
++
++/* Getting the starting point for a backtrace on a running process is
++ * moderately tricky. kdba_save_running() saved the esp in krp->arch.esp, but
++ * that esp is not 100% accurate, it can be offset by a frame pointer or by the
++ * size of local variables in kdba_main_loop() and kdb_save_running().
++ *
++ * The calling sequence is kdb() -> kdba_main_loop() -> kdb_save_running() ->
++ * kdba_save_running(). Walk up the stack until we find a return address
++ * inside the main kdb() function and start the backtrace from there.
++ */
++
++static int
++kdba_bt_stack_running(const struct task_struct *p,
++ const struct kdb_activation_record *ar,
++ kdb_machreg_t *eip, kdb_machreg_t *esp,
++ kdb_machreg_t *ebp)
++{
++ kdb_machreg_t addr, sp;
++ kdb_symtab_t symtab;
++ struct kdb_running_process *krp = kdb_running_process + task_cpu(p);
++ int found = 0;
++
++ if (kdbgetsymval("kdb", &symtab) == 0)
++ return 0;
++ if (kdbnearsym(symtab.sym_start, &symtab) == 0)
++ return 0;
++ sp = krp->arch.esp;
++ if (sp < ar->stack.logical_start || sp >= ar->stack.logical_end)
++ return 0;
++ while (sp < ar->stack.logical_end) {
++ addr = *(kdb_machreg_t *)sp;
++ if (addr >= symtab.sym_start && addr < symtab.sym_end) {
++ found = 1;
++ break;
++ }
++ sp += sizeof(kdb_machreg_t);
++ }
++ if (!found)
++ return 0;
++ *ebp = *esp = sp;
++ *eip = addr;
++ return 1;
++}
++
++/*
++ * kdba_bt_stack
++ *
++ * Inputs:
++ * addr Pointer to Address provided to 'bt' command, if any.
++ * argcount
++ * p Pointer to task for 'btp' command.
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * mds comes in handy when examining the stack to do a manual
++ * traceback.
++ */
++
++static int
++kdba_bt_stack(kdb_machreg_t addr, int argcount, const struct task_struct *p)
++{
++ struct kdb_activation_record ar;
++ kdb_machreg_t eip, esp, ebp, cs;
++ kdb_symtab_t symtab;
++ int first_time = 1, count = 0, btsp = 0, suppress;
++ struct pt_regs *regs = NULL;
++
++ kdbgetintenv("BTSP", &btsp);
++ suppress = !btsp;
++ memset(&ar, 0, sizeof(ar));
++
++ /*
++ * The caller may have supplied an address at which the
++ * stack traceback operation should begin. This address
++ * is assumed by this code to point to a return-address
++ * on the stack to be traced back.
++ *
++ * The end result of this will make it appear as if a function
++ * entitled '<unknown>' was called from the function that
++ * contains the return address.
++ */
++ if (addr) {
++ eip = 0;
++ ebp = 0;
++ esp = addr;
++ cs = __KERNEL_CS; /* have to assume kernel space */
++ suppress = 0;
++ kdba_get_stack_info(esp, -1, &ar, NULL);
++ } else {
++ if (task_curr(p)) {
++ struct kdb_running_process *krp =
++ kdb_running_process + task_cpu(p);
++
++ if (krp->seqno && krp->p == p
++ && krp->seqno >= kdb_seqno - 1) {
++ /* valid saved state, continue processing */
++ } else {
++ kdb_printf
++ ("Process did not save state, cannot backtrace\n");
++ kdb_ps1(p);
++ return 0;
++ }
++ regs = krp->regs;
++ if (KDB_NULL_REGS(regs))
++ return KDB_BADREG;
++ kdba_getregcontents("xcs", regs, &cs);
++ if ((cs & 0xffff) != __KERNEL_CS) {
++ kdb_printf("Stack is not in kernel space, backtrace not available\n");
++ return 0;
++ }
++ kdba_getregcontents("eip", regs, &eip);
++ kdba_getregcontents("ebp", regs, &ebp);
++ esp = krp->arch.esp;
++ kdba_get_stack_info(esp, kdb_process_cpu(p), &ar, p);
++ if (kdba_bt_stack_running(p, &ar, &eip, &esp, &ebp) == 0) {
++ kdb_printf("%s: cannot adjust esp=0x%lx for a running task\n",
++ __FUNCTION__, esp);
++ }
++ } else {
++ /* Not on cpu, assume blocked. Blocked tasks do not
++ * have pt_regs. p->thread.{esp,eip} are set, esp
++ * points to the ebp value, assume kernel space.
++ */
++ eip = p->thread.eip;
++ esp = p->thread.esp;
++ ebp = *(unsigned long *)esp;
++ cs = __KERNEL_CS;
++ suppress = 0;
++ kdba_get_stack_info(esp, -1, &ar, p);
++ }
++ }
++ if (!ar.stack.physical_start) {
++ kdb_printf("esp=0x%lx is not in a valid kernel stack, backtrace not available\n",
++ esp);
++ return 0;
++ }
++
++ kdb_printf("esp eip Function (args)\n");
++ if (ar.stack.next && !suppress)
++ kdb_printf(" ======================= <%s>\n",
++ ar.stack.id);
++
++ /* Run through all the stacks */
++ while (ar.stack.physical_start) {
++ if (!first_time)
++ eip = *(kdb_machreg_t *)esp;
++ first_time = 0;
++ if (!suppress && __kernel_text_address(eip)) {
++ kdbnearsym(eip, &symtab);
++ bt_print_one(eip, esp, &ar, &symtab, argcount);
++ ++count;
++ }
++ if ((struct pt_regs *)esp == regs) {
++ if (ar.stack.next && suppress)
++ kdb_printf(" ======================= <%s>\n",
++ ar.stack.id);
++ ++count;
++ suppress = 0;
++ }
++ esp += sizeof(eip);
++ if (count > 200)
++ break;
++ if (esp < ar.stack.logical_end)
++ continue;
++ if (!ar.stack.next)
++ break;
++ esp = ar.stack.next;
++ if (KDB_DEBUG(ARA))
++ kdb_printf("new esp=0x%lx\n", esp);
++ kdba_get_stack_info(esp, -1, &ar, NULL);
++ if (!ar.stack.physical_start) {
++ kdb_printf("+++ Cannot resolve next stack\n");
++ } else if (!suppress) {
++ kdb_printf(" ======================= <%s>\n",
++ ar.stack.id);
++ ++count;
++ }
++ }
++
++ if (count > 200)
++ kdb_printf("bt truncated, count limit reached\n");
++ else if (suppress)
++ kdb_printf
++ ("bt did not find pt_regs - no trace produced. Suggest 'set BTSP 1'\n");
++
++ return 0;
++}
++
++/*
++ * kdba_bt_address
++ *
++ * Do a backtrace starting at a specified stack address. Use this if the
++ * heuristics get the stack decode wrong.
++ *
++ * Inputs:
++ * addr Address provided to 'bt' command.
++ * argcount
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * mds %esp comes in handy when examining the stack to do a manual
++ * traceback.
++ */
++
++int kdba_bt_address(kdb_machreg_t addr, int argcount)
++{
++ return kdba_bt_stack(addr, argcount, NULL);
++}
++
++/*
++ * kdba_bt_process
++ *
++ * Do a backtrace for a specified process.
++ *
++ * Inputs:
++ * p Struct task pointer extracted by 'bt' command.
++ * argcount
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ */
++
++int kdba_bt_process(const struct task_struct *p, int argcount)
++{
++ return kdba_bt_stack(0, argcount, p);
++}
+diff -Nurp linux-2.6.22-590/arch/i386/kdb/kdba_id.c linux-2.6.22-600/arch/i386/kdb/kdba_id.c
+--- linux-2.6.22-590/arch/i386/kdb/kdba_id.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/i386/kdb/kdba_id.c 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,255 @@
++/*
++ * Kernel Debugger Architecture Dependent Instruction Disassembly
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <stdarg.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++
++/*
++ * kdba_dis_getsym
++ *
++ * Get a symbol for the disassembler.
++ *
++ * Parameters:
++ * addr Address for which to get symbol
++ * dip Pointer to disassemble_info
++ * Returns:
++ * 0
++ * Locking:
++ * Remarks:
++ * Not used for kdb.
++ */
++
++/* ARGSUSED */
++static int
++kdba_dis_getsym(bfd_vma addr, disassemble_info *dip)
++{
++
++ return 0;
++}
++
++/*
++ * kdba_printaddress
++ *
++ * Print (symbolically) an address.
++ *
++ * Parameters:
++ * addr Address for which to get symbol
++ * dip Pointer to disassemble_info
++ * flag True if a ":<tab>" sequence should follow the address
++ * Returns:
++ * 0
++ * Locking:
++ * Remarks:
++ *
++ */
++
++/* ARGSUSED */
++static void
++kdba_printaddress(kdb_machreg_t addr, disassemble_info *dip, int flag)
++{
++ kdb_symtab_t symtab;
++ int spaces = 5;
++ unsigned int offset;
++
++ /*
++ * Print a symbol name or address as necessary.
++ */
++ kdbnearsym(addr, &symtab);
++ if (symtab.sym_name) {
++ /* Do not use kdb_symbol_print here, it always does
++ * kdb_printf but we want dip->fprintf_func.
++ */
++ dip->fprintf_func(dip->stream,
++ "0x%0*lx %s",
++ (int)(2*sizeof(addr)), addr, symtab.sym_name);
++ if ((offset = addr - symtab.sym_start) == 0) {
++ spaces += 4;
++ }
++ else {
++ unsigned int o = offset;
++ while (o >>= 4)
++ --spaces;
++ dip->fprintf_func(dip->stream, "+0x%x", offset);
++ }
++
++ } else {
++ dip->fprintf_func(dip->stream, "0x%lx", addr);
++ }
++
++ if (flag) {
++ if (spaces < 1) {
++ spaces = 1;
++ }
++ dip->fprintf_func(dip->stream, ":%*s", spaces, " ");
++ }
++}
++
++/*
++ * kdba_dis_printaddr
++ *
++ * Print (symbolically) an address. Called by GNU disassembly
++ * code via disassemble_info structure.
++ *
++ * Parameters:
++ * addr Address for which to get symbol
++ * dip Pointer to disassemble_info
++ * Returns:
++ * 0
++ * Locking:
++ * Remarks:
++ * This function will never append ":<tab>" to the printed
++ * symbolic address.
++ */
++
++static void
++kdba_dis_printaddr(bfd_vma addr, disassemble_info *dip)
++{
++ kdba_printaddress(addr, dip, 0);
++}
++
++/*
++ * kdba_dis_getmem
++ *
++ * Fetch 'length' bytes from 'addr' into 'buf'.
++ *
++ * Parameters:
++ * addr Address for which to get symbol
++ * buf Address of buffer to fill with bytes from 'addr'
++ * length Number of bytes to fetch
++ * dip Pointer to disassemble_info
++ * Returns:
++ * 0 if data is available, otherwise error.
++ * Locking:
++ * Remarks:
++ *
++ */
++
++/* ARGSUSED */
++static int
++kdba_dis_getmem(bfd_vma addr, bfd_byte *buf, unsigned int length, disassemble_info *dip)
++{
++ return kdb_getarea_size(buf, addr, length);
++}
++
++/*
++ * kdba_id_parsemode
++ *
++ * Parse IDMODE environment variable string and
++ * set appropriate value into "disassemble_info" structure.
++ *
++ * Parameters:
++ * mode Mode string
++ * dip Disassemble_info structure pointer
++ * Returns:
++ * Locking:
++ * Remarks:
++ * We handle the values 'x86' and '8086' to enable either
++ * 32-bit instruction set or 16-bit legacy instruction set.
++ */
++
++int
++kdba_id_parsemode(const char *mode, disassemble_info *dip)
++{
++
++ if (mode) {
++ if (strcmp(mode, "x86") == 0) {
++ dip->mach = bfd_mach_i386_i386;
++ } else if (strcmp(mode, "8086") == 0) {
++ dip->mach = bfd_mach_i386_i8086;
++ } else {
++ return KDB_BADMODE;
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * kdba_check_pc
++ *
++ * Check that the pc is satisfactory.
++ *
++ * Parameters:
++ * pc Program Counter Value.
++ * Returns:
++ * None
++ * Locking:
++ * None.
++ * Remarks:
++ * Can change pc.
++ */
++
++void
++kdba_check_pc(kdb_machreg_t *pc)
++{
++ /* No action */
++}
++
++/*
++ * kdba_id_printinsn
++ *
++ * Format and print a single instruction at 'pc'. Return the
++ * length of the instruction.
++ *
++ * Parameters:
++ * pc Program Counter Value.
++ * dip Disassemble_info structure pointer
++ * Returns:
++ * Length of instruction, -1 for error.
++ * Locking:
++ * None.
++ * Remarks:
++ * Depends on 'IDMODE' environment variable.
++ */
++
++int
++kdba_id_printinsn(kdb_machreg_t pc, disassemble_info *dip)
++{
++ kdba_printaddress(pc, dip, 1);
++ return print_insn_i386_att(pc, dip);
++}
++
++/*
++ * kdba_id_init
++ *
++ * Initialize the architecture dependent elements of
++ * the disassembly information structure
++ * for the GNU disassembler.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void
++kdba_id_init(disassemble_info *dip)
++{
++ dip->read_memory_func = kdba_dis_getmem;
++ dip->print_address_func = kdba_dis_printaddr;
++ dip->symbol_at_address_func = kdba_dis_getsym;
++
++ dip->flavour = bfd_target_elf_flavour;
++ dip->arch = bfd_arch_i386;
++ dip->mach = bfd_mach_i386_i386;
++ dip->endian = BFD_ENDIAN_LITTLE;
++
++ dip->display_endian = BFD_ENDIAN_LITTLE;
++}
+diff -Nurp linux-2.6.22-590/arch/i386/kdb/kdba_io.c linux-2.6.22-600/arch/i386/kdb/kdba_io.c
+--- linux-2.6.22-590/arch/i386/kdb/kdba_io.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/i386/kdb/kdba_io.c 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,503 @@
++/*
++ * Kernel Debugger Architecture Dependent Console I/O handler
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <asm/io.h>
++#include <linux/delay.h>
++#include <linux/console.h>
++#include <linux/ctype.h>
++#include <linux/keyboard.h>
++#include <linux/serial.h>
++#include <linux/serial_reg.h>
++
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <pc_keyb.h>
++
++#ifdef CONFIG_VT_CONSOLE
++#define KDB_BLINK_LED 1
++#else
++#undef KDB_BLINK_LED
++#endif
++
++#ifdef CONFIG_KDB_USB
++struct kdb_usb_exchange kdb_usb_infos;
++
++EXPORT_SYMBOL(kdb_usb_infos);
++
++static unsigned char kdb_usb_keycode[256] = {
++ 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
++ 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3,
++ 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26,
++ 27, 43, 84, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64,
++ 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106,
++ 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71,
++ 72, 73, 82, 83, 86,127,116,117, 85, 89, 90, 91, 92, 93, 94, 95,
++ 120,121,122,123,134,138,130,132,128,129,131,137,133,135,136,113,
++ 115,114, 0, 0, 0,124, 0,181,182,183,184,185,186,187,188,189,
++ 190,191,192,193,194,195,196,197,198, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113,
++ 150,158,159,128,136,177,178,176,142,152,173,140
++};
++
++/* get_usb_char
++ * This function drives the UHCI controller,
++ * fetch the USB scancode and decode it
++ */
++static int get_usb_char(void)
++{
++ static int usb_lock;
++ unsigned char keycode, spec;
++ extern u_short plain_map[], shift_map[], ctrl_map[];
++
++ /* Is USB initialized ? */
++ if(!kdb_usb_infos.poll_func || !kdb_usb_infos.urb)
++ return -1;
++
++ /* Transfer char if they are present */
++ (*kdb_usb_infos.poll_func)(kdb_usb_infos.uhci, (struct urb *)kdb_usb_infos.urb);
++
++ spec = kdb_usb_infos.buffer[0];
++ keycode = kdb_usb_infos.buffer[2];
++ kdb_usb_infos.buffer[0] = (char)0;
++ kdb_usb_infos.buffer[2] = (char)0;
++
++ if(kdb_usb_infos.buffer[3])
++ return -1;
++
++ /* A normal key is pressed, decode it */
++ if(keycode)
++ keycode = kdb_usb_keycode[keycode];
++
++ /* 2 Keys pressed at one time ? */
++ if (spec && keycode) {
++ switch(spec)
++ {
++ case 0x2:
++ case 0x20: /* Shift */
++ return shift_map[keycode];
++ case 0x1:
++ case 0x10: /* Ctrl */
++ return ctrl_map[keycode];
++ case 0x4:
++ case 0x40: /* Alt */
++ break;
++ }
++ }
++ else {
++ if(keycode) { /* If only one key pressed */
++ switch(keycode)
++ {
++ case 0x1C: /* Enter */
++ return 13;
++
++ case 0x3A: /* Capslock */
++ usb_lock ? (usb_lock = 0) : (usb_lock = 1);
++ break;
++ case 0x0E: /* Backspace */
++ return 8;
++ case 0x0F: /* TAB */
++ return 9;
++ case 0x77: /* Pause */
++ break ;
++ default:
++ if(!usb_lock) {
++ return plain_map[keycode];
++ }
++ else {
++ return shift_map[keycode];
++ }
++ }
++ }
++ }
++ return -1;
++}
++#endif /* CONFIG_KDB_USB */
++
++/*
++ * This module contains code to read characters from the keyboard or a serial
++ * port.
++ *
++ * It is used by the kernel debugger, and is polled, not interrupt driven.
++ *
++ */
++
++#ifdef KDB_BLINK_LED
++/*
++ * send: Send a byte to the keyboard controller. Used primarily to
++ * alter LED settings.
++ */
++
++static void
++kdb_kbdsend(unsigned char byte)
++{
++ int timeout;
++ for (timeout = 200 * 1000; timeout && (inb(KBD_STATUS_REG) & KBD_STAT_IBF); timeout--);
++ outb(byte, KBD_DATA_REG);
++ udelay(40);
++ for (timeout = 200 * 1000; timeout && (~inb(KBD_STATUS_REG) & KBD_STAT_OBF); timeout--);
++ inb(KBD_DATA_REG);
++ udelay(40);
++}
++
++static void
++kdb_toggleled(int led)
++{
++ static int leds;
++
++ leds ^= led;
++
++ kdb_kbdsend(KBD_CMD_SET_LEDS);
++ kdb_kbdsend((unsigned char)leds);
++}
++#endif /* KDB_BLINK_LED */
++
++#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_CORE_CONSOLE)
++#define CONFIG_SERIAL_CONSOLE
++#endif
++
++#if defined(CONFIG_SERIAL_CONSOLE)
++
++struct kdb_serial kdb_serial;
++
++static unsigned int
++serial_inp(struct kdb_serial *kdb_serial, unsigned long offset)
++{
++ offset <<= kdb_serial->ioreg_shift;
++
++ switch (kdb_serial->io_type) {
++ case SERIAL_IO_MEM:
++ return readb((void __iomem *)(kdb_serial->iobase + offset));
++ break;
++ default:
++ return inb(kdb_serial->iobase + offset);
++ break;
++ }
++}
++
++/* Check if there is a byte ready at the serial port */
++static int get_serial_char(void)
++{
++ unsigned char ch;
++
++ if (kdb_serial.iobase == 0)
++ return -1;
++
++ if (serial_inp(&kdb_serial, UART_LSR) & UART_LSR_DR) {
++ ch = serial_inp(&kdb_serial, UART_RX);
++ if (ch == 0x7f)
++ ch = 8;
++ return ch;
++ }
++ return -1;
++}
++#endif /* CONFIG_SERIAL_CONSOLE */
++
++#ifdef CONFIG_VT_CONSOLE
++
++static int kbd_exists;
++
++/*
++ * Check if the keyboard controller has a keypress for us.
++ * Some parts (Enter Release, LED change) are still blocking polled here,
++ * but hopefully they are all short.
++ */
++static int get_kbd_char(void)
++{
++ int scancode, scanstatus;
++ static int shift_lock; /* CAPS LOCK state (0-off, 1-on) */
++ static int shift_key; /* Shift next keypress */
++ static int ctrl_key;
++ u_short keychar;
++ extern u_short plain_map[], shift_map[], ctrl_map[];
++
++ if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) ||
++ (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) {
++ kbd_exists = 0;
++ return -1;
++ }
++ kbd_exists = 1;
++
++ if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
++ return -1;
++
++ /*
++ * Fetch the scancode
++ */
++ scancode = inb(KBD_DATA_REG);
++ scanstatus = inb(KBD_STATUS_REG);
++
++ /*
++ * Ignore mouse events.
++ */
++ if (scanstatus & KBD_STAT_MOUSE_OBF)
++ return -1;
++
++ /*
++ * Ignore release, trigger on make
++ * (except for shift keys, where we want to
++ * keep the shift state so long as the key is
++ * held down).
++ */
++
++ if (((scancode&0x7f) == 0x2a) || ((scancode&0x7f) == 0x36)) {
++ /*
++ * Next key may use shift table
++ */
++ if ((scancode & 0x80) == 0) {
++ shift_key=1;
++ } else {
++ shift_key=0;
++ }
++ return -1;
++ }
++
++ if ((scancode&0x7f) == 0x1d) {
++ /*
++ * Left ctrl key
++ */
++ if ((scancode & 0x80) == 0) {
++ ctrl_key = 1;
++ } else {
++ ctrl_key = 0;
++ }
++ return -1;
++ }
++
++ if ((scancode & 0x80) != 0)
++ return -1;
++
++ scancode &= 0x7f;
++
++ /*
++ * Translate scancode
++ */
++
++ if (scancode == 0x3a) {
++ /*
++ * Toggle caps lock
++ */
++ shift_lock ^= 1;
++
++#ifdef KDB_BLINK_LED
++ kdb_toggleled(0x4);
++#endif
++ return -1;
++ }
++
++ if (scancode == 0x0e) {
++ /*
++ * Backspace
++ */
++ return 8;
++ }
++
++ /* Special Key */
++ switch (scancode) {
++ case 0xF: /* Tab */
++ return 9;
++ case 0x53: /* Del */
++ return 4;
++ case 0x47: /* Home */
++ return 1;
++ case 0x4F: /* End */
++ return 5;
++ case 0x4B: /* Left */
++ return 2;
++ case 0x48: /* Up */
++ return 16;
++ case 0x50: /* Down */
++ return 14;
++ case 0x4D: /* Right */
++ return 6;
++ }
++
++ if (scancode == 0xe0) {
++ return -1;
++ }
++
++ /*
++ * For Japanese 86/106 keyboards
++ * See comment in drivers/char/pc_keyb.c.
++ * - Masahiro Adegawa
++ */
++ if (scancode == 0x73) {
++ scancode = 0x59;
++ } else if (scancode == 0x7d) {
++ scancode = 0x7c;
++ }
++
++ if (!shift_lock && !shift_key && !ctrl_key) {
++ keychar = plain_map[scancode];
++ } else if (shift_lock || shift_key) {
++ keychar = shift_map[scancode];
++ } else if (ctrl_key) {
++ keychar = ctrl_map[scancode];
++ } else {
++ keychar = 0x0020;
++ kdb_printf("Unknown state/scancode (%d)\n", scancode);
++ }
++ keychar &= 0x0fff;
++ if (keychar == '\t')
++ keychar = ' ';
++ switch (KTYP(keychar)) {
++ case KT_LETTER:
++ case KT_LATIN:
++ if (isprint(keychar))
++ break; /* printable characters */
++ /* drop through */
++ case KT_SPEC:
++ if (keychar == K_ENTER)
++ break;
++ /* drop through */
++ default:
++ return(-1); /* ignore unprintables */
++ }
++
++ if ((scancode & 0x7f) == 0x1c) {
++ /*
++ * enter key. All done. Absorb the release scancode.
++ */
++ while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
++ ;
++
++ /*
++ * Fetch the scancode
++ */
++ scancode = inb(KBD_DATA_REG);
++ scanstatus = inb(KBD_STATUS_REG);
++
++ while (scanstatus & KBD_STAT_MOUSE_OBF) {
++ scancode = inb(KBD_DATA_REG);
++ scanstatus = inb(KBD_STATUS_REG);
++ }
++
++ if (scancode != 0x9c) {
++ /*
++ * Wasn't an enter-release, why not?
++ */
++ kdb_printf("kdb: expected enter got 0x%x status 0x%x\n",
++ scancode, scanstatus);
++ }
++
++ kdb_printf("\n");
++ return 13;
++ }
++
++ return keychar & 0xff;
++}
++#endif /* CONFIG_VT_CONSOLE */
++
++#ifdef KDB_BLINK_LED
++
++/* Leave numlock alone, setting it messes up laptop keyboards with the keypad
++ * mapped over normal keys.
++ */
++static int kdba_blink_mask = 0x1 | 0x4;
++
++#define BOGOMIPS (boot_cpu_data.loops_per_jiffy/(500000/HZ))
++static int blink_led(void)
++{
++ static long delay;
++
++ if (kbd_exists == 0)
++ return -1;
++
++ if (--delay < 0) {
++ if (BOGOMIPS == 0) /* early kdb */
++ delay = 150000000/1000; /* arbitrary bogomips */
++ else
++ delay = 150000000/BOGOMIPS; /* Roughly 1 second when polling */
++ kdb_toggleled(kdba_blink_mask);
++ }
++ return -1;
++}
++#endif
++
++get_char_func poll_funcs[] = {
++#if defined(CONFIG_VT_CONSOLE)
++ get_kbd_char,
++#endif
++#if defined(CONFIG_SERIAL_CONSOLE)
++ get_serial_char,
++#endif
++#ifdef KDB_BLINK_LED
++ blink_led,
++#endif
++#ifdef CONFIG_KDB_USB
++ get_usb_char,
++#endif
++ NULL
++};
++
++/*
++ * On some Compaq Deskpro's, there is a keyboard freeze many times after
++ * exiting from the kdb. As kdb's keyboard handler is not interrupt-driven and
++ * uses a polled interface, it makes more sense to disable motherboard keyboard
++ * controller's OBF interrupts during kdb's polling. In case of interrupts
++ * remaining enabled during kdb's polling, it may cause unnecessary
++ * interrupts being signalled during keypresses, which are also sometimes seen
++ * as spurious interrupts after exiting from kdb. This hack to disable OBF
++ * interrupts before entry to kdb and re-enabling them at kdb exit point also
++ * solves the keyboard freeze issue. These functions are called from
++ * kdb_local(), hence these are arch. specific setup and cleanup functions
++ * executing only on the local processor - ashishk@sco.com
++ */
++
++void kdba_local_arch_setup(void)
++{
++#ifdef CONFIG_VT_CONSOLE
++ unsigned char c;
++
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_command(KBD_CCMD_READ_MODE);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ while ( !(kbd_read_status() & KBD_STAT_OBF) );
++ c = kbd_read_input();
++ c &= ~KBD_MODE_KBD_INT;
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_command(KBD_CCMD_WRITE_MODE);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_output(c);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ mdelay(1);
++#endif /* CONFIG_VT_CONSOLE */
++}
++
++void kdba_local_arch_cleanup(void)
++{
++#ifdef CONFIG_VT_CONSOLE
++ unsigned char c;
++
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_command(KBD_CCMD_READ_MODE);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ while ( !(kbd_read_status() & KBD_STAT_OBF) );
++ c = kbd_read_input();
++ c |= KBD_MODE_KBD_INT;
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_command(KBD_CCMD_WRITE_MODE);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_output(c);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ mdelay(1);
++#endif /* CONFIG_VT_CONSOLE */
++}
+diff -Nurp linux-2.6.22-590/arch/i386/kdb/kdbasupport.c linux-2.6.22-600/arch/i386/kdb/kdbasupport.c
+--- linux-2.6.22-590/arch/i386/kdb/kdbasupport.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/i386/kdb/kdbasupport.c 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,1066 @@
++/*
++ * Kernel Debugger Architecture Independent Support Functions
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/string.h>
++#include <linux/stddef.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/irq.h>
++#include <linux/ptrace.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/hardirq.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++
++#include <asm/processor.h>
++#include <asm/msr.h>
++#include <asm/uaccess.h>
++#include <asm/desc.h>
++
++static kdb_machreg_t
++kdba_getcr(int regnum)
++{
++ kdb_machreg_t contents = 0;
++ switch(regnum) {
++ case 0:
++ __asm__ ("movl %%cr0,%0\n\t":"=r"(contents));
++ break;
++ case 1:
++ break;
++ case 2:
++ __asm__ ("movl %%cr2,%0\n\t":"=r"(contents));
++ break;
++ case 3:
++ __asm__ ("movl %%cr3,%0\n\t":"=r"(contents));
++ break;
++ case 4:
++ __asm__ ("movl %%cr4,%0\n\t":"=r"(contents));
++ break;
++ default:
++ break;
++ }
++
++ return contents;
++}
++
++static void
++kdba_putdr(int regnum, kdb_machreg_t contents)
++{
++ switch(regnum) {
++ case 0:
++ __asm__ ("movl %0,%%db0\n\t"::"r"(contents));
++ break;
++ case 1:
++ __asm__ ("movl %0,%%db1\n\t"::"r"(contents));
++ break;
++ case 2:
++ __asm__ ("movl %0,%%db2\n\t"::"r"(contents));
++ break;
++ case 3:
++ __asm__ ("movl %0,%%db3\n\t"::"r"(contents));
++ break;
++ case 4:
++ case 5:
++ break;
++ case 6:
++ __asm__ ("movl %0,%%db6\n\t"::"r"(contents));
++ break;
++ case 7:
++ __asm__ ("movl %0,%%db7\n\t"::"r"(contents));
++ break;
++ default:
++ break;
++ }
++}
++
++static kdb_machreg_t
++kdba_getdr(int regnum)
++{
++ kdb_machreg_t contents = 0;
++ switch(regnum) {
++ case 0:
++ __asm__ ("movl %%db0,%0\n\t":"=r"(contents));
++ break;
++ case 1:
++ __asm__ ("movl %%db1,%0\n\t":"=r"(contents));
++ break;
++ case 2:
++ __asm__ ("movl %%db2,%0\n\t":"=r"(contents));
++ break;
++ case 3:
++ __asm__ ("movl %%db3,%0\n\t":"=r"(contents));
++ break;
++ case 4:
++ case 5:
++ break;
++ case 6:
++ __asm__ ("movl %%db6,%0\n\t":"=r"(contents));
++ break;
++ case 7:
++ __asm__ ("movl %%db7,%0\n\t":"=r"(contents));
++ break;
++ default:
++ break;
++ }
++
++ return contents;
++}
++
++kdb_machreg_t
++kdba_getdr6(void)
++{
++ return kdba_getdr(6);
++}
++
++kdb_machreg_t
++kdba_getdr7(void)
++{
++ return kdba_getdr(7);
++}
++
++void
++kdba_putdr6(kdb_machreg_t contents)
++{
++ kdba_putdr(6, contents);
++}
++
++static void
++kdba_putdr7(kdb_machreg_t contents)
++{
++ kdba_putdr(7, contents);
++}
++
++void
++kdba_installdbreg(kdb_bp_t *bp)
++{
++ kdb_machreg_t dr7;
++
++ dr7 = kdba_getdr7();
++
++ kdba_putdr(bp->bp_hard->bph_reg, bp->bp_addr);
++
++ dr7 |= DR7_GE;
++ if (cpu_has_de)
++ set_in_cr4(X86_CR4_DE);
++
++ switch (bp->bp_hard->bph_reg){
++ case 0:
++ DR7_RW0SET(dr7,bp->bp_hard->bph_mode);
++ DR7_LEN0SET(dr7,bp->bp_hard->bph_length);
++ DR7_G0SET(dr7);
++ break;
++ case 1:
++ DR7_RW1SET(dr7,bp->bp_hard->bph_mode);
++ DR7_LEN1SET(dr7,bp->bp_hard->bph_length);
++ DR7_G1SET(dr7);
++ break;
++ case 2:
++ DR7_RW2SET(dr7,bp->bp_hard->bph_mode);
++ DR7_LEN2SET(dr7,bp->bp_hard->bph_length);
++ DR7_G2SET(dr7);
++ break;
++ case 3:
++ DR7_RW3SET(dr7,bp->bp_hard->bph_mode);
++ DR7_LEN3SET(dr7,bp->bp_hard->bph_length);
++ DR7_G3SET(dr7);
++ break;
++ default:
++ kdb_printf("kdb: Bad debug register!! %ld\n",
++ bp->bp_hard->bph_reg);
++ break;
++ }
++
++ kdba_putdr7(dr7);
++ return;
++}
++
++void
++kdba_removedbreg(kdb_bp_t *bp)
++{
++ int regnum;
++ kdb_machreg_t dr7;
++
++ if (!bp->bp_hard)
++ return;
++
++ regnum = bp->bp_hard->bph_reg;
++
++ dr7 = kdba_getdr7();
++
++ kdba_putdr(regnum, 0);
++
++ switch (regnum) {
++ case 0:
++ DR7_G0CLR(dr7);
++ DR7_L0CLR(dr7);
++ break;
++ case 1:
++ DR7_G1CLR(dr7);
++ DR7_L1CLR(dr7);
++ break;
++ case 2:
++ DR7_G2CLR(dr7);
++ DR7_L2CLR(dr7);
++ break;
++ case 3:
++ DR7_G3CLR(dr7);
++ DR7_L3CLR(dr7);
++ break;
++ default:
++ kdb_printf("kdb: Bad debug register!! %d\n", regnum);
++ break;
++ }
++
++ kdba_putdr7(dr7);
++}
++
++
++/*
++ * kdba_getregcontents
++ *
++ * Return the contents of the register specified by the
++ * input string argument. Return an error if the string
++ * does not match a machine register.
++ *
++ * The following pseudo register names are supported:
++ * &regs - Prints address of exception frame
++ * kesp - Prints kernel stack pointer at time of fault
++ * cesp - Prints current kernel stack pointer, inside kdb
++ * ceflags - Prints current flags, inside kdb
++ * %<regname> - Uses the value of the registers at the
++ * last time the user process entered kernel
++ * mode, instead of the registers at the time
++ * kdb was entered.
++ *
++ * Parameters:
++ * regname Pointer to string naming register
++ * regs Pointer to structure containing registers.
++ * Outputs:
++ * *contents Pointer to unsigned long to receive register contents
++ * Returns:
++ * 0 Success
++ * KDB_BADREG Invalid register name
++ * Locking:
++ * None.
++ * Remarks:
++ * If kdb was entered via an interrupt from the kernel itself then
++ * ss and esp are *not* on the stack.
++ */
++
++static struct kdbregs {
++ char *reg_name;
++ size_t reg_offset;
++} kdbreglist[] = {
++ { "eax", offsetof(struct pt_regs, eax) },
++ { "ebx", offsetof(struct pt_regs, ebx) },
++ { "ecx", offsetof(struct pt_regs, ecx) },
++ { "edx", offsetof(struct pt_regs, edx) },
++
++ { "esi", offsetof(struct pt_regs, esi) },
++ { "edi", offsetof(struct pt_regs, edi) },
++ { "esp", offsetof(struct pt_regs, esp) },
++ { "eip", offsetof(struct pt_regs, eip) },
++
++ { "ebp", offsetof(struct pt_regs, ebp) },
++ { "xss", offsetof(struct pt_regs, xss) },
++ { "xcs", offsetof(struct pt_regs, xcs) },
++ { "eflags", offsetof(struct pt_regs, eflags) },
++
++ { "xds", offsetof(struct pt_regs, xds) },
++ { "xes", offsetof(struct pt_regs, xes) },
++ { "origeax", offsetof(struct pt_regs, orig_eax) },
++
++};
++
++static const int nkdbreglist = sizeof(kdbreglist) / sizeof(struct kdbregs);
++
++static struct kdbregs dbreglist[] = {
++ { "dr0", 0 },
++ { "dr1", 1 },
++ { "dr2", 2 },
++ { "dr3", 3 },
++ { "dr6", 6 },
++ { "dr7", 7 },
++};
++
++static const int ndbreglist = sizeof(dbreglist) / sizeof(struct kdbregs);
++
++int
++kdba_getregcontents(const char *regname,
++ struct pt_regs *regs,
++ kdb_machreg_t *contents)
++{
++ int i;
++
++ if (strcmp(regname, "cesp") == 0) {
++ asm volatile("movl %%esp,%0":"=m" (*contents));
++ return 0;
++ }
++
++ if (strcmp(regname, "ceflags") == 0) {
++ unsigned long flags;
++ local_save_flags(flags);
++ *contents = flags;
++ return 0;
++ }
++
++ if (regname[0] == '%') {
++ /* User registers: %%e[a-c]x, etc */
++ regname++;
++ regs = (struct pt_regs *)
++ (kdb_current_task->thread.esp0 - sizeof(struct pt_regs));
++ }
++
++ for (i=0; i<ndbreglist; i++) {
++ if (strnicmp(dbreglist[i].reg_name,
++ regname,
++ strlen(regname)) == 0)
++ break;
++ }
++
++ if ((i < ndbreglist)
++ && (strlen(dbreglist[i].reg_name) == strlen(regname))) {
++ *contents = kdba_getdr(dbreglist[i].reg_offset);
++ return 0;
++ }
++
++ if (!regs) {
++ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__);
++ return KDB_BADREG;
++ }
++
++ if (strcmp(regname, "&regs") == 0) {
++ *contents = (unsigned long)regs;
++ return 0;
++ }
++
++ if (strcmp(regname, "kesp") == 0) {
++ *contents = (unsigned long)regs + sizeof(struct pt_regs);
++ if ((regs->xcs & 0xffff) == __KERNEL_CS) {
++ /* esp and ss are not on stack */
++ *contents -= 2*4;
++ }
++ return 0;
++ }
++
++ for (i=0; i<nkdbreglist; i++) {
++ if (strnicmp(kdbreglist[i].reg_name,
++ regname,
++ strlen(regname)) == 0)
++ break;
++ }
++
++ if ((i < nkdbreglist)
++ && (strlen(kdbreglist[i].reg_name) == strlen(regname))) {
++ if ((regs->xcs & 0xffff) == __KERNEL_CS) {
++ /* No cpl switch, esp and ss are not on stack */
++ if (strcmp(kdbreglist[i].reg_name, "esp") == 0) {
++ *contents = (kdb_machreg_t)regs +
++ sizeof(struct pt_regs) - 2*4;
++ return(0);
++ }
++ if (strcmp(kdbreglist[i].reg_name, "xss") == 0) {
++ asm volatile(
++ "pushl %%ss\n"
++ "popl %0\n"
++ :"=m" (*contents));
++ return(0);
++ }
++ }
++ *contents = *(unsigned long *)((unsigned long)regs +
++ kdbreglist[i].reg_offset);
++ return(0);
++ }
++
++ return KDB_BADREG;
++}
++
++/*
++ * kdba_setregcontents
++ *
++ * Set the contents of the register specified by the
++ * input string argument. Return an error if the string
++ * does not match a machine register.
++ *
++ * Supports modification of user-mode registers via
++ * %<register-name>
++ *
++ * Parameters:
++ * regname Pointer to string naming register
++ * regs Pointer to structure containing registers.
++ * contents Unsigned long containing new register contents
++ * Outputs:
++ * Returns:
++ * 0 Success
++ * KDB_BADREG Invalid register name
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++int
++kdba_setregcontents(const char *regname,
++ struct pt_regs *regs,
++ unsigned long contents)
++{
++ int i;
++
++ if (regname[0] == '%') {
++ regname++;
++ regs = (struct pt_regs *)
++ (kdb_current_task->thread.esp0 - sizeof(struct pt_regs));
++ }
++
++ for (i=0; i<ndbreglist; i++) {
++ if (strnicmp(dbreglist[i].reg_name,
++ regname,
++ strlen(regname)) == 0)
++ break;
++ }
++
++ if ((i < ndbreglist)
++ && (strlen(dbreglist[i].reg_name) == strlen(regname))) {
++ kdba_putdr(dbreglist[i].reg_offset, contents);
++ return 0;
++ }
++
++ if (!regs) {
++ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__);
++ return KDB_BADREG;
++ }
++
++ for (i=0; i<nkdbreglist; i++) {
++ if (strnicmp(kdbreglist[i].reg_name,
++ regname,
++ strlen(regname)) == 0)
++ break;
++ }
++
++ if ((i < nkdbreglist)
++ && (strlen(kdbreglist[i].reg_name) == strlen(regname))) {
++ *(unsigned long *)((unsigned long)regs
++ + kdbreglist[i].reg_offset) = contents;
++ return 0;
++ }
++
++ return KDB_BADREG;
++}
++
++/*
++ * kdba_dumpregs
++ *
++ * Dump the specified register set to the display.
++ *
++ * Parameters:
++ * regs Pointer to structure containing registers.
++ * type Character string identifying register set to dump
++ * extra string further identifying register (optional)
++ * Outputs:
++ * Returns:
++ * 0 Success
++ * Locking:
++ * None.
++ * Remarks:
++ * This function will dump the general register set if the type
++ * argument is NULL (struct pt_regs). The alternate register
++ * set types supported by this function:
++ *
++ * d Debug registers
++ * c Control registers
++ * u User registers at most recent entry to kernel
++ * for the process currently selected with "pid" command.
++ * Following not yet implemented:
++ * r Memory Type Range Registers (extra defines register)
++ *
++ * MSR on i386/x86_64 are handled by rdmsr/wrmsr commands.
++ */
++
++int
++kdba_dumpregs(struct pt_regs *regs,
++ const char *type,
++ const char *extra)
++{
++ int i;
++ int count = 0;
++
++ if (type
++ && (type[0] == 'u')) {
++ type = NULL;
++ regs = (struct pt_regs *)
++ (kdb_current_task->thread.esp0 - sizeof(struct pt_regs));
++ }
++
++ if (type == NULL) {
++ struct kdbregs *rlp;
++ kdb_machreg_t contents;
++
++ if (!regs) {
++ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__);
++ return KDB_BADREG;
++ }
++
++ for (i=0, rlp=kdbreglist; i<nkdbreglist; i++,rlp++) {
++ kdb_printf("%s = ", rlp->reg_name);
++ kdba_getregcontents(rlp->reg_name, regs, &contents);
++ kdb_printf("0x%08lx ", contents);
++ if ((++count % 4) == 0)
++ kdb_printf("\n");
++ }
++
++ kdb_printf("&regs = 0x%p\n", regs);
++
++ return 0;
++ }
++
++ switch (type[0]) {
++ case 'd':
++ {
++ unsigned long dr[8];
++
++ for(i=0; i<8; i++) {
++ if ((i == 4) || (i == 5)) continue;
++ dr[i] = kdba_getdr(i);
++ }
++ kdb_printf("dr0 = 0x%08lx dr1 = 0x%08lx dr2 = 0x%08lx dr3 = 0x%08lx\n",
++ dr[0], dr[1], dr[2], dr[3]);
++ kdb_printf("dr6 = 0x%08lx dr7 = 0x%08lx\n",
++ dr[6], dr[7]);
++ return 0;
++ }
++ case 'c':
++ {
++ unsigned long cr[5];
++
++ for (i=0; i<5; i++) {
++ cr[i] = kdba_getcr(i);
++ }
++ kdb_printf("cr0 = 0x%08lx cr1 = 0x%08lx cr2 = 0x%08lx cr3 = 0x%08lx\ncr4 = 0x%08lx\n",
++ cr[0], cr[1], cr[2], cr[3], cr[4]);
++ return 0;
++ }
++ case 'r':
++ break;
++ default:
++ return KDB_BADREG;
++ }
++
++ /* NOTREACHED */
++ return 0;
++}
++EXPORT_SYMBOL(kdba_dumpregs);
++
++kdb_machreg_t
++kdba_getpc(struct pt_regs *regs)
++{
++ return regs ? regs->eip : 0;
++}
++
++int
++kdba_setpc(struct pt_regs *regs, kdb_machreg_t newpc)
++{
++ if (KDB_NULL_REGS(regs))
++ return KDB_BADREG;
++ regs->eip = newpc;
++ KDB_STATE_SET(IP_ADJUSTED);
++ return 0;
++}
++
++/*
++ * kdba_main_loop
++ *
++ * Do any architecture specific set up before entering the main kdb loop.
++ * The primary function of this routine is to make all processes look the
++ * same to kdb, kdb must be able to list a process without worrying if the
++ * process is running or blocked, so make all process look as though they
++ * are blocked.
++ *
++ * Inputs:
++ * reason The reason KDB was invoked
++ * error The hardware-defined error code
++ * error2 kdb's current reason code. Initially error but can change
++ * according to kdb state.
++ * db_result Result from break or debug point.
++ * regs The exception frame at time of fault/breakpoint. If reason
++ * is SILENT or CPU_UP then regs is NULL, otherwise it should
++ * always be valid.
++ * Returns:
++ * 0 KDB was invoked for an event which it wasn't responsible
++ * 1 KDB handled the event for which it was invoked.
++ * Outputs:
++ * Sets eip and esp in current->thread.
++ * Locking:
++ * None.
++ * Remarks:
++ * none.
++ */
++
++int
++kdba_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
++ kdb_dbtrap_t db_result, struct pt_regs *regs)
++{
++ int ret;
++ kdb_save_running(regs);
++ ret = kdb_main_loop(reason, reason2, error, db_result, regs);
++ kdb_unsave_running(regs);
++ return ret;
++}
++
++void
++kdba_disableint(kdb_intstate_t *state)
++{
++ unsigned long *fp = (unsigned long *)state;
++ unsigned long flags;
++
++ local_irq_save(flags);
++
++ *fp = flags;
++}
++
++void
++kdba_restoreint(kdb_intstate_t *state)
++{
++ unsigned long flags = *(int *)state;
++ local_irq_restore(flags);
++}
++
++void
++kdba_setsinglestep(struct pt_regs *regs)
++{
++ if (KDB_NULL_REGS(regs))
++ return;
++ if (regs->eflags & EF_IE)
++ KDB_STATE_SET(A_IF);
++ else
++ KDB_STATE_CLEAR(A_IF);
++ regs->eflags = (regs->eflags | EF_TF) & ~EF_IE;
++}
++
++void
++kdba_clearsinglestep(struct pt_regs *regs)
++{
++ if (KDB_NULL_REGS(regs))
++ return;
++ if (KDB_STATE(A_IF))
++ regs->eflags |= EF_IE;
++ else
++ regs->eflags &= ~EF_IE;
++}
++
++int asmlinkage
++kdba_setjmp(kdb_jmp_buf *jb)
++{
++#if defined(CONFIG_FRAME_POINTER)
++ __asm__ ("movl 8(%esp), %eax\n\t"
++ "movl %ebx, 0(%eax)\n\t"
++ "movl %esi, 4(%eax)\n\t"
++ "movl %edi, 8(%eax)\n\t"
++ "movl (%esp), %ecx\n\t"
++ "movl %ecx, 12(%eax)\n\t"
++ "leal 8(%esp), %ecx\n\t"
++ "movl %ecx, 16(%eax)\n\t"
++ "movl 4(%esp), %ecx\n\t"
++ "movl %ecx, 20(%eax)\n\t");
++#else /* CONFIG_FRAME_POINTER */
++ __asm__ ("movl 4(%esp), %eax\n\t"
++ "movl %ebx, 0(%eax)\n\t"
++ "movl %esi, 4(%eax)\n\t"
++ "movl %edi, 8(%eax)\n\t"
++ "movl %ebp, 12(%eax)\n\t"
++ "leal 4(%esp), %ecx\n\t"
++ "movl %ecx, 16(%eax)\n\t"
++ "movl 0(%esp), %ecx\n\t"
++ "movl %ecx, 20(%eax)\n\t");
++#endif /* CONFIG_FRAME_POINTER */
++ return 0;
++}
++
++void asmlinkage
++kdba_longjmp(kdb_jmp_buf *jb, int reason)
++{
++#if defined(CONFIG_FRAME_POINTER)
++ __asm__("movl 8(%esp), %ecx\n\t"
++ "movl 12(%esp), %eax\n\t"
++ "movl 20(%ecx), %edx\n\t"
++ "movl 0(%ecx), %ebx\n\t"
++ "movl 4(%ecx), %esi\n\t"
++ "movl 8(%ecx), %edi\n\t"
++ "movl 12(%ecx), %ebp\n\t"
++ "movl 16(%ecx), %esp\n\t"
++ "jmp *%edx\n");
++#else /* CONFIG_FRAME_POINTER */
++ __asm__("movl 4(%esp), %ecx\n\t"
++ "movl 8(%esp), %eax\n\t"
++ "movl 20(%ecx), %edx\n\t"
++ "movl 0(%ecx), %ebx\n\t"
++ "movl 4(%ecx), %esi\n\t"
++ "movl 8(%ecx), %edi\n\t"
++ "movl 12(%ecx), %ebp\n\t"
++ "movl 16(%ecx), %esp\n\t"
++ "jmp *%edx\n");
++#endif /* CONFIG_FRAME_POINTER */
++}
++
++/*
++ * kdba_pt_regs
++ *
++ * Format a struct pt_regs
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * If no address is supplied, it uses the last irq pt_regs.
++ */
++
++static int
++kdba_pt_regs(int argc, const char **argv)
++{
++ int diag;
++ kdb_machreg_t addr;
++ long offset = 0;
++ int nextarg;
++ struct pt_regs *p;
++ static const char *fmt = " %-11.11s 0x%lx\n";
++
++ if (argc == 0) {
++ addr = (kdb_machreg_t) get_irq_regs();
++ } else if (argc == 1) {
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++ if (diag)
++ return diag;
++ } else {
++ return KDB_ARGCOUNT;
++ }
++
++ p = (struct pt_regs *) addr;
++ kdb_printf("struct pt_regs 0x%p-0x%p\n", p, (unsigned char *)p + sizeof(*p) - 1);
++ kdb_print_nameval("ebx", p->ebx);
++ kdb_print_nameval("ecx", p->ecx);
++ kdb_print_nameval("edx", p->edx);
++ kdb_print_nameval("esi", p->esi);
++ kdb_print_nameval("edi", p->edi);
++ kdb_print_nameval("ebp", p->ebp);
++ kdb_print_nameval("eax", p->eax);
++ kdb_printf(fmt, "xds", p->xds);
++ kdb_printf(fmt, "xes", p->xes);
++ kdb_print_nameval("orig_eax", p->orig_eax);
++ kdb_print_nameval("eip", p->eip);
++ kdb_printf(fmt, "xcs", p->xcs);
++ kdb_printf(fmt, "eflags", p->eflags);
++ kdb_printf(fmt, "esp", p->esp);
++ kdb_printf(fmt, "xss", p->xss);
++ return 0;
++}
++
++/*
++ * kdba_stackdepth
++ *
++ * Print processes that are using more than a specific percentage of their
++ * stack.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * If no percentage is supplied, it uses 60.
++ */
++
++static void
++kdba_stackdepth1(struct task_struct *p, unsigned long esp)
++{
++ struct thread_info *tinfo;
++ int used;
++ const char *type;
++ kdb_ps1(p);
++ do {
++ tinfo = (struct thread_info *)(esp & -THREAD_SIZE);
++ used = sizeof(*tinfo) + THREAD_SIZE - (esp & (THREAD_SIZE-1));
++ type = NULL;
++ if (kdb_task_has_cpu(p)) {
++ struct kdb_activation_record ar;
++ memset(&ar, 0, sizeof(ar));
++ kdba_get_stack_info_alternate(esp, -1, &ar);
++ type = ar.stack.id;
++ }
++ if (!type)
++ type = "process";
++ kdb_printf(" %s stack %p esp %lx used %d\n", type, tinfo, esp, used);
++ esp = tinfo->previous_esp;
++ } while (esp);
++}
++
++static int
++kdba_stackdepth(int argc, const char **argv)
++{
++ int diag, cpu, threshold, used, over;
++ unsigned long percentage;
++ unsigned long esp;
++ long offset = 0;
++ int nextarg;
++ struct task_struct *p, *g;
++ struct kdb_running_process *krp;
++ struct thread_info *tinfo;
++
++ if (argc == 0) {
++ percentage = 60;
++ } else if (argc == 1) {
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &percentage, &offset, NULL);
++ if (diag)
++ return diag;
++ } else {
++ return KDB_ARGCOUNT;
++ }
++ percentage = max_t(int, percentage, 1);
++ percentage = min_t(int, percentage, 100);
++ threshold = ((2 * THREAD_SIZE * percentage) / 100 + 1) >> 1;
++ kdb_printf("stackdepth: processes using more than %ld%% (%d bytes) of stack\n",
++ percentage, threshold);
++
++ /* Run the active tasks first, they can have multiple stacks */
++ for (cpu = 0, krp = kdb_running_process; cpu < NR_CPUS; ++cpu, ++krp) {
++ if (!cpu_online(cpu))
++ continue;
++ p = krp->p;
++ esp = krp->arch.esp;
++ over = 0;
++ do {
++ tinfo = (struct thread_info *)(esp & -THREAD_SIZE);
++ used = sizeof(*tinfo) + THREAD_SIZE - (esp & (THREAD_SIZE-1));
++ if (used >= threshold)
++ over = 1;
++ esp = tinfo->previous_esp;
++ } while (esp);
++ if (over)
++ kdba_stackdepth1(p, krp->arch.esp);
++ }
++ /* Now the tasks that are not on cpus */
++ kdb_do_each_thread(g, p) {
++ if (kdb_task_has_cpu(p))
++ continue;
++ esp = p->thread.esp;
++ used = sizeof(*tinfo) + THREAD_SIZE - (esp & (THREAD_SIZE-1));
++ over = used >= threshold;
++ if (over)
++ kdba_stackdepth1(p, esp);
++ } kdb_while_each_thread(g, p);
++
++ return 0;
++}
++
++asmlinkage int kdb_call(void);
++
++/* Executed once on each cpu at startup. */
++void
++kdba_cpu_up(void)
++{
++}
++
++static int __init
++kdba_arch_init(void)
++{
++#ifdef CONFIG_SMP
++ set_intr_gate(KDB_VECTOR, kdb_interrupt);
++#endif
++ set_intr_gate(KDBENTER_VECTOR, kdb_call);
++ return 0;
++}
++
++arch_initcall(kdba_arch_init);
++
++/*
++ * kdba_init
++ *
++ * Architecture specific initialization.
++ *
++ * Parameters:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ * None.
++ */
++
++void __init
++kdba_init(void)
++{
++ kdba_arch_init(); /* Need to register KDBENTER_VECTOR early */
++ kdb_register("pt_regs", kdba_pt_regs, "address", "Format struct pt_regs", 0);
++ kdb_register("stackdepth", kdba_stackdepth, "[percentage]", "Print processes using >= stack percentage", 0);
++
++ return;
++}
++
++/*
++ * kdba_adjust_ip
++ *
++ * Architecture specific adjustment of instruction pointer before leaving
++ * kdb.
++ *
++ * Parameters:
++ * reason The reason KDB was invoked
++ * error The hardware-defined error code
++ * regs The exception frame at time of fault/breakpoint. If reason
++ * is SILENT or CPU_UP then regs is NULL, otherwise it should
++ * always be valid.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ * noop on ix86.
++ */
++
++void
++kdba_adjust_ip(kdb_reason_t reason, int error, struct pt_regs *regs)
++{
++ return;
++}
++
++void
++kdba_set_current_task(const struct task_struct *p)
++{
++ kdb_current_task = p;
++ if (kdb_task_has_cpu(p)) {
++ struct kdb_running_process *krp = kdb_running_process + kdb_process_cpu(p);
++ kdb_current_regs = krp->regs;
++ return;
++ }
++ kdb_current_regs = NULL;
++}
++
++/*
++ * asm-i386 uaccess.h supplies __copy_to_user which relies on MMU to
++ * trap invalid addresses in the _xxx fields. Verify the other address
++ * of the pair is valid by accessing the first and last byte ourselves,
++ * then any access violations should only be caused by the _xxx
++ * addresses.
++ */
++
++int
++kdba_putarea_size(unsigned long to_xxx, void *from, size_t size)
++{
++ mm_segment_t oldfs = get_fs();
++ int r;
++ char c;
++ c = *((volatile char *)from);
++ c = *((volatile char *)from + size - 1);
++
++ if (to_xxx < PAGE_OFFSET) {
++ return kdb_putuserarea_size(to_xxx, from, size);
++ }
++
++ set_fs(KERNEL_DS);
++ r = __copy_to_user_inatomic((void __user *)to_xxx, from, size);
++ set_fs(oldfs);
++ return r;
++}
++
++int
++kdba_getarea_size(void *to, unsigned long from_xxx, size_t size)
++{
++ mm_segment_t oldfs = get_fs();
++ int r;
++ *((volatile char *)to) = '\0';
++ *((volatile char *)to + size - 1) = '\0';
++
++ if (from_xxx < PAGE_OFFSET) {
++ return kdb_getuserarea_size(to, from_xxx, size);
++ }
++
++ set_fs(KERNEL_DS);
++ switch (size) {
++ case 1:
++ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 1);
++ break;
++ case 2:
++ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 2);
++ break;
++ case 4:
++ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 4);
++ break;
++ case 8:
++ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, 8);
++ break;
++ default:
++ r = __copy_to_user_inatomic((void __user *)to, (void *)from_xxx, size);
++ break;
++ }
++ set_fs(oldfs);
++ return r;
++}
++
++int
++kdba_verify_rw(unsigned long addr, size_t size)
++{
++ unsigned char data[size];
++ return(kdba_getarea_size(data, addr, size) || kdba_putarea_size(addr, data, size));
++}
++
++#ifdef CONFIG_SMP
++
++#include <mach_ipi.h>
++
++/* When first entering KDB, try a normal IPI. That reduces backtrace problems
++ * on the other cpus.
++ */
++void
++smp_kdb_stop(void)
++{
++ if (!KDB_FLAG(NOIPI))
++ send_IPI_allbutself(KDB_VECTOR);
++}
++
++/* The normal KDB IPI handler */
++void
++smp_kdb_interrupt(struct pt_regs *regs)
++{
++ struct pt_regs *old_regs = set_irq_regs(regs);
++ ack_APIC_irq();
++ irq_enter();
++ kdb_ipi(regs, NULL);
++ irq_exit();
++ set_irq_regs(old_regs);
++}
++
++/* Invoked once from kdb_wait_for_cpus when waiting for cpus. For those cpus
++ * that have not responded to the normal KDB interrupt yet, hit them with an
++ * NMI event.
++ */
++void
++kdba_wait_for_cpus(void)
++{
++ int c;
++ if (KDB_FLAG(CATASTROPHIC))
++ return;
++ kdb_printf(" Sending NMI to cpus that have not responded yet\n");
++ for_each_online_cpu(c)
++ if (kdb_running_process[c].seqno < kdb_seqno - 1)
++ send_IPI_mask(cpumask_of_cpu(c), NMI_VECTOR);
++}
++
++#endif /* CONFIG_SMP */
+diff -Nurp linux-2.6.22-590/arch/i386/kdb/kdb_cmds linux-2.6.22-600/arch/i386/kdb/kdb_cmds
+--- linux-2.6.22-590/arch/i386/kdb/kdb_cmds 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/i386/kdb/kdb_cmds 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,17 @@
++# Standard architecture specific commands for kdb.
++# These commands are appended to those in kdb/kdb_cmds, see that file for
++# restrictions.
++
++# Standard debugging information for first level support, invoked from archkdb*
++# commands that are defined in kdb/kdb_cmds.
++
++defcmd archkdbcommon "" "Common arch debugging"
++ set LINES 2000000
++ set BTAPROMPT 0
++ -summary
++ -id %eip-24
++ -cpu
++ -ps
++ -dmesg 600
++ -bt
++endefcmd
+diff -Nurp linux-2.6.22-590/arch/i386/kdb/Makefile linux-2.6.22-600/arch/i386/kdb/Makefile
+--- linux-2.6.22-590/arch/i386/kdb/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/i386/kdb/Makefile 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,13 @@
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License. See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++# Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++#
++
++obj-$(CONFIG_KDB) := kdba_bp.o kdba_id.o kdba_io.o kdbasupport.o i386-dis.o
++
++override CFLAGS := $(CFLAGS:%-pg=% )
++
++CFLAGS_kdba_io.o += -I $(TOPDIR)/arch/$(ARCH)/kdb
+diff -Nurp linux-2.6.22-590/arch/i386/kdb/pc_keyb.h linux-2.6.22-600/arch/i386/kdb/pc_keyb.h
+--- linux-2.6.22-590/arch/i386/kdb/pc_keyb.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/i386/kdb/pc_keyb.h 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,137 @@
++/*
++ * include/linux/pc_keyb.h
++ *
++ * PC Keyboard And Keyboard Controller
++ *
++ * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
++ */
++
++/*
++ * Configuration Switches
++ */
++
++#undef KBD_REPORT_ERR /* Report keyboard errors */
++#define KBD_REPORT_UNKN /* Report unknown scan codes */
++#define KBD_REPORT_TIMEOUTS /* Report keyboard timeouts */
++#undef KBD_IS_FOCUS_9000 /* We have the brain-damaged FOCUS-9000 keyboard */
++#undef INITIALIZE_MOUSE /* Define if your PS/2 mouse needs initialization. */
++
++
++
++#define KBD_INIT_TIMEOUT 1000 /* Timeout in ms for initializing the keyboard */
++#define KBC_TIMEOUT 250 /* Timeout in ms for sending to keyboard controller */
++#define KBD_TIMEOUT 1000 /* Timeout in ms for keyboard command acknowledge */
++
++/*
++ * Internal variables of the driver
++ */
++
++extern unsigned char pckbd_read_mask;
++extern unsigned char aux_device_present;
++
++/*
++ * Keyboard Controller Registers on normal PCs.
++ */
++
++#define KBD_STATUS_REG 0x64 /* Status register (R) */
++#define KBD_CNTL_REG 0x64 /* Controller command register (W) */
++#define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */
++
++/*
++ * Keyboard Controller Commands
++ */
++
++#define KBD_CCMD_READ_MODE 0x20 /* Read mode bits */
++#define KBD_CCMD_WRITE_MODE 0x60 /* Write mode bits */
++#define KBD_CCMD_GET_VERSION 0xA1 /* Get controller version */
++#define KBD_CCMD_MOUSE_DISABLE 0xA7 /* Disable mouse interface */
++#define KBD_CCMD_MOUSE_ENABLE 0xA8 /* Enable mouse interface */
++#define KBD_CCMD_TEST_MOUSE 0xA9 /* Mouse interface test */
++#define KBD_CCMD_SELF_TEST 0xAA /* Controller self test */
++#define KBD_CCMD_KBD_TEST 0xAB /* Keyboard interface test */
++#define KBD_CCMD_KBD_DISABLE 0xAD /* Keyboard interface disable */
++#define KBD_CCMD_KBD_ENABLE 0xAE /* Keyboard interface enable */
++#define KBD_CCMD_WRITE_AUX_OBUF 0xD3 /* Write to output buffer as if
++ initiated by the auxiliary device */
++#define KBD_CCMD_WRITE_MOUSE 0xD4 /* Write the following byte to the mouse */
++
++/*
++ * Keyboard Commands
++ */
++
++#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */
++#define KBD_CMD_SET_RATE 0xF3 /* Set typematic rate */
++#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */
++#define KBD_CMD_DISABLE 0xF5 /* Disable scanning */
++#define KBD_CMD_RESET 0xFF /* Reset */
++
++/*
++ * Keyboard Replies
++ */
++
++#define KBD_REPLY_POR 0xAA /* Power on reset */
++#define KBD_REPLY_ACK 0xFA /* Command ACK */
++#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */
++
++/*
++ * Status Register Bits
++ */
++
++#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */
++#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */
++#define KBD_STAT_SELFTEST 0x04 /* Self test successful */
++#define KBD_STAT_CMD 0x08 /* Last write was a command write (0=data) */
++#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */
++#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */
++#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */
++#define KBD_STAT_PERR 0x80 /* Parity error */
++
++#define AUX_STAT_OBF (KBD_STAT_OBF | KBD_STAT_MOUSE_OBF)
++
++/*
++ * Controller Mode Register Bits
++ */
++
++#define KBD_MODE_KBD_INT 0x01 /* Keyboard data generate IRQ1 */
++#define KBD_MODE_MOUSE_INT 0x02 /* Mouse data generate IRQ12 */
++#define KBD_MODE_SYS 0x04 /* The system flag (?) */
++#define KBD_MODE_NO_KEYLOCK 0x08 /* The keylock doesn't affect the keyboard if set */
++#define KBD_MODE_DISABLE_KBD 0x10 /* Disable keyboard interface */
++#define KBD_MODE_DISABLE_MOUSE 0x20 /* Disable mouse interface */
++#define KBD_MODE_KCC 0x40 /* Scan code conversion to PC format */
++#define KBD_MODE_RFU 0x80
++
++/*
++ * Mouse Commands
++ */
++
++#define AUX_SET_RES 0xE8 /* Set resolution */
++#define AUX_SET_SCALE11 0xE6 /* Set 1:1 scaling */
++#define AUX_SET_SCALE21 0xE7 /* Set 2:1 scaling */
++#define AUX_GET_SCALE 0xE9 /* Get scaling factor */
++#define AUX_SET_STREAM 0xEA /* Set stream mode */
++#define AUX_SET_SAMPLE 0xF3 /* Set sample rate */
++#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */
++#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */
++#define AUX_RESET 0xFF /* Reset aux device */
++#define AUX_ACK 0xFA /* Command byte ACK. */
++
++#define AUX_BUF_SIZE 2048 /* This might be better divisible by
++ three to make overruns stay in sync
++ but then the read function would need
++ a lock etc - ick */
++
++struct aux_queue {
++ unsigned long head;
++ unsigned long tail;
++ wait_queue_head_t proc_list;
++ struct fasync_struct *fasync;
++ unsigned char buf[AUX_BUF_SIZE];
++};
++
++
++/* How to access the keyboard macros on this platform. */
++#define kbd_read_input() inb(KBD_DATA_REG)
++#define kbd_read_status() inb(KBD_STATUS_REG)
++#define kbd_write_output(val) outb(val, KBD_DATA_REG)
++#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
+diff -Nurp linux-2.6.22-590/arch/i386/kernel/entry.S linux-2.6.22-600/arch/i386/kernel/entry.S
+--- linux-2.6.22-590/arch/i386/kernel/entry.S 2008-04-09 18:10:46.000000000 +0200
++++ linux-2.6.22-600/arch/i386/kernel/entry.S 2008-04-09 18:16:14.000000000 +0200
+@@ -976,6 +976,26 @@ ENTRY(alignment_check)
+ CFI_ENDPROC
+ END(alignment_check)
+
++#ifdef CONFIG_KDB
++
++ENTRY(kdb_call)
++ RING0_INT_FRAME
++ pushl %eax # save orig EAX
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ movl %esp,%ecx # struct pt_regs
++ movl $0,%edx # error_code
++ movl $1,%eax # KDB_REASON_ENTER
++ call kdb
++ jmp restore_all
++ CFI_ENDPROC
++
++#ifdef CONFIG_SMP
++BUILD_INTERRUPT(kdb_interrupt,KDB_VECTOR)
++#endif /* CONFIG_SMP */
++
++#endif /* CONFIG_KDB */
++
+ ENTRY(divide_error)
+ RING0_INT_FRAME
+ pushl $0 # no error code
+diff -Nurp linux-2.6.22-590/arch/i386/kernel/io_apic.c linux-2.6.22-600/arch/i386/kernel/io_apic.c
+--- linux-2.6.22-590/arch/i386/kernel/io_apic.c 2008-04-09 18:10:46.000000000 +0200
++++ linux-2.6.22-600/arch/i386/kernel/io_apic.c 2008-04-09 18:16:14.000000000 +0200
+@@ -32,6 +32,10 @@
+ #include <linux/sysdev.h>
+ #include <linux/pci.h>
+ #include <linux/msi.h>
++
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#endif /* CONFIG_KDB */
+ #include <linux/htirq.h>
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
+@@ -1244,6 +1248,10 @@ next:
+ return -ENOSPC;
+ if (vector == SYSCALL_VECTOR)
+ goto next;
++#ifdef CONFIG_KDB
++ if (vector == KDBENTER_VECTOR)
++ goto next;
++#endif /* CONFIG_KDB */
+ for (i = 0; i < NR_IRQ_VECTORS; i++)
+ if (irq_vector[i] == vector)
+ goto next;
+diff -Nurp linux-2.6.22-590/arch/i386/kernel/reboot.c linux-2.6.22-600/arch/i386/kernel/reboot.c
+--- linux-2.6.22-590/arch/i386/kernel/reboot.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/arch/i386/kernel/reboot.c 2008-04-09 18:16:14.000000000 +0200
+@@ -3,6 +3,9 @@
+ */
+
+ #include <linux/mm.h>
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#endif /* CONFIG_KDB */
+ #include <linux/module.h>
+ #include <linux/delay.h>
+ #include <linux/init.h>
+@@ -313,6 +316,14 @@ static void native_machine_shutdown(void
+ * all of the others, and disable their local APICs.
+ */
+
++#ifdef CONFIG_KDB
++ /*
++ * If this restart is occurring while kdb is running (e.g. reboot
++ * command), the other CPU's are already stopped. Don't try to
++ * stop them yet again.
++ */
++ if (!KDB_IS_RUNNING())
++#endif /* CONFIG_KDB */
+ smp_send_stop();
+ #endif /* CONFIG_SMP */
+
+diff -Nurp linux-2.6.22-590/arch/i386/kernel/traps.c linux-2.6.22-600/arch/i386/kernel/traps.c
+--- linux-2.6.22-590/arch/i386/kernel/traps.c 2008-04-09 18:10:52.000000000 +0200
++++ linux-2.6.22-600/arch/i386/kernel/traps.c 2008-04-09 18:16:14.000000000 +0200
+@@ -41,6 +41,10 @@
+ #include <linux/mca.h>
+ #endif
+
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#endif /* CONFIG_KDB */
++
+ #include <asm/processor.h>
+ #include <asm/system.h>
+ #include <asm/io.h>
+@@ -438,6 +442,10 @@ void die(const char * str, struct pt_reg
+ bust_spinlocks(0);
+ die.lock_owner = -1;
+ spin_unlock_irqrestore(&die.lock, flags);
++#ifdef CONFIG_KDB
++ kdb_diemsg = str;
++ kdb(KDB_REASON_OOPS, err, regs);
++#endif /* CONFIG_KDB */
+
+ if (!regs)
+ return;
+@@ -561,7 +569,7 @@ fastcall void do_##name(struct pt_regs *
+ }
+
+ DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
+-#ifndef CONFIG_KPROBES
++#if !defined(CONFIG_KPROBES) && !defined(CONFIG_KDB)
+ DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
+ #endif
+ DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
+@@ -670,6 +678,9 @@ io_check_error(unsigned char reason, str
+ static __kprobes void
+ unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+ {
++#ifdef CONFIG_KDB
++ (void)kdb(KDB_REASON_NMI, reason, regs);
++#endif /* CONFIG_KDB */
+ #ifdef CONFIG_MCA
+ /* Might actually be able to figure out what the guilty party
+ * is. */
+@@ -705,6 +716,9 @@ void __kprobes die_nmi(struct pt_regs *r
+ printk(" on CPU%d, eip %08lx, registers:\n",
+ smp_processor_id(), regs->eip);
+ show_registers(regs);
++#ifdef CONFIG_KDB
++ kdb(KDB_REASON_NMI, 0, regs);
++#endif /* CONFIG_KDB */
+ console_silent();
+ spin_unlock(&nmi_print_lock);
+ bust_spinlocks(0);
+@@ -727,7 +741,17 @@ static __kprobes void default_do_nmi(str
+ /* Only the BSP gets external NMIs from the system. */
+ if (!smp_processor_id())
+ reason = get_nmi_reason();
+-
++
++#if defined(CONFIG_SMP) && defined(CONFIG_KDB)
++ /*
++ * Call the kernel debugger to see if this NMI is due
++ * to an KDB requested IPI. If so, kdb will handle it.
++ */
++ if (kdb_ipi(regs, NULL)) {
++ return;
++ }
++#endif /* defined(CONFIG_SMP) && defined(CONFIG_KDB) */
++
+ if (!(reason & 0xc0)) {
+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
+ == NOTIFY_STOP)
+@@ -776,6 +800,10 @@ fastcall __kprobes void do_nmi(struct pt
+ #ifdef CONFIG_KPROBES
+ fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
+ {
++#ifdef CONFIG_KDB
++ if (kdb(KDB_REASON_BREAK, error_code, regs))
++ return;
++#endif
+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+ == NOTIFY_STOP)
+ return;
+@@ -815,6 +843,11 @@ fastcall void __kprobes do_debug(struct
+
+ get_debugreg(condition, 6);
+
++#ifdef CONFIG_KDB
++ if (kdb(KDB_REASON_DEBUG, error_code, regs))
++ return;
++#endif /* CONFIG_KDB */
++
+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
+ SIGTRAP) == NOTIFY_STOP)
+ return;
+@@ -868,6 +901,16 @@ clear_TF_reenable:
+ return;
+ }
+
++#if defined(CONFIG_KDB) && !defined(CONFIG_KPROBES)
++fastcall void do_int3(struct pt_regs * regs, long error_code)
++{
++ if (kdb(KDB_REASON_BREAK, error_code, regs))
++ return;
++ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
++}
++#endif /* CONFIG_KDB && !CONFIG_KPROBES */
++
++
+ /*
+ * Note that we play around with the 'TS' bit in an attempt to get
+ * the correct behaviour even in the presence of the asynchronous
+diff -Nurp linux-2.6.22-590/arch/i386/Makefile linux-2.6.22-600/arch/i386/Makefile
+--- linux-2.6.22-590/arch/i386/Makefile 2008-04-09 18:10:46.000000000 +0200
++++ linux-2.6.22-600/arch/i386/Makefile 2008-04-09 18:16:14.000000000 +0200
+@@ -108,6 +108,7 @@ drivers-$(CONFIG_PCI) += arch/i386/pci
+ # must be linked after kernel/
+ drivers-$(CONFIG_OPROFILE) += arch/i386/oprofile/
+ drivers-$(CONFIG_PM) += arch/i386/power/
++drivers-$(CONFIG_KDB) += arch/i386/kdb/
+
+ CFLAGS += $(mflags-y)
+ AFLAGS += $(mflags-y)
+diff -Nurp linux-2.6.22-590/arch/x86_64/Kconfig.debug linux-2.6.22-600/arch/x86_64/Kconfig.debug
+--- linux-2.6.22-590/arch/x86_64/Kconfig.debug 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/arch/x86_64/Kconfig.debug 2008-04-09 18:16:24.000000000 +0200
+@@ -16,6 +16,76 @@ config DEBUG_RODATA
+ of the kernel code won't be covered by a 2MB TLB anymore.
+ If in doubt, say "N".
+
++config KDB
++ bool "Built-in Kernel Debugger support"
++ depends on DEBUG_KERNEL
++ select KALLSYMS
++ select KALLSYMS_ALL
++ help
++ This option provides a built-in kernel debugger. The built-in
++ kernel debugger contains commands which allow memory to be examined,
++ instructions to be disassembled and breakpoints to be set. For details,
++ see Documentation/kdb/kdb.mm and the manual pages kdb_bt, kdb_ss, etc.
++ Kdb can also be used via the serial port. Set up the system to
++ have a serial console (see Documentation/serial-console.txt).
++ The key sequence <escape>KDB on the serial port will cause the
++ kernel debugger to be entered with input from the serial port and
++ output to the serial console. If unsure, say N.
++
++config KDB_MODULES
++ tristate "KDB modules"
++ depends on KDB
++ help
++ KDB can be extended by adding your own modules, in directory
++ kdb/modules. This option selects the way that these modules should
++ be compiled, as free standing modules (select M) or built into the
++ kernel (select Y). If unsure say M.
++
++config KDB_OFF
++ bool "KDB off by default"
++ depends on KDB
++ help
++ Normally kdb is activated by default, as long as CONFIG_KDB is set.
++ If you want to ship a kernel with kdb support but only have kdb
++ turned on when the user requests it then select this option. When
++ compiled with CONFIG_KDB_OFF, kdb ignores all events unless you boot
++ with kdb=on or you echo "1" > /proc/sys/kernel/kdb. This option also
++ works in reverse, if kdb is normally activated, you can boot with
++ kdb=off or echo "0" > /proc/sys/kernel/kdb to deactivate kdb. If
++ unsure, say N.
++
++config KDB_CONTINUE_CATASTROPHIC
++ int "KDB continues after catastrophic errors"
++ depends on KDB
++ default "0"
++ help
++ This integer controls the behaviour of kdb when the kernel gets a
++ catastrophic error, i.e. for a panic, oops, NMI or other watchdog
++ tripping. CONFIG_KDB_CONTINUE_CATASTROPHIC interacts with
++ /proc/sys/kernel/kdb and CONFIG_DUMP (if your kernel has the LKCD
++ patch).
++ When KDB is active (/proc/sys/kernel/kdb == 1) and a catastrophic
++ error occurs, nothing extra happens until you type 'go'.
++ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time
++ you type 'go', kdb warns you. The second time you type 'go', KDB
++ tries to continue - no guarantees that the kernel is still usable.
++ CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue - no
++ guarantees that the kernel is still usable.
++ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. If your kernel has the LKCD
++ patch and LKCD is configured to take a dump then KDB forces a dump.
++ Whether or not a dump is taken, KDB forces a reboot.
++ When KDB is not active (/proc/sys/kernel/kdb == 0) and a catastrophic
++ error occurs, the following steps are automatic, no human
++ intervention is required.
++ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default) or 1. KDB attempts
++ to continue - no guarantees that the kernel is still usable.
++ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. If your kernel has the LKCD
++ patch and LKCD is configured to take a dump then KDB automatically
++ forces a dump. Whether or not a dump is taken, KDB forces a
++ reboot.
++ If you are not sure, say 0. Read Documentation/kdb/dump.txt before
++ setting to 2.
++
+ config IOMMU_DEBUG
+ depends on IOMMU && DEBUG_KERNEL
+ bool "Enable IOMMU debugging"
+diff -Nurp linux-2.6.22-590/arch/x86_64/kdb/ChangeLog linux-2.6.22-600/arch/x86_64/kdb/ChangeLog
+--- linux-2.6.22-590/arch/x86_64/kdb/ChangeLog 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/x86_64/kdb/ChangeLog 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,387 @@
++2007-07-26 Keith Owens <kaos@sgi.com>
++
++ * New x86 backtrace code.
++ * kdb v4.4-2.6.22-x86_64-2.
++
++2007-07-09 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-x86_64-1.
++
++2007-07-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc7-x86_64-1.
++
++2007-06-25 Keith Owens <kaos@sgi.com>
++
++ * Hook into DIE_NMIWATCHDOG.
++ * kdb v4.4-2.6.22-rc5-x86_64-2.
++
++2007-06-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc5-x86_64-1.
++
++2007-06-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc4-x86_64-1.
++
++2007-05-28 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc3-x86_64-1.
++
++2007-05-22 Keith Owens <kaos@sgi.com>
++
++ * Register KDBENTER_VECTOR early on the boot cpu.
++ * kdb v4.4-2.6.22-rc2-x86_64-2.
++
++2007-05-22 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc2-x86_64-1.
++
++2007-05-22 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc1-x86_64-1.
++
++2007-05-17 Keith Owens <kaos@sgi.com>
++
++ * Update dumpregs comments for rdmsr and wrmsr commands.
++ Bernardo Innocenti.
++ * kdb v4.4-2.6.21-x86_64-3.
++
++2007-05-15 Keith Owens <kaos@sgi.com>
++
++ * Change kdba_late_init to kdba_arch_init so KDB_ENTER() can be used
++ earlier.
++ * kdb v4.4-2.6.21-x86_64-2.
++
++2007-04-29 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-x86_64-1.
++
++2007-04-16 Keith Owens <kaos@sgi.com>
++
++ * Select KALLSYMS and KALLSYMS_ALL when KDB is selected.
++ * kdb v4.4-2.6.21-rc7-x86_64-2.
++
++2007-04-16 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc7-x86_64-1.
++
++2007-04-10 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc6-x86_64-1.
++
++2007-04-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc5-x86_64-1.
++
++2007-03-19 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc4-x86_64-1.
++
++2007-03-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc3-x86_64-1.
++
++2007-03-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc2-x86_64-1.
++
++2007-03-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc1-x86_64-1.
++
++2007-03-01 Keith Owens <kaos@sgi.com>
++
++ * Remove sparse warnings.
++ * kdb v4.4-2.6.20-x86_64-3.
++
++2007-02-16 Keith Owens <kaos@sgi.com>
++
++ * Initialise variable bits of struct disassemble_info each time.
++ * kdb v4.4-2.6.20-x86_64-2.
++
++2007-02-06 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-x86_64-1.
++
++2007-02-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc7-x86_64-1.
++
++2007-01-10 Keith Owens <kaos@sgi.com>
++
++ * Correct setjmp for the FRAME_POINTER=y case.
++ * Remove duplicate longjmp code for FRAME_POINTER=n/y.
++ * kdb v4.4-2.6.20-rc4-x86_64-2.
++
++2007-01-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc4-x86_64-1.
++
++2007-01-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc3-x86_64-1.
++
++2006-12-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc1-x86_64-1.
++
++2006-12-07 Keith Owens <kaos@sgi.com>
++
++ * Export kdba_dumpregs.
++ * kdb v4.4-2.6.19-x86_64-2.
++
++2006-11-30 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-x86_64-1.
++
++2006-11-27 Keith Owens <kaos@sgi.com>
++
++ * Only use VT keyboard if the command line allows it and ACPI indicates
++ that there is an i8042.
++ * kdb v4.4-2.6.19-rc6-x86_64-2.
++
++2006-11-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc6-x86_64-1.
++
++2006-11-09 Keith Owens <kaos@sgi.com>
++
++ * Only use VT console if the command line allows it.
++ * kdb v4.4-2.6.19-rc5-x86_64-2.
++
++2006-11-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc5-x86_64-1.
++
++2006-11-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc4-x86_64-1.
++
++2006-10-24 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc3-x86_64-1.
++
++2006-10-24 Keith Owens <kaos@sgi.com>
++
++ * Remove redundant regs and envp parameters.
++ * kdb v4.4-2.6.19-rc2-x86_64-2.
++
++2006-10-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc2-x86_64-1.
++
++2006-10-11 Keith Owens <kaos@sgi.com>
++
++ * Make the KDBENTER_VECTOR an interrupt gate instead of a trap gate, it
++ simplifies the code and disables interrupts on KDB_ENTER().
++ * Exclude the KDBENTER_VECTOR from irq assignment.
++ * Enable KDB_ENTER() again.
++ * kdb v4.4-2.6.19-rc1-x86_64-2.
++
++2006-10-09 Keith Owens <kaos@sgi.com>
++
++ * KDB_ENTER() is getting spurious activations on some x86_64 hardware.
++ Deactivate KDB_ENTER() until it is fixed.
++ * kdb v4.4-2.6.19-rc1-x86_64-1.
++
++2006-10-06 Keith Owens <kaos@sgi.com>
++
++ * Remove #include <linux/config.h>
++ * kdb v4.4-2.6.18-x86_64-2.
++
++2006-09-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-x86_64-1.
++
++2006-09-15 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc7-x86_64-1.
++
++2006-08-30 Keith Owens <kaos@sgi.com>
++
++ * Do not print debugstackptr in cpu_pda, it will be deleted soon.
++ * Add KDB_ENTER().
++ * Add warning for problems when following alternate stacks.
++ * kdb v4.4-2.6.18-rc5-x86_64-3.
++
++2006-08-29 Keith Owens <kaos@sgi.com>
++
++ * Rewrite all backtrace code.
++ * Add pt_regs and cpu_pda commands.
++ * Include patch to define orig_ist, to be removed once that patch is in
++ the community tree.
++ * kdb v4.4-2.6.18-rc5-x86_64-2.
++
++2006-08-28 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc5-x86_64-1.
++
++2006-08-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc4-x86_64-1.
++
++2006-08-04 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc3-x86_64-1.
++
++2006-07-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc2-x86_64-1.
++
++2006-07-12 Keith Owens <kaos@sgi.com>
++
++ * sparse cleanups
++ * kdb v4.4-2.6.18-rc1-x86_64-2.
++
++2006-07-07 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc1-x86_64-1.
++
++2006-07-04 Keith Owens <kaos@sgi.com>
++
++ * Make KDB rendezvous on x86_64 a two stage approach.
++ * Move smp_kdb_stop() and smp_kdb_interrupt() to kdbasupport.c.
++ * Move setting of interrupt traps to kdbasupport.c.
++ * Add KDB_REASON_CPU_UP support.
++ * Move per cpu setup to kdba_cpu_up().
++ * Delete kdba_enable_mce, architectures now do their own setup.
++ * Delete kdba_enable_lbr, kdba_disable_lbr, kdba_print_lbr,
++ page_fault_mca. Only ever implemented on x86, difficult to maintain
++ and rarely used in the field.
++ * Replace #ifdef KDB_HAVE_LONGJMP with #ifdef kdba_setjmp.
++ * kdb v4.4-2.6.17-x86_64-2.
++
++2006-06-19 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-x86_64-1.
++
++2006-05-31 Keith Owens <kaos@sgi.com>
++
++ * Define arch/x86_64/kdb/kdb_cmds.
++ * kdb v4.4-2.6.17-rc5-x86_64-2.
++
++2006-05-25 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc5-x86_64-1.
++
++2006-05-15 Keith Owens <kaos@sgi.com>
++
++ * Refresh bfd related files from binutils 2.16.91.0.2.
++ * kdb v4.4-2.6.17-rc4-x86_64-2.
++
++2006-05-12 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6-17-rc4-x86_64-1.
++
++2006-04-22 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6-17-rc2-x86_64-1.
++
++2006-04-13 Keith Owens <kaos@sgi.com>
++
++ * Remove trailing white space.
++ * kdb v4.4-2.6-17-rc1-x86_64-1.
++
++2006-03-25 Jack F. Vogel <jfv@bluesong.net>
++ * Sync with Keith's changes for 2.6.16
++ * code from Andi Kleen to support above
++
++2005-09-30 Jack F. Vogel <jfv@bluesong.net>
++ * Port to 2.6.14-rc2
++ * sync with a couple changes from Keith
++ * Add backtrace code from Jim Houston
++ (thanks Jim)
++
++2005-08-31 Jack F. Vogel <jfv@bluesong.net>
++ * Change to linker script for kexec
++ thanks to Steven Dake <sdake@mvista.com>
++
++2005-08-30 Jack F. Vogel <jfv@bluesong.net>
++ * Notify struct should not be devinit
++ thanks IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
++
++2005-08-25 Jack F. Vogel <jfv@bluesong.net>
++ * Update to 2.6.11
++ * Fix to synchronize with the notify changes
++ thanks to Jim Houston.
++
++2004-09-30 Keith Owens <kaos@sgi.com>
++ * Port to 2.6.9-rc2
++ * Fix line editing characters. Jim Houston, Comcast.
++ * kdb v4.4-2.6.9-rc2-x86-64-1.
++
++2004-08-15 Jack F. Vogel <jfv@bluesong.net>
++ * Port to 2.6.8
++ * tighten up the code, using the built-in
++ die_chain notify interface, thanks to
++ Andi Kleen for pointing this out.
++
++2004-05-15 Jack F. Vogel <jfv@bluesong.net>
++ * port to 2.6.6 for x86_64
++
++2003-12-15 Cliff Neighbors <cliff@fabric7.com>
++ * initial port from i386 to x86_64
++
++2002-08-10 Keith Owens <kaos@sgi.com>
++
++ * Replace kdb_port with kdb_serial to support memory mapped I/O.
++ Note: This needs kdb v2.3-2.4.19-common-2 or later.
++ * kdb v2.3-2.4.19-i386-3.
++
++2002-08-09 Keith Owens <kaos@sgi.com>
++
++ * Use -fno-optimize-sibling-calls for kdb if gcc supports it.
++ * .text.lock does not consume an activation frame.
++ * kdb v2.3-2.4.19-i386-2.
++
++2002-08-07 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19.
++ * Remove individual SGI copyrights, the general SGI copyright applies.
++ * New .text.lock name. Hugh Dickins.
++ * Set KERNEL_CS in kdba_getcurrentframe. Hugh Dickins.
++ * Clean up disassembly layout. Hugh Dickins, Keith Owens.
++ * Replace hard coded stack size with THREAD_SIZE. Hugh Dickins.
++ * Better stack layout on bt with no frame pointers. Hugh Dickins.
++ * Make i386 IO breakpoints (bpha <address> IO) work again.
++ Martin Wilck, Keith Owens.
++ * Remove fixed KDB_MAX_COMMANDS size.
++ * Add set_fs() around __copy_to_user on kernel addresses.
++ Randolph Chung.
++ * Position i386 for CONFIG_NUMA_REPLICATE.
++ * kdb v2.3-2.4.19-i386-1.
++
++2002-07-09 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19-rc1.
++
++2002-06-14 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19-pre10.
++ * kdb v2.1-2.4.19-pre10-i386-1.
++
++2002-04-09 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19-pre6.
++ * kdb v2.1-2.4.19-pre6-i386-1.
++
++2002-02-26 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.18.
++ * kdb v2.1-2.4.18-i386-1.
++
++2002-01-18 Keith Owens <kaos@sgi.com>
++
++ * Use new kdb_get/put functions.
++ * Define kdba_{get,put}area_size functions for i386.
++ * Remove over-engineered dblist callback functions.
++ * Correctly handle failing call disp32 in backtrace.
++ * Remove bp_instvalid flag, redundant code.
++ * Remove dead code.
++ * kdb v2.1-2.4.17-i386-1.
++
++2002-01-04 Keith Owens <kaos@sgi.com>
++
++ * Sync xfs <-> kdb i386 code.
++
+diff -Nurp linux-2.6.22-590/arch/x86_64/kdb/kdba_bp.c linux-2.6.22-600/arch/x86_64/kdb/kdba_bp.c
+--- linux-2.6.22-590/arch/x86_64/kdb/kdba_bp.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/x86_64/kdb/kdba_bp.c 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,777 @@
++/*
++ * Kernel Debugger Architecture Dependent Breakpoint Handling
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/smp.h>
++#include <linux/ptrace.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++
++
++static char *kdba_rwtypes[] = { "Instruction(Register)", "Data Write",
++ "I/O", "Data Access"};
++
++/*
++ * Table describing processor architecture hardware
++ * breakpoint registers.
++ */
++
++kdbhard_bp_t kdb_hardbreaks[KDB_MAXHARDBPT];
++
++/*
++ * kdba_db_trap
++ *
++ * Perform breakpoint processing upon entry to the
++ * processor debugger fault. Determine and print
++ * the active breakpoint.
++ *
++ * Parameters:
++ * regs Exception frame containing machine register state
++ * error Error number passed to kdb.
++ * Outputs:
++ * None.
++ * Returns:
++ * KDB_DB_BPT Standard instruction or data breakpoint encountered
++ * KDB_DB_SS Single Step fault ('ss' command or end of 'ssb' command)
++ * KDB_DB_SSB Single Step fault, caller should continue ('ssb' command)
++ * KDB_DB_SSBPT Single step over breakpoint
++ * KDB_DB_NOBPT No existing kdb breakpoint matches this debug exception
++ * Locking:
++ * None.
++ * Remarks:
++ * Yup, there be goto's here.
++ *
++ * If multiple processors receive debug exceptions simultaneously,
++ * one may be waiting at the kdb fence in kdb() while the user
++ * issues a 'bc' command to clear the breakpoint the processor
++ * which is waiting has already encountered. If this is the case,
++ * the debug registers will no longer match any entry in the
++ * breakpoint table, and we'll return the value KDB_DB_NOBPT.
++ * This can cause a panic in die_if_kernel(). It is safer to
++ * disable the breakpoint (bd), go until all processors are past
++ * the breakpoint then clear the breakpoint (bc). This code
++ * recognises a breakpoint even when disabled but not when it has
++ * been cleared.
++ *
++ * WARNING: This routine clears the debug state. It should be called
++ * once per debug and the result cached.
++ */
++
++kdb_dbtrap_t
++kdba_db_trap(struct pt_regs *regs, int error_unused)
++{
++ kdb_machreg_t dr6;
++ kdb_machreg_t dr7;
++ int rw, reg;
++ int i;
++ kdb_dbtrap_t rv = KDB_DB_BPT;
++ kdb_bp_t *bp;
++
++ if (KDB_NULL_REGS(regs))
++ return KDB_DB_NOBPT;
++
++ dr6 = kdba_getdr6();
++ dr7 = kdba_getdr7();
++
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdb: dr6 0x%lx dr7 0x%lx\n", dr6, dr7);
++ if (dr6 & DR6_BS) {
++ if (KDB_STATE(SSBPT)) {
++ if (KDB_DEBUG(BP))
++ kdb_printf("ssbpt\n");
++ KDB_STATE_CLEAR(SSBPT);
++ for(i=0,bp=kdb_breakpoints;
++ i < KDB_MAXBPT;
++ i++, bp++) {
++ if (KDB_DEBUG(BP))
++ kdb_printf("bp 0x%p enabled %d delayed %d global %d cpu %d\n",
++ bp, bp->bp_enabled, bp->bp_delayed, bp->bp_global, bp->bp_cpu);
++ if (!bp->bp_enabled)
++ continue;
++ if (!bp->bp_global && bp->bp_cpu != smp_processor_id())
++ continue;
++ if (KDB_DEBUG(BP))
++ kdb_printf("bp for this cpu\n");
++ if (bp->bp_delayed) {
++ bp->bp_delayed = 0;
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdba_installbp\n");
++ kdba_installbp(regs, bp);
++ if (!KDB_STATE(DOING_SS)) {
++ regs->eflags &= ~EF_TF;
++ return(KDB_DB_SSBPT);
++ }
++ break;
++ }
++ }
++ if (i == KDB_MAXBPT) {
++ kdb_printf("kdb: Unable to find delayed breakpoint\n");
++ }
++ if (!KDB_STATE(DOING_SS)) {
++ regs->eflags &= ~EF_TF;
++ return(KDB_DB_NOBPT);
++ }
++ /* FALLTHROUGH */
++ }
++
++ /*
++ * KDB_STATE_DOING_SS is set when the kernel debugger is using
++ * the processor trap flag to single-step a processor. If a
++ * single step trap occurs and this flag is clear, the SS trap
++ * will be ignored by KDB and the kernel will be allowed to deal
++ * with it as necessary (e.g. for ptrace).
++ */
++ if (!KDB_STATE(DOING_SS))
++ goto unknown;
++
++ /* single step */
++ rv = KDB_DB_SS; /* Indicate single step */
++ if (KDB_STATE(DOING_SSB)) {
++ unsigned char instruction[2];
++
++ kdb_id1(regs->rip);
++ if (kdb_getarea(instruction, regs->rip) ||
++ (instruction[0]&0xf0) == 0xe0 || /* short disp jumps */
++ (instruction[0]&0xf0) == 0x70 || /* Misc. jumps */
++ instruction[0] == 0xc2 || /* ret */
++ instruction[0] == 0x9a || /* call */
++ (instruction[0]&0xf8) == 0xc8 || /* enter, leave, iret, int, */
++ ((instruction[0] == 0x0f) &&
++ ((instruction[1]&0xf0)== 0x80))
++ ) {
++ /*
++ * End the ssb command here.
++ */
++ KDB_STATE_CLEAR(DOING_SSB);
++ KDB_STATE_CLEAR(DOING_SS);
++ } else {
++ rv = KDB_DB_SSB; /* Indicate ssb - dismiss immediately */
++ }
++ } else {
++ /*
++ * Print current insn
++ */
++ kdb_printf("SS trap at ");
++ kdb_symbol_print(regs->rip, NULL, KDB_SP_DEFAULT|KDB_SP_NEWLINE);
++ kdb_id1(regs->rip);
++ KDB_STATE_CLEAR(DOING_SS);
++ }
++
++ if (rv != KDB_DB_SSB)
++ regs->eflags &= ~EF_TF;
++ }
++
++ if (dr6 & DR6_B0) {
++ rw = DR7_RW0(dr7);
++ reg = 0;
++ goto handle;
++ }
++
++ if (dr6 & DR6_B1) {
++ rw = DR7_RW1(dr7);
++ reg = 1;
++ goto handle;
++ }
++
++ if (dr6 & DR6_B2) {
++ rw = DR7_RW2(dr7);
++ reg = 2;
++ goto handle;
++ }
++
++ if (dr6 & DR6_B3) {
++ rw = DR7_RW3(dr7);
++ reg = 3;
++ goto handle;
++ }
++
++ if (rv > 0)
++ goto handled;
++
++ goto unknown; /* dismiss */
++
++handle:
++ /*
++ * Set Resume Flag
++ */
++ regs->eflags |= EF_RF;
++
++ /*
++ * Determine which breakpoint was encountered.
++ */
++ for(i=0, bp=kdb_breakpoints; i<KDB_MAXBPT; i++, bp++) {
++ if (!(bp->bp_free)
++ && (bp->bp_global || bp->bp_cpu == smp_processor_id())
++ && (bp->bp_hard)
++ && (bp->bp_hard->bph_reg == reg)) {
++ /*
++ * Hit this breakpoint.
++ */
++ kdb_printf("%s breakpoint #%d at " kdb_bfd_vma_fmt "\n",
++ kdba_rwtypes[rw],
++ i, bp->bp_addr);
++ /*
++ * For an instruction breakpoint, disassemble
++ * the current instruction.
++ */
++ if (rw == 0) {
++ kdb_id1(regs->rip);
++ }
++
++ goto handled;
++ }
++ }
++
++unknown:
++ regs->eflags |= EF_RF; /* Suppress further faults */
++ rv = KDB_DB_NOBPT; /* Cause kdb() to return */
++
++handled:
++
++ /*
++ * Clear the pending exceptions.
++ */
++ kdba_putdr6(0);
++
++ return rv;
++}
++
++/*
++ * kdba_bp_trap
++ *
++ * Perform breakpoint processing upon entry to the
++ * processor breakpoint instruction fault. Determine and print
++ * the active breakpoint.
++ *
++ * Parameters:
++ * regs Exception frame containing machine register state
++ * error Error number passed to kdb.
++ * Outputs:
++ * None.
++ * Returns:
++ * 0 Standard instruction or data breakpoint encountered
++ * 1 Single Step fault ('ss' command)
++ * 2 Single Step fault, caller should continue ('ssb' command)
++ * 3 No existing kdb breakpoint matches this debug exception
++ * Locking:
++ * None.
++ * Remarks:
++ *
++ * If multiple processors receive debug exceptions simultaneously,
++ * one may be waiting at the kdb fence in kdb() while the user
++ * issues a 'bc' command to clear the breakpoint the processor which
++ * is waiting has already encountered. If this is the case, the
++ * debug registers will no longer match any entry in the breakpoint
++ * table, and we'll return the value '3'. This can cause a panic
++ * in die_if_kernel(). It is safer to disable the breakpoint (bd),
++ * 'go' until all processors are past the breakpoint then clear the
++ * breakpoint (bc). This code recognises a breakpoint even when
++ * disabled but not when it has been cleared.
++ *
++ * WARNING: This routine resets the rip. It should be called
++ * once per breakpoint and the result cached.
++ */
++
++kdb_dbtrap_t
++kdba_bp_trap(struct pt_regs *regs, int error_unused)
++{
++ int i;
++ kdb_dbtrap_t rv;
++ kdb_bp_t *bp;
++
++ if (KDB_NULL_REGS(regs))
++ return KDB_DB_NOBPT;
++ /*
++ * Determine which breakpoint was encountered.
++ */
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdba_bp_trap: rip=0x%lx (not adjusted) "
++ "eflags=0x%lx ef=0x%p rsp=0x%lx\n",
++ regs->rip, regs->eflags, regs, regs->rsp);
++
++ rv = KDB_DB_NOBPT; /* Cause kdb() to return */
++
++ for(i=0, bp=kdb_breakpoints; i<KDB_MAXBPT; i++, bp++) {
++ if (bp->bp_free)
++ continue;
++ if (!bp->bp_global && bp->bp_cpu != smp_processor_id())
++ continue;
++ if ((void *)bp->bp_addr == (void *)(regs->rip - bp->bp_adjust)) {
++ /* Hit this breakpoint. */
++ regs->rip -= bp->bp_adjust;
++ kdb_printf("Instruction(i) breakpoint #%d at 0x%lx (adjusted)\n",
++ i, regs->rip);
++ kdb_id1(regs->rip);
++ rv = KDB_DB_BPT;
++ bp->bp_delay = 1;
++ /* SSBPT is set when the kernel debugger must single
++ * step a task in order to re-establish an instruction
++ * breakpoint which uses the instruction replacement
++ * mechanism. It is cleared by any action that removes
++ * the need to single-step the breakpoint.
++ */
++ KDB_STATE_SET(SSBPT);
++ break;
++ }
++ }
++
++ return rv;
++}
++
++/*
++ * kdba_handle_bp
++ *
++ * Handle an instruction-breakpoint trap. Called when re-installing
++ * an enabled breakpoint which has the bp_delay bit set.
++ *
++ * Parameters:
++ * Returns:
++ * Locking:
++ * Remarks:
++ *
++ * Ok, we really need to:
++ * 1) Restore the original instruction byte
++ * 2) Single Step
++ * 3) Restore breakpoint instruction
++ * 4) Continue.
++ *
++ *
++ */
++
++static void
++kdba_handle_bp(struct pt_regs *regs, kdb_bp_t *bp)
++{
++
++ if (KDB_NULL_REGS(regs))
++ return;
++
++ if (KDB_DEBUG(BP))
++ kdb_printf("regs->rip = 0x%lx\n", regs->rip);
++
++ /*
++ * Setup single step
++ */
++ kdba_setsinglestep(regs);
++
++ /*
++ * Reset delay attribute
++ */
++ bp->bp_delay = 0;
++ bp->bp_delayed = 1;
++}
++
++
++/*
++ * kdba_bptype
++ *
++ * Return a string describing type of breakpoint.
++ *
++ * Parameters:
++ * bph Pointer to hardware breakpoint description
++ * Outputs:
++ * None.
++ * Returns:
++ * Character string.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++char *
++kdba_bptype(kdbhard_bp_t *bph)
++{
++ char *mode;
++
++ mode = kdba_rwtypes[bph->bph_mode];
++
++ return mode;
++}
++
++/*
++ * kdba_printbpreg
++ *
++ * Print register name assigned to breakpoint
++ *
++ * Parameters:
++ * bph Pointer hardware breakpoint structure
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++static void
++kdba_printbpreg(kdbhard_bp_t *bph)
++{
++ kdb_printf(" in dr%ld", bph->bph_reg);
++}
++
++/*
++ * kdba_printbp
++ *
++ * Print string describing hardware breakpoint.
++ *
++ * Parameters:
++ * bph Pointer to hardware breakpoint description
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void
++kdba_printbp(kdb_bp_t *bp)
++{
++ kdb_printf("\n is enabled");
++ if (bp->bp_hardtype) {
++ kdba_printbpreg(bp->bp_hard);
++ if (bp->bp_hard->bph_mode != 0) {
++ kdb_printf(" for %d bytes",
++ bp->bp_hard->bph_length+1);
++ }
++ }
++}
++
++/*
++ * kdba_parsebp
++ *
++ * Parse architecture dependent portion of the
++ * breakpoint command.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero for success, a kdb diagnostic for failure
++ * Locking:
++ * None.
++ * Remarks:
++ * for Ia32 architecture, data access, data write and
++ * I/O breakpoints are supported in addition to instruction
++ * breakpoints.
++ *
++ * {datar|dataw|io|inst} [length]
++ */
++
++int
++kdba_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp)
++{
++ int nextarg = *nextargp;
++ int diag;
++ kdbhard_bp_t *bph = &bp->bp_template;
++
++ bph->bph_mode = 0; /* Default to instruction breakpoint */
++ bph->bph_length = 0; /* Length must be zero for insn bp */
++ if ((argc + 1) != nextarg) {
++ if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0) {
++ bph->bph_mode = 3;
++ } else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0) {
++ bph->bph_mode = 1;
++ } else if (strnicmp(argv[nextarg], "io", sizeof("io")) == 0) {
++ bph->bph_mode = 2;
++ } else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0) {
++ bph->bph_mode = 0;
++ } else {
++ return KDB_ARGCOUNT;
++ }
++
++ bph->bph_length = 3; /* Default to 4 byte */
++
++ nextarg++;
++
++ if ((argc + 1) != nextarg) {
++ unsigned long len;
++
++ diag = kdbgetularg((char *)argv[nextarg],
++ &len);
++ if (diag)
++ return diag;
++
++
++ if ((len > 4) || (len == 3))
++ return KDB_BADLENGTH;
++
++ bph->bph_length = len;
++ bph->bph_length--; /* Normalize for debug register */
++ nextarg++;
++ }
++
++ if ((argc + 1) != nextarg)
++ return KDB_ARGCOUNT;
++
++ /*
++ * Indicate to architecture independent level that
++ * a hardware register assignment is required to enable
++ * this breakpoint.
++ */
++
++ bph->bph_free = 0;
++ } else {
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdba_bp: no args, forcehw is %d\n", bp->bp_forcehw);
++ if (bp->bp_forcehw) {
++ /*
++ * We are forced to use a hardware register for this
++ * breakpoint because either the bph or bpha
++ * commands were used to establish this breakpoint.
++ */
++ bph->bph_free = 0;
++ } else {
++ /*
++ * Indicate to architecture dependent level that
++ * the instruction replacement breakpoint technique
++ * should be used for this breakpoint.
++ */
++ bph->bph_free = 1;
++ bp->bp_adjust = 1; /* software, int 3 is one byte */
++ }
++ }
++
++ if (bph->bph_mode != 2 && kdba_verify_rw(bp->bp_addr, bph->bph_length+1)) {
++ kdb_printf("Invalid address for breakpoint, ignoring bp command\n");
++ return KDB_BADADDR;
++ }
++
++ *nextargp = nextarg;
++ return 0;
++}
++
++/*
++ * kdba_allocbp
++ *
++ * Associate a hardware register with a breakpoint.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * A pointer to the allocated register kdbhard_bp_t structure for
++ * success, Null and a non-zero diagnostic for failure.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++kdbhard_bp_t *
++kdba_allocbp(kdbhard_bp_t *bph, int *diagp)
++{
++ int i;
++ kdbhard_bp_t *newbph;
++
++ for(i=0,newbph=kdb_hardbreaks; i < KDB_MAXHARDBPT; i++, newbph++) {
++ if (newbph->bph_free) {
++ break;
++ }
++ }
++
++ if (i == KDB_MAXHARDBPT) {
++ *diagp = KDB_TOOMANYDBREGS;
++ return NULL;
++ }
++
++ *diagp = 0;
++
++ /*
++ * Copy data from template. Can't just copy the entire template
++ * here because the register number in kdb_hardbreaks must be
++ * preserved.
++ */
++ newbph->bph_data = bph->bph_data;
++ newbph->bph_write = bph->bph_write;
++ newbph->bph_mode = bph->bph_mode;
++ newbph->bph_length = bph->bph_length;
++
++ /*
++ * Mark entry allocated.
++ */
++ newbph->bph_free = 0;
++
++ return newbph;
++}
++
++/*
++ * kdba_freebp
++ *
++ * Deallocate a hardware breakpoint
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero for success, a kdb diagnostic for failure
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void
++kdba_freebp(kdbhard_bp_t *bph)
++{
++ bph->bph_free = 1;
++}
++
++/*
++ * kdba_initbp
++ *
++ * Initialize the breakpoint table for the hardware breakpoint
++ * register.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero for success, a kdb diagnostic for failure
++ * Locking:
++ * None.
++ * Remarks:
++ *
++ * There is one entry per register. On the ia32 architecture
++ * all the registers are interchangeable, so no special allocation
++ * criteria are required.
++ */
++
++void
++kdba_initbp(void)
++{
++ int i;
++ kdbhard_bp_t *bph;
++
++ /*
++ * Clear the hardware breakpoint table
++ */
++
++ memset(kdb_hardbreaks, '\0', sizeof(kdb_hardbreaks));
++
++ for(i=0,bph=kdb_hardbreaks; i<KDB_MAXHARDBPT; i++, bph++) {
++ bph->bph_reg = i;
++ bph->bph_free = 1;
++ }
++}
++
++/*
++ * kdba_installbp
++ *
++ * Install a breakpoint
++ *
++ * Parameters:
++ * regs Exception frame
++ * bp Breakpoint structure for the breakpoint to be installed
++ * Outputs:
++ * None.
++ * Returns:
++ * 0 if breakpoint installed.
++ * Locking:
++ * None.
++ * Remarks:
++ * For hardware breakpoints, a debug register is allocated
++ * and assigned to the breakpoint. If no debug register is
++ * available, a warning message is printed and the breakpoint
++ * is disabled.
++ *
++ * For instruction replacement breakpoints, we must single-step
++ * over the replaced instruction at this point so we can re-install
++ * the breakpoint instruction after the single-step.
++ */
++
++int
++kdba_installbp(struct pt_regs *regs, kdb_bp_t *bp)
++{
++ /*
++ * Install the breakpoint, if it is not already installed.
++ */
++
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdba_installbp bp_installed %d\n", bp->bp_installed);
++ }
++ if (!KDB_STATE(SSBPT))
++ bp->bp_delay = 0;
++ if (!bp->bp_installed) {
++ if (bp->bp_hardtype) {
++ kdba_installdbreg(bp);
++ bp->bp_installed = 1;
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdba_installbp hardware reg %ld at " kdb_bfd_vma_fmt "\n",
++ bp->bp_hard->bph_reg, bp->bp_addr);
++ }
++ } else if (bp->bp_delay) {
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdba_installbp delayed bp\n");
++ kdba_handle_bp(regs, bp);
++ } else {
++ if (kdb_getarea_size(&(bp->bp_inst), bp->bp_addr, 1) ||
++ kdb_putword(bp->bp_addr, IA32_BREAKPOINT_INSTRUCTION, 1)) {
++ kdb_printf("kdba_installbp failed to set software breakpoint at " kdb_bfd_vma_fmt "\n", bp->bp_addr);
++ return(1);
++ }
++ bp->bp_installed = 1;
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdba_installbp instruction 0x%x at " kdb_bfd_vma_fmt "\n",
++ IA32_BREAKPOINT_INSTRUCTION, bp->bp_addr);
++ }
++ }
++ return(0);
++}
++
++/*
++ * kdba_removebp
++ *
++ * Make a breakpoint ineffective.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++int
++kdba_removebp(kdb_bp_t *bp)
++{
++ /*
++ * For hardware breakpoints, remove it from the active register,
++ * for software breakpoints, restore the instruction stream.
++ */
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdba_removebp bp_installed %d\n", bp->bp_installed);
++ }
++ if (bp->bp_installed) {
++ if (bp->bp_hardtype) {
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdb: removing hardware reg %ld at " kdb_bfd_vma_fmt "\n",
++ bp->bp_hard->bph_reg, bp->bp_addr);
++ }
++ kdba_removedbreg(bp);
++ } else {
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdb: restoring instruction 0x%x at " kdb_bfd_vma_fmt "\n",
++ bp->bp_inst, bp->bp_addr);
++ if (kdb_putword(bp->bp_addr, bp->bp_inst, 1))
++ return(1);
++ }
++ bp->bp_installed = 0;
++ }
++ return(0);
++}
+diff -Nurp linux-2.6.22-590/arch/x86_64/kdb/kdba_bt.c linux-2.6.22-600/arch/x86_64/kdb/kdba_bt.c
+--- linux-2.6.22-590/arch/x86_64/kdb/kdba_bt.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/x86_64/kdb/kdba_bt.c 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,535 @@
++/*
++ * Kernel Debugger Architecture Dependent Stack Traceback
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/kallsyms.h>
++#include <linux/irq.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <asm/system.h>
++
++/* x86_64 has multiple alternate stacks, with different sizes and different
++ * offsets to get the link from one stack to the next. Some of the stacks are
++ * referenced via cpu_pda, some via per_cpu orig_ist. Debug events can even
++ * have multiple nested stacks within the single physical stack, each nested
++ * stack has its own link and some of those links are wrong.
++ *
++ * Consistent it's not!
++ *
++ * Do not assume that these stacks are aligned on their size.
++ */
++#define INTERRUPT_STACK (N_EXCEPTION_STACKS + 1)
++void
++kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
++ struct kdb_activation_record *ar)
++{
++ static struct {
++ const char *id;
++ unsigned int total_size;
++ unsigned int nested_size;
++ unsigned int next;
++ } *sdp, stack_data[] = {
++ [STACKFAULT_STACK - 1] = { "stackfault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
++ [DOUBLEFAULT_STACK - 1] = { "doublefault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
++ [NMI_STACK - 1] = { "nmi", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
++ [DEBUG_STACK - 1] = { "debug", DEBUG_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
++ [MCE_STACK - 1] = { "machine check", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
++ [INTERRUPT_STACK - 1] = { "interrupt", IRQSTACKSIZE, IRQSTACKSIZE, IRQSTACKSIZE - sizeof(void *) },
++ };
++ unsigned long total_start = 0, total_size, total_end;
++ int sd, found = 0;
++
++ for (sd = 0, sdp = stack_data;
++ sd < ARRAY_SIZE(stack_data);
++ ++sd, ++sdp) {
++ total_size = sdp->total_size;
++ if (!total_size)
++ continue; /* in case stack_data[] has any holes */
++ if (cpu < 0) {
++ /* Arbitrary address which can be on any cpu, see if it
++ * falls within any of the alternate stacks
++ */
++ int c;
++ for_each_online_cpu(c) {
++ if (sd == INTERRUPT_STACK - 1)
++ total_end = (unsigned long)cpu_pda(c)->irqstackptr;
++ else
++ total_end = per_cpu(orig_ist, c).ist[sd];
++ total_start = total_end - total_size;
++ if (addr >= total_start && addr < total_end) {
++ found = 1;
++ cpu = c;
++ break;
++ }
++ }
++ if (!found)
++ continue;
++ }
++ /* Only check the supplied or found cpu */
++ if (sd == INTERRUPT_STACK - 1)
++ total_end = (unsigned long)cpu_pda(cpu)->irqstackptr;
++ else
++ total_end = per_cpu(orig_ist, cpu).ist[sd];
++ total_start = total_end - total_size;
++ if (addr >= total_start && addr < total_end) {
++ found = 1;
++ break;
++ }
++ }
++ if (!found)
++ return;
++ /* find which nested stack the address is in */
++ while (addr > total_start + sdp->nested_size)
++ total_start += sdp->nested_size;
++ ar->stack.physical_start = total_start;
++ ar->stack.physical_end = total_start + sdp->nested_size;
++ ar->stack.logical_start = total_start;
++ ar->stack.logical_end = total_start + sdp->next;
++ ar->stack.next = *(unsigned long *)ar->stack.logical_end;
++ ar->stack.id = sdp->id;
++
++ /* Nasty: common_interrupt builds a partial pt_regs, with r15 through
++ * rbx not being filled in. It passes struct pt_regs* to do_IRQ (in
++ * rdi) but the stack pointer is not adjusted to account for r15
++ * through rbx. This has two effects :-
++ *
++ * (1) struct pt_regs on an external interrupt actually overlaps with
++ * the local stack area used by do_IRQ. Not only are r15-rbx
++ * undefined, the area that claims to hold their values can even
++ * change as the irq is processed.
++ *
++ * (2) The back stack pointer saved for the new frame is not pointing
++ * at pt_regs, it is pointing at rbx within the pt_regs passed to
++ * do_IRQ.
++ *
++ * There is nothing that I can do about (1) but I have to fix (2)
++ * because kdb backtrace looks for pt_regs.
++ */
++
++ if (sd == INTERRUPT_STACK - 1)
++ ar->stack.next -= offsetof(struct pt_regs, rbx);
++}
++
++/* Given an address which claims to be on a stack, an optional cpu number and
++ * an optional task address, get information about the stack.
++ *
++ * t == NULL, cpu < 0 indicates an arbitrary stack address with no associated
++ * struct task, the address can be in an alternate stack or any task's normal
++ * stack.
++ *
++ * t != NULL, cpu >= 0 indicates a running task, the address can be in an
++ * alternate stack or that task's normal stack.
++ *
++ * t != NULL, cpu < 0 indicates a blocked task, the address can only be in that
++ * task's normal stack.
++ *
++ * t == NULL, cpu >= 0 is not a valid combination.
++ */
++
++static void
++kdba_get_stack_info(kdb_machreg_t rsp, int cpu,
++ struct kdb_activation_record *ar,
++ const struct task_struct *t)
++{
++ struct thread_info *tinfo;
++ struct task_struct *g, *p;
++ memset(&ar->stack, 0, sizeof(ar->stack));
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: rsp=0x%lx cpu=%d task=%p\n",
++ __FUNCTION__, rsp, cpu, t);
++ if (t == NULL || cpu >= 0) {
++ kdba_get_stack_info_alternate(rsp, cpu, ar);
++ if (ar->stack.logical_start)
++ goto out;
++ }
++ rsp &= -THREAD_SIZE;
++ tinfo = (struct thread_info *)rsp;
++ if (t == NULL) {
++ /* Arbitrary stack address without an associated task, see if
++ * it falls within any normal process stack, including the idle
++ * tasks.
++ */
++ kdb_do_each_thread(g, p) {
++ if (tinfo == task_thread_info(p)) {
++ t = p;
++ goto found;
++ }
++ } kdb_while_each_thread(g, p);
++ for_each_online_cpu(cpu) {
++ p = idle_task(cpu);
++ if (tinfo == task_thread_info(p)) {
++ t = p;
++ goto found;
++ }
++ }
++ found:
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: found task %p\n", __FUNCTION__, t);
++ } else if (cpu >= 0) {
++ /* running task */
++ struct kdb_running_process *krp = kdb_running_process + cpu;
++ if (krp->p != t || tinfo != task_thread_info(t))
++ t = NULL;
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: running task %p\n", __FUNCTION__, t);
++ } else {
++ /* blocked task */
++ if (tinfo != task_thread_info(t))
++ t = NULL;
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: blocked task %p\n", __FUNCTION__, t);
++ }
++ if (t) {
++ ar->stack.physical_start = rsp;
++ ar->stack.physical_end = rsp + THREAD_SIZE;
++ ar->stack.logical_start = rsp + sizeof(struct thread_info);
++ ar->stack.logical_end = ar->stack.physical_end;
++ ar->stack.next = 0;
++ ar->stack.id = "normal";
++ }
++out:
++ if (ar->stack.physical_start && KDB_DEBUG(ARA)) {
++ kdb_printf("%s: ar->stack\n", __FUNCTION__);
++ kdb_printf(" physical_start=0x%lx\n", ar->stack.physical_start);
++ kdb_printf(" physical_end=0x%lx\n", ar->stack.physical_end);
++ kdb_printf(" logical_start=0x%lx\n", ar->stack.logical_start);
++ kdb_printf(" logical_end=0x%lx\n", ar->stack.logical_end);
++ kdb_printf(" next=0x%lx\n", ar->stack.next);
++ kdb_printf(" id=%s\n", ar->stack.id);
++ }
++}
++
++/*
++ * bt_print_one
++ *
++ * Print one back trace entry.
++ *
++ * Inputs:
++ * rip Current program counter, or return address.
++ * rsp Stack pointer rsp when at rip.
++ * ar Activation record for this frame.
++ * symtab Information about symbol that rip falls within.
++ * argcount Maximum number of arguments to print.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ * None.
++ */
++
++static void
++bt_print_one(kdb_machreg_t rip, kdb_machreg_t rsp,
++ const struct kdb_activation_record *ar,
++ const kdb_symtab_t *symtab, int argcount)
++{
++ int btsymarg = 0;
++ int nosect = 0;
++ kdb_machreg_t word;
++
++ kdbgetintenv("BTSYMARG", &btsymarg);
++ kdbgetintenv("NOSECT", &nosect);
++
++ kdb_printf(kdb_machreg_fmt0, rsp);
++ kdb_symbol_print(rip, symtab, KDB_SP_SPACEB|KDB_SP_VALUE);
++ if (argcount && ar->args) {
++ int i, argc = ar->args;
++ kdb_printf(" (");
++ if (argc > argcount)
++ argc = argcount;
++ for (i = 0; i < argc; i++) {
++ kdb_machreg_t argp = ar->arg[i];
++ if (i)
++ kdb_printf(", ");
++ kdb_getword(&word, argp, sizeof(word));
++ kdb_printf("0x%lx", word);
++ }
++ kdb_printf(")");
++ }
++ if (symtab->sym_name) {
++ if (!nosect) {
++ kdb_printf("\n");
++ kdb_printf(" %s",
++ symtab->mod_name);
++ if (symtab->sec_name && symtab->sec_start)
++ kdb_printf(" 0x%lx 0x%lx",
++ symtab->sec_start, symtab->sec_end);
++ kdb_printf(" 0x%lx 0x%lx",
++ symtab->sym_start, symtab->sym_end);
++ }
++ }
++ kdb_printf("\n");
++ if (argcount && ar->args && btsymarg) {
++ int i, argc = ar->args;
++ kdb_symtab_t arg_symtab;
++ for (i = 0; i < argc; i++) {
++ kdb_machreg_t argp = ar->arg[i];
++ kdb_getword(&word, argp, sizeof(word));
++ if (kdbnearsym(word, &arg_symtab)) {
++ kdb_printf(" ");
++ kdb_symbol_print(word, &arg_symtab,
++ KDB_SP_DEFAULT|KDB_SP_NEWLINE);
++ }
++ }
++ }
++}
++
++/* Getting the starting point for a backtrace on a running process is
++ * moderately tricky. kdba_save_running() saved the rsp in krp->arch.rsp, but
++ * that rsp is not 100% accurate, it can be offset by a frame pointer or by the
++ * size of local variables in kdba_main_loop() and kdb_save_running().
++ *
++ * The calling sequence is kdb() -> kdba_main_loop() -> kdb_save_running() ->
++ * kdba_save_running(). Walk up the stack until we find a return address
++ * inside the main kdb() function and start the backtrace from there.
++ */
++
++static int
++kdba_bt_stack_running(const struct task_struct *p,
++ const struct kdb_activation_record *ar,
++ kdb_machreg_t *rip, kdb_machreg_t *rsp,
++ kdb_machreg_t *rbp)
++{
++ kdb_machreg_t addr, sp;
++ kdb_symtab_t symtab;
++ struct kdb_running_process *krp = kdb_running_process + task_cpu(p);
++ int found = 0;
++
++ if (kdbgetsymval("kdb", &symtab) == 0)
++ return 0;
++ if (kdbnearsym(symtab.sym_start, &symtab) == 0)
++ return 0;
++ sp = krp->arch.rsp;
++ if (sp < ar->stack.logical_start || sp >= ar->stack.logical_end)
++ return 0;
++ while (sp < ar->stack.logical_end) {
++ addr = *(kdb_machreg_t *)sp;
++ if (addr >= symtab.sym_start && addr < symtab.sym_end) {
++ found = 1;
++ break;
++ }
++ sp += sizeof(kdb_machreg_t);
++ }
++ if (!found)
++ return 0;
++ *rbp = *rsp = sp;
++ *rip = addr;
++ return 1;
++}
++
++/*
++ * kdba_bt_stack
++ *
++ * Inputs:
++ * addr Pointer to Address provided to 'bt' command, if any.
++ * argcount
++ * p Pointer to task for 'btp' command.
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * mds comes in handy when examining the stack to do a manual
++ * traceback.
++ */
++
++static int
++kdba_bt_stack(kdb_machreg_t addr, int argcount, const struct task_struct *p)
++{
++ struct kdb_activation_record ar;
++ kdb_machreg_t rip, rsp, rbp, cs;
++ kdb_symtab_t symtab;
++ int first_time = 1, count = 0, btsp = 0, suppress;
++ struct pt_regs *regs = NULL;
++
++ kdbgetintenv("BTSP", &btsp);
++ suppress = !btsp;
++ memset(&ar, 0, sizeof(ar));
++
++ /*
++ * The caller may have supplied an address at which the
++ * stack traceback operation should begin. This address
++ * is assumed by this code to point to a return-address
++ * on the stack to be traced back.
++ *
++ * The end result of this will make it appear as if a function
++ * entitled '<unknown>' was called from the function which
++ * contains return-address.
++ */
++ if (addr) {
++ rip = 0;
++ rbp = 0;
++ rsp = addr;
++ cs = __KERNEL_CS; /* have to assume kernel space */
++ suppress = 0;
++ kdba_get_stack_info(rsp, -1, &ar, NULL);
++ } else {
++ if (task_curr(p)) {
++ struct kdb_running_process *krp =
++ kdb_running_process + task_cpu(p);
++
++ if (krp->seqno && krp->p == p
++ && krp->seqno >= kdb_seqno - 1) {
++ /* valid saved state, continue processing */
++ } else {
++ kdb_printf
++ ("Process did not save state, cannot backtrace\n");
++ kdb_ps1(p);
++ return 0;
++ }
++ regs = krp->regs;
++ if (KDB_NULL_REGS(regs))
++ return KDB_BADREG;
++ kdba_getregcontents("cs", regs, &cs);
++ if ((cs & 0xffff) != __KERNEL_CS) {
++ kdb_printf("Stack is not in kernel space, backtrace not available\n");
++ return 0;
++ }
++ kdba_getregcontents("rip", regs, &rip);
++ kdba_getregcontents("rbp", regs, &rbp);
++ rsp = krp->arch.rsp;
++ kdba_get_stack_info(rsp, kdb_process_cpu(p), &ar, p);
++ if (kdba_bt_stack_running(p, &ar, &rip, &rsp, &rbp) == 0) {
++ kdb_printf("%s: cannot adjust rsp=0x%lx for a running task\n",
++ __FUNCTION__, rsp);
++ }
++ } else {
++ /* Not on cpu, assume blocked. Blocked tasks do
++ * not have pt_regs. p->thread.rsp is set, rsp
++ * points to the rbp value, assume kernel space.
++ *
++ * The rip is no longer in the thread struct. We know
++ * that the stack value was saved in schedule near the
++ * label thread_return. Setting rip to thread_return-1
++ * lets the stack trace find that we are in schedule
++ * and correctly decode its prologue. We extract the
++ * saved rbp and adjust the stack to undo the effects
++ * of the inline assembly code which switches the
++ * stack.
++ */
++ extern void thread_return(void);
++ rip = (kdb_machreg_t)&thread_return-1;
++ rsp = p->thread.rsp;
++ rbp = *(unsigned long *)rsp;
++ rsp += 16;
++ cs = __KERNEL_CS;
++ suppress = 0;
++ kdba_get_stack_info(rsp, -1, &ar, p);
++ }
++ }
++ if (!ar.stack.physical_start) {
++ kdb_printf("rsp=0x%lx is not in a valid kernel stack, backtrace not available\n",
++ rsp);
++ return 0;
++ }
++
++ kdb_printf("rsp rip Function (args)\n");
++ if (ar.stack.next && !suppress)
++ kdb_printf(" ======================= <%s>\n",
++ ar.stack.id);
++
++ /* Run through all the stacks */
++ while (ar.stack.physical_start) {
++ if (!first_time)
++ rip = *(kdb_machreg_t *)rsp;
++ first_time = 0;
++ if (!suppress && __kernel_text_address(rip)) {
++ kdbnearsym(rip, &symtab);
++ bt_print_one(rip, rsp, &ar, &symtab, argcount);
++ ++count;
++ }
++ if ((struct pt_regs *)rsp == regs) {
++ if (ar.stack.next && suppress)
++ kdb_printf(" ======================= <%s>\n",
++ ar.stack.id);
++ ++count;
++ suppress = 0;
++ }
++ rsp += sizeof(rip);
++ if (count > 200)
++ break;
++ if (rsp < ar.stack.logical_end)
++ continue;
++ if (!ar.stack.next)
++ break;
++ rsp = ar.stack.next;
++ if (KDB_DEBUG(ARA))
++ kdb_printf("new rsp=0x%lx\n", rsp);
++ kdba_get_stack_info(rsp, -1, &ar, NULL);
++ if (!ar.stack.physical_start) {
++ kdb_printf("+++ Cannot resolve next stack\n");
++ } else if (!suppress) {
++ kdb_printf(" ======================= <%s>\n",
++ ar.stack.id);
++ ++count;
++ }
++ }
++
++ if (count > 200)
++ kdb_printf("bt truncated, count limit reached\n");
++ else if (suppress)
++ kdb_printf
++ ("bt did not find pt_regs - no trace produced. Suggest 'set BTSP 1'\n");
++
++ return 0;
++}
++
++/*
++ * kdba_bt_address
++ *
++ * Do a backtrace starting at a specified stack address. Use this if the
++ * heuristics get the stack decode wrong.
++ *
++ * Inputs:
++ * addr Address provided to 'bt' command.
++ * argcount
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * mds %rsp comes in handy when examining the stack to do a manual
++ * traceback.
++ */
++
++int kdba_bt_address(kdb_machreg_t addr, int argcount)
++{
++ return kdba_bt_stack(addr, argcount, NULL);
++}
++
++/*
++ * kdba_bt_process
++ *
++ * Do a backtrace for a specified process.
++ *
++ * Inputs:
++ * p Struct task pointer extracted by 'bt' command.
++ * argcount
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ */
++
++int kdba_bt_process(const struct task_struct *p, int argcount)
++{
++ return kdba_bt_stack(0, argcount, p);
++}
+diff -Nurp linux-2.6.22-590/arch/x86_64/kdb/kdba_id.c linux-2.6.22-600/arch/x86_64/kdb/kdba_id.c
+--- linux-2.6.22-590/arch/x86_64/kdb/kdba_id.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/x86_64/kdb/kdba_id.c 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,256 @@
++/*
++ * Kernel Debugger Architecture Dependent Instruction Disassembly
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <stdarg.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++
++/*
++ * kdba_dis_getsym
++ *
++ * Get a symbol for the disassembler.
++ *
++ * Parameters:
++ * addr Address for which to get symbol
++ * dip Pointer to disassemble_info
++ * Returns:
++ * 0
++ * Locking:
++ * Remarks:
++ * Not used for kdb.
++ */
++
++/* ARGSUSED */
++static int
++kdba_dis_getsym(bfd_vma addr, disassemble_info *dip)
++{
++
++ return 0;
++}
++
++/*
++ * kdba_printaddress
++ *
++ * Print (symbolically) an address.
++ *
++ * Parameters:
++ * addr Address for which to get symbol
++ * dip Pointer to disassemble_info
++ * flag True if a ":<tab>" sequence should follow the address
++ * Returns:
++ * 0
++ * Locking:
++ * Remarks:
++ *
++ */
++
++/* ARGSUSED */
++static void
++kdba_printaddress(kdb_machreg_t addr, disassemble_info *dip, int flag)
++{
++ kdb_symtab_t symtab;
++ int spaces = 5;
++ unsigned int offset;
++
++ /*
++ * Print a symbol name or address as necessary.
++ */
++ kdbnearsym(addr, &symtab);
++ if (symtab.sym_name) {
++ /* Do not use kdb_symbol_print here, it always does
++ * kdb_printf but we want dip->fprintf_func.
++ */
++ dip->fprintf_func(dip->stream,
++ "0x%0*lx %s",
++ (int)(2*sizeof(addr)), addr, symtab.sym_name);
++ if ((offset = addr - symtab.sym_start) == 0) {
++ spaces += 4;
++ }
++ else {
++ unsigned int o = offset;
++ while (o >>= 4)
++ --spaces;
++ dip->fprintf_func(dip->stream, "+0x%x", offset);
++ }
++
++ } else {
++ dip->fprintf_func(dip->stream, "0x%lx", addr);
++ }
++
++ if (flag) {
++ if (spaces < 1) {
++ spaces = 1;
++ }
++ dip->fprintf_func(dip->stream, ":%*s", spaces, " ");
++ }
++}
++
++/*
++ * kdba_dis_printaddr
++ *
++ * Print (symbolically) an address. Called by GNU disassembly
++ * code via disassemble_info structure.
++ *
++ * Parameters:
++ * addr Address for which to get symbol
++ * dip Pointer to disassemble_info
++ * Returns:
++ * 0
++ * Locking:
++ * Remarks:
++ * This function will never append ":<tab>" to the printed
++ * symbolic address.
++ */
++
++static void
++kdba_dis_printaddr(bfd_vma addr, disassemble_info *dip)
++{
++ kdba_printaddress(addr, dip, 0);
++}
++
++/*
++ * kdba_dis_getmem
++ *
++ * Fetch 'length' bytes from 'addr' into 'buf'.
++ *
++ * Parameters:
++ * addr Address for which to get symbol
++ * buf Address of buffer to fill with bytes from 'addr'
++ * length Number of bytes to fetch
++ * dip Pointer to disassemble_info
++ * Returns:
++ * 0 if data is available, otherwise error.
++ * Locking:
++ * Remarks:
++ *
++ */
++
++/* ARGSUSED */
++static int
++kdba_dis_getmem(bfd_vma addr, bfd_byte *buf, unsigned int length, disassemble_info *dip)
++{
++ return kdb_getarea_size(buf, addr, length);
++}
++
++/*
++ * kdba_id_parsemode
++ *
++ * Parse IDMODE environment variable string and
++ * set appropriate value into "disassemble_info" structure.
++ *
++ * Parameters:
++ * mode Mode string
++ * dip Disassemble_info structure pointer
++ * Returns:
++ * Locking:
++ * Remarks:
++ * We handle the values 'x86_64', 'x86' and '8086' to select the
++ * 64-bit, 32-bit or 16-bit legacy instruction set respectively.
++ */
++
++int
++kdba_id_parsemode(const char *mode, disassemble_info *dip)
++{
++ if (mode) {
++ if (strcmp(mode, "x86_64") == 0) {
++ dip->mach = bfd_mach_x86_64;
++ } else if (strcmp(mode, "x86") == 0) {
++ dip->mach = bfd_mach_i386_i386;
++ } else if (strcmp(mode, "8086") == 0) {
++ dip->mach = bfd_mach_i386_i8086;
++ } else {
++ return KDB_BADMODE;
++ }
++ }
++
++ return 0;
++}
++
++/*
++ * kdba_check_pc
++ *
++ * Check that the pc is satisfactory.
++ *
++ * Parameters:
++ * pc Program Counter Value.
++ * Returns:
++ * None
++ * Locking:
++ * None.
++ * Remarks:
++ * Can change pc.
++ */
++
++void
++kdba_check_pc(kdb_machreg_t *pc)
++{
++ /* No action */
++}
++
++/*
++ * kdba_id_printinsn
++ *
++ * Format and print a single instruction at 'pc'. Return the
++ * length of the instruction.
++ *
++ * Parameters:
++ * pc Program Counter Value.
++ * dip Disassemble_info structure pointer
++ * Returns:
++ * Length of instruction, -1 for error.
++ * Locking:
++ * None.
++ * Remarks:
++ * Depends on 'IDMODE' environment variable.
++ */
++
++int
++kdba_id_printinsn(kdb_machreg_t pc, disassemble_info *dip)
++{
++ kdba_printaddress(pc, dip, 1);
++ return print_insn_i386_att(pc, dip);
++}
++
++/*
++ * kdba_id_init
++ *
++ * Initialize the architecture dependent elements of
++ * the disassembly information structure
++ * for the GNU disassembler.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void
++kdba_id_init(disassemble_info *dip)
++{
++ dip->read_memory_func = kdba_dis_getmem;
++ dip->print_address_func = kdba_dis_printaddr;
++ dip->symbol_at_address_func = kdba_dis_getsym;
++
++ dip->flavour = bfd_target_elf_flavour;
++ dip->arch = bfd_arch_i386;
++ dip->mach = bfd_mach_x86_64;
++ dip->endian = BFD_ENDIAN_LITTLE;
++
++ dip->display_endian = BFD_ENDIAN_LITTLE;
++}
+diff -Nurp linux-2.6.22-590/arch/x86_64/kdb/kdba_io.c linux-2.6.22-600/arch/x86_64/kdb/kdba_io.c
+--- linux-2.6.22-590/arch/x86_64/kdb/kdba_io.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/x86_64/kdb/kdba_io.c 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,503 @@
++/*
++ * Kernel Debugger Architecture Dependent Console I/O handler
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <asm/io.h>
++#include <linux/delay.h>
++#include <linux/console.h>
++#include <linux/ctype.h>
++#include <linux/keyboard.h>
++#include <linux/serial.h>
++#include <linux/serial_reg.h>
++
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <pc_keyb.h>
++
++#ifdef CONFIG_VT_CONSOLE
++#define KDB_BLINK_LED 1
++#else
++#undef KDB_BLINK_LED
++#endif
++
++#ifdef CONFIG_KDB_USB
++struct kdb_usb_exchange kdb_usb_infos;
++
++EXPORT_SYMBOL(kdb_usb_infos);
++
++static unsigned char kdb_usb_keycode[256] = {
++ 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
++ 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3,
++ 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26,
++ 27, 43, 84, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64,
++ 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106,
++ 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71,
++ 72, 73, 82, 83, 86,127,116,117, 85, 89, 90, 91, 92, 93, 94, 95,
++ 120,121,122,123,134,138,130,132,128,129,131,137,133,135,136,113,
++ 115,114, 0, 0, 0,124, 0,181,182,183,184,185,186,187,188,189,
++ 190,191,192,193,194,195,196,197,198, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113,
++ 150,158,159,128,136,177,178,176,142,152,173,140
++};
++
++/* get_usb_char
++ * This function drives the UHCI controller,
++ * fetch the USB scancode and decode it
++ */
++static int get_usb_char(void)
++{
++ static int usb_lock;
++ unsigned char keycode, spec;
++ extern u_short plain_map[], shift_map[], ctrl_map[];
++
++ /* Is USB initialized ? */
++ if(!kdb_usb_infos.poll_func || !kdb_usb_infos.urb)
++ return -1;
++
++ /* Transfer char if they are present */
++ (*kdb_usb_infos.poll_func)(kdb_usb_infos.uhci, (struct urb *)kdb_usb_infos.urb);
++
++ spec = kdb_usb_infos.buffer[0];
++ keycode = kdb_usb_infos.buffer[2];
++ kdb_usb_infos.buffer[0] = (char)0;
++ kdb_usb_infos.buffer[2] = (char)0;
++
++ if(kdb_usb_infos.buffer[3])
++ return -1;
++
++ /* A normal key is pressed, decode it */
++ if(keycode)
++ keycode = kdb_usb_keycode[keycode];
++
++ /* 2 Keys pressed at one time ? */
++ if (spec && keycode) {
++ switch(spec)
++ {
++ case 0x2:
++ case 0x20: /* Shift */
++ return shift_map[keycode];
++ case 0x1:
++ case 0x10: /* Ctrl */
++ return ctrl_map[keycode];
++ case 0x4:
++ case 0x40: /* Alt */
++ break;
++ }
++ }
++ else {
++ if(keycode) { /* If only one key pressed */
++ switch(keycode)
++ {
++ case 0x1C: /* Enter */
++ return 13;
++
++ case 0x3A: /* Capslock */
++ usb_lock ? (usb_lock = 0) : (usb_lock = 1);
++ break;
++ case 0x0E: /* Backspace */
++ return 8;
++ case 0x0F: /* TAB */
++ return 9;
++ case 0x77: /* Pause */
++ break ;
++ default:
++ if(!usb_lock) {
++ return plain_map[keycode];
++ }
++ else {
++ return shift_map[keycode];
++ }
++ }
++ }
++ }
++ return -1;
++}
++#endif /* CONFIG_KDB_USB */
++
++/*
++ * This module contains code to read characters from the keyboard or a serial
++ * port.
++ *
++ * It is used by the kernel debugger, and is polled, not interrupt driven.
++ *
++ */
++
++#ifdef KDB_BLINK_LED
++/*
++ * send: Send a byte to the keyboard controller. Used primarily to
++ * alter LED settings.
++ */
++
++static void
++kdb_kbdsend(unsigned char byte)
++{
++ int timeout;
++ for (timeout = 200 * 1000; timeout && (inb(KBD_STATUS_REG) & KBD_STAT_IBF); timeout--);
++ outb(byte, KBD_DATA_REG);
++ udelay(40);
++ for (timeout = 200 * 1000; timeout && (~inb(KBD_STATUS_REG) & KBD_STAT_OBF); timeout--);
++ inb(KBD_DATA_REG);
++ udelay(40);
++}
++
++static void
++kdb_toggleled(int led)
++{
++ static int leds;
++
++ leds ^= led;
++
++ kdb_kbdsend(KBD_CMD_SET_LEDS);
++ kdb_kbdsend((unsigned char)leds);
++}
++#endif /* KDB_BLINK_LED */
++
++#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_CORE_CONSOLE)
++#define CONFIG_SERIAL_CONSOLE
++#endif
++
++#if defined(CONFIG_SERIAL_CONSOLE)
++
++struct kdb_serial kdb_serial;
++
++static unsigned int
++serial_inp(struct kdb_serial *kdb_serial, unsigned long offset)
++{
++ offset <<= kdb_serial->ioreg_shift;
++
++ switch (kdb_serial->io_type) {
++ case SERIAL_IO_MEM:
++ return readb((void __iomem *)(kdb_serial->iobase + offset));
++ break;
++ default:
++ return inb(kdb_serial->iobase + offset);
++ break;
++ }
++}
++
++/* Check if there is a byte ready at the serial port */
++static int get_serial_char(void)
++{
++ unsigned char ch;
++
++ if (kdb_serial.iobase == 0)
++ return -1;
++
++ if (serial_inp(&kdb_serial, UART_LSR) & UART_LSR_DR) {
++ ch = serial_inp(&kdb_serial, UART_RX);
++ if (ch == 0x7f)
++ ch = 8;
++ return ch;
++ }
++ return -1;
++}
++#endif /* CONFIG_SERIAL_CONSOLE */
++
++#ifdef CONFIG_VT_CONSOLE
++
++static int kbd_exists;
++
++/*
++ * Check if the keyboard controller has a keypress for us.
++ * Some parts (Enter Release, LED change) are still blocking polled here,
++ * but hopefully they are all short.
++ */
++static int get_kbd_char(void)
++{
++ int scancode, scanstatus;
++ static int shift_lock; /* CAPS LOCK state (0-off, 1-on) */
++ static int shift_key; /* Shift next keypress */
++ static int ctrl_key;
++ u_short keychar;
++ extern u_short plain_map[], shift_map[], ctrl_map[];
++
++ if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) ||
++ (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) {
++ kbd_exists = 0;
++ return -1;
++ }
++ kbd_exists = 1;
++
++ if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
++ return -1;
++
++ /*
++ * Fetch the scancode
++ */
++ scancode = inb(KBD_DATA_REG);
++ scanstatus = inb(KBD_STATUS_REG);
++
++ /*
++ * Ignore mouse events.
++ */
++ if (scanstatus & KBD_STAT_MOUSE_OBF)
++ return -1;
++
++ /*
++ * Ignore release, trigger on make
++ * (except for shift keys, where we want to
++ * keep the shift state so long as the key is
++ * held down).
++ */
++
++ if (((scancode&0x7f) == 0x2a) || ((scancode&0x7f) == 0x36)) {
++ /*
++ * Next key may use shift table
++ */
++ if ((scancode & 0x80) == 0) {
++ shift_key=1;
++ } else {
++ shift_key=0;
++ }
++ return -1;
++ }
++
++ if ((scancode&0x7f) == 0x1d) {
++ /*
++ * Left ctrl key
++ */
++ if ((scancode & 0x80) == 0) {
++ ctrl_key = 1;
++ } else {
++ ctrl_key = 0;
++ }
++ return -1;
++ }
++
++ if ((scancode & 0x80) != 0)
++ return -1;
++
++ scancode &= 0x7f;
++
++ /*
++ * Translate scancode
++ */
++
++ if (scancode == 0x3a) {
++ /*
++ * Toggle caps lock
++ */
++ shift_lock ^= 1;
++
++#ifdef KDB_BLINK_LED
++ kdb_toggleled(0x4);
++#endif
++ return -1;
++ }
++
++ if (scancode == 0x0e) {
++ /*
++ * Backspace
++ */
++ return 8;
++ }
++
++ /* Special Key */
++ switch (scancode) {
++ case 0xF: /* Tab */
++ return 9;
++ case 0x53: /* Del */
++ return 4;
++ case 0x47: /* Home */
++ return 1;
++ case 0x4F: /* End */
++ return 5;
++ case 0x4B: /* Left */
++ return 2;
++ case 0x48: /* Up */
++ return 16;
++ case 0x50: /* Down */
++ return 14;
++ case 0x4D: /* Right */
++ return 6;
++ }
++
++ if (scancode == 0xe0) {
++ return -1;
++ }
++
++ /*
++ * For Japanese 86/106 keyboards
++ * See comment in drivers/char/pc_keyb.c.
++ * - Masahiro Adegawa
++ */
++ if (scancode == 0x73) {
++ scancode = 0x59;
++ } else if (scancode == 0x7d) {
++ scancode = 0x7c;
++ }
++
++ if (!shift_lock && !shift_key && !ctrl_key) {
++ keychar = plain_map[scancode];
++ } else if (shift_lock || shift_key) {
++ keychar = shift_map[scancode];
++ } else if (ctrl_key) {
++ keychar = ctrl_map[scancode];
++ } else {
++ keychar = 0x0020;
++ kdb_printf("Unknown state/scancode (%d)\n", scancode);
++ }
++ keychar &= 0x0fff;
++ if (keychar == '\t')
++ keychar = ' ';
++ switch (KTYP(keychar)) {
++ case KT_LETTER:
++ case KT_LATIN:
++ if (isprint(keychar))
++ break; /* printable characters */
++ /* drop through */
++ case KT_SPEC:
++ if (keychar == K_ENTER)
++ break;
++ /* drop through */
++ default:
++ return(-1); /* ignore unprintables */
++ }
++
++ if ((scancode & 0x7f) == 0x1c) {
++ /*
++ * enter key. All done. Absorb the release scancode.
++ */
++ while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
++ ;
++
++ /*
++ * Fetch the scancode
++ */
++ scancode = inb(KBD_DATA_REG);
++ scanstatus = inb(KBD_STATUS_REG);
++
++ while (scanstatus & KBD_STAT_MOUSE_OBF) {
++ scancode = inb(KBD_DATA_REG);
++ scanstatus = inb(KBD_STATUS_REG);
++ }
++
++ if (scancode != 0x9c) {
++ /*
++ * Wasn't an enter-release, why not?
++ */
++ kdb_printf("kdb: expected enter got 0x%x status 0x%x\n",
++ scancode, scanstatus);
++ }
++
++ kdb_printf("\n");
++ return 13;
++ }
++
++ return keychar & 0xff;
++}
++#endif /* CONFIG_VT_CONSOLE */
++
++#ifdef KDB_BLINK_LED
++
++/* Leave numlock alone, setting it messes up laptop keyboards with the keypad
++ * mapped over normal keys.
++ */
++static int kdba_blink_mask = 0x1 | 0x4;
++
++#define BOGOMIPS (boot_cpu_data.loops_per_jiffy/(500000/HZ))
++static int blink_led(void)
++{
++ static long delay;
++
++ if (kbd_exists == 0)
++ return -1;
++
++ if (--delay < 0) {
++ if (BOGOMIPS == 0) /* early kdb */
++ delay = 150000000/1000; /* arbitrary bogomips */
++ else
++ delay = 150000000/BOGOMIPS; /* Roughly 1 second when polling */
++ kdb_toggleled(kdba_blink_mask);
++ }
++ return -1;
++}
++#endif
++
++get_char_func poll_funcs[] = {
++#if defined(CONFIG_VT_CONSOLE)
++ get_kbd_char,
++#endif
++#if defined(CONFIG_SERIAL_CONSOLE)
++ get_serial_char,
++#endif
++#ifdef KDB_BLINK_LED
++ blink_led,
++#endif
++#ifdef CONFIG_KDB_USB
++ get_usb_char,
++#endif
++ NULL
++};
++
++/*
++ * On some Compaq Deskpro's, there is a keyboard freeze many times after
++ * exiting from the kdb. As kdb's keyboard handler is not interrupt-driven and
++ * uses a polled interface, it makes more sense to disable motherboard keyboard
++ * controller's OBF interrupts during kdb's polling. In case of interrupts
++ * remaining enabled during kdb's polling, it may cause unnecessary
++ * interrupts being signalled during keypresses, which are also sometimes seen
++ * as spurious interrupts after exiting from kdb. This hack to disable OBF
++ * interrupts before entry to kdb and re-enabling them at kdb exit point also
++ * solves the keyboard freeze issue. These functions are called from
++ * kdb_local(), hence these are arch. specific setup and cleanup functions
++ * executing only on the local processor - ashishk@sco.com
++ */
++
++void kdba_local_arch_setup(void)
++{
++#ifdef CONFIG_VT_CONSOLE
++ unsigned char c;
++
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_command(KBD_CCMD_READ_MODE);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ while ( !(kbd_read_status() & KBD_STAT_OBF) );
++ c = kbd_read_input();
++ c &= ~KBD_MODE_KBD_INT;
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_command(KBD_CCMD_WRITE_MODE);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_output(c);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ mdelay(1);
++#endif /* CONFIG_VT_CONSOLE */
++}
++
++void kdba_local_arch_cleanup(void)
++{
++#ifdef CONFIG_VT_CONSOLE
++ unsigned char c;
++
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_command(KBD_CCMD_READ_MODE);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ while ( !(kbd_read_status() & KBD_STAT_OBF) );
++ c = kbd_read_input();
++ c |= KBD_MODE_KBD_INT;
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_command(KBD_CCMD_WRITE_MODE);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ kbd_write_output(c);
++ mdelay(1);
++ while (kbd_read_status() & KBD_STAT_IBF);
++ mdelay(1);
++#endif /* CONFIG_VT_CONSOLE */
++}
+diff -Nurp linux-2.6.22-590/arch/x86_64/kdb/kdbasupport.c linux-2.6.22-600/arch/x86_64/kdb/kdbasupport.c
+--- linux-2.6.22-590/arch/x86_64/kdb/kdbasupport.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/x86_64/kdb/kdbasupport.c 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,1015 @@
++/*
++ * Kernel Debugger Architecture Independent Support Functions
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/string.h>
++#include <linux/stddef.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/ptrace.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/hardirq.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/kdebug.h>
++#include <asm/processor.h>
++#include <asm/msr.h>
++#include <asm/uaccess.h>
++#include <asm/mach_apic.h>
++#include <asm/hw_irq.h>
++#include <asm/desc.h>
++
++kdb_machreg_t
++kdba_getdr6(void)
++{
++ return kdba_getdr(6);
++}
++
++kdb_machreg_t
++kdba_getdr7(void)
++{
++ return kdba_getdr(7);
++}
++
++void
++kdba_putdr6(kdb_machreg_t contents)
++{
++ kdba_putdr(6, contents);
++}
++
++static void
++kdba_putdr7(kdb_machreg_t contents)
++{
++ kdba_putdr(7, contents);
++}
++
++void
++kdba_installdbreg(kdb_bp_t *bp)
++{
++ kdb_machreg_t dr7;
++
++ dr7 = kdba_getdr7();
++
++ kdba_putdr(bp->bp_hard->bph_reg, bp->bp_addr);
++
++ dr7 |= DR7_GE;
++ if (cpu_has_de)
++ set_in_cr4(X86_CR4_DE);
++
++ switch (bp->bp_hard->bph_reg){
++ case 0:
++ DR7_RW0SET(dr7,bp->bp_hard->bph_mode);
++ DR7_LEN0SET(dr7,bp->bp_hard->bph_length);
++ DR7_G0SET(dr7);
++ break;
++ case 1:
++ DR7_RW1SET(dr7,bp->bp_hard->bph_mode);
++ DR7_LEN1SET(dr7,bp->bp_hard->bph_length);
++ DR7_G1SET(dr7);
++ break;
++ case 2:
++ DR7_RW2SET(dr7,bp->bp_hard->bph_mode);
++ DR7_LEN2SET(dr7,bp->bp_hard->bph_length);
++ DR7_G2SET(dr7);
++ break;
++ case 3:
++ DR7_RW3SET(dr7,bp->bp_hard->bph_mode);
++ DR7_LEN3SET(dr7,bp->bp_hard->bph_length);
++ DR7_G3SET(dr7);
++ break;
++ default:
++ kdb_printf("kdb: Bad debug register!! %ld\n",
++ bp->bp_hard->bph_reg);
++ break;
++ }
++
++ kdba_putdr7(dr7);
++ return;
++}
++
++void
++kdba_removedbreg(kdb_bp_t *bp)
++{
++ int regnum;
++ kdb_machreg_t dr7;
++
++ if (!bp->bp_hard)
++ return;
++
++ regnum = bp->bp_hard->bph_reg;
++
++ dr7 = kdba_getdr7();
++
++ kdba_putdr(regnum, 0);
++
++ switch (regnum) {
++ case 0:
++ DR7_G0CLR(dr7);
++ DR7_L0CLR(dr7);
++ break;
++ case 1:
++ DR7_G1CLR(dr7);
++ DR7_L1CLR(dr7);
++ break;
++ case 2:
++ DR7_G2CLR(dr7);
++ DR7_L2CLR(dr7);
++ break;
++ case 3:
++ DR7_G3CLR(dr7);
++ DR7_L3CLR(dr7);
++ break;
++ default:
++ kdb_printf("kdb: Bad debug register!! %d\n", regnum);
++ break;
++ }
++
++ kdba_putdr7(dr7);
++}
++
++kdb_machreg_t
++kdba_getdr(int regnum)
++{
++ kdb_machreg_t contents = 0;
++ switch(regnum) {
++ case 0:
++ __asm__ ("movq %%db0,%0\n\t":"=r"(contents));
++ break;
++ case 1:
++ __asm__ ("movq %%db1,%0\n\t":"=r"(contents));
++ break;
++ case 2:
++ __asm__ ("movq %%db2,%0\n\t":"=r"(contents));
++ break;
++ case 3:
++ __asm__ ("movq %%db3,%0\n\t":"=r"(contents));
++ break;
++ case 4:
++ case 5:
++ break;
++ case 6:
++ __asm__ ("movq %%db6,%0\n\t":"=r"(contents));
++ break;
++ case 7:
++ __asm__ ("movq %%db7,%0\n\t":"=r"(contents));
++ break;
++ default:
++ break;
++ }
++
++ return contents;
++}
++
++
++kdb_machreg_t
++kdb_getcr(int regnum)
++{
++ kdb_machreg_t contents = 0;
++ switch(regnum) {
++ case 0:
++ __asm__ ("movq %%cr0,%0\n\t":"=r"(contents));
++ break;
++ case 1:
++ break;
++ case 2:
++ __asm__ ("movq %%cr2,%0\n\t":"=r"(contents));
++ break;
++ case 3:
++ __asm__ ("movq %%cr3,%0\n\t":"=r"(contents));
++ break;
++ case 4:
++ __asm__ ("movq %%cr4,%0\n\t":"=r"(contents));
++ break;
++ default:
++ break;
++ }
++
++ return contents;
++}
++
++void
++kdba_putdr(int regnum, kdb_machreg_t contents)
++{
++ switch(regnum) {
++ case 0:
++ __asm__ ("movq %0,%%db0\n\t"::"r"(contents));
++ break;
++ case 1:
++ __asm__ ("movq %0,%%db1\n\t"::"r"(contents));
++ break;
++ case 2:
++ __asm__ ("movq %0,%%db2\n\t"::"r"(contents));
++ break;
++ case 3:
++ __asm__ ("movq %0,%%db3\n\t"::"r"(contents));
++ break;
++ case 4:
++ case 5:
++ break;
++ case 6:
++ __asm__ ("movq %0,%%db6\n\t"::"r"(contents));
++ break;
++ case 7:
++ __asm__ ("movq %0,%%db7\n\t"::"r"(contents));
++ break;
++ default:
++ break;
++ }
++}
++
++/*
++ * kdba_getregcontents
++ *
++ * Return the contents of the register specified by the
++ * input string argument. Return an error if the string
++ * does not match a machine register.
++ *
++ * The following pseudo register names are supported:
++ * &regs - Prints address of exception frame
++ * krsp - Prints kernel stack pointer at time of fault
++ * crsp - Prints current kernel stack pointer, inside kdb
++ * ceflags - Prints current flags, inside kdb
++ * %<regname> - Uses the value of the registers at the
++ * last time the user process entered kernel
++ * mode, instead of the registers at the time
++ * kdb was entered.
++ *
++ * Parameters:
++ * regname Pointer to string naming register
++ * regs Pointer to structure containing registers.
++ * Outputs:
++ * *contents Pointer to unsigned long to receive register contents
++ * Returns:
++ * 0 Success
++ * KDB_BADREG Invalid register name
++ * Locking:
++ * None.
++ * Remarks:
++ * If kdb was entered via an interrupt from the kernel itself then
++ * ss and rsp are *not* on the stack.
++ */
++
++static struct kdbregs {
++ char *reg_name;
++ size_t reg_offset;
++} kdbreglist[] = {
++ { "r15", offsetof(struct pt_regs, r15) },
++ { "r14", offsetof(struct pt_regs, r14) },
++ { "r13", offsetof(struct pt_regs, r13) },
++ { "r12", offsetof(struct pt_regs, r12) },
++ { "rbp", offsetof(struct pt_regs, rbp) },
++ { "rbx", offsetof(struct pt_regs, rbx) },
++ { "r11", offsetof(struct pt_regs, r11) },
++ { "r10", offsetof(struct pt_regs, r10) },
++ { "r9", offsetof(struct pt_regs, r9) },
++ { "r8", offsetof(struct pt_regs, r8) },
++ { "rax", offsetof(struct pt_regs, rax) },
++ { "rcx", offsetof(struct pt_regs, rcx) },
++ { "rdx", offsetof(struct pt_regs, rdx) },
++ { "rsi", offsetof(struct pt_regs, rsi) },
++ { "rdi", offsetof(struct pt_regs, rdi) },
++ { "orig_rax", offsetof(struct pt_regs, orig_rax) },
++ { "rip", offsetof(struct pt_regs, rip) },
++ { "cs", offsetof(struct pt_regs, cs) },
++ { "eflags", offsetof(struct pt_regs, eflags) },
++ { "rsp", offsetof(struct pt_regs, rsp) },
++ { "ss", offsetof(struct pt_regs, ss) },
++};
++
++static const int nkdbreglist = sizeof(kdbreglist) / sizeof(struct kdbregs);
++
++static struct kdbregs dbreglist[] = {
++ { "dr0", 0 },
++ { "dr1", 1 },
++ { "dr2", 2 },
++ { "dr3", 3 },
++ { "dr6", 6 },
++ { "dr7", 7 },
++};
++
++static const int ndbreglist = sizeof(dbreglist) / sizeof(struct kdbregs);
++
++int
++kdba_getregcontents(const char *regname,
++ struct pt_regs *regs,
++ kdb_machreg_t *contents)
++{
++ int i;
++
++ if (strcmp(regname, "&regs") == 0) {
++ *contents = (unsigned long)regs;
++ return 0;
++ }
++
++ if (strcmp(regname, "krsp") == 0) {
++ *contents = (unsigned long)regs + sizeof(struct pt_regs);
++ if ((regs->cs & 0xffff) == __KERNEL_CS) {
++ /* rsp and ss are not on stack */
++ *contents -= 2*8; /* rsp and ss are 8 bytes each on x86_64 */
++ }
++ return 0;
++ }
++
++ if (strcmp(regname, "crsp") == 0) {
++ asm volatile("movq %%rsp,%0":"=m" (*contents));
++ return 0;
++ }
++
++ if (strcmp(regname, "ceflags") == 0) {
++ unsigned long flags;
++ local_save_flags(flags);
++ *contents = flags;
++ return 0;
++ }
++
++ if (regname[0] == '%') {
++ /* User registers: %%r[a-c]x, etc */
++ regname++;
++ regs = (struct pt_regs *)
++ (current->thread.rsp0 - sizeof(struct pt_regs));
++ }
++
++ for (i=0; i<nkdbreglist; i++) {
++ if (strnicmp(kdbreglist[i].reg_name,
++ regname,
++ strlen(regname)) == 0)
++ break;
++ }
++
++ if ((i < nkdbreglist)
++ && (strlen(kdbreglist[i].reg_name) == strlen(regname))) {
++ if ((regs->cs & 0xffff) == __KERNEL_CS) {
++ /* No cpl switch, rsp is not on stack */
++ if (strcmp(kdbreglist[i].reg_name, "rsp") == 0) {
++ *contents = (kdb_machreg_t)regs +
++ sizeof(struct pt_regs) - 2*8;
++ return(0);
++ }
++#if 0 /* FIXME */
++ if (strcmp(kdbreglist[i].reg_name, "ss") == 0) {
++ kdb_machreg_t r;
++
++ r = (kdb_machreg_t)regs +
++ sizeof(struct pt_regs) - 2*8;
++ *contents = (kdb_machreg_t)SS(r); /* XXX */
++ return(0);
++ }
++#endif
++ }
++ *contents = *(unsigned long *)((unsigned long)regs +
++ kdbreglist[i].reg_offset);
++ return(0);
++ }
++
++ for (i=0; i<ndbreglist; i++) {
++ if (strnicmp(dbreglist[i].reg_name,
++ regname,
++ strlen(regname)) == 0)
++ break;
++ }
++
++ if ((i < ndbreglist)
++ && (strlen(dbreglist[i].reg_name) == strlen(regname))) {
++ *contents = kdba_getdr(dbreglist[i].reg_offset);
++ return 0;
++ }
++ return KDB_BADREG;
++}
++
++/*
++ * kdba_setregcontents
++ *
++ * Set the contents of the register specified by the
++ * input string argument. Return an error if the string
++ * does not match a machine register.
++ *
++ * Supports modification of user-mode registers via
++ * %<register-name>
++ *
++ * Parameters:
++ * regname Pointer to string naming register
++ * regs Pointer to structure containing registers.
++ * contents Unsigned long containing new register contents
++ * Outputs:
++ * Returns:
++ * 0 Success
++ * KDB_BADREG Invalid register name
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++int
++kdba_setregcontents(const char *regname,
++ struct pt_regs *regs,
++ unsigned long contents)
++{
++ int i;
++
++ if (regname[0] == '%') {
++ regname++;
++ regs = (struct pt_regs *)
++ (current->thread.rsp0 - sizeof(struct pt_regs));
++ }
++
++ for (i=0; i<nkdbreglist; i++) {
++ if (strnicmp(kdbreglist[i].reg_name,
++ regname,
++ strlen(regname)) == 0)
++ break;
++ }
++
++ if ((i < nkdbreglist)
++ && (strlen(kdbreglist[i].reg_name) == strlen(regname))) {
++ *(unsigned long *)((unsigned long)regs
++ + kdbreglist[i].reg_offset) = contents;
++ return 0;
++ }
++
++ for (i=0; i<ndbreglist; i++) {
++ if (strnicmp(dbreglist[i].reg_name,
++ regname,
++ strlen(regname)) == 0)
++ break;
++ }
++
++ if ((i < ndbreglist)
++ && (strlen(dbreglist[i].reg_name) == strlen(regname))) {
++ kdba_putdr(dbreglist[i].reg_offset, contents);
++ return 0;
++ }
++
++ return KDB_BADREG;
++}
++
++/*
++ * kdba_dumpregs
++ *
++ * Dump the specified register set to the display.
++ *
++ * Parameters:
++ * regs Pointer to structure containing registers.
++ * type Character string identifying register set to dump
++ * extra string further identifying register (optional)
++ * Outputs:
++ * Returns:
++ * 0 Success
++ * Locking:
++ * None.
++ * Remarks:
++ * This function will dump the general register set if the type
++ * argument is NULL (struct pt_regs). The alternate register
++ * set types supported by this function:
++ *
++ * d Debug registers
++ * c Control registers
++ * u User registers at most recent entry to kernel
++ * Following not yet implemented:
++ * r Memory Type Range Registers (extra defines register)
++ *
++ * MSR on i386/x86_64 are handled by rdmsr/wrmsr commands.
++ */
++
++int
++kdba_dumpregs(struct pt_regs *regs,
++ const char *type,
++ const char *extra)
++{
++ int i;
++ int count = 0;
++
++ if (type
++ && (type[0] == 'u')) {
++ type = NULL;
++ regs = (struct pt_regs *)
++ (current->thread.rsp0 - sizeof(struct pt_regs));
++ }
++
++ if (type == NULL) {
++ struct kdbregs *rlp;
++ kdb_machreg_t contents;
++
++ for (i=0, rlp=kdbreglist; i<nkdbreglist; i++,rlp++) {
++ kdb_printf("%8s = ", rlp->reg_name);
++ kdba_getregcontents(rlp->reg_name, regs, &contents);
++ kdb_printf("0x%016lx ", contents);
++ if ((++count % 2) == 0)
++ kdb_printf("\n");
++ }
++
++ kdb_printf("&regs = 0x%p\n", regs);
++
++ return 0;
++ }
++
++ switch (type[0]) {
++ case 'd':
++ {
++ unsigned long dr[8];
++
++ for(i=0; i<8; i++) {
++ if ((i == 4) || (i == 5)) continue;
++ dr[i] = kdba_getdr(i);
++ }
++ kdb_printf("dr0 = 0x%08lx dr1 = 0x%08lx dr2 = 0x%08lx dr3 = 0x%08lx\n",
++ dr[0], dr[1], dr[2], dr[3]);
++ kdb_printf("dr6 = 0x%08lx dr7 = 0x%08lx\n",
++ dr[6], dr[7]);
++ return 0;
++ }
++ case 'c':
++ {
++ unsigned long cr[5];
++
++ for (i=0; i<5; i++) {
++ cr[i] = kdb_getcr(i);
++ }
++ kdb_printf("cr0 = 0x%08lx cr1 = 0x%08lx cr2 = 0x%08lx cr3 = 0x%08lx\ncr4 = 0x%08lx\n",
++ cr[0], cr[1], cr[2], cr[3], cr[4]);
++ return 0;
++ }
++ case 'r':
++ break;
++ default:
++ return KDB_BADREG;
++ }
++
++ /* NOTREACHED */
++ return 0;
++}
++EXPORT_SYMBOL(kdba_dumpregs);
++
++kdb_machreg_t
++kdba_getpc(struct pt_regs *regs)
++{
++ return regs->rip;
++}
++
++int
++kdba_setpc(struct pt_regs *regs, kdb_machreg_t newpc)
++{
++ if (KDB_NULL_REGS(regs))
++ return KDB_BADREG;
++ regs->rip = newpc;
++ KDB_STATE_SET(IP_ADJUSTED);
++ return 0;
++}
++
++/*
++ * kdba_main_loop
++ *
++ * Do any architecture specific set up before entering the main kdb loop.
++ * The primary function of this routine is to make all processes look the
++ * same to kdb, kdb must be able to list a process without worrying if the
++ * process is running or blocked, so make all process look as though they
++ * are blocked.
++ *
++ * Inputs:
++ * reason The reason KDB was invoked
++ * error The hardware-defined error code
++ * reason2 kdb's current reason code. Initially error but can change
++ * according to kdb state.
++ * db_result Result from break or debug point.
++ * ef The exception frame at time of fault/breakpoint. If reason
++ * is SILENT or CPU_UP then regs is NULL, otherwise it should
++ * always be valid.
++ * Returns:
++ * 0 KDB was invoked for an event which it wasn't responsible
++ * 1 KDB handled the event for which it was invoked.
++ * Outputs:
++ * Sets rip and rsp in current->thread.
++ * Locking:
++ * None.
++ * Remarks:
++ * none.
++ */
++
++int
++kdba_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
++ kdb_dbtrap_t db_result, struct pt_regs *regs)
++{
++ int ret;
++
++ if (regs)
++ kdba_getregcontents("rsp", regs, &(current->thread.rsp));
++ kdb_save_running(regs);
++ ret = kdb_main_loop(reason, reason2, error, db_result, regs);
++ kdb_unsave_running(regs);
++ return ret;
++}
++
++void
++kdba_disableint(kdb_intstate_t *state)
++{
++ unsigned long *fp = (unsigned long *)state;
++ unsigned long flags;
++
++ local_irq_save(flags);
++ *fp = flags;
++}
++
++void
++kdba_restoreint(kdb_intstate_t *state)
++{
++ unsigned long flags = *(unsigned long *)state;
++ local_irq_restore(flags);
++}
++
++void
++kdba_setsinglestep(struct pt_regs *regs)
++{
++ if (KDB_NULL_REGS(regs))
++ return;
++ if (regs->eflags & EF_IE)
++ KDB_STATE_SET(A_IF);
++ else
++ KDB_STATE_CLEAR(A_IF);
++ regs->eflags = (regs->eflags | EF_TF) & ~EF_IE;
++}
++
++void
++kdba_clearsinglestep(struct pt_regs *regs)
++{
++ if (KDB_NULL_REGS(regs))
++ return;
++ if (KDB_STATE(A_IF))
++ regs->eflags |= EF_IE;
++ else
++ regs->eflags &= ~EF_IE;
++}
++
++int asmlinkage
++kdba_setjmp(kdb_jmp_buf *jb)
++{
++#ifdef CONFIG_FRAME_POINTER
++ __asm__ __volatile__
++ ("movq %%rbx, (0*8)(%%rdi);"
++ "movq %%rcx, (1*8)(%%rdi);"
++ "movq %%r12, (2*8)(%%rdi);"
++ "movq %%r13, (3*8)(%%rdi);"
++ "movq %%r14, (4*8)(%%rdi);"
++ "movq %%r15, (5*8)(%%rdi);"
++ "leaq 16(%%rsp), %%rdx;"
++ "movq %%rdx, (6*8)(%%rdi);"
++ "movq %%rax, (7*8)(%%rdi)"
++ :
++ : "a" (__builtin_return_address(0)),
++ "c" (__builtin_frame_address(1))
++ );
++#else /* !CONFIG_FRAME_POINTER */
++ __asm__ __volatile__
++ ("movq %%rbx, (0*8)(%%rdi);"
++ "movq %%rbp, (1*8)(%%rdi);"
++ "movq %%r12, (2*8)(%%rdi);"
++ "movq %%r13, (3*8)(%%rdi);"
++ "movq %%r14, (4*8)(%%rdi);"
++ "movq %%r15, (5*8)(%%rdi);"
++ "leaq 8(%%rsp), %%rdx;"
++ "movq %%rdx, (6*8)(%%rdi);"
++ "movq %%rax, (7*8)(%%rdi)"
++ :
++ : "a" (__builtin_return_address(0))
++ );
++#endif /* CONFIG_FRAME_POINTER */
++ return 0;
++}
++
++void asmlinkage
++kdba_longjmp(kdb_jmp_buf *jb, int reason)
++{
++ __asm__("movq (0*8)(%rdi),%rbx;"
++ "movq (1*8)(%rdi),%rbp;"
++ "movq (2*8)(%rdi),%r12;"
++ "movq (3*8)(%rdi),%r13;"
++ "movq (4*8)(%rdi),%r14;"
++ "movq (5*8)(%rdi),%r15;"
++ "movq (7*8)(%rdi),%rdx;"
++ "movq (6*8)(%rdi),%rsp;"
++ "mov %rsi, %rax;"
++ "jmpq *%rdx");
++}
++
++/*
++ * kdba_pt_regs
++ *
++ * Format a struct pt_regs
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * If no address is supplied, it uses the current irq pt_regs.
++ */
++
++static int
++kdba_pt_regs(int argc, const char **argv)
++{
++ int diag;
++ kdb_machreg_t addr;
++ long offset = 0;
++ int nextarg;
++ struct pt_regs *p;
++ static const char *fmt = " %-11.11s 0x%lx\n";
++ static int first_time = 1;
++
++ if (argc == 0) {
++ addr = (kdb_machreg_t) get_irq_regs();
++ } else if (argc == 1) {
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++ if (diag)
++ return diag;
++ } else {
++ return KDB_ARGCOUNT;
++ }
++
++ p = (struct pt_regs *) addr;
++ if (first_time) {
++ first_time = 0;
++ kdb_printf("\n+++ Warning: x86_64 pt_regs are not always "
++ "completely defined, r15-rbx may be invalid\n\n");
++ }
++ kdb_printf("struct pt_regs 0x%p-0x%p\n", p, (unsigned char *)p + sizeof(*p) - 1);
++ kdb_print_nameval("r15", p->r15);
++ kdb_print_nameval("r14", p->r14);
++ kdb_print_nameval("r13", p->r13);
++ kdb_print_nameval("r12", p->r12);
++ kdb_print_nameval("rbp", p->rbp);
++ kdb_print_nameval("rbx", p->rbx);
++ kdb_print_nameval("r11", p->r11);
++ kdb_print_nameval("r10", p->r10);
++ kdb_print_nameval("r9", p->r9);
++ kdb_print_nameval("r8", p->r8);
++ kdb_print_nameval("rax", p->rax);
++ kdb_print_nameval("rcx", p->rcx);
++ kdb_print_nameval("rdx", p->rdx);
++ kdb_print_nameval("rsi", p->rsi);
++ kdb_print_nameval("rdi", p->rdi);
++ kdb_print_nameval("orig_rax", p->orig_rax);
++ kdb_print_nameval("rip", p->rip);
++ kdb_printf(fmt, "cs", p->cs);
++ kdb_printf(fmt, "eflags", p->eflags);
++ kdb_printf(fmt, "rsp", p->rsp);
++ kdb_printf(fmt, "ss", p->ss);
++ return 0;
++}
++
++/*
++ * kdba_cpu_pda
++ *
++ * Format a struct cpu_pda
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * If no cpu is supplied, it prints the current cpu. If the cpu is '*'
++ * then it prints all cpus.
++ */
++
++static int
++kdba_cpu_pda(int argc, const char **argv)
++{
++ int diag, nextarg, all_cpus = 0;
++ long offset = 0;
++ unsigned long cpu;
++ struct x8664_pda *c;
++ static const char *fmtl = " %-17.17s 0x%lx\n";
++ static const char *fmtd = " %-17.17s %d\n";
++ static const char *fmtp = " %-17.17s 0x%p\n";
++
++ if (argc == 0) {
++ cpu = smp_processor_id();
++ } else if (argc == 1) {
++ if (strcmp(argv[1], "*") == 0) {
++ all_cpus = 1;
++ cpu = 0;
++ } else {
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &cpu, &offset, NULL);
++ if (diag)
++ return diag;
++ }
++ } else {
++ return KDB_ARGCOUNT;
++ }
++
++ for (; cpu < NR_CPUS; ++cpu) {
++ if (cpu_online(cpu)) {
++ c = cpu_pda(cpu);
++ kdb_printf("struct cpu_pda 0x%p-0x%p\n", c, (unsigned char *)c + sizeof(*c) - 1);
++ kdb_printf(fmtp, "pcurrent", c->pcurrent);
++ kdb_printf(fmtl, "data_offset", c->data_offset);
++ kdb_printf(fmtl, "kernelstack", c->kernelstack);
++ kdb_printf(fmtl, "oldrsp", c->oldrsp);
++ kdb_printf(fmtd, "irqcount", c->irqcount);
++ kdb_printf(fmtd, "cpunumber", c->cpunumber);
++ kdb_printf(fmtp, "irqstackptr", c->irqstackptr);
++ kdb_printf(fmtd, "nodenumber", c->nodenumber);
++ kdb_printf(fmtd, "__softirq_pending", c->__softirq_pending);
++ kdb_printf(fmtd, "__nmi_count", c->__nmi_count);
++ kdb_printf(fmtd, "mmu_state", c->mmu_state);
++ kdb_printf(fmtp, "active_mm", c->active_mm);
++ kdb_printf(fmtd, "apic_timer_irqs", c->apic_timer_irqs);
++ }
++ if (!all_cpus)
++ break;
++ }
++ return 0;
++}
++
++/*
++ * kdba_entry
++ *
++ * This is the interface routine between
++ * the notifier die_chain and kdb
++ */
++static int kdba_entry( struct notifier_block *b, unsigned long val, void *v)
++{
++ struct die_args *args = v;
++ int err, trap, ret = 0;
++ struct pt_regs *regs;
++
++ regs = args->regs;
++ err = args->err;
++ trap = args->trapnr;
++ switch (val){
++#ifdef CONFIG_SMP
++ case DIE_NMI_IPI:
++ ret = kdb_ipi(regs, NULL);
++ break;
++#endif /* CONFIG_SMP */
++ case DIE_OOPS:
++ ret = kdb(KDB_REASON_OOPS, err, regs);
++ break;
++ case DIE_CALL:
++ ret = kdb(KDB_REASON_ENTER, err, regs);
++ break;
++ case DIE_DEBUG:
++ ret = kdb(KDB_REASON_DEBUG, err, regs);
++ break;
++ case DIE_NMIWATCHDOG:
++ ret = kdb(KDB_REASON_NMI, err, regs);
++ break;
++ case DIE_INT3:
++ ret = kdb(KDB_REASON_BREAK, err, regs);
++ // falls thru
++ default:
++ break;
++ }
++ return (ret ? NOTIFY_STOP : NOTIFY_DONE);
++}
++
++/*
++ * notifier block for kdb entry
++ */
++static struct notifier_block kdba_notifier = {
++ .notifier_call = kdba_entry
++};
++
++asmlinkage int kdb_call(void);
++
++/* Executed once on each cpu at startup. */
++void
++kdba_cpu_up(void)
++{
++}
++
++static int __init
++kdba_arch_init(void)
++{
++#ifdef CONFIG_SMP
++ set_intr_gate(KDB_VECTOR, kdb_interrupt);
++#endif
++ set_intr_gate(KDBENTER_VECTOR, kdb_call);
++ return 0;
++}
++
++arch_initcall(kdba_arch_init);
++
++/*
++ * kdba_init
++ *
++ * Architecture specific initialization.
++ *
++ * Parameters:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ * None.
++ */
++
++void __init
++kdba_init(void)
++{
++ kdba_arch_init(); /* Need to register KDBENTER_VECTOR early */
++ kdb_register("pt_regs", kdba_pt_regs, "address", "Format struct pt_regs", 0);
++ kdb_register("cpu_pda", kdba_cpu_pda, "<cpu>", "Format struct cpu_pda", 0);
++ register_die_notifier(&kdba_notifier);
++ return;
++}
++
++/*
++ * kdba_adjust_ip
++ *
++ * Architecture specific adjustment of instruction pointer before leaving
++ * kdb.
++ *
++ * Parameters:
++ * reason The reason KDB was invoked
++ * error The hardware-defined error code
++ * ef The exception frame at time of fault/breakpoint. If reason
++ * is SILENT or CPU_UP then regs is NULL, otherwise it should
++ * always be valid.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ * no-op on x86_64.
++ */
++
++void
++kdba_adjust_ip(kdb_reason_t reason, int error, struct pt_regs *ef)
++{
++ return;
++}
++
++void
++kdba_set_current_task(const struct task_struct *p)
++{
++ kdb_current_task = p;
++ if (kdb_task_has_cpu(p)) {
++ struct kdb_running_process *krp = kdb_running_process + kdb_process_cpu(p);
++ kdb_current_regs = krp->regs;
++ return;
++ }
++ kdb_current_regs = NULL;
++}
++
++#ifdef CONFIG_SMP
++
++/* When first entering KDB, try a normal IPI. That reduces backtrace problems
++ * on the other cpus.
++ */
++void
++smp_kdb_stop(void)
++{
++ if (!KDB_FLAG(NOIPI))
++ send_IPI_allbutself(KDB_VECTOR);
++}
++
++/* The normal KDB IPI handler */
++extern asmlinkage void smp_kdb_interrupt(struct pt_regs *regs); /* for sparse */
++asmlinkage void
++smp_kdb_interrupt(struct pt_regs *regs)
++{
++ struct pt_regs *old_regs = set_irq_regs(regs);
++ ack_APIC_irq();
++ irq_enter();
++ kdb_ipi(regs, NULL);
++ irq_exit();
++ set_irq_regs(old_regs);
++}
++
++/* Invoked once from kdb_wait_for_cpus when waiting for cpus. For those cpus
++ * that have not responded to the normal KDB interrupt yet, hit them with an
++ * NMI event.
++ */
++void
++kdba_wait_for_cpus(void)
++{
++ int c;
++ if (KDB_FLAG(CATASTROPHIC))
++ return;
++ kdb_printf(" Sending NMI to cpus that have not responded yet\n");
++ for_each_online_cpu(c)
++ if (kdb_running_process[c].seqno < kdb_seqno - 1)
++ send_IPI_mask(cpumask_of_cpu(c), NMI_VECTOR);
++}
++
++#endif /* CONFIG_SMP */
+diff -Nurp linux-2.6.22-590/arch/x86_64/kdb/kdb_cmds linux-2.6.22-600/arch/x86_64/kdb/kdb_cmds
+--- linux-2.6.22-590/arch/x86_64/kdb/kdb_cmds 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/x86_64/kdb/kdb_cmds 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,18 @@
++# Standard architecture specific commands for kdb.
++# These commands are appended to those in kdb/kdb_cmds, see that file for
++# restrictions.
++
++# Standard debugging information for first level support, invoked from archkdb*
++# commands that are defined in kdb/kdb_cmds.
++
++defcmd archkdbcommon "" "Common arch debugging"
++ set LINES 2000000
++ set BTAPROMPT 0
++ -summary
++ -id %rip-24
++ -cpu
++ -ps
++ -dmesg 600
++ -bt
++ -cpu_pda *
++endefcmd
+diff -Nurp linux-2.6.22-590/arch/x86_64/kdb/Makefile linux-2.6.22-600/arch/x86_64/kdb/Makefile
+--- linux-2.6.22-590/arch/x86_64/kdb/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/x86_64/kdb/Makefile 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,13 @@
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License. See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++# Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++#
++
++obj-$(CONFIG_KDB) := kdba_bp.o kdba_id.o kdba_io.o kdbasupport.o x86_64-dis.o
++
++override CFLAGS := $(CFLAGS:%-pg=% )
++
++CFLAGS_kdba_io.o += -I $(TOPDIR)/arch/$(ARCH)/kdb
+diff -Nurp linux-2.6.22-590/arch/x86_64/kdb/pc_keyb.h linux-2.6.22-600/arch/x86_64/kdb/pc_keyb.h
+--- linux-2.6.22-590/arch/x86_64/kdb/pc_keyb.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/x86_64/kdb/pc_keyb.h 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,137 @@
++/*
++ * include/linux/pc_keyb.h
++ *
++ * PC Keyboard And Keyboard Controller
++ *
++ * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
++ */
++
++/*
++ * Configuration Switches
++ */
++
++#undef KBD_REPORT_ERR /* Report keyboard errors */
++#define KBD_REPORT_UNKN /* Report unknown scan codes */
++#define KBD_REPORT_TIMEOUTS /* Report keyboard timeouts */
++#undef KBD_IS_FOCUS_9000 /* We have the brain-damaged FOCUS-9000 keyboard */
++#undef INITIALIZE_MOUSE /* Define if your PS/2 mouse needs initialization. */
++
++
++
++#define KBD_INIT_TIMEOUT 1000 /* Timeout in ms for initializing the keyboard */
++#define KBC_TIMEOUT 250 /* Timeout in ms for sending to keyboard controller */
++#define KBD_TIMEOUT 1000 /* Timeout in ms for keyboard command acknowledge */
++
++/*
++ * Internal variables of the driver
++ */
++
++extern unsigned char pckbd_read_mask;
++extern unsigned char aux_device_present;
++
++/*
++ * Keyboard Controller Registers on normal PCs.
++ */
++
++#define KBD_STATUS_REG 0x64 /* Status register (R) */
++#define KBD_CNTL_REG 0x64 /* Controller command register (W) */
++#define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */
++
++/*
++ * Keyboard Controller Commands
++ */
++
++#define KBD_CCMD_READ_MODE 0x20 /* Read mode bits */
++#define KBD_CCMD_WRITE_MODE 0x60 /* Write mode bits */
++#define KBD_CCMD_GET_VERSION 0xA1 /* Get controller version */
++#define KBD_CCMD_MOUSE_DISABLE 0xA7 /* Disable mouse interface */
++#define KBD_CCMD_MOUSE_ENABLE 0xA8 /* Enable mouse interface */
++#define KBD_CCMD_TEST_MOUSE 0xA9 /* Mouse interface test */
++#define KBD_CCMD_SELF_TEST 0xAA /* Controller self test */
++#define KBD_CCMD_KBD_TEST 0xAB /* Keyboard interface test */
++#define KBD_CCMD_KBD_DISABLE 0xAD /* Keyboard interface disable */
++#define KBD_CCMD_KBD_ENABLE 0xAE /* Keyboard interface enable */
++#define KBD_CCMD_WRITE_AUX_OBUF 0xD3 /* Write to output buffer as if
++ initiated by the auxiliary device */
++#define KBD_CCMD_WRITE_MOUSE 0xD4 /* Write the following byte to the mouse */
++
++/*
++ * Keyboard Commands
++ */
++
++#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */
++#define KBD_CMD_SET_RATE 0xF3 /* Set typematic rate */
++#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */
++#define KBD_CMD_DISABLE 0xF5 /* Disable scanning */
++#define KBD_CMD_RESET 0xFF /* Reset */
++
++/*
++ * Keyboard Replies
++ */
++
++#define KBD_REPLY_POR 0xAA /* Power on reset */
++#define KBD_REPLY_ACK 0xFA /* Command ACK */
++#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */
++
++/*
++ * Status Register Bits
++ */
++
++#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */
++#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */
++#define KBD_STAT_SELFTEST 0x04 /* Self test successful */
++#define KBD_STAT_CMD 0x08 /* Last write was a command write (0=data) */
++#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */
++#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */
++#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */
++#define KBD_STAT_PERR 0x80 /* Parity error */
++
++#define AUX_STAT_OBF (KBD_STAT_OBF | KBD_STAT_MOUSE_OBF)
++
++/*
++ * Controller Mode Register Bits
++ */
++
++#define KBD_MODE_KBD_INT 0x01 /* Keyboard data generate IRQ1 */
++#define KBD_MODE_MOUSE_INT 0x02 /* Mouse data generate IRQ12 */
++#define KBD_MODE_SYS 0x04 /* The system flag (?) */
++#define KBD_MODE_NO_KEYLOCK 0x08 /* The keylock doesn't affect the keyboard if set */
++#define KBD_MODE_DISABLE_KBD 0x10 /* Disable keyboard interface */
++#define KBD_MODE_DISABLE_MOUSE 0x20 /* Disable mouse interface */
++#define KBD_MODE_KCC 0x40 /* Scan code conversion to PC format */
++#define KBD_MODE_RFU 0x80
++
++/*
++ * Mouse Commands
++ */
++
++#define AUX_SET_RES 0xE8 /* Set resolution */
++#define AUX_SET_SCALE11 0xE6 /* Set 1:1 scaling */
++#define AUX_SET_SCALE21 0xE7 /* Set 2:1 scaling */
++#define AUX_GET_SCALE 0xE9 /* Get scaling factor */
++#define AUX_SET_STREAM 0xEA /* Set stream mode */
++#define AUX_SET_SAMPLE 0xF3 /* Set sample rate */
++#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */
++#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */
++#define AUX_RESET 0xFF /* Reset aux device */
++#define AUX_ACK 0xFA /* Command byte ACK. */
++
++#define AUX_BUF_SIZE 2048 /* This might be better divisible by
++ three to make overruns stay in sync
++ but then the read function would need
++ a lock etc - ick */
++
++struct aux_queue {
++ unsigned long head;
++ unsigned long tail;
++ wait_queue_head_t proc_list;
++ struct fasync_struct *fasync;
++ unsigned char buf[AUX_BUF_SIZE];
++};
++
++
++/* How to access the keyboard macros on this platform. */
++#define kbd_read_input() inb(KBD_DATA_REG)
++#define kbd_read_status() inb(KBD_STATUS_REG)
++#define kbd_write_output(val) outb(val, KBD_DATA_REG)
++#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
+diff -Nurp linux-2.6.22-590/arch/x86_64/kdb/x86_64-dis.c linux-2.6.22-600/arch/x86_64/kdb/x86_64-dis.c
+--- linux-2.6.22-590/arch/x86_64/kdb/x86_64-dis.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/arch/x86_64/kdb/x86_64-dis.c 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,4686 @@
++/* Print i386 instructions for GDB, the GNU debugger.
++ Copyright 1988, 1989, 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
++ 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
++
++ This file is part of GDB.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
++
++/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
++ * Run through col -b to remove trailing whitespace and various #ifdef/ifndef
++ * __KERNEL__ added.
++ * Keith Owens <kaos@sgi.com> 15 May 2006
++ */
++
++/* 80386 instruction printer by Pace Willisson (pace@prep.ai.mit.edu)
++ July 1988
++ modified by John Hassey (hassey@dg-rtp.dg.com)
++ x86-64 support added by Jan Hubicka (jh@suse.cz)
++ VIA PadLock support by Michal Ludvig (mludvig@suse.cz). */
++
++/* The main tables describing the instructions is essentially a copy
++ of the "Opcode Map" chapter (Appendix A) of the Intel 80386
++ Programmers Manual. Usually, there is a capital letter, followed
++ by a small letter. The capital letter tell the addressing mode,
++ and the small letter tells about the operand size. Refer to
++ the Intel manual for details. */
++
++#ifdef __KERNEL__
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/dis-asm.h>
++#include <linux/kdb.h>
++#define abort() BUG()
++#else /* __KERNEL__ */
++#include "dis-asm.h"
++#include "sysdep.h"
++#include "opintl.h"
++#endif /* __KERNEL__ */
++
++#define MAXLEN 20
++
++#ifndef __KERNEL__
++#include <setjmp.h>
++#endif /* __KERNEL__ */
++
++#ifndef UNIXWARE_COMPAT
++/* Set non-zero for broken, compatible instructions. Set to zero for
++ non-broken opcodes. */
++#define UNIXWARE_COMPAT 1
++#endif
++
++static int fetch_data (struct disassemble_info *, bfd_byte *);
++static void ckprefix (void);
++static const char *prefix_name (int, int);
++static int print_insn (bfd_vma, disassemble_info *);
++static void dofloat (int);
++static void OP_ST (int, int);
++static void OP_STi (int, int);
++static int putop (const char *, int);
++static void oappend (const char *);
++static void append_seg (void);
++static void OP_indirE (int, int);
++static void print_operand_value (char *, int, bfd_vma);
++static void OP_E (int, int);
++static void OP_G (int, int);
++static bfd_vma get64 (void);
++static bfd_signed_vma get32 (void);
++static bfd_signed_vma get32s (void);
++static int get16 (void);
++static void set_op (bfd_vma, int);
++static void OP_REG (int, int);
++static void OP_IMREG (int, int);
++static void OP_I (int, int);
++static void OP_I64 (int, int);
++static void OP_sI (int, int);
++static void OP_J (int, int);
++static void OP_SEG (int, int);
++static void OP_DIR (int, int);
++static void OP_OFF (int, int);
++static void OP_OFF64 (int, int);
++static void ptr_reg (int, int);
++static void OP_ESreg (int, int);
++static void OP_DSreg (int, int);
++static void OP_C (int, int);
++static void OP_D (int, int);
++static void OP_T (int, int);
++static void OP_Rd (int, int);
++static void OP_MMX (int, int);
++static void OP_XMM (int, int);
++static void OP_EM (int, int);
++static void OP_EX (int, int);
++static void OP_MS (int, int);
++static void OP_XS (int, int);
++static void OP_M (int, int);
++static void OP_VMX (int, int);
++static void OP_0fae (int, int);
++static void OP_0f07 (int, int);
++static void NOP_Fixup (int, int);
++static void OP_3DNowSuffix (int, int);
++static void OP_SIMD_Suffix (int, int);
++static void SIMD_Fixup (int, int);
++static void PNI_Fixup (int, int);
++static void SVME_Fixup (int, int);
++static void INVLPG_Fixup (int, int);
++static void BadOp (void);
++static void SEG_Fixup (int, int);
++static void VMX_Fixup (int, int);
++
++struct dis_private {
++ /* Points to first byte not fetched. */
++ bfd_byte *max_fetched;
++ bfd_byte the_buffer[MAXLEN];
++ bfd_vma insn_start;
++ int orig_sizeflag;
++#ifndef __KERNEL__
++ jmp_buf bailout;
++#endif /* __KERNEL__ */
++};
++
++/* The opcode for the fwait instruction, which we treat as a prefix
++ when we can. */
++#define FWAIT_OPCODE (0x9b)
++
++/* Set to 1 for 64bit mode disassembly. */
++static int mode_64bit;
++
++/* Flags for the prefixes for the current instruction. See below. */
++static int prefixes;
++
++/* REX prefix the current instruction. See below. */
++static int rex;
++/* Bits of REX we've already used. */
++static int rex_used;
++#define REX_MODE64 8
++#define REX_EXTX 4
++#define REX_EXTY 2
++#define REX_EXTZ 1
++/* Mark parts used in the REX prefix. When we are testing for
++ empty prefix (for 8bit register REX extension), just mask it
++ out. Otherwise test for REX bit is excuse for existence of REX
++ only in case value is nonzero. */
++#define USED_REX(value) \
++ { \
++ if (value) \
++ rex_used |= (rex & value) ? (value) | 0x40 : 0; \
++ else \
++ rex_used |= 0x40; \
++ }
++
++/* Flags for prefixes which we somehow handled when printing the
++ current instruction. */
++static int used_prefixes;
++
++/* Flags stored in PREFIXES. */
++#define PREFIX_REPZ 1
++#define PREFIX_REPNZ 2
++#define PREFIX_LOCK 4
++#define PREFIX_CS 8
++#define PREFIX_SS 0x10
++#define PREFIX_DS 0x20
++#define PREFIX_ES 0x40
++#define PREFIX_FS 0x80
++#define PREFIX_GS 0x100
++#define PREFIX_DATA 0x200
++#define PREFIX_ADDR 0x400
++#define PREFIX_FWAIT 0x800
++
++/* Make sure that bytes from INFO->PRIVATE_DATA->BUFFER (inclusive)
++ to ADDR (exclusive) are valid. Returns 1 for success, longjmps
++ on error. */
++#define FETCH_DATA(info, addr) \
++ ((addr) <= ((struct dis_private *) (info->private_data))->max_fetched \
++ ? 1 : fetch_data ((info), (addr)))
++
++static int
++fetch_data (struct disassemble_info *info, bfd_byte *addr)
++{
++ int status;
++ struct dis_private *priv = (struct dis_private *) info->private_data;
++ bfd_vma start = priv->insn_start + (priv->max_fetched - priv->the_buffer);
++
++ status = (*info->read_memory_func) (start,
++ priv->max_fetched,
++ addr - priv->max_fetched,
++ info);
++ if (status != 0)
++ {
++ /* If we did manage to read at least one byte, then
++ print_insn_i386 will do something sensible. Otherwise, print
++ an error. We do that here because this is where we know
++ STATUS. */
++ if (priv->max_fetched == priv->the_buffer)
++ (*info->memory_error_func) (status, start, info);
++#ifndef __KERNEL__
++ longjmp (priv->bailout, 1);
++#else /* __KERNEL__ */
++ /* XXX - what to do? */
++ kdb_printf("Hmm. longjmp.\n");
++#endif /* __KERNEL__ */
++ }
++ else
++ priv->max_fetched = addr;
++ return 1;
++}
++
++#define XX NULL, 0
++
++#define Eb OP_E, b_mode
++#define Ev OP_E, v_mode
++#define Ed OP_E, d_mode
++#define Eq OP_E, q_mode
++#define Edq OP_E, dq_mode
++#define Edqw OP_E, dqw_mode
++#define indirEv OP_indirE, branch_v_mode
++#define indirEp OP_indirE, f_mode
++#define Em OP_E, m_mode
++#define Ew OP_E, w_mode
++#define Ma OP_E, v_mode
++#define M OP_M, 0 /* lea, lgdt, etc. */
++#define Mp OP_M, f_mode /* 32 or 48 bit memory operand for LDS, LES etc */
++#define Gb OP_G, b_mode
++#define Gv OP_G, v_mode
++#define Gd OP_G, d_mode
++#define Gdq OP_G, dq_mode
++#define Gm OP_G, m_mode
++#define Gw OP_G, w_mode
++#define Rd OP_Rd, d_mode
++#define Rm OP_Rd, m_mode
++#define Ib OP_I, b_mode
++#define sIb OP_sI, b_mode /* sign extended byte */
++#define Iv OP_I, v_mode
++#define Iq OP_I, q_mode
++#define Iv64 OP_I64, v_mode
++#define Iw OP_I, w_mode
++#define I1 OP_I, const_1_mode
++#define Jb OP_J, b_mode
++#define Jv OP_J, v_mode
++#define Cm OP_C, m_mode
++#define Dm OP_D, m_mode
++#define Td OP_T, d_mode
++#define Sv SEG_Fixup, v_mode
++
++#define RMeAX OP_REG, eAX_reg
++#define RMeBX OP_REG, eBX_reg
++#define RMeCX OP_REG, eCX_reg
++#define RMeDX OP_REG, eDX_reg
++#define RMeSP OP_REG, eSP_reg
++#define RMeBP OP_REG, eBP_reg
++#define RMeSI OP_REG, eSI_reg
++#define RMeDI OP_REG, eDI_reg
++#define RMrAX OP_REG, rAX_reg
++#define RMrBX OP_REG, rBX_reg
++#define RMrCX OP_REG, rCX_reg
++#define RMrDX OP_REG, rDX_reg
++#define RMrSP OP_REG, rSP_reg
++#define RMrBP OP_REG, rBP_reg
++#define RMrSI OP_REG, rSI_reg
++#define RMrDI OP_REG, rDI_reg
++#define RMAL OP_REG, al_reg
++#define RMAL OP_REG, al_reg /* duplicate of the line above; identical redefinition is benign */
++#define RMCL OP_REG, cl_reg
++#define RMDL OP_REG, dl_reg
++#define RMBL OP_REG, bl_reg
++#define RMAH OP_REG, ah_reg
++#define RMCH OP_REG, ch_reg
++#define RMDH OP_REG, dh_reg
++#define RMBH OP_REG, bh_reg
++#define RMAX OP_REG, ax_reg
++#define RMDX OP_REG, dx_reg
++
++#define eAX OP_IMREG, eAX_reg
++#define eBX OP_IMREG, eBX_reg
++#define eCX OP_IMREG, eCX_reg
++#define eDX OP_IMREG, eDX_reg
++#define eSP OP_IMREG, eSP_reg
++#define eBP OP_IMREG, eBP_reg
++#define eSI OP_IMREG, eSI_reg
++#define eDI OP_IMREG, eDI_reg
++#define AL OP_IMREG, al_reg
++#define AL OP_IMREG, al_reg /* duplicate of the line above; identical redefinition is benign */
++#define CL OP_IMREG, cl_reg
++#define DL OP_IMREG, dl_reg
++#define BL OP_IMREG, bl_reg
++#define AH OP_IMREG, ah_reg
++#define CH OP_IMREG, ch_reg
++#define DH OP_IMREG, dh_reg
++#define BH OP_IMREG, bh_reg
++#define AX OP_IMREG, ax_reg
++#define DX OP_IMREG, dx_reg
++#define indirDX OP_IMREG, indir_dx_reg
++
++#define Sw OP_SEG, w_mode
++#define Ap OP_DIR, 0
++#define Ob OP_OFF, b_mode
++#define Ob64 OP_OFF64, b_mode
++#define Ov OP_OFF, v_mode
++#define Ov64 OP_OFF64, v_mode
++#define Xb OP_DSreg, eSI_reg
++#define Xv OP_DSreg, eSI_reg
++#define Yb OP_ESreg, eDI_reg
++#define Yv OP_ESreg, eDI_reg
++#define DSBX OP_DSreg, eBX_reg
++
++#define es OP_REG, es_reg
++#define ss OP_REG, ss_reg
++#define cs OP_REG, cs_reg
++#define ds OP_REG, ds_reg
++#define fs OP_REG, fs_reg
++#define gs OP_REG, gs_reg
++
++#define MX OP_MMX, 0
++#define XM OP_XMM, 0
++#define EM OP_EM, v_mode
++#define EX OP_EX, v_mode
++#define MS OP_MS, v_mode
++#define XS OP_XS, v_mode
++#define VM OP_VMX, q_mode
++#define OPSUF OP_3DNowSuffix, 0
++#define OPSIMD OP_SIMD_Suffix, 0
++
++#define cond_jump_flag NULL, cond_jump_mode
++#define loop_jcxz_flag NULL, loop_jcxz_mode
++
++/* bits in sizeflag */
++#define SUFFIX_ALWAYS 4
++#define AFLAG 2
++#define DFLAG 1
++
++#define b_mode 1 /* byte operand */
++#define v_mode 2 /* operand size depends on prefixes */
++#define w_mode 3 /* word operand */
++#define d_mode 4 /* double word operand */
++#define q_mode 5 /* quad word operand */
++#define t_mode 6 /* ten-byte operand */
++#define x_mode 7 /* 16-byte XMM operand */
++#define m_mode 8 /* d_mode in 32bit, q_mode in 64bit mode. */
++#define cond_jump_mode 9
++#define loop_jcxz_mode 10
++#define dq_mode 11 /* operand size depends on REX prefixes. */
++#define dqw_mode 12 /* registers like dq_mode, memory like w_mode. */
++#define f_mode 13 /* 4- or 6-byte pointer operand */
++#define const_1_mode 14
++#define branch_v_mode 15 /* v_mode for branch. */
++
++#define es_reg 100
++#define cs_reg 101
++#define ss_reg 102
++#define ds_reg 103
++#define fs_reg 104
++#define gs_reg 105
++
++#define eAX_reg 108
++#define eCX_reg 109
++#define eDX_reg 110
++#define eBX_reg 111
++#define eSP_reg 112
++#define eBP_reg 113
++#define eSI_reg 114
++#define eDI_reg 115
++
++#define al_reg 116
++#define cl_reg 117
++#define dl_reg 118
++#define bl_reg 119
++#define ah_reg 120
++#define ch_reg 121
++#define dh_reg 122
++#define bh_reg 123
++
++#define ax_reg 124
++#define cx_reg 125
++#define dx_reg 126
++#define bx_reg 127
++#define sp_reg 128
++#define bp_reg 129
++#define si_reg 130
++#define di_reg 131
++
++#define rAX_reg 132
++#define rCX_reg 133
++#define rDX_reg 134
++#define rBX_reg 135
++#define rSP_reg 136
++#define rBP_reg 137
++#define rSI_reg 138
++#define rDI_reg 139
++
++#define indir_dx_reg 150
++
++#define FLOATCODE 1
++#define USE_GROUPS 2
++#define USE_PREFIX_USER_TABLE 3
++#define X86_64_SPECIAL 4
++
++#define FLOAT NULL, NULL, FLOATCODE, NULL, 0, NULL, 0
++
++#define GRP1b NULL, NULL, USE_GROUPS, NULL, 0, NULL, 0
++#define GRP1S NULL, NULL, USE_GROUPS, NULL, 1, NULL, 0
++#define GRP1Ss NULL, NULL, USE_GROUPS, NULL, 2, NULL, 0
++#define GRP2b NULL, NULL, USE_GROUPS, NULL, 3, NULL, 0
++#define GRP2S NULL, NULL, USE_GROUPS, NULL, 4, NULL, 0
++#define GRP2b_one NULL, NULL, USE_GROUPS, NULL, 5, NULL, 0
++#define GRP2S_one NULL, NULL, USE_GROUPS, NULL, 6, NULL, 0
++#define GRP2b_cl NULL, NULL, USE_GROUPS, NULL, 7, NULL, 0
++#define GRP2S_cl NULL, NULL, USE_GROUPS, NULL, 8, NULL, 0
++#define GRP3b NULL, NULL, USE_GROUPS, NULL, 9, NULL, 0
++#define GRP3S NULL, NULL, USE_GROUPS, NULL, 10, NULL, 0
++#define GRP4 NULL, NULL, USE_GROUPS, NULL, 11, NULL, 0
++#define GRP5 NULL, NULL, USE_GROUPS, NULL, 12, NULL, 0
++#define GRP6 NULL, NULL, USE_GROUPS, NULL, 13, NULL, 0
++#define GRP7 NULL, NULL, USE_GROUPS, NULL, 14, NULL, 0
++#define GRP8 NULL, NULL, USE_GROUPS, NULL, 15, NULL, 0
++#define GRP9 NULL, NULL, USE_GROUPS, NULL, 16, NULL, 0
++#define GRP10 NULL, NULL, USE_GROUPS, NULL, 17, NULL, 0
++#define GRP11 NULL, NULL, USE_GROUPS, NULL, 18, NULL, 0
++#define GRP12 NULL, NULL, USE_GROUPS, NULL, 19, NULL, 0
++#define GRP13 NULL, NULL, USE_GROUPS, NULL, 20, NULL, 0
++#define GRP14 NULL, NULL, USE_GROUPS, NULL, 21, NULL, 0
++#define GRPAMD NULL, NULL, USE_GROUPS, NULL, 22, NULL, 0
++#define GRPPADLCK1 NULL, NULL, USE_GROUPS, NULL, 23, NULL, 0
++#define GRPPADLCK2 NULL, NULL, USE_GROUPS, NULL, 24, NULL, 0
++
++#define PREGRP0 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 0, NULL, 0
++#define PREGRP1 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 1, NULL, 0
++#define PREGRP2 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 2, NULL, 0
++#define PREGRP3 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 3, NULL, 0
++#define PREGRP4 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 4, NULL, 0
++#define PREGRP5 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 5, NULL, 0
++#define PREGRP6 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 6, NULL, 0
++#define PREGRP7 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 7, NULL, 0
++#define PREGRP8 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 8, NULL, 0
++#define PREGRP9 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 9, NULL, 0
++#define PREGRP10 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 10, NULL, 0
++#define PREGRP11 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 11, NULL, 0
++#define PREGRP12 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 12, NULL, 0
++#define PREGRP13 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 13, NULL, 0
++#define PREGRP14 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 14, NULL, 0
++#define PREGRP15 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 15, NULL, 0
++#define PREGRP16 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 16, NULL, 0
++#define PREGRP17 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 17, NULL, 0
++#define PREGRP18 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 18, NULL, 0
++#define PREGRP19 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 19, NULL, 0
++#define PREGRP20 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 20, NULL, 0
++#define PREGRP21 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 21, NULL, 0
++#define PREGRP22 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 22, NULL, 0
++#define PREGRP23 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 23, NULL, 0
++#define PREGRP24 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 24, NULL, 0
++#define PREGRP25 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 25, NULL, 0
++#define PREGRP26 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 26, NULL, 0
++#define PREGRP27 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 27, NULL, 0
++#define PREGRP28 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 28, NULL, 0
++#define PREGRP29 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 29, NULL, 0
++#define PREGRP30 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 30, NULL, 0
++#define PREGRP31 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 31, NULL, 0
++#define PREGRP32 NULL, NULL, USE_PREFIX_USER_TABLE, NULL, 32, NULL, 0
++
++#define X86_64_0 NULL, NULL, X86_64_SPECIAL, NULL, 0, NULL, 0
++
++typedef void (*op_rtn) (int bytemode, int sizeflag);
++
++struct dis386 {
++ const char *name;
++ op_rtn op1;
++ int bytemode1;
++ op_rtn op2;
++ int bytemode2;
++ op_rtn op3;
++ int bytemode3;
++};
++
++/* Upper case letters in the instruction names here are macros.
++ 'A' => print 'b' if no register operands or suffix_always is true
++ 'B' => print 'b' if suffix_always is true
++ 'C' => print 's' or 'l' ('w' or 'd' in Intel mode) depending on operand
++ . size prefix
++ 'E' => print 'e' if 32-bit form of jcxz
++ 'F' => print 'w' or 'l' depending on address size prefix (loop insns)
++ 'H' => print ",pt" or ",pn" branch hint
++ 'I' => honor following macro letter even in Intel mode (implemented only
++ . for some of the macro letters)
++ 'J' => print 'l'
++ 'L' => print 'l' if suffix_always is true
++ 'N' => print 'n' if instruction has no wait "prefix"
++ 'O' => print 'd', or 'o'
++ 'P' => print 'w', 'l' or 'q' if instruction has an operand size prefix,
++ . or suffix_always is true. print 'q' if rex prefix is present.
++ 'Q' => print 'w', 'l' or 'q' if no register operands or suffix_always
++ . is true
++ 'R' => print 'w', 'l' or 'q' ("wd" or "dq" in intel mode)
++ 'S' => print 'w', 'l' or 'q' if suffix_always is true
++ 'T' => print 'q' in 64bit mode and behave as 'P' otherwise
++ 'U' => print 'q' in 64bit mode and behave as 'Q' otherwise
++ 'W' => print 'b' or 'w' ("w" or "de" in intel mode)
++ 'X' => print 's', 'd' depending on data16 prefix (for XMM)
++ 'Y' => 'q' if instruction has an REX 64bit overwrite prefix
++
++ Many of the above letters print nothing in Intel mode. See "putop"
++ for the details.
++
++ Braces '{' and '}', and vertical bars '|', indicate alternative
++ mnemonic strings for AT&T, Intel, X86_64 AT&T, and X86_64 Intel
++ modes. In cases where there are only two alternatives, the X86_64
++ instruction is reserved, and "(bad)" is printed.
++*/
++
++static const struct dis386 dis386[] = {
++ /* 00 */
++ { "addB", Eb, Gb, XX },
++ { "addS", Ev, Gv, XX },
++ { "addB", Gb, Eb, XX },
++ { "addS", Gv, Ev, XX },
++ { "addB", AL, Ib, XX },
++ { "addS", eAX, Iv, XX },
++ { "push{T|}", es, XX, XX },
++ { "pop{T|}", es, XX, XX },
++ /* 08 */
++ { "orB", Eb, Gb, XX },
++ { "orS", Ev, Gv, XX },
++ { "orB", Gb, Eb, XX },
++ { "orS", Gv, Ev, XX },
++ { "orB", AL, Ib, XX },
++ { "orS", eAX, Iv, XX },
++ { "push{T|}", cs, XX, XX },
++ { "(bad)", XX, XX, XX }, /* 0x0f extended opcode escape */
++ /* 10 */
++ { "adcB", Eb, Gb, XX },
++ { "adcS", Ev, Gv, XX },
++ { "adcB", Gb, Eb, XX },
++ { "adcS", Gv, Ev, XX },
++ { "adcB", AL, Ib, XX },
++ { "adcS", eAX, Iv, XX },
++ { "push{T|}", ss, XX, XX },
++ { "pop{T|}", ss, XX, XX },
++ /* 18 */
++ { "sbbB", Eb, Gb, XX },
++ { "sbbS", Ev, Gv, XX },
++ { "sbbB", Gb, Eb, XX },
++ { "sbbS", Gv, Ev, XX },
++ { "sbbB", AL, Ib, XX },
++ { "sbbS", eAX, Iv, XX },
++ { "push{T|}", ds, XX, XX },
++ { "pop{T|}", ds, XX, XX },
++ /* 20 */
++ { "andB", Eb, Gb, XX },
++ { "andS", Ev, Gv, XX },
++ { "andB", Gb, Eb, XX },
++ { "andS", Gv, Ev, XX },
++ { "andB", AL, Ib, XX },
++ { "andS", eAX, Iv, XX },
++ { "(bad)", XX, XX, XX }, /* SEG ES prefix */
++ { "daa{|}", XX, XX, XX },
++ /* 28 */
++ { "subB", Eb, Gb, XX },
++ { "subS", Ev, Gv, XX },
++ { "subB", Gb, Eb, XX },
++ { "subS", Gv, Ev, XX },
++ { "subB", AL, Ib, XX },
++ { "subS", eAX, Iv, XX },
++ { "(bad)", XX, XX, XX }, /* SEG CS prefix */
++ { "das{|}", XX, XX, XX },
++ /* 30 */
++ { "xorB", Eb, Gb, XX },
++ { "xorS", Ev, Gv, XX },
++ { "xorB", Gb, Eb, XX },
++ { "xorS", Gv, Ev, XX },
++ { "xorB", AL, Ib, XX },
++ { "xorS", eAX, Iv, XX },
++ { "(bad)", XX, XX, XX }, /* SEG SS prefix */
++ { "aaa{|}", XX, XX, XX },
++ /* 38 */
++ { "cmpB", Eb, Gb, XX },
++ { "cmpS", Ev, Gv, XX },
++ { "cmpB", Gb, Eb, XX },
++ { "cmpS", Gv, Ev, XX },
++ { "cmpB", AL, Ib, XX },
++ { "cmpS", eAX, Iv, XX },
++ { "(bad)", XX, XX, XX }, /* SEG DS prefix */
++ { "aas{|}", XX, XX, XX },
++ /* 40 */
++ { "inc{S|}", RMeAX, XX, XX },
++ { "inc{S|}", RMeCX, XX, XX },
++ { "inc{S|}", RMeDX, XX, XX },
++ { "inc{S|}", RMeBX, XX, XX },
++ { "inc{S|}", RMeSP, XX, XX },
++ { "inc{S|}", RMeBP, XX, XX },
++ { "inc{S|}", RMeSI, XX, XX },
++ { "inc{S|}", RMeDI, XX, XX },
++ /* 48 */
++ { "dec{S|}", RMeAX, XX, XX },
++ { "dec{S|}", RMeCX, XX, XX },
++ { "dec{S|}", RMeDX, XX, XX },
++ { "dec{S|}", RMeBX, XX, XX },
++ { "dec{S|}", RMeSP, XX, XX },
++ { "dec{S|}", RMeBP, XX, XX },
++ { "dec{S|}", RMeSI, XX, XX },
++ { "dec{S|}", RMeDI, XX, XX },
++ /* 50 */
++ { "pushS", RMrAX, XX, XX },
++ { "pushS", RMrCX, XX, XX },
++ { "pushS", RMrDX, XX, XX },
++ { "pushS", RMrBX, XX, XX },
++ { "pushS", RMrSP, XX, XX },
++ { "pushS", RMrBP, XX, XX },
++ { "pushS", RMrSI, XX, XX },
++ { "pushS", RMrDI, XX, XX },
++ /* 58 */
++ { "popS", RMrAX, XX, XX },
++ { "popS", RMrCX, XX, XX },
++ { "popS", RMrDX, XX, XX },
++ { "popS", RMrBX, XX, XX },
++ { "popS", RMrSP, XX, XX },
++ { "popS", RMrBP, XX, XX },
++ { "popS", RMrSI, XX, XX },
++ { "popS", RMrDI, XX, XX },
++ /* 60 */
++ { "pusha{P|}", XX, XX, XX },
++ { "popa{P|}", XX, XX, XX },
++ { "bound{S|}", Gv, Ma, XX },
++ { X86_64_0 },
++ { "(bad)", XX, XX, XX }, /* seg fs */
++ { "(bad)", XX, XX, XX }, /* seg gs */
++ { "(bad)", XX, XX, XX }, /* op size prefix */
++ { "(bad)", XX, XX, XX }, /* adr size prefix */
++ /* 68 */
++ { "pushT", Iq, XX, XX },
++ { "imulS", Gv, Ev, Iv },
++ { "pushT", sIb, XX, XX },
++ { "imulS", Gv, Ev, sIb },
++ { "ins{b||b|}", Yb, indirDX, XX },
++ { "ins{R||R|}", Yv, indirDX, XX },
++ { "outs{b||b|}", indirDX, Xb, XX },
++ { "outs{R||R|}", indirDX, Xv, XX },
++ /* 70 */
++ { "joH", Jb, XX, cond_jump_flag },
++ { "jnoH", Jb, XX, cond_jump_flag },
++ { "jbH", Jb, XX, cond_jump_flag },
++ { "jaeH", Jb, XX, cond_jump_flag },
++ { "jeH", Jb, XX, cond_jump_flag },
++ { "jneH", Jb, XX, cond_jump_flag },
++ { "jbeH", Jb, XX, cond_jump_flag },
++ { "jaH", Jb, XX, cond_jump_flag },
++ /* 78 */
++ { "jsH", Jb, XX, cond_jump_flag },
++ { "jnsH", Jb, XX, cond_jump_flag },
++ { "jpH", Jb, XX, cond_jump_flag },
++ { "jnpH", Jb, XX, cond_jump_flag },
++ { "jlH", Jb, XX, cond_jump_flag },
++ { "jgeH", Jb, XX, cond_jump_flag },
++ { "jleH", Jb, XX, cond_jump_flag },
++ { "jgH", Jb, XX, cond_jump_flag },
++ /* 80 */
++ { GRP1b },
++ { GRP1S },
++ { "(bad)", XX, XX, XX },
++ { GRP1Ss },
++ { "testB", Eb, Gb, XX },
++ { "testS", Ev, Gv, XX },
++ { "xchgB", Eb, Gb, XX },
++ { "xchgS", Ev, Gv, XX },
++ /* 88 */
++ { "movB", Eb, Gb, XX },
++ { "movS", Ev, Gv, XX },
++ { "movB", Gb, Eb, XX },
++ { "movS", Gv, Ev, XX },
++ { "movQ", Sv, Sw, XX },
++ { "leaS", Gv, M, XX },
++ { "movQ", Sw, Sv, XX },
++ { "popU", Ev, XX, XX },
++ /* 90 */
++ { "nop", NOP_Fixup, 0, XX, XX },
++ { "xchgS", RMeCX, eAX, XX },
++ { "xchgS", RMeDX, eAX, XX },
++ { "xchgS", RMeBX, eAX, XX },
++ { "xchgS", RMeSP, eAX, XX },
++ { "xchgS", RMeBP, eAX, XX },
++ { "xchgS", RMeSI, eAX, XX },
++ { "xchgS", RMeDI, eAX, XX },
++ /* 98 */
++ { "cW{tR||tR|}", XX, XX, XX },
++ { "cR{tO||tO|}", XX, XX, XX },
++ { "Jcall{T|}", Ap, XX, XX },
++ { "(bad)", XX, XX, XX }, /* fwait */
++ { "pushfT", XX, XX, XX },
++ { "popfT", XX, XX, XX },
++ { "sahf{|}", XX, XX, XX },
++ { "lahf{|}", XX, XX, XX },
++ /* a0 */
++ { "movB", AL, Ob64, XX },
++ { "movS", eAX, Ov64, XX },
++ { "movB", Ob64, AL, XX },
++ { "movS", Ov64, eAX, XX },
++ { "movs{b||b|}", Yb, Xb, XX },
++ { "movs{R||R|}", Yv, Xv, XX },
++ { "cmps{b||b|}", Xb, Yb, XX },
++ { "cmps{R||R|}", Xv, Yv, XX },
++ /* a8 */
++ { "testB", AL, Ib, XX },
++ { "testS", eAX, Iv, XX },
++ { "stosB", Yb, AL, XX },
++ { "stosS", Yv, eAX, XX },
++ { "lodsB", AL, Xb, XX },
++ { "lodsS", eAX, Xv, XX },
++ { "scasB", AL, Yb, XX },
++ { "scasS", eAX, Yv, XX },
++ /* b0 */
++ { "movB", RMAL, Ib, XX },
++ { "movB", RMCL, Ib, XX },
++ { "movB", RMDL, Ib, XX },
++ { "movB", RMBL, Ib, XX },
++ { "movB", RMAH, Ib, XX },
++ { "movB", RMCH, Ib, XX },
++ { "movB", RMDH, Ib, XX },
++ { "movB", RMBH, Ib, XX },
++ /* b8 */
++ { "movS", RMeAX, Iv64, XX },
++ { "movS", RMeCX, Iv64, XX },
++ { "movS", RMeDX, Iv64, XX },
++ { "movS", RMeBX, Iv64, XX },
++ { "movS", RMeSP, Iv64, XX },
++ { "movS", RMeBP, Iv64, XX },
++ { "movS", RMeSI, Iv64, XX },
++ { "movS", RMeDI, Iv64, XX },
++ /* c0 */
++ { GRP2b },
++ { GRP2S },
++ { "retT", Iw, XX, XX },
++ { "retT", XX, XX, XX },
++ { "les{S|}", Gv, Mp, XX },
++ { "ldsS", Gv, Mp, XX },
++ { "movA", Eb, Ib, XX },
++ { "movQ", Ev, Iv, XX },
++ /* c8 */
++ { "enterT", Iw, Ib, XX },
++ { "leaveT", XX, XX, XX },
++ { "lretP", Iw, XX, XX },
++ { "lretP", XX, XX, XX },
++ { "int3", XX, XX, XX },
++ { "int", Ib, XX, XX },
++ { "into{|}", XX, XX, XX },
++ { "iretP", XX, XX, XX },
++ /* d0 */
++ { GRP2b_one },
++ { GRP2S_one },
++ { GRP2b_cl },
++ { GRP2S_cl },
++ { "aam{|}", sIb, XX, XX },
++ { "aad{|}", sIb, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "xlat", DSBX, XX, XX },
++ /* d8 */
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ { FLOAT },
++ /* e0 */
++ { "loopneFH", Jb, XX, loop_jcxz_flag },
++ { "loopeFH", Jb, XX, loop_jcxz_flag },
++ { "loopFH", Jb, XX, loop_jcxz_flag },
++ { "jEcxzH", Jb, XX, loop_jcxz_flag },
++ { "inB", AL, Ib, XX },
++ { "inS", eAX, Ib, XX },
++ { "outB", Ib, AL, XX },
++ { "outS", Ib, eAX, XX },
++ /* e8 */
++ { "callT", Jv, XX, XX },
++ { "jmpT", Jv, XX, XX },
++ { "Jjmp{T|}", Ap, XX, XX },
++ { "jmp", Jb, XX, XX },
++ { "inB", AL, indirDX, XX },
++ { "inS", eAX, indirDX, XX },
++ { "outB", indirDX, AL, XX },
++ { "outS", indirDX, eAX, XX },
++ /* f0 */
++ { "(bad)", XX, XX, XX }, /* lock prefix */
++ { "icebp", XX, XX, XX },
++ { "(bad)", XX, XX, XX }, /* repne */
++ { "(bad)", XX, XX, XX }, /* repz */
++ { "hlt", XX, XX, XX },
++ { "cmc", XX, XX, XX },
++ { GRP3b },
++ { GRP3S },
++ /* f8 */
++ { "clc", XX, XX, XX },
++ { "stc", XX, XX, XX },
++ { "cli", XX, XX, XX },
++ { "sti", XX, XX, XX },
++ { "cld", XX, XX, XX },
++ { "std", XX, XX, XX },
++ { GRP4 },
++ { GRP5 },
++};
++
++static const struct dis386 dis386_twobyte[] = {
++ /* 00 */
++ { GRP6 },
++ { GRP7 },
++ { "larS", Gv, Ew, XX },
++ { "lslS", Gv, Ew, XX },
++ { "(bad)", XX, XX, XX },
++ { "syscall", XX, XX, XX },
++ { "clts", XX, XX, XX },
++ { "sysretP", XX, XX, XX },
++ /* 08 */
++ { "invd", XX, XX, XX },
++ { "wbinvd", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "ud2a", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { GRPAMD },
++ { "femms", XX, XX, XX },
++ { "", MX, EM, OPSUF }, /* See OP_3DNowSuffix. */
++ /* 10 */
++ { PREGRP8 },
++ { PREGRP9 },
++ { PREGRP30 },
++ { "movlpX", EX, XM, SIMD_Fixup, 'h' },
++ { "unpcklpX", XM, EX, XX },
++ { "unpckhpX", XM, EX, XX },
++ { PREGRP31 },
++ { "movhpX", EX, XM, SIMD_Fixup, 'l' },
++ /* 18 */
++ { GRP14 },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ /* 20 */
++ { "movL", Rm, Cm, XX },
++ { "movL", Rm, Dm, XX },
++ { "movL", Cm, Rm, XX },
++ { "movL", Dm, Rm, XX },
++ { "movL", Rd, Td, XX },
++ { "(bad)", XX, XX, XX },
++ { "movL", Td, Rd, XX },
++ { "(bad)", XX, XX, XX },
++ /* 28 */
++ { "movapX", XM, EX, XX },
++ { "movapX", EX, XM, XX },
++ { PREGRP2 },
++ { "movntpX", Ev, XM, XX },
++ { PREGRP4 },
++ { PREGRP3 },
++ { "ucomisX", XM, EX, XX },
++ { "comisX", XM, EX, XX },
++ /* 30 */
++ { "wrmsr", XX, XX, XX },
++ { "rdtsc", XX, XX, XX },
++ { "rdmsr", XX, XX, XX },
++ { "rdpmc", XX, XX, XX },
++ { "sysenter", XX, XX, XX },
++ { "sysexit", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ /* 38 */
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ /* 40 */
++ { "cmovo", Gv, Ev, XX },
++ { "cmovno", Gv, Ev, XX },
++ { "cmovb", Gv, Ev, XX },
++ { "cmovae", Gv, Ev, XX },
++ { "cmove", Gv, Ev, XX },
++ { "cmovne", Gv, Ev, XX },
++ { "cmovbe", Gv, Ev, XX },
++ { "cmova", Gv, Ev, XX },
++ /* 48 */
++ { "cmovs", Gv, Ev, XX },
++ { "cmovns", Gv, Ev, XX },
++ { "cmovp", Gv, Ev, XX },
++ { "cmovnp", Gv, Ev, XX },
++ { "cmovl", Gv, Ev, XX },
++ { "cmovge", Gv, Ev, XX },
++ { "cmovle", Gv, Ev, XX },
++ { "cmovg", Gv, Ev, XX },
++ /* 50 */
++ { "movmskpX", Gdq, XS, XX },
++ { PREGRP13 },
++ { PREGRP12 },
++ { PREGRP11 },
++ { "andpX", XM, EX, XX },
++ { "andnpX", XM, EX, XX },
++ { "orpX", XM, EX, XX },
++ { "xorpX", XM, EX, XX },
++ /* 58 */
++ { PREGRP0 },
++ { PREGRP10 },
++ { PREGRP17 },
++ { PREGRP16 },
++ { PREGRP14 },
++ { PREGRP7 },
++ { PREGRP5 },
++ { PREGRP6 },
++ /* 60 */
++ { "punpcklbw", MX, EM, XX },
++ { "punpcklwd", MX, EM, XX },
++ { "punpckldq", MX, EM, XX },
++ { "packsswb", MX, EM, XX },
++ { "pcmpgtb", MX, EM, XX },
++ { "pcmpgtw", MX, EM, XX },
++ { "pcmpgtd", MX, EM, XX },
++ { "packuswb", MX, EM, XX },
++ /* 68 */
++ { "punpckhbw", MX, EM, XX },
++ { "punpckhwd", MX, EM, XX },
++ { "punpckhdq", MX, EM, XX },
++ { "packssdw", MX, EM, XX },
++ { PREGRP26 },
++ { PREGRP24 },
++ { "movd", MX, Edq, XX },
++ { PREGRP19 },
++ /* 70 */
++ { PREGRP22 },
++ { GRP10 },
++ { GRP11 },
++ { GRP12 },
++ { "pcmpeqb", MX, EM, XX },
++ { "pcmpeqw", MX, EM, XX },
++ { "pcmpeqd", MX, EM, XX },
++ { "emms", XX, XX, XX },
++ /* 78 */
++ { "vmread", Em, Gm, XX },
++ { "vmwrite", Gm, Em, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { PREGRP28 },
++ { PREGRP29 },
++ { PREGRP23 },
++ { PREGRP20 },
++ /* 80 */
++ { "joH", Jv, XX, cond_jump_flag },
++ { "jnoH", Jv, XX, cond_jump_flag },
++ { "jbH", Jv, XX, cond_jump_flag },
++ { "jaeH", Jv, XX, cond_jump_flag },
++ { "jeH", Jv, XX, cond_jump_flag },
++ { "jneH", Jv, XX, cond_jump_flag },
++ { "jbeH", Jv, XX, cond_jump_flag },
++ { "jaH", Jv, XX, cond_jump_flag },
++ /* 88 */
++ { "jsH", Jv, XX, cond_jump_flag },
++ { "jnsH", Jv, XX, cond_jump_flag },
++ { "jpH", Jv, XX, cond_jump_flag },
++ { "jnpH", Jv, XX, cond_jump_flag },
++ { "jlH", Jv, XX, cond_jump_flag },
++ { "jgeH", Jv, XX, cond_jump_flag },
++ { "jleH", Jv, XX, cond_jump_flag },
++ { "jgH", Jv, XX, cond_jump_flag },
++ /* 90 */
++ { "seto", Eb, XX, XX },
++ { "setno", Eb, XX, XX },
++ { "setb", Eb, XX, XX },
++ { "setae", Eb, XX, XX },
++ { "sete", Eb, XX, XX },
++ { "setne", Eb, XX, XX },
++ { "setbe", Eb, XX, XX },
++ { "seta", Eb, XX, XX },
++ /* 98 */
++ { "sets", Eb, XX, XX },
++ { "setns", Eb, XX, XX },
++ { "setp", Eb, XX, XX },
++ { "setnp", Eb, XX, XX },
++ { "setl", Eb, XX, XX },
++ { "setge", Eb, XX, XX },
++ { "setle", Eb, XX, XX },
++ { "setg", Eb, XX, XX },
++ /* a0 */
++ { "pushT", fs, XX, XX },
++ { "popT", fs, XX, XX },
++ { "cpuid", XX, XX, XX },
++ { "btS", Ev, Gv, XX },
++ { "shldS", Ev, Gv, Ib },
++ { "shldS", Ev, Gv, CL },
++ { GRPPADLCK2 },
++ { GRPPADLCK1 },
++ /* a8 */
++ { "pushT", gs, XX, XX },
++ { "popT", gs, XX, XX },
++ { "rsm", XX, XX, XX },
++ { "btsS", Ev, Gv, XX },
++ { "shrdS", Ev, Gv, Ib },
++ { "shrdS", Ev, Gv, CL },
++ { GRP13 },
++ { "imulS", Gv, Ev, XX },
++ /* b0 */
++ { "cmpxchgB", Eb, Gb, XX },
++ { "cmpxchgS", Ev, Gv, XX },
++ { "lssS", Gv, Mp, XX },
++ { "btrS", Ev, Gv, XX },
++ { "lfsS", Gv, Mp, XX },
++ { "lgsS", Gv, Mp, XX },
++ { "movz{bR|x|bR|x}", Gv, Eb, XX },
++ { "movz{wR|x|wR|x}", Gv, Ew, XX }, /* yes, there really is movzww ! */
++ /* b8 */
++ { "(bad)", XX, XX, XX },
++ { "ud2b", XX, XX, XX },
++ { GRP8 },
++ { "btcS", Ev, Gv, XX },
++ { "bsfS", Gv, Ev, XX },
++ { "bsrS", Gv, Ev, XX },
++ { "movs{bR|x|bR|x}", Gv, Eb, XX },
++ { "movs{wR|x|wR|x}", Gv, Ew, XX }, /* yes, there really is movsww ! */
++ /* c0 */
++ { "xaddB", Eb, Gb, XX },
++ { "xaddS", Ev, Gv, XX },
++ { PREGRP1 },
++ { "movntiS", Ev, Gv, XX },
++ { "pinsrw", MX, Edqw, Ib },
++ { "pextrw", Gdq, MS, Ib },
++ { "shufpX", XM, EX, Ib },
++ { GRP9 },
++ /* c8 */
++ { "bswap", RMeAX, XX, XX },
++ { "bswap", RMeCX, XX, XX },
++ { "bswap", RMeDX, XX, XX },
++ { "bswap", RMeBX, XX, XX },
++ { "bswap", RMeSP, XX, XX },
++ { "bswap", RMeBP, XX, XX },
++ { "bswap", RMeSI, XX, XX },
++ { "bswap", RMeDI, XX, XX },
++ /* d0 */
++ { PREGRP27 },
++ { "psrlw", MX, EM, XX },
++ { "psrld", MX, EM, XX },
++ { "psrlq", MX, EM, XX },
++ { "paddq", MX, EM, XX },
++ { "pmullw", MX, EM, XX },
++ { PREGRP21 },
++ { "pmovmskb", Gdq, MS, XX },
++ /* d8 */
++ { "psubusb", MX, EM, XX },
++ { "psubusw", MX, EM, XX },
++ { "pminub", MX, EM, XX },
++ { "pand", MX, EM, XX },
++ { "paddusb", MX, EM, XX },
++ { "paddusw", MX, EM, XX },
++ { "pmaxub", MX, EM, XX },
++ { "pandn", MX, EM, XX },
++ /* e0 */
++ { "pavgb", MX, EM, XX },
++ { "psraw", MX, EM, XX },
++ { "psrad", MX, EM, XX },
++ { "pavgw", MX, EM, XX },
++ { "pmulhuw", MX, EM, XX },
++ { "pmulhw", MX, EM, XX },
++ { PREGRP15 },
++ { PREGRP25 },
++ /* e8 */
++ { "psubsb", MX, EM, XX },
++ { "psubsw", MX, EM, XX },
++ { "pminsw", MX, EM, XX },
++ { "por", MX, EM, XX },
++ { "paddsb", MX, EM, XX },
++ { "paddsw", MX, EM, XX },
++ { "pmaxsw", MX, EM, XX },
++ { "pxor", MX, EM, XX },
++ /* f0 */
++ { PREGRP32 },
++ { "psllw", MX, EM, XX },
++ { "pslld", MX, EM, XX },
++ { "psllq", MX, EM, XX },
++ { "pmuludq", MX, EM, XX },
++ { "pmaddwd", MX, EM, XX },
++ { "psadbw", MX, EM, XX },
++ { PREGRP18 },
++ /* f8 */
++ { "psubb", MX, EM, XX },
++ { "psubw", MX, EM, XX },
++ { "psubd", MX, EM, XX },
++ { "psubq", MX, EM, XX },
++ { "paddb", MX, EM, XX },
++ { "paddw", MX, EM, XX },
++ { "paddd", MX, EM, XX },
++ { "(bad)", XX, XX, XX }
++};
++
++static const unsigned char onebyte_has_modrm[256] = {
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++ /* ------------------------------- */
++ /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
++ /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
++ /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
++ /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
++ /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
++ /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
++ /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
++ /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
++ /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
++ /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
++ /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
++ /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
++ /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
++ /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
++ /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
++ /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
++ /* ------------------------------- */
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++};
++
++static const unsigned char twobyte_has_modrm[256] = {
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++ /* ------------------------------- */
++ /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
++ /* 10 */ 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0, /* 1f */
++ /* 20 */ 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1, /* 2f */
++ /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
++ /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
++ /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
++ /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
++ /* 70 */ 1,1,1,1,1,1,1,0,1,1,0,0,1,1,1,1, /* 7f */
++ /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
++ /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
++ /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
++ /* b0 */ 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1, /* bf */
++ /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
++ /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
++ /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
++ /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
++ /* ------------------------------- */
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++};
++
++static const unsigned char twobyte_uses_SSE_prefix[256] = {
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++ /* ------------------------------- */
++ /* 00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */
++ /* 10 */ 1,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0, /* 1f */
++ /* 20 */ 0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0, /* 2f */
++ /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */
++ /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 4f */
++ /* 50 */ 0,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* 5f */
++ /* 60 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1, /* 6f */
++ /* 70 */ 1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1, /* 7f */
++ /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
++ /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 9f */
++ /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* af */
++ /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* bf */
++ /* c0 */ 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, /* cf */
++ /* d0 */ 1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* df */
++ /* e0 */ 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, /* ef */
++ /* f0 */ 1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0 /* ff */
++ /* ------------------------------- */
++ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
++};
++
++static char obuf[100];
++static char *obufp;
++static char scratchbuf[100];
++static unsigned char *start_codep;
++static unsigned char *insn_codep;
++static unsigned char *codep;
++static disassemble_info *the_info;
++static int mod;
++static int rm;
++static int reg;
++static unsigned char need_modrm;
++
++/* If we are accessing mod/rm/reg without need_modrm set, then the
++ values are stale. Hitting this abort likely indicates that you
++ need to update onebyte_has_modrm or twobyte_has_modrm. */
++#define MODRM_CHECK if (!need_modrm) abort ()
++
++static const char **names64;
++static const char **names32;
++static const char **names16;
++static const char **names8;
++static const char **names8rex;
++static const char **names_seg;
++static const char **index16;
++
++static const char *intel_names64[] = {
++ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
++ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
++};
++static const char *intel_names32[] = {
++ "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
++ "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d"
++};
++static const char *intel_names16[] = {
++ "ax", "cx", "dx", "bx", "sp", "bp", "si", "di",
++ "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
++};
++static const char *intel_names8[] = {
++ "al", "cl", "dl", "bl", "ah", "ch", "dh", "bh",
++};
++static const char *intel_names8rex[] = {
++ "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
++ "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", "r14b", "r15b"
++};
++static const char *intel_names_seg[] = {
++ "es", "cs", "ss", "ds", "fs", "gs", "?", "?",
++};
++static const char *intel_index16[] = {
++ "bx+si", "bx+di", "bp+si", "bp+di", "si", "di", "bp", "bx"
++};
++
++static const char *att_names64[] = {
++ "%rax", "%rcx", "%rdx", "%rbx", "%rsp", "%rbp", "%rsi", "%rdi",
++ "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15"
++};
++static const char *att_names32[] = {
++ "%eax", "%ecx", "%edx", "%ebx", "%esp", "%ebp", "%esi", "%edi",
++ "%r8d", "%r9d", "%r10d", "%r11d", "%r12d", "%r13d", "%r14d", "%r15d"
++};
++static const char *att_names16[] = {
++ "%ax", "%cx", "%dx", "%bx", "%sp", "%bp", "%si", "%di",
++ "%r8w", "%r9w", "%r10w", "%r11w", "%r12w", "%r13w", "%r14w", "%r15w"
++};
++static const char *att_names8[] = {
++ "%al", "%cl", "%dl", "%bl", "%ah", "%ch", "%dh", "%bh",
++};
++static const char *att_names8rex[] = {
++ "%al", "%cl", "%dl", "%bl", "%spl", "%bpl", "%sil", "%dil",
++ "%r8b", "%r9b", "%r10b", "%r11b", "%r12b", "%r13b", "%r14b", "%r15b"
++};
++static const char *att_names_seg[] = {
++ "%es", "%cs", "%ss", "%ds", "%fs", "%gs", "%?", "%?",
++};
++static const char *att_index16[] = {
++ "%bx,%si", "%bx,%di", "%bp,%si", "%bp,%di", "%si", "%di", "%bp", "%bx"
++};
++
++static const struct dis386 grps[][8] = {
++ /* GRP1b */
++ {
++ { "addA", Eb, Ib, XX },
++ { "orA", Eb, Ib, XX },
++ { "adcA", Eb, Ib, XX },
++ { "sbbA", Eb, Ib, XX },
++ { "andA", Eb, Ib, XX },
++ { "subA", Eb, Ib, XX },
++ { "xorA", Eb, Ib, XX },
++ { "cmpA", Eb, Ib, XX }
++ },
++ /* GRP1S */
++ {
++ { "addQ", Ev, Iv, XX },
++ { "orQ", Ev, Iv, XX },
++ { "adcQ", Ev, Iv, XX },
++ { "sbbQ", Ev, Iv, XX },
++ { "andQ", Ev, Iv, XX },
++ { "subQ", Ev, Iv, XX },
++ { "xorQ", Ev, Iv, XX },
++ { "cmpQ", Ev, Iv, XX }
++ },
++ /* GRP1Ss */
++ {
++ { "addQ", Ev, sIb, XX },
++ { "orQ", Ev, sIb, XX },
++ { "adcQ", Ev, sIb, XX },
++ { "sbbQ", Ev, sIb, XX },
++ { "andQ", Ev, sIb, XX },
++ { "subQ", Ev, sIb, XX },
++ { "xorQ", Ev, sIb, XX },
++ { "cmpQ", Ev, sIb, XX }
++ },
++ /* GRP2b */
++ {
++ { "rolA", Eb, Ib, XX },
++ { "rorA", Eb, Ib, XX },
++ { "rclA", Eb, Ib, XX },
++ { "rcrA", Eb, Ib, XX },
++ { "shlA", Eb, Ib, XX },
++ { "shrA", Eb, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "sarA", Eb, Ib, XX },
++ },
++ /* GRP2S */
++ {
++ { "rolQ", Ev, Ib, XX },
++ { "rorQ", Ev, Ib, XX },
++ { "rclQ", Ev, Ib, XX },
++ { "rcrQ", Ev, Ib, XX },
++ { "shlQ", Ev, Ib, XX },
++ { "shrQ", Ev, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "sarQ", Ev, Ib, XX },
++ },
++ /* GRP2b_one */
++ {
++ { "rolA", Eb, I1, XX },
++ { "rorA", Eb, I1, XX },
++ { "rclA", Eb, I1, XX },
++ { "rcrA", Eb, I1, XX },
++ { "shlA", Eb, I1, XX },
++ { "shrA", Eb, I1, XX },
++ { "(bad)", XX, XX, XX },
++ { "sarA", Eb, I1, XX },
++ },
++ /* GRP2S_one */
++ {
++ { "rolQ", Ev, I1, XX },
++ { "rorQ", Ev, I1, XX },
++ { "rclQ", Ev, I1, XX },
++ { "rcrQ", Ev, I1, XX },
++ { "shlQ", Ev, I1, XX },
++ { "shrQ", Ev, I1, XX },
++ { "(bad)", XX, XX, XX},
++ { "sarQ", Ev, I1, XX },
++ },
++ /* GRP2b_cl */
++ {
++ { "rolA", Eb, CL, XX },
++ { "rorA", Eb, CL, XX },
++ { "rclA", Eb, CL, XX },
++ { "rcrA", Eb, CL, XX },
++ { "shlA", Eb, CL, XX },
++ { "shrA", Eb, CL, XX },
++ { "(bad)", XX, XX, XX },
++ { "sarA", Eb, CL, XX },
++ },
++ /* GRP2S_cl */
++ {
++ { "rolQ", Ev, CL, XX },
++ { "rorQ", Ev, CL, XX },
++ { "rclQ", Ev, CL, XX },
++ { "rcrQ", Ev, CL, XX },
++ { "shlQ", Ev, CL, XX },
++ { "shrQ", Ev, CL, XX },
++ { "(bad)", XX, XX, XX },
++ { "sarQ", Ev, CL, XX }
++ },
++ /* GRP3b */
++ {
++ { "testA", Eb, Ib, XX },
++ { "(bad)", Eb, XX, XX },
++ { "notA", Eb, XX, XX },
++ { "negA", Eb, XX, XX },
++ { "mulA", Eb, XX, XX }, /* Don't print the implicit %al register, */
++ { "imulA", Eb, XX, XX }, /* to distinguish these opcodes from other */
++ { "divA", Eb, XX, XX }, /* mul/imul opcodes. Do the same for div */
++ { "idivA", Eb, XX, XX } /* and idiv for consistency. */
++ },
++ /* GRP3S */
++ {
++ { "testQ", Ev, Iv, XX },
++ { "(bad)", XX, XX, XX },
++ { "notQ", Ev, XX, XX },
++ { "negQ", Ev, XX, XX },
++ { "mulQ", Ev, XX, XX }, /* Don't print the implicit register. */
++ { "imulQ", Ev, XX, XX },
++ { "divQ", Ev, XX, XX },
++ { "idivQ", Ev, XX, XX },
++ },
++ /* GRP4 */
++ {
++ { "incA", Eb, XX, XX },
++ { "decA", Eb, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRP5 */
++ {
++ { "incQ", Ev, XX, XX },
++ { "decQ", Ev, XX, XX },
++ { "callT", indirEv, XX, XX },
++ { "JcallT", indirEp, XX, XX },
++ { "jmpT", indirEv, XX, XX },
++ { "JjmpT", indirEp, XX, XX },
++ { "pushU", Ev, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRP6 */
++ {
++ { "sldtQ", Ev, XX, XX },
++ { "strQ", Ev, XX, XX },
++ { "lldt", Ew, XX, XX },
++ { "ltr", Ew, XX, XX },
++ { "verr", Ew, XX, XX },
++ { "verw", Ew, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX }
++ },
++ /* GRP7 */
++ {
++ { "sgdtIQ", VMX_Fixup, 0, XX, XX },
++ { "sidtIQ", PNI_Fixup, 0, XX, XX },
++ { "lgdt{Q|Q||}", M, XX, XX },
++ { "lidt{Q|Q||}", SVME_Fixup, 0, XX, XX },
++ { "smswQ", Ev, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "lmsw", Ew, XX, XX },
++ { "invlpg", INVLPG_Fixup, w_mode, XX, XX },
++ },
++ /* GRP8 */
++ {
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "btQ", Ev, Ib, XX },
++ { "btsQ", Ev, Ib, XX },
++ { "btrQ", Ev, Ib, XX },
++ { "btcQ", Ev, Ib, XX },
++ },
++ /* GRP9 */
++ {
++ { "(bad)", XX, XX, XX },
++ { "cmpxchg8b", Eq, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "", VM, XX, XX }, /* See OP_VMX. */
++ { "vmptrst", Eq, XX, XX },
++ },
++ /* GRP10 */
++ {
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "psrlw", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "psraw", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "psllw", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRP11 */
++ {
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "psrld", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "psrad", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "pslld", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRP12 */
++ {
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "psrlq", MS, Ib, XX },
++ { "psrldq", MS, Ib, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "psllq", MS, Ib, XX },
++ { "pslldq", MS, Ib, XX },
++ },
++ /* GRP13 */
++ {
++ { "fxsave", Ev, XX, XX },
++ { "fxrstor", Ev, XX, XX },
++ { "ldmxcsr", Ev, XX, XX },
++ { "stmxcsr", Ev, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "lfence", OP_0fae, 0, XX, XX },
++ { "mfence", OP_0fae, 0, XX, XX },
++ { "clflush", OP_0fae, 0, XX, XX },
++ },
++ /* GRP14 */
++ {
++ { "prefetchnta", Ev, XX, XX },
++ { "prefetcht0", Ev, XX, XX },
++ { "prefetcht1", Ev, XX, XX },
++ { "prefetcht2", Ev, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRPAMD */
++ {
++ { "prefetch", Eb, XX, XX },
++ { "prefetchw", Eb, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* GRPPADLCK1 */
++ {
++ { "xstore-rng", OP_0f07, 0, XX, XX },
++ { "xcrypt-ecb", OP_0f07, 0, XX, XX },
++ { "xcrypt-cbc", OP_0f07, 0, XX, XX },
++ { "xcrypt-ctr", OP_0f07, 0, XX, XX },
++ { "xcrypt-cfb", OP_0f07, 0, XX, XX },
++ { "xcrypt-ofb", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ },
++ /* GRPPADLCK2 */
++ {
++ { "montmul", OP_0f07, 0, XX, XX },
++ { "xsha1", OP_0f07, 0, XX, XX },
++ { "xsha256", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ { "(bad)", OP_0f07, 0, XX, XX },
++ }
++};
++
++static const struct dis386 prefix_user_table[][4] = {
++ /* PREGRP0 */
++ {
++ { "addps", XM, EX, XX },
++ { "addss", XM, EX, XX },
++ { "addpd", XM, EX, XX },
++ { "addsd", XM, EX, XX },
++ },
++ /* PREGRP1 */
++ {
++ { "", XM, EX, OPSIMD }, /* See OP_SIMD_SUFFIX. */
++ { "", XM, EX, OPSIMD },
++ { "", XM, EX, OPSIMD },
++ { "", XM, EX, OPSIMD },
++ },
++ /* PREGRP2 */
++ {
++ { "cvtpi2ps", XM, EM, XX },
++ { "cvtsi2ssY", XM, Ev, XX },
++ { "cvtpi2pd", XM, EM, XX },
++ { "cvtsi2sdY", XM, Ev, XX },
++ },
++ /* PREGRP3 */
++ {
++ { "cvtps2pi", MX, EX, XX },
++ { "cvtss2siY", Gv, EX, XX },
++ { "cvtpd2pi", MX, EX, XX },
++ { "cvtsd2siY", Gv, EX, XX },
++ },
++ /* PREGRP4 */
++ {
++ { "cvttps2pi", MX, EX, XX },
++ { "cvttss2siY", Gv, EX, XX },
++ { "cvttpd2pi", MX, EX, XX },
++ { "cvttsd2siY", Gv, EX, XX },
++ },
++ /* PREGRP5 */
++ {
++ { "divps", XM, EX, XX },
++ { "divss", XM, EX, XX },
++ { "divpd", XM, EX, XX },
++ { "divsd", XM, EX, XX },
++ },
++ /* PREGRP6 */
++ {
++ { "maxps", XM, EX, XX },
++ { "maxss", XM, EX, XX },
++ { "maxpd", XM, EX, XX },
++ { "maxsd", XM, EX, XX },
++ },
++ /* PREGRP7 */
++ {
++ { "minps", XM, EX, XX },
++ { "minss", XM, EX, XX },
++ { "minpd", XM, EX, XX },
++ { "minsd", XM, EX, XX },
++ },
++ /* PREGRP8 */
++ {
++ { "movups", XM, EX, XX },
++ { "movss", XM, EX, XX },
++ { "movupd", XM, EX, XX },
++ { "movsd", XM, EX, XX },
++ },
++ /* PREGRP9 */
++ {
++ { "movups", EX, XM, XX },
++ { "movss", EX, XM, XX },
++ { "movupd", EX, XM, XX },
++ { "movsd", EX, XM, XX },
++ },
++ /* PREGRP10 */
++ {
++ { "mulps", XM, EX, XX },
++ { "mulss", XM, EX, XX },
++ { "mulpd", XM, EX, XX },
++ { "mulsd", XM, EX, XX },
++ },
++ /* PREGRP11 */
++ {
++ { "rcpps", XM, EX, XX },
++ { "rcpss", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP12 */
++ {
++ { "rsqrtps", XM, EX, XX },
++ { "rsqrtss", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP13 */
++ {
++ { "sqrtps", XM, EX, XX },
++ { "sqrtss", XM, EX, XX },
++ { "sqrtpd", XM, EX, XX },
++ { "sqrtsd", XM, EX, XX },
++ },
++ /* PREGRP14 */
++ {
++ { "subps", XM, EX, XX },
++ { "subss", XM, EX, XX },
++ { "subpd", XM, EX, XX },
++ { "subsd", XM, EX, XX },
++ },
++ /* PREGRP15 */
++ {
++ { "(bad)", XM, EX, XX },
++ { "cvtdq2pd", XM, EX, XX },
++ { "cvttpd2dq", XM, EX, XX },
++ { "cvtpd2dq", XM, EX, XX },
++ },
++ /* PREGRP16 */
++ {
++ { "cvtdq2ps", XM, EX, XX },
++ { "cvttps2dq",XM, EX, XX },
++ { "cvtps2dq",XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP17 */
++ {
++ { "cvtps2pd", XM, EX, XX },
++ { "cvtss2sd", XM, EX, XX },
++ { "cvtpd2ps", XM, EX, XX },
++ { "cvtsd2ss", XM, EX, XX },
++ },
++ /* PREGRP18 */
++ {
++ { "maskmovq", MX, MS, XX },
++ { "(bad)", XM, EX, XX },
++ { "maskmovdqu", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP19 */
++ {
++ { "movq", MX, EM, XX },
++ { "movdqu", XM, EX, XX },
++ { "movdqa", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP20 */
++ {
++ { "movq", EM, MX, XX },
++ { "movdqu", EX, XM, XX },
++ { "movdqa", EX, XM, XX },
++ { "(bad)", EX, XM, XX },
++ },
++ /* PREGRP21 */
++ {
++ { "(bad)", EX, XM, XX },
++ { "movq2dq", XM, MS, XX },
++ { "movq", EX, XM, XX },
++ { "movdq2q", MX, XS, XX },
++ },
++ /* PREGRP22 */
++ {
++ { "pshufw", MX, EM, Ib },
++ { "pshufhw", XM, EX, Ib },
++ { "pshufd", XM, EX, Ib },
++ { "pshuflw", XM, EX, Ib },
++ },
++ /* PREGRP23 */
++ {
++ { "movd", Edq, MX, XX },
++ { "movq", XM, EX, XX },
++ { "movd", Edq, XM, XX },
++ { "(bad)", Ed, XM, XX },
++ },
++ /* PREGRP24 */
++ {
++ { "(bad)", MX, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "punpckhqdq", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP25 */
++ {
++ { "movntq", EM, MX, XX },
++ { "(bad)", EM, XM, XX },
++ { "movntdq", EM, XM, XX },
++ { "(bad)", EM, XM, XX },
++ },
++ /* PREGRP26 */
++ {
++ { "(bad)", MX, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "punpcklqdq", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP27 */
++ {
++ { "(bad)", MX, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "addsubpd", XM, EX, XX },
++ { "addsubps", XM, EX, XX },
++ },
++ /* PREGRP28 */
++ {
++ { "(bad)", MX, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "haddpd", XM, EX, XX },
++ { "haddps", XM, EX, XX },
++ },
++ /* PREGRP29 */
++ {
++ { "(bad)", MX, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "hsubpd", XM, EX, XX },
++ { "hsubps", XM, EX, XX },
++ },
++ /* PREGRP30 */
++ {
++ { "movlpX", XM, EX, SIMD_Fixup, 'h' }, /* really only 2 operands */
++ { "movsldup", XM, EX, XX },
++ { "movlpd", XM, EX, XX },
++ { "movddup", XM, EX, XX },
++ },
++ /* PREGRP31 */
++ {
++ { "movhpX", XM, EX, SIMD_Fixup, 'l' },
++ { "movshdup", XM, EX, XX },
++ { "movhpd", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ },
++ /* PREGRP32 */
++ {
++ { "(bad)", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "(bad)", XM, EX, XX },
++ { "lddqu", XM, M, XX },
++ },
++};
++
++static const struct dis386 x86_64_table[][2] = {
++ {
++ { "arpl", Ew, Gw, XX },
++ { "movs{||lq|xd}", Gv, Ed, XX },
++ },
++};
++
++#ifdef __KERNEL__
++#define INTERNAL_DISASSEMBLER_ERROR "<internal disassembler error>"
++#else /* __KERNEL__ */
++#define INTERNAL_DISASSEMBLER_ERROR _("<internal disassembler error>")
++#endif /* __KERNEL__ */
++
++static void
++ckprefix (void)
++{
++ int newrex;
++ rex = 0;
++ prefixes = 0;
++ used_prefixes = 0;
++ rex_used = 0;
++ while (1)
++ {
++ FETCH_DATA (the_info, codep + 1);
++ newrex = 0;
++ switch (*codep)
++ {
++ /* REX prefixes family. */
++ case 0x40:
++ case 0x41:
++ case 0x42:
++ case 0x43:
++ case 0x44:
++ case 0x45:
++ case 0x46:
++ case 0x47:
++ case 0x48:
++ case 0x49:
++ case 0x4a:
++ case 0x4b:
++ case 0x4c:
++ case 0x4d:
++ case 0x4e:
++ case 0x4f:
++ if (mode_64bit)
++ newrex = *codep;
++ else
++ return;
++ break;
++ case 0xf3:
++ prefixes |= PREFIX_REPZ;
++ break;
++ case 0xf2:
++ prefixes |= PREFIX_REPNZ;
++ break;
++ case 0xf0:
++ prefixes |= PREFIX_LOCK;
++ break;
++ case 0x2e:
++ prefixes |= PREFIX_CS;
++ break;
++ case 0x36:
++ prefixes |= PREFIX_SS;
++ break;
++ case 0x3e:
++ prefixes |= PREFIX_DS;
++ break;
++ case 0x26:
++ prefixes |= PREFIX_ES;
++ break;
++ case 0x64:
++ prefixes |= PREFIX_FS;
++ break;
++ case 0x65:
++ prefixes |= PREFIX_GS;
++ break;
++ case 0x66:
++ prefixes |= PREFIX_DATA;
++ break;
++ case 0x67:
++ prefixes |= PREFIX_ADDR;
++ break;
++ case FWAIT_OPCODE:
++ /* fwait is really an instruction. If there are prefixes
++ before the fwait, they belong to the fwait, *not* to the
++ following instruction. */
++ if (prefixes)
++ {
++ prefixes |= PREFIX_FWAIT;
++ codep++;
++ return;
++ }
++ prefixes = PREFIX_FWAIT;
++ break;
++ default:
++ return;
++ }
++ /* Rex is ignored when followed by another prefix. */
++ if (rex)
++ {
++ oappend (prefix_name (rex, 0));
++ oappend (" ");
++ }
++ rex = newrex;
++ codep++;
++ }
++}
++
++/* Return the name of the prefix byte PREF, or NULL if PREF is not a
++ prefix byte. */
++
++static const char *
++prefix_name (int pref, int sizeflag)
++{
++ switch (pref)
++ {
++ /* REX prefixes family. */
++ case 0x40:
++ return "rex";
++ case 0x41:
++ return "rexZ";
++ case 0x42:
++ return "rexY";
++ case 0x43:
++ return "rexYZ";
++ case 0x44:
++ return "rexX";
++ case 0x45:
++ return "rexXZ";
++ case 0x46:
++ return "rexXY";
++ case 0x47:
++ return "rexXYZ";
++ case 0x48:
++ return "rex64";
++ case 0x49:
++ return "rex64Z";
++ case 0x4a:
++ return "rex64Y";
++ case 0x4b:
++ return "rex64YZ";
++ case 0x4c:
++ return "rex64X";
++ case 0x4d:
++ return "rex64XZ";
++ case 0x4e:
++ return "rex64XY";
++ case 0x4f:
++ return "rex64XYZ";
++ case 0xf3:
++ return "repz";
++ case 0xf2:
++ return "repnz";
++ case 0xf0:
++ return "lock";
++ case 0x2e:
++ return "cs";
++ case 0x36:
++ return "ss";
++ case 0x3e:
++ return "ds";
++ case 0x26:
++ return "es";
++ case 0x64:
++ return "fs";
++ case 0x65:
++ return "gs";
++ case 0x66:
++ return (sizeflag & DFLAG) ? "data16" : "data32";
++ case 0x67:
++ if (mode_64bit)
++ return (sizeflag & AFLAG) ? "addr32" : "addr64";
++ else
++ return (sizeflag & AFLAG) ? "addr16" : "addr32";
++ case FWAIT_OPCODE:
++ return "fwait";
++ default:
++ return NULL;
++ }
++}
++
++static char op1out[100], op2out[100], op3out[100];
++static int op_ad, op_index[3];
++static int two_source_ops;
++static bfd_vma op_address[3];
++static bfd_vma op_riprel[3];
++static bfd_vma start_pc;
++
++/*
++ * On the 386's of 1988, the maximum length of an instruction is 15 bytes.
++ * (see topic "Redundant prefixes" in the "Differences from 8086"
++ * section of the "Virtual 8086 Mode" chapter.)
++ * 'pc' should be the address of this instruction, it will
++ * be used to print the target address if this is a relative jump or call
++ * The function returns the length of this instruction in bytes.
++ */
++
++static char intel_syntax;
++static char open_char;
++static char close_char;
++static char separator_char;
++static char scale_char;
++
++/* Here for backwards compatibility. When gdb stops using
++ print_insn_i386_att and print_insn_i386_intel these functions can
++ disappear, and print_insn_i386 be merged into print_insn. */
++int
++print_insn_i386_att (bfd_vma pc, disassemble_info *info)
++{
++ intel_syntax = 0;
++
++ return print_insn (pc, info);
++}
++
++int
++print_insn_i386_intel (bfd_vma pc, disassemble_info *info)
++{
++ intel_syntax = 1;
++
++ return print_insn (pc, info);
++}
++
++int
++print_insn_i386 (bfd_vma pc, disassemble_info *info)
++{
++ intel_syntax = -1;
++
++ return print_insn (pc, info);
++}
++
++static int
++print_insn (bfd_vma pc, disassemble_info *info)
++{
++ const struct dis386 *dp;
++ int i;
++ char *first, *second, *third;
++ int needcomma;
++ unsigned char uses_SSE_prefix, uses_LOCK_prefix;
++ int sizeflag;
++ const char *p;
++ struct dis_private priv;
++
++ mode_64bit = (info->mach == bfd_mach_x86_64_intel_syntax
++ || info->mach == bfd_mach_x86_64);
++
++ if (intel_syntax == (char) -1)
++ intel_syntax = (info->mach == bfd_mach_i386_i386_intel_syntax
++ || info->mach == bfd_mach_x86_64_intel_syntax);
++
++ if (info->mach == bfd_mach_i386_i386
++ || info->mach == bfd_mach_x86_64
++ || info->mach == bfd_mach_i386_i386_intel_syntax
++ || info->mach == bfd_mach_x86_64_intel_syntax)
++ priv.orig_sizeflag = AFLAG | DFLAG;
++ else if (info->mach == bfd_mach_i386_i8086)
++ priv.orig_sizeflag = 0;
++ else
++ abort ();
++
++ for (p = info->disassembler_options; p != NULL; )
++ {
++ if (strncmp (p, "x86-64", 6) == 0)
++ {
++ mode_64bit = 1;
++ priv.orig_sizeflag = AFLAG | DFLAG;
++ }
++ else if (strncmp (p, "i386", 4) == 0)
++ {
++ mode_64bit = 0;
++ priv.orig_sizeflag = AFLAG | DFLAG;
++ }
++ else if (strncmp (p, "i8086", 5) == 0)
++ {
++ mode_64bit = 0;
++ priv.orig_sizeflag = 0;
++ }
++ else if (strncmp (p, "intel", 5) == 0)
++ {
++ intel_syntax = 1;
++ }
++ else if (strncmp (p, "att", 3) == 0)
++ {
++ intel_syntax = 0;
++ }
++ else if (strncmp (p, "addr", 4) == 0)
++ {
++ if (p[4] == '1' && p[5] == '6')
++ priv.orig_sizeflag &= ~AFLAG;
++ else if (p[4] == '3' && p[5] == '2')
++ priv.orig_sizeflag |= AFLAG;
++ }
++ else if (strncmp (p, "data", 4) == 0)
++ {
++ if (p[4] == '1' && p[5] == '6')
++ priv.orig_sizeflag &= ~DFLAG;
++ else if (p[4] == '3' && p[5] == '2')
++ priv.orig_sizeflag |= DFLAG;
++ }
++ else if (strncmp (p, "suffix", 6) == 0)
++ priv.orig_sizeflag |= SUFFIX_ALWAYS;
++
++ p = strchr (p, ',');
++ if (p != NULL)
++ p++;
++ }
++
++ if (intel_syntax)
++ {
++ names64 = intel_names64;
++ names32 = intel_names32;
++ names16 = intel_names16;
++ names8 = intel_names8;
++ names8rex = intel_names8rex;
++ names_seg = intel_names_seg;
++ index16 = intel_index16;
++ open_char = '[';
++ close_char = ']';
++ separator_char = '+';
++ scale_char = '*';
++ }
++ else
++ {
++ names64 = att_names64;
++ names32 = att_names32;
++ names16 = att_names16;
++ names8 = att_names8;
++ names8rex = att_names8rex;
++ names_seg = att_names_seg;
++ index16 = att_index16;
++ open_char = '(';
++ close_char = ')';
++ separator_char = ',';
++ scale_char = ',';
++ }
++
++ /* The output looks better if we put 7 bytes on a line, since that
++ puts most long word instructions on a single line. */
++ info->bytes_per_line = 7;
++
++ info->private_data = &priv;
++ priv.max_fetched = priv.the_buffer;
++ priv.insn_start = pc;
++
++ obuf[0] = 0;
++ op1out[0] = 0;
++ op2out[0] = 0;
++ op3out[0] = 0;
++
++ op_index[0] = op_index[1] = op_index[2] = -1;
++
++ the_info = info;
++ start_pc = pc;
++ start_codep = priv.the_buffer;
++ codep = priv.the_buffer;
++
++#ifndef __KERNEL__
++ if (setjmp (priv.bailout) != 0)
++ {
++ const char *name;
++
++ /* Getting here means we tried for data but didn't get it. That
++ means we have an incomplete instruction of some sort. Just
++ print the first byte as a prefix or a .byte pseudo-op. */
++ if (codep > priv.the_buffer)
++ {
++ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
++ if (name != NULL)
++ (*info->fprintf_func) (info->stream, "%s", name);
++ else
++ {
++ /* Just print the first byte as a .byte instruction. */
++ (*info->fprintf_func) (info->stream, ".byte 0x%x",
++ (unsigned int) priv.the_buffer[0]);
++ }
++
++ return 1;
++ }
++
++ return -1;
++ }
++#endif /* __KERNEL__ */
++
++ obufp = obuf;
++ ckprefix ();
++
++ insn_codep = codep;
++ sizeflag = priv.orig_sizeflag;
++
++ FETCH_DATA (info, codep + 1);
++ two_source_ops = (*codep == 0x62) || (*codep == 0xc8);
++
++ if ((prefixes & PREFIX_FWAIT)
++ && ((*codep < 0xd8) || (*codep > 0xdf)))
++ {
++ const char *name;
++
++ /* fwait not followed by floating point instruction. Print the
++ first prefix, which is probably fwait itself. */
++ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
++ if (name == NULL)
++ name = INTERNAL_DISASSEMBLER_ERROR;
++ (*info->fprintf_func) (info->stream, "%s", name);
++ return 1;
++ }
++
++ if (*codep == 0x0f)
++ {
++ FETCH_DATA (info, codep + 2);
++ dp = &dis386_twobyte[*++codep];
++ need_modrm = twobyte_has_modrm[*codep];
++ uses_SSE_prefix = twobyte_uses_SSE_prefix[*codep];
++ uses_LOCK_prefix = (*codep & ~0x02) == 0x20;
++ }
++ else
++ {
++ dp = &dis386[*codep];
++ need_modrm = onebyte_has_modrm[*codep];
++ uses_SSE_prefix = 0;
++ uses_LOCK_prefix = 0;
++ }
++ codep++;
++
++ if (!uses_SSE_prefix && (prefixes & PREFIX_REPZ))
++ {
++ oappend ("repz ");
++ used_prefixes |= PREFIX_REPZ;
++ }
++ if (!uses_SSE_prefix && (prefixes & PREFIX_REPNZ))
++ {
++ oappend ("repnz ");
++ used_prefixes |= PREFIX_REPNZ;
++ }
++ if (!uses_LOCK_prefix && (prefixes & PREFIX_LOCK))
++ {
++ oappend ("lock ");
++ used_prefixes |= PREFIX_LOCK;
++ }
++
++ if (prefixes & PREFIX_ADDR)
++ {
++ sizeflag ^= AFLAG;
++ if (dp->bytemode3 != loop_jcxz_mode || intel_syntax)
++ {
++ if ((sizeflag & AFLAG) || mode_64bit)
++ oappend ("addr32 ");
++ else
++ oappend ("addr16 ");
++ used_prefixes |= PREFIX_ADDR;
++ }
++ }
++
++ if (!uses_SSE_prefix && (prefixes & PREFIX_DATA))
++ {
++ sizeflag ^= DFLAG;
++ if (dp->bytemode3 == cond_jump_mode
++ && dp->bytemode1 == v_mode
++ && !intel_syntax)
++ {
++ if (sizeflag & DFLAG)
++ oappend ("data32 ");
++ else
++ oappend ("data16 ");
++ used_prefixes |= PREFIX_DATA;
++ }
++ }
++
++ if (need_modrm)
++ {
++ FETCH_DATA (info, codep + 1);
++ mod = (*codep >> 6) & 3;
++ reg = (*codep >> 3) & 7;
++ rm = *codep & 7;
++ }
++
++ if (dp->name == NULL && dp->bytemode1 == FLOATCODE)
++ {
++ dofloat (sizeflag);
++ }
++ else
++ {
++ int index;
++ if (dp->name == NULL)
++ {
++ switch (dp->bytemode1)
++ {
++ case USE_GROUPS:
++ dp = &grps[dp->bytemode2][reg];
++ break;
++
++ case USE_PREFIX_USER_TABLE:
++ index = 0;
++ used_prefixes |= (prefixes & PREFIX_REPZ);
++ if (prefixes & PREFIX_REPZ)
++ index = 1;
++ else
++ {
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (prefixes & PREFIX_DATA)
++ index = 2;
++ else
++ {
++ used_prefixes |= (prefixes & PREFIX_REPNZ);
++ if (prefixes & PREFIX_REPNZ)
++ index = 3;
++ }
++ }
++ dp = &prefix_user_table[dp->bytemode2][index];
++ break;
++
++ case X86_64_SPECIAL:
++ dp = &x86_64_table[dp->bytemode2][mode_64bit];
++ break;
++
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ break;
++ }
++ }
++
++ if (putop (dp->name, sizeflag) == 0)
++ {
++ obufp = op1out;
++ op_ad = 2;
++ if (dp->op1)
++ (*dp->op1) (dp->bytemode1, sizeflag);
++
++ obufp = op2out;
++ op_ad = 1;
++ if (dp->op2)
++ (*dp->op2) (dp->bytemode2, sizeflag);
++
++ obufp = op3out;
++ op_ad = 0;
++ if (dp->op3)
++ (*dp->op3) (dp->bytemode3, sizeflag);
++ }
++ }
++
++ /* See if any prefixes were not used. If so, print the first one
++ separately. If we don't do this, we'll wind up printing an
++ instruction stream which does not precisely correspond to the
++ bytes we are disassembling. */
++ if ((prefixes & ~used_prefixes) != 0)
++ {
++ const char *name;
++
++ name = prefix_name (priv.the_buffer[0], priv.orig_sizeflag);
++ if (name == NULL)
++ name = INTERNAL_DISASSEMBLER_ERROR;
++ (*info->fprintf_func) (info->stream, "%s", name);
++ return 1;
++ }
++ if (rex & ~rex_used)
++ {
++ const char *name;
++ name = prefix_name (rex | 0x40, priv.orig_sizeflag);
++ if (name == NULL)
++ name = INTERNAL_DISASSEMBLER_ERROR;
++ (*info->fprintf_func) (info->stream, "%s ", name);
++ }
++
++ obufp = obuf + strlen (obuf);
++ for (i = strlen (obuf); i < 6; i++)
++ oappend (" ");
++ oappend (" ");
++ (*info->fprintf_func) (info->stream, "%s", obuf);
++
++ /* The enter and bound instructions are printed with operands in the same
++ order as the intel book; everything else is printed in reverse order. */
++ if (intel_syntax || two_source_ops)
++ {
++ first = op1out;
++ second = op2out;
++ third = op3out;
++ op_ad = op_index[0];
++ op_index[0] = op_index[2];
++ op_index[2] = op_ad;
++ }
++ else
++ {
++ first = op3out;
++ second = op2out;
++ third = op1out;
++ }
++ needcomma = 0;
++ if (*first)
++ {
++ if (op_index[0] != -1 && !op_riprel[0])
++ (*info->print_address_func) ((bfd_vma) op_address[op_index[0]], info);
++ else
++ (*info->fprintf_func) (info->stream, "%s", first);
++ needcomma = 1;
++ }
++ if (*second)
++ {
++ if (needcomma)
++ (*info->fprintf_func) (info->stream, ",");
++ if (op_index[1] != -1 && !op_riprel[1])
++ (*info->print_address_func) ((bfd_vma) op_address[op_index[1]], info);
++ else
++ (*info->fprintf_func) (info->stream, "%s", second);
++ needcomma = 1;
++ }
++ if (*third)
++ {
++ if (needcomma)
++ (*info->fprintf_func) (info->stream, ",");
++ if (op_index[2] != -1 && !op_riprel[2])
++ (*info->print_address_func) ((bfd_vma) op_address[op_index[2]], info);
++ else
++ (*info->fprintf_func) (info->stream, "%s", third);
++ }
++ for (i = 0; i < 3; i++)
++ if (op_index[i] != -1 && op_riprel[i])
++ {
++ (*info->fprintf_func) (info->stream, " # ");
++ (*info->print_address_func) ((bfd_vma) (start_pc + codep - start_codep
++ + op_address[op_index[i]]), info);
++ }
++ return codep - priv.the_buffer;
++}
++
++/* Mnemonic templates for x87 escape opcodes 0xd8-0xdf when the modrm
++ byte selects a memory operand.  Indexed by (opcode - 0xd8) * 8 + reg.
++ "{a|b|c|d}" groups select an alternative by syntax/mode in putop();
++ capital letters ('I', 'C', 'N') are putop() suffix macros.  */
++static const char *float_mem[] = {
++ /* d8 */
++ "fadd{s||s|}",
++ "fmul{s||s|}",
++ "fcom{s||s|}",
++ "fcomp{s||s|}",
++ "fsub{s||s|}",
++ "fsubr{s||s|}",
++ "fdiv{s||s|}",
++ "fdivr{s||s|}",
++ /* d9 */
++ "fld{s||s|}",
++ "(bad)",
++ "fst{s||s|}",
++ "fstp{s||s|}",
++ "fldenvIC",
++ "fldcw",
++ "fNstenvIC",
++ "fNstcw",
++ /* da */
++ "fiadd{l||l|}",
++ "fimul{l||l|}",
++ "ficom{l||l|}",
++ "ficomp{l||l|}",
++ "fisub{l||l|}",
++ "fisubr{l||l|}",
++ "fidiv{l||l|}",
++ "fidivr{l||l|}",
++ /* db */
++ "fild{l||l|}",
++ "fisttp{l||l|}",
++ "fist{l||l|}",
++ "fistp{l||l|}",
++ "(bad)",
++ "fld{t||t|}",
++ "(bad)",
++ "fstp{t||t|}",
++ /* dc */
++ "fadd{l||l|}",
++ "fmul{l||l|}",
++ "fcom{l||l|}",
++ "fcomp{l||l|}",
++ "fsub{l||l|}",
++ "fsubr{l||l|}",
++ "fdiv{l||l|}",
++ "fdivr{l||l|}",
++ /* dd */
++ "fld{l||l|}",
++ "fisttp{ll||ll|}",
++ "fst{l||l|}",
++ "fstp{l||l|}",
++ "frstorIC",
++ "(bad)",
++ "fNsaveIC",
++ "fNstsw",
++ /* de */
++ "fiadd",
++ "fimul",
++ "ficom",
++ "ficomp",
++ "fisub",
++ "fisubr",
++ "fidiv",
++ "fidivr",
++ /* df */
++ "fild",
++ "fisttp",
++ "fist",
++ "fistp",
++ "fbld",
++ "fild{ll||ll|}",
++ "fbstp",
++ "fistp{ll||ll|}",
++};
++
++/* Operand size (bytemode passed to OP_E) for each float_mem entry
++ above; 0 means no explicit operand size applies.  */
++static const unsigned char float_mem_mode[] = {
++ /* d8 */
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ /* d9 */
++ d_mode,
++ 0,
++ d_mode,
++ d_mode,
++ 0,
++ w_mode,
++ 0,
++ w_mode,
++ /* da */
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ /* db */
++ d_mode,
++ d_mode,
++ d_mode,
++ d_mode,
++ 0,
++ t_mode,
++ 0,
++ t_mode,
++ /* dc */
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ /* dd */
++ q_mode,
++ q_mode,
++ q_mode,
++ q_mode,
++ 0,
++ 0,
++ 0,
++ w_mode,
++ /* de */
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ /* df */
++ w_mode,
++ w_mode,
++ w_mode,
++ w_mode,
++ t_mode,
++ q_mode,
++ t_mode,
++ q_mode
++};
++
++/* Operand helper shorthands for x87 register-form table entries.  */
++#define ST OP_ST, 0
++#define STi OP_STi, 0
++
++/* FGRPxx_y: dis386 entries with a NULL name; bytemode1 is an index
++ into fgrps[] below, and the modrm rm field picks the mnemonic.  */
++#define FGRPd9_2 NULL, NULL, 0, NULL, 0, NULL, 0
++#define FGRPd9_4 NULL, NULL, 1, NULL, 0, NULL, 0
++#define FGRPd9_5 NULL, NULL, 2, NULL, 0, NULL, 0
++#define FGRPd9_6 NULL, NULL, 3, NULL, 0, NULL, 0
++#define FGRPd9_7 NULL, NULL, 4, NULL, 0, NULL, 0
++#define FGRPda_5 NULL, NULL, 5, NULL, 0, NULL, 0
++#define FGRPdb_4 NULL, NULL, 6, NULL, 0, NULL, 0
++#define FGRPde_3 NULL, NULL, 7, NULL, 0, NULL, 0
++#define FGRPdf_4 NULL, NULL, 8, NULL, 0, NULL, 0
++
++/* x87 instructions whose modrm byte has mod == 3 (register operands),
++ indexed by [opcode - 0xd8][reg].  NULL-name rows are FGRP groups
++ resolved through fgrps[] by dofloat().  */
++static const struct dis386 float_reg[][8] = {
++ /* d8 */
++ {
++ { "fadd", ST, STi, XX },
++ { "fmul", ST, STi, XX },
++ { "fcom", STi, XX, XX },
++ { "fcomp", STi, XX, XX },
++ { "fsub", ST, STi, XX },
++ { "fsubr", ST, STi, XX },
++ { "fdiv", ST, STi, XX },
++ { "fdivr", ST, STi, XX },
++ },
++ /* d9 */
++ {
++ { "fld", STi, XX, XX },
++ { "fxch", STi, XX, XX },
++ { FGRPd9_2 },
++ { "(bad)", XX, XX, XX },
++ { FGRPd9_4 },
++ { FGRPd9_5 },
++ { FGRPd9_6 },
++ { FGRPd9_7 },
++ },
++ /* da */
++ {
++ { "fcmovb", ST, STi, XX },
++ { "fcmove", ST, STi, XX },
++ { "fcmovbe",ST, STi, XX },
++ { "fcmovu", ST, STi, XX },
++ { "(bad)", XX, XX, XX },
++ { FGRPda_5 },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* db */
++ {
++ { "fcmovnb",ST, STi, XX },
++ { "fcmovne",ST, STi, XX },
++ { "fcmovnbe",ST, STi, XX },
++ { "fcmovnu",ST, STi, XX },
++ { FGRPdb_4 },
++ { "fucomi", ST, STi, XX },
++ { "fcomi", ST, STi, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* dc */
++ {
++ { "fadd", STi, ST, XX },
++ { "fmul", STi, ST, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++#if UNIXWARE_COMPAT
++ { "fsub", STi, ST, XX },
++ { "fsubr", STi, ST, XX },
++ { "fdiv", STi, ST, XX },
++ { "fdivr", STi, ST, XX },
++#else
++ { "fsubr", STi, ST, XX },
++ { "fsub", STi, ST, XX },
++ { "fdivr", STi, ST, XX },
++ { "fdiv", STi, ST, XX },
++#endif
++ },
++ /* dd */
++ {
++ { "ffree", STi, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "fst", STi, XX, XX },
++ { "fstp", STi, XX, XX },
++ { "fucom", STi, XX, XX },
++ { "fucomp", STi, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ },
++ /* de */
++ {
++ { "faddp", STi, ST, XX },
++ { "fmulp", STi, ST, XX },
++ { "(bad)", XX, XX, XX },
++ { FGRPde_3 },
++#if UNIXWARE_COMPAT
++ { "fsubp", STi, ST, XX },
++ { "fsubrp", STi, ST, XX },
++ { "fdivp", STi, ST, XX },
++ { "fdivrp", STi, ST, XX },
++#else
++ { "fsubrp", STi, ST, XX },
++ { "fsubp", STi, ST, XX },
++ { "fdivrp", STi, ST, XX },
++ { "fdivp", STi, ST, XX },
++#endif
++ },
++ /* df */
++ {
++ { "ffreep", STi, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { "(bad)", XX, XX, XX },
++ { FGRPdf_4 },
++ { "fucomip",ST, STi, XX },
++ { "fcomip", ST, STi, XX },
++ { "(bad)", XX, XX, XX },
++ },
++};
++
++/* Mnemonics for the FGRP groups referenced by float_reg[]; outer
++ index is the group number from the FGRPxx_y macros, inner index
++ is the modrm rm field.  */
++static char *fgrps[][8] = {
++ /* d9_2 0 */
++ {
++ "fnop","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
++ },
++
++ /* d9_4 1 */
++ {
++ "fchs","fabs","(bad)","(bad)","ftst","fxam","(bad)","(bad)",
++ },
++
++ /* d9_5 2 */
++ {
++ "fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz","(bad)",
++ },
++
++ /* d9_6 3 */
++ {
++ "f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp",
++ },
++
++ /* d9_7 4 */
++ {
++ "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos",
++ },
++
++ /* da_5 5 */
++ {
++ "(bad)","fucompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
++ },
++
++ /* db_4 6 */
++ {
++ "feni(287 only)","fdisi(287 only)","fNclex","fNinit",
++ "fNsetpm(287 only)","(bad)","(bad)","(bad)",
++ },
++
++ /* de_3 7 */
++ {
++ "(bad)","fcompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
++ },
++
++ /* df_4 8 */
++ {
++ "fNstsw","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)",
++ },
++};
++
++/* Disassemble an x87 floating point instruction.  The escape opcode
++ (0xd8..0xdf) has already been consumed (codep[-1]); the modrm
++ fields have already been decoded into the globals mod/reg/rm.  */
++static void
++dofloat (int sizeflag)
++{
++ const struct dis386 *dp;
++ unsigned char floatop;
++
++ floatop = codep[-1];
++
++ if (mod != 3)
++ {
++ /* Memory operand: one flat table entry per (opcode, reg).  */
++ int fp_indx = (floatop - 0xd8) * 8 + reg;
++
++ putop (float_mem[fp_indx], sizeflag);
++ obufp = op1out;
++ OP_E (float_mem_mode[fp_indx], sizeflag);
++ return;
++ }
++ /* Skip mod/rm byte. */
++ MODRM_CHECK;
++ codep++;
++
++ dp = &float_reg[floatop - 0xd8][reg];
++ if (dp->name == NULL)
++ {
++ /* NULL name marks an FGRP group: rm selects the mnemonic.  */
++ putop (fgrps[dp->bytemode1][rm], sizeflag);
++
++ /* Instruction fnstsw is only one with strange arg. */
++ if (floatop == 0xdf && codep[-1] == 0xe0)
++ strcpy (op1out, names16[0]);
++ }
++ else
++ {
++ putop (dp->name, sizeflag);
++
++ obufp = op1out;
++ if (dp->op1)
++ (*dp->op1) (dp->bytemode1, sizeflag);
++ obufp = op2out;
++ if (dp->op2)
++ (*dp->op2) (dp->bytemode2, sizeflag);
++ }
++}
++
++/* x87 top-of-stack register operand.  NOTE(review): the '%' prefix
++ is emitted even for intel syntax here, unlike OP_STi which skips
++ it -- confirm this asymmetry is intended.  */
++static void
++OP_ST (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ oappend ("%st");
++}
++
++/* %st(N) register operand; N is the modrm rm field.  The leading
++ '%' is skipped for intel syntax via the "+ intel_syntax" offset.  */
++static void
++OP_STi (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ sprintf (scratchbuf, "%%st(%d)", rm);
++ oappend (scratchbuf + intel_syntax);
++}
++
++/* Capital letters in template are macros. */
++/* Expand TEMPLATE into obuf, substituting size/syntax-dependent
++ suffixes for each capital-letter macro and selecting the right
++ "{att|intel|att64|intel64}" alternative.  Returns 0 on success,
++ 1 when the selected alternative is invalid ("(bad)").  */
++static int
++putop (const char *template, int sizeflag)
++{
++ const char *p;
++ int alt = 0;
++
++ for (p = template; *p; p++)
++ {
++ switch (*p)
++ {
++ default:
++ *obufp++ = *p;
++ break;
++ case '{':
++ /* Skip 'alt' leading alternatives of the {a|b|c|d} group;
++ alt = intel_syntax + 2 * mode_64bit selects the branch.  */
++ alt = 0;
++ if (intel_syntax)
++ alt += 1;
++ if (mode_64bit)
++ alt += 2;
++ while (alt != 0)
++ {
++ while (*++p != '|')
++ {
++ if (*p == '}')
++ {
++ /* Alternative not valid. */
++ strcpy (obuf, "(bad)");
++ obufp = obuf + 5;
++ return 1;
++ }
++ else if (*p == '\0')
++ abort ();
++ }
++ alt--;
++ }
++ /* Fall through. */
++ case 'I':
++ alt = 1;
++ continue;
++ case '|':
++ while (*++p != '}')
++ {
++ if (*p == '\0')
++ abort ();
++ }
++ break;
++ case '}':
++ break;
++ case 'A':
++ if (intel_syntax)
++ break;
++ if (mod != 3 || (sizeflag & SUFFIX_ALWAYS))
++ *obufp++ = 'b';
++ break;
++ case 'B':
++ if (intel_syntax)
++ break;
++ if (sizeflag & SUFFIX_ALWAYS)
++ *obufp++ = 'b';
++ break;
++ case 'C':
++ if (intel_syntax && !alt)
++ break;
++ if ((prefixes & PREFIX_DATA) || (sizeflag & SUFFIX_ALWAYS))
++ {
++ if (sizeflag & DFLAG)
++ *obufp++ = intel_syntax ? 'd' : 'l';
++ else
++ *obufp++ = intel_syntax ? 'w' : 's';
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ break;
++ case 'E': /* For jcxz/jecxz */
++ if (mode_64bit)
++ {
++ if (sizeflag & AFLAG)
++ *obufp++ = 'r';
++ else
++ *obufp++ = 'e';
++ }
++ else
++ if (sizeflag & AFLAG)
++ *obufp++ = 'e';
++ used_prefixes |= (prefixes & PREFIX_ADDR);
++ break;
++ case 'F':
++ if (intel_syntax)
++ break;
++ if ((prefixes & PREFIX_ADDR) || (sizeflag & SUFFIX_ALWAYS))
++ {
++ if (sizeflag & AFLAG)
++ *obufp++ = mode_64bit ? 'q' : 'l';
++ else
++ *obufp++ = mode_64bit ? 'l' : 'w';
++ used_prefixes |= (prefixes & PREFIX_ADDR);
++ }
++ break;
++ case 'H':
++ if (intel_syntax)
++ break;
++ /* Branch hint prefixes: cs -> ",pn" (not taken),
++ ds -> ",pt" (taken).  */
++ if ((prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_CS
++ || (prefixes & (PREFIX_CS | PREFIX_DS)) == PREFIX_DS)
++ {
++ used_prefixes |= prefixes & (PREFIX_CS | PREFIX_DS);
++ *obufp++ = ',';
++ *obufp++ = 'p';
++ if (prefixes & PREFIX_DS)
++ *obufp++ = 't';
++ else
++ *obufp++ = 'n';
++ }
++ break;
++ case 'J':
++ if (intel_syntax)
++ break;
++ *obufp++ = 'l';
++ break;
++ case 'L':
++ if (intel_syntax)
++ break;
++ if (sizeflag & SUFFIX_ALWAYS)
++ *obufp++ = 'l';
++ break;
++ case 'N':
++ if ((prefixes & PREFIX_FWAIT) == 0)
++ *obufp++ = 'n';
++ else
++ used_prefixes |= PREFIX_FWAIT;
++ break;
++ case 'O':
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ *obufp++ = 'o';
++ else
++ *obufp++ = 'd';
++ break;
++ case 'T':
++ if (intel_syntax)
++ break;
++ if (mode_64bit)
++ {
++ *obufp++ = 'q';
++ break;
++ }
++ /* Fall through. */
++ case 'P':
++ if (intel_syntax)
++ break;
++ if ((prefixes & PREFIX_DATA)
++ || (rex & REX_MODE64)
++ || (sizeflag & SUFFIX_ALWAYS))
++ {
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ *obufp++ = 'q';
++ else
++ {
++ if (sizeflag & DFLAG)
++ *obufp++ = 'l';
++ else
++ *obufp++ = 'w';
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ }
++ break;
++ case 'U':
++ if (intel_syntax)
++ break;
++ if (mode_64bit)
++ {
++ *obufp++ = 'q';
++ break;
++ }
++ /* Fall through. */
++ case 'Q':
++ if (intel_syntax && !alt)
++ break;
++ USED_REX (REX_MODE64);
++ if (mod != 3 || (sizeflag & SUFFIX_ALWAYS))
++ {
++ if (rex & REX_MODE64)
++ *obufp++ = 'q';
++ else
++ {
++ if (sizeflag & DFLAG)
++ *obufp++ = intel_syntax ? 'd' : 'l';
++ else
++ *obufp++ = 'w';
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ }
++ break;
++ case 'R':
++ USED_REX (REX_MODE64);
++ if (intel_syntax)
++ {
++ if (rex & REX_MODE64)
++ {
++ *obufp++ = 'q';
++ *obufp++ = 't';
++ }
++ else if (sizeflag & DFLAG)
++ {
++ *obufp++ = 'd';
++ *obufp++ = 'q';
++ }
++ else
++ {
++ *obufp++ = 'w';
++ *obufp++ = 'd';
++ }
++ }
++ else
++ {
++ if (rex & REX_MODE64)
++ *obufp++ = 'q';
++ else if (sizeflag & DFLAG)
++ *obufp++ = 'l';
++ else
++ *obufp++ = 'w';
++ }
++ if (!(rex & REX_MODE64))
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case 'S':
++ if (intel_syntax)
++ break;
++ if (sizeflag & SUFFIX_ALWAYS)
++ {
++ if (rex & REX_MODE64)
++ *obufp++ = 'q';
++ else
++ {
++ if (sizeflag & DFLAG)
++ *obufp++ = 'l';
++ else
++ *obufp++ = 'w';
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ }
++ break;
++ case 'X':
++ if (prefixes & PREFIX_DATA)
++ *obufp++ = 'd';
++ else
++ *obufp++ = 's';
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case 'Y':
++ if (intel_syntax)
++ break;
++ if (rex & REX_MODE64)
++ {
++ USED_REX (REX_MODE64);
++ *obufp++ = 'q';
++ }
++ break;
++ /* implicit operand size 'l' for i386 or 'q' for x86-64 */
++ case 'W':
++ /* operand size flag for cwtl, cbtw */
++ USED_REX (0);
++ if (rex)
++ *obufp++ = 'l';
++ else if (sizeflag & DFLAG)
++ *obufp++ = 'w';
++ else
++ *obufp++ = 'b';
++ if (intel_syntax)
++ {
++ if (rex)
++ {
++ *obufp++ = 'q';
++ *obufp++ = 'e';
++ }
++ if (sizeflag & DFLAG)
++ {
++ *obufp++ = 'd';
++ *obufp++ = 'e';
++ }
++ else
++ {
++ *obufp++ = 'w';
++ }
++ }
++ if (!rex)
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ }
++ alt = 0;
++ }
++ *obufp = 0;
++ return 0;
++}
++
++/* Append the NUL-terminated string S to the operand output buffer,
++ leaving obufp positioned on the new terminator.  */
++static void
++oappend (const char *s)
++{
++ while ((*obufp = *s) != '\0')
++ {
++ obufp++;
++ s++;
++ }
++}
++
++/* Emit a segment override ("%cs:" etc., '%' skipped for intel
++ syntax) for every segment prefix present, marking each one used.
++ Order matches the historical cs/ds/ss/es/fs/gs sequence.  */
++static void
++append_seg (void)
++{
++ static const struct
++ {
++ int prefix;
++ const char *name;
++ } segs[] =
++ {
++ { PREFIX_CS, "%cs:" },
++ { PREFIX_DS, "%ds:" },
++ { PREFIX_SS, "%ss:" },
++ { PREFIX_ES, "%es:" },
++ { PREFIX_FS, "%fs:" },
++ { PREFIX_GS, "%gs:" },
++ };
++ unsigned int i;
++
++ for (i = 0; i < sizeof (segs) / sizeof (segs[0]); i++)
++ if (prefixes & segs[i].prefix)
++ {
++ used_prefixes |= segs[i].prefix;
++ oappend (segs[i].name + intel_syntax);
++ }
++}
++
++/* Indirect jump/call target: same as OP_E, but AT&T syntax marks
++ the operand with a leading '*'.  */
++static void
++OP_indirE (int bytemode, int sizeflag)
++{
++ if (intel_syntax == 0)
++ oappend ("*");
++ OP_E (bytemode, sizeflag);
++}
++
++/* Format DISP into BUF: "0x..." when HEX is nonzero, signed decimal
++ otherwise.  In 64-bit mode the conversion is done by hand because
++ no portable printf length modifier exists for bfd_vma here.  */
++static void
++print_operand_value (char *buf, int hex, bfd_vma disp)
++{
++ if (mode_64bit)
++ {
++ if (hex)
++ {
++ char tmp[30];
++ int i;
++ buf[0] = '0';
++ buf[1] = 'x';
++ sprintf_vma (tmp, disp);
++ /* Strip leading zeros, keeping at least one digit.  */
++ for (i = 0; tmp[i] == '0' && tmp[i + 1]; i++);
++ strcpy (buf + 2, tmp + i);
++ }
++ else
++ {
++ bfd_signed_vma v = disp;
++ char tmp[30];
++ int i;
++ if (v < 0)
++ {
++ *(buf++) = '-';
++ v = -disp;
++ /* Check for possible overflow on 0x8000000000000000. */
++ if (v < 0)
++ {
++ strcpy (buf, "9223372036854775808");
++ return;
++ }
++ }
++ if (!v)
++ {
++ strcpy (buf, "0");
++ return;
++ }
++
++ /* Build the decimal digits backwards from tmp[28].  */
++ i = 0;
++ tmp[29] = 0;
++ while (v)
++ {
++ tmp[28 - i] = (v % 10) + '0';
++ v /= 10;
++ i++;
++ }
++ strcpy (buf, tmp + 29 - i);
++ }
++ }
++ else
++ {
++ if (hex)
++ sprintf (buf, "0x%x", (unsigned int) disp);
++ else
++ sprintf (buf, "%d", (int) disp);
++ }
++}
++
++/* Decode the modrm "E" operand (register or memory, with optional
++ SIB and displacement) and append it to the output buffer.  The
++ modrm fields are already in mod/reg/rm; codep points at the modrm
++ byte on entry and is advanced past it and any SIB/displacement.  */
++static void
++OP_E (int bytemode, int sizeflag)
++{
++ bfd_vma disp;
++ int add = 0;
++ int riprel = 0;
++ /* REX.B extends the rm/base register by 8.  */
++ USED_REX (REX_EXTZ);
++ if (rex & REX_EXTZ)
++ add += 8;
++
++ /* Skip mod/rm byte. */
++ MODRM_CHECK;
++ codep++;
++
++ if (mod == 3)
++ {
++ /* Register operand: pick the register name table by size.  */
++ switch (bytemode)
++ {
++ case b_mode:
++ USED_REX (0);
++ if (rex)
++ oappend (names8rex[rm + add]);
++ else
++ oappend (names8[rm + add]);
++ break;
++ case w_mode:
++ oappend (names16[rm + add]);
++ break;
++ case d_mode:
++ oappend (names32[rm + add]);
++ break;
++ case q_mode:
++ oappend (names64[rm + add]);
++ break;
++ case m_mode:
++ if (mode_64bit)
++ oappend (names64[rm + add]);
++ else
++ oappend (names32[rm + add]);
++ break;
++ case branch_v_mode:
++ if (mode_64bit)
++ oappend (names64[rm + add]);
++ else
++ {
++ if ((sizeflag & DFLAG) || bytemode != branch_v_mode)
++ oappend (names32[rm + add]);
++ else
++ oappend (names16[rm + add]);
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ break;
++ case v_mode:
++ case dq_mode:
++ case dqw_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ oappend (names64[rm + add]);
++ else if ((sizeflag & DFLAG) || bytemode != v_mode)
++ oappend (names32[rm + add]);
++ else
++ oappend (names16[rm + add]);
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case 0:
++ break;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ break;
++ }
++ return;
++ }
++
++ disp = 0;
++ append_seg ();
++
++ if ((sizeflag & AFLAG) || mode_64bit) /* 32 bit address mode */
++ {
++ int havesib;
++ int havebase;
++ int base;
++ int index = 0;
++ int scale = 0;
++
++ havesib = 0;
++ havebase = 1;
++ base = rm;
++
++ if (base == 4)
++ {
++ /* rm == 4 means a SIB byte follows.  */
++ havesib = 1;
++ FETCH_DATA (the_info, codep + 1);
++ index = (*codep >> 3) & 7;
++ if (mode_64bit || index != 0x4)
++ /* When INDEX == 0x4 in 32 bit mode, SCALE is ignored. */
++ scale = (*codep >> 6) & 3;
++ base = *codep & 7;
++ USED_REX (REX_EXTY);
++ if (rex & REX_EXTY)
++ index += 8;
++ codep++;
++ }
++ base += add;
++
++ switch (mod)
++ {
++ case 0:
++ if ((base & 7) == 5)
++ {
++ /* No base register, disp32 only; in 64-bit mode
++ without a SIB byte this is %rip-relative.  */
++ havebase = 0;
++ if (mode_64bit && !havesib)
++ riprel = 1;
++ disp = get32s ();
++ }
++ break;
++ case 1:
++ FETCH_DATA (the_info, codep + 1);
++ disp = *codep++;
++ if ((disp & 0x80) != 0)
++ disp -= 0x100;
++ break;
++ case 2:
++ disp = get32s ();
++ break;
++ }
++
++ if (!intel_syntax)
++ if (mod != 0 || (base & 7) == 5)
++ {
++ print_operand_value (scratchbuf, !riprel, disp);
++ oappend (scratchbuf);
++ if (riprel)
++ {
++ set_op (disp, 1);
++ oappend ("(%rip)");
++ }
++ }
++
++ if (havebase || (havesib && (index != 4 || scale != 0)))
++ {
++ if (intel_syntax)
++ {
++ switch (bytemode)
++ {
++ case b_mode:
++ oappend ("BYTE PTR ");
++ break;
++ case w_mode:
++ case dqw_mode:
++ oappend ("WORD PTR ");
++ break;
++ case branch_v_mode:
++ case v_mode:
++ case dq_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ oappend ("QWORD PTR ");
++ else if ((sizeflag & DFLAG) || bytemode == dq_mode)
++ oappend ("DWORD PTR ");
++ else
++ oappend ("WORD PTR ");
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case d_mode:
++ oappend ("DWORD PTR ");
++ break;
++ case q_mode:
++ oappend ("QWORD PTR ");
++ break;
++ case m_mode:
++ if (mode_64bit)
++ oappend ("QWORD PTR ");
++ else
++ oappend ("DWORD PTR ");
++ break;
++ case f_mode:
++ if (sizeflag & DFLAG)
++ {
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ oappend ("FWORD PTR ");
++ }
++ else
++ oappend ("DWORD PTR ");
++ break;
++ case t_mode:
++ oappend ("TBYTE PTR ");
++ break;
++ case x_mode:
++ oappend ("XMMWORD PTR ");
++ break;
++ default:
++ break;
++ }
++ }
++ *obufp++ = open_char;
++ if (intel_syntax && riprel)
++ oappend ("rip + ");
++ *obufp = '\0';
++ if (havebase)
++ oappend (mode_64bit && (sizeflag & AFLAG)
++ ? names64[base] : names32[base]);
++ if (havesib)
++ {
++ if (index != 4)
++ {
++ if (!intel_syntax || havebase)
++ {
++ *obufp++ = separator_char;
++ *obufp = '\0';
++ }
++ oappend (mode_64bit && (sizeflag & AFLAG)
++ ? names64[index] : names32[index]);
++ }
++ if (scale != 0 || (!intel_syntax && index != 4))
++ {
++ *obufp++ = scale_char;
++ *obufp = '\0';
++ sprintf (scratchbuf, "%d", 1 << scale);
++ oappend (scratchbuf);
++ }
++ }
++ if (intel_syntax && disp)
++ {
++ if ((bfd_signed_vma) disp > 0)
++ {
++ *obufp++ = '+';
++ *obufp = '\0';
++ }
++ else if (mod != 1)
++ {
++ *obufp++ = '-';
++ *obufp = '\0';
++ disp = - (bfd_signed_vma) disp;
++ }
++
++ print_operand_value (scratchbuf, mod != 1, disp);
++ oappend (scratchbuf);
++ }
++
++ *obufp++ = close_char;
++ *obufp = '\0';
++ }
++ else if (intel_syntax)
++ {
++ if (mod != 0 || (base & 7) == 5)
++ {
++ if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
++ | PREFIX_ES | PREFIX_FS | PREFIX_GS))
++ ;
++ else
++ {
++ oappend (names_seg[ds_reg - es_reg]);
++ oappend (":");
++ }
++ print_operand_value (scratchbuf, 1, disp);
++ oappend (scratchbuf);
++ }
++ }
++ }
++ else
++ { /* 16 bit address mode */
++ switch (mod)
++ {
++ case 0:
++ if (rm == 6)
++ {
++ disp = get16 ();
++ if ((disp & 0x8000) != 0)
++ disp -= 0x10000;
++ }
++ break;
++ case 1:
++ FETCH_DATA (the_info, codep + 1);
++ disp = *codep++;
++ if ((disp & 0x80) != 0)
++ disp -= 0x100;
++ break;
++ case 2:
++ disp = get16 ();
++ if ((disp & 0x8000) != 0)
++ disp -= 0x10000;
++ break;
++ }
++
++ if (!intel_syntax)
++ if (mod != 0 || rm == 6)
++ {
++ print_operand_value (scratchbuf, 0, disp);
++ oappend (scratchbuf);
++ }
++
++ if (mod != 0 || rm != 6)
++ {
++ *obufp++ = open_char;
++ *obufp = '\0';
++ oappend (index16[rm]);
++ if (intel_syntax && disp)
++ {
++ if ((bfd_signed_vma) disp > 0)
++ {
++ *obufp++ = '+';
++ *obufp = '\0';
++ }
++ else if (mod != 1)
++ {
++ *obufp++ = '-';
++ *obufp = '\0';
++ disp = - (bfd_signed_vma) disp;
++ }
++
++ print_operand_value (scratchbuf, mod != 1, disp);
++ oappend (scratchbuf);
++ }
++
++ *obufp++ = close_char;
++ *obufp = '\0';
++ }
++ else if (intel_syntax)
++ {
++ if (prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
++ | PREFIX_ES | PREFIX_FS | PREFIX_GS))
++ ;
++ else
++ {
++ oappend (names_seg[ds_reg - es_reg]);
++ oappend (":");
++ }
++ print_operand_value (scratchbuf, 1, disp & 0xffff);
++ oappend (scratchbuf);
++ }
++ }
++}
++
++/* Decode the modrm reg field ("G" operand) as a general register of
++ the size given by BYTEMODE, honoring the REX.R extension.  */
++static void
++OP_G (int bytemode, int sizeflag)
++{
++ int add = 0;
++ USED_REX (REX_EXTX);
++ if (rex & REX_EXTX)
++ add += 8;
++ switch (bytemode)
++ {
++ case b_mode:
++ USED_REX (0);
++ if (rex)
++ oappend (names8rex[reg + add]);
++ else
++ oappend (names8[reg + add]);
++ break;
++ case w_mode:
++ oappend (names16[reg + add]);
++ break;
++ case d_mode:
++ oappend (names32[reg + add]);
++ break;
++ case q_mode:
++ oappend (names64[reg + add]);
++ break;
++ case v_mode:
++ case dq_mode:
++ case dqw_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ oappend (names64[reg + add]);
++ else if ((sizeflag & DFLAG) || bytemode != v_mode)
++ oappend (names32[reg + add]);
++ else
++ oappend (names16[reg + add]);
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case m_mode:
++ if (mode_64bit)
++ oappend (names64[reg + add]);
++ else
++ oappend (names32[reg + add]);
++ break;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ break;
++ }
++}
++
++/* Fetch a 64-bit little-endian value from the instruction stream.
++ Only meaningful when configured with BFD64; aborts otherwise.  */
++static bfd_vma
++get64 (void)
++{
++ bfd_vma x;
++#ifdef BFD64
++ unsigned int a;
++ unsigned int b;
++
++ FETCH_DATA (the_info, codep + 8);
++ a = *codep++ & 0xff;
++ a |= (*codep++ & 0xff) << 8;
++ a |= (*codep++ & 0xff) << 16;
++ /* Cast before shifting: (int) 0xff << 24 would shift into the
++ sign bit of a signed int, which is undefined behavior.  */
++ a |= (unsigned int) (*codep++ & 0xff) << 24;
++ b = *codep++ & 0xff;
++ b |= (*codep++ & 0xff) << 8;
++ b |= (*codep++ & 0xff) << 16;
++ b |= (unsigned int) (*codep++ & 0xff) << 24;
++ x = a + ((bfd_vma) b << 32);
++#else
++ abort ();
++ x = 0;
++#endif
++ return x;
++}
++
++/* Fetch a 32-bit little-endian value, zero-extended into
++ bfd_signed_vma (masks are widened to bfd_signed_vma so the << 24
++ shift happens at that width; assumes bfd_signed_vma is wider than
++ 32 bits -- TODO confirm for non-BFD64 builds).  */
++static bfd_signed_vma
++get32 (void)
++{
++ bfd_signed_vma x = 0;
++
++ FETCH_DATA (the_info, codep + 4);
++ x = *codep++ & (bfd_signed_vma) 0xff;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 8;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 16;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 24;
++ return x;
++}
++
++/* Fetch a 32-bit little-endian value and sign-extend it to the full
++ width of bfd_signed_vma.  */
++static bfd_signed_vma
++get32s (void)
++{
++ bfd_signed_vma x = 0;
++
++ FETCH_DATA (the_info, codep + 4);
++ x = *codep++ & (bfd_signed_vma) 0xff;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 8;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 16;
++ x |= (*codep++ & (bfd_signed_vma) 0xff) << 24;
++
++ /* XOR-then-subtract of bit 31 sign-extends without relying on
++ implementation-defined right shifts of negative values.  */
++ x = (x ^ ((bfd_signed_vma) 1 << 31)) - ((bfd_signed_vma) 1 << 31);
++
++ return x;
++}
++
++/* Fetch a 16-bit little-endian value from the instruction stream.  */
++static int
++get16 (void)
++{
++ int value;
++
++ FETCH_DATA (the_info, codep + 2);
++ value = (codep[0] & 0xff) | ((codep[1] & 0xff) << 8);
++ codep += 2;
++ return value;
++}
++
++/* Record operand address OP (and whether it is %rip-relative) in the
++ slot selected by op_ad, so the caller can later print it through
++ print_address_func.  */
++static void
++set_op (bfd_vma op, int riprel)
++{
++ op_index[op_ad] = op_ad;
++ if (mode_64bit)
++ {
++ op_address[op_ad] = op;
++ op_riprel[op_ad] = riprel;
++ }
++ else
++ {
++ /* Mask to get a 32-bit address. */
++ op_address[op_ad] = op & 0xffffffff;
++ op_riprel[op_ad] = riprel & 0xffffffff;
++ }
++}
++
++/* Append a register operand encoded in the opcode byte itself
++ (CODE), with REX.B extending the register number by 8.  */
++static void
++OP_REG (int code, int sizeflag)
++{
++ const char *s;
++ int add = 0;
++ USED_REX (REX_EXTZ);
++ if (rex & REX_EXTZ)
++ add = 8;
++
++ switch (code)
++ {
++ case indir_dx_reg:
++ if (intel_syntax)
++ s = "[dx]";
++ else
++ s = "(%dx)";
++ break;
++ case ax_reg: case cx_reg: case dx_reg: case bx_reg:
++ case sp_reg: case bp_reg: case si_reg: case di_reg:
++ s = names16[code - ax_reg + add];
++ break;
++ case es_reg: case ss_reg: case cs_reg:
++ case ds_reg: case fs_reg: case gs_reg:
++ s = names_seg[code - es_reg + add];
++ break;
++ case al_reg: case ah_reg: case cl_reg: case ch_reg:
++ case dl_reg: case dh_reg: case bl_reg: case bh_reg:
++ USED_REX (0);
++ if (rex)
++ s = names8rex[code - al_reg + add];
++ else
++ s = names8[code - al_reg];
++ break;
++ case rAX_reg: case rCX_reg: case rDX_reg: case rBX_reg:
++ case rSP_reg: case rBP_reg: case rSI_reg: case rDI_reg:
++ if (mode_64bit)
++ {
++ s = names64[code - rAX_reg + add];
++ break;
++ }
++ /* Outside 64-bit mode rXX degrades to eXX handling below.  */
++ code += eAX_reg - rAX_reg;
++ /* Fall through. */
++ case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg:
++ case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ s = names64[code - eAX_reg + add];
++ else if (sizeflag & DFLAG)
++ s = names32[code - eAX_reg + add];
++ else
++ s = names16[code - eAX_reg + add];
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ default:
++ s = INTERNAL_DISASSEMBLER_ERROR;
++ break;
++ }
++ oappend (s);
++}
++
++/* Like OP_REG but for implicit registers that are never extended by
++ REX.B (e.g. the AL/eAX/DX operands of in/out).  */
++static void
++OP_IMREG (int code, int sizeflag)
++{
++ const char *s;
++
++ switch (code)
++ {
++ case indir_dx_reg:
++ if (intel_syntax)
++ s = "[dx]";
++ else
++ s = "(%dx)";
++ break;
++ case ax_reg: case cx_reg: case dx_reg: case bx_reg:
++ case sp_reg: case bp_reg: case si_reg: case di_reg:
++ s = names16[code - ax_reg];
++ break;
++ case es_reg: case ss_reg: case cs_reg:
++ case ds_reg: case fs_reg: case gs_reg:
++ s = names_seg[code - es_reg];
++ break;
++ case al_reg: case ah_reg: case cl_reg: case ch_reg:
++ case dl_reg: case dh_reg: case bl_reg: case bh_reg:
++ USED_REX (0);
++ if (rex)
++ s = names8rex[code - al_reg];
++ else
++ s = names8[code - al_reg];
++ break;
++ case eAX_reg: case eCX_reg: case eDX_reg: case eBX_reg:
++ case eSP_reg: case eBP_reg: case eSI_reg: case eDI_reg:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ s = names64[code - eAX_reg];
++ else if (sizeflag & DFLAG)
++ s = names32[code - eAX_reg];
++ else
++ s = names16[code - eAX_reg];
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ default:
++ s = INTERNAL_DISASSEMBLER_ERROR;
++ break;
++ }
++ oappend (s);
++}
++
++/* Immediate operand ("I"): fetch a byte/word/dword immediate, mask
++ it to its natural width, and print it in hex, with a leading '$'
++ for AT&T syntax.  */
++static void
++OP_I (int bytemode, int sizeflag)
++{
++ bfd_signed_vma op;
++ bfd_signed_vma mask = -1;
++
++ switch (bytemode)
++ {
++ case b_mode:
++ FETCH_DATA (the_info, codep + 1);
++ op = *codep++;
++ mask = 0xff;
++ break;
++ case q_mode:
++ if (mode_64bit)
++ {
++ op = get32s ();
++ break;
++ }
++ /* Fall through. */
++ case v_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ op = get32s ();
++ else if (sizeflag & DFLAG)
++ {
++ op = get32 ();
++ mask = 0xffffffff;
++ }
++ else
++ {
++ op = get16 ();
++ /* 16-bit width: was the typo 0xfffff (20 bits); harmless
++ since get16 () <= 0xffff, but fixed for correctness.  */
++ mask = 0xffff;
++ }
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case w_mode:
++ mask = 0xffff;
++ op = get16 ();
++ break;
++ case const_1_mode:
++ if (intel_syntax)
++ oappend ("1");
++ return;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ return;
++ }
++
++ op &= mask;
++ scratchbuf[0] = '$';
++ print_operand_value (scratchbuf + 1, 1, op);
++ oappend (scratchbuf + intel_syntax);
++ scratchbuf[0] = '\0';
++}
++
++/* Immediate operand that can be a full 64 bits in 64-bit mode
++ (e.g. movabs); outside 64-bit mode it degrades to OP_I.  */
++static void
++OP_I64 (int bytemode, int sizeflag)
++{
++ bfd_signed_vma op;
++ bfd_signed_vma mask = -1;
++
++ if (!mode_64bit)
++ {
++ OP_I (bytemode, sizeflag);
++ return;
++ }
++
++ switch (bytemode)
++ {
++ case b_mode:
++ FETCH_DATA (the_info, codep + 1);
++ op = *codep++;
++ mask = 0xff;
++ break;
++ case v_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ op = get64 ();
++ else if (sizeflag & DFLAG)
++ {
++ op = get32 ();
++ mask = 0xffffffff;
++ }
++ else
++ {
++ op = get16 ();
++ /* 16-bit width: was the typo 0xfffff (20 bits); harmless
++ since get16 () <= 0xffff, but fixed for correctness.  */
++ mask = 0xffff;
++ }
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case w_mode:
++ mask = 0xffff;
++ op = get16 ();
++ break;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ return;
++ }
++
++ op &= mask;
++ scratchbuf[0] = '$';
++ print_operand_value (scratchbuf + 1, 1, op);
++ oappend (scratchbuf + intel_syntax);
++ scratchbuf[0] = '\0';
++}
++
++/* Sign-extended immediate operand ("sI").  The value is printed in
++ hex after sign extension, so no width mask is needed; the old
++ 'mask' local was computed but never applied and has been removed
++ as dead code.  */
++static void
++OP_sI (int bytemode, int sizeflag)
++{
++ bfd_signed_vma op;
++
++ switch (bytemode)
++ {
++ case b_mode:
++ FETCH_DATA (the_info, codep + 1);
++ op = *codep++;
++ if ((op & 0x80) != 0)
++ op -= 0x100;
++ break;
++ case v_mode:
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ op = get32s ();
++ else if (sizeflag & DFLAG)
++ op = get32s ();
++ else
++ {
++ op = get16 ();
++ if ((op & 0x8000) != 0)
++ op -= 0x10000;
++ }
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ break;
++ case w_mode:
++ op = get16 ();
++ if ((op & 0x8000) != 0)
++ op -= 0x10000;
++ break;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ return;
++ }
++
++ scratchbuf[0] = '$';
++ print_operand_value (scratchbuf + 1, 1, op);
++ oappend (scratchbuf + intel_syntax);
++}
++
++/* Relative jump displacement ("J"): fetch the displacement, compute
++ the absolute target from the current decode position, and print
++ it.  */
++static void
++OP_J (int bytemode, int sizeflag)
++{
++ bfd_vma disp;
++ bfd_vma mask = -1;
++
++ switch (bytemode)
++ {
++ case b_mode:
++ FETCH_DATA (the_info, codep + 1);
++ disp = *codep++;
++ if ((disp & 0x80) != 0)
++ disp -= 0x100;
++ break;
++ case v_mode:
++ if (sizeflag & DFLAG)
++ disp = get32s ();
++ else
++ {
++ disp = get16 ();
++ /* For some reason, a data16 prefix on a jump instruction
++ means that the pc is masked to 16 bits after the
++ displacement is added! */
++ mask = 0xffff;
++ }
++ break;
++ default:
++ oappend (INTERNAL_DISASSEMBLER_ERROR);
++ return;
++ }
++ disp = (start_pc + codep - start_codep + disp) & mask;
++ set_op (disp, 0);
++ print_operand_value (scratchbuf, 1, disp);
++ oappend (scratchbuf);
++}
++
++/* Segment-register operand selected by the modrm reg field.  */
++static void
++OP_SEG (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ oappend (names_seg[reg]);
++}
++
++/* Direct far pointer operand (ljmp/lcall ptr16:16/ptr16:32): the
++ offset width follows the operand-size flag, then a 16-bit
++ segment selector.  */
++static void
++OP_DIR (int dummy ATTRIBUTE_UNUSED, int sizeflag)
++{
++ int seg, offset;
++
++ offset = (sizeflag & DFLAG) ? get32 () : get16 ();
++ seg = get16 ();
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (intel_syntax)
++ sprintf (scratchbuf, "0x%x,0x%x", seg, offset);
++ else
++ sprintf (scratchbuf, "$0x%x,$0x%x", seg, offset);
++ oappend (scratchbuf);
++}
++
++/* moffs operand (mov al/ax/eax to/from absolute address): a raw
++ offset with no modrm byte, sized by the address-size flag.  */
++static void
++OP_OFF (int bytemode ATTRIBUTE_UNUSED, int sizeflag)
++{
++ bfd_vma off;
++
++ append_seg ();
++
++ if ((sizeflag & AFLAG) || mode_64bit)
++ off = get32 ();
++ else
++ off = get16 ();
++
++ if (intel_syntax)
++ {
++ /* No explicit segment override: show the default %ds.  */
++ if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
++ | PREFIX_ES | PREFIX_FS | PREFIX_GS)))
++ {
++ oappend (names_seg[ds_reg - es_reg]);
++ oappend (":");
++ }
++ }
++ print_operand_value (scratchbuf, 1, off);
++ oappend (scratchbuf);
++}
++
++static void
++OP_OFF64 (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ bfd_vma off;
++
++ if (!mode_64bit)
++ {
++ OP_OFF (bytemode, sizeflag);
++ return;
++ }
++
++ append_seg ();
++
++ off = get64 ();
++
++ if (intel_syntax)
++ {
++ if (!(prefixes & (PREFIX_CS | PREFIX_SS | PREFIX_DS
++ | PREFIX_ES | PREFIX_FS | PREFIX_GS)))
++ {
++ oappend (names_seg[ds_reg - es_reg]);
++ oappend (":");
++ }
++ }
++ print_operand_value (scratchbuf, 1, off);
++ oappend (scratchbuf);
++}
++
++static void
++ptr_reg (int code, int sizeflag)
++{
++ const char *s;
++
++ *obufp++ = open_char;
++ used_prefixes |= (prefixes & PREFIX_ADDR);
++ if (mode_64bit)
++ {
++ if (!(sizeflag & AFLAG))
++ s = names32[code - eAX_reg];
++ else
++ s = names64[code - eAX_reg];
++ }
++ else if (sizeflag & AFLAG)
++ s = names32[code - eAX_reg];
++ else
++ s = names16[code - eAX_reg];
++ oappend (s);
++ *obufp++ = close_char;
++ *obufp = 0;
++}
++
++static void
++OP_ESreg (int code, int sizeflag)
++{
++ if (intel_syntax)
++ {
++ if (codep[-1] & 1)
++ {
++ USED_REX (REX_MODE64);
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (rex & REX_MODE64)
++ oappend ("QWORD PTR ");
++ else if ((sizeflag & DFLAG))
++ oappend ("DWORD PTR ");
++ else
++ oappend ("WORD PTR ");
++ }
++ else
++ oappend ("BYTE PTR ");
++ }
++
++ oappend ("%es:" + intel_syntax);
++ ptr_reg (code, sizeflag);
++}
++
++static void
++OP_DSreg (int code, int sizeflag)
++{
++ if (intel_syntax)
++ {
++ if (codep[-1] != 0xd7 && (codep[-1] & 1))
++ {
++ USED_REX (REX_MODE64);
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (rex & REX_MODE64)
++ oappend ("QWORD PTR ");
++ else if ((sizeflag & DFLAG))
++ oappend ("DWORD PTR ");
++ else
++ oappend ("WORD PTR ");
++ }
++ else
++ oappend ("BYTE PTR ");
++ }
++
++ if ((prefixes
++ & (PREFIX_CS
++ | PREFIX_DS
++ | PREFIX_SS
++ | PREFIX_ES
++ | PREFIX_FS
++ | PREFIX_GS)) == 0)
++ prefixes |= PREFIX_DS;
++ append_seg ();
++ ptr_reg (code, sizeflag);
++}
++
++static void
++OP_C (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ int add = 0;
++ if (rex & REX_EXTX)
++ {
++ USED_REX (REX_EXTX);
++ add = 8;
++ }
++ else if (!mode_64bit && (prefixes & PREFIX_LOCK))
++ {
++ used_prefixes |= PREFIX_LOCK;
++ add = 8;
++ }
++ sprintf (scratchbuf, "%%cr%d", reg + add);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_D (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ int add = 0;
++ USED_REX (REX_EXTX);
++ if (rex & REX_EXTX)
++ add = 8;
++ if (intel_syntax)
++ sprintf (scratchbuf, "db%d", reg + add);
++ else
++ sprintf (scratchbuf, "%%db%d", reg + add);
++ oappend (scratchbuf);
++}
++
++static void
++OP_T (int dummy ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ sprintf (scratchbuf, "%%tr%d", reg);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_Rd (int bytemode, int sizeflag)
++{
++ if (mod == 3)
++ OP_E (bytemode, sizeflag);
++ else
++ BadOp ();
++}
++
++static void
++OP_MMX (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (prefixes & PREFIX_DATA)
++ {
++ int add = 0;
++ USED_REX (REX_EXTX);
++ if (rex & REX_EXTX)
++ add = 8;
++ sprintf (scratchbuf, "%%xmm%d", reg + add);
++ }
++ else
++ sprintf (scratchbuf, "%%mm%d", reg);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_XMM (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ int add = 0;
++ USED_REX (REX_EXTX);
++ if (rex & REX_EXTX)
++ add = 8;
++ sprintf (scratchbuf, "%%xmm%d", reg + add);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_EM (int bytemode, int sizeflag)
++{
++ if (mod != 3)
++ {
++ if (intel_syntax && bytemode == v_mode)
++ {
++ bytemode = (prefixes & PREFIX_DATA) ? x_mode : q_mode;
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ }
++ OP_E (bytemode, sizeflag);
++ return;
++ }
++
++ /* Skip mod/rm byte. */
++ MODRM_CHECK;
++ codep++;
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (prefixes & PREFIX_DATA)
++ {
++ int add = 0;
++
++ USED_REX (REX_EXTZ);
++ if (rex & REX_EXTZ)
++ add = 8;
++ sprintf (scratchbuf, "%%xmm%d", rm + add);
++ }
++ else
++ sprintf (scratchbuf, "%%mm%d", rm);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_EX (int bytemode, int sizeflag)
++{
++ int add = 0;
++ if (mod != 3)
++ {
++ if (intel_syntax && bytemode == v_mode)
++ {
++ switch (prefixes & (PREFIX_DATA|PREFIX_REPZ|PREFIX_REPNZ))
++ {
++ case 0: bytemode = x_mode; break;
++ case PREFIX_REPZ: bytemode = d_mode; used_prefixes |= PREFIX_REPZ; break;
++ case PREFIX_DATA: bytemode = x_mode; used_prefixes |= PREFIX_DATA; break;
++ case PREFIX_REPNZ: bytemode = q_mode; used_prefixes |= PREFIX_REPNZ; break;
++ default: bytemode = 0; break;
++ }
++ }
++ OP_E (bytemode, sizeflag);
++ return;
++ }
++ USED_REX (REX_EXTZ);
++ if (rex & REX_EXTZ)
++ add = 8;
++
++ /* Skip mod/rm byte. */
++ MODRM_CHECK;
++ codep++;
++ sprintf (scratchbuf, "%%xmm%d", rm + add);
++ oappend (scratchbuf + intel_syntax);
++}
++
++static void
++OP_MS (int bytemode, int sizeflag)
++{
++ if (mod == 3)
++ OP_EM (bytemode, sizeflag);
++ else
++ BadOp ();
++}
++
++static void
++OP_XS (int bytemode, int sizeflag)
++{
++ if (mod == 3)
++ OP_EX (bytemode, sizeflag);
++ else
++ BadOp ();
++}
++
++static void
++OP_M (int bytemode, int sizeflag)
++{
++ if (mod == 3)
++ BadOp (); /* bad lea,lds,les,lfs,lgs,lss modrm */
++ else
++ OP_E (bytemode, sizeflag);
++}
++
++static void
++OP_0f07 (int bytemode, int sizeflag)
++{
++ if (mod != 3 || rm != 0)
++ BadOp ();
++ else
++ OP_E (bytemode, sizeflag);
++}
++
++static void
++OP_0fae (int bytemode, int sizeflag)
++{
++ if (mod == 3)
++ {
++ if (reg == 7)
++ strcpy (obuf + strlen (obuf) - sizeof ("clflush") + 1, "sfence");
++
++ if (reg < 5 || rm != 0)
++ {
++ BadOp (); /* bad sfence, mfence, or lfence */
++ return;
++ }
++ }
++ else if (reg != 7)
++ {
++ BadOp (); /* bad clflush */
++ return;
++ }
++
++ OP_E (bytemode, sizeflag);
++}
++
++static void
++NOP_Fixup (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ /* NOP with REPZ prefix is called PAUSE. */
++ if (prefixes == PREFIX_REPZ)
++ strcpy (obuf, "pause");
++}
++
++static const char *const Suffix3DNow[] = {
++/* 00 */ NULL, NULL, NULL, NULL,
++/* 04 */ NULL, NULL, NULL, NULL,
++/* 08 */ NULL, NULL, NULL, NULL,
++/* 0C */ "pi2fw", "pi2fd", NULL, NULL,
++/* 10 */ NULL, NULL, NULL, NULL,
++/* 14 */ NULL, NULL, NULL, NULL,
++/* 18 */ NULL, NULL, NULL, NULL,
++/* 1C */ "pf2iw", "pf2id", NULL, NULL,
++/* 20 */ NULL, NULL, NULL, NULL,
++/* 24 */ NULL, NULL, NULL, NULL,
++/* 28 */ NULL, NULL, NULL, NULL,
++/* 2C */ NULL, NULL, NULL, NULL,
++/* 30 */ NULL, NULL, NULL, NULL,
++/* 34 */ NULL, NULL, NULL, NULL,
++/* 38 */ NULL, NULL, NULL, NULL,
++/* 3C */ NULL, NULL, NULL, NULL,
++/* 40 */ NULL, NULL, NULL, NULL,
++/* 44 */ NULL, NULL, NULL, NULL,
++/* 48 */ NULL, NULL, NULL, NULL,
++/* 4C */ NULL, NULL, NULL, NULL,
++/* 50 */ NULL, NULL, NULL, NULL,
++/* 54 */ NULL, NULL, NULL, NULL,
++/* 58 */ NULL, NULL, NULL, NULL,
++/* 5C */ NULL, NULL, NULL, NULL,
++/* 60 */ NULL, NULL, NULL, NULL,
++/* 64 */ NULL, NULL, NULL, NULL,
++/* 68 */ NULL, NULL, NULL, NULL,
++/* 6C */ NULL, NULL, NULL, NULL,
++/* 70 */ NULL, NULL, NULL, NULL,
++/* 74 */ NULL, NULL, NULL, NULL,
++/* 78 */ NULL, NULL, NULL, NULL,
++/* 7C */ NULL, NULL, NULL, NULL,
++/* 80 */ NULL, NULL, NULL, NULL,
++/* 84 */ NULL, NULL, NULL, NULL,
++/* 88 */ NULL, NULL, "pfnacc", NULL,
++/* 8C */ NULL, NULL, "pfpnacc", NULL,
++/* 90 */ "pfcmpge", NULL, NULL, NULL,
++/* 94 */ "pfmin", NULL, "pfrcp", "pfrsqrt",
++/* 98 */ NULL, NULL, "pfsub", NULL,
++/* 9C */ NULL, NULL, "pfadd", NULL,
++/* A0 */ "pfcmpgt", NULL, NULL, NULL,
++/* A4 */ "pfmax", NULL, "pfrcpit1", "pfrsqit1",
++/* A8 */ NULL, NULL, "pfsubr", NULL,
++/* AC */ NULL, NULL, "pfacc", NULL,
++/* B0 */ "pfcmpeq", NULL, NULL, NULL,
++/* B4 */ "pfmul", NULL, "pfrcpit2", "pfmulhrw",
++/* B8 */ NULL, NULL, NULL, "pswapd",
++/* BC */ NULL, NULL, NULL, "pavgusb",
++/* C0 */ NULL, NULL, NULL, NULL,
++/* C4 */ NULL, NULL, NULL, NULL,
++/* C8 */ NULL, NULL, NULL, NULL,
++/* CC */ NULL, NULL, NULL, NULL,
++/* D0 */ NULL, NULL, NULL, NULL,
++/* D4 */ NULL, NULL, NULL, NULL,
++/* D8 */ NULL, NULL, NULL, NULL,
++/* DC */ NULL, NULL, NULL, NULL,
++/* E0 */ NULL, NULL, NULL, NULL,
++/* E4 */ NULL, NULL, NULL, NULL,
++/* E8 */ NULL, NULL, NULL, NULL,
++/* EC */ NULL, NULL, NULL, NULL,
++/* F0 */ NULL, NULL, NULL, NULL,
++/* F4 */ NULL, NULL, NULL, NULL,
++/* F8 */ NULL, NULL, NULL, NULL,
++/* FC */ NULL, NULL, NULL, NULL,
++};
++
++static void
++OP_3DNowSuffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ const char *mnemonic;
++
++ FETCH_DATA (the_info, codep + 1);
++ /* AMD 3DNow! instructions are specified by an opcode suffix in the
++ place where an 8-bit immediate would normally go. ie. the last
++ byte of the instruction. */
++ obufp = obuf + strlen (obuf);
++ mnemonic = Suffix3DNow[*codep++ & 0xff];
++ if (mnemonic)
++ oappend (mnemonic);
++ else
++ {
++ /* Since a variable sized modrm/sib chunk is between the start
++ of the opcode (0x0f0f) and the opcode suffix, we need to do
++ all the modrm processing first, and don't know until now that
++ we have a bad opcode. This necessitates some cleaning up. */
++ op1out[0] = '\0';
++ op2out[0] = '\0';
++ BadOp ();
++ }
++}
++
++static const char *simd_cmp_op[] = {
++ "eq",
++ "lt",
++ "le",
++ "unord",
++ "neq",
++ "nlt",
++ "nle",
++ "ord"
++};
++
++static void
++OP_SIMD_Suffix (int bytemode ATTRIBUTE_UNUSED, int sizeflag ATTRIBUTE_UNUSED)
++{
++ unsigned int cmp_type;
++
++ FETCH_DATA (the_info, codep + 1);
++ obufp = obuf + strlen (obuf);
++ cmp_type = *codep++ & 0xff;
++ if (cmp_type < 8)
++ {
++ char suffix1 = 'p', suffix2 = 's';
++ used_prefixes |= (prefixes & PREFIX_REPZ);
++ if (prefixes & PREFIX_REPZ)
++ suffix1 = 's';
++ else
++ {
++ used_prefixes |= (prefixes & PREFIX_DATA);
++ if (prefixes & PREFIX_DATA)
++ suffix2 = 'd';
++ else
++ {
++ used_prefixes |= (prefixes & PREFIX_REPNZ);
++ if (prefixes & PREFIX_REPNZ)
++ suffix1 = 's', suffix2 = 'd';
++ }
++ }
++ sprintf (scratchbuf, "cmp%s%c%c",
++ simd_cmp_op[cmp_type], suffix1, suffix2);
++ used_prefixes |= (prefixes & PREFIX_REPZ);
++ oappend (scratchbuf);
++ }
++ else
++ {
++ /* We have a bad extension byte. Clean up. */
++ op1out[0] = '\0';
++ op2out[0] = '\0';
++ BadOp ();
++ }
++}
++
++static void
++SIMD_Fixup (int extrachar, int sizeflag ATTRIBUTE_UNUSED)
++{
++ /* Change movlps/movhps to movhlps/movlhps for 2 register operand
++ forms of these instructions. */
++ if (mod == 3)
++ {
++ char *p = obuf + strlen (obuf);
++ *(p + 1) = '\0';
++ *p = *(p - 1);
++ *(p - 1) = *(p - 2);
++ *(p - 2) = *(p - 3);
++ *(p - 3) = extrachar;
++ }
++}
++
++static void
++PNI_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag)
++{
++ if (mod == 3 && reg == 1 && rm <= 1)
++ {
++ /* Override "sidt". */
++ char *p = obuf + strlen (obuf) - 4;
++
++ /* We might have a suffix when disassembling with -Msuffix. */
++ if (*p == 'i')
++ --p;
++
++ if (rm)
++ {
++ /* mwait %eax,%ecx */
++ strcpy (p, "mwait");
++ if (!intel_syntax)
++ strcpy (op1out, names32[0]);
++ }
++ else
++ {
++ /* monitor %eax,%ecx,%edx" */
++ strcpy (p, "monitor");
++ if (!intel_syntax)
++ {
++ if (!mode_64bit)
++ strcpy (op1out, names32[0]);
++ else if (!(prefixes & PREFIX_ADDR))
++ strcpy (op1out, names64[0]);
++ else
++ {
++ strcpy (op1out, names32[0]);
++ used_prefixes |= PREFIX_ADDR;
++ }
++ strcpy (op3out, names32[2]);
++ }
++ }
++ if (!intel_syntax)
++ {
++ strcpy (op2out, names32[1]);
++ two_source_ops = 1;
++ }
++
++ codep++;
++ }
++ else
++ OP_M (0, sizeflag);
++}
++
++static void
++SVME_Fixup (int bytemode, int sizeflag)
++{
++ const char *alt;
++ char *p;
++
++ switch (*codep)
++ {
++ case 0xd8:
++ alt = "vmrun";
++ break;
++ case 0xd9:
++ alt = "vmmcall";
++ break;
++ case 0xda:
++ alt = "vmload";
++ break;
++ case 0xdb:
++ alt = "vmsave";
++ break;
++ case 0xdc:
++ alt = "stgi";
++ break;
++ case 0xdd:
++ alt = "clgi";
++ break;
++ case 0xde:
++ alt = "skinit";
++ break;
++ case 0xdf:
++ alt = "invlpga";
++ break;
++ default:
++ OP_M (bytemode, sizeflag);
++ return;
++ }
++ /* Override "lidt". */
++ p = obuf + strlen (obuf) - 4;
++ /* We might have a suffix. */
++ if (*p == 'i')
++ --p;
++ strcpy (p, alt);
++ if (!(prefixes & PREFIX_ADDR))
++ {
++ ++codep;
++ return;
++ }
++ used_prefixes |= PREFIX_ADDR;
++ switch (*codep++)
++ {
++ case 0xdf:
++ strcpy (op2out, names32[1]);
++ two_source_ops = 1;
++ /* Fall through. */
++ case 0xd8:
++ case 0xda:
++ case 0xdb:
++ *obufp++ = open_char;
++ if (mode_64bit || (sizeflag & AFLAG))
++ alt = names32[0];
++ else
++ alt = names16[0];
++ strcpy (obufp, alt);
++ obufp += strlen (alt);
++ *obufp++ = close_char;
++ *obufp = '\0';
++ break;
++ }
++}
++
++static void
++INVLPG_Fixup (int bytemode, int sizeflag)
++{
++ const char *alt;
++
++ switch (*codep)
++ {
++ case 0xf8:
++ alt = "swapgs";
++ break;
++ case 0xf9:
++ alt = "rdtscp";
++ break;
++ default:
++ OP_M (bytemode, sizeflag);
++ return;
++ }
++ /* Override "invlpg". */
++ strcpy (obuf + strlen (obuf) - 6, alt);
++ codep++;
++}
++
++static void
++BadOp (void)
++{
++ /* Throw away prefixes and 1st. opcode byte. */
++ codep = insn_codep + 1;
++ oappend ("(bad)");
++}
++
++static void
++SEG_Fixup (int extrachar, int sizeflag)
++{
++ if (mod == 3)
++ {
++ /* We need to add a proper suffix with
++
++ movw %ds,%ax
++ movl %ds,%eax
++ movq %ds,%rax
++ movw %ax,%ds
++ movl %eax,%ds
++ movq %rax,%ds
++ */
++ const char *suffix;
++
++ if (prefixes & PREFIX_DATA)
++ suffix = "w";
++ else
++ {
++ USED_REX (REX_MODE64);
++ if (rex & REX_MODE64)
++ suffix = "q";
++ else
++ suffix = "l";
++ }
++ strcat (obuf, suffix);
++ }
++ else
++ {
++ /* We need to fix the suffix for
++
++ movw %ds,(%eax)
++ movw %ds,(%rax)
++ movw (%eax),%ds
++ movw (%rax),%ds
++
++ Override "mov[l|q]". */
++ char *p = obuf + strlen (obuf) - 1;
++
++ /* We might not have a suffix. */
++ if (*p == 'v')
++ ++p;
++ *p = 'w';
++ }
++
++ OP_E (extrachar, sizeflag);
++}
++
++static void
++VMX_Fixup (int extrachar ATTRIBUTE_UNUSED, int sizeflag)
++{
++ if (mod == 3 && reg == 0 && rm >=1 && rm <= 4)
++ {
++ /* Override "sgdt". */
++ char *p = obuf + strlen (obuf) - 4;
++
++ /* We might have a suffix when disassembling with -Msuffix. */
++ if (*p == 'g')
++ --p;
++
++ switch (rm)
++ {
++ case 1:
++ strcpy (p, "vmcall");
++ break;
++ case 2:
++ strcpy (p, "vmlaunch");
++ break;
++ case 3:
++ strcpy (p, "vmresume");
++ break;
++ case 4:
++ strcpy (p, "vmxoff");
++ break;
++ }
++
++ codep++;
++ }
++ else
++ OP_E (0, sizeflag);
++}
++
++static void
++OP_VMX (int bytemode, int sizeflag)
++{
++ used_prefixes |= (prefixes & (PREFIX_DATA | PREFIX_REPZ));
++ if (prefixes & PREFIX_DATA)
++ strcpy (obuf, "vmclear");
++ else if (prefixes & PREFIX_REPZ)
++ strcpy (obuf, "vmxon");
++ else
++ strcpy (obuf, "vmptrld");
++ OP_E (bytemode, sizeflag);
++}
+diff -Nurp linux-2.6.22-590/arch/x86_64/kernel/entry.S linux-2.6.22-600/arch/x86_64/kernel/entry.S
+--- linux-2.6.22-590/arch/x86_64/kernel/entry.S 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/arch/x86_64/kernel/entry.S 2008-04-09 18:16:24.000000000 +0200
+@@ -1170,3 +1170,30 @@ KPROBE_ENTRY(ignore_sysret)
+ sysret
+ CFI_ENDPROC
+ ENDPROC(ignore_sysret)
++
++#ifdef CONFIG_KDB
++
++#ifdef CONFIG_SMP
++ENTRY(kdb_interrupt)
++ apicinterrupt KDB_VECTOR,smp_kdb_interrupt
++END(kdb_interrupt)
++#endif /* CONFIG_SMP */
++
++ENTRY(kdb_call)
++ INTR_FRAME
++ cld
++ pushq $-1 # orig_eax
++ CFI_ADJUST_CFA_OFFSET 8
++ SAVE_ALL
++ movq $1,%rdi # KDB_REASON_ENTER
++ movq $0,%rsi # error_code
++ movq %rsp,%rdx # struct pt_regs
++ call kdb
++ RESTORE_ALL
++ addq $8,%rsp # forget orig_eax
++ CFI_ADJUST_CFA_OFFSET -8
++ iretq
++ CFI_ENDPROC
++END(kdb_call)
++
++#endif /* CONFIG_KDB */
+diff -Nurp linux-2.6.22-590/arch/x86_64/kernel/io_apic.c linux-2.6.22-600/arch/x86_64/kernel/io_apic.c
+--- linux-2.6.22-590/arch/x86_64/kernel/io_apic.c 2008-04-09 18:10:46.000000000 +0200
++++ linux-2.6.22-600/arch/x86_64/kernel/io_apic.c 2008-04-09 18:16:24.000000000 +0200
+@@ -35,6 +35,10 @@
+ #include <acpi/acpi_bus.h>
+ #endif
+
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#endif /* CONFIG_KDB */
++
+ #include <asm/idle.h>
+ #include <asm/io.h>
+ #include <asm/smp.h>
+@@ -697,6 +701,10 @@ next:
+ continue;
+ if (vector == IA32_SYSCALL_VECTOR)
+ goto next;
++#ifdef CONFIG_KDB
++ if (vector == KDBENTER_VECTOR)
++ goto next;
++#endif /* CONFIG_KDB */
+ for_each_cpu_mask(new_cpu, new_mask)
+ if (per_cpu(vector_irq, new_cpu)[vector] != -1)
+ goto next;
+diff -Nurp linux-2.6.22-590/arch/x86_64/kernel/traps.c linux-2.6.22-600/arch/x86_64/kernel/traps.c
+--- linux-2.6.22-590/arch/x86_64/kernel/traps.c 2008-04-09 18:10:53.000000000 +0200
++++ linux-2.6.22-600/arch/x86_64/kernel/traps.c 2008-04-09 18:17:59.000000000 +0200
+@@ -555,6 +555,8 @@ void __kprobes die_nmi(char *str, struct
+ printk(str, smp_processor_id());
+ show_registers(regs);
+ vxh_dump_history();
++ if (strncmp(str, "NMI Watchdog", 12) == 0)
++ notify_die(DIE_NMIWATCHDOG, "nmi_watchdog", regs, 0, 2, SIGINT);
+ if (kexec_should_crash(current))
+ crash_kexec(regs);
+ if (do_panic || panic_on_oops)
+diff -Nurp linux-2.6.22-590/arch/x86_64/Makefile linux-2.6.22-600/arch/x86_64/Makefile
+--- linux-2.6.22-590/arch/x86_64/Makefile 2008-04-09 18:10:46.000000000 +0200
++++ linux-2.6.22-600/arch/x86_64/Makefile 2008-04-09 18:16:24.000000000 +0200
+@@ -80,6 +80,7 @@ core-y += arch/x86_64/kernel/ \
+ core-$(CONFIG_IA32_EMULATION) += arch/x86_64/ia32/
+ drivers-$(CONFIG_PCI) += arch/x86_64/pci/
+ drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/
++drivers-$(CONFIG_KDB) += arch/x86_64/kdb/
+
+ boot := arch/x86_64/boot
+
+diff -Nurp linux-2.6.22-590/Documentation/kdb/bt_x86 linux-2.6.22-600/Documentation/kdb/bt_x86
+--- linux-2.6.22-590/Documentation/kdb/bt_x86 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/bt_x86 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,1837 @@
++Copyright Keith Owens, 2007.
++
++How the KDB backtrace for x86 works, how to diagnose problems and submit a bug
++==============================================================================
++
++Unlike ia64, x86 architectures do not mandate unwind information in the kernel.
++gcc will include some unwind information for C functions, but not for assembler
++code. Attempts have been made to add unwind information to the assembler code
++by hand, with little success. Eventually Linus rejected the x86 unwind code
++because it was breaking too often and destroying useful debugging data.
++
++Even if the x86 unwinder worked correctly, it would only give an accurate
++backtrace, it would not give function arguments. Needless to say, function
++arguments are what people really want. To get function arguments requires much
++more support from the compiler than simple unwind data, the compiler has to
++track line by line where each argument is held and make that data available to
++the debugger. Compiling with gcc -g will provide that information, but the
++resulting kernel is several times larger than normal.
++
++Although the gcc -g data can be stored on another machine, there are constructs
++in the kernel that cannot be tracked by this method. i386 builds with 4K stacks
++and all x86_64 builds have multiple kernel stacks. The compiler knows nothing
++about these extra stacks and cannot backtrace through them correctly. The
++assembler code in arch/{i386,x86_64}/kernel/entry.S is a maze of twisty logic
++paths, some of which eventually jump to common labels. Describing this twisty
++logic to an unwinder is very difficult, especially when you try to describe
++where arguments and/or registers are held.
++
++KDB gets an accurate x86 backtrace and extracts the arguments by performing code
++decomposition and analysis at run time. This avoids the need to bloat the
++running kernel to several times its normal size with gcc -g data. KDB (unlike
++gdb) also knows about the additional kernel stacks and twisty assembler code
++paths.
++
++The x86 backtrace code for i386 is very similar to the x86_64 code, with 80%
++common code and data. Most of the differences between the backtrace for the two
++architectures is due to the assembler code and stack handling. To avoid code
++duplication between KDB patches, the x86 backtrace code is actually stored in
++the kdb common patch, in source kdb/kdba_bt_x86.c. kdb/Makefile only builds
++kdba_bt_x86.o for i386 or x86_64. Most of the code reads as if the architecture
++is x86_64, using register names like rsp and rip. i386 is treated as a subset
++of x86_64, with fewer registers and printing the names as esp and eip. When
++this documentation refers to rsp and rip, read it as esp and eip for i386. The
++20% of code and data that is different is held in two large #ifdef sections,
++scan kdba_bt_x86.c for CONFIG_X86_64. Be careful when changing anything in the
++architecture specific sections, you will need to review the other architecture
++to see if it needs changes as well.
++
++The idea behind the x86 backtrace is to trace one function at a time, which
++gives us the calling function. Then apply the same algorithm to the calling
++function until you unwind to the first function in the process. The starting
++point for tracing any process is to extract the current stack pointer and
++current instruction pointer (rsp and rip). The way that these values are
++extracted varies between running tasks and blocked tasks, the method is
++described later (Process Starting Point) but ignore it for now, just assume that
++we have a starting rsp and rip.
++
++Given the instruction pointer (rip), we identify the start and end of the kernel
++or module function it is in, using the kernel symbol table. This is easy for C
++code, it is significantly harder for assembler code because of the twisty code
++paths that branch to common labels. The method of identifying the current
++function is described later (Identifying The Current Function) but ignore it for
++now, just assume that we have the start and end address of the function plus
++its name.
++
++After the rip has been mapped to a function name with sensible start and end
++addresses, the next step is to analyse the code paths in that function. KDB
++already has a built in disassembler (copied with slight modifications from
++binutils) which knows how to decode each x86 instruction. Instead of
++duplicating that logic in kdba_bt_x86, it takes advantage of the fact that you
++can override the disassembler's print function, sending the output line to a
++buffer instead of printing it. kdba_bt_x86 still has to decode the buffer but
++that is a lot easier than decoding the x86 instruction set.
++
++The code analysis consists of two main passes. There are example below of the
++analysis with basic block (bb) debugging activated (Examples of Basic Block
++Debugging Output).
++
++The first pass (bb_pass1) identifies all the basic blocks in the function. For
++our purposes, a basic block has a single entry point and one or more exit
++points. The start of the function is the start of basic block 0, all other
++blocks are the target of jump instructions (conditional or unconditional) from
++within the rest of the code. A block ends with an unconditional jump or with a
++terminating instruction such as ret, iret, sysexit, sysret or ud2a (BUG). A
++block can also end because the next instruction is the start of a new block
++(target of jmp or jcc), in this case there is an implied drop through from one
++block to the next.
++
++Although a call instruction also transfers control, it returns to the next
++instruction so call is not treated as a transfer. Instead call is treated as a
++normal instruction with side effects, the scratch registers are cleared after a
++call.
++
++At the end of the first pass over the function we have a directed graph that
++starts at bb[0]. The nodes of the graph (bb_list[]) are the basic blocks with
++their start and end address. The vertices are the jmp or jcc instructions
++(bb_jmp_list[]) that transfer control between blocks, plus any implied drop
++through transfers between consecutive blocks. This graph can have cycles, many
++functions have loops in them which transfer control back to earlier in the code
++body.
++
++The second pass (bb_pass2) runs over the directed graph analysing the effect of
++each instruction on the register and memory state. It is important to
++understand that the analysis in this pass is an abstract one, it does not use
++actual hex values for the register contents, instead it uses symbolic values.
++When the basic block code says that "register rsi contains value rax" it means
++that whatever value was in rax on entry to the function has also been copied to
++register rsi at this point in the logic flow.
++
++At an abstract level, all C functions start with exactly the same state, each
++register contains its own symbolic value (except for the stack pointer, see
++later) with no local stack variables defined yet. Assembler functions tend to
++have unusual starting points, with some registers and/or memory contents defined
++differently on entry. For example, ret_from_intr on i386 already has a struct
++pt_regs on its stack, ret_from_intr on x86_64 already has a partial struct
++pt_regs plus another two words stacked on top of it. The special starting cases
++are listed in the arch specific bb_special_cases[].
++
++Once the input state of bb[0] has been defined (including any special cases),
++bb_pass2_do_changed_blocks() runs over all the nodes in bb_list[]. Each
++instruction in each block is analysed (Tracking the Effects of Instructions) to
++see what effect it has on the abstract register state, the analysis of each
++instruction is done in bb_usage(). An instruction can copy one register to
++another, it can copy a register to stack, move from stack to a register or it
++can invalidate the contents of a register or memory location. A general rule in
++bb_usage() is that any operation whose results cannot be calculated in terms of
++an original input value gives an undefined result. Remember that it is the
++abstract value that becomes undefined, moving a constant to a register gives a
++defined value for the view of the program but it is undefined as far as the
++abstract state is concerned.
++
++References to data on the stack are a little awkward because the stack pointer
++frequently changes. To overcome this, kdba_bt_x86 defines a pseudo register
++called the 'original stack pointer' (osp). This always represents the stack
++pointer on entry to the function, so on entry rsp contains osp+0x0. As rsp is
++modified, it still points at osp, but its offset from osp changes. Copying rsp
++to another register (e.g. mov %rsp,%rbp) copies the osp offset as well. At the
++point that this function calls the next function down the stack, kdba_bt_x86
++knows the delta from osp to rsp. Applying that delta to the actual value of the
++stack pointer gives the stack pointer value on input to the current function,
++that location contains the return address so we can go up one stack frame and
++repeat the process.
++
++After doing basic block analysis on the current function, kdba_bt_x86 knows what
++the abstract register and memory state is at the point this function was
++interrupted or it called the next function down the stack, this is the exit
++state. For an interrupt the actual register values are saved in a struct
++pt_regs, for a call we have unwound from the KDB interrupt back to the called
++function so we have some idea of what the register values are in the called
++function. The abstract exit state is merged with the known actual register
++values to derive the original stack pointer. That in turn gives us any
++registers that were saved on stack. The original stack pointer gives the return
++address from the calling function, go up one stack frame and repeat the
++analysis.
++
++
++Process Starting Point
++======================
++
++All backtrace code needs a starting point which defines at least the stack
++pointer and instruction pointer, it may define other registers as well. The
++first part of kdba_bt_stack() extracts the starting point. Processes can be in
++one of three states, running (currently on a cpu), blocked (sleeping or ready to
++run but not currently on a cpu) or unknown.
++
++For running processes, the current rsp and rip are dynamic. Because KDB stops
++the entire machine by sending an interrupt to the other cpus, KDB can save the
++rsp and rip for each cpu at the point where KDB is entered. This data is held
++in array kdb_running_process and is stored by kdb_save_running() and the arch
++specific kdba_save_running() functions. When backtracing a running process, KDB
++uses the data in kdb_running_process as its starting point.
++
++For blocked processes we always have the saved rsp, it is held in the process's
++thread_info. For i386 blocked processes, thread_info also contains the saved
++rip. For x86_64 blocked processes, rip is no longer saved in thread_info, it is
++assumed that all blocked processes will resume at assembler label thread_return,
++so that rip is used on x86_64. See arch specific kdba_bt_stack_rip().
++
++Unknown process state only occurs when the user does 'bt <stack_address>'.
++Unlike other bt commands, 'bt <stack_address>' does not identify any specific
++process, instead it identifies a kernel stack. <stack_address> must be inside a
++valid kernel stack and must point to a saved rip from a call instruction.
++kdba_bt_x86.c uses the common kdba_get_stack_info() and arch specific
++kdba_get_stack_info_alternate() functions to check that the address falls within
++a valid kernel stack. If the user gives a stack address that does not point to
++a saved rip from a call instruction then the backtrace will be garbage.
++
++
++Identifying The Current Function
++================================
++
++Given a rip value, KDB uses the kallsyms data to find the start of the function
++(first address <= rip) and the end of the function (next symbol in kallsyms).
++This works for plain C code because gcc only generates one label per function.
++It does not work for assembler code or for assembler code embedded in C
++functions, because the assembler labels appear as global entries in kallsyms.
++For example, arch/i386/kernel/entry.S has function ret_from_exception which
++contains three global labels ret_from_intr, check_userspace and
++resume_userspace. If rip points to any of those global labels, KDB wants the
++start of the real function, i.e. ret_from_exception. In addition, if rip points
++to ret_from_exception, KDB wants the end of the function to be after the last
++global label in that function, i.e. after resume_userspace.
++
++The simplest way to handle these unwanted global labels is to list the spurious
++assembler labels, which is done in the arch specific array bb_spurious. After
++mapping rip to the nearest start and end labels from kallsyms, kdb_bb() works
++backwards until it finds a non-spurious label then works forwards to the next
++non-spurious label. That gives a real start and end address and a real name for
++the current function.
++
++Note that this algorithm only applies in kdb_bb() when it maps rip to a suitable
++start and end address. When disassembling the code, you will still see the
++spurious label names, users need to see the extra labels. ret_from_exception on
++i386 disassembles like this (2.6.22) :-
++
++[0]kdb> id ret_from_exception
++0xc0102554 ret_from_exception: cli
++0xc0102555 ret_from_intr: mov $0xfffff000,%ebp
++0xc010255a ret_from_intr+0x5: and %esp,%ebp
++0xc010255c check_userspace: mov 0x34(%esp),%eax
++0xc0102560 check_userspace+0x4: mov 0x30(%esp),%al
++0xc0102564 check_userspace+0x8: and $0x20003,%eax
++0xc0102569 check_userspace+0xd: cmp $0x3,%eax
++0xc010256c check_userspace+0x10: jb 0xc010258c resume_kernel
++0xc0102572 check_userspace+0x16: mov %esi,%esi
++0xc0102574 resume_userspace: cli
++0xc0102575 resume_userspace+0x1: mov 0x8(%ebp),%ecx
++0xc0102578 resume_userspace+0x4: and $0xfe3e,%ecx
++0xc010257e resume_userspace+0xa: jne 0xc01026f4 work_pending
++0xc0102584 resume_userspace+0x10: jmp 0xc01026a7 restore_all
++0xc0102589 resume_userspace+0x15: lea 0x0(%esi),%esi
++0xc010258c resume_kernel: cli
++
++For the purposes of kdba_bt_x86.c, any rip from 0xc0102554 to 0xc0102589 needs
++to map to the range 0xc0102554 (start) to 0xc010258c (end) with function name
++ret_from_exception. Therefore ret_from_intr, check_userspace and
++resume_userspace are listed in bb_spurious[] for i386 so those symbols are
++ignored. The comments in bb_spurious[] list the function that encloses each
++spurious label, those comments are only for humans, they do not affect the code.
++
++Once rip has been mapped to non-spurious labels, the module name, function name,
++start and end address are stored in variables bb_mod_name, bb_func_name,
++bb_func_start, bb_func_end. These variables are used throughout kdba_bt_x86.c
++for processing each function in turn.
++
++Watch for changes to assembler code, especially in arch/i386/kernel/entry.S,
++arch/x86_64/kernel/entry.S and arch/x86_64/ia32/ia32entry.S. When new labels
++are added you may need to adjust bb_spurious[] for that architecture. Running
++bb_all can help identify assembler labels that have been added or deleted.
++
++
++Tracking the Effects of Instructions
++====================================
++
++bb_pass2_do_changed_blocks() uses the KDB disassembler to decode the x86
++instructions to something a human can read. bb_dis_pass2() is used as a print
++routine to store data for a single instruction in a buffer then
++bb_parse_buffer() starts the analysis. Any instruction prefixes like lock or
++rep are stripped out. The opcode string is isolated then up to 3 operands are
++extracted (imul can have 3 operands), these are src, dst and dst2. The operand
++is matched against table bb_opcode_usage_all[] which lists all the instructions
++that actually appear in i386 and x86_64 kernels. A lot of the x86 instruction
++set is not used by the kernel so instructions such as SSE do not appear in
++bb_opcode_usage_all[].
++
++Each operand is decoded by bb_parse_operand() to see whether it has a segment
++prefix, displacement, base, index or scale. An indirect call or jmp is
++identified. Operands consisting only of a register are classified as 'reg'
++type, displacements starting with '$' are immediate values otherwise the operand
++refers to a memory location. Any base or index register name is mapped to the
++abstract register name that contains it, this takes care of mapping (say) %ah to
++rax.
++
++After decoding the opcode and all its operands, bb_usage() decides what effect
++the instruction will have on the abstract state machine. Some x86 instructions
++list all the affected registers in their operands and these can be handled as
++generic cases. Alas many x86 instructions have side effects and change
++registers that are not listed in the operands, these have to be handled as
++special cases. enum bb_operand_usage lists the generic and special cases.
++
++bb_usage() is basically one huge switch statement over the special values in
++enum bb_operand_usage. For each special case it tracks the side effects of the
++instruction. Once all the special cases have been handled and converted to
++generic cases then bb_usage() handles the generic cases.
++
++bb_usage() detects when a register is copied to another register, a register is
++copied to stack or a known stack value is copied to a register and updates the
++state data accordingly. It is particularly important that all stack pointer
++updates and copies of the stack pointer are tracked, much of the saved state is
++on stack and can be accessed via any register that points to the stack, not just
++via rsp.
++
++i386 built with 4K stacks and all x86_64 builds have multiple kernel stacks.
++bb_usage() knows which instructions or locations are used to switch stacks and
++pretends that these instructions have no effect on the contents of rsp. The
++higher level backtrace code knows how to handle stack switching, it is too
++complicated for basic block analysis.
++
++
++Transfer of Control Outside the Current Function
++================================================
++
++Ignoring call instructions, most C code does not transfer control outside the
++current function, IOW there are no jump instructions to instructions outside the
++function. There are a few cases that this can occur for C code, inline
++assembler and tail recursion.
++
++Tail recursion occurs when a function ends by returning the value from a second
++function and that second function has exactly the same arguments and return
++value as the current function. For example,
++
++ int bar(int i, char *p)
++ {
++ ... do some work and return an int ...
++ }
++
++ int foo(int i, char *p)
++ {
++ return bar(i, p);
++ }
++
++If tail recursion is active (gcc -foptimize-sibling-calls) then instead of foo
++calling bar, bar returning to foo then foo returning to its caller, gcc will end
++foo with a direct jmp to bar. The source code says that something called foo
++but the stack trace will show bar is active, with no sign of foo on stack. When
++bar returns it will use the return address from the code that called foo.
++
++bb_transfer() detects an unconditional jmp to code outside the function body and
++assumes that this represents tail recursion. For tail recursion to work
++correctly, all the preserved registers must contain their original values,
++bb_sanity_check() validates this. Any deviation from the expected state will
++stop basic block analysis and fall back on the old unreliable backtrace code.
++
++Besides tail recursion in C code, assembler code can jump to labels outside the
++current function. Unfortunately this occurs all the time in the twisty
++assembler code and, to make things worse, many of these transfers are done with
++non-standard register or memory state. bb_special_case() and the arch specific
++bb_special_cases[] handle all the known special cases, including what the
++register and/or memory state should be. Any deviation from the expected state
++will stop basic block analysis and fall back on the old unreliable backtrace
++code.
++
++
++Locating Arguments
++==================
++
++Function arguments can be passed in registers or on stack. The full ABI for
++passing arguments is described in
++
++ http://www.caldera.com/developers/devspecs/abi386-4.pdf
++ http://www.x86-64.org/documentation/abi.pdf
++
++The short description, ignoring special cases like passing structures by name
++and floating point arguments which tend not to apply to the kernel, is :-
++
++i386. With -mregparm=0, all arguments are passed on stack, except for
++ functions defined as FASTCALL, where the first 3 arguments are passed in
++ registers.
++
++ With -mregparm=3, the first 3 arguments are passed in registers except
++ for functions defined as asmlinkage or with variable number of
++ arguments, when arguments are still passed on stack. -mregparm=3 used
++ to be a config option, in recent kernels it is the default.
++
++ Arguments defined as long long (64 bit) are passed in two registers or
++ in two locations on stack. Being passed in two pieces makes a 64 bit
++ argument look like two 32 bit arguments to KDB, it will be printed as
++ two separate arguments.
++
++ When compiled with -mregparm=3, if a 64 bit argument is argument number
++ 2 then it will not be split between register and stack, instead it will
++ all be on stack and the third argument register will not be used. This
++ makes it look like there is an extra argument in the list. There is
++ nothing that KDB can do to detect these corner cases with 64 bit
++ arguments on i386, which is a pity because they can confuse users.
++
++ The preserved registers are ebx, ebp, esp, esi, edi. Arguments are
++ passed in eax, edx, ecx. The return value is in eax.
++
++x86_64. The first 6 arguments are passed in registers, the 7th and later
++ arguments are passed on stack. Except for functions with a variable
++ number of arguments (e.g. printk) where all arguments are on stack
++ except for rax which contains the number of SSE arguments (always 0 for
++ the kernel).
++
++ The preserved registers are rbx, rbp, rsp, r12, r13, r14, r15.
++ Arguments are passed in rdi, rsi, rdx, rcx, r8, r9. The return value is
++ in rax.
++
++For both architectures, kdba_bt detects an argument that is passed in a register
++by the fact that the function code reads from that argument type register while
++it contains its original value. IOW, testing the value of rax, copying rax to
++another register or storing it on stack without first overwriting rax means that
++rax contains a useful input value. Reading from memory which is above the
++original stack pointer means that there is an argument at that location on
++stack.
++
++There are some functions in the kernel whose definition contains arguments that
++are not actually used. Typically these functions are instantiations of generic
++function definitions where some, but not all, instantiations use all the
++arguments. For example, a filesystem function may take flags that are not used
++by this particular filesystem, but the function definition has to match the
++generic filesystem declarations. If the unused arguments are at the end of the
++list then there is no way of telling from the object code that they exist, the
++function that does not use the trailing arguments will have no code that refers
++to them. KDB will print a truncated argument list for this case.
++
++If the unused arguments are not at the end of the list then KDB can detect the
++presence of the unused arguments, because there is code that refers to later
++arguments. KDB will print the unused argument, although gcc may have
++overwritten the register it is in, in which case KDB prints "invalid".
++
++Originally kdba_bt_x86 would detect that there was no reference to arguments in
++registers but there were still references to arguments on stack and assume that
++the function had all its arguments on stack. Unfortunately this did not work
++with the large number of 'pass through' functions in the kernel. A 'pass
++through' function is one which calls another function with roughly the same
++argument list and makes no other reference to the register arguments. For
++example, ipv4_doint_and_flush_strategy() takes 7 arguments, calls
++devinet_conf_sysctl() with those 7 arguments in the same order and has no other
++reference to any of its arguments.
++
++Pass through functions do not touch the arguments that are passed in registers
++because they are already in the right location for the routine about to be
++called, so the pass through function has no code that references the argument
++registers. No code means that kdba_bt_x86 cannot tell if the function has
++register arguments or not. The arguments passed on stack must always be copied
++to the new stack frame, even for pass through functions, so the arguments on
++stack can always be detected.
++
++kdba_bt_x86 was changed to assume that if there are any arguments on stack then
++there are always arguments in registers, except for a list of functions that are
++known to be asmlinkage or to have a variable number of arguments.
++bb_assume_pass_through() ignores the known special cases, for other functions
++which have stack arguments but no register arguments it assumes the function is
++pass through and prints a warning about that assumption.
++
++The above heuristics mean that there is one case that kdba_bt_x86 cannot detect:
++pass through functions where all the arguments are in registers. These have no
++argument references at all in their code, so they are printed with no arguments.
++All they do is call another function so this class of functions never fails, or
++if it does fail then it is due to something that is not argument related. If
++the failure is further down the call stack then the arguments are printed at the
++next function down the stack, so the user still has the arguments.
++
++This list of limitations on getting the x86 arguments may seem to be a long one,
++but kdba_bt_x86 gives sensible results for most functions. For kernel
++debugging, any arguments are far better than none at all.
++
++
++Kernel Stack Switching
++======================
++
++Understanding the unusual way that x86 kernel stacks are used is very important
++when diagnosing backtrace problems. Every process has its own normal kernel
++stack, even processes that run entirely within the kernel such as kthread or the
++per cpu migration processes. The normal stacks are 4K or 8K on i386 (depending
++on CONFIG_4KSTACKS) and 8K on x86_64. The normal stacks are global, they are
++not tied to any cpu.
++
++For i386 with 8K stacks there are no other kernel stacks so there is no stack
++switching to worry about.
++
++For i386 with 4K process stacks, each cpu also has a 4K soft irq stack and a 4K
++hard irq stack. It is possible for a process to be running on its own process
++stack, for the process to be interrupted by a soft irq which is then interrupted
++by a hard irq. At that point the backtrace is split between the hard irq, the
++soft irq and the normal stacks.
++
++On x86_64, each cpu always has stacks for stackfault, doublefault, nmi, debug,
++mce and interrupts. See Documentation/x86_64/kernel-stacks.
++
++The arch specific kdba_get_stack_info_alternate() function works out which stack
++the backtrace starts on, how big the stack is and how to switch to the next
++stack. This information is stored in the kdb_activation_record and used by the
++higher level backtrace code to detect a stack switch.
++
++The normal stack has some padding at the end, this reflects the stack pointer
++when the process was created in the kernel. kdba_bt_x86 cannot backtrace
++through this padding data, mainly because the code that set the initial stack
++pointer no longer exists after boot. ARCH_NORMAL_PADDING defines how many words
++to ignore at the end of the normal stack.
++
++
++Debugging KDB
++=============
++
++KDB has conditional debugging print statements scattered throughout the code.
++If KDB is not behaving as expected, you can turn on debugging and rerun the
++command. Before debugging KDB, set LINES 10000 and capture the output via a
++serial console. If using minicom, turn on word wrap (control-A W) and capture
++mode (control-A L). If you are using a serial console via a serial to Ethernet
++interface using ssh or telnet, use the 'script' command to start the session.
++
++The various KDB_DEBUG_FLAG_* flags are listed in include/linux/kdbprivate.h.
++You set debug with 'set KDBDEBUG 0xnn' where nn is the or'd value of the desired
++flags. 'set KDBDEBUG 0' turns off KDB debugging. When diagnosing x86 backtrace
++problems, the most useful debug flags are
++
++ KDB_DEBUG_FLAG_ARA 0x10 Activation record, arch specific
++ KDB_DEBUG_FLAG_BB_SUMM 0x04 Basic block analysis, summary only
++ KDB_DEBUG_FLAG_BB 0x20 All basic block analysis
++
++ARA prints information about the different kernel stacks as kdba_bt_x86 unwinds
++through the switched kernel stacks. BB_SUMM prints a summary of the basic block
++analysis for each function, including the abstract exit state and the rollback
++calculations. BB prints a huge amount of basic block debugging, you probably
++only want to turn this on for the full backtrace as a last resort.
++
++I find 'set KDBDEBUG 0x14' to be best to get an overview of a problem. It gives
++both the kernel stack information plus the abstract state and actual location of
++data for each function.
++
++Command 'bb1' does a detailed debug session for a single function, bb1 takes a
++single parameter, the address of the exit point from the function, by number,
++not by name. bb1 turns on KDB_DEBUG_FLAG_BB, does basic block analysis for the
++function that contains the exit point then resets the debug flags to their
++previous value.
++
++Command 'bb_all' runs through every function in the base kernel (not module
++functions) and does a basic block analysis of every function. It also validates
++the various tables in kdba_bt_x86 where possible. bb_all is meant for the KDB
++maintainer to check that all the base kernel function pass the sanity checks, it
++can also be used by end users when reporting a bug. bb_all takes no parameters.
++It prints a '.' for every 100 functions it has analysed and allows for up to 20
++errors before giving up. The output from bb_all also includes the config
++variables that affect basic block analysis plus any assumptions about 'pass
++through' functions.
++
++
++Submitting a Bug Report Against kdba_bt_x86
++===========================================
++
++Capture the KDB output via a serial console.
++
++set LINES 10000
++set BTSP 1
++set KDBDEBUG 0x14
++Reproduce the problem.
++set KDBDEBUG 0
++bb_all
++
++If you can identify the rip/eip where kdba_bt_x86 gets confused, run bb1 with
++that address.
++
++Find each set of output from kdba_get_stack_info in the trace, extract the last
++two lines and type those lines into KDB. That will give a hex and symbolic dump
++of the raw kernel stacks. For example, if the trace data is
++
++kdba_get_stack_info: esp=0xc04fbef8 cpu=0 task=c047b3e0
++kdba_get_stack_info: ar->stack
++ physical_start=0xc04fb000
++ physical_end=0xc04fc000
++ logical_start=0xc04fb038
++ logical_end=0xc04fc000
++ next=0xc04b4f44
++ id=hardirq_ctx
++ set MDCOUNT 1024
++ mds 0xc04fb000
++
++then type the last two lines into KDB. Repeat this for each stack listed by
++kdba_get_stack_info on the failing backtrace.
++
++Send all the console output to the KDB maintainer.
++
++
++Examples of Basic Block Debugging Output
++========================================
++
++Example of the basic block analysis of fs/namei::getname() on i386. Kernel
++2.6.22, i386, compiled with frame pointers, gcc 4.1.0.
++
++Basic block debugging is very verbose, so set a high number of output lines.
++You really need a reliable serial console to capture this amount of output.
++
++ [0]kdb> set LINES 10000
++
++A simple disassemble of getname(). This is not required for debugging purposes
++since each instruction is printed as part of basic block debugging, but this can
++be easier to read.
++
++ [0]kdb> id getname
++ 0xc015cce8 getname: push %ebp
++ 0xc015cce9 getname+0x1: mov %esp,%ebp
++ 0xc015cceb getname+0x3: push %edi
++ 0xc015ccec getname+0x4: push %esi
++ 0xc015cced getname+0x5: push %ebx
++ 0xc015ccee getname+0x6: sub $0x4,%esp
++ 0xc015ccf1 getname+0x9: mov %eax,%edi
++ 0xc015ccf3 getname+0xb: mov $0xd0,%edx
++ 0xc015ccf8 getname+0x10: mov 0xc04b2120,%eax
++ 0xc015ccfd getname+0x15: call 0xc0153009 kmem_cache_alloc
++ 0xc015cd02 getname+0x1a: mov %eax,0xfffffff0(%ebp)
++ 0xc015cd05 getname+0x1d: mov $0xfffffff4,%eax
++ 0xc015cd0a getname+0x22: cmpl $0x0,0xfffffff0(%ebp)
++ 0xc015cd0e getname+0x26: je 0xc015cd7d getname+0x95
++ 0xc015cd10 getname+0x28: mov %esp,%eax
++ 0xc015cd12 getname+0x2a: and $0xfffff000,%eax
++ 0xc015cd17 getname+0x2f: cmpl $0xffffffff,0x18(%eax)
++ 0xc015cd1b getname+0x33: je 0xc015cd39 getname+0x51
++ 0xc015cd1d getname+0x35: mov $0xfffffff2,%esi
++ 0xc015cd22 getname+0x3a: cmp $0xbfffffff,%edi
++ 0xc015cd28 getname+0x40: ja 0xc015cd60 getname+0x78
++ 0xc015cd2a getname+0x42: mov $0xc0000000,%ebx
++ 0xc015cd2f getname+0x47: sub %edi,%ebx
++ 0xc015cd31 getname+0x49: cmp $0xfff,%ebx
++ 0xc015cd37 getname+0x4f: jbe 0xc015cd3e getname+0x56
++ 0xc015cd39 getname+0x51: mov $0x1000,%ebx
++ 0xc015cd3e getname+0x56: mov %ebx,%ecx
++ 0xc015cd40 getname+0x58: mov %edi,%edx
++ 0xc015cd42 getname+0x5a: mov 0xfffffff0(%ebp),%eax
++ 0xc015cd45 getname+0x5d: call 0xc023dbb4 strncpy_from_user
++ 0xc015cd4a getname+0x62: cmp $0x0,%eax
++ 0xc015cd4d getname+0x65: jle 0xc015cd5a getname+0x72
++ 0xc015cd4f getname+0x67: mov $0xffffffdc,%esi
++ 0xc015cd54 getname+0x6c: cmp %ebx,%eax
++ 0xc015cd56 getname+0x6e: jae 0xc015cd60 getname+0x78
++ 0xc015cd58 getname+0x70: jmp 0xc015cd71 getname+0x89
++ 0xc015cd5a getname+0x72: je 0xc015cd76 getname+0x8e
++ 0xc015cd5c getname+0x74: jge 0xc015cd71 getname+0x89
++ 0xc015cd5e getname+0x76: mov %eax,%esi
++ 0xc015cd60 getname+0x78: mov 0xfffffff0(%ebp),%edx
++ 0xc015cd63 getname+0x7b: mov 0xc04b2120,%eax
++ 0xc015cd68 getname+0x80: call 0xc01521f1 kmem_cache_free
++ 0xc015cd6d getname+0x85: mov %esi,%eax
++ 0xc015cd6f getname+0x87: jmp 0xc015cd7d getname+0x95
++ 0xc015cd71 getname+0x89: mov 0xfffffff0(%ebp),%eax
++ 0xc015cd74 getname+0x8c: jmp 0xc015cd7d getname+0x95
++ 0xc015cd76 getname+0x8e: mov $0xfffffffe,%esi
++ 0xc015cd7b getname+0x93: jmp 0xc015cd60 getname+0x78
++ 0xc015cd7d getname+0x95: pop %edx
++ 0xc015cd7e getname+0x96: pop %ebx
++ 0xc015cd7f getname+0x97: pop %esi
++ 0xc015cd80 getname+0x98: pop %edi
++ 0xc015cd81 getname+0x99: pop %ebp
++ 0xc015cd82 getname+0x9a: ret
++
++The bb1 command takes only one argument, which must be an address, not a name. bb1
++turns on full basic block debugging and analyses the function containing the
++supplied address. Give bb1 the address of the exit point from this function,
++IOW the return address that is stored on stack due to a call from this function
++to the next function down the call stack. Assume that getname() has called
++kmem_cache_free() and something went wrong in kmem_cache_free() or one of the
++functions that it calls. The call to kmem_cache_free is at 0xc015cd68 and the
++return address on stack is the instruction after the call, i.e. 0xc015cd6d, so
++
++ [0]kdb> bb1 0xc015cd6d
++ bb_pass1: func_name getname func_start 0xc015cce8 func_end 0xc015cd83
++
++bb_pass1 has identified the function name and its start and end address. For C
++functions these are just the function start address and the next symbol in
++kallsyms. For Assembler code there may be spurious labels so the function name
++may not match the label prior to the address given to bb1. For an example of
++that on i386, find the address of resume_userspace then pass that address to the
++bb1 KDB command.
++
++ bb_pass1: end
++ bb[0] start 0xc015cce8 end 0xc015cd38 drop_through 1
++ bb[1] start 0xc015cd39 end 0xc015cd3d drop_through 1
++ bb[2] start 0xc015cd3e end 0xc015cd58 drop_through 0
++ bb[3] start 0xc015cd5a end 0xc015cd5f drop_through 1
++ bb[4] start 0xc015cd60 end 0xc015cd6f drop_through 0
++ bb[5] start 0xc015cd71 end 0xc015cd74 drop_through 0
++ bb[6] start 0xc015cd76 end 0xc015cd7b drop_through 0
++ bb[7] start 0xc015cd7d end 0xc015cd82 drop_through 0
++ bb_jmp[0] from 0xc015cd0e to 0xc015cd7d drop_through 0
++ bb_jmp[1] from 0xc015cd1b to 0xc015cd39 drop_through 0
++ bb_jmp[2] from 0xc015cd28 to 0xc015cd60 drop_through 0
++ bb_jmp[3] from 0xc015cd37 to 0xc015cd3e drop_through 0
++ bb_jmp[4] from 0xc015cd4d to 0xc015cd5a drop_through 0
++ bb_jmp[5] from 0xc015cd56 to 0xc015cd60 drop_through 0
++ bb_jmp[6] from 0xc015cd58 to 0xc015cd71 drop_through 0
++ bb_jmp[7] from 0xc015cd5a to 0xc015cd76 drop_through 0
++ bb_jmp[8] from 0xc015cd5c to 0xc015cd71 drop_through 0
++ bb_jmp[9] from 0xc015cd6f to 0xc015cd7d drop_through 0
++ bb_jmp[10] from 0xc015cd74 to 0xc015cd7d drop_through 0
++ bb_jmp[11] from 0xc015cd7b to 0xc015cd60 drop_through 0
++ bb_jmp[12] from 0xc015cd38 to 0xc015cd39 drop_through 1
++ bb_jmp[13] from 0xc015cd3d to 0xc015cd3e drop_through 1
++ bb_jmp[14] from 0xc015cd5f to 0xc015cd60 drop_through 1
++
++After analysing the logic flow, we can see that getname() consists of 8 basic
++blocks (nodes in bb_list[]). 5 of these blocks end in unconditional jumps, the
++other 3 drop through to the next block. There are 15 transfers of control
++(vertices in bb_jmp_list[]). 12 of these transfers are explicit jmp or jcc
++instructions, the other 3 are implicit transfers when dropping through from one
++block to the next. The node list is sorted by start address, the vertex list is
++not sorted.
++
++Basic block 0 starts at the function start (0xc015cce8) and ends at 0xc015cd38.
++0xc015cd39 is the target of a jump instruction (0xc015cd1b: je 0xc015cd39) so
++0xc015cd39 starts a new block, which means that 0xc015cd38 becomes the end of
++the previous block. Because bb[0] does not end in an explicit jmp instruction,
++there is a drop through from the end of bb[0] to the start of bb[1], see
++bb_jmp[12].
++
++ bb_pass2: start
++
++To get the most accurate results from pass2, try to scan the directed graph by
++only looking at nodes whose inputs are all defined. Initially only process
++nodes with no missing inputs.
++
++ bb_pass2_do_changed_blocks: allow_missing 0
++
++ bb[0]
++ bb_reg_state c07282e0
++ rax = rax
++ rbx = rbx
++ rcx = rcx
++ rdx = rdx
++ rdi = rdi
++ rsi = rsi
++ rbp = rbp
++ rsp = osp+0x0
++
++The initial state for bb[0] is the same for all C functions. Each register
++contains its own abstract value, except for rsp which is defined in terms of the
++original stack pointer (osp).
++
++ '0xc015cce8 getname: push %ebp'
++
++The first instruction of getname() saves the frame pointer.
++
++ opcode 'push' matched by 'push', usage 44
++ src R: %ebp base_rc 8 (rbp)
++
++bb_usage() reports how the instruction was recognised and how its operands were
++decoded. Although this is i386 (ebp), it is reported as rbp. Using the x86_64
++names for registers throughout makes it easier to create common code for the two
++architectures.
++
++ rsp osp offset +0x0 -> -0x4
++
++A push instruction decrements rsp by 4 (i386) or 8 (x86_64) bytes. rsp
++originally contained the original stack pointer (osp), now it contains the
++original stack pointer - 4.
++
++ *(rsp+0x0 osp-0x4) = rbp slot 0
++
++The stack location pointed to by *rsp now contains the original value of rbp.
++Since rsp contains (osp-0x4), *(osp-0x4) contains rbp. It is slot 0 in the
++memory array associated with the register state.
++
++ '0xc015cce9 getname+0x1: mov %esp,%ebp'
++ opcode 'mov' matched by 'mov', usage 36
++ src R: %esp base_rc 9 (rsp)
++ dst R: %ebp base_rc 8 (rbp)
++ rbp = rsp (osp-0x4)
++
++Copy esp (rsp) to ebp (rbp). rsp contained (osp-0x4) so rbp also contains
++(osp-0x4). Any reference to data via either rbp or rsp will now be tracked as a
++stack location.
++
++ '0xc015cceb getname+0x3: push %edi'
++ opcode 'push' matched by 'push', usage 44
++ src R: %edi base_rc 6 (rdi)
++ rsp osp offset -0x4 -> -0x8
++ *(rsp+0x0 osp-0x8) = rdi slot 1
++ '0xc015ccec getname+0x4: push %esi'
++ opcode 'push' matched by 'push', usage 44
++ src R: %esi base_rc 7 (rsi)
++ rsp osp offset -0x8 -> -0xc
++ *(rsp+0x0 osp-0xc) = rsi slot 2
++ '0xc015cced getname+0x5: push %ebx'
++ opcode 'push' matched by 'push', usage 44
++ src R: %ebx base_rc 3 (rbx)
++ rsp osp offset -0xc -> -0x10
++ *(rsp+0x0 osp-0x10) = rbx slot 3
++
++Push 3 registers to stack. rsp is adjusted for each push and stack locations
++are assigned to contain the values of edi, esi and ebx. This sequence is very
++common in i386 C functions. edi, esi and ebx are preserved registers on i386,
++but gcc wants to use them for scratch space. The original contents of these
++registers must be saved on stack and restored before returning to our caller.
++
++ '0xc015ccee getname+0x6: sub $0x4,%esp'
++ opcode 'sub' matched by 'sub', usage 51
++ src I: $0x4
++ dst R: %esp base_rc 9 (rsp)
++ rsp osp offset -0x10 -> -0x14
++
++Subtract 4 bytes from esp. This defines the local stack variables. Sorry,
++names for local stack variables are not available to KDB.
++
++ '0xc015ccf1 getname+0x9: mov %eax,%edi'
++ opcode 'mov' matched by 'mov', usage 36
++ src R: %eax base_rc 2 (rax)
++ dst R: %edi base_rc 6 (rdi)
++ rdi = rax (rax)
++
++Having saved edi on stack, gcc now overwrites edi with eax. At this point rax
++still contains its original value, so rdi now contains a copy of rax, as well as
++the original value which is still in rax. This is a common sequence in C code.
++rax contains argument 0 but it is also a scratch register. If the code needs to
++use argument 0 later then its value must be saved somewhere before executing any
++instruction that changes rax. edi is a preserved register so its contents will
++not be changed by any function that we call, or if it is changed then it will be
++restored before returning to this function.
++
++rax is listed in the arch specific bb_param_reg[] list and the code is reading
++from rax while it still contains its original value. The only way that makes
++any sense is when rax is an input argument to getname(). We note that fact in
++bb_reg_read().
++
++ '0xc015ccf3 getname+0xb: mov $0xd0,%edx'
++ opcode 'mov' matched by 'mov', usage 36
++ src I: $0xd0
++ dst R: %edx base_rc 5 (rdx)
++ rdx = undefined
++
++Moving a constant value to edx. Although this is a constant, it does not refer
++to any of the original values that were supplied to this function. Therefore
++rdx becomes undefined for the purposes of the code analysis.
++
++ '0xc015ccf8 getname+0x10: mov 0xc04b2120,%eax'
++ opcode 'mov' matched by 'mov', usage 36
++ src M: 0xc04b2120
++ dst R: %eax base_rc 2 (rax)
++ rax = undefined
++
++Moving a constant value to eax makes rax undefined.
++
++ '0xc015ccfd getname+0x15: call 0xc0153009 <kmem_cache_alloc>'
++ opcode 'call' matched by 'call', usage 17
++ src M: 0xc0153009
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = rbx
++ rcx = rcx
++ rdx = undefined
++ rdi = rax
++ rsi = rsi
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++
++Basic block debugging prints the register and memory state when transferring
++control between blocks and when issuing call instructions. The call state is
++mainly useful when C code calls assembler routines, especially if you are not
++sure what state the assembler code expects. Not all of our assembler is as well
++documented as it could be :(
++
++ rax = undefined
++ rcx = undefined
++ rdx = undefined
++
++The i386 ABI says that some registers are preserved across calls, see the arch
++specific bb_preserved_reg[] list. Any registers not in that list automatically
++become undefined after a call instruction.
++
++ '0xc015cd02 getname+0x1a: mov %eax,0xfffffff0(%ebp)'
++ opcode 'mov' matched by 'mov', usage 36
++ src R: %eax base_rc 2 (rax)
++ dst M: 0xfffffff0(%ebp) base_rc 8 (rbp)
++
++eax is the return value from the call, it is being saved at offset 0xfffffff0
++(-0x10) from ebp. Since rbp contains (osp-0x4) the return value is being stored
++at (osp-0x14). This is a stack location but we have no record of any data being
++held at that location, it is part of the local stack variables.
++
++ '0xc015cd05 getname+0x1d: mov $0xfffffff4,%eax'
++ opcode 'mov' matched by 'mov', usage 36
++ src I: $0xfffffff4
++ dst R: %eax base_rc 2 (rax)
++ rax = undefined
++ '0xc015cd0a getname+0x22: cmpl $0x0,0xfffffff0(%ebp)'
++ opcode 'cmpl' matched by 'cmp', usage 3
++ src I: $0x0
++ dst M: 0xfffffff0(%ebp) base_rc 8 (rbp)
++ '0xc015cd0e getname+0x26: je 0xc015cd7d <getname+0x95>'
++ opcode 'je' matched by 'j', usage 28
++ src M: 0xc015cd7d
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = rbx
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = rsi
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++
++Transfer of control, print the register and memory state.
++
++ matched: from 0xc015cd0e to 0xc015cd7d drop_through 0 bb_jmp[0]
++
++Which bb_jmp_list[] entry matches this transfer.
++
++ new state c07286b8
++
++The current abstract register and memory state is cloned at address c07286b8.
++This state becomes one of the inputs to the basic block whose start address is
++0xc015cd7d.
++
++ '0xc015cd10 getname+0x28: mov %esp,%eax'
++ opcode 'mov' matched by 'mov', usage 36
++ src R: %esp base_rc 9 (rsp)
++ dst R: %eax base_rc 2 (rax)
++ rax = rsp (osp-0x14)
++
++Copy rsp which contains (osp-0x14) to rax. rax contains a valid stack pointer.
++
++ '0xc015cd12 getname+0x2a: and $0xfffff000,%eax'
++ opcode 'and' matched by 'and', usage 11
++ src I: $0xfffff000
++ dst R: %eax base_rc 2 (rax)
++ rax = undefined
++
++But not for long.
++
++ '0xc015cd17 getname+0x2f: cmpl $0xffffffff,0x18(%eax)'
++ opcode 'cmpl' matched by 'cmp', usage 3
++ src I: $0xffffffff
++ dst M: 0x18(%eax) base_rc 2 (rax)
++ '0xc015cd1b getname+0x33: je 0xc015cd39 <getname+0x51>'
++ opcode 'je' matched by 'j', usage 28
++ src M: 0xc015cd39
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = rbx
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = rsi
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++
++Another transfer of control, print the state.
++
++ matched: from 0xc015cd1b to 0xc015cd39 drop_through 0 bb_jmp[1]
++
++Which bb_jmp_list[] entry was used.
++
++ reuse bb_jmp[0]
++
++To save space, we only clone the state if it is different. Otherwise we reuse
++the state from another vertex and bump the reference count.
++
++ '0xc015cd1d getname+0x35: mov $0xfffffff2,%esi'
++ opcode 'mov' matched by 'mov', usage 36
++ src I: $0xfffffff2
++ dst R: %esi base_rc 7 (rsi)
++ rsi = undefined
++
++Using esi as a scratch register, even though the i386 ABI says that esi is a
++preserved register. Not to worry, the original value of rsi was saved on stack
++on entry and it will be restored before exit.
++
++ '0xc015cd22 getname+0x3a: cmp $0xbfffffff,%edi'
++ opcode 'cmp' matched by 'cmp', usage 3
++ src I: $0xbfffffff
++ dst R: %edi base_rc 6 (rdi)
++ '0xc015cd28 getname+0x40: ja 0xc015cd60 <getname+0x78>'
++ opcode 'ja' matched by 'j', usage 28
++ src M: 0xc015cd60
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = rbx
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd28 to 0xc015cd60 drop_through 0 bb_jmp[2]
++ new state c0728710
++
++This state is different from any states already saved, clone to a new entry.
++
++ '0xc015cd2a getname+0x42: mov $0xc0000000,%ebx'
++ opcode 'mov' matched by 'mov', usage 36
++ src I: $0xc0000000
++ dst R: %ebx base_rc 3 (rbx)
++ rbx = undefined
++ '0xc015cd2f getname+0x47: sub %edi,%ebx'
++ opcode 'sub' matched by 'sub', usage 51
++ src R: %edi base_rc 6 (rdi)
++ dst R: %ebx base_rc 3 (rbx)
++ rbx = undefined
++ '0xc015cd31 getname+0x49: cmp $0xfff,%ebx'
++ opcode 'cmp' matched by 'cmp', usage 3
++ src I: $0xfff
++ dst R: %ebx base_rc 3 (rbx)
++ '0xc015cd37 getname+0x4f: jbe 0xc015cd3e <getname+0x56>'
++ opcode 'jbe' matched by 'j', usage 28
++ src M: 0xc015cd3e
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd37 to 0xc015cd3e drop_through 0 bb_jmp[3]
++ new state c0728768
++
++This state is different from any states already saved, clone to a new entry.
++
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd38 to 0xc015cd39 drop_through 1 bb_jmp[12]
++ reuse bb_jmp[3]
++
++Basic block 0 drops through to basic block 1, treat it as an implicit transfer
++of control. The state is the same as the previous jump instruction so reuse it
++and bump the reference count.
++
++That ends basic block 0, now pick the next block in the list that (a) needs to
++be scanned and (b) has all its input states defined. In this case bb[1].
++
++ bb[1]
++
++bb[1] starts at 0xc015cd39 and has two paths that transfer control to it.
++bb_jmp[1] from an explicit jump at 0xc015cd1b and a drop through at bb_jmp[12].
++Where there is more than one input state we have to merge them and reconcile the
++final value.
++
++ first state c07286b8
++
++The first input state is stored at c07286b8. Looking back through the trace we
++find that entry associated with bb_jmp[0], not bb_jmp[1] as expected. However
++bb_jmp[1] reused the state that was stored for bb_jmp[0] so all is well.
++
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = rbx
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = rsi
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++
++The first state for bb[1].
++
++ merging state c0728768
++
++Now merge the second state, which is held at c0728768.
++
++ rbx = undefined
++ rsi = undefined
++
++The two states disagree on the values being tracked in rbx and rsi. Compiler
++theory 101 says that if two or more paths to a basic block have different values
++for a register then that register cannot be relied on at the start of the block,
++so make it undefined. The same logic applies to memory locations.
++
++ final state
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++
++After merging all the input states, this is the final starting state for bb[1].
++Now track what bb[1] does to the state.
++
++ '0xc015cd39 getname+0x51: mov $0x1000,%ebx'
++ opcode 'mov' matched by 'mov', usage 36
++ src I: $0x1000
++ dst R: %ebx base_rc 3 (rbx)
++ rbx = undefined
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd3d to 0xc015cd3e drop_through 1 bb_jmp[13]
++ reuse bb_jmp[3]
++
++bb[1] is a single instruction which drops through to bb[2].
++
++ bb[2]
++ first state c0728768
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ merging state c0728768
++
++bb[2] has two inputs, both vertices are pointing to input state c0728768.
++Merging an entry with itself has no effect.
++
++ '0xc015cd3e getname+0x56: mov %ebx,%ecx'
++ opcode 'mov' matched by 'mov', usage 36
++ src R: %ebx base_rc 3 (rbx)
++ dst R: %ecx base_rc 4 (rcx)
++ rcx = rbx (undefined)
++ '0xc015cd40 getname+0x58: mov %edi,%edx'
++ opcode 'mov' matched by 'mov', usage 36
++ src R: %edi base_rc 6 (rdi)
++ dst R: %edx base_rc 5 (rdx)
++ rdx = rdi (rax)
++ '0xc015cd42 getname+0x5a: mov 0xfffffff0(%ebp),%eax'
++ opcode 'mov' matched by 'mov', usage 36
++ src M: 0xfffffff0(%ebp) base_rc 8 (rbp)
++ dst R: %eax base_rc 2 (rax)
++ rax = *(rbp-0x10) (osp-0x14) rax = undefined
++ '0xc015cd45 getname+0x5d: call 0xc023dbb4 <strncpy_from_user>'
++ opcode 'call' matched by 'call', usage 17
++ src M: 0xc023dbb4
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = rax
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ rax = undefined
++ rcx = undefined
++ rdx = undefined
++ '0xc015cd4a getname+0x62: cmp $0x0,%eax'
++ opcode 'cmp' matched by 'cmp', usage 3
++ src I: $0x0
++ dst R: %eax base_rc 2 (rax)
++ '0xc015cd4d getname+0x65: jle 0xc015cd5a <getname+0x72>'
++ opcode 'jle' matched by 'j', usage 28
++ src M: 0xc015cd5a
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd4d to 0xc015cd5a drop_through 0 bb_jmp[4]
++ reuse bb_jmp[3]
++ '0xc015cd4f getname+0x67: mov $0xffffffdc,%esi'
++ opcode 'mov' matched by 'mov', usage 36
++ src I: $0xffffffdc
++ dst R: %esi base_rc 7 (rsi)
++ rsi = undefined
++ '0xc015cd54 getname+0x6c: cmp %ebx,%eax'
++ opcode 'cmp' matched by 'cmp', usage 3
++ src R: %ebx base_rc 3 (rbx)
++ dst R: %eax base_rc 2 (rax)
++ '0xc015cd56 getname+0x6e: jae 0xc015cd60 <getname+0x78>'
++ opcode 'jae' matched by 'j', usage 28
++ src M: 0xc015cd60
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd56 to 0xc015cd60 drop_through 0 bb_jmp[5]
++ reuse bb_jmp[3]
++ '0xc015cd58 getname+0x70: jmp 0xc015cd71 <getname+0x89>'
++ opcode 'jmp' matched by 'j', usage 28
++ src M: 0xc015cd71
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd58 to 0xc015cd71 drop_through 0 bb_jmp[6]
++ reuse bb_jmp[3]
++
++ bb[3]
++ first state c0728768
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++
++bb[3] only has one input, nothing to merge.
++
++ '0xc015cd5a getname+0x72: je 0xc015cd76 <getname+0x8e>'
++ opcode 'je' matched by 'j', usage 28
++ src M: 0xc015cd76
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd5a to 0xc015cd76 drop_through 0 bb_jmp[7]
++ reuse bb_jmp[3]
++ '0xc015cd5c getname+0x74: jge 0xc015cd71 <getname+0x89>'
++ opcode 'jge' matched by 'j', usage 28
++ src M: 0xc015cd71
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd5c to 0xc015cd71 drop_through 0 bb_jmp[8]
++ reuse bb_jmp[3]
++ '0xc015cd5e getname+0x76: mov %eax,%esi'
++ opcode 'mov' matched by 'mov', usage 36
++ src R: %eax base_rc 2 (rax)
++ dst R: %esi base_rc 7 (rsi)
++ rsi = rax (undefined)
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd5f to 0xc015cd60 drop_through 1 bb_jmp[14]
++ reuse bb_jmp[3]
++
++ bb[5]
++ first state c0728768
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ merging state c0728768
++ '0xc015cd71 getname+0x89: mov 0xfffffff0(%ebp),%eax'
++ opcode 'mov' matched by 'mov', usage 36
++ src M: 0xfffffff0(%ebp) base_rc 8 (rbp)
++ dst R: %eax base_rc 2 (rax)
++ rax = *(rbp-0x10) (osp-0x14) rax = undefined
++ '0xc015cd74 getname+0x8c: jmp 0xc015cd7d <getname+0x95>'
++ opcode 'jmp' matched by 'j', usage 28
++ src M: 0xc015cd7d
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd74 to 0xc015cd7d drop_through 0 bb_jmp[10]
++ reuse bb_jmp[3]
++
++ bb[6]
++ first state c0728768
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ '0xc015cd76 getname+0x8e: mov $0xfffffffe,%esi'
++ opcode 'mov' matched by 'mov', usage 36
++ src I: $0xfffffffe
++ dst R: %esi base_rc 7 (rsi)
++ rsi = undefined
++ '0xc015cd7b getname+0x93: jmp 0xc015cd60 <getname+0x78>'
++ opcode 'jmp' matched by 'j', usage 28
++ src M: 0xc015cd60
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd7b to 0xc015cd60 drop_through 0 bb_jmp[11]
++ reuse bb_jmp[3]
++
++ bb[4]
++ first state c0728710
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = rbx
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ merging state c0728768
++ rbx = undefined
++ merging state c0728768
++ merging state c0728768
++
++bb[4] has 4 inputs, 3 of which have the same state. On one path (state
++c0728710) rbx is defined, on the others (c0728768) rbx is undefined so the final
++state has rbx as undefined.
++
++ final state
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ '0xc015cd60 getname+0x78: mov 0xfffffff0(%ebp),%edx'
++ opcode 'mov' matched by 'mov', usage 36
++ src M: 0xfffffff0(%ebp) base_rc 8 (rbp)
++ dst R: %edx base_rc 5 (rdx)
++ rdx = *(rbp-0x10) (osp-0x14) rdx = undefined
++ '0xc015cd63 getname+0x7b: mov 0xc04b2120,%eax'
++ opcode 'mov' matched by 'mov', usage 36
++ src M: 0xc04b2120
++ dst R: %eax base_rc 2 (rax)
++ rax = undefined
++ '0xc015cd68 getname+0x80: call 0xc01521f1 <kmem_cache_free>'
++ opcode 'call' matched by 'call', usage 17
++ src M: 0xc01521f1
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ rax = undefined
++ rcx = undefined
++ rdx = undefined
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ '0xc015cd6d getname+0x85: mov %esi,%eax'
++ opcode 'mov' matched by 'mov', usage 36
++ src R: %esi base_rc 7 (rsi)
++ dst R: %eax base_rc 2 (rax)
++ rax = rsi (undefined)
++ '0xc015cd6f getname+0x87: jmp 0xc015cd7d <getname+0x95>'
++ opcode 'jmp' matched by 'j', usage 28
++ src M: 0xc015cd7d
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ matched: from 0xc015cd6f to 0xc015cd7d drop_through 0 bb_jmp[9]
++ reuse bb_jmp[3]
++
++ bb[7]
++ first state c07286b8
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = rbx
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = rsi
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ merging state c0728768
++ rbx = undefined
++ rsi = undefined
++ merging state c0728768
++ final state
++ bb_reg_state c0728658
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++ '0xc015cd7d getname+0x95: pop %edx'
++ opcode 'pop' matched by 'pop', usage 42
++ src R: %edx base_rc 5 (rdx)
++ rdx = *(rsp+0x0) (osp-0x14) rdx = undefined
++ rsp osp offset -0x14 -> -0x10
++
++This instruction is a bit misleading. It looks like gcc is restoring a value
++from the stack *(osp-0x14) to edx, but we have no record of any useful data
++being stored at osp-0x14.  In fact gcc is just adjusting the stack pointer by 4
++bytes to reverse the effect of 0xc015ccee: sub $0x4,%esp, the value popped into
++edx contains nothing useful. Why gcc does pop instead of add $0x4,%esp is a
++puzzle, probably some micro optimization.
++
++ '0xc015cd7e getname+0x96: pop %ebx'
++ opcode 'pop' matched by 'pop', usage 42
++ src R: %ebx base_rc 3 (rbx)
++ rbx = *(rsp+0x0) (osp-0x10) value rbx
++ rsp osp offset -0x10 -> -0xc
++ delete rbx from osp-0x10 slot 3
++
++This pop is doing something useful. It is restoring the original value of the
++preserved register ebx from stack, reversing 0xc015cced: push %ebx. Note that
++incrementing rsp from osp-0x10 to osp-0xc invalidates the data held in memory at
++osp-0x10, so we delete our record of it.
++
++ '0xc015cd7f getname+0x97: pop %esi'
++ opcode 'pop' matched by 'pop', usage 42
++ src R: %esi base_rc 7 (rsi)
++ rsi = *(rsp+0x0) (osp-0xc) value rsi
++ rsp osp offset -0xc -> -0x8
++ delete rsi from osp-0xc slot 2
++ '0xc015cd80 getname+0x98: pop %edi'
++ opcode 'pop' matched by 'pop', usage 42
++ src R: %edi base_rc 6 (rdi)
++ rdi = *(rsp+0x0) (osp-0x8) value rdi
++ rsp osp offset -0x8 -> -0x4
++ delete rdi from osp-0x8 slot 1
++
++Pop the other preserved registers, in reverse order to the push sequence at the
++start.
++
++ '0xc015cd81 getname+0x99: pop %ebp'
++ opcode 'pop' matched by 'pop', usage 42
++ src R: %ebp base_rc 8 (rbp)
++ rbp = *(rsp+0x0) (osp-0x4) value rbp
++ rsp osp offset -0x4 -> +0x0
++ delete rbp from osp-0x4 slot 0
++
++Pop the previous frame pointer.
++
++ '0xc015cd82 getname+0x9a: ret '
++ opcode 'ret' matched by 'ret', usage 48
++
++When a ret instruction is executed, all the preserved registers must be back to
++their original value and the stack pointer must contain osp+0.
++bb_sanity_check() will complain and abort the backtrace if this is not true. No
++complaints here.
++
++ bb_pass2: end bb_reg_params 1 bb_memory_params 0
++
++We identified one argument passed in a register (the read of rax at 0xc015ccf1)
++and no reference to memory locations above the stack frame. So we have one
++argument being passed in a register and no arguments being passed on stack.
++This matches
++
++ char * getname(const char __user * filename)
++
++ bb_pass2: bb_exit_state at 0xc015cd6d
++ bb_reg_state c07287c0
++ rax = undefined
++ rbx = undefined
++ rcx = undefined
++ rdx = undefined
++ rdi = rax
++ rsi = undefined
++ rbp = osp-0x4
++ rsp = osp-0x14
++ slot 0 offset_address -0x4 rbp
++ slot 1 offset_address -0x8 rdi
++ slot 2 offset_address -0xc rsi
++ slot 3 offset_address -0x10 rbx
++
++We told bb1 that the exit address from this function is 0xc015cd6d. The
++abstract state at this exit point was saved, it defines how we rollback the
++actual register values from the next function down the stack (kmem_cache_free)
++to get the actual register values on entry to this function (getname). See
++bb_actual_rollback() which updates bb_actual[].
++
++Looking at the exit state above, we see that rsp contains the abstract value
++osp-0x14. It is a given that we have the actual value of rsp after the call
++from getname() to kmem_cache_free(), otherwise we would not have found the
++return address on stack and we would not be analysing getname(). Adding 0x14
++(the delta from osp to rsp) to our current actual rsp gives us the actual value
++of osp on entry to getname().
++
++The main aim of doing all this work is to track the function arguments so we can
++print them if possible. getname() only has one argument which was passed in
++eax. According to the abstract exit state, the original value of rax is
++currently in rdi, so by looking at the actual value of rdi from the next stack
++frame down we are able to get the argument to getname().
++
++It is not always possible to get register arguments, gcc will only preserve
++input arguments as long as it needs them so there may be no saved copy of
++arguments that are passed in register. In this case, bt_print_one() prints
++"invalid".
++
++If basic block analysis detected any arguments were passed on stack, their
++contents can now be extracted based on the known value of the stack pointer.
++bt_print_one() prints the arguments, if BT_ARGS is non-zero then any argument
++that might be a kernel address is printed as a symbol.
++
++Once rsp has been rolled back to osp, we can calculate the actual address of
++the stack locations that contain useful data. The previous values of rbp, rdi,
++rsi and rbx are then copied from those locations into bb_actual[]. That gives
++the values for those registers at the exit point from the function that called
++getname(). Go up one level and repeat the analysis.
++
++There are two references to rdi in the exit state, which can be confusing.
++
++ rdi = rax
++ slot 1 offset_address -0x8 rdi
++
++The first reference says that "register rdi contains the original value of rax",
++the second reference says that "*(osp-0x8) contains the original value of rdi".
++Do not confuse the two, one is by name, the other is by value.
++
++getname() is a fairly simple function, it has no loops. __follow_mount is more
++complicated, it has loops as well as BUG() statements.
++
++ [0]kdb> id __follow_mount
++ 0xc015be76 __follow_mount: push %ebp
++ 0xc015be77 __follow_mount+0x1: mov %esp,%ebp
++ 0xc015be79 __follow_mount+0x3: push %edi
++ 0xc015be7a __follow_mount+0x4: push %esi
++ 0xc015be7b __follow_mount+0x5: push %ebx
++ 0xc015be7c __follow_mount+0x6: mov %eax,%esi
++ 0xc015be7e __follow_mount+0x8: xor %edi,%edi
++ 0xc015be80 __follow_mount+0xa: jmp 0xc015beca __follow_mount+0x54
++ 0xc015be82 __follow_mount+0xc: mov (%esi),%eax
++ 0xc015be84 __follow_mount+0xe: call 0xc0169664 lookup_mnt
++ 0xc015be89 __follow_mount+0x13: mov %eax,%ebx
++ 0xc015be8b __follow_mount+0x15: test %eax,%eax
++ 0xc015be8d __follow_mount+0x17: je 0xc015bed3 __follow_mount+0x5d
++ 0xc015be8f __follow_mount+0x19: mov 0x4(%esi),%eax
++ 0xc015be92 __follow_mount+0x1c: call 0xc0163de2 dput
++ 0xc015be97 __follow_mount+0x21: test %edi,%edi
++ 0xc015be99 __follow_mount+0x23: je 0xc015bead __follow_mount+0x37
++ 0xc015be9b __follow_mount+0x25: mov (%esi),%eax
++ 0xc015be9d __follow_mount+0x27: test %eax,%eax
++ 0xc015be9f __follow_mount+0x29: je 0xc015bead __follow_mount+0x37
++ 0xc015bea1 __follow_mount+0x2b: movl $0x0,0x64(%eax)
++ 0xc015bea8 __follow_mount+0x32: call 0xc016835b mntput_no_expire
++ 0xc015bead __follow_mount+0x37: mov %ebx,(%esi)
++ 0xc015beaf __follow_mount+0x39: mov 0x10(%ebx),%eax
++ 0xc015beb2 __follow_mount+0x3c: test %eax,%eax
++ 0xc015beb4 __follow_mount+0x3e: je 0xc015bec2 __follow_mount+0x4c
++ 0xc015beb6 __follow_mount+0x40: cmpl $0x0,(%eax)
++ 0xc015beb9 __follow_mount+0x43: jne 0xc015bebf __follow_mount+0x49
++ 0xc015bebb __follow_mount+0x45: ud2a
++ 0xc015bebd __follow_mount+0x47: jmp 0xc015bebd __follow_mount+0x47
++ 0xc015bebf __follow_mount+0x49: lock incl (%eax)
++ 0xc015bec2 __follow_mount+0x4c: mov %eax,0x4(%esi)
++ 0xc015bec5 __follow_mount+0x4f: mov $0x1,%edi
++ 0xc015beca __follow_mount+0x54: mov 0x4(%esi),%edx
++ 0xc015becd __follow_mount+0x57: cmpl $0x0,0x74(%edx)
++ 0xc015bed1 __follow_mount+0x5b: jne 0xc015be82 __follow_mount+0xc
++ 0xc015bed3 __follow_mount+0x5d: mov %edi,%eax
++ 0xc015bed5 __follow_mount+0x5f: pop %ebx
++ 0xc015bed6 __follow_mount+0x60: pop %esi
++ 0xc015bed7 __follow_mount+0x61: pop %edi
++ 0xc015bed8 __follow_mount+0x62: pop %ebp
++ 0xc015bed9 __follow_mount+0x63: ret
++
++ [0]kdb> bb1 0xc015bed9
++ bb_pass1: func_name __follow_mount func_start 0xc015be76 func_end 0xc015beda
++ bb_pass1: end
++ bb[0] start 0xc015be76 end 0xc015be80 drop_through 0
++ bb[1] start 0xc015be82 end 0xc015beac drop_through 1
++ bb[2] start 0xc015bead end 0xc015bebb drop_through 0
++
++Note that the ud2a (BUG) instruction at 0xc015bebb ends bb[2].
++
++ bb[3] start 0xc015bebd end 0xc015bebd drop_through 0
++
++bb[3] is peculiar, it is a jmp to itself, nothing else refers to 0xc015bebd and
++you cannot drop through from the previous instruction because ud2a kills the
++kernel. The i386 and x86_64 BUG() macros contain for(;;) after ud2a, for no
++good reason that I can see (is there old hardware that does not abort on ud2a?).
++ia64 and the generic versions of BUG() do not contain for(;;). for(;;) after
++ud2a generates a branch to itself that can never be executed.
++
++ bb[4] start 0xc015bebf end 0xc015bec1 drop_through 1
++ bb[5] start 0xc015bec2 end 0xc015bec9 drop_through 1
++ bb[6] start 0xc015beca end 0xc015bed2 drop_through 1
++ bb[7] start 0xc015bed3 end 0xc015bed9 drop_through 0
++ bb_jmp[0] from 0xc015be80 to 0xc015beca drop_through 0
++ bb_jmp[1] from 0xc015be8d to 0xc015bed3 drop_through 0
++ bb_jmp[2] from 0xc015be99 to 0xc015bead drop_through 0
++ bb_jmp[3] from 0xc015be9f to 0xc015bead drop_through 0
++ bb_jmp[4] from 0xc015beb4 to 0xc015bec2 drop_through 0
++ bb_jmp[5] from 0xc015beb9 to 0xc015bebf drop_through 0
++ bb_jmp[6] from 0xc015bebd to 0xc015bebd drop_through 0
++ bb_jmp[7] from 0xc015bed1 to 0xc015be82 drop_through 0
++ bb_jmp[8] from 0xc015beac to 0xc015bead drop_through 1
++ bb_jmp[9] from 0xc015bec1 to 0xc015bec2 drop_through 1
++ bb_jmp[10] from 0xc015bec9 to 0xc015beca drop_through 1
++ bb_jmp[11] from 0xc015bed2 to 0xc015bed3 drop_through 1
++
++Apart from bb[0] and the special case bb[3], all the other blocks are part of a
++cycle. That cycle goes bb[0] -> bb[6]. bb[6] -> {bb[1], bb[7]}. bb[1] ->
++{bb[2], bb[7]}. bb[2] -> {bb[4], bb[5]}. bb[4] -> bb[5]. bb[5] -> bb[6] and
++back to the start. bb[7] ends with 'ret', it does not feed into other blocks.
++
++ bb_pass2: start
++
++ bb_pass2_do_changed_blocks: allow_missing 0
++
++ bb[0]
++ [ ... detail snipped ... ]
++ matched: from 0xc015be80 to 0xc015beca drop_through 0 bb_jmp[0]
++ new state c07286d8
++
++ bb_pass2_do_changed_blocks: allow_missing 1
++
++Because of the cycle, only bb[0] can be processed with 0 missing inputs, all the
++other blocks have at least one missing input. Call bb_pass2_do_changed_blocks()
++again, this time allowing one missing input per block.
++
++ bb[6]
++ first state c07286d8
++ [ ... detail snipped ... ]
++ matched: from 0xc015bed2 to 0xc015bed3 drop_through 1 bb_jmp[11]
++ reuse bb_jmp[7]
++
++ bb[7]
++ first state c0728730
++ [ ... detail snipped ... ]
++
++ bb[1]
++ first state c0728730
++ [ ... detail snipped ... ]
++ matched: from 0xc015beac to 0xc015bead drop_through 1 bb_jmp[8]
++ reuse bb_jmp[1]
++
++ bb[2]
++ first state c0728788
++ [ ... detail snipped ... ]
++ merging state c0728788
++ merging state c0728788
++ [ ... detail snipped ... ]
++ matched: from 0xc015beb9 to 0xc015bebf drop_through 0 bb_jmp[5]
++ reuse bb_jmp[1]
++
++ bb[4]
++ first state c0728788
++ [ ... detail snipped ... ]
++ matched: from 0xc015bec1 to 0xc015bec2 drop_through 1 bb_jmp[9]
++ reuse bb_jmp[1]
++
++ bb[5]
++ first state c0728788
++ [ ... detail snipped ... ]
++ merging state c0728788
++ [ ... detail snipped ... ]
++ matched: from 0xc015bec9 to 0xc015beca drop_through 1 bb_jmp[10]
++ reuse bb_jmp[1]
++
++ bb[6]
++ first state c07286d8
++ [ ... detail snipped ... ]
++ merging state c0728788
++ matched: from 0xc015bed2 to 0xc015bed3 drop_through 1 bb_jmp[11]
++ reuse bb_jmp[1]
++
++Note the rescan of bb[6]. The first scan only had one input from bb[0]. After
++traversing the cycle and getting back from bb[5] to bb[6], bb[6] now has more
++inputs so we need to rescan it. With the additional input, the output state
++from bb[6] has changed since the first scan, which means that every block it
++feeds has to be rescanned. bb[6] feeds bb[1] and bb[7].
++
++ bb[7]
++ first state c0728788
++ [ ... detail snipped ... ]
++ merging state c0728788
++ [ ... detail snipped ... ]
++
++bb[7] being rescanned, this time it has data for both its inputs.
++
++ bb[1]
++ first state c0728788
++ [ ... detail snipped ... ]
++ matched: from 0xc015beac to 0xc015bead drop_through 1 bb_jmp[8]
++ no state change
++
++bb[1] is being rescanned because the input from bb[6] has changed, however the
++rescan of bb[1] reports 'no state change', the changed input from bb[6] did not
++affect the final output state from bb[1]. Because the output state from bb[1]
++has not changed since the previous scan, there is no need to rescan bb[2], bb[7]
++or bb[4]. Since bb[4] is not being rescanned, there is no need to rescan bb[5]
++or bb[6] and the cycle is closed.
+diff -Nurp linux-2.6.22-590/Documentation/kdb/kdb_bp.man linux-2.6.22-600/Documentation/kdb/kdb_bp.man
+--- linux-2.6.22-590/Documentation/kdb/kdb_bp.man 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/kdb_bp.man 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,197 @@
++.TH BD 1 "July 12, 2004"
++.SH NAME
++bp, bpa, bph, bpha, bd, bc, be, bl \- breakpoint commands
++.SH SYNOPSIS
++bp \fIaddress-expression\fP
++.LP
++bpa \fIaddress-expression\fP
++.LP
++bph \fIaddress-expression\fP [\f(CWDATAR|DATAW|DATAA|IO\fP [\fIlength\fP]]
++.LP
++bpha \fIaddress-expression\fP [\f(CWDATAR|DATAW|DATAA|IO\fP [\fIlength\fP]]
++.LP
++bd \fIbreakpoint-number\fP
++.LP
++bc \fIbreakpoint-number\fP
++.LP
++be \fIbreakpoint-number\fP
++.LP
++bl
++.SH DESCRIPTION
++.hy 0
++The
++.B bp
++family of commands are used to establish a breakpoint.
++The \fIaddress-expression\fP may be a numeric value (decimal or
++hexadecimal), a symbol name, a register name preceded by a
++percent symbol '%', or a simple expression consisting of a
++symbol name, an addition or subtraction character and a numeric
++value (decimal or hexadecimal).
++.P
++\fBbph\fP and \fBbpha\fP will force the use of a hardware register, provided
++the processor architecture supports them.
++.P
++The \fIaddress-expression\fP may also consist of a single
++asterisk '*' symbol which indicates that the command should
++operate on all existing breakpoints (valid only for \fBbc\fP,
++\fBbd\fP and \fBbe\fP).
++.P
++Four different types of
++breakpoints may be set:
++
++.TP 8
++Instruction
++Causes the kernel debugger to be invoked from the debug exception
++path when an instruction is fetched from the specified address. This
++is the default if no other type of breakpoint is requested or when
++the \fBbp\fP command is used.
++
++.TP 8
++DATAR
++Causes the kernel debugger to be entered when data of length
++\fIlength\fP is read from or written to the specified address.
++This type of breakpoint must use a processor debug register which
++places an architecture dependent limit on the number of data and I/O
++breakpoints that may be established. On arm mode XScale platform
++(thumb mode is not supported yet),
++debugger is triggered by reading from the specified address.
++The \fBbph\fP or \fBbpha\fP commands must be used.
++
++.TP 8
++DATAW
++Enters the kernel debugger when data of length \fIlength\fP
++is written to the specified address. \fIlength\fP defaults
++to four bytes if it is not explicitly specified.
++Note that the processor may have already overwritten the prior data at
++the breakpoint location before the kernel debugger is invoked.
++The prior data should be saved before establishing the breakpoint, if
++required. On arm mode XScale platform, the debugger is triggered
++after having overwritten the specified address.
++The \fBbph\fP or \fBbpha\fP commands must be used.
++
++.TP 8
++IO
++Enters the kernel debugger when an \fBin\fP or \fBout\fP instruction
++targets the specified I/O address. The \fBbph\fP or \fBbpha\fP
++commands must be used.
++This type of breakpoint is not valid in arm
++mode XScale platform.
++
++.TP 8
++DATAA
++Enters the kernel debugger after the data in specified address has
++been accessed (read or write), this option is only used in arm
++mode XScale platform.
++
++.P
++The
++.B bpha
++command will establish a breakpoint on all processors in an
++SMP system. This command is not available in a uniprocessor
++kernel.
++.P
++The
++.B bd
++command will disable a breakpoint without removing it from the kernel
++debugger's breakpoint table.
++This can be used to keep breakpoints in the table without exceeding the
++architecture limit on breakpoint registers.
++A breakpoint-number of \fI*\fR will disable all break points.
++.P
++The
++.B be
++command will re-enable a disabled breakpoint.
++A breakpoint-number of \fI*\fR will enable all break points.
++.P
++The
++.B bc
++command will clear a breakpoint from the breakpoint table.
++A breakpoint-number of \fI*\fR will clear all break points.
++.P
++The
++.B bl
++command will list the existing set of breakpoints.
++.SH LIMITATIONS
++There is a compile time limit of sixteen entries in the
++breakpoint table at any one time.
++.P
++There are architecture dependent limits on the number of hardware
++breakpoints that can be set.
++.IP ix86 8
++Four.
++.PD 0
++.IP xscale 8
++Two for instruction breakpoints and another two for data breakpoints.
++.PD 0
++.IP ia64 8
++?
++.PD 0
++.IP sparc64 8
++None.
++.PD 1
++When issuing the "go" command after entering the debugger due to
++a breakpoint, kdb will silently perform a single step in order to
++reapply the breakpoint. The sparc64 port has some limitations on
++single stepping, which may limit where a breakpoint may be safely
++set. Please read the man page for \fBss\fP for more information.
++.SH ENVIRONMENT
++The breakpoint subsystem does not currently use any environment
++variables.
++.SH SMP CONSIDERATIONS
++Using
++.B bc
++is risky on SMP systems.
++If you clear a breakpoint when another cpu has hit that breakpoint but
++has not been processed then it may not be recognised as a kdb
++breakpoint, usually resulting in incorrect program counters and kernel
++panics.
++It is safer to disable the breakpoint with
++.BR bd ,
++then
++.B go
++to let any other processors that are waiting on the breakpoint to
++clear.
++After all processors are clear of the disabled breakpoint then it is
++safe to clear it using
++.BR bc .
++.P
++Breakpoints which use the processor breakpoint registers
++are only established on the processor which is
++currently active. If you wish breakpoints to be universal
++use the
++.B bpa
++or
++.B bpha
++commands.
++.SH EXAMPLES
++.TP 8
++bp schedule
++Sets an instruction breakpoint at the beginning of the
++function \fBschedule\fP.
++
++.TP 8
++bp schedule+0x12e
++Sets an instruction breakpoint at the instruction located
++at \fBschedule\fP+\fI0x12e\fP.
++
++.TP 8
++bph ttybuffer+0x24 dataw
++Sets a data write breakpoint at the location referenced by
++\fBttybuffer\fP+\fI0x24\fP for a length of four bytes.
++
++.TP 8
++bph 0xc0254010 datar 1
++Establishes a data reference breakpoint at address \fB0xc0254010\fP
++for a length of one byte.
++
++.TP 8
++bp
++List current breakpoint table.
++
++.TP 8
++bd 0
++Disable breakpoint #0.
++
++.TP 8
++bc *
++Clear all breakpoints
+diff -Nurp linux-2.6.22-590/Documentation/kdb/kdb_bt.man linux-2.6.22-600/Documentation/kdb/kdb_bt.man
+--- linux-2.6.22-590/Documentation/kdb/kdb_bt.man 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/kdb_bt.man 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,315 @@
++.TH BT 1 "July 20, 2007"
++.SH NAME
++bt \- Stack Traceback command
++.SH SYNOPSIS
++bt [ <stack-frame-address> ]
++.LP
++btp <pid>
++.LP
++btt <struct-task-address>
++.LP
++bta [ DRSTZUIMA ]
++.LP
++btc [<cpu>]
++.SH DESCRIPTION
++.hy 0
++The
++.B bt
++command is used to print a stack traceback. It uses the
++current registers (see \fBrd\fP command) to determine
++the starting context and attempts to provide a complete
++stack traceback for the active thread. If \fIstack-frame-address\fP
++is supplied, it is assumed to point to the start of a valid
++stack frame and the stack will be traced back from that
++point.
++On x86 architecture, \fIstack-frame-address\fP must be the stack address of a
++saved \fB%eip\fP (\fB%rip\fP for x86_64) value from a \fBcall\fP instruction.
++.P
++The \fBbtp\fP command will analyze the stack for the given
++process identification (see the \fBps\fP command).
++\fBbtp\fP sets the current process for any following register display or update
++commands.
++.P
++The \fBbtt\fP command will analyze the stack for the given task
++structure.
++It is exactly equivalent to \fBbtp\fR on the pid extracted from the
++task structure.
++\fBbtt\fP sets the current process for any following register display or update
++commands.
++.P
++The \fBbta\fP command lists the stack for all processes in the desired
++state.
++Without any parameters, \fBbta\fP gives a backtrace for all useful processes.
++If a parameter is specified, it is a single string consisting of the
++letters D, R, S, T, Z, U, I, M and A in any order.
++See the kdb \fBps\fR man page for more details.
++\fBbta\fP does not change the current process.
++.P
++The \fBbtc\fP command will analyze the stack for the current process on
++a specified cpu or, if no cpu number is supplied, for the current
++process on all cpus.
++It does not switch to the other cpus, instead it uses the task
++structures to identify and issue \fBbtt\fR against the current task on
++the desired cpus.
++\fBbtc\fP with no arguments does not change the current process.
++\fBbtc\fP with a cpu number sets the current process for any following register
++display or update commands.
++.P
++For each function, the stack trace prints at least two lines.
++The first line contains four or five fields\ :-
++.IP * 3
++The pointer to the stack frame.
++.PD 0
++.IP * 3
++The current address within this frame.
++.IP * 3
++The address converted to a function name (actually the first non-local
++label which is <= the address).
++.IP * 3
++The offset of the address within the function.
++.IP * 3
++Any parameters to the function.
++.PD 1
++.PP
++If environment variable NOSECT is set to 0 then the next line contains
++five fields which are designed to make it easier to match the trace
++against the kernel code\ :-
++.IP * 3
++The module name that contains the address, "kernel" if it is in the
++base kernel.
++.PD 0
++.IP * 3
++The section name that contains the address (not available on 2.6 kernels).
++.IP * 3
++The start address of the section (not available on 2.6 kernels).
++.IP * 3
++The start address of the function.
++.IP * 3
++The end address of the function (the first non-local label which is >
++the address).
++.PD 1
++.PP
++If arguments are being converted to symbols, any argument which
++converts to a kernel or module address is printed as\ :-
++.IP * 3
++Argument address.
++.PD 0
++.IP * 3
++The module name that contains the address, "kernel" if it is in the
++base kernel.
++.IP * 3
++The symbol name the argument maps to.
++.IP * 3
++The offset of the argument from the symbol, suppressed if 0.
++.PD 1
++.P
++On architectures that use nested stacks, the backtrace will indicate a
++switch to a new stack by printing a line of equal signs and the type of
++stack.
++.SH MATCHING TRACE TO KERNEL CODE
++The command "objdump\ -S" will disassemble an object and, if the code
++was compiled with debugging (gcc flag -g), objdump will interleave the
++C source lines with the generated object.
++.PP
++A complete objdump of the kernel or a module is too big, normally you
++only want specific functions.
++By default objdump will only print the .text section but Linux uses
++other section names for executable code.
++When objdump prints relocatable objects (modules) it uses an offset of
++0 which is awkward to relate to the stack trace.
++The five fields which are printed for each function are designed to
++make it easier to match the stack trace against the kernel code using
++"objdump\ -S".
++.PP
++If the function is in the kernel then you need the section name, the
++start and end address of the function. The command is
++.PP
++.nf
++ objdump -S -j <section_name> \\
++ --start-address=<start-address> \\
++ --stop-address=<end-address> \\
++ /usr/src/linux/vmlinux
++.fi
++.PP
++If the function is in a module then you need the section name, the
++start address of the section, the start and end address of the
++function, the module name. The command is
++.PP
++.nf
++ objdump -S -j <section_name> \\
++ --adjust-vma=<section-start> \\
++ --start-address=<start-address> \\
++ --stop-address=<end-address> \\
++ /path/to/module/<module-name>.o
++.fi
++.PP
++Unfortunately the 2.6 kernel does not provide the information required
++to locate the start of the section, which makes it very difficult to
++perform a reliable objdump on a module.
++.PP
++All addresses to objdump must be preceded by '0x' if they are in hex,
++objdump does not assume hex.
++The stack trace values are printed with leading '0x' to make it easy to
++run objdump.
++.SH LIMITATIONS
++Some architectures pass parameters in registers; ia64, x86_64 and i386 (with
++gcc flag -mregparm) fall into this category.
++On these architectures, the compiler may reuse input parameter registers as
++scratch space.
++For example, if a function takes a pointer to a structure and only accesses one
++field in that structure, the compiler may calculate the address of the field by
++adding a value to the input register.
++Once the input register has been updated, it no longer points to the
++start of the structure, but to some field within it.
++This also occurs with array pointers, the compiler may update the input pointer
++directly, leaving it pointing to some element of the array instead of the start
++of the array.
++Always treat parameter values that have been passed in registers with extreme
++suspicion, the compiler may have changed the value.
++The x86 backtrace can generally identify register parameters that are no longer
++valid, it prints them as 'invalid' instead of as a misleading number.
++The ia64 backtrace cannot identify parameter registers that have been
++overwritten.
++.P
++x86 architectures do not have full unwind information in the kernel.
++The KDB backtrace on x86 performs code decomposition and analysis to track the
++frames on the call stack (including stack switches) and to locate parameters.
++If this code analysis does not yield a valid result, KDB falls back on the old
++method of scanning the process stack and printing anything that looks like a
++kernel address.
++This old method is unreliable (it produces lots of false positives in the
++trace) and cannot track parameters at all, so no parameters are printed.
++If you get an x86 backtrace that falls back to the old method, read
++Documentation/kdb/bt_x86 and follow the steps listed to get diagnostics and to
++submit a bug report.
++.P
++There are a lot of functions in the kernel which take some arguments then do
++nothing except call another function with the same initial arguments, sometimes
++adding parameters at the end. For example\ :-
++.nf
++.na
++.ft CW
++
++int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
++ void __user *oldval, size_t __user *oldlenp,
++ void __user *newval, size_t newlen)
++{
++ int ret = devinet_conf_sysctl(table, name, nlen, oldval, oldlenp,
++ newval, newlen);
++
++ if (ret == 1)
++ rt_cache_flush(0);
++
++ return ret;
++}
++.ad b
++.fi
++.P
++ipv4_doint_and_flush_strategy() passes all its parameters directly to
++devinet_conf_sysctl() and makes no other use of those parameters,
++so ipv4_doint_and_flush_strategy is a 'pass through' function.
++The x86_64 calling sequence mandates that the first 6 parameters are passed in
++registers, with other parameters being passed on stack.
++The i386 calling sequence with -mregparm=3 (which is the default since about
++2.6.18) passes the first 3 parameters in registers, with other parameters being
++passed on stack.
++The only exceptions to the above calling sequence are for functions declared as
++asmlinkage or functions with a variable number of parameters (e.g. printk).
++.P
++When a pass through function calls another function, the first 3 (i386) or 6
++(x86_64) parameters are already in their correct registers so the pass through
++function does not need to access the registers, which means that there are no
++references to these registers in the assembler code for the function.
++Users still want to see those arguments so the x86 backtrace has to assume that
++if\ :-
++.IP * 2
++There are parameters passed on the stack and
++.IP *
++There are no code references to parameters passed in registers and
++.IP *
++The function is not a known asmlinkage or variadic function, then
++there are pass through register arguments.
++.P
++The x86 backtrace will warn you when it makes this assumption, like this\ :-
++.nf
++.na
++.ft CW
++
++ <function_name> has memory parameters but no register parameters.
++ Assuming it is a 'pass through' function that does not refer to its register
++ parameters and setting <n> register parameters
++.ad b
++.fi
++.P
++The above 3 line message is only printed once, any future assumptions will
++print a shorter message.
++.P
++The \fBbt\fP command may print more or less arguments for a function
++than that function accepts.
++For x86, trailing arguments that are passed in but not used by the function
++will not be printed, resulting in fewer arguments than expected.
++For ia64, the hardware does not distinguish between input and local registers,
++some local registers may be printed as function arguments, resulting in more
++arguments than expected.
++.P
++On i386, 64 bit arguments (long long) occupy two adjacent 32 bit fields.
++There is no way for KDB to tell that this has occurred, so 64 bit arguments
++will be printed as two separate 32 bit arguments.
++.SH ENVIRONMENT
++The \fBBTARGS\fP environment variable governs the maximum number
++of arguments that are printed for any single function.
++On IA64 hardware, there is no difference between input and local registers, the
++first \fBBTARGS\fP registers are printed, up to the total limit of input plus
++local registers.
++Use a large value for \fBBTARGS\fP if you want to see the local registers on
++IA64.
++.PP
++If the \fBBTSP\fP environment variable is non-zero then the entire backtrace is
++printed, otherwise only the backtrace to the point of the last interrupt is
++printed.
++Printing the entire backtrace with 'set\ BTSP\ 1' is useful for diagnosing
++problems with the backtrace algorithms.
++In addition, when BTSP is non-zero, each backtrace frame may print extra lines
++giving information about the stack pointers, this is architecture specific.
++.PP
++If the \fBBTSYMARG\fP environment variable is non-zero then any
++arguments that fall within the kernel or modules are converted to symbols.
++.PP
++If the \fBNOSECT\fP environment variable is non-zero then the
++section information is suppressed.
++The default is NOSECT=1 so section data is suppressed; use set\ NOSECT=0
++to see section information.
++.PP
++The \fBBTAPROMPT\fP environment variable controls the prompt after each
++process is listed by the \fBbta\fP command. If \fBBTAPROMPT\fP is not
++set or is non-zero then \fBbta\fP issues a prompt after each process is
++listed. If \fBBTAPROMPT\fP is set to zero then no prompt is issued and
++all processes are listed without human intervention.
++.PP
++\fBbt\fR with no parameters uses the \fBPS\fR environment variable, see
++the kdb \fBps\fR man page.
++.SH SMP CONSIDERATIONS
++None.
++.SH EXAMPLES
++.nf
++.na
++.ft CW
++[0]kdb> bt
++Stack traceback for pid 2873
++0xc2efc0f0 2873 2836 1 0 R 0xc2efc2a0 *mount
++esp eip Function (args)
++0xf65a3c88 0xc0201f9f xfs_mount_validate_sb (0xf68bcb08, 0xf68bcb48, 0x0)
++0xf65a3c94 0xc0202f17 xfs_readsb+0x9d (0xf68bcb08, 0x0)
++0xf65a3cc0 0xc020a72e xfs_mount+0x21d (invalid, 0xf68bc2f0, 0x0)
++0xf65a3cf4 0xc021a84a vfs_mount+0x1a (invalid)
++0xf65a3d04 0xc021a721 xfs_fs_fill_super+0x76 (0xf76b6200, invalid, invalid)
++0xf65a3d78 0xc015ad81 get_sb_bdev+0xd4 (invalid, invalid, invalid, 0xf7257000, 0xc021a6ab, 0xf7594b38)
++ xfs_fs_get_sb has memory parameters but no register parameters.
++ Assuming it is a 'pass through' function that does not refer to its register
++ parameters and setting 3 register parameters
++0xf65a3db4 0xc0219a3a xfs_fs_get_sb+0x21 (invalid, invalid, invalid, 0xf7257000, 0xf7594b38)
++0xf65a3dcc 0xc015a992 vfs_kern_mount+0x41 (0xc04847e0, 0x0, 0xf68e9000, 0xf7257000)
++0xf65a3df0 0xc015aa11 do_kern_mount+0x38 (0xf6818000, 0x0, 0xf68e9000, 0xf7257000)
++0xf65a3e10 0xc016c8b0 do_mount+0x5df (0xf68e9000, 0xf65d6000, 0xf6818000, 0xc0ed0000, 0xf7257000)
++0xf65a3f90 0xc016c996 sys_mount+0x6f (0x8069b50, 0x8069b60, 0x8069b70, 0xc0ed0000, 0x8069ba0)
++0xf65a3fb4 0xc0102646 sysenter_past_esp+0x5f (invalid, invalid, invalid, 0x73, 0x246, 0xbfe52f50)
+diff -Nurp linux-2.6.22-590/Documentation/kdb/kdb_env.man linux-2.6.22-600/Documentation/kdb/kdb_env.man
+--- linux-2.6.22-590/Documentation/kdb/kdb_env.man 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/kdb_env.man 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,46 @@
++.TH ENV 1 "24 September 2000"
++.SH NAME
++env, set \- Environment manipulation commands
++.SH SYNOPSIS
++env
++.LP
++set \fIenvironment-variable\fP=\fIvalue\fP
++.SH DESCRIPTION
++The kernel debugger contains an environment which contains a series
++of name-value pairs. Some environment variables are known to the
++various kernel debugger commands and have specific meaning to the
++command; such are enumerated on the respective reference material.
++.P
++Arbitrary environment variables may be created and used with
++many commands (those which require an \fIaddress-expression\fP).
++.P
++The
++.B env
++command is used to display the current environment.
++.P
++The
++.B set
++command is used to alter an existing environment variable or
++establish a new environment variable.
++.SH LIMITATIONS
++There is a compile-time limit of 33 environment variables.
++.P
++There is a compile-time limit of 512 bytes (\fBKDB_ENVBUFSIZE\fP)
++of heap space available for new environment variables and for
++environment variables changed from their compile-time values.
++.SH ENVIRONMENT
++These commands explicitly manipulate the environment.
++.SH SMP CONSIDERATIONS
++None.
++.SH USER SETTINGS
++You can include "set" commands in kdb/kdb_cmds (see kdb.mm) to define
++your environment variables at kernel startup.
++.SH EXAMPLES
++.TP 8
++env
++Display current environment settings.
++
++.TP 8
++set IDCOUNT=100
++Set the number of lines to display for the \fBid\fP command
++to the value \fI100\fP.
+diff -Nurp linux-2.6.22-590/Documentation/kdb/kdb_ll.man linux-2.6.22-600/Documentation/kdb/kdb_ll.man
+--- linux-2.6.22-590/Documentation/kdb/kdb_ll.man 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/kdb_ll.man 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,134 @@
++.TH LL 1 "19 April 1999"
++.SH NAME
++ll \- Linked List examination
++.SH SYNOPSIS
++ll <addr> <link-offset> <cmd>
++.SH DESCRIPTION
++The
++.B ll
++command is used to execute a single command repetitively for
++each element of a linked list.
++.P
++The command specified by <cmd> will be executed with a single
++argument, the address of the current element.
++.SH LIMITATIONS
++Be careful if using this command recursively.
++.SH ENVIRONMENT
++None.
++.SH SMP CONSIDERATIONS
++None.
++.SH EXAMPLES
++.nf
++.na
++.ft CW
++# cd modules
++# insmod kdbm_vm.o
++# Entering kdb on processor 0 due to PAUSE
++kdb> ps
++Task Addr Pid Parent cpu lcpu Tss Command
++0xc03de000 0000000001 0000000000 0000 0000 0xc03de2d4 init
++0xc0090000 0000000002 0000000001 0000 0000 0xc00902d4 kflushd
++0xc000e000 0000000003 0000000001 0000 0000 0xc000e2d4 kpiod
++0xc000c000 0000000004 0000000001 0000 0000 0xc000c2d4 kswapd
++0xc7de2000 0000000056 0000000001 0000 0000 0xc7de22d4 kerneld
++0xc7d3a000 0000000179 0000000001 0000 0000 0xc7d3a2d4 syslogd
++0xc7a7e000 0000000188 0000000001 0000 0000 0xc7a7e2d4 klogd
++0xc7a04000 0000000199 0000000001 0000 0000 0xc7a042d4 atd
++0xc7b84000 0000000210 0000000001 0000 0000 0xc7b842d4 crond
++0xc79d6000 0000000221 0000000001 0000 0000 0xc79d62d4 portmap
++0xc798e000 0000000232 0000000001 0000 0000 0xc798e2d4 snmpd
++0xc7904000 0000000244 0000000001 0000 0000 0xc79042d4 inetd
++0xc78fc000 0000000255 0000000001 0000 0000 0xc78fc2d4 lpd
++0xc77ec000 0000000270 0000000001 0000 0000 0xc77ec2d4 sendmail
++0xc77b8000 0000000282 0000000001 0000 0000 0xc77b82d4 gpm
++0xc7716000 0000000300 0000000001 0000 0000 0xc77162d4 smbd
++0xc7ee2000 0000000322 0000000001 0000 0000 0xc7ee22d4 mingetty
++0xc7d6e000 0000000323 0000000001 0000 0000 0xc7d6e2d4 login
++0xc778c000 0000000324 0000000001 0000 0000 0xc778c2d4 mingetty
++0xc78b6000 0000000325 0000000001 0000 0000 0xc78b62d4 mingetty
++0xc77e8000 0000000326 0000000001 0000 0000 0xc77e82d4 mingetty
++0xc7708000 0000000327 0000000001 0000 0000 0xc77082d4 mingetty
++0xc770e000 0000000328 0000000001 0000 0000 0xc770e2d4 mingetty
++0xc76b0000 0000000330 0000000001 0000 0000 0xc76b02d4 update
++0xc7592000 0000000331 0000000323 0000 0000 0xc75922d4 ksh
++0xc7546000 0000000338 0000000331 0000 0000 0xc75462d4 su
++0xc74dc000 0000000339 0000000338 0000 0000 0xc74dc2d4 ksh
++kdb> md 0xc74dc2d4
++c74dc2d4: 00000000 c74de000 00000018 00000000 .....`MG........
++c74dc2e4: 00000000 00000000 00000000 074de000 .............`M.
++c74dc2f4: c01123ff 00000000 00000000 00000000 #.@............
++c74dc304: 00000000 00000000 c74dded0 00000000 ........P^MG....
++[omitted]
++c74dc474: 00000000 00000000 00000000 00000000 ................
++c74dc484: 00000000 c7c15d00 c77b0900 c026fbe0 .....]AG..{G`{&@
++c74dc494: 00000000 c76c2000 00000000 00000000 ..... lG........
++c74dc4a4: 00000000 00000000 00000000 c74dc4ac ............,DMG
++kdb> md 0xc026fbe0
++c026fbe0: c0262b60 00000000 c7594940 c74de000 @HYG....@IYG.`MG
++[omitted]
++kdb> md 0xc0262b60
++c0262b60: c0266660 08048000 0804c000 c7bec360 `f&@.....@..`C>G
++kdb> ll c0262b60 12 md
++c0262b60: c0266660 08048000 0804c000 c7bec360 `f&@.....@..`C>G
++c7bec360: c0266660 0804c000 0804d000 c7becb20 `f&@.@...P.. K>G
++c7becb20: c0266660 0804d000 08050000 c7bec3a0 `f&@.P...... C>G
++c7bec3a0: c0266660 40000000 40009000 c7bec420 `f&@...@...@ D>G
++c7bec420: c0266660 40009000 4000b000 c7bec4a0 `f&@...@.0.@ D>G
++c7bec4a0: c0266660 4000b000 40010000 c7bec8e0 `f&@.0.@...@`H>G
++c7bec8e0: c0266660 40010000 400a1000 c7becbe0 `f&@...@...@`K>G
++c7becbe0: c0266660 400a1000 400a8000 c7becc60 `f&@...@...@`L>G
++c7becc60: c0266660 400a8000 400b4000 c7952300 `f&@...@.@.@.#.G
++c7952300: c0266660 400b5000 400bc000 c79521c0 `f&@.P.@.@.@@!.G
++c79521c0: c0266660 400bc000 400bd000 c7bec6e0 `f&@.@.@.P.@`F>G
++c7bec6e0: c0266660 bffff000 c0000000 00000000 `f&@.p?...@....
++kdb>
++kdb> ll c0262b60 12 vm
++struct vm_area_struct at 0xc0262b60 for 56 bytes
++vm_start = 0x8048000 vm_end = 0x804c000
++page_prot = 0x25 avl_height = 2244 vm_offset = 0x0
++flags: READ EXEC MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE
++struct vm_area_struct at 0xc7bec360 for 56 bytes
++vm_start = 0x804c000 vm_end = 0x804d000
++page_prot = 0x25 avl_height = -31808 vm_offset = 0x3000
++flags: READ WRITE MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE
++struct vm_area_struct at 0xc7becb20 for 56 bytes
++vm_start = 0x804d000 vm_end = 0x8050000
++page_prot = 0x25 avl_height = -28664 vm_offset = 0x0
++flags: READ WRITE EXEC MAYREAD MAYWRITE MAYEXEC
++struct vm_area_struct at 0xc7bec3a0 for 56 bytes
++vm_start = 0x40000000 vm_end = 0x40009000
++page_prot = 0x25 avl_height = 30126 vm_offset = 0x0
++flags: READ EXEC MAYREAD MAYWRITE MAYEXEC DENYWRITE
++struct vm_area_struct at 0xc7bec420 for 56 bytes
++vm_start = 0x40009000 vm_end = 0x4000b000
++page_prot = 0x25 avl_height = 30126 vm_offset = 0x8000
++flags: READ WRITE MAYREAD MAYWRITE MAYEXEC DENYWRITE
++struct vm_area_struct at 0xc7bec4a0 for 56 bytes
++vm_start = 0x4000b000 vm_end = 0x40010000
++page_prot = 0x25 avl_height = 26853 vm_offset = 0x0
++flags: READ MAYREAD MAYWRITE MAYEXEC
++struct vm_area_struct at 0xc7bec8e0 for 56 bytes
++vm_start = 0x40010000 vm_end = 0x400a1000
++page_prot = 0x25 avl_height = 2244 vm_offset = 0x0
++flags: READ EXEC MAYREAD MAYWRITE MAYEXEC
++struct vm_area_struct at 0xc7becbe0 for 56 bytes
++vm_start = 0x400a1000 vm_end = 0x400a8000
++page_prot = 0x25 avl_height = 30126 vm_offset = 0x90000
++flags: READ WRITE MAYREAD MAYWRITE MAYEXEC
++struct vm_area_struct at 0xc7becc60 for 56 bytes
++vm_start = 0x400a8000 vm_end = 0x400b4000
++page_prot = 0x25 avl_height = 2244 vm_offset = 0x0
++flags: READ WRITE MAYREAD MAYWRITE MAYEXEC
++struct vm_area_struct at 0xc7952300 for 56 bytes
++vm_start = 0x400b5000 vm_end = 0x400bc000
++page_prot = 0x25 avl_height = 30126 vm_offset = 0x0
++flags: READ EXEC MAYREAD MAYWRITE MAYEXEC
++struct vm_area_struct at 0xc79521c0 for 56 bytes
++vm_start = 0x400bc000 vm_end = 0x400bd000
++page_prot = 0x25 avl_height = -16344 vm_offset = 0x6000
++flags: READ WRITE MAYREAD MAYWRITE MAYEXEC
++struct vm_area_struct at 0xc7bec6e0 for 56 bytes
++vm_start = 0xbffff000 vm_end = 0xc0000000
++page_prot = 0x25 avl_height = 2244 vm_offset = 0x0
++flags: READ WRITE EXEC MAYREAD MAYWRITE MAYEXEC GROWSDOWN
++kdb>
+diff -Nurp linux-2.6.22-590/Documentation/kdb/kdb_md.man linux-2.6.22-600/Documentation/kdb/kdb_md.man
+--- linux-2.6.22-590/Documentation/kdb/kdb_md.man 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/kdb_md.man 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,136 @@
++.TH MD 1 "August 4, 2004"
++.SH NAME
++md, mdWcN, mdr, mds, mm, mmW \- Memory manipulation commands
++.SH SYNOPSIS
++md [ \fIaddress-expression\fP [ \fIline-count\fP [\fIoutput-radix\fP ] ] ]
++.LP
++md\fIW\fRc\fIn\fR [ \fIaddress-expression\fP [ \fIline-count\fP [\fIoutput-radix\fP ] ] ]
++.LP
++mdp \fIphysical-address-expression\fP,\fIbytes\fP
++.LP
++mdr \fIaddress-expression\fP,\fIbytes\fP
++.LP
++mds [ \fIaddress-expression\fP [ \fIline-count\fP [\fIoutput-radix\fP ] ] ]
++.LP
++mm \fIaddress-expression\fP \fInew-contents\fP
++.LP
++mm\fIW\fR \fIaddress-expression\fP \fInew-contents\fP
++.SH DESCRIPTION
++The
++.B md
++command is used to display the contents of memory.
++The \fIaddress-expression\fP may be a numeric value (decimal or
++hexadecimal), a symbol name, a register name preceded by one or more
++percent symbols '%', an environment variable name preceded by
++a currency symbol '$', or a simple expression consisting of a
++symbol name, an addition or subtraction character and a numeric
++value (decimal or hexadecimal).
++.P
++If an address is specified and the \fIline-count\fP or \fIradix\fP arguments
++are omitted, they default to the values of the \fBMDCOUNT\fP and \fBRADIX\fP
++environment variables respectively. If the \fBMDCOUNT\fP or \fBRADIX\fP
++environment variables are unset, the appropriate defaults will be used [see
++\fBENVIRONMENT\fP below]. If no address is specified then md resumes
++after the last address printed, using the previous values of count and
++radix. The start address is rounded down to a multiple of the
++BYTESPERWORD (md) or width (md\fIW\fR).
++.P
++md uses the current value of environment variable \fBBYTESPERWORD\fP to
++read the data. When reading hardware registers that require special
++widths, it is more convenient to use md\fIW\fRc\fIn\fR where \fIW\fR is
++the width for this command and \fRc\fIn\fR is the number of entries to
++read. For example, md1c20 reads 20 bytes, 1 at a time. To continue
++printing just type md, the width and count apply to following md
++commands with no parameters. \fBNote:\fR The count is the number of
++repeats of the width, unlike MDCOUNT which gives the number of md lines
++to print.
++.P
++The
++.B mdp
++command displays the contents of physical memory, starting at the
++specified physical address for the specified number of bytes.
++The address is preceded by 'phys'.
++.P
++The
++.B mdr
++command displays the raw contents of memory, starting at the specified
++address for the specified number of bytes.
++The data is printed in one line without a leading address and no
++trailing character conversion.
++.B mdr
++is intended for interfacing with external debuggers, it is of little
++use to humans.
++.P
++The
++.B mds
++command displays the contents of memory one word per line and
++attempts to correlate the contents of each word with a symbol
++in the symbol table. If no symbol is found, the ascii representation
++of the word is printed, otherwise the symbol name and offset from
++symbol value are printed.
++By default the section data is printed for kernel symbols.
++.P
++The
++.B mm
++and
++\fBmm\fIW\fR
++commands allow modification of memory. The bytes at the address
++represented by \fIaddress-expression\fP are changed to
++\fInew-contents\fP. \fInew-contents\fP is allowed to be an
++\fIaddress-expression\fP.
++.B mm
++changes a machine word, \fBmm\fIW\fR changes \fIW\fR bytes at that
++address.
++.SH LIMITATIONS
++None.
++.SH ENVIRONMENT
++.TP 8
++MDCOUNT
++This environment variable (default=8) defines the number of lines
++that will be displayed by each invocation of the \fBmd\fP command.
++
++.TP 8
++RADIX
++This environment variable (default=16) defines the radix used to
++print the memory contents.
++
++.TP 8
++BYTESPERWORD
++This environment variable (default=4) selects the width of output
++data when printing memory contents. Select the value two to get
++16-bit word output, select the value one to get byte output.
++
++.TP 8
++LINES
++This environment variable governs the number of lines of output
++that will be presented before the kernel debugger built-in pager
++pauses the output. This variable only affects the functioning
++of the \fBmd\fP and \fBmds\fP if the \fBMDCOUNT\fP variable
++is set to a value greater than the \fBLINES\fP variable.
++
++.TP 8
++NOSECT
++If the \fBNOSECT\fP environment variable is non-zero then the
++section information is suppressed.
++The default is NOSECT=1 so section data is suppressed; use set\ NOSECT=0
++to see section information.
++.SH SMP CONSIDERATIONS
++None.
++.SH EXAMPLES
++.TP 8
++md %edx
++Display memory starting at the address contained in register \fB%edx\fP.
++
++.TP 8
++mds %esp
++Display stack contents symbolically. This command is quite useful
++in manual stack traceback.
++
++.TP 8
++mm 0xc0252110 0x25
++Change the memory location at 0xc0252110 to the value 0x25.
++
++.TP 8
++md chrdev_table 15
++Display 15 lines (at 16 bytes per line) starting at address
++represented by the symbol \fIchrdev_table\fP.
+diff -Nurp linux-2.6.22-590/Documentation/kdb/kdb.mm linux-2.6.22-600/Documentation/kdb/kdb.mm
+--- linux-2.6.22-590/Documentation/kdb/kdb.mm 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/kdb.mm 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,492 @@
++.TH KDB 8 "September 21, 2005"
++.hy 0
++.SH NAME
++Built-in Kernel Debugger for Linux - v4.4
++.SH "Overview"
++This document describes the built-in kernel debugger available
++for linux. This debugger allows the programmer to interactively
++examine kernel memory, disassemble kernel functions, set breakpoints
++in the kernel code and display and modify register contents.
++.P
++A symbol table is included in the kernel image and in modules which
++enables all non-stack symbols (including static symbols) to be used as
++arguments to the kernel debugger commands.
++.SH "Getting Started"
++To include the kernel debugger in a linux kernel, use a
++configuration mechanism (e.g. xconfig, menuconfig, et. al.)
++to enable the \fBCONFIG_KDB\fP option. Additionally, for accurate
++stack tracebacks, it is recommended that the \fBCONFIG_FRAME_POINTER\fP
++option be enabled (if present). \fBCONFIG_FRAME_POINTER\fP changes the compiler
++flags so that the frame pointer register will be used as a frame
++pointer rather than a general purpose register.
++.P
++After linux has been configured to include the kernel debugger,
++make a new kernel with the new configuration file (a make clean
++is recommended before making the kernel), and install the kernel
++as normal.
++.P
++You can compile a kernel with kdb support but have kdb off by default,
++select \fBCONFIG_KDB_OFF\fR. Then the user has to explicitly activate
++kdb by booting with the 'kdb=on' flag or, after /proc is mounted, by
++.nf
++ echo "1" > /proc/sys/kernel/kdb
++.fi
++You can also do the reverse, compile a kernel with kdb on and
++deactivate kdb with the boot flag 'kdb=off' or, after /proc is mounted,
++by
++.nf
++ echo "0" > /proc/sys/kernel/kdb
++.fi
++.P
++When booting the new kernel, the 'kdb=early' flag
++may be added after the image name on the boot line to
++force the kernel to stop in the kernel debugger early in the
++kernel initialization process. 'kdb=early' implies 'kdb=on'.
++If the 'kdb=early' flag isn't provided, then kdb will automatically be
++invoked upon system panic or when the \fBPAUSE\fP key is used from the
++keyboard, assuming that kdb is on. Older versions of kdb used just a
++boot flag of 'kdb' to activate kdb early, this is no longer supported.
++.P
++KDB can also be used via the serial port. Set up the system to
++have a serial console (see \fIDocumentation/serial-console.txt\fP), you
++must also have a user space program such as agetty set up to read from
++the serial console.
++The control sequence \fB<esc>KDB\fP on the serial port will cause the
++kernel debugger to be entered, assuming that kdb is on, that some
++program is reading from the serial console, at least one cpu is
++accepting interrupts and the serial console driver is still usable.
++.P
++\fBNote:\fR\ When the serial console sequence consists of multiple
++characters such as <esc>KDB then all but the last character are passed
++through to the application that is reading from the serial console.
++After exiting from kdb, you should use backspace to delete the rest of
++the control sequence.
++.P
++You can boot with kdb activated but without the ability to enter kdb
++via any keyboard sequence.
++In this mode, kdb will only be entered after a system failure.
++Booting with kdb=on-nokey will activate kdb but ignore keyboard
++sequences that would normally drop you into kdb.
++kdb=on-nokey is mainly useful when you are using a PC keyboard and your
++application needs to use the Pause key.
++You can also activate this mode by
++.nf
++ echo "2" > /proc/sys/kernel/kdb
++.fi
++.P
++If the console is sitting on the login prompt when you enter kdb, then
++the login command may switch into upper case mode.
++This is not a kdb bug, it is a "feature" of login - if the userid is
++all upper case then login assumes that you are using a TeleType (circa
++1960) which does not have lower case characters.
++Wait 60 seconds for login to timeout and it will switch back to lower
++case mode.
++.P
++\fBNote:\fR\ Your distributor may have chosen a different kdb
++activation sequence for the serial console.
++Consult your distribution documentation.
++.P
++If you have both a keyboard+video and a serial console, you can use
++either for kdb.
++Define both video and serial consoles with boot parameters
++.P
++.nf
++ console=tty0 console=ttyS0,38400
++.fi
++.P
++Any kdb data entered on the keyboard or the serial console will be echoed
++to both.
++.P
++If you are using a USB keyboard then kdb commands cannot be entered
++until the kernel has initialised the USB subsystem and recognised the
++keyboard.
++Using kdb=early with a USB keyboard will not work, the USB subsystem is
++initialised too late.
++.P
++While kdb is active, the keyboard (not serial console) indicators may strobe.
++The caps lock and scroll lock lights will turn on and off, num lock is not used
++because it can confuse laptop keyboards where the numeric keypad is mapped over
++the normal keys.
++On exit from kdb the keyboard indicators will probably be wrong, they will not match the kernel state.
++Pressing caps lock twice should get the indicators back in sync with
++the kernel.
++.SH "Basic Commands"
++There are several categories of commands available to the
++kernel debugger user including commands providing memory
++display and modification, register display and modification,
++instruction disassemble, breakpoints and stack tracebacks.
++Any command can be prefixed with '-' which will cause kdb to ignore any
++errors on that command, this is useful when packaging commands using
++defcmd.
++A line whose first non-space character is '#' is printed and ignored.
++.P
++The following table shows the currently implemented standard commands,
++these are always available. Other commands can be added by extra
++debugging modules, type '?' at the kdb prompt to get a list of all
++available commands.
++.DS
++.TS
++box, center;
++l | l
++l | l.
++Command Description
++_
++bc Clear Breakpoint
++bd Disable Breakpoint
++be Enable Breakpoint
++bl Display breakpoints
++bp Set or Display breakpoint
++bph Set or Display hardware breakpoint
++bpa Set or Display breakpoint globally
++bpha Set or Display hardware breakpoint globally
++bt Stack backtrace for current process
++btp Stack backtrace for specific process
++bta Stack backtrace for all processes
++btc Cycle over all live cpus and backtrace each one
++cpu Display or switch cpus
++dmesg Display system messages
++defcmd Define a command as a set of other commands
++ef Print exception frame
++env Show environment
++go Restart execution
++handlers Control the display of IA64 MCA/INIT handlers
++help Display help message
++id Disassemble Instructions
++kill Send a signal to a process
++ll Follow Linked Lists
++lsmod List loaded modules
++md Display memory contents
++mdWcN Display memory contents with width W and count N.
++mdp Display memory based on a physical address
++mdr Display raw memory contents
++mds Display memory contents symbolically
++mm Modify memory contents, words
++mmW Modify memory contents, bytes
++per_cpu Display per_cpu variables
++pid Change the default process context
++ps Display process status
++reboot Reboot the machine
++rd Display register contents
++rm Modify register contents
++rq Display runqueue for one cpu
++rqa Display runqueue for all cpus
++set Add/change environment variable
++sr Invoke SysReq commands
++ss Single step a cpu
++ssb Single step a cpu until a branch instruction
++stackdepth Print the stack depth for selected processes
++summary Summarize the system
++.TE
++.DE
++.P
++Some commands can be abbreviated, such commands are indicated by a
++non-zero \fIminlen\fP parameter to \fBkdb_register\fP; the value of
++\fIminlen\fP being the minimum length to which the command can be
++abbreviated (for example, the \fBgo\fP command can be abbreviated
++legally to \fBg\fP).
++.P
++If an input string does not match a command in the command table,
++it is treated as an address expression and the corresponding address
++value and nearest symbol are shown.
++.P
++Some of the commands are described here.
++Information on the more complicated commands can be found in the
++appropriate manual pages.
++.TP 8
++cpu
++With no parameters, it lists the available cpus.
++\&'*' after a cpu number indicates a cpu that did not respond to the kdb
++stop signal.
++\&'+' after a cpu number indicates a cpu for which kdb has some data, but
++that cpu is no longer responding to kdb, so you cannot switch to it.
++This could be a cpu that has failed after entering kdb, or the cpu may
++have saved its state for debugging then entered the prom, this is
++normal for an IA64 MCA event.
++\&'I' after a cpu number means that the cpu was idle before it entered
++kdb, it is unlikely to contain any useful data.
++\&'F' after a cpu number means that the cpu is offline.
++There is currently no way to distinguish between cpus that used to be
++online but are now offline and cpus that were never online, the kernel
++does not maintain the information required to separate those two cases.
++.I cpu
++followed by a number will switch to that cpu, you cannot switch to
++a cpu marked '*', '+' or 'F'.
++This command is only available if the kernel was configured for SMP.
++.TP 8
++dmesg [lines] [adjust]
++Displays the system messages from the kernel buffer.
++If kdb logging is on, it is disabled by dmesg and is left as disabled.
++With no parameters or a zero value for 'lines', dmesg dumps the entire
++kernel buffer.
++If lines is specified and is positive, dmesg dumps the last 'lines'
++from the buffer.
++If lines is specified and is negative, dmesg dumps the first 'lines'
++from the buffer.
++If adjust is specified, adjust the starting point for the lines that
++are printed.
++When 'lines' is positive, move the starting point back by 'adjust'
++lines, when 'lines' is negative, move the starting point forward by
++\&'adjust' lines.
++.I dmesg -100
++will dump 100 lines, from the start of the buffer.
++.I dmesg 100
++will dump 100 lines, starting 100 lines from the end of the buffer,
++.I dmesg 100 100
++will dump 100 lines, starting 200 lines from the end of the buffer.
++.I dmesg -100 100
++will dump 100 lines, starting 100 lines from the start of the buffer.
++.TP 8
++defcmd
++Defines a new command as a set of other commands, all input until
++.I endefcmd
++is saved and executed as a package.
++.I defcmd
++takes three parameters, the command name to be defined and used to
++invoke the package, a quoted string containing the usage text and a
++quoted string containing the help text for the command.
++When using defcmd, it is a good idea to prefix commands that might fail
++with '-', this ignores errors so the following commands are still
++executed.
++For example,
++.P
++.nf
++ defcmd diag "" "Standard diagnostics"
++ set LINES 2000
++ set BTAPROMPT 0
++ -id %eip-0x40
++ -cpu
++ -ps
++ -dmesg 80
++ -bt
++ -bta
++ endefcmd
++.fi
++.P
++When used with no parameters, defcmd prints all the defined commands.
++.TP 8
++go
++Continue normal execution.
++Active breakpoints are reestablished and the processor(s) allowed to
++run normally.
++To continue at a specific address, use
++.I rm
++to change the instruction pointer then go.
++.TP 8
++handlers
++Control the display of IA64 MCA/INIT handlers.
++The IA64 MCA/INIT handlers run on separate tasks.
++During an MCA/INIT event, the active tasks are typically the handlers,
++rather than the original tasks, which is not very useful for debugging.
++By default, KDB hides the MCA/INIT handlers so commands such as ps and
++btc will display the original task.
++You can change this behaviour by using
++.I handlers show
++to display the MCA/INIT handlers instead of the original tasks or use
++.I handlers hide
++(the default) to hide the MCA/INIT handlers and display the original
++tasks.
++.I handlers status
++will list the address of the handler task and the original task for
++each cpu.
++\fBNote:\fR\ If the original task was running in user space or it
++failed any of the MCA/INIT verification tests then there is no original
++task to display.
++In this case, the handler will be displayed even if
++.I handlers hide
++is set and
++.I handlers status
++will not show an original task.
++.TP 8
++id
++Disassemble instructions starting at an address.
++Environment variable IDCOUNT controls how many lines of disassembly
++output the command produces.
++.TP 8
++kill
++Internal command to send a signal (like kill(1)) to a process.
++kill -signal pid.
++.TP 8
++lsmod
++Internal command to list modules.
++This does not use any kernel nor user space services so can be used at any time.
++.TP 8
++per_cpu <variable_name> [<length>] [<cpu>]
++Display the values of a per_cpu variable, the variable_name is
++specified without the \fIper_cpu__\fR prefix.
++Length is the length of the variable, 1-8, if omitted or 0 it defaults
++to the size of the machine's register.
++To display the variable on a specific cpu, the third parameter is the
++cpu number.
++When the third parameter is omitted, the variable's value is printed
++from all cpus, except that zero values are suppressed.
++For each cpu, per_cpu prints the cpu number, the address of the
++variable and its value.
++.TP 8
++pid <number>
++Change the current process context, with no parameters it displays the
++current process.
++The current process is used to display registers, both kernel and user
++space.
++It is also used when dumping user pages.
++.I pid R
++resets to the original process that was running when kdb was entered.
++This command is useful if you have been looking at other processes and/or
++cpus and you want to get back to the original process.
++It does not switch cpus, it only resets the context to the original process.
++.TP 8
++reboot
++Reboot the system, with no attempt to do a clean close down.
++.TP 8
++rq <cpu>
++Display the runqueues for the specified cpu.
++.TP 8
++rqa
++Display the runqueues for all cpus.
++.TP 8
++stackdepth <percentage>
++Print the stack usage for processes using more than the specified
++percentage of their stack.
++If percentage is not supplied, it defaults to 60.
++This command is only implemented on i386 and ia64 architectures,
++patches for other architectures will be gratefully accepted.
++.TP 8
++summary
++Print a summary of the system, including the time (no timezone is
++applied), uname information and various critical system counters.
++.SH INITIAL KDB COMMANDS
++kdb/kdb_cmds is a plain text file where you can define kdb commands
++which are to be issued during kdb_init(). One command per line, blank
++lines are ignored, lines starting with '#' are ignored. kdb_cmds is
++intended for per user customization of kdb, you can use it to set
++environment variables to suit your hardware or to set standard
++breakpoints for the problem you are debugging. This file is converted
++to a small C object, compiled and linked into the kernel. You must
++rebuild and reinstall the kernel after changing kdb_cmds. This file
++will never be shipped with any useful data so you can always override
++it with your local copy. Sample kdb_cmds:
++.P
++.nf
++# Initial commands for kdb, alter to suit your needs.
++# These commands are executed in kdb_init() context, no SMP, no
++# processes. Commands that require process data (including stack or
++# registers) are not reliable this early. set and bp commands should
++# be safe. Global breakpoint commands affect each cpu as it is booted.
++
++set LINES=50
++set MDCOUNT=25
++set RECURSE=1
++bp sys_init_module
++.fi
++.SH INTERRUPTS AND KDB
++When a kdb event occurs, one cpu (the initial cpu) enters kdb state.
++It uses a cross system interrupt to interrupt the
++other cpus and bring them all into kdb state. All cpus run with
++interrupts disabled while they are inside kdb, this prevents most
++external events from disturbing the kernel while kdb is running.
++.B Note:
++Disabled interrupts means that any I/O that relies on interrupts cannot
++proceed while kdb is in control, devices can time out. The clock tick
++is also disabled, machines will lose track of time while they are
++inside kdb.
++.P
++Even with interrupts disabled, some non-maskable interrupt events will
++still occur, these can disturb the kernel while you are debugging it.
++The initial cpu will still accept NMI events, assuming that kdb was not
++entered for an NMI event. Any cpu where you use the SS or SSB commands
++will accept NMI events, even after the instruction has finished and the
++cpu is back in kdb. This is an unavoidable side effect of the fact that
++doing SS[B] requires the cpu to drop all the way out of kdb, including
++exiting from the event that brought the cpu into kdb. Under normal
++circumstances the only NMI event is for the NMI oopser and that is kdb
++aware so it does not disturb the kernel while kdb is running.
++.P
++Sometimes doing SS or SSB on ix86 will allow one interrupt to proceed,
++even though the cpu is disabled for interrupts. I have not been able
++to track this one down but I suspect that the interrupt was pending
++when kdb was entered and it runs when kdb exits through IRET even
++though the popped flags are marked as cli(). If any ix86 hardware
++expert can shed some light on this problem, please notify the kdb
++maintainer.
++.SH RECOVERING FROM KDB ERRORS
++If a kdb command breaks and kdb has enough of a recovery environment
++then kdb will abort the command and drop back into mainline kdb code.
++This means that user written kdb commands can follow bad pointers
++without killing kdb. Ideally all code should verify that data areas
++are valid (using kdb_getarea) before accessing it but lots of calls to
++kdb_getarea can be clumsy.
++.P
++The sparc64 port does not currently provide this error recovery.
++If someone would volunteer to write the necessary longjmp/setjmp
++code, their efforts would be greatly appreciated. In the
++meantime, it is possible for kdb to trigger a panic by accessing
++a bad address.
++.SH DEBUGGING THE DEBUGGER
++kdb has limited support for debugging problems within kdb. If you
++suspect that kdb is failing, you can set environment variable KDBDEBUG
++to a bit pattern which will activate kdb_printf statements within kdb.
++See include/linux/kdb.h, KDB_DEBUG_FLAG_xxx defines. For example
++.nf
++ set KDBDEBUG=0x60
++.fi
++activates the event callbacks into kdb plus state tracing in sections
++of kdb.
++.nf
++ set KDBDEBUG=0x18
++.fi
++gives lots of tracing as kdb tries to decode the process stack.
++.P
++You can also perform one level of recursion in kdb. If environment
++variable RECURSE is not set or is 0 then kdb will either recover from
++an error (if the recovery environment is satisfactory) or kdb will
++allow the error to percolate, usually resulting in a dead system. When
++RECURSE is 1 then kdb will recover from an error or, if there is no
++satisfactory recovery environment, it will drop into kdb state to let
++you diagnose the problem. When RECURSE is 2 then all errors drop into
++kdb state, kdb does not attempt recovery first. Errors while in
++recursive state all drop through, kdb does not even attempt to recover
++from recursive errors.
++.SH KEYBOARD EDITING
++kdb supports a command history, which can be accessed via keyboard
++sequences.
++It supports the special keys on PC keyboards, control characters and
++vt100 sequences on a serial console or a PC keyboard.
++.P
++.DS
++.TS
++box, center;
++l | l | l l | l
++l | l | l l | l.
++PC Special keys Control VT100 key Codes Action
++_
++Backspace ctrl-H Backspace 0x7f Delete character to the left of the cursor
++Delete ctrl-D Delete \\e[3~ Delete character to the right of the cursor
++Home ctrl-A Home \\e[1~ Go to start of line
++End ctrl-E End \\e[4~ Go to end of line
++Up arrow ctrl-P Up arrow \\e[A Up one command in history
++Down arrow ctrl-N Down arrow \\e[B Down one command in history
++Left arrow ctrl-B Left arrow \\e[D Left one character in current command
++Right arrow ctrl-F Right arrow \\e[C Right one character in current command
++.TE
++.DE
++.P
++There is no toggle for insert/replace mode, kdb editing is always in
++insert mode.
++Use delete and backspace to delete characters.
++.P
++kdb also supports tab completion for kernel symbols
++Type the start of a kernel symbol and press tab (ctrl-I) to complete
++the name
++If there is more than one possible match, kdb will append any common
++characters and wait for more input, pressing tab a second time will
++display the possible matches
++The number of matches is limited by environment variable DTABCOUNT,
++with a default of 30 if that variable is not set.
++.SH AUTHORS
++Scott Lurndal, Richard Bass, Scott Foehner, Srinivasa Thirumalachar,
++Masahiro Adegawa, Marc Esipovich, Ted Kline, Steve Lord, Andi Kleen,
++Sonic Zhang.
++.br
++Keith Owens <kaos@sgi.com> - kdb maintainer.
++.SH SEE ALSO
++.P
++linux/Documentation/kdb/kdb_{bp,bt,env,ll,md,ps,rd,sr,ss}.man
+diff -Nurp linux-2.6.22-590/Documentation/kdb/kdb_ps.man linux-2.6.22-600/Documentation/kdb/kdb_ps.man
+--- linux-2.6.22-590/Documentation/kdb/kdb_ps.man 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/kdb_ps.man 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,96 @@
++.TH PS 1 "September 14, 2004"
++.SH NAME
++ps \- Display processes
++.SH SYNOPSIS
++ps [ DRSTCZEUIMA ]
++.SH DESCRIPTION
++The
++.B ps
++command displays the status of all processes in the desired state.
++This command does not take any locks (all cpus should be frozen while
++kdb is running) so it can safely be used to debug lock problems with
++the process table.
++.P
++Without any parameters, \fBps\fP displays all the interesting
++processes, excluding idle tasks and sleeping system daemons.
++If a parameter is specified, it is a single string consisting of the
++letters D, R, S, T, C, Z, E, U, I and M, in any order.
++Each letter selects processes in a specific state, when multiple
++letters are specified, a process will be displayed if it is in any of
++the specified states.
++The states are\ :-
++.P
++.DS
++.TS
++box, center;
++l | l
++l | l.
++D Uninterruptible sleep
++R Running
++S Interruptible sleep
++T Stopped
++C Traced
++Z Zombie
++E Dead
++U Unrunnable
++I Idle task
++M Sleeping system daemon
++A All
++.TE
++.DE
++.P
++For state R (running), the process may not be on a cpu at the moment,
++but it is ready to run.
++The header line above the backtrace contains '1' in the fourth field if
++the process is actually on a cpu.
++.P
++The idle task is run on each cpu when there is no work for that cpu to do.
++Unless the idle task is servicing an interrupt, there is no point in
++printing the idle task.
++An idle task that is not servicing an interrupt is marked as state I,
++while servicing an interrupt it is in state R.
++By default, idle tasks are not printed, use \fBps\ I\fR to print them.
++If the idle tasks are not being printed, the start of the \fBps\fR
++output contains a list of which cpus are idle.
++.P
++Each cpu has one or more system daemons to handle per cpu work such as
++soft irqs.
++A system daemon (identified by a NULL mm pointer) that is sleeping is
++marked as state M.
++These processes rarely have any useful data and generate a lot of
++output on large machines, so sleeping system daemons are not printed by
++default.
++Use \fBps\ M\fR to print them.
++.P
++At the start of the \fBps\fR output is a line giving the cpu status,
++see the kdb \fBcpu\fR command.
++.SH LIMITATIONS
++None.
++.SH ENVIRONMENT
++.TP 8
++PS
++This environment variable (default=DRSTCZEU) is used when \fBps\fR
++is issued with no parameters.
++
++.SH SMP CONSIDERATIONS
++None.
++.SH EXAMPLES
++.TP 8
++\fBps\fR
++displays the useful tasks, suppressing idle tasks and sleeping
++system daemons.
++
++.TP 8
++\fBps\ RD\fR
++displays only tasks that are running or are in an uninterruptible
++sleep.
++
++.TP 8
++\fBps\ DRSTCZEUIM\fR
++displays all tasks.
++
++.TP 8
++\fBps\ A\fR
++displays all tasks.
++This is easier than remembering DRSTCZEUIM.
++
+diff -Nurp linux-2.6.22-590/Documentation/kdb/kdb_rd.man linux-2.6.22-600/Documentation/kdb/kdb_rd.man
+--- linux-2.6.22-590/Documentation/kdb/kdb_rd.man 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/kdb_rd.man 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,170 @@
++.TH RD 1 "September 20, 2005"
++.SH NAME
++rd, rm \- Register manipulation commands
++.SH SYNOPSIS
++rd [[c [n]]|d|u]
++.LP
++rm \fIregister-name\fP \fInew-contents\fP
++.LP
++ef <address>
++.SH DESCRIPTION
++The
++.B rd
++command is used to display the contents of processor and coprocessor registers.
++Without any arguments, the rd command displays the contents of the general
++register set at the point at which the kernel debugger was entered.
++If the bt* or pid commands have been used to change the current process then
++.B rd
++and
++.B rm
++may not be able to display any registers.
++'n' argument is only used for XScale platform to identify the desired
++coprocessor number, while 'd' option is not valid for XScale platform.
++.P
++On IA32 and IA64, with the 'c' argument, the processor control registers
++%cr0, %cr1, %cr2 and %cr4 are displayed, while with the 'd' argument
++the processor debug registers are displayed. If the 'u' argument
++is supplied, the registers for the current task as of the last
++time the current task entered the kernel are displayed.
++.P
++On XScale, 'c' argument is used to display
++all coprocessor control registers or specified coprocessor registers by
++argument 'n'. Argument 'u' is used to display the
++registers for the current task as of the last time the current task
++entered the kernel. Argument 'd' is not supported.
++.P
++On ix86, the
++.B rm
++command allows modification of a register. The following
++register names are valid: \fB%eax\fP, \fB%ebx\fP, \fB%ecx\fP,
++\fB%edx\fP, \fB%esi\fP, \fB%edi\fP, \fB%esp\fP, \fB%eip\fP,
++and \fB%ebp\fP. Note that if two '%' symbols are used
++consecutively, the register set displayed by the 'u' argument
++to the \fBrd\fP command is modified.
++.P
++The debug registers, \fBdr0\fP through \fBdr3\fP and both
++\fBdr6\fP and \fBdr7\fP can also be modified with the \fBrm\fP
++command.
++.P
++On sparc64, the valid registers are named \fB%g0\fP through
++\fB%g7\fP, \fB%l0\fP through \fB%l7\fP, \fB%o0\fP through
++\fB%o7\fP, and \fB%i0\fP through \fB%i7\fP, with the exceptions
++that \fB%o6\fP is called \fB%sp\fP and that \fB%i6\fP is called
++\fB%fp\fP. The registers \fB%tstate\fP, \fB%tpc\fP, \fB%tnpc\fP,
++\fB%y\fP, and \fB%fprs\fP provide state information at the time
++the system entered kdb. Additionally, when viewing registers, two
++convenience names are provided: \fB%®s\fP shows the
++address on the stack of the current registers, and \fB%csp\fP
++shows the current stack pointer within kdb itself.
++.P
++While on XScale, both the cpu registers and most coprocessor
++registers can be modified. \fIregister-name\fP can be one of the following
++to address cpu registers:
++r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14,
++r15, cpsr. For the coprocessor registers on XScale,
++either an alias name or \fICpcc[CRndd[CRmbb[Opaa]]]\fP can be used to address
++the register in coprocessor cc with CRn=dd, CRm=bb and opcode2=aa. All of aa, bb, cc, dd can be
++1 or 2 decimal digits, the default value is 0 when any of them is omitted. The names
++acc0_h and acc0_l are used to identify the high byte and
++low word of the accumulator in coprocessor 0.
++.P
++On IA64, the parameter to
++.B rd
++can be d (debug registers), u (user registers at most recent entry to kernel),
++i (interrupt registers), %isr (current interrupt status), s (stacked
++registers), k (kernel registers). You can also specify these individual
++registers -
++psr,
++ifs,
++ip,
++unat,
++pfs,
++rsc,
++rnat,
++bsps,
++pr,
++ldrs,
++ccv,
++fpsr,
++b0,
++b6,
++b7,
++r1,
++r2,
++r3,
++r8,
++r9,
++r10,
++r11,
++r12,
++r13,
++r14,
++r15,
++r16,
++r17,
++r18,
++r19,
++r20,
++r21,
++r22,
++r23,
++r24,
++r25,
++r26,
++r27,
++r28,
++r29,
++r30,
++r31.
++.B rm
++can change any of the individual registers or the stacked registers.
++.P
++The
++.B ef
++command displays an exception frame at the specified address.
++.SH LIMITATIONS
++Currently the \fBrm\fP command will not allow modification of the
++control registers.
++.P
++Currently neither the \fBrd\fP command nor the \fBrm\fP command will
++display or modify the model specific registers on the Pentium
++and Pentium Pro families.
++.SH ENVIRONMENT
++None.
++.SH SMP CONSIDERATIONS
++None.
++.SH EXAMPLES
++.TP 8
++rd
++Display general register set from kdb's current task.
++
++.TP 8
++rd c 0
++Display coprocessor 0 registers.
++
++.TP 8
++rm %eax 0
++Set the contents of \fB%eax\fP to zero. This will be the
++value of %eax when kdb returns from the condition which
++invoked it.
++
++.TP 8
++rm %%eax 0
++Set the value of the \fB%eax\fP register to zero. This will
++be the value the user-mode application will see upon returning
++from the kernel.
++
++.TP 8
++rm %acc0_h 0
++Set the contents of high byte of accumulator to zero.
++
++.TP 8
++rm dr0 0xc1287220
++Set the value of the \fBdr0\fP register to \f(CW0xc1287220\fP.
++
++.TP 8
++rm %InVLD_BTB 0
++Write 0 to coprocessor 15 register with CRn=7, CRm=5, opcode2=6.
++
++.TP 8
++rm %CP15CRn7CRm5Op6 0
++Same as above.
+diff -Nurp linux-2.6.22-590/Documentation/kdb/kdb_sr.man linux-2.6.22-600/Documentation/kdb/kdb_sr.man
+--- linux-2.6.22-590/Documentation/kdb/kdb_sr.man 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/kdb_sr.man 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,68 @@
++.TH SR 1 "7 October 2002"
++.SH NAME
++sr \- invoke sysrq commands from kdb
++.SH SYNOPSIS
++sr \fIx\fP
++.SH DESCRIPTION
++.hy 0
++The
++.B sr
++command invokes the existing sysrq handler code in the kernel.
++This command takes a single character which is passed to sysrq
++processing, as if you had entered the sysrq key sequence followed by
++that character.
++.P
++.B Caveats:
++.P
++kdb will always call the sysrq code but sysrq may be disabled.
++If you expect to use sysrq functions during debugging then
++.IP ""
++echo "1" > /proc/sys/kernel/sysrq
++.P
++before starting the debug session.
++Alternatively issue
++.IP ""
++mm4 sysrq_enabled 1
++.P
++during debugging.
++.P
++The sysrq code prints a heading using console loglevel 7 then reverts
++to the original loglevel for the rest of the sysrq processing.
++If the rest of the sysrq output is printed at a level below your
++current loglevel then you will not see the output on the kdb console,
++the output will only appear in the printk buffer.
++It is the user's responsibility to set the loglevel correctly if they
++want to see the sysrq output on the console.
++Issue
++.IP ""
++sr 7
++.P
++before any other
++.B sr
++commands if you want to see the output on the console.
++You may even have to adjust the default message loglevel in order to
++see any output from
++.BR sr .
++See Documentation/sysctl/kernel.txt for details on setting console
++loglevels via /proc.
++You can also adjust the loglevel variables via kdb
++.BR mm ;
++on older kernels there are variables such as default_message_level, on
++newer kernels all the loglevel variables are in array console_printk,
++see kernel/printk.c for your kernel.
++.P
++Operations that require interrupt driven I/O can be invoked from kdb
++.BR sr ,
++but they will not do anything until you type 'go' to exit from kdb
++(interrupts are disabled while in kdb).
++There is no guarantee that these operations will work, if the machine
++entered kdb because of an error then interrupt driven I/O may already
++be dead.
++Do not assume that
++.B sr\ s
++does anything useful.
++.P
++The sysrq handler uses locks and calls printk which also uses locks.
++If the sysrq handler or any of the sysrq functions have to wait for a
++lock then they will never return and kdb will appear to hang.
++Invoking sysrq code from kdb is inherently unsafe.
+diff -Nurp linux-2.6.22-590/Documentation/kdb/kdb_ss.man linux-2.6.22-600/Documentation/kdb/kdb_ss.man
+--- linux-2.6.22-590/Documentation/kdb/kdb_ss.man 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/kdb_ss.man 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,109 @@
++.TH SS 1 "17 January 2002"
++.SH NAME
++ss, ssb \- Single Step
++.SH SYNOPSIS
++ss
++.LP
++ssb
++.SH DESCRIPTION
++The
++.B ss
++command is used to execute a single instruction and return
++to the kernel debugger.
++.P
++Both the instruction that was single-stepped and the next
++instruction to execute are printed.
++.P
++The \fBssb\fP command will execute instructions from the
++current value of the instruction pointer. Each instruction
++may be printed as it is executed, depending upon architecture;
++execution will stop at any instruction which would cause the flow
++of control to change (e.g. branch, call, interrupt instruction,
++return, etc.)
++.SH LIMITATIONS
++On sparc64, there are some circumstances where single-stepping
++can be dangerous. Do not single-step across an instruction which
++changes the interrupt-enable bit in %tstate. Do not single step
++through code which is invoked when entering or leaving the
++kernel, particularly any kernel entry code before %tl is set to
++0, or any kernel exit code after %tl is set to 1.
++.SH ENVIRONMENT
++None.
++.SH SMP CONSIDERATIONS
++Other processors are held in the kernel debugger when the instruction
++is traced. Single stepping through code that requires a lock which is
++in use by another processor is an exercise in futility, it will never
++succeed.
++.SH INTERRUPT CONSIDERATIONS
++When a kdb event occurs, one cpu (the initial cpu) enters kdb state.
++It uses a cross system interrupt to interrupt the
++other cpus and bring them all into kdb state. All cpus run with
++interrupts disabled while they are inside kdb, this prevents most
++external events from disturbing the kernel while kdb is running.
++.B Note:
++Disabled interrupts means that any I/O that relies on interrupts cannot
++proceed while kdb is in control, devices can time out. The clock tick
++is also disabled, machines will lose track of time while they are
++inside kdb.
++.P
++Even with interrupts disabled, some non-maskable interrupt events
++will still occur, these can disturb the kernel while you are
++debugging it. The initial cpu will still accept NMI events,
++assuming that kdb was not entered for an NMI event. Any cpu
++where you use the SS or SSB commands will accept NMI events, even
++after the instruction has finished and the cpu is back in kdb.
++This is an unavoidable side effect of the fact that doing SS[B]
++requires the cpu to drop all the way out of kdb, including
++exiting from the NMI event that brought the cpu into kdb. Under
++normal circumstances the only NMI event is for the NMI oopser and
++that is kdb aware so it does not disturb the kernel while kdb is
++running.
++.P
++Sometimes doing SS or SSB on ix86 will allow one interrupt to proceed,
++even though the cpu is disabled for interrupts. I have not been able
++to track this one down but I suspect that the interrupt was pending
++when kdb was entered and it runs when kdb exits through IRET even
++though the popped flags are marked as cli(). If any ix86 hardware
++expert can shed some light on this problem, please notify the kdb
++maintainer.
++.SH EXAMPLES
++.nf
++.na
++.ft CW
++kdb> bp gendisk_head datar 4
++Data Access Breakpoint #0 at 0xc024ddf4 (gendisk_head) in dr0 is enabled on cpu 0
++for 4 bytes
++kdb> go
++...
++[root@host /root]# cat /proc/partitions
++Entering kdb on processor 0 due to Debug Exception @ 0xc01845e3
++Read/Write breakpoint #0 at 0xc024ddf4
++[0]kdb> ssb
++sd_finish+0x7b: movzbl 0xc02565d4,%edx
++sd_finish+0x82: leal 0xf(%edx),%eax
++sd_finish+0x85: sarl $0x4,%eax
++sd_finish+0x88: movl 0xc0256654,%ecx
++sd_finish+0x8e: leal (%eax,%eax,4),%edx
++sd_finish+0x91: leal (%eax,%edx,2),%edx
++sd_finish+0x94: movl 0xc0251108,%eax
++sd_finish+0x99: movl %eax,0xffffffc(%ecx,%edx,4)
++sd_finish+0x9d: movl %ecx,0xc0251108
++sd_finish+0xa3: xorl %ebx,%ebx
++sd_finish+0xa5: cmpb $0x0,0xc02565d4
++[0]kdb> go
++[root@host /root]#
++
++[0]kdb> ss
++sys_read: pushl %ebp
++SS trap at 0xc01274c1
++sys_read+0x1: movl %esp,%ebp
++[0]kdb> ss
++sys_read+0x1: movl %esp,%ebp
++SS trap at 0xc01274c3
++sys_read+0x3: subl $0xc,%esp
++[0]kdb> ss
++sys_read+0x3: subl $0xc,%esp
++SS trap at 0xc01274c6
++sys_read+0x6: pushl %edi
++[0]kdb>
++
+diff -Nurp linux-2.6.22-590/Documentation/kdb/slides linux-2.6.22-600/Documentation/kdb/slides
+--- linux-2.6.22-590/Documentation/kdb/slides 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/Documentation/kdb/slides 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,1382 @@
++#! /opt/cpg/bin/do-mgp
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%%
++%deffont "standard" tfont "comic.ttf"
++%deffont "thick" tfont "arialb.ttf"
++%deffont "typewriter" xfont "courier new-bold-r"
++%deffont "type2writer" xfont "arial narrow-bold-r"
++%%
++%% Default settings per each line numbers.
++%%
++#%default 1 leftfill, size 2, fore "black", back "LemonChiffon2", font "thick"
++%default 1 leftfill, size 2, fore "black", back "white", font "thick"
++%default 2 size 10, vgap 10, prefix " ", center
++%default 3 size 2, bar "gray70", vgap 10
++%default 4 size 6, fore "black", vgap 30, prefix " ", font "standard", left
++%%
++%% Default settings that are applied to TAB-indented lines.
++%%
++%tab 1 size 4, vgap 35, prefix " ", icon arc "red" 40
++%tab 2 size 4, vgap 20, prefix " ", icon delta3 "blue" 40
++%tab 3 size 4, vgap 20, prefix " ", icon delta3 "green" 40
++%%
++%%
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++KDB - Kernel Debugger
++
++
++
++%size 7,center, font "thick"
++Introduction
++
++And
++
++Demonstration
++
++
++%size 3
++
++February 5, 2002 IBM Linux Technology Center Paul Dorwin
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++IBM Legal
++
++
++ IBM Legal requires this information:
++
++%size 3
++
++ THE INFORMATION IN THE FOLLOWING PRESENTATION IS PREPARED
++ SOLELY FOR THE INFORMATION OF THE READER, AND COMES "AS IS"
++ AND WITHOUT WARRANTY OR REPRESENTATION OF ANY KIND.
++
++ ANY PARTY USING THE MATERIALS IN THIS PRESENTATION DOES SO
++ AT ITS OWN RISK LIABILITY AND THE PROVIDER OF THE MATERIALS
++ ACCEPTS NO RISK OR LIABILITY FOR SUCH USE OR RESULTING FROM
++ DISSEMINATION TO OR USE BY ANY OTHER PARTY
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Agenda
++
++%size 5
++
++ Installing and Configuring KDB
++
++ KDB Commands
++
++ Scull Demo
++
++ Setting Breakpoints
++
++ Displaying Data Structures
++
++ Kernel Data structures
++
++ Take a walk through an IO operation
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Installing and Configuring KDB
++
++
++ Install KDB patch.
++ Start with a clean source tree
++ Apply architecture specific patches
++ Obtain patch for your kernel version
++ see http://oss.sgi.com/projects/kdb/
++ Apply the kdb patch
++ patch -p 1 -N -u -i /path/to/patch
++ Apply any other patches
++ Build and reboot on your kdb enabled kernel
++ Man pages can be found at Documentation/kdb
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Configuring KDB
++
++
++ Config kernel with the following options:
++ These are documented in Documentation/Configure.help
++
++ CONFIG_KDB=y
++ Enable compilation of KDB in the kernel.
++ Setting this also sets CONFIG_KALLSYMS=y.
++ CONFIG_KDB_MODULES=n
++ KDB may be extended, compiling kdb/modules.
++ CONFIG_KDB_OFF=n
++ y = KDB is disabled by default.
++ boot with kdb=on to enable at boot.
++ /proc/sys/kernel/kdb to enable/disable when system is up.
++ CONFIG_KALLSYMS=y
++ This causes all symbols to be exported.
++ CONFIG_FRAME_POINTER=y
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Invoking KDB
++
++
++ KDB can be invoked in the following ways:
++
++ Early init with "kdb=early" lilo flag
++ Hits breakpoint prior to fork_init() (init/main.c)
++
++ Serial console with CNTRL-A
++
++ Console with PAUSE key
++
++ When a pre-set breakpoint is hit
++
++ On panic
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++KDB Commands
++
++
++ KDB environment
++ env Show environment variables
++ set Set environment variables
++ help Display Help Message
++ ? Display Help Message
++
++ System related
++ sections List kernel and module sections
++ lsmod List loaded kernel modules
++ reboot Reboot the machine immediately
++ cpu <cpunum> Switch to new cpu
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++KDB Commands
++
++
++ Memory Manipulation
++ md <vaddr> Display Memory Contents
++ mdr <vaddr> <bytes> Display Raw Memory
++ mds <vaddr> Display Symbolically
++ mm <vaddr> <value> Modify Memory Contents
++ id <vaddr> Display Instructions
++
++ Register Manipulation
++ rd Display Registers
++ rm <reg> <value> Modify Registers
++ ef <vaddr> Display exception frame
++
++ Stack
++ bt [<vaddr>] Stack traceback
++ btp <pid> Display stack for <pid>
++ bta Display all stacks
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++KDB Commands
++
++
++ Breakpoint
++ bc <bpnum> Clear Breakpoint
++ bd <bpnum> Disable Breakpoint
++ be <bpnum> Enable Breakpoint
++ bl [<vaddr>] Display breakpoints
++ bp [<vaddr>] Set/Display breakpoints
++ bpa [<vaddr>] Set/Display global breakpoints
++ bph [<vaddr>] Set hardware breakpoint
++ bpha [<vaddr>] Set global hardware breakpoint
++ bp* modifiers:
++ instruction - break on instruction fetch (default)
++ datar - break on read at vaddr
++ dataw - break on write at vaddr
++ IO - break on in or out op at vaddress
++
++ Execution control
++ go [<vaddr>] Continue Execution
++ ss [<#steps>] Single Step
++ ssb Single step to branch/call
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++KDB Commands
++
++
++ Kernel structures
++ ll <vaddr> <offset> <command> Traverse list and execute command
++ ps Display active task list
++ vm <vaddr> Display vm_area_struct
++ dentry <dentry> Display interesting dentry stuff
++ filp <filp> Display interesting filp stuff
++ sh <vaddr> Show scsi_host
++ sd <vaddr> Show scsi_device
++ sc <vaddr> Show scsi_cmnd
++ kiobuf <vaddr> Display kiobuf
++ page <vaddr> Display page
++ inode <vaddr> Display inode
++ bh <vaddr> Display buffer head
++ inode_pages <inode *> Display pages in an inode
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo
++
++
++ Objective
++ Find and display the data associated with a scull device
++
++ The sequence of events
++ Populate the scull device with data
++ Identify the breakpoints
++ Set breakpoint in the device read function
++ Identify the data structure elements
++ Identify device structures used to track data
++ Display data structures containing the data
++ Show the usage of the filp command
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo: Populate Device
++
++
++ Obtain the code
++ Surf to http://examples.oreilly.com/linuxdrive2/
++ Download the tarball
++ Untar it to /usr/src
++
++ Build and install the module
++ cd /usr/src/ldd2-samples-1.0.1/scull
++ make
++ ./scull.init start
++
++ Populate the scull device
++ cat main.c > /dev/scull0
++ cat /dev/scull0
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo: Driver Details
++
++
++ cat /dev/scull0
++ fd =
++%fore "blue", cont
++open
++%fore "black", cont
++("/dev/scull0", O_RDONLY);
++ Kernel finds the file_operations structure
++ Kernel then invokes the open function
++%fore "blue"
++ read
++%fore "black", cont
++(fd, buf, size);
++ Kernel finds the file_operations structure
++ Kernel then invokes the read function
++
++ Scull device file operations structure
++
++%font "typewriter", size 3
++ struct file_operations scull_fops = {
++ llseek: scull_llseek,
++%fore "blue"
++ read: scull_read,
++%fore "black"
++ write: scull_write,
++ ioctl: scull_ioctl,
++%fore "blue"
++ open: scull_open,
++%fore "black"
++ release: scull_release,
++ };
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo: Driver Details
++
++%font "typewriter", size 3
++ scull_open code
++%font "typewriter", size 3
++ int
++%fore "blue", cont
++scull_open
++%fore "black", cont
++(struct inode *inode, struct file *filp)
++ {
++ Scull_Dev *dev; /* device information */
++ int num = NUM(inode->i_rdev);
++
++ <snip>
++
++ dev = (Scull_Dev *)filp->private_data;
++ if (!dev) {
++ if (num >= scull_nr_devs) return -ENODEV;
++%fore "blue"
++ dev = &scull_devices[num];
++ filp->private_data = dev;
++%fore "black"
++ }
++
++ <snip>
++
++ }
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo: Driver Details
++
++%font "typewriter", size 3
++ scull_read code
++%font "typewriter", size 3
++ ssize_t
++%fore "blue", cont
++scull_read
++%fore "black", cont
++(struct file *filp, char *buf, size_t count,
++ loff_t *f_pos)
++ {
++
++%fore "blue", cont
++ Scull_Dev *dev = filp->private_data;
++%fore "black", cont
++ /* the first listitem */
++%fore "blue"
++ Scull_Dev *dptr;
++%fore "black"
++ int quantum = dev->quantum;
++ int qset = dev->qset;
++ int itemsize = quantum * qset;
++ if (down_interruptible(&dev->sem))
++ return -ERESTARTSYS;
++ if (*f_pos + count > dev->size)
++ count = dev->size - *f_pos;
++
++ /* find listitem, qset index, and offset in the quantum */
++ item = (long)*f_pos / itemsize;
++ rest = (long)*f_pos % itemsize;
++ s_pos = rest / quantum; q_pos = rest % quantum;
++
++ /* follow the list up to the right position */
++%fore "blue"
++ dptr = scull_follow(dev, item);
++%fore "black"
++
++ <snip>
++
++ }
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo: Breakpoints
++
++
++%font "typewriter", size 3
++ Determine where to set breakpoint
++%font "typewriter", size 3
++%fore "blue"
++ dptr = scull_follow(dev, item);
++%fore "black"
++
++%font "typewriter", size 3
++ Disassemble scull_read
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue", cont
++id scull_read
++%fore "black"
++ 0xf8c083b4 scull_read: push %ebp
++ 0xf8c083b5 scull_read+0x1:mov %esp,%ebp
++ 0xf8c083b7 scull_read+0x3:push %edi
++ <snip>
++ 0xf8c08465 scull_read+0xb1:sub $0x8,%esp
++%fore "blue"
++ 0xf8c08468 scull_read+0xb4:push %ecx
++ 0xf8c08469 scull_read+0xb5:push %esi
++ 0xf8c0846a scull_read+0xb6:call 0xf8c08364 scull_follow:
++%fore "black"
++ 0xf8c0846f scull_read+0xbb:mov %eax,
++%fore "blue", cont
++ %edx
++%fore "black"
++ 0xf8c08471
++%fore "blue", cont
++scull_read+0xbd
++%fore "black", cont
++:add $0x10,%esp
++ <snip>
++
++ Set breakpoint in driver read
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue",cont
++bp scull_read+0xbd
++%fore "black"
++ Instruction(i) BP #0 at 0xf8c08471 ([scull]scull_read+0xbd)
++ is enabled globally adjust 1
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo: Breakpoints
++
++
++%font "typewriter", size 3
++ Restart the system
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue", cont
++go
++%fore "black"
++
++ Hit the Breakpoint
++%font "typewriter", size 3
++ [root@elm3b77 root]#
++%fore "blue", cont
++cat /dev/scull0
++%fore "black"
++ Instruction(i) breakpoint #0 at 0xf8c08471 (adjusted)
++ 0xf8c08471 scull_read+0xbd:int3
++ Entering kdb (current=0xf73ec000, pid 1249) on processor 2
++ due to Breakpoint @ 0xf8c08471
++
++ Display the registers
++%font "typewriter", size 3
++ [2]kdb>
++%fore "blue", cont
++rd
++%fore "black"
++ eax = 0xf77d7b60 ebx = 0x00000000 ecx = 0x00000000 edx =
++%fore "blue", cont
++0xf77d7b60
++%fore "black"
++ esi =
++%fore "blue", cont
++0xf77d7b60
++%fore "black", cont
++ edi = 0x00001000 esp = 0xf7415f40 eip = 0xf8c08471
++ ebp = 0xf7415f78 xss = 0x00000018 xcs = 0x00000010 eflags = 0x00000246
++ xds = 0xf7590018 xes = 0x00000018 origeax = 0xffffffff &regs = 0xf7415f0c
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo: Data Structures
++
++%font "typewriter", size 3
++ Display the Scull_Dev structure
++%font "typewriter", size 3
++ [2]kdb>
++%fore "blue", cont
++md 0xf77d7b60 2
++%fore "black"
++ 0xf77d7b60
++%fore "blue", cont
++f7400000
++%fore "black", cont
++ 00000000 00000fa0 000003e8 ..@w.... ...h...
++ 0xf77d7b70 0000534e 00000000 00000000 00000000 NS..............
++
++ Scull Device Structure
++%font "typewriter", size 3
++ typedef struct Scull_Dev {
++%fore "blue"
++ void **data;
++%fore "black"
++ struct Scull_Dev *next; /* next listitem */
++ int quantum; /* the current quantum size */
++ int qset; /* the current array size */
++ unsigned long size;
++ devfs_handle_t handle; /* only used if devfs is there */
++ unsigned int access_key; /* used by sculluid and scullpriv */
++ struct semaphore sem; /* mutual exclusion semaphore */
++ } Scull_Dev;
++%size 6
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo: Data Structures
++
++
++%font "typewriter", size 3
++ Display the quantum set (dev->data)
++%font "typewriter", size 3
++ [2]kdb>
++%fore "blue", cont
++md f7400000 2
++%fore "black"
++ 0xf7400000
++%fore "blue", cont
++f73ea000
++%fore "black", cont
++ f73f1000 f740c000 f7ab4000 . >w..?w.@@w.@+w
++ 0xf7400010 f73ef000 f755b000 00000000 00000000 .p>w.0Uw........
++
++ Display the first quantum (dev->data[0])
++%font "typewriter", size 3
++ [2]kdb>
++%fore "blue", cont
++md f73ea000
++%fore "black"
++ 0xf73ea000 200a2a2f 616d202a 632e6e69 202d2d20 /*. * main.c --
++ 0xf73ea010 20656874 65726162 75637320 63206c6c the bare scull c
++ 0xf73ea020 20726168 75646f6d 200a656c 2a200a2a har module. *. *
++ 0xf73ea030 706f4320 67697279 28207468 32202943 Copyright (C) 2
++ 0xf73ea040 20313030 73656c41 646e6173 52206f72 001 Alessandro R
++ 0xf73ea050 6e696275 6e612069 6f4a2064 6874616e ubini and Jonath
++ 0xf73ea060 43206e61 6562726f 2a200a74 706f4320 an Corbet. * Cop
++ 0xf73ea070 67697279 28207468 32202943 20313030 yright (C) 2001
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo: filp command
++
++
++%font "typewriter", size 3
++ Show filp usage - here is the scull_read prototype
++%font "typewriter", size 3
++ ssize_t scull_read(
++%fore "blue", cont
++struct file *filp
++%fore "black", cont
++, char *buf,
++ size_t count, loff_t *f_pos);
++ Show the stack trace:
++%font "typewriter", size 3
++[2]kdb>
++%fore "blue", cont
++bt
++%fore "black"
++ EBP EIP Function(args)
++ 0xee9dbf78 0xf8c08471 [scull]scull_read+0xbd (
++%fore "blue", cont
++0xeaf6c0c0
++%fore "black", cont
++, 0x804e128,
++ 0x1000, 0xeaf6c0e0, 0x804f000)
++ scull .text 0xf8c08060 0xf8c083b4 0xf8c084dc
++ 0xee9dbfbc 0xc0136278 sys_read+0x98 (0x3, 0x804e128, 0x1000, ...
++ kernel .text 0xc0100000 0xc01361e0 0xc01362b0
++ 0xc010702b system_call+0x33
++ kernel .text 0xc0100000 0xc0106ff8 0xc0107030
++ And show the filp output
++%font "typewriter", size 3
++ [2]kdb>
++%fore "blue", cont
++filp 0xeaf6c0c0
++%fore "black"
++ name.name 0xe93889fc name.len 6
++ File Pointer at 0xeaf6c0c0
++ f_list.nxt = 0xe42deca0 f_list.prv = 0xf7e69070
++%fore "blue"
++ f_dentry = 0xe93889a0
++%fore "black", cont
++ f_op = 0xf8c0a200
++ f_count = 2 f_flags = 0x8000 f_mode = 0x1
++ f_pos = 0 f_reada = 0 f_ramax = 0
++ f_raend = 0 f_ralen = 0 f_rawin = 0
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Scull Demo: filp command
++
++
++%font "typewriter", size 3
++ filp output - continued
++%font "typewriter", size 3
++%fore "blue"
++ Directory Entry at 0xe93889a0
++%fore "black"
++ d_name.len = 6
++%fore "orange", cont
++d_name.name = 0xe93889fc
++%fore "black", cont
++>
++ d_count = 1 d_flags = 0x0
++%fore "blue", cont
++d_inode = 0xe827b680
++%fore "black"
++ d_hash.nxt = 0xc215aec8 d_hash.prv = 0xc215aec8
++ d_lru.nxt = 0xe93889b8 d_lru.prv = 0xe93889b8
++ d_child.nxt = 0xe89e1e80 d_child.prv = 0xe9388940
++ d_subdirs.nxt = 0xe93889c8 d_subdirs.prv = 0xe93889c8
++ d_alias.nxt = 0xe827b690 d_alias.prv = 0xe827b690
++ d_op = 0x00000000 d_sb = 0xf7e69000
++
++%fore "blue"
++ Inode Entry at 0xe827b680
++%fore "black"
++ i_mode = 0x21a4 i_nlink = 1 i_rdev = 0xfe00
++ i_ino = 37182 i_count = 1 i_dev = 0x821
++ i_hash.nxt = 0xc20e6be8 i_hash.prv = 0xc20e6be8
++ i_list.nxt = 0xe827b2c8 i_list.prv = 0xe827b868
++ i_dentry.nxt = 0xe93889d0 i_dentry.prv = 0xe93889d0
++
++ Check the filename (display d_name.name)
++%font "typewriter", size 3
++ [2]kdb>
++%fore "orange", cont
++md 0xe93889fc 1
++%fore "black"
++ 0xe93889fc 6c756373 0000306c 00000000 00000000 scull0..........
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Kernel Structures
++
++
++ Objective
++ Show output from various kernel related kdb commands
++
++ Sequence of events
++ Simple Program
++ Write a simple program which allocates memory and hangs
++ Show usage of the ps, vm, and ll commands
++ Walk an IO operation
++ Hit a breakpoint in qlogic driver (isp1020_queuecommand)
++ Show usage of scsi related commands (sc, sh, and sd)
++ Show usage of vm related commands (bh, page, inode, inode_pages)
++
++
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Simple program
++
++%font "typewriter", size 3
++ simple.c - simple program which allocates memory
++%font "typewriter", size 3
++%fore "blue"
++ int foo_global[8192];
++%fore "black"
++ main()
++ {
++ int *
++%fore "blue", cont
++foo_malloc;
++%fore "black"
++ int i;
++ foo_malloc = (int *)malloc(0x8192);
++ for(i = 0; i < 0x100; i++) {
++ foo_global[i] = 0xdead0000 | i;
++ foo_malloc[i] = 0xbeef0000 | i;
++ }
++ printf("foo_global at %x\n", (int)foo_global);
++ printf("foo_malloc at %x\n", (int)foo_malloc);
++ printf("sleep forever\n");
++ sleep(2000000);
++ }
++
++ simple output
++%font "typewriter", size 3
++ [root@elm3b77 scull]# cc -o simple simple.c
++ [root@elm3b77 scull]# ./simple
++ foo_global at
++%fore "blue", cont
++8049780
++%fore "black"
++ foo_malloc at
++%fore "blue", cont
++8051788
++%fore "black"
++ sleep forever
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Simple Program
++
++%font "typewriter", size 3
++ Show the output of the ps command
++%font "typewriter", size 3
++ Entering kdb (current=0xc2010000, pid 0) on processor 3 due to
++ Keyboard Entry
++ [3]kdb>
++%fore "blue", cont
++ps
++%fore "black"
++ Task Addr Pid Parent [*] cpu State Thread Command
++ 0xf7efe000 00000001 00000000 0 003 stop 0xf7efe370 init
++ 0xf7ef0000 00000002 00000001 0 001 stop 0xf7ef0370 keventd
++ 0xf7eec000 00000003 00000000 0 000 stop 0xf7eec370 ksoftirqd_CPU0
++ 0xf7eea000 00000004 00000000 0 001 stop 0xf7eea370 ksoftirqd_CPU1
++ 0xf7ee8000 00000005 00000000 0 002 stop 0xf7ee8370 ksoftirqd_CPU2
++ 0xf7ee6000 00000006 00000000 0 003 stop 0xf7ee6370 ksoftirqd_CPU3
++
++ <snip>
++
++ 0xf7b46000 00001006 00000737 0 003 stop 0xf7b46370 sshd
++ 0xf7ace000 00001007 00001006 0 000 stop 0xf7ace370 bash
++ 0xef06a000 00001066 00001007 0 003 stop 0xef06a370 su
++ 0xeef88000 00001067 00001066 0 000 stop 0xeef88370 bash
++ 0xeef64000 00001119 00000770 0 001 stop 0xeef64370 in.ftpd
++%fore "blue"
++ 0xeeeac000
++%fore "black", cont
++ 00001138 00001067 0 001 stop 0xeeeac370
++%fore "blue", cont
++simple
++%fore "black"
++ [3]kdb>
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Simple Program
++
++%font "typewriter", size 3
++ Display the task struct
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++md 0xeeeac000
++%fore "black"
++ 0xeeeac000 00000001 00000000 00000000 c0000000 ................
++ 0xeeeac010 c0339880 00000000 00000000 ffffffff ................
++ 0xeeeac020 0000000a 00000000 00000000
++%fore "blue", cont
++f7e10f00
++%fore "black", cont
++ ..............aw
++ 0xeeeac030 00000001 ffffffff ffffffff 00000000 ................
++
++%font "typewriter", size 3
++ Determine offset of mm_struct ptr in task_struct
++%font "typewriter", size 3
++ struct task_struct {
++ [0] volatile long state;
++ [4] unsigned long flags;
++ [8] int sigpending;
++ [c] mm_segment_t addr_limit;
++ [10] struct exec_domain *exec_domain;
++ [14] volatile long need_resched;
++ [18] unsigned long ptrace;
++ [1c] int lock_depth;
++ [20] long counter;
++ [24] long nice;
++ [28] unsigned long policy;
++%fore "blue"
++ [2c] struct mm_struct *mm;
++%fore "black"
++ [30] int processor;
++ [34] unsigned long cpus_runnable, cpus_allowed;
++ <snip>
++ };
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Simple Program
++
++
++%font "typewriter", size 3
++ Display the mm_struct associated with simple process
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++md f7e10f00
++%fore "black"
++ 0xf7e10f00
++%fore "blue", cont
++e8357a80
++%fore "black", cont
++ e8357978 f7ac77e0 eb15eac0 .z5hxy5h`w,w@j.k
++ 0xf7e10f10 00000001 00000002 0000000b 00000000 ................
++ 0xf7e10f20 00000001 f7e10f24 f7e10f24 00000001 ................
++ 0xf7e10f30 f7e35e70 eea7e8f0 08048000 0804862b ................
++ 0xf7e10f40 0804962c 08049744 08051780 0805a000 ................
++ 0xf7e10f50 bffffd10 bffffe00 bffffe09 bffffe09 ................
++ 0xf7e10f60 bffffff3 0000005a 00000168 00000000 ................
++ 0xf7e10f70 00000000 00000002 00000000 00000001 ................
++
++%font "typewriter", size 3
++ Determine offset of the first vma in the process
++%font "typewriter", size 3
++ struct mm_struct {
++%fore "blue"
++ struct vm_area_struct * mmap;
++%fore "black"
++ rb_root_t mm_rb;
++ struct vm_area_struct * mmap_cache;
++ <snip>
++ };
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Simple Program
++
++%font "typewriter", size 3
++ Display the first vma using md
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++md e8357a80
++%fore "black"
++ 0xe8357a80 f7e10f00 08048000 08049000
++%fore "blue", cont
++e8727e00
++%fore "black",cont
++ ..aw.........~rh
++ 0xe8357a90 00000025 00001875 e8727e18 00000001 %...u....~rh....
++
++ Display the first vma using vma
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++vma e8357a80
++%fore "black"
++ struct vm_area_struct at 0xe8357a80 for 68 bytes
++ vm_start = 0x8048000 vm_end = 0x8049000
++ page_prot = 0x25
++ flags: READ EXEC MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE
++%font "typewriter", size 3
++
++ Determine the offset to the vma list
++%font "typewriter", size 3
++ struct vm_area_struct {
++ [0] struct mm_struct * vm_mm;
++ [4] unsigned long vm_start;
++ [8] unsigned long vm_end;
++%fore "blue"
++ [c] struct vm_area_struct *vm_next;
++%fore "black"
++ <snip>
++ };
++ Display the next vma
++%font "typewriter", size 3
++ [3]kdb> vma e8727e00
++ struct vm_area_struct at 0xe8727e00 for 68 bytes
++ vm_start = 0x8049000 vm_end = 0x804a000
++ page_prot = 0x25
++ flags: READ WRITE MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Simple Program
++
++%font "typewriter", size 3
++ Use the ll command to display the list of vma's
++%font "typewriter", size 3
++ [3]kdb> ll e8357a80 0xc vma
++.
++ struct vm_area_struct at 0xe8357a80 for 68 bytes
++ vm_start = 0x8048000 vm_end = 0x8049000
++ page_prot = 0x25
++ flags: READ EXEC MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE
++.
++ struct vm_area_struct at 0xe8727e00 for 68 bytes
++ vm_start =
++%fore "orange", cont
++0x8049000
++%fore "black", cont
++ vm_end =
++%fore "orange", cont
++0x804a000
++%fore "black"
++ page_prot = 0x25
++ flags: READ WRITE MAYREAD MAYWRITE MAYEXEC DENYWRITE EXECUTABLE
++.
++ struct vm_area_struct at 0xe8727c80 for 68 bytes
++ vm_start =
++%fore "blue", cont
++0x804a000
++%fore "black", cont
++ vm_end =
++%fore "blue", cont
++0x805a000
++%fore "black"
++ page_prot = 0x25
++ flags: READ WRITE EXEC MAYREAD MAYWRITE MAYEXEC
++ <snip>
++ struct vm_area_struct at 0xe8357900 for 68 bytes
++ vm_start = 0xbfffe000 vm_end = 0xc0000000
++ page_prot = 0x25
++ flags: READ WRITE EXEC MAYREAD MAYWRITE MAYEXEC GROWSDOWN
++
++ Match the vma to the displayed addresses
++%font "typewriter", size 3
++ foo_global at
++%fore "orange", cont
++8049780
++%fore "black"
++ foo_malloc at
++%fore "blue", cont
++8051788
++%fore "black"
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++ Objective
++ Show usage of various scsi and vm related kdb commands
++
++ Sequence:
++ Set a breakpoint in the scsi driver
++ Stops when queueing a command to the controller
++ Cause IO on an idle disk
++ Show various IO stack traces
++ Display the IO data structures
++ Display vm information about the data
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Set the breakpoint
++
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++bp isp1020_queuecommand
++%fore "black"
++ Instruction(i) BP #0 at 0xc01ecfe0 (isp1020_queuecommand)
++ is enabled globally adjust 1
++
++%font "typewriter", size 3
++ Create some activity on a previously unused disk
++
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++go
++%fore "black"
++ [root@elm3b77 root]#
++%fore "blue", cont
++ls /rh62
++%fore "black"
++
++ Instruction(i) breakpoint #0 at 0xc01ecfe0 (adjusted)
++ 0xc01ecfe0 isp1020_queuecommand:int3
++
++ Entering kdb (current=0xf75ba000, pid 1181) on processor 3 due to
++ Breakpoint @ 0xc01ecfe0
++
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Show the stack.
++ This is a read of the /rh62 directory
++
++%font "typewriter", size 3
++ [1]kdb>
++%fore "blue", cont
++bt
++%fore "black"
++ EBP EIP Function(args)
++ 0xf75bbdf4 0xc01ecfe0 isp1020_queuecommand
++ 0xc01e2c77 scsi_dispatch_cmd+0x1f7
++ 0xf75bbe24 0xc01e99b1 scsi_request_fn+0x2f1
++ 0xf75bbe34 0xc01c84fd generic_unplug_device+0x2d
++ 0xf75bbe50 0xc011b3af __run_task_queue+0x5f
++ 0xf75bbe6c 0xc013a63c block_sync_page+0x1c
++ 0xf75bbe98 0xc0128127 __lock_page+0x77
++ 0xf75bbea4 0xc0128178 lock_page+0x18
++ 0xf75bbec8 0xc012a4b3 read_cache_page+0xc3
++ 0xf75bbef4 0xc0168e23 ext2_get_page+0x23
++ 0xf75bbf48 0xc0168fdd ext2_readdir+0xfd
++ 0xf75bbf68 0xc0143d2e vfs_readdir+0x7e
++ 0xf75bbfbc 0xc01442ed
++%fore "blue", cont
++sys_getdents64+0x4d
++%fore "black"
++ 0xc010702b system_call+0x33
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Allow the operation to complete
++
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++go
++%fore "black"
++ bench build etc lib mnt oldsys rh72 spv usr
++ bin data h linux mnt1 opt root test var
++ boot dev home lost+found mnt2 proc sbin tmp
++
++%font "typewriter", size 3
++ Force some more activity
++
++%font "typewriter", size 3
++ [root@elm3b77 root]#
++%fore "blue", cont
++cd /rh62/tmp
++%fore "black"
++ Instruction(i) breakpoint #0 at 0xc01ecfe0 (adjusted)
++ 0xc01ecfe0 isp1020_queuecommand:int3
++
++ Entering kdb (current=0xf768a000, pid 981) on processor 3 due to
++ Breakpoint @ 0xc01ecfe0
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Show the stack.
++ This is an inode read for /rh62/tmp
++
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++bt
++%fore "black"
++ EBP EIP Function(args)
++ 0xf768bd68 0xc01ecfe0 isp1020_queuecommand
++ 0xc01e2c77 scsi_dispatch_cmd+0x1f7
++ 0xf768bd98 0xc01e99b1 scsi_request_fn+0x2f1
++ 0xf768bda8 0xc01c84fd generic_unplug_device+0x2d
++ 0xf768bdc4 0xc011b3af __run_task_queue+0x5f
++ 0xf768bdfc 0xc0137216 __wait_on_buffer+0x56
++ 0xf768be1c 0xc0138600 bread+0x50
++ 0xf768be5c 0xc016b684 ext2_read_inode+0x114
++ 0xf768bf0c 0xc013fbec real_lookup+0x7c
++ 0xf768bf78 0xc014035d link_path_walk+0x5ad
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Create a new file, causing yet more disk activity
++
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++go
++%fore "black"
++
++ [root@elm3b77 tmp]#
++%fore "blue", cont
++echo "Hello linux reading group" > j1;sync
++%fore "black"
++
++ Instruction(i) breakpoint #0 at 0xc01ecfe0 (adjusted)
++ 0xc01ecfe0 isp1020_queuecommand:int3
++
++ Entering kdb (current=0xf768a000, pid 981) on processor 3 due to
++ Breakpoint @ 0xc01ecfe0
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Show the stack
++ This is an inode read in response to the open
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++bt
++%fore "black"
++ EBP EIP Function(args)
++ 0xf768bd78 0xc01ecfe0 isp1020_queuecommand
++ 0xc01e2c77 scsi_dispatch_cmd+0x1f7
++ 0xf768bda8 0xc01e99b1 scsi_request_fn+0x2f1
++ 0xf768bdb8 0xc01c84fd generic_unplug_device+0x2d
++ 0xf768bdd4 0xc011b3af __run_task_queue+0x5f
++ 0xf768bdf0 0xc013a63c block_sync_page+0x1c
++ 0xf768be1c 0xc0128127 __lock_page+0x77
++ 0xf768be28 0xc0128178 lock_page+0x18
++ 0xf768be4c 0xc012a4b3 read_cache_page+0xc3
++ 0xf768be78 0xc0168e23 ext2_get_page+0x23
++ 0xf768beb8 0xc01691ed ext2_find_entry+0x8d
++ 0xf768bed4 0xc016933a ext2_inode_by_name+0x1a
++ 0xf768befc 0xc016c077 ext2_lookup+0x27
++ 0xf768bf1c 0xc014094a lookup_hash+0x9a
++ 0xf768bf64 0xc0140c4d open_namei+0xfd
++ 0xf768bfa0 0xc0135907 filp_open+0x37
++ 0xf768bfbc 0xc0135c64 sys_open+0x34
++ 0xc010702b system_call+0x33
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Let the operation continue
++%font "typewriter", size 3
++ [3]kdb>
++%fore "blue", cont
++go
++%fore "black"
++ Instruction(i) breakpoint #0 at 0xc01ecfe0 (adjusted)
++ 0xc01ecfe0 isp1020_queuecommand: int3
++ Entering kdb (current=0xc0352000, pid 0) on processor 0 due to
++ Breakpoint @ 0xc01ecfe0
++ Show the stack
++ This is an io completion queuing the next request
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue", cont
++bt
++%fore "black"
++ EBP EIP Function(args)
++ 0xc0353df4 0xc01ecfe0 isp1020_queuecommand(
++%fore "blue", cont
++0xf7e63a00
++%fore "black", cont
++,0xc01e7fc0...
++ 0xc01e2c77 scsi_dispatch_cmd+0x1f7
++ 0xc0353e24 0xc01e99b1 scsi_request_fn+0x2f1
++ 0xc0353e40 0xc01e8f6a
++%fore "blue", cont
++scsi_queue_next_request+0x4a
++%fore "black"
++ 0xc0353e5c 0xc01e9166 __scsi_end_request+0x116
++ 0xc0353ea8 0xc01e93e0
++%fore "blue", cont
++scsi_io_completion+0x170
++%fore "black"
++ 0xc0353ecc 0xc01f658e rw_intr+0x14e
++ 0xc0353ef8 0xc01e8668 scsi_old_done+0x6a8
++ 0xc0353fd4 0xc01052c2 cpu_idle+0x52
++ Function prototype
++%font "typewriter", size 3
++ int isp1020_queuecommand(
++%fore "blue", cont
++Scsi_Cmnd *Cmnd,
++%fore "black"
++ void (*done)(Scsi_Cmnd *))
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Show the command being queued
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue", cont
++sc 0xf7e63a00
++%fore "black"
++ scsi_cmnd at 0xf7e63a00
++%fore "blue"
++ host = 0xf7e91400
++%fore "black", cont
++ state = 4099 owner = 258
++%fore "blue", cont
++device = 0xf7ed5d80
++%fore "black"
++ bnext = 0x00000000 reset_chain = 0x00000000 eh_state = 0
++ done = 0xc01f6440
++ serial_number = 3402 serial_num_at_to = 0 retries = 0 timeout = 0
++ id/lun/cmnd = [0/0/0] cmd_len = 10 old_cmd_len = 10
++ cmnd = [2a/00/00/28/00/3f/00/00/10/00/ef/f7]
++ data_cmnd = [2a/00/00/28/00/3f/00/00/10/00/ef/f7]
++ request_buffer = 0xc03fd000 bh_next = 0x00000000
++ request_bufflen = 8192
++ use_sg = 2 old_use_sg = 2 sglist_len = 512 abore_reason = 0
++ bufflen = 8192 buffer = 0xc03fd000 underflow = 8192
++ transfersize = 512
++ tag = 0 pid = 3401
++ request struct
++ rq_status = RQ_ACTIVE rq_dev = [8/1] errors = 1 cmd = 0
++ sector = 2621440 nr_sectors = 16 current_nr_sectors = 8
++ buffer = 0xf7599000
++%fore "blue", cont
++bh = 0xf75ca300
++%fore "black", cont
++ bhtail = 0xf75ca3c0
++
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Display the host adapter
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue", cont
++sh 0xf7e91400
++%fore "black"
++ Scsi_Host at 0xf7e91400
++ next = 0x00000000
++%fore "blue", cont
++host_queue = 0xf7ed5d80
++%fore "black"
++ ehandler = 0x00000000 eh_wait = 0x00000000 en_notify = 0x00000000
++ eh_action = 0x00000000
++ h_active = 0x0 host_wait = 0xc0353ac4 hostt = 0xc034bce0
++ host_busy = 1
++ host_failed = 0 extra_bytes = 524 host_no = 0 resetting = 0
++ max id/lun/channel = [16/8/0] this_id = 7
++ can_queue = 64 cmd_per_lun = 1 sg_tablesize = 427 u_isa_dma = 0
++ host_blocked = 0 reverse_ordering = 0
++
++%font "typewriter", size 3
++ Display the scsi device
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue", cont
++sd 0xf7ed5d80
++%fore "black"
++ scsi_device at 0xf7ed5d80
++ next = 0xf7ed5c80 prev = 0x00000000 host = 0xf7e91400
++ device_busy = 1
++%fore "blue", cont
++device_queue 0xf7e63a00
++%fore "black"
++ id/lun/chan = [0/0/0] single_lun = 0 device_blocked = 0
++ queue_depth = 1 current_tag = 0 scsi_level = 4
++ IBM DGHS18X 0360
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Display the Buffer header associated with the command
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue", cont
++bh 0xf75ca300
++%fore "black"
++ buffer_head at 0xf75ca300
++ next 0x00000000 bno 327680 rsec 2621440 size 4096
++ dev 0x801 rdev 0x801
++ count 2 state 0x1d [Uptodate Lock Req Mapped] ftime 0x7695e
++ b_list 1 b_reqnext 0xf75ca3c0 b_data 0xf7599000
++%fore "blue"
++ b_page 0xc1dd6640
++%fore "black", cont
++ b_this_page 0xf75ca300 b_private 0x00000000
++
++ Display the associated page structure
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue", cont
++page 0xc1dd6640
++%fore "black"
++ struct page at 0xc1dd6640
++ next 0xc1dd7300 prev 0xc1dd6240
++%fore "blue", cont
++addr space 0xf7af04d0
++%fore "black"
++ index 327680 (offset 0x50000000)
++ count 2 flags PG_referenced PG_lru virtual 0xf7599000
++ buffers 0xf75ca300
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Display the Address space associated with the page
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue", cont
++md 0xf7af04d0
++%fore "black"
++ 0xf7af04d0 c1dd6240 c1dea740 f7af04d8 f7af04d8 @b]A@'^AX./wX./w
++ 0xf7af04e0 f7af04e0 f7af04e0 00000007 c033b700 `./w`./w.....73@
++ 0xf7af04f0
++%fore "blue", cont
++f7af0420
++%fore "black", cont
++ 00000000 00000000 00000001 ./w............
++ 0xf7af0500 000001d0 00000000 00000000 f7af050c P............./w
++ 0xf7af0510 f7af050c 00000000 f7a8afa0 00000000 ../w.... /(w....
++
++ The structure looks like:
++%size 3
++ struct address_space {
++ struct list_head clean_pages; /* list of clean pages */
++ struct list_head dirty_pages; /* list of dirty pages */
++ struct list_head locked_pages;/* list of locked pages */
++ unsigned long nrpages; /* number of total pages */
++ spinlock_t page_lock; /* spinlock protecting them*/
++ struct address_space_operations *a_ops; /* methods */
++%fore "blue"
++ struct inode *host; /* owner: inode, block_dev */
++%fore "black"
++ <snip>
++ };
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Display the inode associated with the address space
++ I think this is the inode for the block device.
++
++%font "typewriter", size 3
++ [1]kdb>
++%fore "blue", cont
++inode f7af0420
++%fore "black"
++ struct inode at 0xf7af0420
++ i_ino = 289 i_count = 1 i_dev = 0x801 i_size 4301789184
++ i_mode = 0x8000 i_nlink = 1 i_rdev = 0x801
++ i_hash.nxt = 0xf7af0420 i_hash.prv = 0xf7af0420
++ i_list.nxt = 0xf7af0608 i_list.prv = 0xf7af0068
++ i_dentry.nxt = 0xf7af0430 i_dentry.prv = 0xf7af0430
++ i_dirty_buffers.nxt = 0xf7af0438 i_dirty_buffers.prv = 0xf7af0438
++ i_sb = 0xc201f200 i_op = 0xc03cfdc0 i_data = 0xf7af04d0 nrpages = 6
++ i_mapping = 0xf7af04d0
++ i_flags 0x0 i_state 0x0 [] fs specific info @ 0xf7af0540
++%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
++%page
++
++Walking IO structures
++
++
++%font "typewriter", size 3
++ Display the page list associated with the inode
++%font "typewriter", size 3
++ [0]kdb>
++%fore "blue", cont
++inode_pages f7af0420
++%fore "black"
++CLEAN page_struct index cnt flags
++ 0xc1dd6240 327735 2 0x44 bh 0xf75caae0 bno 327735
++ [Lock Req Mapped]
++%fore "blue"
++ 0xc1dd6640 327680 2 0x44 bh 0xf75ca300 bno 327680
++ [Uptodate Lock Req Mapped]
++%fore "black"
++ 0xc1dd7300 327681 2 0x44 bh 0xf75ca3c0 bno 327681
++ [Uptodate Lock Req Mapped]
++ 0xc1dd6e00 327684 2 0x44 bh 0xf75ca420 bno 327684
++ [Uptodate Req Mapped]
++ 0xc1de8fc0 4 2 0xc0 bh 0xf7b5ade0 bno 4
++ [Uptodate Req Mapped]
++ 0xc1dea700 1 2 0x44 bh 0xf7e02740 bno 1
++ [Uptodate Req Mapped]
++ 0xc1dea740 0 2 0x44 bh 0xf7e028c0 bno 0
++ [Uptodate Req Mapped]
++DIRTY page_struct index cnt flags
++LOCKED page_struct index cnt flags
+diff -Nurp linux-2.6.22-590/drivers/char/keyboard.c linux-2.6.22-600/drivers/char/keyboard.c
+--- linux-2.6.22-590/drivers/char/keyboard.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/drivers/char/keyboard.c 2008-04-09 18:14:28.000000000 +0200
+@@ -40,6 +40,9 @@
+ #include <linux/sysrq.h>
+ #include <linux/input.h>
+ #include <linux/reboot.h>
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#endif /* CONFIG_KDB */
+
+ extern void ctrl_alt_del(void);
+
+@@ -1138,6 +1141,13 @@ static void kbd_keycode(unsigned int key
+ if (keycode < BTN_MISC && printk_ratelimit())
+ printk(KERN_WARNING "keyboard.c: can't emulate rawmode for keycode %d\n", keycode);
+
++#ifdef CONFIG_KDB
++ if (down && !rep && keycode == KEY_PAUSE && kdb_on == 1) {
++ kdb(KDB_REASON_KEYBOARD, 0, get_irq_regs());
++ return;
++ }
++#endif /* CONFIG_KDB */
++
+ #ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
+ if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) {
+ if (!sysrq_down) {
+diff -Nurp linux-2.6.22-590/drivers/hid/usbhid/hid-core.c linux-2.6.22-600/drivers/hid/usbhid/hid-core.c
+--- linux-2.6.22-590/drivers/hid/usbhid/hid-core.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/drivers/hid/usbhid/hid-core.c 2008-04-09 18:14:28.000000000 +0200
+@@ -43,6 +43,10 @@
+ #define DRIVER_DESC "USB HID core driver"
+ #define DRIVER_LICENSE "GPL"
+
++#ifdef CONFIG_KDB_USB
++#include <linux/kdb.h>
++#endif
++
+ static char *hid_types[] = {"Device", "Pointer", "Mouse", "Device", "Joystick",
+ "Gamepad", "Keyboard", "Keypad", "Multi-Axis Controller"};
+ /*
+@@ -948,6 +952,12 @@ static void hid_disconnect(struct usb_in
+
+ usbhid = hid->driver_data;
+
++#ifdef CONFIG_KDB_USB
++ /* Unlink the KDB USB struct */
++ if (usbhid->urbin == kdb_usb_infos.urb)
++ memset(&kdb_usb_infos, 0, sizeof(kdb_usb_infos));
++#endif
++
+ spin_lock_irq(&usbhid->inlock); /* Sync with error handler */
+ usb_set_intfdata(intf, NULL);
+ spin_unlock_irq(&usbhid->inlock);
+@@ -1033,6 +1043,16 @@ static int hid_probe(struct usb_interfac
+ printk(": USB HID v%x.%02x %s [%s] on %s\n",
+ hid->version >> 8, hid->version & 0xff, c, hid->name, path);
+
++#ifdef CONFIG_KDB_USB
++ /* Initialization of the KDB structure */
++ if (!strcmp(c, "Keyboard")) {
++ struct usbhid_device *usbhid = hid->driver_data;
++ kdb_usb_infos.urb = usbhid->urbin;
++ kdb_usb_infos.buffer = usbhid->inbuf;
++ kdb_usb_infos.reset_timer = NULL;
++ }
++#endif
++
+ return 0;
+ }
+
+diff -Nurp linux-2.6.22-590/drivers/hid/usbhid/usbkbd.c linux-2.6.22-600/drivers/hid/usbhid/usbkbd.c
+--- linux-2.6.22-590/drivers/hid/usbhid/usbkbd.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/drivers/hid/usbhid/usbkbd.c 2008-04-09 18:14:28.000000000 +0200
+@@ -32,6 +32,9 @@
+ #include <linux/init.h>
+ #include <linux/usb/input.h>
+ #include <linux/hid.h>
++#ifdef CONFIG_KDB_USB
++#include <linux/kdb.h>
++#endif
+
+ /*
+ * Version Information
+@@ -288,6 +291,13 @@ static int usb_kbd_probe(struct usb_inte
+ usb_fill_int_urb(kbd->irq, dev, pipe,
+ kbd->new, (maxp > 8 ? 8 : maxp),
+ usb_kbd_irq, kbd, endpoint->bInterval);
++
++#ifdef CONFIG_KDB_USB
++ /* Init the KDB structure */
++ kdb_usb_infos.urb = kbd->irq;
++ kdb_usb_infos.buffer = kbd->new;
++ kdb_usb_infos.reset_timer = NULL;
++#endif
+ kbd->irq->transfer_dma = kbd->new_dma;
+ kbd->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+@@ -324,6 +334,11 @@ static void usb_kbd_disconnect(struct us
+ struct usb_kbd *kbd = usb_get_intfdata (intf);
+
+ usb_set_intfdata(intf, NULL);
++#ifdef CONFIG_KDB_USB
++ /* Unlink the KDB USB struct */
++ if (kbd && kbd->irq == kdb_usb_infos.urb)
++ memset(&kdb_usb_infos, 0, sizeof(kdb_usb_infos));
++#endif /* CONFIG_KDB_USB */
+ if (kbd) {
+ usb_kill_urb(kbd->irq);
+ input_unregister_device(kbd->dev);
+diff -Nurp linux-2.6.22-590/drivers/serial/8250.c linux-2.6.22-600/drivers/serial/8250.c
+--- linux-2.6.22-590/drivers/serial/8250.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/drivers/serial/8250.c 2008-04-09 18:14:28.000000000 +0200
+@@ -45,6 +45,19 @@
+ #include <asm/irq.h>
+
+ #include "8250.h"
++#include <linux/kdb.h>
++#ifdef CONFIG_KDB
++/*
++ * kdb_serial_line records the serial line number of the first serial console.
++ * NOTE: The kernel ignores characters on the serial line unless a user space
++ * program has opened the line first. To enter kdb before user space has opened
++ * the serial line, you can use the 'kdb=early' flag to lilo and set the
++ * appropriate breakpoints.
++ */
++
++static int kdb_serial_line = -1;
++static const char *kdb_serial_ptr = kdb_serial_str;
++#endif /* CONFIG_KDB */
+
+ /*
+ * Configuration:
+@@ -1287,6 +1300,20 @@ receive_chars(struct uart_8250_port *up,
+
+ do {
+ ch = serial_inp(up, UART_RX);
++#ifdef CONFIG_KDB
++ if ((up->port.line == kdb_serial_line) && kdb_on == 1) {
++ if (ch == *kdb_serial_ptr) {
++ if (!(*++kdb_serial_ptr)) {
++ atomic_inc(&kdb_8250);
++ kdb(KDB_REASON_KEYBOARD, 0, get_irq_regs());
++ atomic_dec(&kdb_8250);
++ kdb_serial_ptr = kdb_serial_str;
++ break;
++ }
++ } else
++ kdb_serial_ptr = kdb_serial_str;
++ }
++#endif /* CONFIG_KDB */
+ flag = TTY_NORMAL;
+ up->port.icount.rx++;
+
+@@ -2460,7 +2487,7 @@ serial8250_console_write(struct console
+ if (up->port.sysrq) {
+ /* serial8250_handle_port() already took the lock */
+ locked = 0;
+- } else if (oops_in_progress) {
++ } else if (oops_in_progress || KDB_8250()) {
+ locked = spin_trylock(&up->port.lock);
+ } else
+ spin_lock(&up->port.lock);
+@@ -2508,6 +2535,30 @@ static int __init serial8250_console_set
+ if (!port->iobase && !port->membase)
+ return -ENODEV;
+
++#ifdef CONFIG_KDB
++ /*
++ * Remember the line number of the first serial
++ * console. We'll make this the kdb serial console too.
++ */
++ if (co && kdb_serial_line == -1) {
++ kdb_serial_line = co->index;
++ kdb_serial.io_type = port->iotype;
++ switch (port->iotype) {
++ case SERIAL_IO_MEM:
++#ifdef SERIAL_IO_MEM32
++ case SERIAL_IO_MEM32:
++#endif
++ kdb_serial.iobase = (unsigned long)(port->membase);
++ kdb_serial.ioreg_shift = port->regshift;
++ break;
++ default:
++ kdb_serial.iobase = port->iobase;
++ kdb_serial.ioreg_shift = 0;
++ break;
++ }
++ }
++#endif /* CONFIG_KDB */
++
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+diff -Nurp linux-2.6.22-590/drivers/serial/8250_early.c linux-2.6.22-600/drivers/serial/8250_early.c
+--- linux-2.6.22-590/drivers/serial/8250_early.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/drivers/serial/8250_early.c 2008-04-09 18:14:28.000000000 +0200
+@@ -35,6 +35,13 @@
+ #include <asm/io.h>
+ #include <asm/serial.h>
+
++
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++
++static int kdb_serial_line = -1;
++#endif /* CONFIG_KDB */
++
+ struct early_uart_device {
+ struct uart_port port;
+ char options[16]; /* e.g., 115200n8 */
+@@ -186,6 +193,31 @@ static int __init early_uart_setup(struc
+ if ((err = parse_options(device, options)) < 0)
+ return err;
+
++
++#ifdef CONFIG_KDB
++ /*
++ * Remember the line number of the first serial
++ * console. We'll make this the kdb serial console too.
++ */
++ if (console && kdb_serial_line == -1) {
++ kdb_serial_line = console->index;
++ kdb_serial.io_type = device->port.iotype;
++ switch (device->port.iotype) {
++ case SERIAL_IO_MEM:
++#ifdef SERIAL_IO_MEM32
++ case SERIAL_IO_MEM32:
++#endif
++ kdb_serial.iobase = (unsigned long)(device->port.membase);
++ kdb_serial.ioreg_shift = device->port.regshift;
++ break;
++ default:
++ kdb_serial.iobase = device->port.iobase;
++ kdb_serial.ioreg_shift = 0;
++ break;
++ }
++ }
++#endif /* CONFIG_KDB */
++
+ init_port(device);
+ return 0;
+ }
+@@ -218,7 +250,7 @@ int __init early_serial_console_init(cha
+ return -ENODEV;
+
+ options = strchr(cmdline, ',') + 1;
+- if ((err = early_uart_setup(NULL, options)) < 0)
++ if ((err = early_uart_setup(&early_uart_console, options)) < 0)
+ return err;
+ return early_uart_console_init();
+ }
+diff -Nurp linux-2.6.22-590/drivers/serial/sn_console.c linux-2.6.22-600/drivers/serial/sn_console.c
+--- linux-2.6.22-590/drivers/serial/sn_console.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/drivers/serial/sn_console.c 2008-04-09 18:14:28.000000000 +0200
+@@ -48,6 +48,22 @@
+ #include <linux/delay.h> /* for mdelay */
+ #include <linux/miscdevice.h>
+ #include <linux/serial_core.h>
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/serial_reg.h>
++/*
++ * kdb_serial_line records the serial line number of the first serial console.
++ * NOTE: The kernel ignores characters on the serial line unless a user space
++ * program has opened the line first. To enter kdb before user space has opened
++ * the serial line, you can use the 'kdb=early' flag to lilo and set the
++ * appropriate breakpoints.
++ */
++
++static int kdb_serial_line = -1;
++static char *kdb_serial_ptr = (char *)kdb_serial_str;
++#endif /* CONFIG_KDB */
++
+
+ #include <asm/io.h>
+ #include <asm/sn/simulator.h>
+@@ -485,6 +501,26 @@ sn_receive_chars(struct sn_cons_port *po
+ "obtaining data from the console (0x%0x)\n", ch);
+ break;
+ }
++#ifdef CONFIG_KDB
++ if (kdb_on == 1) {
++ if (ch == *kdb_serial_ptr) {
++ if (!(*++kdb_serial_ptr)) {
++ spin_unlock_irqrestore(&port->sc_port.lock, flags);
++ if (!get_irq_regs()) {
++ KDB_STATE_SET(KEYBOARD);
++ KDB_ENTER(); /* to get some registers */
++ } else
++ kdb(KDB_REASON_KEYBOARD, 0, get_irq_regs());
++ kdb_serial_ptr = (char *)kdb_serial_str;
++ spin_lock_irqsave(&port->sc_port.lock, flags);
++ break;
++ }
++ }
++ else
++ kdb_serial_ptr = (char *)kdb_serial_str;
++ }
++#endif /* CONFIG_KDB */
++
+ #ifdef CONFIG_MAGIC_SYSRQ
+ if (sysrq_requested) {
+ unsigned long sysrq_timeout = sysrq_requested + HZ*5;
+@@ -1008,6 +1044,15 @@ sn_sal_console_write(struct console *co,
+ */
+ static int __init sn_sal_console_setup(struct console *co, char *options)
+ {
++#ifdef CONFIG_KDB
++ /*
++ * Remember the line number of the first serial
++ * console. We'll make this the kdb serial console too.
++ */
++ if (kdb_serial_line == -1) {
++ kdb_serial_line = co->index;
++ }
++#endif /* CONFIG_KDB */
+ return 0;
+ }
+
+@@ -1083,3 +1128,31 @@ static int __init sn_sal_serial_console_
+ }
+
+ console_initcall(sn_sal_serial_console_init);
++
++#ifdef CONFIG_KDB
++int
++l1_control_in_polled(int offset)
++{
++ int sal_call_status = 0, input;
++ int ret = 0;
++ if (offset == UART_LSR) {
++ ret = (UART_LSR_THRE | UART_LSR_TEMT); /* can send anytime */
++ sal_call_status = ia64_sn_console_check(&input);
++ if (!sal_call_status && input) {
++ /* input pending */
++ ret |= UART_LSR_DR;
++ }
++ }
++ return ret;
++}
++
++int
++l1_serial_in_polled(void)
++{
++ int ch;
++ if (!ia64_sn_console_getc(&ch))
++ return ch;
++ else
++ return 0;
++}
++#endif /* CONFIG_KDB */
+diff -Nurp linux-2.6.22-590/drivers/usb/host/ohci-hcd.c linux-2.6.22-600/drivers/usb/host/ohci-hcd.c
+--- linux-2.6.22-590/drivers/usb/host/ohci-hcd.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/drivers/usb/host/ohci-hcd.c 2008-04-09 18:14:28.000000000 +0200
+@@ -843,6 +843,53 @@ static int ohci_restart (struct ohci_hcd
+
+ /*-------------------------------------------------------------------------*/
+
++#ifdef CONFIG_KDB_USB
++
++static void
++ohci_kdb_poll (void * __ohci, struct urb *urb)
++{
++ struct ohci_hcd *ohci;
++ struct ohci_regs * regs;
++
++ /*
++ * NOTE - we use the ohci_hcd from the urb rather than the
++ * __ohci parameter (which is NULL anyway). This ensures
++ * that we will process the proper controller for the urb.
++ */
++
++ if (!urb) /* can happen if no keyboard attached */
++ return;
++
++ ohci = (struct ohci_hcd *) hcd_to_ohci(bus_to_hcd(urb->dev->bus));
++ regs = ohci->regs;
++
++ /* if the urb is not currently in progress resubmit it */
++ if (urb->status != -EINPROGRESS) {
++
++ if (usb_submit_urb (urb, GFP_ATOMIC))
++ return;
++
++ /* make sure the HC registers are set correctly */
++ writel (OHCI_INTR_WDH, ®s->intrenable);
++ writel (OHCI_INTR_WDH, ®s->intrstatus);
++ writel (OHCI_INTR_MIE, ®s->intrenable);
++
++ // flush those pci writes
++ (void) readl (&ohci->regs->control);
++ }
++
++ if (ohci->hcca->done_head) {
++ dl_done_list_kdb (ohci, urb);
++ writel (OHCI_INTR_WDH, ®s->intrstatus);
++ // flush the pci write
++ (void) readl (&ohci->regs->control);
++ }
++}
++
++#endif /* CONFIG_KDB_USB */
++
++/*-------------------------------------------------------------------------*/
++
+ #define DRIVER_INFO DRIVER_VERSION " " DRIVER_DESC
+
+ MODULE_AUTHOR (DRIVER_AUTHOR);
+diff -Nurp linux-2.6.22-590/drivers/usb/host/ohci-pci.c linux-2.6.22-600/drivers/usb/host/ohci-pci.c
+--- linux-2.6.22-590/drivers/usb/host/ohci-pci.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/drivers/usb/host/ohci-pci.c 2008-04-09 18:14:28.000000000 +0200
+@@ -18,6 +18,10 @@
+ #error "This file is PCI bus glue. CONFIG_PCI must be defined."
+ #endif
+
++#ifdef CONFIG_KDB_USB
++#include <linux/kdb.h>
++#endif
++
+ /*-------------------------------------------------------------------------*/
+
+ static int broken_suspend(struct usb_hcd *hcd)
+@@ -199,6 +203,12 @@ static int __devinit ohci_pci_start (str
+ ohci_err (ohci, "can't start\n");
+ ohci_stop (hcd);
+ }
++#ifdef CONFIG_KDB_USB
++ if (ret >= 0) {
++ kdb_usb_infos.poll_func = ohci_kdb_poll;
++ kdb_usb_infos.uhci = NULL; /* not used */
++ }
++#endif
+ return ret;
+ }
+
+diff -Nurp linux-2.6.22-590/drivers/usb/host/ohci-q.c linux-2.6.22-600/drivers/usb/host/ohci-q.c
+--- linux-2.6.22-590/drivers/usb/host/ohci-q.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/drivers/usb/host/ohci-q.c 2008-04-09 18:14:28.000000000 +0200
+@@ -1115,3 +1115,65 @@ dl_done_list (struct ohci_hcd *ohci)
+ td = td_next;
+ }
+ }
++
++
++/*-------------------------------------------------------------------------*/
++
++#ifdef CONFIG_KDB_USB
++static void
++dl_done_list_kdb (struct ohci_hcd *ohci, struct urb *kdburb)
++{
++ struct td *td = dl_reverse_done_list (ohci);
++
++ while (td) {
++ struct td *td_next = td->next_dl_td;
++ struct urb *urb = td->urb;
++ urb_priv_t *urb_priv = urb->hcpriv;
++ struct ed *ed = td->ed;
++
++ if (urb != kdburb) {
++ td = td_next;
++ continue;
++ }
++
++ /* update URB's length and status from TD */
++ td_done (ohci, urb, td);
++ urb_priv->td_cnt++;
++
++ /* If all this urb's TDs are done, just resubmit it */
++ if (urb_priv->td_cnt == urb_priv->length) {
++ urb->actual_length = 0;
++ urb->status = -EINPROGRESS;
++ td_submit_urb (ohci, urb);
++ }
++
++ /* clean schedule: unlink EDs that are no longer busy */
++ if (list_empty (&ed->td_list)) {
++ if (ed->state == ED_OPER)
++ start_ed_unlink (ohci, ed);
++
++ /* ... reenabling halted EDs only after fault cleanup */
++ } else if ((ed->hwINFO & cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE))
++ == cpu_to_hc32 (ohci, ED_SKIP)) {
++ td = list_entry (ed->td_list.next, struct td, td_list);
++ if (!(td->hwINFO & cpu_to_hc32 (ohci, TD_DONE))) {
++ ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP);
++ /* ... hc may need waking-up */
++ switch (ed->type) {
++ case PIPE_CONTROL:
++ ohci_writel (ohci, OHCI_CLF,
++ &ohci->regs->cmdstatus);
++ break;
++ case PIPE_BULK:
++ ohci_writel (ohci, OHCI_BLF,
++ &ohci->regs->cmdstatus);
++ break;
++ }
++ }
++ }
++
++ td = td_next;
++ }
++}
++
++#endif /* CONFIG_KDB_USB */
+diff -Nurp linux-2.6.22-590/fs/proc/mmu.c linux-2.6.22-600/fs/proc/mmu.c
+--- linux-2.6.22-590/fs/proc/mmu.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/fs/proc/mmu.c 2008-04-09 18:14:28.000000000 +0200
+@@ -31,11 +31,21 @@
+ #include <asm/div64.h>
+ #include "internal.h"
+
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#endif
++
+ void get_vmalloc_info(struct vmalloc_info *vmi)
+ {
+ struct vm_struct *vma;
+ unsigned long free_area_size;
+ unsigned long prev_end;
++#ifdef CONFIG_KDB
++ int get_lock = !KDB_IS_RUNNING();
++#else
++#define get_lock 1
++#endif
++
+
+ vmi->used = 0;
+
+@@ -47,7 +57,8 @@ void get_vmalloc_info(struct vmalloc_inf
+
+ prev_end = VMALLOC_START;
+
+- read_lock(&vmlist_lock);
++ if (get_lock)
++ read_lock(&vmlist_lock);
+
+ for (vma = vmlist; vma; vma = vma->next) {
+ unsigned long addr = (unsigned long) vma->addr;
+@@ -72,6 +83,7 @@ void get_vmalloc_info(struct vmalloc_inf
+ if (VMALLOC_END - prev_end > vmi->largest_chunk)
+ vmi->largest_chunk = VMALLOC_END - prev_end;
+
+- read_unlock(&vmlist_lock);
++ if (get_lock)
++ read_unlock(&vmlist_lock);
+ }
+ }
+diff -Nurp linux-2.6.22-590/fs/proc/proc_misc.c linux-2.6.22-600/fs/proc/proc_misc.c
+--- linux-2.6.22-590/fs/proc/proc_misc.c 2008-04-09 18:10:53.000000000 +0200
++++ linux-2.6.22-600/fs/proc/proc_misc.c 2008-04-09 18:14:28.000000000 +0200
+@@ -235,6 +235,120 @@ static int meminfo_read_proc(char *page,
+ #undef K
+ }
+
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++/* Like meminfo_read_proc() but without the locks and using kdb_printf() */
++void
++kdb_meminfo_read_proc(void)
++{
++ struct sysinfo i;
++ unsigned long committed;
++ unsigned long allowed;
++ struct vmalloc_info vmi;
++ long cached;
++
++/*
++ * display in kilobytes.
++ */
++#define K(x) ((x) << (PAGE_SHIFT - 10))
++ si_meminfo(&i);
++ kdb_si_swapinfo(&i);
++ committed = atomic_read(&vm_committed_space);
++ allowed = ((totalram_pages - hugetlb_total_pages())
++ * sysctl_overcommit_ratio / 100) + total_swap_pages;
++
++ cached = global_page_state(NR_FILE_PAGES) -
++ total_swapcache_pages - i.bufferram;
++ if (cached < 0)
++ cached = 0;
++
++ get_vmalloc_info(&vmi);
++
++ kdb_printf(
++ "MemTotal: %8lu kB\n"
++ "MemFree: %8lu kB\n"
++ "Buffers: %8lu kB\n",
++ K(i.totalram),
++ K(i.freeram),
++ K(i.bufferram)
++ );
++ kdb_printf(
++ "Cached: %8lu kB\n"
++ "SwapCached: %8lu kB\n"
++ "Active: %8lu kB\n"
++ "Inactive: %8lu kB\n",
++ K(cached),
++ K(total_swapcache_pages),
++ K(global_page_state(NR_ACTIVE)),
++ K(global_page_state(NR_INACTIVE))
++ );
++#ifdef CONFIG_HIGHMEM
++ kdb_printf(
++ "HighTotal: %8lu kB\n"
++ "HighFree: %8lu kB\n"
++ "LowTotal: %8lu kB\n"
++ "LowFree: %8lu kB\n",
++ K(i.totalhigh),
++ K(i.freehigh),
++ K(i.totalram-i.totalhigh),
++ K(i.freeram-i.freehigh)
++ );
++#endif
++ kdb_printf(
++ "SwapTotal: %8lu kB\n"
++ "SwapFree: %8lu kB\n"
++ "Dirty: %8lu kB\n",
++ K(i.totalswap),
++ K(i.freeswap),
++ K(global_page_state(NR_FILE_DIRTY))
++ );
++ kdb_printf(
++ "Writeback: %8lu kB\n"
++ "AnonPages: %8lu kB\n"
++ "Mapped: %8lu kB\n",
++ K(global_page_state(NR_WRITEBACK)),
++ K(global_page_state(NR_ANON_PAGES)),
++ K(global_page_state(NR_FILE_MAPPED))
++ );
++ kdb_printf(
++ "Slab: %8lu kB\n"
++ "SReclaimable: %8lu kB\n"
++ "SUnreclaim: %8lu kB\n",
++ K(global_page_state(NR_SLAB_RECLAIMABLE) +
++ global_page_state(NR_SLAB_UNRECLAIMABLE)),
++ K(global_page_state(NR_SLAB_RECLAIMABLE)),
++ K(global_page_state(NR_SLAB_UNRECLAIMABLE))
++ );
++ kdb_printf(
++ "PageTables: %8lu kB\n"
++ "NFS_Unstable: %8lu kB\n"
++ "Bounce: %8lu kB\n",
++ K(global_page_state(NR_PAGETABLE)),
++ K(global_page_state(NR_UNSTABLE_NFS)),
++ K(global_page_state(NR_BOUNCE))
++ );
++ kdb_printf(
++ "CommitLimit: %8lu kB\n"
++ "Committed_AS: %8lu kB\n"
++ "VmallocTotal: %8lu kB\n",
++ K(allowed),
++ K(committed),
++ (unsigned long)VMALLOC_TOTAL >> 10
++ );
++ kdb_printf(
++ "VmallocUsed: %8lu kB\n"
++ "VmallocChunk: %8lu kB\n",
++ vmi.used >> 10,
++ vmi.largest_chunk >> 10
++ );
++
++#ifdef CONFIG_HUGETLBFS
++ kdb_hugetlb_report_meminfo();
++#endif
++}
++#endif /* CONFIG_KDB */
++
+ extern struct seq_operations fragmentation_op;
+ static int fragmentation_open(struct inode *inode, struct file *file)
+ {
+diff -Nurp linux-2.6.22-590/include/asm-i386/ansidecl.h linux-2.6.22-600/include/asm-i386/ansidecl.h
+--- linux-2.6.22-590/include/asm-i386/ansidecl.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/asm-i386/ansidecl.h 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,383 @@
++/* ANSI and traditional C compatability macros
++ Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001
++ Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++This program is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 2 of the License, or
++(at your option) any later version.
++
++This program is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this program; if not, write to the Free Software
++Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
++
++/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
++ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as
++ * required.
++ * Keith Owens <kaos@sgi.com> 15 May 2006
++ */
++
++/* ANSI and traditional C compatibility macros
++
++ ANSI C is assumed if __STDC__ is #defined.
++
++ Macro ANSI C definition Traditional C definition
++ ----- ---- - ---------- ----------- - ----------
++ ANSI_PROTOTYPES 1 not defined
++ PTR `void *' `char *'
++ PTRCONST `void *const' `char *'
++ LONG_DOUBLE `long double' `double'
++ const not defined `'
++ volatile not defined `'
++ signed not defined `'
++ VA_START(ap, var) va_start(ap, var) va_start(ap)
++
++ Note that it is safe to write "void foo();" indicating a function
++ with no return value, in all K+R compilers we have been able to test.
++
++ For declaring functions with prototypes, we also provide these:
++
++ PARAMS ((prototype))
++ -- for functions which take a fixed number of arguments. Use this
++ when declaring the function. When defining the function, write a
++ K+R style argument list. For example:
++
++ char *strcpy PARAMS ((char *dest, char *source));
++ ...
++ char *
++ strcpy (dest, source)
++ char *dest;
++ char *source;
++ { ... }
++
++
++ VPARAMS ((prototype, ...))
++ -- for functions which take a variable number of arguments. Use
++ PARAMS to declare the function, VPARAMS to define it. For example:
++
++ int printf PARAMS ((const char *format, ...));
++ ...
++ int
++ printf VPARAMS ((const char *format, ...))
++ {
++ ...
++ }
++
++ For writing functions which take variable numbers of arguments, we
++ also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These
++ hide the differences between K+R <varargs.h> and C89 <stdarg.h> more
++ thoroughly than the simple VA_START() macro mentioned above.
++
++ VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end.
++ Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls
++ corresponding to the list of fixed arguments. Then use va_arg
++ normally to get the variable arguments, or pass your va_list object
++ around. You do not declare the va_list yourself; VA_OPEN does it
++ for you.
++
++ Here is a complete example:
++
++ int
++ printf VPARAMS ((const char *format, ...))
++ {
++ int result;
++
++ VA_OPEN (ap, format);
++ VA_FIXEDARG (ap, const char *, format);
++
++ result = vfprintf (stdout, format, ap);
++ VA_CLOSE (ap);
++
++ return result;
++ }
++
++
++ You can declare variables either before or after the VA_OPEN,
++ VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning
++ and end of a block. They must appear at the same nesting level,
++ and any variables declared after VA_OPEN go out of scope at
++ VA_CLOSE. Unfortunately, with a K+R compiler, that includes the
++ argument list. You can have multiple instances of VA_OPEN/VA_CLOSE
++ pairs in a single function in case you need to traverse the
++ argument list more than once.
++
++ For ease of writing code which uses GCC extensions but needs to be
++ portable to other compilers, we provide the GCC_VERSION macro that
++ simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various
++ wrappers around __attribute__. Also, __extension__ will be #defined
++ to nothing if it doesn't work. See below.
++
++ This header also defines a lot of obsolete macros:
++ CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID,
++ AND, DOTS, NOARGS. Don't use them. */
++
++#ifndef _ANSIDECL_H
++#define _ANSIDECL_H 1
++
++/* Every source file includes this file,
++ so they will all get the switch for lint. */
++/* LINTLIBRARY */
++
++/* Using MACRO(x,y) in cpp #if conditionals does not work with some
++ older preprocessors. Thus we can't define something like this:
++
++#define HAVE_GCC_VERSION(MAJOR, MINOR) \
++ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR)))
++
++and then test "#if HAVE_GCC_VERSION(2,7)".
++
++So instead we use the macro below and test it against specific values. */
++
++/* This macro simplifies testing whether we are using gcc, and if it
++ is of a particular minimum version. (Both major & minor numbers are
++ significant.) This macro will evaluate to 0 if we are not using
++ gcc at all. */
++#ifndef GCC_VERSION
++#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
++#endif /* GCC_VERSION */
++
++#if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) || (defined(__alpha) && defined(__cplusplus))
++/* All known AIX compilers implement these things (but don't always
++ define __STDC__). The RISC/OS MIPS compiler defines these things
++ in SVR4 mode, but does not define __STDC__. */
++/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other
++ C++ compilers, does not define __STDC__, though it acts as if this
++ was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */
++
++#define ANSI_PROTOTYPES 1
++#define PTR void *
++#define PTRCONST void *const
++#define LONG_DOUBLE long double
++
++/* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in
++ a #ifndef. */
++#ifndef PARAMS
++#define PARAMS(ARGS) ARGS
++#endif
++
++#define VPARAMS(ARGS) ARGS
++#define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR)
++
++/* variadic function helper macros */
++/* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's
++ use without inhibiting further decls and without declaring an
++ actual variable. */
++#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy
++#define VA_CLOSE(AP) } va_end(AP); }
++#define VA_FIXEDARG(AP, T, N) struct Qdmy
++
++#undef const
++#undef volatile
++#undef signed
++
++#ifdef __KERNEL__
++#ifndef __STDC_VERSION__
++#define __STDC_VERSION__ 0
++#endif
++#endif /* __KERNEL__ */
++
++/* inline requires special treatment; it's in C99, and GCC >=2.7 supports
++ it too, but it's not in C89. */
++#undef inline
++#if __STDC_VERSION__ > 199901L
++/* it's a keyword */
++#else
++# if GCC_VERSION >= 2007
++# define inline __inline__ /* __inline__ prevents -pedantic warnings */
++# else
++# define inline /* nothing */
++# endif
++#endif
++
++/* These are obsolete. Do not use. */
++#ifndef IN_GCC
++#define CONST const
++#define VOLATILE volatile
++#define SIGNED signed
++
++#define PROTO(type, name, arglist) type name arglist
++#define EXFUN(name, proto) name proto
++#define DEFUN(name, arglist, args) name(args)
++#define DEFUN_VOID(name) name(void)
++#define AND ,
++#define DOTS , ...
++#define NOARGS void
++#endif /* ! IN_GCC */
++
++#else /* Not ANSI C. */
++
++#undef ANSI_PROTOTYPES
++#define PTR char *
++#define PTRCONST PTR
++#define LONG_DOUBLE double
++
++#define PARAMS(args) ()
++#define VPARAMS(args) (va_alist) va_dcl
++#define VA_START(va_list, var) va_start(va_list)
++
++#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy
++#define VA_CLOSE(AP) } va_end(AP); }
++#define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE)
++
++/* some systems define these in header files for non-ansi mode */
++#undef const
++#undef volatile
++#undef signed
++#undef inline
++#define const
++#define volatile
++#define signed
++#define inline
++
++#ifndef IN_GCC
++#define CONST
++#define VOLATILE
++#define SIGNED
++
++#define PROTO(type, name, arglist) type name ()
++#define EXFUN(name, proto) name()
++#define DEFUN(name, arglist, args) name arglist args;
++#define DEFUN_VOID(name) name()
++#define AND ;
++#define DOTS
++#define NOARGS
++#endif /* ! IN_GCC */
++
++#endif /* ANSI C. */
++
++/* Define macros for some gcc attributes. This permits us to use the
++ macros freely, and know that they will come into play for the
++ version of gcc in which they are supported. */
++
++#if (GCC_VERSION < 2007)
++# define __attribute__(x)
++#endif
++
++/* Attribute __malloc__ on functions was valid as of gcc 2.96. */
++#ifndef ATTRIBUTE_MALLOC
++# if (GCC_VERSION >= 2096)
++# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__))
++# else
++# define ATTRIBUTE_MALLOC
++# endif /* GNUC >= 2.96 */
++#endif /* ATTRIBUTE_MALLOC */
++
++/* Attributes on labels were valid as of gcc 2.93. */
++#ifndef ATTRIBUTE_UNUSED_LABEL
++# if (!defined (__cplusplus) && GCC_VERSION >= 2093)
++# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED
++# else
++# define ATTRIBUTE_UNUSED_LABEL
++# endif /* !__cplusplus && GNUC >= 2.93 */
++#endif /* ATTRIBUTE_UNUSED_LABEL */
++
++#ifndef ATTRIBUTE_UNUSED
++#define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
++#endif /* ATTRIBUTE_UNUSED */
++
++/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the
++ identifier name. */
++#if ! defined(__cplusplus) || (GCC_VERSION >= 3004)
++# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED
++#else /* !__cplusplus || GNUC >= 3.4 */
++# define ARG_UNUSED(NAME) NAME
++#endif /* !__cplusplus || GNUC >= 3.4 */
++
++#ifndef ATTRIBUTE_NORETURN
++#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__))
++#endif /* ATTRIBUTE_NORETURN */
++
++/* Attribute `nonnull' was valid as of gcc 3.3. */
++#ifndef ATTRIBUTE_NONNULL
++# if (GCC_VERSION >= 3003)
++# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m)))
++# else
++# define ATTRIBUTE_NONNULL(m)
++# endif /* GNUC >= 3.3 */
++#endif /* ATTRIBUTE_NONNULL */
++
++/* Attribute `pure' was valid as of gcc 3.0. */
++#ifndef ATTRIBUTE_PURE
++# if (GCC_VERSION >= 3000)
++# define ATTRIBUTE_PURE __attribute__ ((__pure__))
++# else
++# define ATTRIBUTE_PURE
++# endif /* GNUC >= 3.0 */
++#endif /* ATTRIBUTE_PURE */
++
++/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL.
++ This was the case for the `printf' format attribute by itself
++ before GCC 3.3, but as of 3.3 we need to add the `nonnull'
++ attribute to retain this behavior. */
++#ifndef ATTRIBUTE_PRINTF
++#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m)
++#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2)
++#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3)
++#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4)
++#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5)
++#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6)
++#endif /* ATTRIBUTE_PRINTF */
++
++/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on
++ a function pointer. Format attributes were allowed on function
++ pointers as of gcc 3.1. */
++#ifndef ATTRIBUTE_FPTR_PRINTF
++# if (GCC_VERSION >= 3001)
++# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n)
++# else
++# define ATTRIBUTE_FPTR_PRINTF(m, n)
++# endif /* GNUC >= 3.1 */
++# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2)
++# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3)
++# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4)
++# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5)
++# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6)
++#endif /* ATTRIBUTE_FPTR_PRINTF */
++
++/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A
++ NULL format specifier was allowed as of gcc 3.3. */
++#ifndef ATTRIBUTE_NULL_PRINTF
++# if (GCC_VERSION >= 3003)
++# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n)))
++# else
++# define ATTRIBUTE_NULL_PRINTF(m, n)
++# endif /* GNUC >= 3.3 */
++# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2)
++# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3)
++# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4)
++# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5)
++# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6)
++#endif /* ATTRIBUTE_NULL_PRINTF */
++
++/* Attribute `sentinel' was valid as of gcc 3.5. */
++#ifndef ATTRIBUTE_SENTINEL
++# if (GCC_VERSION >= 3005)
++# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__))
++# else
++# define ATTRIBUTE_SENTINEL
++# endif /* GNUC >= 3.5 */
++#endif /* ATTRIBUTE_SENTINEL */
++
++
++#ifndef ATTRIBUTE_ALIGNED_ALIGNOF
++# if (GCC_VERSION >= 3000)
++# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m))))
++# else
++# define ATTRIBUTE_ALIGNED_ALIGNOF(m)
++# endif /* GNUC >= 3.0 */
++#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */
++
++/* We use __extension__ in some places to suppress -pedantic warnings
++ about GCC extensions. This feature didn't work properly before
++ gcc 2.8. */
++#if GCC_VERSION < 2008
++#define __extension__
++#endif
++
++#endif /* ansidecl.h */
+diff -Nurp linux-2.6.22-590/include/asm-i386/bfd.h linux-2.6.22-600/include/asm-i386/bfd.h
+--- linux-2.6.22-590/include/asm-i386/bfd.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/asm-i386/bfd.h 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,4921 @@
++/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically
++ generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c",
++ "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c",
++ "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c",
++ "linker.c" and "simple.c".
++ Run "make headers" in your build bfd/ to regenerate. */
++
++/* Main header file for the bfd library -- portable access to object files.
++
++ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
++ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
++
++ Contributed by Cygnus Support.
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
++
++/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
++ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as
++ * required.
++ * Keith Owens <kaos@sgi.com> 15 May 2006
++ */
++
++#ifndef __BFD_H_SEEN__
++#define __BFD_H_SEEN__
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#ifdef __KERNEL__
++#include <asm/ansidecl.h>
++#else /* __KERNEL__ */
++#include "ansidecl.h"
++#include "symcat.h"
++#endif /* __KERNEL__ */
++#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE)
++#ifndef SABER
++/* This hack is to avoid a problem with some strict ANSI C preprocessors.
++ The problem is, "32_" is not a valid preprocessing token, and we don't
++ want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will
++ cause the inner CONCAT2 macros to be evaluated first, producing
++ still-valid pp-tokens. Then the final concatenation can be done. */
++#undef CONCAT4
++#define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d))
++#endif
++#endif
++
++/* The word size used by BFD on the host. This may be 64 with a 32
++ bit target if the host is 64 bit, or if other 64 bit targets have
++ been selected with --enable-targets, or if --enable-64-bit-bfd. */
++#ifdef __KERNEL__
++#define BFD_ARCH_SIZE 32
++#else /* __KERNEL__ */
++#define BFD_ARCH_SIZE 64
++#endif /* __KERNEL__ */
++
++/* The word size of the default bfd target. */
++#define BFD_DEFAULT_TARGET_SIZE 32
++
++#define BFD_HOST_64BIT_LONG 0
++#define BFD_HOST_LONG_LONG 1
++#if 1
++#define BFD_HOST_64_BIT long long
++#define BFD_HOST_U_64_BIT unsigned long long
++typedef BFD_HOST_64_BIT bfd_int64_t;
++typedef BFD_HOST_U_64_BIT bfd_uint64_t;
++#endif
++
++#if BFD_ARCH_SIZE >= 64
++#define BFD64
++#endif
++
++#ifndef INLINE
++#if __GNUC__ >= 2
++#define INLINE __inline__
++#else
++#define INLINE
++#endif
++#endif
++
++/* Forward declaration. */
++typedef struct bfd bfd;
++
++/* Boolean type used in bfd. Too many systems define their own
++ versions of "boolean" for us to safely typedef a "boolean" of
++ our own. Using an enum for "bfd_boolean" has its own set of
++ problems, with strange looking casts required to avoid warnings
++ on some older compilers. Thus we just use an int.
++
++ General rule: Functions which are bfd_boolean return TRUE on
++ success and FALSE on failure (unless they're a predicate). */
++
++typedef int bfd_boolean;
++#undef FALSE
++#undef TRUE
++#define FALSE 0
++#define TRUE 1
++
++#ifdef BFD64
++
++#ifndef BFD_HOST_64_BIT
++ #error No 64 bit integer type available
++#endif /* ! defined (BFD_HOST_64_BIT) */
++
++typedef BFD_HOST_U_64_BIT bfd_vma;
++typedef BFD_HOST_64_BIT bfd_signed_vma;
++typedef BFD_HOST_U_64_BIT bfd_size_type;
++typedef BFD_HOST_U_64_BIT symvalue;
++
++#ifndef fprintf_vma
++#if BFD_HOST_64BIT_LONG
++#define sprintf_vma(s,x) sprintf (s, "%016lx", x)
++#define fprintf_vma(f,x) fprintf (f, "%016lx", x)
++#else
++#define _bfd_int64_low(x) ((unsigned long) (((x) & 0xffffffff)))
++#define _bfd_int64_high(x) ((unsigned long) (((x) >> 32) & 0xffffffff))
++#define fprintf_vma(s,x) \
++ fprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x))
++#define sprintf_vma(s,x) \
++ sprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x))
++#endif
++#endif
++
++#else /* not BFD64 */
++
++/* Represent a target address. Also used as a generic unsigned type
++ which is guaranteed to be big enough to hold any arithmetic types
++ we need to deal with. */
++typedef unsigned long bfd_vma;
++
++/* A generic signed type which is guaranteed to be big enough to hold any
++ arithmetic types we need to deal with. Can be assumed to be compatible
++ with bfd_vma in the same way that signed and unsigned ints are compatible
++ (as parameters, in assignment, etc). */
++typedef long bfd_signed_vma;
++
++typedef unsigned long symvalue;
++typedef unsigned long bfd_size_type;
++
++/* Print a bfd_vma x on stream s. */
++#define fprintf_vma(s,x) fprintf (s, "%08lx", x)
++#define sprintf_vma(s,x) sprintf (s, "%08lx", x)
++
++#endif /* not BFD64 */
++
++#define HALF_BFD_SIZE_TYPE \
++ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2))
++
++#ifndef BFD_HOST_64_BIT
++/* Fall back on a 32 bit type. The idea is to make these types always
++ available for function return types, but in the case that
++ BFD_HOST_64_BIT is undefined such a function should abort or
++ otherwise signal an error. */
++typedef bfd_signed_vma bfd_int64_t;
++typedef bfd_vma bfd_uint64_t;
++#endif
++
++/* An offset into a file. BFD always uses the largest possible offset
++ based on the build time availability of fseek, fseeko, or fseeko64. */
++typedef BFD_HOST_64_BIT file_ptr;
++typedef unsigned BFD_HOST_64_BIT ufile_ptr;
++
++extern void bfd_sprintf_vma (bfd *, char *, bfd_vma);
++extern void bfd_fprintf_vma (bfd *, void *, bfd_vma);
++
++#define printf_vma(x) fprintf_vma(stdout,x)
++#define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x)
++
++typedef unsigned int flagword; /* 32 bits of flags */
++typedef unsigned char bfd_byte;
++\f
++/* File formats. */
++
++typedef enum bfd_format
++{
++ bfd_unknown = 0, /* File format is unknown. */
++ bfd_object, /* Linker/assembler/compiler output. */
++ bfd_archive, /* Object archive file. */
++ bfd_core, /* Core dump. */
++ bfd_type_end /* Marks the end; don't use it! */
++}
++bfd_format;
++
++/* Values that may appear in the flags field of a BFD. These also
++ appear in the object_flags field of the bfd_target structure, where
++ they indicate the set of flags used by that backend (not all flags
++ are meaningful for all object file formats) (FIXME: at the moment,
++ the object_flags values have mostly just been copied from backend
++ to another, and are not necessarily correct). */
++
++/* No flags. */
++#define BFD_NO_FLAGS 0x00
++
++/* BFD contains relocation entries. */
++#define HAS_RELOC 0x01
++
++/* BFD is directly executable. */
++#define EXEC_P 0x02
++
++/* BFD has line number information (basically used for F_LNNO in a
++ COFF header). */
++#define HAS_LINENO 0x04
++
++/* BFD has debugging information. */
++#define HAS_DEBUG 0x08
++
++/* BFD has symbols. */
++#define HAS_SYMS 0x10
++
++/* BFD has local symbols (basically used for F_LSYMS in a COFF
++ header). */
++#define HAS_LOCALS 0x20
++
++/* BFD is a dynamic object. */
++#define DYNAMIC 0x40
++
++/* Text section is write protected (if D_PAGED is not set, this is
++ like an a.out NMAGIC file) (the linker sets this by default, but
++ clears it for -r or -N). */
++#define WP_TEXT 0x80
++
++/* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the
++ linker sets this by default, but clears it for -r or -n or -N). */
++#define D_PAGED 0x100
++
++/* BFD is relaxable (this means that bfd_relax_section may be able to
++ do something) (sometimes bfd_relax_section can do something even if
++ this is not set). */
++#define BFD_IS_RELAXABLE 0x200
++
++/* This may be set before writing out a BFD to request using a
++ traditional format. For example, this is used to request that when
++ writing out an a.out object the symbols not be hashed to eliminate
++ duplicates. */
++#define BFD_TRADITIONAL_FORMAT 0x400
++
++/* This flag indicates that the BFD contents are actually cached in
++ memory. If this is set, iostream points to a bfd_in_memory struct. */
++#define BFD_IN_MEMORY 0x800
++
++/* The sections in this BFD specify a memory page. */
++#define HAS_LOAD_PAGE 0x1000
++
++/* This BFD has been created by the linker and doesn't correspond
++ to any input file. */
++#define BFD_LINKER_CREATED 0x2000
++\f
++/* Symbols and relocation. */
++
++/* A count of carsyms (canonical archive symbols). */
++typedef unsigned long symindex;
++
++/* How to perform a relocation. */
++typedef const struct reloc_howto_struct reloc_howto_type;
++
++#define BFD_NO_MORE_SYMBOLS ((symindex) ~0)
++
++/* General purpose part of a symbol X;
++ target specific parts are in libcoff.h, libaout.h, etc. */
++
++#define bfd_get_section(x) ((x)->section)
++#define bfd_get_output_section(x) ((x)->section->output_section)
++#define bfd_set_section(x,y) ((x)->section) = (y)
++#define bfd_asymbol_base(x) ((x)->section->vma)
++#define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value)
++#define bfd_asymbol_name(x) ((x)->name)
++/*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/
++#define bfd_asymbol_bfd(x) ((x)->the_bfd)
++#define bfd_asymbol_flavour(x) (bfd_asymbol_bfd(x)->xvec->flavour)
++
++/* A canonical archive symbol. */
++/* This is a type pun with struct ranlib on purpose! */
++typedef struct carsym
++{
++ char *name;
++ file_ptr file_offset; /* Look here to find the file. */
++}
++carsym; /* To make these you call a carsymogen. */
++
++/* Used in generating armaps (archive tables of contents).
++ Perhaps just a forward definition would do? */
++struct orl /* Output ranlib. */
++{
++ char **name; /* Symbol name. */
++ union
++ {
++ file_ptr pos;
++ bfd *abfd;
++ } u; /* bfd* or file position. */
++ int namidx; /* Index into string table. */
++};
++\f
++/* Linenumber stuff. */
++typedef struct lineno_cache_entry
++{
++ unsigned int line_number; /* Linenumber from start of function. */
++ union
++ {
++ struct bfd_symbol *sym; /* Function name. */
++ bfd_vma offset; /* Offset into section. */
++ } u;
++}
++alent;
++\f
++/* Object and core file sections. */
++
++#define align_power(addr, align) \
++ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align)))
++
++typedef struct bfd_section *sec_ptr;
++
++#define bfd_get_section_name(bfd, ptr) ((ptr)->name + 0)
++#define bfd_get_section_vma(bfd, ptr) ((ptr)->vma + 0)
++#define bfd_get_section_lma(bfd, ptr) ((ptr)->lma + 0)
++#define bfd_get_section_alignment(bfd, ptr) ((ptr)->alignment_power + 0)
++#define bfd_section_name(bfd, ptr) ((ptr)->name)
++#define bfd_section_size(bfd, ptr) ((ptr)->size)
++#define bfd_get_section_size(ptr) ((ptr)->size)
++#define bfd_section_vma(bfd, ptr) ((ptr)->vma)
++#define bfd_section_lma(bfd, ptr) ((ptr)->lma)
++#define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power)
++#define bfd_get_section_flags(bfd, ptr) ((ptr)->flags + 0)
++#define bfd_get_section_userdata(bfd, ptr) ((ptr)->userdata)
++
++#define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0)
++
++#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE)
++#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE)
++#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE)
++/* Find the address one past the end of SEC. */
++#define bfd_get_section_limit(bfd, sec) \
++ (((sec)->rawsize ? (sec)->rawsize : (sec)->size) \
++ / bfd_octets_per_byte (bfd))
++
++typedef struct stat stat_type;
++\f
++typedef enum bfd_print_symbol
++{
++ bfd_print_symbol_name,
++ bfd_print_symbol_more,
++ bfd_print_symbol_all
++} bfd_print_symbol_type;
++
++/* Information about a symbol that nm needs. */
++
++typedef struct _symbol_info
++{
++ symvalue value;
++ char type;
++ const char *name; /* Symbol name. */
++ unsigned char stab_type; /* Stab type. */
++ char stab_other; /* Stab other. */
++ short stab_desc; /* Stab desc. */
++ const char *stab_name; /* String for stab type. */
++} symbol_info;
++
++/* Get the name of a stabs type code. */
++
++extern const char *bfd_get_stab_name (int);
++\f
++/* Hash table routines. There is no way to free up a hash table. */
++
++/* An element in the hash table. Most uses will actually use a larger
++ structure, and an instance of this will be the first field. */
++
++struct bfd_hash_entry
++{
++ /* Next entry for this hash code. */
++ struct bfd_hash_entry *next;
++ /* String being hashed. */
++ const char *string;
++ /* Hash code. This is the full hash code, not the index into the
++ table. */
++ unsigned long hash;
++};
++
++/* A hash table. */
++
++struct bfd_hash_table
++{
++ /* The hash array. */
++ struct bfd_hash_entry **table;
++ /* The number of slots in the hash table. */
++ unsigned int size;
++ /* A function used to create new elements in the hash table. The
++ first entry is itself a pointer to an element. When this
++ function is first invoked, this pointer will be NULL. However,
++ having the pointer permits a hierarchy of method functions to be
++ built each of which calls the function in the superclass. Thus
++ each function should be written to allocate a new block of memory
++ only if the argument is NULL. */
++ struct bfd_hash_entry *(*newfunc)
++ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
++ /* An objalloc for this hash table. This is a struct objalloc *,
++ but we use void * to avoid requiring the inclusion of objalloc.h. */
++ void *memory;
++};
++
++/* Initialize a hash table. */
++extern bfd_boolean bfd_hash_table_init
++ (struct bfd_hash_table *,
++ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
++ struct bfd_hash_table *,
++ const char *));
++
++/* Initialize a hash table specifying a size. */
++extern bfd_boolean bfd_hash_table_init_n
++ (struct bfd_hash_table *,
++ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
++ struct bfd_hash_table *,
++ const char *),
++ unsigned int size);
++
++/* Free up a hash table. */
++extern void bfd_hash_table_free
++ (struct bfd_hash_table *);
++
++/* Look up a string in a hash table. If CREATE is TRUE, a new entry
++ will be created for this string if one does not already exist. The
++ COPY argument must be TRUE if this routine should copy the string
++ into newly allocated memory when adding an entry. */
++extern struct bfd_hash_entry *bfd_hash_lookup
++ (struct bfd_hash_table *, const char *, bfd_boolean create,
++ bfd_boolean copy);
++
++/* Replace an entry in a hash table. */
++extern void bfd_hash_replace
++ (struct bfd_hash_table *, struct bfd_hash_entry *old,
++ struct bfd_hash_entry *nw);
++
++/* Base method for creating a hash table entry. */
++extern struct bfd_hash_entry *bfd_hash_newfunc
++ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
++
++/* Grab some space for a hash table entry. */
++extern void *bfd_hash_allocate
++ (struct bfd_hash_table *, unsigned int);
++
++/* Traverse a hash table in a random order, calling a function on each
++ element. If the function returns FALSE, the traversal stops. The
++ INFO argument is passed to the function. */
++extern void bfd_hash_traverse
++ (struct bfd_hash_table *,
++ bfd_boolean (*) (struct bfd_hash_entry *, void *),
++ void *info);
++
++/* Allows the default size of a hash table to be configured. New hash
++ tables allocated using bfd_hash_table_init will be created with
++ this size. */
++extern void bfd_hash_set_default_size (bfd_size_type);
++
++/* This structure is used to keep track of stabs in sections
++ information while linking. */
++
++struct stab_info
++{
++ /* A hash table used to hold stabs strings. */
++ struct bfd_strtab_hash *strings;
++ /* The header file hash table. */
++ struct bfd_hash_table includes;
++ /* The first .stabstr section. */
++ struct bfd_section *stabstr;
++};
++
++#define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table
++
++/* User program access to BFD facilities. */
++
++/* Direct I/O routines, for programs which know more about the object
++ file than BFD does. Use higher level routines if possible. */
++
++extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *);
++extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *);
++extern int bfd_seek (bfd *, file_ptr, int);
++extern file_ptr bfd_tell (bfd *);
++extern int bfd_flush (bfd *);
++extern int bfd_stat (bfd *, struct stat *);
++
++/* Deprecated old routines. */
++#if __GNUC__
++#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
++ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \
++ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
++#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
++ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \
++ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
++#else
++#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
++ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \
++ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
++#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
++ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\
++ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
++#endif
++extern void warn_deprecated (const char *, const char *, int, const char *);
++
++/* Cast from const char * to char * so that caller can assign to
++ a char * without a warning. */
++#define bfd_get_filename(abfd) ((char *) (abfd)->filename)
++#define bfd_get_cacheable(abfd) ((abfd)->cacheable)
++#define bfd_get_format(abfd) ((abfd)->format)
++#define bfd_get_target(abfd) ((abfd)->xvec->name)
++#define bfd_get_flavour(abfd) ((abfd)->xvec->flavour)
++#define bfd_family_coff(abfd) \
++ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \
++ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour)
++#define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
++#define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE)
++#define bfd_header_big_endian(abfd) \
++ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG)
++#define bfd_header_little_endian(abfd) \
++ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE)
++#define bfd_get_file_flags(abfd) ((abfd)->flags)
++#define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags)
++#define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags)
++#define bfd_my_archive(abfd) ((abfd)->my_archive)
++#define bfd_has_map(abfd) ((abfd)->has_armap)
++
++#define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types)
++#define bfd_usrdata(abfd) ((abfd)->usrdata)
++
++#define bfd_get_start_address(abfd) ((abfd)->start_address)
++#define bfd_get_symcount(abfd) ((abfd)->symcount)
++#define bfd_get_outsymbols(abfd) ((abfd)->outsymbols)
++#define bfd_count_sections(abfd) ((abfd)->section_count)
++
++#define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount)
++
++#define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char)
++
++#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE)
++
++extern bfd_boolean bfd_cache_close
++ (bfd *abfd);
++/* NB: This declaration should match the autogenerated one in libbfd.h. */
++
++extern bfd_boolean bfd_cache_close_all (void);
++
++extern bfd_boolean bfd_record_phdr
++ (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma,
++ bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **);
++
++/* Byte swapping routines. */
++
++bfd_uint64_t bfd_getb64 (const void *);
++bfd_uint64_t bfd_getl64 (const void *);
++bfd_int64_t bfd_getb_signed_64 (const void *);
++bfd_int64_t bfd_getl_signed_64 (const void *);
++bfd_vma bfd_getb32 (const void *);
++bfd_vma bfd_getl32 (const void *);
++bfd_signed_vma bfd_getb_signed_32 (const void *);
++bfd_signed_vma bfd_getl_signed_32 (const void *);
++bfd_vma bfd_getb16 (const void *);
++bfd_vma bfd_getl16 (const void *);
++bfd_signed_vma bfd_getb_signed_16 (const void *);
++bfd_signed_vma bfd_getl_signed_16 (const void *);
++void bfd_putb64 (bfd_uint64_t, void *);
++void bfd_putl64 (bfd_uint64_t, void *);
++void bfd_putb32 (bfd_vma, void *);
++void bfd_putl32 (bfd_vma, void *);
++void bfd_putb16 (bfd_vma, void *);
++void bfd_putl16 (bfd_vma, void *);
++
++/* Byte swapping routines which take size and endianness as arguments. */
++
++bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean);
++void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean);
++
++extern bfd_boolean bfd_section_already_linked_table_init (void);
++extern void bfd_section_already_linked_table_free (void);
++\f
++/* Externally visible ECOFF routines. */
++
++#if defined(__STDC__) || defined(ALMOST_STDC)
++struct ecoff_debug_info;
++struct ecoff_debug_swap;
++struct ecoff_extr;
++struct bfd_symbol;
++struct bfd_link_info;
++struct bfd_link_hash_entry;
++struct bfd_elf_version_tree;
++#endif
++extern bfd_vma bfd_ecoff_get_gp_value
++ (bfd * abfd);
++extern bfd_boolean bfd_ecoff_set_gp_value
++ (bfd *abfd, bfd_vma gp_value);
++extern bfd_boolean bfd_ecoff_set_regmasks
++ (bfd *abfd, unsigned long gprmask, unsigned long fprmask,
++ unsigned long *cprmask);
++extern void *bfd_ecoff_debug_init
++ (bfd *output_bfd, struct ecoff_debug_info *output_debug,
++ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
++extern void bfd_ecoff_debug_free
++ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
++ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
++extern bfd_boolean bfd_ecoff_debug_accumulate
++ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
++ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
++ struct ecoff_debug_info *input_debug,
++ const struct ecoff_debug_swap *input_swap, struct bfd_link_info *);
++extern bfd_boolean bfd_ecoff_debug_accumulate_other
++ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
++ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
++ struct bfd_link_info *);
++extern bfd_boolean bfd_ecoff_debug_externals
++ (bfd *abfd, struct ecoff_debug_info *debug,
++ const struct ecoff_debug_swap *swap, bfd_boolean relocatable,
++ bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *),
++ void (*set_index) (struct bfd_symbol *, bfd_size_type));
++extern bfd_boolean bfd_ecoff_debug_one_external
++ (bfd *abfd, struct ecoff_debug_info *debug,
++ const struct ecoff_debug_swap *swap, const char *name,
++ struct ecoff_extr *esym);
++extern bfd_size_type bfd_ecoff_debug_size
++ (bfd *abfd, struct ecoff_debug_info *debug,
++ const struct ecoff_debug_swap *swap);
++extern bfd_boolean bfd_ecoff_write_debug
++ (bfd *abfd, struct ecoff_debug_info *debug,
++ const struct ecoff_debug_swap *swap, file_ptr where);
++extern bfd_boolean bfd_ecoff_write_accumulated_debug
++ (void *handle, bfd *abfd, struct ecoff_debug_info *debug,
++ const struct ecoff_debug_swap *swap,
++ struct bfd_link_info *info, file_ptr where);
++
++/* Externally visible ELF routines. */
++
++struct bfd_link_needed_list
++{
++ struct bfd_link_needed_list *next;
++ bfd *by;
++ const char *name;
++};
++
++enum dynamic_lib_link_class {
++ DYN_NORMAL = 0,
++ DYN_AS_NEEDED = 1,
++ DYN_DT_NEEDED = 2,
++ DYN_NO_ADD_NEEDED = 4,
++ DYN_NO_NEEDED = 8
++};
++
++extern bfd_boolean bfd_elf_record_link_assignment
++ (struct bfd_link_info *, const char *, bfd_boolean);
++extern struct bfd_link_needed_list *bfd_elf_get_needed_list
++ (bfd *, struct bfd_link_info *);
++extern bfd_boolean bfd_elf_get_bfd_needed_list
++ (bfd *, struct bfd_link_needed_list **);
++extern bfd_boolean bfd_elf_size_dynamic_sections
++ (bfd *, const char *, const char *, const char *, const char * const *,
++ struct bfd_link_info *, struct bfd_section **,
++ struct bfd_elf_version_tree *);
++extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr
++ (bfd *, struct bfd_link_info *);
++extern void bfd_elf_set_dt_needed_name
++ (bfd *, const char *);
++extern const char *bfd_elf_get_dt_soname
++ (bfd *);
++extern void bfd_elf_set_dyn_lib_class
++ (bfd *, int);
++extern int bfd_elf_get_dyn_lib_class
++ (bfd *);
++extern struct bfd_link_needed_list *bfd_elf_get_runpath_list
++ (bfd *, struct bfd_link_info *);
++extern bfd_boolean bfd_elf_discard_info
++ (bfd *, struct bfd_link_info *);
++extern unsigned int _bfd_elf_default_action_discarded
++ (struct bfd_section *);
++
++/* Return an upper bound on the number of bytes required to store a
++ copy of ABFD's program header table entries. Return -1 if an error
++ occurs; bfd_get_error will return an appropriate code. */
++extern long bfd_get_elf_phdr_upper_bound
++ (bfd *abfd);
++
++/* Copy ABFD's program header table entries to *PHDRS. The entries
++ will be stored as an array of Elf_Internal_Phdr structures, as
++ defined in include/elf/internal.h. To find out how large the
++ buffer needs to be, call bfd_get_elf_phdr_upper_bound.
++
++ Return the number of program header table entries read, or -1 if an
++ error occurs; bfd_get_error will return an appropriate code. */
++extern int bfd_get_elf_phdrs
++ (bfd *abfd, void *phdrs);
++
++/* Create a new BFD as if by bfd_openr. Rather than opening a file,
++ reconstruct an ELF file by reading the segments out of remote memory
++ based on the ELF file header at EHDR_VMA and the ELF program headers it
++ points to. If not null, *LOADBASEP is filled in with the difference
++ between the VMAs from which the segments were read, and the VMAs the
++ file headers (and hence BFD's idea of each section's VMA) put them at.
++
++ The function TARGET_READ_MEMORY is called to copy LEN bytes from the
++ remote memory at target address VMA into the local buffer at MYADDR; it
++ should return zero on success or an `errno' code on failure. TEMPL must
++ be a BFD for an ELF target with the word size and byte order found in
++ the remote memory. */
++extern bfd *bfd_elf_bfd_from_remote_memory
++ (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep,
++ int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr, int len));
++
++/* Return the arch_size field of an elf bfd, or -1 if not elf. */
++extern int bfd_get_arch_size
++ (bfd *);
++
++/* Return TRUE if address "naturally" sign extends, or -1 if not elf. */
++extern int bfd_get_sign_extend_vma
++ (bfd *);
++
++extern struct bfd_section *_bfd_elf_tls_setup
++ (bfd *, struct bfd_link_info *);
++
++extern void _bfd_elf_provide_symbol
++ (struct bfd_link_info *, const char *, bfd_vma, struct bfd_section *);
++
++extern void _bfd_elf_provide_section_bound_symbols
++ (struct bfd_link_info *, struct bfd_section *, const char *, const char *);
++
++extern void _bfd_elf_fix_excluded_sec_syms
++ (bfd *, struct bfd_link_info *);
++
++extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs
++ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *,
++ char **);
++
++/* SunOS shared library support routines for the linker. */
++
++extern struct bfd_link_needed_list *bfd_sunos_get_needed_list
++ (bfd *, struct bfd_link_info *);
++extern bfd_boolean bfd_sunos_record_link_assignment
++ (bfd *, struct bfd_link_info *, const char *);
++extern bfd_boolean bfd_sunos_size_dynamic_sections
++ (bfd *, struct bfd_link_info *, struct bfd_section **,
++ struct bfd_section **, struct bfd_section **);
++
++/* Linux shared library support routines for the linker. */
++
++extern bfd_boolean bfd_i386linux_size_dynamic_sections
++ (bfd *, struct bfd_link_info *);
++extern bfd_boolean bfd_m68klinux_size_dynamic_sections
++ (bfd *, struct bfd_link_info *);
++extern bfd_boolean bfd_sparclinux_size_dynamic_sections
++ (bfd *, struct bfd_link_info *);
++
++/* mmap hacks */
++
++struct _bfd_window_internal;
++typedef struct _bfd_window_internal bfd_window_internal;
++
++typedef struct _bfd_window
++{
++ /* What the user asked for. */
++ void *data;
++ bfd_size_type size;
++ /* The actual window used by BFD. Small user-requested read-only
++ regions sharing a page may share a single window into the object
++ file. Read-write versions shouldn't until I've fixed things to
++ keep track of which portions have been claimed by the
++ application; don't want to give the same region back when the
++ application wants two writable copies! */
++ struct _bfd_window_internal *i;
++}
++bfd_window;
++
++extern void bfd_init_window
++ (bfd_window *);
++extern void bfd_free_window
++ (bfd_window *);
++extern bfd_boolean bfd_get_file_window
++ (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean);
++
++/* XCOFF support routines for the linker. */
++
++extern bfd_boolean bfd_xcoff_link_record_set
++ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type);
++extern bfd_boolean bfd_xcoff_import_symbol
++ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma,
++ const char *, const char *, const char *, unsigned int);
++extern bfd_boolean bfd_xcoff_export_symbol
++ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *);
++extern bfd_boolean bfd_xcoff_link_count_reloc
++ (bfd *, struct bfd_link_info *, const char *);
++extern bfd_boolean bfd_xcoff_record_link_assignment
++ (bfd *, struct bfd_link_info *, const char *);
++extern bfd_boolean bfd_xcoff_size_dynamic_sections
++ (bfd *, struct bfd_link_info *, const char *, const char *,
++ unsigned long, unsigned long, unsigned long, bfd_boolean,
++ int, bfd_boolean, bfd_boolean, struct bfd_section **, bfd_boolean);
++extern bfd_boolean bfd_xcoff_link_generate_rtinit
++ (bfd *, const char *, const char *, bfd_boolean);
++
++/* XCOFF support routines for ar. */
++extern bfd_boolean bfd_xcoff_ar_archive_set_magic
++ (bfd *, char *);
++
++/* Externally visible COFF routines. */
++
++#if defined(__STDC__) || defined(ALMOST_STDC)
++struct internal_syment;
++union internal_auxent;
++#endif
++
++extern bfd_boolean bfd_coff_get_syment
++ (bfd *, struct bfd_symbol *, struct internal_syment *);
++
++extern bfd_boolean bfd_coff_get_auxent
++ (bfd *, struct bfd_symbol *, int, union internal_auxent *);
++
++extern bfd_boolean bfd_coff_set_symbol_class
++ (bfd *, struct bfd_symbol *, unsigned int);
++
++extern bfd_boolean bfd_m68k_coff_create_embedded_relocs
++ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **);
++
++/* ARM Interworking support. Called from linker. */
++extern bfd_boolean bfd_arm_allocate_interworking_sections
++ (struct bfd_link_info *);
++
++extern bfd_boolean bfd_arm_process_before_allocation
++ (bfd *, struct bfd_link_info *, int);
++
++extern bfd_boolean bfd_arm_get_bfd_for_interworking
++ (bfd *, struct bfd_link_info *);
++
++/* PE ARM Interworking support. Called from linker. */
++extern bfd_boolean bfd_arm_pe_allocate_interworking_sections
++ (struct bfd_link_info *);
++
++extern bfd_boolean bfd_arm_pe_process_before_allocation
++ (bfd *, struct bfd_link_info *, int);
++
++extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking
++ (bfd *, struct bfd_link_info *);
++
++/* ELF ARM Interworking support. Called from linker. */
++extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections
++ (struct bfd_link_info *);
++
++extern bfd_boolean bfd_elf32_arm_process_before_allocation
++ (bfd *, struct bfd_link_info *, int);
++
++void bfd_elf32_arm_set_target_relocs
++ (struct bfd_link_info *, int, char *, int, int);
++
++extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking
++ (bfd *, struct bfd_link_info *);
++
++extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd
++ (bfd *, struct bfd_link_info *);
++
++/* ELF ARM mapping symbol support */
++extern bfd_boolean bfd_is_arm_mapping_symbol_name
++ (const char * name);
++
++/* ARM Note section processing. */
++extern bfd_boolean bfd_arm_merge_machines
++ (bfd *, bfd *);
++
++extern bfd_boolean bfd_arm_update_notes
++ (bfd *, const char *);
++
++extern unsigned int bfd_arm_get_mach_from_notes
++ (bfd *, const char *);
++
++/* TI COFF load page support. */
++extern void bfd_ticoff_set_section_load_page
++ (struct bfd_section *, int);
++
++extern int bfd_ticoff_get_section_load_page
++ (struct bfd_section *);
++
++/* H8/300 functions. */
++extern bfd_vma bfd_h8300_pad_address
++ (bfd *, bfd_vma);
++
++/* IA64 Itanium code generation. Called from linker. */
++extern void bfd_elf32_ia64_after_parse
++ (int);
++
++extern void bfd_elf64_ia64_after_parse
++ (int);
++
++/* This structure is used for a comdat section, as in PE. A comdat
++ section is associated with a particular symbol. When the linker
++ sees a comdat section, it keeps only one of the sections with a
++ given name and associated with a given symbol. */
++
++struct coff_comdat_info
++{
++ /* The name of the symbol associated with a comdat section. */
++ const char *name;
++
++ /* The local symbol table index of the symbol associated with a
++ comdat section. This is only meaningful to the object file format
++ specific code; it is not an index into the list returned by
++ bfd_canonicalize_symtab. */
++ long symbol;
++};
++
++extern struct coff_comdat_info *bfd_coff_get_comdat_section
++ (bfd *, struct bfd_section *);
++
++/* Extracted from init.c. */
++void bfd_init (void);
++
++/* Extracted from opncls.c. */
++bfd *bfd_fopen (const char *filename, const char *target,
++ const char *mode, int fd);
++
++bfd *bfd_openr (const char *filename, const char *target);
++
++bfd *bfd_fdopenr (const char *filename, const char *target, int fd);
++
++bfd *bfd_openstreamr (const char *, const char *, void *);
++
++bfd *bfd_openr_iovec (const char *filename, const char *target,
++ void *(*open) (struct bfd *nbfd,
++ void *open_closure),
++ void *open_closure,
++ file_ptr (*pread) (struct bfd *nbfd,
++ void *stream,
++ void *buf,
++ file_ptr nbytes,
++ file_ptr offset),
++ int (*close) (struct bfd *nbfd,
++ void *stream));
++
++bfd *bfd_openw (const char *filename, const char *target);
++
++bfd_boolean bfd_close (bfd *abfd);
++
++bfd_boolean bfd_close_all_done (bfd *);
++
++bfd *bfd_create (const char *filename, bfd *templ);
++
++bfd_boolean bfd_make_writable (bfd *abfd);
++
++bfd_boolean bfd_make_readable (bfd *abfd);
++
++unsigned long bfd_calc_gnu_debuglink_crc32
++ (unsigned long crc, const unsigned char *buf, bfd_size_type len);
++
++char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir);
++
++struct bfd_section *bfd_create_gnu_debuglink_section
++ (bfd *abfd, const char *filename);
++
++bfd_boolean bfd_fill_in_gnu_debuglink_section
++ (bfd *abfd, struct bfd_section *sect, const char *filename);
++
++/* Extracted from libbfd.c. */
++
++/* Byte swapping macros for user section data. */
++
++#define bfd_put_8(abfd, val, ptr) \
++ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff))
++#define bfd_put_signed_8 \
++ bfd_put_8
++#define bfd_get_8(abfd, ptr) \
++ (*(unsigned char *) (ptr) & 0xff)
++#define bfd_get_signed_8(abfd, ptr) \
++ (((*(unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80)
++
++#define bfd_put_16(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_putx16, ((val),(ptr)))
++#define bfd_put_signed_16 \
++ bfd_put_16
++#define bfd_get_16(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx16, (ptr))
++#define bfd_get_signed_16(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx_signed_16, (ptr))
++
++#define bfd_put_32(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_putx32, ((val),(ptr)))
++#define bfd_put_signed_32 \
++ bfd_put_32
++#define bfd_get_32(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx32, (ptr))
++#define bfd_get_signed_32(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx_signed_32, (ptr))
++
++#define bfd_put_64(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_putx64, ((val), (ptr)))
++#define bfd_put_signed_64 \
++ bfd_put_64
++#define bfd_get_64(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx64, (ptr))
++#define bfd_get_signed_64(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx_signed_64, (ptr))
++
++#define bfd_get(bits, abfd, ptr) \
++ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \
++ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \
++ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \
++ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \
++ : (abort (), (bfd_vma) - 1))
++
++#define bfd_put(bits, abfd, val, ptr) \
++ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \
++ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \
++ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \
++ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \
++ : (abort (), (void) 0))
++
++
++/* Byte swapping macros for file header data. */
++
++#define bfd_h_put_8(abfd, val, ptr) \
++ bfd_put_8 (abfd, val, ptr)
++#define bfd_h_put_signed_8(abfd, val, ptr) \
++ bfd_put_8 (abfd, val, ptr)
++#define bfd_h_get_8(abfd, ptr) \
++ bfd_get_8 (abfd, ptr)
++#define bfd_h_get_signed_8(abfd, ptr) \
++ bfd_get_signed_8 (abfd, ptr)
++
++#define bfd_h_put_16(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_h_putx16, (val, ptr))
++#define bfd_h_put_signed_16 \
++ bfd_h_put_16
++#define bfd_h_get_16(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx16, (ptr))
++#define bfd_h_get_signed_16(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr))
++
++#define bfd_h_put_32(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_h_putx32, (val, ptr))
++#define bfd_h_put_signed_32 \
++ bfd_h_put_32
++#define bfd_h_get_32(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx32, (ptr))
++#define bfd_h_get_signed_32(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr))
++
++#define bfd_h_put_64(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_h_putx64, (val, ptr))
++#define bfd_h_put_signed_64 \
++ bfd_h_put_64
++#define bfd_h_get_64(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx64, (ptr))
++#define bfd_h_get_signed_64(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr))
++
++/* Aliases for the above, which should eventually go away. */
++
++#define H_PUT_64 bfd_h_put_64
++#define H_PUT_32 bfd_h_put_32
++#define H_PUT_16 bfd_h_put_16
++#define H_PUT_8 bfd_h_put_8
++#define H_PUT_S64 bfd_h_put_signed_64
++#define H_PUT_S32 bfd_h_put_signed_32
++#define H_PUT_S16 bfd_h_put_signed_16
++#define H_PUT_S8 bfd_h_put_signed_8
++#define H_GET_64 bfd_h_get_64
++#define H_GET_32 bfd_h_get_32
++#define H_GET_16 bfd_h_get_16
++#define H_GET_8 bfd_h_get_8
++#define H_GET_S64 bfd_h_get_signed_64
++#define H_GET_S32 bfd_h_get_signed_32
++#define H_GET_S16 bfd_h_get_signed_16
++#define H_GET_S8 bfd_h_get_signed_8
++
++
++/* Extracted from bfdio.c. */
++long bfd_get_mtime (bfd *abfd);
++
++long bfd_get_size (bfd *abfd);
++
++/* Extracted from bfdwin.c. */
++/* Extracted from section.c. */
++typedef struct bfd_section
++{
++ /* The name of the section; the name isn't a copy, the pointer is
++ the same as that passed to bfd_make_section. */
++ const char *name;
++
++ /* A unique sequence number. */
++ int id;
++
++ /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */
++ int index;
++
++ /* The next section in the list belonging to the BFD, or NULL. */
++ struct bfd_section *next;
++
++ /* The previous section in the list belonging to the BFD, or NULL. */
++ struct bfd_section *prev;
++
++ /* The field flags contains attributes of the section. Some
++ flags are read in from the object file, and some are
++ synthesized from other information. */
++ flagword flags;
++
++#define SEC_NO_FLAGS 0x000
++
++ /* Tells the OS to allocate space for this section when loading.
++ This is clear for a section containing debug information only. */
++#define SEC_ALLOC 0x001
++
++ /* Tells the OS to load the section from the file when loading.
++ This is clear for a .bss section. */
++#define SEC_LOAD 0x002
++
++ /* The section contains data still to be relocated, so there is
++ some relocation information too. */
++#define SEC_RELOC 0x004
++
++ /* A signal to the OS that the section contains read only data. */
++#define SEC_READONLY 0x008
++
++ /* The section contains code only. */
++#define SEC_CODE 0x010
++
++ /* The section contains data only. */
++#define SEC_DATA 0x020
++
++ /* The section will reside in ROM. */
++#define SEC_ROM 0x040
++
++ /* The section contains constructor information. This section
++ type is used by the linker to create lists of constructors and
++ destructors used by <<g++>>. When a back end sees a symbol
++ which should be used in a constructor list, it creates a new
++ section for the type of name (e.g., <<__CTOR_LIST__>>), attaches
++ the symbol to it, and builds a relocation. To build the lists
++ of constructors, all the linker has to do is catenate all the
++ sections called <<__CTOR_LIST__>> and relocate the data
++ contained within - exactly the operations it would perform on
++ standard data. */
++#define SEC_CONSTRUCTOR 0x080
++
++ /* The section has contents - a data section could be
++ <<SEC_ALLOC>> | <<SEC_HAS_CONTENTS>>; a debug section could be
++ <<SEC_HAS_CONTENTS>> */
++#define SEC_HAS_CONTENTS 0x100
++
++ /* An instruction to the linker to not output the section
++ even if it has information which would normally be written. */
++#define SEC_NEVER_LOAD 0x200
++
++ /* The section contains thread local data. */
++#define SEC_THREAD_LOCAL 0x400
++
++ /* The section has GOT references. This flag is only for the
++ linker, and is currently only used by the elf32-hppa back end.
++ It will be set if global offset table references were detected
++ in this section, which indicate to the linker that the section
++ contains PIC code, and must be handled specially when doing a
++ static link. */
++#define SEC_HAS_GOT_REF 0x800
++
++ /* The section contains common symbols (symbols may be defined
++ multiple times, the value of a symbol is the amount of
++ space it requires, and the largest symbol value is the one
++ used). Most targets have exactly one of these (which we
++ translate to bfd_com_section_ptr), but ECOFF has two. */
++#define SEC_IS_COMMON 0x1000
++
++ /* The section contains only debugging information. For
++ example, this is set for ELF .debug and .stab sections.
++ strip tests this flag to see if a section can be
++ discarded. */
++#define SEC_DEBUGGING 0x2000
++
++ /* The contents of this section are held in memory pointed to
++ by the contents field. This is checked by bfd_get_section_contents,
++ and the data is retrieved from memory if appropriate. */
++#define SEC_IN_MEMORY 0x4000
++
++ /* The contents of this section are to be excluded by the
++ linker for executable and shared objects unless those
++ objects are to be further relocated. */
++#define SEC_EXCLUDE 0x8000
++
++ /* The contents of this section are to be sorted based on the sum of
++ the symbol and addend values specified by the associated relocation
++ entries. Entries without associated relocation entries will be
++ appended to the end of the section in an unspecified order. */
++#define SEC_SORT_ENTRIES 0x10000
++
++ /* When linking, duplicate sections of the same name should be
++ discarded, rather than being combined into a single section as
++ is usually done. This is similar to how common symbols are
++ handled. See SEC_LINK_DUPLICATES below. */
++#define SEC_LINK_ONCE 0x20000
++
++ /* If SEC_LINK_ONCE is set, this bitfield describes how the linker
++ should handle duplicate sections. */
++#define SEC_LINK_DUPLICATES 0x40000
++
++ /* This value for SEC_LINK_DUPLICATES means that duplicate
++ sections with the same name should simply be discarded. */
++#define SEC_LINK_DUPLICATES_DISCARD 0x0
++
++ /* This value for SEC_LINK_DUPLICATES means that the linker
++ should warn if there are any duplicate sections, although
++ it should still only link one copy. */
++#define SEC_LINK_DUPLICATES_ONE_ONLY 0x80000
++
++ /* This value for SEC_LINK_DUPLICATES means that the linker
++ should warn if any duplicate sections are a different size. */
++#define SEC_LINK_DUPLICATES_SAME_SIZE 0x100000
++
++ /* This value for SEC_LINK_DUPLICATES means that the linker
++ should warn if any duplicate sections contain different
++ contents. */
++#define SEC_LINK_DUPLICATES_SAME_CONTENTS \
++ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE)
++
++ /* This section was created by the linker as part of dynamic
++ relocation or other arcane processing. It is skipped when
++ going through the first-pass output, trusting that someone
++ else up the line will take care of it later. */
++#define SEC_LINKER_CREATED 0x200000
++
++ /* This section should not be subject to garbage collection. */
++#define SEC_KEEP 0x400000
++
++ /* This section contains "short" data, and should be placed
++ "near" the GP. */
++#define SEC_SMALL_DATA 0x800000
++
++ /* Attempt to merge identical entities in the section.
++ Entity size is given in the entsize field. */
++#define SEC_MERGE 0x1000000
++
++ /* If given with SEC_MERGE, entities to merge are zero terminated
++ strings where entsize specifies character size instead of fixed
++ size entries. */
++#define SEC_STRINGS 0x2000000
++
++ /* This section contains data about section groups. */
++#define SEC_GROUP 0x4000000
++
++ /* The section is a COFF shared library section. This flag is
++ only for the linker. If this type of section appears in
++ the input file, the linker must copy it to the output file
++ without changing the vma or size. FIXME: Although this
++ was originally intended to be general, it really is COFF
++ specific (and the flag was renamed to indicate this). It
++ might be cleaner to have some more general mechanism to
++ allow the back end to control what the linker does with
++ sections. */
++#define SEC_COFF_SHARED_LIBRARY 0x10000000
++
++ /* This section contains data which may be shared with other
++ executables or shared objects. This is for COFF only. */
++#define SEC_COFF_SHARED 0x20000000
++
++ /* When a section with this flag is being linked, then if the size of
++ the input section is less than a page, it should not cross a page
++ boundary. If the size of the input section is one page or more,
++ it should be aligned on a page boundary. This is for TI
++ TMS320C54X only. */
++#define SEC_TIC54X_BLOCK 0x40000000
++
++ /* Conditionally link this section; do not link if there are no
++ references found to any symbol in the section. This is for TI
++ TMS320C54X only. */
++#define SEC_TIC54X_CLINK 0x80000000
++
++ /* End of section flags. */
++
++ /* Some internal packed boolean fields. */
++
++ /* See the vma field. */
++ unsigned int user_set_vma : 1;
++
++ /* A mark flag used by some of the linker backends. */
++ unsigned int linker_mark : 1;
++
++ /* Another mark flag used by some of the linker backends. Set for
++ output sections that have an input section. */
++ unsigned int linker_has_input : 1;
++
++ /* Mark flags used by some linker backends for garbage collection. */
++ unsigned int gc_mark : 1;
++ unsigned int gc_mark_from_eh : 1;
++
++ /* The following flags are used by the ELF linker. */
++
++ /* Mark sections which have been allocated to segments. */
++ unsigned int segment_mark : 1;
++
++ /* Type of sec_info information. */
++ unsigned int sec_info_type:3;
++#define ELF_INFO_TYPE_NONE 0
++#define ELF_INFO_TYPE_STABS 1
++#define ELF_INFO_TYPE_MERGE 2
++#define ELF_INFO_TYPE_EH_FRAME 3
++#define ELF_INFO_TYPE_JUST_SYMS 4
++
++ /* Nonzero if this section uses RELA relocations, rather than REL. */
++ unsigned int use_rela_p:1;
++
++ /* Bits used by various backends. The generic code doesn't touch
++ these fields. */
++
++ /* Nonzero if this section has TLS related relocations. */
++ unsigned int has_tls_reloc:1;
++
++ /* Nonzero if this section has a gp reloc. */
++ unsigned int has_gp_reloc:1;
++
++ /* Nonzero if this section needs the relax finalize pass. */
++ unsigned int need_finalize_relax:1;
++
++ /* Whether relocations have been processed. */
++ unsigned int reloc_done : 1;
++
++ /* End of internal packed boolean fields. */
++
++ /* The virtual memory address of the section - where it will be
++ at run time. The symbols are relocated against this. The
++ user_set_vma flag is maintained by bfd; if it's not set, the
++ backend can assign addresses (for example, in <<a.out>>, where
++ the default address for <<.data>> is dependent on the specific
++ target and various flags). */
++ bfd_vma vma;
++
++ /* The load address of the section - where it would be in a
++ rom image; really only used for writing section header
++ information. */
++ bfd_vma lma;
++
++ /* The size of the section in octets, as it will be output.
++ Contains a value even if the section has no contents (e.g., the
++ size of <<.bss>>). */
++ bfd_size_type size;
++
++ /* For input sections, the original size on disk of the section, in
++ octets. This field is used by the linker relaxation code. It is
++ currently only set for sections where the linker relaxation scheme
++ doesn't cache altered section and reloc contents (stabs, eh_frame,
++ SEC_MERGE, some coff relaxing targets), and thus the original size
++ needs to be kept to read the section multiple times.
++ For output sections, rawsize holds the section size calculated on
++ a previous linker relaxation pass. */
++ bfd_size_type rawsize;
++
++ /* If this section is going to be output, then this value is the
++ offset in *bytes* into the output section of the first byte in the
++ input section (byte ==> smallest addressable unit on the
++ target). In most cases, if this was going to start at the
++ 100th octet (8-bit quantity) in the output section, this value
++ would be 100. However, if the target byte size is 16 bits
++ (bfd_octets_per_byte is "2"), this value would be 50. */
++ bfd_vma output_offset;
++
++ /* The output section through which to map on output. */
++ struct bfd_section *output_section;
++
++ /* The alignment requirement of the section, as an exponent of 2 -
++ e.g., 3 aligns to 2^3 (or 8). */
++ unsigned int alignment_power;
++
++ /* If an input section, a pointer to a vector of relocation
++ records for the data in this section. */
++ struct reloc_cache_entry *relocation;
++
++ /* If an output section, a pointer to a vector of pointers to
++ relocation records for the data in this section. */
++ struct reloc_cache_entry **orelocation;
++
++ /* The number of relocation records in one of the above. */
++ unsigned reloc_count;
++
++ /* Information below is back end specific - and not always used
++ or updated. */
++
++ /* File position of section data. */
++ file_ptr filepos;
++
++ /* File position of relocation info. */
++ file_ptr rel_filepos;
++
++ /* File position of line data. */
++ file_ptr line_filepos;
++
++ /* Pointer to data for applications. */
++ void *userdata;
++
++ /* If the SEC_IN_MEMORY flag is set, this points to the actual
++ contents. */
++ unsigned char *contents;
++
++ /* Attached line number information. */
++ alent *lineno;
++
++ /* Number of line number records. */
++ unsigned int lineno_count;
++
++ /* Entity size for merging purposes. */
++ unsigned int entsize;
++
++ /* Points to the kept section if this section is a link-once section,
++ and is discarded. */
++ struct bfd_section *kept_section;
++
++ /* When a section is being output, this value changes as more
++ linenumbers are written out. */
++ file_ptr moving_line_filepos;
++
++ /* What the section number is in the target world. */
++ int target_index;
++
++ void *used_by_bfd;
++
++ /* If this is a constructor section then here is a list of the
++ relocations created to relocate items within it. */
++ struct relent_chain *constructor_chain;
++
++ /* The BFD which owns the section. */
++ bfd *owner;
++
++ /* A symbol which points at this section only. */
++ struct bfd_symbol *symbol;
++ struct bfd_symbol **symbol_ptr_ptr;
++
++ /* Early in the link process, map_head and map_tail are used to build
++ a list of input sections attached to an output section. Later,
++ output sections use these fields for a list of bfd_link_order
++ structs. */
++ union {
++ struct bfd_link_order *link_order;
++ struct bfd_section *s;
++ } map_head, map_tail;
++} asection;
++
++/* These sections are global, and are managed by BFD. The application
++ and target back end are not permitted to change the values in
++ these sections. New code should use the section_ptr macros rather
++ than referring directly to the const sections. The const sections
++ may eventually vanish. */
++#define BFD_ABS_SECTION_NAME "*ABS*"
++#define BFD_UND_SECTION_NAME "*UND*"
++#define BFD_COM_SECTION_NAME "*COM*"
++#define BFD_IND_SECTION_NAME "*IND*"
++
++/* The absolute section. */
++extern asection bfd_abs_section;
++#define bfd_abs_section_ptr ((asection *) &bfd_abs_section)
++#define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr)
++/* Pointer to the undefined section. */
++extern asection bfd_und_section;
++#define bfd_und_section_ptr ((asection *) &bfd_und_section)
++#define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr)
++/* Pointer to the common section. */
++extern asection bfd_com_section;
++#define bfd_com_section_ptr ((asection *) &bfd_com_section)
++/* Pointer to the indirect section. */
++extern asection bfd_ind_section;
++#define bfd_ind_section_ptr ((asection *) &bfd_ind_section)
++#define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr)
++
++#define bfd_is_const_section(SEC) \
++ ( ((SEC) == bfd_abs_section_ptr) \
++ || ((SEC) == bfd_und_section_ptr) \
++ || ((SEC) == bfd_com_section_ptr) \
++ || ((SEC) == bfd_ind_section_ptr))
++
++extern const struct bfd_symbol * const bfd_abs_symbol;
++extern const struct bfd_symbol * const bfd_com_symbol;
++extern const struct bfd_symbol * const bfd_und_symbol;
++extern const struct bfd_symbol * const bfd_ind_symbol;
++
++/* Macros to handle insertion and deletion of a bfd's sections. These
++ only handle the list pointers, ie. do not adjust section_count,
++ target_index etc. */
++#define bfd_section_list_remove(ABFD, S) \
++ do \
++ { \
++ asection *_s = S; \
++ asection *_next = _s->next; \
++ asection *_prev = _s->prev; \
++ if (_prev) \
++ _prev->next = _next; \
++ else \
++ (ABFD)->sections = _next; \
++ if (_next) \
++ _next->prev = _prev; \
++ else \
++ (ABFD)->section_last = _prev; \
++ } \
++ while (0)
++#define bfd_section_list_append(ABFD, S) \
++ do \
++ { \
++ asection *_s = S; \
++ bfd *_abfd = ABFD; \
++ _s->next = NULL; \
++ if (_abfd->section_last) \
++ { \
++ _s->prev = _abfd->section_last; \
++ _abfd->section_last->next = _s; \
++ } \
++ else \
++ { \
++ _s->prev = NULL; \
++ _abfd->sections = _s; \
++ } \
++ _abfd->section_last = _s; \
++ } \
++ while (0)
++#define bfd_section_list_prepend(ABFD, S) \
++ do \
++ { \
++ asection *_s = S; \
++ bfd *_abfd = ABFD; \
++ _s->prev = NULL; \
++ if (_abfd->sections) \
++ { \
++ _s->next = _abfd->sections; \
++ _abfd->sections->prev = _s; \
++ } \
++ else \
++ { \
++ _s->next = NULL; \
++ _abfd->section_last = _s; \
++ } \
++ _abfd->sections = _s; \
++ } \
++ while (0)
++#define bfd_section_list_insert_after(ABFD, A, S) \
++ do \
++ { \
++ asection *_a = A; \
++ asection *_s = S; \
++ asection *_next = _a->next; \
++ _s->next = _next; \
++ _s->prev = _a; \
++ _a->next = _s; \
++ if (_next) \
++ _next->prev = _s; \
++ else \
++ (ABFD)->section_last = _s; \
++ } \
++ while (0)
++#define bfd_section_list_insert_before(ABFD, B, S) \
++ do \
++ { \
++ asection *_b = B; \
++ asection *_s = S; \
++ asection *_prev = _b->prev; \
++ _s->prev = _prev; \
++ _s->next = _b; \
++ _b->prev = _s; \
++ if (_prev) \
++ _prev->next = _s; \
++ else \
++ (ABFD)->sections = _s; \
++ } \
++ while (0)
++#define bfd_section_removed_from_list(ABFD, S) \
++ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S))
++
++void bfd_section_list_clear (bfd *);
++
++asection *bfd_get_section_by_name (bfd *abfd, const char *name);
++
++asection *bfd_get_section_by_name_if
++ (bfd *abfd,
++ const char *name,
++ bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj),
++ void *obj);
++
++char *bfd_get_unique_section_name
++ (bfd *abfd, const char *templat, int *count);
++
++asection *bfd_make_section_old_way (bfd *abfd, const char *name);
++
++asection *bfd_make_section_anyway_with_flags
++ (bfd *abfd, const char *name, flagword flags);
++
++asection *bfd_make_section_anyway (bfd *abfd, const char *name);
++
++asection *bfd_make_section_with_flags
++ (bfd *, const char *name, flagword flags);
++
++asection *bfd_make_section (bfd *, const char *name);
++
++bfd_boolean bfd_set_section_flags
++ (bfd *abfd, asection *sec, flagword flags);
++
++void bfd_map_over_sections
++ (bfd *abfd,
++ void (*func) (bfd *abfd, asection *sect, void *obj),
++ void *obj);
++
++asection *bfd_sections_find_if
++ (bfd *abfd,
++ bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj),
++ void *obj);
++
++bfd_boolean bfd_set_section_size
++ (bfd *abfd, asection *sec, bfd_size_type val);
++
++bfd_boolean bfd_set_section_contents
++ (bfd *abfd, asection *section, const void *data,
++ file_ptr offset, bfd_size_type count);
++
++bfd_boolean bfd_get_section_contents
++ (bfd *abfd, asection *section, void *location, file_ptr offset,
++ bfd_size_type count);
++
++bfd_boolean bfd_malloc_and_get_section
++ (bfd *abfd, asection *section, bfd_byte **buf);
++
++bfd_boolean bfd_copy_private_section_data
++ (bfd *ibfd, asection *isec, bfd *obfd, asection *osec);
++
++#define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \
++ BFD_SEND (obfd, _bfd_copy_private_section_data, \
++ (ibfd, isection, obfd, osection))
++bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec);
++
++bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group);
++
++/* Extracted from archures.c. */
++enum bfd_architecture
++{
++ bfd_arch_unknown, /* File arch not known. */
++ bfd_arch_obscure, /* Arch known, not one of these. */
++ bfd_arch_m68k, /* Motorola 68xxx */
++#define bfd_mach_m68000 1
++#define bfd_mach_m68008 2
++#define bfd_mach_m68010 3
++#define bfd_mach_m68020 4
++#define bfd_mach_m68030 5
++#define bfd_mach_m68040 6
++#define bfd_mach_m68060 7
++#define bfd_mach_cpu32 8
++#define bfd_mach_mcf5200 9
++#define bfd_mach_mcf5206e 10
++#define bfd_mach_mcf5307 11
++#define bfd_mach_mcf5407 12
++#define bfd_mach_mcf528x 13
++#define bfd_mach_mcfv4e 14
++#define bfd_mach_mcf521x 15
++#define bfd_mach_mcf5249 16
++#define bfd_mach_mcf547x 17
++#define bfd_mach_mcf548x 18
++ bfd_arch_vax, /* DEC Vax */
++ bfd_arch_i960, /* Intel 960 */
++ /* The order of the following is important.
++ A lower number indicates a machine type that
++ only accepts a subset of the instructions
++ available to machines with higher numbers.
++ The exception is the "ca", which is
++ incompatible with all other machines except
++ "core". */
++
++#define bfd_mach_i960_core 1
++#define bfd_mach_i960_ka_sa 2
++#define bfd_mach_i960_kb_sb 3
++#define bfd_mach_i960_mc 4
++#define bfd_mach_i960_xa 5
++#define bfd_mach_i960_ca 6
++#define bfd_mach_i960_jx 7
++#define bfd_mach_i960_hx 8
++
++ bfd_arch_or32, /* OpenRISC 32 */
++
++ bfd_arch_a29k, /* AMD 29000 */
++ bfd_arch_sparc, /* SPARC */
++#define bfd_mach_sparc 1
++/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */
++#define bfd_mach_sparc_sparclet 2
++#define bfd_mach_sparc_sparclite 3
++#define bfd_mach_sparc_v8plus 4
++#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */
++#define bfd_mach_sparc_sparclite_le 6
++#define bfd_mach_sparc_v9 7
++#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */
++#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */
++#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */
++/* Nonzero if MACH has the v9 instruction set. */
++#define bfd_mach_sparc_v9_p(mach) \
++ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \
++ && (mach) != bfd_mach_sparc_sparclite_le)
++/* Nonzero if MACH is a 64 bit sparc architecture. */
++#define bfd_mach_sparc_64bit_p(mach) \
++ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb)
++ bfd_arch_mips, /* MIPS Rxxxx */
++#define bfd_mach_mips3000 3000
++#define bfd_mach_mips3900 3900
++#define bfd_mach_mips4000 4000
++#define bfd_mach_mips4010 4010
++#define bfd_mach_mips4100 4100
++#define bfd_mach_mips4111 4111
++#define bfd_mach_mips4120 4120
++#define bfd_mach_mips4300 4300
++#define bfd_mach_mips4400 4400
++#define bfd_mach_mips4600 4600
++#define bfd_mach_mips4650 4650
++#define bfd_mach_mips5000 5000
++#define bfd_mach_mips5400 5400
++#define bfd_mach_mips5500 5500
++#define bfd_mach_mips6000 6000
++#define bfd_mach_mips7000 7000
++#define bfd_mach_mips8000 8000
++#define bfd_mach_mips9000 9000
++#define bfd_mach_mips10000 10000
++#define bfd_mach_mips12000 12000
++#define bfd_mach_mips16 16
++#define bfd_mach_mips5 5
++#define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */
++#define bfd_mach_mipsisa32 32
++#define bfd_mach_mipsisa32r2 33
++#define bfd_mach_mipsisa64 64
++#define bfd_mach_mipsisa64r2 65
++ bfd_arch_i386, /* Intel 386 */
++#define bfd_mach_i386_i386 1
++#define bfd_mach_i386_i8086 2
++#define bfd_mach_i386_i386_intel_syntax 3
++#define bfd_mach_x86_64 64
++#define bfd_mach_x86_64_intel_syntax 65
++ bfd_arch_we32k, /* AT&T WE32xxx */
++ bfd_arch_tahoe, /* CCI/Harris Tahoe */
++ bfd_arch_i860, /* Intel 860 */
++ bfd_arch_i370, /* IBM 360/370 Mainframes */
++ bfd_arch_romp, /* IBM ROMP PC/RT */
++ bfd_arch_alliant, /* Alliant */
++ bfd_arch_convex, /* Convex */
++ bfd_arch_m88k, /* Motorola 88xxx */
++ bfd_arch_m98k, /* Motorola 98xxx */
++ bfd_arch_pyramid, /* Pyramid Technology */
++ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */
++#define bfd_mach_h8300 1
++#define bfd_mach_h8300h 2
++#define bfd_mach_h8300s 3
++#define bfd_mach_h8300hn 4
++#define bfd_mach_h8300sn 5
++#define bfd_mach_h8300sx 6
++#define bfd_mach_h8300sxn 7
++ bfd_arch_pdp11, /* DEC PDP-11 */
++ bfd_arch_powerpc, /* PowerPC */
++#define bfd_mach_ppc 32
++#define bfd_mach_ppc64 64
++#define bfd_mach_ppc_403 403
++#define bfd_mach_ppc_403gc 4030
++#define bfd_mach_ppc_505 505
++#define bfd_mach_ppc_601 601
++#define bfd_mach_ppc_602 602
++#define bfd_mach_ppc_603 603
++#define bfd_mach_ppc_ec603e 6031
++#define bfd_mach_ppc_604 604
++#define bfd_mach_ppc_620 620
++#define bfd_mach_ppc_630 630
++#define bfd_mach_ppc_750 750
++#define bfd_mach_ppc_860 860
++#define bfd_mach_ppc_a35 35
++#define bfd_mach_ppc_rs64ii 642
++#define bfd_mach_ppc_rs64iii 643
++#define bfd_mach_ppc_7400 7400
++#define bfd_mach_ppc_e500 500
++ bfd_arch_rs6000, /* IBM RS/6000 */
++#define bfd_mach_rs6k 6000
++#define bfd_mach_rs6k_rs1 6001
++#define bfd_mach_rs6k_rsc 6003
++#define bfd_mach_rs6k_rs2 6002
++ bfd_arch_hppa, /* HP PA RISC */
++#define bfd_mach_hppa10 10
++#define bfd_mach_hppa11 11
++#define bfd_mach_hppa20 20
++#define bfd_mach_hppa20w 25
++ bfd_arch_d10v, /* Mitsubishi D10V */
++#define bfd_mach_d10v 1
++#define bfd_mach_d10v_ts2 2
++#define bfd_mach_d10v_ts3 3
++ bfd_arch_d30v, /* Mitsubishi D30V */
++ bfd_arch_dlx, /* DLX */
++ bfd_arch_m68hc11, /* Motorola 68HC11 */
++ bfd_arch_m68hc12, /* Motorola 68HC12 */
++#define bfd_mach_m6812_default 0
++#define bfd_mach_m6812 1
++#define bfd_mach_m6812s 2
++ bfd_arch_z8k, /* Zilog Z8000 */
++#define bfd_mach_z8001 1
++#define bfd_mach_z8002 2
++ bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */
++ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */
++#define bfd_mach_sh 1
++#define bfd_mach_sh2 0x20
++#define bfd_mach_sh_dsp 0x2d
++#define bfd_mach_sh2a 0x2a
++#define bfd_mach_sh2a_nofpu 0x2b
++#define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1
++#define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2
++#define bfd_mach_sh2a_or_sh4 0x2a3
++#define bfd_mach_sh2a_or_sh3e 0x2a4
++#define bfd_mach_sh2e 0x2e
++#define bfd_mach_sh3 0x30
++#define bfd_mach_sh3_nommu 0x31
++#define bfd_mach_sh3_dsp 0x3d
++#define bfd_mach_sh3e 0x3e
++#define bfd_mach_sh4 0x40
++#define bfd_mach_sh4_nofpu 0x41
++#define bfd_mach_sh4_nommu_nofpu 0x42
++#define bfd_mach_sh4a 0x4a
++#define bfd_mach_sh4a_nofpu 0x4b
++#define bfd_mach_sh4al_dsp 0x4d
++#define bfd_mach_sh5 0x50
++ bfd_arch_alpha, /* Dec Alpha */
++#define bfd_mach_alpha_ev4 0x10
++#define bfd_mach_alpha_ev5 0x20
++#define bfd_mach_alpha_ev6 0x30
++ bfd_arch_arm, /* Advanced Risc Machines ARM. */
++#define bfd_mach_arm_unknown 0
++#define bfd_mach_arm_2 1
++#define bfd_mach_arm_2a 2
++#define bfd_mach_arm_3 3
++#define bfd_mach_arm_3M 4
++#define bfd_mach_arm_4 5
++#define bfd_mach_arm_4T 6
++#define bfd_mach_arm_5 7
++#define bfd_mach_arm_5T 8
++#define bfd_mach_arm_5TE 9
++#define bfd_mach_arm_XScale 10
++#define bfd_mach_arm_ep9312 11
++#define bfd_mach_arm_iWMMXt 12
++ bfd_arch_ns32k, /* National Semiconductors ns32000 */
++ bfd_arch_w65, /* WDC 65816 */
++ bfd_arch_tic30, /* Texas Instruments TMS320C30 */
++ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */
++#define bfd_mach_tic3x 30
++#define bfd_mach_tic4x 40
++ bfd_arch_tic54x, /* Texas Instruments TMS320C54X */
++ bfd_arch_tic80, /* TI TMS320c80 (MVP) */
++ bfd_arch_v850, /* NEC V850 */
++#define bfd_mach_v850 1
++#define bfd_mach_v850e 'E'
++#define bfd_mach_v850e1 '1'
++ bfd_arch_arc, /* ARC Cores */
++#define bfd_mach_arc_5 5
++#define bfd_mach_arc_6 6
++#define bfd_mach_arc_7 7
++#define bfd_mach_arc_8 8
++ bfd_arch_m32c, /* Renesas M16C/M32C. */
++#define bfd_mach_m16c 0x75
++#define bfd_mach_m32c 0x78
++ bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */
++#define bfd_mach_m32r 1 /* For backwards compatibility. */
++#define bfd_mach_m32rx 'x'
++#define bfd_mach_m32r2 '2'
++ bfd_arch_mn10200, /* Matsushita MN10200 */
++ bfd_arch_mn10300, /* Matsushita MN10300 */
++#define bfd_mach_mn10300 300
++#define bfd_mach_am33 330
++#define bfd_mach_am33_2 332
++ bfd_arch_fr30,
++#define bfd_mach_fr30 0x46523330
++ bfd_arch_frv,
++#define bfd_mach_frv 1
++#define bfd_mach_frvsimple 2
++#define bfd_mach_fr300 300
++#define bfd_mach_fr400 400
++#define bfd_mach_fr450 450
++#define bfd_mach_frvtomcat 499 /* fr500 prototype */
++#define bfd_mach_fr500 500
++#define bfd_mach_fr550 550
++ bfd_arch_mcore,
++ bfd_arch_ia64, /* HP/Intel ia64 */
++#define bfd_mach_ia64_elf64 64
++#define bfd_mach_ia64_elf32 32
++ bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */
++#define bfd_mach_ip2022 1
++#define bfd_mach_ip2022ext 2
++ bfd_arch_iq2000, /* Vitesse IQ2000. */
++#define bfd_mach_iq2000 1
++#define bfd_mach_iq10 2
++ bfd_arch_ms1,
++#define bfd_mach_ms1 1
++#define bfd_mach_mrisc2 2
++ bfd_arch_pj,
++ bfd_arch_avr, /* Atmel AVR microcontrollers. */
++#define bfd_mach_avr1 1
++#define bfd_mach_avr2 2
++#define bfd_mach_avr3 3
++#define bfd_mach_avr4 4
++#define bfd_mach_avr5 5
++ bfd_arch_cr16c, /* National Semiconductor CompactRISC. */
++#define bfd_mach_cr16c 1
++ bfd_arch_crx, /* National Semiconductor CRX. */
++#define bfd_mach_crx 1
++ bfd_arch_cris, /* Axis CRIS */
++#define bfd_mach_cris_v0_v10 255
++#define bfd_mach_cris_v32 32
++#define bfd_mach_cris_v10_v32 1032
++ bfd_arch_s390, /* IBM s390 */
++#define bfd_mach_s390_31 31
++#define bfd_mach_s390_64 64
++ bfd_arch_openrisc, /* OpenRISC */
++ bfd_arch_mmix, /* Donald Knuth's educational processor. */
++ bfd_arch_xstormy16,
++#define bfd_mach_xstormy16 1
++ bfd_arch_msp430, /* Texas Instruments MSP430 architecture. */
++#define bfd_mach_msp11 11
++#define bfd_mach_msp110 110
++#define bfd_mach_msp12 12
++#define bfd_mach_msp13 13
++#define bfd_mach_msp14 14
++#define bfd_mach_msp15 15
++#define bfd_mach_msp16 16
++#define bfd_mach_msp31 31
++#define bfd_mach_msp32 32
++#define bfd_mach_msp33 33
++#define bfd_mach_msp41 41
++#define bfd_mach_msp42 42
++#define bfd_mach_msp43 43
++#define bfd_mach_msp44 44
++ bfd_arch_xtensa, /* Tensilica's Xtensa cores. */
++#define bfd_mach_xtensa 1
++ bfd_arch_maxq, /* Dallas MAXQ 10/20 */
++#define bfd_mach_maxq10 10
++#define bfd_mach_maxq20 20
++ bfd_arch_last
++ };
++
++typedef struct bfd_arch_info
++{
++ int bits_per_word; /* Number of bits in a machine word. */
++ int bits_per_address; /* Number of bits in an address. */
++ int bits_per_byte; /* Number of bits in a byte (normally 8). */
++ enum bfd_architecture arch; /* Architecture family (see enum above). */
++ unsigned long mach; /* Machine variant, one of the bfd_mach_* values. */
++ const char *arch_name; /* Canonical architecture name, e.g. "i386". */
++ const char *printable_name; /* Human-readable arch:mach name. */
++ unsigned int section_align_power; /* Default section alignment, as a power of 2. */
++ /* TRUE if this is the default machine for the architecture.
++ The default arch should be the first entry for an arch so that
++ all the entries for that arch can be accessed via <<next>>. */
++ bfd_boolean the_default;
++ const struct bfd_arch_info * (*compatible)
++ (const struct bfd_arch_info *a, const struct bfd_arch_info *b); /* Presumably returns an arch info usable for both a and b, or NULL -- confirm against archures.c. */
++
++ bfd_boolean (*scan) (const struct bfd_arch_info *, const char *); /* Presumably matches an arch/mach name string -- confirm against archures.c. */
++
++ const struct bfd_arch_info *next; /* Next entry in the list for this architecture. */
++}
++bfd_arch_info_type;
++
++const char *bfd_printable_name (bfd *abfd);
++
++const bfd_arch_info_type *bfd_scan_arch (const char *string);
++
++const char **bfd_arch_list (void);
++
++const bfd_arch_info_type *bfd_arch_get_compatible
++ (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns);
++
++void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg);
++
++enum bfd_architecture bfd_get_arch (bfd *abfd);
++
++unsigned long bfd_get_mach (bfd *abfd);
++
++unsigned int bfd_arch_bits_per_byte (bfd *abfd);
++
++unsigned int bfd_arch_bits_per_address (bfd *abfd);
++
++const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd);
++
++const bfd_arch_info_type *bfd_lookup_arch
++ (enum bfd_architecture arch, unsigned long machine);
++
++const char *bfd_printable_arch_mach
++ (enum bfd_architecture arch, unsigned long machine);
++
++unsigned int bfd_octets_per_byte (bfd *abfd);
++
++unsigned int bfd_arch_mach_octets_per_byte
++ (enum bfd_architecture arch, unsigned long machine);
++
++/* Extracted from reloc.c. */
++typedef enum bfd_reloc_status
++{
++ /* No errors detected. */
++ bfd_reloc_ok,
++
++ /* The relocation was performed, but there was an overflow. */
++ bfd_reloc_overflow,
++
++ /* The address to relocate was not within the section supplied. */
++ bfd_reloc_outofrange,
++
++ /* Used by special functions. */
++ bfd_reloc_continue,
++
++ /* Unsupported relocation size requested. */
++ bfd_reloc_notsupported,
++
++ /* Unused. */
++ bfd_reloc_other,
++
++ /* The symbol to relocate against was undefined. */
++ bfd_reloc_undefined,
++
++ /* The relocation was performed, but may not be ok - presently
++ generated only when linking i960 coff files with i960 b.out
++ symbols. If this type is returned, the error_message argument
++ to bfd_perform_relocation will be set. */
++ bfd_reloc_dangerous
++ }
++ bfd_reloc_status_type;
++
++
++typedef struct reloc_cache_entry
++{
++ /* A pointer into the canonical table of pointers to the symbol
++ the relocation is made against. */
++ struct bfd_symbol **sym_ptr_ptr;
++
++ /* Offset of the item to relocate, within its section. */
++ bfd_size_type address;
++
++ /* Addend to fold into the relocation value. */
++ bfd_vma addend;
++
++ /* Pointer to how to perform the required relocation. */
++ reloc_howto_type *howto;
++
++}
++arelent;
++
++enum complain_overflow
++{
++ /* Do not complain on overflow. */
++ complain_overflow_dont,
++
++ /* Complain if the bitfield overflows, whether it is considered
++ as signed or unsigned. */
++ complain_overflow_bitfield,
++
++ /* Complain if the value overflows when considered as signed
++ number. */
++ complain_overflow_signed,
++
++ /* Complain if the value overflows when considered as an
++ unsigned number. */
++ complain_overflow_unsigned
++};
++
++struct reloc_howto_struct
++{
++ /* The type field has mainly a documentary use - the back end can
++ do what it wants with it, though normally the back end's
++ external idea of what a reloc number is stored
++ in this field. For example, a PC relative word relocation
++ in a coff environment has the type 023 - because that's
++ what the outside world calls a R_PCRWORD reloc. */
++ unsigned int type;
++
++ /* The value the final relocation is shifted right by. This drops
++ unwanted data from the relocation. */
++ unsigned int rightshift;
++
++ /* The size of the item to be relocated. This is *not* a
++ power-of-two measure. To get the number of bytes operated
++ on by a type of relocation, use bfd_get_reloc_size. */
++ int size;
++
++ /* The number of bits in the item to be relocated. This is used
++ when doing overflow checking. */
++ unsigned int bitsize;
++
++ /* Notes that the relocation is relative to the location in the
++ data section of the addend. The relocation function will
++ subtract from the relocation value the address of the location
++ being relocated. */
++ bfd_boolean pc_relative;
++
++ /* The bit position of the reloc value in the destination.
++ The relocated value is left shifted by this amount. */
++ unsigned int bitpos;
++
++ /* What type of overflow error should be checked for when
++ relocating. */
++ enum complain_overflow complain_on_overflow;
++
++ /* If this field is non null, then the supplied function is
++ called rather than the normal function. This allows really
++ strange relocation methods to be accommodated (e.g., i960 callj
++ instructions). */
++ bfd_reloc_status_type (*special_function)
++ (bfd *, arelent *, struct bfd_symbol *, void *, asection *,
++ bfd *, char **);
++
++ /* The textual name of the relocation type. */
++ char *name;
++
++ /* Some formats record a relocation addend in the section contents
++ rather than with the relocation. For ELF formats this is the
++ distinction between USE_REL and USE_RELA (though the code checks
++ for USE_REL == 1/0). The value of this field is TRUE if the
++ addend is recorded with the section contents; when performing a
++ partial link (ld -r) the section contents (the data) will be
++ modified. The value of this field is FALSE if addends are
++ recorded with the relocation (in arelent.addend); when performing
++ a partial link the relocation will be modified.
++ All relocations for all ELF USE_RELA targets should set this field
++ to FALSE (values of TRUE should be looked on with suspicion).
++ However, the converse is not true: not all relocations of all ELF
++ USE_REL targets set this field to TRUE. Why this is so is peculiar
++ to each particular target. For relocs that aren't used in partial
++ links (e.g. GOT stuff) it doesn't matter what this is set to. */
++ bfd_boolean partial_inplace;
++
++ /* src_mask selects the part of the instruction (or data) to be used
++ in the relocation sum. If the target relocations don't have an
++ addend in the reloc, eg. ELF USE_REL, src_mask will normally equal
++ dst_mask to extract the addend from the section contents. If
++ relocations do have an addend in the reloc, eg. ELF USE_RELA, this
++ field should be zero. Non-zero values for ELF USE_RELA targets are
++ bogus as in those cases the value in the dst_mask part of the
++ section contents should be treated as garbage. */
++ bfd_vma src_mask;
++
++ /* dst_mask selects which parts of the instruction (or data) are
++ replaced with a relocated value. */
++ bfd_vma dst_mask;
++
++ /* When some formats create PC relative instructions, they leave
++ the value of the pc of the place being relocated in the offset
++ slot of the instruction, so that a PC relative relocation can
++ be made just by adding in an ordinary offset (e.g., sun3 a.out).
++ Some formats leave the displacement part of an instruction
++ empty (e.g., m88k bcs); this flag signals the fact. */
++ bfd_boolean pcrel_offset;
++};
++
++#define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \
++ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC }
++#define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \
++ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \
++ NAME, FALSE, 0, 0, IN)
++
++#define EMPTY_HOWTO(C) \
++ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \
++ NULL, FALSE, 0, 0, FALSE)
++
++#define HOWTO_PREPARE(relocation, symbol) \
++ { \
++ if (symbol != NULL) \
++ { \
++ if (bfd_is_com_section (symbol->section)) \
++ { \
++ relocation = 0; \
++ } \
++ else \
++ { \
++ relocation = symbol->value; \
++ } \
++ } \
++ }
++
++unsigned int bfd_get_reloc_size (reloc_howto_type *);
++
++typedef struct relent_chain
++{
++ arelent relent; /* One relocation record. */
++ struct relent_chain *next; /* Next link in the chain, or NULL at the end. */
++}
++arelent_chain;
++
++bfd_reloc_status_type bfd_check_overflow
++ (enum complain_overflow how,
++ unsigned int bitsize,
++ unsigned int rightshift,
++ unsigned int addrsize,
++ bfd_vma relocation);
++
++bfd_reloc_status_type bfd_perform_relocation
++ (bfd *abfd,
++ arelent *reloc_entry,
++ void *data,
++ asection *input_section,
++ bfd *output_bfd,
++ char **error_message);
++
++bfd_reloc_status_type bfd_install_relocation
++ (bfd *abfd,
++ arelent *reloc_entry,
++ void *data, bfd_vma data_start,
++ asection *input_section,
++ char **error_message);
++
++enum bfd_reloc_code_real {
++ _dummy_first_bfd_reloc_code_real,
++
++
++/* Basic absolute relocations of N bits. */
++ BFD_RELOC_64,
++ BFD_RELOC_32,
++ BFD_RELOC_26,
++ BFD_RELOC_24,
++ BFD_RELOC_16,
++ BFD_RELOC_14,
++ BFD_RELOC_8,
++
++/* PC-relative relocations. Sometimes these are relative to the address
++of the relocation itself; sometimes they are relative to the start of
++the section containing the relocation. It depends on the specific target.
++
++The 24-bit relocation is used in some Intel 960 configurations. */
++ BFD_RELOC_64_PCREL,
++ BFD_RELOC_32_PCREL,
++ BFD_RELOC_24_PCREL,
++ BFD_RELOC_16_PCREL,
++ BFD_RELOC_12_PCREL,
++ BFD_RELOC_8_PCREL,
++
++/* Section relative relocations. Some targets need this for DWARF2. */
++ BFD_RELOC_32_SECREL,
++
++/* For ELF. */
++ BFD_RELOC_32_GOT_PCREL,
++ BFD_RELOC_16_GOT_PCREL,
++ BFD_RELOC_8_GOT_PCREL,
++ BFD_RELOC_32_GOTOFF,
++ BFD_RELOC_16_GOTOFF,
++ BFD_RELOC_LO16_GOTOFF,
++ BFD_RELOC_HI16_GOTOFF,
++ BFD_RELOC_HI16_S_GOTOFF,
++ BFD_RELOC_8_GOTOFF,
++ BFD_RELOC_64_PLT_PCREL,
++ BFD_RELOC_32_PLT_PCREL,
++ BFD_RELOC_24_PLT_PCREL,
++ BFD_RELOC_16_PLT_PCREL,
++ BFD_RELOC_8_PLT_PCREL,
++ BFD_RELOC_64_PLTOFF,
++ BFD_RELOC_32_PLTOFF,
++ BFD_RELOC_16_PLTOFF,
++ BFD_RELOC_LO16_PLTOFF,
++ BFD_RELOC_HI16_PLTOFF,
++ BFD_RELOC_HI16_S_PLTOFF,
++ BFD_RELOC_8_PLTOFF,
++
++/* Relocations used by 68K ELF. */
++ BFD_RELOC_68K_GLOB_DAT,
++ BFD_RELOC_68K_JMP_SLOT,
++ BFD_RELOC_68K_RELATIVE,
++
++/* Linkage-table relative. */
++ BFD_RELOC_32_BASEREL,
++ BFD_RELOC_16_BASEREL,
++ BFD_RELOC_LO16_BASEREL,
++ BFD_RELOC_HI16_BASEREL,
++ BFD_RELOC_HI16_S_BASEREL,
++ BFD_RELOC_8_BASEREL,
++ BFD_RELOC_RVA,
++
++/* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */
++ BFD_RELOC_8_FFnn,
++
++/* These PC-relative relocations are stored as word displacements --
++i.e., byte displacements shifted right two bits. The 30-bit word
++displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the
++SPARC. (SPARC tools generally refer to this as <<WDISP30>>.) The
++signed 16-bit displacement is used on the MIPS, and the 23-bit
++displacement is used on the Alpha. */
++ BFD_RELOC_32_PCREL_S2,
++ BFD_RELOC_16_PCREL_S2,
++ BFD_RELOC_23_PCREL_S2,
++
++/* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of
++the target word. These are used on the SPARC. */
++ BFD_RELOC_HI22,
++ BFD_RELOC_LO10,
++
++/* For systems that allocate a Global Pointer register, these are
++displacements off that register. These relocation types are
++handled specially, because the value the register will have is
++decided relatively late. */
++ BFD_RELOC_GPREL16,
++ BFD_RELOC_GPREL32,
++
++/* Reloc types used for i960/b.out. */
++ BFD_RELOC_I960_CALLJ,
++
++/* SPARC ELF relocations. There is probably some overlap with other
++relocation types already defined. */
++ BFD_RELOC_NONE,
++ BFD_RELOC_SPARC_WDISP22,
++ BFD_RELOC_SPARC22,
++ BFD_RELOC_SPARC13,
++ BFD_RELOC_SPARC_GOT10,
++ BFD_RELOC_SPARC_GOT13,
++ BFD_RELOC_SPARC_GOT22,
++ BFD_RELOC_SPARC_PC10,
++ BFD_RELOC_SPARC_PC22,
++ BFD_RELOC_SPARC_WPLT30,
++ BFD_RELOC_SPARC_COPY,
++ BFD_RELOC_SPARC_GLOB_DAT,
++ BFD_RELOC_SPARC_JMP_SLOT,
++ BFD_RELOC_SPARC_RELATIVE,
++ BFD_RELOC_SPARC_UA16,
++ BFD_RELOC_SPARC_UA32,
++ BFD_RELOC_SPARC_UA64,
++
++/* I think these are specific to SPARC a.out (e.g., Sun 4). */
++ BFD_RELOC_SPARC_BASE13,
++ BFD_RELOC_SPARC_BASE22,
++
++/* SPARC64 relocations */
++#define BFD_RELOC_SPARC_64 BFD_RELOC_64
++ BFD_RELOC_SPARC_10,
++ BFD_RELOC_SPARC_11,
++ BFD_RELOC_SPARC_OLO10,
++ BFD_RELOC_SPARC_HH22,
++ BFD_RELOC_SPARC_HM10,
++ BFD_RELOC_SPARC_LM22,
++ BFD_RELOC_SPARC_PC_HH22,
++ BFD_RELOC_SPARC_PC_HM10,
++ BFD_RELOC_SPARC_PC_LM22,
++ BFD_RELOC_SPARC_WDISP16,
++ BFD_RELOC_SPARC_WDISP19,
++ BFD_RELOC_SPARC_7,
++ BFD_RELOC_SPARC_6,
++ BFD_RELOC_SPARC_5,
++#define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL
++ BFD_RELOC_SPARC_PLT32,
++ BFD_RELOC_SPARC_PLT64,
++ BFD_RELOC_SPARC_HIX22,
++ BFD_RELOC_SPARC_LOX10,
++ BFD_RELOC_SPARC_H44,
++ BFD_RELOC_SPARC_M44,
++ BFD_RELOC_SPARC_L44,
++ BFD_RELOC_SPARC_REGISTER,
++
++/* SPARC little endian relocation */
++ BFD_RELOC_SPARC_REV32,
++
++/* SPARC TLS relocations */
++ BFD_RELOC_SPARC_TLS_GD_HI22,
++ BFD_RELOC_SPARC_TLS_GD_LO10,
++ BFD_RELOC_SPARC_TLS_GD_ADD,
++ BFD_RELOC_SPARC_TLS_GD_CALL,
++ BFD_RELOC_SPARC_TLS_LDM_HI22,
++ BFD_RELOC_SPARC_TLS_LDM_LO10,
++ BFD_RELOC_SPARC_TLS_LDM_ADD,
++ BFD_RELOC_SPARC_TLS_LDM_CALL,
++ BFD_RELOC_SPARC_TLS_LDO_HIX22,
++ BFD_RELOC_SPARC_TLS_LDO_LOX10,
++ BFD_RELOC_SPARC_TLS_LDO_ADD,
++ BFD_RELOC_SPARC_TLS_IE_HI22,
++ BFD_RELOC_SPARC_TLS_IE_LO10,
++ BFD_RELOC_SPARC_TLS_IE_LD,
++ BFD_RELOC_SPARC_TLS_IE_LDX,
++ BFD_RELOC_SPARC_TLS_IE_ADD,
++ BFD_RELOC_SPARC_TLS_LE_HIX22,
++ BFD_RELOC_SPARC_TLS_LE_LOX10,
++ BFD_RELOC_SPARC_TLS_DTPMOD32,
++ BFD_RELOC_SPARC_TLS_DTPMOD64,
++ BFD_RELOC_SPARC_TLS_DTPOFF32,
++ BFD_RELOC_SPARC_TLS_DTPOFF64,
++ BFD_RELOC_SPARC_TLS_TPOFF32,
++ BFD_RELOC_SPARC_TLS_TPOFF64,
++
++/* Alpha ECOFF and ELF relocations. Some of these treat the symbol or
++"addend" in some special way.
++For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when
++writing; when reading, it will be the absolute section symbol. The
++addend is the displacement in bytes of the "lda" instruction from
++the "ldah" instruction (which is at the address of this reloc). */
++ BFD_RELOC_ALPHA_GPDISP_HI16,
++
++/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as
++with GPDISP_HI16 relocs. The addend is ignored when writing the
++relocations out, and is filled in with the file's GP value on
++reading, for convenience. */
++ BFD_RELOC_ALPHA_GPDISP_LO16,
++
++/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16
++relocation except that there is no accompanying GPDISP_LO16
++relocation. */
++ BFD_RELOC_ALPHA_GPDISP,
++
++/* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference;
++the assembler turns it into a LDQ instruction to load the address of
++the symbol, and then fills in a register in the real instruction.
++
++The LITERAL reloc, at the LDQ instruction, refers to the .lita
++section symbol. The addend is ignored when writing, but is filled
++in with the file's GP value on reading, for convenience, as with the
++GPDISP_LO16 reloc.
++
++The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16.
++It should refer to the symbol to be referenced, as with 16_GOTOFF,
++but it generates output not based on the position within the .got
++section, but relative to the GP value chosen for the file during the
++final link stage.
++
++The LITUSE reloc, on the instruction using the loaded address, gives
++information to the linker that it might be able to use to optimize
++away some literal section references. The symbol is ignored (read
++as the absolute section symbol), and the "addend" indicates the type
++of instruction using the register:
++1 - "memory" fmt insn
++2 - byte-manipulation (byte offset reg)
++3 - jsr (target of branch) */
++ BFD_RELOC_ALPHA_LITERAL,
++ BFD_RELOC_ALPHA_ELF_LITERAL,
++ BFD_RELOC_ALPHA_LITUSE,
++
++/* The HINT relocation indicates a value that should be filled into the
++"hint" field of a jmp/jsr/ret instruction, for possible branch-
++prediction logic which may be provided on some processors. */
++ BFD_RELOC_ALPHA_HINT,
++
++/* The LINKAGE relocation outputs a linkage pair in the object file,
++which is filled by the linker. */
++ BFD_RELOC_ALPHA_LINKAGE,
++
++/* The CODEADDR relocation outputs a STO_CA in the object file,
++which is filled by the linker. */
++ BFD_RELOC_ALPHA_CODEADDR,
++
++/* The GPREL_HI/LO relocations together form a 32-bit offset from the
++GP register. */
++ BFD_RELOC_ALPHA_GPREL_HI16,
++ BFD_RELOC_ALPHA_GPREL_LO16,
++
++/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must
++share a common GP, and the target address is adjusted for
++STO_ALPHA_STD_GPLOAD. */
++ BFD_RELOC_ALPHA_BRSGP,
++
++/* Alpha thread-local storage relocations. */
++ BFD_RELOC_ALPHA_TLSGD,
++ BFD_RELOC_ALPHA_TLSLDM,
++ BFD_RELOC_ALPHA_DTPMOD64,
++ BFD_RELOC_ALPHA_GOTDTPREL16,
++ BFD_RELOC_ALPHA_DTPREL64,
++ BFD_RELOC_ALPHA_DTPREL_HI16,
++ BFD_RELOC_ALPHA_DTPREL_LO16,
++ BFD_RELOC_ALPHA_DTPREL16,
++ BFD_RELOC_ALPHA_GOTTPREL16,
++ BFD_RELOC_ALPHA_TPREL64,
++ BFD_RELOC_ALPHA_TPREL_HI16,
++ BFD_RELOC_ALPHA_TPREL_LO16,
++ BFD_RELOC_ALPHA_TPREL16,
++
++/* Bits 27..2 of the relocation address shifted right 2 bits;
++simple reloc otherwise. */
++ BFD_RELOC_MIPS_JMP,
++
++/* The MIPS16 jump instruction. */
++ BFD_RELOC_MIPS16_JMP,
++
++/* MIPS16 GP relative reloc. */
++ BFD_RELOC_MIPS16_GPREL,
++
++/* High 16 bits of 32-bit value; simple reloc. */
++ BFD_RELOC_HI16,
++
++/* High 16 bits of 32-bit value but the low 16 bits will be sign
++extended and added to form the final result. If the low 16
++bits form a negative number, we need to add one to the high value
++to compensate for the borrow when the low bits are added. */
++ BFD_RELOC_HI16_S,
++
++/* Low 16 bits. */
++ BFD_RELOC_LO16,
++
++/* High 16 bits of 32-bit pc-relative value */
++ BFD_RELOC_HI16_PCREL,
++
++/* High 16 bits of 32-bit pc-relative value, adjusted */
++ BFD_RELOC_HI16_S_PCREL,
++
++/* Low 16 bits of pc-relative value */
++ BFD_RELOC_LO16_PCREL,
++
++/* MIPS16 high 16 bits of 32-bit value. */
++ BFD_RELOC_MIPS16_HI16,
++
++/* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign
++extended and added to form the final result. If the low 16
++bits form a negative number, we need to add one to the high value
++to compensate for the borrow when the low bits are added. */
++ BFD_RELOC_MIPS16_HI16_S,
++
++/* MIPS16 low 16 bits. */
++ BFD_RELOC_MIPS16_LO16,
++
++/* Relocation against a MIPS literal section. */
++ BFD_RELOC_MIPS_LITERAL,
++
++/* MIPS ELF relocations. */
++ BFD_RELOC_MIPS_GOT16,
++ BFD_RELOC_MIPS_CALL16,
++ BFD_RELOC_MIPS_GOT_HI16,
++ BFD_RELOC_MIPS_GOT_LO16,
++ BFD_RELOC_MIPS_CALL_HI16,
++ BFD_RELOC_MIPS_CALL_LO16,
++ BFD_RELOC_MIPS_SUB,
++ BFD_RELOC_MIPS_GOT_PAGE,
++ BFD_RELOC_MIPS_GOT_OFST,
++ BFD_RELOC_MIPS_GOT_DISP,
++ BFD_RELOC_MIPS_SHIFT5,
++ BFD_RELOC_MIPS_SHIFT6,
++ BFD_RELOC_MIPS_INSERT_A,
++ BFD_RELOC_MIPS_INSERT_B,
++ BFD_RELOC_MIPS_DELETE,
++ BFD_RELOC_MIPS_HIGHEST,
++ BFD_RELOC_MIPS_HIGHER,
++ BFD_RELOC_MIPS_SCN_DISP,
++ BFD_RELOC_MIPS_REL16,
++ BFD_RELOC_MIPS_RELGOT,
++ BFD_RELOC_MIPS_JALR,
++ BFD_RELOC_MIPS_TLS_DTPMOD32,
++ BFD_RELOC_MIPS_TLS_DTPREL32,
++ BFD_RELOC_MIPS_TLS_DTPMOD64,
++ BFD_RELOC_MIPS_TLS_DTPREL64,
++ BFD_RELOC_MIPS_TLS_GD,
++ BFD_RELOC_MIPS_TLS_LDM,
++ BFD_RELOC_MIPS_TLS_DTPREL_HI16,
++ BFD_RELOC_MIPS_TLS_DTPREL_LO16,
++ BFD_RELOC_MIPS_TLS_GOTTPREL,
++ BFD_RELOC_MIPS_TLS_TPREL32,
++ BFD_RELOC_MIPS_TLS_TPREL64,
++ BFD_RELOC_MIPS_TLS_TPREL_HI16,
++ BFD_RELOC_MIPS_TLS_TPREL_LO16,
++
++
++/* Fujitsu Frv Relocations. */
++ BFD_RELOC_FRV_LABEL16,
++ BFD_RELOC_FRV_LABEL24,
++ BFD_RELOC_FRV_LO16,
++ BFD_RELOC_FRV_HI16,
++ BFD_RELOC_FRV_GPREL12,
++ BFD_RELOC_FRV_GPRELU12,
++ BFD_RELOC_FRV_GPREL32,
++ BFD_RELOC_FRV_GPRELHI,
++ BFD_RELOC_FRV_GPRELLO,
++ BFD_RELOC_FRV_GOT12,
++ BFD_RELOC_FRV_GOTHI,
++ BFD_RELOC_FRV_GOTLO,
++ BFD_RELOC_FRV_FUNCDESC,
++ BFD_RELOC_FRV_FUNCDESC_GOT12,
++ BFD_RELOC_FRV_FUNCDESC_GOTHI,
++ BFD_RELOC_FRV_FUNCDESC_GOTLO,
++ BFD_RELOC_FRV_FUNCDESC_VALUE,
++ BFD_RELOC_FRV_FUNCDESC_GOTOFF12,
++ BFD_RELOC_FRV_FUNCDESC_GOTOFFHI,
++ BFD_RELOC_FRV_FUNCDESC_GOTOFFLO,
++ BFD_RELOC_FRV_GOTOFF12,
++ BFD_RELOC_FRV_GOTOFFHI,
++ BFD_RELOC_FRV_GOTOFFLO,
++ BFD_RELOC_FRV_GETTLSOFF,
++ BFD_RELOC_FRV_TLSDESC_VALUE,
++ BFD_RELOC_FRV_GOTTLSDESC12,
++ BFD_RELOC_FRV_GOTTLSDESCHI,
++ BFD_RELOC_FRV_GOTTLSDESCLO,
++ BFD_RELOC_FRV_TLSMOFF12,
++ BFD_RELOC_FRV_TLSMOFFHI,
++ BFD_RELOC_FRV_TLSMOFFLO,
++ BFD_RELOC_FRV_GOTTLSOFF12,
++ BFD_RELOC_FRV_GOTTLSOFFHI,
++ BFD_RELOC_FRV_GOTTLSOFFLO,
++ BFD_RELOC_FRV_TLSOFF,
++ BFD_RELOC_FRV_TLSDESC_RELAX,
++ BFD_RELOC_FRV_GETTLSOFF_RELAX,
++ BFD_RELOC_FRV_TLSOFF_RELAX,
++ BFD_RELOC_FRV_TLSMOFF,
++
++
++/* This is a 24bit GOT-relative reloc for the mn10300. */
++ BFD_RELOC_MN10300_GOTOFF24,
++
++/* This is a 32bit GOT-relative reloc for the mn10300, offset by two bytes
++in the instruction. */
++ BFD_RELOC_MN10300_GOT32,
++
++/* This is a 24bit GOT-relative reloc for the mn10300, offset by two bytes
++in the instruction. */
++ BFD_RELOC_MN10300_GOT24,
++
++/* This is a 16bit GOT-relative reloc for the mn10300, offset by two bytes
++in the instruction. */
++ BFD_RELOC_MN10300_GOT16,
++
++/* Copy symbol at runtime. */
++ BFD_RELOC_MN10300_COPY,
++
++/* Create GOT entry. */
++ BFD_RELOC_MN10300_GLOB_DAT,
++
++/* Create PLT entry. */
++ BFD_RELOC_MN10300_JMP_SLOT,
++
++/* Adjust by program base. */
++ BFD_RELOC_MN10300_RELATIVE,
++
++
++/* i386/elf relocations */
++ BFD_RELOC_386_GOT32,
++ BFD_RELOC_386_PLT32,
++ BFD_RELOC_386_COPY,
++ BFD_RELOC_386_GLOB_DAT,
++ BFD_RELOC_386_JUMP_SLOT,
++ BFD_RELOC_386_RELATIVE,
++ BFD_RELOC_386_GOTOFF,
++ BFD_RELOC_386_GOTPC,
++ BFD_RELOC_386_TLS_TPOFF,
++ BFD_RELOC_386_TLS_IE,
++ BFD_RELOC_386_TLS_GOTIE,
++ BFD_RELOC_386_TLS_LE,
++ BFD_RELOC_386_TLS_GD,
++ BFD_RELOC_386_TLS_LDM,
++ BFD_RELOC_386_TLS_LDO_32,
++ BFD_RELOC_386_TLS_IE_32,
++ BFD_RELOC_386_TLS_LE_32,
++ BFD_RELOC_386_TLS_DTPMOD32,
++ BFD_RELOC_386_TLS_DTPOFF32,
++ BFD_RELOC_386_TLS_TPOFF32,
++
++/* x86-64/elf relocations */
++ BFD_RELOC_X86_64_GOT32,
++ BFD_RELOC_X86_64_PLT32,
++ BFD_RELOC_X86_64_COPY,
++ BFD_RELOC_X86_64_GLOB_DAT,
++ BFD_RELOC_X86_64_JUMP_SLOT,
++ BFD_RELOC_X86_64_RELATIVE,
++ BFD_RELOC_X86_64_GOTPCREL,
++ BFD_RELOC_X86_64_32S,
++ BFD_RELOC_X86_64_DTPMOD64,
++ BFD_RELOC_X86_64_DTPOFF64,
++ BFD_RELOC_X86_64_TPOFF64,
++ BFD_RELOC_X86_64_TLSGD,
++ BFD_RELOC_X86_64_TLSLD,
++ BFD_RELOC_X86_64_DTPOFF32,
++ BFD_RELOC_X86_64_GOTTPOFF,
++ BFD_RELOC_X86_64_TPOFF32,
++ BFD_RELOC_X86_64_GOTOFF64,
++ BFD_RELOC_X86_64_GOTPC32,
++
++/* ns32k relocations */
++ BFD_RELOC_NS32K_IMM_8,
++ BFD_RELOC_NS32K_IMM_16,
++ BFD_RELOC_NS32K_IMM_32,
++ BFD_RELOC_NS32K_IMM_8_PCREL,
++ BFD_RELOC_NS32K_IMM_16_PCREL,
++ BFD_RELOC_NS32K_IMM_32_PCREL,
++ BFD_RELOC_NS32K_DISP_8,
++ BFD_RELOC_NS32K_DISP_16,
++ BFD_RELOC_NS32K_DISP_32,
++ BFD_RELOC_NS32K_DISP_8_PCREL,
++ BFD_RELOC_NS32K_DISP_16_PCREL,
++ BFD_RELOC_NS32K_DISP_32_PCREL,
++
++/* PDP11 relocations */
++ BFD_RELOC_PDP11_DISP_8_PCREL,
++ BFD_RELOC_PDP11_DISP_6_PCREL,
++
++/* Picojava relocs. Not all of these appear in object files. */
++ BFD_RELOC_PJ_CODE_HI16,
++ BFD_RELOC_PJ_CODE_LO16,
++ BFD_RELOC_PJ_CODE_DIR16,
++ BFD_RELOC_PJ_CODE_DIR32,
++ BFD_RELOC_PJ_CODE_REL16,
++ BFD_RELOC_PJ_CODE_REL32,
++
++/* Power(rs6000) and PowerPC relocations. */
++ BFD_RELOC_PPC_B26,
++ BFD_RELOC_PPC_BA26,
++ BFD_RELOC_PPC_TOC16,
++ BFD_RELOC_PPC_B16,
++ BFD_RELOC_PPC_B16_BRTAKEN,
++ BFD_RELOC_PPC_B16_BRNTAKEN,
++ BFD_RELOC_PPC_BA16,
++ BFD_RELOC_PPC_BA16_BRTAKEN,
++ BFD_RELOC_PPC_BA16_BRNTAKEN,
++ BFD_RELOC_PPC_COPY,
++ BFD_RELOC_PPC_GLOB_DAT,
++ BFD_RELOC_PPC_JMP_SLOT,
++ BFD_RELOC_PPC_RELATIVE,
++ BFD_RELOC_PPC_LOCAL24PC,
++ BFD_RELOC_PPC_EMB_NADDR32,
++ BFD_RELOC_PPC_EMB_NADDR16,
++ BFD_RELOC_PPC_EMB_NADDR16_LO,
++ BFD_RELOC_PPC_EMB_NADDR16_HI,
++ BFD_RELOC_PPC_EMB_NADDR16_HA,
++ BFD_RELOC_PPC_EMB_SDAI16,
++ BFD_RELOC_PPC_EMB_SDA2I16,
++ BFD_RELOC_PPC_EMB_SDA2REL,
++ BFD_RELOC_PPC_EMB_SDA21,
++ BFD_RELOC_PPC_EMB_MRKREF,
++ BFD_RELOC_PPC_EMB_RELSEC16,
++ BFD_RELOC_PPC_EMB_RELST_LO,
++ BFD_RELOC_PPC_EMB_RELST_HI,
++ BFD_RELOC_PPC_EMB_RELST_HA,
++ BFD_RELOC_PPC_EMB_BIT_FLD,
++ BFD_RELOC_PPC_EMB_RELSDA,
++ BFD_RELOC_PPC64_HIGHER,
++ BFD_RELOC_PPC64_HIGHER_S,
++ BFD_RELOC_PPC64_HIGHEST,
++ BFD_RELOC_PPC64_HIGHEST_S,
++ BFD_RELOC_PPC64_TOC16_LO,
++ BFD_RELOC_PPC64_TOC16_HI,
++ BFD_RELOC_PPC64_TOC16_HA,
++ BFD_RELOC_PPC64_TOC,
++ BFD_RELOC_PPC64_PLTGOT16,
++ BFD_RELOC_PPC64_PLTGOT16_LO,
++ BFD_RELOC_PPC64_PLTGOT16_HI,
++ BFD_RELOC_PPC64_PLTGOT16_HA,
++ BFD_RELOC_PPC64_ADDR16_DS,
++ BFD_RELOC_PPC64_ADDR16_LO_DS,
++ BFD_RELOC_PPC64_GOT16_DS,
++ BFD_RELOC_PPC64_GOT16_LO_DS,
++ BFD_RELOC_PPC64_PLT16_LO_DS,
++ BFD_RELOC_PPC64_SECTOFF_DS,
++ BFD_RELOC_PPC64_SECTOFF_LO_DS,
++ BFD_RELOC_PPC64_TOC16_DS,
++ BFD_RELOC_PPC64_TOC16_LO_DS,
++ BFD_RELOC_PPC64_PLTGOT16_DS,
++ BFD_RELOC_PPC64_PLTGOT16_LO_DS,
++
++/* PowerPC and PowerPC64 thread-local storage relocations. */
++ BFD_RELOC_PPC_TLS,
++ BFD_RELOC_PPC_DTPMOD,
++ BFD_RELOC_PPC_TPREL16,
++ BFD_RELOC_PPC_TPREL16_LO,
++ BFD_RELOC_PPC_TPREL16_HI,
++ BFD_RELOC_PPC_TPREL16_HA,
++ BFD_RELOC_PPC_TPREL,
++ BFD_RELOC_PPC_DTPREL16,
++ BFD_RELOC_PPC_DTPREL16_LO,
++ BFD_RELOC_PPC_DTPREL16_HI,
++ BFD_RELOC_PPC_DTPREL16_HA,
++ BFD_RELOC_PPC_DTPREL,
++ BFD_RELOC_PPC_GOT_TLSGD16,
++ BFD_RELOC_PPC_GOT_TLSGD16_LO,
++ BFD_RELOC_PPC_GOT_TLSGD16_HI,
++ BFD_RELOC_PPC_GOT_TLSGD16_HA,
++ BFD_RELOC_PPC_GOT_TLSLD16,
++ BFD_RELOC_PPC_GOT_TLSLD16_LO,
++ BFD_RELOC_PPC_GOT_TLSLD16_HI,
++ BFD_RELOC_PPC_GOT_TLSLD16_HA,
++ BFD_RELOC_PPC_GOT_TPREL16,
++ BFD_RELOC_PPC_GOT_TPREL16_LO,
++ BFD_RELOC_PPC_GOT_TPREL16_HI,
++ BFD_RELOC_PPC_GOT_TPREL16_HA,
++ BFD_RELOC_PPC_GOT_DTPREL16,
++ BFD_RELOC_PPC_GOT_DTPREL16_LO,
++ BFD_RELOC_PPC_GOT_DTPREL16_HI,
++ BFD_RELOC_PPC_GOT_DTPREL16_HA,
++ BFD_RELOC_PPC64_TPREL16_DS,
++ BFD_RELOC_PPC64_TPREL16_LO_DS,
++ BFD_RELOC_PPC64_TPREL16_HIGHER,
++ BFD_RELOC_PPC64_TPREL16_HIGHERA,
++ BFD_RELOC_PPC64_TPREL16_HIGHEST,
++ BFD_RELOC_PPC64_TPREL16_HIGHESTA,
++ BFD_RELOC_PPC64_DTPREL16_DS,
++ BFD_RELOC_PPC64_DTPREL16_LO_DS,
++ BFD_RELOC_PPC64_DTPREL16_HIGHER,
++ BFD_RELOC_PPC64_DTPREL16_HIGHERA,
++ BFD_RELOC_PPC64_DTPREL16_HIGHEST,
++ BFD_RELOC_PPC64_DTPREL16_HIGHESTA,
++
++/* IBM 370/390 relocations */
++ BFD_RELOC_I370_D12,
++
++/* The type of reloc used to build a constructor table - at the moment
++probably a 32 bit wide absolute relocation, but the target can choose.
++It generally does map to one of the other relocation types. */
++ BFD_RELOC_CTOR,
++
++/* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are
++not stored in the instruction. */
++ BFD_RELOC_ARM_PCREL_BRANCH,
++
++/* ARM 26 bit pc-relative branch. The lowest bit must be zero and is
++not stored in the instruction. The 2nd lowest bit comes from a 1 bit
++field in the instruction. */
++ BFD_RELOC_ARM_PCREL_BLX,
++
++/* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is
++not stored in the instruction. The 2nd lowest bit comes from a 1 bit
++field in the instruction. */
++ BFD_RELOC_THUMB_PCREL_BLX,
++
++/* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches.
++The lowest bit must be zero and is not stored in the instruction.
++Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an
++"nn" one smaller in all cases. Note further that BRANCH23
++corresponds to R_ARM_THM_CALL. */
++ BFD_RELOC_THUMB_PCREL_BRANCH7,
++ BFD_RELOC_THUMB_PCREL_BRANCH9,
++ BFD_RELOC_THUMB_PCREL_BRANCH12,
++ BFD_RELOC_THUMB_PCREL_BRANCH20,
++ BFD_RELOC_THUMB_PCREL_BRANCH23,
++ BFD_RELOC_THUMB_PCREL_BRANCH25,
++
++/* 12-bit immediate offset, used in ARM-format ldr and str instructions. */
++ BFD_RELOC_ARM_OFFSET_IMM,
++
++/* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */
++ BFD_RELOC_ARM_THUMB_OFFSET,
++
++/* Pc-relative or absolute relocation depending on target. Used for
++entries in .init_array sections. */
++ BFD_RELOC_ARM_TARGET1,
++
++/* Read-only segment base relative address. */
++ BFD_RELOC_ARM_ROSEGREL32,
++
++/* Data segment base relative address. */
++ BFD_RELOC_ARM_SBREL32,
++
++/* This reloc is used for references to RTTI data from exception handling
++tables. The actual definition depends on the target. It may be a
++pc-relative or some form of GOT-indirect relocation. */
++ BFD_RELOC_ARM_TARGET2,
++
++/* 31-bit PC relative address. */
++ BFD_RELOC_ARM_PREL31,
++
++/* Relocations for setting up GOTs and PLTs for shared libraries. */
++ BFD_RELOC_ARM_JUMP_SLOT,
++ BFD_RELOC_ARM_GLOB_DAT,
++ BFD_RELOC_ARM_GOT32,
++ BFD_RELOC_ARM_PLT32,
++ BFD_RELOC_ARM_RELATIVE,
++ BFD_RELOC_ARM_GOTOFF,
++ BFD_RELOC_ARM_GOTPC,
++
++/* ARM thread-local storage relocations. */
++ BFD_RELOC_ARM_TLS_GD32,
++ BFD_RELOC_ARM_TLS_LDO32,
++ BFD_RELOC_ARM_TLS_LDM32,
++ BFD_RELOC_ARM_TLS_DTPOFF32,
++ BFD_RELOC_ARM_TLS_DTPMOD32,
++ BFD_RELOC_ARM_TLS_TPOFF32,
++ BFD_RELOC_ARM_TLS_IE32,
++ BFD_RELOC_ARM_TLS_LE32,
++
++/* These relocs are only used within the ARM assembler. They are not
++(at present) written to any object files. */
++ BFD_RELOC_ARM_IMMEDIATE,
++ BFD_RELOC_ARM_ADRL_IMMEDIATE,
++ BFD_RELOC_ARM_T32_IMMEDIATE,
++ BFD_RELOC_ARM_SHIFT_IMM,
++ BFD_RELOC_ARM_SMI,
++ BFD_RELOC_ARM_SWI,
++ BFD_RELOC_ARM_MULTI,
++ BFD_RELOC_ARM_CP_OFF_IMM,
++ BFD_RELOC_ARM_CP_OFF_IMM_S2,
++ BFD_RELOC_ARM_ADR_IMM,
++ BFD_RELOC_ARM_LDR_IMM,
++ BFD_RELOC_ARM_LITERAL,
++ BFD_RELOC_ARM_IN_POOL,
++ BFD_RELOC_ARM_OFFSET_IMM8,
++ BFD_RELOC_ARM_T32_OFFSET_U8,
++ BFD_RELOC_ARM_T32_OFFSET_IMM,
++ BFD_RELOC_ARM_HWLITERAL,
++ BFD_RELOC_ARM_THUMB_ADD,
++ BFD_RELOC_ARM_THUMB_IMM,
++ BFD_RELOC_ARM_THUMB_SHIFT,
++
++/* Renesas / SuperH SH relocs. Not all of these appear in object files. */
++ BFD_RELOC_SH_PCDISP8BY2,
++ BFD_RELOC_SH_PCDISP12BY2,
++ BFD_RELOC_SH_IMM3,
++ BFD_RELOC_SH_IMM3U,
++ BFD_RELOC_SH_DISP12,
++ BFD_RELOC_SH_DISP12BY2,
++ BFD_RELOC_SH_DISP12BY4,
++ BFD_RELOC_SH_DISP12BY8,
++ BFD_RELOC_SH_DISP20,
++ BFD_RELOC_SH_DISP20BY8,
++ BFD_RELOC_SH_IMM4,
++ BFD_RELOC_SH_IMM4BY2,
++ BFD_RELOC_SH_IMM4BY4,
++ BFD_RELOC_SH_IMM8,
++ BFD_RELOC_SH_IMM8BY2,
++ BFD_RELOC_SH_IMM8BY4,
++ BFD_RELOC_SH_PCRELIMM8BY2,
++ BFD_RELOC_SH_PCRELIMM8BY4,
++ BFD_RELOC_SH_SWITCH16,
++ BFD_RELOC_SH_SWITCH32,
++ BFD_RELOC_SH_USES,
++ BFD_RELOC_SH_COUNT,
++ BFD_RELOC_SH_ALIGN,
++ BFD_RELOC_SH_CODE,
++ BFD_RELOC_SH_DATA,
++ BFD_RELOC_SH_LABEL,
++ BFD_RELOC_SH_LOOP_START,
++ BFD_RELOC_SH_LOOP_END,
++ BFD_RELOC_SH_COPY,
++ BFD_RELOC_SH_GLOB_DAT,
++ BFD_RELOC_SH_JMP_SLOT,
++ BFD_RELOC_SH_RELATIVE,
++ BFD_RELOC_SH_GOTPC,
++ BFD_RELOC_SH_GOT_LOW16,
++ BFD_RELOC_SH_GOT_MEDLOW16,
++ BFD_RELOC_SH_GOT_MEDHI16,
++ BFD_RELOC_SH_GOT_HI16,
++ BFD_RELOC_SH_GOTPLT_LOW16,
++ BFD_RELOC_SH_GOTPLT_MEDLOW16,
++ BFD_RELOC_SH_GOTPLT_MEDHI16,
++ BFD_RELOC_SH_GOTPLT_HI16,
++ BFD_RELOC_SH_PLT_LOW16,
++ BFD_RELOC_SH_PLT_MEDLOW16,
++ BFD_RELOC_SH_PLT_MEDHI16,
++ BFD_RELOC_SH_PLT_HI16,
++ BFD_RELOC_SH_GOTOFF_LOW16,
++ BFD_RELOC_SH_GOTOFF_MEDLOW16,
++ BFD_RELOC_SH_GOTOFF_MEDHI16,
++ BFD_RELOC_SH_GOTOFF_HI16,
++ BFD_RELOC_SH_GOTPC_LOW16,
++ BFD_RELOC_SH_GOTPC_MEDLOW16,
++ BFD_RELOC_SH_GOTPC_MEDHI16,
++ BFD_RELOC_SH_GOTPC_HI16,
++ BFD_RELOC_SH_COPY64,
++ BFD_RELOC_SH_GLOB_DAT64,
++ BFD_RELOC_SH_JMP_SLOT64,
++ BFD_RELOC_SH_RELATIVE64,
++ BFD_RELOC_SH_GOT10BY4,
++ BFD_RELOC_SH_GOT10BY8,
++ BFD_RELOC_SH_GOTPLT10BY4,
++ BFD_RELOC_SH_GOTPLT10BY8,
++ BFD_RELOC_SH_GOTPLT32,
++ BFD_RELOC_SH_SHMEDIA_CODE,
++ BFD_RELOC_SH_IMMU5,
++ BFD_RELOC_SH_IMMS6,
++ BFD_RELOC_SH_IMMS6BY32,
++ BFD_RELOC_SH_IMMU6,
++ BFD_RELOC_SH_IMMS10,
++ BFD_RELOC_SH_IMMS10BY2,
++ BFD_RELOC_SH_IMMS10BY4,
++ BFD_RELOC_SH_IMMS10BY8,
++ BFD_RELOC_SH_IMMS16,
++ BFD_RELOC_SH_IMMU16,
++ BFD_RELOC_SH_IMM_LOW16,
++ BFD_RELOC_SH_IMM_LOW16_PCREL,
++ BFD_RELOC_SH_IMM_MEDLOW16,
++ BFD_RELOC_SH_IMM_MEDLOW16_PCREL,
++ BFD_RELOC_SH_IMM_MEDHI16,
++ BFD_RELOC_SH_IMM_MEDHI16_PCREL,
++ BFD_RELOC_SH_IMM_HI16,
++ BFD_RELOC_SH_IMM_HI16_PCREL,
++ BFD_RELOC_SH_PT_16,
++ BFD_RELOC_SH_TLS_GD_32,
++ BFD_RELOC_SH_TLS_LD_32,
++ BFD_RELOC_SH_TLS_LDO_32,
++ BFD_RELOC_SH_TLS_IE_32,
++ BFD_RELOC_SH_TLS_LE_32,
++ BFD_RELOC_SH_TLS_DTPMOD32,
++ BFD_RELOC_SH_TLS_DTPOFF32,
++ BFD_RELOC_SH_TLS_TPOFF32,
++
++/* ARC Cores relocs.
++ARC 22 bit pc-relative branch. The lowest two bits must be zero and are
++not stored in the instruction. The high 20 bits are installed in bits 26
++through 7 of the instruction. */
++ BFD_RELOC_ARC_B22_PCREL,
++
++/* ARC 26 bit absolute branch. The lowest two bits must be zero and are not
++stored in the instruction. The high 24 bits are installed in bits 23
++through 0. */
++ BFD_RELOC_ARC_B26,
++
++/* Mitsubishi D10V relocs.
++This is a 10-bit reloc with the right 2 bits
++assumed to be 0. */
++ BFD_RELOC_D10V_10_PCREL_R,
++
++/* Mitsubishi D10V relocs.
++This is a 10-bit reloc with the right 2 bits
++assumed to be 0. This is the same as the previous reloc
++except it is in the left container, i.e.,
++shifted left 15 bits. */
++ BFD_RELOC_D10V_10_PCREL_L,
++
++/* This is an 18-bit reloc with the right 2 bits
++assumed to be 0. */
++ BFD_RELOC_D10V_18,
++
++/* This is an 18-bit reloc with the right 2 bits
++assumed to be 0. */
++ BFD_RELOC_D10V_18_PCREL,
++
++/* Mitsubishi D30V relocs.
++This is a 6-bit absolute reloc. */
++ BFD_RELOC_D30V_6,
++
++/* This is a 6-bit pc-relative reloc with
++the right 3 bits assumed to be 0. */
++ BFD_RELOC_D30V_9_PCREL,
++
++/* This is a 6-bit pc-relative reloc with
++the right 3 bits assumed to be 0. Same
++as the previous reloc but on the right side
++of the container. */
++ BFD_RELOC_D30V_9_PCREL_R,
++
++/* This is a 12-bit absolute reloc with the
++right 3 bitsassumed to be 0. */
++ BFD_RELOC_D30V_15,
++
++/* This is a 12-bit pc-relative reloc with
++the right 3 bits assumed to be 0. */
++ BFD_RELOC_D30V_15_PCREL,
++
++/* This is a 12-bit pc-relative reloc with
++the right 3 bits assumed to be 0. Same
++as the previous reloc but on the right side
++of the container. */
++ BFD_RELOC_D30V_15_PCREL_R,
++
++/* This is an 18-bit absolute reloc with
++the right 3 bits assumed to be 0. */
++ BFD_RELOC_D30V_21,
++
++/* This is an 18-bit pc-relative reloc with
++the right 3 bits assumed to be 0. */
++ BFD_RELOC_D30V_21_PCREL,
++
++/* This is an 18-bit pc-relative reloc with
++the right 3 bits assumed to be 0. Same
++as the previous reloc but on the right side
++of the container. */
++ BFD_RELOC_D30V_21_PCREL_R,
++
++/* This is a 32-bit absolute reloc. */
++ BFD_RELOC_D30V_32,
++
++/* This is a 32-bit pc-relative reloc. */
++ BFD_RELOC_D30V_32_PCREL,
++
++/* DLX relocs */
++ BFD_RELOC_DLX_HI16_S,
++
++/* DLX relocs */
++ BFD_RELOC_DLX_LO16,
++
++/* DLX relocs */
++ BFD_RELOC_DLX_JMP26,
++
++/* Renesas M16C/M32C Relocations. */
++ BFD_RELOC_M16C_8_PCREL8,
++ BFD_RELOC_M16C_16_PCREL8,
++ BFD_RELOC_M16C_8_PCREL16,
++ BFD_RELOC_M16C_8_ELABEL24,
++ BFD_RELOC_M16C_8_ABS16,
++ BFD_RELOC_M16C_16_ABS16,
++ BFD_RELOC_M16C_16_ABS24,
++ BFD_RELOC_M16C_16_ABS32,
++ BFD_RELOC_M16C_24_ABS16,
++ BFD_RELOC_M16C_24_ABS24,
++ BFD_RELOC_M16C_24_ABS32,
++ BFD_RELOC_M16C_32_ABS16,
++ BFD_RELOC_M16C_32_ABS24,
++ BFD_RELOC_M16C_32_ABS32,
++ BFD_RELOC_M16C_40_ABS16,
++ BFD_RELOC_M16C_40_ABS24,
++ BFD_RELOC_M16C_40_ABS32,
++
++/* Renesas M32R (formerly Mitsubishi M32R) relocs.
++This is a 24 bit absolute address. */
++ BFD_RELOC_M32R_24,
++
++/* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */
++ BFD_RELOC_M32R_10_PCREL,
++
++/* This is an 18-bit reloc with the right 2 bits assumed to be 0. */
++ BFD_RELOC_M32R_18_PCREL,
++
++/* This is a 26-bit reloc with the right 2 bits assumed to be 0. */
++ BFD_RELOC_M32R_26_PCREL,
++
++/* This is a 16-bit reloc containing the high 16 bits of an address
++used when the lower 16 bits are treated as unsigned. */
++ BFD_RELOC_M32R_HI16_ULO,
++
++/* This is a 16-bit reloc containing the high 16 bits of an address
++used when the lower 16 bits are treated as signed. */
++ BFD_RELOC_M32R_HI16_SLO,
++
++/* This is a 16-bit reloc containing the lower 16 bits of an address. */
++ BFD_RELOC_M32R_LO16,
++
++/* This is a 16-bit reloc containing the small data area offset for use in
++add3, load, and store instructions. */
++ BFD_RELOC_M32R_SDA16,
++
++/* For PIC. */
++ BFD_RELOC_M32R_GOT24,
++ BFD_RELOC_M32R_26_PLTREL,
++ BFD_RELOC_M32R_COPY,
++ BFD_RELOC_M32R_GLOB_DAT,
++ BFD_RELOC_M32R_JMP_SLOT,
++ BFD_RELOC_M32R_RELATIVE,
++ BFD_RELOC_M32R_GOTOFF,
++ BFD_RELOC_M32R_GOTOFF_HI_ULO,
++ BFD_RELOC_M32R_GOTOFF_HI_SLO,
++ BFD_RELOC_M32R_GOTOFF_LO,
++ BFD_RELOC_M32R_GOTPC24,
++ BFD_RELOC_M32R_GOT16_HI_ULO,
++ BFD_RELOC_M32R_GOT16_HI_SLO,
++ BFD_RELOC_M32R_GOT16_LO,
++ BFD_RELOC_M32R_GOTPC_HI_ULO,
++ BFD_RELOC_M32R_GOTPC_HI_SLO,
++ BFD_RELOC_M32R_GOTPC_LO,
++
++/* This is a 9-bit reloc */
++ BFD_RELOC_V850_9_PCREL,
++
++/* This is a 22-bit reloc */
++ BFD_RELOC_V850_22_PCREL,
++
++/* This is a 16 bit offset from the short data area pointer. */
++ BFD_RELOC_V850_SDA_16_16_OFFSET,
++
++/* This is a 16 bit offset (of which only 15 bits are used) from the
++short data area pointer. */
++ BFD_RELOC_V850_SDA_15_16_OFFSET,
++
++/* This is a 16 bit offset from the zero data area pointer. */
++ BFD_RELOC_V850_ZDA_16_16_OFFSET,
++
++/* This is a 16 bit offset (of which only 15 bits are used) from the
++zero data area pointer. */
++ BFD_RELOC_V850_ZDA_15_16_OFFSET,
++
++/* This is an 8 bit offset (of which only 6 bits are used) from the
++tiny data area pointer. */
++ BFD_RELOC_V850_TDA_6_8_OFFSET,
++
++/* This is an 8bit offset (of which only 7 bits are used) from the tiny
++data area pointer. */
++ BFD_RELOC_V850_TDA_7_8_OFFSET,
++
++/* This is a 7 bit offset from the tiny data area pointer. */
++ BFD_RELOC_V850_TDA_7_7_OFFSET,
++
++/* This is a 16 bit offset from the tiny data area pointer. */
++ BFD_RELOC_V850_TDA_16_16_OFFSET,
++
++/* This is a 5 bit offset (of which only 4 bits are used) from the tiny
++data area pointer. */
++ BFD_RELOC_V850_TDA_4_5_OFFSET,
++
++/* This is a 4 bit offset from the tiny data area pointer. */
++ BFD_RELOC_V850_TDA_4_4_OFFSET,
++
++/* This is a 16 bit offset from the short data area pointer, with the
++bits placed non-contiguously in the instruction. */
++ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET,
++
++/* This is a 16 bit offset from the zero data area pointer, with the
++bits placed non-contiguously in the instruction. */
++ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET,
++
++/* This is a 6 bit offset from the call table base pointer. */
++ BFD_RELOC_V850_CALLT_6_7_OFFSET,
++
++/* This is a 16 bit offset from the call table base pointer. */
++ BFD_RELOC_V850_CALLT_16_16_OFFSET,
++
++/* Used for relaxing indirect function calls. */
++ BFD_RELOC_V850_LONGCALL,
++
++/* Used for relaxing indirect jumps. */
++ BFD_RELOC_V850_LONGJUMP,
++
++/* Used to maintain alignment whilst relaxing. */
++ BFD_RELOC_V850_ALIGN,
++
++/* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu
++instructions. */
++ BFD_RELOC_V850_LO16_SPLIT_OFFSET,
++
++/* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the
++instruction. */
++ BFD_RELOC_MN10300_32_PCREL,
++
++/* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the
++instruction. */
++ BFD_RELOC_MN10300_16_PCREL,
++
++/* This is a 8bit DP reloc for the tms320c30, where the most
++significant 8 bits of a 24 bit word are placed into the least
++significant 8 bits of the opcode. */
++ BFD_RELOC_TIC30_LDP,
++
++/* This is a 7bit reloc for the tms320c54x, where the least
++significant 7 bits of a 16 bit word are placed into the least
++significant 7 bits of the opcode. */
++ BFD_RELOC_TIC54X_PARTLS7,
++
++/* This is a 9bit DP reloc for the tms320c54x, where the most
++significant 9 bits of a 16 bit word are placed into the least
++significant 9 bits of the opcode. */
++ BFD_RELOC_TIC54X_PARTMS9,
++
++/* This is an extended address 23-bit reloc for the tms320c54x. */
++ BFD_RELOC_TIC54X_23,
++
++/* This is a 16-bit reloc for the tms320c54x, where the least
++significant 16 bits of a 23-bit extended address are placed into
++the opcode. */
++ BFD_RELOC_TIC54X_16_OF_23,
++
++/* This is a reloc for the tms320c54x, where the most
++significant 7 bits of a 23-bit extended address are placed into
++the opcode. */
++ BFD_RELOC_TIC54X_MS7_OF_23,
++
++/* This is a 48 bit reloc for the FR30 that stores 32 bits. */
++ BFD_RELOC_FR30_48,
++
++/* This is a 32 bit reloc for the FR30 that stores 20 bits split up into
++two sections. */
++ BFD_RELOC_FR30_20,
++
++/* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in
++4 bits. */
++ BFD_RELOC_FR30_6_IN_4,
++
++/* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset
++into 8 bits. */
++ BFD_RELOC_FR30_8_IN_8,
++
++/* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset
++into 8 bits. */
++ BFD_RELOC_FR30_9_IN_8,
++
++/* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset
++into 8 bits. */
++ BFD_RELOC_FR30_10_IN_8,
++
++/* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative
++short offset into 8 bits. */
++ BFD_RELOC_FR30_9_PCREL,
++
++/* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative
++short offset into 11 bits. */
++ BFD_RELOC_FR30_12_PCREL,
++
++/* Motorola Mcore relocations. */
++ BFD_RELOC_MCORE_PCREL_IMM8BY4,
++ BFD_RELOC_MCORE_PCREL_IMM11BY2,
++ BFD_RELOC_MCORE_PCREL_IMM4BY2,
++ BFD_RELOC_MCORE_PCREL_32,
++ BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2,
++ BFD_RELOC_MCORE_RVA,
++
++/* These are relocations for the GETA instruction. */
++ BFD_RELOC_MMIX_GETA,
++ BFD_RELOC_MMIX_GETA_1,
++ BFD_RELOC_MMIX_GETA_2,
++ BFD_RELOC_MMIX_GETA_3,
++
++/* These are relocations for a conditional branch instruction. */
++ BFD_RELOC_MMIX_CBRANCH,
++ BFD_RELOC_MMIX_CBRANCH_J,
++ BFD_RELOC_MMIX_CBRANCH_1,
++ BFD_RELOC_MMIX_CBRANCH_2,
++ BFD_RELOC_MMIX_CBRANCH_3,
++
++/* These are relocations for the PUSHJ instruction. */
++ BFD_RELOC_MMIX_PUSHJ,
++ BFD_RELOC_MMIX_PUSHJ_1,
++ BFD_RELOC_MMIX_PUSHJ_2,
++ BFD_RELOC_MMIX_PUSHJ_3,
++ BFD_RELOC_MMIX_PUSHJ_STUBBABLE,
++
++/* These are relocations for the JMP instruction. */
++ BFD_RELOC_MMIX_JMP,
++ BFD_RELOC_MMIX_JMP_1,
++ BFD_RELOC_MMIX_JMP_2,
++ BFD_RELOC_MMIX_JMP_3,
++
++/* This is a relocation for a relative address as in a GETA instruction or
++a branch. */
++ BFD_RELOC_MMIX_ADDR19,
++
++/* This is a relocation for a relative address as in a JMP instruction. */
++ BFD_RELOC_MMIX_ADDR27,
++
++/* This is a relocation for an instruction field that may be a general
++register or a value 0..255. */
++ BFD_RELOC_MMIX_REG_OR_BYTE,
++
++/* This is a relocation for an instruction field that may be a general
++register. */
++ BFD_RELOC_MMIX_REG,
++
++/* This is a relocation for two instruction fields holding a register and
++an offset, the equivalent of the relocation. */
++ BFD_RELOC_MMIX_BASE_PLUS_OFFSET,
++
++/* This relocation is an assertion that the expression is not allocated as
++a global register. It does not modify contents. */
++ BFD_RELOC_MMIX_LOCAL,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit pc relative
++short offset into 7 bits. */
++ BFD_RELOC_AVR_7_PCREL,
++
++/* This is a 16 bit reloc for the AVR that stores 13 bit pc relative
++short offset into 12 bits. */
++ BFD_RELOC_AVR_13_PCREL,
++
++/* This is a 16 bit reloc for the AVR that stores 17 bit value (usually
++program memory address) into 16 bits. */
++ BFD_RELOC_AVR_16_PM,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
++data memory address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_LO8_LDI,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
++of data memory address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_HI8_LDI,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit
++of program memory address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_HH8_LDI,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(usually data memory address) into 8 bit immediate value of SUBI insn. */
++ BFD_RELOC_AVR_LO8_LDI_NEG,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(high 8 bit of data memory address) into 8 bit immediate value of
++SUBI insn. */
++ BFD_RELOC_AVR_HI8_LDI_NEG,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(most high 8 bit of program memory address) into 8 bit immediate value
++of LDI or SUBI insn. */
++ BFD_RELOC_AVR_HH8_LDI_NEG,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
++command address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_LO8_LDI_PM,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
++of command address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_HI8_LDI_PM,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit
++of command address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_HH8_LDI_PM,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(usually command address) into 8 bit immediate value of SUBI insn. */
++ BFD_RELOC_AVR_LO8_LDI_PM_NEG,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(high 8 bit of 16 bit command address) into 8 bit immediate value
++of SUBI insn. */
++ BFD_RELOC_AVR_HI8_LDI_PM_NEG,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(high 6 bit of 22 bit command address) into 8 bit immediate
++value of SUBI insn. */
++ BFD_RELOC_AVR_HH8_LDI_PM_NEG,
++
++/* This is a 32 bit reloc for the AVR that stores 23 bit value
++into 22 bits. */
++ BFD_RELOC_AVR_CALL,
++
++/* This is a 16 bit reloc for the AVR that stores all needed bits
++for absolute addressing with ldi with overflow check to linktime */
++ BFD_RELOC_AVR_LDI,
++
++/* This is a 6 bit reloc for the AVR that stores offset for ldd/std
++instructions */
++ BFD_RELOC_AVR_6,
++
++/* This is a 6 bit reloc for the AVR that stores offset for adiw/sbiw
++instructions */
++ BFD_RELOC_AVR_6_ADIW,
++
++/* Direct 12 bit. */
++ BFD_RELOC_390_12,
++
++/* 12 bit GOT offset. */
++ BFD_RELOC_390_GOT12,
++
++/* 32 bit PC relative PLT address. */
++ BFD_RELOC_390_PLT32,
++
++/* Copy symbol at runtime. */
++ BFD_RELOC_390_COPY,
++
++/* Create GOT entry. */
++ BFD_RELOC_390_GLOB_DAT,
++
++/* Create PLT entry. */
++ BFD_RELOC_390_JMP_SLOT,
++
++/* Adjust by program base. */
++ BFD_RELOC_390_RELATIVE,
++
++/* 32 bit PC relative offset to GOT. */
++ BFD_RELOC_390_GOTPC,
++
++/* 16 bit GOT offset. */
++ BFD_RELOC_390_GOT16,
++
++/* PC relative 16 bit shifted by 1. */
++ BFD_RELOC_390_PC16DBL,
++
++/* 16 bit PC rel. PLT shifted by 1. */
++ BFD_RELOC_390_PLT16DBL,
++
++/* PC relative 32 bit shifted by 1. */
++ BFD_RELOC_390_PC32DBL,
++
++/* 32 bit PC rel. PLT shifted by 1. */
++ BFD_RELOC_390_PLT32DBL,
++
++/* 32 bit PC rel. GOT shifted by 1. */
++ BFD_RELOC_390_GOTPCDBL,
++
++/* 64 bit GOT offset. */
++ BFD_RELOC_390_GOT64,
++
++/* 64 bit PC relative PLT address. */
++ BFD_RELOC_390_PLT64,
++
++/* 32 bit rel. offset to GOT entry. */
++ BFD_RELOC_390_GOTENT,
++
++/* 64 bit offset to GOT. */
++ BFD_RELOC_390_GOTOFF64,
++
++/* 12-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_390_GOTPLT12,
++
++/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_390_GOTPLT16,
++
++/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_390_GOTPLT32,
++
++/* 64-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_390_GOTPLT64,
++
++/* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_390_GOTPLTENT,
++
++/* 16-bit rel. offset from the GOT to a PLT entry. */
++ BFD_RELOC_390_PLTOFF16,
++
++/* 32-bit rel. offset from the GOT to a PLT entry. */
++ BFD_RELOC_390_PLTOFF32,
++
++/* 64-bit rel. offset from the GOT to a PLT entry. */
++ BFD_RELOC_390_PLTOFF64,
++
++/* s390 tls relocations. */
++ BFD_RELOC_390_TLS_LOAD,
++ BFD_RELOC_390_TLS_GDCALL,
++ BFD_RELOC_390_TLS_LDCALL,
++ BFD_RELOC_390_TLS_GD32,
++ BFD_RELOC_390_TLS_GD64,
++ BFD_RELOC_390_TLS_GOTIE12,
++ BFD_RELOC_390_TLS_GOTIE32,
++ BFD_RELOC_390_TLS_GOTIE64,
++ BFD_RELOC_390_TLS_LDM32,
++ BFD_RELOC_390_TLS_LDM64,
++ BFD_RELOC_390_TLS_IE32,
++ BFD_RELOC_390_TLS_IE64,
++ BFD_RELOC_390_TLS_IEENT,
++ BFD_RELOC_390_TLS_LE32,
++ BFD_RELOC_390_TLS_LE64,
++ BFD_RELOC_390_TLS_LDO32,
++ BFD_RELOC_390_TLS_LDO64,
++ BFD_RELOC_390_TLS_DTPMOD,
++ BFD_RELOC_390_TLS_DTPOFF,
++ BFD_RELOC_390_TLS_TPOFF,
++
++/* Long displacement extension. */
++ BFD_RELOC_390_20,
++ BFD_RELOC_390_GOT20,
++ BFD_RELOC_390_GOTPLT20,
++ BFD_RELOC_390_TLS_GOTIE20,
++
++/* Scenix IP2K - 9-bit register number / data address */
++ BFD_RELOC_IP2K_FR9,
++
++/* Scenix IP2K - 4-bit register/data bank number */
++ BFD_RELOC_IP2K_BANK,
++
++/* Scenix IP2K - low 13 bits of instruction word address */
++ BFD_RELOC_IP2K_ADDR16CJP,
++
++/* Scenix IP2K - high 3 bits of instruction word address */
++ BFD_RELOC_IP2K_PAGE3,
++
++/* Scenix IP2K - ext/low/high 8 bits of data address */
++ BFD_RELOC_IP2K_LO8DATA,
++ BFD_RELOC_IP2K_HI8DATA,
++ BFD_RELOC_IP2K_EX8DATA,
++
++/* Scenix IP2K - low/high 8 bits of instruction word address */
++ BFD_RELOC_IP2K_LO8INSN,
++ BFD_RELOC_IP2K_HI8INSN,
++
++/* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */
++ BFD_RELOC_IP2K_PC_SKIP,
++
++/* Scenix IP2K - 16 bit word address in text section. */
++ BFD_RELOC_IP2K_TEXT,
++
++/* Scenix IP2K - 7-bit sp or dp offset */
++ BFD_RELOC_IP2K_FR_OFFSET,
++
++/* Scenix VPE4K coprocessor - data/insn-space addressing */
++ BFD_RELOC_VPE4KMATH_DATA,
++ BFD_RELOC_VPE4KMATH_INSN,
++
++/* These two relocations are used by the linker to determine which of
++the entries in a C++ virtual function table are actually used. When
++the --gc-sections option is given, the linker will zero out the entries
++that are not used, so that the code for those functions need not be
++included in the output.
++
++VTABLE_INHERIT is a zero-space relocation used to describe to the
++linker the inheritance tree of a C++ virtual function table. The
++relocation's symbol should be the parent class' vtable, and the
++relocation should be located at the child vtable.
++
++VTABLE_ENTRY is a zero-space relocation that describes the use of a
++virtual function table entry. The reloc's symbol should refer to the
++table of the class mentioned in the code. Off of that base, an offset
++describes the entry that is being used. For Rela hosts, this offset
++is stored in the reloc's addend. For Rel hosts, we are forced to put
++this offset in the reloc's section offset. */
++ BFD_RELOC_VTABLE_INHERIT,
++ BFD_RELOC_VTABLE_ENTRY,
++
++/* Intel IA64 Relocations. */
++ BFD_RELOC_IA64_IMM14,
++ BFD_RELOC_IA64_IMM22,
++ BFD_RELOC_IA64_IMM64,
++ BFD_RELOC_IA64_DIR32MSB,
++ BFD_RELOC_IA64_DIR32LSB,
++ BFD_RELOC_IA64_DIR64MSB,
++ BFD_RELOC_IA64_DIR64LSB,
++ BFD_RELOC_IA64_GPREL22,
++ BFD_RELOC_IA64_GPREL64I,
++ BFD_RELOC_IA64_GPREL32MSB,
++ BFD_RELOC_IA64_GPREL32LSB,
++ BFD_RELOC_IA64_GPREL64MSB,
++ BFD_RELOC_IA64_GPREL64LSB,
++ BFD_RELOC_IA64_LTOFF22,
++ BFD_RELOC_IA64_LTOFF64I,
++ BFD_RELOC_IA64_PLTOFF22,
++ BFD_RELOC_IA64_PLTOFF64I,
++ BFD_RELOC_IA64_PLTOFF64MSB,
++ BFD_RELOC_IA64_PLTOFF64LSB,
++ BFD_RELOC_IA64_FPTR64I,
++ BFD_RELOC_IA64_FPTR32MSB,
++ BFD_RELOC_IA64_FPTR32LSB,
++ BFD_RELOC_IA64_FPTR64MSB,
++ BFD_RELOC_IA64_FPTR64LSB,
++ BFD_RELOC_IA64_PCREL21B,
++ BFD_RELOC_IA64_PCREL21BI,
++ BFD_RELOC_IA64_PCREL21M,
++ BFD_RELOC_IA64_PCREL21F,
++ BFD_RELOC_IA64_PCREL22,
++ BFD_RELOC_IA64_PCREL60B,
++ BFD_RELOC_IA64_PCREL64I,
++ BFD_RELOC_IA64_PCREL32MSB,
++ BFD_RELOC_IA64_PCREL32LSB,
++ BFD_RELOC_IA64_PCREL64MSB,
++ BFD_RELOC_IA64_PCREL64LSB,
++ BFD_RELOC_IA64_LTOFF_FPTR22,
++ BFD_RELOC_IA64_LTOFF_FPTR64I,
++ BFD_RELOC_IA64_LTOFF_FPTR32MSB,
++ BFD_RELOC_IA64_LTOFF_FPTR32LSB,
++ BFD_RELOC_IA64_LTOFF_FPTR64MSB,
++ BFD_RELOC_IA64_LTOFF_FPTR64LSB,
++ BFD_RELOC_IA64_SEGREL32MSB,
++ BFD_RELOC_IA64_SEGREL32LSB,
++ BFD_RELOC_IA64_SEGREL64MSB,
++ BFD_RELOC_IA64_SEGREL64LSB,
++ BFD_RELOC_IA64_SECREL32MSB,
++ BFD_RELOC_IA64_SECREL32LSB,
++ BFD_RELOC_IA64_SECREL64MSB,
++ BFD_RELOC_IA64_SECREL64LSB,
++ BFD_RELOC_IA64_REL32MSB,
++ BFD_RELOC_IA64_REL32LSB,
++ BFD_RELOC_IA64_REL64MSB,
++ BFD_RELOC_IA64_REL64LSB,
++ BFD_RELOC_IA64_LTV32MSB,
++ BFD_RELOC_IA64_LTV32LSB,
++ BFD_RELOC_IA64_LTV64MSB,
++ BFD_RELOC_IA64_LTV64LSB,
++ BFD_RELOC_IA64_IPLTMSB,
++ BFD_RELOC_IA64_IPLTLSB,
++ BFD_RELOC_IA64_COPY,
++ BFD_RELOC_IA64_LTOFF22X,
++ BFD_RELOC_IA64_LDXMOV,
++ BFD_RELOC_IA64_TPREL14,
++ BFD_RELOC_IA64_TPREL22,
++ BFD_RELOC_IA64_TPREL64I,
++ BFD_RELOC_IA64_TPREL64MSB,
++ BFD_RELOC_IA64_TPREL64LSB,
++ BFD_RELOC_IA64_LTOFF_TPREL22,
++ BFD_RELOC_IA64_DTPMOD64MSB,
++ BFD_RELOC_IA64_DTPMOD64LSB,
++ BFD_RELOC_IA64_LTOFF_DTPMOD22,
++ BFD_RELOC_IA64_DTPREL14,
++ BFD_RELOC_IA64_DTPREL22,
++ BFD_RELOC_IA64_DTPREL64I,
++ BFD_RELOC_IA64_DTPREL32MSB,
++ BFD_RELOC_IA64_DTPREL32LSB,
++ BFD_RELOC_IA64_DTPREL64MSB,
++ BFD_RELOC_IA64_DTPREL64LSB,
++ BFD_RELOC_IA64_LTOFF_DTPREL22,
++
++/* Motorola 68HC11 reloc.
++This is the 8 bit high part of an absolute address. */
++ BFD_RELOC_M68HC11_HI8,
++
++/* Motorola 68HC11 reloc.
++This is the 8 bit low part of an absolute address. */
++ BFD_RELOC_M68HC11_LO8,
++
++/* Motorola 68HC11 reloc.
++This is the 3 bit of a value. */
++ BFD_RELOC_M68HC11_3B,
++
++/* Motorola 68HC11 reloc.
++This reloc marks the beginning of a jump/call instruction.
++It is used for linker relaxation to correctly identify beginning
++of instruction and change some branches to use PC-relative
++addressing mode. */
++ BFD_RELOC_M68HC11_RL_JUMP,
++
++/* Motorola 68HC11 reloc.
++This reloc marks a group of several instructions that gcc generates
++and for which the linker relaxation pass can modify and/or remove
++some of them. */
++ BFD_RELOC_M68HC11_RL_GROUP,
++
++/* Motorola 68HC11 reloc.
++This is the 16-bit lower part of an address. It is used for 'call'
++instruction to specify the symbol address without any special
++transformation (due to memory bank window). */
++ BFD_RELOC_M68HC11_LO16,
++
++/* Motorola 68HC11 reloc.
++This is a 8-bit reloc that specifies the page number of an address.
++It is used by 'call' instruction to specify the page number of
++the symbol. */
++ BFD_RELOC_M68HC11_PAGE,
++
++/* Motorola 68HC11 reloc.
++This is a 24-bit reloc that represents the address with a 16-bit
++value and a 8-bit page number. The symbol address is transformed
++to follow the 16K memory bank of 68HC12 (seen as mapped in the window). */
++ BFD_RELOC_M68HC11_24,
++
++/* Motorola 68HC12 reloc.
++This is the 5 bits of a value. */
++ BFD_RELOC_M68HC12_5B,
++
++/* NS CR16C Relocations. */
++ BFD_RELOC_16C_NUM08,
++ BFD_RELOC_16C_NUM08_C,
++ BFD_RELOC_16C_NUM16,
++ BFD_RELOC_16C_NUM16_C,
++ BFD_RELOC_16C_NUM32,
++ BFD_RELOC_16C_NUM32_C,
++ BFD_RELOC_16C_DISP04,
++ BFD_RELOC_16C_DISP04_C,
++ BFD_RELOC_16C_DISP08,
++ BFD_RELOC_16C_DISP08_C,
++ BFD_RELOC_16C_DISP16,
++ BFD_RELOC_16C_DISP16_C,
++ BFD_RELOC_16C_DISP24,
++ BFD_RELOC_16C_DISP24_C,
++ BFD_RELOC_16C_DISP24a,
++ BFD_RELOC_16C_DISP24a_C,
++ BFD_RELOC_16C_REG04,
++ BFD_RELOC_16C_REG04_C,
++ BFD_RELOC_16C_REG04a,
++ BFD_RELOC_16C_REG04a_C,
++ BFD_RELOC_16C_REG14,
++ BFD_RELOC_16C_REG14_C,
++ BFD_RELOC_16C_REG16,
++ BFD_RELOC_16C_REG16_C,
++ BFD_RELOC_16C_REG20,
++ BFD_RELOC_16C_REG20_C,
++ BFD_RELOC_16C_ABS20,
++ BFD_RELOC_16C_ABS20_C,
++ BFD_RELOC_16C_ABS24,
++ BFD_RELOC_16C_ABS24_C,
++ BFD_RELOC_16C_IMM04,
++ BFD_RELOC_16C_IMM04_C,
++ BFD_RELOC_16C_IMM16,
++ BFD_RELOC_16C_IMM16_C,
++ BFD_RELOC_16C_IMM20,
++ BFD_RELOC_16C_IMM20_C,
++ BFD_RELOC_16C_IMM24,
++ BFD_RELOC_16C_IMM24_C,
++ BFD_RELOC_16C_IMM32,
++ BFD_RELOC_16C_IMM32_C,
++
++/* NS CRX Relocations. */
++ BFD_RELOC_CRX_REL4,
++ BFD_RELOC_CRX_REL8,
++ BFD_RELOC_CRX_REL8_CMP,
++ BFD_RELOC_CRX_REL16,
++ BFD_RELOC_CRX_REL24,
++ BFD_RELOC_CRX_REL32,
++ BFD_RELOC_CRX_REGREL12,
++ BFD_RELOC_CRX_REGREL22,
++ BFD_RELOC_CRX_REGREL28,
++ BFD_RELOC_CRX_REGREL32,
++ BFD_RELOC_CRX_ABS16,
++ BFD_RELOC_CRX_ABS32,
++ BFD_RELOC_CRX_NUM8,
++ BFD_RELOC_CRX_NUM16,
++ BFD_RELOC_CRX_NUM32,
++ BFD_RELOC_CRX_IMM16,
++ BFD_RELOC_CRX_IMM32,
++ BFD_RELOC_CRX_SWITCH8,
++ BFD_RELOC_CRX_SWITCH16,
++ BFD_RELOC_CRX_SWITCH32,
++
++/* These relocs are only used within the CRIS assembler. They are not
++(at present) written to any object files. */
++ BFD_RELOC_CRIS_BDISP8,
++ BFD_RELOC_CRIS_UNSIGNED_5,
++ BFD_RELOC_CRIS_SIGNED_6,
++ BFD_RELOC_CRIS_UNSIGNED_6,
++ BFD_RELOC_CRIS_SIGNED_8,
++ BFD_RELOC_CRIS_UNSIGNED_8,
++ BFD_RELOC_CRIS_SIGNED_16,
++ BFD_RELOC_CRIS_UNSIGNED_16,
++ BFD_RELOC_CRIS_LAPCQ_OFFSET,
++ BFD_RELOC_CRIS_UNSIGNED_4,
++
++/* Relocs used in ELF shared libraries for CRIS. */
++ BFD_RELOC_CRIS_COPY,
++ BFD_RELOC_CRIS_GLOB_DAT,
++ BFD_RELOC_CRIS_JUMP_SLOT,
++ BFD_RELOC_CRIS_RELATIVE,
++
++/* 32-bit offset to symbol-entry within GOT. */
++ BFD_RELOC_CRIS_32_GOT,
++
++/* 16-bit offset to symbol-entry within GOT. */
++ BFD_RELOC_CRIS_16_GOT,
++
++/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_CRIS_32_GOTPLT,
++
++/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_CRIS_16_GOTPLT,
++
++/* 32-bit offset to symbol, relative to GOT. */
++ BFD_RELOC_CRIS_32_GOTREL,
++
++/* 32-bit offset to symbol with PLT entry, relative to GOT. */
++ BFD_RELOC_CRIS_32_PLT_GOTREL,
++
++/* 32-bit offset to symbol with PLT entry, relative to this relocation. */
++ BFD_RELOC_CRIS_32_PLT_PCREL,
++
++/* Intel i860 Relocations. */
++ BFD_RELOC_860_COPY,
++ BFD_RELOC_860_GLOB_DAT,
++ BFD_RELOC_860_JUMP_SLOT,
++ BFD_RELOC_860_RELATIVE,
++ BFD_RELOC_860_PC26,
++ BFD_RELOC_860_PLT26,
++ BFD_RELOC_860_PC16,
++ BFD_RELOC_860_LOW0,
++ BFD_RELOC_860_SPLIT0,
++ BFD_RELOC_860_LOW1,
++ BFD_RELOC_860_SPLIT1,
++ BFD_RELOC_860_LOW2,
++ BFD_RELOC_860_SPLIT2,
++ BFD_RELOC_860_LOW3,
++ BFD_RELOC_860_LOGOT0,
++ BFD_RELOC_860_SPGOT0,
++ BFD_RELOC_860_LOGOT1,
++ BFD_RELOC_860_SPGOT1,
++ BFD_RELOC_860_LOGOTOFF0,
++ BFD_RELOC_860_SPGOTOFF0,
++ BFD_RELOC_860_LOGOTOFF1,
++ BFD_RELOC_860_SPGOTOFF1,
++ BFD_RELOC_860_LOGOTOFF2,
++ BFD_RELOC_860_LOGOTOFF3,
++ BFD_RELOC_860_LOPC,
++ BFD_RELOC_860_HIGHADJ,
++ BFD_RELOC_860_HAGOT,
++ BFD_RELOC_860_HAGOTOFF,
++ BFD_RELOC_860_HAPC,
++ BFD_RELOC_860_HIGH,
++ BFD_RELOC_860_HIGOT,
++ BFD_RELOC_860_HIGOTOFF,
++
++/* OpenRISC Relocations. */
++ BFD_RELOC_OPENRISC_ABS_26,
++ BFD_RELOC_OPENRISC_REL_26,
++
++/* H8 elf Relocations. */
++ BFD_RELOC_H8_DIR16A8,
++ BFD_RELOC_H8_DIR16R8,
++ BFD_RELOC_H8_DIR24A8,
++ BFD_RELOC_H8_DIR24R8,
++ BFD_RELOC_H8_DIR32A16,
++
++/* Sony Xstormy16 Relocations. */
++ BFD_RELOC_XSTORMY16_REL_12,
++ BFD_RELOC_XSTORMY16_12,
++ BFD_RELOC_XSTORMY16_24,
++ BFD_RELOC_XSTORMY16_FPTR16,
++
++/* Relocations used by VAX ELF. */
++ BFD_RELOC_VAX_GLOB_DAT,
++ BFD_RELOC_VAX_JMP_SLOT,
++ BFD_RELOC_VAX_RELATIVE,
++
++/* Morpho MS1 - 16 bit immediate relocation. */
++ BFD_RELOC_MS1_PC16,
++
++/* Morpho MS1 - Hi 16 bits of an address. */
++ BFD_RELOC_MS1_HI16,
++
++/* Morpho MS1 - Low 16 bits of an address. */
++ BFD_RELOC_MS1_LO16,
++
++/* Morpho MS1 - Used to tell the linker which vtable entries are used. */
++ BFD_RELOC_MS1_GNU_VTINHERIT,
++
++/* Morpho MS1 - Used to tell the linker which vtable entries are used. */
++ BFD_RELOC_MS1_GNU_VTENTRY,
++
++/* msp430 specific relocation codes */
++ BFD_RELOC_MSP430_10_PCREL,
++ BFD_RELOC_MSP430_16_PCREL,
++ BFD_RELOC_MSP430_16,
++ BFD_RELOC_MSP430_16_PCREL_BYTE,
++ BFD_RELOC_MSP430_16_BYTE,
++ BFD_RELOC_MSP430_2X_PCREL,
++ BFD_RELOC_MSP430_RL_PCREL,
++
++/* IQ2000 Relocations. */
++ BFD_RELOC_IQ2000_OFFSET_16,
++ BFD_RELOC_IQ2000_OFFSET_21,
++ BFD_RELOC_IQ2000_UHI16,
++
++/* Special Xtensa relocation used only by PLT entries in ELF shared
++objects to indicate that the runtime linker should set the value
++to one of its own internal functions or data structures. */
++ BFD_RELOC_XTENSA_RTLD,
++
++/* Xtensa relocations for ELF shared objects. */
++ BFD_RELOC_XTENSA_GLOB_DAT,
++ BFD_RELOC_XTENSA_JMP_SLOT,
++ BFD_RELOC_XTENSA_RELATIVE,
++
++/* Xtensa relocation used in ELF object files for symbols that may require
++PLT entries. Otherwise, this is just a generic 32-bit relocation. */
++ BFD_RELOC_XTENSA_PLT,
++
++/* Xtensa relocations to mark the difference of two local symbols.
++These are only needed to support linker relaxation and can be ignored
++when not relaxing. The field is set to the value of the difference
++assuming no relaxation. The relocation encodes the position of the
++first symbol so the linker can determine whether to adjust the field
++value. */
++ BFD_RELOC_XTENSA_DIFF8,
++ BFD_RELOC_XTENSA_DIFF16,
++ BFD_RELOC_XTENSA_DIFF32,
++
++/* Generic Xtensa relocations for instruction operands. Only the slot
++number is encoded in the relocation. The relocation applies to the
++last PC-relative immediate operand, or if there are no PC-relative
++immediates, to the last immediate operand. */
++ BFD_RELOC_XTENSA_SLOT0_OP,
++ BFD_RELOC_XTENSA_SLOT1_OP,
++ BFD_RELOC_XTENSA_SLOT2_OP,
++ BFD_RELOC_XTENSA_SLOT3_OP,
++ BFD_RELOC_XTENSA_SLOT4_OP,
++ BFD_RELOC_XTENSA_SLOT5_OP,
++ BFD_RELOC_XTENSA_SLOT6_OP,
++ BFD_RELOC_XTENSA_SLOT7_OP,
++ BFD_RELOC_XTENSA_SLOT8_OP,
++ BFD_RELOC_XTENSA_SLOT9_OP,
++ BFD_RELOC_XTENSA_SLOT10_OP,
++ BFD_RELOC_XTENSA_SLOT11_OP,
++ BFD_RELOC_XTENSA_SLOT12_OP,
++ BFD_RELOC_XTENSA_SLOT13_OP,
++ BFD_RELOC_XTENSA_SLOT14_OP,
++
++/* Alternate Xtensa relocations. Only the slot is encoded in the
++relocation. The meaning of these relocations is opcode-specific. */
++ BFD_RELOC_XTENSA_SLOT0_ALT,
++ BFD_RELOC_XTENSA_SLOT1_ALT,
++ BFD_RELOC_XTENSA_SLOT2_ALT,
++ BFD_RELOC_XTENSA_SLOT3_ALT,
++ BFD_RELOC_XTENSA_SLOT4_ALT,
++ BFD_RELOC_XTENSA_SLOT5_ALT,
++ BFD_RELOC_XTENSA_SLOT6_ALT,
++ BFD_RELOC_XTENSA_SLOT7_ALT,
++ BFD_RELOC_XTENSA_SLOT8_ALT,
++ BFD_RELOC_XTENSA_SLOT9_ALT,
++ BFD_RELOC_XTENSA_SLOT10_ALT,
++ BFD_RELOC_XTENSA_SLOT11_ALT,
++ BFD_RELOC_XTENSA_SLOT12_ALT,
++ BFD_RELOC_XTENSA_SLOT13_ALT,
++ BFD_RELOC_XTENSA_SLOT14_ALT,
++
++/* Xtensa relocations for backward compatibility. These have all been
++replaced by BFD_RELOC_XTENSA_SLOT0_OP. */
++ BFD_RELOC_XTENSA_OP0,
++ BFD_RELOC_XTENSA_OP1,
++ BFD_RELOC_XTENSA_OP2,
++
++/* Xtensa relocation to mark that the assembler expanded the
++instructions from an original target. The expansion size is
++encoded in the reloc size. */
++ BFD_RELOC_XTENSA_ASM_EXPAND,
++
++/* Xtensa relocation to mark that the linker should simplify
++assembler-expanded instructions. This is commonly used
++internally by the linker after analysis of a
++BFD_RELOC_XTENSA_ASM_EXPAND. */
++ BFD_RELOC_XTENSA_ASM_SIMPLIFY,
++ BFD_RELOC_UNUSED };
++typedef enum bfd_reloc_code_real bfd_reloc_code_real_type;
++reloc_howto_type *bfd_reloc_type_lookup
++ (bfd *abfd, bfd_reloc_code_real_type code);
++
++const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code);
++
++/* Extracted from syms.c. */
++
++typedef struct bfd_symbol
++{
++ /* A pointer to the BFD which owns the symbol. This information
++ is necessary so that a back end can work out what additional
++ information (invisible to the application writer) is carried
++ with the symbol.
++
++ This field is *almost* redundant, since you can use section->owner
++ instead, except that some symbols point to the global sections
++ bfd_{abs,com,und}_section. This could be fixed by making
++ these globals be per-bfd (or per-target-flavor). FIXME. */
++ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */
++
++ /* The text of the symbol. The name is left alone, and not copied; the
++ application may not alter it. */
++ const char *name;
++
++ /* The value of the symbol. This really should be a union of a
++ numeric value with a pointer, since some flags indicate that
++ a pointer to another symbol is stored here. */
++ symvalue value;
++
++ /* Attributes of a symbol. */
++#define BSF_NO_FLAGS 0x00
++
++ /* The symbol has local scope; <<static>> in <<C>>. The value
++ is the offset into the section of the data. */
++#define BSF_LOCAL 0x01
++
++ /* The symbol has global scope; initialized data in <<C>>. The
++ value is the offset into the section of the data. */
++#define BSF_GLOBAL 0x02
++
++ /* The symbol has global scope and is exported. The value is
++ the offset into the section of the data. */
++#define BSF_EXPORT BSF_GLOBAL /* No real difference. */
++
++ /* A normal C symbol would be one of:
++ <<BSF_LOCAL>>, <<BSF_FORT_COMM>>, <<BSF_UNDEFINED>> or
++ <<BSF_GLOBAL>>. */
++
++ /* The symbol is a debugging record. The value has an arbitrary
++ meaning, unless BSF_DEBUGGING_RELOC is also set. */
++#define BSF_DEBUGGING 0x08
++
++ /* The symbol denotes a function entry point. Used in ELF,
++ perhaps others someday. */
++#define BSF_FUNCTION 0x10
++
++ /* Used by the linker. */
++#define BSF_KEEP 0x20
++#define BSF_KEEP_G 0x40
++
++ /* A weak global symbol, overridable without warnings by
++ a regular global symbol of the same name. */
++#define BSF_WEAK 0x80
++
++ /* This symbol was created to point to a section, e.g. ELF's
++ STT_SECTION symbols. */
++#define BSF_SECTION_SYM 0x100
++
++ /* The symbol used to be a common symbol, but now it is
++ allocated. */
++#define BSF_OLD_COMMON 0x200
++
++ /* The default value for common data. */
++#define BFD_FORT_COMM_DEFAULT_VALUE 0
++
++ /* In some files the type of a symbol sometimes alters its
++ location in an output file - ie in coff a <<ISFCN>> symbol
++ which is also <<C_EXT>> symbol appears where it was
++ declared and not at the end of a section. This bit is set
++ by the target BFD part to convey this information. */
++#define BSF_NOT_AT_END 0x400
++
++ /* Signal that the symbol is the label of constructor section. */
++#define BSF_CONSTRUCTOR 0x800
++
++ /* Signal that the symbol is a warning symbol. The name is a
++ warning. The name of the next symbol is the one to warn about;
++ if a reference is made to a symbol with the same name as the next
++ symbol, a warning is issued by the linker. */
++#define BSF_WARNING 0x1000
++
++ /* Signal that the symbol is indirect. This symbol is an indirect
++ pointer to the symbol with the same name as the next symbol. */
++#define BSF_INDIRECT 0x2000
++
++ /* BSF_FILE marks symbols that contain a file name. This is used
++ for ELF STT_FILE symbols. */
++#define BSF_FILE 0x4000
++
++ /* Symbol is from dynamic linking information. */
++#define BSF_DYNAMIC 0x8000
++
++ /* The symbol denotes a data object. Used in ELF, and perhaps
++ others someday. */
++#define BSF_OBJECT 0x10000
++
++ /* This symbol is a debugging symbol. The value is the offset
++ into the section of the data. BSF_DEBUGGING should be set
++ as well. */
++#define BSF_DEBUGGING_RELOC 0x20000
++
++ /* This symbol is thread local. Used in ELF. */
++#define BSF_THREAD_LOCAL 0x40000
++
++ flagword flags;
++
++ /* A pointer to the section to which this symbol is
++ relative. This will always be non NULL, there are special
++ sections for undefined and absolute symbols. */
++ struct bfd_section *section;
++
++ /* Back end special data. */
++ union
++ {
++ void *p;
++ bfd_vma i;
++ }
++ udata;
++}
++asymbol;
++
++#define bfd_get_symtab_upper_bound(abfd) \
++ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd))
++
++bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym);
++
++bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name);
++
++#define bfd_is_local_label_name(abfd, name) \
++ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name))
++
++bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym);
++
++#define bfd_is_target_special_symbol(abfd, sym) \
++ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym))
++
++#define bfd_canonicalize_symtab(abfd, location) \
++ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location))
++
++bfd_boolean bfd_set_symtab
++ (bfd *abfd, asymbol **location, unsigned int count);
++
++void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol);
++
++#define bfd_make_empty_symbol(abfd) \
++ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd))
++
++asymbol *_bfd_generic_make_empty_symbol (bfd *);
++
++#define bfd_make_debug_symbol(abfd,ptr,size) \
++ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size))
++
++int bfd_decode_symclass (asymbol *symbol);
++
++bfd_boolean bfd_is_undefined_symclass (int symclass);
++
++void bfd_symbol_info (asymbol *symbol, symbol_info *ret);
++
++bfd_boolean bfd_copy_private_symbol_data
++ (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym);
++
++#define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \
++ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \
++ (ibfd, isymbol, obfd, osymbol))
++
++/* Extracted from bfd.c. */
++struct bfd
++{
++ /* A unique identifier of the BFD */
++ unsigned int id;
++
++ /* The filename the application opened the BFD with. */
++ const char *filename;
++
++ /* A pointer to the target jump table. */
++ const struct bfd_target *xvec;
++
++ /* The IOSTREAM, and corresponding IO vector that provide access
++ to the file backing the BFD. */
++ void *iostream;
++ const struct bfd_iovec *iovec;
++
++ /* Is the file descriptor being cached? That is, can it be closed as
++ needed, and re-opened when accessed later? */
++ bfd_boolean cacheable;
++
++ /* Marks whether there was a default target specified when the
++ BFD was opened. This is used to select which matching algorithm
++ to use to choose the back end. */
++ bfd_boolean target_defaulted;
++
++ /* The caching routines use these to maintain a
++ least-recently-used list of BFDs. */
++ struct bfd *lru_prev, *lru_next;
++
++ /* When a file is closed by the caching routines, BFD retains
++ state information on the file here... */
++ ufile_ptr where;
++
++ /* ... and here: (``once'' means at least once). */
++ bfd_boolean opened_once;
++
++ /* Set if we have a locally maintained mtime value, rather than
++ getting it from the file each time. */
++ bfd_boolean mtime_set;
++
++ /* File modified time, if mtime_set is TRUE. */
++ long mtime;
++
++ /* Reserved for an unimplemented file locking extension. */
++ int ifd;
++
++ /* The format which belongs to the BFD. (object, core, etc.) */
++ bfd_format format;
++
++ /* The direction with which the BFD was opened. */
++ enum bfd_direction
++ {
++ no_direction = 0,
++ read_direction = 1,
++ write_direction = 2,
++ both_direction = 3
++ }
++ direction;
++
++ /* Format_specific flags. */
++ flagword flags;
++
++ /* Currently my_archive is tested before adding origin to
++ anything. I believe that this can become always an add of
++ origin, with origin set to 0 for non archive files. */
++ ufile_ptr origin;
++
++ /* Remember when output has begun, to stop strange things
++ from happening. */
++ bfd_boolean output_has_begun;
++
++ /* A hash table for section names. */
++ struct bfd_hash_table section_htab;
++
++ /* Pointer to linked list of sections. */
++ struct bfd_section *sections;
++
++ /* The last section on the section list. */
++ struct bfd_section *section_last;
++
++ /* The number of sections. */
++ unsigned int section_count;
++
++ /* Stuff only useful for object files:
++ The start address. */
++ bfd_vma start_address;
++
++ /* Used for input and output. */
++ unsigned int symcount;
++
++ /* Symbol table for output BFD (with symcount entries). */
++ struct bfd_symbol **outsymbols;
++
++ /* Used for slurped dynamic symbol tables. */
++ unsigned int dynsymcount;
++
++ /* Pointer to structure which contains architecture information. */
++ const struct bfd_arch_info *arch_info;
++
++ /* Flag set if symbols from this BFD should not be exported. */
++ bfd_boolean no_export;
++
++ /* Stuff only useful for archives. */
++ void *arelt_data;
++ struct bfd *my_archive; /* The containing archive BFD. */
++ struct bfd *next; /* The next BFD in the archive. */
++ struct bfd *archive_head; /* The first BFD in the archive. */
++ bfd_boolean has_armap;
++
++ /* A chain of BFD structures involved in a link. */
++ struct bfd *link_next;
++
++ /* A field used by _bfd_generic_link_add_archive_symbols. This will
++ be used only for archive elements. */
++ int archive_pass;
++
++ /* Used by the back end to hold private data. */
++ union
++ {
++ struct aout_data_struct *aout_data;
++ struct artdata *aout_ar_data;
++ struct _oasys_data *oasys_obj_data;
++ struct _oasys_ar_data *oasys_ar_data;
++ struct coff_tdata *coff_obj_data;
++ struct pe_tdata *pe_obj_data;
++ struct xcoff_tdata *xcoff_obj_data;
++ struct ecoff_tdata *ecoff_obj_data;
++ struct ieee_data_struct *ieee_data;
++ struct ieee_ar_data_struct *ieee_ar_data;
++ struct srec_data_struct *srec_data;
++ struct ihex_data_struct *ihex_data;
++ struct tekhex_data_struct *tekhex_data;
++ struct elf_obj_tdata *elf_obj_data;
++ struct nlm_obj_tdata *nlm_obj_data;
++ struct bout_data_struct *bout_data;
++ struct mmo_data_struct *mmo_data;
++ struct sun_core_struct *sun_core_data;
++ struct sco5_core_struct *sco5_core_data;
++ struct trad_core_struct *trad_core_data;
++ struct som_data_struct *som_data;
++ struct hpux_core_struct *hpux_core_data;
++ struct hppabsd_core_struct *hppabsd_core_data;
++ struct sgi_core_struct *sgi_core_data;
++ struct lynx_core_struct *lynx_core_data;
++ struct osf_core_struct *osf_core_data;
++ struct cisco_core_struct *cisco_core_data;
++ struct versados_data_struct *versados_data;
++ struct netbsd_core_struct *netbsd_core_data;
++ struct mach_o_data_struct *mach_o_data;
++ struct mach_o_fat_data_struct *mach_o_fat_data;
++ struct bfd_pef_data_struct *pef_data;
++ struct bfd_pef_xlib_data_struct *pef_xlib_data;
++ struct bfd_sym_data_struct *sym_data;
++ void *any;
++ }
++ tdata;
++
++ /* Used by the application to hold private data. */
++ void *usrdata;
++
++ /* Where all the allocated stuff under this BFD goes. This is a
++ struct objalloc *, but we use void * to avoid requiring the inclusion
++ of objalloc.h. */
++ void *memory;
++};
++
++typedef enum bfd_error
++{
++ bfd_error_no_error = 0,
++ bfd_error_system_call,
++ bfd_error_invalid_target,
++ bfd_error_wrong_format,
++ bfd_error_wrong_object_format,
++ bfd_error_invalid_operation,
++ bfd_error_no_memory,
++ bfd_error_no_symbols,
++ bfd_error_no_armap,
++ bfd_error_no_more_archived_files,
++ bfd_error_malformed_archive,
++ bfd_error_file_not_recognized,
++ bfd_error_file_ambiguously_recognized,
++ bfd_error_no_contents,
++ bfd_error_nonrepresentable_section,
++ bfd_error_no_debug_section,
++ bfd_error_bad_value,
++ bfd_error_file_truncated,
++ bfd_error_file_too_big,
++ bfd_error_invalid_error_code
++}
++bfd_error_type;
++
++bfd_error_type bfd_get_error (void);
++
++void bfd_set_error (bfd_error_type error_tag);
++
++const char *bfd_errmsg (bfd_error_type error_tag);
++
++void bfd_perror (const char *message);
++
++typedef void (*bfd_error_handler_type) (const char *, ...);
++
++bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type);
++
++void bfd_set_error_program_name (const char *);
++
++bfd_error_handler_type bfd_get_error_handler (void);
++
++long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect);
++
++long bfd_canonicalize_reloc
++ (bfd *abfd, asection *sec, arelent **loc, asymbol **syms);
++
++void bfd_set_reloc
++ (bfd *abfd, asection *sec, arelent **rel, unsigned int count);
++
++bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags);
++
++int bfd_get_arch_size (bfd *abfd);
++
++int bfd_get_sign_extend_vma (bfd *abfd);
++
++bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma);
++
++unsigned int bfd_get_gp_size (bfd *abfd);
++
++void bfd_set_gp_size (bfd *abfd, unsigned int i);
++
++bfd_vma bfd_scan_vma (const char *string, const char **end, int base);
++
++bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd);
++
++#define bfd_copy_private_header_data(ibfd, obfd) \
++ BFD_SEND (obfd, _bfd_copy_private_header_data, \
++ (ibfd, obfd))
++bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd);
++
++#define bfd_copy_private_bfd_data(ibfd, obfd) \
++ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \
++ (ibfd, obfd))
++bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd);
++
++#define bfd_merge_private_bfd_data(ibfd, obfd) \
++ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \
++ (ibfd, obfd))
++bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags);
++
++#define bfd_set_private_flags(abfd, flags) \
++ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags))
++#define bfd_sizeof_headers(abfd, reloc) \
++ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, reloc))
++
++#define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \
++ BFD_SEND (abfd, _bfd_find_nearest_line, \
++ (abfd, sec, syms, off, file, func, line))
++
++#define bfd_find_line(abfd, syms, sym, file, line) \
++ BFD_SEND (abfd, _bfd_find_line, \
++ (abfd, syms, sym, file, line))
++
++#define bfd_find_inliner_info(abfd, file, func, line) \
++ BFD_SEND (abfd, _bfd_find_inliner_info, \
++ (abfd, file, func, line))
++
++#define bfd_debug_info_start(abfd) \
++ BFD_SEND (abfd, _bfd_debug_info_start, (abfd))
++
++#define bfd_debug_info_end(abfd) \
++ BFD_SEND (abfd, _bfd_debug_info_end, (abfd))
++
++#define bfd_debug_info_accumulate(abfd, section) \
++ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section))
++
++#define bfd_stat_arch_elt(abfd, stat) \
++ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat))
++
++#define bfd_update_armap_timestamp(abfd) \
++ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd))
++
++#define bfd_set_arch_mach(abfd, arch, mach)\
++ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach))
++
++#define bfd_relax_section(abfd, section, link_info, again) \
++ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again))
++
++#define bfd_gc_sections(abfd, link_info) \
++ BFD_SEND (abfd, _bfd_gc_sections, (abfd, link_info))
++
++#define bfd_merge_sections(abfd, link_info) \
++ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info))
++
++#define bfd_is_group_section(abfd, sec) \
++ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec))
++
++#define bfd_discard_group(abfd, sec) \
++ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec))
++
++#define bfd_link_hash_table_create(abfd) \
++ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd))
++
++#define bfd_link_hash_table_free(abfd, hash) \
++ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash))
++
++#define bfd_link_add_symbols(abfd, info) \
++ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info))
++
++#define bfd_link_just_syms(abfd, sec, info) \
++ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info))
++
++#define bfd_final_link(abfd, info) \
++ BFD_SEND (abfd, _bfd_final_link, (abfd, info))
++
++#define bfd_free_cached_info(abfd) \
++ BFD_SEND (abfd, _bfd_free_cached_info, (abfd))
++
++#define bfd_get_dynamic_symtab_upper_bound(abfd) \
++ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd))
++
++#define bfd_print_private_bfd_data(abfd, file)\
++ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file))
++
++#define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \
++ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols))
++
++#define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \
++ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \
++ dyncount, dynsyms, ret))
++
++#define bfd_get_dynamic_reloc_upper_bound(abfd) \
++ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd))
++
++#define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \
++ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms))
++
++extern bfd_byte *bfd_get_relocated_section_contents
++ (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *,
++ bfd_boolean, asymbol **);
++
++bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative);
++
++struct bfd_preserve
++{
++ void *marker;
++ void *tdata;
++ flagword flags;
++ const struct bfd_arch_info *arch_info;
++ struct bfd_section *sections;
++ struct bfd_section *section_last;
++ unsigned int section_count;
++ struct bfd_hash_table section_htab;
++};
++
++bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *);
++
++void bfd_preserve_restore (bfd *, struct bfd_preserve *);
++
++void bfd_preserve_finish (bfd *, struct bfd_preserve *);
++
++/* Extracted from archive.c. */
++symindex bfd_get_next_mapent
++ (bfd *abfd, symindex previous, carsym **sym);
++
++bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head);
++
++bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous);
++
++/* Extracted from corefile.c. */
++const char *bfd_core_file_failing_command (bfd *abfd);
++
++int bfd_core_file_failing_signal (bfd *abfd);
++
++bfd_boolean core_file_matches_executable_p
++ (bfd *core_bfd, bfd *exec_bfd);
++
++/* Extracted from targets.c. */
++#define BFD_SEND(bfd, message, arglist) \
++ ((*((bfd)->xvec->message)) arglist)
++
++#ifdef DEBUG_BFD_SEND
++#undef BFD_SEND
++#define BFD_SEND(bfd, message, arglist) \
++ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
++ ((*((bfd)->xvec->message)) arglist) : \
++ (bfd_assert (__FILE__,__LINE__), NULL))
++#endif
++#define BFD_SEND_FMT(bfd, message, arglist) \
++ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist)
++
++#ifdef DEBUG_BFD_SEND
++#undef BFD_SEND_FMT
++#define BFD_SEND_FMT(bfd, message, arglist) \
++ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
++ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \
++ (bfd_assert (__FILE__,__LINE__), NULL))
++#endif
++
++enum bfd_flavour
++{
++ bfd_target_unknown_flavour,
++ bfd_target_aout_flavour,
++ bfd_target_coff_flavour,
++ bfd_target_ecoff_flavour,
++ bfd_target_xcoff_flavour,
++ bfd_target_elf_flavour,
++ bfd_target_ieee_flavour,
++ bfd_target_nlm_flavour,
++ bfd_target_oasys_flavour,
++ bfd_target_tekhex_flavour,
++ bfd_target_srec_flavour,
++ bfd_target_ihex_flavour,
++ bfd_target_som_flavour,
++ bfd_target_os9k_flavour,
++ bfd_target_versados_flavour,
++ bfd_target_msdos_flavour,
++ bfd_target_ovax_flavour,
++ bfd_target_evax_flavour,
++ bfd_target_mmo_flavour,
++ bfd_target_mach_o_flavour,
++ bfd_target_pef_flavour,
++ bfd_target_pef_xlib_flavour,
++ bfd_target_sym_flavour
++};
++
++enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN };
++
++/* Forward declaration. */
++typedef struct bfd_link_info _bfd_link_info;
++
++typedef struct bfd_target
++{
++ /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */
++ char *name;
++
++ /* The "flavour" of a back end is a general indication about
++ the contents of a file. */
++ enum bfd_flavour flavour;
++
++ /* The order of bytes within the data area of a file. */
++ enum bfd_endian byteorder;
++
++ /* The order of bytes within the header parts of a file. */
++ enum bfd_endian header_byteorder;
++
++ /* A mask of all the flags which an executable may have set -
++ from the set <<BFD_NO_FLAGS>>, <<HAS_RELOC>>, ...<<D_PAGED>>. */
++ flagword object_flags;
++
++ /* A mask of all the flags which a section may have set - from
++ the set <<SEC_NO_FLAGS>>, <<SEC_ALLOC>>, ...<<SET_NEVER_LOAD>>. */
++ flagword section_flags;
++
++ /* The character normally found at the front of a symbol
++ (if any), perhaps `_'. */
++ char symbol_leading_char;
++
++ /* The pad character for file names within an archive header. */
++ char ar_pad_char;
++
++ /* The maximum number of characters in an archive header. */
++ unsigned short ar_max_namelen;
++
++ /* Entries for byte swapping for data. These are different from the
++ other entry points, since they don't take a BFD as the first argument.
++ Certain other handlers could do the same. */
++ bfd_uint64_t (*bfd_getx64) (const void *);
++ bfd_int64_t (*bfd_getx_signed_64) (const void *);
++ void (*bfd_putx64) (bfd_uint64_t, void *);
++ bfd_vma (*bfd_getx32) (const void *);
++ bfd_signed_vma (*bfd_getx_signed_32) (const void *);
++ void (*bfd_putx32) (bfd_vma, void *);
++ bfd_vma (*bfd_getx16) (const void *);
++ bfd_signed_vma (*bfd_getx_signed_16) (const void *);
++ void (*bfd_putx16) (bfd_vma, void *);
++
++ /* Byte swapping for the headers. */
++ bfd_uint64_t (*bfd_h_getx64) (const void *);
++ bfd_int64_t (*bfd_h_getx_signed_64) (const void *);
++ void (*bfd_h_putx64) (bfd_uint64_t, void *);
++ bfd_vma (*bfd_h_getx32) (const void *);
++ bfd_signed_vma (*bfd_h_getx_signed_32) (const void *);
++ void (*bfd_h_putx32) (bfd_vma, void *);
++ bfd_vma (*bfd_h_getx16) (const void *);
++ bfd_signed_vma (*bfd_h_getx_signed_16) (const void *);
++ void (*bfd_h_putx16) (bfd_vma, void *);
++
++ /* Format dependent routines: these are vectors of entry points
++ within the target vector structure, one for each format to check. */
++
++ /* Check the format of a file being read. Return a <<bfd_target *>> or zero. */
++ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *);
++
++ /* Set the format of a file being written. */
++ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *);
++
++ /* Write cached information into a file being written, at <<bfd_close>>. */
++ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *);
++
++
++ /* Generic entry points. */
++#define BFD_JUMP_TABLE_GENERIC(NAME) \
++ NAME##_close_and_cleanup, \
++ NAME##_bfd_free_cached_info, \
++ NAME##_new_section_hook, \
++ NAME##_get_section_contents, \
++ NAME##_get_section_contents_in_window
++
++ /* Called when the BFD is being closed to do any necessary cleanup. */
++ bfd_boolean (*_close_and_cleanup) (bfd *);
++ /* Ask the BFD to free all cached information. */
++ bfd_boolean (*_bfd_free_cached_info) (bfd *);
++ /* Called when a new section is created. */
++ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr);
++ /* Read the contents of a section. */
++ bfd_boolean (*_bfd_get_section_contents)
++ (bfd *, sec_ptr, void *, file_ptr, bfd_size_type);
++ bfd_boolean (*_bfd_get_section_contents_in_window)
++ (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type);
++
++ /* Entry points to copy private data. */
++#define BFD_JUMP_TABLE_COPY(NAME) \
++ NAME##_bfd_copy_private_bfd_data, \
++ NAME##_bfd_merge_private_bfd_data, \
++ NAME##_bfd_copy_private_section_data, \
++ NAME##_bfd_copy_private_symbol_data, \
++ NAME##_bfd_copy_private_header_data, \
++ NAME##_bfd_set_private_flags, \
++ NAME##_bfd_print_private_bfd_data
++
++ /* Called to copy BFD general private data from one object file
++ to another. */
++ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *);
++ /* Called to merge BFD general private data from one object file
++ to a common output file when linking. */
++ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *);
++ /* Called to copy BFD private section data from one object file
++ to another. */
++ bfd_boolean (*_bfd_copy_private_section_data)
++ (bfd *, sec_ptr, bfd *, sec_ptr);
++ /* Called to copy BFD private symbol data from one symbol
++ to another. */
++ bfd_boolean (*_bfd_copy_private_symbol_data)
++ (bfd *, asymbol *, bfd *, asymbol *);
++ /* Called to copy BFD private header data from one object file
++ to another. */
++ bfd_boolean (*_bfd_copy_private_header_data)
++ (bfd *, bfd *);
++ /* Called to set private backend flags. */
++ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword);
++
++ /* Called to print private BFD data. */
++ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *);
++
++ /* Core file entry points. */
++#define BFD_JUMP_TABLE_CORE(NAME) \
++ NAME##_core_file_failing_command, \
++ NAME##_core_file_failing_signal, \
++ NAME##_core_file_matches_executable_p
++
++ char * (*_core_file_failing_command) (bfd *);
++ int (*_core_file_failing_signal) (bfd *);
++ bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *);
++
++ /* Archive entry points. */
++#define BFD_JUMP_TABLE_ARCHIVE(NAME) \
++ NAME##_slurp_armap, \
++ NAME##_slurp_extended_name_table, \
++ NAME##_construct_extended_name_table, \
++ NAME##_truncate_arname, \
++ NAME##_write_armap, \
++ NAME##_read_ar_hdr, \
++ NAME##_openr_next_archived_file, \
++ NAME##_get_elt_at_index, \
++ NAME##_generic_stat_arch_elt, \
++ NAME##_update_armap_timestamp
++
++ bfd_boolean (*_bfd_slurp_armap) (bfd *);
++ bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *);
++ bfd_boolean (*_bfd_construct_extended_name_table)
++ (bfd *, char **, bfd_size_type *, const char **);
++ void (*_bfd_truncate_arname) (bfd *, const char *, char *);
++ bfd_boolean (*write_armap)
++ (bfd *, unsigned int, struct orl *, unsigned int, int);
++ void * (*_bfd_read_ar_hdr_fn) (bfd *);
++ bfd * (*openr_next_archived_file) (bfd *, bfd *);
++#define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i))
++ bfd * (*_bfd_get_elt_at_index) (bfd *, symindex);
++ int (*_bfd_stat_arch_elt) (bfd *, struct stat *);
++ bfd_boolean (*_bfd_update_armap_timestamp) (bfd *);
++
++ /* Entry points used for symbols. */
++#define BFD_JUMP_TABLE_SYMBOLS(NAME) \
++ NAME##_get_symtab_upper_bound, \
++ NAME##_canonicalize_symtab, \
++ NAME##_make_empty_symbol, \
++ NAME##_print_symbol, \
++ NAME##_get_symbol_info, \
++ NAME##_bfd_is_local_label_name, \
++ NAME##_bfd_is_target_special_symbol, \
++ NAME##_get_lineno, \
++ NAME##_find_nearest_line, \
++ _bfd_generic_find_line, \
++ NAME##_find_inliner_info, \
++ NAME##_bfd_make_debug_symbol, \
++ NAME##_read_minisymbols, \
++ NAME##_minisymbol_to_symbol
++
++ long (*_bfd_get_symtab_upper_bound) (bfd *);
++ long (*_bfd_canonicalize_symtab)
++ (bfd *, struct bfd_symbol **);
++ struct bfd_symbol *
++ (*_bfd_make_empty_symbol) (bfd *);
++ void (*_bfd_print_symbol)
++ (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type);
++#define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e))
++ void (*_bfd_get_symbol_info)
++ (bfd *, struct bfd_symbol *, symbol_info *);
++#define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e))
++ bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *);
++ bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *);
++ alent * (*_get_lineno) (bfd *, struct bfd_symbol *);
++ bfd_boolean (*_bfd_find_nearest_line)
++ (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma,
++ const char **, const char **, unsigned int *);
++ bfd_boolean (*_bfd_find_line)
++ (bfd *, struct bfd_symbol **, struct bfd_symbol *,
++ const char **, unsigned int *);
++ bfd_boolean (*_bfd_find_inliner_info)
++ (bfd *, const char **, const char **, unsigned int *);
++ /* Back-door to allow format-aware applications to create debug symbols
++ while using BFD for everything else. Currently used by the assembler
++ when creating COFF files. */
++ asymbol * (*_bfd_make_debug_symbol)
++ (bfd *, void *, unsigned long size);
++#define bfd_read_minisymbols(b, d, m, s) \
++ BFD_SEND (b, _read_minisymbols, (b, d, m, s))
++ long (*_read_minisymbols)
++ (bfd *, bfd_boolean, void **, unsigned int *);
++#define bfd_minisymbol_to_symbol(b, d, m, f) \
++ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f))
++ asymbol * (*_minisymbol_to_symbol)
++ (bfd *, bfd_boolean, const void *, asymbol *);
++
++ /* Routines for relocs. */
++#define BFD_JUMP_TABLE_RELOCS(NAME) \
++ NAME##_get_reloc_upper_bound, \
++ NAME##_canonicalize_reloc, \
++ NAME##_bfd_reloc_type_lookup
++
++ long (*_get_reloc_upper_bound) (bfd *, sec_ptr);
++ long (*_bfd_canonicalize_reloc)
++ (bfd *, sec_ptr, arelent **, struct bfd_symbol **);
++ /* See documentation on reloc types. */
++ reloc_howto_type *
++ (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type);
++
++ /* Routines used when writing an object file. */
++#define BFD_JUMP_TABLE_WRITE(NAME) \
++ NAME##_set_arch_mach, \
++ NAME##_set_section_contents
++
++ bfd_boolean (*_bfd_set_arch_mach)
++ (bfd *, enum bfd_architecture, unsigned long);
++ bfd_boolean (*_bfd_set_section_contents)
++ (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type);
++
++ /* Routines used by the linker. */
++#define BFD_JUMP_TABLE_LINK(NAME) \
++ NAME##_sizeof_headers, \
++ NAME##_bfd_get_relocated_section_contents, \
++ NAME##_bfd_relax_section, \
++ NAME##_bfd_link_hash_table_create, \
++ NAME##_bfd_link_hash_table_free, \
++ NAME##_bfd_link_add_symbols, \
++ NAME##_bfd_link_just_syms, \
++ NAME##_bfd_final_link, \
++ NAME##_bfd_link_split_section, \
++ NAME##_bfd_gc_sections, \
++ NAME##_bfd_merge_sections, \
++ NAME##_bfd_is_group_section, \
++ NAME##_bfd_discard_group, \
++ NAME##_section_already_linked \
++
++ int (*_bfd_sizeof_headers) (bfd *, bfd_boolean);
++ bfd_byte * (*_bfd_get_relocated_section_contents)
++ (bfd *, struct bfd_link_info *, struct bfd_link_order *,
++ bfd_byte *, bfd_boolean, struct bfd_symbol **);
++
++ bfd_boolean (*_bfd_relax_section)
++ (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *);
++
++ /* Create a hash table for the linker. Different backends store
++ different information in this table. */
++ struct bfd_link_hash_table *
++ (*_bfd_link_hash_table_create) (bfd *);
++
++ /* Release the memory associated with the linker hash table. */
++ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *);
++
++ /* Add symbols from this object file into the hash table. */
++ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *);
++
++ /* Indicate that we are only retrieving symbol values from this section. */
++ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *);
++
++ /* Do a link based on the link_order structures attached to each
++ section of the BFD. */
++ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *);
++
++ /* Should this section be split up into smaller pieces during linking. */
++ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *);
++
++ /* Remove sections that are not referenced from the output. */
++ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *);
++
++ /* Attempt to merge SEC_MERGE sections. */
++ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *);
++
++ /* Is this section a member of a group? */
++ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *);
++
++ /* Discard members of a group. */
++ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *);
++
++ /* Check if SEC has been already linked during a relocatable or
++ final link. */
++ void (*_section_already_linked) (bfd *, struct bfd_section *);
++
++ /* Routines to handle dynamic symbols and relocs. */
++#define BFD_JUMP_TABLE_DYNAMIC(NAME) \
++ NAME##_get_dynamic_symtab_upper_bound, \
++ NAME##_canonicalize_dynamic_symtab, \
++ NAME##_get_synthetic_symtab, \
++ NAME##_get_dynamic_reloc_upper_bound, \
++ NAME##_canonicalize_dynamic_reloc
++
++ /* Get the amount of memory required to hold the dynamic symbols. */
++ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *);
++ /* Read in the dynamic symbols. */
++ long (*_bfd_canonicalize_dynamic_symtab)
++ (bfd *, struct bfd_symbol **);
++ /* Create synthesized symbols. */
++ long (*_bfd_get_synthetic_symtab)
++ (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **,
++ struct bfd_symbol **);
++ /* Get the amount of memory required to hold the dynamic relocs. */
++ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *);
++ /* Read in the dynamic relocs. */
++ long (*_bfd_canonicalize_dynamic_reloc)
++ (bfd *, arelent **, struct bfd_symbol **);
++
++ /* Opposite endian version of this target. */
++ const struct bfd_target * alternative_target;
++
++ /* Data for use by back-end routines, which isn't
++ generic enough to belong in this structure. */
++ const void *backend_data;
++
++} bfd_target;
++
++bfd_boolean bfd_set_default_target (const char *name);
++
++const bfd_target *bfd_find_target (const char *target_name, bfd *abfd);
++
++const char ** bfd_target_list (void);
++
++const bfd_target *bfd_search_for_target
++ (int (*search_func) (const bfd_target *, void *),
++ void *);
++
++/* Extracted from format.c. */
++bfd_boolean bfd_check_format (bfd *abfd, bfd_format format);
++
++bfd_boolean bfd_check_format_matches
++ (bfd *abfd, bfd_format format, char ***matching);
++
++bfd_boolean bfd_set_format (bfd *abfd, bfd_format format);
++
++const char *bfd_format_string (bfd_format format);
++
++/* Extracted from linker.c. */
++bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec);
++
++#define bfd_link_split_section(abfd, sec) \
++ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec))
++
++void bfd_section_already_linked (bfd *abfd, asection *sec);
++
++#define bfd_section_already_linked(abfd, sec) \
++ BFD_SEND (abfd, _section_already_linked, (abfd, sec))
++
++/* Extracted from simple.c. */
++bfd_byte *bfd_simple_get_relocated_section_contents
++ (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table);
++
++#ifdef __cplusplus
++}
++#endif
++#endif
+diff -Nurp linux-2.6.22-590/include/asm-i386/kdb.h linux-2.6.22-600/include/asm-i386/kdb.h
+--- linux-2.6.22-590/include/asm-i386/kdb.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/asm-i386/kdb.h 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,45 @@
++#ifndef _ASM_KDB_H
++#define _ASM_KDB_H
++
++/*
++ * Kernel Debugger Architecture Dependent Global Headers
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++/*
++ * KDB_ENTER() is a macro which causes entry into the kernel
++ * debugger from any point in the kernel code stream. If it
++ * is intended to be used from interrupt level, it must use
++ * a non-maskable entry method.
++ */
++#define KDB_ENTER() do {if (kdb_on && !KDB_IS_RUNNING()) { asm("\tint $129\n"); }} while(0)
++
++/*
++ * Needed for exported symbols.
++ */
++typedef unsigned long kdb_machreg_t;
++
++#define kdb_machreg_fmt "0x%lx"
++#define kdb_machreg_fmt0 "0x%08lx"
++#define kdb_bfd_vma_fmt "0x%lx"
++#define kdb_bfd_vma_fmt0 "0x%08lx"
++#define kdb_elfw_addr_fmt "0x%x"
++#define kdb_elfw_addr_fmt0 "0x%08x"
++
++/*
++ * Per cpu arch specific kdb state. Must be in range 0xff000000.
++ */
++#define KDB_STATE_A_IF 0x01000000 /* Saved IF flag */
++
++static inline unsigned long
++kdba_funcptr_value(void *fp)
++{
++ return (unsigned long)fp;
++}
++
++#endif /* !_ASM_KDB_H */
+diff -Nurp linux-2.6.22-590/include/asm-i386/kdbprivate.h linux-2.6.22-600/include/asm-i386/kdbprivate.h
+--- linux-2.6.22-590/include/asm-i386/kdbprivate.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/asm-i386/kdbprivate.h 2008-04-09 18:16:14.000000000 +0200
+@@ -0,0 +1,189 @@
++#ifndef _ASM_KDBPRIVATE_H
++#define _ASM_KDBPRIVATE_H
++
++/*
++ * Kernel Debugger Architecture Dependent Private Headers
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++typedef unsigned char kdb_machinst_t;
++
++/*
++ * KDB_MAXBPT describes the total number of breakpoints
++ * supported by this architecure.
++ */
++#define KDB_MAXBPT 16
++
++/*
++ * KDB_MAXHARDBPT describes the total number of hardware
++ * breakpoint registers that exist.
++ */
++#define KDB_MAXHARDBPT 4
++
++/* Maximum number of arguments to a function */
++#define KDBA_MAXARGS 16
++
++/*
++ * Platform specific environment entries
++ */
++#define KDB_PLATFORM_ENV "IDMODE=x86", "BYTESPERWORD=4", "IDCOUNT=16"
++
++/*
++ * Support for ia32 debug registers
++ */
++typedef struct _kdbhard_bp {
++ kdb_machreg_t bph_reg; /* Register this breakpoint uses */
++
++ unsigned int bph_free:1; /* Register available for use */
++ unsigned int bph_data:1; /* Data Access breakpoint */
++
++ unsigned int bph_write:1; /* Write Data breakpoint */
++ unsigned int bph_mode:2; /* 0=inst, 1=write, 2=io, 3=read */
++ unsigned int bph_length:2; /* 0=1, 1=2, 2=BAD, 3=4 (bytes) */
++} kdbhard_bp_t;
++
++#define IA32_BREAKPOINT_INSTRUCTION 0xcc
++
++#define DR6_BT 0x00008000
++#define DR6_BS 0x00004000
++#define DR6_BD 0x00002000
++
++#define DR6_B3 0x00000008
++#define DR6_B2 0x00000004
++#define DR6_B1 0x00000002
++#define DR6_B0 0x00000001
++#define DR6_DR_MASK 0x0000000F
++
++#define DR7_RW_VAL(dr, drnum) \
++ (((dr) >> (16 + (4 * (drnum)))) & 0x3)
++
++#define DR7_RW_SET(dr, drnum, rw) \
++ do { \
++ (dr) &= ~(0x3 << (16 + (4 * (drnum)))); \
++ (dr) |= (((rw) & 0x3) << (16 + (4 * (drnum)))); \
++ } while (0)
++
++#define DR7_RW0(dr) DR7_RW_VAL(dr, 0)
++#define DR7_RW0SET(dr,rw) DR7_RW_SET(dr, 0, rw)
++#define DR7_RW1(dr) DR7_RW_VAL(dr, 1)
++#define DR7_RW1SET(dr,rw) DR7_RW_SET(dr, 1, rw)
++#define DR7_RW2(dr) DR7_RW_VAL(dr, 2)
++#define DR7_RW2SET(dr,rw) DR7_RW_SET(dr, 2, rw)
++#define DR7_RW3(dr) DR7_RW_VAL(dr, 3)
++#define DR7_RW3SET(dr,rw) DR7_RW_SET(dr, 3, rw)
++
++
++#define DR7_LEN_VAL(dr, drnum) \
++ (((dr) >> (18 + (4 * (drnum)))) & 0x3)
++
++#define DR7_LEN_SET(dr, drnum, rw) \
++ do { \
++ (dr) &= ~(0x3 << (18 + (4 * (drnum)))); \
++ (dr) |= (((rw) & 0x3) << (18 + (4 * (drnum)))); \
++ } while (0)
++
++#define DR7_LEN0(dr) DR7_LEN_VAL(dr, 0)
++#define DR7_LEN0SET(dr,len) DR7_LEN_SET(dr, 0, len)
++#define DR7_LEN1(dr) DR7_LEN_VAL(dr, 1)
++#define DR7_LEN1SET(dr,len) DR7_LEN_SET(dr, 1, len)
++#define DR7_LEN2(dr) DR7_LEN_VAL(dr, 2)
++#define DR7_LEN2SET(dr,len) DR7_LEN_SET(dr, 2, len)
++#define DR7_LEN3(dr) DR7_LEN_VAL(dr, 3)
++#define DR7_LEN3SET(dr,len) DR7_LEN_SET(dr, 3, len)
++
++#define DR7_G0(dr) (((dr)>>1)&0x1)
++#define DR7_G0SET(dr) ((dr) |= 0x2)
++#define DR7_G0CLR(dr) ((dr) &= ~0x2)
++#define DR7_G1(dr) (((dr)>>3)&0x1)
++#define DR7_G1SET(dr) ((dr) |= 0x8)
++#define DR7_G1CLR(dr) ((dr) &= ~0x8)
++#define DR7_G2(dr) (((dr)>>5)&0x1)
++#define DR7_G2SET(dr) ((dr) |= 0x20)
++#define DR7_G2CLR(dr) ((dr) &= ~0x20)
++#define DR7_G3(dr) (((dr)>>7)&0x1)
++#define DR7_G3SET(dr) ((dr) |= 0x80)
++#define DR7_G3CLR(dr) ((dr) &= ~0x80)
++
++#define DR7_L0(dr) (((dr))&0x1)
++#define DR7_L0SET(dr) ((dr) |= 0x1)
++#define DR7_L0CLR(dr) ((dr) &= ~0x1)
++#define DR7_L1(dr) (((dr)>>2)&0x1)
++#define DR7_L1SET(dr) ((dr) |= 0x4)
++#define DR7_L1CLR(dr) ((dr) &= ~0x4)
++#define DR7_L2(dr) (((dr)>>4)&0x1)
++#define DR7_L2SET(dr) ((dr) |= 0x10)
++#define DR7_L2CLR(dr) ((dr) &= ~0x10)
++#define DR7_L3(dr) (((dr)>>6)&0x1)
++#define DR7_L3SET(dr) ((dr) |= 0x40)
++#define DR7_L3CLR(dr) ((dr) &= ~0x40)
++
++#define DR7_GD 0x00002000 /* General Detect Enable */
++#define DR7_GE 0x00000200 /* Global exact */
++#define DR7_LE 0x00000100 /* Local exact */
++
++#define DR_TYPE_EXECUTE 0x0
++#define DR_TYPE_WRITE 0x1
++#define DR_TYPE_IO 0x2
++#define DR_TYPE_RW 0x3
++
++extern kdb_machreg_t kdba_getdr6(void);
++extern void kdba_putdr6(kdb_machreg_t);
++
++extern kdb_machreg_t kdba_getdr7(void);
++
++/*
++ * Support for setjmp/longjmp
++ */
++#define JB_BX 0
++#define JB_SI 1
++#define JB_DI 2
++#define JB_BP 3
++#define JB_SP 4
++#define JB_PC 5
++
++typedef struct __kdb_jmp_buf {
++ unsigned long regs[6]; /* kdba_setjmp assumes fixed offsets here */
++} kdb_jmp_buf;
++
++extern int asmlinkage kdba_setjmp(kdb_jmp_buf *);
++extern void asmlinkage kdba_longjmp(kdb_jmp_buf *, int);
++#define kdba_setjmp kdba_setjmp
++
++extern kdb_jmp_buf *kdbjmpbuf;
++
++/* Arch specific data saved for running processes */
++
++struct kdba_running_process {
++ long esp; /* CONFIG_4KSTACKS may be on a different stack */
++ long eip; /* eip when esp was set */
++};
++
++static inline
++void kdba_save_running(struct kdba_running_process *k, struct pt_regs *regs)
++{
++ k->esp = current_stack_pointer;
++ __asm__ __volatile__ ( " lea 1f,%%eax; movl %%eax,%0 ; 1: " : "=r"(k->eip) : : "eax" );
++}
++
++static inline
++void kdba_unsave_running(struct kdba_running_process *k, struct pt_regs *regs)
++{
++}
++
++struct kdb_activation_record;
++extern void kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
++ struct kdb_activation_record *ar);
++
++extern void kdba_wait_for_cpus(void);
++
++extern fastcall void kdb_interrupt(void);
++
++#define KDB_INT_REGISTERS 8
++
++
++#endif /* !_ASM_KDBPRIVATE_H */
+diff -Nurp linux-2.6.22-590/include/asm-i386/kdebug.h linux-2.6.22-600/include/asm-i386/kdebug.h
+--- linux-2.6.22-590/include/asm-i386/kdebug.h 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/include/asm-i386/kdebug.h 2008-04-09 18:16:14.000000000 +0200
+@@ -23,6 +23,8 @@ enum die_val {
+ DIE_DIE,
+ DIE_NMIWATCHDOG,
+ DIE_KERNELDEBUG,
++ DIE_KDEBUG_ENTER,
++ DIE_KDEBUG_LEAVE,
+ DIE_TRAP,
+ DIE_GPF,
+ DIE_CALL,
+diff -Nurp linux-2.6.22-590/include/asm-i386/kmap_types.h linux-2.6.22-600/include/asm-i386/kmap_types.h
+--- linux-2.6.22-590/include/asm-i386/kmap_types.h 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/include/asm-i386/kmap_types.h 2008-04-09 18:16:14.000000000 +0200
+@@ -22,7 +22,8 @@ D(9) KM_IRQ0,
+ D(10) KM_IRQ1,
+ D(11) KM_SOFTIRQ0,
+ D(12) KM_SOFTIRQ1,
+-D(13) KM_TYPE_NR
++D(13) KM_KDB,
++D(14) KM_TYPE_NR
+ };
+
+ #undef D
+diff -Nurp linux-2.6.22-590/include/asm-i386/mach-default/irq_vectors.h linux-2.6.22-600/include/asm-i386/mach-default/irq_vectors.h
+--- linux-2.6.22-590/include/asm-i386/mach-default/irq_vectors.h 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/include/asm-i386/mach-default/irq_vectors.h 2008-04-09 18:16:14.000000000 +0200
+@@ -29,6 +29,7 @@
+ #define FIRST_EXTERNAL_VECTOR 0x20
+
+ #define SYSCALL_VECTOR 0x80
++#define KDBENTER_VECTOR 0x81
+
+ /*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+@@ -48,6 +49,7 @@
+ #define INVALIDATE_TLB_VECTOR 0xfd
+ #define RESCHEDULE_VECTOR 0xfc
+ #define CALL_FUNCTION_VECTOR 0xfb
++#define KDB_VECTOR 0xf9
+
+ #define THERMAL_APIC_VECTOR 0xf0
+ /*
+diff -Nurp linux-2.6.22-590/include/asm-i386/ptrace.h linux-2.6.22-600/include/asm-i386/ptrace.h
+--- linux-2.6.22-590/include/asm-i386/ptrace.h 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/include/asm-i386/ptrace.h 2008-04-09 18:16:14.000000000 +0200
+@@ -26,6 +26,29 @@ struct pt_regs {
+ int xss;
+ };
+
++enum EFLAGS {
++ EF_CF = 0x00000001,
++ EF_PF = 0x00000004,
++ EF_AF = 0x00000010,
++ EF_ZF = 0x00000040,
++ EF_SF = 0x00000080,
++ EF_TF = 0x00000100,
++ EF_IE = 0x00000200,
++ EF_DF = 0x00000400,
++ EF_OF = 0x00000800,
++ EF_IOPL = 0x00003000,
++ EF_IOPL_RING0 = 0x00000000,
++ EF_IOPL_RING1 = 0x00001000,
++ EF_IOPL_RING2 = 0x00002000,
++ EF_NT = 0x00004000, /* nested task */
++ EF_RF = 0x00010000, /* resume */
++ EF_VM = 0x00020000, /* virtual mode */
++ EF_AC = 0x00040000, /* alignment */
++ EF_VIF = 0x00080000, /* virtual interrupt */
++ EF_VIP = 0x00100000, /* virtual interrupt pending */
++ EF_ID = 0x00200000, /* id */
++};
++
+ #ifdef __KERNEL__
+
+ #include <asm/vm86.h>
+diff -Nurp linux-2.6.22-590/include/asm-x86_64/ansidecl.h linux-2.6.22-600/include/asm-x86_64/ansidecl.h
+--- linux-2.6.22-590/include/asm-x86_64/ansidecl.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/asm-x86_64/ansidecl.h 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,383 @@
++/* ANSI and traditional C compatibility macros
++ Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001
++ Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++This program is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 2 of the License, or
++(at your option) any later version.
++
++This program is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this program; if not, write to the Free Software
++Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
++
++/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
++ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as
++ * required.
++ * Keith Owens <kaos@sgi.com> 15 May 2006
++ */
++
++/* ANSI and traditional C compatibility macros
++
++ ANSI C is assumed if __STDC__ is #defined.
++
++ Macro ANSI C definition Traditional C definition
++ ----- ---- - ---------- ----------- - ----------
++ ANSI_PROTOTYPES 1 not defined
++ PTR `void *' `char *'
++ PTRCONST `void *const' `char *'
++ LONG_DOUBLE `long double' `double'
++ const not defined `'
++ volatile not defined `'
++ signed not defined `'
++ VA_START(ap, var) va_start(ap, var) va_start(ap)
++
++ Note that it is safe to write "void foo();" indicating a function
++ with no return value, in all K+R compilers we have been able to test.
++
++ For declaring functions with prototypes, we also provide these:
++
++ PARAMS ((prototype))
++ -- for functions which take a fixed number of arguments. Use this
++ when declaring the function. When defining the function, write a
++ K+R style argument list. For example:
++
++ char *strcpy PARAMS ((char *dest, char *source));
++ ...
++ char *
++ strcpy (dest, source)
++ char *dest;
++ char *source;
++ { ... }
++
++
++ VPARAMS ((prototype, ...))
++ -- for functions which take a variable number of arguments. Use
++ PARAMS to declare the function, VPARAMS to define it. For example:
++
++ int printf PARAMS ((const char *format, ...));
++ ...
++ int
++ printf VPARAMS ((const char *format, ...))
++ {
++ ...
++ }
++
++ For writing functions which take variable numbers of arguments, we
++ also provide the VA_OPEN, VA_CLOSE, and VA_FIXEDARG macros. These
++ hide the differences between K+R <varargs.h> and C89 <stdarg.h> more
++ thoroughly than the simple VA_START() macro mentioned above.
++
++ VA_OPEN and VA_CLOSE are used *instead of* va_start and va_end.
++ Immediately after VA_OPEN, put a sequence of VA_FIXEDARG calls
++ corresponding to the list of fixed arguments. Then use va_arg
++ normally to get the variable arguments, or pass your va_list object
++ around. You do not declare the va_list yourself; VA_OPEN does it
++ for you.
++
++ Here is a complete example:
++
++ int
++ printf VPARAMS ((const char *format, ...))
++ {
++ int result;
++
++ VA_OPEN (ap, format);
++ VA_FIXEDARG (ap, const char *, format);
++
++ result = vfprintf (stdout, format, ap);
++ VA_CLOSE (ap);
++
++ return result;
++ }
++
++
++ You can declare variables either before or after the VA_OPEN,
++ VA_FIXEDARG sequence. Also, VA_OPEN and VA_CLOSE are the beginning
++ and end of a block. They must appear at the same nesting level,
++ and any variables declared after VA_OPEN go out of scope at
++ VA_CLOSE. Unfortunately, with a K+R compiler, that includes the
++ argument list. You can have multiple instances of VA_OPEN/VA_CLOSE
++ pairs in a single function in case you need to traverse the
++ argument list more than once.
++
++ For ease of writing code which uses GCC extensions but needs to be
++ portable to other compilers, we provide the GCC_VERSION macro that
++ simplifies testing __GNUC__ and __GNUC_MINOR__ together, and various
++ wrappers around __attribute__. Also, __extension__ will be #defined
++ to nothing if it doesn't work. See below.
++
++ This header also defines a lot of obsolete macros:
++ CONST, VOLATILE, SIGNED, PROTO, EXFUN, DEFUN, DEFUN_VOID,
++ AND, DOTS, NOARGS. Don't use them. */
++
++#ifndef _ANSIDECL_H
++#define _ANSIDECL_H 1
++
++/* Every source file includes this file,
++ so they will all get the switch for lint. */
++/* LINTLIBRARY */
++
++/* Using MACRO(x,y) in cpp #if conditionals does not work with some
++ older preprocessors. Thus we can't define something like this:
++
++#define HAVE_GCC_VERSION(MAJOR, MINOR) \
++ (__GNUC__ > (MAJOR) || (__GNUC__ == (MAJOR) && __GNUC_MINOR__ >= (MINOR)))
++
++and then test "#if HAVE_GCC_VERSION(2,7)".
++
++So instead we use the macro below and test it against specific values. */
++
++/* This macro simplifies testing whether we are using gcc, and if it
++ is of a particular minimum version. (Both major & minor numbers are
++ significant.) This macro will evaluate to 0 if we are not using
++ gcc at all. */
++#ifndef GCC_VERSION
++#define GCC_VERSION (__GNUC__ * 1000 + __GNUC_MINOR__)
++#endif /* GCC_VERSION */
++
++#if defined (__STDC__) || defined (_AIX) || (defined (__mips) && defined (_SYSTYPE_SVR4)) || defined(_WIN32) || (defined(__alpha) && defined(__cplusplus))
++/* All known AIX compilers implement these things (but don't always
++ define __STDC__). The RISC/OS MIPS compiler defines these things
++ in SVR4 mode, but does not define __STDC__. */
++/* eraxxon@alumni.rice.edu: The Compaq C++ compiler, unlike many other
++ C++ compilers, does not define __STDC__, though it acts as if this
++ was so. (Verified versions: 5.7, 6.2, 6.3, 6.5) */
++
++#define ANSI_PROTOTYPES 1
++#define PTR void *
++#define PTRCONST void *const
++#define LONG_DOUBLE long double
++
++/* PARAMS is often defined elsewhere (e.g. by libintl.h), so wrap it in
++ a #ifndef. */
++#ifndef PARAMS
++#define PARAMS(ARGS) ARGS
++#endif
++
++#define VPARAMS(ARGS) ARGS
++#define VA_START(VA_LIST, VAR) va_start(VA_LIST, VAR)
++
++/* variadic function helper macros */
++/* "struct Qdmy" swallows the semicolon after VA_OPEN/VA_FIXEDARG's
++ use without inhibiting further decls and without declaring an
++ actual variable. */
++#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP, VAR); { struct Qdmy
++#define VA_CLOSE(AP) } va_end(AP); }
++#define VA_FIXEDARG(AP, T, N) struct Qdmy
++
++#undef const
++#undef volatile
++#undef signed
++
++#ifdef __KERNEL__
++#ifndef __STDC_VERSION__
++#define __STDC_VERSION__ 0
++#endif
++#endif /* __KERNEL__ */
++
++/* inline requires special treatment; it's in C99, and GCC >=2.7 supports
++ it too, but it's not in C89. */
++#undef inline
++#if __STDC_VERSION__ > 199901L
++/* it's a keyword */
++#else
++# if GCC_VERSION >= 2007
++# define inline __inline__ /* __inline__ prevents -pedantic warnings */
++# else
++# define inline /* nothing */
++# endif
++#endif
++
++/* These are obsolete. Do not use. */
++#ifndef IN_GCC
++#define CONST const
++#define VOLATILE volatile
++#define SIGNED signed
++
++#define PROTO(type, name, arglist) type name arglist
++#define EXFUN(name, proto) name proto
++#define DEFUN(name, arglist, args) name(args)
++#define DEFUN_VOID(name) name(void)
++#define AND ,
++#define DOTS , ...
++#define NOARGS void
++#endif /* ! IN_GCC */
++
++#else /* Not ANSI C. */
++
++#undef ANSI_PROTOTYPES
++#define PTR char *
++#define PTRCONST PTR
++#define LONG_DOUBLE double
++
++#define PARAMS(args) ()
++#define VPARAMS(args) (va_alist) va_dcl
++#define VA_START(va_list, var) va_start(va_list)
++
++#define VA_OPEN(AP, VAR) { va_list AP; va_start(AP); { struct Qdmy
++#define VA_CLOSE(AP) } va_end(AP); }
++#define VA_FIXEDARG(AP, TYPE, NAME) TYPE NAME = va_arg(AP, TYPE)
++
++/* some systems define these in header files for non-ansi mode */
++#undef const
++#undef volatile
++#undef signed
++#undef inline
++#define const
++#define volatile
++#define signed
++#define inline
++
++#ifndef IN_GCC
++#define CONST
++#define VOLATILE
++#define SIGNED
++
++#define PROTO(type, name, arglist) type name ()
++#define EXFUN(name, proto) name()
++#define DEFUN(name, arglist, args) name arglist args;
++#define DEFUN_VOID(name) name()
++#define AND ;
++#define DOTS
++#define NOARGS
++#endif /* ! IN_GCC */
++
++#endif /* ANSI C. */
++
++/* Define macros for some gcc attributes. This permits us to use the
++ macros freely, and know that they will come into play for the
++ version of gcc in which they are supported. */
++
++#if (GCC_VERSION < 2007)
++# define __attribute__(x)
++#endif
++
++/* Attribute __malloc__ on functions was valid as of gcc 2.96. */
++#ifndef ATTRIBUTE_MALLOC
++# if (GCC_VERSION >= 2096)
++# define ATTRIBUTE_MALLOC __attribute__ ((__malloc__))
++# else
++# define ATTRIBUTE_MALLOC
++# endif /* GNUC >= 2.96 */
++#endif /* ATTRIBUTE_MALLOC */
++
++/* Attributes on labels were valid as of gcc 2.93. */
++#ifndef ATTRIBUTE_UNUSED_LABEL
++# if (!defined (__cplusplus) && GCC_VERSION >= 2093)
++# define ATTRIBUTE_UNUSED_LABEL ATTRIBUTE_UNUSED
++# else
++# define ATTRIBUTE_UNUSED_LABEL
++# endif /* !__cplusplus && GNUC >= 2.93 */
++#endif /* ATTRIBUTE_UNUSED_LABEL */
++
++#ifndef ATTRIBUTE_UNUSED
++#define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
++#endif /* ATTRIBUTE_UNUSED */
++
++/* Before GCC 3.4, the C++ frontend couldn't parse attributes placed after the
++ identifier name. */
++#if ! defined(__cplusplus) || (GCC_VERSION >= 3004)
++# define ARG_UNUSED(NAME) NAME ATTRIBUTE_UNUSED
++#else /* !__cplusplus || GNUC >= 3.4 */
++# define ARG_UNUSED(NAME) NAME
++#endif /* !__cplusplus || GNUC >= 3.4 */
++
++#ifndef ATTRIBUTE_NORETURN
++#define ATTRIBUTE_NORETURN __attribute__ ((__noreturn__))
++#endif /* ATTRIBUTE_NORETURN */
++
++/* Attribute `nonnull' was valid as of gcc 3.3. */
++#ifndef ATTRIBUTE_NONNULL
++# if (GCC_VERSION >= 3003)
++# define ATTRIBUTE_NONNULL(m) __attribute__ ((__nonnull__ (m)))
++# else
++# define ATTRIBUTE_NONNULL(m)
++# endif /* GNUC >= 3.3 */
++#endif /* ATTRIBUTE_NONNULL */
++
++/* Attribute `pure' was valid as of gcc 3.0. */
++#ifndef ATTRIBUTE_PURE
++# if (GCC_VERSION >= 3000)
++# define ATTRIBUTE_PURE __attribute__ ((__pure__))
++# else
++# define ATTRIBUTE_PURE
++# endif /* GNUC >= 3.0 */
++#endif /* ATTRIBUTE_PURE */
++
++/* Use ATTRIBUTE_PRINTF when the format specifier must not be NULL.
++ This was the case for the `printf' format attribute by itself
++ before GCC 3.3, but as of 3.3 we need to add the `nonnull'
++ attribute to retain this behavior. */
++#ifndef ATTRIBUTE_PRINTF
++#define ATTRIBUTE_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n))) ATTRIBUTE_NONNULL(m)
++#define ATTRIBUTE_PRINTF_1 ATTRIBUTE_PRINTF(1, 2)
++#define ATTRIBUTE_PRINTF_2 ATTRIBUTE_PRINTF(2, 3)
++#define ATTRIBUTE_PRINTF_3 ATTRIBUTE_PRINTF(3, 4)
++#define ATTRIBUTE_PRINTF_4 ATTRIBUTE_PRINTF(4, 5)
++#define ATTRIBUTE_PRINTF_5 ATTRIBUTE_PRINTF(5, 6)
++#endif /* ATTRIBUTE_PRINTF */
++
++/* Use ATTRIBUTE_FPTR_PRINTF when the format attribute is to be set on
++ a function pointer. Format attributes were allowed on function
++ pointers as of gcc 3.1. */
++#ifndef ATTRIBUTE_FPTR_PRINTF
++# if (GCC_VERSION >= 3001)
++# define ATTRIBUTE_FPTR_PRINTF(m, n) ATTRIBUTE_PRINTF(m, n)
++# else
++# define ATTRIBUTE_FPTR_PRINTF(m, n)
++# endif /* GNUC >= 3.1 */
++# define ATTRIBUTE_FPTR_PRINTF_1 ATTRIBUTE_FPTR_PRINTF(1, 2)
++# define ATTRIBUTE_FPTR_PRINTF_2 ATTRIBUTE_FPTR_PRINTF(2, 3)
++# define ATTRIBUTE_FPTR_PRINTF_3 ATTRIBUTE_FPTR_PRINTF(3, 4)
++# define ATTRIBUTE_FPTR_PRINTF_4 ATTRIBUTE_FPTR_PRINTF(4, 5)
++# define ATTRIBUTE_FPTR_PRINTF_5 ATTRIBUTE_FPTR_PRINTF(5, 6)
++#endif /* ATTRIBUTE_FPTR_PRINTF */
++
++/* Use ATTRIBUTE_NULL_PRINTF when the format specifier may be NULL. A
++ NULL format specifier was allowed as of gcc 3.3. */
++#ifndef ATTRIBUTE_NULL_PRINTF
++# if (GCC_VERSION >= 3003)
++# define ATTRIBUTE_NULL_PRINTF(m, n) __attribute__ ((__format__ (__printf__, m, n)))
++# else
++# define ATTRIBUTE_NULL_PRINTF(m, n)
++# endif /* GNUC >= 3.3 */
++# define ATTRIBUTE_NULL_PRINTF_1 ATTRIBUTE_NULL_PRINTF(1, 2)
++# define ATTRIBUTE_NULL_PRINTF_2 ATTRIBUTE_NULL_PRINTF(2, 3)
++# define ATTRIBUTE_NULL_PRINTF_3 ATTRIBUTE_NULL_PRINTF(3, 4)
++# define ATTRIBUTE_NULL_PRINTF_4 ATTRIBUTE_NULL_PRINTF(4, 5)
++# define ATTRIBUTE_NULL_PRINTF_5 ATTRIBUTE_NULL_PRINTF(5, 6)
++#endif /* ATTRIBUTE_NULL_PRINTF */
++
++/* Attribute `sentinel' was valid as of gcc 3.5. */
++#ifndef ATTRIBUTE_SENTINEL
++# if (GCC_VERSION >= 3005)
++# define ATTRIBUTE_SENTINEL __attribute__ ((__sentinel__))
++# else
++# define ATTRIBUTE_SENTINEL
++# endif /* GNUC >= 3.5 */
++#endif /* ATTRIBUTE_SENTINEL */
++
++
++#ifndef ATTRIBUTE_ALIGNED_ALIGNOF
++# if (GCC_VERSION >= 3000)
++# define ATTRIBUTE_ALIGNED_ALIGNOF(m) __attribute__ ((__aligned__ (__alignof__ (m))))
++# else
++# define ATTRIBUTE_ALIGNED_ALIGNOF(m)
++# endif /* GNUC >= 3.0 */
++#endif /* ATTRIBUTE_ALIGNED_ALIGNOF */
++
++/* We use __extension__ in some places to suppress -pedantic warnings
++ about GCC extensions. This feature didn't work properly before
++ gcc 2.8. */
++#if GCC_VERSION < 2008
++#define __extension__
++#endif
++
++#endif /* ansidecl.h */
+diff -Nurp linux-2.6.22-590/include/asm-x86_64/bfd.h linux-2.6.22-600/include/asm-x86_64/bfd.h
+--- linux-2.6.22-590/include/asm-x86_64/bfd.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/asm-x86_64/bfd.h 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,4917 @@
++/* DO NOT EDIT! -*- buffer-read-only: t -*- This file is automatically
++ generated from "bfd-in.h", "init.c", "opncls.c", "libbfd.c",
++ "bfdio.c", "bfdwin.c", "section.c", "archures.c", "reloc.c",
++ "syms.c", "bfd.c", "archive.c", "corefile.c", "targets.c", "format.c",
++ "linker.c" and "simple.c".
++ Run "make headers" in your build bfd/ to regenerate. */
++
++/* Main header file for the bfd library -- portable access to object files.
++
++ Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
++ 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
++
++ Contributed by Cygnus Support.
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
++
++/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
++ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as
++ * required.
++ * Keith Owens <kaos@sgi.com> 15 May 2006
++ */
++
++#ifndef __BFD_H_SEEN__
++#define __BFD_H_SEEN__
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#ifdef __KERNEL__
++#include <asm/ansidecl.h>
++#else /* __KERNEL__ */
++#include "ansidecl.h"
++#include "symcat.h"
++#endif /* __KERNEL__ */
++#if defined (__STDC__) || defined (ALMOST_STDC) || defined (HAVE_STRINGIZE)
++#ifndef SABER
++/* This hack is to avoid a problem with some strict ANSI C preprocessors.
++ The problem is, "32_" is not a valid preprocessing token, and we don't
++ want extra underscores (e.g., "nlm_32_"). The XCONCAT2 macro will
++ cause the inner CONCAT2 macros to be evaluated first, producing
++ still-valid pp-tokens. Then the final concatenation can be done. */
++#undef CONCAT4
++#define CONCAT4(a,b,c,d) XCONCAT2(CONCAT2(a,b),CONCAT2(c,d))
++#endif
++#endif
++
++/* The word size used by BFD on the host. This may be 64 with a 32
++ bit target if the host is 64 bit, or if other 64 bit targets have
++ been selected with --enable-targets, or if --enable-64-bit-bfd. */
++#define BFD_ARCH_SIZE 64
++
++/* The word size of the default bfd target. */
++#define BFD_DEFAULT_TARGET_SIZE 64
++
++#define BFD_HOST_64BIT_LONG 1
++#define BFD_HOST_LONG_LONG 1
++#if 1
++#define BFD_HOST_64_BIT long
++#define BFD_HOST_U_64_BIT unsigned long
++typedef BFD_HOST_64_BIT bfd_int64_t;
++typedef BFD_HOST_U_64_BIT bfd_uint64_t;
++#endif
++
++#if BFD_ARCH_SIZE >= 64
++#define BFD64
++#endif
++
++#ifndef INLINE
++#if __GNUC__ >= 2
++#define INLINE __inline__
++#else
++#define INLINE
++#endif
++#endif
++
++/* Forward declaration. */
++typedef struct bfd bfd;
++
++/* Boolean type used in bfd. Too many systems define their own
++ versions of "boolean" for us to safely typedef a "boolean" of
++ our own. Using an enum for "bfd_boolean" has its own set of
++ problems, with strange looking casts required to avoid warnings
++ on some older compilers. Thus we just use an int.
++
++ General rule: Functions which are bfd_boolean return TRUE on
++ success and FALSE on failure (unless they're a predicate). */
++
++typedef int bfd_boolean;
++#undef FALSE
++#undef TRUE
++#define FALSE 0
++#define TRUE 1
++
++#ifdef BFD64
++
++#ifndef BFD_HOST_64_BIT
++ #error No 64 bit integer type available
++#endif /* ! defined (BFD_HOST_64_BIT) */
++
++typedef BFD_HOST_U_64_BIT bfd_vma;
++typedef BFD_HOST_64_BIT bfd_signed_vma;
++typedef BFD_HOST_U_64_BIT bfd_size_type;
++typedef BFD_HOST_U_64_BIT symvalue;
++
++#ifndef fprintf_vma
++#if BFD_HOST_64BIT_LONG
++#define sprintf_vma(s,x) sprintf (s, "%016lx", x)
++#define fprintf_vma(f,x) fprintf (f, "%016lx", x)
++#else
++#define _bfd_int64_low(x) ((unsigned long) (((x) & 0xffffffff)))
++#define _bfd_int64_high(x) ((unsigned long) (((x) >> 32) & 0xffffffff))
++#define fprintf_vma(s,x) \
++ fprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x))
++#define sprintf_vma(s,x) \
++ sprintf ((s), "%08lx%08lx", _bfd_int64_high (x), _bfd_int64_low (x))
++#endif
++#endif
++
++#else /* not BFD64 */
++
++/* Represent a target address. Also used as a generic unsigned type
++ which is guaranteed to be big enough to hold any arithmetic types
++ we need to deal with. */
++typedef unsigned long bfd_vma;
++
++/* A generic signed type which is guaranteed to be big enough to hold any
++ arithmetic types we need to deal with. Can be assumed to be compatible
++ with bfd_vma in the same way that signed and unsigned ints are compatible
++ (as parameters, in assignment, etc). */
++typedef long bfd_signed_vma;
++
++typedef unsigned long symvalue;
++typedef unsigned long bfd_size_type;
++
++/* Print a bfd_vma x on stream s. */
++#define fprintf_vma(s,x) fprintf (s, "%08lx", x)
++#define sprintf_vma(s,x) sprintf (s, "%08lx", x)
++
++#endif /* not BFD64 */
++
++#define HALF_BFD_SIZE_TYPE \
++ (((bfd_size_type) 1) << (8 * sizeof (bfd_size_type) / 2))
++
++#ifndef BFD_HOST_64_BIT
++/* Fall back on a 32 bit type. The idea is to make these types always
++ available for function return types, but in the case that
++ BFD_HOST_64_BIT is undefined such a function should abort or
++ otherwise signal an error. */
++typedef bfd_signed_vma bfd_int64_t;
++typedef bfd_vma bfd_uint64_t;
++#endif
++
++/* An offset into a file. BFD always uses the largest possible offset
++ based on the build time availability of fseek, fseeko, or fseeko64. */
++typedef BFD_HOST_64_BIT file_ptr;
++typedef unsigned BFD_HOST_64_BIT ufile_ptr;
++
++extern void bfd_sprintf_vma (bfd *, char *, bfd_vma);
++extern void bfd_fprintf_vma (bfd *, void *, bfd_vma);
++
++#define printf_vma(x) fprintf_vma(stdout,x)
++#define bfd_printf_vma(abfd,x) bfd_fprintf_vma (abfd,stdout,x)
++
++typedef unsigned int flagword; /* 32 bits of flags */
++typedef unsigned char bfd_byte;
++\f
++/* File formats. */
++
++typedef enum bfd_format
++{
++ bfd_unknown = 0, /* File format is unknown. */
++ bfd_object, /* Linker/assembler/compiler output. */
++ bfd_archive, /* Object archive file. */
++ bfd_core, /* Core dump. */
++ bfd_type_end /* Marks the end; don't use it! */
++}
++bfd_format;
++
++/* Values that may appear in the flags field of a BFD. These also
++ appear in the object_flags field of the bfd_target structure, where
++ they indicate the set of flags used by that backend (not all flags
++ are meaningful for all object file formats) (FIXME: at the moment,
++ the object_flags values have mostly just been copied from backend
++ to another, and are not necessarily correct). */
++
++/* No flags. */
++#define BFD_NO_FLAGS 0x00
++
++/* BFD contains relocation entries. */
++#define HAS_RELOC 0x01
++
++/* BFD is directly executable. */
++#define EXEC_P 0x02
++
++/* BFD has line number information (basically used for F_LNNO in a
++ COFF header). */
++#define HAS_LINENO 0x04
++
++/* BFD has debugging information. */
++#define HAS_DEBUG 0x08
++
++/* BFD has symbols. */
++#define HAS_SYMS 0x10
++
++/* BFD has local symbols (basically used for F_LSYMS in a COFF
++ header). */
++#define HAS_LOCALS 0x20
++
++/* BFD is a dynamic object. */
++#define DYNAMIC 0x40
++
++/* Text section is write protected (if D_PAGED is not set, this is
++ like an a.out NMAGIC file) (the linker sets this by default, but
++ clears it for -r or -N). */
++#define WP_TEXT 0x80
++
++/* BFD is dynamically paged (this is like an a.out ZMAGIC file) (the
++ linker sets this by default, but clears it for -r or -n or -N). */
++#define D_PAGED 0x100
++
++/* BFD is relaxable (this means that bfd_relax_section may be able to
++ do something) (sometimes bfd_relax_section can do something even if
++ this is not set). */
++#define BFD_IS_RELAXABLE 0x200
++
++/* This may be set before writing out a BFD to request using a
++ traditional format. For example, this is used to request that when
++ writing out an a.out object the symbols not be hashed to eliminate
++ duplicates. */
++#define BFD_TRADITIONAL_FORMAT 0x400
++
++/* This flag indicates that the BFD contents are actually cached in
++ memory. If this is set, iostream points to a bfd_in_memory struct. */
++#define BFD_IN_MEMORY 0x800
++
++/* The sections in this BFD specify a memory page. */
++#define HAS_LOAD_PAGE 0x1000
++
++/* This BFD has been created by the linker and doesn't correspond
++ to any input file. */
++#define BFD_LINKER_CREATED 0x2000
++\f
++/* Symbols and relocation. */
++
++/* A count of carsyms (canonical archive symbols). */
++typedef unsigned long symindex;
++
++/* How to perform a relocation. */
++typedef const struct reloc_howto_struct reloc_howto_type;
++
++#define BFD_NO_MORE_SYMBOLS ((symindex) ~0)
++
++/* General purpose part of a symbol X;
++ target specific parts are in libcoff.h, libaout.h, etc. */
++
++#define bfd_get_section(x) ((x)->section)
++#define bfd_get_output_section(x) ((x)->section->output_section)
++#define bfd_set_section(x,y) ((x)->section) = (y)
++#define bfd_asymbol_base(x) ((x)->section->vma)
++#define bfd_asymbol_value(x) (bfd_asymbol_base(x) + (x)->value)
++#define bfd_asymbol_name(x) ((x)->name)
++/*Perhaps future: #define bfd_asymbol_bfd(x) ((x)->section->owner)*/
++#define bfd_asymbol_bfd(x) ((x)->the_bfd)
++#define bfd_asymbol_flavour(x) (bfd_asymbol_bfd(x)->xvec->flavour)
++
++/* A canonical archive symbol. */
++/* This is a type pun with struct ranlib on purpose! */
++typedef struct carsym
++{
++ char *name;
++ file_ptr file_offset; /* Look here to find the file. */
++}
++carsym; /* To make these you call a carsymogen. */
++
++/* Used in generating armaps (archive tables of contents).
++ Perhaps just a forward definition would do? */
++struct orl /* Output ranlib. */
++{
++ char **name; /* Symbol name. */
++ union
++ {
++ file_ptr pos;
++ bfd *abfd;
++ } u; /* bfd* or file position. */
++ int namidx; /* Index into string table. */
++};
++\f
++/* Linenumber stuff. */
++typedef struct lineno_cache_entry
++{
++ unsigned int line_number; /* Linenumber from start of function. */
++ union
++ {
++ struct bfd_symbol *sym; /* Function name. */
++ bfd_vma offset; /* Offset into section. */
++ } u;
++}
++alent;
++\f
++/* Object and core file sections. */
++
++#define align_power(addr, align) \
++ (((addr) + ((bfd_vma) 1 << (align)) - 1) & ((bfd_vma) -1 << (align)))
++
++typedef struct bfd_section *sec_ptr;
++
++#define bfd_get_section_name(bfd, ptr) ((ptr)->name + 0)
++#define bfd_get_section_vma(bfd, ptr) ((ptr)->vma + 0)
++#define bfd_get_section_lma(bfd, ptr) ((ptr)->lma + 0)
++#define bfd_get_section_alignment(bfd, ptr) ((ptr)->alignment_power + 0)
++#define bfd_section_name(bfd, ptr) ((ptr)->name)
++#define bfd_section_size(bfd, ptr) ((ptr)->size)
++#define bfd_get_section_size(ptr) ((ptr)->size)
++#define bfd_section_vma(bfd, ptr) ((ptr)->vma)
++#define bfd_section_lma(bfd, ptr) ((ptr)->lma)
++#define bfd_section_alignment(bfd, ptr) ((ptr)->alignment_power)
++#define bfd_get_section_flags(bfd, ptr) ((ptr)->flags + 0)
++#define bfd_get_section_userdata(bfd, ptr) ((ptr)->userdata)
++
++#define bfd_is_com_section(ptr) (((ptr)->flags & SEC_IS_COMMON) != 0)
++
++#define bfd_set_section_vma(bfd, ptr, val) (((ptr)->vma = (ptr)->lma = (val)), ((ptr)->user_set_vma = TRUE), TRUE)
++#define bfd_set_section_alignment(bfd, ptr, val) (((ptr)->alignment_power = (val)),TRUE)
++#define bfd_set_section_userdata(bfd, ptr, val) (((ptr)->userdata = (val)),TRUE)
++/* Find the address one past the end of SEC. */
++#define bfd_get_section_limit(bfd, sec) \
++ (((sec)->rawsize ? (sec)->rawsize : (sec)->size) \
++ / bfd_octets_per_byte (bfd))
++
++typedef struct stat stat_type;
++\f
++typedef enum bfd_print_symbol
++{
++ bfd_print_symbol_name,
++ bfd_print_symbol_more,
++ bfd_print_symbol_all
++} bfd_print_symbol_type;
++
++/* Information about a symbol that nm needs. */
++
++typedef struct _symbol_info
++{
++ symvalue value;
++ char type;
++ const char *name; /* Symbol name. */
++ unsigned char stab_type; /* Stab type. */
++ char stab_other; /* Stab other. */
++ short stab_desc; /* Stab desc. */
++ const char *stab_name; /* String for stab type. */
++} symbol_info;
++
++/* Get the name of a stabs type code. */
++
++extern const char *bfd_get_stab_name (int);
++\f
++/* Hash table routines. There is no way to free up a hash table. */
++
++/* An element in the hash table. Most uses will actually use a larger
++ structure, and an instance of this will be the first field. */
++
++struct bfd_hash_entry
++{
++ /* Next entry for this hash code. */
++ struct bfd_hash_entry *next;
++ /* String being hashed. */
++ const char *string;
++ /* Hash code. This is the full hash code, not the index into the
++ table. */
++ unsigned long hash;
++};
++
++/* A hash table. */
++
++struct bfd_hash_table
++{
++ /* The hash array. */
++ struct bfd_hash_entry **table;
++ /* The number of slots in the hash table. */
++ unsigned int size;
++ /* A function used to create new elements in the hash table. The
++ first entry is itself a pointer to an element. When this
++ function is first invoked, this pointer will be NULL. However,
++ having the pointer permits a hierarchy of method functions to be
++ built each of which calls the function in the superclass. Thus
++ each function should be written to allocate a new block of memory
++ only if the argument is NULL. */
++ struct bfd_hash_entry *(*newfunc)
++ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
++ /* An objalloc for this hash table. This is a struct objalloc *,
++ but we use void * to avoid requiring the inclusion of objalloc.h. */
++ void *memory;
++};
++
++/* Initialize a hash table. */
++extern bfd_boolean bfd_hash_table_init
++ (struct bfd_hash_table *,
++ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
++ struct bfd_hash_table *,
++ const char *));
++
++/* Initialize a hash table specifying a size. */
++extern bfd_boolean bfd_hash_table_init_n
++ (struct bfd_hash_table *,
++ struct bfd_hash_entry *(*) (struct bfd_hash_entry *,
++ struct bfd_hash_table *,
++ const char *),
++ unsigned int size);
++
++/* Free up a hash table. */
++extern void bfd_hash_table_free
++ (struct bfd_hash_table *);
++
++/* Look up a string in a hash table. If CREATE is TRUE, a new entry
++ will be created for this string if one does not already exist. The
++ COPY argument must be TRUE if this routine should copy the string
++ into newly allocated memory when adding an entry. */
++extern struct bfd_hash_entry *bfd_hash_lookup
++ (struct bfd_hash_table *, const char *, bfd_boolean create,
++ bfd_boolean copy);
++
++/* Replace an entry in a hash table. */
++extern void bfd_hash_replace
++ (struct bfd_hash_table *, struct bfd_hash_entry *old,
++ struct bfd_hash_entry *nw);
++
++/* Base method for creating a hash table entry. */
++extern struct bfd_hash_entry *bfd_hash_newfunc
++ (struct bfd_hash_entry *, struct bfd_hash_table *, const char *);
++
++/* Grab some space for a hash table entry. */
++extern void *bfd_hash_allocate
++ (struct bfd_hash_table *, unsigned int);
++
++/* Traverse a hash table in a random order, calling a function on each
++ element. If the function returns FALSE, the traversal stops. The
++ INFO argument is passed to the function. */
++extern void bfd_hash_traverse
++ (struct bfd_hash_table *,
++ bfd_boolean (*) (struct bfd_hash_entry *, void *),
++ void *info);
++
++/* Allows the default size of a hash table to be configured. New hash
++ tables allocated using bfd_hash_table_init will be created with
++ this size. */
++extern void bfd_hash_set_default_size (bfd_size_type);
++
++/* This structure is used to keep track of stabs in sections
++ information while linking. */
++
++struct stab_info
++{
++ /* A hash table used to hold stabs strings. */
++ struct bfd_strtab_hash *strings;
++ /* The header file hash table. */
++ struct bfd_hash_table includes;
++ /* The first .stabstr section. */
++ struct bfd_section *stabstr;
++};
++
++#define COFF_SWAP_TABLE (void *) &bfd_coff_std_swap_table
++
++/* User program access to BFD facilities. */
++
++/* Direct I/O routines, for programs which know more about the object
++ file than BFD does. Use higher level routines if possible. */
++
++extern bfd_size_type bfd_bread (void *, bfd_size_type, bfd *);
++extern bfd_size_type bfd_bwrite (const void *, bfd_size_type, bfd *);
++extern int bfd_seek (bfd *, file_ptr, int);
++extern file_ptr bfd_tell (bfd *);
++extern int bfd_flush (bfd *);
++extern int bfd_stat (bfd *, struct stat *);
++
++/* Deprecated old routines. */
++#if __GNUC__
++#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
++ (warn_deprecated ("bfd_read", __FILE__, __LINE__, __FUNCTION__), \
++ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
++#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
++ (warn_deprecated ("bfd_write", __FILE__, __LINE__, __FUNCTION__), \
++ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
++#else
++#define bfd_read(BUF, ELTSIZE, NITEMS, ABFD) \
++ (warn_deprecated ("bfd_read", (const char *) 0, 0, (const char *) 0), \
++ bfd_bread ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
++#define bfd_write(BUF, ELTSIZE, NITEMS, ABFD) \
++ (warn_deprecated ("bfd_write", (const char *) 0, 0, (const char *) 0),\
++ bfd_bwrite ((BUF), (ELTSIZE) * (NITEMS), (ABFD)))
++#endif
++extern void warn_deprecated (const char *, const char *, int, const char *);
++
++/* Cast from const char * to char * so that caller can assign to
++ a char * without a warning. */
++#define bfd_get_filename(abfd) ((char *) (abfd)->filename)
++#define bfd_get_cacheable(abfd) ((abfd)->cacheable)
++#define bfd_get_format(abfd) ((abfd)->format)
++#define bfd_get_target(abfd) ((abfd)->xvec->name)
++#define bfd_get_flavour(abfd) ((abfd)->xvec->flavour)
++#define bfd_family_coff(abfd) \
++ (bfd_get_flavour (abfd) == bfd_target_coff_flavour || \
++ bfd_get_flavour (abfd) == bfd_target_xcoff_flavour)
++#define bfd_big_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_BIG)
++#define bfd_little_endian(abfd) ((abfd)->xvec->byteorder == BFD_ENDIAN_LITTLE)
++#define bfd_header_big_endian(abfd) \
++ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_BIG)
++#define bfd_header_little_endian(abfd) \
++ ((abfd)->xvec->header_byteorder == BFD_ENDIAN_LITTLE)
++#define bfd_get_file_flags(abfd) ((abfd)->flags)
++#define bfd_applicable_file_flags(abfd) ((abfd)->xvec->object_flags)
++#define bfd_applicable_section_flags(abfd) ((abfd)->xvec->section_flags)
++#define bfd_my_archive(abfd) ((abfd)->my_archive)
++#define bfd_has_map(abfd) ((abfd)->has_armap)
++
++#define bfd_valid_reloc_types(abfd) ((abfd)->xvec->valid_reloc_types)
++#define bfd_usrdata(abfd) ((abfd)->usrdata)
++
++#define bfd_get_start_address(abfd) ((abfd)->start_address)
++#define bfd_get_symcount(abfd) ((abfd)->symcount)
++#define bfd_get_outsymbols(abfd) ((abfd)->outsymbols)
++#define bfd_count_sections(abfd) ((abfd)->section_count)
++
++#define bfd_get_dynamic_symcount(abfd) ((abfd)->dynsymcount)
++
++#define bfd_get_symbol_leading_char(abfd) ((abfd)->xvec->symbol_leading_char)
++
++#define bfd_set_cacheable(abfd,bool) (((abfd)->cacheable = bool), TRUE)
++
++extern bfd_boolean bfd_cache_close
++ (bfd *abfd);
++/* NB: This declaration should match the autogenerated one in libbfd.h. */
++
++extern bfd_boolean bfd_cache_close_all (void);
++
++extern bfd_boolean bfd_record_phdr
++ (bfd *, unsigned long, bfd_boolean, flagword, bfd_boolean, bfd_vma,
++ bfd_boolean, bfd_boolean, unsigned int, struct bfd_section **);
++
++/* Byte swapping routines. */
++
++bfd_uint64_t bfd_getb64 (const void *);
++bfd_uint64_t bfd_getl64 (const void *);
++bfd_int64_t bfd_getb_signed_64 (const void *);
++bfd_int64_t bfd_getl_signed_64 (const void *);
++bfd_vma bfd_getb32 (const void *);
++bfd_vma bfd_getl32 (const void *);
++bfd_signed_vma bfd_getb_signed_32 (const void *);
++bfd_signed_vma bfd_getl_signed_32 (const void *);
++bfd_vma bfd_getb16 (const void *);
++bfd_vma bfd_getl16 (const void *);
++bfd_signed_vma bfd_getb_signed_16 (const void *);
++bfd_signed_vma bfd_getl_signed_16 (const void *);
++void bfd_putb64 (bfd_uint64_t, void *);
++void bfd_putl64 (bfd_uint64_t, void *);
++void bfd_putb32 (bfd_vma, void *);
++void bfd_putl32 (bfd_vma, void *);
++void bfd_putb16 (bfd_vma, void *);
++void bfd_putl16 (bfd_vma, void *);
++
++/* Byte swapping routines which take size and endianness as arguments. */
++
++bfd_uint64_t bfd_get_bits (const void *, int, bfd_boolean);
++void bfd_put_bits (bfd_uint64_t, void *, int, bfd_boolean);
++
++extern bfd_boolean bfd_section_already_linked_table_init (void);
++extern void bfd_section_already_linked_table_free (void);
++\f
++/* Externally visible ECOFF routines. */
++
++#if defined(__STDC__) || defined(ALMOST_STDC)
++struct ecoff_debug_info;
++struct ecoff_debug_swap;
++struct ecoff_extr;
++struct bfd_symbol;
++struct bfd_link_info;
++struct bfd_link_hash_entry;
++struct bfd_elf_version_tree;
++#endif
++extern bfd_vma bfd_ecoff_get_gp_value
++ (bfd * abfd);
++extern bfd_boolean bfd_ecoff_set_gp_value
++ (bfd *abfd, bfd_vma gp_value);
++extern bfd_boolean bfd_ecoff_set_regmasks
++ (bfd *abfd, unsigned long gprmask, unsigned long fprmask,
++ unsigned long *cprmask);
++extern void *bfd_ecoff_debug_init
++ (bfd *output_bfd, struct ecoff_debug_info *output_debug,
++ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
++extern void bfd_ecoff_debug_free
++ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
++ const struct ecoff_debug_swap *output_swap, struct bfd_link_info *);
++extern bfd_boolean bfd_ecoff_debug_accumulate
++ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
++ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
++ struct ecoff_debug_info *input_debug,
++ const struct ecoff_debug_swap *input_swap, struct bfd_link_info *);
++extern bfd_boolean bfd_ecoff_debug_accumulate_other
++ (void *handle, bfd *output_bfd, struct ecoff_debug_info *output_debug,
++ const struct ecoff_debug_swap *output_swap, bfd *input_bfd,
++ struct bfd_link_info *);
++extern bfd_boolean bfd_ecoff_debug_externals
++ (bfd *abfd, struct ecoff_debug_info *debug,
++ const struct ecoff_debug_swap *swap, bfd_boolean relocatable,
++ bfd_boolean (*get_extr) (struct bfd_symbol *, struct ecoff_extr *),
++ void (*set_index) (struct bfd_symbol *, bfd_size_type));
++extern bfd_boolean bfd_ecoff_debug_one_external
++ (bfd *abfd, struct ecoff_debug_info *debug,
++ const struct ecoff_debug_swap *swap, const char *name,
++ struct ecoff_extr *esym);
++extern bfd_size_type bfd_ecoff_debug_size
++ (bfd *abfd, struct ecoff_debug_info *debug,
++ const struct ecoff_debug_swap *swap);
++extern bfd_boolean bfd_ecoff_write_debug
++ (bfd *abfd, struct ecoff_debug_info *debug,
++ const struct ecoff_debug_swap *swap, file_ptr where);
++extern bfd_boolean bfd_ecoff_write_accumulated_debug
++ (void *handle, bfd *abfd, struct ecoff_debug_info *debug,
++ const struct ecoff_debug_swap *swap,
++ struct bfd_link_info *info, file_ptr where);
++
++/* Externally visible ELF routines. */
++
++struct bfd_link_needed_list
++{
++ struct bfd_link_needed_list *next;
++ bfd *by;
++ const char *name;
++};
++
++enum dynamic_lib_link_class {
++ DYN_NORMAL = 0,
++ DYN_AS_NEEDED = 1,
++ DYN_DT_NEEDED = 2,
++ DYN_NO_ADD_NEEDED = 4,
++ DYN_NO_NEEDED = 8
++};
++
++extern bfd_boolean bfd_elf_record_link_assignment
++ (struct bfd_link_info *, const char *, bfd_boolean);
++extern struct bfd_link_needed_list *bfd_elf_get_needed_list
++ (bfd *, struct bfd_link_info *);
++extern bfd_boolean bfd_elf_get_bfd_needed_list
++ (bfd *, struct bfd_link_needed_list **);
++extern bfd_boolean bfd_elf_size_dynamic_sections
++ (bfd *, const char *, const char *, const char *, const char * const *,
++ struct bfd_link_info *, struct bfd_section **,
++ struct bfd_elf_version_tree *);
++extern bfd_boolean bfd_elf_size_dynsym_hash_dynstr
++ (bfd *, struct bfd_link_info *);
++extern void bfd_elf_set_dt_needed_name
++ (bfd *, const char *);
++extern const char *bfd_elf_get_dt_soname
++ (bfd *);
++extern void bfd_elf_set_dyn_lib_class
++ (bfd *, int);
++extern int bfd_elf_get_dyn_lib_class
++ (bfd *);
++extern struct bfd_link_needed_list *bfd_elf_get_runpath_list
++ (bfd *, struct bfd_link_info *);
++extern bfd_boolean bfd_elf_discard_info
++ (bfd *, struct bfd_link_info *);
++extern unsigned int _bfd_elf_default_action_discarded
++ (struct bfd_section *);
++
++/* Return an upper bound on the number of bytes required to store a
++ copy of ABFD's program header table entries. Return -1 if an error
++ occurs; bfd_get_error will return an appropriate code. */
++extern long bfd_get_elf_phdr_upper_bound
++ (bfd *abfd);
++
++/* Copy ABFD's program header table entries to *PHDRS. The entries
++ will be stored as an array of Elf_Internal_Phdr structures, as
++ defined in include/elf/internal.h. To find out how large the
++ buffer needs to be, call bfd_get_elf_phdr_upper_bound.
++
++ Return the number of program header table entries read, or -1 if an
++ error occurs; bfd_get_error will return an appropriate code. */
++extern int bfd_get_elf_phdrs
++ (bfd *abfd, void *phdrs);
++
++/* Create a new BFD as if by bfd_openr. Rather than opening a file,
++ reconstruct an ELF file by reading the segments out of remote memory
++ based on the ELF file header at EHDR_VMA and the ELF program headers it
++ points to. If not null, *LOADBASEP is filled in with the difference
++ between the VMAs from which the segments were read, and the VMAs the
++ file headers (and hence BFD's idea of each section's VMA) put them at.
++
++ The function TARGET_READ_MEMORY is called to copy LEN bytes from the
++ remote memory at target address VMA into the local buffer at MYADDR; it
++ should return zero on success or an `errno' code on failure. TEMPL must
++ be a BFD for an ELF target with the word size and byte order found in
++ the remote memory. */
++extern bfd *bfd_elf_bfd_from_remote_memory
++ (bfd *templ, bfd_vma ehdr_vma, bfd_vma *loadbasep,
++ int (*target_read_memory) (bfd_vma vma, bfd_byte *myaddr, int len));
++
++/* Return the arch_size field of an elf bfd, or -1 if not elf. */
++extern int bfd_get_arch_size
++ (bfd *);
++
++/* Return TRUE if address "naturally" sign extends, or -1 if not elf. */
++extern int bfd_get_sign_extend_vma
++ (bfd *);
++
++extern struct bfd_section *_bfd_elf_tls_setup
++ (bfd *, struct bfd_link_info *);
++
++extern void _bfd_elf_provide_symbol
++ (struct bfd_link_info *, const char *, bfd_vma, struct bfd_section *);
++
++extern void _bfd_elf_provide_section_bound_symbols
++ (struct bfd_link_info *, struct bfd_section *, const char *, const char *);
++
++extern void _bfd_elf_fix_excluded_sec_syms
++ (bfd *, struct bfd_link_info *);
++
++extern bfd_boolean bfd_m68k_elf32_create_embedded_relocs
++ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *,
++ char **);
++
++/* SunOS shared library support routines for the linker. */
++
++extern struct bfd_link_needed_list *bfd_sunos_get_needed_list
++ (bfd *, struct bfd_link_info *);
++extern bfd_boolean bfd_sunos_record_link_assignment
++ (bfd *, struct bfd_link_info *, const char *);
++extern bfd_boolean bfd_sunos_size_dynamic_sections
++ (bfd *, struct bfd_link_info *, struct bfd_section **,
++ struct bfd_section **, struct bfd_section **);
++
++/* Linux shared library support routines for the linker. */
++
++extern bfd_boolean bfd_i386linux_size_dynamic_sections
++ (bfd *, struct bfd_link_info *);
++extern bfd_boolean bfd_m68klinux_size_dynamic_sections
++ (bfd *, struct bfd_link_info *);
++extern bfd_boolean bfd_sparclinux_size_dynamic_sections
++ (bfd *, struct bfd_link_info *);
++
++/* mmap hacks */
++
++struct _bfd_window_internal;
++typedef struct _bfd_window_internal bfd_window_internal;
++
++typedef struct _bfd_window
++{
++ /* What the user asked for. */
++ void *data;
++ bfd_size_type size;
++ /* The actual window used by BFD. Small user-requested read-only
++ regions sharing a page may share a single window into the object
++ file. Read-write versions shouldn't until I've fixed things to
++ keep track of which portions have been claimed by the
++ application; don't want to give the same region back when the
++ application wants two writable copies! */
++ struct _bfd_window_internal *i;
++}
++bfd_window;
++
++extern void bfd_init_window
++ (bfd_window *);
++extern void bfd_free_window
++ (bfd_window *);
++extern bfd_boolean bfd_get_file_window
++ (bfd *, file_ptr, bfd_size_type, bfd_window *, bfd_boolean);
++
++/* XCOFF support routines for the linker. */
++
++extern bfd_boolean bfd_xcoff_link_record_set
++ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_size_type);
++extern bfd_boolean bfd_xcoff_import_symbol
++ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *, bfd_vma,
++ const char *, const char *, const char *, unsigned int);
++extern bfd_boolean bfd_xcoff_export_symbol
++ (bfd *, struct bfd_link_info *, struct bfd_link_hash_entry *);
++extern bfd_boolean bfd_xcoff_link_count_reloc
++ (bfd *, struct bfd_link_info *, const char *);
++extern bfd_boolean bfd_xcoff_record_link_assignment
++ (bfd *, struct bfd_link_info *, const char *);
++extern bfd_boolean bfd_xcoff_size_dynamic_sections
++ (bfd *, struct bfd_link_info *, const char *, const char *,
++ unsigned long, unsigned long, unsigned long, bfd_boolean,
++ int, bfd_boolean, bfd_boolean, struct bfd_section **, bfd_boolean);
++extern bfd_boolean bfd_xcoff_link_generate_rtinit
++ (bfd *, const char *, const char *, bfd_boolean);
++
++/* XCOFF support routines for ar. */
++extern bfd_boolean bfd_xcoff_ar_archive_set_magic
++ (bfd *, char *);
++
++/* Externally visible COFF routines. */
++
++#if defined(__STDC__) || defined(ALMOST_STDC)
++struct internal_syment;
++union internal_auxent;
++#endif
++
++extern bfd_boolean bfd_coff_get_syment
++ (bfd *, struct bfd_symbol *, struct internal_syment *);
++
++extern bfd_boolean bfd_coff_get_auxent
++ (bfd *, struct bfd_symbol *, int, union internal_auxent *);
++
++extern bfd_boolean bfd_coff_set_symbol_class
++ (bfd *, struct bfd_symbol *, unsigned int);
++
++extern bfd_boolean bfd_m68k_coff_create_embedded_relocs
++ (bfd *, struct bfd_link_info *, struct bfd_section *, struct bfd_section *, char **);
++
++/* ARM Interworking support. Called from linker. */
++extern bfd_boolean bfd_arm_allocate_interworking_sections
++ (struct bfd_link_info *);
++
++extern bfd_boolean bfd_arm_process_before_allocation
++ (bfd *, struct bfd_link_info *, int);
++
++extern bfd_boolean bfd_arm_get_bfd_for_interworking
++ (bfd *, struct bfd_link_info *);
++
++/* PE ARM Interworking support. Called from linker. */
++extern bfd_boolean bfd_arm_pe_allocate_interworking_sections
++ (struct bfd_link_info *);
++
++extern bfd_boolean bfd_arm_pe_process_before_allocation
++ (bfd *, struct bfd_link_info *, int);
++
++extern bfd_boolean bfd_arm_pe_get_bfd_for_interworking
++ (bfd *, struct bfd_link_info *);
++
++/* ELF ARM Interworking support. Called from linker. */
++extern bfd_boolean bfd_elf32_arm_allocate_interworking_sections
++ (struct bfd_link_info *);
++
++extern bfd_boolean bfd_elf32_arm_process_before_allocation
++ (bfd *, struct bfd_link_info *, int);
++
++void bfd_elf32_arm_set_target_relocs
++ (struct bfd_link_info *, int, char *, int, int);
++
++extern bfd_boolean bfd_elf32_arm_get_bfd_for_interworking
++ (bfd *, struct bfd_link_info *);
++
++extern bfd_boolean bfd_elf32_arm_add_glue_sections_to_bfd
++ (bfd *, struct bfd_link_info *);
++
++/* ELF ARM mapping symbol support */
++extern bfd_boolean bfd_is_arm_mapping_symbol_name
++ (const char * name);
++
++/* ARM Note section processing. */
++extern bfd_boolean bfd_arm_merge_machines
++ (bfd *, bfd *);
++
++extern bfd_boolean bfd_arm_update_notes
++ (bfd *, const char *);
++
++extern unsigned int bfd_arm_get_mach_from_notes
++ (bfd *, const char *);
++
++/* TI COFF load page support. */
++extern void bfd_ticoff_set_section_load_page
++ (struct bfd_section *, int);
++
++extern int bfd_ticoff_get_section_load_page
++ (struct bfd_section *);
++
++/* H8/300 functions. */
++extern bfd_vma bfd_h8300_pad_address
++ (bfd *, bfd_vma);
++
++/* IA64 Itanium code generation. Called from linker. */
++extern void bfd_elf32_ia64_after_parse
++ (int);
++
++extern void bfd_elf64_ia64_after_parse
++ (int);
++
++/* This structure is used for a comdat section, as in PE. A comdat
++ section is associated with a particular symbol. When the linker
++ sees a comdat section, it keeps only one of the sections with a
++ given name and associated with a given symbol. */
++
++struct coff_comdat_info
++{
++ /* The name of the symbol associated with a comdat section. */
++ const char *name;
++
++ /* The local symbol table index of the symbol associated with a
++ comdat section. This is only meaningful to the object file format
++ specific code; it is not an index into the list returned by
++ bfd_canonicalize_symtab. */
++ long symbol;
++};
++
++extern struct coff_comdat_info *bfd_coff_get_comdat_section
++ (bfd *, struct bfd_section *);
++
++/* Extracted from init.c. */
++void bfd_init (void);
++
++/* Extracted from opncls.c. */
++bfd *bfd_fopen (const char *filename, const char *target,
++ const char *mode, int fd);
++
++bfd *bfd_openr (const char *filename, const char *target);
++
++bfd *bfd_fdopenr (const char *filename, const char *target, int fd);
++
++bfd *bfd_openstreamr (const char *, const char *, void *);
++
++bfd *bfd_openr_iovec (const char *filename, const char *target,
++ void *(*open) (struct bfd *nbfd,
++ void *open_closure),
++ void *open_closure,
++ file_ptr (*pread) (struct bfd *nbfd,
++ void *stream,
++ void *buf,
++ file_ptr nbytes,
++ file_ptr offset),
++ int (*close) (struct bfd *nbfd,
++ void *stream));
++
++bfd *bfd_openw (const char *filename, const char *target);
++
++bfd_boolean bfd_close (bfd *abfd);
++
++bfd_boolean bfd_close_all_done (bfd *);
++
++bfd *bfd_create (const char *filename, bfd *templ);
++
++bfd_boolean bfd_make_writable (bfd *abfd);
++
++bfd_boolean bfd_make_readable (bfd *abfd);
++
++unsigned long bfd_calc_gnu_debuglink_crc32
++ (unsigned long crc, const unsigned char *buf, bfd_size_type len);
++
++char *bfd_follow_gnu_debuglink (bfd *abfd, const char *dir);
++
++struct bfd_section *bfd_create_gnu_debuglink_section
++ (bfd *abfd, const char *filename);
++
++bfd_boolean bfd_fill_in_gnu_debuglink_section
++ (bfd *abfd, struct bfd_section *sect, const char *filename);
++
++/* Extracted from libbfd.c. */
++
++/* Byte swapping macros for user section data. */
++
++#define bfd_put_8(abfd, val, ptr) \
++ ((void) (*((unsigned char *) (ptr)) = (val) & 0xff))
++#define bfd_put_signed_8 \
++ bfd_put_8
++#define bfd_get_8(abfd, ptr) \
++ (*(unsigned char *) (ptr) & 0xff)
++#define bfd_get_signed_8(abfd, ptr) \
++ (((*(unsigned char *) (ptr) & 0xff) ^ 0x80) - 0x80)
++
++#define bfd_put_16(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_putx16, ((val),(ptr)))
++#define bfd_put_signed_16 \
++ bfd_put_16
++#define bfd_get_16(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx16, (ptr))
++#define bfd_get_signed_16(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx_signed_16, (ptr))
++
++#define bfd_put_32(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_putx32, ((val),(ptr)))
++#define bfd_put_signed_32 \
++ bfd_put_32
++#define bfd_get_32(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx32, (ptr))
++#define bfd_get_signed_32(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx_signed_32, (ptr))
++
++#define bfd_put_64(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_putx64, ((val), (ptr)))
++#define bfd_put_signed_64 \
++ bfd_put_64
++#define bfd_get_64(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx64, (ptr))
++#define bfd_get_signed_64(abfd, ptr) \
++ BFD_SEND (abfd, bfd_getx_signed_64, (ptr))
++
++#define bfd_get(bits, abfd, ptr) \
++ ((bits) == 8 ? (bfd_vma) bfd_get_8 (abfd, ptr) \
++ : (bits) == 16 ? bfd_get_16 (abfd, ptr) \
++ : (bits) == 32 ? bfd_get_32 (abfd, ptr) \
++ : (bits) == 64 ? bfd_get_64 (abfd, ptr) \
++ : (abort (), (bfd_vma) - 1))
++
++#define bfd_put(bits, abfd, val, ptr) \
++ ((bits) == 8 ? bfd_put_8 (abfd, val, ptr) \
++ : (bits) == 16 ? bfd_put_16 (abfd, val, ptr) \
++ : (bits) == 32 ? bfd_put_32 (abfd, val, ptr) \
++ : (bits) == 64 ? bfd_put_64 (abfd, val, ptr) \
++ : (abort (), (void) 0))
++
++
++/* Byte swapping macros for file header data. */
++
++#define bfd_h_put_8(abfd, val, ptr) \
++ bfd_put_8 (abfd, val, ptr)
++#define bfd_h_put_signed_8(abfd, val, ptr) \
++ bfd_put_8 (abfd, val, ptr)
++#define bfd_h_get_8(abfd, ptr) \
++ bfd_get_8 (abfd, ptr)
++#define bfd_h_get_signed_8(abfd, ptr) \
++ bfd_get_signed_8 (abfd, ptr)
++
++#define bfd_h_put_16(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_h_putx16, (val, ptr))
++#define bfd_h_put_signed_16 \
++ bfd_h_put_16
++#define bfd_h_get_16(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx16, (ptr))
++#define bfd_h_get_signed_16(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx_signed_16, (ptr))
++
++#define bfd_h_put_32(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_h_putx32, (val, ptr))
++#define bfd_h_put_signed_32 \
++ bfd_h_put_32
++#define bfd_h_get_32(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx32, (ptr))
++#define bfd_h_get_signed_32(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx_signed_32, (ptr))
++
++#define bfd_h_put_64(abfd, val, ptr) \
++ BFD_SEND (abfd, bfd_h_putx64, (val, ptr))
++#define bfd_h_put_signed_64 \
++ bfd_h_put_64
++#define bfd_h_get_64(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx64, (ptr))
++#define bfd_h_get_signed_64(abfd, ptr) \
++ BFD_SEND (abfd, bfd_h_getx_signed_64, (ptr))
++
++/* Aliases for the above, which should eventually go away. */
++
++#define H_PUT_64 bfd_h_put_64
++#define H_PUT_32 bfd_h_put_32
++#define H_PUT_16 bfd_h_put_16
++#define H_PUT_8 bfd_h_put_8
++#define H_PUT_S64 bfd_h_put_signed_64
++#define H_PUT_S32 bfd_h_put_signed_32
++#define H_PUT_S16 bfd_h_put_signed_16
++#define H_PUT_S8 bfd_h_put_signed_8
++#define H_GET_64 bfd_h_get_64
++#define H_GET_32 bfd_h_get_32
++#define H_GET_16 bfd_h_get_16
++#define H_GET_8 bfd_h_get_8
++#define H_GET_S64 bfd_h_get_signed_64
++#define H_GET_S32 bfd_h_get_signed_32
++#define H_GET_S16 bfd_h_get_signed_16
++#define H_GET_S8 bfd_h_get_signed_8
++
++
++/* Extracted from bfdio.c. */
++long bfd_get_mtime (bfd *abfd);
++
++long bfd_get_size (bfd *abfd);
++
++/* Extracted from bfdwin.c. */
++/* Extracted from section.c. */
++typedef struct bfd_section
++{
++ /* The name of the section; the name isn't a copy, the pointer is
++ the same as that passed to bfd_make_section. */
++ const char *name;
++
++ /* A unique sequence number. */
++ int id;
++
++ /* Which section in the bfd; 0..n-1 as sections are created in a bfd. */
++ int index;
++
++ /* The next section in the list belonging to the BFD, or NULL. */
++ struct bfd_section *next;
++
++ /* The previous section in the list belonging to the BFD, or NULL. */
++ struct bfd_section *prev;
++
++ /* The field flags contains attributes of the section. Some
++ flags are read in from the object file, and some are
++ synthesized from other information. */
++ flagword flags;
++
++#define SEC_NO_FLAGS 0x000
++
++ /* Tells the OS to allocate space for this section when loading.
++ This is clear for a section containing debug information only. */
++#define SEC_ALLOC 0x001
++
++ /* Tells the OS to load the section from the file when loading.
++ This is clear for a .bss section. */
++#define SEC_LOAD 0x002
++
++ /* The section contains data still to be relocated, so there is
++ some relocation information too. */
++#define SEC_RELOC 0x004
++
++ /* A signal to the OS that the section contains read only data. */
++#define SEC_READONLY 0x008
++
++ /* The section contains code only. */
++#define SEC_CODE 0x010
++
++ /* The section contains data only. */
++#define SEC_DATA 0x020
++
++ /* The section will reside in ROM. */
++#define SEC_ROM 0x040
++
++ /* The section contains constructor information. This section
++ type is used by the linker to create lists of constructors and
++ destructors used by <<g++>>. When a back end sees a symbol
++ which should be used in a constructor list, it creates a new
++ section for the type of name (e.g., <<__CTOR_LIST__>>), attaches
++ the symbol to it, and builds a relocation. To build the lists
++ of constructors, all the linker has to do is catenate all the
++ sections called <<__CTOR_LIST__>> and relocate the data
++ contained within - exactly the operations it would perform on
++ standard data. */
++#define SEC_CONSTRUCTOR 0x080
++
++ /* The section has contents - a data section could be
++ <<SEC_ALLOC>> | <<SEC_HAS_CONTENTS>>; a debug section could be
++ <<SEC_HAS_CONTENTS>> */
++#define SEC_HAS_CONTENTS 0x100
++
++ /* An instruction to the linker to not output the section
++ even if it has information which would normally be written. */
++#define SEC_NEVER_LOAD 0x200
++
++ /* The section contains thread local data. */
++#define SEC_THREAD_LOCAL 0x400
++
++ /* The section has GOT references. This flag is only for the
++ linker, and is currently only used by the elf32-hppa back end.
++ It will be set if global offset table references were detected
++ in this section, which indicate to the linker that the section
++ contains PIC code, and must be handled specially when doing a
++ static link. */
++#define SEC_HAS_GOT_REF 0x800
++
++ /* The section contains common symbols (symbols may be defined
++ multiple times, the value of a symbol is the amount of
++ space it requires, and the largest symbol value is the one
++ used). Most targets have exactly one of these (which we
++ translate to bfd_com_section_ptr), but ECOFF has two. */
++#define SEC_IS_COMMON 0x1000
++
++ /* The section contains only debugging information. For
++ example, this is set for ELF .debug and .stab sections.
++ strip tests this flag to see if a section can be
++ discarded. */
++#define SEC_DEBUGGING 0x2000
++
++ /* The contents of this section are held in memory pointed to
++ by the contents field. This is checked by bfd_get_section_contents,
++ and the data is retrieved from memory if appropriate. */
++#define SEC_IN_MEMORY 0x4000
++
++ /* The contents of this section are to be excluded by the
++ linker for executable and shared objects unless those
++ objects are to be further relocated. */
++#define SEC_EXCLUDE 0x8000
++
++ /* The contents of this section are to be sorted based on the sum of
++ the symbol and addend values specified by the associated relocation
++ entries. Entries without associated relocation entries will be
++ appended to the end of the section in an unspecified order. */
++#define SEC_SORT_ENTRIES 0x10000
++
++ /* When linking, duplicate sections of the same name should be
++ discarded, rather than being combined into a single section as
++ is usually done. This is similar to how common symbols are
++ handled. See SEC_LINK_DUPLICATES below. */
++#define SEC_LINK_ONCE 0x20000
++
++ /* If SEC_LINK_ONCE is set, this bitfield describes how the linker
++ should handle duplicate sections. */
++#define SEC_LINK_DUPLICATES 0x40000
++
++ /* This value for SEC_LINK_DUPLICATES means that duplicate
++ sections with the same name should simply be discarded. */
++#define SEC_LINK_DUPLICATES_DISCARD 0x0
++
++ /* This value for SEC_LINK_DUPLICATES means that the linker
++ should warn if there are any duplicate sections, although
++ it should still only link one copy. */
++#define SEC_LINK_DUPLICATES_ONE_ONLY 0x80000
++
++ /* This value for SEC_LINK_DUPLICATES means that the linker
++ should warn if any duplicate sections are a different size. */
++#define SEC_LINK_DUPLICATES_SAME_SIZE 0x100000
++
++ /* This value for SEC_LINK_DUPLICATES means that the linker
++ should warn if any duplicate sections contain different
++ contents. */
++#define SEC_LINK_DUPLICATES_SAME_CONTENTS \
++ (SEC_LINK_DUPLICATES_ONE_ONLY | SEC_LINK_DUPLICATES_SAME_SIZE)
++
++ /* This section was created by the linker as part of dynamic
++ relocation or other arcane processing. It is skipped when
++ going through the first-pass output, trusting that someone
++ else up the line will take care of it later. */
++#define SEC_LINKER_CREATED 0x200000
++
++ /* This section should not be subject to garbage collection. */
++#define SEC_KEEP 0x400000
++
++ /* This section contains "short" data, and should be placed
++ "near" the GP. */
++#define SEC_SMALL_DATA 0x800000
++
++ /* Attempt to merge identical entities in the section.
++ Entity size is given in the entsize field. */
++#define SEC_MERGE 0x1000000
++
++ /* If given with SEC_MERGE, entities to merge are zero terminated
++ strings where entsize specifies character size instead of fixed
++ size entries. */
++#define SEC_STRINGS 0x2000000
++
++ /* This section contains data about section groups. */
++#define SEC_GROUP 0x4000000
++
++ /* The section is a COFF shared library section. This flag is
++ only for the linker. If this type of section appears in
++ the input file, the linker must copy it to the output file
++ without changing the vma or size. FIXME: Although this
++ was originally intended to be general, it really is COFF
++ specific (and the flag was renamed to indicate this). It
++ might be cleaner to have some more general mechanism to
++ allow the back end to control what the linker does with
++ sections. */
++#define SEC_COFF_SHARED_LIBRARY 0x10000000
++
++ /* This section contains data which may be shared with other
++ executables or shared objects. This is for COFF only. */
++#define SEC_COFF_SHARED 0x20000000
++
++ /* When a section with this flag is being linked, then if the size of
++ the input section is less than a page, it should not cross a page
++ boundary. If the size of the input section is one page or more,
++ it should be aligned on a page boundary. This is for TI
++ TMS320C54X only. */
++#define SEC_TIC54X_BLOCK 0x40000000
++
++ /* Conditionally link this section; do not link if there are no
++ references found to any symbol in the section. This is for TI
++ TMS320C54X only. */
++#define SEC_TIC54X_CLINK 0x80000000
++
++ /* End of section flags. */
++
++ /* Some internal packed boolean fields. */
++
++ /* See the vma field. */
++ unsigned int user_set_vma : 1;
++
++ /* A mark flag used by some of the linker backends. */
++ unsigned int linker_mark : 1;
++
++ /* Another mark flag used by some of the linker backends. Set for
++ output sections that have an input section. */
++ unsigned int linker_has_input : 1;
++
++ /* Mark flags used by some linker backends for garbage collection. */
++ unsigned int gc_mark : 1;
++ unsigned int gc_mark_from_eh : 1;
++
++ /* The following flags are used by the ELF linker. */
++
++ /* Mark sections which have been allocated to segments. */
++ unsigned int segment_mark : 1;
++
++ /* Type of sec_info information. */
++ unsigned int sec_info_type:3;
++#define ELF_INFO_TYPE_NONE 0
++#define ELF_INFO_TYPE_STABS 1
++#define ELF_INFO_TYPE_MERGE 2
++#define ELF_INFO_TYPE_EH_FRAME 3
++#define ELF_INFO_TYPE_JUST_SYMS 4
++
++ /* Nonzero if this section uses RELA relocations, rather than REL. */
++ unsigned int use_rela_p:1;
++
++ /* Bits used by various backends. The generic code doesn't touch
++ these fields. */
++
++ /* Nonzero if this section has TLS related relocations. */
++ unsigned int has_tls_reloc:1;
++
++ /* Nonzero if this section has a gp reloc. */
++ unsigned int has_gp_reloc:1;
++
++ /* Nonzero if this section needs the relax finalize pass. */
++ unsigned int need_finalize_relax:1;
++
++ /* Whether relocations have been processed. */
++ unsigned int reloc_done : 1;
++
++ /* End of internal packed boolean fields. */
++
++ /* The virtual memory address of the section - where it will be
++ at run time. The symbols are relocated against this. The
++ user_set_vma flag is maintained by bfd; if it's not set, the
++ backend can assign addresses (for example, in <<a.out>>, where
++ the default address for <<.data>> is dependent on the specific
++ target and various flags). */
++ bfd_vma vma;
++
++ /* The load address of the section - where it would be in a
++ rom image; really only used for writing section header
++ information. */
++ bfd_vma lma;
++
++ /* The size of the section in octets, as it will be output.
++ Contains a value even if the section has no contents (e.g., the
++ size of <<.bss>>). */
++ bfd_size_type size;
++
++ /* For input sections, the original size on disk of the section, in
++ octets. This field is used by the linker relaxation code. It is
++ currently only set for sections where the linker relaxation scheme
++ doesn't cache altered section and reloc contents (stabs, eh_frame,
++ SEC_MERGE, some coff relaxing targets), and thus the original size
++ needs to be kept to read the section multiple times.
++ For output sections, rawsize holds the section size calculated on
++ a previous linker relaxation pass. */
++ bfd_size_type rawsize;
++
++ /* If this section is going to be output, then this value is the
++ offset in *bytes* into the output section of the first byte in the
++ input section (byte ==> smallest addressable unit on the
++ target). In most cases, if this was going to start at the
++ 100th octet (8-bit quantity) in the output section, this value
++ would be 100. However, if the target byte size is 16 bits
++ (bfd_octets_per_byte is "2"), this value would be 50. */
++ bfd_vma output_offset;
++
++ /* The output section through which to map on output. */
++ struct bfd_section *output_section;
++
++ /* The alignment requirement of the section, as an exponent of 2 -
++ e.g., 3 aligns to 2^3 (or 8). */
++ unsigned int alignment_power;
++
++ /* If an input section, a pointer to a vector of relocation
++ records for the data in this section. */
++ struct reloc_cache_entry *relocation;
++
++ /* If an output section, a pointer to a vector of pointers to
++ relocation records for the data in this section. */
++ struct reloc_cache_entry **orelocation;
++
++ /* The number of relocation records in one of the above. */
++ unsigned reloc_count;
++
++ /* Information below is back end specific - and not always used
++ or updated. */
++
++ /* File position of section data. */
++ file_ptr filepos;
++
++ /* File position of relocation info. */
++ file_ptr rel_filepos;
++
++ /* File position of line data. */
++ file_ptr line_filepos;
++
++ /* Pointer to data for applications. */
++ void *userdata;
++
++ /* If the SEC_IN_MEMORY flag is set, this points to the actual
++ contents. */
++ unsigned char *contents;
++
++ /* Attached line number information. */
++ alent *lineno;
++
++ /* Number of line number records. */
++ unsigned int lineno_count;
++
++ /* Entity size for merging purposes. */
++ unsigned int entsize;
++
++ /* Points to the kept section if this section is a link-once section,
++ and is discarded. */
++ struct bfd_section *kept_section;
++
++ /* When a section is being output, this value changes as more
++ linenumbers are written out. */
++ file_ptr moving_line_filepos;
++
++ /* What the section number is in the target world. */
++ int target_index;
++
++ void *used_by_bfd;
++
++ /* If this is a constructor section then here is a list of the
++ relocations created to relocate items within it. */
++ struct relent_chain *constructor_chain;
++
++ /* The BFD which owns the section. */
++ bfd *owner;
++
++ /* A symbol which points at this section only. */
++ struct bfd_symbol *symbol;
++ struct bfd_symbol **symbol_ptr_ptr;
++
++ /* Early in the link process, map_head and map_tail are used to build
++ a list of input sections attached to an output section. Later,
++ output sections use these fields for a list of bfd_link_order
++ structs. */
++ union {
++ struct bfd_link_order *link_order;
++ struct bfd_section *s;
++ } map_head, map_tail;
++} asection;
++
++/* These sections are global, and are managed by BFD. The application
++ and target back end are not permitted to change the values in
++ these sections. New code should use the section_ptr macros rather
++ than referring directly to the const sections. The const sections
++ may eventually vanish. */
++#define BFD_ABS_SECTION_NAME "*ABS*"
++#define BFD_UND_SECTION_NAME "*UND*"
++#define BFD_COM_SECTION_NAME "*COM*"
++#define BFD_IND_SECTION_NAME "*IND*"
++
++/* The absolute section. */
++extern asection bfd_abs_section;
++#define bfd_abs_section_ptr ((asection *) &bfd_abs_section)
++#define bfd_is_abs_section(sec) ((sec) == bfd_abs_section_ptr)
++/* Pointer to the undefined section. */
++extern asection bfd_und_section;
++#define bfd_und_section_ptr ((asection *) &bfd_und_section)
++#define bfd_is_und_section(sec) ((sec) == bfd_und_section_ptr)
++/* Pointer to the common section. */
++extern asection bfd_com_section;
++#define bfd_com_section_ptr ((asection *) &bfd_com_section)
++/* Pointer to the indirect section. */
++extern asection bfd_ind_section;
++#define bfd_ind_section_ptr ((asection *) &bfd_ind_section)
++#define bfd_is_ind_section(sec) ((sec) == bfd_ind_section_ptr)
++
++#define bfd_is_const_section(SEC) \
++ ( ((SEC) == bfd_abs_section_ptr) \
++ || ((SEC) == bfd_und_section_ptr) \
++ || ((SEC) == bfd_com_section_ptr) \
++ || ((SEC) == bfd_ind_section_ptr))
++
++extern const struct bfd_symbol * const bfd_abs_symbol;
++extern const struct bfd_symbol * const bfd_com_symbol;
++extern const struct bfd_symbol * const bfd_und_symbol;
++extern const struct bfd_symbol * const bfd_ind_symbol;
++
++/* Macros to handle insertion and deletion of a bfd's sections. These
++ only handle the list pointers, ie. do not adjust section_count,
++ target_index etc. */
++#define bfd_section_list_remove(ABFD, S) \
++ do \
++ { \
++ asection *_s = S; \
++ asection *_next = _s->next; \
++ asection *_prev = _s->prev; \
++ if (_prev) \
++ _prev->next = _next; \
++ else \
++ (ABFD)->sections = _next; \
++ if (_next) \
++ _next->prev = _prev; \
++ else \
++ (ABFD)->section_last = _prev; \
++ } \
++ while (0)
++#define bfd_section_list_append(ABFD, S) \
++ do \
++ { \
++ asection *_s = S; \
++ bfd *_abfd = ABFD; \
++ _s->next = NULL; \
++ if (_abfd->section_last) \
++ { \
++ _s->prev = _abfd->section_last; \
++ _abfd->section_last->next = _s; \
++ } \
++ else \
++ { \
++ _s->prev = NULL; \
++ _abfd->sections = _s; \
++ } \
++ _abfd->section_last = _s; \
++ } \
++ while (0)
++#define bfd_section_list_prepend(ABFD, S) \
++ do \
++ { \
++ asection *_s = S; \
++ bfd *_abfd = ABFD; \
++ _s->prev = NULL; \
++ if (_abfd->sections) \
++ { \
++ _s->next = _abfd->sections; \
++ _abfd->sections->prev = _s; \
++ } \
++ else \
++ { \
++ _s->next = NULL; \
++ _abfd->section_last = _s; \
++ } \
++ _abfd->sections = _s; \
++ } \
++ while (0)
++#define bfd_section_list_insert_after(ABFD, A, S) \
++ do \
++ { \
++ asection *_a = A; \
++ asection *_s = S; \
++ asection *_next = _a->next; \
++ _s->next = _next; \
++ _s->prev = _a; \
++ _a->next = _s; \
++ if (_next) \
++ _next->prev = _s; \
++ else \
++ (ABFD)->section_last = _s; \
++ } \
++ while (0)
++#define bfd_section_list_insert_before(ABFD, B, S) \
++ do \
++ { \
++ asection *_b = B; \
++ asection *_s = S; \
++ asection *_prev = _b->prev; \
++ _s->prev = _prev; \
++ _s->next = _b; \
++ _b->prev = _s; \
++ if (_prev) \
++ _prev->next = _s; \
++ else \
++ (ABFD)->sections = _s; \
++ } \
++ while (0)
++#define bfd_section_removed_from_list(ABFD, S) \
++ ((S)->next == NULL ? (ABFD)->section_last != (S) : (S)->next->prev != (S))
++
++void bfd_section_list_clear (bfd *);
++
++asection *bfd_get_section_by_name (bfd *abfd, const char *name);
++
++asection *bfd_get_section_by_name_if
++ (bfd *abfd,
++ const char *name,
++ bfd_boolean (*func) (bfd *abfd, asection *sect, void *obj),
++ void *obj);
++
++char *bfd_get_unique_section_name
++ (bfd *abfd, const char *templat, int *count);
++
++asection *bfd_make_section_old_way (bfd *abfd, const char *name);
++
++asection *bfd_make_section_anyway_with_flags
++ (bfd *abfd, const char *name, flagword flags);
++
++asection *bfd_make_section_anyway (bfd *abfd, const char *name);
++
++asection *bfd_make_section_with_flags
++ (bfd *, const char *name, flagword flags);
++
++asection *bfd_make_section (bfd *, const char *name);
++
++bfd_boolean bfd_set_section_flags
++ (bfd *abfd, asection *sec, flagword flags);
++
++void bfd_map_over_sections
++ (bfd *abfd,
++ void (*func) (bfd *abfd, asection *sect, void *obj),
++ void *obj);
++
++asection *bfd_sections_find_if
++ (bfd *abfd,
++ bfd_boolean (*operation) (bfd *abfd, asection *sect, void *obj),
++ void *obj);
++
++bfd_boolean bfd_set_section_size
++ (bfd *abfd, asection *sec, bfd_size_type val);
++
++bfd_boolean bfd_set_section_contents
++ (bfd *abfd, asection *section, const void *data,
++ file_ptr offset, bfd_size_type count);
++
++bfd_boolean bfd_get_section_contents
++ (bfd *abfd, asection *section, void *location, file_ptr offset,
++ bfd_size_type count);
++
++bfd_boolean bfd_malloc_and_get_section
++ (bfd *abfd, asection *section, bfd_byte **buf);
++
++bfd_boolean bfd_copy_private_section_data
++ (bfd *ibfd, asection *isec, bfd *obfd, asection *osec);
++
++#define bfd_copy_private_section_data(ibfd, isection, obfd, osection) \
++ BFD_SEND (obfd, _bfd_copy_private_section_data, \
++ (ibfd, isection, obfd, osection))
++bfd_boolean bfd_generic_is_group_section (bfd *, const asection *sec);
++
++bfd_boolean bfd_generic_discard_group (bfd *abfd, asection *group);
++
++/* Extracted from archures.c. */
++enum bfd_architecture
++{
++ bfd_arch_unknown, /* File arch not known. */
++ bfd_arch_obscure, /* Arch known, not one of these. */
++ bfd_arch_m68k, /* Motorola 68xxx */
++#define bfd_mach_m68000 1
++#define bfd_mach_m68008 2
++#define bfd_mach_m68010 3
++#define bfd_mach_m68020 4
++#define bfd_mach_m68030 5
++#define bfd_mach_m68040 6
++#define bfd_mach_m68060 7
++#define bfd_mach_cpu32 8
++#define bfd_mach_mcf5200 9
++#define bfd_mach_mcf5206e 10
++#define bfd_mach_mcf5307 11
++#define bfd_mach_mcf5407 12
++#define bfd_mach_mcf528x 13
++#define bfd_mach_mcfv4e 14
++#define bfd_mach_mcf521x 15
++#define bfd_mach_mcf5249 16
++#define bfd_mach_mcf547x 17
++#define bfd_mach_mcf548x 18
++ bfd_arch_vax, /* DEC Vax */
++ bfd_arch_i960, /* Intel 960 */
++ /* The order of the following is important.
++ lower number indicates a machine type that
++ only accepts a subset of the instructions
++ available to machines with higher numbers.
++ The exception is the "ca", which is
++ incompatible with all other machines except
++ "core". */
++
++#define bfd_mach_i960_core 1
++#define bfd_mach_i960_ka_sa 2
++#define bfd_mach_i960_kb_sb 3
++#define bfd_mach_i960_mc 4
++#define bfd_mach_i960_xa 5
++#define bfd_mach_i960_ca 6
++#define bfd_mach_i960_jx 7
++#define bfd_mach_i960_hx 8
++
++ bfd_arch_or32, /* OpenRISC 32 */
++
++ bfd_arch_a29k, /* AMD 29000 */
++ bfd_arch_sparc, /* SPARC */
++#define bfd_mach_sparc 1
++/* The difference between v8plus and v9 is that v9 is a true 64 bit env. */
++#define bfd_mach_sparc_sparclet 2
++#define bfd_mach_sparc_sparclite 3
++#define bfd_mach_sparc_v8plus 4
++#define bfd_mach_sparc_v8plusa 5 /* with ultrasparc add'ns. */
++#define bfd_mach_sparc_sparclite_le 6
++#define bfd_mach_sparc_v9 7
++#define bfd_mach_sparc_v9a 8 /* with ultrasparc add'ns. */
++#define bfd_mach_sparc_v8plusb 9 /* with cheetah add'ns. */
++#define bfd_mach_sparc_v9b 10 /* with cheetah add'ns. */
++/* Nonzero if MACH has the v9 instruction set. */
++#define bfd_mach_sparc_v9_p(mach) \
++ ((mach) >= bfd_mach_sparc_v8plus && (mach) <= bfd_mach_sparc_v9b \
++ && (mach) != bfd_mach_sparc_sparclite_le)
++/* Nonzero if MACH is a 64 bit sparc architecture. */
++#define bfd_mach_sparc_64bit_p(mach) \
++ ((mach) >= bfd_mach_sparc_v9 && (mach) != bfd_mach_sparc_v8plusb)
++ bfd_arch_mips, /* MIPS Rxxxx */
++#define bfd_mach_mips3000 3000
++#define bfd_mach_mips3900 3900
++#define bfd_mach_mips4000 4000
++#define bfd_mach_mips4010 4010
++#define bfd_mach_mips4100 4100
++#define bfd_mach_mips4111 4111
++#define bfd_mach_mips4120 4120
++#define bfd_mach_mips4300 4300
++#define bfd_mach_mips4400 4400
++#define bfd_mach_mips4600 4600
++#define bfd_mach_mips4650 4650
++#define bfd_mach_mips5000 5000
++#define bfd_mach_mips5400 5400
++#define bfd_mach_mips5500 5500
++#define bfd_mach_mips6000 6000
++#define bfd_mach_mips7000 7000
++#define bfd_mach_mips8000 8000
++#define bfd_mach_mips9000 9000
++#define bfd_mach_mips10000 10000
++#define bfd_mach_mips12000 12000
++#define bfd_mach_mips16 16
++#define bfd_mach_mips5 5
++#define bfd_mach_mips_sb1 12310201 /* octal 'SB', 01 */
++#define bfd_mach_mipsisa32 32
++#define bfd_mach_mipsisa32r2 33
++#define bfd_mach_mipsisa64 64
++#define bfd_mach_mipsisa64r2 65
++ bfd_arch_i386, /* Intel 386 */
++#define bfd_mach_i386_i386 1
++#define bfd_mach_i386_i8086 2
++#define bfd_mach_i386_i386_intel_syntax 3
++#define bfd_mach_x86_64 64
++#define bfd_mach_x86_64_intel_syntax 65
++ bfd_arch_we32k, /* AT&T WE32xxx */
++ bfd_arch_tahoe, /* CCI/Harris Tahoe */
++ bfd_arch_i860, /* Intel 860 */
++ bfd_arch_i370, /* IBM 360/370 Mainframes */
++ bfd_arch_romp, /* IBM ROMP PC/RT */
++ bfd_arch_alliant, /* Alliant */
++ bfd_arch_convex, /* Convex */
++ bfd_arch_m88k, /* Motorola 88xxx */
++ bfd_arch_m98k, /* Motorola 98xxx */
++ bfd_arch_pyramid, /* Pyramid Technology */
++ bfd_arch_h8300, /* Renesas H8/300 (formerly Hitachi H8/300) */
++#define bfd_mach_h8300 1
++#define bfd_mach_h8300h 2
++#define bfd_mach_h8300s 3
++#define bfd_mach_h8300hn 4
++#define bfd_mach_h8300sn 5
++#define bfd_mach_h8300sx 6
++#define bfd_mach_h8300sxn 7
++ bfd_arch_pdp11, /* DEC PDP-11 */
++ bfd_arch_powerpc, /* PowerPC */
++#define bfd_mach_ppc 32
++#define bfd_mach_ppc64 64
++#define bfd_mach_ppc_403 403
++#define bfd_mach_ppc_403gc 4030
++#define bfd_mach_ppc_505 505
++#define bfd_mach_ppc_601 601
++#define bfd_mach_ppc_602 602
++#define bfd_mach_ppc_603 603
++#define bfd_mach_ppc_ec603e 6031
++#define bfd_mach_ppc_604 604
++#define bfd_mach_ppc_620 620
++#define bfd_mach_ppc_630 630
++#define bfd_mach_ppc_750 750
++#define bfd_mach_ppc_860 860
++#define bfd_mach_ppc_a35 35
++#define bfd_mach_ppc_rs64ii 642
++#define bfd_mach_ppc_rs64iii 643
++#define bfd_mach_ppc_7400 7400
++#define bfd_mach_ppc_e500 500
++ bfd_arch_rs6000, /* IBM RS/6000 */
++#define bfd_mach_rs6k 6000
++#define bfd_mach_rs6k_rs1 6001
++#define bfd_mach_rs6k_rsc 6003
++#define bfd_mach_rs6k_rs2 6002
++ bfd_arch_hppa, /* HP PA RISC */
++#define bfd_mach_hppa10 10
++#define bfd_mach_hppa11 11
++#define bfd_mach_hppa20 20
++#define bfd_mach_hppa20w 25
++ bfd_arch_d10v, /* Mitsubishi D10V */
++#define bfd_mach_d10v 1
++#define bfd_mach_d10v_ts2 2
++#define bfd_mach_d10v_ts3 3
++ bfd_arch_d30v, /* Mitsubishi D30V */
++ bfd_arch_dlx, /* DLX */
++ bfd_arch_m68hc11, /* Motorola 68HC11 */
++ bfd_arch_m68hc12, /* Motorola 68HC12 */
++#define bfd_mach_m6812_default 0
++#define bfd_mach_m6812 1
++#define bfd_mach_m6812s 2
++ bfd_arch_z8k, /* Zilog Z8000 */
++#define bfd_mach_z8001 1
++#define bfd_mach_z8002 2
++ bfd_arch_h8500, /* Renesas H8/500 (formerly Hitachi H8/500) */
++ bfd_arch_sh, /* Renesas / SuperH SH (formerly Hitachi SH) */
++#define bfd_mach_sh 1
++#define bfd_mach_sh2 0x20
++#define bfd_mach_sh_dsp 0x2d
++#define bfd_mach_sh2a 0x2a
++#define bfd_mach_sh2a_nofpu 0x2b
++#define bfd_mach_sh2a_nofpu_or_sh4_nommu_nofpu 0x2a1
++#define bfd_mach_sh2a_nofpu_or_sh3_nommu 0x2a2
++#define bfd_mach_sh2a_or_sh4 0x2a3
++#define bfd_mach_sh2a_or_sh3e 0x2a4
++#define bfd_mach_sh2e 0x2e
++#define bfd_mach_sh3 0x30
++#define bfd_mach_sh3_nommu 0x31
++#define bfd_mach_sh3_dsp 0x3d
++#define bfd_mach_sh3e 0x3e
++#define bfd_mach_sh4 0x40
++#define bfd_mach_sh4_nofpu 0x41
++#define bfd_mach_sh4_nommu_nofpu 0x42
++#define bfd_mach_sh4a 0x4a
++#define bfd_mach_sh4a_nofpu 0x4b
++#define bfd_mach_sh4al_dsp 0x4d
++#define bfd_mach_sh5 0x50
++ bfd_arch_alpha, /* Dec Alpha */
++#define bfd_mach_alpha_ev4 0x10
++#define bfd_mach_alpha_ev5 0x20
++#define bfd_mach_alpha_ev6 0x30
++ bfd_arch_arm, /* Advanced Risc Machines ARM. */
++#define bfd_mach_arm_unknown 0
++#define bfd_mach_arm_2 1
++#define bfd_mach_arm_2a 2
++#define bfd_mach_arm_3 3
++#define bfd_mach_arm_3M 4
++#define bfd_mach_arm_4 5
++#define bfd_mach_arm_4T 6
++#define bfd_mach_arm_5 7
++#define bfd_mach_arm_5T 8
++#define bfd_mach_arm_5TE 9
++#define bfd_mach_arm_XScale 10
++#define bfd_mach_arm_ep9312 11
++#define bfd_mach_arm_iWMMXt 12
++ bfd_arch_ns32k, /* National Semiconductors ns32000 */
++ bfd_arch_w65, /* WDC 65816 */
++ bfd_arch_tic30, /* Texas Instruments TMS320C30 */
++ bfd_arch_tic4x, /* Texas Instruments TMS320C3X/4X */
++#define bfd_mach_tic3x 30
++#define bfd_mach_tic4x 40
++ bfd_arch_tic54x, /* Texas Instruments TMS320C54X */
++ bfd_arch_tic80, /* TI TMS320c80 (MVP) */
++ bfd_arch_v850, /* NEC V850 */
++#define bfd_mach_v850 1
++#define bfd_mach_v850e 'E'
++#define bfd_mach_v850e1 '1'
++ bfd_arch_arc, /* ARC Cores */
++#define bfd_mach_arc_5 5
++#define bfd_mach_arc_6 6
++#define bfd_mach_arc_7 7
++#define bfd_mach_arc_8 8
++ bfd_arch_m32c, /* Renesas M16C/M32C. */
++#define bfd_mach_m16c 0x75
++#define bfd_mach_m32c 0x78
++ bfd_arch_m32r, /* Renesas M32R (formerly Mitsubishi M32R/D) */
++#define bfd_mach_m32r 1 /* For backwards compatibility. */
++#define bfd_mach_m32rx 'x'
++#define bfd_mach_m32r2 '2'
++ bfd_arch_mn10200, /* Matsushita MN10200 */
++ bfd_arch_mn10300, /* Matsushita MN10300 */
++#define bfd_mach_mn10300 300
++#define bfd_mach_am33 330
++#define bfd_mach_am33_2 332
++ bfd_arch_fr30,
++#define bfd_mach_fr30 0x46523330
++ bfd_arch_frv,
++#define bfd_mach_frv 1
++#define bfd_mach_frvsimple 2
++#define bfd_mach_fr300 300
++#define bfd_mach_fr400 400
++#define bfd_mach_fr450 450
++#define bfd_mach_frvtomcat 499 /* fr500 prototype */
++#define bfd_mach_fr500 500
++#define bfd_mach_fr550 550
++ bfd_arch_mcore,
++ bfd_arch_ia64, /* HP/Intel ia64 */
++#define bfd_mach_ia64_elf64 64
++#define bfd_mach_ia64_elf32 32
++ bfd_arch_ip2k, /* Ubicom IP2K microcontrollers. */
++#define bfd_mach_ip2022 1
++#define bfd_mach_ip2022ext 2
++ bfd_arch_iq2000, /* Vitesse IQ2000. */
++#define bfd_mach_iq2000 1
++#define bfd_mach_iq10 2
++ bfd_arch_ms1,
++#define bfd_mach_ms1 1
++#define bfd_mach_mrisc2 2
++ bfd_arch_pj,
++ bfd_arch_avr, /* Atmel AVR microcontrollers. */
++#define bfd_mach_avr1 1
++#define bfd_mach_avr2 2
++#define bfd_mach_avr3 3
++#define bfd_mach_avr4 4
++#define bfd_mach_avr5 5
++ bfd_arch_cr16c, /* National Semiconductor CompactRISC. */
++#define bfd_mach_cr16c 1
++ bfd_arch_crx, /* National Semiconductor CRX. */
++#define bfd_mach_crx 1
++ bfd_arch_cris, /* Axis CRIS */
++#define bfd_mach_cris_v0_v10 255
++#define bfd_mach_cris_v32 32
++#define bfd_mach_cris_v10_v32 1032
++ bfd_arch_s390, /* IBM s390 */
++#define bfd_mach_s390_31 31
++#define bfd_mach_s390_64 64
++ bfd_arch_openrisc, /* OpenRISC */
++ bfd_arch_mmix, /* Donald Knuth's educational processor. */
++ bfd_arch_xstormy16,
++#define bfd_mach_xstormy16 1
++ bfd_arch_msp430, /* Texas Instruments MSP430 architecture. */
++#define bfd_mach_msp11 11
++#define bfd_mach_msp110 110
++#define bfd_mach_msp12 12
++#define bfd_mach_msp13 13
++#define bfd_mach_msp14 14
++#define bfd_mach_msp15 15
++#define bfd_mach_msp16 16
++#define bfd_mach_msp31 31
++#define bfd_mach_msp32 32
++#define bfd_mach_msp33 33
++#define bfd_mach_msp41 41
++#define bfd_mach_msp42 42
++#define bfd_mach_msp43 43
++#define bfd_mach_msp44 44
++ bfd_arch_xtensa, /* Tensilica's Xtensa cores. */
++#define bfd_mach_xtensa 1
++ bfd_arch_maxq, /* Dallas MAXQ 10/20 */
++#define bfd_mach_maxq10 10
++#define bfd_mach_maxq20 20
++ bfd_arch_last
++ };
++
++typedef struct bfd_arch_info
++{
++ int bits_per_word;
++ int bits_per_address;
++ int bits_per_byte;
++ enum bfd_architecture arch;
++ unsigned long mach;
++ const char *arch_name;
++ const char *printable_name;
++ unsigned int section_align_power;
++ /* TRUE if this is the default machine for the architecture.
++ The default arch should be the first entry for an arch so that
++ all the entries for that arch can be accessed via <<next>>. */
++ bfd_boolean the_default;
++ const struct bfd_arch_info * (*compatible)
++ (const struct bfd_arch_info *a, const struct bfd_arch_info *b);
++
++ bfd_boolean (*scan) (const struct bfd_arch_info *, const char *);
++
++ const struct bfd_arch_info *next;
++}
++bfd_arch_info_type;
++
++const char *bfd_printable_name (bfd *abfd);
++
++const bfd_arch_info_type *bfd_scan_arch (const char *string);
++
++const char **bfd_arch_list (void);
++
++const bfd_arch_info_type *bfd_arch_get_compatible
++ (const bfd *abfd, const bfd *bbfd, bfd_boolean accept_unknowns);
++
++void bfd_set_arch_info (bfd *abfd, const bfd_arch_info_type *arg);
++
++enum bfd_architecture bfd_get_arch (bfd *abfd);
++
++unsigned long bfd_get_mach (bfd *abfd);
++
++unsigned int bfd_arch_bits_per_byte (bfd *abfd);
++
++unsigned int bfd_arch_bits_per_address (bfd *abfd);
++
++const bfd_arch_info_type *bfd_get_arch_info (bfd *abfd);
++
++const bfd_arch_info_type *bfd_lookup_arch
++ (enum bfd_architecture arch, unsigned long machine);
++
++const char *bfd_printable_arch_mach
++ (enum bfd_architecture arch, unsigned long machine);
++
++unsigned int bfd_octets_per_byte (bfd *abfd);
++
++unsigned int bfd_arch_mach_octets_per_byte
++ (enum bfd_architecture arch, unsigned long machine);
++
++/* Extracted from reloc.c. */
++typedef enum bfd_reloc_status
++{
++ /* No errors detected. */
++ bfd_reloc_ok,
++
++ /* The relocation was performed, but there was an overflow. */
++ bfd_reloc_overflow,
++
++ /* The address to relocate was not within the section supplied. */
++ bfd_reloc_outofrange,
++
++ /* Used by special functions. */
++ bfd_reloc_continue,
++
++ /* Unsupported relocation size requested. */
++ bfd_reloc_notsupported,
++
++ /* Unused. */
++ bfd_reloc_other,
++
++ /* The symbol to relocate against was undefined. */
++ bfd_reloc_undefined,
++
++ /* The relocation was performed, but may not be ok - presently
++ generated only when linking i960 coff files with i960 b.out
++ symbols. If this type is returned, the error_message argument
++ to bfd_perform_relocation will be set. */
++ bfd_reloc_dangerous
++ }
++ bfd_reloc_status_type;
++
++
++typedef struct reloc_cache_entry
++{
++ /* A pointer into the canonical table of pointers. */
++ struct bfd_symbol **sym_ptr_ptr;
++
++ /* offset in section. */
++ bfd_size_type address;
++
++ /* addend for relocation value. */
++ bfd_vma addend;
++
++ /* Pointer to how to perform the required relocation. */
++ reloc_howto_type *howto;
++
++}
++arelent;
++
++enum complain_overflow
++{
++ /* Do not complain on overflow. */
++ complain_overflow_dont,
++
++ /* Complain if the bitfield overflows, whether it is considered
++ as signed or unsigned. */
++ complain_overflow_bitfield,
++
++ /* Complain if the value overflows when considered as signed
++ number. */
++ complain_overflow_signed,
++
++ /* Complain if the value overflows when considered as an
++ unsigned number. */
++ complain_overflow_unsigned
++};
++
++struct reloc_howto_struct
++{
++ /* The type field has mainly a documentary use - the back end can
++ do what it wants with it, though normally the back end's
++ external idea of what a reloc number is stored
++ in this field. For example, a PC relative word relocation
++ in a coff environment has the type 023 - because that's
++ what the outside world calls a R_PCRWORD reloc. */
++ unsigned int type;
++
++ /* The value the final relocation is shifted right by. This drops
++ unwanted data from the relocation. */
++ unsigned int rightshift;
++
++ /* The size of the item to be relocated. This is *not* a
++ power-of-two measure. To get the number of bytes operated
++ on by a type of relocation, use bfd_get_reloc_size. */
++ int size;
++
++ /* The number of bits in the item to be relocated. This is used
++ when doing overflow checking. */
++ unsigned int bitsize;
++
++ /* Notes that the relocation is relative to the location in the
++ data section of the addend. The relocation function will
++ subtract from the relocation value the address of the location
++ being relocated. */
++ bfd_boolean pc_relative;
++
++ /* The bit position of the reloc value in the destination.
++ The relocated value is left shifted by this amount. */
++ unsigned int bitpos;
++
++ /* What type of overflow error should be checked for when
++ relocating. */
++ enum complain_overflow complain_on_overflow;
++
++ /* If this field is non null, then the supplied function is
++ called rather than the normal function. This allows really
++ strange relocation methods to be accommodated (e.g., i960 callj
++ instructions). */
++ bfd_reloc_status_type (*special_function)
++ (bfd *, arelent *, struct bfd_symbol *, void *, asection *,
++ bfd *, char **);
++
++ /* The textual name of the relocation type. */
++ char *name;
++
++ /* Some formats record a relocation addend in the section contents
++ rather than with the relocation. For ELF formats this is the
++ distinction between USE_REL and USE_RELA (though the code checks
++ for USE_REL == 1/0). The value of this field is TRUE if the
++ addend is recorded with the section contents; when performing a
++ partial link (ld -r) the section contents (the data) will be
++ modified. The value of this field is FALSE if addends are
++ recorded with the relocation (in arelent.addend); when performing
++ a partial link the relocation will be modified.
++ All relocations for all ELF USE_RELA targets should set this field
++ to FALSE (values of TRUE should be looked on with suspicion).
++ However, the converse is not true: not all relocations of all ELF
++ USE_REL targets set this field to TRUE. Why this is so is peculiar
++ to each particular target. For relocs that aren't used in partial
++ links (e.g. GOT stuff) it doesn't matter what this is set to. */
++ bfd_boolean partial_inplace;
++
++ /* src_mask selects the part of the instruction (or data) to be used
++ in the relocation sum. If the target relocations don't have an
++ addend in the reloc, eg. ELF USE_REL, src_mask will normally equal
++ dst_mask to extract the addend from the section contents. If
++ relocations do have an addend in the reloc, eg. ELF USE_RELA, this
++ field should be zero. Non-zero values for ELF USE_RELA targets are
++ bogus as in those cases the value in the dst_mask part of the
++ section contents should be treated as garbage. */
++ bfd_vma src_mask;
++
++ /* dst_mask selects which parts of the instruction (or data) are
++ replaced with a relocated value. */
++ bfd_vma dst_mask;
++
++ /* When some formats create PC relative instructions, they leave
++ the value of the pc of the place being relocated in the offset
++ slot of the instruction, so that a PC relative relocation can
++ be made just by adding in an ordinary offset (e.g., sun3 a.out).
++ Some formats leave the displacement part of an instruction
++ empty (e.g., m88k bcs); this flag signals the fact. */
++ bfd_boolean pcrel_offset;
++};
++
++#define HOWTO(C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC) \
++ { (unsigned) C, R, S, B, P, BI, O, SF, NAME, INPLACE, MASKSRC, MASKDST, PC }
++#define NEWHOWTO(FUNCTION, NAME, SIZE, REL, IN) \
++ HOWTO (0, 0, SIZE, 0, REL, 0, complain_overflow_dont, FUNCTION, \
++ NAME, FALSE, 0, 0, IN)
++
++#define EMPTY_HOWTO(C) \
++ HOWTO ((C), 0, 0, 0, FALSE, 0, complain_overflow_dont, NULL, \
++ NULL, FALSE, 0, 0, FALSE)
++
++#define HOWTO_PREPARE(relocation, symbol) \
++ { \
++ if (symbol != NULL) \
++ { \
++ if (bfd_is_com_section (symbol->section)) \
++ { \
++ relocation = 0; \
++ } \
++ else \
++ { \
++ relocation = symbol->value; \
++ } \
++ } \
++ }
++
++unsigned int bfd_get_reloc_size (reloc_howto_type *);
++
++typedef struct relent_chain
++{
++ arelent relent;
++ struct relent_chain *next;
++}
++arelent_chain;
++
++bfd_reloc_status_type bfd_check_overflow
++ (enum complain_overflow how,
++ unsigned int bitsize,
++ unsigned int rightshift,
++ unsigned int addrsize,
++ bfd_vma relocation);
++
++bfd_reloc_status_type bfd_perform_relocation
++ (bfd *abfd,
++ arelent *reloc_entry,
++ void *data,
++ asection *input_section,
++ bfd *output_bfd,
++ char **error_message);
++
++bfd_reloc_status_type bfd_install_relocation
++ (bfd *abfd,
++ arelent *reloc_entry,
++ void *data, bfd_vma data_start,
++ asection *input_section,
++ char **error_message);
++
++enum bfd_reloc_code_real {
++ _dummy_first_bfd_reloc_code_real,
++
++
++/* Basic absolute relocations of N bits. */
++ BFD_RELOC_64,
++ BFD_RELOC_32,
++ BFD_RELOC_26,
++ BFD_RELOC_24,
++ BFD_RELOC_16,
++ BFD_RELOC_14,
++ BFD_RELOC_8,
++
++/* PC-relative relocations. Sometimes these are relative to the address
++of the relocation itself; sometimes they are relative to the start of
++the section containing the relocation. It depends on the specific target.
++
++The 24-bit relocation is used in some Intel 960 configurations. */
++ BFD_RELOC_64_PCREL,
++ BFD_RELOC_32_PCREL,
++ BFD_RELOC_24_PCREL,
++ BFD_RELOC_16_PCREL,
++ BFD_RELOC_12_PCREL,
++ BFD_RELOC_8_PCREL,
++
++/* Section relative relocations. Some targets need this for DWARF2. */
++ BFD_RELOC_32_SECREL,
++
++/* For ELF. */
++ BFD_RELOC_32_GOT_PCREL,
++ BFD_RELOC_16_GOT_PCREL,
++ BFD_RELOC_8_GOT_PCREL,
++ BFD_RELOC_32_GOTOFF,
++ BFD_RELOC_16_GOTOFF,
++ BFD_RELOC_LO16_GOTOFF,
++ BFD_RELOC_HI16_GOTOFF,
++ BFD_RELOC_HI16_S_GOTOFF,
++ BFD_RELOC_8_GOTOFF,
++ BFD_RELOC_64_PLT_PCREL,
++ BFD_RELOC_32_PLT_PCREL,
++ BFD_RELOC_24_PLT_PCREL,
++ BFD_RELOC_16_PLT_PCREL,
++ BFD_RELOC_8_PLT_PCREL,
++ BFD_RELOC_64_PLTOFF,
++ BFD_RELOC_32_PLTOFF,
++ BFD_RELOC_16_PLTOFF,
++ BFD_RELOC_LO16_PLTOFF,
++ BFD_RELOC_HI16_PLTOFF,
++ BFD_RELOC_HI16_S_PLTOFF,
++ BFD_RELOC_8_PLTOFF,
++
++/* Relocations used by 68K ELF. */
++ BFD_RELOC_68K_GLOB_DAT,
++ BFD_RELOC_68K_JMP_SLOT,
++ BFD_RELOC_68K_RELATIVE,
++
++/* Linkage-table relative. */
++ BFD_RELOC_32_BASEREL,
++ BFD_RELOC_16_BASEREL,
++ BFD_RELOC_LO16_BASEREL,
++ BFD_RELOC_HI16_BASEREL,
++ BFD_RELOC_HI16_S_BASEREL,
++ BFD_RELOC_8_BASEREL,
++ BFD_RELOC_RVA,
++
++/* Absolute 8-bit relocation, but used to form an address like 0xFFnn. */
++ BFD_RELOC_8_FFnn,
++
++/* These PC-relative relocations are stored as word displacements --
++i.e., byte displacements shifted right two bits. The 30-bit word
++displacement (<<32_PCREL_S2>> -- 32 bits, shifted 2) is used on the
++SPARC. (SPARC tools generally refer to this as <<WDISP30>>.) The
++signed 16-bit displacement is used on the MIPS, and the 23-bit
++displacement is used on the Alpha. */
++ BFD_RELOC_32_PCREL_S2,
++ BFD_RELOC_16_PCREL_S2,
++ BFD_RELOC_23_PCREL_S2,
++
++/* High 22 bits and low 10 bits of 32-bit value, placed into lower bits of
++the target word. These are used on the SPARC. */
++ BFD_RELOC_HI22,
++ BFD_RELOC_LO10,
++
++/* For systems that allocate a Global Pointer register, these are
++displacements off that register. These relocation types are
++handled specially, because the value the register will have is
++decided relatively late. */
++ BFD_RELOC_GPREL16,
++ BFD_RELOC_GPREL32,
++
++/* Reloc types used for i960/b.out. */
++ BFD_RELOC_I960_CALLJ,
++
++/* SPARC ELF relocations. There is probably some overlap with other
++relocation types already defined. */
++ BFD_RELOC_NONE,
++ BFD_RELOC_SPARC_WDISP22,
++ BFD_RELOC_SPARC22,
++ BFD_RELOC_SPARC13,
++ BFD_RELOC_SPARC_GOT10,
++ BFD_RELOC_SPARC_GOT13,
++ BFD_RELOC_SPARC_GOT22,
++ BFD_RELOC_SPARC_PC10,
++ BFD_RELOC_SPARC_PC22,
++ BFD_RELOC_SPARC_WPLT30,
++ BFD_RELOC_SPARC_COPY,
++ BFD_RELOC_SPARC_GLOB_DAT,
++ BFD_RELOC_SPARC_JMP_SLOT,
++ BFD_RELOC_SPARC_RELATIVE,
++ BFD_RELOC_SPARC_UA16,
++ BFD_RELOC_SPARC_UA32,
++ BFD_RELOC_SPARC_UA64,
++
++/* I think these are specific to SPARC a.out (e.g., Sun 4). */
++ BFD_RELOC_SPARC_BASE13,
++ BFD_RELOC_SPARC_BASE22,
++
++/* SPARC64 relocations */
++#define BFD_RELOC_SPARC_64 BFD_RELOC_64
++ BFD_RELOC_SPARC_10,
++ BFD_RELOC_SPARC_11,
++ BFD_RELOC_SPARC_OLO10,
++ BFD_RELOC_SPARC_HH22,
++ BFD_RELOC_SPARC_HM10,
++ BFD_RELOC_SPARC_LM22,
++ BFD_RELOC_SPARC_PC_HH22,
++ BFD_RELOC_SPARC_PC_HM10,
++ BFD_RELOC_SPARC_PC_LM22,
++ BFD_RELOC_SPARC_WDISP16,
++ BFD_RELOC_SPARC_WDISP19,
++ BFD_RELOC_SPARC_7,
++ BFD_RELOC_SPARC_6,
++ BFD_RELOC_SPARC_5,
++#define BFD_RELOC_SPARC_DISP64 BFD_RELOC_64_PCREL
++ BFD_RELOC_SPARC_PLT32,
++ BFD_RELOC_SPARC_PLT64,
++ BFD_RELOC_SPARC_HIX22,
++ BFD_RELOC_SPARC_LOX10,
++ BFD_RELOC_SPARC_H44,
++ BFD_RELOC_SPARC_M44,
++ BFD_RELOC_SPARC_L44,
++ BFD_RELOC_SPARC_REGISTER,
++
++/* SPARC little endian relocation */
++ BFD_RELOC_SPARC_REV32,
++
++/* SPARC TLS relocations */
++ BFD_RELOC_SPARC_TLS_GD_HI22,
++ BFD_RELOC_SPARC_TLS_GD_LO10,
++ BFD_RELOC_SPARC_TLS_GD_ADD,
++ BFD_RELOC_SPARC_TLS_GD_CALL,
++ BFD_RELOC_SPARC_TLS_LDM_HI22,
++ BFD_RELOC_SPARC_TLS_LDM_LO10,
++ BFD_RELOC_SPARC_TLS_LDM_ADD,
++ BFD_RELOC_SPARC_TLS_LDM_CALL,
++ BFD_RELOC_SPARC_TLS_LDO_HIX22,
++ BFD_RELOC_SPARC_TLS_LDO_LOX10,
++ BFD_RELOC_SPARC_TLS_LDO_ADD,
++ BFD_RELOC_SPARC_TLS_IE_HI22,
++ BFD_RELOC_SPARC_TLS_IE_LO10,
++ BFD_RELOC_SPARC_TLS_IE_LD,
++ BFD_RELOC_SPARC_TLS_IE_LDX,
++ BFD_RELOC_SPARC_TLS_IE_ADD,
++ BFD_RELOC_SPARC_TLS_LE_HIX22,
++ BFD_RELOC_SPARC_TLS_LE_LOX10,
++ BFD_RELOC_SPARC_TLS_DTPMOD32,
++ BFD_RELOC_SPARC_TLS_DTPMOD64,
++ BFD_RELOC_SPARC_TLS_DTPOFF32,
++ BFD_RELOC_SPARC_TLS_DTPOFF64,
++ BFD_RELOC_SPARC_TLS_TPOFF32,
++ BFD_RELOC_SPARC_TLS_TPOFF64,
++
++/* Alpha ECOFF and ELF relocations. Some of these treat the symbol or
++"addend" in some special way.
++For GPDISP_HI16 ("gpdisp") relocations, the symbol is ignored when
++writing; when reading, it will be the absolute section symbol. The
++addend is the displacement in bytes of the "lda" instruction from
++the "ldah" instruction (which is at the address of this reloc). */
++ BFD_RELOC_ALPHA_GPDISP_HI16,
++
++/* For GPDISP_LO16 ("ignore") relocations, the symbol is handled as
++with GPDISP_HI16 relocs. The addend is ignored when writing the
++relocations out, and is filled in with the file's GP value on
++reading, for convenience. */
++ BFD_RELOC_ALPHA_GPDISP_LO16,
++
++/* The ELF GPDISP relocation is exactly the same as the GPDISP_HI16
++relocation except that there is no accompanying GPDISP_LO16
++relocation. */
++ BFD_RELOC_ALPHA_GPDISP,
++
++/* The Alpha LITERAL/LITUSE relocs are produced by a symbol reference;
++the assembler turns it into a LDQ instruction to load the address of
++the symbol, and then fills in a register in the real instruction.
++
++The LITERAL reloc, at the LDQ instruction, refers to the .lita
++section symbol. The addend is ignored when writing, but is filled
++in with the file's GP value on reading, for convenience, as with the
++GPDISP_LO16 reloc.
++
++The ELF_LITERAL reloc is somewhere between 16_GOTOFF and GPDISP_LO16.
++It should refer to the symbol to be referenced, as with 16_GOTOFF,
++but it generates output not based on the position within the .got
++section, but relative to the GP value chosen for the file during the
++final link stage.
++
++The LITUSE reloc, on the instruction using the loaded address, gives
++information to the linker that it might be able to use to optimize
++away some literal section references. The symbol is ignored (read
++as the absolute section symbol), and the "addend" indicates the type
++of instruction using the register:
++1 - "memory" fmt insn
++2 - byte-manipulation (byte offset reg)
++3 - jsr (target of branch) */
++ BFD_RELOC_ALPHA_LITERAL,
++ BFD_RELOC_ALPHA_ELF_LITERAL,
++ BFD_RELOC_ALPHA_LITUSE,
++
++/* The HINT relocation indicates a value that should be filled into the
++"hint" field of a jmp/jsr/ret instruction, for possible branch-
++prediction logic which may be provided on some processors. */
++ BFD_RELOC_ALPHA_HINT,
++
++/* The LINKAGE relocation outputs a linkage pair in the object file,
++which is filled by the linker. */
++ BFD_RELOC_ALPHA_LINKAGE,
++
++/* The CODEADDR relocation outputs a STO_CA in the object file,
++which is filled by the linker. */
++ BFD_RELOC_ALPHA_CODEADDR,
++
++/* The GPREL_HI/LO relocations together form a 32-bit offset from the
++GP register. */
++ BFD_RELOC_ALPHA_GPREL_HI16,
++ BFD_RELOC_ALPHA_GPREL_LO16,
++
++/* Like BFD_RELOC_23_PCREL_S2, except that the source and target must
++share a common GP, and the target address is adjusted for
++STO_ALPHA_STD_GPLOAD. */
++ BFD_RELOC_ALPHA_BRSGP,
++
++/* Alpha thread-local storage relocations. */
++ BFD_RELOC_ALPHA_TLSGD,
++ BFD_RELOC_ALPHA_TLSLDM,
++ BFD_RELOC_ALPHA_DTPMOD64,
++ BFD_RELOC_ALPHA_GOTDTPREL16,
++ BFD_RELOC_ALPHA_DTPREL64,
++ BFD_RELOC_ALPHA_DTPREL_HI16,
++ BFD_RELOC_ALPHA_DTPREL_LO16,
++ BFD_RELOC_ALPHA_DTPREL16,
++ BFD_RELOC_ALPHA_GOTTPREL16,
++ BFD_RELOC_ALPHA_TPREL64,
++ BFD_RELOC_ALPHA_TPREL_HI16,
++ BFD_RELOC_ALPHA_TPREL_LO16,
++ BFD_RELOC_ALPHA_TPREL16,
++
++/* Bits 27..2 of the relocation address shifted right 2 bits;
++simple reloc otherwise. */
++ BFD_RELOC_MIPS_JMP,
++
++/* The MIPS16 jump instruction. */
++ BFD_RELOC_MIPS16_JMP,
++
++/* MIPS16 GP relative reloc. */
++ BFD_RELOC_MIPS16_GPREL,
++
++/* High 16 bits of 32-bit value; simple reloc. */
++ BFD_RELOC_HI16,
++
++/* High 16 bits of 32-bit value but the low 16 bits will be sign
++extended and added to form the final result. If the low 16
++bits form a negative number, we need to add one to the high value
++to compensate for the borrow when the low bits are added. */
++ BFD_RELOC_HI16_S,
++
++/* Low 16 bits. */
++ BFD_RELOC_LO16,
++
++/* High 16 bits of 32-bit pc-relative value */
++ BFD_RELOC_HI16_PCREL,
++
++/* High 16 bits of 32-bit pc-relative value, adjusted */
++ BFD_RELOC_HI16_S_PCREL,
++
++/* Low 16 bits of pc-relative value */
++ BFD_RELOC_LO16_PCREL,
++
++/* MIPS16 high 16 bits of 32-bit value. */
++ BFD_RELOC_MIPS16_HI16,
++
++/* MIPS16 high 16 bits of 32-bit value but the low 16 bits will be sign
++extended and added to form the final result. If the low 16
++bits form a negative number, we need to add one to the high value
++to compensate for the borrow when the low bits are added. */
++ BFD_RELOC_MIPS16_HI16_S,
++
++/* MIPS16 low 16 bits. */
++ BFD_RELOC_MIPS16_LO16,
++
++/* Relocation against a MIPS literal section. */
++ BFD_RELOC_MIPS_LITERAL,
++
++/* MIPS ELF relocations. */
++ BFD_RELOC_MIPS_GOT16,
++ BFD_RELOC_MIPS_CALL16,
++ BFD_RELOC_MIPS_GOT_HI16,
++ BFD_RELOC_MIPS_GOT_LO16,
++ BFD_RELOC_MIPS_CALL_HI16,
++ BFD_RELOC_MIPS_CALL_LO16,
++ BFD_RELOC_MIPS_SUB,
++ BFD_RELOC_MIPS_GOT_PAGE,
++ BFD_RELOC_MIPS_GOT_OFST,
++ BFD_RELOC_MIPS_GOT_DISP,
++ BFD_RELOC_MIPS_SHIFT5,
++ BFD_RELOC_MIPS_SHIFT6,
++ BFD_RELOC_MIPS_INSERT_A,
++ BFD_RELOC_MIPS_INSERT_B,
++ BFD_RELOC_MIPS_DELETE,
++ BFD_RELOC_MIPS_HIGHEST,
++ BFD_RELOC_MIPS_HIGHER,
++ BFD_RELOC_MIPS_SCN_DISP,
++ BFD_RELOC_MIPS_REL16,
++ BFD_RELOC_MIPS_RELGOT,
++ BFD_RELOC_MIPS_JALR,
++ BFD_RELOC_MIPS_TLS_DTPMOD32,
++ BFD_RELOC_MIPS_TLS_DTPREL32,
++ BFD_RELOC_MIPS_TLS_DTPMOD64,
++ BFD_RELOC_MIPS_TLS_DTPREL64,
++ BFD_RELOC_MIPS_TLS_GD,
++ BFD_RELOC_MIPS_TLS_LDM,
++ BFD_RELOC_MIPS_TLS_DTPREL_HI16,
++ BFD_RELOC_MIPS_TLS_DTPREL_LO16,
++ BFD_RELOC_MIPS_TLS_GOTTPREL,
++ BFD_RELOC_MIPS_TLS_TPREL32,
++ BFD_RELOC_MIPS_TLS_TPREL64,
++ BFD_RELOC_MIPS_TLS_TPREL_HI16,
++ BFD_RELOC_MIPS_TLS_TPREL_LO16,
++
++
++/* Fujitsu Frv Relocations. */
++ BFD_RELOC_FRV_LABEL16,
++ BFD_RELOC_FRV_LABEL24,
++ BFD_RELOC_FRV_LO16,
++ BFD_RELOC_FRV_HI16,
++ BFD_RELOC_FRV_GPREL12,
++ BFD_RELOC_FRV_GPRELU12,
++ BFD_RELOC_FRV_GPREL32,
++ BFD_RELOC_FRV_GPRELHI,
++ BFD_RELOC_FRV_GPRELLO,
++ BFD_RELOC_FRV_GOT12,
++ BFD_RELOC_FRV_GOTHI,
++ BFD_RELOC_FRV_GOTLO,
++ BFD_RELOC_FRV_FUNCDESC,
++ BFD_RELOC_FRV_FUNCDESC_GOT12,
++ BFD_RELOC_FRV_FUNCDESC_GOTHI,
++ BFD_RELOC_FRV_FUNCDESC_GOTLO,
++ BFD_RELOC_FRV_FUNCDESC_VALUE,
++ BFD_RELOC_FRV_FUNCDESC_GOTOFF12,
++ BFD_RELOC_FRV_FUNCDESC_GOTOFFHI,
++ BFD_RELOC_FRV_FUNCDESC_GOTOFFLO,
++ BFD_RELOC_FRV_GOTOFF12,
++ BFD_RELOC_FRV_GOTOFFHI,
++ BFD_RELOC_FRV_GOTOFFLO,
++ BFD_RELOC_FRV_GETTLSOFF,
++ BFD_RELOC_FRV_TLSDESC_VALUE,
++ BFD_RELOC_FRV_GOTTLSDESC12,
++ BFD_RELOC_FRV_GOTTLSDESCHI,
++ BFD_RELOC_FRV_GOTTLSDESCLO,
++ BFD_RELOC_FRV_TLSMOFF12,
++ BFD_RELOC_FRV_TLSMOFFHI,
++ BFD_RELOC_FRV_TLSMOFFLO,
++ BFD_RELOC_FRV_GOTTLSOFF12,
++ BFD_RELOC_FRV_GOTTLSOFFHI,
++ BFD_RELOC_FRV_GOTTLSOFFLO,
++ BFD_RELOC_FRV_TLSOFF,
++ BFD_RELOC_FRV_TLSDESC_RELAX,
++ BFD_RELOC_FRV_GETTLSOFF_RELAX,
++ BFD_RELOC_FRV_TLSOFF_RELAX,
++ BFD_RELOC_FRV_TLSMOFF,
++
++
++/* This is a 24bit GOT-relative reloc for the mn10300. */
++ BFD_RELOC_MN10300_GOTOFF24,
++
++/* This is a 32bit GOT-relative reloc for the mn10300, offset by two bytes
++in the instruction. */
++ BFD_RELOC_MN10300_GOT32,
++
++/* This is a 24bit GOT-relative reloc for the mn10300, offset by two bytes
++in the instruction. */
++ BFD_RELOC_MN10300_GOT24,
++
++/* This is a 16bit GOT-relative reloc for the mn10300, offset by two bytes
++in the instruction. */
++ BFD_RELOC_MN10300_GOT16,
++
++/* Copy symbol at runtime. */
++ BFD_RELOC_MN10300_COPY,
++
++/* Create GOT entry. */
++ BFD_RELOC_MN10300_GLOB_DAT,
++
++/* Create PLT entry. */
++ BFD_RELOC_MN10300_JMP_SLOT,
++
++/* Adjust by program base. */
++ BFD_RELOC_MN10300_RELATIVE,
++
++
++/* i386/elf relocations */
++ BFD_RELOC_386_GOT32,
++ BFD_RELOC_386_PLT32,
++ BFD_RELOC_386_COPY,
++ BFD_RELOC_386_GLOB_DAT,
++ BFD_RELOC_386_JUMP_SLOT,
++ BFD_RELOC_386_RELATIVE,
++ BFD_RELOC_386_GOTOFF,
++ BFD_RELOC_386_GOTPC,
++ BFD_RELOC_386_TLS_TPOFF,
++ BFD_RELOC_386_TLS_IE,
++ BFD_RELOC_386_TLS_GOTIE,
++ BFD_RELOC_386_TLS_LE,
++ BFD_RELOC_386_TLS_GD,
++ BFD_RELOC_386_TLS_LDM,
++ BFD_RELOC_386_TLS_LDO_32,
++ BFD_RELOC_386_TLS_IE_32,
++ BFD_RELOC_386_TLS_LE_32,
++ BFD_RELOC_386_TLS_DTPMOD32,
++ BFD_RELOC_386_TLS_DTPOFF32,
++ BFD_RELOC_386_TLS_TPOFF32,
++
++/* x86-64/elf relocations */
++ BFD_RELOC_X86_64_GOT32,
++ BFD_RELOC_X86_64_PLT32,
++ BFD_RELOC_X86_64_COPY,
++ BFD_RELOC_X86_64_GLOB_DAT,
++ BFD_RELOC_X86_64_JUMP_SLOT,
++ BFD_RELOC_X86_64_RELATIVE,
++ BFD_RELOC_X86_64_GOTPCREL,
++ BFD_RELOC_X86_64_32S,
++ BFD_RELOC_X86_64_DTPMOD64,
++ BFD_RELOC_X86_64_DTPOFF64,
++ BFD_RELOC_X86_64_TPOFF64,
++ BFD_RELOC_X86_64_TLSGD,
++ BFD_RELOC_X86_64_TLSLD,
++ BFD_RELOC_X86_64_DTPOFF32,
++ BFD_RELOC_X86_64_GOTTPOFF,
++ BFD_RELOC_X86_64_TPOFF32,
++ BFD_RELOC_X86_64_GOTOFF64,
++ BFD_RELOC_X86_64_GOTPC32,
++
++/* ns32k relocations */
++ BFD_RELOC_NS32K_IMM_8,
++ BFD_RELOC_NS32K_IMM_16,
++ BFD_RELOC_NS32K_IMM_32,
++ BFD_RELOC_NS32K_IMM_8_PCREL,
++ BFD_RELOC_NS32K_IMM_16_PCREL,
++ BFD_RELOC_NS32K_IMM_32_PCREL,
++ BFD_RELOC_NS32K_DISP_8,
++ BFD_RELOC_NS32K_DISP_16,
++ BFD_RELOC_NS32K_DISP_32,
++ BFD_RELOC_NS32K_DISP_8_PCREL,
++ BFD_RELOC_NS32K_DISP_16_PCREL,
++ BFD_RELOC_NS32K_DISP_32_PCREL,
++
++/* PDP11 relocations */
++ BFD_RELOC_PDP11_DISP_8_PCREL,
++ BFD_RELOC_PDP11_DISP_6_PCREL,
++
++/* Picojava relocs. Not all of these appear in object files. */
++ BFD_RELOC_PJ_CODE_HI16,
++ BFD_RELOC_PJ_CODE_LO16,
++ BFD_RELOC_PJ_CODE_DIR16,
++ BFD_RELOC_PJ_CODE_DIR32,
++ BFD_RELOC_PJ_CODE_REL16,
++ BFD_RELOC_PJ_CODE_REL32,
++
++/* Power(rs6000) and PowerPC relocations. */
++ BFD_RELOC_PPC_B26,
++ BFD_RELOC_PPC_BA26,
++ BFD_RELOC_PPC_TOC16,
++ BFD_RELOC_PPC_B16,
++ BFD_RELOC_PPC_B16_BRTAKEN,
++ BFD_RELOC_PPC_B16_BRNTAKEN,
++ BFD_RELOC_PPC_BA16,
++ BFD_RELOC_PPC_BA16_BRTAKEN,
++ BFD_RELOC_PPC_BA16_BRNTAKEN,
++ BFD_RELOC_PPC_COPY,
++ BFD_RELOC_PPC_GLOB_DAT,
++ BFD_RELOC_PPC_JMP_SLOT,
++ BFD_RELOC_PPC_RELATIVE,
++ BFD_RELOC_PPC_LOCAL24PC,
++ BFD_RELOC_PPC_EMB_NADDR32,
++ BFD_RELOC_PPC_EMB_NADDR16,
++ BFD_RELOC_PPC_EMB_NADDR16_LO,
++ BFD_RELOC_PPC_EMB_NADDR16_HI,
++ BFD_RELOC_PPC_EMB_NADDR16_HA,
++ BFD_RELOC_PPC_EMB_SDAI16,
++ BFD_RELOC_PPC_EMB_SDA2I16,
++ BFD_RELOC_PPC_EMB_SDA2REL,
++ BFD_RELOC_PPC_EMB_SDA21,
++ BFD_RELOC_PPC_EMB_MRKREF,
++ BFD_RELOC_PPC_EMB_RELSEC16,
++ BFD_RELOC_PPC_EMB_RELST_LO,
++ BFD_RELOC_PPC_EMB_RELST_HI,
++ BFD_RELOC_PPC_EMB_RELST_HA,
++ BFD_RELOC_PPC_EMB_BIT_FLD,
++ BFD_RELOC_PPC_EMB_RELSDA,
++ BFD_RELOC_PPC64_HIGHER,
++ BFD_RELOC_PPC64_HIGHER_S,
++ BFD_RELOC_PPC64_HIGHEST,
++ BFD_RELOC_PPC64_HIGHEST_S,
++ BFD_RELOC_PPC64_TOC16_LO,
++ BFD_RELOC_PPC64_TOC16_HI,
++ BFD_RELOC_PPC64_TOC16_HA,
++ BFD_RELOC_PPC64_TOC,
++ BFD_RELOC_PPC64_PLTGOT16,
++ BFD_RELOC_PPC64_PLTGOT16_LO,
++ BFD_RELOC_PPC64_PLTGOT16_HI,
++ BFD_RELOC_PPC64_PLTGOT16_HA,
++ BFD_RELOC_PPC64_ADDR16_DS,
++ BFD_RELOC_PPC64_ADDR16_LO_DS,
++ BFD_RELOC_PPC64_GOT16_DS,
++ BFD_RELOC_PPC64_GOT16_LO_DS,
++ BFD_RELOC_PPC64_PLT16_LO_DS,
++ BFD_RELOC_PPC64_SECTOFF_DS,
++ BFD_RELOC_PPC64_SECTOFF_LO_DS,
++ BFD_RELOC_PPC64_TOC16_DS,
++ BFD_RELOC_PPC64_TOC16_LO_DS,
++ BFD_RELOC_PPC64_PLTGOT16_DS,
++ BFD_RELOC_PPC64_PLTGOT16_LO_DS,
++
++/* PowerPC and PowerPC64 thread-local storage relocations. */
++ BFD_RELOC_PPC_TLS,
++ BFD_RELOC_PPC_DTPMOD,
++ BFD_RELOC_PPC_TPREL16,
++ BFD_RELOC_PPC_TPREL16_LO,
++ BFD_RELOC_PPC_TPREL16_HI,
++ BFD_RELOC_PPC_TPREL16_HA,
++ BFD_RELOC_PPC_TPREL,
++ BFD_RELOC_PPC_DTPREL16,
++ BFD_RELOC_PPC_DTPREL16_LO,
++ BFD_RELOC_PPC_DTPREL16_HI,
++ BFD_RELOC_PPC_DTPREL16_HA,
++ BFD_RELOC_PPC_DTPREL,
++ BFD_RELOC_PPC_GOT_TLSGD16,
++ BFD_RELOC_PPC_GOT_TLSGD16_LO,
++ BFD_RELOC_PPC_GOT_TLSGD16_HI,
++ BFD_RELOC_PPC_GOT_TLSGD16_HA,
++ BFD_RELOC_PPC_GOT_TLSLD16,
++ BFD_RELOC_PPC_GOT_TLSLD16_LO,
++ BFD_RELOC_PPC_GOT_TLSLD16_HI,
++ BFD_RELOC_PPC_GOT_TLSLD16_HA,
++ BFD_RELOC_PPC_GOT_TPREL16,
++ BFD_RELOC_PPC_GOT_TPREL16_LO,
++ BFD_RELOC_PPC_GOT_TPREL16_HI,
++ BFD_RELOC_PPC_GOT_TPREL16_HA,
++ BFD_RELOC_PPC_GOT_DTPREL16,
++ BFD_RELOC_PPC_GOT_DTPREL16_LO,
++ BFD_RELOC_PPC_GOT_DTPREL16_HI,
++ BFD_RELOC_PPC_GOT_DTPREL16_HA,
++ BFD_RELOC_PPC64_TPREL16_DS,
++ BFD_RELOC_PPC64_TPREL16_LO_DS,
++ BFD_RELOC_PPC64_TPREL16_HIGHER,
++ BFD_RELOC_PPC64_TPREL16_HIGHERA,
++ BFD_RELOC_PPC64_TPREL16_HIGHEST,
++ BFD_RELOC_PPC64_TPREL16_HIGHESTA,
++ BFD_RELOC_PPC64_DTPREL16_DS,
++ BFD_RELOC_PPC64_DTPREL16_LO_DS,
++ BFD_RELOC_PPC64_DTPREL16_HIGHER,
++ BFD_RELOC_PPC64_DTPREL16_HIGHERA,
++ BFD_RELOC_PPC64_DTPREL16_HIGHEST,
++ BFD_RELOC_PPC64_DTPREL16_HIGHESTA,
++
++/* IBM 370/390 relocations */
++ BFD_RELOC_I370_D12,
++
++/* The type of reloc used to build a constructor table - at the moment
++probably a 32 bit wide absolute relocation, but the target can choose.
++It generally does map to one of the other relocation types. */
++ BFD_RELOC_CTOR,
++
++/* ARM 26 bit pc-relative branch. The lowest two bits must be zero and are
++not stored in the instruction. */
++ BFD_RELOC_ARM_PCREL_BRANCH,
++
++/* ARM 26 bit pc-relative branch. The lowest bit must be zero and is
++not stored in the instruction. The 2nd lowest bit comes from a 1 bit
++field in the instruction. */
++ BFD_RELOC_ARM_PCREL_BLX,
++
++/* Thumb 22 bit pc-relative branch. The lowest bit must be zero and is
++not stored in the instruction. The 2nd lowest bit comes from a 1 bit
++field in the instruction. */
++ BFD_RELOC_THUMB_PCREL_BLX,
++
++/* Thumb 7-, 9-, 12-, 20-, 23-, and 25-bit pc-relative branches.
++The lowest bit must be zero and is not stored in the instruction.
++Note that the corresponding ELF R_ARM_THM_JUMPnn constant has an
++"nn" one smaller in all cases. Note further that BRANCH23
++corresponds to R_ARM_THM_CALL. */
++ BFD_RELOC_THUMB_PCREL_BRANCH7,
++ BFD_RELOC_THUMB_PCREL_BRANCH9,
++ BFD_RELOC_THUMB_PCREL_BRANCH12,
++ BFD_RELOC_THUMB_PCREL_BRANCH20,
++ BFD_RELOC_THUMB_PCREL_BRANCH23,
++ BFD_RELOC_THUMB_PCREL_BRANCH25,
++
++/* 12-bit immediate offset, used in ARM-format ldr and str instructions. */
++ BFD_RELOC_ARM_OFFSET_IMM,
++
++/* 5-bit immediate offset, used in Thumb-format ldr and str instructions. */
++ BFD_RELOC_ARM_THUMB_OFFSET,
++
++/* Pc-relative or absolute relocation depending on target. Used for
++entries in .init_array sections. */
++ BFD_RELOC_ARM_TARGET1,
++
++/* Read-only segment base relative address. */
++ BFD_RELOC_ARM_ROSEGREL32,
++
++/* Data segment base relative address. */
++ BFD_RELOC_ARM_SBREL32,
++
++/* This reloc is used for references to RTTI data from exception handling
++tables. The actual definition depends on the target. It may be a
++pc-relative or some form of GOT-indirect relocation. */
++ BFD_RELOC_ARM_TARGET2,
++
++/* 31-bit PC relative address. */
++ BFD_RELOC_ARM_PREL31,
++
++/* Relocations for setting up GOTs and PLTs for shared libraries. */
++ BFD_RELOC_ARM_JUMP_SLOT,
++ BFD_RELOC_ARM_GLOB_DAT,
++ BFD_RELOC_ARM_GOT32,
++ BFD_RELOC_ARM_PLT32,
++ BFD_RELOC_ARM_RELATIVE,
++ BFD_RELOC_ARM_GOTOFF,
++ BFD_RELOC_ARM_GOTPC,
++
++/* ARM thread-local storage relocations. */
++ BFD_RELOC_ARM_TLS_GD32,
++ BFD_RELOC_ARM_TLS_LDO32,
++ BFD_RELOC_ARM_TLS_LDM32,
++ BFD_RELOC_ARM_TLS_DTPOFF32,
++ BFD_RELOC_ARM_TLS_DTPMOD32,
++ BFD_RELOC_ARM_TLS_TPOFF32,
++ BFD_RELOC_ARM_TLS_IE32,
++ BFD_RELOC_ARM_TLS_LE32,
++
++/* These relocs are only used within the ARM assembler. They are not
++(at present) written to any object files. */
++ BFD_RELOC_ARM_IMMEDIATE,
++ BFD_RELOC_ARM_ADRL_IMMEDIATE,
++ BFD_RELOC_ARM_T32_IMMEDIATE,
++ BFD_RELOC_ARM_SHIFT_IMM,
++ BFD_RELOC_ARM_SMI,
++ BFD_RELOC_ARM_SWI,
++ BFD_RELOC_ARM_MULTI,
++ BFD_RELOC_ARM_CP_OFF_IMM,
++ BFD_RELOC_ARM_CP_OFF_IMM_S2,
++ BFD_RELOC_ARM_ADR_IMM,
++ BFD_RELOC_ARM_LDR_IMM,
++ BFD_RELOC_ARM_LITERAL,
++ BFD_RELOC_ARM_IN_POOL,
++ BFD_RELOC_ARM_OFFSET_IMM8,
++ BFD_RELOC_ARM_T32_OFFSET_U8,
++ BFD_RELOC_ARM_T32_OFFSET_IMM,
++ BFD_RELOC_ARM_HWLITERAL,
++ BFD_RELOC_ARM_THUMB_ADD,
++ BFD_RELOC_ARM_THUMB_IMM,
++ BFD_RELOC_ARM_THUMB_SHIFT,
++
++/* Renesas / SuperH SH relocs. Not all of these appear in object files. */
++ BFD_RELOC_SH_PCDISP8BY2,
++ BFD_RELOC_SH_PCDISP12BY2,
++ BFD_RELOC_SH_IMM3,
++ BFD_RELOC_SH_IMM3U,
++ BFD_RELOC_SH_DISP12,
++ BFD_RELOC_SH_DISP12BY2,
++ BFD_RELOC_SH_DISP12BY4,
++ BFD_RELOC_SH_DISP12BY8,
++ BFD_RELOC_SH_DISP20,
++ BFD_RELOC_SH_DISP20BY8,
++ BFD_RELOC_SH_IMM4,
++ BFD_RELOC_SH_IMM4BY2,
++ BFD_RELOC_SH_IMM4BY4,
++ BFD_RELOC_SH_IMM8,
++ BFD_RELOC_SH_IMM8BY2,
++ BFD_RELOC_SH_IMM8BY4,
++ BFD_RELOC_SH_PCRELIMM8BY2,
++ BFD_RELOC_SH_PCRELIMM8BY4,
++ BFD_RELOC_SH_SWITCH16,
++ BFD_RELOC_SH_SWITCH32,
++ BFD_RELOC_SH_USES,
++ BFD_RELOC_SH_COUNT,
++ BFD_RELOC_SH_ALIGN,
++ BFD_RELOC_SH_CODE,
++ BFD_RELOC_SH_DATA,
++ BFD_RELOC_SH_LABEL,
++ BFD_RELOC_SH_LOOP_START,
++ BFD_RELOC_SH_LOOP_END,
++ BFD_RELOC_SH_COPY,
++ BFD_RELOC_SH_GLOB_DAT,
++ BFD_RELOC_SH_JMP_SLOT,
++ BFD_RELOC_SH_RELATIVE,
++ BFD_RELOC_SH_GOTPC,
++ BFD_RELOC_SH_GOT_LOW16,
++ BFD_RELOC_SH_GOT_MEDLOW16,
++ BFD_RELOC_SH_GOT_MEDHI16,
++ BFD_RELOC_SH_GOT_HI16,
++ BFD_RELOC_SH_GOTPLT_LOW16,
++ BFD_RELOC_SH_GOTPLT_MEDLOW16,
++ BFD_RELOC_SH_GOTPLT_MEDHI16,
++ BFD_RELOC_SH_GOTPLT_HI16,
++ BFD_RELOC_SH_PLT_LOW16,
++ BFD_RELOC_SH_PLT_MEDLOW16,
++ BFD_RELOC_SH_PLT_MEDHI16,
++ BFD_RELOC_SH_PLT_HI16,
++ BFD_RELOC_SH_GOTOFF_LOW16,
++ BFD_RELOC_SH_GOTOFF_MEDLOW16,
++ BFD_RELOC_SH_GOTOFF_MEDHI16,
++ BFD_RELOC_SH_GOTOFF_HI16,
++ BFD_RELOC_SH_GOTPC_LOW16,
++ BFD_RELOC_SH_GOTPC_MEDLOW16,
++ BFD_RELOC_SH_GOTPC_MEDHI16,
++ BFD_RELOC_SH_GOTPC_HI16,
++ BFD_RELOC_SH_COPY64,
++ BFD_RELOC_SH_GLOB_DAT64,
++ BFD_RELOC_SH_JMP_SLOT64,
++ BFD_RELOC_SH_RELATIVE64,
++ BFD_RELOC_SH_GOT10BY4,
++ BFD_RELOC_SH_GOT10BY8,
++ BFD_RELOC_SH_GOTPLT10BY4,
++ BFD_RELOC_SH_GOTPLT10BY8,
++ BFD_RELOC_SH_GOTPLT32,
++ BFD_RELOC_SH_SHMEDIA_CODE,
++ BFD_RELOC_SH_IMMU5,
++ BFD_RELOC_SH_IMMS6,
++ BFD_RELOC_SH_IMMS6BY32,
++ BFD_RELOC_SH_IMMU6,
++ BFD_RELOC_SH_IMMS10,
++ BFD_RELOC_SH_IMMS10BY2,
++ BFD_RELOC_SH_IMMS10BY4,
++ BFD_RELOC_SH_IMMS10BY8,
++ BFD_RELOC_SH_IMMS16,
++ BFD_RELOC_SH_IMMU16,
++ BFD_RELOC_SH_IMM_LOW16,
++ BFD_RELOC_SH_IMM_LOW16_PCREL,
++ BFD_RELOC_SH_IMM_MEDLOW16,
++ BFD_RELOC_SH_IMM_MEDLOW16_PCREL,
++ BFD_RELOC_SH_IMM_MEDHI16,
++ BFD_RELOC_SH_IMM_MEDHI16_PCREL,
++ BFD_RELOC_SH_IMM_HI16,
++ BFD_RELOC_SH_IMM_HI16_PCREL,
++ BFD_RELOC_SH_PT_16,
++ BFD_RELOC_SH_TLS_GD_32,
++ BFD_RELOC_SH_TLS_LD_32,
++ BFD_RELOC_SH_TLS_LDO_32,
++ BFD_RELOC_SH_TLS_IE_32,
++ BFD_RELOC_SH_TLS_LE_32,
++ BFD_RELOC_SH_TLS_DTPMOD32,
++ BFD_RELOC_SH_TLS_DTPOFF32,
++ BFD_RELOC_SH_TLS_TPOFF32,
++
++/* ARC Cores relocs.
++ARC 22 bit pc-relative branch. The lowest two bits must be zero and are
++not stored in the instruction. The high 20 bits are installed in bits 26
++through 7 of the instruction. */
++ BFD_RELOC_ARC_B22_PCREL,
++
++/* ARC 26 bit absolute branch. The lowest two bits must be zero and are not
++stored in the instruction. The high 24 bits are installed in bits 23
++through 0. */
++ BFD_RELOC_ARC_B26,
++
++/* Mitsubishi D10V relocs.
++This is a 10-bit reloc with the right 2 bits
++assumed to be 0. */
++ BFD_RELOC_D10V_10_PCREL_R,
++
++/* Mitsubishi D10V relocs.
++This is a 10-bit reloc with the right 2 bits
++assumed to be 0. This is the same as the previous reloc
++except it is in the left container, i.e.,
++shifted left 15 bits. */
++ BFD_RELOC_D10V_10_PCREL_L,
++
++/* This is an 18-bit reloc with the right 2 bits
++assumed to be 0. */
++ BFD_RELOC_D10V_18,
++
++/* This is an 18-bit reloc with the right 2 bits
++assumed to be 0. */
++ BFD_RELOC_D10V_18_PCREL,
++
++/* Mitsubishi D30V relocs.
++This is a 6-bit absolute reloc. */
++ BFD_RELOC_D30V_6,
++
++/* This is a 6-bit pc-relative reloc with
++the right 3 bits assumed to be 0. */
++ BFD_RELOC_D30V_9_PCREL,
++
++/* This is a 6-bit pc-relative reloc with
++the right 3 bits assumed to be 0. Same
++as the previous reloc but on the right side
++of the container. */
++ BFD_RELOC_D30V_9_PCREL_R,
++
++/* This is a 12-bit absolute reloc with the
++right 3 bits assumed to be 0. */
++ BFD_RELOC_D30V_15,
++
++/* This is a 12-bit pc-relative reloc with
++the right 3 bits assumed to be 0. */
++ BFD_RELOC_D30V_15_PCREL,
++
++/* This is a 12-bit pc-relative reloc with
++the right 3 bits assumed to be 0. Same
++as the previous reloc but on the right side
++of the container. */
++ BFD_RELOC_D30V_15_PCREL_R,
++
++/* This is an 18-bit absolute reloc with
++the right 3 bits assumed to be 0. */
++ BFD_RELOC_D30V_21,
++
++/* This is an 18-bit pc-relative reloc with
++the right 3 bits assumed to be 0. */
++ BFD_RELOC_D30V_21_PCREL,
++
++/* This is an 18-bit pc-relative reloc with
++the right 3 bits assumed to be 0. Same
++as the previous reloc but on the right side
++of the container. */
++ BFD_RELOC_D30V_21_PCREL_R,
++
++/* This is a 32-bit absolute reloc. */
++ BFD_RELOC_D30V_32,
++
++/* This is a 32-bit pc-relative reloc. */
++ BFD_RELOC_D30V_32_PCREL,
++
++/* DLX relocs */
++ BFD_RELOC_DLX_HI16_S,
++
++/* DLX relocs */
++ BFD_RELOC_DLX_LO16,
++
++/* DLX relocs */
++ BFD_RELOC_DLX_JMP26,
++
++/* Renesas M16C/M32C Relocations. */
++ BFD_RELOC_M16C_8_PCREL8,
++ BFD_RELOC_M16C_16_PCREL8,
++ BFD_RELOC_M16C_8_PCREL16,
++ BFD_RELOC_M16C_8_ELABEL24,
++ BFD_RELOC_M16C_8_ABS16,
++ BFD_RELOC_M16C_16_ABS16,
++ BFD_RELOC_M16C_16_ABS24,
++ BFD_RELOC_M16C_16_ABS32,
++ BFD_RELOC_M16C_24_ABS16,
++ BFD_RELOC_M16C_24_ABS24,
++ BFD_RELOC_M16C_24_ABS32,
++ BFD_RELOC_M16C_32_ABS16,
++ BFD_RELOC_M16C_32_ABS24,
++ BFD_RELOC_M16C_32_ABS32,
++ BFD_RELOC_M16C_40_ABS16,
++ BFD_RELOC_M16C_40_ABS24,
++ BFD_RELOC_M16C_40_ABS32,
++
++/* Renesas M32R (formerly Mitsubishi M32R) relocs.
++This is a 24 bit absolute address. */
++ BFD_RELOC_M32R_24,
++
++/* This is a 10-bit pc-relative reloc with the right 2 bits assumed to be 0. */
++ BFD_RELOC_M32R_10_PCREL,
++
++/* This is an 18-bit reloc with the right 2 bits assumed to be 0. */
++ BFD_RELOC_M32R_18_PCREL,
++
++/* This is a 26-bit reloc with the right 2 bits assumed to be 0. */
++ BFD_RELOC_M32R_26_PCREL,
++
++/* This is a 16-bit reloc containing the high 16 bits of an address
++used when the lower 16 bits are treated as unsigned. */
++ BFD_RELOC_M32R_HI16_ULO,
++
++/* This is a 16-bit reloc containing the high 16 bits of an address
++used when the lower 16 bits are treated as signed. */
++ BFD_RELOC_M32R_HI16_SLO,
++
++/* This is a 16-bit reloc containing the lower 16 bits of an address. */
++ BFD_RELOC_M32R_LO16,
++
++/* This is a 16-bit reloc containing the small data area offset for use in
++add3, load, and store instructions. */
++ BFD_RELOC_M32R_SDA16,
++
++/* For PIC. */
++ BFD_RELOC_M32R_GOT24,
++ BFD_RELOC_M32R_26_PLTREL,
++ BFD_RELOC_M32R_COPY,
++ BFD_RELOC_M32R_GLOB_DAT,
++ BFD_RELOC_M32R_JMP_SLOT,
++ BFD_RELOC_M32R_RELATIVE,
++ BFD_RELOC_M32R_GOTOFF,
++ BFD_RELOC_M32R_GOTOFF_HI_ULO,
++ BFD_RELOC_M32R_GOTOFF_HI_SLO,
++ BFD_RELOC_M32R_GOTOFF_LO,
++ BFD_RELOC_M32R_GOTPC24,
++ BFD_RELOC_M32R_GOT16_HI_ULO,
++ BFD_RELOC_M32R_GOT16_HI_SLO,
++ BFD_RELOC_M32R_GOT16_LO,
++ BFD_RELOC_M32R_GOTPC_HI_ULO,
++ BFD_RELOC_M32R_GOTPC_HI_SLO,
++ BFD_RELOC_M32R_GOTPC_LO,
++
++/* This is a 9-bit reloc */
++ BFD_RELOC_V850_9_PCREL,
++
++/* This is a 22-bit reloc */
++ BFD_RELOC_V850_22_PCREL,
++
++/* This is a 16 bit offset from the short data area pointer. */
++ BFD_RELOC_V850_SDA_16_16_OFFSET,
++
++/* This is a 16 bit offset (of which only 15 bits are used) from the
++short data area pointer. */
++ BFD_RELOC_V850_SDA_15_16_OFFSET,
++
++/* This is a 16 bit offset from the zero data area pointer. */
++ BFD_RELOC_V850_ZDA_16_16_OFFSET,
++
++/* This is a 16 bit offset (of which only 15 bits are used) from the
++zero data area pointer. */
++ BFD_RELOC_V850_ZDA_15_16_OFFSET,
++
++/* This is an 8 bit offset (of which only 6 bits are used) from the
++tiny data area pointer. */
++ BFD_RELOC_V850_TDA_6_8_OFFSET,
++
++/* This is an 8bit offset (of which only 7 bits are used) from the tiny
++data area pointer. */
++ BFD_RELOC_V850_TDA_7_8_OFFSET,
++
++/* This is a 7 bit offset from the tiny data area pointer. */
++ BFD_RELOC_V850_TDA_7_7_OFFSET,
++
++/* This is a 16 bit offset from the tiny data area pointer. */
++ BFD_RELOC_V850_TDA_16_16_OFFSET,
++
++/* This is a 5 bit offset (of which only 4 bits are used) from the tiny
++data area pointer. */
++ BFD_RELOC_V850_TDA_4_5_OFFSET,
++
++/* This is a 4 bit offset from the tiny data area pointer. */
++ BFD_RELOC_V850_TDA_4_4_OFFSET,
++
++/* This is a 16 bit offset from the short data area pointer, with the
++bits placed non-contiguously in the instruction. */
++ BFD_RELOC_V850_SDA_16_16_SPLIT_OFFSET,
++
++/* This is a 16 bit offset from the zero data area pointer, with the
++bits placed non-contiguously in the instruction. */
++ BFD_RELOC_V850_ZDA_16_16_SPLIT_OFFSET,
++
++/* This is a 6 bit offset from the call table base pointer. */
++ BFD_RELOC_V850_CALLT_6_7_OFFSET,
++
++/* This is a 16 bit offset from the call table base pointer. */
++ BFD_RELOC_V850_CALLT_16_16_OFFSET,
++
++/* Used for relaxing indirect function calls. */
++ BFD_RELOC_V850_LONGCALL,
++
++/* Used for relaxing indirect jumps. */
++ BFD_RELOC_V850_LONGJUMP,
++
++/* Used to maintain alignment whilst relaxing. */
++ BFD_RELOC_V850_ALIGN,
++
++/* This is a variation of BFD_RELOC_LO16 that can be used in v850e ld.bu
++instructions. */
++ BFD_RELOC_V850_LO16_SPLIT_OFFSET,
++
++/* This is a 32bit pcrel reloc for the mn10300, offset by two bytes in the
++instruction. */
++ BFD_RELOC_MN10300_32_PCREL,
++
++/* This is a 16bit pcrel reloc for the mn10300, offset by two bytes in the
++instruction. */
++ BFD_RELOC_MN10300_16_PCREL,
++
++/* This is an 8bit DP reloc for the tms320c30, where the most
++significant 8 bits of a 24 bit word are placed into the least
++significant 8 bits of the opcode. */
++ BFD_RELOC_TIC30_LDP,
++
++/* This is a 7bit reloc for the tms320c54x, where the least
++significant 7 bits of a 16 bit word are placed into the least
++significant 7 bits of the opcode. */
++ BFD_RELOC_TIC54X_PARTLS7,
++
++/* This is a 9bit DP reloc for the tms320c54x, where the most
++significant 9 bits of a 16 bit word are placed into the least
++significant 9 bits of the opcode. */
++ BFD_RELOC_TIC54X_PARTMS9,
++
++/* This is an extended address 23-bit reloc for the tms320c54x. */
++ BFD_RELOC_TIC54X_23,
++
++/* This is a 16-bit reloc for the tms320c54x, where the least
++significant 16 bits of a 23-bit extended address are placed into
++the opcode. */
++ BFD_RELOC_TIC54X_16_OF_23,
++
++/* This is a reloc for the tms320c54x, where the most
++significant 7 bits of a 23-bit extended address are placed into
++the opcode. */
++ BFD_RELOC_TIC54X_MS7_OF_23,
++
++/* This is a 48 bit reloc for the FR30 that stores 32 bits. */
++ BFD_RELOC_FR30_48,
++
++/* This is a 32 bit reloc for the FR30 that stores 20 bits split up into
++two sections. */
++ BFD_RELOC_FR30_20,
++
++/* This is a 16 bit reloc for the FR30 that stores a 6 bit word offset in
++4 bits. */
++ BFD_RELOC_FR30_6_IN_4,
++
++/* This is a 16 bit reloc for the FR30 that stores an 8 bit byte offset
++into 8 bits. */
++ BFD_RELOC_FR30_8_IN_8,
++
++/* This is a 16 bit reloc for the FR30 that stores a 9 bit short offset
++into 8 bits. */
++ BFD_RELOC_FR30_9_IN_8,
++
++/* This is a 16 bit reloc for the FR30 that stores a 10 bit word offset
++into 8 bits. */
++ BFD_RELOC_FR30_10_IN_8,
++
++/* This is a 16 bit reloc for the FR30 that stores a 9 bit pc relative
++short offset into 8 bits. */
++ BFD_RELOC_FR30_9_PCREL,
++
++/* This is a 16 bit reloc for the FR30 that stores a 12 bit pc relative
++short offset into 11 bits. */
++ BFD_RELOC_FR30_12_PCREL,
++
++/* Motorola Mcore relocations. */
++ BFD_RELOC_MCORE_PCREL_IMM8BY4,
++ BFD_RELOC_MCORE_PCREL_IMM11BY2,
++ BFD_RELOC_MCORE_PCREL_IMM4BY2,
++ BFD_RELOC_MCORE_PCREL_32,
++ BFD_RELOC_MCORE_PCREL_JSR_IMM11BY2,
++ BFD_RELOC_MCORE_RVA,
++
++/* These are relocations for the GETA instruction. */
++ BFD_RELOC_MMIX_GETA,
++ BFD_RELOC_MMIX_GETA_1,
++ BFD_RELOC_MMIX_GETA_2,
++ BFD_RELOC_MMIX_GETA_3,
++
++/* These are relocations for a conditional branch instruction. */
++ BFD_RELOC_MMIX_CBRANCH,
++ BFD_RELOC_MMIX_CBRANCH_J,
++ BFD_RELOC_MMIX_CBRANCH_1,
++ BFD_RELOC_MMIX_CBRANCH_2,
++ BFD_RELOC_MMIX_CBRANCH_3,
++
++/* These are relocations for the PUSHJ instruction. */
++ BFD_RELOC_MMIX_PUSHJ,
++ BFD_RELOC_MMIX_PUSHJ_1,
++ BFD_RELOC_MMIX_PUSHJ_2,
++ BFD_RELOC_MMIX_PUSHJ_3,
++ BFD_RELOC_MMIX_PUSHJ_STUBBABLE,
++
++/* These are relocations for the JMP instruction. */
++ BFD_RELOC_MMIX_JMP,
++ BFD_RELOC_MMIX_JMP_1,
++ BFD_RELOC_MMIX_JMP_2,
++ BFD_RELOC_MMIX_JMP_3,
++
++/* This is a relocation for a relative address as in a GETA instruction or
++a branch. */
++ BFD_RELOC_MMIX_ADDR19,
++
++/* This is a relocation for a relative address as in a JMP instruction. */
++ BFD_RELOC_MMIX_ADDR27,
++
++/* This is a relocation for an instruction field that may be a general
++register or a value 0..255. */
++ BFD_RELOC_MMIX_REG_OR_BYTE,
++
++/* This is a relocation for an instruction field that may be a general
++register. */
++ BFD_RELOC_MMIX_REG,
++
++/* This is a relocation for two instruction fields holding a register and
++an offset, the equivalent of the relocation. */
++ BFD_RELOC_MMIX_BASE_PLUS_OFFSET,
++
++/* This relocation is an assertion that the expression is not allocated as
++a global register. It does not modify contents. */
++ BFD_RELOC_MMIX_LOCAL,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit pc relative
++short offset into 7 bits. */
++ BFD_RELOC_AVR_7_PCREL,
++
++/* This is a 16 bit reloc for the AVR that stores 13 bit pc relative
++short offset into 12 bits. */
++ BFD_RELOC_AVR_13_PCREL,
++
++/* This is a 16 bit reloc for the AVR that stores 17 bit value (usually
++program memory address) into 16 bits. */
++ BFD_RELOC_AVR_16_PM,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
++data memory address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_LO8_LDI,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
++of data memory address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_HI8_LDI,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit
++of program memory address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_HH8_LDI,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(usually data memory address) into 8 bit immediate value of SUBI insn. */
++ BFD_RELOC_AVR_LO8_LDI_NEG,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(high 8 bit of data memory address) into 8 bit immediate value of
++SUBI insn. */
++ BFD_RELOC_AVR_HI8_LDI_NEG,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(most high 8 bit of program memory address) into 8 bit immediate value
++of LDI or SUBI insn. */
++ BFD_RELOC_AVR_HH8_LDI_NEG,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (usually
++command address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_LO8_LDI_PM,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (high 8 bit
++of command address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_HI8_LDI_PM,
++
++/* This is a 16 bit reloc for the AVR that stores 8 bit value (most high 8 bit
++of command address) into 8 bit immediate value of LDI insn. */
++ BFD_RELOC_AVR_HH8_LDI_PM,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(usually command address) into 8 bit immediate value of SUBI insn. */
++ BFD_RELOC_AVR_LO8_LDI_PM_NEG,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(high 8 bit of 16 bit command address) into 8 bit immediate value
++of SUBI insn. */
++ BFD_RELOC_AVR_HI8_LDI_PM_NEG,
++
++/* This is a 16 bit reloc for the AVR that stores negated 8 bit value
++(high 6 bit of 22 bit command address) into 8 bit immediate
++value of SUBI insn. */
++ BFD_RELOC_AVR_HH8_LDI_PM_NEG,
++
++/* This is a 32 bit reloc for the AVR that stores 23 bit value
++into 22 bits. */
++ BFD_RELOC_AVR_CALL,
++
++/* This is a 16 bit reloc for the AVR that stores all needed bits
++for absolute addressing with ldi with overflow check to linktime */
++ BFD_RELOC_AVR_LDI,
++
++/* This is a 6 bit reloc for the AVR that stores offset for ldd/std
++instructions */
++ BFD_RELOC_AVR_6,
++
++/* This is a 6 bit reloc for the AVR that stores offset for adiw/sbiw
++instructions */
++ BFD_RELOC_AVR_6_ADIW,
++
++/* Direct 12 bit. */
++ BFD_RELOC_390_12,
++
++/* 12 bit GOT offset. */
++ BFD_RELOC_390_GOT12,
++
++/* 32 bit PC relative PLT address. */
++ BFD_RELOC_390_PLT32,
++
++/* Copy symbol at runtime. */
++ BFD_RELOC_390_COPY,
++
++/* Create GOT entry. */
++ BFD_RELOC_390_GLOB_DAT,
++
++/* Create PLT entry. */
++ BFD_RELOC_390_JMP_SLOT,
++
++/* Adjust by program base. */
++ BFD_RELOC_390_RELATIVE,
++
++/* 32 bit PC relative offset to GOT. */
++ BFD_RELOC_390_GOTPC,
++
++/* 16 bit GOT offset. */
++ BFD_RELOC_390_GOT16,
++
++/* PC relative 16 bit shifted by 1. */
++ BFD_RELOC_390_PC16DBL,
++
++/* 16 bit PC rel. PLT shifted by 1. */
++ BFD_RELOC_390_PLT16DBL,
++
++/* PC relative 32 bit shifted by 1. */
++ BFD_RELOC_390_PC32DBL,
++
++/* 32 bit PC rel. PLT shifted by 1. */
++ BFD_RELOC_390_PLT32DBL,
++
++/* 32 bit PC rel. GOT shifted by 1. */
++ BFD_RELOC_390_GOTPCDBL,
++
++/* 64 bit GOT offset. */
++ BFD_RELOC_390_GOT64,
++
++/* 64 bit PC relative PLT address. */
++ BFD_RELOC_390_PLT64,
++
++/* 32 bit rel. offset to GOT entry. */
++ BFD_RELOC_390_GOTENT,
++
++/* 64 bit offset to GOT. */
++ BFD_RELOC_390_GOTOFF64,
++
++/* 12-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_390_GOTPLT12,
++
++/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_390_GOTPLT16,
++
++/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_390_GOTPLT32,
++
++/* 64-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_390_GOTPLT64,
++
++/* 32-bit rel. offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_390_GOTPLTENT,
++
++/* 16-bit rel. offset from the GOT to a PLT entry. */
++ BFD_RELOC_390_PLTOFF16,
++
++/* 32-bit rel. offset from the GOT to a PLT entry. */
++ BFD_RELOC_390_PLTOFF32,
++
++/* 64-bit rel. offset from the GOT to a PLT entry. */
++ BFD_RELOC_390_PLTOFF64,
++
++/* s390 tls relocations. */
++ BFD_RELOC_390_TLS_LOAD,
++ BFD_RELOC_390_TLS_GDCALL,
++ BFD_RELOC_390_TLS_LDCALL,
++ BFD_RELOC_390_TLS_GD32,
++ BFD_RELOC_390_TLS_GD64,
++ BFD_RELOC_390_TLS_GOTIE12,
++ BFD_RELOC_390_TLS_GOTIE32,
++ BFD_RELOC_390_TLS_GOTIE64,
++ BFD_RELOC_390_TLS_LDM32,
++ BFD_RELOC_390_TLS_LDM64,
++ BFD_RELOC_390_TLS_IE32,
++ BFD_RELOC_390_TLS_IE64,
++ BFD_RELOC_390_TLS_IEENT,
++ BFD_RELOC_390_TLS_LE32,
++ BFD_RELOC_390_TLS_LE64,
++ BFD_RELOC_390_TLS_LDO32,
++ BFD_RELOC_390_TLS_LDO64,
++ BFD_RELOC_390_TLS_DTPMOD,
++ BFD_RELOC_390_TLS_DTPOFF,
++ BFD_RELOC_390_TLS_TPOFF,
++
++/* Long displacement extension. */
++ BFD_RELOC_390_20,
++ BFD_RELOC_390_GOT20,
++ BFD_RELOC_390_GOTPLT20,
++ BFD_RELOC_390_TLS_GOTIE20,
++
++/* Scenix IP2K - 9-bit register number / data address */
++ BFD_RELOC_IP2K_FR9,
++
++/* Scenix IP2K - 4-bit register/data bank number */
++ BFD_RELOC_IP2K_BANK,
++
++/* Scenix IP2K - low 13 bits of instruction word address */
++ BFD_RELOC_IP2K_ADDR16CJP,
++
++/* Scenix IP2K - high 3 bits of instruction word address */
++ BFD_RELOC_IP2K_PAGE3,
++
++/* Scenix IP2K - ext/low/high 8 bits of data address */
++ BFD_RELOC_IP2K_LO8DATA,
++ BFD_RELOC_IP2K_HI8DATA,
++ BFD_RELOC_IP2K_EX8DATA,
++
++/* Scenix IP2K - low/high 8 bits of instruction word address */
++ BFD_RELOC_IP2K_LO8INSN,
++ BFD_RELOC_IP2K_HI8INSN,
++
++/* Scenix IP2K - even/odd PC modifier to modify snb pcl.0 */
++ BFD_RELOC_IP2K_PC_SKIP,
++
++/* Scenix IP2K - 16 bit word address in text section. */
++ BFD_RELOC_IP2K_TEXT,
++
++/* Scenix IP2K - 7-bit sp or dp offset */
++ BFD_RELOC_IP2K_FR_OFFSET,
++
++/* Scenix VPE4K coprocessor - data/insn-space addressing */
++ BFD_RELOC_VPE4KMATH_DATA,
++ BFD_RELOC_VPE4KMATH_INSN,
++
++/* These two relocations are used by the linker to determine which of
++the entries in a C++ virtual function table are actually used. When
++the --gc-sections option is given, the linker will zero out the entries
++that are not used, so that the code for those functions need not be
++included in the output.
++
++VTABLE_INHERIT is a zero-space relocation used to describe to the
++linker the inheritance tree of a C++ virtual function table. The
++relocation's symbol should be the parent class' vtable, and the
++relocation should be located at the child vtable.
++
++VTABLE_ENTRY is a zero-space relocation that describes the use of a
++virtual function table entry. The reloc's symbol should refer to the
++table of the class mentioned in the code. Off of that base, an offset
++describes the entry that is being used. For Rela hosts, this offset
++is stored in the reloc's addend. For Rel hosts, we are forced to put
++this offset in the reloc's section offset. */
++ BFD_RELOC_VTABLE_INHERIT,
++ BFD_RELOC_VTABLE_ENTRY,
++
++/* Intel IA64 Relocations. */
++ BFD_RELOC_IA64_IMM14,
++ BFD_RELOC_IA64_IMM22,
++ BFD_RELOC_IA64_IMM64,
++ BFD_RELOC_IA64_DIR32MSB,
++ BFD_RELOC_IA64_DIR32LSB,
++ BFD_RELOC_IA64_DIR64MSB,
++ BFD_RELOC_IA64_DIR64LSB,
++ BFD_RELOC_IA64_GPREL22,
++ BFD_RELOC_IA64_GPREL64I,
++ BFD_RELOC_IA64_GPREL32MSB,
++ BFD_RELOC_IA64_GPREL32LSB,
++ BFD_RELOC_IA64_GPREL64MSB,
++ BFD_RELOC_IA64_GPREL64LSB,
++ BFD_RELOC_IA64_LTOFF22,
++ BFD_RELOC_IA64_LTOFF64I,
++ BFD_RELOC_IA64_PLTOFF22,
++ BFD_RELOC_IA64_PLTOFF64I,
++ BFD_RELOC_IA64_PLTOFF64MSB,
++ BFD_RELOC_IA64_PLTOFF64LSB,
++ BFD_RELOC_IA64_FPTR64I,
++ BFD_RELOC_IA64_FPTR32MSB,
++ BFD_RELOC_IA64_FPTR32LSB,
++ BFD_RELOC_IA64_FPTR64MSB,
++ BFD_RELOC_IA64_FPTR64LSB,
++ BFD_RELOC_IA64_PCREL21B,
++ BFD_RELOC_IA64_PCREL21BI,
++ BFD_RELOC_IA64_PCREL21M,
++ BFD_RELOC_IA64_PCREL21F,
++ BFD_RELOC_IA64_PCREL22,
++ BFD_RELOC_IA64_PCREL60B,
++ BFD_RELOC_IA64_PCREL64I,
++ BFD_RELOC_IA64_PCREL32MSB,
++ BFD_RELOC_IA64_PCREL32LSB,
++ BFD_RELOC_IA64_PCREL64MSB,
++ BFD_RELOC_IA64_PCREL64LSB,
++ BFD_RELOC_IA64_LTOFF_FPTR22,
++ BFD_RELOC_IA64_LTOFF_FPTR64I,
++ BFD_RELOC_IA64_LTOFF_FPTR32MSB,
++ BFD_RELOC_IA64_LTOFF_FPTR32LSB,
++ BFD_RELOC_IA64_LTOFF_FPTR64MSB,
++ BFD_RELOC_IA64_LTOFF_FPTR64LSB,
++ BFD_RELOC_IA64_SEGREL32MSB,
++ BFD_RELOC_IA64_SEGREL32LSB,
++ BFD_RELOC_IA64_SEGREL64MSB,
++ BFD_RELOC_IA64_SEGREL64LSB,
++ BFD_RELOC_IA64_SECREL32MSB,
++ BFD_RELOC_IA64_SECREL32LSB,
++ BFD_RELOC_IA64_SECREL64MSB,
++ BFD_RELOC_IA64_SECREL64LSB,
++ BFD_RELOC_IA64_REL32MSB,
++ BFD_RELOC_IA64_REL32LSB,
++ BFD_RELOC_IA64_REL64MSB,
++ BFD_RELOC_IA64_REL64LSB,
++ BFD_RELOC_IA64_LTV32MSB,
++ BFD_RELOC_IA64_LTV32LSB,
++ BFD_RELOC_IA64_LTV64MSB,
++ BFD_RELOC_IA64_LTV64LSB,
++ BFD_RELOC_IA64_IPLTMSB,
++ BFD_RELOC_IA64_IPLTLSB,
++ BFD_RELOC_IA64_COPY,
++ BFD_RELOC_IA64_LTOFF22X,
++ BFD_RELOC_IA64_LDXMOV,
++ BFD_RELOC_IA64_TPREL14,
++ BFD_RELOC_IA64_TPREL22,
++ BFD_RELOC_IA64_TPREL64I,
++ BFD_RELOC_IA64_TPREL64MSB,
++ BFD_RELOC_IA64_TPREL64LSB,
++ BFD_RELOC_IA64_LTOFF_TPREL22,
++ BFD_RELOC_IA64_DTPMOD64MSB,
++ BFD_RELOC_IA64_DTPMOD64LSB,
++ BFD_RELOC_IA64_LTOFF_DTPMOD22,
++ BFD_RELOC_IA64_DTPREL14,
++ BFD_RELOC_IA64_DTPREL22,
++ BFD_RELOC_IA64_DTPREL64I,
++ BFD_RELOC_IA64_DTPREL32MSB,
++ BFD_RELOC_IA64_DTPREL32LSB,
++ BFD_RELOC_IA64_DTPREL64MSB,
++ BFD_RELOC_IA64_DTPREL64LSB,
++ BFD_RELOC_IA64_LTOFF_DTPREL22,
++
++/* Motorola 68HC11 reloc.
++This is the 8 bit high part of an absolute address. */
++ BFD_RELOC_M68HC11_HI8,
++
++/* Motorola 68HC11 reloc.
++This is the 8 bit low part of an absolute address. */
++ BFD_RELOC_M68HC11_LO8,
++
++/* Motorola 68HC11 reloc.
++This is the 3 bit of a value. */
++ BFD_RELOC_M68HC11_3B,
++
++/* Motorola 68HC11 reloc.
++This reloc marks the beginning of a jump/call instruction.
++It is used for linker relaxation to correctly identify beginning
++of instruction and change some branches to use PC-relative
++addressing mode. */
++ BFD_RELOC_M68HC11_RL_JUMP,
++
++/* Motorola 68HC11 reloc.
++This reloc marks a group of several instructions that gcc generates
++and for which the linker relaxation pass can modify and/or remove
++some of them. */
++ BFD_RELOC_M68HC11_RL_GROUP,
++
++/* Motorola 68HC11 reloc.
++This is the 16-bit lower part of an address. It is used for 'call'
++instruction to specify the symbol address without any special
++transformation (due to memory bank window). */
++ BFD_RELOC_M68HC11_LO16,
++
++/* Motorola 68HC11 reloc.
++This is a 8-bit reloc that specifies the page number of an address.
++It is used by 'call' instruction to specify the page number of
++the symbol. */
++ BFD_RELOC_M68HC11_PAGE,
++
++/* Motorola 68HC11 reloc.
++This is a 24-bit reloc that represents the address with a 16-bit
++value and a 8-bit page number. The symbol address is transformed
++to follow the 16K memory bank of 68HC12 (seen as mapped in the window). */
++ BFD_RELOC_M68HC11_24,
++
++/* Motorola 68HC12 reloc.
++This is the 5 bits of a value. */
++ BFD_RELOC_M68HC12_5B,
++
++/* NS CR16C Relocations. */
++ BFD_RELOC_16C_NUM08,
++ BFD_RELOC_16C_NUM08_C,
++ BFD_RELOC_16C_NUM16,
++ BFD_RELOC_16C_NUM16_C,
++ BFD_RELOC_16C_NUM32,
++ BFD_RELOC_16C_NUM32_C,
++ BFD_RELOC_16C_DISP04,
++ BFD_RELOC_16C_DISP04_C,
++ BFD_RELOC_16C_DISP08,
++ BFD_RELOC_16C_DISP08_C,
++ BFD_RELOC_16C_DISP16,
++ BFD_RELOC_16C_DISP16_C,
++ BFD_RELOC_16C_DISP24,
++ BFD_RELOC_16C_DISP24_C,
++ BFD_RELOC_16C_DISP24a,
++ BFD_RELOC_16C_DISP24a_C,
++ BFD_RELOC_16C_REG04,
++ BFD_RELOC_16C_REG04_C,
++ BFD_RELOC_16C_REG04a,
++ BFD_RELOC_16C_REG04a_C,
++ BFD_RELOC_16C_REG14,
++ BFD_RELOC_16C_REG14_C,
++ BFD_RELOC_16C_REG16,
++ BFD_RELOC_16C_REG16_C,
++ BFD_RELOC_16C_REG20,
++ BFD_RELOC_16C_REG20_C,
++ BFD_RELOC_16C_ABS20,
++ BFD_RELOC_16C_ABS20_C,
++ BFD_RELOC_16C_ABS24,
++ BFD_RELOC_16C_ABS24_C,
++ BFD_RELOC_16C_IMM04,
++ BFD_RELOC_16C_IMM04_C,
++ BFD_RELOC_16C_IMM16,
++ BFD_RELOC_16C_IMM16_C,
++ BFD_RELOC_16C_IMM20,
++ BFD_RELOC_16C_IMM20_C,
++ BFD_RELOC_16C_IMM24,
++ BFD_RELOC_16C_IMM24_C,
++ BFD_RELOC_16C_IMM32,
++ BFD_RELOC_16C_IMM32_C,
++
++/* NS CRX Relocations. */
++ BFD_RELOC_CRX_REL4,
++ BFD_RELOC_CRX_REL8,
++ BFD_RELOC_CRX_REL8_CMP,
++ BFD_RELOC_CRX_REL16,
++ BFD_RELOC_CRX_REL24,
++ BFD_RELOC_CRX_REL32,
++ BFD_RELOC_CRX_REGREL12,
++ BFD_RELOC_CRX_REGREL22,
++ BFD_RELOC_CRX_REGREL28,
++ BFD_RELOC_CRX_REGREL32,
++ BFD_RELOC_CRX_ABS16,
++ BFD_RELOC_CRX_ABS32,
++ BFD_RELOC_CRX_NUM8,
++ BFD_RELOC_CRX_NUM16,
++ BFD_RELOC_CRX_NUM32,
++ BFD_RELOC_CRX_IMM16,
++ BFD_RELOC_CRX_IMM32,
++ BFD_RELOC_CRX_SWITCH8,
++ BFD_RELOC_CRX_SWITCH16,
++ BFD_RELOC_CRX_SWITCH32,
++
++/* These relocs are only used within the CRIS assembler. They are not
++(at present) written to any object files. */
++ BFD_RELOC_CRIS_BDISP8,
++ BFD_RELOC_CRIS_UNSIGNED_5,
++ BFD_RELOC_CRIS_SIGNED_6,
++ BFD_RELOC_CRIS_UNSIGNED_6,
++ BFD_RELOC_CRIS_SIGNED_8,
++ BFD_RELOC_CRIS_UNSIGNED_8,
++ BFD_RELOC_CRIS_SIGNED_16,
++ BFD_RELOC_CRIS_UNSIGNED_16,
++ BFD_RELOC_CRIS_LAPCQ_OFFSET,
++ BFD_RELOC_CRIS_UNSIGNED_4,
++
++/* Relocs used in ELF shared libraries for CRIS. */
++ BFD_RELOC_CRIS_COPY,
++ BFD_RELOC_CRIS_GLOB_DAT,
++ BFD_RELOC_CRIS_JUMP_SLOT,
++ BFD_RELOC_CRIS_RELATIVE,
++
++/* 32-bit offset to symbol-entry within GOT. */
++ BFD_RELOC_CRIS_32_GOT,
++
++/* 16-bit offset to symbol-entry within GOT. */
++ BFD_RELOC_CRIS_16_GOT,
++
++/* 32-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_CRIS_32_GOTPLT,
++
++/* 16-bit offset to symbol-entry within GOT, with PLT handling. */
++ BFD_RELOC_CRIS_16_GOTPLT,
++
++/* 32-bit offset to symbol, relative to GOT. */
++ BFD_RELOC_CRIS_32_GOTREL,
++
++/* 32-bit offset to symbol with PLT entry, relative to GOT. */
++ BFD_RELOC_CRIS_32_PLT_GOTREL,
++
++/* 32-bit offset to symbol with PLT entry, relative to this relocation. */
++ BFD_RELOC_CRIS_32_PLT_PCREL,
++
++/* Intel i860 Relocations. */
++ BFD_RELOC_860_COPY,
++ BFD_RELOC_860_GLOB_DAT,
++ BFD_RELOC_860_JUMP_SLOT,
++ BFD_RELOC_860_RELATIVE,
++ BFD_RELOC_860_PC26,
++ BFD_RELOC_860_PLT26,
++ BFD_RELOC_860_PC16,
++ BFD_RELOC_860_LOW0,
++ BFD_RELOC_860_SPLIT0,
++ BFD_RELOC_860_LOW1,
++ BFD_RELOC_860_SPLIT1,
++ BFD_RELOC_860_LOW2,
++ BFD_RELOC_860_SPLIT2,
++ BFD_RELOC_860_LOW3,
++ BFD_RELOC_860_LOGOT0,
++ BFD_RELOC_860_SPGOT0,
++ BFD_RELOC_860_LOGOT1,
++ BFD_RELOC_860_SPGOT1,
++ BFD_RELOC_860_LOGOTOFF0,
++ BFD_RELOC_860_SPGOTOFF0,
++ BFD_RELOC_860_LOGOTOFF1,
++ BFD_RELOC_860_SPGOTOFF1,
++ BFD_RELOC_860_LOGOTOFF2,
++ BFD_RELOC_860_LOGOTOFF3,
++ BFD_RELOC_860_LOPC,
++ BFD_RELOC_860_HIGHADJ,
++ BFD_RELOC_860_HAGOT,
++ BFD_RELOC_860_HAGOTOFF,
++ BFD_RELOC_860_HAPC,
++ BFD_RELOC_860_HIGH,
++ BFD_RELOC_860_HIGOT,
++ BFD_RELOC_860_HIGOTOFF,
++
++/* OpenRISC Relocations. */
++ BFD_RELOC_OPENRISC_ABS_26,
++ BFD_RELOC_OPENRISC_REL_26,
++
++/* H8 elf Relocations. */
++ BFD_RELOC_H8_DIR16A8,
++ BFD_RELOC_H8_DIR16R8,
++ BFD_RELOC_H8_DIR24A8,
++ BFD_RELOC_H8_DIR24R8,
++ BFD_RELOC_H8_DIR32A16,
++
++/* Sony Xstormy16 Relocations. */
++ BFD_RELOC_XSTORMY16_REL_12,
++ BFD_RELOC_XSTORMY16_12,
++ BFD_RELOC_XSTORMY16_24,
++ BFD_RELOC_XSTORMY16_FPTR16,
++
++/* Relocations used by VAX ELF. */
++ BFD_RELOC_VAX_GLOB_DAT,
++ BFD_RELOC_VAX_JMP_SLOT,
++ BFD_RELOC_VAX_RELATIVE,
++
++/* Morpho MS1 - 16 bit immediate relocation. */
++ BFD_RELOC_MS1_PC16,
++
++/* Morpho MS1 - Hi 16 bits of an address. */
++ BFD_RELOC_MS1_HI16,
++
++/* Morpho MS1 - Low 16 bits of an address. */
++ BFD_RELOC_MS1_LO16,
++
++/* Morpho MS1 - Used to tell the linker which vtable entries are used. */
++ BFD_RELOC_MS1_GNU_VTINHERIT,
++
++/* Morpho MS1 - Used to tell the linker which vtable entries are used. */
++ BFD_RELOC_MS1_GNU_VTENTRY,
++
++/* msp430 specific relocation codes */
++ BFD_RELOC_MSP430_10_PCREL,
++ BFD_RELOC_MSP430_16_PCREL,
++ BFD_RELOC_MSP430_16,
++ BFD_RELOC_MSP430_16_PCREL_BYTE,
++ BFD_RELOC_MSP430_16_BYTE,
++ BFD_RELOC_MSP430_2X_PCREL,
++ BFD_RELOC_MSP430_RL_PCREL,
++
++/* IQ2000 Relocations. */
++ BFD_RELOC_IQ2000_OFFSET_16,
++ BFD_RELOC_IQ2000_OFFSET_21,
++ BFD_RELOC_IQ2000_UHI16,
++
++/* Special Xtensa relocation used only by PLT entries in ELF shared
++objects to indicate that the runtime linker should set the value
++to one of its own internal functions or data structures. */
++ BFD_RELOC_XTENSA_RTLD,
++
++/* Xtensa relocations for ELF shared objects. */
++ BFD_RELOC_XTENSA_GLOB_DAT,
++ BFD_RELOC_XTENSA_JMP_SLOT,
++ BFD_RELOC_XTENSA_RELATIVE,
++
++/* Xtensa relocation used in ELF object files for symbols that may require
++PLT entries. Otherwise, this is just a generic 32-bit relocation. */
++ BFD_RELOC_XTENSA_PLT,
++
++/* Xtensa relocations to mark the difference of two local symbols.
++These are only needed to support linker relaxation and can be ignored
++when not relaxing. The field is set to the value of the difference
++assuming no relaxation. The relocation encodes the position of the
++first symbol so the linker can determine whether to adjust the field
++value. */
++ BFD_RELOC_XTENSA_DIFF8,
++ BFD_RELOC_XTENSA_DIFF16,
++ BFD_RELOC_XTENSA_DIFF32,
++
++/* Generic Xtensa relocations for instruction operands. Only the slot
++number is encoded in the relocation. The relocation applies to the
++last PC-relative immediate operand, or if there are no PC-relative
++immediates, to the last immediate operand. */
++ BFD_RELOC_XTENSA_SLOT0_OP,
++ BFD_RELOC_XTENSA_SLOT1_OP,
++ BFD_RELOC_XTENSA_SLOT2_OP,
++ BFD_RELOC_XTENSA_SLOT3_OP,
++ BFD_RELOC_XTENSA_SLOT4_OP,
++ BFD_RELOC_XTENSA_SLOT5_OP,
++ BFD_RELOC_XTENSA_SLOT6_OP,
++ BFD_RELOC_XTENSA_SLOT7_OP,
++ BFD_RELOC_XTENSA_SLOT8_OP,
++ BFD_RELOC_XTENSA_SLOT9_OP,
++ BFD_RELOC_XTENSA_SLOT10_OP,
++ BFD_RELOC_XTENSA_SLOT11_OP,
++ BFD_RELOC_XTENSA_SLOT12_OP,
++ BFD_RELOC_XTENSA_SLOT13_OP,
++ BFD_RELOC_XTENSA_SLOT14_OP,
++
++/* Alternate Xtensa relocations. Only the slot is encoded in the
++relocation. The meaning of these relocations is opcode-specific. */
++ BFD_RELOC_XTENSA_SLOT0_ALT,
++ BFD_RELOC_XTENSA_SLOT1_ALT,
++ BFD_RELOC_XTENSA_SLOT2_ALT,
++ BFD_RELOC_XTENSA_SLOT3_ALT,
++ BFD_RELOC_XTENSA_SLOT4_ALT,
++ BFD_RELOC_XTENSA_SLOT5_ALT,
++ BFD_RELOC_XTENSA_SLOT6_ALT,
++ BFD_RELOC_XTENSA_SLOT7_ALT,
++ BFD_RELOC_XTENSA_SLOT8_ALT,
++ BFD_RELOC_XTENSA_SLOT9_ALT,
++ BFD_RELOC_XTENSA_SLOT10_ALT,
++ BFD_RELOC_XTENSA_SLOT11_ALT,
++ BFD_RELOC_XTENSA_SLOT12_ALT,
++ BFD_RELOC_XTENSA_SLOT13_ALT,
++ BFD_RELOC_XTENSA_SLOT14_ALT,
++
++/* Xtensa relocations for backward compatibility. These have all been
++replaced by BFD_RELOC_XTENSA_SLOT0_OP. */
++ BFD_RELOC_XTENSA_OP0,
++ BFD_RELOC_XTENSA_OP1,
++ BFD_RELOC_XTENSA_OP2,
++
++/* Xtensa relocation to mark that the assembler expanded the
++instructions from an original target. The expansion size is
++encoded in the reloc size. */
++ BFD_RELOC_XTENSA_ASM_EXPAND,
++
++/* Xtensa relocation to mark that the linker should simplify
++assembler-expanded instructions. This is commonly used
++internally by the linker after analysis of a
++BFD_RELOC_XTENSA_ASM_EXPAND. */
++ BFD_RELOC_XTENSA_ASM_SIMPLIFY,
++ BFD_RELOC_UNUSED };
++typedef enum bfd_reloc_code_real bfd_reloc_code_real_type;
++reloc_howto_type *bfd_reloc_type_lookup
++ (bfd *abfd, bfd_reloc_code_real_type code);
++
++const char *bfd_get_reloc_code_name (bfd_reloc_code_real_type code);
++
++/* Extracted from syms.c. */
++
++typedef struct bfd_symbol
++{
++ /* A pointer to the BFD which owns the symbol. This information
++ is necessary so that a back end can work out what additional
++ information (invisible to the application writer) is carried
++ with the symbol.
++
++ This field is *almost* redundant, since you can use section->owner
++ instead, except that some symbols point to the global sections
++ bfd_{abs,com,und}_section. This could be fixed by making
++ these globals be per-bfd (or per-target-flavor). FIXME. */
++ struct bfd *the_bfd; /* Use bfd_asymbol_bfd(sym) to access this field. */
++
++ /* The text of the symbol. The name is left alone, and not copied; the
++ application may not alter it. */
++ const char *name;
++
++ /* The value of the symbol. This really should be a union of a
++ numeric value with a pointer, since some flags indicate that
++ a pointer to another symbol is stored here. */
++ symvalue value;
++
++ /* Attributes of a symbol. */
++#define BSF_NO_FLAGS 0x00
++
++ /* The symbol has local scope; <<static>> in <<C>>. The value
++ is the offset into the section of the data. */
++#define BSF_LOCAL 0x01
++
++ /* The symbol has global scope; initialized data in <<C>>. The
++ value is the offset into the section of the data. */
++#define BSF_GLOBAL 0x02
++
++ /* The symbol has global scope and is exported. The value is
++ the offset into the section of the data. */
++#define BSF_EXPORT BSF_GLOBAL /* No real difference. */
++
++ /* A normal C symbol would be one of:
++ <<BSF_LOCAL>>, <<BSF_FORT_COMM>>, <<BSF_UNDEFINED>> or
++ <<BSF_GLOBAL>>. */
++
++ /* The symbol is a debugging record. The value has an arbitrary
++ meaning, unless BSF_DEBUGGING_RELOC is also set. */
++#define BSF_DEBUGGING 0x08
++
++ /* The symbol denotes a function entry point. Used in ELF,
++ perhaps others someday. */
++#define BSF_FUNCTION 0x10
++
++ /* Used by the linker. */
++#define BSF_KEEP 0x20
++#define BSF_KEEP_G 0x40
++
++ /* A weak global symbol, overridable without warnings by
++ a regular global symbol of the same name. */
++#define BSF_WEAK 0x80
++
++ /* This symbol was created to point to a section, e.g. ELF's
++ STT_SECTION symbols. */
++#define BSF_SECTION_SYM 0x100
++
++ /* The symbol used to be a common symbol, but now it is
++ allocated. */
++#define BSF_OLD_COMMON 0x200
++
++ /* The default value for common data. */
++#define BFD_FORT_COMM_DEFAULT_VALUE 0
++
++ /* In some files the type of a symbol sometimes alters its
++ location in an output file - ie in coff a <<ISFCN>> symbol
++ which is also <<C_EXT>> symbol appears where it was
++ declared and not at the end of a section. This bit is set
++ by the target BFD part to convey this information. */
++#define BSF_NOT_AT_END 0x400
++
++ /* Signal that the symbol is the label of constructor section. */
++#define BSF_CONSTRUCTOR 0x800
++
++ /* Signal that the symbol is a warning symbol. The name is a
++ warning. The name of the next symbol is the one to warn about;
++ if a reference is made to a symbol with the same name as the next
++ symbol, a warning is issued by the linker. */
++#define BSF_WARNING 0x1000
++
++ /* Signal that the symbol is indirect. This symbol is an indirect
++ pointer to the symbol with the same name as the next symbol. */
++#define BSF_INDIRECT 0x2000
++
++ /* BSF_FILE marks symbols that contain a file name. This is used
++ for ELF STT_FILE symbols. */
++#define BSF_FILE 0x4000
++
++ /* Symbol is from dynamic linking information. */
++#define BSF_DYNAMIC 0x8000
++
++ /* The symbol denotes a data object. Used in ELF, and perhaps
++ others someday. */
++#define BSF_OBJECT 0x10000
++
++ /* This symbol is a debugging symbol. The value is the offset
++ into the section of the data. BSF_DEBUGGING should be set
++ as well. */
++#define BSF_DEBUGGING_RELOC 0x20000
++
++ /* This symbol is thread local. Used in ELF. */
++#define BSF_THREAD_LOCAL 0x40000
++
++ flagword flags;
++
++ /* A pointer to the section to which this symbol is
++ relative. This will always be non NULL, there are special
++ sections for undefined and absolute symbols. */
++ struct bfd_section *section;
++
++ /* Back end special data. */
++ union
++ {
++ void *p;
++ bfd_vma i;
++ }
++ udata;
++}
++asymbol;
++
++#define bfd_get_symtab_upper_bound(abfd) \
++ BFD_SEND (abfd, _bfd_get_symtab_upper_bound, (abfd))
++
++bfd_boolean bfd_is_local_label (bfd *abfd, asymbol *sym);
++
++bfd_boolean bfd_is_local_label_name (bfd *abfd, const char *name);
++
++#define bfd_is_local_label_name(abfd, name) \
++ BFD_SEND (abfd, _bfd_is_local_label_name, (abfd, name))
++
++bfd_boolean bfd_is_target_special_symbol (bfd *abfd, asymbol *sym);
++
++#define bfd_is_target_special_symbol(abfd, sym) \
++ BFD_SEND (abfd, _bfd_is_target_special_symbol, (abfd, sym))
++
++#define bfd_canonicalize_symtab(abfd, location) \
++ BFD_SEND (abfd, _bfd_canonicalize_symtab, (abfd, location))
++
++bfd_boolean bfd_set_symtab
++ (bfd *abfd, asymbol **location, unsigned int count);
++
++void bfd_print_symbol_vandf (bfd *abfd, void *file, asymbol *symbol);
++
++#define bfd_make_empty_symbol(abfd) \
++ BFD_SEND (abfd, _bfd_make_empty_symbol, (abfd))
++
++asymbol *_bfd_generic_make_empty_symbol (bfd *);
++
++#define bfd_make_debug_symbol(abfd,ptr,size) \
++ BFD_SEND (abfd, _bfd_make_debug_symbol, (abfd, ptr, size))
++
++int bfd_decode_symclass (asymbol *symbol);
++
++bfd_boolean bfd_is_undefined_symclass (int symclass);
++
++void bfd_symbol_info (asymbol *symbol, symbol_info *ret);
++
++bfd_boolean bfd_copy_private_symbol_data
++ (bfd *ibfd, asymbol *isym, bfd *obfd, asymbol *osym);
++
++#define bfd_copy_private_symbol_data(ibfd, isymbol, obfd, osymbol) \
++ BFD_SEND (obfd, _bfd_copy_private_symbol_data, \
++ (ibfd, isymbol, obfd, osymbol))
++
++/* Extracted from bfd.c. */
++struct bfd
++{
++ /* A unique identifier of the BFD */
++ unsigned int id;
++
++ /* The filename the application opened the BFD with. */
++ const char *filename;
++
++ /* A pointer to the target jump table. */
++ const struct bfd_target *xvec;
++
++ /* The IOSTREAM, and corresponding IO vector that provide access
++ to the file backing the BFD. */
++ void *iostream;
++ const struct bfd_iovec *iovec;
++
++ /* Is the file descriptor being cached? That is, can it be closed as
++ needed, and re-opened when accessed later? */
++ bfd_boolean cacheable;
++
++ /* Marks whether there was a default target specified when the
++ BFD was opened. This is used to select which matching algorithm
++ to use to choose the back end. */
++ bfd_boolean target_defaulted;
++
++ /* The caching routines use these to maintain a
++ least-recently-used list of BFDs. */
++ struct bfd *lru_prev, *lru_next;
++
++ /* When a file is closed by the caching routines, BFD retains
++ state information on the file here... */
++ ufile_ptr where;
++
++ /* ... and here: (``once'' means at least once). */
++ bfd_boolean opened_once;
++
++ /* Set if we have a locally maintained mtime value, rather than
++ getting it from the file each time. */
++ bfd_boolean mtime_set;
++
++ /* File modified time, if mtime_set is TRUE. */
++ long mtime;
++
++ /* Reserved for an unimplemented file locking extension. */
++ int ifd;
++
++ /* The format which belongs to the BFD. (object, core, etc.) */
++ bfd_format format;
++
++ /* The direction with which the BFD was opened. */
++ enum bfd_direction
++ {
++ no_direction = 0,
++ read_direction = 1,
++ write_direction = 2,
++ both_direction = 3
++ }
++ direction;
++
++ /* Format_specific flags. */
++ flagword flags;
++
++ /* Currently my_archive is tested before adding origin to
++ anything. I believe that this can become always an add of
++ origin, with origin set to 0 for non archive files. */
++ ufile_ptr origin;
++
++ /* Remember when output has begun, to stop strange things
++ from happening. */
++ bfd_boolean output_has_begun;
++
++ /* A hash table for section names. */
++ struct bfd_hash_table section_htab;
++
++ /* Pointer to linked list of sections. */
++ struct bfd_section *sections;
++
++ /* The last section on the section list. */
++ struct bfd_section *section_last;
++
++ /* The number of sections. */
++ unsigned int section_count;
++
++ /* Stuff only useful for object files:
++ The start address. */
++ bfd_vma start_address;
++
++ /* Used for input and output. */
++ unsigned int symcount;
++
++ /* Symbol table for output BFD (with symcount entries). */
++ struct bfd_symbol **outsymbols;
++
++ /* Used for slurped dynamic symbol tables. */
++ unsigned int dynsymcount;
++
++ /* Pointer to structure which contains architecture information. */
++ const struct bfd_arch_info *arch_info;
++
++ /* Flag set if symbols from this BFD should not be exported. */
++ bfd_boolean no_export;
++
++ /* Stuff only useful for archives. */
++ void *arelt_data;
++ struct bfd *my_archive; /* The containing archive BFD. */
++ struct bfd *next; /* The next BFD in the archive. */
++ struct bfd *archive_head; /* The first BFD in the archive. */
++ bfd_boolean has_armap;
++
++ /* A chain of BFD structures involved in a link. */
++ struct bfd *link_next;
++
++ /* A field used by _bfd_generic_link_add_archive_symbols. This will
++ be used only for archive elements. */
++ int archive_pass;
++
++ /* Used by the back end to hold private data. */
++ union
++ {
++ struct aout_data_struct *aout_data;
++ struct artdata *aout_ar_data;
++ struct _oasys_data *oasys_obj_data;
++ struct _oasys_ar_data *oasys_ar_data;
++ struct coff_tdata *coff_obj_data;
++ struct pe_tdata *pe_obj_data;
++ struct xcoff_tdata *xcoff_obj_data;
++ struct ecoff_tdata *ecoff_obj_data;
++ struct ieee_data_struct *ieee_data;
++ struct ieee_ar_data_struct *ieee_ar_data;
++ struct srec_data_struct *srec_data;
++ struct ihex_data_struct *ihex_data;
++ struct tekhex_data_struct *tekhex_data;
++ struct elf_obj_tdata *elf_obj_data;
++ struct nlm_obj_tdata *nlm_obj_data;
++ struct bout_data_struct *bout_data;
++ struct mmo_data_struct *mmo_data;
++ struct sun_core_struct *sun_core_data;
++ struct sco5_core_struct *sco5_core_data;
++ struct trad_core_struct *trad_core_data;
++ struct som_data_struct *som_data;
++ struct hpux_core_struct *hpux_core_data;
++ struct hppabsd_core_struct *hppabsd_core_data;
++ struct sgi_core_struct *sgi_core_data;
++ struct lynx_core_struct *lynx_core_data;
++ struct osf_core_struct *osf_core_data;
++ struct cisco_core_struct *cisco_core_data;
++ struct versados_data_struct *versados_data;
++ struct netbsd_core_struct *netbsd_core_data;
++ struct mach_o_data_struct *mach_o_data;
++ struct mach_o_fat_data_struct *mach_o_fat_data;
++ struct bfd_pef_data_struct *pef_data;
++ struct bfd_pef_xlib_data_struct *pef_xlib_data;
++ struct bfd_sym_data_struct *sym_data;
++ void *any;
++ }
++ tdata;
++
++ /* Used by the application to hold private data. */
++ void *usrdata;
++
++ /* Where all the allocated stuff under this BFD goes. This is a
++ struct objalloc *, but we use void * to avoid requiring the inclusion
++ of objalloc.h. */
++ void *memory;
++};
++
++typedef enum bfd_error
++{
++ bfd_error_no_error = 0,
++ bfd_error_system_call,
++ bfd_error_invalid_target,
++ bfd_error_wrong_format,
++ bfd_error_wrong_object_format,
++ bfd_error_invalid_operation,
++ bfd_error_no_memory,
++ bfd_error_no_symbols,
++ bfd_error_no_armap,
++ bfd_error_no_more_archived_files,
++ bfd_error_malformed_archive,
++ bfd_error_file_not_recognized,
++ bfd_error_file_ambiguously_recognized,
++ bfd_error_no_contents,
++ bfd_error_nonrepresentable_section,
++ bfd_error_no_debug_section,
++ bfd_error_bad_value,
++ bfd_error_file_truncated,
++ bfd_error_file_too_big,
++ bfd_error_invalid_error_code
++}
++bfd_error_type;
++
++bfd_error_type bfd_get_error (void);
++
++void bfd_set_error (bfd_error_type error_tag);
++
++const char *bfd_errmsg (bfd_error_type error_tag);
++
++void bfd_perror (const char *message);
++
++typedef void (*bfd_error_handler_type) (const char *, ...);
++
++bfd_error_handler_type bfd_set_error_handler (bfd_error_handler_type);
++
++void bfd_set_error_program_name (const char *);
++
++bfd_error_handler_type bfd_get_error_handler (void);
++
++long bfd_get_reloc_upper_bound (bfd *abfd, asection *sect);
++
++long bfd_canonicalize_reloc
++ (bfd *abfd, asection *sec, arelent **loc, asymbol **syms);
++
++void bfd_set_reloc
++ (bfd *abfd, asection *sec, arelent **rel, unsigned int count);
++
++bfd_boolean bfd_set_file_flags (bfd *abfd, flagword flags);
++
++int bfd_get_arch_size (bfd *abfd);
++
++int bfd_get_sign_extend_vma (bfd *abfd);
++
++bfd_boolean bfd_set_start_address (bfd *abfd, bfd_vma vma);
++
++unsigned int bfd_get_gp_size (bfd *abfd);
++
++void bfd_set_gp_size (bfd *abfd, unsigned int i);
++
++bfd_vma bfd_scan_vma (const char *string, const char **end, int base);
++
++bfd_boolean bfd_copy_private_header_data (bfd *ibfd, bfd *obfd);
++
++#define bfd_copy_private_header_data(ibfd, obfd) \
++ BFD_SEND (obfd, _bfd_copy_private_header_data, \
++ (ibfd, obfd))
++bfd_boolean bfd_copy_private_bfd_data (bfd *ibfd, bfd *obfd);
++
++#define bfd_copy_private_bfd_data(ibfd, obfd) \
++ BFD_SEND (obfd, _bfd_copy_private_bfd_data, \
++ (ibfd, obfd))
++bfd_boolean bfd_merge_private_bfd_data (bfd *ibfd, bfd *obfd);
++
++#define bfd_merge_private_bfd_data(ibfd, obfd) \
++ BFD_SEND (obfd, _bfd_merge_private_bfd_data, \
++ (ibfd, obfd))
++bfd_boolean bfd_set_private_flags (bfd *abfd, flagword flags);
++
++#define bfd_set_private_flags(abfd, flags) \
++ BFD_SEND (abfd, _bfd_set_private_flags, (abfd, flags))
++#define bfd_sizeof_headers(abfd, reloc) \
++ BFD_SEND (abfd, _bfd_sizeof_headers, (abfd, reloc))
++
++#define bfd_find_nearest_line(abfd, sec, syms, off, file, func, line) \
++ BFD_SEND (abfd, _bfd_find_nearest_line, \
++ (abfd, sec, syms, off, file, func, line))
++
++#define bfd_find_line(abfd, syms, sym, file, line) \
++ BFD_SEND (abfd, _bfd_find_line, \
++ (abfd, syms, sym, file, line))
++
++#define bfd_find_inliner_info(abfd, file, func, line) \
++ BFD_SEND (abfd, _bfd_find_inliner_info, \
++ (abfd, file, func, line))
++
++#define bfd_debug_info_start(abfd) \
++ BFD_SEND (abfd, _bfd_debug_info_start, (abfd))
++
++#define bfd_debug_info_end(abfd) \
++ BFD_SEND (abfd, _bfd_debug_info_end, (abfd))
++
++#define bfd_debug_info_accumulate(abfd, section) \
++ BFD_SEND (abfd, _bfd_debug_info_accumulate, (abfd, section))
++
++#define bfd_stat_arch_elt(abfd, stat) \
++ BFD_SEND (abfd, _bfd_stat_arch_elt,(abfd, stat))
++
++#define bfd_update_armap_timestamp(abfd) \
++ BFD_SEND (abfd, _bfd_update_armap_timestamp, (abfd))
++
++#define bfd_set_arch_mach(abfd, arch, mach)\
++ BFD_SEND ( abfd, _bfd_set_arch_mach, (abfd, arch, mach))
++
++#define bfd_relax_section(abfd, section, link_info, again) \
++ BFD_SEND (abfd, _bfd_relax_section, (abfd, section, link_info, again))
++
++#define bfd_gc_sections(abfd, link_info) \
++ BFD_SEND (abfd, _bfd_gc_sections, (abfd, link_info))
++
++#define bfd_merge_sections(abfd, link_info) \
++ BFD_SEND (abfd, _bfd_merge_sections, (abfd, link_info))
++
++#define bfd_is_group_section(abfd, sec) \
++ BFD_SEND (abfd, _bfd_is_group_section, (abfd, sec))
++
++#define bfd_discard_group(abfd, sec) \
++ BFD_SEND (abfd, _bfd_discard_group, (abfd, sec))
++
++#define bfd_link_hash_table_create(abfd) \
++ BFD_SEND (abfd, _bfd_link_hash_table_create, (abfd))
++
++#define bfd_link_hash_table_free(abfd, hash) \
++ BFD_SEND (abfd, _bfd_link_hash_table_free, (hash))
++
++#define bfd_link_add_symbols(abfd, info) \
++ BFD_SEND (abfd, _bfd_link_add_symbols, (abfd, info))
++
++#define bfd_link_just_syms(abfd, sec, info) \
++ BFD_SEND (abfd, _bfd_link_just_syms, (sec, info))
++
++#define bfd_final_link(abfd, info) \
++ BFD_SEND (abfd, _bfd_final_link, (abfd, info))
++
++#define bfd_free_cached_info(abfd) \
++ BFD_SEND (abfd, _bfd_free_cached_info, (abfd))
++
++#define bfd_get_dynamic_symtab_upper_bound(abfd) \
++ BFD_SEND (abfd, _bfd_get_dynamic_symtab_upper_bound, (abfd))
++
++#define bfd_print_private_bfd_data(abfd, file)\
++ BFD_SEND (abfd, _bfd_print_private_bfd_data, (abfd, file))
++
++#define bfd_canonicalize_dynamic_symtab(abfd, asymbols) \
++ BFD_SEND (abfd, _bfd_canonicalize_dynamic_symtab, (abfd, asymbols))
++
++#define bfd_get_synthetic_symtab(abfd, count, syms, dyncount, dynsyms, ret) \
++ BFD_SEND (abfd, _bfd_get_synthetic_symtab, (abfd, count, syms, \
++ dyncount, dynsyms, ret))
++
++#define bfd_get_dynamic_reloc_upper_bound(abfd) \
++ BFD_SEND (abfd, _bfd_get_dynamic_reloc_upper_bound, (abfd))
++
++#define bfd_canonicalize_dynamic_reloc(abfd, arels, asyms) \
++ BFD_SEND (abfd, _bfd_canonicalize_dynamic_reloc, (abfd, arels, asyms))
++
++extern bfd_byte *bfd_get_relocated_section_contents
++ (bfd *, struct bfd_link_info *, struct bfd_link_order *, bfd_byte *,
++ bfd_boolean, asymbol **);
++
++bfd_boolean bfd_alt_mach_code (bfd *abfd, int alternative);
++
++struct bfd_preserve
++{
++ void *marker;
++ void *tdata;
++ flagword flags;
++ const struct bfd_arch_info *arch_info;
++ struct bfd_section *sections;
++ struct bfd_section *section_last;
++ unsigned int section_count;
++ struct bfd_hash_table section_htab;
++};
++
++bfd_boolean bfd_preserve_save (bfd *, struct bfd_preserve *);
++
++void bfd_preserve_restore (bfd *, struct bfd_preserve *);
++
++void bfd_preserve_finish (bfd *, struct bfd_preserve *);
++
++/* Extracted from archive.c. */
++symindex bfd_get_next_mapent
++ (bfd *abfd, symindex previous, carsym **sym);
++
++bfd_boolean bfd_set_archive_head (bfd *output, bfd *new_head);
++
++bfd *bfd_openr_next_archived_file (bfd *archive, bfd *previous);
++
++/* Extracted from corefile.c. */
++const char *bfd_core_file_failing_command (bfd *abfd);
++
++int bfd_core_file_failing_signal (bfd *abfd);
++
++bfd_boolean core_file_matches_executable_p
++ (bfd *core_bfd, bfd *exec_bfd);
++
++/* Extracted from targets.c. */
++#define BFD_SEND(bfd, message, arglist) \
++ ((*((bfd)->xvec->message)) arglist)
++
++#ifdef DEBUG_BFD_SEND
++#undef BFD_SEND
++#define BFD_SEND(bfd, message, arglist) \
++ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
++ ((*((bfd)->xvec->message)) arglist) : \
++ (bfd_assert (__FILE__,__LINE__), NULL))
++#endif
++#define BFD_SEND_FMT(bfd, message, arglist) \
++ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist)
++
++#ifdef DEBUG_BFD_SEND
++#undef BFD_SEND_FMT
++#define BFD_SEND_FMT(bfd, message, arglist) \
++ (((bfd) && (bfd)->xvec && (bfd)->xvec->message) ? \
++ (((bfd)->xvec->message[(int) ((bfd)->format)]) arglist) : \
++ (bfd_assert (__FILE__,__LINE__), NULL))
++#endif
++
++enum bfd_flavour
++{
++ bfd_target_unknown_flavour,
++ bfd_target_aout_flavour,
++ bfd_target_coff_flavour,
++ bfd_target_ecoff_flavour,
++ bfd_target_xcoff_flavour,
++ bfd_target_elf_flavour,
++ bfd_target_ieee_flavour,
++ bfd_target_nlm_flavour,
++ bfd_target_oasys_flavour,
++ bfd_target_tekhex_flavour,
++ bfd_target_srec_flavour,
++ bfd_target_ihex_flavour,
++ bfd_target_som_flavour,
++ bfd_target_os9k_flavour,
++ bfd_target_versados_flavour,
++ bfd_target_msdos_flavour,
++ bfd_target_ovax_flavour,
++ bfd_target_evax_flavour,
++ bfd_target_mmo_flavour,
++ bfd_target_mach_o_flavour,
++ bfd_target_pef_flavour,
++ bfd_target_pef_xlib_flavour,
++ bfd_target_sym_flavour
++};
++
++enum bfd_endian { BFD_ENDIAN_BIG, BFD_ENDIAN_LITTLE, BFD_ENDIAN_UNKNOWN };
++
++/* Forward declaration. */
++typedef struct bfd_link_info _bfd_link_info;
++
++typedef struct bfd_target
++{
++ /* Identifies the kind of target, e.g., SunOS4, Ultrix, etc. */
++ char *name;
++
++ /* The "flavour" of a back end is a general indication about
++ the contents of a file. */
++ enum bfd_flavour flavour;
++
++ /* The order of bytes within the data area of a file. */
++ enum bfd_endian byteorder;
++
++ /* The order of bytes within the header parts of a file. */
++ enum bfd_endian header_byteorder;
++
++ /* A mask of all the flags which an executable may have set -
++ from the set <<BFD_NO_FLAGS>>, <<HAS_RELOC>>, ...<<D_PAGED>>. */
++ flagword object_flags;
++
++ /* A mask of all the flags which a section may have set - from
++ the set <<SEC_NO_FLAGS>>, <<SEC_ALLOC>>, ...<<SET_NEVER_LOAD>>. */
++ flagword section_flags;
++
++ /* The character normally found at the front of a symbol.
++ (if any), perhaps `_'. */
++ char symbol_leading_char;
++
++ /* The pad character for file names within an archive header. */
++ char ar_pad_char;
++
++ /* The maximum number of characters in an archive header. */
++ unsigned short ar_max_namelen;
++
++ /* Entries for byte swapping for data. These are different from the
++ other entry points, since they don't take a BFD as the first argument.
++ Certain other handlers could do the same. */
++ bfd_uint64_t (*bfd_getx64) (const void *);
++ bfd_int64_t (*bfd_getx_signed_64) (const void *);
++ void (*bfd_putx64) (bfd_uint64_t, void *);
++ bfd_vma (*bfd_getx32) (const void *);
++ bfd_signed_vma (*bfd_getx_signed_32) (const void *);
++ void (*bfd_putx32) (bfd_vma, void *);
++ bfd_vma (*bfd_getx16) (const void *);
++ bfd_signed_vma (*bfd_getx_signed_16) (const void *);
++ void (*bfd_putx16) (bfd_vma, void *);
++
++ /* Byte swapping for the headers. */
++ bfd_uint64_t (*bfd_h_getx64) (const void *);
++ bfd_int64_t (*bfd_h_getx_signed_64) (const void *);
++ void (*bfd_h_putx64) (bfd_uint64_t, void *);
++ bfd_vma (*bfd_h_getx32) (const void *);
++ bfd_signed_vma (*bfd_h_getx_signed_32) (const void *);
++ void (*bfd_h_putx32) (bfd_vma, void *);
++ bfd_vma (*bfd_h_getx16) (const void *);
++ bfd_signed_vma (*bfd_h_getx_signed_16) (const void *);
++ void (*bfd_h_putx16) (bfd_vma, void *);
++
++ /* Format dependent routines: these are vectors of entry points
++ within the target vector structure, one for each format to check. */
++
++ /* Check the format of a file being read. Return a <<bfd_target *>> or zero. */
++ const struct bfd_target *(*_bfd_check_format[bfd_type_end]) (bfd *);
++
++ /* Set the format of a file being written. */
++ bfd_boolean (*_bfd_set_format[bfd_type_end]) (bfd *);
++
++ /* Write cached information into a file being written, at <<bfd_close>>. */
++ bfd_boolean (*_bfd_write_contents[bfd_type_end]) (bfd *);
++
++
++ /* Generic entry points. */
++#define BFD_JUMP_TABLE_GENERIC(NAME) \
++ NAME##_close_and_cleanup, \
++ NAME##_bfd_free_cached_info, \
++ NAME##_new_section_hook, \
++ NAME##_get_section_contents, \
++ NAME##_get_section_contents_in_window
++
++ /* Called when the BFD is being closed to do any necessary cleanup. */
++ bfd_boolean (*_close_and_cleanup) (bfd *);
++ /* Ask the BFD to free all cached information. */
++ bfd_boolean (*_bfd_free_cached_info) (bfd *);
++ /* Called when a new section is created. */
++ bfd_boolean (*_new_section_hook) (bfd *, sec_ptr);
++ /* Read the contents of a section. */
++ bfd_boolean (*_bfd_get_section_contents)
++ (bfd *, sec_ptr, void *, file_ptr, bfd_size_type);
++ bfd_boolean (*_bfd_get_section_contents_in_window)
++ (bfd *, sec_ptr, bfd_window *, file_ptr, bfd_size_type);
++
++ /* Entry points to copy private data. */
++#define BFD_JUMP_TABLE_COPY(NAME) \
++ NAME##_bfd_copy_private_bfd_data, \
++ NAME##_bfd_merge_private_bfd_data, \
++ NAME##_bfd_copy_private_section_data, \
++ NAME##_bfd_copy_private_symbol_data, \
++ NAME##_bfd_copy_private_header_data, \
++ NAME##_bfd_set_private_flags, \
++ NAME##_bfd_print_private_bfd_data
++
++ /* Called to copy BFD general private data from one object file
++ to another. */
++ bfd_boolean (*_bfd_copy_private_bfd_data) (bfd *, bfd *);
++ /* Called to merge BFD general private data from one object file
++ to a common output file when linking. */
++ bfd_boolean (*_bfd_merge_private_bfd_data) (bfd *, bfd *);
++ /* Called to copy BFD private section data from one object file
++ to another. */
++ bfd_boolean (*_bfd_copy_private_section_data)
++ (bfd *, sec_ptr, bfd *, sec_ptr);
++ /* Called to copy BFD private symbol data from one symbol
++ to another. */
++ bfd_boolean (*_bfd_copy_private_symbol_data)
++ (bfd *, asymbol *, bfd *, asymbol *);
++ /* Called to copy BFD private header data from one object file
++ to another. */
++ bfd_boolean (*_bfd_copy_private_header_data)
++ (bfd *, bfd *);
++ /* Called to set private backend flags. */
++ bfd_boolean (*_bfd_set_private_flags) (bfd *, flagword);
++
++ /* Called to print private BFD data. */
++ bfd_boolean (*_bfd_print_private_bfd_data) (bfd *, void *);
++
++ /* Core file entry points. */
++#define BFD_JUMP_TABLE_CORE(NAME) \
++ NAME##_core_file_failing_command, \
++ NAME##_core_file_failing_signal, \
++ NAME##_core_file_matches_executable_p
++
++ char * (*_core_file_failing_command) (bfd *);
++ int (*_core_file_failing_signal) (bfd *);
++ bfd_boolean (*_core_file_matches_executable_p) (bfd *, bfd *);
++
++ /* Archive entry points. */
++#define BFD_JUMP_TABLE_ARCHIVE(NAME) \
++ NAME##_slurp_armap, \
++ NAME##_slurp_extended_name_table, \
++ NAME##_construct_extended_name_table, \
++ NAME##_truncate_arname, \
++ NAME##_write_armap, \
++ NAME##_read_ar_hdr, \
++ NAME##_openr_next_archived_file, \
++ NAME##_get_elt_at_index, \
++ NAME##_generic_stat_arch_elt, \
++ NAME##_update_armap_timestamp
++
++ bfd_boolean (*_bfd_slurp_armap) (bfd *);
++ bfd_boolean (*_bfd_slurp_extended_name_table) (bfd *);
++ bfd_boolean (*_bfd_construct_extended_name_table)
++ (bfd *, char **, bfd_size_type *, const char **);
++ void (*_bfd_truncate_arname) (bfd *, const char *, char *);
++ bfd_boolean (*write_armap)
++ (bfd *, unsigned int, struct orl *, unsigned int, int);
++ void * (*_bfd_read_ar_hdr_fn) (bfd *);
++ bfd * (*openr_next_archived_file) (bfd *, bfd *);
++#define bfd_get_elt_at_index(b,i) BFD_SEND (b, _bfd_get_elt_at_index, (b,i))
++ bfd * (*_bfd_get_elt_at_index) (bfd *, symindex);
++ int (*_bfd_stat_arch_elt) (bfd *, struct stat *);
++ bfd_boolean (*_bfd_update_armap_timestamp) (bfd *);
++
++ /* Entry points used for symbols. */
++#define BFD_JUMP_TABLE_SYMBOLS(NAME) \
++ NAME##_get_symtab_upper_bound, \
++ NAME##_canonicalize_symtab, \
++ NAME##_make_empty_symbol, \
++ NAME##_print_symbol, \
++ NAME##_get_symbol_info, \
++ NAME##_bfd_is_local_label_name, \
++ NAME##_bfd_is_target_special_symbol, \
++ NAME##_get_lineno, \
++ NAME##_find_nearest_line, \
++ _bfd_generic_find_line, \
++ NAME##_find_inliner_info, \
++ NAME##_bfd_make_debug_symbol, \
++ NAME##_read_minisymbols, \
++ NAME##_minisymbol_to_symbol
++
++ long (*_bfd_get_symtab_upper_bound) (bfd *);
++ long (*_bfd_canonicalize_symtab)
++ (bfd *, struct bfd_symbol **);
++ struct bfd_symbol *
++ (*_bfd_make_empty_symbol) (bfd *);
++ void (*_bfd_print_symbol)
++ (bfd *, void *, struct bfd_symbol *, bfd_print_symbol_type);
++#define bfd_print_symbol(b,p,s,e) BFD_SEND (b, _bfd_print_symbol, (b,p,s,e))
++ void (*_bfd_get_symbol_info)
++ (bfd *, struct bfd_symbol *, symbol_info *);
++#define bfd_get_symbol_info(b,p,e) BFD_SEND (b, _bfd_get_symbol_info, (b,p,e))
++ bfd_boolean (*_bfd_is_local_label_name) (bfd *, const char *);
++ bfd_boolean (*_bfd_is_target_special_symbol) (bfd *, asymbol *);
++ alent * (*_get_lineno) (bfd *, struct bfd_symbol *);
++ bfd_boolean (*_bfd_find_nearest_line)
++ (bfd *, struct bfd_section *, struct bfd_symbol **, bfd_vma,
++ const char **, const char **, unsigned int *);
++ bfd_boolean (*_bfd_find_line)
++ (bfd *, struct bfd_symbol **, struct bfd_symbol *,
++ const char **, unsigned int *);
++ bfd_boolean (*_bfd_find_inliner_info)
++ (bfd *, const char **, const char **, unsigned int *);
++ /* Back-door to allow format-aware applications to create debug symbols
++ while using BFD for everything else. Currently used by the assembler
++ when creating COFF files. */
++ asymbol * (*_bfd_make_debug_symbol)
++ (bfd *, void *, unsigned long size);
++#define bfd_read_minisymbols(b, d, m, s) \
++ BFD_SEND (b, _read_minisymbols, (b, d, m, s))
++ long (*_read_minisymbols)
++ (bfd *, bfd_boolean, void **, unsigned int *);
++#define bfd_minisymbol_to_symbol(b, d, m, f) \
++ BFD_SEND (b, _minisymbol_to_symbol, (b, d, m, f))
++ asymbol * (*_minisymbol_to_symbol)
++ (bfd *, bfd_boolean, const void *, asymbol *);
++
++ /* Routines for relocs. */
++#define BFD_JUMP_TABLE_RELOCS(NAME) \
++ NAME##_get_reloc_upper_bound, \
++ NAME##_canonicalize_reloc, \
++ NAME##_bfd_reloc_type_lookup
++
++ long (*_get_reloc_upper_bound) (bfd *, sec_ptr);
++ long (*_bfd_canonicalize_reloc)
++ (bfd *, sec_ptr, arelent **, struct bfd_symbol **);
++ /* See documentation on reloc types. */
++ reloc_howto_type *
++ (*reloc_type_lookup) (bfd *, bfd_reloc_code_real_type);
++
++ /* Routines used when writing an object file. */
++#define BFD_JUMP_TABLE_WRITE(NAME) \
++ NAME##_set_arch_mach, \
++ NAME##_set_section_contents
++
++ bfd_boolean (*_bfd_set_arch_mach)
++ (bfd *, enum bfd_architecture, unsigned long);
++ bfd_boolean (*_bfd_set_section_contents)
++ (bfd *, sec_ptr, const void *, file_ptr, bfd_size_type);
++
++ /* Routines used by the linker. */
++#define BFD_JUMP_TABLE_LINK(NAME) \
++ NAME##_sizeof_headers, \
++ NAME##_bfd_get_relocated_section_contents, \
++ NAME##_bfd_relax_section, \
++ NAME##_bfd_link_hash_table_create, \
++ NAME##_bfd_link_hash_table_free, \
++ NAME##_bfd_link_add_symbols, \
++ NAME##_bfd_link_just_syms, \
++ NAME##_bfd_final_link, \
++ NAME##_bfd_link_split_section, \
++ NAME##_bfd_gc_sections, \
++ NAME##_bfd_merge_sections, \
++ NAME##_bfd_is_group_section, \
++ NAME##_bfd_discard_group, \
++ NAME##_section_already_linked \
++
++ int (*_bfd_sizeof_headers) (bfd *, bfd_boolean);
++ bfd_byte * (*_bfd_get_relocated_section_contents)
++ (bfd *, struct bfd_link_info *, struct bfd_link_order *,
++ bfd_byte *, bfd_boolean, struct bfd_symbol **);
++
++ bfd_boolean (*_bfd_relax_section)
++ (bfd *, struct bfd_section *, struct bfd_link_info *, bfd_boolean *);
++
++ /* Create a hash table for the linker. Different backends store
++ different information in this table. */
++ struct bfd_link_hash_table *
++ (*_bfd_link_hash_table_create) (bfd *);
++
++ /* Release the memory associated with the linker hash table. */
++ void (*_bfd_link_hash_table_free) (struct bfd_link_hash_table *);
++
++ /* Add symbols from this object file into the hash table. */
++ bfd_boolean (*_bfd_link_add_symbols) (bfd *, struct bfd_link_info *);
++
++ /* Indicate that we are only retrieving symbol values from this section. */
++ void (*_bfd_link_just_syms) (asection *, struct bfd_link_info *);
++
++ /* Do a link based on the link_order structures attached to each
++ section of the BFD. */
++ bfd_boolean (*_bfd_final_link) (bfd *, struct bfd_link_info *);
++
++ /* Should this section be split up into smaller pieces during linking. */
++ bfd_boolean (*_bfd_link_split_section) (bfd *, struct bfd_section *);
++
++ /* Remove sections that are not referenced from the output. */
++ bfd_boolean (*_bfd_gc_sections) (bfd *, struct bfd_link_info *);
++
++ /* Attempt to merge SEC_MERGE sections. */
++ bfd_boolean (*_bfd_merge_sections) (bfd *, struct bfd_link_info *);
++
++ /* Is this section a member of a group? */
++ bfd_boolean (*_bfd_is_group_section) (bfd *, const struct bfd_section *);
++
++ /* Discard members of a group. */
++ bfd_boolean (*_bfd_discard_group) (bfd *, struct bfd_section *);
++
++ /* Check if SEC has been already linked during a relocatable or
++ final link. */
++ void (*_section_already_linked) (bfd *, struct bfd_section *);
++
++ /* Routines to handle dynamic symbols and relocs. */
++#define BFD_JUMP_TABLE_DYNAMIC(NAME) \
++ NAME##_get_dynamic_symtab_upper_bound, \
++ NAME##_canonicalize_dynamic_symtab, \
++ NAME##_get_synthetic_symtab, \
++ NAME##_get_dynamic_reloc_upper_bound, \
++ NAME##_canonicalize_dynamic_reloc
++
++ /* Get the amount of memory required to hold the dynamic symbols. */
++ long (*_bfd_get_dynamic_symtab_upper_bound) (bfd *);
++ /* Read in the dynamic symbols. */
++ long (*_bfd_canonicalize_dynamic_symtab)
++ (bfd *, struct bfd_symbol **);
++ /* Create synthesized symbols. */
++ long (*_bfd_get_synthetic_symtab)
++ (bfd *, long, struct bfd_symbol **, long, struct bfd_symbol **,
++ struct bfd_symbol **);
++ /* Get the amount of memory required to hold the dynamic relocs. */
++ long (*_bfd_get_dynamic_reloc_upper_bound) (bfd *);
++ /* Read in the dynamic relocs. */
++ long (*_bfd_canonicalize_dynamic_reloc)
++ (bfd *, arelent **, struct bfd_symbol **);
++
++ /* Opposite endian version of this target. */
++ const struct bfd_target * alternative_target;
++
++ /* Data for use by back-end routines, which isn't
++ generic enough to belong in this structure. */
++ const void *backend_data;
++
++} bfd_target;
++
++bfd_boolean bfd_set_default_target (const char *name);
++
++const bfd_target *bfd_find_target (const char *target_name, bfd *abfd);
++
++const char ** bfd_target_list (void);
++
++const bfd_target *bfd_search_for_target
++ (int (*search_func) (const bfd_target *, void *),
++ void *);
++
++/* Extracted from format.c. */
++bfd_boolean bfd_check_format (bfd *abfd, bfd_format format);
++
++bfd_boolean bfd_check_format_matches
++ (bfd *abfd, bfd_format format, char ***matching);
++
++bfd_boolean bfd_set_format (bfd *abfd, bfd_format format);
++
++const char *bfd_format_string (bfd_format format);
++
++/* Extracted from linker.c. */
++bfd_boolean bfd_link_split_section (bfd *abfd, asection *sec);
++
++#define bfd_link_split_section(abfd, sec) \
++ BFD_SEND (abfd, _bfd_link_split_section, (abfd, sec))
++
++void bfd_section_already_linked (bfd *abfd, asection *sec);
++
++#define bfd_section_already_linked(abfd, sec) \
++ BFD_SEND (abfd, _section_already_linked, (abfd, sec))
++
++/* Extracted from simple.c. */
++bfd_byte *bfd_simple_get_relocated_section_contents
++ (bfd *abfd, asection *sec, bfd_byte *outbuf, asymbol **symbol_table);
++
++#ifdef __cplusplus
++}
++#endif
++#endif
+diff -Nurp linux-2.6.22-590/include/asm-x86_64/hw_irq.h linux-2.6.22-600/include/asm-x86_64/hw_irq.h
+--- linux-2.6.22-590/include/asm-x86_64/hw_irq.h 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/include/asm-x86_64/hw_irq.h 2008-04-09 18:16:24.000000000 +0200
+@@ -30,7 +30,7 @@
+ #define FIRST_EXTERNAL_VECTOR 0x20
+
+ #define IA32_SYSCALL_VECTOR 0x80
+-
++#define KDBENTER_VECTOR 0x81
+
+ /* Reserve the lowest usable priority level 0x20 - 0x2f for triggering
+ * cleanup after irq migration.
+@@ -68,8 +68,7 @@
+ #define ERROR_APIC_VECTOR 0xfe
+ #define RESCHEDULE_VECTOR 0xfd
+ #define CALL_FUNCTION_VECTOR 0xfc
+-/* fb free - please don't readd KDB here because it's useless
+- (hint - think what a NMI bit does to a vector) */
++#define KDB_VECTOR 0xfb
+ #define THERMAL_APIC_VECTOR 0xfa
+ #define THRESHOLD_APIC_VECTOR 0xf9
+ /* f8 free */
+diff -Nurp linux-2.6.22-590/include/asm-x86_64/kdb.h linux-2.6.22-600/include/asm-x86_64/kdb.h
+--- linux-2.6.22-590/include/asm-x86_64/kdb.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/asm-x86_64/kdb.h 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,120 @@
++#ifndef _ASM_KDB_H
++#define _ASM_KDB_H
++
++/*
++ * Kernel Debugger Architecture Dependent Global Headers
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++ /*
++ * KDB_ENTER() is a macro which causes entry into the kernel
++ * debugger from any point in the kernel code stream. If it
++ * is intended to be used from interrupt level, it must use
++ * a non-maskable entry method. The vector is KDBENTER_VECTOR,
++ * defined in hw_irq.h
++ */
++#define KDB_ENTER() do {if (kdb_on && !KDB_IS_RUNNING()) { asm("\tint $129\n"); }} while(0)
++
++ /*
++ * Needed for exported symbols.
++ */
++typedef unsigned long kdb_machreg_t;
++
++#define kdb_machreg_fmt "0x%lx"
++#define kdb_machreg_fmt0 "0x%016lx"
++#define kdb_bfd_vma_fmt "0x%lx"
++#define kdb_bfd_vma_fmt0 "0x%016lx"
++#define kdb_elfw_addr_fmt "0x%x"
++#define kdb_elfw_addr_fmt0 "0x%016x"
++
++ /*
++ * Per cpu arch specific kdb state. Must be in range 0xff000000.
++ */
++#define KDB_STATE_A_IF 0x01000000 /* Saved IF flag */
++
++ /*
++ * Functions to safely read and write kernel areas. The {to,from}_xxx
++ * addresses are not necessarily valid, these functions must check for
++ * validity. If the arch already supports get and put routines with
++ * suitable validation and/or recovery on invalid addresses then use
++ * those routines, otherwise check it yourself.
++ */
++
++ /*
++ * asm-i386 uaccess.h supplies __copy_to_user which relies on MMU to
++ * trap invalid addresses in the _xxx fields. Verify the other address
++ * of the pair is valid by accessing the first and last byte ourselves,
++ * then any access violations should only be caused by the _xxx
++ * addresses.
++ */
++
++#include <asm/uaccess.h>
++
++static inline int
++__kdba_putarea_size(unsigned long to_xxx, void *from, size_t size)
++{
++ mm_segment_t oldfs = get_fs();
++ int r;
++ char c;
++ c = *((volatile char *)from);
++ c = *((volatile char *)from + size - 1);
++
++ if (to_xxx < PAGE_OFFSET) {
++ return kdb_putuserarea_size(to_xxx, from, size);
++ }
++
++ set_fs(KERNEL_DS);
++ r = __copy_to_user((void *)to_xxx, from, size);
++ set_fs(oldfs);
++ return r;
++}
++
++static inline int
++__kdba_getarea_size(void *to, unsigned long from_xxx, size_t size)
++{
++ mm_segment_t oldfs = get_fs();
++ int r;
++ *((volatile char *)to) = '\0';
++ *((volatile char *)to + size - 1) = '\0';
++
++ if (from_xxx < PAGE_OFFSET) {
++ return kdb_getuserarea_size(to, from_xxx, size);
++ }
++
++ set_fs(KERNEL_DS);
++ r = __copy_to_user(to, (void *)from_xxx, size);
++ set_fs(oldfs);
++ return r;
++}
++
++/* For numa with replicated code/data, the platform must supply its own
++ * kdba_putarea_size and kdba_getarea_size routines. Without replication kdb
++ * uses the standard architecture routines.
++ */
++#ifdef CONFIG_NUMA_REPLICATE
++extern int kdba_putarea_size(unsigned long to_xxx, void *from, size_t size);
++extern int kdba_getarea_size(void *to, unsigned long from_xxx, size_t size);
++#else
++#define kdba_putarea_size __kdba_putarea_size
++#define kdba_getarea_size __kdba_getarea_size
++#endif
++
++static inline int
++kdba_verify_rw(unsigned long addr, size_t size)
++{
++ unsigned char data[size];
++ return(kdba_getarea_size(data, addr, size) || kdba_putarea_size(addr, data, size));
++}
++
++static inline unsigned long
++kdba_funcptr_value(void *fp)
++{
++ return (unsigned long)fp;
++}
++
++#endif /* !_ASM_KDB_H */
+diff -Nurp linux-2.6.22-590/include/asm-x86_64/kdbprivate.h linux-2.6.22-600/include/asm-x86_64/kdbprivate.h
+--- linux-2.6.22-590/include/asm-x86_64/kdbprivate.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/asm-x86_64/kdbprivate.h 2008-04-09 18:16:24.000000000 +0200
+@@ -0,0 +1,193 @@
++#ifndef _ASM_KDBPRIVATE_H
++#define _ASM_KDBPRIVATE_H
++
++/*
++ * Kernel Debugger Architecture Dependent Private Headers
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++typedef unsigned char kdb_machinst_t;
++
++ /*
++ * KDB_MAXBPT describes the total number of breakpoints
++ * supported by this architecture.
++ */
++#define KDB_MAXBPT 16
++
++ /*
++ * KDB_MAXHARDBPT describes the total number of hardware
++ * breakpoint registers that exist.
++ */
++#define KDB_MAXHARDBPT 4
++
++/* Maximum number of arguments to a function */
++#define KDBA_MAXARGS 16
++
++ /*
++ * Platform specific environment entries
++ */
++#define KDB_PLATFORM_ENV "IDMODE=x86_64", "BYTESPERWORD=8", "IDCOUNT=16"
++
++ /*
++ * Support for ia32 debug registers
++ */
++typedef struct _kdbhard_bp {
++ kdb_machreg_t bph_reg; /* Register this breakpoint uses */
++
++ unsigned int bph_free:1; /* Register available for use */
++ unsigned int bph_data:1; /* Data Access breakpoint */
++
++ unsigned int bph_write:1; /* Write Data breakpoint */
++ unsigned int bph_mode:2; /* 0=inst, 1=write, 2=io, 3=read */
++ unsigned int bph_length:2; /* 0=1, 1=2, 2=BAD, 3=4 (bytes) */
++} kdbhard_bp_t;
++
++extern kdbhard_bp_t kdb_hardbreaks[/* KDB_MAXHARDBPT */];
++
++#define IA32_BREAKPOINT_INSTRUCTION 0xcc
++
++#define DR6_BT 0x00008000
++#define DR6_BS 0x00004000
++#define DR6_BD 0x00002000
++
++#define DR6_B3 0x00000008
++#define DR6_B2 0x00000004
++#define DR6_B1 0x00000002
++#define DR6_B0 0x00000001
++#define DR6_DR_MASK 0x0000000F
++
++#define DR7_RW_VAL(dr, drnum) \
++ (((dr) >> (16 + (4 * (drnum)))) & 0x3)
++
++#define DR7_RW_SET(dr, drnum, rw) \
++ do { \
++ (dr) &= ~(0x3 << (16 + (4 * (drnum)))); \
++ (dr) |= (((rw) & 0x3) << (16 + (4 * (drnum)))); \
++ } while (0)
++
++#define DR7_RW0(dr) DR7_RW_VAL(dr, 0)
++#define DR7_RW0SET(dr,rw) DR7_RW_SET(dr, 0, rw)
++#define DR7_RW1(dr) DR7_RW_VAL(dr, 1)
++#define DR7_RW1SET(dr,rw) DR7_RW_SET(dr, 1, rw)
++#define DR7_RW2(dr) DR7_RW_VAL(dr, 2)
++#define DR7_RW2SET(dr,rw) DR7_RW_SET(dr, 2, rw)
++#define DR7_RW3(dr) DR7_RW_VAL(dr, 3)
++#define DR7_RW3SET(dr,rw) DR7_RW_SET(dr, 3, rw)
++
++
++#define DR7_LEN_VAL(dr, drnum) \
++ (((dr) >> (18 + (4 * (drnum)))) & 0x3)
++
++#define DR7_LEN_SET(dr, drnum, rw) \
++ do { \
++ (dr) &= ~(0x3 << (18 + (4 * (drnum)))); \
++ (dr) |= (((rw) & 0x3) << (18 + (4 * (drnum)))); \
++ } while (0)
++#define DR7_LEN0(dr) DR7_LEN_VAL(dr, 0)
++#define DR7_LEN0SET(dr,len) DR7_LEN_SET(dr, 0, len)
++#define DR7_LEN1(dr) DR7_LEN_VAL(dr, 1)
++#define DR7_LEN1SET(dr,len) DR7_LEN_SET(dr, 1, len)
++#define DR7_LEN2(dr) DR7_LEN_VAL(dr, 2)
++#define DR7_LEN2SET(dr,len) DR7_LEN_SET(dr, 2, len)
++#define DR7_LEN3(dr) DR7_LEN_VAL(dr, 3)
++#define DR7_LEN3SET(dr,len) DR7_LEN_SET(dr, 3, len)
++
++#define DR7_G0(dr) (((dr)>>1)&0x1)
++#define DR7_G0SET(dr) ((dr) |= 0x2)
++#define DR7_G0CLR(dr) ((dr) &= ~0x2)
++#define DR7_G1(dr) (((dr)>>3)&0x1)
++#define DR7_G1SET(dr) ((dr) |= 0x8)
++#define DR7_G1CLR(dr) ((dr) &= ~0x8)
++#define DR7_G2(dr) (((dr)>>5)&0x1)
++#define DR7_G2SET(dr) ((dr) |= 0x20)
++#define DR7_G2CLR(dr) ((dr) &= ~0x20)
++#define DR7_G3(dr) (((dr)>>7)&0x1)
++#define DR7_G3SET(dr) ((dr) |= 0x80)
++#define DR7_G3CLR(dr) ((dr) &= ~0x80)
++
++#define DR7_L0(dr) (((dr))&0x1)
++#define DR7_L0SET(dr) ((dr) |= 0x1)
++#define DR7_L0CLR(dr) ((dr) &= ~0x1)
++#define DR7_L1(dr) (((dr)>>2)&0x1)
++#define DR7_L1SET(dr) ((dr) |= 0x4)
++#define DR7_L1CLR(dr) ((dr) &= ~0x4)
++#define DR7_L2(dr) (((dr)>>4)&0x1)
++#define DR7_L2SET(dr) ((dr) |= 0x10)
++#define DR7_L2CLR(dr) ((dr) &= ~0x10)
++#define DR7_L3(dr) (((dr)>>6)&0x1)
++#define DR7_L3SET(dr) ((dr) |= 0x40)
++#define DR7_L3CLR(dr) ((dr) &= ~0x40)
++
++#define DR7_GD 0x00002000 /* General Detect Enable */
++#define DR7_GE 0x00000200 /* Global exact */
++#define DR7_LE 0x00000100 /* Local exact */
++
++extern kdb_machreg_t kdba_getdr6(void);
++extern void kdba_putdr6(kdb_machreg_t);
++
++extern kdb_machreg_t kdba_getdr7(void);
++
++extern kdb_machreg_t kdba_getdr(int);
++extern void kdba_putdr(int, kdb_machreg_t);
++
++extern kdb_machreg_t kdb_getcr(int);
++
++/*
++ * register indices for x86_64 setjmp/longjmp
++ */
++#define JB_RBX 0
++#define JB_RBP 1
++#define JB_R12 2
++#define JB_R13 3
++#define JB_R14 4
++#define JB_R15 5
++#define JB_RSP 6
++#define JB_PC 7
++
++typedef struct __kdb_jmp_buf {
++ unsigned long regs[8]; /* kdba_setjmp assumes fixed offsets here */
++} kdb_jmp_buf;
++
++extern int asmlinkage kdba_setjmp(kdb_jmp_buf *);
++extern void asmlinkage kdba_longjmp(kdb_jmp_buf *, int);
++#define kdba_setjmp kdba_setjmp
++
++extern kdb_jmp_buf *kdbjmpbuf;
++
++/* Arch specific data saved for running processes */
++
++struct kdba_running_process {
++ long rsp; /* KDB may be on a different stack */
++ long rip; /* rip when rsp was set */
++};
++
++register unsigned long current_stack_pointer asm("rsp") __attribute_used__;
++
++static inline
++void kdba_save_running(struct kdba_running_process *k, struct pt_regs *regs)
++{
++ k->rsp = current_stack_pointer;
++ __asm__ __volatile__ ( " lea 0(%%rip),%%rax; movq %%rax,%0 ; " : "=r"(k->rip) : : "rax" );
++}
++
++static inline
++void kdba_unsave_running(struct kdba_running_process *k, struct pt_regs *regs)
++{
++}
++
++struct kdb_activation_record;
++extern void kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
++ struct kdb_activation_record *ar);
++
++extern void kdba_wait_for_cpus(void);
++
++extern asmlinkage void kdb_interrupt(void);
++
++#define KDB_INT_REGISTERS 16
++
++#endif /* !_ASM_KDBPRIVATE_H */
+diff -Nurp linux-2.6.22-590/include/asm-x86_64/kdebug.h linux-2.6.22-600/include/asm-x86_64/kdebug.h
+--- linux-2.6.22-590/include/asm-x86_64/kdebug.h 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/include/asm-x86_64/kdebug.h 2008-04-09 18:16:24.000000000 +0200
+@@ -23,6 +23,8 @@ enum die_val {
+ DIE_CALL,
+ DIE_NMI_IPI,
+ DIE_PAGE_FAULT,
++ DIE_KDEBUG_ENTER,
++ DIE_KDEBUG_LEAVE,
+ };
+
+ extern void printk_address(unsigned long address);
+diff -Nurp linux-2.6.22-590/include/asm-x86_64/kmap_types.h linux-2.6.22-600/include/asm-x86_64/kmap_types.h
+--- linux-2.6.22-590/include/asm-x86_64/kmap_types.h 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/include/asm-x86_64/kmap_types.h 2008-04-09 18:16:24.000000000 +0200
+@@ -13,6 +13,7 @@ enum km_type {
+ KM_IRQ1,
+ KM_SOFTIRQ0,
+ KM_SOFTIRQ1,
++ KM_KDB,
+ KM_TYPE_NR
+ };
+
+diff -Nurp linux-2.6.22-590/include/linux/console.h linux-2.6.22-600/include/linux/console.h
+--- linux-2.6.22-590/include/linux/console.h 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/include/linux/console.h 2008-04-09 18:14:28.000000000 +0200
+@@ -137,7 +137,12 @@ void vcs_remove_sysfs(struct tty_struct
+
+ /* Some debug stub to catch some of the obvious races in the VT code */
+ #if 1
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress && !atomic_read(&kdb_event))
++#else /* !CONFIG_KDB */
+ #define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress)
++#endif /* CONFIG_KDB */
+ #else
+ #define WARN_CONSOLE_UNLOCKED()
+ #endif
+diff -Nurp linux-2.6.22-590/include/linux/dis-asm.h linux-2.6.22-600/include/linux/dis-asm.h
+--- linux-2.6.22-590/include/linux/dis-asm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/linux/dis-asm.h 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,347 @@
++/* Interface between the opcode library and its callers.
++
++ Copyright 1999, 2000, 2001, 2002, 2003, 2004, 2005
++ Free Software Foundation, Inc.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2, or (at your option)
++ any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor,
++ Boston, MA 02110-1301, USA.
++
++ Written by Cygnus Support, 1993.
++
++ The opcode library (libopcodes.a) provides instruction decoders for
++ a large variety of instruction sets, callable with an identical
++ interface, for making instruction-processing programs more independent
++ of the instruction set being processed. */
++
++/* Extracted from binutils 2.16.91.0.2 (OpenSUSE 10.0) and modified for kdb use.
++ * Any trailing whitespace was removed and #ifdef/ifndef __KERNEL__ added as
++ * required.
++ * Keith Owens <kaos@sgi.com> 15 May 2006
++ */
++
++#ifndef DIS_ASM_H
++#define DIS_ASM_H
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#ifdef __KERNEL__
++#include <asm/ansidecl.h>
++#include <asm/bfd.h>
++typedef void FILE;
++#else /* __KERNEL__ */
++#include <stdio.h>
++#include "bfd.h"
++#endif /* __KERNEL__ */
++
++typedef int (*fprintf_ftype) (void *, const char*, ...) ATTRIBUTE_FPTR_PRINTF_2;
++
++enum dis_insn_type {
++ dis_noninsn, /* Not a valid instruction */
++ dis_nonbranch, /* Not a branch instruction */
++ dis_branch, /* Unconditional branch */
++ dis_condbranch, /* Conditional branch */
++ dis_jsr, /* Jump to subroutine */
++ dis_condjsr, /* Conditional jump to subroutine */
++ dis_dref, /* Data reference instruction */
++ dis_dref2 /* Two data references in instruction */
++};
++
++/* This struct is passed into the instruction decoding routine,
++ and is passed back out into each callback. The various fields are used
++ for conveying information from your main routine into your callbacks,
++ for passing information into the instruction decoders (such as the
++ addresses of the callback functions), or for passing information
++ back from the instruction decoders to their callers.
++
++ It must be initialized before it is first passed; this can be done
++ by hand, or using one of the initialization macros below. */
++
++typedef struct disassemble_info {
++ fprintf_ftype fprintf_func;
++ void *stream;
++ void *application_data;
++
++ /* Target description. We could replace this with a pointer to the bfd,
++ but that would require one. There currently isn't any such requirement
++ so to avoid introducing one we record these explicitly. */
++ /* The bfd_flavour. This can be bfd_target_unknown_flavour. */
++ enum bfd_flavour flavour;
++ /* The bfd_arch value. */
++ enum bfd_architecture arch;
++ /* The bfd_mach value. */
++ unsigned long mach;
++ /* Endianness (for bi-endian cpus). Mono-endian cpus can ignore this. */
++ enum bfd_endian endian;
++ /* An arch/mach-specific bitmask of selected instruction subsets, mainly
++ for processors with run-time-switchable instruction sets. The default,
++ zero, means that there is no constraint. CGEN-based opcodes ports
++ may use ISA_foo masks. */
++ unsigned long insn_sets;
++
++ /* Some targets need information about the current section to accurately
++ display insns. If this is NULL, the target disassembler function
++ will have to make its best guess. */
++ asection *section;
++
++ /* An array of pointers to symbols either at the location being disassembled
++ or at the start of the function being disassembled. The array is sorted
++ so that the first symbol is intended to be the one used. The others are
++ present for any misc. purposes. This is not set reliably, but if it is
++ not NULL, it is correct. */
++ asymbol **symbols;
++ /* Number of symbols in array. */
++ int num_symbols;
++
++ /* For use by the disassembler.
++ The top 16 bits are reserved for public use (and are documented here).
++ The bottom 16 bits are for the internal use of the disassembler. */
++ unsigned long flags;
++#define INSN_HAS_RELOC 0x80000000
++ void *private_data;
++
++ /* Function used to get bytes to disassemble. MEMADDR is the
++ address of the stuff to be disassembled, MYADDR is the address to
++ put the bytes in, and LENGTH is the number of bytes to read.
++ INFO is a pointer to this struct.
++ Returns an errno value or 0 for success. */
++ int (*read_memory_func)
++ (bfd_vma memaddr, bfd_byte *myaddr, unsigned int length,
++ struct disassemble_info *info);
++
++ /* Function which should be called if we get an error that we can't
++ recover from. STATUS is the errno value from read_memory_func and
++ MEMADDR is the address that we were trying to read. INFO is a
++ pointer to this struct. */
++ void (*memory_error_func)
++ (int status, bfd_vma memaddr, struct disassemble_info *info);
++
++ /* Function called to print ADDR. */
++ void (*print_address_func)
++ (bfd_vma addr, struct disassemble_info *info);
++
++ /* Function called to determine if there is a symbol at the given ADDR.
++ If there is, the function returns 1, otherwise it returns 0.
++ This is used by ports which support an overlay manager where
++ the overlay number is held in the top part of an address. In
++ some circumstances we want to include the overlay number in the
++ address, (normally because there is a symbol associated with
++ that address), but sometimes we want to mask out the overlay bits. */
++ int (* symbol_at_address_func)
++ (bfd_vma addr, struct disassemble_info * info);
++
++ /* Function called to check if a SYMBOL can be displayed to the user.
++ This is used by some ports that want to hide special symbols when
++ displaying debugging output. */
++ bfd_boolean (* symbol_is_valid)
++ (asymbol *, struct disassemble_info * info);
++
++ /* These are for buffer_read_memory. */
++ bfd_byte *buffer;
++ bfd_vma buffer_vma;
++ unsigned int buffer_length;
++
++ /* This variable may be set by the instruction decoder. It suggests
++ the number of bytes objdump should display on a single line. If
++ the instruction decoder sets this, it should always set it to
++ the same value in order to get reasonable looking output. */
++ int bytes_per_line;
++
++ /* The next two variables control the way objdump displays the raw data. */
++ /* For example, if bytes_per_line is 8 and bytes_per_chunk is 4, the */
++ /* output will look like this:
++ 00: 00000000 00000000
++ with the chunks displayed according to "display_endian". */
++ int bytes_per_chunk;
++ enum bfd_endian display_endian;
++
++ /* Number of octets per incremented target address
++ Normally one, but some DSPs have byte sizes of 16 or 32 bits. */
++ unsigned int octets_per_byte;
++
++ /* The number of zeroes we want to see at the end of a section before we
++ start skipping them. */
++ unsigned int skip_zeroes;
++
++ /* The number of zeroes to skip at the end of a section. If the number
++ of zeroes at the end is between SKIP_ZEROES_AT_END and SKIP_ZEROES,
++ they will be disassembled. If there are fewer than
++ SKIP_ZEROES_AT_END, they will be skipped. This is a heuristic
++ attempt to avoid disassembling zeroes inserted by section
++ alignment. */
++ unsigned int skip_zeroes_at_end;
++
++ /* Results from instruction decoders. Not all decoders yet support
++ this information. This info is set each time an instruction is
++ decoded, and is only valid for the last such instruction.
++
++ To determine whether this decoder supports this information, set
++ insn_info_valid to 0, decode an instruction, then check it. */
++
++ char insn_info_valid; /* Branch info has been set. */
++ char branch_delay_insns; /* How many sequential insn's will run before
++ a branch takes effect. (0 = normal) */
++ char data_size; /* Size of data reference in insn, in bytes */
++ enum dis_insn_type insn_type; /* Type of instruction */
++ bfd_vma target; /* Target address of branch or dref, if known;
++ zero if unknown. */
++ bfd_vma target2; /* Second target address for dref2 */
++
++ /* Command line options specific to the target disassembler. */
++ char * disassembler_options;
++
++} disassemble_info;
++
++\f
++/* Standard disassemblers. Disassemble one instruction at the given
++ target address. Return number of octets processed. */
++typedef int (*disassembler_ftype) (bfd_vma, disassemble_info *);
++
++extern int print_insn_big_mips (bfd_vma, disassemble_info *);
++extern int print_insn_little_mips (bfd_vma, disassemble_info *);
++extern int print_insn_i386 (bfd_vma, disassemble_info *);
++extern int print_insn_i386_att (bfd_vma, disassemble_info *);
++extern int print_insn_i386_intel (bfd_vma, disassemble_info *);
++extern int print_insn_ia64 (bfd_vma, disassemble_info *);
++extern int print_insn_i370 (bfd_vma, disassemble_info *);
++extern int print_insn_m68hc11 (bfd_vma, disassemble_info *);
++extern int print_insn_m68hc12 (bfd_vma, disassemble_info *);
++extern int print_insn_m68k (bfd_vma, disassemble_info *);
++extern int print_insn_z8001 (bfd_vma, disassemble_info *);
++extern int print_insn_z8002 (bfd_vma, disassemble_info *);
++extern int print_insn_h8300 (bfd_vma, disassemble_info *);
++extern int print_insn_h8300h (bfd_vma, disassemble_info *);
++extern int print_insn_h8300s (bfd_vma, disassemble_info *);
++extern int print_insn_h8500 (bfd_vma, disassemble_info *);
++extern int print_insn_alpha (bfd_vma, disassemble_info *);
++extern int print_insn_big_arm (bfd_vma, disassemble_info *);
++extern int print_insn_little_arm (bfd_vma, disassemble_info *);
++extern int print_insn_sparc (bfd_vma, disassemble_info *);
++extern int print_insn_big_a29k (bfd_vma, disassemble_info *);
++extern int print_insn_little_a29k (bfd_vma, disassemble_info *);
++extern int print_insn_avr (bfd_vma, disassemble_info *);
++extern int print_insn_d10v (bfd_vma, disassemble_info *);
++extern int print_insn_d30v (bfd_vma, disassemble_info *);
++extern int print_insn_dlx (bfd_vma, disassemble_info *);
++extern int print_insn_fr30 (bfd_vma, disassemble_info *);
++extern int print_insn_hppa (bfd_vma, disassemble_info *);
++extern int print_insn_i860 (bfd_vma, disassemble_info *);
++extern int print_insn_i960 (bfd_vma, disassemble_info *);
++extern int print_insn_ip2k (bfd_vma, disassemble_info *);
++extern int print_insn_m32r (bfd_vma, disassemble_info *);
++extern int print_insn_m88k (bfd_vma, disassemble_info *);
++extern int print_insn_maxq_little (bfd_vma, disassemble_info *);
++extern int print_insn_maxq_big (bfd_vma, disassemble_info *);
++extern int print_insn_mcore (bfd_vma, disassemble_info *);
++extern int print_insn_mmix (bfd_vma, disassemble_info *);
++extern int print_insn_mn10200 (bfd_vma, disassemble_info *);
++extern int print_insn_mn10300 (bfd_vma, disassemble_info *);
++extern int print_insn_ms1 (bfd_vma, disassemble_info *);
++extern int print_insn_msp430 (bfd_vma, disassemble_info *);
++extern int print_insn_ns32k (bfd_vma, disassemble_info *);
++extern int print_insn_crx (bfd_vma, disassemble_info *);
++extern int print_insn_openrisc (bfd_vma, disassemble_info *);
++extern int print_insn_big_or32 (bfd_vma, disassemble_info *);
++extern int print_insn_little_or32 (bfd_vma, disassemble_info *);
++extern int print_insn_pdp11 (bfd_vma, disassemble_info *);
++extern int print_insn_pj (bfd_vma, disassemble_info *);
++extern int print_insn_big_powerpc (bfd_vma, disassemble_info *);
++extern int print_insn_little_powerpc (bfd_vma, disassemble_info *);
++extern int print_insn_rs6000 (bfd_vma, disassemble_info *);
++extern int print_insn_s390 (bfd_vma, disassemble_info *);
++extern int print_insn_sh (bfd_vma, disassemble_info *);
++extern int print_insn_tic30 (bfd_vma, disassemble_info *);
++extern int print_insn_tic4x (bfd_vma, disassemble_info *);
++extern int print_insn_tic54x (bfd_vma, disassemble_info *);
++extern int print_insn_tic80 (bfd_vma, disassemble_info *);
++extern int print_insn_v850 (bfd_vma, disassemble_info *);
++extern int print_insn_vax (bfd_vma, disassemble_info *);
++extern int print_insn_w65 (bfd_vma, disassemble_info *);
++extern int print_insn_xstormy16 (bfd_vma, disassemble_info *);
++extern int print_insn_xtensa (bfd_vma, disassemble_info *);
++extern int print_insn_sh64 (bfd_vma, disassemble_info *);
++extern int print_insn_sh64x_media (bfd_vma, disassemble_info *);
++extern int print_insn_frv (bfd_vma, disassemble_info *);
++extern int print_insn_iq2000 (bfd_vma, disassemble_info *);
++extern int print_insn_m32c (bfd_vma, disassemble_info *);
++
++extern disassembler_ftype arc_get_disassembler (void *);
++extern disassembler_ftype cris_get_disassembler (bfd *);
++
++extern void print_mips_disassembler_options (FILE *);
++extern void print_ppc_disassembler_options (FILE *);
++extern void print_arm_disassembler_options (FILE *);
++extern void parse_arm_disassembler_option (char *);
++extern int get_arm_regname_num_options (void);
++extern int set_arm_regname_option (int);
++extern int get_arm_regnames (int, const char **, const char **, const char *const **);
++extern bfd_boolean arm_symbol_is_valid (asymbol *, struct disassemble_info *);
++
++/* Fetch the disassembler for a given BFD, if that support is available. */
++extern disassembler_ftype disassembler (bfd *);
++
++/* Amend the disassemble_info structure as necessary for the target architecture.
++ Should only be called after initialising the info->arch field. */
++extern void disassemble_init_for_target (struct disassemble_info * info);
++
++/* Document any target specific options available from the disassembler. */
++extern void disassembler_usage (FILE *);
++
++\f
++/* This block of definitions is for particular callers who read instructions
++ into a buffer before calling the instruction decoder. */
++
++/* Here is a function which callers may wish to use for read_memory_func.
++ It gets bytes from a buffer. */
++extern int buffer_read_memory
++ (bfd_vma, bfd_byte *, unsigned int, struct disassemble_info *);
++
++/* This function goes with buffer_read_memory.
++ It prints a message using info->fprintf_func and info->stream. */
++extern void perror_memory (int, bfd_vma, struct disassemble_info *);
++
++
++/* Just print the address in hex. This is included for completeness even
++ though both GDB and objdump provide their own (to print symbolic
++ addresses). */
++extern void generic_print_address
++ (bfd_vma, struct disassemble_info *);
++
++/* Always true. */
++extern int generic_symbol_at_address
++ (bfd_vma, struct disassemble_info *);
++
++/* Also always true. */
++extern bfd_boolean generic_symbol_is_valid
++ (asymbol *, struct disassemble_info *);
++
++/* Method to initialize a disassemble_info struct. This should be
++ called by all applications creating such a struct. */
++extern void init_disassemble_info (struct disassemble_info *info, void *stream,
++ fprintf_ftype fprintf_func);
++
++/* For compatibility with existing code. */
++#define INIT_DISASSEMBLE_INFO(INFO, STREAM, FPRINTF_FUNC) \
++ init_disassemble_info (&(INFO), (STREAM), (fprintf_ftype) (FPRINTF_FUNC))
++#define INIT_DISASSEMBLE_INFO_NO_ARCH(INFO, STREAM, FPRINTF_FUNC) \
++ init_disassemble_info (&(INFO), (STREAM), (fprintf_ftype) (FPRINTF_FUNC))
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* ! defined (DIS_ASM_H) */
+diff -Nurp linux-2.6.22-590/include/linux/kdb.h linux-2.6.22-600/include/linux/kdb.h
+--- linux-2.6.22-590/include/linux/kdb.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/linux/kdb.h 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,166 @@
++#ifndef _KDB_H
++#define _KDB_H
++
++/*
++ * Kernel Debugger Architecture Independent Global Headers
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
++ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
++ */
++
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <asm/atomic.h>
++
++#ifdef CONFIG_KDB
++/* These are really private, but they must be defined before including
++ * asm-$(ARCH)/kdb.h, so make them public and put them here.
++ */
++extern int kdb_getuserarea_size(void *, unsigned long, size_t);
++extern int kdb_putuserarea_size(unsigned long, void *, size_t);
++
++#include <asm/kdb.h>
++#endif
++
++#define KDB_MAJOR_VERSION 4
++#define KDB_MINOR_VERSION 4
++#define KDB_TEST_VERSION ""
++
++/*
++ * kdb_initial_cpu is initialized to -1, and is set to the cpu
++ * number whenever the kernel debugger is entered.
++ */
++extern volatile int kdb_initial_cpu;
++extern atomic_t kdb_event;
++extern atomic_t kdb_8250;
++#ifdef CONFIG_KDB
++#define KDB_IS_RUNNING() (kdb_initial_cpu != -1)
++#define KDB_8250() (atomic_read(&kdb_8250) != 0)
++#else
++#define KDB_IS_RUNNING() (0)
++#define KDB_8250() (0)
++#endif /* CONFIG_KDB */
++
++/*
++ * kdb_on
++ *
++ * Defines whether kdb is on or not. Default value
++ * is set by CONFIG_KDB_OFF. Boot with kdb=on/off/on-nokey
++ * or echo "[012]" > /proc/sys/kernel/kdb to change it.
++ */
++extern int kdb_on;
++
++#if defined(CONFIG_SERIAL_8250_CONSOLE) || defined(CONFIG_SERIAL_SGI_L1_CONSOLE)
++/*
++ * kdb_serial.iobase is initialized to zero, and is set to the I/O
++ * address of the serial port when the console is setup in
++ * serial_console_setup.
++ */
++extern struct kdb_serial {
++ int io_type;
++ unsigned long iobase;
++ unsigned long ioreg_shift;
++} kdb_serial;
++#endif
++
++/*
++ * kdb_diemsg
++ *
++ * Contains a pointer to the last string supplied to the
++ * kernel 'die' panic function.
++ */
++extern const char *kdb_diemsg;
++
++#define KDB_FLAG_EARLYKDB (1 << 0) /* set from boot parameter kdb=early */
++#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */
++#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */
++#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */
++#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when kdb is off */
++#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available, kdb is disabled */
++#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do not use keyboard */
++#define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do not use keyboard */
++#define KDB_FLAG_RECOVERY (1 << 8) /* kdb is being entered for an error which has been recovered */
++
++extern volatile int kdb_flags; /* Global flags, see kdb_state for per cpu state */
++
++extern void kdb_save_flags(void);
++extern void kdb_restore_flags(void);
++
++#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag)
++#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag))
++#define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag))
++
++/*
++ * External entry point for the kernel debugger. The pt_regs
++ * at the time of entry are supplied along with the reason for
++ * entry to the kernel debugger.
++ */
++
++typedef enum {
++ KDB_REASON_ENTER=1, /* KDB_ENTER() trap/fault - regs valid */
++ KDB_REASON_ENTER_SLAVE, /* KDB_ENTER_SLAVE() trap/fault - regs valid */
++ KDB_REASON_BREAK, /* Breakpoint inst. - regs valid */
++ KDB_REASON_DEBUG, /* Debug Fault - regs valid */
++ KDB_REASON_OOPS, /* Kernel Oops - regs valid */
++ KDB_REASON_SWITCH, /* CPU switch - regs valid*/
++ KDB_REASON_KEYBOARD, /* Keyboard entry - regs valid */
++ KDB_REASON_NMI, /* Non-maskable interrupt; regs valid */
++ KDB_REASON_RECURSE, /* Recursive entry to kdb; regs probably valid */
++ KDB_REASON_CPU_UP, /* Add one cpu to kdb; regs invalid */
++ KDB_REASON_SILENT, /* Silent entry/exit to kdb; regs invalid - internal only */
++} kdb_reason_t;
++
++#ifdef CONFIG_KDB
++extern fastcall int kdb(kdb_reason_t, int, struct pt_regs *);
++#else
++#define kdb(reason,error_code,frame) (0)
++#endif
++
++/* Mainly used by kdb code, but this function is sometimes used
++ * by hacked debug code so make it generally available, not private.
++ */
++extern void kdb_printf(const char *,...)
++ __attribute__ ((format (printf, 1, 2)));
++typedef void (*kdb_printf_t)(const char *, ...)
++ __attribute__ ((format (printf, 1, 2)));
++extern void kdb_init(void);
++
++#if defined(CONFIG_SMP)
++/*
++ * Kernel debugger non-maskable IPI handler.
++ */
++extern int kdb_ipi(struct pt_regs *, void (*ack_interrupt)(void));
++extern void smp_kdb_stop(void);
++#else /* CONFIG_SMP */
++#define smp_kdb_stop()
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_KDB_USB
++#include <linux/usb.h>
++
++struct kdb_usb_exchange {
++ void *uhci; /* pointer to the UHCI structure */
++ struct urb *urb; /* pointer to the URB */
++ unsigned char *buffer; /* pointer to buffer */
++ void (*poll_func)(void *, struct urb *); /* pointer to the polling function */
++ void (*reset_timer)(void); /* pointer to the reset timer function */
++};
++extern struct kdb_usb_exchange kdb_usb_infos; /* KDB common structure */
++#endif /* CONFIG_KDB_USB */
++
++static inline
++int kdb_process_cpu(const struct task_struct *p)
++{
++ unsigned int cpu = task_thread_info(p)->cpu;
++ if (cpu > NR_CPUS)
++ cpu = 0;
++ return cpu;
++}
++
++extern const char kdb_serial_str[];
++
++#endif /* !_KDB_H */
+diff -Nurp linux-2.6.22-590/include/linux/kdbprivate.h linux-2.6.22-600/include/linux/kdbprivate.h
+--- linux-2.6.22-590/include/linux/kdbprivate.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/include/linux/kdbprivate.h 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,485 @@
++#ifndef _KDBPRIVATE_H
++#define _KDBPRIVATE_H
++
++/*
++ * Kernel Debugger Architecture Independent Private Headers
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++
++#include <linux/dis-asm.h>
++#include <asm/kdbprivate.h>
++#include <asm/bfd.h>
++
++ /*
++ * Kernel Debugger Error codes. Must not overlap with command codes.
++ */
++
++#define KDB_NOTFOUND (-1)
++#define KDB_ARGCOUNT (-2)
++#define KDB_BADWIDTH (-3)
++#define KDB_BADRADIX (-4)
++#define KDB_NOTENV (-5)
++#define KDB_NOENVVALUE (-6)
++#define KDB_NOTIMP (-7)
++#define KDB_ENVFULL (-8)
++#define KDB_ENVBUFFULL (-9 )
++#define KDB_TOOMANYBPT (-10)
++#define KDB_TOOMANYDBREGS (-11)
++#define KDB_DUPBPT (-12)
++#define KDB_BPTNOTFOUND (-13)
++#define KDB_BADMODE (-14)
++#define KDB_BADINT (-15)
++#define KDB_INVADDRFMT (-16)
++#define KDB_BADREG (-17)
++#define KDB_BADCPUNUM (-18)
++#define KDB_BADLENGTH (-19)
++#define KDB_NOBP (-20)
++#define KDB_BADADDR (-21)
++
++ /*
++ * Kernel Debugger Command codes. Must not overlap with error codes.
++ */
++#define KDB_CMD_GO (-1001)
++#define KDB_CMD_CPU (-1002)
++#define KDB_CMD_SS (-1003)
++#define KDB_CMD_SSB (-1004)
++
++ /*
++ * Internal debug flags
++ */
++/* KDB_DEBUG_FLAG_BT 0x0001 Was Stack traceback debug */
++#define KDB_DEBUG_FLAG_BP 0x0002 /* Breakpoint subsystem debug */
++#define KDB_DEBUG_FLAG_BB_SUMM 0x0004 /* Basic block analysis, summary only */
++#define KDB_DEBUG_FLAG_AR 0x0008 /* Activation record, generic */
++#define KDB_DEBUG_FLAG_ARA 0x0010 /* Activation record, arch specific */
++#define KDB_DEBUG_FLAG_BB 0x0020 /* All basic block analysis */
++#define KDB_DEBUG_FLAG_STATE 0x0040 /* State flags */
++#define KDB_DEBUG_FLAG_MASK 0xffff /* All debug flags */
++#define KDB_DEBUG_FLAG_SHIFT 16 /* Shift factor for dbflags */
++
++#define KDB_DEBUG(flag) (kdb_flags & (KDB_DEBUG_FLAG_##flag << KDB_DEBUG_FLAG_SHIFT))
++#define KDB_DEBUG_STATE(text,value) if (KDB_DEBUG(STATE)) kdb_print_state(text, value)
++
++typedef enum {
++ KDB_REPEAT_NONE = 0, /* Do not repeat this command */
++ KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */
++ KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */
++} kdb_repeat_t;
++
++typedef int (*kdb_func_t)(int, const char **);
++
++ /*
++ * Symbol table format returned by kallsyms.
++ */
++
++typedef struct __ksymtab {
++ unsigned long value; /* Address of symbol */
++ const char *mod_name; /* Module containing symbol or "kernel" */
++ unsigned long mod_start;
++ unsigned long mod_end;
++ const char *sec_name; /* Section containing symbol */
++ unsigned long sec_start;
++ unsigned long sec_end;
++ const char *sym_name; /* Full symbol name, including any version */
++ unsigned long sym_start;
++ unsigned long sym_end;
++ } kdb_symtab_t;
++extern int kallsyms_symbol_next(char *prefix_name, int flag);
++extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
++
++ /*
++ * Exported Symbols for kernel loadable modules to use.
++ */
++extern int kdb_register(char *, kdb_func_t, char *, char *, short);
++extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, short, kdb_repeat_t);
++extern int kdb_unregister(char *);
++
++extern int kdb_getarea_size(void *, unsigned long, size_t);
++extern int kdb_putarea_size(unsigned long, void *, size_t);
++
++/* Like get_user and put_user, kdb_getarea and kdb_putarea take variable
++ * names, not pointers. The underlying *_size functions take pointers.
++ */
++#define kdb_getarea(x,addr) kdb_getarea_size(&(x), addr, sizeof((x)))
++#define kdb_putarea(addr,x) kdb_putarea_size(addr, &(x), sizeof((x)))
++
++extern int kdb_getphysword(unsigned long *word,
++ unsigned long addr, size_t size);
++extern int kdb_getword(unsigned long *, unsigned long, size_t);
++extern int kdb_putword(unsigned long, unsigned long, size_t);
++
++extern int kdbgetularg(const char *, unsigned long *);
++extern char *kdbgetenv(const char *);
++extern int kdbgetintenv(const char *, int *);
++extern int kdbgetaddrarg(int, const char**, int*, unsigned long *,
++ long *, char **);
++extern int kdbgetsymval(const char *, kdb_symtab_t *);
++extern int kdbnearsym(unsigned long, kdb_symtab_t *);
++extern void kdbnearsym_cleanup(void);
++extern char *kdb_read(char *buffer, size_t bufsize);
++extern char *kdb_strdup(const char *str, gfp_t type);
++extern void kdb_symbol_print(kdb_machreg_t, const kdb_symtab_t *, unsigned int);
++
++ /*
++ * Do we have a set of registers?
++ */
++
++#define KDB_NULL_REGS(regs) \
++ (regs == (struct pt_regs *)NULL ? kdb_printf("%s: null regs - should never happen\n", __FUNCTION__), 1 : 0)
++
++ /*
++ * Routine for debugging the debugger state.
++ */
++
++extern void kdb_print_state(const char *, int);
++
++ /*
++ * Per cpu kdb state. A cpu can be under kdb control but outside kdb,
++ * for example when doing single step.
++ */
++volatile extern int kdb_state[ /*NR_CPUS*/ ];
++#define KDB_STATE_KDB 0x00000001 /* Cpu is inside kdb */
++#define KDB_STATE_LEAVING 0x00000002 /* Cpu is leaving kdb */
++#define KDB_STATE_CMD 0x00000004 /* Running a kdb command */
++#define KDB_STATE_KDB_CONTROL 0x00000008 /* This cpu is under kdb control */
++#define KDB_STATE_HOLD_CPU 0x00000010 /* Hold this cpu inside kdb */
++#define KDB_STATE_DOING_SS 0x00000020 /* Doing ss command */
++#define KDB_STATE_DOING_SSB 0x00000040 /* Doing ssb command, DOING_SS is also set */
++#define KDB_STATE_SSBPT 0x00000080 /* Install breakpoint after one ss, independent of DOING_SS */
++#define KDB_STATE_REENTRY 0x00000100 /* Valid re-entry into kdb */
++#define KDB_STATE_SUPPRESS 0x00000200 /* Suppress error messages */
++#define KDB_STATE_LONGJMP 0x00000400 /* longjmp() data is available */
++#define KDB_STATE_GO_SWITCH 0x00000800 /* go is switching back to initial cpu */
++#define KDB_STATE_PRINTF_LOCK 0x00001000 /* Holds kdb_printf lock */
++#define KDB_STATE_WAIT_IPI 0x00002000 /* Waiting for kdb_ipi() NMI */
++#define KDB_STATE_RECURSE 0x00004000 /* Recursive entry to kdb */
++#define KDB_STATE_IP_ADJUSTED 0x00008000 /* Restart IP has been adjusted */
++#define KDB_STATE_GO1 0x00010000 /* go only releases one cpu */
++#define KDB_STATE_KEYBOARD 0x00020000 /* kdb entered via keyboard on this cpu */
++#define KDB_STATE_ARCH 0xff000000 /* Reserved for arch specific use */
++
++#define KDB_STATE_CPU(flag,cpu) (kdb_state[cpu] & KDB_STATE_##flag)
++#define KDB_STATE_SET_CPU(flag,cpu) ((void)(kdb_state[cpu] |= KDB_STATE_##flag))
++#define KDB_STATE_CLEAR_CPU(flag,cpu) ((void)(kdb_state[cpu] &= ~KDB_STATE_##flag))
++
++#define KDB_STATE(flag) KDB_STATE_CPU(flag,smp_processor_id())
++#define KDB_STATE_SET(flag) KDB_STATE_SET_CPU(flag,smp_processor_id())
++#define KDB_STATE_CLEAR(flag) KDB_STATE_CLEAR_CPU(flag,smp_processor_id())
++
++ /*
++ * kdb_nextline
++ *
++ * Contains the current line number on the screen. Used
++ * to handle the built-in pager (LINES env variable)
++ */
++extern volatile int kdb_nextline;
++
++ /*
++ * Breakpoint state
++ *
++ * Each active and inactive breakpoint is represented by
++ * an instance of the following data structure.
++ */
++
++typedef struct _kdb_bp {
++ bfd_vma bp_addr; /* Address breakpoint is present at */
++ kdb_machinst_t bp_inst; /* Replaced instruction */
++
++ unsigned int bp_free:1; /* This entry is available */
++
++ unsigned int bp_enabled:1; /* Breakpoint is active in register */
++ unsigned int bp_global:1; /* Global to all processors */
++
++ unsigned int bp_hardtype:1; /* Uses hardware register */
++ unsigned int bp_forcehw:1; /* Force hardware register */
++ unsigned int bp_installed:1; /* Breakpoint is installed */
++ unsigned int bp_delay:1; /* Do delayed bp handling */
++ unsigned int bp_delayed:1; /* Delayed breakpoint */
++
++ int bp_cpu; /* Cpu # (if bp_global == 0) */
++ kdbhard_bp_t bp_template; /* Hardware breakpoint template */
++ kdbhard_bp_t *bp_hard; /* Hardware breakpoint structure */
++ int bp_adjust; /* Adjustment to PC for real instruction */
++} kdb_bp_t;
++
++ /*
++ * Breakpoint handling subsystem global variables
++ */
++extern kdb_bp_t kdb_breakpoints[/* KDB_MAXBPT */];
++
++ /*
++ * Breakpoint architecture dependent functions. Must be provided
++ * in some form for all architectures.
++ */
++extern void kdba_initbp(void);
++extern void kdba_printbp(kdb_bp_t *);
++extern kdbhard_bp_t *kdba_allocbp(kdbhard_bp_t *, int *);
++extern void kdba_freebp(kdbhard_bp_t *);
++extern int kdba_parsebp(int, const char**, int *, kdb_bp_t*);
++extern char *kdba_bptype(kdbhard_bp_t *);
++extern void kdba_setsinglestep(struct pt_regs *);
++extern void kdba_clearsinglestep(struct pt_regs *);
++
++ /*
++ * Adjust instruction pointer architecture dependent function. Must be
++ * provided in some form for all architectures.
++ */
++extern void kdba_adjust_ip(kdb_reason_t, int, struct pt_regs *);
++
++ /*
++ * KDB-only global function prototypes.
++ */
++extern void kdb_id1(unsigned long);
++extern void kdb_id_init(void);
++
++ /*
++ * Initialization functions.
++ */
++extern void kdba_init(void);
++extern void kdb_io_init(void);
++
++ /*
++ * Architecture specific function to read a string.
++ */
++typedef int (*get_char_func)(void);
++extern get_char_func poll_funcs[];
++
++#ifndef CONFIG_IA64
++ /*
++ * Data for a single activation record on stack.
++ */
++
++struct kdb_stack_info {
++ kdb_machreg_t physical_start;
++ kdb_machreg_t physical_end;
++ kdb_machreg_t logical_start;
++ kdb_machreg_t logical_end;
++ kdb_machreg_t next;
++ const char * id;
++};
++
++typedef struct { DECLARE_BITMAP(bits, KDBA_MAXARGS); } valid_t;
++
++struct kdb_activation_record {
++ struct kdb_stack_info stack; /* information about current stack */
++ int args; /* number of arguments detected */
++ kdb_machreg_t arg[KDBA_MAXARGS]; /* -> arguments */
++ valid_t valid; /* is argument n valid? */
++};
++#endif
++
++ /*
++ * Architecture specific Stack Traceback functions.
++ */
++
++struct task_struct;
++
++extern int kdba_bt_address(kdb_machreg_t, int);
++extern int kdba_bt_process(const struct task_struct *, int);
++
++ /*
++ * KDB Command Table
++ */
++
++typedef struct _kdbtab {
++ char *cmd_name; /* Command name */
++ kdb_func_t cmd_func; /* Function to execute command */
++ char *cmd_usage; /* Usage String for this command */
++ char *cmd_help; /* Help message for this command */
++ short cmd_flags; /* Parsing flags */
++ short cmd_minlen; /* Minimum legal # command chars required */
++ kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */
++} kdbtab_t;
++
++ /*
++ * External command function declarations
++ */
++
++extern int kdb_id(int, const char **);
++extern int kdb_bt(int, const char **);
++
++ /*
++ * External utility function declarations
++ */
++extern char* kdb_getstr(char *, size_t, char *);
++
++ /*
++ * Register contents manipulation
++ */
++extern int kdba_getregcontents(const char *, struct pt_regs *, kdb_machreg_t *);
++extern int kdba_setregcontents(const char *, struct pt_regs *, kdb_machreg_t);
++extern int kdba_dumpregs(struct pt_regs *, const char *, const char *);
++extern int kdba_setpc(struct pt_regs *, kdb_machreg_t);
++extern kdb_machreg_t kdba_getpc(struct pt_regs *);
++
++ /*
++ * Debug register handling.
++ */
++extern void kdba_installdbreg(kdb_bp_t*);
++extern void kdba_removedbreg(kdb_bp_t*);
++
++ /*
++ * Breakpoint handling - External interfaces
++ */
++extern void kdb_initbptab(void);
++extern void kdb_bp_install_global(struct pt_regs *);
++extern void kdb_bp_install_local(struct pt_regs *);
++extern void kdb_bp_remove_global(void);
++extern void kdb_bp_remove_local(void);
++
++ /*
++ * Breakpoint handling - Internal to kdb_bp.c/kdba_bp.c
++ */
++extern int kdba_installbp(struct pt_regs *regs, kdb_bp_t *);
++extern int kdba_removebp(kdb_bp_t *);
++
++
++typedef enum {
++ KDB_DB_BPT, /* Breakpoint */
++ KDB_DB_SS, /* Single-step trap */
++ KDB_DB_SSB, /* Single step to branch */
++ KDB_DB_SSBPT, /* Single step over breakpoint */
++ KDB_DB_NOBPT /* Spurious breakpoint */
++} kdb_dbtrap_t;
++
++extern kdb_dbtrap_t kdba_db_trap(struct pt_regs *, int); /* DEBUG trap/fault handler */
++extern kdb_dbtrap_t kdba_bp_trap(struct pt_regs *, int); /* Breakpoint trap/fault hdlr */
++
++ /*
++ * Interrupt Handling
++ */
++typedef unsigned long kdb_intstate_t;
++
++extern void kdba_disableint(kdb_intstate_t *);
++extern void kdba_restoreint(kdb_intstate_t *);
++
++ /*
++ * SMP and process stack manipulation routines.
++ */
++extern int kdba_ipi(struct pt_regs *, void (*)(void));
++extern int kdba_main_loop(kdb_reason_t, kdb_reason_t, int, kdb_dbtrap_t, struct pt_regs *);
++extern int kdb_main_loop(kdb_reason_t, kdb_reason_t, int, kdb_dbtrap_t, struct pt_regs *);
++
++ /*
++ * General Disassembler interfaces
++ */
++extern int kdb_dis_fprintf(PTR, const char *, ...) __attribute__ ((format (printf, 2, 3)));
++extern int kdb_dis_fprintf_dummy(PTR, const char *, ...) __attribute__ ((format (printf, 2, 3)));
++extern disassemble_info kdb_di;
++
++ /*
++ * Architecture Dependent Disassembler interfaces
++ */
++extern int kdba_id_printinsn(kdb_machreg_t, disassemble_info *);
++extern int kdba_id_parsemode(const char *, disassemble_info*);
++extern void kdba_id_init(disassemble_info *);
++extern void kdba_check_pc(kdb_machreg_t *);
++
++ /*
++ * Miscellaneous functions and data areas
++ */
++extern char *kdb_cmds[];
++extern void kdb_syslog_data(char *syslog_data[]);
++extern unsigned long kdb_task_state_string(const char *);
++extern char kdb_task_state_char (const struct task_struct *);
++extern unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask);
++extern void kdb_ps_suppressed(void);
++extern void kdb_ps1(const struct task_struct *p);
++extern int kdb_parse(const char *cmdstr);
++extern void kdb_print_nameval(const char *name, unsigned long val);
++extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info, int seqno);
++#ifdef CONFIG_SWAP
++extern void kdb_si_swapinfo(struct sysinfo *);
++#else
++#include <linux/swap.h>
++#define kdb_si_swapinfo(x) si_swapinfo(x)
++#endif
++extern void kdb_meminfo_read_proc(void);
++#ifdef CONFIG_HUGETLB_PAGE
++extern void kdb_hugetlb_report_meminfo(void);
++#endif /* CONFIG_HUGETLB_PAGE */
++extern const char *kdb_walk_kallsyms(loff_t *pos);
++
++ /*
++ * Architecture Dependent Local Processor setup & cleanup interfaces
++ */
++extern void kdba_local_arch_setup(void);
++extern void kdba_local_arch_cleanup(void);
++
++ /*
++ * Defines for kdb_symbol_print.
++ */
++#define KDB_SP_SPACEB 0x0001 /* Space before string */
++#define KDB_SP_SPACEA 0x0002 /* Space after string */
++#define KDB_SP_PAREN 0x0004 /* Parenthesis around string */
++#define KDB_SP_VALUE 0x0008 /* Print the value of the address */
++#define KDB_SP_SYMSIZE 0x0010 /* Print the size of the symbol */
++#define KDB_SP_NEWLINE 0x0020 /* Newline after string */
++#define KDB_SP_DEFAULT (KDB_SP_VALUE|KDB_SP_PAREN)
++
++/* Save data about running processes */
++
++struct kdb_running_process {
++ struct task_struct *p;
++ struct pt_regs *regs;
++ int seqno; /* kdb sequence number */
++ int irq_depth; /* irq count */
++ struct kdba_running_process arch; /* arch dependent save data */
++};
++
++extern struct kdb_running_process kdb_running_process[/* NR_CPUS */];
++
++extern void kdb_save_running(struct pt_regs *);
++extern void kdb_unsave_running(struct pt_regs *);
++extern struct task_struct *kdb_curr_task(int);
++
++/* Incremented each time the main kdb loop is entered on the initial cpu,
++ * it gives some indication of how old the saved data is.
++ */
++extern int kdb_seqno;
++
++#define kdb_task_has_cpu(p) (task_curr(p))
++extern void kdb_runqueue(unsigned long cpu, kdb_printf_t xxx_printf);
++
++/* Simplify coexistence with NPTL */
++#define kdb_do_each_thread(g, p) do_each_thread(g, p)
++#define kdb_while_each_thread(g, p) while_each_thread(g, p)
++
++#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
++
++extern void *debug_kmalloc(size_t size, gfp_t flags);
++extern void debug_kfree(void *);
++extern void debug_kusage(void);
++
++extern void kdba_set_current_task(const struct task_struct *);
++extern const struct task_struct *kdb_current_task;
++extern struct pt_regs *kdb_current_regs;
++
++/* Functions to safely read and write kernel areas. The {to,from}_xxx
++ * addresses are not necessarily valid, these functions must check for
++ * validity. If the arch already supports get and put routines with suitable
++ * validation and/or recovery on invalid addresses then use those routines,
++ * otherwise check it yourself.
++ */
++
++extern int kdba_putarea_size(unsigned long to_xxx, void *from, size_t size);
++extern int kdba_getarea_size(void *to, unsigned long from_xxx, size_t size);
++extern int kdba_verify_rw(unsigned long addr, size_t size);
++
++#ifndef KDB_RUNNING_PROCESS_ORIGINAL
++#define KDB_RUNNING_PROCESS_ORIGINAL kdb_running_process
++#endif
++
++extern int kdb_wait_for_cpus_secs;
++extern void kdba_cpu_up(void);
++extern char kdb_prompt_str[];
++
++#define KDB_WORD_SIZE ((int)sizeof(kdb_machreg_t))
++
++#endif /* !_KDBPRIVATE_H */
+diff -Nurp linux-2.6.22-590/include/linux/sysctl.h linux-2.6.22-600/include/linux/sysctl.h
+--- linux-2.6.22-590/include/linux/sysctl.h 2008-04-09 18:10:57.000000000 +0200
++++ linux-2.6.22-600/include/linux/sysctl.h 2008-04-09 18:14:28.000000000 +0200
+@@ -166,6 +166,7 @@ enum
+ KERN_MAX_LOCK_DEPTH=74,
+ KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */
+ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
++ KERN_KDB=77, /* int: kdb on/off */
+ };
+
+
+diff -Nurp linux-2.6.22-590/init/main.c linux-2.6.22-600/init/main.c
+--- linux-2.6.22-590/init/main.c 2008-04-09 18:10:53.000000000 +0200
++++ linux-2.6.22-600/init/main.c 2008-04-09 18:14:28.000000000 +0200
+@@ -67,6 +67,10 @@
+ #include <asm/smp.h>
+ #endif
+
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#endif /* CONFIG_KDB */
++
+ /*
+ * This is one of the first .c files built. Error out early if we have compiler
+ * trouble.
+@@ -188,6 +192,26 @@ static const char *panic_later, *panic_p
+
+ extern struct obs_kernel_param __setup_start[], __setup_end[];
+
++#ifdef CONFIG_KDB
++static int __init kdb_setup(char *str)
++{
++ if (strcmp(str, "on") == 0) {
++ kdb_on = 1;
++ } else if (strcmp(str, "on-nokey") == 0) {
++ kdb_on = 2;
++ } else if (strcmp(str, "off") == 0) {
++ kdb_on = 0;
++ } else if (strcmp(str, "early") == 0) {
++ kdb_on = 1;
++ kdb_flags |= KDB_FLAG_EARLYKDB;
++ } else
++ printk("kdb flag %s not recognised\n", str);
++ return 0;
++}
++
++__setup("kdb=", kdb_setup);
++#endif /* CONFIG_KDB */
++
+ static int __init obsolete_checksetup(char *line)
+ {
+ struct obs_kernel_param *p;
+@@ -609,6 +633,14 @@ asmlinkage void __init start_kernel(void
+ pgtable_cache_init();
+ prio_tree_init();
+ anon_vma_init();
++
++#ifdef CONFIG_KDB
++ kdb_init();
++ if (KDB_FLAG(EARLYKDB)) {
++ KDB_ENTER();
++ }
++#endif /* CONFIG_KDB */
++
+ #ifdef CONFIG_X86
+ if (efi_enabled)
+ efi_enter_virtual_mode();
+diff -Nurp linux-2.6.22-590/kdb/ChangeLog linux-2.6.22-600/kdb/ChangeLog
+--- linux-2.6.22-590/kdb/ChangeLog 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/ChangeLog 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,1693 @@
++2007-07-26 Keith Owens <kaos@sgi.com>
++
++ * New x86 backtrace code.
++ * kdb v4.4-2.6.22-common-4.
++
++2007-07-17 Keith Owens <kaos@sgi.com>
++
++ * Make kdb_printf_lock an irq lock to keep lockdep happy.
++ * kdb v4.4-2.6.22-common-3.
++
++2007-07-13 Keith Owens <kaos@sgi.com>
++
++ * Increase the size of the debug_alloc pool.
++ * Add the caller that obtained each entry in the debug_alloc pool.
++ * Poison entries in the debug_alloc pool.
++ * Track current and maximum usage in debug_alloc pool.
++ * Print the debug_alloc entries that are still in use when kdb exits
++ (memory leaks).
++ * Increase the default value of BTARGS to 9.
++ * kdb v4.4-2.6.22-common-2.
++
++2007-07-09 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-common-1.
++
++2007-07-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc7-common-1.
++
++2007-06-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc5-common-1.
++
++2007-06-15 Keith Owens <kaos@sgi.com>
++
++ * Do not include asm/kdb.h unless CONFIG_KDB is on. Dave Jiang.
++ * kdb v4.4-2.6.22-rc4-common-2.
++
++2007-06-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc4-common-1.
++
++2007-05-28 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc3-common-1.
++
++2007-05-22 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc2-common-1.
++
++2007-05-22 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.22-rc1-common-1.
++
++2007-05-17 Keith Owens <kaos@sgi.com>
++
++ * Add rdmsr and wrmsr commands for i386 and x86_64. Original patch by
++ Bernardo Innocenti for i386, reworked by Keith Owens to make it safe
++ on all cpu models and to handle both i386 and x86_64.
++ * kdb v4.4-2.6.21-common-3.
++
++2007-05-15 Keith Owens <kaos@sgi.com>
++
++ * Correct alignment of debug_alloc_header.
++ * kdb v4.4-2.6.21-common-2.
++
++2007-04-29 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-common-1.
++
++2007-04-16 Keith Owens <kaos@sgi.com>
++
++ * Remove dead symbol declarations.
++ * kdb v4.4-2.6.21-rc7-common-2.
++
++2007-04-16 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc7-common-1.
++
++2007-04-10 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc6-common-1.
++
++2007-04-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc5-common-1.
++
++2007-03-19 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc4-common-1.
++
++2007-03-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc3-common-1.
++
++2007-03-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc2-common-1.
++
++2007-03-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.21-rc1-common-1.
++
++2007-03-01 Keith Owens <kaos@sgi.com>
++
++ * Remove sparse warnings.
++ * kdb v4.4-2.6.20-common-6.
++
++2007-02-27 Keith Owens <kaos@sgi.com>
++
++ * set_irq_regs() on entry to kdb() if they are not already set.
++ * kdb v4.4-2.6.20-common-5.
++
++2007-02-22 Keith Owens <kaos@sgi.com>
++
++ * Initialise struct disassemble_info in kdb_id1().
++ * kdb v4.4-2.6.20-common-4.
++
++2007-02-16 Keith Owens <kaos@sgi.com>
++
++ * Clean up debug_alloc_pool code.
++ * kdb v4.4-2.6.20-common-3.
++
++2007-02-16 Keith Owens <kaos@sgi.com>
++
++ * Initialise variable bits of struct disassemble_info each time.
++ * kdb v4.4-2.6.20-common-2.
++
++2007-02-06 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-common-1.
++
++2007-02-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc7-common-1.
++
++2007-01-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc4-common-1.
++
++2007-01-02 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc3-common-1.
++
++2006-12-21 Keith Owens <kaos@sgi.com>
++
++ * Initialize the debug_kmalloc pool on the first call, so it can be
++ used at any time.
++ * kdb v4.4-2.6.20-rc1-common-2.
++
++2006-12-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.20-rc1-common-1.
++
++2006-11-30 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-common-1.
++
++2006-11-30 Keith Owens <kaos@sgi.com>
++
++ * Do not access registers if kdb_current_regs is NULL.
++ * kdb v4.4-2.6.19-rc6-common-3.
++
++2006-11-27 Keith Owens <kaos@sgi.com>
++
++ * Only use VT keyboard if the command line allows it and ACPI indicates
++ that there is an i8042.
++ * Optimize kdb_read() to reduce the risk of dropping input characters.
++ * Print cpumasks as lists instead of hex, also cope with long lists.
++ * kdb v4.4-2.6.19-rc6-common-2.
++
++2006-11-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc6-common-1.
++
++2006-11-09 Keith Owens <kaos@sgi.com>
++
++ * Change kdb() to fastcall.
++ * Correct loop in kdb_help(). Georg Nikodym.
++ * Only use VT console if the command line allows it.
++ * kdb v4.4-2.6.19-rc5-common-2.
++
++2006-11-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc5-common-1.
++
++2006-11-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc4-common-1.
++
++2006-10-24 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc3-common-1.
++
++2006-10-24 Keith Owens <kaos@sgi.com>
++
++ * Remove redundant regs and envp parameters.
++ * kdb v4.4-2.6.19-rc2-common-2.
++
++2006-10-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc2-common-1.
++
++2006-10-11 Keith Owens <kaos@sgi.com>
++
++ * Move kdbm_x86.c from the i386 to the common KDB patch.
++ * Expand kdbm_x86.c to work on x86_64 as well as i386.
++ * kdb v4.4-2.6.19-rc1-common-2.
++
++2006-10-09 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.19-rc1-common-1.
++
++2006-10-06 Keith Owens <kaos@sgi.com>
++
++ * Remove #include <linux/config.h>
++ * kdb v4.4-2.6.18-common-2.
++
++2006-09-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-common-1.
++
++2006-09-15 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc7-common-1.
++
++2006-08-29 Keith Owens <kaos@sgi.com>
++
++ * Rewrite all backtrace code.
++ * kdb v4.4-2.6.18-rc5-common-2.
++
++2006-08-28 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc5-common-1.
++
++2006-08-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc4-common-1.
++
++2006-08-04 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc3-common-1.
++
++2006-07-18 Keith Owens <kaos@sgi.com>
++
++ * 8250.c locking has been fixed so there is no need to break spinlocks
++ for keyboard entry.
++ * kdb v4.4-2.6.18-rc2-common-2.
++
++2006-07-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc2-common-1.
++
++2006-07-12 Keith Owens <kaos@sgi.com>
++
++ * Remove dead KDB_REASON codes.
++ * The main kdb() function is now always entered with interrupts
++ disabled, so there is no need to disable bottom halves.
++ * sparse cleanups.
++ * kdb v4.4-2.6.18-rc1-common-2.
++
++2006-07-07 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.18-rc1-common-1.
++
++2006-07-04 Keith Owens <kaos@sgi.com>
++
++ * Add KDB_REASON_CPU_UP and callbacks for cpus coming online.
++ * Relegate KDB_REASON_SILENT to KDB internal use only.
++ * Backout the v4.4-2.6.15-common-3 change that made KDB_REASON_SILENT
++ wait for cpus, the Dell Xeon problem has been fixed.
++ * notify_die() is not called for KDB_REASON_SILENT nor
++ KDB_REASON_CPU_UP, these events do not stay in KDB.
++ * Export kdb_current_task for kdbm_x86. SuSE patch
++ kdb-missing-export.diff
++ * Scale kdb_wait_for_cpus_secs by the number of online cpus.
++ * Delete kdb_enablehwfault, architectures now do their own setup.
++ * Delete kdba_enable_mce, architectures now do their own setup.
++ * Delete kdba_enable_lbr, kdba_disable_lbr, kdba_print_lbr,
++ page_fault_mca. Only ever implemented on x86, difficult to maintain
++ and rarely used in the field.
++ * Replace #ifdef KDB_HAVE_LONGJMP with #ifdef kdba_setjmp.
++ * kdb v4.4-2.6.17-common-2.
++
++2006-06-19 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-common-1.
++
++2006-05-31 Keith Owens <kaos@sgi.com>
++
++ * Break spinlocks for keyboard entry. Hopefully a temporary hack while
++ I track down why keyboard entry to KDB is hanging.
++ * kdb v4.4-2.6.17-rc5-common-2.
++
++2006-05-25 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc5-common-1.
++
++2006-05-15 Keith Owens <kaos@sgi.com>
++
++ * Refresh bfd related files from binutils 2.16.91.0.2.
++ * kdb v4.4-2.6.17-rc4-common-2.
++
++2006-05-12 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc4-common-1.
++
++2006-04-28 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc3-common-1.
++
++2006-04-22 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc2-common-1.
++
++2006-04-11 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.17-rc1-common-1.
++
++2006-04-05 Keith Owens <kaos@sgi.com>
++
++ * More fixes for the timing race with KDB_ENTER_SLAVE.
++ * kdb v4.4-2.6.16-common-5.
++
++2006-03-30 Keith Owens <kaos@sgi.com>
++
++ * Some code was testing KDB_IS_RUNNING() twice, which left it open to
++ races. Cache the result instead.
++ * kdb v4.4-2.6.16-common-4.
++
++2006-03-30 Keith Owens <kaos@sgi.com>
++
++ * Change CONFIG_LKCD to CONFIG_LKCD_DUMP.
++ * kdb v4.4-2.6.16-common-3.
++
++2006-03-22 Keith Owens <kaos@sgi.com>
++
++ * Add some more xpc flags. Dean Nelson, SGI.
++ * Replace open coded counter references with atomic_read().
++ * Pass early_uart_console to early_uart_setup(). Francois
++ Wellenreiter, Bull.
++ * Replace open code with for_each_online_cpu().
++ * If cpus do not come into kdb after a few seconds then let
++ architectures send a more forceful interrupt.
++ * Close a timing race with KDB_ENTER_SLAVE.
++ * kdb v4.4-2.6.16-common-2.
++
++2006-03-21 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.16-common-1.
++
++2006-03-14 Nathan Scott <nathans@sgi.com>
++
++ * kdb v4.4-2.6.16-rc6-common-1.
++
++2006-02-28 Nathan Scott <nathans@sgi.com>
++
++ * kdb v4.4-2.6.16-rc5-common-1.
++
++2006-02-20 Nathan Scott <nathans@sgi.com>
++
++ * kdb v4.4-2.6.16-rc4-common-1.
++
++2006-02-06 Keith Owens <kaos@sgi.com>
++
++ * Change CONFIG_CRASH_DUMP to CONFIG_LKCD.
++ * Remove obsolete kdb_notifier_list.
++ * kdb v4.4-2.6.16-rc2-common-2.
++
++2006-02-06 Keith Owens <kaos@sgi.com>
++
++ * Add xpcusers command. Dean Nelson, SGI.
++ * kdb v4.4-2.6.16-rc2-common-1.
++
++2006-02-02 Keith Owens <kaos@sgi.com>
++
++ * Check if we have a console before using it for KDB.
++ * kdb v4.4-2.6.16-rc1-common-3.
++
++2006-02-01 Keith Owens <kaos@sgi.com>
++
++ * Add option 'R' to the pid command to reset to the original task.
++ * Include 'pid R' in archkdb* commands to reset up the original failing
++ task. Users may have switched to other cpus and/or tasks before
++ issuing archkdb.
++ * Compile fix for kdbm_pg.c on i386.
++ * kdb v4.4-2.6.16-rc1-common-2.
++
++2006-01-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.16-rc1-common-1.
++
++2006-01-11 Keith Owens <kaos@sgi.com>
++
++ * Plug a timing race between KDB_ENTER_SLAVE and KDB_ENTER, and allow
++ the cpu command to switch to a slave cpu.
++ * KDB_REASON_SILENT now waits for other cpus, to avoid spurious NMI
++ events that were seen on some Xeon systems.
++ * kdb v4.4-2.6.15-common-3.
++
++2006-01-08 Keith Owens <kaos@sgi.com>
++
++ * kdb mainline invokes DIE_KDEBUG_ENTER and DIE_KDEBUG_LEAVE via
++ notify_die.
++ * Move xpc debug support from xpc to mainline kdb.
++ * kdbm_cm.c: check if file_lock_operations or lock_manager_operations
++ are set before dereferencing them. Felix Blyakher, SGI.
++ * kdb v4.4-2.6.15-common-2.
++
++2006-01-04 Keith Owens <kaos@sgi.com>
++
++ * Print all buffers on a page in inode pages and update formatting to be
++ legible, too. David Chinner, SGI.
++ * Update page flags in kdbm_pg.
++ * Remove inline from *.c files.
++ * kdb v4.4-2.6.15-common-1.
++
++2005-12-25 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc7-common-1.
++
++2005-12-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc6-common-1.
++
++2005-12-10 Keith Owens <kaos@sgi.com>
++
++ * Update mapping of flags to strings in kdbm_pg.c and kdbm_vm.c.
++ * kdb v4.4-2.6.15-rc5-common-3.
++
++2005-12-06 Keith Owens <kaos@sgi.com>
++
++ * Add RECOVERY flag to global KDB flags.
++ * Add kdb_{save,restore}_flags.
++ * kdb v4.4-2.6.15-rc5-common-2.
++
++2005-12-05 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc5-common-1.
++
++2005-12-02 Keith Owens <kaos@sgi.com>
++
++ * kdbm_vm.c: offsets of page macros should be unsigned long. Reported
++ by Dean Nelson, SGI.
++ * kdb v4.4-2.6.15-rc4-common-1.
++
++2005-11-30 Keith Owens <kaos@sgi.com>
++
++ * New follow_page() API.
++ * kdb v4.4-2.6.15-rc3-common-1.
++
++2005-11-21 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc2-common-1.
++
++2005-11-15 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.15-rc1-common-1.
++
++2005-11-15 Keith Owens <kaos@sgi.com>
++
++ * Allow kdb_printf() to be used outside kdb, in preemptible context.
++ * Build with CONFIG_SWAP=n. Reported by Leo Yuriev.
++ * kdb v4.4-2.6.14-common-2.
++
++2005-10-28 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.14-common-1.
++
++2005-10-21 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.14-rc5-common-1.
++
++2005-10-11 Keith Owens <kaos@sgi.com>
++
++ * Handle removal of USB keyboard. Aaron Young, SGI.
++ * kdb v4.4-2.6.14-rc4-common-1.
++
++2005-10-05 Keith Owens <kaos@sgi.com>
++
++ * Extend kdb_notifier_list() codes to include dumping.
++ * Use emergency_restart() for reboot, it can be called from interrupt
++ context, unlike machine_restart().
++ * kdb v4.4-2.6.14-rc3-common-1.
++
++2005-09-21 Keith Owens <kaos@sgi.com>
++
++ * Support kdb_current_task in register display and modify commands.
++ * Document what changes kdb's notion of the current task.
++ * Update rd documentation for IA64.
++ * Move some definitions to kdbprivate.h and remove some unused symbol
++ exports.
++ * kdb v4.4-2.6.14-rc2-common-1.
++
++2005-09-20 Keith Owens <kaos@sgi.com>
++
++ * Document IA64 handlers command.
++ * Add more fields to the task command.
++ * Cope with MCA/INIT handlers in the ps command.
++ * Namespace cleanup, delete unused exports, make some functions static.
++ * Add a kdb_notifier_list callback when kdb is about to reboot the
++ system.
++ * kdb v4.4-2.6.14-rc1-common-1.
++
++2005-08-29 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-common-1.
++
++2005-08-24 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-rc7-common-1.
++
++2005-08-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-rc6-common-1.
++
++2005-08-02 Keith Owens <kaos@sgi.com>
++
++ * Print more fields from filp, dentry.
++ * Add kdb=on-nokey to suppress kdb entry from the keyboard.
++ * kdb v4.4-2.6.13-rc5-common-1.
++
++2005-07-30 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-rc4-common-1.
++
++2005-07-26 Keith Owens <kaos@sgi.com>
++
++ * Fix compile problem with CONFIG_USB_KBD.
++ * kdb v4.4-2.6.13-rc3-common-3.
++
++2005-07-22 Keith Owens <kaos@sgi.com>
++
++ * The asmlinkage kdb() patch was lost during packaging. Reinstate it.
++ * kdb v4.4-2.6.13-rc3-common-2.
++
++2005-07-19 Keith Owens <kaos@sgi.com>
++
++ * Add support for USB keyboard (OHCI only). Aaron Young, SGI.
++ * kdb v4.4-2.6.13-rc3-common-1.
++
++2005-07-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.13-rc2-common-1.
++
++2005-07-01 Keith Owens <kaos@sgi.com>
++
++ * Make kdb() asmlinkage to avoid problems with CONFIG_REGPARM.
++ * Change some uses of smp_processor_id() to be preempt safe.
++ * Use DEFINE_SPINLOCK().
++ * kdb v4.4-2.6.13-rc1-common-1.
++
++2005-06-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.12-common-1.
++
++2005-06-08 Keith Owens <kaos@sgi.com>
++
++ * Correct early exit from bd *.
++ * kdb v4.4-2.6.12-rc6-common-1.
++
++2005-05-25 Keith Owens <kaos@sgi.com>
++
++ * Delete Documentation/kdb/dump.txt. lkcd now has reasonable
++ integration with kdb.
++ * kdb v4.4-2.6.12-rc5-common-1.
++
++2005-05-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.12-rc4-common-1.
++
++2005-04-21 Keith Owens <kaos@sgi.com>
++
++ * Add rpte command (find the pte for a physical page).
++ * kdb v4.4-2.6.12-rc3-common-1.
++
++2005-04-06 Keith Owens <kaos@sgi.com>
++
++ * Add rq and rqa commands. John Hawkes, SGI.
++ * kdb v4.4-2.6.12-rc2-common-1.
++
++2005-03-29 Keith Owens <kaos@sgi.com>
++
++ * Use register_sysctl_table() instead of patching kernel/sysctl.c.
++ * Non-ASCII characters are not printable.
++ * kdb v4.4-2.6.12-rc1-common-1.
++
++2005-03-15 Keith Owens <kaos@sgi.com>
++
++ * More coexistence patches for lkcd. Jason Uhlenkott, SGI.
++ * kdb v4.4-2.6.11-common-3.
++
++2005-03-08 Keith Owens <kaos@sgi.com>
++
++ * Coexistence patches for lkcd. Jason Uhlenkott, SGI.
++ * kdb v4.4-2.6.11-common-2.
++
++2005-03-03 Keith Owens <kaos@sgi.com>
++
++ * Add kdb to drivers/serial/8250_early.c. Francois Wellenreiter, Bull.
++ * kdb v4.4-2.6.11-common-1.
++
++2005-02-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.11-rc4-common-1.
++
++2005-02-08 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.11-rc3-bk4-common-1.
++
++2005-02-03 Keith Owens <kaos@sgi.com>
++
++ * Print more superblock fields. Nathan Scott, SGI.
++ * Remove kallsyms correction for modules, Linus took it.
++ * kdb v4.4-2.6.11-rc3-common-1.
++
++2005-01-27 Keith Owens <kaos@sgi.com>
++
++ * Add bio command. Nathan Scott, SGI.
++ * kdb v4.4-2.6.11-rc2-common-1.
++
++2005-01-20 Keith Owens <kaos@sgi.com>
++
++ * Include kallsyms correction for modules until Linus takes it.
++ * kdb v4.4-2.6.11-rc1-bk7-common-1.
++
++2005-01-12 Keith Owens <kaos@sgi.com>
++
++ * kallsyms now supports all symbols properly, remove kdb patch.
++ * Add last ditch allocator for debugging.
++ * Update kdb_meminfo_read_proc() for vmalloc changes.
++ * Update kdbm_vm.c for 4 level page tables.
++ * kdb v4.4-2.6.11-rc1-common-1.
++
++2004-12-25 Keith Owens <kaos@sgi.com>
++
++ * Add kobject command.
++ * Ignore low addresses and large offsets in kdbnearsym().
++ * Console updates for sn2 simulator.
++ * kdb v4.4-2.6.10-common-1.
++
++2004-12-07 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.10-rc3-common-1.
++
++2004-11-23 Keith Owens <kaos@sgi.com>
++
++ * Remove warning message from kdb_get_one_user_page(), it was too noisy.
++ * kdb v4.4-2.6.10-rc2-common-1.
++
++2004-11-02 Keith Owens <kaos@sgi.com>
++
++ * Build with kdb patch applied but CONFIG_KDB=n.
++ * kdb v4.4-2.6.10-rc1-common-2.
++
++2004-10-29 Keith Owens <kaos@sgi.com>
++
++ * Handle new compression scheme for kallsyms.
++ * Handle move of DEAD and ZOMBIE for task->state to task->exit_state.
++ * Tweak the concept of a valid kernel address to get all symbols,
++ including the symbols in the ia64 gate page.
++ * kdb v4.4-2.6.10-rc1-common-1.
++
++2004-10-21 Keith Owens <kaos@sgi.com>
++
++ * Handle variable size for the kernel log buffer.
++ * kdb v4.4-2.6.9-common-2.
++
++2004-10-19 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.9-common-1.
++
++2004-10-12 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.9-rc4-common-1.
++
++2004-10-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.9-rc3-common-1.
++
++2004-09-30 Keith Owens <kaos@sgi.com>
++
++ * Add stackdepth command to Documentation/kdb/kdb.mm. stackdepth is
++ only supported on i386 and ia64 at the moment.
++ * Skip kdbm_pg memmap build on x86_64. Scott Lurndal, 3leafnetworks.
++ * Export kdb_serial_str for modular I/O. Bryan Cardillo, UPenn.
++ * Reinstate tab completion for symbols.
++ * kdb v4.4-2.6.9-rc2-common-2.
++
++2004-09-14 Keith Owens <kaos@sgi.com>
++
++ * Add task states C (traCed) and E (dEad).
++ * kdb v4.4-2.6.9-rc2-common-1.
++
++2004-08-27 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.9-rc1-common-1.
++
++2004-08-14 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.8-common-1.
++
++2004-08-12 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.8-rc4-common-1.
++
++2004-08-05 Keith Owens <kaos@sgi.com>
++
++ * Mark kdb_initcall as __attribute_used__ for newer gcc.
++ * kdb v4.4-2.6.8-rc3-common-2.
++
++2004-08-04 Keith Owens <kaos@sgi.com>
++
++ * Add mdp (memory display physical) command.
++ Ananth N Mavinakayanahalli, IBM.
++ * kdb v4.4-2.6.8-rc3-common-1.
++
++2004-07-18 Keith Owens <kaos@sgi.com>
++
++ * Patch for new sn_console. Erik Jacobson. SGI.
++ * kdb v4.4-2.6.8-rc2-common-1.
++
++2004-07-12 Keith Owens <kaos@sgi.com>
++
++ * Convert kdbm_task to standard cpumask_t.
++ * Document '*' (all breakpoints) option on bd/be/bc commands.
++ * kdb v4.4-2.6.8-rc1-common-1.
++
++2004-06-30 Keith Owens <kaos@sgi.com>
++
++ * Common changes to help the x86-64 port.
++ * kdb v4.4-2.6.7-common-3.
++
++2004-06-20 Keith Owens <kaos@sgi.com>
++
++ * Move kdb includes in mm/swapfile.c to reduce conflicts with other
++ SGI patches.
++ * kdb v4.4-2.6.7-common-2.
++
++2004-06-16 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.7-common-1.
++
++2004-06-09 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.7-rc3-common-1.
++
++2004-06-09 Keith Owens <kaos@sgi.com>
++
++ * Namespace clean up. Mark code/variables as static when it is only
++ used in one file, delete dead code/variables.
++ * Saved interrupt state requires long, not int.
++ * kdb v4.4-2.6.7-rc2-common-3.
++
++2004-06-08 Keith Owens <kaos@sgi.com>
++
++ * Whitespace clean up, no code changes.
++ * kdb v4.4-2.6.7-rc2-common-2.
++
++2004-06-07 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.4-2.6.7-rc2-common-1.
++
++2004-06-06 Keith Owens <kaos@sgi.com>
++
++ * Avoid recursion problems in kdb_init().
++ * Add standard archkdb commands.
++ * Add per_cpu command.
++ * Move kdb_{get,put}userarea_size definitions to linux/kdb.h.
++ * kdb v4.4-2.6.6-common-2.
++
++2004-05-23 Keith Owens <kaos@sgi.com>
++
++ * Shrink the output from the cpu command.
++ * Add cpu state 'I', the cpu is idle.
++ * Add cpu state '+', some kdb data is available but the cpu is not
++ responding.
++ * Do not print tasks in state I or M by default in ps and bta commands.
++ * Add states I (idle task) and M (sleeping system daemon) to ps and
++ bta commands.
++ * Delete unused variables.
++ * Move private kdb fields from kdb.h to kdbprivate.h.
++ * Print 'for keyboard entry' for the special cases when KDB_ENTER() is
++ used to get registers.
++ * Move bfd.h and ansidecl.h from arch/$(ARCH)/kdb to include/asm-$(ARCH)
++ and remove -I arch/$(ARCH)/kdb.
++ * dmesg command now prints from either the start or end of dmesg, or at
++ an arbitrary point in the middle of the kernel log buffer.
++ * Sensible string dump for multi byte md commands.
++ * 'page' command handles ia64 correctly.
++ * Show some activity when waiting for cpus to enter kdb.
++ * Change the KDB entry code to <esc>KDB.
++ * Allow comment commands, starting with '#'.
++ * Commands defined using defcmd from kdb_cmds are not printed as they
++ are entered, use defcmd with no parameters to print all the defined
++ commands.
++ * Add summary command.
++ * Update copyright notices.
++ * Zero suppression on md command.
++ * Make set NOSECT=1 the default.
++ * PPC64 uses OF-stdout instead of console. Ananth N Mavinakayanahalli.
++ * kdb v4.4-2.6.6-common-1.
++
++2004-05-10 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.6-common-1.
++
++2004-05-06 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.6-rc3-common-1.
++
++2004-05-06 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.6-rc2-common-1.
++
++2004-04-30 Keith Owens <kaos@sgi.com>
++
++ * Rewrite inode_pages command for new radix code in struct page.
++ * kdb v4.3-2.6.6-rc1-common-1.
++
++2004-04-11 Keith Owens <kaos@sgi.com>
++
++ * Unlock sn_sal_lock before entering kdb from sn_serial.
++ * kdb v4.3-2.6.5-common-2.
++
++2004-04-05 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.5-common-1.
++
++2004-03-22 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.5-rc2-common-1.
++
++2004-03-12 Keith Owens <kaos@sgi.com>
++
++ * More work to avoid spurious messages from WARN_CONSOLE_UNLOCKED().
++ * bh command bug fixes. Nathan Scott.
++ * kdb v4.3-2.6.4-common-1.
++
++2004-03-06 Keith Owens <kaos@sgi.com>
++
++ * Set KDB_IS_RUNNING() during kdb_init to avoid spurious messages from
++ WARN_CONSOLE_UNLOCKED().
++ * Correct loss of symbol names in kdbnearsym.
++ * kdb v4.3-2.6.4-rc2-common-1.
++
++2004-02-29 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.4-rc1-common-1.
++
++2004-02-21 Keith Owens <kaos@sgi.com>
++
++ * Correct build of kdb_cmds when using a separate object directory and
++ make it quiet. j-nomura (NEC), Keith Owens.
++ * kdb v4.3-2.6.3-common-2.
++
++2004-02-18 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.6.3-common-1.
++
++2004-02-17 Keith Owens <kaos@sgi.com>
++
++ * Remove WAR for incorrect console registration patch.
++ * kdb v4.3-2.6.3-rc4-common-1.
++
++2004-02-17 Keith Owens <kaos@sgi.com>
++
++ * Convert longjmp buffers from static to dynamic allocation, for large
++ cpu counts.
++ * Tweak kdbm_task for SMP/UP.
++ * Reconcile with kdb-v4.3 2.4.25-rc1-common-1.
++ * Simplify coexistence with NPTL patches.
++ * Support kill command on new scheduler.
++ * Do not refetch data when printing a value as characters.
++ * Document the pid command.
++ * Work around 2.6 kallsyms 'feature'.
++ * Upgrade to 2.6.3-rc3.
++ * WAR for incorrect console registration patch.
++ * kdb v4.3-2.6.3-rc3-common-1.
++
++2003-12-03 Keith Owens <kaos@sgi.com>
++
++ * Reconcile 2.6-test versions from Xavier Bru (Bull), Greg Banks (SGI),
++ Jim Houston (Concurrent Computer Corp).
++ * Reconcile with kdb v4.3-2.4.23-common-2.
++ * Clean up CONFIG_KDB changes to {scripts,kernel}/kallsyms.c.
++ * Correct handling of kdb command line arguments.
++ * Make hooks into module code less intrusive.
++ * Delete kdb_active_task, not required with O(1) scheduler.
++ * Port kdbm_task.c from 2.4.
++ * Disable debug check in exit.c::next_thread() when kdb is running.
++ * Remove "only bh_disable when interrupts are set". BH must be disabled
++ in kdb to prevent deadlock on breakpoints in interrupt handlers.
++ * Add kdb to drivers/char/sn_serial.c.
++ * kdb v4.3-2.6.0-test11-common-1.
++
++2003-11-11 Xavier Bru <xavier.bru@bull.net>
++ * Merge to 2.6.0-test9
++2003-10-17 Xavier Bru <xavier.bru@bull.net>
++ * fix NULL ptr in kdb_ps at early prompt.
++2003-10-14 Xavier Bru <xavier.bru@bull.net>
++ * fix NULL ptr in kdb_ps when cpu not present.
++2003-10-06 Xavier Bru <xavier.bru@bull.net>
++ * Merge to 2.6.0-test5
++ * fix compile error with CONFIG_MODULES not set.
++
++2003-09-08 Xavier Bru <xavier.bru@bull.net>
++ * Merge to 2.6.0-test4
++
++2003-07-10 Xavier Bru <xavier.bru@bull.net>
++
++ * Merge kdb v4.3 to 2.5.72 ia64
++ * don't call local_bh_enable() with interrupts masked.
++
++2003-04-07 Xavier Bru <xavier.bru@bull.net>
++
++ * Merge kdb v4.1 to 2.5.64 ia64
++ * new kernel parameters support
++ * new module format
++ * new kallsyms support
++
++2003-12-02 Keith Owens <kaos@sgi.com>
++
++ * Use correct page alignment in kdb_get_one_user_page().
++ Prasanna S Panchamukhi, IBM.
++ * Split pte command into pte -m and pte -p. Dean Roe, SGI.
++ * kdb v4.3-2.4.23-common-2.
++
++2003-12-01 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.4.23-common-1.
++
++2003-11-11 Keith Owens <kaos@sgi.com>
++
++ * Make KDB for USB keyboards build. Peter T. Breuer.
++ * Do not use USB keyboard if it has not been probed.
++ * kdb v4.3-2.4.23-rc1-common-1.
++
++2003-10-10 Keith Owens <kaos@sgi.com>
++
++ * Sync with XFS 2.4.22 tree.
++ * kdb v4.3-2.4.22-common-2.
++
++2003-08-29 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.4.22-common-1.
++
++2003-07-27 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.4.22-pre8-common-8.
++
++2003-07-20 Keith Owens <kaos@sgi.com>
++
++ * Make kdb_serial_str a common constant, the same for all consoles.
++ * Support SGI L1 console.
++ * kdb v4.3-2.4.21-common-8.
++
++2003-07-14 Keith Owens <kaos@sgi.com>
++
++ * Correct ll command.
++ * kdb v4.3-2.4.21-common-7.
++
++2003-07-08 Keith Owens <kaos@sgi.com>
++
++ * Export more kdb symbols. Vamsi Krishna S., IBM.
++ * kdb v4.3-2.4.21-common-6.
++
++2003-07-07 Keith Owens <kaos@sgi.com>
++
++ * Tweak 'waiting for cpus' message.
++ * kdb v4.3-2.4.21-common-5.
++
++2003-07-07 Keith Owens <kaos@sgi.com>
++
++ * 2.4.21-ia64-030702 patches common code that affects kdb. Workaround
++ this nuisance.
++ * kdb v4.3-2.4.21-common-4.
++
++2003-06-24 Keith Owens <kaos@sgi.com>
++
++ * Add task and sigset commands. Mark Goodwin, SGI.
++ * kdb v4.3-2.4.21-common-3.
++
++2003-06-23 Keith Owens <kaos@sgi.com>
++
++ * Sync with XFS 2.4.21 tree.
++ * kdb v4.3-2.4.21-common-2.
++
++2003-06-20 Keith Owens <kaos@sgi.com>
++
++ * kdb v4.3-2.4.21-common-1.
++
++2003-06-20 Keith Owens <kaos@sgi.com>
++
++ * More details on vm command, add vmp and pte commands.
++ Dean Nelson, Dean Roe, SGI.
++ * YAO1SCF (Yet Another O(1) Scheduler Coexistence Fix).
++ * Changes to common code to build on sparc. Tom Duffy.
++ * Move Tom Duffy's changes to drivers/sbus from the sparc64
++ patch to the common patch to keep all the serial changes
++ together.
++ * Changes to common code to build on Xscale. Eddie Dong, Intel.
++ * Remove CROSS_COMPILE_INC.
++ * Remove obsolete boot parameter 'kdb', long since replaced by
++ 'kdb=on'.
++ * Remove obsolete kdb_eframe_t casts.
++ * Add CONFIG_KDB_CONTINUE_CATASTROPHIC.
++ * Wait a short interval for cpus to join kdb before proceeding.
++ * Automatically enable sysrq for sr command.
++ * Correct double free of kdb_printf lock, spotted by Richard Sanders.
++ * Add optional cpu parameter to btc command.
++ * kdb v4.3-2.4.20-common-1.
++
++2003-05-02 Keith Owens <kaos@sgi.com>
++
++ * Some architectures have problems with the initial empty kallsyms
++ section so revert to three kallsyms passes.
++ * Flush buffered input at startup and at 'more' prompt.
++ * Only print 'more' prompt when longjmp data is available.
++ * Print more data for buffers and inodes.
++ * Disable kill command when O(1) scheduler is installed, the code
++ needs to be redone for O(1).
++ * The kernel has an undocumented assumption that enable_bh() is
++ always called with interrupts enabled, make it so.
++ * Print trailing punctuation even for symbols that are not in kernel.
++ * Add read/write access to user pages. Vamsi Krishna S., IBM
++ * Rename cpu_is_online to cpu_online, as in 2.5.
++ * O(1) scheduler removes init_task so kdb maintains its own list of
++ active tasks.
++ * Delete btp 0 <cpuid> option, it needed init_tasks.
++ * Clean up USB keyboard support. Steven Dake.
++ * Sync with XFS 2.4.20 tree.
++ * kdb v4.2-2.4.20-common-1.
++
++2003-04-04 Keith Owens <kaos@sgi.com>
++
++ * Remove one kallsyms pass.
++ * Automatic detection of O(1) scheduler.
++ * Rename cpu_online to cpu_is_online.
++ * Workarounds for scheduler bugs.
++ * Tweak algorithm for detecting if cpu process data is available.
++ * Add 'kill' command. Sonic Zhang, Keith Owens.
++ * kdb v4.1-2.4.20-common-1.
++
++2003-03-16 Keith Owens <kaos@sgi.com>
++
++ * Each cpu saves its state as it enters kdb or before it enters code
++ which cannot call kdb.
++ * Allow btp on process 0 for a specified cpu.
++ * Add btt command, backtrace given a struct task address.
++ * btc command no longer switches cpus, instead it uses the saved data.
++ * bta shows the idle task on each cpu as well as real tasks, the idle
++ task could be handling an interrupt.
++ * ps command shows the idle task on each cpu.
++ * ps checks that the saved data for a cpu matches the process running on
++ that cpu and warns about stale saved data or no saved data at all.
++ * Remove special cases for i386 backtrace from common code and simplify
++ common bt code.
++ * Clean up kdb interaction with CONFIG_SERIAL_CONSOLE.
++ * Do not automatically repeat commands after the user typed 'q'.
++ * O(1) scheduler patch changes the process cpu field but does not set
++ any indicator that O(1) is being used. Adjust kdb_process_cpu() by
++ hand after applying O(1).
++ * Add kdb_print_nameval() to common code.
++ * Convert tests of cpu_online_map to cpu_online() macro.
++ * module.h needs errno.h when compiling with CONFIG_MODULES=n.
++ * Correct duplicate breakpoint handling.
++ * Do not try to send IPI during a catastrophic error, send_ipi can hang
++ and take kdb with it.
++ * kdb memmap command is i386 only, restrict it.
++ * Add large block device (LBD) support from XFS tree. Eric Sandeen.
++ * kdb v4.0-2.4.20-common-1.
++
++2003-02-03 Keith Owens <kaos@sgi.com>
++
++ * Register kdb commands early.
++ * Decode oops via kallsyms if it is available.
++ * Update copyright notices to 2003.
++ * Add defcmd/endefcmd to allow users to package their own macros.
++ * kdb commands that fail are ignored when prefixed with '-'.
++ * Add selection options to bta command.
++ * Add btc command (switch to each cpu and backtrace).
++ * Do real time detection of dead cpus.
++ * Clear ip adjusted flag when leaving kdb.
++ * Clean up ps command.
++ * Print ps output for each task when backtracing.
++ * Bump to version v3.0 to reduce confusion between kdb and kernel
++ version numbers.
++ * Add kdba_local_arch_setup/kdba_local_arch_cleanup to correct
++ keyboard freeze. Ashish Kalra.
++ * Refuse multiple breakpoints at the same address.
++ * Add fl (file_lock) command, from XFS development tree.
++ * Correct inode_pages, from XFS development tree.
++ * Add command history and editing. Sonic Zhang.
++ * Extend command history and editing to handle vt100 escape sequences.
++ * Allow tab completion at start of line.
++ * Touch nmi watchdog on long running bta and btc commands.
++ * Clean up ps output and standardize with bta codes.
++ * Correctly handle escaped characters in commands.
++ * Update man pages for btc and command history/editing.
++ * kdb v3.0-2.4.20-common-1.
++
++2002-11-29 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.20.
++ * Correct Documentation/kdb/kdb_sr.man.
++ * Remove leading zeroes from pids, they are decimal, not octal.
++ * kdb v2.5-2.4.20-common-1.
++
++2002-11-14 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.20-rc1.
++ * kdb v2.5-2.4.20-rc1-common-1.
++
++2002-11-14 Keith Owens <kaos@sgi.com>
++
++ * Fix processing with O(1) scheduler.
++ * 'go' switches back to initial cpu first.
++ * 'go <address>' only allowed on initial cpu.
++ * 'go' installs the global breakpoints from the initial cpu before
++ releasing the other cpus.
++ * If 'go' has to single step over a breakpoint then it single steps just
++ the initial cpu, installs the global breakpoints then releases the
++ other cpus.
++ * General clean up of handling for breakpoints and single stepping over
++ software breakpoints.
++ * Add kdb_notifier_block so other code can tell when kdb is in control.
++ * kdb v2.5-2.4.19-common-1.
++
++2002-11-02 Keith Owens <kaos@sgi.com>
++
++ * Correct build without CONFIG_KDB.
++ * kdb v2.4-2.4.19-common-3.
++
++2002-11-01 Keith Owens <kaos@sgi.com>
++
++ * Minimize differences from 2.5.44.
++ * kdb v2.4-2.4.19-common-2.
++
++2002-10-31 Keith Owens <kaos@sgi.com>
++
++ * Add defcmd/endefcmd feature.
++ * Remove kdb_eframe_t.
++ * Clear bp data before using.
++ * Sanity check if we have pt_regs.
++ * Force LINES > 1.
++ * Remove special case for KDB_REASON_PANIC, use KDB_ENTER() instead.
++ * Remove kdba_getcurrentframe().
++ * Coexist with O(1) scheduler.
++ * Add lines option to dmesg, speed up dmesg.
++ * kdb v2.4-2.4.19-common-1.
++
++2002-10-17 Keith Owens <kaos@sgi.com>
++
++ * Add selection criteria to ps and bta commands.
++ * kdb v2.3-2.4.19-common-4.
++
++2002-10-07 Keith Owens <kaos@sgi.com>
++
++ * New man page, Documentation/kdb/kdb_sr.man.
++
++2002-10-04 Keith Owens <kaos@sgi.com>
++
++ * Minimize differences between patches for 2.4 and 2.5 kernels.
++ * Add Configure.help for CONFIG_KDB_USB.
++ * Reduce stack usage.
++ * kdb v2.3-2.4.19-common-3.
++
++2002-08-10 Keith Owens <kaos@sgi.com>
++
++ * Replace kdb_port with kdb_serial to support memory mapped I/O.
++ David Mosberger.
++ * kdb v2.3-2.4.19-common-2.
++
++2002-08-07 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19.
++ * Remove individual SGI copyrights, the general SGI copyright applies.
++ * Handle md0. Reported by Hugh Dickins, different fix by Keith Owens.
++ * Use page_address() in kdbm_pg.c. Hugh Dickins.
++ * Remove debugging printk from kdbm_pg.c. Hugh Dickins.
++ * Move breakpoint address verification into arch dependent code.
++ * Dynamically resize kdb command table as required.
++ * Common code to support USB keyboard. Sebastien Lelarge.
++ * kdb v2.3-2.4.19-common-1.
++
++2002-07-09 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19-rc1.
++ * Add dmesg command.
++ * Clean up copyrights, Eric Sandeen.
++ * kdb v2.2-2.4.19-rc1-common-1.
++
++2002-06-14 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19-pre10.
++ * Sync with XFS.
++ * kdb v2.1-2.4.19-pre10-common-1.
++
++2002-04-09 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.19-pre6.
++ * kdb v2.1-2.4.19-pre6-common-1.
++
++2002-03-18 Keith Owens <kaos@sgi.com>
++
++ * Syntax check mdWcN commands.
++
++2002-03-01 Keith Owens <kaos@sgi.com>
++
++ * Sync with XFS 2.4.18.
++ * kdb v2.1-2.4.18-common-2.
++
++2002-02-26 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.18.
++ * Add Paul Dorwin (IBM) magicpoint slides on using kdb as
++ Documentation/kdb/slides.
++ * kdb v2.1-2.4.18-common-1.
++
++2002-01-23 Keith Owens <kaos@sgi.com>
++
++ * Sync with XFS pagebuf changes.
++ * kdb v2.1-2.4.17-common-2.
++
++2002-01-18 Keith Owens <kaos@sgi.com>
++
++ * Ignore single stepping during panic.
++ * Remove kdba_getword, kdba_putword. Replace with kdb_getword,
++ kdb_putword that rely on copy_xx_user. The new functions return
++ an error code, like copy_xx_user.
++ * New functions kdb_getarea, kdb_putarea for copying areas of data
++ such as structures. These functions also return an error code.
++ * Change all common code to use the new functions.
++ * bp command checks that it can read and write the word at the
++ breakpoint before accepting the address.
++ * Break points are now set FIFO and cleared LIFO so overlapping
++ entries give sensible results.
++ * Verify address before disassembling code.
++ * Common changes for sparc64. Ethan Solomita, Tom Duffy.
++ * Remove ss <count>, never supported.
++ * Remove kallsyms entries from arch vmlinux.lds files.
++ * Specify which commands auto repeat.
++ * kdb v2.1-2.4.17-common-1.
++
++2002-01-07 Keith Owens <kaos@sgi.com>
++
++ * Remove console semaphore code, not good in interrupt.
++ * Remove fragment of ia64 patch that had crept into kdb.
++ * Release as kdb v2.0-2.4.17-common-3.
++
++2002-01-04 Keith Owens <kaos@sgi.com>
++
++ * Sync xfs <-> kdb common code.
++
++2001-12-22 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.17.
++ * Clean up ifdef CONFIG_KDB.
++ * Add ifdef CONFIG_KDB around include kdb.h.
++ * Delete dummy kdb.h files for unsupported architectures.
++ * Delete arch i386 and ia64 specific files. This changelog now
++ applies to kdb common code only.
++ * Release as kdb v2.0-2.4.17-common-1.
++
++2001-12-03 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.16.
++ * Add include/asm-um/kdb.h stub to allow XFS to be tested under UML.
++ * Check if an interrupt frame on i386 came from user space.
++ * Out of scope bug fix in kdb_id.c. Ethan Solomita.
++ * Changes to common code to support sparc64. Ethan Solomita.
++ * Change GFP_KERNEL to GFP_ATOMIC in disasm. Ethan Solomita.
++
++2001-11-16 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.15-pre5.
++ * Wrap () around #define expressions with unary operators.
++
++2001-11-13 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.15-pre4.
++ * kdbm_pg.c patch from Hugh Dickins.
++
++2001-11-07 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to 2.4.14-ia64-011105.
++ * Change name of l1 serial I/O routine, add ia64 init command. SGI.
++ * Sync kdbm_pg with XFS.
++
++2001-11-06 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to kernel 2.4.14.
++
++2001-11-02 Keith Owens <kaos@sgi.com>
++
++ * Sync kdbm_pg.c with XFS.
++
++2001-10-24 Keith Owens <kaos@sgi.com>
++
++ * Upgrade to kernel 2.4.13.
++
++2001-10-14 Keith Owens <kaos@melbourne.sgi.com>
++
++ * More use of TMPPREFIX in top level Makefile to speed up NFS compiles.
++
++ * Correct repeat calculations in md/mds commands.
++
++2001-10-10 Keith Owens <kaos@melbourne.sgi.com>
++
++ * Copy bfd.h and ansidecl.h to arch/$(ARCH)/kdb, remove dependencies on
++ user space includes.
++
++ * Update kdb v1.9 to kernel 2.4.11.
++
++2001-10-01 Keith Owens <kaos@melbourne.sgi.com>
++
++ * Update kdb v1.9 to kernel 2.4.11-pre1 and 2.4.10-ac1.
++
++ * Correct loop in kdb_parse, reported by Tachino Nobuhiro.
++
++2001-09-25 Keith Owens <kaos@melbourne.sgi.com>
++
++ * Update kdb v1.8 to kernel 2.4.10.
++
++ * kdbm_pg patch from Hugh Dickins.
++
++ * DProbes patch from Bharata B Rao.
++
++ * mdWcn and mmW patch from Vamsi Krishna S.
++
++ * i386 disasm layout patch from Jean-Marc Saffroy.
++
++ * Work around for 64 bit binutils, Simon Munton.
++
++ * kdb.mm doc correction by Chris Pascoe.
++
++ * Enter repeats the last command, IA64 disasm only prints one
++ instruction. Don Dugger.
++
++ * Allow kdb/modules to be linked into vmlinux.
++
++ * Remove obsolete code from kdb/modules/kdbm_{pg,vm}.c.
++
++ * Warn when commands are entered at more prompt.
++
++ * Add MODULE_AUTHOR, DESCRIPTION, LICENSE.
++
++ * Release as kdb v1.9.
++
++2001-02-27 Keith Owens <kaos@melbourne.sgi.com>
++
++ * Update kdb v1.8 to kernel 2.4.2, sync kdb/modules with XFS.
++
++ * Hook into panic() call.
++
++2000-12-18 Keith Owens <kaos@melbourne.sgi.com>
++
++ * Update kdb v1.7 to kernel 2.4.0-test13-pre3, sync kdb/modules with
++ XFS.
++
++2000-11-18 Keith Owens <kaos@melbourne.sgi.com>
++
++ * Update to kernel 2.4.0-test11-pre7, including forward port of
++ bug fixes from WIP 2.4.0-test9 tree.
++
++ * Update to Cygnus CVS trees for disassembly code.
++
++ * Bump to kdb v1.6.
++
++2000-10-19 Keith Owens <kaos@melbourne.sgi.com>
++
++ * Update to kernel 2.4.0-test10-pre4.
++
++2000-10-15 Keith Owens <kaos@melbourne.sgi.com>
++
++ * kdb/kdbmain.c (kdb_parse): Correctly handle blank input.
++
++ * kdb/kdbmain.c (kdb_local, kdb): Reason SILENT can have NULL regs.
++
++2000-10-13 Keith Owens <kaos@melbourne.sgi.com>
++
++ * kdb/kdbmain.c: Reduce CMD_LEN to avoid overflowing kdb_printf buffer.
++
++2000-10-11 Keith Owens <kaos@melbourne.sgi.com>
++
++ * kdb/kdbmain.c (kdb): Test for userspace breakpoints before driving
++ other cpus into kdb. Speeds up gdb and avoids SMP race.
++
++ * arch/i386/kdb/kdba_io.c (get_serial_char, get_kbd_char): Ignore
++ unprintable characters.
++
++ * arch/i386/kdb/kdba_io.c (kdba_read): Better handling of buffer size.
++
++2000-10-04 Keith Owens <kaos@melbourne.sgi.com>
++
++ * arch/i386/kdb/kdba_bt.c (kdba_bt_process): Verify that esp is inside
++ task_struct. Original patch by Mike Galbraith.
++
++ * kdb/kdb_io.c (kdb_getstr): Reset output line counter, remove
++ unnecessary prompts.
++
++ * arch/i386/kdb/kdbasupport.c (kdb_getregcontents): Change " cs" to
++ "xcs", ditto ss, ds, es. gdb2kdb does not like leading spaces.
++
++ * include/asm-xxx/kdb.h: Add dummy kdb.h for all architectures except
++ ix86. This allows #include <linux/kdb.h> to appear in arch independent
++ code without causing compile errors.
++
++ * kdb/modules/kdbm_pg: Sync with XFS.
++
++2000-10-03 Keith Owens <kaos@melbourne.sgi.com>
++
++ * kdb/kdb_io.c (kdb_read): Ignore NMI while waiting for input.
++
++ * kdb/kdb_io.c, kdb/Makefile: Export kdb_read.
++
++2000-10-02 Keith Owens <kaos@melbourne.sgi.com>
++
++ * arch/i386/kernel/smpboot.c (do_boot_cpu): Set nmi_watchdog_source to 2
++ to avoid premature NMI oops during cpu bring up. We have to assume that
++ a box with more than 1 cpu has a working IO-APIC.
++
++ * Documentation/kdb/{kdb.mm,kdb_md.man}: Add mdr command.
++
++ * kdb/kdbmain.c (kdb_md): Add mdr command.
++
++ * Release as kdb v1.5 against 2.4.0-test9-pre8.
++
++ * arch/i386/kdb/kdba_io.c, arch/i386/kdb/kdbasupport.c, kdb/kdbmain.c,
++ kdb/kdb_io.c, kdb/kdb_id.c: Remove zero initializers for static
++ variables.
++
++2000-09-28 Keith Owens <kaos@melbourne.sgi.com>
++
++ * various: Add nmi_watchdog_source, 1 local APIC, 2 IO-APIC.
++ Test nmi_watchdog_source instead of nr_ioapics so UP works on SMP hardware.
++
++ * arch/i386/kernel/io_apic.c: Rename setup_nmi to setup_nmi_io for clarity.
++
++ * kdb/kdbmain.c (kdb_parse): Only set NO_WATCHDOG if it was already set.
++
++ * kdb/kdbmain.c (kdb): Clear NO_WATCHDOG on all exit paths.
++
++ * include/linux/kdb.h: Add KDB_REASON_SILENT.
++
++ * kdb/kdbmain.c (kdb_local): Treat reason SILENT as immediate 'go'.
++
++ * kdb/kdbmain.c (kdb_init): Invoke kdb with reason SILENT to instantiate
++ any breakpoints on boot cpu.
++
++ * arch/i386/kernel/smpboot.c (smp_callin): Invoke kdb with reason SILENT
++ to instantiate any global breakpoints on this cpu.
++
++ * kdb/kdb_cmds: Remove comment that said initial commands only worked on
++ boot cpu.
++
++2000-09-27 Keith Owens <kaos@melbourne.sgi.com>
++
++ * arch/i386/kernel/msr.c: Move {rd,wr}msr_eio to include/asm-i386/apic.h.
++
++ * include/asm-i386/apic.h: Define NMI interfaces.
++
++ * kernel/sysctl.c (kern_table):
++ * kernel/sysctl.c (do_proc_set_nmi_watchdog):
++ Add /proc/sys/kernel/nmi_watchdog.
++
++ * arch/i386/kernel/apic.c: New routines set_nmi_counter_local,
++ setup_apic_nmi_watchdog.
++
++ * arch/i386/kernel/traps.c: New routine set_nmi_watchdog(). Call apic
++ routines to set/clear local apic timer.
++
++2000-09-26 Keith Owens <kaos@melbourne.sgi.com>
++
++ * include/linux/sysctl.h (enum): Add NMI_WATCHDOG.
++
++ * arch/i386/kernel/traps.c (nmi_watchdog_tick): Check nmi_watchdog is
++ still on.
++
++ * arch/i386/config.in: Add CONFIG_UP_NMI_WATCHDOG.
++
++ * Documentation/Configure.help: Add CONFIG_UP_NMI_WATCHDOG.
++
++ * Documentation/nmi_watchdog.txt: Update for UP NMI watchdog.
++
++2000-09-25 Keith Owens <kaos@melbourne.sgi.com>
++
++ * arch/i386/kernel/apic.c (init_apic_mappings):
++ * arch/i386/kernel/io_apic.c (IO_APIC_init_uniprocessor):
++ Merge Keir Fraser's local APIC for uniprocessors patch.
++
++2000-09-24 Keith Owens <kaos@melbourne.sgi.com>
++
++ * Various: Declare initialization routines as __init.
++
++ * Makefile: Define and export AWK.
++
++ * kdb/Makefile: Generate gen-kdb_cmds.c from kdb/kdb_cmds.
++
++ * kdb/kdbmain.c (kdb_init): Call new routine kdb_cmds_init to execute
++ whatever the user put in kdb/kdb_cmds.
++
++ * arch/i386/kdb/kdba_bt.c (kdba_bt_stack): New parameter to
++ indicate if esp in regs is known to be valid or not.
++
++ * kdb/kdb_bp.c, arch/i386/kdb/kdba_bp.c: More trace prints for
++ breakpoint handling.
++
++ * arch/i386/kdb/kdba_bp.c (kdba_installbp): Finally found and fixed the
++ annoying breakpoint bug where breakpoints where not always installed
++ after 'go'.
++
++ * Documentation/kdb: Update man pages kdb.mm, kdb_env.man, kdb_ss.man.
++
++ * Released as kdb-v1.5-beta1-2.4.0-test8.
++
++ * Sync to 2.4.0-test9-pre6 and release as kdb-v1.5-beta1-2.4.0-test9-pre6.
++
++2000-09-23 Keith Owens <kaos@melbourne.sgi.com>
++
++ * arch/i386/kdb/kdbasupport.c (kdba_getregcontents): New pseudo
++ registers cesp and ceflags to help with debugging the debugger.
++
++ * kdb/kdbmain.c (kdb_local, kdb): Add KDB_REASON_RECURSE. Add
++ environment variable RECURSE. Add code to cope with some types of
++ recursion.
++
++ * kdb/kdbmain.c (kdb), arch/i386/kdba/kdba_bp.c: Add
++ kdba_clearsinglestep.
++
++2000-09-22 Keith Owens <kaos@melbourne.sgi.com>
++
++ * drivers/video/vgacon.c (write_vga): No cli() if kdb is running, avoid
++ console deadlock.
++
++ * arch/i386/kernel/irq.c (get_irqlock): Warn if kdb is running, may hang.
++
++ * include/linux/kdb.h: Define KDB_IS_RUNNING as (0) if no CONFIG_KDB.
++
++ * arch/i386/kdb/kdba_bt.c (kdba_bt_stack): Do not attempt a backtrace if
++ the code segment is not in the kernel.
++
++ * kdb/modules: Change modules from MX_OBJS to M_OBJS. Remove EXPORT_NOSYMBOLS.
++
++2000-09-21 Keith Owens <kaos@melbourne.sgi.com>
++
++ * arch/i386/kernel/i386_ksyms.c: Move EXPORT_SYMBOLS for kdb to kdb/kdbmain.c.
++
++ * kdb/Makefile: Change kdb/kdbmain.o from O_OBJS to OX_OBJS.
++
++ * arch/i386/kernel/smp.c: Remove some #ifdef CONFIG_KDB. Remove kdbprivate.h.
++
++ * include/linux/kdb.h: Add kdb_print_state. Add KDB_STATE_WAIT_IPI.
++
++ * kdb/kdbmain.c (kdb): Only mark cpu as leaving if it is in KDB state. Maintain
++ WAIT_IPI state so a cpu is only driven through NMI once.
++
++ * arch/i386/kernel/smp.c (smp_kdb_stop): All state fiddling moved to kdb().
++
++2000-09-20 Keith Owens <kaos@melbourne.sgi.com>
++
++ * include/linux/kdb.h: #define kdb() as (0) if kdb is not configured.
++
++ * arch/i386/kernel/traps.c: Remove some #ifdef CONFIG_KDB.
++
++ * include/linux/kdbprivate.h: Move per cpu state to kdb.h.
++
++ * include/linux/kdb.h: Add KDB_STATE_NO_WATCHDOG, KDB_STATE_PRINTF_LOCK.
++ Rename KDB_DEBUG_xxx to KDB_DEBUG_FLAG_xxx. Clean up debug flag
++ definitions.
++
++ * arch/i386/kernel/traps.c (nmi_watchdog_tick): Check no watchdog.
++
++ * kdb/kdbmain.c (kdb): Set no watchdog in normal kdb code.
++
++ * kdb/kdbmain.c (kdb_parse): Allow watchdog in commands.
++
++ * kdb/kdb_io.c (kdb_printf): No watchdog during printing. Clean up lock handling.
++
++ * kdb/kdbmain.c (kdb_set): Clean up debug flag handling.
++
++2000-09-19 Juan J. Quintela <quintela@fi.udc.es>
++
++ * kdb/arch/i386/kdb/kdba_io.c: Allow kdb to compile without CONFIG_VT and/or
++ serial console.
++
++2000-09-19 Keith Owens <kaos@melbourne.sgi.com>
++
++ * include/linux/kdb.h: Define KDB_DEBUG_STATE().
++
++ * kdb/kdbmain.c (kdb): Add kdb_print_state(), calls to KDB_DEBUG_STATE().
++
++2000-09-16 Keith Owens <kaos@melbourne.sgi.com>
++
++ * Move to finer grained control over individual processors in kdb with
++ per cpu kdb state. Needed to allow ss[b] to only release one processor,
++ previously ss[b] released all processors. Also need to recover from
++ errors inside kdb commands, e.g. oops in kdbm_pg code.
++
++ * various:
++ Move global flags KDB_FLAG_SSB, KDB_FLAG_SUPRESS, KDB_FLAG_FAULT,
++ KDB_FLAG_SS, KDB_FLAG_SSBPT, kdb_active, to per cpu state and macros
++ KDB_STATE(xxx).
++ Replace kdb_flags & KDB_FLAG_xxx with KDB_FLAG(xxx).
++ Replace kdb_flags & KDB_DEBUG_xxx with KDB_DEBUG(xxx).
++ Replace specific tests with wrapper KDB_IS_RUNNING().
++
++ * various: Remove #ifdef CONFIG_SMP from kdb code wherever
++ possible. Simplifies the code and makes it much more readable.
++
++ * arch/i386/kdb/kdbasupport.c (kdb_setjmp): Record if we have reliable
++ longjmp data instead of assuming it is always set.
++
++ * various: Replace smp_kdb_wait with per cpu state, HOLD_CPU.
++
++ * init/main.c : Replace #ifdef KDB_DEBUG with KDB_DEBUG(CALLBACK).
++
++ * include/linux/kdbprivate.h: Separate command return codes from error
++ codes. Add more detailed command codes.
++
++ * arch/i386/kernel/traps.c (die): Change spin_lock_irq to
++ spin_lock_irqsave. Why did I do this?
++
++ * kdb/kdbmain.c (kdb_parse): Set per cpu flag CMD before executing kdb
++ command. More detailed return codes for commands that affect
++ processors.
++
++ * kdb/kdbmain.c (kdb_previous_event): New, check if any processors are
++ still executing the previous kdb event. Removes a race window where a
++ second event could enter kdb before the first had completely ended.
++
++ * kdb/kdbmain.c (kdb): Document all the concurrency conditions and how
++ kdb handles them. ss[b] now releases only the current cpu. Do not set
++ breakpoints when releasing for ss[b]. Recover from errors in kdb
++ commands. Check that we have reliable longjmp data before using it.
++
++ * various: Update return code documentation.
++
++ * kdb/kdb_bp.c (kdb_ss): Separate ss and ssb return codes.
++
++ * kdb/kdbsupport.c (kdb_ipi): Finer grained algorithm for deciding
++ whether to call send a stop signal to a cpu.
++
++ * arch/i386/kdb/kdba_bp.c (kdba_db_trap): Separate ss and ssb return
++ codes. Reinstall delayed software breakpoints per cpu instead of
++ globally. Changed algorithm for handling ss[b].
++
++ * arch/i386/kdb/kdba_bp.c (kdba_bp_trap): Match software breakpoints per
++ cpu instead of globally.
++
++ * include/linux/kdb.h: Bump version to kdb v1.5.
++
++2000-09-16 Keith Owens <kaos@melbourne.sgi.com>
++
++ * kernel/sysctl.c (kern_table): add /proc/sys/kernel/kdb.
++
++ * init/main.c (parse_options): add boot flags kdb=on, kdb=off,
++ kdb=early.
++
++ * include/linux/sysctl.h (enum): add KERN_KDB.
++
++ * drivers/char/serial.c (receive_chars): check kdb_on.
++
++ * drivers/char/keyboard.c (handle_scancode): check kdb_on.
++
++ * arch/i386/kernel/traps.c (nmi_watchdog_tick): check kdb_on.
++
++ * arch/i386/config.in: add CONFIG_KDB_OFF.
++
++ * Documentation/Configure.help: add CONFIG_KDB_OFF.
++
++ * kdb/kdbmain.c: add kdb_initial_cpu, kdb_on.
++
++ * kdb/kdbmain.c (kdb): check kdb_on, set kdb_initial_cpu.
++
++ * kdb/kdbmain.c (kdb_init): add Keith Owens to kdb banner.
++
++ * kdb/kdb_io.c (kdb_printf): serialize kdb_printf output.
++
++ * kdb/kdb_bt.c (kdb_bt): check environment variable BTAPROMPT.
++
++ * kdb/kdbsupport.c (kdb_ipi): ignore NMI for kdb_initial_cpu.
++
++ * kdb/modules/kdbm_pg.c (kdbm_page): merge updates from 2.4.0-test5-xfs.
++
++ * kdb/kdb_bt.man: add btp, bta, BTAPROMPT.
++
++ * kdb/kdb.mm: add CONFIG_KDB_OFF, boot flags, btp, bta.
++
++ * include/linux/kdbprivate.h: add kdb_initial_cpu.
++
++ * include/linux/kdb.h: add kdb_on, bump version to kdb v1.4.
+diff -Nurp linux-2.6.22-590/kdb/kdba_bt_x86.c linux-2.6.22-600/kdb/kdba_bt_x86.c
+--- linux-2.6.22-590/kdb/kdba_bt_x86.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/kdba_bt_x86.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,5142 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 2006, 2007 Silicon Graphics, Inc. All Rights Reserved.
++ *
++ * Common code for doing accurate backtraces on i386 and x86_64, including
++ * printing the values of arguments.
++ */
++
++#include <linux/init.h>
++#include <linux/kallsyms.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/stringify.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/nmi.h>
++#include <asm/asm-offsets.h>
++#include <asm/system.h>
++
++#define KDB_DEBUG_BB(fmt, ...) \
++ {if (KDB_DEBUG(BB)) kdb_printf(fmt, ## __VA_ARGS__);}
++#define KDB_DEBUG_BB_OFFSET_PRINTF(offset, prefix, suffix) \
++ kdb_printf(prefix "%c0x%x" suffix, \
++ offset >= 0 ? '+' : '-', \
++ offset >= 0 ? offset : -offset)
++#define KDB_DEBUG_BB_OFFSET(offset, prefix, suffix) \
++ {if (KDB_DEBUG(BB)) KDB_DEBUG_BB_OFFSET_PRINTF(offset, prefix, suffix);}
++
++#define BB_CHECK(expr, val, ret) \
++({ \
++ if (unlikely(expr)) { \
++ kdb_printf("%s, line %d: BB_CHECK(" #expr ") failed " \
++ #val "=%lx\n", \
++ __FUNCTION__, __LINE__, (long)val); \
++ bb_giveup = 1; \
++ return ret; \
++ } \
++})
++
++/* Use BBRG_Rxx for both i386 and x86_64. RAX through R15 must be at the end,
++ * starting with RAX. Some of these codes do not reflect actual registers,
++ * such codes are special cases when parsing the record of register changes.
++ * When updating BBRG_ entries, update bbrg_name as well.
++ */
++
++enum bb_reg_code
++{
++ BBRG_UNDEFINED = 0, /* Register contents are undefined */
++ BBRG_OSP, /* original stack pointer on entry to function */
++ BBRG_RAX,
++ BBRG_RBX,
++ BBRG_RCX,
++ BBRG_RDX,
++ BBRG_RDI,
++ BBRG_RSI,
++ BBRG_RBP,
++ BBRG_RSP,
++ BBRG_R8,
++ BBRG_R9,
++ BBRG_R10,
++ BBRG_R11,
++ BBRG_R12,
++ BBRG_R13,
++ BBRG_R14,
++ BBRG_R15,
++};
++
++const static char *bbrg_name[] = {
++ [BBRG_UNDEFINED] = "undefined",
++ [BBRG_OSP] = "osp",
++ [BBRG_RAX] = "rax",
++ [BBRG_RBX] = "rbx",
++ [BBRG_RCX] = "rcx",
++ [BBRG_RDX] = "rdx",
++ [BBRG_RDI] = "rdi",
++ [BBRG_RSI] = "rsi",
++ [BBRG_RBP] = "rbp",
++ [BBRG_RSP] = "rsp",
++ [BBRG_R8] = "r8",
++ [BBRG_R9] = "r9",
++ [BBRG_R10] = "r10",
++ [BBRG_R11] = "r11",
++ [BBRG_R12] = "r12",
++ [BBRG_R13] = "r13",
++ [BBRG_R14] = "r14",
++ [BBRG_R15] = "r15",
++};
++
++/* Map a register name to its register code. This includes the sub-register
++ * addressable fields, e.g. parts of rax can be addressed as ax, al, ah, eax.
++ * The list is sorted so it can be binary chopped, sort command is:
++ * LANG=C sort -t '"' -k2
++ */
++
++struct bb_reg_code_map {
++ enum bb_reg_code reg;
++ const char *name;
++};
++
++const static struct bb_reg_code_map
++bb_reg_code_map[] = {
++ { BBRG_RAX, "ah" },
++ { BBRG_RAX, "al" },
++ { BBRG_RAX, "ax" },
++ { BBRG_RBX, "bh" },
++ { BBRG_RBX, "bl" },
++ { BBRG_RBP, "bp" },
++ { BBRG_RBP, "bpl" },
++ { BBRG_RBX, "bx" },
++ { BBRG_RCX, "ch" },
++ { BBRG_RCX, "cl" },
++ { BBRG_RCX, "cx" },
++ { BBRG_RDX, "dh" },
++ { BBRG_RDI, "di" },
++ { BBRG_RDI, "dil" },
++ { BBRG_RDX, "dl" },
++ { BBRG_RDX, "dx" },
++ { BBRG_RAX, "eax" },
++ { BBRG_RBP, "ebp" },
++ { BBRG_RBX, "ebx" },
++ { BBRG_RCX, "ecx" },
++ { BBRG_RDI, "edi" },
++ { BBRG_RDX, "edx" },
++ { BBRG_RSI, "esi" },
++ { BBRG_RSP, "esp" },
++ { BBRG_R10, "r10" },
++ { BBRG_R10, "r10d" },
++ { BBRG_R10, "r10l" },
++ { BBRG_R10, "r10w" },
++ { BBRG_R11, "r11" },
++ { BBRG_R11, "r11d" },
++ { BBRG_R11, "r11l" },
++ { BBRG_R11, "r11w" },
++ { BBRG_R12, "r12" },
++ { BBRG_R12, "r12d" },
++ { BBRG_R12, "r12l" },
++ { BBRG_R12, "r12w" },
++ { BBRG_R13, "r13" },
++ { BBRG_R13, "r13d" },
++ { BBRG_R13, "r13l" },
++ { BBRG_R13, "r13w" },
++ { BBRG_R14, "r14" },
++ { BBRG_R14, "r14d" },
++ { BBRG_R14, "r14l" },
++ { BBRG_R14, "r14w" },
++ { BBRG_R15, "r15" },
++ { BBRG_R15, "r15d" },
++ { BBRG_R15, "r15l" },
++ { BBRG_R15, "r15w" },
++ { BBRG_R8, "r8" },
++ { BBRG_R8, "r8d" },
++ { BBRG_R8, "r8l" },
++ { BBRG_R8, "r8w" },
++ { BBRG_R9, "r9" },
++ { BBRG_R9, "r9d" },
++ { BBRG_R9, "r9l" },
++ { BBRG_R9, "r9w" },
++ { BBRG_RAX, "rax" },
++ { BBRG_RBP, "rbp" },
++ { BBRG_RBX, "rbx" },
++ { BBRG_RCX, "rcx" },
++ { BBRG_RDI, "rdi" },
++ { BBRG_RDX, "rdx" },
++ { BBRG_RSI, "rsi" },
++ { BBRG_RSP, "rsp" },
++ { BBRG_RSI, "si" },
++ { BBRG_RSI, "sil" },
++ { BBRG_RSP, "sp" },
++ { BBRG_RSP, "spl" },
++};
++
++/* Record register contents in terms of the values that were passed to this
++ * function, IOW track which registers contain an input value. A register's
++ * contents can be undefined, it can contain an input register value or it can
++ * contain an offset from the original stack pointer.
++ *
++ * This structure is used to represent the current contents of the integer
++ * registers, it is held in an array that is indexed by BBRG_xxx. The element
++ * for BBRG_xxx indicates what input value is currently in BBRG_xxx. When
++ * 'value' is BBRG_OSP then register BBRG_xxx contains a stack pointer,
++ * pointing at 'offset' from the original stack pointer on entry to the
++ * function. When 'value' is not BBRG_OSP then element BBRG_xxx contains the
++ * original contents of an input register and offset is ignored.
++ *
++ * An input register 'value' can be stored in more than one register and/or in
++ * more than one memory location.
++ */
++
++struct bb_reg_contains
++{
++ enum bb_reg_code value: 8;
++ short offset;
++};
++
++/* Note: the offsets in struct bb_mem_contains in this code are _NOT_ offsets
++ * from OSP, they are offsets from current RSP. It fits better with the way
++ * that struct pt_regs is built, some code pushes extra data before pt_regs so
++ * working with OSP relative offsets gets messy. struct bb_mem_contains
++ * entries must be in descending order of RSP offset.
++ */
++
++typedef struct { DECLARE_BITMAP(bits, BBRG_R15+1); } bbrgmask_t;
++#define BB_SKIP(reg) (1 << (BBRG_ ## reg))
++struct bb_mem_contains {
++ short offset_address;
++ enum bb_reg_code value: 8;
++};
++
++/* Transfer of control to a label outside the current function. If the
++ * transfer is to a known common restore path that expects known registers
++ * and/or a known memory state (e.g. struct pt_regs) then do a sanity check on
++ * the state at this point.
++ */
++
++struct bb_name_state {
++ const char *name; /* target function */
++ bfd_vma address; /* Address of target function */
++ const char *fname; /* optional from function name */
++ const struct bb_mem_contains *mem; /* expected memory state */
++ const struct bb_reg_contains *regs; /* expected register state */
++ const unsigned short mem_size; /* ARRAY_SIZE(mem) */
++ const unsigned short regs_size; /* ARRAY_SIZE(regs) */
++ const short osp_offset; /* RSP in regs == OSP+osp_offset */
++ const bbrgmask_t skip_mem; /* Some slots in mem may be undefined */
++ const bbrgmask_t skip_regs; /* Some slots in regs may be undefined */
++};
++
++/* NS (NAME_STATE) macros define the register and memory state when we transfer
++ * control to or start decoding a special case name. Use NS when the target
++ * label always has the same state. Use NS_FROM and specify the source label
++ * if the target state is slightly different depending on where it is branched
++ * from. This gives better state checking, by isolating the special cases.
++ *
++ * Note: for the same target label, NS_FROM entries must be followed by a
++ * single NS entry.
++ */
++
++#define NS_FROM(iname, ifname, imem, iregs, iskip_mem, iskip_regs, iosp_offset) \
++ { \
++ .name = iname, \
++ .fname = ifname, \
++ .mem = imem, \
++ .regs = iregs, \
++ .mem_size = ARRAY_SIZE(imem), \
++ .regs_size = ARRAY_SIZE(iregs), \
++ .skip_mem.bits[0] = iskip_mem, \
++ .skip_regs.bits[0] = iskip_regs, \
++ .osp_offset = iosp_offset, \
++ .address = 0 \
++ }
++
++/* Shorter forms for the common cases */
++#define NS(iname, imem, iregs, iskip_mem, iskip_regs, iosp_offset) \
++ NS_FROM(iname, NULL, imem, iregs, iskip_mem, iskip_regs, iosp_offset)
++#define NS_MEM(iname, imem, iskip_mem) \
++ NS_FROM(iname, NULL, imem, no_regs, iskip_mem, 0, 0)
++#define NS_MEM_FROM(iname, ifname, imem, iskip_mem) \
++ NS_FROM(iname, ifname, imem, no_regs, iskip_mem, 0, 0)
++#define NS_REG(iname, iregs, iskip_regs) \
++ NS_FROM(iname, NULL, no_memory, iregs, 0, iskip_regs, 0)
++#define NS_REG_FROM(iname, ifname, iregs, iskip_regs) \
++ NS_FROM(iname, ifname, no_memory, iregs, 0, iskip_regs, 0)
++
++static void
++bb_reg_code_set_value(enum bb_reg_code dst, enum bb_reg_code src);
++
++static const char *bb_mod_name, *bb_func_name;
++
++/*============================================================================*/
++/* */
++/* Most of the basic block code and data is common to x86_64 and i386. This */
++/* large ifdef contains almost all of the differences between the two */
++/* architectures. */
++/* */
++/* Make sure you update the correct section of this ifdef. */
++/* */
++/*============================================================================*/
++
++#ifdef CONFIG_X86_64
++
++/* Registers that can be used to pass parameters, in the order that parameters
++ * are passed.
++ */
++
++const static enum bb_reg_code
++bb_param_reg[] = {
++ BBRG_RDI,
++ BBRG_RSI,
++ BBRG_RDX,
++ BBRG_RCX,
++ BBRG_R8,
++ BBRG_R9,
++};
++
++const static enum bb_reg_code
++bb_preserved_reg[] = {
++ BBRG_RBX,
++ BBRG_RBP,
++ BBRG_RSP,
++ BBRG_R12,
++ BBRG_R13,
++ BBRG_R14,
++ BBRG_R15,
++};
++
++static const struct bb_mem_contains full_pt_regs[] = {
++ { 0x70, BBRG_RDI },
++ { 0x68, BBRG_RSI },
++ { 0x60, BBRG_RDX },
++ { 0x58, BBRG_RCX },
++ { 0x50, BBRG_RAX },
++ { 0x48, BBRG_R8 },
++ { 0x40, BBRG_R9 },
++ { 0x38, BBRG_R10 },
++ { 0x30, BBRG_R11 },
++ { 0x28, BBRG_RBX },
++ { 0x20, BBRG_RBP },
++ { 0x18, BBRG_R12 },
++ { 0x10, BBRG_R13 },
++ { 0x08, BBRG_R14 },
++ { 0x00, BBRG_R15 },
++};
++static const struct bb_mem_contains partial_pt_regs[] = {
++ { 0x40, BBRG_RDI },
++ { 0x38, BBRG_RSI },
++ { 0x30, BBRG_RDX },
++ { 0x28, BBRG_RCX },
++ { 0x20, BBRG_RAX },
++ { 0x18, BBRG_R8 },
++ { 0x10, BBRG_R9 },
++ { 0x08, BBRG_R10 },
++ { 0x00, BBRG_R11 },
++};
++static const struct bb_mem_contains partial_pt_regs_plus_1[] = {
++ { 0x48, BBRG_RDI },
++ { 0x40, BBRG_RSI },
++ { 0x38, BBRG_RDX },
++ { 0x30, BBRG_RCX },
++ { 0x28, BBRG_RAX },
++ { 0x20, BBRG_R8 },
++ { 0x18, BBRG_R9 },
++ { 0x10, BBRG_R10 },
++ { 0x08, BBRG_R11 },
++};
++static const struct bb_mem_contains partial_pt_regs_plus_2[] = {
++ { 0x50, BBRG_RDI },
++ { 0x48, BBRG_RSI },
++ { 0x40, BBRG_RDX },
++ { 0x38, BBRG_RCX },
++ { 0x30, BBRG_RAX },
++ { 0x28, BBRG_R8 },
++ { 0x20, BBRG_R9 },
++ { 0x18, BBRG_R10 },
++ { 0x10, BBRG_R11 },
++};
++static const struct bb_mem_contains no_memory[] = {
++};
++/* Hardware has already pushed an error_code on the stack. Use undefined just
++ * to set the initial stack offset.
++ */
++static const struct bb_mem_contains error_code[] = {
++ { 0x0, BBRG_UNDEFINED },
++};
++/* error_code plus original rax */
++static const struct bb_mem_contains error_code_rax[] = {
++ { 0x8, BBRG_UNDEFINED },
++ { 0x0, BBRG_RAX },
++};
++
++static const struct bb_reg_contains all_regs[] = {
++ [BBRG_RAX] = { BBRG_RAX, 0 },
++ [BBRG_RBX] = { BBRG_RBX, 0 },
++ [BBRG_RCX] = { BBRG_RCX, 0 },
++ [BBRG_RDX] = { BBRG_RDX, 0 },
++ [BBRG_RDI] = { BBRG_RDI, 0 },
++ [BBRG_RSI] = { BBRG_RSI, 0 },
++ [BBRG_RBP] = { BBRG_RBP, 0 },
++ [BBRG_RSP] = { BBRG_OSP, 0 },
++ [BBRG_R8 ] = { BBRG_R8, 0 },
++ [BBRG_R9 ] = { BBRG_R9, 0 },
++ [BBRG_R10] = { BBRG_R10, 0 },
++ [BBRG_R11] = { BBRG_R11, 0 },
++ [BBRG_R12] = { BBRG_R12, 0 },
++ [BBRG_R13] = { BBRG_R13, 0 },
++ [BBRG_R14] = { BBRG_R14, 0 },
++ [BBRG_R15] = { BBRG_R15, 0 },
++};
++static const struct bb_reg_contains no_regs[] = {
++};
++
++static struct bb_name_state bb_special_cases[] = {
++
++ /* First the cases that pass data only in memory. We do not check any
++ * register state for these cases.
++ */
++
++ /* Simple cases, no exceptions */
++ NS_MEM("ia32_ptregs_common", partial_pt_regs_plus_1, 0),
++ NS_MEM("ia32_sysret", partial_pt_regs, 0),
++ NS_MEM("int_careful", partial_pt_regs, 0),
++ NS_MEM("int_restore_rest", full_pt_regs, 0),
++ NS_MEM("int_signal", full_pt_regs, 0),
++ NS_MEM("int_very_careful", partial_pt_regs, 0),
++ NS_MEM("int_with_check", partial_pt_regs, 0),
++#ifdef CONFIG_TRACE_IRQFLAGS
++ NS_MEM("paranoid_exit0", full_pt_regs, 0),
++#endif /* CONFIG_TRACE_IRQFLAGS */
++ NS_MEM("paranoid_exit1", full_pt_regs, 0),
++ NS_MEM("ptregscall_common", partial_pt_regs_plus_1, 0),
++ NS_MEM("restore_norax", partial_pt_regs, 0),
++ NS_MEM("restore", partial_pt_regs, 0),
++ NS_MEM("ret_from_intr", partial_pt_regs_plus_2, 0),
++ NS_MEM("stub32_clone", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub32_execve", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub32_fork", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub32_iopl", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub32_rt_sigreturn", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub32_rt_sigsuspend", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub32_sigaltstack", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub32_sigreturn", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub32_sigsuspend", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub32_vfork", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub_clone", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub_execve", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub_fork", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub_iopl", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub_rt_sigreturn", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub_rt_sigsuspend", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub_sigaltstack", partial_pt_regs_plus_1, 0),
++ NS_MEM("stub_vfork", partial_pt_regs_plus_1, 0),
++
++ NS_MEM_FROM("ia32_badsys", "ia32_sysenter_target",
++ partial_pt_regs,
++ /* ia32_sysenter_target uses CLEAR_RREGS to clear R8-R11 on
++ * some paths. It also stomps on RAX.
++ */
++ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
++ BB_SKIP(RAX)),
++ NS_MEM_FROM("ia32_badsys", "ia32_cstar_target",
++ partial_pt_regs,
++ /* ia32_cstar_target uses CLEAR_RREGS to clear R8-R11 on some
++ * paths. It also stomps on RAX. Even more confusing, instead
++ * of storing RCX it stores RBP. WTF?
++ */
++ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
++ BB_SKIP(RAX) | BB_SKIP(RCX)),
++ NS_MEM("ia32_badsys", partial_pt_regs, 0),
++
++ /* Various bits of code branch to int_ret_from_sys_call, with slightly
++ * different missing values in pt_regs.
++ */
++ NS_MEM_FROM("int_ret_from_sys_call", "ret_from_fork",
++ partial_pt_regs,
++ BB_SKIP(R11)),
++ NS_MEM_FROM("int_ret_from_sys_call", "stub_execve",
++ partial_pt_regs,
++ BB_SKIP(RAX) | BB_SKIP(RCX)),
++ NS_MEM_FROM("int_ret_from_sys_call", "stub_rt_sigreturn",
++ partial_pt_regs,
++ BB_SKIP(RAX) | BB_SKIP(RCX)),
++ NS_MEM_FROM("int_ret_from_sys_call", "kernel_execve",
++ partial_pt_regs,
++ BB_SKIP(RAX)),
++ NS_MEM_FROM("int_ret_from_sys_call", "ia32_syscall",
++ partial_pt_regs,
++ /* ia32_syscall only saves RDI through RCX. */
++ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
++ BB_SKIP(RAX)),
++ NS_MEM_FROM("int_ret_from_sys_call", "ia32_sysenter_target",
++ partial_pt_regs,
++ /* ia32_sysenter_target uses CLEAR_RREGS to clear R8-R11 on
++ * some paths. It also stomps on RAX.
++ */
++ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
++ BB_SKIP(RAX)),
++ NS_MEM_FROM("int_ret_from_sys_call", "ia32_cstar_target",
++ partial_pt_regs,
++ /* ia32_cstar_target uses CLEAR_RREGS to clear R8-R11 on some
++ * paths. It also stomps on RAX. Even more confusing, instead
++ * of storing RCX it stores RBP. WTF?
++ */
++ BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
++ BB_SKIP(RAX) | BB_SKIP(RCX)),
++ NS_MEM("int_ret_from_sys_call", partial_pt_regs, 0),
++
++ NS_MEM("retint_kernel", partial_pt_regs, BB_SKIP(RAX)),
++
++ NS_MEM("retint_careful", partial_pt_regs, BB_SKIP(RAX)),
++
++ /* Horrible hack: For a brand new x86_64 task, switch_to() branches to
++ * ret_from_fork with a totally different stack state from all the
++ * other tasks that come out of switch_to(). This non-standard state
++ * cannot be represented so just ignore the branch from switch_to() to
++ * ret_from_fork. Due to inlining and linker labels, switch_to() can
++ * appear as several different function labels, including schedule,
++ * context_switch and __sched_text_start.
++ */
++ NS_MEM_FROM("ret_from_fork", "schedule", no_memory, 0),
++ NS_MEM_FROM("ret_from_fork", "__sched_text_start", no_memory, 0),
++ NS_MEM_FROM("ret_from_fork", "context_switch", no_memory, 0),
++ NS_MEM("ret_from_fork", full_pt_regs, 0),
++
++
++ NS_MEM_FROM("ret_from_sys_call", "ret_from_fork",
++ partial_pt_regs,
++ BB_SKIP(R11)),
++ NS_MEM("ret_from_sys_call", partial_pt_regs, 0),
++
++ NS_MEM("retint_restore_args",
++ partial_pt_regs,
++ BB_SKIP(RAX) | BB_SKIP(RCX)),
++
++ NS_MEM("retint_swapgs",
++ partial_pt_regs,
++ BB_SKIP(RAX) | BB_SKIP(RCX)),
++
++ /* Now the cases that pass data in registers. We do not check any
++ * memory state for these cases.
++ */
++
++ NS_REG("bad_put_user",
++ all_regs,
++ BB_SKIP(RAX) | BB_SKIP(RCX) | BB_SKIP(R8)),
++
++ NS_REG("bad_get_user",
++ all_regs,
++ BB_SKIP(RAX) | BB_SKIP(RCX) | BB_SKIP(R8)),
++
++ NS_REG("bad_to_user",
++ all_regs,
++ BB_SKIP(RAX) | BB_SKIP(RCX)),
++
++ NS_REG("ia32_ptregs_common",
++ all_regs,
++ 0),
++
++ NS_REG("copy_user_generic_unrolled",
++ all_regs,
++ BB_SKIP(RAX) | BB_SKIP(RCX)),
++
++ NS_REG("copy_user_generic_string",
++ all_regs,
++ 0),
++
++ NS_REG("iret_label",
++ all_regs,
++ 0),
++
++ /* Finally the cases that pass data in both registers and memory.
++ */
++
++ NS("invalid_TSS", error_code, all_regs, 0, 0, 0),
++ NS("segment_not_present", error_code, all_regs, 0, 0, 0),
++ NS("alignment_check", error_code, all_regs, 0, 0, 0),
++ NS("page_fault", error_code, all_regs, 0, 0, 0),
++ NS("general_protection", error_code, all_regs, 0, 0, 0),
++ NS("error_entry", error_code_rax, all_regs, 0, BB_SKIP(RAX), -0x10),
++ NS("common_interrupt", error_code, all_regs, 0, 0, -0x8),
++};
++
++static const char *bb_spurious[] = {
++ /* schedule */
++ "thread_return",
++ /* ret_from_fork */
++ "rff_action",
++ "rff_trace",
++ /* system_call */
++ "ret_from_sys_call",
++ "sysret_check",
++ "sysret_careful",
++ "sysret_signal",
++ "badsys",
++ "tracesys",
++ "int_ret_from_sys_call",
++ "int_with_check",
++ "int_careful",
++ "int_very_careful",
++ "int_signal",
++ "int_restore_rest",
++ /* common_interrupt */
++ "ret_from_intr",
++ "exit_intr",
++ "retint_with_reschedule",
++ "retint_check",
++ "retint_swapgs",
++ "retint_restore_args",
++ "restore_args",
++ "iret_label",
++ "bad_iret",
++ "retint_careful",
++ "retint_signal",
++ "retint_kernel",
++ /* .macro paranoidexit */
++#ifdef CONFIG_TRACE_IRQFLAGS
++ "paranoid_exit0",
++ "paranoid_userspace0",
++ "paranoid_restore0",
++ "paranoid_swapgs0",
++ "paranoid_schedule0",
++#endif /* CONFIG_TRACE_IRQFLAGS */
++ "paranoid_exit1",
++ "paranoid_swapgs1",
++ "paranoid_restore1",
++ "paranoid_userspace1",
++ "paranoid_schedule1",
++ /* error_entry */
++ "error_swapgs",
++ "error_sti",
++ "error_exit",
++ "error_kernelspace",
++ /* load_gs_index */
++ "gs_change",
++ "bad_gs",
++ /* ia32_sysenter_target */
++ "sysenter_do_call",
++ "sysenter_tracesys",
++ /* ia32_cstar_target */
++ "cstar_do_call",
++ "cstar_tracesys",
++ "ia32_badarg",
++ /* ia32_syscall */
++ "ia32_do_syscall",
++ "ia32_sysret",
++ "ia32_tracesys",
++ "ia32_badsys",
++};
++
++#define HARDWARE_PUSHED (5 * KDB_WORD_SIZE)
++
++static const char *bb_hardware_handlers[] = {
++ "system_call",
++ "common_interrupt",
++ "error_entry",
++ "debug",
++ "nmi",
++ "int3",
++ "double_fault",
++ "stack_segment",
++ "machine_check",
++ "kdb_call",
++};
++
++static void
++bb_start_block0(void)
++{
++ bb_reg_code_set_value(BBRG_RAX, BBRG_RAX);
++ bb_reg_code_set_value(BBRG_RBX, BBRG_RBX);
++ bb_reg_code_set_value(BBRG_RCX, BBRG_RCX);
++ bb_reg_code_set_value(BBRG_RDX, BBRG_RDX);
++ bb_reg_code_set_value(BBRG_RDI, BBRG_RDI);
++ bb_reg_code_set_value(BBRG_RSI, BBRG_RSI);
++ bb_reg_code_set_value(BBRG_RBP, BBRG_RBP);
++ bb_reg_code_set_value(BBRG_RSP, BBRG_OSP);
++ bb_reg_code_set_value(BBRG_R8, BBRG_R8);
++ bb_reg_code_set_value(BBRG_R9, BBRG_R9);
++ bb_reg_code_set_value(BBRG_R10, BBRG_R10);
++ bb_reg_code_set_value(BBRG_R11, BBRG_R11);
++ bb_reg_code_set_value(BBRG_R12, BBRG_R12);
++ bb_reg_code_set_value(BBRG_R13, BBRG_R13);
++ bb_reg_code_set_value(BBRG_R14, BBRG_R14);
++ bb_reg_code_set_value(BBRG_R15, BBRG_R15);
++}
++
++/* x86_64 does not have a special case for __switch_to */
++
++static void
++bb_fixup_switch_to(char *p)
++{
++}
++
++static int
++bb_asmlinkage_arch(void)
++{
++ return strncmp(bb_func_name, "__down", 6) == 0 ||
++ strncmp(bb_func_name, "__up", 4) == 0 ||
++ strncmp(bb_func_name, "stub_", 5) == 0 ||
++ strcmp(bb_func_name, "ret_from_fork") == 0 ||
++ strcmp(bb_func_name, "ptregscall_common") == 0;
++}
++
++#else /* !CONFIG_X86_64 */
++
++/* Registers that can be used to pass parameters, in the order that parameters
++ * are passed.
++ */
++
++const static enum bb_reg_code
++bb_param_reg[] = {
++ BBRG_RAX,
++ BBRG_RDX,
++ BBRG_RCX,
++};
++
++const static enum bb_reg_code
++bb_preserved_reg[] = {
++ BBRG_RBX,
++ BBRG_RBP,
++ BBRG_RSP,
++ BBRG_RSI,
++ BBRG_RDI,
++};
++
++static const struct bb_mem_contains full_pt_regs[] = {
++ { 0x18, BBRG_RAX },
++ { 0x14, BBRG_RBP },
++ { 0x10, BBRG_RDI },
++ { 0x0c, BBRG_RSI },
++ { 0x08, BBRG_RDX },
++ { 0x04, BBRG_RCX },
++ { 0x00, BBRG_RBX },
++};
++static const struct bb_mem_contains no_memory[] = {
++};
++/* Hardware has already pushed an error_code on the stack. Use undefined just
++ * to set the initial stack offset.
++ */
++static const struct bb_mem_contains error_code[] = {
++ { 0x0, BBRG_UNDEFINED },
++};
++/* rbx already pushed */
++static const struct bb_mem_contains rbx_pushed[] = {
++ { 0x0, BBRG_RBX },
++};
++
++static const struct bb_reg_contains all_regs[] = {
++ [BBRG_RAX] = { BBRG_RAX, 0 },
++ [BBRG_RBX] = { BBRG_RBX, 0 },
++ [BBRG_RCX] = { BBRG_RCX, 0 },
++ [BBRG_RDX] = { BBRG_RDX, 0 },
++ [BBRG_RDI] = { BBRG_RDI, 0 },
++ [BBRG_RSI] = { BBRG_RSI, 0 },
++ [BBRG_RBP] = { BBRG_RBP, 0 },
++ [BBRG_RSP] = { BBRG_OSP, 0 },
++};
++static const struct bb_reg_contains no_regs[] = {
++};
++
++static struct bb_name_state bb_special_cases[] = {
++
++ /* First the cases that pass data only in memory. We do not check any
++ * register state for these cases.
++ */
++
++ /* Simple cases, no exceptions */
++ NS_MEM("check_userspace", full_pt_regs, 0),
++ NS_MEM("device_not_available_emulate", full_pt_regs, 0),
++ NS_MEM("ldt_ss", full_pt_regs, 0),
++ NS_MEM("no_singlestep", full_pt_regs, 0),
++ NS_MEM("restore_all", full_pt_regs, 0),
++ NS_MEM("restore_nocheck", full_pt_regs, 0),
++ NS_MEM("restore_nocheck_notrace", full_pt_regs, 0),
++ NS_MEM("ret_from_exception", full_pt_regs, 0),
++ NS_MEM("ret_from_fork", full_pt_regs, 0),
++ NS_MEM("ret_from_intr", full_pt_regs, 0),
++ NS_MEM("work_notifysig", full_pt_regs, 0),
++ NS_MEM("work_pending", full_pt_regs, 0),
++
++#ifdef CONFIG_PREEMPT
++ NS_MEM("resume_kernel", full_pt_regs, 0),
++#endif /* CONFIG_PREEMPT */
++
++ NS_MEM("common_interrupt", error_code, 0),
++ NS_MEM("error_code", error_code, 0),
++
++ NS_MEM("bad_put_user", rbx_pushed, 0),
++
++ NS_MEM_FROM("resume_userspace", "syscall_badsys",
++ full_pt_regs, BB_SKIP(RAX)),
++ NS_MEM_FROM("resume_userspace", "syscall_fault",
++ full_pt_regs, BB_SKIP(RAX)),
++ NS_MEM_FROM("resume_userspace", "syscall_trace_entry",
++ full_pt_regs, BB_SKIP(RAX)),
++ /* Too difficult to trace through the various vm86 functions for now.
++ * They are C functions that start off with some memory state, fiddle
++ * the registers then jmp directly to resume_userspace. For the
++ * moment, just assume that they are valid and do no checks.
++ */
++ NS_FROM("resume_userspace", "do_int",
++ no_memory, no_regs, 0, 0, 0),
++ NS_FROM("resume_userspace", "do_sys_vm86",
++ no_memory, no_regs, 0, 0, 0),
++ NS_FROM("resume_userspace", "handle_vm86_fault",
++ no_memory, no_regs, 0, 0, 0),
++ NS_FROM("resume_userspace", "handle_vm86_trap",
++ no_memory, no_regs, 0, 0, 0),
++ NS_MEM("resume_userspace", full_pt_regs, 0),
++
++ NS_MEM_FROM("syscall_badsys", "sysenter_entry",
++ full_pt_regs, BB_SKIP(RBP)),
++ NS_MEM("syscall_badsys", full_pt_regs, 0),
++
++ NS_MEM_FROM("syscall_call", "syscall_trace_entry",
++ full_pt_regs, BB_SKIP(RAX)),
++ NS_MEM("syscall_call", full_pt_regs, 0),
++
++ NS_MEM_FROM("syscall_exit", "syscall_trace_entry",
++ full_pt_regs, BB_SKIP(RAX)),
++ NS_MEM("syscall_exit", full_pt_regs, 0),
++
++ NS_MEM_FROM("syscall_exit_work", "sysenter_entry",
++ full_pt_regs, BB_SKIP(RAX) | BB_SKIP(RBP)),
++ NS_MEM_FROM("syscall_exit_work", "system_call",
++ full_pt_regs, BB_SKIP(RAX)),
++ NS_MEM("syscall_exit_work", full_pt_regs, 0),
++
++ NS_MEM_FROM("syscall_trace_entry", "sysenter_entry",
++ full_pt_regs, BB_SKIP(RBP)),
++ NS_MEM_FROM("syscall_trace_entry", "system_call",
++ full_pt_regs, BB_SKIP(RAX)),
++ NS_MEM("syscall_trace_entry", full_pt_regs, 0),
++
++ /* Now the cases that pass data in registers. We do not check any
++ * memory state for these cases.
++ */
++
++ NS_REG("syscall_fault", all_regs, 0),
++
++ NS_REG("bad_get_user", all_regs,
++ BB_SKIP(RAX) | BB_SKIP(RDX)),
++
++ /* Finally the cases that pass data in both registers and memory.
++ */
++
++ /* This entry is redundant now because bb_fixup_switch_to() hides the
++ * jmp __switch_to case, however the entry is left here as
++ * documentation.
++ *
++ * NS("__switch_to", no_memory, no_regs, 0, 0, 0),
++ */
++};
++
++static const char *bb_spurious[] = {
++ /* ret_from_exception */
++ "ret_from_intr",
++ "check_userspace",
++ "resume_userspace",
++ /* resume_kernel */
++#ifdef CONFIG_PREEMPT
++ "need_resched",
++#endif /* CONFIG_PREEMPT */
++ /* sysenter_entry */
++ "sysenter_past_esp",
++ /* system_call */
++ "no_singlestep",
++ "syscall_call",
++ "syscall_exit",
++ "restore_all",
++ "restore_nocheck",
++ "restore_nocheck_notrace",
++ "ldt_ss",
++ /* do not include iret_exc, it is in a .fixup section */
++ /* work_pending */
++ "work_resched",
++ "work_notifysig",
++#ifdef CONFIG_VM86
++ "work_notifysig_v86",
++#endif /* CONFIG_VM86 */
++ /* page_fault */
++ "error_code",
++ /* device_not_available */
++ "device_not_available_emulate",
++ /* debug */
++ "debug_esp_fix_insn",
++ "debug_stack_correct",
++ /* nmi */
++ "nmi_stack_correct",
++ "nmi_stack_fixup",
++ "nmi_debug_stack_check",
++ "nmi_espfix_stack",
++};
++
++#define HARDWARE_PUSHED (2 * KDB_WORD_SIZE)
++
++static const char *bb_hardware_handlers[] = {
++ "ret_from_exception",
++ "system_call",
++ "work_pending",
++ "syscall_fault",
++ "page_fault",
++ "coprocessor_error",
++ "simd_coprocessor_error",
++ "device_not_available",
++ "debug",
++ "nmi",
++ "int3",
++ "overflow",
++ "bounds",
++ "invalid_op",
++ "coprocessor_segment_overrun",
++ "invalid_TSS",
++ "segment_not_present",
++ "stack_segment",
++ "general_protection",
++ "alignment_check",
++ "kdb_call",
++ "divide_error",
++ "machine_check",
++ "spurious_interrupt_bug",
++};
++
++static void
++bb_start_block0(void)
++{
++ bb_reg_code_set_value(BBRG_RAX, BBRG_RAX);
++ bb_reg_code_set_value(BBRG_RBX, BBRG_RBX);
++ bb_reg_code_set_value(BBRG_RCX, BBRG_RCX);
++ bb_reg_code_set_value(BBRG_RDX, BBRG_RDX);
++ bb_reg_code_set_value(BBRG_RDI, BBRG_RDI);
++ bb_reg_code_set_value(BBRG_RSI, BBRG_RSI);
++ bb_reg_code_set_value(BBRG_RBP, BBRG_RBP);
++ bb_reg_code_set_value(BBRG_RSP, BBRG_OSP);
++}
++
++/* The i386 code that switches stack in a context switch is an extremely
++ * special case. It saves the rip pointing to a label that is not otherwise
++ * referenced, saves the current rsp then pushes a word. The magic code that
++ * resumes the new task picks up the saved rip and rsp, effectively referencing
++ * a label that otherwise is not used and ignoring the pushed word.
++ *
++ * The simplest way to handle this very strange case is to recognise jmp
++ * address <__switch_to> and treat it as a popfl instruction. This avoids
++ * terminating the block on this jmp and removes one word from the stack state,
++ * which is the end effect of all the magic code.
++ *
++ * Called with the instruction line, starting after the first ':'.
++ */
++
++static void
++bb_fixup_switch_to(char *p)
++{
++ char *p1 = p;
++ p += strspn(p, " \t"); /* start of instruction */
++ if (strncmp(p, "jmp", 3))
++ return;
++ p += strcspn(p, " \t"); /* end of instruction */
++ p += strspn(p, " \t"); /* start of address */
++ p += strcspn(p, " \t"); /* end of address */
++ p += strspn(p, " \t"); /* start of comment */
++ if (strcmp(p, "<__switch_to>") == 0)
++ strcpy(p1, "popfl");
++}
++
++static int
++bb_asmlinkage_arch(void)
++{
++ return strcmp(bb_func_name, "ret_from_exception") == 0 ||
++ strcmp(bb_func_name, "syscall_trace_entry") == 0;
++}
++
++#endif /* CONFIG_X86_64 */
++
++
++/*============================================================================*/
++/* */
++/* Common code and data. */
++/* */
++/*============================================================================*/
++
++
++/* Tracking registers by decoding the instructions is quite a bit harder than
++ * doing the same tracking using compiler generated information. Register
++ * contents can remain in the same register, they can be copied to other
++ * registers, they can be stored on stack or they can be modified/overwritten.
++ * At any one time, there are 0 or more copies of the original value that was
++ * supplied in each register on input to the current function. If a register
++ * exists in multiple places, one copy of that register is the master version,
++ * the others are temporary copies which may or may not be destroyed before the
++ * end of the function.
++ *
++ * The compiler knows which copy of a register is the master and which are
++ * temporary copies, which makes it relatively easy to track register contents
++ * as they are saved and restored. Without that compiler based knowledge, this
++ * code has to track _every_ possible copy of each register, simply because we
++ * do not know which is the master copy and which are temporary copies which
++ * may be destroyed later.
++ *
++ * It gets worse: registers that contain parameters can be copied to other
++ * registers which are then saved on stack in a lower level function. Also the
++ * stack pointer may be held in multiple registers (typically RSP and RBP)
++ * which contain different offsets from the base of the stack on entry to this
++ * function. All of which means that we have to track _all_ register
++ * movements, or at least as much as possible.
++ *
++ * Start with the basic block that contains the start of the function, by
++ * definition all registers contain their initial value. Track each
++ * instruction's effect on register contents, this includes reading from a
++ * parameter register before any write to that register, IOW the register
++ * really does contain a parameter. The register state is represented by a
++ * dynamically sized array with each entry containing :-
++ *
++ * Register name
++ * Location it is copied to (another register or stack + offset)
++ *
++ * Besides the register tracking array, we track which parameter registers are
++ * read before being written, to determine how many parameters are passed in
++ * registers. We also track which registers contain stack pointers, including
++ * their offset from the original stack pointer on entry to the function.
++ *
++ * At each exit from the current basic block (via JMP instruction or drop
++ * through), the register state is cloned to form the state on input to the
++ * target basic block and the target is marked for processing using this state.
++ * When there are multiple ways to enter a basic block (e.g. several JMP
++ * instructions referencing the same target) then there will be multiple sets
++ * of register state to form the "input" for that basic block, there is no
++ * guarantee that all paths to that block will have the same register state.
++ *
++ * As each target block is processed, all the known sets of register state are
++ * merged to form a suitable subset of the state which agrees with all the
++ * inputs. The most common case is where one path to this block copies a
++ * register to another register but another path does not, therefore the copy
++ * is only a temporary and should not be propagated into this block.
++ *
++ * If the target block already has an input state from the current transfer
++ * point and the new input state is identical to the previous input state then
++ * we have reached a steady state for the arc from the current location to the
++ * target block. Therefore there is no need to process the target block again.
++ *
++ * The steps of "process a block, create state for target block(s), pick a new
++ * target block, merge state for target block, process target block" will
++ * continue until all the state changes have propagated all the way down the
++ * basic block tree, including round any cycles in the tree. The merge step
++ * only deletes tracking entries from the input state(s), it never adds a
++ * tracking entry. Therefore the overall algorithm is guaranteed to converge
++ * to a steady state, the worst possible case is that every tracking entry into
++ * a block is deleted, which will result in an empty output state.
++ *
++ * As each instruction is decoded, it is checked to see if this is the point at
++ * which execution left this function. This can be a call to another function
++ * (actually the return address to this function) or is the instruction which
++ * was about to be executed when an interrupt occurred (including an oops).
++ * Save the register state at this point.
++ *
++ * We always know what the registers contain when execution left this function.
++ * For an interrupt, the registers are in struct pt_regs. For a call to
++ * another function, we have already deduced the register state on entry to the
++ * other function by unwinding to the start of that function. Given the
++ * register state on exit from this function plus the known register contents
++ * on entry to the next function, we can determine the stack pointer value on
++ * input to this function. That in turn lets us calculate the address of input
++ * registers that have been stored on stack, giving us the input parameters.
++ * Finally the stack pointer gives us the return address which is the exit
++ * point from the calling function, repeat the unwind process on that function.
++ *
++ * The data that tracks which registers contain input parameters is function
++ * global, not local to any basic block. To determine which input registers
++ * contain parameters, we have to decode the entire function. Otherwise an
++ * exit early in the function might not have read any parameters yet.
++ */
++
++/* Record memory contents in terms of the values that were passed to this
++ * function, IOW track which memory locations contain an input value. A memory
++ * location's contents can be undefined, it can contain an input register value
++ * or it can contain an offset from the original stack pointer.
++ *
++ * This structure is used to record register contents that have been stored in
++ * memory. Location (BBRG_OSP + 'offset_address') contains the input value
++ * from register 'value'. When 'value' is BBRG_OSP then offset_value contains
++ * the offset from the original stack pointer that was stored in this memory
++ * location. When 'value' is not BBRG_OSP then the memory location contains
++ * the original contents of an input register and offset_value is ignored.
++ *
++ * An input register 'value' can be stored in more than one register and/or in
++ * more than one memory location.
++ */
++
++struct bb_memory_contains
++{
++ short offset_address;
++ enum bb_reg_code value: 8;
++ short offset_value;
++};
++
++/* Track the register state in each basic block. */
++
++struct bb_reg_state
++{
++ /* Indexed by register value 'reg - BBRG_RAX' */
++ struct bb_reg_contains contains[KDB_INT_REGISTERS];
++ int ref_count;
++ int mem_count;
++ /* dynamic size for memory locations, see mem_count */
++ struct bb_memory_contains memory[0];
++};
++
++static struct bb_reg_state *bb_reg_state, *bb_exit_state;
++static int bb_reg_state_max, bb_reg_params, bb_memory_params;
++
++struct bb_actual
++{
++ bfd_vma value;
++ int valid;
++};
++
++/* Contains the actual hex value of a register, plus a valid bit. Indexed by
++ * register value 'reg - BBRG_RAX'
++ */
++static struct bb_actual bb_actual[KDB_INT_REGISTERS];
++
++static bfd_vma bb_func_start, bb_func_end;
++static bfd_vma bb_common_interrupt, bb_error_entry, bb_ret_from_intr,
++ bb_thread_return, bb_sync_regs, bb_save_v86_state,
++ bb__sched_text_start, bb__sched_text_end;
++
++/* Record jmp instructions, both conditional and unconditional. These form the
++ * arcs between the basic blocks. This is also used to record the state when
++ * one block drops through into the next.
++ *
++ * A bb can have multiple associated bb_jmp entries, one for each jcc
++ * instruction plus at most one bb_jmp for the drop through case. If a bb
++ * drops through to the next bb then the drop through bb_jmp entry will be the
++ * last entry in the set of bb_jmp's that are associated with the bb. This is
++ * enforced by the fact that jcc entries are added during the disassembly phase
++ * of pass 1, the drop through entries are added near the end of pass 1.
++ *
++ * At address 'from' in this block, we have a jump to address 'to'. The
++ * register state at 'from' is copied to the target block.
++ */
++
++struct bb_jmp
++{
++ bfd_vma from;
++ bfd_vma to;
++ struct bb_reg_state *state;
++ unsigned int drop_through: 1;
++};
++
++struct bb
++{
++ bfd_vma start;
++ /* The end address of a basic block is sloppy. It can be the first
++ * byte of the last instruction in the block or it can be the last byte
++ * of the block.
++ */
++ bfd_vma end;
++ unsigned int changed: 1;
++ unsigned int drop_through: 1;
++};
++
++static struct bb **bb_list, *bb_curr;
++static int bb_max, bb_count;
++
++static struct bb_jmp *bb_jmp_list;
++static int bb_jmp_max, bb_jmp_count;
++
++static int bb_giveup;
++
++/* Add a new bb entry to the list. This does an insert sort. */
++
++static struct bb *
++bb_new(bfd_vma order)
++{
++ int i, j;
++ struct bb *bb, *p;
++ if (bb_giveup)
++ return NULL;
++ if (bb_count == bb_max) {
++ struct bb **bb_list_new;
++ bb_max += 10;
++ bb_list_new = debug_kmalloc(bb_max*sizeof(*bb_list_new),
++ GFP_ATOMIC);
++ if (!bb_list_new) {
++ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
++ bb_giveup = 1;
++ return NULL;
++ }
++ memcpy(bb_list_new, bb_list, bb_count*sizeof(*bb_list));
++ debug_kfree(bb_list);
++ bb_list = bb_list_new;
++ }
++ bb = debug_kmalloc(sizeof(*bb), GFP_ATOMIC);
++ if (!bb) {
++ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
++ bb_giveup = 1;
++ return NULL;
++ }
++ memset(bb, 0, sizeof(*bb));
++ for (i = 0; i < bb_count; ++i) {
++ p = bb_list[i];
++ if ((p->start && p->start > order) ||
++ (p->end && p->end > order))
++ break;
++ }
++ for (j = bb_count-1; j >= i; --j)
++ bb_list[j+1] = bb_list[j];
++ bb_list[i] = bb;
++ ++bb_count;
++ return bb;
++}
++
++/* Add a new bb_jmp entry to the list. This list is not sorted. */
++
++static struct bb_jmp *
++bb_jmp_new(bfd_vma from, bfd_vma to, unsigned int drop_through)
++{
++ struct bb_jmp *bb_jmp;
++ if (bb_giveup)
++ return NULL;
++ if (bb_jmp_count == bb_jmp_max) {
++ struct bb_jmp *bb_jmp_list_new;
++ bb_jmp_max += 10;
++ bb_jmp_list_new =
++ debug_kmalloc(bb_jmp_max*sizeof(*bb_jmp_list_new),
++ GFP_ATOMIC);
++ if (!bb_jmp_list_new) {
++ kdb_printf("\n\n%s: out of debug_kmalloc\n",
++ __FUNCTION__);
++ bb_giveup = 1;
++ return NULL;
++ }
++ memcpy(bb_jmp_list_new, bb_jmp_list,
++ bb_jmp_count*sizeof(*bb_jmp_list));
++ debug_kfree(bb_jmp_list);
++ bb_jmp_list = bb_jmp_list_new;
++ }
++ bb_jmp = bb_jmp_list + bb_jmp_count++;
++ bb_jmp->from = from;
++ bb_jmp->to = to;
++ bb_jmp->drop_through = drop_through;
++ bb_jmp->state = NULL;
++ return bb_jmp;
++}
++
++static void
++bb_delete(int i)
++{
++ struct bb *bb = bb_list[i];
++ memcpy(bb_list+i, bb_list+i+1, (bb_count-i-1)*sizeof(*bb_list));
++ bb_list[--bb_count] = NULL;
++ debug_kfree(bb);
++}
++
++static struct bb *
++bb_add(bfd_vma start, bfd_vma end)
++{
++ int i;
++ struct bb *bb;
++ /* Ignore basic blocks whose start address is outside the current
++ * function. These occur for call instructions and for tail recursion.
++ */
++ if (start &&
++ (start < bb_func_start || start >= bb_func_end))
++ return NULL;
++ for (i = 0; i < bb_count; ++i) {
++ bb = bb_list[i];
++ if ((start && bb->start == start) ||
++ (end && bb->end == end))
++ return bb;
++ }
++ bb = bb_new(start ? start : end);
++ if (bb) {
++ bb->start = start;
++ bb->end = end;
++ }
++ return bb;
++}
++
++static struct bb_jmp *
++bb_jmp_add(bfd_vma from, bfd_vma to, unsigned int drop_through)
++{
++ int i;
++ struct bb_jmp *bb_jmp;
++ for (i = 0, bb_jmp = bb_jmp_list; i < bb_jmp_count; ++i, ++bb_jmp) {
++ if (bb_jmp->from == from &&
++ bb_jmp->to == to &&
++ bb_jmp->drop_through == drop_through)
++ return bb_jmp;
++ }
++ bb_jmp = bb_jmp_new(from, to, drop_through);
++ return bb_jmp;
++}
++
++static unsigned long bb_curr_addr, bb_exit_addr;
++static char bb_buffer[256]; /* A bit too big to go on stack */
++
++/* Computed jmp uses 'jmp *addr(,%reg,[48])' where 'addr' is the start of a
++ * table of addresses that point into the current function. Run the table and
++ * generate bb starts for each target address plus a bb_jmp from this address
++ * to the target address.
++ *
++ * Only called for 'jmp' instructions, with the pointer starting at 'jmp'.
++ */
++
++static void
++bb_pass1_computed_jmp(char *p)
++{
++ unsigned long table, scale;
++ kdb_machreg_t addr;
++ struct bb* bb;
++ p += strcspn(p, " \t"); /* end of instruction */
++ p += strspn(p, " \t"); /* start of address */
++ if (*p++ != '*')
++ return;
++ table = simple_strtoul(p, &p, 0);
++ if (strncmp(p, "(,%", 3) != 0)
++ return;
++ p += 3;
++ p += strcspn(p, ","); /* end of reg */
++ if (*p++ != ',')
++ return;
++ scale = simple_strtoul(p, &p, 0);
++ if (scale != KDB_WORD_SIZE || strcmp(p, ")"))
++ return;
++ while (!bb_giveup) {
++ if (kdb_getword(&addr, table, sizeof(addr)))
++ return;
++ if (addr < bb_func_start || addr >= bb_func_end)
++ return;
++ bb = bb_add(addr, 0);
++ if (bb)
++ bb_jmp_add(bb_curr_addr, addr, 0);
++ table += KDB_WORD_SIZE;
++ }
++}
++
++/* Pass 1, identify the start and end of each basic block */
++
++static int
++bb_dis_pass1(PTR file, const char *fmt, ...)
++{
++ int l = strlen(bb_buffer);
++ char *p;
++ va_list ap;
++ va_start(ap, fmt);
++ vsnprintf(bb_buffer + l, sizeof(bb_buffer) - l, fmt, ap);
++ va_end(ap);
++ if ((p = strchr(bb_buffer, '\n'))) {
++ *p = '\0';
++ /* ret[q], iret[q], sysexit, sysret, ud2a or jmp[q] end a
++ * block.
++ */
++ p = bb_buffer;
++ p += strcspn(p, ":");
++ if (*p++ == ':') {
++ bb_fixup_switch_to(p);
++ p += strspn(p, " \t"); /* start of instruction */
++ if (strncmp(p, "ret", 3) == 0 ||
++ strncmp(p, "iret", 4) == 0 ||
++ strncmp(p, "sysexit", 7) == 0 ||
++ strncmp(p, "sysret", 6) == 0 ||
++ strncmp(p, "ud2a", 4) == 0 ||
++ strncmp(p, "jmp", 3) == 0) {
++ if (strncmp(p, "jmp", 3) == 0)
++ bb_pass1_computed_jmp(p);
++ bb_add(0, bb_curr_addr);
++ };
++ }
++ bb_buffer[0] = '\0';
++ }
++ return 0;
++}
++
++static void
++bb_printaddr_pass1(bfd_vma addr, disassemble_info *dip)
++{
++ kdb_symtab_t symtab;
++ unsigned int offset;
++ struct bb* bb;
++ /* disasm only calls the printaddr routine for the target of jmp, loop
++ * or call instructions, i.e. the start of a basic block. call is
++ * ignored by bb_add because the target address is outside the current
++ * function.
++ */
++ dip->fprintf_func(dip->stream, "0x%lx", addr);
++ kdbnearsym(addr, &symtab);
++ if (symtab.sym_name) {
++ dip->fprintf_func(dip->stream, " <%s", symtab.sym_name);
++ if ((offset = addr - symtab.sym_start))
++ dip->fprintf_func(dip->stream, "+0x%x", offset);
++ dip->fprintf_func(dip->stream, ">");
++ }
++ bb = bb_add(addr, 0);
++ if (bb)
++ bb_jmp_add(bb_curr_addr, addr, 0);
++}
++
++static void
++bb_pass1(void)
++{
++ int i;
++ unsigned long addr;
++ struct bb* bb;
++ struct bb_jmp *bb_jmp;
++
++ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
++ kdb_printf("%s: func_name %s func_start " kdb_bfd_vma_fmt0
++ " func_end " kdb_bfd_vma_fmt0 "\n",
++ __FUNCTION__,
++ bb_func_name,
++ bb_func_start,
++ bb_func_end);
++ kdb_di.fprintf_func = bb_dis_pass1;
++ kdb_di.print_address_func = bb_printaddr_pass1;
++
++ bb_add(bb_func_start, 0);
++ for (bb_curr_addr = bb_func_start;
++ bb_curr_addr < bb_func_end;
++ ++bb_curr_addr) {
++ unsigned char c;
++ if (kdb_getarea(c, bb_curr_addr)) {
++ kdb_printf("%s: unreadable function code at ",
++ __FUNCTION__);
++ kdb_symbol_print(bb_curr_addr, NULL, KDB_SP_DEFAULT);
++ kdb_printf(", giving up\n");
++ bb_giveup = 1;
++ return;
++ }
++ }
++ for (addr = bb_func_start; addr < bb_func_end; ) {
++ bb_curr_addr = addr;
++ addr += kdba_id_printinsn(addr, &kdb_di);
++ kdb_di.fprintf_func(NULL, "\n");
++ }
++ if (bb_giveup)
++ goto out;
++
++ /* Special case: a block consisting of a single instruction which is
++ * both the target of a jmp and is also an ending instruction, so we
++ * add two blocks using the same address, one as a start and one as an
++ * end, in no guaranteed order. The end must be ordered after the
++ * start.
++ */
++ for (i = 0; i < bb_count-1; ++i) {
++ struct bb *bb1 = bb_list[i], *bb2 = bb_list[i+1];
++ if (bb1->end && bb1->end == bb2->start) {
++ bb = bb_list[i+1];
++ bb_list[i+1] = bb_list[i];
++ bb_list[i] = bb;
++ }
++ }
++
++ /* Some bb have a start address, some have an end address. Collapse
++ * them into entries that have both start and end addresses. The first
++ * entry is guaranteed to have a start address.
++ */
++ for (i = 0; i < bb_count-1; ++i) {
++ struct bb *bb1 = bb_list[i], *bb2 = bb_list[i+1];
++ if (bb1->end)
++ continue;
++ if (bb2->start) {
++ bb1->end = bb2->start - 1;
++ bb1->drop_through = 1;
++ bb_jmp_add(bb1->end, bb2->start, 1);
++ } else {
++ bb1->end = bb2->end;
++ bb_delete(i+1);
++ }
++ }
++ bb = bb_list[bb_count-1];
++ if (!bb->end)
++ bb->end = bb_func_end - 1;
++
++ /* It would be nice to check that all bb have a valid start and end
++ * address but there is just too much garbage code in the kernel to do
++ * that check. Aligned functions in assembler code mean that there is
++ * space between the end of one function and the start of the next and
++ * that space contains previous code from the assembler's buffers. It
++ * looks like dead code with nothing that branches to it, so no start
++ * address. do_sys_vm86() ends with 'jmp resume_userspace' which the C
++ * compiler does not know about so gcc appends the normal exit code,
++ * again nothing branches to this dangling code.
++ *
++ * The best we can do is delete bb entries with no start address.
++ */
++ for (i = 0; i < bb_count; ++i) {
++ struct bb *bb = bb_list[i];
++ if (!bb->start)
++ bb_delete(i--);
++ }
++ for (i = 0; i < bb_count; ++i) {
++ struct bb *bb = bb_list[i];
++ if (!bb->end) {
++ kdb_printf("%s: incomplete bb state\n", __FUNCTION__);
++ bb_giveup = 1;
++ goto debug;
++ }
++ }
++
++out:
++ if (!KDB_DEBUG(BB))
++ return;
++debug:
++ kdb_printf("%s: end\n", __FUNCTION__);
++ for (i = 0; i < bb_count; ++i) {
++ bb = bb_list[i];
++ kdb_printf(" bb[%d] start "
++ kdb_bfd_vma_fmt0
++ " end " kdb_bfd_vma_fmt0
++ " drop_through %d",
++ i, bb->start, bb->end, bb->drop_through);
++ kdb_printf("\n");
++ }
++ for (i = 0; i < bb_jmp_count; ++i) {
++ bb_jmp = bb_jmp_list + i;
++ kdb_printf(" bb_jmp[%d] from "
++ kdb_bfd_vma_fmt0
++ " to " kdb_bfd_vma_fmt0
++ " drop_through %d\n",
++ i, bb_jmp->from, bb_jmp->to, bb_jmp->drop_through);
++ }
++}
++
++/* Pass 2, record register changes in each basic block */
++
++/* For each opcode that we care about, indicate how it uses its operands. Most
++ * opcodes can be handled generically because they completely specify their
++ * operands in the instruction, however many opcodes have side effects such as
++ * reading or writing rax or updating rsp. Instructions that change registers
++ * that are not listed in the operands must be handled as special cases. In
++ * addition, instructions that copy registers while preserving their contents
++ * (push, pop, mov) or change the contents in a well defined way (add with an
++ * immediate, lea) must be handled as special cases in order to track the
++ * register contents.
++ *
++ * The tables below only list opcodes that are actually used in the Linux
++ * kernel, so they omit most of the floating point and all of the SSE type
++ * instructions. The operand usage entries only cater for accesses to memory
++ * and to the integer registers, accesses to floating point registers and flags
++ * are not relevant for kernel backtraces.
++ */
++
++enum bb_operand_usage {
++ BBOU_UNKNOWN = 0,
++ /* generic entries. because xchg can do any combinations of
++ * read src, write src, read dst and write dst we need to
++ * define all 16 possibilities. These are ordered by rs = 1,
++ * rd = 2, ws = 4, wd = 8, bb_usage_x*() functions rely on this
++ * order.
++ */
++ BBOU_RS = 1, /* read src */ /* 1 */
++ BBOU_RD, /* read dst */ /* 2 */
++ BBOU_RSRD, /* 3 */
++ BBOU_WS, /* write src */ /* 4 */
++ BBOU_RSWS, /* 5 */
++ BBOU_RDWS, /* 6 */
++ BBOU_RSRDWS, /* 7 */
++ BBOU_WD, /* write dst */ /* 8 */
++ BBOU_RSWD, /* 9 */
++ BBOU_RDWD, /* 10 */
++ BBOU_RSRDWD, /* 11 */
++ BBOU_WSWD, /* 12 */
++ BBOU_RSWSWD, /* 13 */
++ BBOU_RDWSWD, /* 14 */
++ BBOU_RSRDWSWD, /* 15 */
++ /* opcode specific entries */
++ BBOU_ADD,
++ BBOU_CALL,
++ BBOU_CBW,
++ BBOU_CMOV,
++ BBOU_CMPXCHG,
++ BBOU_CMPXCHGD,
++ BBOU_CPUID,
++ BBOU_CWD,
++ BBOU_DIV,
++ BBOU_IDIV,
++ BBOU_IMUL,
++ BBOU_IRET,
++ BBOU_JMP,
++ BBOU_LAHF,
++ BBOU_LEA,
++ BBOU_LEAVE,
++ BBOU_LODS,
++ BBOU_LOOP,
++ BBOU_LSS,
++ BBOU_MONITOR,
++ BBOU_MOV,
++ BBOU_MOVS,
++ BBOU_MUL,
++ BBOU_MWAIT,
++ BBOU_NOP,
++ BBOU_OUTS,
++ BBOU_POP,
++ BBOU_POPF,
++ BBOU_PUSH,
++ BBOU_PUSHF,
++ BBOU_RDMSR,
++ BBOU_RDTSC,
++ BBOU_RET,
++ BBOU_SAHF,
++ BBOU_SCAS,
++ BBOU_SUB,
++ BBOU_SYSEXIT,
++ BBOU_SYSRET,
++ BBOU_WRMSR,
++ BBOU_XADD,
++ BBOU_XCHG,
++ BBOU_XOR,
++};
++
++struct bb_opcode_usage {
++ int length;
++ enum bb_operand_usage usage;
++ const char *opcode;
++};
++
++/* This table is sorted in alphabetical order of opcode, except that the
++ * trailing '"' is treated as a high value. For example, 'in' sorts after
++ * 'inc', 'bt' after 'btc'. This modified sort order ensures that shorter
++ * opcodes come after long ones. A normal sort would put 'in' first, so 'in'
++ * would match both 'inc' and 'in'. When adding any new entries to this table,
++ * be careful to put shorter entries last in their group.
++ *
++ * To automatically sort the table (in vi)
++ * Mark the first and last opcode line with 'a and 'b
++ * 'a
++ * !'bsed -e 's/"}/}}/' | LANG=C sort -t '"' -k2 | sed -e 's/}}/"}/'
++ *
++ * If a new instruction has to be added, first consider if it affects registers
++ * other than those listed in the operands. Also consider if you want to track
++ * the results of issuing the instruction, IOW can you extract useful
++ * information by looking in detail at the modified registers or memory. If
++ * either test is true then you need a special case to handle the instruction.
++ *
++ * The generic entries at the start of enum bb_operand_usage all have one thing
++ * in common, if a register or memory location is updated then that location
++ * becomes undefined, i.e. we lose track of anything that was previously saved
++ * in that location. So only use a generic BBOU_* value when the result of the
++ * instruction cannot be calculated exactly _and_ when all the affected
++ * registers are listed in the operands.
++ *
++ * Examples:
++ *
++ * 'call' does not generate a known result, but as a side effect of call,
++ * several scratch registers become undefined, so it needs a special BBOU_CALL
++ * entry.
++ *
++ * 'adc' generates a variable result, it depends on the carry flag, so 'adc'
++ * gets a generic entry. 'add' can generate an exact result (add with
++ * immediate on a register that points to the stack) or it can generate an
++ * unknown result (add a variable, or add immediate to a register that does not
++ * contain a stack pointer) so 'add' has its own BBOU_ADD entry.
++ */
++
++static const struct bb_opcode_usage
++bb_opcode_usage_all[] = {
++ {3, BBOU_RSRDWD, "adc"},
++ {3, BBOU_ADD, "add"},
++ {3, BBOU_RSRDWD, "and"},
++ {3, BBOU_RSWD, "bsf"},
++ {3, BBOU_RSWD, "bsr"},
++ {5, BBOU_RSWS, "bswap"},
++ {3, BBOU_RSRDWD, "btc"},
++ {3, BBOU_RSRDWD, "btr"},
++ {3, BBOU_RSRDWD, "bts"},
++ {2, BBOU_RSRD, "bt"},
++ {4, BBOU_CALL, "call"},
++ {4, BBOU_CBW, "cbtw"}, /* Intel cbw */
++ {3, BBOU_NOP, "clc"},
++ {3, BBOU_NOP, "cld"},
++ {7, BBOU_RS, "clflush"},
++ {3, BBOU_NOP, "cli"},
++ {4, BBOU_CWD, "cltd"}, /* Intel cdq */
++ {4, BBOU_CBW, "cltq"}, /* Intel cdqe */
++ {4, BBOU_NOP, "clts"},
++ {4, BBOU_CMOV, "cmov"},
++ {9, BBOU_CMPXCHGD,"cmpxchg16"},
++ {8, BBOU_CMPXCHGD,"cmpxchg8"},
++ {7, BBOU_CMPXCHG, "cmpxchg"},
++ {3, BBOU_RSRD, "cmp"},
++ {5, BBOU_CPUID, "cpuid"},
++ {4, BBOU_CWD, "cqto"}, /* Intel cqo */
++ {4, BBOU_CWD, "cwtd"}, /* Intel cwd */
++ {4, BBOU_CBW, "cwtl"}, /* Intel cwde */
++ {4, BBOU_NOP, "data"}, /* alternative ASM_NOP<n> generates data16 on x86_64 */
++ {3, BBOU_RSWS, "dec"},
++ {3, BBOU_DIV, "div"},
++ {5, BBOU_RS, "fdivl"},
++ {5, BBOU_NOP, "finit"},
++ {6, BBOU_RS, "fistpl"},
++ {4, BBOU_RS, "fldl"},
++ {5, BBOU_RS, "fmull"},
++ {6, BBOU_NOP, "fnclex"},
++ {6, BBOU_NOP, "fninit"},
++ {6, BBOU_RS, "fnsave"},
++ {7, BBOU_NOP, "fnsetpm"},
++ {6, BBOU_RS, "frstor"},
++ {5, BBOU_WS, "fstsw"},
++ {5, BBOU_RS, "fsubp"},
++ {5, BBOU_NOP, "fwait"},
++ {7, BBOU_RS, "fxrstor"},
++ {6, BBOU_RS, "fxsave"},
++ {3, BBOU_NOP, "hlt"},
++ {4, BBOU_IDIV, "idiv"},
++ {4, BBOU_IMUL, "imul"},
++ {3, BBOU_RSWS, "inc"},
++ {3, BBOU_NOP, "int"},
++ {6, BBOU_RS, "invlpg"},
++ {2, BBOU_RSWD, "in"},
++ {4, BBOU_IRET, "iret"},
++ {1, BBOU_JMP, "j"},
++ {4, BBOU_LAHF, "lahf"},
++ {3, BBOU_RSWD, "lar"},
++ {5, BBOU_RS, "lcall"},
++ {5, BBOU_LEAVE, "leave"},
++ {3, BBOU_LEA, "lea"},
++ {6, BBOU_NOP, "lfence"},
++ {4, BBOU_RS, "lgdt"},
++ {4, BBOU_RS, "lidt"},
++ {4, BBOU_RS, "ljmp"},
++ {4, BBOU_RS, "lldt"},
++ {4, BBOU_RS, "lmsw"},
++ {4, BBOU_LODS, "lods"},
++ {4, BBOU_LOOP, "loop"},
++ {4, BBOU_NOP, "lret"},
++ {3, BBOU_RSWD, "lsl"},
++ {3, BBOU_LSS, "lss"},
++ {3, BBOU_RS, "ltr"},
++ {6, BBOU_NOP, "mfence"},
++ {7, BBOU_MONITOR, "monitor"},
++ {4, BBOU_MOVS, "movs"},
++ {3, BBOU_MOV, "mov"},
++ {3, BBOU_MUL, "mul"},
++ {5, BBOU_MWAIT, "mwait"},
++ {3, BBOU_RSWS, "neg"},
++ {3, BBOU_NOP, "nop"},
++ {3, BBOU_RSWS, "not"},
++ {2, BBOU_RSRDWD, "or"},
++ {4, BBOU_OUTS, "outs"},
++ {3, BBOU_RSRD, "out"},
++ {5, BBOU_NOP, "pause"},
++ {4, BBOU_POPF, "popf"},
++ {3, BBOU_POP, "pop"},
++ {8, BBOU_RS, "prefetch"},
++ {5, BBOU_PUSHF, "pushf"},
++ {4, BBOU_PUSH, "push"},
++ {3, BBOU_RSRDWD, "rcr"},
++ {5, BBOU_RDMSR, "rdmsr"},
++ {5, BBOU_RDTSC, "rdtsc"},
++ {3, BBOU_RET, "ret"},
++ {3, BBOU_RSRDWD, "rol"},
++ {3, BBOU_RSRDWD, "ror"},
++ {4, BBOU_SAHF, "sahf"},
++ {3, BBOU_RSRDWD, "sar"},
++ {3, BBOU_RSRDWD, "sbb"},
++ {4, BBOU_SCAS, "scas"},
++ {3, BBOU_WS, "set"},
++ {6, BBOU_NOP, "sfence"},
++ {4, BBOU_WS, "sgdt"},
++ {3, BBOU_RSRDWD, "shl"},
++ {3, BBOU_RSRDWD, "shr"},
++ {4, BBOU_WS, "sidt"},
++ {4, BBOU_WS, "sldt"},
++ {3, BBOU_NOP, "stc"},
++ {3, BBOU_NOP, "std"},
++ {3, BBOU_NOP, "sti"},
++ {4, BBOU_SCAS, "stos"},
++ {4, BBOU_WS, "strl"},
++ {3, BBOU_WS, "str"},
++ {3, BBOU_SUB, "sub"},
++ {6, BBOU_NOP, "swapgs"},
++ {7, BBOU_SYSEXIT, "sysexit"},
++ {6, BBOU_SYSRET, "sysret"},
++ {4, BBOU_NOP, "test"},
++ {4, BBOU_NOP, "ud2a"},
++ {6, BBOU_NOP, "wbinvd"},
++ {5, BBOU_WRMSR, "wrmsr"},
++ {4, BBOU_XADD, "xadd"},
++ {4, BBOU_XCHG, "xchg"},
++ {3, BBOU_XOR, "xor"},
++};
++
++/* To speed up searching, index bb_opcode_usage_all by the first letter of each
++ * opcode.
++ */
++static struct {
++ const struct bb_opcode_usage *opcode;
++ int size;
++} bb_opcode_usage[26];
++
++struct bb_operand {
++ char *base;
++ char *index;
++ char *segment;
++ long disp;
++ unsigned int scale;
++ enum bb_reg_code base_rc; /* UNDEFINED or RAX through R15 */
++ enum bb_reg_code index_rc; /* UNDEFINED or RAX through R15 */
++ unsigned int present :1;
++ unsigned int disp_present :1;
++ unsigned int indirect :1; /* must be combined with reg or memory */
++ unsigned int immediate :1; /* exactly one of these 3 must be set */
++ unsigned int reg :1;
++ unsigned int memory :1;
++};
++
++struct bb_decode {
++ char *prefix;
++ char *opcode;
++ const struct bb_opcode_usage *match;
++ struct bb_operand src;
++ struct bb_operand dst;
++ struct bb_operand dst2;
++};
++
++static struct bb_decode bb_decode;
++
++static enum bb_reg_code
++bb_reg_map(const char *reg)
++{
++ int lo, hi, c;
++ const struct bb_reg_code_map *p;
++ lo = 0;
++ hi = ARRAY_SIZE(bb_reg_code_map) - 1;
++ while (lo <= hi) {
++ int mid = (hi + lo) / 2;
++ p = bb_reg_code_map + mid;
++ c = strcmp(p->name, reg+1);
++ if (c == 0)
++ return p->reg;
++ else if (c > 0)
++ hi = mid - 1;
++ else
++ lo = mid + 1;
++ }
++ return BBRG_UNDEFINED;
++}
++
++static void
++bb_parse_operand(char *str, struct bb_operand *operand)
++{
++ char *p = str;
++ int sign = 1;
++ operand->present = 1;
++ /* extract any segment prefix */
++ if (p[0] == '%' && p[1] && p[2] == 's' && p[3] == ':') {
++ operand->memory = 1;
++ operand->segment = p;
++ p[3] = '\0';
++ p += 4;
++ }
++ /* extract displacement, base, index, scale */
++ if (*p == '*') {
++ /* jmp/call *disp(%reg), *%reg or *0xnnn */
++ operand->indirect = 1;
++ ++p;
++ }
++ if (*p == '-') {
++ sign = -1;
++ ++p;
++ }
++ if (*p == '$') {
++ operand->immediate = 1;
++ operand->disp_present = 1;
++ operand->disp = simple_strtoul(p+1, &p, 0);
++ } else if (isdigit(*p)) {
++ operand->memory = 1;
++ operand->disp_present = 1;
++ operand->disp = simple_strtoul(p, &p, 0) * sign;
++ }
++ if (*p == '%') {
++ operand->reg = 1;
++ operand->base = p;
++ } else if (*p == '(') {
++ operand->memory = 1;
++ operand->base = ++p;
++ p += strcspn(p, ",)");
++ if (p == operand->base)
++ operand->base = NULL;
++ if (*p == ',') {
++ *p = '\0';
++ operand->index = ++p;
++ p += strcspn(p, ",)");
++ if (p == operand->index)
++ operand->index = NULL;
++ }
++ if (*p == ',') {
++ *p = '\0';
++ operand->scale = simple_strtoul(p+1, &p, 0);
++ }
++ *p = '\0';
++ } else if (*p) {
++ kdb_printf("%s: unexpected token '%c' after disp '%s'\n",
++ __FUNCTION__, *p, str);
++ bb_giveup = 1;
++ }
++ if ((operand->immediate + operand->reg + operand->memory != 1) ||
++ (operand->indirect && operand->immediate)) {
++ kdb_printf("%s: incorrect decode '%s' N %d I %d R %d M %d\n",
++ __FUNCTION__, str,
++ operand->indirect, operand->immediate, operand->reg,
++ operand->memory);
++ bb_giveup = 1;
++ }
++ if (operand->base)
++ operand->base_rc = bb_reg_map(operand->base);
++ if (operand->index)
++ operand->index_rc = bb_reg_map(operand->index);
++}
++
++static void
++bb_print_operand(const char *type, const struct bb_operand *operand)
++{
++ if (!operand->present)
++ return;
++ kdb_printf(" %s %c%c: ",
++ type,
++ operand->indirect ? 'N' : ' ',
++ operand->immediate ? 'I' :
++ operand->reg ? 'R' :
++ operand->memory ? 'M' :
++ '?'
++ );
++ if (operand->segment)
++ kdb_printf("%s:", operand->segment);
++ if (operand->immediate) {
++ kdb_printf("$0x%lx", operand->disp);
++ } else if (operand->reg) {
++ if (operand->indirect)
++ kdb_printf("*");
++ kdb_printf("%s", operand->base);
++ } else if (operand->memory) {
++ if (operand->indirect && (operand->base || operand->index))
++ kdb_printf("*");
++ if (operand->disp_present) {
++ kdb_printf("0x%lx", operand->disp);
++ }
++ if (operand->base || operand->index || operand->scale) {
++ kdb_printf("(");
++ if (operand->base)
++ kdb_printf("%s", operand->base);
++ if (operand->index || operand->scale)
++ kdb_printf(",");
++ if (operand->index)
++ kdb_printf("%s", operand->index);
++ if (operand->scale)
++ kdb_printf(",%d", operand->scale);
++ kdb_printf(")");
++ }
++ }
++ if (operand->base_rc)
++ kdb_printf(" base_rc %d (%s)",
++ operand->base_rc, bbrg_name[operand->base_rc]);
++ if (operand->index_rc)
++ kdb_printf(" index_rc %d (%s)",
++ operand->index_rc,
++ bbrg_name[operand->index_rc]);
++ kdb_printf("\n");
++}
++
++static void
++bb_print_opcode(void)
++{
++ const struct bb_opcode_usage *o = bb_decode.match;
++ kdb_printf(" ");
++ if (bb_decode.prefix)
++ kdb_printf("%s ", bb_decode.prefix);
++ kdb_printf("opcode '%s' matched by '%s', usage %d\n",
++ bb_decode.opcode, o->opcode, o->usage);
++}
++
++static int
++bb_parse_opcode(void)
++{
++ int c, i;
++ const struct bb_opcode_usage *o;
++ static int bb_parse_opcode_error_limit = 5;
++ c = bb_decode.opcode[0] - 'a';
++ if (c < 0 || c >= ARRAY_SIZE(bb_opcode_usage))
++ goto nomatch;
++ o = bb_opcode_usage[c].opcode;
++ if (!o)
++ goto nomatch;
++ for (i = 0; i < bb_opcode_usage[c].size; ++i, ++o) {
++ if (strncmp(bb_decode.opcode, o->opcode, o->length) == 0) {
++ bb_decode.match = o;
++ if (KDB_DEBUG(BB))
++ bb_print_opcode();
++ return 0;
++ }
++ }
++nomatch:
++ if (!bb_parse_opcode_error_limit)
++ return 1;
++ --bb_parse_opcode_error_limit;
++ kdb_printf("%s: no match at [%s]%s " kdb_bfd_vma_fmt0 " - '%s'\n",
++ __FUNCTION__,
++ bb_mod_name, bb_func_name, bb_curr_addr,
++ bb_decode.opcode);
++ return 1;
++}
++
++static bool
++bb_is_int_reg(enum bb_reg_code reg)
++{
++ return reg >= BBRG_RAX && reg < (BBRG_RAX + KDB_INT_REGISTERS);
++}
++
++static bool
++bb_is_simple_memory(const struct bb_operand *operand)
++{
++ return operand->memory &&
++ bb_is_int_reg(operand->base_rc) &&
++ !operand->index_rc &&
++ operand->scale == 0 &&
++ !operand->segment;
++}
++
++static bool
++bb_is_static_disp(const struct bb_operand *operand)
++{
++ return operand->memory &&
++ !operand->base_rc &&
++ !operand->index_rc &&
++ operand->scale == 0 &&
++ !operand->segment &&
++ !operand->indirect;
++}
++
++static enum bb_reg_code
++bb_reg_code_value(enum bb_reg_code reg)
++{
++ BB_CHECK(!bb_is_int_reg(reg), reg, 0);
++ return bb_reg_state->contains[reg - BBRG_RAX].value;
++}
++
++static short
++bb_reg_code_offset(enum bb_reg_code reg)
++{
++ BB_CHECK(!bb_is_int_reg(reg), reg, 0);
++ return bb_reg_state->contains[reg - BBRG_RAX].offset;
++}
++
++static void
++bb_reg_code_set_value(enum bb_reg_code dst, enum bb_reg_code src)
++{
++ BB_CHECK(!bb_is_int_reg(dst), dst, );
++ bb_reg_state->contains[dst - BBRG_RAX].value = src;
++}
++
++static void
++bb_reg_code_set_offset(enum bb_reg_code dst, short offset)
++{
++ BB_CHECK(!bb_is_int_reg(dst), dst, );
++ bb_reg_state->contains[dst - BBRG_RAX].offset = offset;
++}
++
++static bool
++bb_is_osp_defined(enum bb_reg_code reg)
++{
++ if (bb_is_int_reg(reg))
++ return bb_reg_code_value(reg) == BBRG_OSP;
++ else
++ return 0;
++}
++
++static bfd_vma
++bb_actual_value(enum bb_reg_code reg)
++{
++ BB_CHECK(!bb_is_int_reg(reg), reg, 0);
++ return bb_actual[reg - BBRG_RAX].value;
++}
++
++static int
++bb_actual_valid(enum bb_reg_code reg)
++{
++ BB_CHECK(!bb_is_int_reg(reg), reg, 0);
++ return bb_actual[reg - BBRG_RAX].valid;
++}
++
++static void
++bb_actual_set_value(enum bb_reg_code reg, bfd_vma value)
++{
++ BB_CHECK(!bb_is_int_reg(reg), reg, );
++ bb_actual[reg - BBRG_RAX].value = value;
++}
++
++static void
++bb_actual_set_valid(enum bb_reg_code reg, int valid)
++{
++ BB_CHECK(!bb_is_int_reg(reg), reg, );
++ bb_actual[reg - BBRG_RAX].valid = valid;
++}
++
++/* The scheduler code switches RSP then does PUSH, it is not an error for RSP
++ * to be undefined in this area of the code.
++ */
++static bool
++bb_is_scheduler_address(void)
++{
++ return bb_curr_addr >= bb__sched_text_start &&
++ bb_curr_addr < bb__sched_text_end;
++}
++
++static void
++bb_reg_read(enum bb_reg_code reg)
++{
++ int i, o = 0;
++ if (!bb_is_int_reg(reg) ||
++ bb_reg_code_value(reg) != reg)
++ return;
++ for (i = 0;
++ i < min_t(unsigned int, REGPARM, ARRAY_SIZE(bb_param_reg));
++ ++i) {
++ if (reg == bb_param_reg[i]) {
++ o = i + 1;
++ break;
++ }
++ }
++ bb_reg_params = max(bb_reg_params, o);
++}
++
++static void
++bb_do_reg_state_print(const struct bb_reg_state *s)
++{
++ int i, offset_address, offset_value;
++ struct bb_memory_contains *c;
++ enum bb_reg_code value;
++ kdb_printf(" bb_reg_state %p\n", s);
++ for (i = 0; i < ARRAY_SIZE(s->contains); ++i) {
++ value = s->contains[i].value;
++ offset_value = s->contains[i].offset;
++ kdb_printf(" %s = %s",
++ bbrg_name[i + BBRG_RAX], bbrg_name[value]);
++ if (value == BBRG_OSP)
++ KDB_DEBUG_BB_OFFSET_PRINTF(offset_value, "", "");
++ kdb_printf("\n");
++ }
++ for (i = 0, c = s->memory; i < s->mem_count; ++i, ++c) {
++ offset_address = c->offset_address;
++ value = c->value;
++ offset_value = c->offset_value;
++ kdb_printf(" slot %d offset_address %c0x%x %s",
++ i,
++ offset_address >= 0 ? '+' : '-',
++ offset_address >= 0 ? offset_address : -offset_address,
++ bbrg_name[value]);
++ if (value == BBRG_OSP)
++ KDB_DEBUG_BB_OFFSET_PRINTF(offset_value, "", "");
++ kdb_printf("\n");
++ }
++}
++
++static void
++bb_reg_state_print(const struct bb_reg_state *s)
++{
++ if (KDB_DEBUG(BB))
++ bb_do_reg_state_print(s);
++}
++
++/* Set register 'dst' to contain the value from 'src'. This includes reading
++ * from 'src' and writing to 'dst'. The offset value is copied iff 'src'
++ * contains a stack pointer.
++ *
++ * Be very careful about the context here. 'dst' and 'src' reflect integer
++ * registers by name, _not_ by the value of their contents. "mov %rax,%rsi"
++ * will call this function as bb_reg_set_reg(BBRG_RSI, BBRG_RAX), which
++ * reflects what the assembler code is doing. However we need to track the
++ * _values_ in the registers, not their names. IOW, we really care about "what
++ * value does rax contain when it is copied into rsi?", so we can record the
++ * fact that we now have two copies of that value, one in rax and one in rsi.
++ */
++
++static void
++bb_reg_set_reg(enum bb_reg_code dst, enum bb_reg_code src)
++{
++ enum bb_reg_code src_value = BBRG_UNDEFINED;
++ short offset_value = 0;
++ KDB_DEBUG_BB(" %s = %s", bbrg_name[dst], bbrg_name[src]);
++ if (bb_is_int_reg(src)) {
++ bb_reg_read(src);
++ src_value = bb_reg_code_value(src);
++ KDB_DEBUG_BB(" (%s", bbrg_name[src_value]);
++ if (bb_is_osp_defined(src)) {
++ offset_value = bb_reg_code_offset(src);
++ KDB_DEBUG_BB_OFFSET(offset_value, "", "");
++ }
++ KDB_DEBUG_BB(")");
++ }
++ if (bb_is_int_reg(dst)) {
++ bb_reg_code_set_value(dst, src_value);
++ bb_reg_code_set_offset(dst, offset_value);
++ }
++ KDB_DEBUG_BB("\n");
++}
++
++static void
++bb_reg_set_undef(enum bb_reg_code dst)
++{
++ bb_reg_set_reg(dst, BBRG_UNDEFINED);
++}
++
++/* Delete any record of a stored register held in osp + 'offset' */
++
++static void
++bb_delete_memory(short offset)
++{
++ int i;
++ struct bb_memory_contains *c;
++ for (i = 0, c = bb_reg_state->memory;
++ i < bb_reg_state->mem_count;
++ ++i, ++c) {
++ if (c->offset_address == offset &&
++ c->value != BBRG_UNDEFINED) {
++ KDB_DEBUG_BB(" delete %s from ",
++ bbrg_name[c->value]);
++ KDB_DEBUG_BB_OFFSET(offset, "osp", "");
++ KDB_DEBUG_BB(" slot %d\n",
++ (int)(c - bb_reg_state->memory));
++ memset(c, BBRG_UNDEFINED, sizeof(*c));
++ if (i == bb_reg_state->mem_count - 1)
++ --bb_reg_state->mem_count;
++ }
++ }
++}
++
++/* Set memory location *('dst' + 'offset_address') to contain the supplied
++ * value and offset. 'dst' is assumed to be a register that contains a stack
++ * pointer.
++ */
++
++static void
++bb_memory_set_reg_value(enum bb_reg_code dst, short offset_address,
++ enum bb_reg_code value, short offset_value)
++{
++ int i;
++ struct bb_memory_contains *c, *free = NULL;
++ BB_CHECK(!bb_is_osp_defined(dst), dst, );
++ KDB_DEBUG_BB(" *(%s", bbrg_name[dst]);
++ KDB_DEBUG_BB_OFFSET(offset_address, "", "");
++ offset_address += bb_reg_code_offset(dst);
++ KDB_DEBUG_BB_OFFSET(offset_address, " osp", ") = ");
++ KDB_DEBUG_BB("%s", bbrg_name[value]);
++ if (value == BBRG_OSP)
++ KDB_DEBUG_BB_OFFSET(offset_value, "", "");
++ for (i = 0, c = bb_reg_state->memory;
++ i < bb_reg_state_max;
++ ++i, ++c) {
++ if (c->offset_address == offset_address)
++ free = c;
++ else if (c->value == BBRG_UNDEFINED && !free)
++ free = c;
++ }
++ if (!free) {
++ struct bb_reg_state *new, *old = bb_reg_state;
++ size_t old_size, new_size;
++ int slot;
++ old_size = sizeof(*old) + bb_reg_state_max *
++ sizeof(old->memory[0]);
++ slot = bb_reg_state_max;
++ bb_reg_state_max += 5;
++ new_size = sizeof(*new) + bb_reg_state_max *
++ sizeof(new->memory[0]);
++ new = debug_kmalloc(new_size, GFP_ATOMIC);
++ if (!new) {
++ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
++ bb_giveup = 1;
++ } else {
++ memcpy(new, old, old_size);
++ memset((char *)new + old_size, BBRG_UNDEFINED,
++ new_size - old_size);
++ bb_reg_state = new;
++ debug_kfree(old);
++ free = bb_reg_state->memory + slot;
++ }
++ }
++ if (free) {
++ int slot = free - bb_reg_state->memory;
++ free->offset_address = offset_address;
++ free->value = value;
++ free->offset_value = offset_value;
++ KDB_DEBUG_BB(" slot %d", slot);
++ bb_reg_state->mem_count = max(bb_reg_state->mem_count, slot+1);
++ }
++ KDB_DEBUG_BB("\n");
++}
++
++/* Set memory location *('dst' + 'offset') to contain the value from register
++ * 'src'. 'dst' is assumed to be a register that contains a stack pointer.
++ * This differs from bb_memory_set_reg_value because it takes a src register
++ * which contains a value and possibly an offset, bb_memory_set_reg_value is
++ * passed the value and offset directly.
++ */
++
++static void
++bb_memory_set_reg(enum bb_reg_code dst, enum bb_reg_code src,
++ short offset_address)
++{
++ int offset_value;
++ enum bb_reg_code value;
++ BB_CHECK(!bb_is_osp_defined(dst), dst, );
++ if (!bb_is_int_reg(src))
++ return;
++ value = bb_reg_code_value(src);
++ if (value == BBRG_UNDEFINED) {
++ bb_delete_memory(offset_address + bb_reg_code_offset(dst));
++ return;
++ }
++ offset_value = bb_reg_code_offset(src);
++ bb_reg_read(src);
++ bb_memory_set_reg_value(dst, offset_address, value, offset_value);
++}
++
++/* Set register 'dst' to contain the value from memory *('src' + offset_address).
++ * 'src' is assumed to be a register that contains a stack pointer.
++ */
++
++static void
++bb_reg_set_memory(enum bb_reg_code dst, enum bb_reg_code src, short offset_address)
++{
++ int i, defined = 0;
++ struct bb_memory_contains *s;
++ BB_CHECK(!bb_is_osp_defined(src), src, );
++ KDB_DEBUG_BB(" %s = *(%s",
++ bbrg_name[dst], bbrg_name[src]);
++ KDB_DEBUG_BB_OFFSET(offset_address, "", ")");
++ offset_address += bb_reg_code_offset(src);
++ KDB_DEBUG_BB_OFFSET(offset_address, " (osp", ")");
++ for (i = 0, s = bb_reg_state->memory;
++ i < bb_reg_state->mem_count;
++ ++i, ++s) {
++ if (s->offset_address == offset_address && bb_is_int_reg(dst)) {
++ bb_reg_code_set_value(dst, s->value);
++ KDB_DEBUG_BB(" value %s", bbrg_name[s->value]);
++ if (s->value == BBRG_OSP) {
++ bb_reg_code_set_offset(dst, s->offset_value);
++ KDB_DEBUG_BB_OFFSET(s->offset_value, "", "");
++ } else {
++ bb_reg_code_set_offset(dst, 0);
++ }
++ defined = 1;
++ }
++ }
++ if (!defined)
++ bb_reg_set_reg(dst, BBRG_UNDEFINED);
++ else
++ KDB_DEBUG_BB("\n");
++}
++
++/* A generic read from an operand. */
++
++static void
++bb_read_operand(const struct bb_operand *operand)
++{
++ int o = 0;
++ if (operand->base_rc)
++ bb_reg_read(operand->base_rc);
++ if (operand->index_rc)
++ bb_reg_read(operand->index_rc);
++ if (bb_is_simple_memory(operand) &&
++ bb_is_osp_defined(operand->base_rc) &&
++ bb_decode.match->usage != BBOU_LEA) {
++ o = (bb_reg_code_offset(operand->base_rc) + operand->disp +
++ KDB_WORD_SIZE - 1) / KDB_WORD_SIZE;
++ bb_memory_params = max(bb_memory_params, o);
++ }
++}
++
++/* A generic write to an operand, resulting in an undefined value in that
++ * location.  All well defined operands are handled separately, this function
++ * only handles the opcodes where the result is undefined.
++ */
++
++static void
++bb_write_operand(const struct bb_operand *operand)
++{
++	enum bb_reg_code base_rc = operand->base_rc;
++	if (operand->memory) {
++		/* A write through memory still reads the addressing
++		 * registers.
++		 */
++		if (base_rc)
++			bb_reg_read(base_rc);
++		if (operand->index_rc)
++			bb_reg_read(operand->index_rc);
++	} else if (operand->reg && base_rc) {
++		bb_reg_set_undef(base_rc);
++	}
++	if (bb_is_simple_memory(operand) && bb_is_osp_defined(base_rc)) {
++		/* Writing to a tracked stack slot invalidates whatever
++		 * register value was recorded there.  Align the offset down
++		 * to the enclosing word before deleting.
++		 */
++		int offset;
++		offset = bb_reg_code_offset(base_rc) + operand->disp;
++		offset = ALIGN(offset - KDB_WORD_SIZE + 1, KDB_WORD_SIZE);
++		bb_delete_memory(offset);
++	}
++}
++
++/* Adjust a register that contains a stack pointer.  reg must already map the
++ * original stack pointer (osp); its recorded offset is changed by adjust.
++ */
++
++static void
++bb_adjust_osp(enum bb_reg_code reg, int adjust)
++{
++	int offset = bb_reg_code_offset(reg), old_offset = offset;
++	KDB_DEBUG_BB("  %s osp offset ", bbrg_name[reg]);
++	KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(reg), "", " -> ");
++	offset += adjust;
++	bb_reg_code_set_offset(reg, offset);
++	KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(reg), "", "\n");
++	/* When RSP is adjusted upwards, it invalidates any memory
++	 * stored between the old and current stack offsets.
++	 */
++	if (reg == BBRG_RSP) {
++		while (old_offset < bb_reg_code_offset(reg)) {
++			bb_delete_memory(old_offset);
++			old_offset += KDB_WORD_SIZE;
++		}
++	}
++}
++
++/* The current instruction adjusts a register that contains a stack pointer.
++ * Direction is 1 or -1, depending on whether the instruction is add/lea or
++ * sub.  Only an immediate (or lea displacement) gives a known adjustment;
++ * a variable adjustment makes the stack pointer untrackable.
++ */
++
++static void
++bb_adjust_osp_instruction(int direction)
++{
++	enum bb_reg_code dst_reg = bb_decode.dst.base_rc;
++	if (bb_decode.src.immediate ||
++	    bb_decode.match->usage == BBOU_LEA /* lea has its own checks */) {
++		int adjust = direction * bb_decode.src.disp;
++		bb_adjust_osp(dst_reg, adjust);
++	} else {
++		/* variable stack adjustment, osp offset is not well defined */
++		KDB_DEBUG_BB("  %s osp offset ", bbrg_name[dst_reg]);
++		KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(dst_reg), "", " -> undefined\n");
++		bb_reg_code_set_value(dst_reg, BBRG_UNDEFINED);
++		bb_reg_code_set_offset(dst_reg, 0);
++	}
++}
++
++/* Some instructions using memory have an explicit length suffix (b, w, l, q).
++ * The equivalent instructions using a register imply the length from the
++ * register name.  Deduce the operand length.
++ *
++ * Returns the operand length in bits (8, 16, 32 or 64), or 0 when it cannot
++ * be deduced from either the suffix or the register name.
++ */
++
++static int
++bb_operand_length(const struct bb_operand *operand, char opcode_suffix)
++{
++	int l = 0;
++	switch (opcode_suffix) {
++	case 'b':
++		l = 8;
++		break;
++	case 'w':
++		l = 16;
++		break;
++	case 'l':
++		l = 32;
++		break;
++	case 'q':
++		l = 64;
++		break;
++	}
++	if (l == 0 && operand->reg) {
++		switch (strlen(operand->base)) {
++		case 3:
++			/* 3 character name, e.g. %al/%ah (8 bit) or %ax
++			 * (16 bit).
++			 */
++			switch (operand->base[2]) {
++			case 'h':
++			case 'l':
++				l = 8;
++				break;
++			default:
++				l = 16;
++				break;
++			}
++			/* Bug fix: without this break the code fell through
++			 * into case 4 and overwrote the 8/16 bit result
++			 * with 32.
++			 */
++			break;
++		case 4:
++			/* 4 character name, %r.. is 64 bit, %e.. is 32 bit */
++			if (operand->base[1] == 'r')
++				l = 64;
++			else
++				l = 32;
++			break;
++		}
++	}
++	return l;
++}
++
++/* Total size in bytes of a bb_reg_state, i.e. the fixed header plus the
++ * variable length memory[] array holding mem_count entries.
++ */
++static int
++bb_reg_state_size(const struct bb_reg_state *state)
++{
++	return sizeof(*state) +
++	       state->mem_count * sizeof(state->memory[0]);
++}
++
++/* Canonicalize the current bb_reg_state so it can be compared against
++ * previously created states.  Sort the memory entries in descending order of
++ * offset_address (stack grows down).  Empty slots are moved to the end of the
++ * list and trimmed.
++ */
++
++static void
++bb_reg_state_canonicalize(void)
++{
++	int i, o, changed;
++	struct bb_memory_contains *p1, *p2, temp;
++	/* Bubble sort adjacent pairs until stable.  o > 0 means swap:
++	 * undefined entries sink to the end, otherwise order by descending
++	 * offset_address.
++	 */
++	do {
++		changed = 0;
++		for (i = 0, p1 = bb_reg_state->memory;
++		     i < bb_reg_state->mem_count-1;
++		     ++i, ++p1) {
++			p2 = p1 + 1;
++			if (p2->value == BBRG_UNDEFINED) {
++				o = 0;
++			} else if (p1->value == BBRG_UNDEFINED) {
++				o = 1;
++			} else if (p1->offset_address < p2->offset_address) {
++				o = 1;
++			} else if (p1->offset_address > p2->offset_address) {
++				o = -1;
++			} else {
++				o = 0;
++			}
++			if (o > 0) {
++				temp = *p2;
++				*p2 = *p1;
++				*p1 = temp;
++				changed = 1;
++			}
++		}
++	} while(changed);
++	/* Trim: mem_count ends up just past the last defined entry */
++	for (i = 0, p1 = bb_reg_state->memory;
++	     i < bb_reg_state_max;
++	     ++i, ++p1) {
++		if (p1->value != BBRG_UNDEFINED)
++			bb_reg_state->mem_count = i + 1;
++	}
++	bb_reg_state_print(bb_reg_state);
++}
++
++/* Check a transfer of control against the table of known special-case
++ * labels (mainly entry.S code).  Returns 1 if the target is a special case
++ * (after validating register and stack state against the expected state for
++ * that label, setting bb_giveup on mismatch), 0 if it is not special and the
++ * caller must apply the normal rules.
++ */
++static int
++bb_special_case(bfd_vma to)
++{
++	int i, j, rsp_offset, expect_offset, offset, errors = 0, max_errors = 40;
++	enum bb_reg_code reg, expect_value, value;
++	struct bb_name_state *r;
++
++	for (i = 0, r = bb_special_cases;
++	     i < ARRAY_SIZE(bb_special_cases);
++	     ++i, ++r) {
++		if (to == r->address &&
++		    (r->fname == NULL || strcmp(bb_func_name, r->fname) == 0))
++			goto match;
++	}
++	/* Some inline assembler code has jumps to .fixup sections which result
++	 * in out of line transfers with undefined state, ignore them.
++	 */
++	if (strcmp(bb_func_name, "strnlen_user") == 0 ||
++	    strcmp(bb_func_name, "copy_from_user") == 0)
++		return 1;
++	return 0;
++
++match:
++	/* Check the running registers match.
++	 * NOTE(review): reg (an enum starting at BBRG_RAX) is compared
++	 * directly against r->regs_size and used to index r->regs[] -
++	 * presumably regs[] is laid out so register codes are valid
++	 * indices; confirm against the bb_name_state definition.
++	 */
++	for (reg = BBRG_RAX; reg < r->regs_size; ++reg) {
++		expect_value = r->regs[reg].value;
++		if (test_bit(expect_value, r->skip_regs.bits)) {
++			/* this regs entry is not defined for this label */
++			continue;
++		}
++		if (expect_value == BBRG_UNDEFINED)
++			continue;
++		expect_offset = r->regs[reg].offset;
++		value = bb_reg_code_value(reg);
++		offset = bb_reg_code_offset(reg);
++		if (expect_value == value &&
++		    (value != BBRG_OSP || r->osp_offset == offset))
++			continue;
++		kdb_printf("%s: Expected %s to contain %s",
++			   __FUNCTION__,
++			   bbrg_name[reg],
++			   bbrg_name[expect_value]);
++		if (r->osp_offset)
++			KDB_DEBUG_BB_OFFSET_PRINTF(r->osp_offset, "", "");
++		kdb_printf(".  It actually contains %s", bbrg_name[value]);
++		if (offset)
++			KDB_DEBUG_BB_OFFSET_PRINTF(offset, "", "");
++		kdb_printf("\n");
++		++errors;
++		if (max_errors-- == 0)
++			goto fail;
++	}
++	/* Check that any memory data on stack matches.  Walk both lists in
++	 * parallel; both are sorted by descending offset.
++	 */
++	i = j = 0;
++	while (i < bb_reg_state->mem_count &&
++	       j < r->mem_size) {
++		expect_value = r->mem[j].value;
++		if (test_bit(expect_value, r->skip_mem.bits) ||
++		    expect_value == BBRG_UNDEFINED) {
++			/* this memory slot is not defined for this label */
++			++j;
++			continue;
++		}
++		rsp_offset = bb_reg_state->memory[i].offset_address -
++			bb_reg_code_offset(BBRG_RSP);
++		if (rsp_offset >
++		    r->mem[j].offset_address) {
++			/* extra slots in memory are OK */
++			++i;
++		} else if (rsp_offset <
++			   r->mem[j].offset_address) {
++			/* Required memory slot is missing */
++			kdb_printf("%s: Invalid bb_reg_state.memory, "
++				   "missing memory entry[%d] %s\n",
++				   __FUNCTION__, j, bbrg_name[expect_value]);
++			++errors;
++			if (max_errors-- == 0)
++				goto fail;
++			++j;
++		} else {
++			if (bb_reg_state->memory[i].offset_value ||
++			    bb_reg_state->memory[i].value != expect_value) {
++				/* memory slot is present but contains wrong
++				 * value.
++				 */
++				kdb_printf("%s: Invalid bb_reg_state.memory, "
++					   "wrong value in slot %d, "
++					   "should be %s, it is %s\n",
++					   __FUNCTION__, i,
++					   bbrg_name[expect_value],
++					   bbrg_name[bb_reg_state->memory[i].value]);
++				++errors;
++				if (max_errors-- == 0)
++					goto fail;
++			}
++			++i;
++			++j;
++		}
++	}
++	/* Skip any trailing expected entries that are marked skippable or
++	 * undefined.
++	 */
++	while (j < r->mem_size) {
++		expect_value = r->mem[j].value;
++		if (test_bit(expect_value, r->skip_mem.bits) ||
++		    expect_value == BBRG_UNDEFINED)
++			++j;
++		else
++			break;
++	}
++	if (j != r->mem_size) {
++		/* Hit end of memory before testing all the pt_reg slots */
++		kdb_printf("%s: Invalid bb_reg_state.memory, "
++			   "missing trailing entries\n",
++			   __FUNCTION__);
++		++errors;
++		if (max_errors-- == 0)
++			goto fail;
++	}
++	if (errors)
++		goto fail;
++	return 1;
++fail:
++	kdb_printf("%s: on transfer to %s\n", __FUNCTION__, r->name);
++	bb_giveup = 1;
++	return 1;
++}
++
++/* Transfer of control to a label outside the current function.  If the
++ * transfer is to a known common code path then do a sanity check on the state
++ * at this point.  Each preserved register must still contain its original
++ * value; type == 1 means sysret/sysexit, which relaxes the checks on RSP
++ * (and RBP on i386).  Any mismatch raises BB_CHECK which sets bb_giveup.
++ */
++
++static void
++bb_sanity_check(int type)
++{
++	enum bb_reg_code expect, actual;
++	int i, offset, error = 0;
++
++	for (i = 0; i < ARRAY_SIZE(bb_preserved_reg); ++i) {
++		expect = bb_preserved_reg[i];
++		actual = bb_reg_code_value(expect);
++		offset = bb_reg_code_offset(expect);
++		if (expect == actual)
++			continue;
++		/* type == 1 is sysret/sysexit, ignore RSP */
++		if (type && expect == BBRG_RSP)
++			continue;
++#ifndef CONFIG_X86_64
++		/* type == 1 is sysret/sysexit, ignore RBP for i386 */
++		if (type && expect == BBRG_RBP)
++			continue;
++#endif	/* !CONFIG_X86_64 */
++		/* RSP should contain OSP+0.  Except for ptregscall_common and
++		 * ia32_ptregs_common, they get a partial pt_regs, fudge the
++		 * stack to make it a full pt_regs then reverse the effect on
++		 * exit, so the offset is -0x50 on exit.
++		 */
++		if (expect == BBRG_RSP &&
++		    bb_is_osp_defined(expect) &&
++		    (offset == 0 ||
++		     (offset == -0x50 &&
++		      (strcmp(bb_func_name, "ptregscall_common") == 0 ||
++		       strcmp(bb_func_name, "ia32_ptregs_common") == 0))))
++			continue;
++		kdb_printf("%s: Expected %s, got %s",
++			   __FUNCTION__,
++			   bbrg_name[expect], bbrg_name[actual]);
++		if (offset)
++			KDB_DEBUG_BB_OFFSET_PRINTF(offset, "", "");
++		kdb_printf("\n");
++		error = 1;
++	}
++	BB_CHECK(error, error, );
++}
++
++/* Transfer of control.  Follow the arc and save the current state as input to
++ * another basic block.  Register states are reference counted and shared
++ * between arcs when identical, to save memory.
++ */
++
++static void
++bb_transfer(bfd_vma from, bfd_vma to, unsigned int drop_through)
++{
++	int i, found;
++	size_t size;
++	struct bb* bb = NULL;	/* initialized only to silence gcc */
++	struct bb_jmp *bb_jmp;
++	struct bb_reg_state *state;
++	bb_reg_state_canonicalize();
++	/* Look for an already recorded arc matching this transfer */
++	found = 0;
++	for (i = 0; i < bb_jmp_count; ++i) {
++		bb_jmp = bb_jmp_list + i;
++		if (bb_jmp->from == from &&
++		    bb_jmp->to == to &&
++		    bb_jmp->drop_through == drop_through) {
++			found = 1;
++			break;
++		}
++	}
++	if (!found) {
++		/* Transfer outside the current function.  Check the special
++		 * cases (mainly in entry.S) first.  If it is not a known
++		 * special case then check if the target address is the start
++		 * of a function or not.  If it is the start of a function then
++		 * assume tail recursion and require that the state be the same
++		 * as on entry.  Otherwise assume out of line code (e.g.
++		 * spinlock contention path) and ignore it, the state can be
++		 * anything.
++		 */
++		kdb_symtab_t symtab;
++		if (bb_special_case(to))
++			return;
++		kdbnearsym(to, &symtab);
++		if (symtab.sym_start != to)
++			return;
++		bb_sanity_check(0);
++		if (bb_giveup)
++			return;
++#ifdef	NO_SIBLINGS
++		/* Only print this message when the kernel is compiled with
++		 * -fno-optimize-sibling-calls.  Otherwise it would print a
++		 * message for every tail recursion call.  If you see the
++		 * message below then you probably have an assembler label that
++		 * is not listed in the special cases.
++		 */
++		kdb_printf("  not matched: from "
++			   kdb_bfd_vma_fmt0
++			   " to " kdb_bfd_vma_fmt0
++			   " drop_through %d bb_jmp[%d]\n",
++			   from, to, drop_through, i);
++#endif	/* NO_SIBLINGS */
++		return;
++	}
++	KDB_DEBUG_BB("  matched: from " kdb_bfd_vma_fmt0
++		     " to " kdb_bfd_vma_fmt0
++		     " drop_through %d bb_jmp[%d]\n",
++		     from, to, drop_through, i);
++	/* Find the target basic block */
++	found = 0;
++	for (i = 0; i < bb_count; ++i) {
++		bb = bb_list[i];
++		if (bb->start == to) {
++			found = 1;
++			break;
++		}
++	}
++	BB_CHECK(!found, to, );
++	/* If the register state for this arc has already been set (we are
++	 * rescanning the block that originates the arc) and the state is the
++	 * same as the previous state for this arc then this input to the
++	 * target block is the same as last time, so there is no need to rescan
++	 * the target block.
++	 */
++	state = bb_jmp->state;
++	size = bb_reg_state_size(bb_reg_state);
++	if (state) {
++		/* Copy ref_count first so it does not make memcmp report a
++		 * spurious difference.
++		 */
++		bb_reg_state->ref_count = state->ref_count;
++		if (memcmp(state, bb_reg_state, size) == 0) {
++			KDB_DEBUG_BB("  no state change\n");
++			return;
++		}
++		if (--state->ref_count == 0)
++			debug_kfree(state);
++		bb_jmp->state = NULL;
++	}
++	/* New input state is required.  To save space, check if any other arcs
++	 * have the same state and reuse them where possible.  The overall set
++	 * of inputs to the target block is now different so the target block
++	 * must be rescanned.
++	 */
++	bb->changed = 1;
++	for (i = 0; i < bb_jmp_count; ++i) {
++		state = bb_jmp_list[i].state;
++		if (!state)
++			continue;
++		bb_reg_state->ref_count = state->ref_count;
++		if (memcmp(state, bb_reg_state, size) == 0) {
++			KDB_DEBUG_BB("  reuse bb_jmp[%d]\n", i);
++			bb_jmp->state = state;
++			++state->ref_count;
++			return;
++		}
++	}
++	state = debug_kmalloc(size, GFP_ATOMIC);
++	if (!state) {
++		kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
++		bb_giveup = 1;
++		return;
++	}
++	memcpy(state, bb_reg_state, size);
++	state->ref_count = 1;
++	bb_jmp->state = state;
++	KDB_DEBUG_BB("  new state %p\n", state);
++}
++
++/* Isolate the processing for 'mov' so it can be used for 'xadd'/'xchg' as
++ * well.  l is the length of the opcode mnemonic, used to locate any size
++ * suffix.  Returns BBOU_NOP when the move has been fully accounted for,
++ * otherwise a generic usage code for the caller to apply.
++ */
++
++static enum bb_operand_usage
++bb_usage_mov(const struct bb_operand *src, const struct bb_operand *dst, int l)
++{
++	int full_register_src, full_register_dst;
++	full_register_src = bb_operand_length(src, bb_decode.opcode[l])
++			    == KDB_WORD_SIZE * 8;
++	full_register_dst = bb_operand_length(dst, bb_decode.opcode[l])
++			    == KDB_WORD_SIZE * 8;
++	/* If both src and dst are full integer registers then record the
++	 * register change.
++	 */
++	if (src->reg &&
++	    bb_is_int_reg(src->base_rc) &&
++	    dst->reg &&
++	    bb_is_int_reg(dst->base_rc) &&
++	    full_register_src &&
++	    full_register_dst) {
++		bb_reg_set_reg(dst->base_rc, src->base_rc);
++		return BBOU_NOP;
++	}
++	/* If the move is from a full integer register to stack then record it.
++	 */
++	if (src->reg &&
++	    bb_is_simple_memory(dst) &&
++	    bb_is_osp_defined(dst->base_rc) &&
++	    full_register_src) {
++		/* Ugly special case.  Initializing list heads on stack causes
++		 * false references to stack variables when the list head is
++		 * used.  Static code analysis cannot detect that the list head
++		 * has been changed by a previous execution loop and that a
++		 * basic block is only executed after the list head has been
++		 * changed.
++		 *
++		 * These false references can result in valid stack variables
++		 * being incorrectly cleared on some logic paths.  Ignore
++		 * stores to stack variables which point to themselves or to
++		 * the previous word so the list head initialization is not
++		 * recorded.
++		 */
++		if (bb_is_osp_defined(src->base_rc)) {
++			int stack1 = bb_reg_code_offset(src->base_rc);
++			int stack2 = bb_reg_code_offset(dst->base_rc) +
++				     dst->disp;
++			if (stack1 == stack2 ||
++			    stack1 == stack2 - KDB_WORD_SIZE)
++				return BBOU_NOP;
++		}
++		bb_memory_set_reg(dst->base_rc, src->base_rc, dst->disp);
++		return BBOU_NOP;
++	}
++	/* If the move is from stack to a full integer register then record it.
++	 */
++	if (bb_is_simple_memory(src) &&
++	    bb_is_osp_defined(src->base_rc) &&
++	    dst->reg &&
++	    bb_is_int_reg(dst->base_rc) &&
++	    full_register_dst) {
++#ifndef CONFIG_X86_64
++		/* mov from TSS_sysenter_esp0+offset to esp to fix up the
++		 * sysenter stack, it leaves esp well defined.  mov
++		 * TSS_sysenter_esp0+offset(%esp),%esp is followed by up to 5
++		 * push instructions to mimic the hardware stack push.  If
++		 * TSS_sysenter_esp0 is offset then only 3 words will be
++		 * pushed.
++		 */
++		if (dst->base_rc == BBRG_RSP &&
++		    src->disp >= TSS_sysenter_esp0 &&
++		    bb_is_osp_defined(BBRG_RSP)) {
++			int pushes;
++			pushes = src->disp == TSS_sysenter_esp0 ? 5 : 3;
++			bb_reg_code_set_offset(BBRG_RSP,
++				bb_reg_code_offset(BBRG_RSP) +
++					pushes * KDB_WORD_SIZE);
++			KDB_DEBUG_BB_OFFSET(
++				bb_reg_code_offset(BBRG_RSP),
++				"  sysenter fixup, RSP",
++				"\n");
++			return BBOU_NOP;
++		}
++#endif	/* !CONFIG_X86_64 */
++		bb_read_operand(src);
++		bb_reg_set_memory(dst->base_rc, src->base_rc, src->disp);
++		return BBOU_NOP;
++	}
++	/* move %gs:0x<nn>,%rsp is used to unconditionally switch to another
++	 * stack.  Ignore this special case, it is handled by the stack
++	 * unwinding code.
++	 */
++	if (src->segment &&
++	    strcmp(src->segment, "%gs") == 0 &&
++	    dst->reg &&
++	    dst->base_rc == BBRG_RSP)
++		return BBOU_NOP;
++	/* move %reg,%reg is a nop */
++	if (src->reg &&
++	    dst->reg &&
++	    !src->segment &&
++	    !dst->segment &&
++	    strcmp(src->base, dst->base) == 0)
++		return BBOU_NOP;
++	/* Special case for the code that switches stacks in the scheduler
++	 * (switch_to()).  That code must modify RSP but it does it in a well
++	 * defined manner.  Do not invalidate RSP.
++	 */
++	if (dst->reg &&
++	    dst->base_rc == BBRG_RSP &&
++	    full_register_dst &&
++	    bb_is_scheduler_address())
++		return BBOU_RS;
++	return BBOU_RSWD;
++}
++
++/* Track the register effects of an xadd instruction by simulating it as a
++ * sequence of moves through a scratch register whose state is saved and
++ * restored around the simulation.
++ */
++static enum bb_operand_usage
++bb_usage_xadd(const struct bb_operand *src, const struct bb_operand *dst)
++{
++	/* Simulate xadd as a series of instructions including mov, that way we
++	 * get the benefit of all the special cases already handled by
++	 * BBOU_MOV.
++	 *
++	 * tmp = src + dst, src = dst, dst = tmp.
++	 *
++	 * For tmp, pick a register that is undefined.  If all registers are
++	 * defined then pick one that is not being used by xadd.
++	 */
++	enum bb_reg_code reg = BBRG_UNDEFINED;
++	struct bb_operand tmp;
++	struct bb_reg_contains save_tmp;
++	enum bb_operand_usage usage;
++	int undefined = 0;
++	for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
++		if (bb_reg_code_value(reg) == BBRG_UNDEFINED) {
++			undefined = 1;
++			break;
++		}
++	}
++	if (!undefined) {
++		for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
++			if (reg != src->base_rc &&
++			    reg != src->index_rc &&
++			    reg != dst->base_rc &&
++			    reg != dst->index_rc &&
++			    reg != BBRG_RSP)
++				break;
++		}
++	}
++	KDB_DEBUG_BB("  %s saving tmp %s\n", __FUNCTION__, bbrg_name[reg]);
++	save_tmp = bb_reg_state->contains[reg - BBRG_RAX];
++	bb_reg_set_undef(reg);
++	/* Build a synthetic register operand for tmp */
++	memset(&tmp, 0, sizeof(tmp));
++	tmp.present = 1;
++	tmp.reg = 1;
++	tmp.base = (char *)bbrg_name[reg];
++	tmp.base_rc = reg;
++	bb_read_operand(src);
++	bb_read_operand(dst);
++	if (bb_usage_mov(src, dst, sizeof("xadd")-1) == BBOU_NOP)
++		usage = BBOU_RSRD;
++	else
++		usage = BBOU_RSRDWS;
++	bb_usage_mov(&tmp, dst, sizeof("xadd")-1);
++	KDB_DEBUG_BB("  %s restoring tmp %s\n", __FUNCTION__, bbrg_name[reg]);
++	bb_reg_state->contains[reg - BBRG_RAX] = save_tmp;
++	return usage;
++}
++
++/* Track the register effects of an xchg instruction by simulating it as
++ * three moves through a scratch register whose state is saved and restored
++ * around the simulation.  Returns the OR of whichever generic read/write
++ * usages were not fully accounted for by the simulated moves.
++ */
++static enum bb_operand_usage
++bb_usage_xchg(const struct bb_operand *src, const struct bb_operand *dst)
++{
++	/* Simulate xchg as a series of mov instructions, that way we get the
++	 * benefit of all the special cases already handled by BBOU_MOV.
++	 *
++	 * mov dst,tmp; mov src,dst; mov tmp,src;
++	 *
++	 * For tmp, pick a register that is undefined.  If all registers are
++	 * defined then pick one that is not being used by xchg.
++	 */
++	enum bb_reg_code reg = BBRG_UNDEFINED;
++	int rs = BBOU_RS, rd = BBOU_RD, ws = BBOU_WS, wd = BBOU_WD;
++	struct bb_operand tmp;
++	struct bb_reg_contains save_tmp;
++	int undefined = 0;
++	for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
++		if (bb_reg_code_value(reg) == BBRG_UNDEFINED) {
++			undefined = 1;
++			break;
++		}
++	}
++	if (!undefined) {
++		for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
++			if (reg != src->base_rc &&
++			    reg != src->index_rc &&
++			    reg != dst->base_rc &&
++			    reg != dst->index_rc &&
++			    reg != BBRG_RSP)
++				break;
++		}
++	}
++	KDB_DEBUG_BB("  %s saving tmp %s\n", __FUNCTION__, bbrg_name[reg]);
++	save_tmp = bb_reg_state->contains[reg - BBRG_RAX];
++	/* Build a synthetic register operand for tmp */
++	memset(&tmp, 0, sizeof(tmp));
++	tmp.present = 1;
++	tmp.reg = 1;
++	tmp.base = (char *)bbrg_name[reg];
++	tmp.base_rc = reg;
++	/* Each simulated mov that is fully handled drops the matching
++	 * generic usage bit.
++	 */
++	if (bb_usage_mov(dst, &tmp, sizeof("xchg")-1) == BBOU_NOP)
++		rd = 0;
++	if (bb_usage_mov(src, dst, sizeof("xchg")-1) == BBOU_NOP) {
++		rs = 0;
++		wd = 0;
++	}
++	if (bb_usage_mov(&tmp, src, sizeof("xchg")-1) == BBOU_NOP)
++		ws = 0;
++	KDB_DEBUG_BB("  %s restoring tmp %s\n", __FUNCTION__, bbrg_name[reg]);
++	bb_reg_state->contains[reg - BBRG_RAX] = save_tmp;
++	return rs | rd | ws | wd;
++}
++
++/* Invalidate all the scratch registers, i.e. every integer register that is
++ * not in the preserved (callee saved) list.
++ */
++
++static void
++bb_invalidate_scratch_reg(void)
++{
++	int i, j;
++	for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
++		for (j = 0; j < ARRAY_SIZE(bb_preserved_reg); ++j) {
++			if (i == bb_preserved_reg[j])
++				goto preserved;
++		}
++		bb_reg_set_undef(i);
++preserved:
++		continue;
++	}
++}
++
++
++/* Handle a computed jump through a jump table.  Walk the table starting at
++ * the operand's displacement, recording a transfer for each word sized entry
++ * that points inside the current function; stop at the first entry that does
++ * not, or on a read failure.
++ */
++static void
++bb_pass2_computed_jmp(const struct bb_operand *src)
++{
++	unsigned long table = src->disp;
++	kdb_machreg_t addr;
++	while (!bb_giveup) {
++		if (kdb_getword(&addr, table, sizeof(addr)))
++			return;
++		if (addr < bb_func_start || addr >= bb_func_end)
++			return;
++		bb_transfer(bb_curr_addr, addr, 0);
++		table += KDB_WORD_SIZE;
++	}
++}
++
++/* The current instruction has been decoded and all the information is in
++ * bb_decode.  Based on the opcode, track any operand usage that we care
++ * about.  Special cases are handled first, with any side effects applied,
++ * then mapped onto a generic read/write usage code (or BBOU_NOP) which is
++ * applied to the operands at the end.
++ */
++
++static void
++bb_usage(void)
++{
++	enum bb_operand_usage usage = bb_decode.match->usage;
++	struct bb_operand *src = &bb_decode.src;
++	struct bb_operand *dst = &bb_decode.dst;
++	struct bb_operand *dst2 = &bb_decode.dst2;
++	int opcode_suffix, operand_length;
++
++	/* First handle all the special usage cases, and map them to a generic
++	 * case after catering for the side effects.
++	 */
++
++	if (usage == BBOU_IMUL &&
++	    src->present && !dst->present && !dst2->present) {
++		/* single operand imul, same effects as mul */
++		usage = BBOU_MUL;
++	}
++
++	/* AT&T syntax uses movs<l1><l2> for move with sign extension, instead
++	 * of the Intel movsx.  The AT&T syntax causes problems for the opcode
++	 * mapping; movs with sign extension needs to be treated as a generic
++	 * read src, write dst, but instead it falls under the movs I/O
++	 * instruction.  Fix it.
++	 */
++	if (usage == BBOU_MOVS && strlen(bb_decode.opcode) > 5)
++		usage = BBOU_RSWD;
++
++	/* This switch statement deliberately does not use 'default' at the top
++	 * level.  That way the compiler will complain if a new BBOU_ enum is
++	 * added above and not explicitly handled here.
++	 */
++	switch (usage) {
++	case BBOU_UNKNOWN:	/* drop through */
++	case BBOU_RS:		/* drop through */
++	case BBOU_RD:		/* drop through */
++	case BBOU_RSRD:		/* drop through */
++	case BBOU_WS:		/* drop through */
++	case BBOU_RSWS:		/* drop through */
++	case BBOU_RDWS:		/* drop through */
++	case BBOU_RSRDWS:	/* drop through */
++	case BBOU_WD:		/* drop through */
++	case BBOU_RSWD:		/* drop through */
++	case BBOU_RDWD:		/* drop through */
++	case BBOU_RSRDWD:	/* drop through */
++	case BBOU_WSWD:		/* drop through */
++	case BBOU_RSWSWD:	/* drop through */
++	case BBOU_RDWSWD:	/* drop through */
++	case BBOU_RSRDWSWD:
++		break;		/* ignore generic usage for now */
++	case BBOU_ADD:
++		/* Special case for add instructions that adjust registers
++		 * which are mapping the stack.
++		 */
++		if (dst->reg && bb_is_osp_defined(dst->base_rc)) {
++			bb_adjust_osp_instruction(1);
++			usage = BBOU_RS;
++		} else {
++			usage = BBOU_RSRDWD;
++		}
++		break;
++	case BBOU_CALL:
++		/* Invalidate the scratch registers.  Functions sync_regs and
++		 * save_v86_state are special, their return value is the new
++		 * stack pointer.
++		 */
++		bb_reg_state_print(bb_reg_state);
++		bb_invalidate_scratch_reg();
++		if (bb_is_static_disp(src)) {
++			if (src->disp == bb_sync_regs) {
++				bb_reg_set_reg(BBRG_RAX, BBRG_RSP);
++			} else if (src->disp == bb_save_v86_state) {
++				bb_reg_set_reg(BBRG_RAX, BBRG_RSP);
++				bb_adjust_osp(BBRG_RAX, +KDB_WORD_SIZE);
++			}
++		}
++		usage = BBOU_NOP;
++		break;
++	case BBOU_CBW:
++		/* Convert word in RAX.  Read RAX, write RAX */
++		bb_reg_read(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RAX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_CMOV:
++		/* cmove %gs:0x<nn>,%rsp is used to conditionally switch to
++		 * another stack.  Ignore this special case, it is handled by
++		 * the stack unwinding code.
++		 */
++		if (src->segment &&
++		    strcmp(src->segment, "%gs") == 0 &&
++		    dst->reg &&
++		    dst->base_rc == BBRG_RSP)
++			usage = BBOU_NOP;
++		else
++			usage = BBOU_RSWD;
++		break;
++	case BBOU_CMPXCHG:
++		/* Read RAX, write RAX plus src read, dst write */
++		bb_reg_read(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RAX);
++		usage = BBOU_RSWD;
++		break;
++	case BBOU_CMPXCHGD:
++		/* Read RAX, RBX, RCX, RDX, write RAX, RDX plus src read/write */
++		bb_reg_read(BBRG_RAX);
++		bb_reg_read(BBRG_RBX);
++		bb_reg_read(BBRG_RCX);
++		bb_reg_read(BBRG_RDX);
++		bb_reg_set_undef(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RDX);
++		usage = BBOU_RSWS;
++		break;
++	case BBOU_CPUID:
++		/* Read RAX, write RAX, RBX, RCX, RDX */
++		bb_reg_read(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RBX);
++		bb_reg_set_undef(BBRG_RCX);
++		bb_reg_set_undef(BBRG_RDX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_CWD:
++		/* Convert word in RAX, RDX.  Read RAX, write RDX */
++		bb_reg_read(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RDX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_DIV:	/* drop through */
++	case BBOU_IDIV:
++		/* The 8 bit variants only affect RAX, the 16, 32 and 64 bit
++		 * variants affect RDX as well.
++		 */
++		switch (usage) {
++		case BBOU_DIV:
++			opcode_suffix = bb_decode.opcode[3];
++			break;
++		case BBOU_IDIV:
++			opcode_suffix = bb_decode.opcode[4];
++			break;
++		default:
++			opcode_suffix = 'q';
++			break;
++		}
++		operand_length = bb_operand_length(src, opcode_suffix);
++		bb_reg_read(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RAX);
++		if (operand_length != 8) {
++			bb_reg_read(BBRG_RDX);
++			bb_reg_set_undef(BBRG_RDX);
++		}
++		usage = BBOU_RS;
++		break;
++	case BBOU_IMUL:
++		/* Only the two and three operand forms get here.  The one
++		 * operand form is treated as mul.
++		 */
++		if (dst2->present) {
++			/* The three operand form is a special case, read the first two
++			 * operands, write the third.
++			 */
++			bb_read_operand(src);
++			bb_read_operand(dst);
++			bb_write_operand(dst2);
++			usage = BBOU_NOP;
++		} else {
++			usage = BBOU_RSRDWD;
++		}
++		break;
++	case BBOU_IRET:
++		bb_sanity_check(0);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_JMP:
++		if (bb_is_static_disp(src))
++			bb_transfer(bb_curr_addr, src->disp, 0);
++		else if (src->indirect &&
++			 src->disp &&
++			 src->base == NULL &&
++			 src->index &&
++			 src->scale == KDB_WORD_SIZE)
++			bb_pass2_computed_jmp(src);
++		usage = BBOU_RS;
++		break;
++	case BBOU_LAHF:
++		/* Write RAX */
++		bb_reg_set_undef(BBRG_RAX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_LEA:
++		/* dst = src + disp.  Often used to calculate offsets into the
++		 * stack, so check if it uses a stack pointer.
++		 */
++		usage = BBOU_RSWD;
++		if (bb_is_simple_memory(src)) {
++			if (bb_is_osp_defined(src->base_rc)) {
++				bb_reg_set_reg(dst->base_rc, src->base_rc);
++				bb_adjust_osp_instruction(1);
++				usage = BBOU_RS;
++			} else if (src->disp == 0 &&
++				   src->base_rc == dst->base_rc) {
++				/* lea 0(%reg),%reg is generated by i386
++				 * GENERIC_NOP7.
++				 */
++				usage = BBOU_NOP;
++			}
++		}
++		break;
++	case BBOU_LEAVE:
++		/* RSP = RBP; RBP = *(RSP); RSP += KDB_WORD_SIZE; */
++		bb_reg_set_reg(BBRG_RSP, BBRG_RBP);
++		if (bb_is_osp_defined(BBRG_RSP))
++			bb_reg_set_memory(BBRG_RBP, BBRG_RSP, 0);
++		else
++			bb_reg_set_undef(BBRG_RBP);
++		if (bb_is_osp_defined(BBRG_RSP))
++			bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
++		/* common_interrupt uses leave in a non-standard manner */
++		if (strcmp(bb_func_name, "common_interrupt") != 0)
++			bb_sanity_check(0);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_LODS:
++		/* Read RSI, write RAX, RSI */
++		bb_reg_read(BBRG_RSI);
++		bb_reg_set_undef(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RSI);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_LOOP:
++		/* Read and write RCX */
++		bb_reg_read(BBRG_RCX);
++		bb_reg_set_undef(BBRG_RCX);
++		if (bb_is_static_disp(src))
++			bb_transfer(bb_curr_addr, src->disp, 0);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_LSS:
++		/* lss offset(%esp),%esp leaves esp well defined */
++		if (dst->reg &&
++		    dst->base_rc == BBRG_RSP &&
++		    bb_is_simple_memory(src) &&
++		    src->base_rc == BBRG_RSP) {
++			bb_adjust_osp(BBRG_RSP, 2*KDB_WORD_SIZE + src->disp);
++			usage = BBOU_NOP;
++		} else {
++			usage = BBOU_RSWD;
++		}
++		break;
++	case BBOU_MONITOR:
++		/* Read RAX, RCX, RDX */
++		/* NOTE(review): the comment above says read, but the code
++		 * marks these registers undefined (a write).  Presumably
++		 * bb_reg_read() was intended - confirm before changing.
++		 */
++		bb_reg_set_undef(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RCX);
++		bb_reg_set_undef(BBRG_RDX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_MOV:
++		usage = bb_usage_mov(src, dst, sizeof("mov")-1);
++		break;
++	case BBOU_MOVS:
++		/* Read RSI, RDI, write RSI, RDI */
++		bb_reg_read(BBRG_RSI);
++		bb_reg_read(BBRG_RDI);
++		bb_reg_set_undef(BBRG_RSI);
++		bb_reg_set_undef(BBRG_RDI);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_MUL:
++		/* imul (one operand form only) or mul.  Read RAX.  If the
++		 * operand length is not 8 then write RDX.
++		 */
++		if (bb_decode.opcode[0] == 'i')
++			opcode_suffix = bb_decode.opcode[4];
++		else
++			opcode_suffix = bb_decode.opcode[3];
++		operand_length = bb_operand_length(src, opcode_suffix);
++		bb_reg_read(BBRG_RAX);
++		if (operand_length != 8)
++			bb_reg_set_undef(BBRG_RDX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_MWAIT:
++		/* Read RAX, RCX */
++		bb_reg_read(BBRG_RAX);
++		bb_reg_read(BBRG_RCX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_NOP:
++		break;
++	case BBOU_OUTS:
++		/* Read RSI, RDX, write RSI */
++		bb_reg_read(BBRG_RSI);
++		bb_reg_read(BBRG_RDX);
++		bb_reg_set_undef(BBRG_RSI);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_POP:
++		/* Complicated by the fact that you can pop from top of stack
++		 * to a stack location, for this case the destination location
++		 * is calculated after adjusting RSP.  Analysis of the kernel
++		 * code shows that gcc only uses this strange format to get the
++		 * flags into a local variable, e.g. pushf; popl 0x10(%esp); so
++		 * I am going to ignore this special case.
++		 */
++		usage = BBOU_WS;
++		if (!bb_is_osp_defined(BBRG_RSP)) {
++			if (!bb_is_scheduler_address()) {
++				kdb_printf("pop when BBRG_RSP is undefined?\n");
++				bb_giveup = 1;
++			}
++		} else {
++			if (src->reg) {
++				bb_reg_set_memory(src->base_rc, BBRG_RSP, 0);
++				usage = BBOU_NOP;
++			}
++			bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
++		}
++		break;
++	case BBOU_POPF:
++		/* Do not care about flags, just adjust RSP */
++		if (!bb_is_osp_defined(BBRG_RSP)) {
++			if (!bb_is_scheduler_address()) {
++				kdb_printf("popf when BBRG_RSP is undefined?\n");
++				bb_giveup = 1;
++			}
++		} else {
++			bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
++		}
++		usage = BBOU_WS;
++		break;
++	case BBOU_PUSH:
++		/* Complicated by the fact that you can push from a stack
++		 * location to top of stack, the source location is calculated
++		 * before adjusting RSP.  Analysis of the kernel code shows
++		 * that gcc only uses this strange format to restore the flags
++		 * from a local variable, e.g. pushl 0x10(%esp); popf; so I am
++		 * going to ignore this special case.
++		 */
++		usage = BBOU_RS;
++		if (!bb_is_osp_defined(BBRG_RSP)) {
++			if (!bb_is_scheduler_address()) {
++				kdb_printf("push when BBRG_RSP is undefined?\n");
++				bb_giveup = 1;
++			}
++		} else {
++			bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
++			if (src->reg &&
++			    bb_reg_code_offset(BBRG_RSP) <= 0)
++				bb_memory_set_reg(BBRG_RSP, src->base_rc, 0);
++		}
++		break;
++	case BBOU_PUSHF:
++		/* Do not care about flags, just adjust RSP */
++		if (!bb_is_osp_defined(BBRG_RSP)) {
++			if (!bb_is_scheduler_address()) {
++				kdb_printf("pushf when BBRG_RSP is undefined?\n");
++				bb_giveup = 1;
++			}
++		} else {
++			bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
++		}
++		usage = BBOU_WS;
++		break;
++	case BBOU_RDMSR:
++		/* Read RCX, write RAX, RDX */
++		bb_reg_read(BBRG_RCX);
++		bb_reg_set_undef(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RDX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_RDTSC:
++		/* Write RAX, RDX */
++		bb_reg_set_undef(BBRG_RAX);
++		bb_reg_set_undef(BBRG_RDX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_RET:
++		bb_sanity_check(0);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_SAHF:
++		/* Read RAX */
++		bb_reg_read(BBRG_RAX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_SCAS:
++		/* Read RAX, RDI, write RDI */
++		bb_reg_read(BBRG_RAX);
++		bb_reg_read(BBRG_RDI);
++		bb_reg_set_undef(BBRG_RDI);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_SUB:
++		/* Special case for sub instructions that adjust registers
++		 * which are mapping the stack.
++		 */
++		if (dst->reg && bb_is_osp_defined(dst->base_rc)) {
++			bb_adjust_osp_instruction(-1);
++			usage = BBOU_RS;
++		} else {
++			usage = BBOU_RSRDWD;
++		}
++		break;
++	case BBOU_SYSEXIT:
++		bb_sanity_check(1);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_SYSRET:
++		bb_sanity_check(1);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_WRMSR:
++		/* Read RCX, RAX, RDX */
++		bb_reg_read(BBRG_RCX);
++		bb_reg_read(BBRG_RAX);
++		bb_reg_read(BBRG_RDX);
++		usage = BBOU_NOP;
++		break;
++	case BBOU_XADD:
++		usage = bb_usage_xadd(src, dst);
++		break;
++	case BBOU_XCHG:
++		/* i386 do_IRQ with 4K stacks does xchg %ebx,%esp; call
++		 * irq_handler; mov %ebx,%esp; to switch stacks.  Ignore this
++		 * stack switch when tracking registers, it is handled by
++		 * higher level backtrace code.  Convert xchg %ebx,%esp to mov
++		 * %esp,%ebx so the later mov %ebx,%esp becomes a NOP and the
++		 * stack remains defined so we can backtrace through do_IRQ's
++		 * stack switch.
++		 */
++		if (src->reg &&
++		    dst->reg &&
++		    src->base_rc == BBRG_RBX &&
++		    dst->base_rc == BBRG_RSP &&
++		    strcmp(bb_func_name, "do_IRQ") == 0) {
++			strcpy(bb_decode.opcode, "mov");
++			usage = bb_usage_mov(dst, src, sizeof("mov")-1);
++		} else {
++			usage = bb_usage_xchg(src, dst);
++		}
++		break;
++	case BBOU_XOR:
++		/* xor %reg,%reg only counts as a register write, the original
++		 * contents of reg are irrelevant.
++		 */
++		if (src->reg && dst->reg && src->base_rc == dst->base_rc)
++			usage = BBOU_WS;
++		else
++			usage = BBOU_RSRDWD;
++		break;
++	}
++
++	/* The switch statement above handled all the special cases.  Every
++	 * opcode should now have a usage of NOP or one of the generic cases.
++	 */
++	if (usage == BBOU_UNKNOWN || usage == BBOU_NOP) {
++		/* nothing to do */
++	} else if (usage >= BBOU_RS && usage <= BBOU_RSRDWSWD) {
++		if (usage & BBOU_RS)
++			bb_read_operand(src);
++		if (usage & BBOU_RD)
++			bb_read_operand(dst);
++		if (usage & BBOU_WS)
++			bb_write_operand(src);
++		if (usage & BBOU_WD)
++			bb_write_operand(dst);
++	} else {
++		kdb_printf("%s: opcode not fully handled\n", __FUNCTION__);
++		if (!KDB_DEBUG(BB)) {
++			bb_print_opcode();
++			if (bb_decode.src.present)
++				bb_print_operand("src", &bb_decode.src);
++			if (bb_decode.dst.present)
++				bb_print_operand("dst", &bb_decode.dst);
++			if (bb_decode.dst2.present)
++				bb_print_operand("dst2", &bb_decode.dst2);
++		}
++		bb_giveup = 1;
++	}
++}
++
++static void
++bb_parse_buffer(void)
++{
++ char *p, *src, *dst = NULL, *dst2 = NULL;
++ int paren = 0;
++ p = bb_buffer;
++ memset(&bb_decode, 0, sizeof(bb_decode));
++ KDB_DEBUG_BB(" '%s'\n", p);
++ p += strcspn(p, ":"); /* skip address and function name+offset: */
++ if (*p++ != ':') {
++ kdb_printf("%s: cannot find ':' in buffer '%s'\n",
++ __FUNCTION__, bb_buffer);
++ bb_giveup = 1;
++ return;
++ }
++ p += strspn(p, " \t"); /* step to opcode */
++ if (strncmp(p, "(bad)", 5) == 0)
++ strcpy(p, "nop");
++ /* separate any opcode prefix */
++ if (strncmp(p, "lock", 4) == 0 ||
++ strncmp(p, "rep", 3) == 0 ||
++ strncmp(p, "rex", 3) == 0 ||
++ strncmp(p, "addr", 4) == 0) {
++ bb_decode.prefix = p;
++ p += strcspn(p, " \t");
++ *p++ = '\0';
++ p += strspn(p, " \t");
++ }
++ bb_decode.opcode = p;
++ strsep(&p, " \t"); /* step to end of opcode */
++ if (bb_parse_opcode())
++ return;
++ if (!p)
++ goto no_operands;
++ p += strspn(p, " \t"); /* step to operand(s) */
++ if (!*p)
++ goto no_operands;
++ src = p;
++ p = strsep(&p, " \t"); /* strip comments after operands */
++ /* split 'src','dst' but ignore ',' inside '(' ')' */
++ while (*p) {
++ if (*p == '(') {
++ ++paren;
++ } else if (*p == ')') {
++ --paren;
++ } else if (*p == ',' && paren == 0) {
++ *p = '\0';
++ if (dst)
++ dst2 = p+1;
++ else
++ dst = p+1;
++ }
++ ++p;
++ }
++ bb_parse_operand(src, &bb_decode.src);
++ if (KDB_DEBUG(BB))
++ bb_print_operand("src", &bb_decode.src);
++ if (dst && !bb_giveup) {
++ bb_parse_operand(dst, &bb_decode.dst);
++ if (KDB_DEBUG(BB))
++ bb_print_operand("dst", &bb_decode.dst);
++ }
++ if (dst2 && !bb_giveup) {
++ bb_parse_operand(dst2, &bb_decode.dst2);
++ if (KDB_DEBUG(BB))
++ bb_print_operand("dst2", &bb_decode.dst2);
++ }
++no_operands:
++ if (!bb_giveup)
++ bb_usage();
++}
++
++static int
++bb_dis_pass2(PTR file, const char *fmt, ...)
++{
++ char *p;
++ int l = strlen(bb_buffer);
++ va_list ap;
++ va_start(ap, fmt);
++ vsnprintf(bb_buffer + l, sizeof(bb_buffer) - l, fmt, ap);
++ va_end(ap);
++ if ((p = strchr(bb_buffer, '\n'))) {
++ *p = '\0';
++ p = bb_buffer;
++ p += strcspn(p, ":");
++ if (*p++ == ':')
++ bb_fixup_switch_to(p);
++ bb_parse_buffer();
++ bb_buffer[0] = '\0';
++ }
++ return 0;
++}
++
++static void
++bb_printaddr_pass2(bfd_vma addr, disassemble_info *dip)
++{
++ kdb_symtab_t symtab;
++ unsigned int offset;
++ dip->fprintf_func(dip->stream, "0x%lx", addr);
++ kdbnearsym(addr, &symtab);
++ if (symtab.sym_name) {
++ dip->fprintf_func(dip->stream, " <%s", symtab.sym_name);
++ if ((offset = addr - symtab.sym_start))
++ dip->fprintf_func(dip->stream, "+0x%x", offset);
++ dip->fprintf_func(dip->stream, ">");
++ }
++}
++
++/* Set the starting register and memory state for the current bb */
++
++static void
++bb_start_block0_special(void)
++{
++ int i;
++ short offset_address;
++ enum bb_reg_code reg, value;
++ struct bb_name_state *r;
++ for (i = 0, r = bb_special_cases;
++ i < ARRAY_SIZE(bb_special_cases);
++ ++i, ++r) {
++ if (bb_func_start == r->address && r->fname == NULL)
++ goto match;
++ }
++ return;
++match:
++ /* Set the running registers */
++ for (reg = BBRG_RAX; reg < r->regs_size; ++reg) {
++ value = r->regs[reg].value;
++ if (test_bit(value, r->skip_regs.bits)) {
++ /* this regs entry is not defined for this label */
++ continue;
++ }
++ bb_reg_code_set_value(reg, value);
++ bb_reg_code_set_offset(reg, r->regs[reg].offset);
++ }
++ /* Set any memory contents, e.g. pt_regs. Adjust RSP as required. */
++ offset_address = 0;
++ for (i = 0; i < r->mem_size; ++i) {
++ offset_address = max_t(int,
++ r->mem[i].offset_address + KDB_WORD_SIZE,
++ offset_address);
++ }
++ if (bb_reg_code_offset(BBRG_RSP) > -offset_address)
++ bb_adjust_osp(BBRG_RSP, -offset_address - bb_reg_code_offset(BBRG_RSP));
++ for (i = 0; i < r->mem_size; ++i) {
++ value = r->mem[i].value;
++ if (test_bit(value, r->skip_mem.bits)) {
++ /* this memory entry is not defined for this label */
++ continue;
++ }
++ bb_memory_set_reg_value(BBRG_RSP, r->mem[i].offset_address,
++ value, 0);
++ bb_reg_set_undef(value);
++ }
++ return;
++}
++
++static void
++bb_pass2_start_block(int number)
++{
++ int i, j, k, first, changed;
++ size_t size;
++ struct bb_jmp *bb_jmp;
++ struct bb_reg_state *state;
++ struct bb_memory_contains *c1, *c2;
++ bb_reg_state->mem_count = bb_reg_state_max;
++ size = bb_reg_state_size(bb_reg_state);
++ memset(bb_reg_state, 0, size);
++
++ if (number == 0) {
++ /* The first block is assumed to have well defined inputs */
++ bb_start_block0();
++ /* Some assembler labels have non-standard entry
++ * states.
++ */
++ bb_start_block0_special();
++ bb_reg_state_print(bb_reg_state);
++ return;
++ }
++
++ /* Merge all the input states for the current bb together */
++ first = 1;
++ changed = 0;
++ for (i = 0; i < bb_jmp_count; ++i) {
++ bb_jmp = bb_jmp_list + i;
++ if (bb_jmp->to != bb_curr->start)
++ continue;
++ state = bb_jmp->state;
++ if (!state)
++ continue;
++ if (first) {
++ size = bb_reg_state_size(state);
++ memcpy(bb_reg_state, state, size);
++ KDB_DEBUG_BB(" first state %p\n", state);
++ bb_reg_state_print(bb_reg_state);
++ first = 0;
++ continue;
++ }
++
++ KDB_DEBUG_BB(" merging state %p\n", state);
++ /* Merge the register states */
++ for (j = 0; j < ARRAY_SIZE(state->contains); ++j) {
++ if (memcmp(bb_reg_state->contains + j,
++ state->contains + j,
++ sizeof(bb_reg_state->contains[0]))) {
++ /* Different states for this register from two
++ * or more inputs, make it undefined.
++ */
++ if (bb_reg_state->contains[j].value !=
++ BBRG_UNDEFINED) {
++ bb_reg_set_undef(BBRG_RAX + j);
++ changed = 1;
++ }
++ }
++ }
++
++ /* Merge the memory states. This relies on both
++ * bb_reg_state->memory and state->memory being sorted in
++ * descending order, with undefined entries at the end.
++ */
++ c1 = bb_reg_state->memory;
++ c2 = state->memory;
++ j = k = 0;
++ while (j < bb_reg_state->mem_count &&
++ k < state->mem_count) {
++ if (c1->offset_address < c2->offset_address) {
++ KDB_DEBUG_BB_OFFSET(c2->offset_address,
++ " ignoring c2->offset_address ",
++ "\n");
++ ++c2;
++ ++k;
++ continue;
++ }
++ if (c1->offset_address > c2->offset_address) {
++ /* Memory location is not in all input states,
++ * delete the memory location.
++ */
++ bb_delete_memory(c1->offset_address);
++ changed = 1;
++ ++c1;
++ ++j;
++ continue;
++ }
++ if (memcmp(c1, c2, sizeof(*c1))) {
++ /* Same location, different contents, delete
++ * the memory location.
++ */
++ bb_delete_memory(c1->offset_address);
++ KDB_DEBUG_BB_OFFSET(c2->offset_address,
++ " ignoring c2->offset_address ",
++ "\n");
++ changed = 1;
++ }
++ ++c1;
++ ++c2;
++ ++j;
++ ++k;
++ }
++ while (j < bb_reg_state->mem_count) {
++ bb_delete_memory(c1->offset_address);
++ changed = 1;
++ ++c1;
++ ++j;
++ }
++ }
++ if (changed) {
++ KDB_DEBUG_BB(" final state\n");
++ bb_reg_state_print(bb_reg_state);
++ }
++}
++
++/* We have reached the exit point from the current function, either a call to
++ * the next function or the instruction that was about to be executed when an
++ * interrupt occurred. Save the current register state in bb_exit_state.
++ */
++
++static void
++bb_save_exit_state(void)
++{
++ size_t size;
++ debug_kfree(bb_exit_state);
++ bb_exit_state = NULL;
++ bb_reg_state_canonicalize();
++ size = bb_reg_state_size(bb_reg_state);
++ bb_exit_state = debug_kmalloc(size, GFP_ATOMIC);
++ if (!bb_exit_state) {
++ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
++ bb_giveup = 1;
++ return;
++ }
++ memcpy(bb_exit_state, bb_reg_state, size);
++}
++
++static int
++bb_pass2_do_changed_blocks(int allow_missing)
++{
++ int i, j, missing, changed, maxloops;
++ unsigned long addr;
++ struct bb_jmp *bb_jmp;
++ KDB_DEBUG_BB("\n %s: allow_missing %d\n", __FUNCTION__, allow_missing);
++ /* Absolute worst case is we have to iterate over all the basic blocks,
++ * each iteration losing one register or memory state. Any more loops
++ * than that is a bug.
++ */
++ maxloops = KDB_INT_REGISTERS + bb_reg_state_max;
++ changed = 1;
++ do {
++ changed = 0;
++ for (i = 0; i < bb_count; ++i) {
++ bb_curr = bb_list[i];
++ if (!bb_curr->changed)
++ continue;
++ missing = 0;
++ for (j = 0, bb_jmp = bb_jmp_list;
++ j < bb_jmp_count;
++ ++j, ++bb_jmp) {
++ if (bb_jmp->to == bb_curr->start &&
++ !bb_jmp->state)
++ ++missing;
++ }
++ if (missing > allow_missing)
++ continue;
++ bb_curr->changed = 0;
++ changed = 1;
++ KDB_DEBUG_BB("\n bb[%d]\n", i);
++ bb_pass2_start_block(i);
++ for (addr = bb_curr->start;
++ addr <= bb_curr->end; ) {
++ bb_curr_addr = addr;
++ if (addr == bb_exit_addr)
++ bb_save_exit_state();
++ addr += kdba_id_printinsn(addr, &kdb_di);
++ kdb_di.fprintf_func(NULL, "\n");
++ if (bb_giveup)
++ goto done;
++ }
++ if (addr == bb_exit_addr)
++ bb_save_exit_state();
++ if (bb_curr->drop_through)
++ bb_transfer(bb_curr->end,
++ bb_list[i+1]->start, 1);
++ }
++ if (maxloops-- == 0) {
++ kdb_printf("\n\n%s maxloops reached\n",
++ __FUNCTION__);
++ bb_giveup = 1;
++ goto done;
++ }
++ } while(changed);
++done:
++ for (i = 0; i < bb_count; ++i) {
++ bb_curr = bb_list[i];
++ if (bb_curr->changed)
++ return 1; /* more to do, increase allow_missing */
++ }
++ return 0; /* all blocks done */
++}
++
++/* Assume that the current function is a pass through function that does not
++ * refer to its register parameters. Exclude known asmlinkage functions and
++ * assume the other functions actually use their registers.
++ */
++
++static void
++bb_assume_pass_through(void)
++{
++ static int first_time = 1;
++ if (strncmp(bb_func_name, "sys_", 4) == 0 ||
++ strncmp(bb_func_name, "compat_sys_", 11) == 0 ||
++ strcmp(bb_func_name, "schedule") == 0 ||
++ strcmp(bb_func_name, "do_softirq") == 0 ||
++ strcmp(bb_func_name, "printk") == 0 ||
++ strcmp(bb_func_name, "vprintk") == 0 ||
++ strcmp(bb_func_name, "preempt_schedule") == 0 ||
++ strcmp(bb_func_name, "start_kernel") == 0 ||
++ strcmp(bb_func_name, "csum_partial") == 0 ||
++ strcmp(bb_func_name, "csum_partial_copy_generic") == 0 ||
++ strcmp(bb_func_name, "math_state_restore") == 0 ||
++ strcmp(bb_func_name, "panic") == 0 ||
++ strcmp(bb_func_name, "kdb_printf") == 0 ||
++ strcmp(bb_func_name, "kdb_interrupt") == 0)
++ return;
++ if (bb_asmlinkage_arch())
++ return;
++ bb_reg_params = REGPARM;
++ if (first_time) {
++ kdb_printf(" %s has memory parameters but no register "
++ "parameters.\n Assuming it is a 'pass "
++ "through' function that does not refer to "
++ "its register\n parameters and setting %d "
++ "register parameters\n",
++ bb_func_name, REGPARM);
++ first_time = 0;
++ return;
++ }
++ kdb_printf(" Assuming %s is 'pass through' with %d register "
++ "parameters\n",
++ bb_func_name, REGPARM);
++}
++
++static void
++bb_pass2(void)
++{
++ int allow_missing;
++ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
++ kdb_printf("%s: start\n", __FUNCTION__);
++
++ kdb_di.fprintf_func = bb_dis_pass2;
++ kdb_di.print_address_func = bb_printaddr_pass2;
++
++ bb_reg_state = debug_kmalloc(sizeof(*bb_reg_state), GFP_ATOMIC);
++ if (!bb_reg_state) {
++ kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
++ bb_giveup = 1;
++ return;
++ }
++ bb_list[0]->changed = 1;
++
++ /* If a block does not have all its input states available then it is
++ * possible for a register to initially appear to hold a known value,
++ * but when other inputs are available then it becomes a variable
++ * value. The initial false state of "known" can generate false values
++ * for other registers and can even make it look like stack locations
++ * are being changed.
++ *
++ * To avoid these false positives, only process blocks which have all
++ * their inputs defined. That gives a clean depth first traversal of
++ * the tree, except for loops. If there are any loops, then start
++ * processing blocks with one missing input, then two missing inputs
++ * etc.
++ *
++ * Absolute worst case is we have to iterate over all the jmp entries,
++ * each iteration allowing one more missing input. Any more loops than
++ * that is a bug. Watch out for the corner case of 0 jmp entries.
++ */
++ for (allow_missing = 0; allow_missing <= bb_jmp_count; ++allow_missing) {
++ if (!bb_pass2_do_changed_blocks(allow_missing))
++ break;
++ if (bb_giveup)
++ break;
++ }
++ if (allow_missing > bb_jmp_count) {
++ kdb_printf("\n\n%s maxloops reached\n",
++ __FUNCTION__);
++ bb_giveup = 1;
++ return;
++ }
++
++ if (bb_memory_params && bb_reg_params)
++ bb_reg_params = REGPARM;
++ if (REGPARM &&
++ bb_memory_params &&
++ !bb_reg_params)
++ bb_assume_pass_through();
++ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) {
++ kdb_printf("%s: end bb_reg_params %d bb_memory_params %d\n",
++ __FUNCTION__, bb_reg_params, bb_memory_params);
++ if (bb_exit_state) {
++ kdb_printf("%s: bb_exit_state at " kdb_bfd_vma_fmt0 "\n",
++ __FUNCTION__, bb_exit_addr);
++ bb_do_reg_state_print(bb_exit_state);
++ }
++ }
++}
++
++static void
++bb_cleanup(void)
++{
++ int i;
++ struct bb* bb;
++ struct bb_reg_state *state;
++ while (bb_count) {
++ bb = bb_list[0];
++ bb_delete(0);
++ }
++ debug_kfree(bb_list);
++ bb_list = NULL;
++ bb_count = bb_max = 0;
++ for (i = 0; i < bb_jmp_count; ++i) {
++ state = bb_jmp_list[i].state;
++ if (state && --state->ref_count == 0)
++ debug_kfree(state);
++ }
++ debug_kfree(bb_jmp_list);
++ bb_jmp_list = NULL;
++ bb_jmp_count = bb_jmp_max = 0;
++ debug_kfree(bb_reg_state);
++ bb_reg_state = NULL;
++ bb_reg_state_max = 0;
++ debug_kfree(bb_exit_state);
++ bb_exit_state = NULL;
++ bb_reg_params = bb_memory_params = 0;
++ bb_giveup = 0;
++}
++
++static int
++bb_spurious_global_label(const char *func_name)
++{
++ int i;
++ for (i = 0; i < ARRAY_SIZE(bb_spurious); ++i) {
++ if (strcmp(bb_spurious[i], func_name) == 0)
++ return 1;
++ }
++ return 0;
++}
++
++/* Given the current actual register contents plus the exit state deduced from
++ * a basic block analysis of the current function, rollback the actual register
++ * contents to the values they had on entry to this function.
++ */
++
++static void
++bb_actual_rollback(const struct kdb_activation_record *ar)
++{
++ int i, offset_address;
++ struct bb_memory_contains *c;
++ enum bb_reg_code reg;
++ unsigned long address, new_rsp = 0;
++ struct bb_actual new[ARRAY_SIZE(bb_actual)];
++
++
++ if (!bb_exit_state) {
++ kdb_printf("%s: no bb_exit_state, cannot rollback\n",
++ __FUNCTION__);
++ bb_giveup = 1;
++ return;
++ }
++ memcpy(bb_reg_state, bb_exit_state, bb_reg_state_size(bb_exit_state));
++ memset(new, 0, sizeof(new));
++
++ /* The most important register for obtaining saved state is rsp so get
++ * its new value first. Prefer rsp if it is valid, then other
++ * registers. Saved values of rsp in memory are unusable without a
++ * register that points to memory.
++ */
++ if (!bb_actual_valid(BBRG_RSP)) {
++ kdb_printf("%s: no starting value for RSP, cannot rollback\n",
++ __FUNCTION__);
++ bb_giveup = 1;
++ return;
++ }
++ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
++ kdb_printf("%s: rsp " kdb_bfd_vma_fmt0,
++ __FUNCTION__, bb_actual_value(BBRG_RSP));
++ i = BBRG_RSP;
++ if (!bb_is_osp_defined(i)) {
++ for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
++ if (bb_is_osp_defined(i) && bb_actual_valid(i))
++ break;
++ }
++ }
++ if (bb_is_osp_defined(i) && bb_actual_valid(i)) {
++ new_rsp = new[BBRG_RSP - BBRG_RAX].value =
++ bb_actual_value(i) - bb_reg_code_offset(i);
++ new[BBRG_RSP - BBRG_RAX].valid = 1;
++ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
++ kdb_printf(" -> " kdb_bfd_vma_fmt0 "\n", new_rsp);
++ } else {
++ bb_actual_set_valid(BBRG_RSP, 0);
++ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
++ kdb_printf(" -> undefined\n");
++ kdb_printf("%s: no ending value for RSP, cannot rollback\n",
++ __FUNCTION__);
++ bb_giveup = 1;
++ return;
++ }
++
++ /* Now the other registers. First look at register values that have
++ * been copied to other registers.
++ */
++ for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
++ reg = bb_reg_code_value(i);
++ if (bb_is_int_reg(reg)) {
++ new[reg - BBRG_RAX] = bb_actual[i - BBRG_RAX];
++ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
++ kdb_printf("%s: %s is in %s, "
++ kdb_bfd_vma_fmt0 "\n",
++ __FUNCTION__,
++ bbrg_name[reg],
++ bbrg_name[i],
++ bb_actual_value(reg));
++ }
++ }
++
++ /* Finally register values that have been saved on stack */
++ for (i = 0, c = bb_reg_state->memory;
++ i < bb_reg_state->mem_count;
++ ++i, ++c) {
++ offset_address = c->offset_address;
++ reg = c->value;
++ if (!bb_is_int_reg(reg))
++ continue;
++ address = new_rsp + offset_address;
++ if (address < ar->stack.logical_start ||
++ address >= ar->stack.logical_end) {
++ new[reg - BBRG_RAX].value = 0;
++ new[reg - BBRG_RAX].valid = 0;
++ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
++ kdb_printf("%s: %s -> undefined\n",
++ __FUNCTION__,
++ bbrg_name[reg]);
++ } else {
++ new[reg - BBRG_RAX].value = *(bfd_vma *)address;
++ new[reg - BBRG_RAX].valid = 1;
++ if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
++ kdb_printf("%s: %s -> " kdb_bfd_vma_fmt0 "\n",
++ __FUNCTION__,
++ bbrg_name[reg],
++ new[reg - BBRG_RAX].value);
++ }
++ }
++
++ memcpy(bb_actual, new, sizeof(bb_actual));
++}
++
++/* Return the number of bytes pushed on stack by the hardware. Either 0 or the
++ * size of the hardware specific data.
++ *
++ */
++
++static int
++bb_hardware_pushed(kdb_machreg_t rip)
++{
++ unsigned long disp8, disp32, target, addr = (unsigned long)rip;
++ unsigned char code[5];
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(bb_hardware_handlers); ++i)
++ if (strcmp(bb_func_name, bb_hardware_handlers[i]) == 0)
++ return HARDWARE_PUSHED;
++
++ /* Given the large number of interrupt handlers, it is easiest to look
++ * at the next instruction and see if it is a jmp to the common exit
++ * routines.
++ */
++ if (kdb_getarea(code, addr) ||
++ kdb_getword(&disp32, addr+1, 4) ||
++ kdb_getword(&disp8, addr+1, 1))
++ return 0; /* not a valid code address */
++ if (code[0] == 0xe9) {
++ target = addr + (s32) disp32 + 5; /* jmp disp32 */
++ if (target == bb_ret_from_intr ||
++ target == bb_common_interrupt ||
++ target == bb_error_entry)
++ return HARDWARE_PUSHED;
++ }
++ if (code[0] == 0xeb) {
++ target = addr + (s8) disp8 + 2; /* jmp disp8 */
++ if (target == bb_ret_from_intr ||
++ target == bb_common_interrupt ||
++ target == bb_error_entry)
++ return HARDWARE_PUSHED;
++ }
++ if (strcmp(bb_func_name, "kdb_call") == 0)
++ return HARDWARE_PUSHED;
++
++ return 0;
++}
++
++/* Copy argument information that was deduced by the basic block analysis and
++ * rollback into the kdb stack activation record.
++ */
++
++static void
++bb_arguments(struct kdb_activation_record *ar)
++{
++ int i;
++ enum bb_reg_code reg;
++ kdb_machreg_t rsp;
++ ar->args = bb_reg_params + bb_memory_params;
++ bitmap_zero(ar->valid.bits, KDBA_MAXARGS);
++ for (i = 0; i < bb_reg_params; ++i) {
++ reg = bb_param_reg[i];
++ if (bb_actual_valid(reg)) {
++ ar->arg[i] = bb_actual_value(reg);
++ set_bit(i, ar->valid.bits);
++ }
++ }
++ if (!bb_actual_valid(BBRG_RSP))
++ return;
++ rsp = bb_actual_value(BBRG_RSP);
++ for (i = bb_reg_params; i < ar->args; ++i) {
++ rsp += KDB_WORD_SIZE;
++ if (kdb_getarea(ar->arg[i], rsp) == 0)
++ set_bit(i, ar->valid.bits);
++ }
++}
++
++/* Given an exit address from a function, decompose the entire function into
++ * basic blocks and determine the register state at the exit point.
++ */
++
++static void
++kdb_bb(unsigned long exit)
++{
++ kdb_symtab_t symtab;
++ if (!kdbnearsym(exit, &symtab)) {
++ kdb_printf("%s: address " kdb_bfd_vma_fmt0 " not recognised\n",
++ __FUNCTION__, exit);
++ bb_giveup = 1;
++ return;
++ }
++ bb_exit_addr = exit;
++ bb_mod_name = symtab.mod_name;
++ bb_func_name = symtab.sym_name;
++ bb_func_start = symtab.sym_start;
++ bb_func_end = symtab.sym_end;
++ /* Various global labels exist in the middle of assembler code and have
++ * a non-standard state. Ignore these labels and use the start of the
++ * previous label instead.
++ */
++ while (bb_spurious_global_label(symtab.sym_name)) {
++ if (!kdbnearsym(symtab.sym_start - 1, &symtab))
++ break;
++ bb_func_start = symtab.sym_start;
++ }
++ bb_mod_name = symtab.mod_name;
++ bb_func_name = symtab.sym_name;
++ bb_func_start = symtab.sym_start;
++ /* Ignore spurious labels past this point and use the next non-spurious
++ * label as the end point.
++ */
++ if (kdbnearsym(bb_func_end, &symtab)) {
++ while (bb_spurious_global_label(symtab.sym_name)) {
++ bb_func_end = symtab.sym_end;
++ if (!kdbnearsym(symtab.sym_end + 1, &symtab))
++ break;
++ }
++ }
++ bb_pass1();
++ if (!bb_giveup)
++ bb_pass2();
++ if (bb_giveup)
++ kdb_printf("%s: " kdb_bfd_vma_fmt0
++ " [%s]%s failed at " kdb_bfd_vma_fmt0 "\n\n",
++ __FUNCTION__, exit,
++ bb_mod_name, bb_func_name, bb_curr_addr);
++}
++
++static int
++kdb_bb1(int argc, const char **argv)
++{
++ int diag;
++ unsigned long addr;
++ bb_cleanup(); /* in case previous command was interrupted */
++ kdba_id_init(&kdb_di);
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++ if ((diag = kdbgetularg((char *)argv[1], &addr)))
++ return diag;
++ kdb_save_flags();
++ kdb_flags |= KDB_DEBUG_FLAG_BB << KDB_DEBUG_FLAG_SHIFT;
++ kdb_bb(addr);
++ bb_cleanup();
++ kdb_restore_flags();
++ kdbnearsym_cleanup();
++ return 0;
++}
++
++/* Run a basic block analysis on every function in the base kernel. Used as a
++ * global sanity check to find errors in the basic block code.
++ */
++
++static int
++kdb_bb_all(int argc, const char **argv)
++{
++ loff_t pos = 0;
++ const char *symname;
++ unsigned long addr;
++ int i, max_errors = 20;
++ struct bb_name_state *r;
++ kdb_printf("%s: conditional build variables:"
++#ifdef CONFIG_X86_64
++ " CONFIG_X86_64"
++#endif
++#ifdef CONFIG_4KSTACKS
++ " CONFIG_4KSTACKS"
++#endif
++#ifdef CONFIG_PREEMPT
++ " CONFIG_PREEMPT"
++#endif
++#ifdef CONFIG_VM86
++ " CONFIG_VM86"
++#endif
++#ifdef CONFIG_FRAME_POINTER
++ " CONFIG_FRAME_POINTER"
++#endif
++#ifdef CONFIG_TRACE_IRQFLAGS
++ " CONFIG_TRACE_IRQFLAGS"
++#endif
++#ifdef NO_SIBLINGS
++ " NO_SIBLINGS"
++#endif
++ " REGPARM=" __stringify(REGPARM)
++ "\n\n", __FUNCTION__);
++ for (i = 0, r = bb_special_cases;
++ i < ARRAY_SIZE(bb_special_cases);
++ ++i, ++r) {
++ if (!r->address)
++ kdb_printf("%s: cannot find special_case name %s\n",
++ __FUNCTION__, r->name);
++ }
++ for (i = 0; i < ARRAY_SIZE(bb_spurious); ++i) {
++ if (!kallsyms_lookup_name(bb_spurious[i]))
++ kdb_printf("%s: cannot find spurious label %s\n",
++ __FUNCTION__, bb_spurious[i]);
++ }
++ while ((symname = kdb_walk_kallsyms(&pos))) {
++ ++pos;
++ if (strcmp(symname, "_stext") == 0 ||
++ strcmp(symname, "stext") == 0)
++ break;
++ }
++ if (!symname) {
++ kdb_printf("%s: cannot find _stext\n", __FUNCTION__);
++ return 0;
++ }
++ kdba_id_init(&kdb_di);
++ i = 0;
++ while ((symname = kdb_walk_kallsyms(&pos))) {
++ if (strcmp(symname, "_etext") == 0)
++ break;
++ if (i++ % 100 == 0)
++ kdb_printf(".");
++ /* x86_64 has some 16 bit functions that appear between stext
++ * and _etext. Skip them.
++ */
++ if (strcmp(symname, "verify_cpu") == 0 ||
++ strcmp(symname, "verify_cpu_noamd") == 0 ||
++ strcmp(symname, "verify_cpu_sse_test") == 0 ||
++ strcmp(symname, "verify_cpu_no_longmode") == 0 ||
++ strcmp(symname, "verify_cpu_sse_ok") == 0 ||
++ strcmp(symname, "mode_seta") == 0 ||
++ strcmp(symname, "bad_address") == 0 ||
++ strcmp(symname, "wakeup_code") == 0 ||
++ strcmp(symname, "wakeup_code_start") == 0 ||
++ strcmp(symname, "wakeup_start") == 0 ||
++ strcmp(symname, "wakeup_32_vector") == 0 ||
++ strcmp(symname, "wakeup_32") == 0 ||
++ strcmp(symname, "wakeup_long64_vector") == 0 ||
++ strcmp(symname, "wakeup_long64") == 0 ||
++ strcmp(symname, "gdta") == 0 ||
++ strcmp(symname, "idt_48a") == 0 ||
++ strcmp(symname, "gdt_48a") == 0 ||
++ strcmp(symname, "bogus_real_magic") == 0 ||
++ strcmp(symname, "bogus_64_magic") == 0 ||
++ strcmp(symname, "no_longmode") == 0 ||
++ strcmp(symname, "mode_seta") == 0 ||
++ strcmp(symname, "setbada") == 0 ||
++ strcmp(symname, "check_vesaa") == 0 ||
++ strcmp(symname, "_setbada") == 0 ||
++ strcmp(symname, "wakeup_stack_begin") == 0 ||
++ strcmp(symname, "wakeup_stack") == 0 ||
++ strcmp(symname, "wakeup_level4_pgt") == 0 ||
++ strcmp(symname, "acpi_copy_wakeup_routine") == 0 ||
++ strcmp(symname, "wakeup_end") == 0 ||
++ strcmp(symname, "do_suspend_lowlevel_s4bios") == 0 ||
++ strcmp(symname, "do_suspend_lowlevel") == 0)
++ continue;
++ /* __kprobes_text_end contains branches to the middle of code,
++ * with undefined states.
++ */
++ if (strcmp(symname, "__kprobes_text_end") == 0)
++ continue;
++ if (bb_spurious_global_label(symname))
++ continue;
++ if ((addr = kallsyms_lookup_name(symname)) == 0)
++ continue;
++ // kdb_printf("BB " kdb_bfd_vma_fmt0 " %s\n", addr, symname);
++ bb_cleanup(); /* in case previous command was interrupted */
++ kdbnearsym_cleanup();
++ kdb_bb(addr);
++ touch_nmi_watchdog();
++ if (bb_giveup) {
++ if (max_errors-- == 0) {
++ kdb_printf("%s: max_errors reached, giving up\n",
++ __FUNCTION__);
++ break;
++ } else {
++ bb_giveup = 0;
++ }
++ }
++ }
++ kdb_printf("\n");
++ bb_cleanup();
++ kdbnearsym_cleanup();
++ return 0;
++}
++
++/*
++ *=============================================================================
++ *
++ * Everything above this line is doing basic block analysis, function by
++ * function. Everything below this line uses the basic block data to do a
++ * complete backtrace over all functions that are used by a process.
++ *
++ *=============================================================================
++ */
++
++
++/*============================================================================*/
++/* */
++/* Most of the backtrace code and data is common to x86_64 and i386. This */
++/* large ifdef contains all of the differences between the two architectures. */
++/* */
++/* Make sure you update the correct section of this ifdef. */
++/* */
++/*============================================================================*/
++
++#ifdef CONFIG_X86_64
++
++#define XCS "cs"
++#define RSP "rsp"
++#define RIP "rip"
++#define ARCH_RSP rsp
++#define ARCH_RIP rip
++#define ARCH_NORMAL_PADDING (16 * 8)
++
++/* x86_64 has multiple alternate stacks, with different sizes and different
++ * offsets to get the link from one stack to the next. Some of the stacks are
++ * referenced via cpu_pda, some via per_cpu orig_ist. Debug events can even
++ * have multiple nested stacks within the single physical stack, each nested
++ * stack has its own link and some of those links are wrong.
++ *
++ * Consistent it's not!
++ *
++ * Do not assume that these stacks are aligned on their size.
++ */
++#define INTERRUPT_STACK (N_EXCEPTION_STACKS + 1)
++void
++kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
++ struct kdb_activation_record *ar)
++{
++ static struct {
++ const char *id;
++ unsigned int total_size;
++ unsigned int nested_size;
++ unsigned int next;
++ } *sdp, stack_data[] = {
++ [STACKFAULT_STACK - 1] = { "stackfault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
++ [DOUBLEFAULT_STACK - 1] = { "doublefault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
++ [NMI_STACK - 1] = { "nmi", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
++ [DEBUG_STACK - 1] = { "debug", DEBUG_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
++ [MCE_STACK - 1] = { "machine check", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
++ [INTERRUPT_STACK - 1] = { "interrupt", IRQSTACKSIZE, IRQSTACKSIZE, IRQSTACKSIZE - sizeof(void *) },
++ };
++ unsigned long total_start = 0, total_size, total_end;
++ int sd, found = 0;
++ extern unsigned long kdba_orig_ist(int, int);
++
++ for (sd = 0, sdp = stack_data;
++ sd < ARRAY_SIZE(stack_data);
++ ++sd, ++sdp) {
++ total_size = sdp->total_size;
++ if (!total_size)
++ continue; /* in case stack_data[] has any holes */
++ if (cpu < 0) {
++ /* Arbitrary address which can be on any cpu, see if it
++ * falls within any of the alternate stacks
++ */
++ int c;
++ for_each_online_cpu(c) {
++ if (sd == INTERRUPT_STACK - 1)
++ total_end = (unsigned long)cpu_pda(c)->irqstackptr;
++ else
++ total_end = per_cpu(orig_ist, c).ist[sd];
++ total_start = total_end - total_size;
++ if (addr >= total_start && addr < total_end) {
++ found = 1;
++ cpu = c;
++ break;
++ }
++ }
++ if (!found)
++ continue;
++ }
++ /* Only check the supplied or found cpu */
++ if (sd == INTERRUPT_STACK - 1)
++ total_end = (unsigned long)cpu_pda(cpu)->irqstackptr;
++ else
++ total_end = per_cpu(orig_ist, cpu).ist[sd];
++ total_start = total_end - total_size;
++ if (addr >= total_start && addr < total_end) {
++ found = 1;
++ break;
++ }
++ }
++ if (!found)
++ return;
++ /* find which nested stack the address is in */
++ while (addr > total_start + sdp->nested_size)
++ total_start += sdp->nested_size;
++ ar->stack.physical_start = total_start;
++ ar->stack.physical_end = total_start + sdp->nested_size;
++ ar->stack.logical_start = total_start;
++ ar->stack.logical_end = total_start + sdp->next;
++ ar->stack.next = *(unsigned long *)ar->stack.logical_end;
++ ar->stack.id = sdp->id;
++
++ /* Nasty: when switching to the interrupt stack, the stack state of the
++ * caller is split over two stacks, the original stack and the
++ * interrupt stack. One word (the previous frame pointer) is stored on
++ * the interrupt stack, the rest of the interrupt data is in the old
++ * frame. To make the interrupted stack state look as though it is
++ * contiguous, copy the missing word from the interrupt stack to the
++ * original stack and adjust the new stack pointer accordingly.
++ */
++
++ if (sd == INTERRUPT_STACK - 1) {
++ *(unsigned long *)(ar->stack.next - KDB_WORD_SIZE) =
++ ar->stack.next;
++ ar->stack.next -= KDB_WORD_SIZE;
++ }
++}
++
++/* rip is not in the thread struct for x86_64. We know that the stack value
++ * was saved in schedule near the label thread_return. Setting rip to
++ * thread_return lets the stack trace find that we are in schedule and
++ * correctly decode its prologue.
++ */
++
++static kdb_machreg_t
++kdba_bt_stack_rip(const struct task_struct *p)
++{
++ return bb_thread_return;
++}
++
++#else /* !CONFIG_X86_64 */
++
++#define XCS "xcs"
++#define RSP "esp"
++#define RIP "eip"
++#define ARCH_RSP esp
++#define ARCH_RIP eip
++#define ARCH_NORMAL_PADDING (19 * 4)
++
++#ifdef CONFIG_4KSTACKS
++static struct thread_info **kdba_hardirq_ctx, **kdba_softirq_ctx;
++#endif /* CONFIG_4KSTACKS */
++
++/* On a 4K stack kernel, hardirq_ctx and softirq_ctx are [NR_CPUS] arrays. The
++ * first element of each per-cpu stack is a struct thread_info.
++ */
++void
++kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
++ struct kdb_activation_record *ar)
++{
++#ifdef CONFIG_4KSTACKS
++ struct thread_info *tinfo;
++ tinfo = (struct thread_info *)(addr & -THREAD_SIZE);
++ if (cpu < 0) {
++ /* Arbitrary address, see if it falls within any of the irq
++ * stacks
++ */
++ int found = 0;
++ for_each_online_cpu(cpu) {
++ if (tinfo == kdba_hardirq_ctx[cpu] ||
++ tinfo == kdba_softirq_ctx[cpu]) {
++ found = 1;
++ break;
++ }
++ }
++ if (!found)
++ return;
++ }
++ if (tinfo == kdba_hardirq_ctx[cpu] ||
++ tinfo == kdba_softirq_ctx[cpu]) {
++ ar->stack.physical_start = (kdb_machreg_t)tinfo;
++ ar->stack.physical_end = ar->stack.physical_start + THREAD_SIZE;
++ ar->stack.logical_start = ar->stack.physical_start +
++ sizeof(struct thread_info);
++ ar->stack.logical_end = ar->stack.physical_end;
++ ar->stack.next = tinfo->previous_esp;
++ if (tinfo == kdba_hardirq_ctx[cpu])
++ ar->stack.id = "hardirq_ctx";
++ else
++ ar->stack.id = "softirq_ctx";
++ }
++#endif /* CONFIG_4KSTACKS */
++}
++
++/* rip is in the thread struct for i386 */
++
++static kdb_machreg_t
++kdba_bt_stack_rip(const struct task_struct *p)
++{
++ return p->thread.eip;
++}
++
++#endif /* CONFIG_X86_64 */
++
++/* Given an address which claims to be on a stack, an optional cpu number and
++ * an optional task address, get information about the stack.
++ *
++ * t == NULL, cpu < 0 indicates an arbitrary stack address with no associated
++ * struct task, the address can be in an alternate stack or any task's normal
++ * stack.
++ *
++ * t != NULL, cpu >= 0 indicates a running task, the address can be in an
++ * alternate stack or that task's normal stack.
++ *
++ * t != NULL, cpu < 0 indicates a blocked task, the address can only be in that
++ * task's normal stack.
++ *
++ * t == NULL, cpu >= 0 is not a valid combination.
++ */
++
++static void
++kdba_get_stack_info(kdb_machreg_t rsp, int cpu,
++ struct kdb_activation_record *ar,
++ const struct task_struct *t)
++{
++ struct thread_info *tinfo;
++ struct task_struct *g, *p;
++ memset(&ar->stack, 0, sizeof(ar->stack));
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: " RSP "=0x%lx cpu=%d task=%p\n",
++ __FUNCTION__, rsp, cpu, t);
++ if (t == NULL || cpu >= 0) {
++ kdba_get_stack_info_alternate(rsp, cpu, ar);
++ if (ar->stack.logical_start)
++ goto out;
++ }
++ rsp &= -THREAD_SIZE;
++ tinfo = (struct thread_info *)rsp;
++ if (t == NULL) {
++ /* Arbitrary stack address without an associated task, see if
++ * it falls within any normal process stack, including the idle
++ * tasks.
++ */
++ kdb_do_each_thread(g, p) {
++ if (tinfo == task_thread_info(p)) {
++ t = p;
++ goto found;
++ }
++ } kdb_while_each_thread(g, p);
++ for_each_online_cpu(cpu) {
++ p = idle_task(cpu);
++ if (tinfo == task_thread_info(p)) {
++ t = p;
++ goto found;
++ }
++ }
++ found:
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: found task %p\n", __FUNCTION__, t);
++ } else if (cpu >= 0) {
++ /* running task */
++ struct kdb_running_process *krp = kdb_running_process + cpu;
++ if (krp->p != t || tinfo != task_thread_info(t))
++ t = NULL;
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: running task %p\n", __FUNCTION__, t);
++ } else {
++ /* blocked task */
++ if (tinfo != task_thread_info(t))
++ t = NULL;
++ if (KDB_DEBUG(ARA))
++ kdb_printf("%s: blocked task %p\n", __FUNCTION__, t);
++ }
++ if (t) {
++ ar->stack.physical_start = rsp;
++ ar->stack.physical_end = rsp + THREAD_SIZE;
++ ar->stack.logical_start = rsp + sizeof(struct thread_info);
++ ar->stack.logical_end = ar->stack.physical_end - ARCH_NORMAL_PADDING;
++ ar->stack.next = 0;
++ ar->stack.id = "normal";
++ }
++out:
++ if (ar->stack.physical_start && KDB_DEBUG(ARA)) {
++ kdb_printf("%s: ar->stack\n", __FUNCTION__);
++ kdb_printf(" physical_start=0x%lx\n", ar->stack.physical_start);
++ kdb_printf(" physical_end=0x%lx\n", ar->stack.physical_end);
++ kdb_printf(" logical_start=0x%lx\n", ar->stack.logical_start);
++ kdb_printf(" logical_end=0x%lx\n", ar->stack.logical_end);
++ kdb_printf(" next=0x%lx\n", ar->stack.next);
++ kdb_printf(" id=%s\n", ar->stack.id);
++ kdb_printf(" set MDCOUNT %ld\n",
++ (ar->stack.physical_end - ar->stack.physical_start) /
++ KDB_WORD_SIZE);
++ kdb_printf(" mds " kdb_machreg_fmt0 "\n",
++ ar->stack.physical_start);
++ }
++}
++
++static void
++bt_print_one(kdb_machreg_t rip, kdb_machreg_t rsp,
++ const struct kdb_activation_record *ar,
++ const kdb_symtab_t *symtab, int argcount)
++{
++ int btsymarg = 0;
++ int nosect = 0;
++
++ kdbgetintenv("BTSYMARG", &btsymarg);
++ kdbgetintenv("NOSECT", &nosect);
++
++ kdb_printf(kdb_machreg_fmt0, rsp);
++ kdb_symbol_print(rip, symtab,
++ KDB_SP_SPACEB|KDB_SP_VALUE);
++ if (argcount && ar->args) {
++ int i, argc = ar->args;
++ kdb_printf(" (");
++ if (argc > argcount)
++ argc = argcount;
++ for (i = 0; i < argc; i++) {
++ if (i)
++ kdb_printf(", ");
++ if (test_bit(i, ar->valid.bits))
++ kdb_printf("0x%lx", ar->arg[i]);
++ else
++ kdb_printf("invalid");
++ }
++ kdb_printf(")");
++ }
++ kdb_printf("\n");
++ if (symtab->sym_name) {
++ if (!nosect) {
++ kdb_printf(" %s",
++ symtab->mod_name);
++ if (symtab->sec_name && symtab->sec_start)
++ kdb_printf(" 0x%lx 0x%lx",
++ symtab->sec_start, symtab->sec_end);
++ kdb_printf(" 0x%lx 0x%lx\n",
++ symtab->sym_start, symtab->sym_end);
++ }
++ }
++ if (argcount && ar->args && btsymarg) {
++ int i, argc = ar->args;
++ kdb_symtab_t arg_symtab;
++ for (i = 0; i < argc; i++) {
++ kdb_machreg_t arg = ar->arg[i];
++ if (test_bit(i, ar->valid.bits) &&
++ kdbnearsym(arg, &arg_symtab)) {
++ kdb_printf(" ARG %2d ", i);
++ kdb_symbol_print(arg, &arg_symtab,
++ KDB_SP_DEFAULT|KDB_SP_NEWLINE);
++ }
++ }
++ }
++}
++
++static void
++kdba_bt_new_stack(struct kdb_activation_record *ar, kdb_machreg_t *rsp,
++ int *count, int *suppress)
++{
++ /* Nasty: common_interrupt builds a partial pt_regs, with r15 through
++ * rbx not being filled in. It passes struct pt_regs* to do_IRQ (in
++ * rdi) but the stack pointer is not adjusted to account for r15
++ * through rbx. This has two effects :-
++ *
++ * (1) struct pt_regs on an external interrupt actually overlaps with
++ * the local stack area used by do_IRQ. Not only are r15-rbx
++ * undefined, the area that claims to hold their values can even
++ * change as the irq is processed.
++ *
++ * (2) The back stack pointer saved for the new frame is not pointing
++ * at pt_regs, it is pointing at rbx within the pt_regs passed to
++ * do_IRQ.
++ *
++ * There is nothing that I can do about (1) but I have to fix (2)
++ * because kdb backtrace looks for the "start" address of pt_regs as it
++ * walks back through the stacks. When switching from the interrupt
++ * stack to another stack, we have to assume that pt_regs has been
++ * seen and turn off backtrace suppression.
++ */
++ int probable_pt_regs = strcmp(ar->stack.id, "interrupt") == 0;
++ *rsp = ar->stack.next;
++ if (KDB_DEBUG(ARA))
++ kdb_printf("new " RSP "=" kdb_machreg_fmt0 "\n", *rsp);
++ bb_actual_set_value(BBRG_RSP, *rsp);
++ kdba_get_stack_info(*rsp, -1, ar, NULL);
++ if (!ar->stack.physical_start) {
++ kdb_printf("+++ Cannot resolve next stack\n");
++ } else if (!*suppress) {
++ kdb_printf(" ======================= <%s>\n",
++ ar->stack.id);
++ ++*count;
++ }
++ if (probable_pt_regs)
++ *suppress = 0;
++}
++
++/*
++ * kdba_bt_stack
++ *
++ * Inputs:
++ * addr Address provided to 'bt' command, if any.
++ * argcount
++ * p Pointer to task for 'btp' command.
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * Ultimately all the bt* commands come through this routine. If
++ * old_style is 0 then it uses the basic block analysis to get an accurate
++ * backtrace with arguments, otherwise it falls back to the old method of
++ * printing anything on stack that looks like a kernel address.
++ */
++
++static int
++kdba_bt_stack(kdb_machreg_t addr, int argcount, const struct task_struct *p,
++ int old_style)
++{
++ struct kdb_activation_record ar;
++ kdb_machreg_t rip = 0, rsp = 0, prev_rsp;
++ kdb_symtab_t symtab;
++ int rip_at_rsp = 0, count = 0, btsp = 0, suppress, hardware_pushed = 0;
++ struct pt_regs *regs = NULL;
++
++ kdbgetintenv("BTSP", &btsp);
++ suppress = !btsp;
++ memset(&ar, 0, sizeof(ar));
++ if (old_style)
++ kdb_printf("Using old style backtrace, unreliable with no arguments\n");
++
++ /*
++ * The caller may have supplied an address at which the stack traceback
++ * operation should begin. This address is assumed by this code to
++ * point to a return address on the stack to be traced back.
++ *
++ * Warning: type in the wrong address and you will get garbage in the
++ * backtrace.
++ */
++ if (addr) {
++ rsp = addr;
++ kdb_getword(&rip, rsp, sizeof(rip));
++ rip_at_rsp = 1;
++ suppress = 0;
++ kdba_get_stack_info(rsp, -1, &ar, NULL);
++ } else {
++ if (task_curr(p)) {
++ struct kdb_running_process *krp =
++ kdb_running_process + task_cpu(p);
++ kdb_machreg_t cs;
++ regs = krp->regs;
++ if (krp->seqno &&
++ krp->p == p &&
++ krp->seqno >= kdb_seqno - 1 &&
++ !KDB_NULL_REGS(regs)) {
++ /* valid saved state, continue processing */
++ } else {
++ kdb_printf
++ ("Process did not save state, cannot backtrace\n");
++ kdb_ps1(p);
++ return 0;
++ }
++ kdba_getregcontents(XCS, regs, &cs);
++ if ((cs & 0xffff) != __KERNEL_CS) {
++ kdb_printf("Stack is not in kernel space, backtrace not available\n");
++ return 0;
++ }
++ rip = krp->arch.ARCH_RIP;
++ rsp = krp->arch.ARCH_RSP;
++ kdba_get_stack_info(rsp, kdb_process_cpu(p), &ar, p);
++ } else {
++ /* Not on cpu, assume blocked. Blocked tasks do not
++ * have pt_regs. p->thread contains some data, alas
++ * what it contains differs between i386 and x86_64.
++ */
++ rip = kdba_bt_stack_rip(p);
++ rsp = p->thread.ARCH_RSP;
++ suppress = 0;
++ kdba_get_stack_info(rsp, -1, &ar, p);
++ }
++ }
++ if (!ar.stack.physical_start) {
++ kdb_printf(RSP "=0x%lx is not in a valid kernel stack, backtrace not available\n",
++ rsp);
++ return 0;
++ }
++ memset(&bb_actual, 0, sizeof(bb_actual));
++ bb_actual_set_value(BBRG_RSP, rsp);
++ bb_actual_set_valid(BBRG_RSP, 1);
++
++ kdb_printf(RSP "%*s" RIP "%*sFunction (args)\n",
++ 2*KDB_WORD_SIZE, " ",
++ 2*KDB_WORD_SIZE, " ");
++ if (ar.stack.next && !suppress)
++ kdb_printf(" ======================= <%s>\n",
++ ar.stack.id);
++
++ bb_cleanup();
++ /* Run through all the stacks */
++ while (ar.stack.physical_start) {
++ if (rip_at_rsp)
++ rip = *(kdb_machreg_t *)rsp;
++ kdbnearsym(rip, &symtab);
++ if (old_style) {
++ if (__kernel_text_address(rip) && !suppress) {
++ bt_print_one(rip, rsp, &ar, &symtab, 0);
++ ++count;
++ }
++ if (rsp == (unsigned long)regs) {
++ if (ar.stack.next && suppress)
++ kdb_printf(" ======================= <%s>\n",
++ ar.stack.id);
++ ++count;
++ suppress = 0;
++ }
++ rsp += sizeof(rip);
++ rip_at_rsp = 1;
++ if (rsp >= ar.stack.logical_end) {
++ if (!ar.stack.next)
++ break;
++ kdba_bt_new_stack(&ar, &rsp, &count, &suppress);
++ rip_at_rsp = 0;
++ continue;
++ }
++ } else {
++ /* Start each analysis with no dynamic data from the
++ * previous kdb_bb() run.
++ */
++ bb_cleanup();
++ kdb_bb(rip);
++ if (bb_giveup)
++ break;
++ prev_rsp = rsp;
++ if (rip_at_rsp) {
++ rsp += sizeof(rip) + hardware_pushed;
++ hardware_pushed = 0;
++ if (rsp >= ar.stack.logical_end &&
++ ar.stack.next) {
++ kdba_bt_new_stack(&ar, &rsp, &count,
++ &suppress);
++ rip_at_rsp = 0;
++ continue;
++ }
++ bb_actual_set_value(BBRG_RSP, rsp);
++ }
++ rip_at_rsp = 1;
++ bb_actual_rollback(&ar);
++ if (bb_giveup)
++ break;
++ if (bb_actual_value(BBRG_RSP) < rsp) {
++ kdb_printf("%s: " RSP " is going backwards, "
++ kdb_machreg_fmt0 " -> "
++ kdb_machreg_fmt0 "\n",
++ __FUNCTION__,
++ rsp,
++ bb_actual_value(BBRG_RSP));
++ bb_giveup = 1;
++ break;
++ }
++ bb_arguments(&ar);
++ if (!suppress) {
++ bt_print_one(rip, prev_rsp, &ar, &symtab, argcount);
++ ++count;
++ }
++ /* Functions that terminate the backtrace */
++ if (strcmp(bb_func_name, "cpu_idle") == 0)
++ break;
++ if (rsp >= ar.stack.logical_end &&
++ !ar.stack.next)
++ break;
++ if (rsp <= (unsigned long)regs &&
++ bb_actual_value(BBRG_RSP) > (unsigned long)regs) {
++ if (ar.stack.next && suppress)
++ kdb_printf(" ======================= <%s>\n",
++ ar.stack.id);
++ ++count;
++ suppress = 0;
++ }
++ rsp = bb_actual_value(BBRG_RSP);
++ hardware_pushed = bb_hardware_pushed(rip);
++ }
++ if (count > 200)
++ break;
++ }
++ if (bb_giveup)
++ return 1;
++ bb_cleanup();
++ kdbnearsym_cleanup();
++
++ if (count > 200) {
++ kdb_printf("bt truncated, count limit reached\n");
++ return 1;
++ } else if (suppress) {
++ kdb_printf
++ ("bt did not find pt_regs - no trace produced. Suggest 'set BTSP 1'\n");
++ return 1;
++ }
++
++ return 0;
++}
++
++/*
++ * kdba_bt_address
++ *
++ * Do a backtrace starting at a specified stack address. Use this if the
++ * heuristics get the stack decode wrong.
++ *
++ * Inputs:
++ * addr Address provided to 'bt' command.
++ * argcount
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * mds %rsp comes in handy when examining the stack to do a manual
++ * traceback.
++ */
++
++int kdba_bt_address(kdb_machreg_t addr, int argcount)
++{
++ int ret;
++ kdba_id_init(&kdb_di); /* kdb_bb needs this done once */
++ ret = kdba_bt_stack(addr, argcount, NULL, 0);
++ if (ret == 1)
++ ret = kdba_bt_stack(addr, argcount, NULL, 1);
++ return ret;
++}
++
++/*
++ * kdba_bt_process
++ *
++ * Do a backtrace for a specified process.
++ *
++ * Inputs:
++ * p Struct task pointer extracted by 'bt' command.
++ * argcount
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ */
++
++int kdba_bt_process(const struct task_struct *p, int argcount)
++{
++ int ret;
++ kdba_id_init(&kdb_di); /* kdb_bb needs this done once */
++ ret = kdba_bt_stack(0, argcount, p, 0);
++ if (ret == 1)
++ ret = kdba_bt_stack(0, argcount, p, 1);
++ return ret;
++}
++
++static int __init kdba_bt_x86_init(void)
++{
++ int i, c, cp = -1;
++ struct bb_name_state *r;
++
++ kdb_register_repeat("bb1", kdb_bb1, "<vaddr>", "Analyse one basic block", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("bb_all", kdb_bb_all, "", "Backtrace check on all built in functions", 0, KDB_REPEAT_NONE);
++
++ /* Split the opcode usage table by the first letter of each set of
++ * opcodes, for faster mapping of opcode to its operand usage.
++ */
++ for (i = 0; i < ARRAY_SIZE(bb_opcode_usage_all); ++i) {
++ c = bb_opcode_usage_all[i].opcode[0] - 'a';
++ if (c != cp) {
++ cp = c;
++ bb_opcode_usage[c].opcode = bb_opcode_usage_all + i;
++ }
++ ++bb_opcode_usage[c].size;
++ }
++
++ bb_common_interrupt = kallsyms_lookup_name("common_interrupt");
++ bb_error_entry = kallsyms_lookup_name("error_entry");
++ bb_ret_from_intr = kallsyms_lookup_name("ret_from_intr");
++ bb_thread_return = kallsyms_lookup_name("thread_return");
++ bb_sync_regs = kallsyms_lookup_name("sync_regs");
++ bb_save_v86_state = kallsyms_lookup_name("save_v86_state");
++ bb__sched_text_start = kallsyms_lookup_name("__sched_text_start");
++ bb__sched_text_end = kallsyms_lookup_name("__sched_text_end");
++ for (i = 0, r = bb_special_cases;
++ i < ARRAY_SIZE(bb_special_cases);
++ ++i, ++r) {
++ r->address = kallsyms_lookup_name(r->name);
++ }
++
++#ifdef CONFIG_4KSTACKS
++ kdba_hardirq_ctx = (struct thread_info **)kallsyms_lookup_name("hardirq_ctx");
++ kdba_softirq_ctx = (struct thread_info **)kallsyms_lookup_name("softirq_ctx");
++#endif /* CONFIG_4KSTACKS */
++
++ return 0;
++}
++
++static void __exit kdba_bt_x86_exit(void)
++{
++ kdb_unregister("bb1");
++ kdb_unregister("bb_all");
++}
++
++module_init(kdba_bt_x86_init)
++module_exit(kdba_bt_x86_exit)
+diff -Nurp linux-2.6.22-590/kdb/kdb_bp.c linux-2.6.22-600/kdb/kdb_bp.c
+--- linux-2.6.22-590/kdb/kdb_bp.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/kdb_bp.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,619 @@
++/*
++ * Kernel Debugger Architecture Independent Breakpoint Handler
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/smp.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <asm/system.h>
++
++/*
++ * Table of kdb_breakpoints
++ */
++kdb_bp_t kdb_breakpoints[KDB_MAXBPT];
++
++/*
++ * kdb_bp_install_global
++ *
++ * Install global kdb_breakpoints prior to returning from the
++ * kernel debugger. This allows the kdb_breakpoints to be set
++ * upon functions that are used internally by kdb, such as
++ * printk().
++ *
++ * Parameters:
++ * regs Execution frame.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ *
++ * This function is only called once per kdb session.
++ */
++
++void
++kdb_bp_install_global(struct pt_regs *regs)
++{
++ int i;
++
++ for(i=0; i<KDB_MAXBPT; i++) {
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdb_bp_install_global bp %d bp_enabled %d bp_global %d\n",
++ i, kdb_breakpoints[i].bp_enabled, kdb_breakpoints[i].bp_global);
++ }
++ if (kdb_breakpoints[i].bp_enabled
++ && kdb_breakpoints[i].bp_global) {
++ kdba_installbp(regs, &kdb_breakpoints[i]);
++ }
++ }
++}
++
++/*
++ * kdb_bp_install_local
++ *
++ * Install local kdb_breakpoints prior to returning from the
++ * kernel debugger. This allows the kdb_breakpoints to be set
++ * upon functions that are used internally by kdb, such as
++ * printk().
++ *
++ * Parameters:
++ * regs Execution frame.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ *
++ * This function is called once per processor.
++ */
++
++void
++kdb_bp_install_local(struct pt_regs *regs)
++{
++ int i;
++
++ for(i=0; i<KDB_MAXBPT; i++) {
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdb_bp_install_local bp %d bp_enabled %d bp_global %d cpu %d bp_cpu %d\n",
++ i, kdb_breakpoints[i].bp_enabled, kdb_breakpoints[i].bp_global,
++ smp_processor_id(), kdb_breakpoints[i].bp_cpu);
++ }
++ if (kdb_breakpoints[i].bp_enabled
++ && kdb_breakpoints[i].bp_cpu == smp_processor_id()
++ && !kdb_breakpoints[i].bp_global){
++ kdba_installbp(regs, &kdb_breakpoints[i]);
++ }
++ }
++}
++
++/*
++ * kdb_bp_remove_global
++ *
++ * Remove global kdb_breakpoints upon entry to the kernel debugger.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void
++kdb_bp_remove_global(void)
++{
++ int i;
++
++ for(i=KDB_MAXBPT-1; i>=0; i--) {
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdb_bp_remove_global bp %d bp_enabled %d bp_global %d\n",
++ i, kdb_breakpoints[i].bp_enabled, kdb_breakpoints[i].bp_global);
++ }
++ if (kdb_breakpoints[i].bp_enabled
++ && kdb_breakpoints[i].bp_global) {
++ kdba_removebp(&kdb_breakpoints[i]);
++ }
++ }
++}
++
++
++/*
++ * kdb_bp_remove_local
++ *
++ * Remove local kdb_breakpoints upon entry to the kernel debugger.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void
++kdb_bp_remove_local(void)
++{
++ int i;
++
++ for(i=KDB_MAXBPT-1; i>=0; i--) {
++ if (KDB_DEBUG(BP)) {
++ kdb_printf("kdb_bp_remove_local bp %d bp_enabled %d bp_global %d cpu %d bp_cpu %d\n",
++ i, kdb_breakpoints[i].bp_enabled, kdb_breakpoints[i].bp_global,
++ smp_processor_id(), kdb_breakpoints[i].bp_cpu);
++ }
++ if (kdb_breakpoints[i].bp_enabled
++ && kdb_breakpoints[i].bp_cpu == smp_processor_id()
++ && !kdb_breakpoints[i].bp_global){
++ kdba_removebp(&kdb_breakpoints[i]);
++ }
++ }
++}
++
++/*
++ * kdb_printbp
++ *
++ * Internal function to format and print a breakpoint entry.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++static void
++kdb_printbp(kdb_bp_t *bp, int i)
++{
++ if (bp->bp_forcehw) {
++ kdb_printf("Forced ");
++ }
++
++ if (!bp->bp_template.bph_free) {
++ kdb_printf("%s ", kdba_bptype(&bp->bp_template));
++ } else {
++ kdb_printf("Instruction(i) ");
++ }
++
++ kdb_printf("BP #%d at ", i);
++ kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT);
++
++ if (bp->bp_enabled) {
++ kdba_printbp(bp);
++ if (bp->bp_global)
++ kdb_printf(" globally");
++ else
++ kdb_printf(" on cpu %d", bp->bp_cpu);
++ if (bp->bp_adjust)
++ kdb_printf(" adjust %d", bp->bp_adjust);
++ } else {
++ kdb_printf("\n is disabled");
++ }
++
++ kdb_printf("\n");
++}
++
++/*
++ * kdb_bp
++ *
++ * Handle the bp, and bpa commands.
++ *
++ * [bp|bpa|bph] <addr-expression> [DATAR|DATAW|IO [length]]
++ *
++ * Parameters:
++ * argc Count of arguments in argv
++ * argv Space delimited command line arguments
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero for success, a kdb diagnostic if failure.
++ * Locking:
++ * None.
++ * Remarks:
++ *
++ * bp Set breakpoint. Only use hardware assist if necessary.
++ * bpa Set breakpoint on all cpus, only use hardware regs if necessary
++ * bph Set breakpoint - force hardware register
++ * bpha Set breakpoint on all cpus, force hardware register
++ */
++
++static int
++kdb_bp(int argc, const char **argv)
++{
++ int i, bpno;
++ kdb_bp_t *bp, *bp_check;
++ int diag;
++ int free;
++ kdb_machreg_t addr;
++ char *symname = NULL;
++ long offset = 0ul;
++ int nextarg;
++ int hardware;
++ int global;
++
++ if (argc == 0) {
++ /*
++ * Display breakpoint table
++ */
++ for(bpno=0,bp=kdb_breakpoints; bpno<KDB_MAXBPT; bpno++, bp++) {
++ if (bp->bp_free) continue;
++
++ kdb_printbp(bp, bpno);
++ }
++
++ return 0;
++ }
++
++ global = ((strcmp(argv[0], "bpa") == 0)
++ || (strcmp(argv[0], "bpha") == 0));
++ hardware = ((strcmp(argv[0], "bph") == 0)
++ || (strcmp(argv[0], "bpha") == 0));
++
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, &symname);
++ if (diag)
++ return diag;
++ if (!addr)
++ return KDB_BADINT;
++
++ /*
++ * Allocate a new bp structure
++ */
++ free = KDB_MAXBPT;
++ for(bpno=0,bp=kdb_breakpoints; bpno<KDB_MAXBPT; bpno++,bp++) {
++ if (bp->bp_free) {
++ break;
++ }
++ }
++
++ if (bpno == KDB_MAXBPT)
++ return KDB_TOOMANYBPT;
++
++ memset(bp, 0, sizeof(*bp));
++ bp->bp_free = 1;
++ kdba_check_pc(&addr);
++ for(i=0,bp_check=kdb_breakpoints; i<KDB_MAXBPT; i++,bp_check++) {
++ if (!bp_check->bp_free && bp_check->bp_addr == addr) {
++ kdb_printf("You already have a breakpoint at " kdb_bfd_vma_fmt0 "\n", addr);
++ return KDB_DUPBPT;
++ }
++ }
++ bp->bp_addr = addr;
++ bp->bp_free = 0;
++
++ bp->bp_forcehw = hardware;
++ if (KDB_DEBUG(BP))
++ kdb_printf("kdb_bp: forcehw is %d hardware is %d\n", bp->bp_forcehw, hardware);
++
++ /*
++ * Handle architecture dependent parsing
++ */
++ diag = kdba_parsebp(argc, argv, &nextarg, bp);
++ if (diag) {
++ return diag;
++ }
++
++ bp->bp_enabled = 1;
++ bp->bp_global = 1; /* Most breakpoints are global */
++
++ if (hardware && !global) {
++ bp->bp_global = 0;
++ bp->bp_cpu = smp_processor_id();
++ }
++
++ /*
++ * Allocate a hardware breakpoint. If one is not available,
++ * disable the breakpoint, but leave it in the breakpoint
++ * table. When the breakpoint is re-enabled (via 'be'), we'll
++ * attempt to allocate a hardware register for it.
++ */
++ if (!bp->bp_template.bph_free) {
++ bp->bp_hard = kdba_allocbp(&bp->bp_template, &diag);
++ if (diag) {
++ bp->bp_enabled = 0;
++ return diag;
++ }
++ bp->bp_hardtype = 1;
++ }
++
++ kdb_printbp(bp, bpno);
++
++ return 0;
++}
++
++/*
++ * kdb_bc
++ *
++ * Handles the 'bc', 'be', and 'bd' commands
++ *
++ * [bd|bc|be] <breakpoint-number>
++ * [bd|bc|be] *
++ *
++ * Parameters:
++ * argc Count of arguments in argv
++ * argv Space delimited command line arguments
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero for success, a kdb diagnostic for failure
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++#define KDBCMD_BC 0
++#define KDBCMD_BE 1
++#define KDBCMD_BD 2
++
++static int
++kdb_bc(int argc, const char **argv)
++{
++ kdb_machreg_t addr;
++ kdb_bp_t *bp = NULL;
++ int lowbp = KDB_MAXBPT;
++ int highbp = 0;
++ int done = 0;
++ int i;
++ int diag;
++ int cmd; /* KDBCMD_B? */
++
++ if (strcmp(argv[0], "be") == 0) {
++ cmd = KDBCMD_BE;
++ } else if (strcmp(argv[0], "bd") == 0) {
++ cmd = KDBCMD_BD;
++ } else
++ cmd = KDBCMD_BC;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ if (strcmp(argv[1], "*") == 0) {
++ lowbp = 0;
++ highbp = KDB_MAXBPT;
++ } else {
++ diag = kdbgetularg(argv[1], &addr);
++ if (diag)
++ return diag;
++
++ /*
++ * For addresses less than the maximum breakpoint number,
++ * assume that the breakpoint number is desired.
++ */
++ if (addr < KDB_MAXBPT) {
++ bp = &kdb_breakpoints[addr];
++ lowbp = highbp = addr;
++ highbp++;
++ } else {
++ for(i=0, bp=kdb_breakpoints; i<KDB_MAXBPT; i++, bp++) {
++ if (bp->bp_addr == addr) {
++ lowbp = highbp = i;
++ highbp++;
++ break;
++ }
++ }
++ }
++ }
++
++ /*
++ * Now operate on the set of breakpoints matching the input
++ * criteria (either '*' for all, or an individual breakpoint).
++ */
++ for(bp=&kdb_breakpoints[lowbp], i=lowbp;
++ i < highbp;
++ i++, bp++) {
++ if (bp->bp_free)
++ continue;
++
++ done++;
++
++ switch (cmd) {
++ case KDBCMD_BC:
++ if (bp->bp_hardtype) {
++ kdba_freebp(bp->bp_hard);
++ bp->bp_hard = NULL;
++ bp->bp_hardtype = 0;
++ }
++
++ bp->bp_enabled = 0;
++ bp->bp_global = 0;
++
++ kdb_printf("Breakpoint %d at " kdb_bfd_vma_fmt " cleared\n",
++ i, bp->bp_addr);
++
++ bp->bp_addr = 0;
++ bp->bp_free = 1;
++
++ break;
++ case KDBCMD_BE:
++ /*
++ * Allocate a hardware breakpoint. If one is not
++ * available, don't enable the breakpoint.
++ */
++ if (!bp->bp_template.bph_free
++ && !bp->bp_hardtype) {
++ bp->bp_hard = kdba_allocbp(&bp->bp_template, &diag);
++ if (diag) {
++ bp->bp_enabled = 0;
++ return diag;
++ }
++ bp->bp_hardtype = 1;
++ }
++
++ bp->bp_enabled = 1;
++
++ kdb_printf("Breakpoint %d at " kdb_bfd_vma_fmt " enabled",
++ i, bp->bp_addr);
++
++ kdb_printf("\n");
++ break;
++ case KDBCMD_BD:
++ if (!bp->bp_enabled)
++ break;
++
++ /*
++ * Since this breakpoint is now disabled, we can
++ * give up the hardware register which is allocated
++ * to it.
++ */
++ if (bp->bp_hardtype) {
++ kdba_freebp(bp->bp_hard);
++ bp->bp_hard = NULL;
++ bp->bp_hardtype = 0;
++ }
++
++ bp->bp_enabled = 0;
++
++ kdb_printf("Breakpoint %d at " kdb_bfd_vma_fmt " disabled\n",
++ i, bp->bp_addr);
++
++ break;
++ }
++ if (bp->bp_delay && (cmd == KDBCMD_BC || cmd == KDBCMD_BD)) {
++ bp->bp_delay = 0;
++ KDB_STATE_CLEAR(SSBPT);
++ }
++ }
++
++ return (!done)?KDB_BPTNOTFOUND:0;
++}
++
++/*
++ * kdb_ss
++ *
++ * Process the 'ss' (Single Step) and 'ssb' (Single Step to Branch)
++ * commands.
++ *
++ * ss
++ * ssb
++ *
++ * Parameters:
++ * argc Argument count
++ * argv Argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * KDB_CMD_SS[B] for success, a kdb error if failure.
++ * Locking:
++ * None.
++ * Remarks:
++ *
++ * Set the arch specific option to trigger a debug trap after the next
++ * instruction.
++ *
++ * For 'ssb', set the trace flag in the debug trap handler
++ * after printing the current insn and return directly without
++ * invoking the kdb command processor, until a branch instruction
++ * is encountered.
++ */
++
++static int
++kdb_ss(int argc, const char **argv)
++{
++ int ssb = 0;
++ struct pt_regs *regs = get_irq_regs();
++
++ ssb = (strcmp(argv[0], "ssb") == 0);
++ if (argc != 0)
++ return KDB_ARGCOUNT;
++
++ if (!regs) {
++ kdb_printf("%s: pt_regs not available\n", __FUNCTION__);
++ return KDB_BADREG;
++ }
++
++ /*
++ * Set trace flag and go.
++ */
++ KDB_STATE_SET(DOING_SS);
++ if (ssb)
++ KDB_STATE_SET(DOING_SSB);
++
++ kdba_setsinglestep(regs); /* Enable single step */
++
++ if (ssb)
++ return KDB_CMD_SSB;
++ return KDB_CMD_SS;
++}
++
++/*
++ * kdb_initbptab
++ *
++ * Initialize the breakpoint table. Register breakpoint commands.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void __init
++kdb_initbptab(void)
++{
++ int i;
++ kdb_bp_t *bp;
++
++ /*
++ * First time initialization.
++ */
++ memset(&kdb_breakpoints, '\0', sizeof(kdb_breakpoints));
++
++ for (i=0, bp=kdb_breakpoints; i<KDB_MAXBPT; i++, bp++) {
++ bp->bp_free = 1;
++ /*
++ * The bph_free flag is architecturally required. It
++ * is set by architecture-dependent code to false (zero)
++ * in the event a hardware breakpoint register is required
++ * for this breakpoint.
++ *
++ * The rest of the template is reserved to the architecture
++ * dependent code and _must_ not be touched by the architecture
++ * independent code.
++ */
++ bp->bp_template.bph_free = 1;
++ }
++
++ kdb_register_repeat("bp", kdb_bp, "[<vaddr>]", "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("bl", kdb_bp, "[<vaddr>]", "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("bpa", kdb_bp, "[<vaddr>]", "Set/Display global breakpoints", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("bph", kdb_bp, "[<vaddr>]", "Set hardware breakpoint", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("bpha", kdb_bp, "[<vaddr>]", "Set global hardware breakpoint", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("bc", kdb_bc, "<bpnum>", "Clear Breakpoint", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("be", kdb_bc, "<bpnum>", "Enable Breakpoint", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("bd", kdb_bc, "<bpnum>", "Disable Breakpoint", 0, KDB_REPEAT_NONE);
++
++ kdb_register_repeat("ss", kdb_ss, "", "Single Step", 1, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("ssb", kdb_ss, "", "Single step to branch/call", 0, KDB_REPEAT_NO_ARGS);
++ /*
++ * Architecture dependent initialization.
++ */
++ kdba_initbp();
++}
+diff -Nurp linux-2.6.22-590/kdb/kdb_bt.c linux-2.6.22-600/kdb/kdb_bt.c
+--- linux-2.6.22-590/kdb/kdb_bt.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/kdb_bt.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,180 @@
++/*
++ * Kernel Debugger Architecture Independent Stack Traceback
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/nmi.h>
++#include <asm/system.h>
++
++
++/*
++ * kdb_bt
++ *
++ * This function implements the 'bt' command. Print a stack
++ * traceback.
++ *
++ * bt [<address-expression>] (addr-exp is for alternate stacks)
++ * btp <pid> Kernel stack for <pid>
++ * btt <address-expression> Kernel stack for task structure at <address-expression>
++ * bta [DRSTCZEUIMA] All useful processes, optionally filtered by state
++ * btc [<cpu>] The current process on one cpu, default is all cpus
++ *
++ * bt <address-expression> refers to an address on the stack; that location
++ * is assumed to contain a return address.
++ *
++ * btt <address-expression> refers to the address of a struct task.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * Backtrack works best when the code uses frame pointers. But even
++ * without frame pointers we should get a reasonable trace.
++ *
++ * mds comes in handy when examining the stack to do a manual traceback or
++ * to get a starting point for bt <address-expression>.
++ */
++
++static int
++kdb_bt1(const struct task_struct *p, unsigned long mask, int argcount, int btaprompt)
++{
++ int diag;
++ char buffer[2];
++ if (kdb_getarea(buffer[0], (unsigned long)p) ||
++ kdb_getarea(buffer[0], (unsigned long)(p+1)-1))
++ return KDB_BADADDR;
++ if (!kdb_task_state(p, mask))
++ return 0;
++ kdb_printf("Stack traceback for pid %d\n", p->pid);
++ kdb_ps1(p);
++ diag = kdba_bt_process(p, argcount);
++ if (btaprompt) {
++ kdb_getstr(buffer, sizeof(buffer), "Enter <q> to end, <cr> to continue:");
++ if (buffer[0] == 'q') {
++ kdb_printf("\n");
++ return 1;
++ }
++ }
++ touch_nmi_watchdog();
++ return 0;
++}
++
++int
++kdb_bt(int argc, const char **argv)
++{
++ int diag;
++ int argcount = 5;
++ int btaprompt = 1;
++ int nextarg;
++ unsigned long addr;
++ long offset;
++
++ kdbgetintenv("BTARGS", &argcount); /* Arguments to print */
++ kdbgetintenv("BTAPROMPT", &btaprompt); /* Prompt after each proc in bta */
++
++ if (strcmp(argv[0], "bta") == 0) {
++ struct task_struct *g, *p;
++ unsigned long cpu;
++ unsigned long mask = kdb_task_state_string(argc ? argv[1] : NULL);
++ if (argc == 0)
++ kdb_ps_suppressed();
++ /* Run the active tasks first */
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (!cpu_online(cpu))
++ continue;
++ p = kdb_curr_task(cpu);
++ if (kdb_bt1(p, mask, argcount, btaprompt))
++ return 0;
++ }
++ /* Now the inactive tasks */
++ kdb_do_each_thread(g, p) {
++ if (task_curr(p))
++ continue;
++ if (kdb_bt1(p, mask, argcount, btaprompt))
++ return 0;
++ } kdb_while_each_thread(g, p);
++ } else if (strcmp(argv[0], "btp") == 0) {
++ struct task_struct *p;
++ unsigned long pid;
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++ if ((diag = kdbgetularg((char *)argv[1], &pid)))
++ return diag;
++ if ((p = find_task_by_pid(pid))) {
++ kdba_set_current_task(p);
++ return kdb_bt1(p, ~0UL, argcount, 0);
++ }
++ kdb_printf("No process with pid == %ld found\n", pid);
++ return 0;
++ } else if (strcmp(argv[0], "btt") == 0) {
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++ if ((diag = kdbgetularg((char *)argv[1], &addr)))
++ return diag;
++ kdba_set_current_task((struct task_struct *)addr);
++ return kdb_bt1((struct task_struct *)addr, ~0UL, argcount, 0);
++ } else if (strcmp(argv[0], "btc") == 0) {
++ unsigned long cpu = ~0;
++ struct kdb_running_process *krp;
++ const struct task_struct *save_current_task = kdb_current_task;
++ char buf[80];
++ if (argc > 1)
++ return KDB_ARGCOUNT;
++ if (argc == 1 && (diag = kdbgetularg((char *)argv[1], &cpu)))
++ return diag;
++ /* Recursive use of kdb_parse, do not use argv after this point */
++ argv = NULL;
++ if (cpu != ~0) {
++ krp = kdb_running_process + cpu;
++ if (cpu >= NR_CPUS || !krp->seqno || !cpu_online(cpu)) {
++ kdb_printf("no process for cpu %ld\n", cpu);
++ return 0;
++ }
++ sprintf(buf, "btt 0x%p\n", krp->p);
++ kdb_parse(buf);
++ return 0;
++ }
++ kdb_printf("btc: cpu status: ");
++ kdb_parse("cpu\n");
++ for (cpu = 0, krp = kdb_running_process; cpu < NR_CPUS; ++cpu, ++krp) {
++ if (!cpu_online(cpu) || !krp->seqno)
++ continue;
++ sprintf(buf, "btt 0x%p\n", krp->p);
++ kdb_parse(buf);
++ touch_nmi_watchdog();
++ }
++ kdba_set_current_task(save_current_task);
++ return 0;
++ } else {
++ if (argc) {
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
++ &offset, NULL);
++ if (diag)
++ return diag;
++ return kdba_bt_address(addr, argcount);
++ } else {
++ return kdb_bt1(kdb_current_task, ~0UL, argcount, 0);
++ }
++ }
++
++ /* NOTREACHED */
++ return 0;
++}
+diff -Nurp linux-2.6.22-590/kdb/kdb_cmds linux-2.6.22-600/kdb/kdb_cmds
+--- linux-2.6.22-590/kdb/kdb_cmds 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/kdb_cmds 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,32 @@
++# Initial commands for kdb, alter to suit your needs.
++# These commands are executed in kdb_init() context, no SMP, no
++# processes. Commands that require process data (including stack or
++# registers) are not reliable this early. set and bp commands should
++# be safe. Global breakpoint commands affect each cpu as it is booted.
++
++# Standard debugging information for first level support, just type archkdb
++# or archkdbcpu or archkdbshort at the kdb prompt.
++
++defcmd archkdb "" "First line arch debugging"
++ set BTSYMARG 1
++ set BTARGS 9
++ pid R
++ -archkdbcommon
++ -bta
++endefcmd
++
++defcmd archkdbcpu "" "archkdb with only tasks on cpus"
++ set BTSYMARG 1
++ set BTARGS 9
++ pid R
++ -archkdbcommon
++ -btc
++endefcmd
++
++defcmd archkdbshort "" "archkdb with less detailed backtrace"
++ set BTSYMARG 0
++ set BTARGS 0
++ pid R
++ -archkdbcommon
++ -bta
++endefcmd
+diff -Nurp linux-2.6.22-590/kdb/kdb_id.c linux-2.6.22-600/kdb/kdb_id.c
+--- linux-2.6.22-590/kdb/kdb_id.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/kdb_id.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,236 @@
++/*
++ * Kernel Debugger Architecture Independent Instruction Disassembly
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <stdarg.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++
++disassemble_info kdb_di;
++
++/*
++ * kdb_id
++ *
++ * Handle the id (instruction display) command.
++ *
++ * id [<addr>]
++ *
++ * Parameters:
++ * argc Count of arguments in argv
++ * argv Space delimited command line arguments
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero for success, a kdb diagnostic if failure.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++int
++kdb_id(int argc, const char **argv)
++{
++ kdb_machreg_t pc;
++ int icount;
++ int diag;
++ int i;
++ char *mode;
++ int nextarg;
++ long offset = 0;
++ static kdb_machreg_t lastpc;
++ struct disassemble_info *dip = &kdb_di;
++ char lastbuf[50];
++ unsigned long word;
++
++ kdb_di.fprintf_func = kdb_dis_fprintf;
++ kdba_id_init(&kdb_di);
++
++ if (argc != 1) {
++ if (lastpc == 0) {
++ return KDB_ARGCOUNT;
++ } else {
++ sprintf(lastbuf, "0x%lx", lastpc);
++ argv[1] = lastbuf;
++ argc = 1;
++ }
++ }
++
++
++ /*
++ * Fetch PC. First, check to see if it is a symbol, if not,
++ * try address.
++ */
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &pc, &offset, NULL);
++ if (diag)
++ return diag;
++ kdba_check_pc(&pc);
++ if (kdb_getarea(word, pc))
++ return(0);
++
++ /*
++ * Number of lines to display
++ */
++ diag = kdbgetintenv("IDCOUNT", &icount);
++ if (diag)
++ return diag;
++
++ mode = kdbgetenv("IDMODE");
++ diag = kdba_id_parsemode(mode, dip);
++ if (diag) {
++ return diag;
++ }
++
++ for(i=0; i<icount; i++) {
++ pc += kdba_id_printinsn(pc, &kdb_di);
++ kdb_printf("\n");
++ }
++
++ lastpc = pc;
++
++ return 0;
++}
++
++/*
++ * kdb_id1
++ *
++ * Disassemble a single instruction at 'pc'.
++ *
++ * Parameters:
++ * pc Address of instruction to disassemble
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void
++kdb_id1(unsigned long pc)
++{
++ char *mode;
++ int diag;
++
++ kdb_di.fprintf_func = kdb_dis_fprintf;
++ kdba_id_init(&kdb_di);
++
++ /*
++ * Allow the user to specify that this instruction
++ * should be treated differently.
++ */
++
++ mode = kdbgetenv("IDMODE");
++ diag = kdba_id_parsemode(mode, &kdb_di);
++ if (diag) {
++ kdb_printf("kdb_id: bad value in 'IDMODE' environment variable ignored\n");
++ }
++
++ (void) kdba_id_printinsn(pc, &kdb_di);
++ kdb_printf("\n");
++}
++
++/*
++ * kdb_dis_fprintf
++ *
++ * Format and print a string.
++ *
++ * Parameters:
++ * file Unused parameter.
++ * fmt Format string
++ * ... Optional additional parameters.
++ * Returns:
++ * 0
++ * Locking:
++ * Remarks:
++ * Result of format conversion cannot exceed 255 bytes.
++ */
++
++int
++kdb_dis_fprintf(PTR file, const char *fmt, ...)
++{
++ char buffer[256];
++ va_list ap;
++
++ va_start(ap, fmt);
++ vsprintf(buffer, fmt, ap);
++ va_end(ap);
++
++ kdb_printf("%s", buffer);
++
++ return 0;
++}
++
++/*
++ * kdb_dis_fprintf_dummy
++ *
++ * A dummy printf function for the disassembler, it does nothing.
++ * This lets code call the disassembler to step through
++ * instructions without actually printing anything.
++ * Inputs:
++ * Always ignored.
++ * Outputs:
++ * None.
++ * Returns:
++ * Always 0.
++ * Locking:
++ * none.
++ * Remarks:
++ * None.
++ */
++
++int
++kdb_dis_fprintf_dummy(PTR file, const char *fmt, ...)
++{
++ return(0);
++}
++
++/*
++ * kdb_id_init
++ *
++ * Initialize the disassembly information structure
++ * for the GNU disassembler.
++ *
++ * Parameters:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++void __init
++kdb_id_init(void)
++{
++ kdb_di.stream = NULL;
++ kdb_di.application_data = NULL;
++ kdb_di.symbols = NULL;
++ kdb_di.num_symbols = 0;
++ kdb_di.flags = 0;
++ kdb_di.private_data = NULL;
++ kdb_di.buffer = NULL;
++ kdb_di.buffer_vma = 0;
++ kdb_di.buffer_length = 0;
++ kdb_di.bytes_per_line = 0;
++ kdb_di.bytes_per_chunk = 0;
++ kdb_di.insn_info_valid = 0;
++ kdb_di.branch_delay_insns = 0;
++ kdb_di.data_size = 0;
++ kdb_di.insn_type = 0;
++ kdb_di.target = 0;
++ kdb_di.target2 = 0;
++}
+diff -Nurp linux-2.6.22-590/kdb/kdb_io.c linux-2.6.22-600/kdb/kdb_io.c
+--- linux-2.6.22-590/kdb/kdb_io.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/kdb_io.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,672 @@
++/*
++ * Kernel Debugger Architecture Independent Console I/O handler
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/kdev_t.h>
++#include <linux/console.h>
++#include <linux/string.h>
++#include <linux/sched.h>
++#include <linux/smp.h>
++#include <linux/nmi.h>
++#include <linux/delay.h>
++
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/kallsyms.h>
++
++static struct console *kdbcons;
++
++#ifdef CONFIG_PPC64
++#include <asm/udbg.h>
++#endif
++
++#define CMD_BUFLEN 256
++char kdb_prompt_str[CMD_BUFLEN];
++
++/*
++ * kdb_read
++ *
++ * This function reads a string of characters, terminated by
++ * a newline, or by reaching the end of the supplied buffer,
++ * from the current kernel debugger console device.
++ * Parameters:
++ * buffer - Address of character buffer to receive input characters.
++ * bufsize - size, in bytes, of the character buffer
++ * Returns:
++ * Returns a pointer to the buffer containing the received
++ * character string. This string will be terminated by a
++ * newline character.
++ * Locking:
++ * No locks are required to be held upon entry to this
++ * function. It is not reentrant - it relies on the fact
++ * that while kdb is running on any one processor all other
++ * processors will be spinning at the kdb barrier.
++ * Remarks:
++ *
++ * Davidm asks, why doesn't kdb use the console abstraction;
++ * here are some reasons:
++ * - you cannot debug the console abstraction with kdb if
++ * kdb uses it.
++ * - you rely on the correct functioning of the abstraction
++ * in the presence of general system failures.
++ * - You must acquire the console spinlock thus restricting
++ * the usability - what if the kernel fails with the spinlock
++ * held - one still wishes to debug such situations.
++ * - How about debugging before the console(s) are registered?
++ * - None of the current consoles (sercons, vt_console_driver)
++ * have read functions defined.
++ * - The standard pc keyboard and terminal drivers are interrupt
++ * driven. We cannot enable interrupts while kdb is active,
++ * so the standard input functions cannot be used by kdb.
++ *
++ * An implementation could be improved by removing the need for
++ * lock acquisition - just keep a 'struct console *kdbconsole;' global
++ * variable which refers to the preferred kdb console.
++ *
++ * The bulk of this function is architecture dependent.
++ *
++ * The buffer size must be >= 2. A buffer size of 2 means that the caller only
++ * wants a single key.
++ *
++ * An escape key could be the start of a vt100 control sequence such as \e[D
++ * (left arrow) or it could be a character in its own right. The standard
++ * method for detecting the difference is to wait for 2 seconds to see if there
++ * are any other characters. kdb is complicated by the lack of a timer service
++ * (interrupts are off), by multiple input sources and by the need to sometimes
++ * return after just one key. Escape sequence processing has to be done as
++ * states in the polling loop.
++ */
++
++char *
++kdb_read(char *buffer, size_t bufsize)
++{
++ char *cp = buffer;
++ char *bufend = buffer+bufsize-2; /* Reserve space for newline and null byte */
++
++ char *lastchar;
++ char *p_tmp;
++ char tmp;
++ static char tmpbuffer[CMD_BUFLEN];
++ int len = strlen(buffer);
++ int len_tmp;
++ int tab=0;
++ int count;
++ int i;
++ int diag, dtab_count;
++
++#define ESCAPE_UDELAY 1000
++#define ESCAPE_DELAY 2*1000000/ESCAPE_UDELAY /* 2 seconds worth of udelays */
++ char escape_data[5]; /* longest vt100 escape sequence is 4 bytes */
++ char *ped = escape_data;
++ int escape_delay = 0;
++ get_char_func *f, *f_escape = NULL;
++
++ diag = kdbgetintenv("DTABCOUNT",&dtab_count);
++ if (diag)
++ dtab_count = 30;
++
++ if (len > 0 ) {
++ cp += len;
++ if (*(buffer+len-1) == '\n')
++ cp--;
++ }
++
++ lastchar = cp;
++ *cp = '\0';
++ kdb_printf("%s", buffer);
++
++ for (;;) {
++ int key;
++ for (f = &poll_funcs[0]; ; ++f) {
++ if (*f == NULL) {
++ /* Reset NMI watchdog once per poll loop */
++ touch_nmi_watchdog();
++ f = &poll_funcs[0];
++ }
++ if (escape_delay == 2) {
++ *ped = '\0';
++ ped = escape_data;
++ --escape_delay;
++ }
++ if (escape_delay == 1) {
++ key = *ped++;
++ if (!*ped)
++ --escape_delay;
++ break;
++ }
++ key = (*f)();
++ if (key == -1) {
++ if (escape_delay) {
++ udelay(ESCAPE_UDELAY);
++ --escape_delay;
++ }
++ continue;
++ }
++ if (bufsize <= 2) {
++ if (key == '\r')
++ key = '\n';
++ kdb_printf("%c", key);
++ *buffer++ = key;
++ *buffer = '\0';
++ return buffer;
++ }
++ if (escape_delay == 0 && key == '\e') {
++ escape_delay = ESCAPE_DELAY;
++ ped = escape_data;
++ f_escape = f;
++ }
++ if (escape_delay) {
++ *ped++ = key;
++ if (f_escape != f) {
++ escape_delay = 2;
++ continue;
++ }
++ if (ped - escape_data == 1) {
++ /* \e */
++ continue;
++ }
++ else if (ped - escape_data == 2) {
++ /* \e<something> */
++ if (key != '[')
++ escape_delay = 2;
++ continue;
++ } else if (ped - escape_data == 3) {
++ /* \e[<something> */
++ int mapkey = 0;
++ switch (key) {
++ case 'A': mapkey = 16; break; /* \e[A, up arrow */
++ case 'B': mapkey = 14; break; /* \e[B, down arrow */
++ case 'C': mapkey = 6; break; /* \e[C, right arrow */
++ case 'D': mapkey = 2; break; /* \e[D, left arrow */
++ case '1': /* dropthrough */
++ case '3': /* dropthrough */
++ case '4': mapkey = -1; break; /* \e[<1,3,4>], may be home, del, end */
++ }
++ if (mapkey != -1) {
++ if (mapkey > 0) {
++ escape_data[0] = mapkey;
++ escape_data[1] = '\0';
++ }
++ escape_delay = 2;
++ }
++ continue;
++ } else if (ped - escape_data == 4) {
++ /* \e[<1,3,4><something> */
++ int mapkey = 0;
++ if (key == '~') {
++ switch (escape_data[2]) {
++ case '1': mapkey = 1; break; /* \e[1~, home */
++ case '3': mapkey = 4; break; /* \e[3~, del */
++ case '4': mapkey = 5; break; /* \e[4~, end */
++ }
++ }
++ if (mapkey > 0) {
++ escape_data[0] = mapkey;
++ escape_data[1] = '\0';
++ }
++ escape_delay = 2;
++ continue;
++ }
++ }
++ break; /* A key to process */
++ }
++
++ if (key != 9)
++ tab = 0;
++ switch (key) {
++ case 8: /* backspace */
++ if (cp > buffer) {
++ if (cp < lastchar) {
++ memcpy(tmpbuffer, cp, lastchar - cp);
++ memcpy(cp-1, tmpbuffer, lastchar - cp);
++ }
++ *(--lastchar) = '\0';
++ --cp;
++ kdb_printf("\b%s \r", cp);
++ tmp = *cp;
++ *cp = '\0';
++ kdb_printf(kdb_prompt_str);
++ kdb_printf("%s", buffer);
++ *cp = tmp;
++ }
++ break;
++ case 13: /* enter */
++ *lastchar++ = '\n';
++ *lastchar++ = '\0';
++ kdb_printf("\n");
++ return buffer;
++ case 4: /* Del */
++ if(cp < lastchar) {
++ memcpy(tmpbuffer, cp+1, lastchar - cp -1);
++ memcpy(cp, tmpbuffer, lastchar - cp -1);
++ *(--lastchar) = '\0';
++ kdb_printf("%s \r", cp);
++ tmp = *cp;
++ *cp = '\0';
++ kdb_printf(kdb_prompt_str);
++ kdb_printf("%s", buffer);
++ *cp = tmp;
++ }
++ break;
++ case 1: /* Home */
++ if(cp > buffer) {
++ kdb_printf("\r");
++ kdb_printf(kdb_prompt_str);
++ cp = buffer;
++ }
++ break;
++ case 5: /* End */
++ if(cp < lastchar) {
++ kdb_printf("%s", cp);
++ cp = lastchar;
++ }
++ break;
++ case 2: /* Left */
++ if (cp > buffer) {
++ kdb_printf("\b");
++ --cp;
++ }
++ break;
++ case 14: /* Down */
++ memset(tmpbuffer, ' ', strlen(kdb_prompt_str)+(lastchar-buffer));
++ *(tmpbuffer+strlen(kdb_prompt_str)+(lastchar-buffer)) = '\0';
++ kdb_printf("\r%s\r", tmpbuffer);
++ *lastchar = (char)key;
++ *(lastchar+1) = '\0';
++ return lastchar;
++ case 6: /* Right */
++ if (cp < lastchar) {
++ kdb_printf("%c", *cp);
++ ++cp;
++ }
++ break;
++ case 16: /* Up */
++ memset(tmpbuffer, ' ', strlen(kdb_prompt_str)+(lastchar-buffer));
++ *(tmpbuffer+strlen(kdb_prompt_str)+(lastchar-buffer)) = '\0';
++ kdb_printf("\r%s\r", tmpbuffer);
++ *lastchar = (char)key;
++ *(lastchar+1) = '\0';
++ return lastchar;
++ case 9: /* Tab */
++ if (tab < 2)
++ ++tab;
++ p_tmp = buffer;
++ while(*p_tmp==' ') p_tmp++;
++ if (p_tmp<=cp) {
++ memcpy(tmpbuffer, p_tmp, cp-p_tmp);
++ *(tmpbuffer + (cp-p_tmp)) = '\0';
++ p_tmp = strrchr(tmpbuffer, ' ');
++ if (p_tmp)
++ ++p_tmp;
++ else
++ p_tmp = tmpbuffer;
++ len = strlen(p_tmp);
++ count = kallsyms_symbol_complete(p_tmp, sizeof(tmpbuffer) - (p_tmp - tmpbuffer));
++ if (tab == 2) {
++ if (count > 0) {
++ kdb_printf("\n%d symbols are found.", count);
++ if(count>dtab_count) {
++ count=dtab_count;
++ kdb_printf(" But only first %d symbols will be printed.\nYou can change the environment variable DTABCOUNT.", count);
++ }
++ kdb_printf("\n");
++ for(i=0;i<count;i++) {
++ if(kallsyms_symbol_next(p_tmp, i)<0)
++ break;
++ kdb_printf("%s ",p_tmp);
++ *(p_tmp+len)='\0';
++ }
++ if(i>=dtab_count)kdb_printf("...");
++ kdb_printf("\n");
++ kdb_printf(kdb_prompt_str);
++ kdb_printf("%s", buffer);
++ }
++ }
++ else {
++ if (count > 0) {
++ len_tmp = strlen(p_tmp);
++ strncpy(p_tmp+len_tmp,cp, lastchar-cp+1);
++ len_tmp = strlen(p_tmp);
++ strncpy(cp, p_tmp+len, len_tmp-len+1);
++ len = len_tmp - len;
++ kdb_printf("%s", cp);
++ cp+=len;
++ lastchar+=len;
++ }
++ }
++ kdb_nextline = 1; /* reset output line number */
++ }
++ break;
++ default:
++ if (key >= 32 &&lastchar < bufend) {
++ if (cp < lastchar) {
++ memcpy(tmpbuffer, cp, lastchar - cp);
++ memcpy(cp+1, tmpbuffer, lastchar - cp);
++ *++lastchar = '\0';
++ *cp = key;
++ kdb_printf("%s\r", cp);
++ ++cp;
++ tmp = *cp;
++ *cp = '\0';
++ kdb_printf(kdb_prompt_str);
++ kdb_printf("%s", buffer);
++ *cp = tmp;
++ } else {
++ *++lastchar = '\0';
++ *cp++ = key;
++ kdb_printf("%c", key);
++ }
++ }
++ break;
++ }
++ }
++}
++
++/*
++ * kdb_getstr
++ *
++ * Print the prompt string and read a command from the
++ * input device.
++ *
++ * Parameters:
++ * buffer Address of buffer to receive command
++ * bufsize Size of buffer in bytes
++ * prompt Pointer to string to use as prompt string
++ * Returns:
++ * Pointer to command buffer.
++ * Locking:
++ * None.
++ * Remarks:
++ * For SMP kernels, the processor number will be
++ * substituted for %d, %x or %o in the prompt.
++ */
++
++char *
++kdb_getstr(char *buffer, size_t bufsize, char *prompt)
++{
++ if(prompt && kdb_prompt_str!=prompt)
++ strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
++ kdb_printf(kdb_prompt_str);
++ kdb_nextline = 1; /* Prompt and input resets line number */
++ return kdb_read(buffer, bufsize);
++}
++
++/*
++ * kdb_input_flush
++ *
++ * Get rid of any buffered console input.
++ *
++ * Parameters:
++ * none
++ * Returns:
++ * nothing
++ * Locking:
++ * none
++ * Remarks:
++ * Call this function whenever you want to flush input. If there is any
++ * outstanding input, it ignores all characters until there has been no
++ * data for approximately half a second.
++ */
++
++#define FLUSH_UDELAY 100
++#define FLUSH_DELAY 500000/FLUSH_UDELAY /* 0.5 seconds worth of udelays */
++
++static void
++kdb_input_flush(void)
++{
++ get_char_func *f;
++ int flush_delay = 1;
++ while (flush_delay--) {
++ touch_nmi_watchdog();
++ for (f = &poll_funcs[0]; *f; ++f) {
++ if ((*f)() != -1) {
++ flush_delay = FLUSH_DELAY;
++ break;
++ }
++ }
++ if (flush_delay)
++ udelay(FLUSH_UDELAY);
++ }
++}
++
++/*
++ * kdb_printf
++ *
++ * Print a string to the output device(s).
++ *
++ * Parameters:
++ * printf-like format and optional args.
++ * Returns:
++ * 0
++ * Locking:
++ * None.
++ * Remarks:
++ * use 'kdbcons->write()' to avoid polluting 'log_buf' with
++ * kdb output.
++ */
++
++static char kdb_buffer[256]; /* A bit too big to go on stack */
++
++void
++kdb_printf(const char *fmt, ...)
++{
++ va_list ap;
++ int diag;
++ int linecount;
++ int logging, saved_loglevel = 0;
++ int do_longjmp = 0;
++ int got_printf_lock = 0;
++ struct console *c = console_drivers;
++ static DEFINE_SPINLOCK(kdb_printf_lock);
++ unsigned long uninitialized_var(flags);
++
++ preempt_disable();
++ /* Serialize kdb_printf if multiple cpus try to write at once.
++ * But if any cpu goes recursive in kdb, just print the output,
++ * even if it is interleaved with any other text.
++ */
++ if (!KDB_STATE(PRINTF_LOCK)) {
++ KDB_STATE_SET(PRINTF_LOCK);
++ spin_lock_irqsave(&kdb_printf_lock, flags);
++ got_printf_lock = 1;
++ atomic_inc(&kdb_event);
++ } else {
++ __acquire(kdb_printf_lock);
++ }
++
++ diag = kdbgetintenv("LINES", &linecount);
++ if (diag || linecount <= 1)
++ linecount = 22;
++
++ diag = kdbgetintenv("LOGGING", &logging);
++ if (diag)
++ logging = 0;
++
++ va_start(ap, fmt);
++ vsnprintf(kdb_buffer, sizeof(kdb_buffer), fmt, ap);
++ va_end(ap);
++
++ /*
++ * Write to all consoles.
++ */
++#ifdef CONFIG_SPARC64
++ if (c == NULL)
++ prom_printf("%s", kdb_buffer);
++ else
++#endif
++
++#ifdef CONFIG_PPC64
++ if (udbg_write)
++ udbg_write(kdb_buffer, strlen(kdb_buffer));
++ else
++#endif
++
++ while (c) {
++ c->write(c, kdb_buffer, strlen(kdb_buffer));
++ c = c->next;
++ }
++ if (logging) {
++ saved_loglevel = console_loglevel;
++ console_loglevel = 0;
++ printk("%s", kdb_buffer);
++ }
++
++ if (KDB_STATE(LONGJMP) && strchr(kdb_buffer, '\n'))
++ kdb_nextline++;
++
++ if (kdb_nextline == linecount) {
++ char buf1[16]="";
++#if defined(CONFIG_SMP)
++ char buf2[32];
++#endif
++ char *moreprompt;
++
++ /* Watch out for recursion here. Any routine that calls
++ * kdb_printf will come back through here. And kdb_read
++ * uses kdb_printf to echo on serial consoles ...
++ */
++ kdb_nextline = 1; /* In case of recursion */
++
++ /*
++ * Pause until cr.
++ */
++ moreprompt = kdbgetenv("MOREPROMPT");
++ if (moreprompt == NULL) {
++ moreprompt = "more> ";
++ }
++
++#if defined(CONFIG_SMP)
++ if (strchr(moreprompt, '%')) {
++ sprintf(buf2, moreprompt, get_cpu());
++ put_cpu();
++ moreprompt = buf2;
++ }
++#endif
++
++ kdb_input_flush();
++ c = console_drivers;
++#ifdef CONFIG_SPARC64
++ if (c == NULL)
++ prom_printf("%s", moreprompt);
++ else
++#endif
++
++#ifdef CONFIG_PPC64
++ if (udbg_write)
++ udbg_write(moreprompt, strlen(moreprompt));
++ else
++#endif
++
++ while (c) {
++ c->write(c, moreprompt, strlen(moreprompt));
++ c = c->next;
++ }
++
++ if (logging)
++ printk("%s", moreprompt);
++
++ kdb_read(buf1, 2); /* '2' indicates to return immediately after getting one key. */
++ kdb_nextline = 1; /* Really set output line 1 */
++
++ if ((buf1[0] == 'q') || (buf1[0] == 'Q')) {
++ do_longjmp = 1;
++ KDB_FLAG_SET(CMD_INTERRUPT); /* command was interrupted */
++ kdb_printf("\n");
++ }
++ else if (buf1[0] && buf1[0] != '\n') {
++ kdb_printf("\nOnly 'q' or 'Q' are processed at more prompt, input ignored\n");
++ }
++ kdb_input_flush();
++ }
++
++ if (logging) {
++ console_loglevel = saved_loglevel;
++ }
++ if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) {
++ got_printf_lock = 0;
++ spin_unlock_irqrestore(&kdb_printf_lock, flags);
++ KDB_STATE_CLEAR(PRINTF_LOCK);
++ atomic_dec(&kdb_event);
++ } else {
++ __release(kdb_printf_lock);
++ }
++ preempt_enable();
++ if (do_longjmp)
++#ifdef kdba_setjmp
++ kdba_longjmp(&kdbjmpbuf[smp_processor_id()], 1)
++#endif /* kdba_setjmp */
++ ;
++}
++
++/*
++ * kdb_io_init
++ *
++ * Initialize kernel debugger output environment.
++ *
++ * Parameters:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ * Select a console device. Only use a VT console if the user specified
++ * or defaulted console= /^tty[0-9]*$/
++ *
++ * FIXME: 2.6.22-rc1 initializes the serial console long after kdb starts,
++ * so booting with 'console=tty console=ttyS0' does not create the console
++ * entry for ttyS0 in time. For now simply assume that we have a working
++ * console, until a better solution can be found.
++ */
++
++void __init
++kdb_io_init(void)
++{
++ /*
++ * Select a console.
++ */
++ struct console *c = console_drivers;
++ int vt_console = 0;
++
++ while (c) {
++#if 0 /* FIXME: we don't register serial consoles in time */
++ if ((c->flags & CON_CONSDEV) && !kdbcons)
++ kdbcons = c;
++#else
++ if (!kdbcons)
++ kdbcons = c;
++#endif
++ if ((c->flags & CON_ENABLED) &&
++ strncmp(c->name, "tty", 3) == 0) {
++ char *p = c->name + 3;
++ while (isdigit(*p))
++ ++p;
++ if (*p == '\0')
++ vt_console = 1;
++ }
++ c = c->next;
++ }
++
++ if (kdbcons == NULL) {
++ printk(KERN_ERR "kdb: Initialization failed - no console. kdb is disabled.\n");
++ KDB_FLAG_SET(NO_CONSOLE);
++ kdb_on = 0;
++ }
++ if (!vt_console)
++ KDB_FLAG_SET(NO_VT_CONSOLE);
++ kdb_input_flush();
++ return;
++}
++
++EXPORT_SYMBOL(kdb_read);
+diff -Nurp linux-2.6.22-590/kdb/kdbmain.c linux-2.6.22-600/kdb/kdbmain.c
+--- linux-2.6.22-590/kdb/kdbmain.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/kdbmain.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,4034 @@
++/*
++ * Kernel Debugger Architecture Independent Main Code
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
++ * Xscale (R) modifications copyright (C) 2003 Intel Corporation.
++ */
++
++/*
++ * Updated for Xscale (R) architecture support
++ * Eddie Dong <eddie.dong@intel.com> 8 Jan 03
++ */
++
++#include <linux/ctype.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/reboot.h>
++#include <linux/sched.h>
++#include <linux/sysrq.h>
++#include <linux/smp.h>
++#include <linux/utsname.h>
++#include <linux/vmalloc.h>
++#include <linux/module.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/kallsyms.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/notifier.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/nmi.h>
++#include <linux/ptrace.h>
++#include <linux/sysctl.h>
++#if defined(CONFIG_LKCD_DUMP) || defined(CONFIG_LKCD_DUMP_MODULE)
++#include <linux/dump.h>
++#endif
++#include <linux/cpu.h>
++#include <linux/kdebug.h>
++
++#include <acpi/acpi_bus.h>
++
++#include <asm/system.h>
++#include <asm/kdebug.h>
++
++/*
++ * Kernel debugger state flags
++ */
++volatile int kdb_flags;
++atomic_t kdb_event;
++atomic_t kdb_8250;
++
++/*
++ * kdb_lock protects updates to kdb_initial_cpu. Used to
++ * single thread processors through the kernel debugger.
++ */
++static DEFINE_SPINLOCK(kdb_lock);
++volatile int kdb_initial_cpu = -1; /* cpu number that owns kdb */
++int kdb_seqno = 2; /* how many times kdb has been entered */
++
++volatile int kdb_nextline = 1;
++static volatile int kdb_new_cpu; /* Which cpu to switch to */
++
++volatile int kdb_state[NR_CPUS]; /* Per cpu state */
++
++const struct task_struct *kdb_current_task;
++EXPORT_SYMBOL(kdb_current_task);
++struct pt_regs *kdb_current_regs;
++
++#ifdef CONFIG_KDB_OFF
++int kdb_on = 0; /* Default is off */
++#else
++int kdb_on = 1; /* Default is on */
++#endif /* CONFIG_KDB_OFF */
++
++const char *kdb_diemsg;
++static int kdb_go_count;
++#ifdef CONFIG_KDB_CONTINUE_CATASTROPHIC
++static unsigned int kdb_continue_catastrophic = CONFIG_KDB_CONTINUE_CATASTROPHIC;
++#else
++static unsigned int kdb_continue_catastrophic = 0;
++#endif
++
++#ifdef kdba_setjmp
++ /*
++ * Must have a setjmp buffer per CPU. Switching cpus will
++ * cause the jump buffer to be setup for the new cpu, and
++ * subsequent switches (and pager aborts) will use the
++ * appropriate per-processor values.
++ */
++kdb_jmp_buf *kdbjmpbuf;
++#endif /* kdba_setjmp */
++
++ /*
++ * kdb_commands describes the available commands.
++ */
++static kdbtab_t *kdb_commands;
++static int kdb_max_commands;
++
++typedef struct _kdbmsg {
++ int km_diag; /* kdb diagnostic */
++ char *km_msg; /* Corresponding message text */
++} kdbmsg_t;
++
++#define KDBMSG(msgnum, text) \
++ { KDB_##msgnum, text }
++
++static kdbmsg_t kdbmsgs[] = {
++ KDBMSG(NOTFOUND,"Command Not Found"),
++ KDBMSG(ARGCOUNT, "Improper argument count, see usage."),
++ KDBMSG(BADWIDTH, "Illegal value for BYTESPERWORD use 1, 2, 4 or 8, 8 is only allowed on 64 bit systems"),
++ KDBMSG(BADRADIX, "Illegal value for RADIX use 8, 10 or 16"),
++ KDBMSG(NOTENV, "Cannot find environment variable"),
++ KDBMSG(NOENVVALUE, "Environment variable should have value"),
++ KDBMSG(NOTIMP, "Command not implemented"),
++ KDBMSG(ENVFULL, "Environment full"),
++ KDBMSG(ENVBUFFULL, "Environment buffer full"),
++ KDBMSG(TOOMANYBPT, "Too many breakpoints defined"),
++#ifdef CONFIG_CPU_XSCALE
++ KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"),
++#else
++ KDBMSG(TOOMANYDBREGS, "More breakpoints than db registers defined"),
++#endif
++ KDBMSG(DUPBPT, "Duplicate breakpoint address"),
++ KDBMSG(BPTNOTFOUND, "Breakpoint not found"),
++ KDBMSG(BADMODE, "Invalid IDMODE"),
++ KDBMSG(BADINT, "Illegal numeric value"),
++ KDBMSG(INVADDRFMT, "Invalid symbolic address format"),
++ KDBMSG(BADREG, "Invalid register name"),
++ KDBMSG(BADCPUNUM, "Invalid cpu number"),
++ KDBMSG(BADLENGTH, "Invalid length field"),
++ KDBMSG(NOBP, "No Breakpoint exists"),
++ KDBMSG(BADADDR, "Invalid address"),
++};
++#undef KDBMSG
++
++static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
++
++
++/*
++ * Initial environment. This is all kept static and local to
++ * this file. We don't want to rely on the memory allocation
++ * mechanisms in the kernel, so we use a very limited allocate-only
++ * heap for new and altered environment variables. The entire
++ * environment is limited to a fixed number of entries (add more
++ * to __env[] if required) and a fixed amount of heap (add more to
++ * KDB_ENVBUFSIZE if required).
++ */
++
++static char *__env[] = {
++#if defined(CONFIG_SMP)
++ "PROMPT=[%d]kdb> ",
++ "MOREPROMPT=[%d]more> ",
++#else
++ "PROMPT=kdb> ",
++ "MOREPROMPT=more> ",
++#endif
++ "RADIX=16",
++ "LINES=24",
++ "COLUMNS=80",
++ "MDCOUNT=8", /* lines of md output */
++ "BTARGS=9", /* 9 possible args in bt */
++ KDB_PLATFORM_ENV,
++ "DTABCOUNT=30",
++ "NOSECT=1",
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++ (char *)0,
++};
++
++static const int __nenv = (sizeof(__env) / sizeof(char *));
++
++/*
++ * kdb_serial_str is the sequence that the user must enter on a serial
++ * console to invoke kdb. It can be a single character such as "\001"
++ * (control-A) or multiple characters such as "\eKDB". NOTE: All except the
++ * last character are passed through to the application reading from the serial
++ * console.
++ *
++ * I tried to make the sequence a CONFIG_ option but most of CML1 cannot cope
++ * with '\' in strings. CML2 would have been able to do it but we lost CML2.
++ * KAO.
++ */
++const char kdb_serial_str[] = "\eKDB";
++EXPORT_SYMBOL(kdb_serial_str);
++
++struct task_struct *
++kdb_curr_task(int cpu)
++{
++ struct task_struct *p = curr_task(cpu);
++#ifdef _TIF_MCA_INIT
++ struct kdb_running_process *krp = kdb_running_process + cpu;
++ if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && krp->p)
++ p = krp->p;
++#endif
++ return p;
++}
++
++/*
++ * kdbgetenv
++ *
++ * This function will return the character string value of
++ * an environment variable.
++ *
++ * Parameters:
++ * match A character string representing an environment variable.
++ * Outputs:
++ * None.
++ * Returns:
++ * NULL No environment variable matches 'match'
++ * char* Pointer to string value of environment variable.
++ * Locking:
++ * No locking considerations required.
++ * Remarks:
++ */
++char *
++kdbgetenv(const char *match)
++{
++ char **ep = __env;
++ int matchlen = strlen(match);
++ int i;
++
++ for(i=0; i<__nenv; i++) {
++ char *e = *ep++;
++
++ if (!e) continue;
++
++ if ((strncmp(match, e, matchlen) == 0)
++ && ((e[matchlen] == '\0')
++ ||(e[matchlen] == '='))) {
++ char *cp = strchr(e, '=');
++ return (cp ? ++cp :"");
++ }
++ }
++ return NULL;
++}
++
++/*
++ * kdballocenv
++ *
++ * This function is used to allocate bytes for environment entries.
++ *
++ * Parameters:
++ * bytes The number of bytes requested from the environment buffer.
++ * Outputs:
++ * None.
++ * Returns:
++ * A pointer to the allocated space on success, NULL if the buffer is full.
++ * Locking:
++ * No locking considerations required. Must be called with all
++ * processors halted.
++ * Remarks:
++ * We use a static environment buffer (envbuffer) to hold the values
++ * of dynamically generated environment variables (see kdb_set). Buffer
++ * space once allocated is never free'd, so over time, the amount of space
++ * (currently 512 bytes) will be exhausted if env variables are changed
++ * frequently.
++ */
++static char *
++kdballocenv(size_t bytes)
++{
++#define KDB_ENVBUFSIZE 512
++ static char envbuffer[KDB_ENVBUFSIZE];
++ static int envbufsize;
++ char *ep = NULL;
++
++ if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) {
++ ep = &envbuffer[envbufsize];
++ envbufsize += bytes;
++ }
++ return ep;
++}
++
++/*
++ * kdbgetulenv
++ *
++ * This function will return the value of an unsigned long-valued
++ * environment variable.
++ *
++ * Parameters:
++ * match A character string representing a numeric value
++ * Outputs:
++ * *value the unsigned long representation of the env variable 'match'
++ * Returns:
++ * Zero on success, a kdb diagnostic on failure.
++ * Locking:
++ * No locking considerations required.
++ * Remarks:
++ */
++
++static int
++kdbgetulenv(const char *match, unsigned long *value)
++{
++ char *ep;
++
++ ep = kdbgetenv(match);
++ if (!ep) return KDB_NOTENV;
++ if (strlen(ep) == 0) return KDB_NOENVVALUE;
++
++ *value = simple_strtoul(ep, NULL, 0);
++
++ return 0;
++}
++
++/*
++ * kdbgetintenv
++ *
++ * This function will return the value of an integer-valued
++ * environment variable.
++ *
++ * Parameters:
++ * match A character string representing an integer-valued env variable
++ * Outputs:
++ * *value the integer representation of the environment variable 'match'
++ * Returns:
++ * Zero on success, a kdb diagnostic on failure.
++ * Locking:
++ * No locking considerations required.
++ * Remarks:
++ */
++
++int
++kdbgetintenv(const char *match, int *value) {
++ unsigned long val;
++ int diag;
++
++ diag = kdbgetulenv(match, &val);
++ if (!diag) {
++ *value = (int) val;
++ }
++ return diag;
++}
++
++/*
++ * kdbgetularg
++ *
++ * This function will convert a numeric string
++ * into an unsigned long value.
++ *
++ * Parameters:
++ * arg A character string representing a numeric value
++ * Outputs:
++ * *value the unsigned long representation of arg.
++ * Returns:
++ * Zero on success, a kdb diagnostic on failure.
++ * Locking:
++ * No locking considerations required.
++ * Remarks:
++ */
++
++int
++kdbgetularg(const char *arg, unsigned long *value)
++{
++ char *endp;
++ unsigned long val;
++
++ val = simple_strtoul(arg, &endp, 0);
++
++ if (endp == arg) {
++ /*
++ * Try base 16, for us folks too lazy to type the
++ * leading 0x...
++ */
++ val = simple_strtoul(arg, &endp, 16);
++ if (endp == arg)
++ return KDB_BADINT;
++ }
++
++ *value = val;
++
++ return 0;
++}
++
++/*
++ * kdb_set
++ *
++ * This function implements the 'set' command. Alter an existing
++ * environment variable or create a new one.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static int
++kdb_set(int argc, const char **argv)
++{
++ int i;
++ char *ep;
++ size_t varlen, vallen;
++
++ /*
++ * we can be invoked two ways:
++ * set var=value argv[1]="var", argv[2]="value"
++ * set var = value argv[1]="var", argv[2]="=", argv[3]="value"
++ * - if the latter, shift 'em down.
++ */
++ if (argc == 3) {
++ argv[2] = argv[3];
++ argc--;
++ }
++
++ if (argc != 2)
++ return KDB_ARGCOUNT;
++
++ /*
++ * Check for internal variables
++ */
++ if (strcmp(argv[1], "KDBDEBUG") == 0) {
++ unsigned int debugflags;
++ char *cp;
++
++ debugflags = simple_strtoul(argv[2], &cp, 0);
++ if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) {
++ kdb_printf("kdb: illegal debug flags '%s'\n",
++ argv[2]);
++ return 0;
++ }
++ kdb_flags = (kdb_flags & ~(KDB_DEBUG_FLAG_MASK << KDB_DEBUG_FLAG_SHIFT))
++ | (debugflags << KDB_DEBUG_FLAG_SHIFT);
++
++ return 0;
++ }
++
++ /*
++ * Tokenizer squashed the '=' sign. argv[1] is variable
++ * name, argv[2] = value.
++ */
++ varlen = strlen(argv[1]);
++ vallen = strlen(argv[2]);
++ ep = kdballocenv(varlen + vallen + 2);
++ if (ep == (char *)0)
++ return KDB_ENVBUFFULL;
++
++ sprintf(ep, "%s=%s", argv[1], argv[2]);
++
++ ep[varlen+vallen+1]='\0';
++
++ for(i=0; i<__nenv; i++) {
++ if (__env[i]
++ && ((strncmp(__env[i], argv[1], varlen)==0)
++ && ((__env[i][varlen] == '\0')
++ || (__env[i][varlen] == '=')))) {
++ __env[i] = ep;
++ return 0;
++ }
++ }
++
++ /*
++ * Wasn't existing variable. Fit into slot.
++ */
++ for(i=0; i<__nenv-1; i++) {
++ if (__env[i] == (char *)0) {
++ __env[i] = ep;
++ return 0;
++ }
++ }
++
++ return KDB_ENVFULL;
++}
++
++static int
++kdb_check_regs(void)
++{
++ if (!kdb_current_regs) {
++ kdb_printf("No current kdb registers."
++ " You may need to select another task\n");
++ return KDB_BADREG;
++ }
++ return 0;
++}
++
++/*
++ * kdbgetaddrarg
++ *
++ * This function is responsible for parsing an
++ * address-expression and returning the value of
++ * the expression, symbol name, and offset to the caller.
++ *
++ * The argument may consist of a numeric value (decimal or
++ * hexadecimal), a symbol name, a register name (preceded
++ * by the percent sign), an environment variable with a numeric
++ * value (preceded by a dollar sign) or a simple arithmetic
++ * expression consisting of a symbol name, +/-, and a numeric
++ * constant value (offset).
++ *
++ * Parameters:
++ * argc - count of arguments in argv
++ * argv - argument vector
++ * *nextarg - index to next unparsed argument in argv[]
++ * regs - Register state at time of KDB entry
++ * Outputs:
++ * *value - receives the value of the address-expression
++ * *offset - receives the offset specified, if any
++ * *name - receives the symbol name, if any
++ * *nextarg - index to next unparsed argument in argv[]
++ *
++ * Returns:
++ * zero is returned on success, a kdb diagnostic code is
++ * returned on error.
++ *
++ * Locking:
++ * No locking requirements.
++ *
++ * Remarks:
++ *
++ */
++
++int
++kdbgetaddrarg(int argc, const char **argv, int *nextarg,
++ kdb_machreg_t *value, long *offset,
++ char **name)
++{
++ kdb_machreg_t addr;
++ unsigned long off = 0;
++ int positive;
++ int diag;
++ int found = 0;
++ char *symname;
++ char symbol = '\0';
++ char *cp;
++ kdb_symtab_t symtab;
++
++ /*
++ * Process arguments which follow the following syntax:
++ *
++ * symbol | numeric-address [+/- numeric-offset]
++ * %register
++ * $environment-variable
++ */
++
++ if (*nextarg > argc) {
++ return KDB_ARGCOUNT;
++ }
++
++ symname = (char *)argv[*nextarg];
++
++ /*
++ * If there is no whitespace between the symbol
++ * or address and the '+' or '-' symbols, we
++ * remember the character and replace it with a
++ * null so the symbol/value can be properly parsed
++ */
++ if ((cp = strpbrk(symname, "+-")) != NULL) {
++ symbol = *cp;
++ *cp++ = '\0';
++ }
++
++ if (symname[0] == '$') {
++ diag = kdbgetulenv(&symname[1], &addr);
++ if (diag)
++ return diag;
++ } else if (symname[0] == '%') {
++ if ((diag = kdb_check_regs()))
++ return diag;
++ diag = kdba_getregcontents(&symname[1], kdb_current_regs, &addr);
++ if (diag)
++ return diag;
++ } else {
++ found = kdbgetsymval(symname, &symtab);
++ if (found) {
++ addr = symtab.sym_start;
++ } else {
++ diag = kdbgetularg(argv[*nextarg], &addr);
++ if (diag)
++ return diag;
++ }
++ }
++
++ if (!found)
++ found = kdbnearsym(addr, &symtab);
++
++ (*nextarg)++;
++
++ if (name)
++ *name = symname;
++ if (value)
++ *value = addr;
++ if (offset && name && *name)
++ *offset = addr - symtab.sym_start;
++
++ if ((*nextarg > argc)
++ && (symbol == '\0'))
++ return 0;
++
++ /*
++ * check for +/- and offset
++ */
++
++ if (symbol == '\0') {
++ if ((argv[*nextarg][0] != '+')
++ && (argv[*nextarg][0] != '-')) {
++ /*
++ * Not our argument. Return.
++ */
++ return 0;
++ } else {
++ positive = (argv[*nextarg][0] == '+');
++ (*nextarg)++;
++ }
++ } else
++ positive = (symbol == '+');
++
++ /*
++ * Now there must be an offset!
++ */
++ if ((*nextarg > argc)
++ && (symbol == '\0')) {
++ return KDB_INVADDRFMT;
++ }
++
++ if (!symbol) {
++ cp = (char *)argv[*nextarg];
++ (*nextarg)++;
++ }
++
++ diag = kdbgetularg(cp, &off);
++ if (diag)
++ return diag;
++
++ if (!positive)
++ off = -off;
++
++ if (offset)
++ *offset += off;
++
++ if (value)
++ *value += off;
++
++ return 0;
++}
++
++static void
++kdb_cmderror(int diag)
++{
++ int i;
++
++ if (diag >= 0) {
++ kdb_printf("no error detected\n");
++ return;
++ }
++
++ for(i=0; i<__nkdb_err; i++) {
++ if (kdbmsgs[i].km_diag == diag) {
++ kdb_printf("diag: %d: %s\n", diag, kdbmsgs[i].km_msg);
++ return;
++ }
++ }
++
++ kdb_printf("Unknown diag %d\n", -diag);
++}
++
++/*
++ * kdb_defcmd, kdb_defcmd2
++ *
++ * This function implements the 'defcmd' command which defines one
++ * command as a set of other commands, terminated by endefcmd.
++ * kdb_defcmd processes the initial 'defcmd' command, kdb_defcmd2
++ * is invoked from kdb_parse for the following commands until
++ * 'endefcmd'.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++struct defcmd_set {
++ int count;
++ int usable;
++ char *name;
++ char *usage;
++ char *help;
++ char **command;
++};
++static struct defcmd_set *defcmd_set;
++static int defcmd_set_count;
++static int defcmd_in_progress;
++
++/* Forward references */
++static int kdb_exec_defcmd(int argc, const char **argv);
++
++static int
++kdb_defcmd2(const char *cmdstr, const char *argv0)
++{
++ struct defcmd_set *s = defcmd_set + defcmd_set_count - 1;
++ char **save_command = s->command;
++ if (strcmp(argv0, "endefcmd") == 0) {
++ defcmd_in_progress = 0;
++ if (!s->count)
++ s->usable = 0;
++ if (s->usable)
++ kdb_register(s->name, kdb_exec_defcmd, s->usage, s->help, 0);
++ return 0;
++ }
++ if (!s->usable)
++ return KDB_NOTIMP;
++ s->command = kmalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
++ if (!s->command) {
++ kdb_printf("Could not allocate new kdb_defcmd table for %s\n", cmdstr);
++ s->usable = 0;
++ return KDB_NOTIMP;
++ }
++ memcpy(s->command, save_command, s->count * sizeof(*(s->command)));
++ s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB);
++ kfree(save_command);
++ return 0;
++}
++
++static int
++kdb_defcmd(int argc, const char **argv)
++{
++ struct defcmd_set *save_defcmd_set = defcmd_set, *s;
++ if (defcmd_in_progress) {
++ kdb_printf("kdb: nested defcmd detected, assuming missing endefcmd\n");
++ kdb_defcmd2("endefcmd", "endefcmd");
++ }
++ if (argc == 0) {
++ int i;
++ for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) {
++ kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name, s->usage, s->help);
++ for (i = 0; i < s->count; ++i)
++ kdb_printf("%s", s->command[i]);
++ kdb_printf("endefcmd\n");
++ }
++ return 0;
++ }
++ if (argc != 3)
++ return KDB_ARGCOUNT;
++ defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set), GFP_KDB);
++ if (!defcmd_set) {
++ kdb_printf("Could not allocate new defcmd_set entry for %s\n", argv[1]);
++ defcmd_set = save_defcmd_set;
++ return KDB_NOTIMP;
++ }
++ memcpy(defcmd_set, save_defcmd_set, defcmd_set_count * sizeof(*defcmd_set));
++ kfree(save_defcmd_set);
++ s = defcmd_set + defcmd_set_count;
++ memset(s, 0, sizeof(*s));
++ s->usable = 1;
++ s->name = kdb_strdup(argv[1], GFP_KDB);
++ s->usage = kdb_strdup(argv[2], GFP_KDB);
++ s->help = kdb_strdup(argv[3], GFP_KDB);
++ if (s->usage[0] == '"') {
++ strcpy(s->usage, s->usage+1);
++ s->usage[strlen(s->usage)-1] = '\0';
++ }
++ if (s->help[0] == '"') {
++ strcpy(s->help, s->help+1);
++ s->help[strlen(s->help)-1] = '\0';
++ }
++ ++defcmd_set_count;
++ defcmd_in_progress = 1;
++ return 0;
++}
++
++/*
++ * kdb_exec_defcmd
++ *
++ * Execute the set of commands associated with this defcmd name.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static int
++kdb_exec_defcmd(int argc, const char **argv)
++{
++ int i, ret;
++ struct defcmd_set *s;
++ if (argc != 0)
++ return KDB_ARGCOUNT;
++ for (s = defcmd_set, i = 0; i < defcmd_set_count; ++i, ++s) {
++ if (strcmp(s->name, argv[0]) == 0)
++ break;
++ }
++ if (i == defcmd_set_count) {
++ kdb_printf("kdb_exec_defcmd: could not find commands for %s\n", argv[0]);
++ return KDB_NOTIMP;
++ }
++ for (i = 0; i < s->count; ++i) {
++ /* Recursive use of kdb_parse, do not use argv after this point */
++ argv = NULL;
++ kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]);
++ if ((ret = kdb_parse(s->command[i])))
++ return ret;
++ }
++ return 0;
++}
++
++/* Command history */
++#define KDB_CMD_HISTORY_COUNT 32
++#define CMD_BUFLEN 200 /* kdb_printf: max printline size == 256 */
++static unsigned int cmd_head=0, cmd_tail=0;
++static unsigned int cmdptr;
++static char cmd_hist[KDB_CMD_HISTORY_COUNT][CMD_BUFLEN];
++static char cmd_cur[CMD_BUFLEN];
++
++/*
++ * kdb_parse
++ *
++ * Parse the command line, search the command table for a
++ * matching command and invoke the command function.
++ * This function may be called recursively, if it is, the second call
++ * will overwrite argv and cbuf. It is the caller's responsibility to
++ * save their argv if they recursively call kdb_parse().
++ *
++ * Parameters:
++ * cmdstr The input command line to be parsed.
++ * regs The registers at the time kdb was entered.
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero for success, a kdb diagnostic if failure.
++ * Locking:
++ * None.
++ * Remarks:
++ * Limited to 20 tokens.
++ *
++ * Real rudimentary tokenization. Basically only whitespace
++ * is considered a token delimiter (but special consideration
++ * is taken of the '=' sign as used by the 'set' command).
++ *
++ * The algorithm used to tokenize the input string relies on
++ * there being at least one whitespace (or otherwise useless)
++ * character between tokens as the character immediately following
++ * the token is altered in-place to a null-byte to terminate the
++ * token string.
++ */
++
++#define MAXARGC 20
++
++int
++kdb_parse(const char *cmdstr)
++{
++ static char *argv[MAXARGC];
++ static int argc = 0;
++ static char cbuf[CMD_BUFLEN+2];
++ const char *cp;
++ char *cpp, quoted;
++ kdbtab_t *tp;
++ int i, escaped, ignore_errors = 0;
++
++ /*
++ * First tokenize the command string.
++ */
++ cp = cmdstr;
++
++ if (KDB_FLAG(CMD_INTERRUPT)) {
++ /* Previous command was interrupted, newline must not repeat the command */
++ KDB_FLAG_CLEAR(CMD_INTERRUPT);
++ argc = 0; /* no repeat */
++ }
++
++ if (*cp != '\n' && *cp != '\0') {
++ argc = 0;
++ cpp = cbuf;
++ while (*cp) {
++ /* skip whitespace */
++ while (isspace(*cp)) cp++;
++ if ((*cp == '\0') || (*cp == '\n') || (*cp == '#' && !defcmd_in_progress))
++ break;
++ if (cpp >= cbuf + CMD_BUFLEN) {
++ kdb_printf("kdb_parse: command buffer overflow, command ignored\n%s\n", cmdstr);
++ return KDB_NOTFOUND;
++ }
++ if (argc >= MAXARGC - 1) {
++ kdb_printf("kdb_parse: too many arguments, command ignored\n%s\n", cmdstr);
++ return KDB_NOTFOUND;
++ }
++ argv[argc++] = cpp;
++ escaped = 0;
++ quoted = '\0';
++ /* Copy to next unquoted and unescaped whitespace or '=' */
++ while (*cp && *cp != '\n' && (escaped || quoted || !isspace(*cp))) {
++ if (cpp >= cbuf + CMD_BUFLEN)
++ break;
++ if (escaped) {
++ escaped = 0;
++ *cpp++ = *cp++;
++ continue;
++ }
++ if (*cp == '\\') {
++ escaped = 1;
++ ++cp;
++ continue;
++ }
++ if (*cp == quoted) {
++ quoted = '\0';
++ } else if (*cp == '\'' || *cp == '"') {
++ quoted = *cp;
++ }
++ if ((*cpp = *cp++) == '=' && !quoted)
++ break;
++ ++cpp;
++ }
++ *cpp++ = '\0'; /* Squash a ws or '=' character */
++ }
++ }
++ if (!argc)
++ return 0;
++ if (defcmd_in_progress) {
++ int result = kdb_defcmd2(cmdstr, argv[0]);
++ if (!defcmd_in_progress) {
++ argc = 0; /* avoid repeat on endefcmd */
++ *(argv[0]) = '\0';
++ }
++ return result;
++ }
++ if (argv[0][0] == '-' && argv[0][1] && (argv[0][1] < '0' || argv[0][1] > '9')) {
++ ignore_errors = 1;
++ ++argv[0];
++ }
++
++ for(tp=kdb_commands, i=0; i < kdb_max_commands; i++,tp++) {
++ if (tp->cmd_name) {
++ /*
++ * If this command is allowed to be abbreviated,
++ * check to see if this is it.
++ */
++
++ if (tp->cmd_minlen
++ && (strlen(argv[0]) <= tp->cmd_minlen)) {
++ if (strncmp(argv[0],
++ tp->cmd_name,
++ tp->cmd_minlen) == 0) {
++ break;
++ }
++ }
++
++ if (strcmp(argv[0], tp->cmd_name)==0) {
++ break;
++ }
++ }
++ }
++
++ /*
++ * If we don't find a command by this name, see if the first
++ * few characters of this match any of the known commands.
++ * e.g., md1c20 should match md.
++ */
++ if (i == kdb_max_commands) {
++ for(tp=kdb_commands, i=0; i < kdb_max_commands; i++,tp++) {
++ if (tp->cmd_name) {
++ if (strncmp(argv[0],
++ tp->cmd_name,
++ strlen(tp->cmd_name))==0) {
++ break;
++ }
++ }
++ }
++ }
++
++ if (i < kdb_max_commands) {
++ int result;
++ KDB_STATE_SET(CMD);
++ result = (*tp->cmd_func)(argc-1,
++ (const char**)argv);
++ if (result && ignore_errors && result > KDB_CMD_GO)
++ result = 0;
++ KDB_STATE_CLEAR(CMD);
++ switch (tp->cmd_repeat) {
++ case KDB_REPEAT_NONE:
++ argc = 0;
++ if (argv[0])
++ *(argv[0]) = '\0';
++ break;
++ case KDB_REPEAT_NO_ARGS:
++ argc = 1;
++ if (argv[1])
++ *(argv[1]) = '\0';
++ break;
++ case KDB_REPEAT_WITH_ARGS:
++ break;
++ }
++ return result;
++ }
++
++ /*
++ * If the input with which we were presented does not
++ * map to an existing command, attempt to parse it as an
++ * address argument and display the result. Useful for
++ * obtaining the address of a variable, or the nearest symbol
++ * to an address contained in a register.
++ */
++ {
++ kdb_machreg_t value;
++ char *name = NULL;
++ long offset;
++ int nextarg = 0;
++
++ if (kdbgetaddrarg(0, (const char **)argv, &nextarg,
++ &value, &offset, &name)) {
++ return KDB_NOTFOUND;
++ }
++
++ kdb_printf("%s = ", argv[0]);
++ kdb_symbol_print(value, NULL, KDB_SP_DEFAULT);
++ kdb_printf("\n");
++ return 0;
++ }
++}
++
++
++static int
++handle_ctrl_cmd(char *cmd)
++{
++#define CTRL_P 16
++#define CTRL_N 14
++
++ /* initial situation */
++ if (cmd_head == cmd_tail) return 0;
++
++ switch(*cmd) {
++ case CTRL_P:
++ if (cmdptr != cmd_tail)
++ cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT;
++ strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
++ return 1;
++ case CTRL_N:
++ if (cmdptr != cmd_head)
++ cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT;
++ strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
++ return 1;
++ }
++ return 0;
++}
++
++/*
++ * kdb_do_dump
++ *
++ * Call the dump() function if the kernel is configured for LKCD.
++ * Inputs:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None. dump() may or may not return.
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static void
++kdb_do_dump(void)
++{
++#if defined(CONFIG_LKCD_DUMP) || defined(CONFIG_LKCD_DUMP_MODULE)
++ kdb_printf("Forcing dump (if configured)\n");
++ console_loglevel = 8; /* to see the dump messages */
++ dump("kdb_do_dump");
++#endif
++}
++
++/*
++ * kdb_reboot
++ *
++ * This function implements the 'reboot' command. Reboot the system
++ * immediately.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * Shouldn't return from this function.
++ */
++
++static int
++kdb_reboot(int argc, const char **argv)
++{
++ emergency_restart();
++ kdb_printf("Hmm, kdb_reboot did not reboot, spinning here\n");
++ while (1) {};
++ /* NOTREACHED */
++ return 0;
++}
++
++static int
++kdb_quiet(int reason)
++{
++ return (reason == KDB_REASON_CPU_UP || reason == KDB_REASON_SILENT);
++}
++
++/*
++ * kdb_local
++ *
++ * The main code for kdb. This routine is invoked on a specific
++ * processor, it is not global. The main kdb() routine ensures
++ * that only one processor at a time is in this routine. This
++ * code is called with the real reason code on the first entry
++ * to a kdb session, thereafter it is called with reason SWITCH,
++ * even if the user goes back to the original cpu.
++ *
++ * Inputs:
++ * reason The reason KDB was invoked
++ * error The hardware-defined error code
++ * regs The exception frame at time of fault/breakpoint. NULL
++ * for reason SILENT or CPU_UP, otherwise valid.
++ * db_result Result code from the break or debug point.
++ * Returns:
++ * 0 KDB was invoked for an event which it wasn't responsible
++ * 1 KDB handled the event for which it was invoked.
++ * KDB_CMD_GO User typed 'go'.
++ * KDB_CMD_CPU User switched to another cpu.
++ * KDB_CMD_SS Single step.
++ * KDB_CMD_SSB Single step until branch.
++ * Locking:
++ * none
++ * Remarks:
++ * none
++ */
++
++static int
++kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, kdb_dbtrap_t db_result)
++{
++ char *cmdbuf;
++ int diag;
++ struct task_struct *kdb_current = kdb_curr_task(smp_processor_id());
++
++ /* If kdb has been entered for an event which has been/will be
++ * recovered then silently return. We have to get this far into kdb in
++ * order to synchronize all the cpus, typically only one cpu (monarch)
++ * knows that the event is recoverable but the other cpus (slaves) may
++ * also be driven into kdb before that decision is made by the monarch.
++ *
++ * To pause in kdb even for recoverable events, 'set RECOVERY_PAUSE 1'
++ */
++ KDB_DEBUG_STATE("kdb_local 1", reason);
++ if (reason == KDB_REASON_ENTER
++ && KDB_FLAG(RECOVERY)
++ && !KDB_FLAG(CATASTROPHIC)) {
++ int recovery_pause = 0;
++ kdbgetintenv("RECOVERY_PAUSE", &recovery_pause);
++ if (recovery_pause == 0)
++ reason = KDB_REASON_SILENT;
++ else
++ kdb_printf("%s: Recoverable error detected but"
++ " RECOVERY_PAUSE is set, staying in KDB\n",
++ __FUNCTION__);
++ }
++
++ KDB_DEBUG_STATE("kdb_local 2", reason);
++ kdb_go_count = 0;
++ if (kdb_quiet(reason)) {
++ /* no message */
++ } else if (reason == KDB_REASON_DEBUG) {
++ /* special case below */
++ } else {
++ kdb_printf("\nEntering kdb (current=0x%p, pid %d) ", kdb_current, kdb_current->pid);
++#if defined(CONFIG_SMP)
++ kdb_printf("on processor %d ", smp_processor_id());
++#endif
++ }
++
++ switch (reason) {
++ case KDB_REASON_DEBUG:
++ {
++ /*
++ * If re-entering kdb after a single step
++ * command, don't print the message.
++ */
++ switch(db_result) {
++ case KDB_DB_BPT:
++ kdb_printf("\nEntering kdb (0x%p, pid %d) ", kdb_current, kdb_current->pid);
++#if defined(CONFIG_SMP)
++ kdb_printf("on processor %d ", smp_processor_id());
++#endif
++ kdb_printf("due to Debug @ " kdb_machreg_fmt "\n", kdba_getpc(regs));
++ break;
++ case KDB_DB_SSB:
++ /*
++ * In the midst of ssb command. Just return.
++ */
++ KDB_DEBUG_STATE("kdb_local 3", reason);
++ return KDB_CMD_SSB; /* Continue with SSB command */
++
++ break;
++ case KDB_DB_SS:
++ break;
++ case KDB_DB_SSBPT:
++ KDB_DEBUG_STATE("kdb_local 4", reason);
++ return 1; /* kdba_db_trap did the work */
++ default:
++ kdb_printf("kdb: Bad result from kdba_db_trap: %d\n",
++ db_result);
++ break;
++ }
++
++ }
++ break;
++ case KDB_REASON_ENTER:
++ if (KDB_STATE(KEYBOARD))
++ kdb_printf("due to Keyboard Entry\n");
++ else
++ kdb_printf("due to KDB_ENTER()\n");
++ break;
++ case KDB_REASON_KEYBOARD:
++ KDB_STATE_SET(KEYBOARD);
++ kdb_printf("due to Keyboard Entry\n");
++ break;
++ case KDB_REASON_ENTER_SLAVE: /* drop through, slaves only get released via cpu switch */
++ case KDB_REASON_SWITCH:
++ kdb_printf("due to cpu switch\n");
++ if (KDB_STATE(GO_SWITCH)) {
++ KDB_STATE_CLEAR(GO_SWITCH);
++ KDB_DEBUG_STATE("kdb_local 5", reason);
++ return KDB_CMD_GO;
++ }
++ break;
++ case KDB_REASON_OOPS:
++ kdb_printf("Oops: %s\n", kdb_diemsg);
++ kdb_printf("due to oops @ " kdb_machreg_fmt "\n", kdba_getpc(regs));
++ kdba_dumpregs(regs, NULL, NULL);
++ break;
++ case KDB_REASON_NMI:
++ kdb_printf("due to NonMaskable Interrupt @ " kdb_machreg_fmt "\n",
++ kdba_getpc(regs));
++ kdba_dumpregs(regs, NULL, NULL);
++ break;
++ case KDB_REASON_BREAK:
++ kdb_printf("due to Breakpoint @ " kdb_machreg_fmt "\n", kdba_getpc(regs));
++ /*
++ * Determine if this breakpoint is one that we
++ * are interested in.
++ */
++ if (db_result != KDB_DB_BPT) {
++ kdb_printf("kdb: error return from kdba_bp_trap: %d\n", db_result);
++ KDB_DEBUG_STATE("kdb_local 6", reason);
++ return 0; /* Not for us, dismiss it */
++ }
++ break;
++ case KDB_REASON_RECURSE:
++ kdb_printf("due to Recursion @ " kdb_machreg_fmt "\n", kdba_getpc(regs));
++ break;
++ case KDB_REASON_CPU_UP:
++ case KDB_REASON_SILENT:
++ KDB_DEBUG_STATE("kdb_local 7", reason);
++ if (reason == KDB_REASON_CPU_UP)
++ kdba_cpu_up();
++ return KDB_CMD_GO; /* Silent entry, silent exit */
++ break;
++ default:
++ kdb_printf("kdb: unexpected reason code: %d\n", reason);
++ KDB_DEBUG_STATE("kdb_local 8", reason);
++ return 0; /* Not for us, dismiss it */
++ }
++
++ kdba_local_arch_setup();
++
++ kdba_set_current_task(kdb_current);
++
++ while (1) {
++ /*
++ * Initialize pager context.
++ */
++ kdb_nextline = 1;
++ KDB_STATE_CLEAR(SUPPRESS);
++#ifdef kdba_setjmp
++ /*
++ * Use kdba_setjmp/kdba_longjmp to break out of
++ * the pager early and to attempt to recover from kdb errors.
++ */
++ KDB_STATE_CLEAR(LONGJMP);
++ if (kdbjmpbuf) {
++ if (kdba_setjmp(&kdbjmpbuf[smp_processor_id()])) {
++ /* Command aborted (usually in pager) */
++ continue;
++ }
++ else
++ KDB_STATE_SET(LONGJMP);
++ }
++#endif /* kdba_setjmp */
++
++ cmdbuf = cmd_cur;
++ *cmdbuf = '\0';
++ *(cmd_hist[cmd_head])='\0';
++
++ if (KDB_FLAG(ONLY_DO_DUMP)) {
++ /* kdb is off but a catastrophic error requires a dump.
++ * Take the dump and reboot.
++ * Turn on logging so the kdb output appears in the log
++ * buffer in the dump.
++ */
++ const char *setargs[] = { "set", "LOGGING", "1" };
++ kdb_set(2, setargs);
++ kdb_do_dump();
++ kdb_reboot(0, NULL);
++ /*NOTREACHED*/
++ }
++
++do_full_getstr:
++#if defined(CONFIG_SMP)
++ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"), smp_processor_id());
++#else
++ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"));
++#endif
++ if (defcmd_in_progress)
++ strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
++
++ /*
++ * Fetch command from keyboard
++ */
++ cmdbuf = kdb_getstr(cmdbuf, CMD_BUFLEN, kdb_prompt_str);
++ if (*cmdbuf != '\n') {
++ if (*cmdbuf < 32) {
++ if(cmdptr == cmd_head) {
++ strncpy(cmd_hist[cmd_head], cmd_cur, CMD_BUFLEN);
++ *(cmd_hist[cmd_head]+strlen(cmd_hist[cmd_head])-1) = '\0';
++ }
++ if(!handle_ctrl_cmd(cmdbuf))
++ *(cmd_cur+strlen(cmd_cur)-1) = '\0';
++ cmdbuf = cmd_cur;
++ goto do_full_getstr;
++ }
++ else
++ strncpy(cmd_hist[cmd_head], cmd_cur, CMD_BUFLEN);
++
++ cmd_head = (cmd_head+1) % KDB_CMD_HISTORY_COUNT;
++ if (cmd_head == cmd_tail) cmd_tail = (cmd_tail+1) % KDB_CMD_HISTORY_COUNT;
++
++ }
++
++ cmdptr = cmd_head;
++ diag = kdb_parse(cmdbuf);
++ if (diag == KDB_NOTFOUND) {
++ kdb_printf("Unknown kdb command: '%s'\n", cmdbuf);
++ diag = 0;
++ }
++ if (diag == KDB_CMD_GO
++ || diag == KDB_CMD_CPU
++ || diag == KDB_CMD_SS
++ || diag == KDB_CMD_SSB)
++ break;
++
++ if (diag)
++ kdb_cmderror(diag);
++ }
++
++ kdba_local_arch_cleanup();
++
++ KDB_DEBUG_STATE("kdb_local 9", diag);
++ return diag;
++}
++
++
++/*
++ * kdb_print_state
++ *
++ * Print the state data for the current processor for debugging.
++ *
++ * Inputs:
++ * text Identifies the debug point
++ * value Any integer value to be printed, e.g. reason code.
++ * Returns:
++ * None.
++ * Locking:
++ * none
++ * Remarks:
++ * none
++ */
++
++void kdb_print_state(const char *text, int value)
++{
++ kdb_printf("state: %s cpu %d value %d initial %d state %x\n",
++ text, smp_processor_id(), value, kdb_initial_cpu, kdb_state[smp_processor_id()]);
++}
++
++/*
++ * kdb_previous_event
++ *
++ * Return a count of cpus that are leaving kdb, i.e. the number
++ * of processors that are still handling the previous kdb event.
++ *
++ * Inputs:
++ * None.
++ * Returns:
++ * Count of cpus in previous event.
++ * Locking:
++ * none
++ * Remarks:
++ * none
++ */
++
++static int
++kdb_previous_event(void)
++{
++ int i, leaving = 0;
++ for (i = 0; i < NR_CPUS; ++i) {
++ if (KDB_STATE_CPU(LEAVING, i))
++ ++leaving;
++ }
++ return leaving;
++}
++
++/*
++ * kdb_wait_for_cpus
++ *
++ * Invoked once at the start of a kdb event, from the controlling cpu. Wait a
++ * short period for the other cpus to enter kdb state.
++ *
++ * Inputs:
++ * none
++ * Returns:
++ * none
++ * Locking:
++ * none
++ * Remarks:
++ * none
++ */
++
++int kdb_wait_for_cpus_secs;
++
++static void
++kdb_wait_for_cpus(void)
++{
++#ifdef CONFIG_SMP
++ int online = 0, kdb_data = 0, prev_kdb_data = 0, c, time;
++ mdelay(100);
++ for (time = 0; time < kdb_wait_for_cpus_secs; ++time) {
++ online = 0;
++ kdb_data = 0;
++ for_each_online_cpu(c) {
++ ++online;
++ if (kdb_running_process[c].seqno >= kdb_seqno - 1)
++ ++kdb_data;
++ }
++ if (online == kdb_data)
++ break;
++ if (prev_kdb_data != kdb_data) {
++ kdb_nextline = 0; /* no prompt yet */
++ kdb_printf(" %d out of %d cpus in kdb, waiting for the rest, timeout in %d second(s)\n",
++ kdb_data, online, kdb_wait_for_cpus_secs - time);
++ prev_kdb_data = kdb_data;
++ }
++ touch_nmi_watchdog();
++ mdelay(1000);
++ /* Architectures may want to send a more forceful interrupt */
++ if (time == min(kdb_wait_for_cpus_secs / 2, 5))
++ kdba_wait_for_cpus();
++ if (time % 4 == 0)
++ kdb_printf(".");
++ }
++ if (time) {
++ int wait = online - kdb_data;
++ if (wait == 0)
++ kdb_printf("All cpus are now in kdb\n");
++ else
++ kdb_printf("%d cpu%s not in kdb, %s state is unknown\n",
++ wait,
++ wait == 1 ? " is" : "s are",
++ wait == 1 ? "its" : "their");
++ }
++#endif /* CONFIG_SMP */
++}
++
++/*
++ * kdb_main_loop
++ *
++ * The main kdb loop. After initial setup and assignment of the controlling
++ * cpu, all cpus are in this loop. One cpu is in control and will issue the kdb
++ * prompt, the others will spin until 'go' or cpu switch.
++ *
++ * To get a consistent view of the kernel stacks for all processes, this routine
++ * is invoked from the main kdb code via an architecture specific routine.
++ * kdba_main_loop is responsible for making the kernel stacks consistent for all
++ * processes, there should be no difference between a blocked process and a
++ * running process as far as kdb is concerned.
++ *
++ * Inputs:
++ * reason The reason KDB was invoked
++ * error The hardware-defined error code
++ * reason2 kdb's current reason code. Initially error but can change
++ * acording to kdb state.
++ * db_result Result code from break or debug point.
++ * regs The exception frame at time of fault/breakpoint. If reason
++ * is SILENT or CPU_UP then regs is NULL, otherwise it
++ * should always be valid.
++ * Returns:
++ * 0 KDB was invoked for an event which it wasn't responsible
++ * 1 KDB handled the event for which it was invoked.
++ * Locking:
++ * none
++ * Remarks:
++ * none
++ */
++
++int
++kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
++ kdb_dbtrap_t db_result, struct pt_regs *regs)
++{
++ int result = 1;
++ /* Stay in kdb() until 'go', 'ss[b]' or an error */
++ while (1) {
++ /*
++ * All processors except the one that is in control
++ * will spin here.
++ */
++ KDB_DEBUG_STATE("kdb_main_loop 1", reason);
++ while (KDB_STATE(HOLD_CPU)) {
++ /* state KDB is turned off by kdb_cpu to see if the
++ * other cpus are still live, each cpu in this loop
++ * turns it back on.
++ */
++ if (!KDB_STATE(KDB)) {
++ KDB_STATE_SET(KDB);
++ }
++ }
++ KDB_STATE_CLEAR(SUPPRESS);
++ KDB_DEBUG_STATE("kdb_main_loop 2", reason);
++ if (KDB_STATE(LEAVING))
++ break; /* Another cpu said 'go' */
++
++ if (!kdb_quiet(reason))
++ kdb_wait_for_cpus();
++ /* Still using kdb, this processor is in control */
++ result = kdb_local(reason2, error, regs, db_result);
++ KDB_DEBUG_STATE("kdb_main_loop 3", result);
++
++ if (result == KDB_CMD_CPU) {
++ /* Cpu switch, hold the current cpu, release the target one. */
++ reason2 = KDB_REASON_SWITCH;
++ KDB_STATE_SET(HOLD_CPU);
++ KDB_STATE_CLEAR_CPU(HOLD_CPU, kdb_new_cpu);
++ continue;
++ }
++
++ if (result == KDB_CMD_SS) {
++ KDB_STATE_SET(DOING_SS);
++ break;
++ }
++
++ if (result == KDB_CMD_SSB) {
++ KDB_STATE_SET(DOING_SS);
++ KDB_STATE_SET(DOING_SSB);
++ break;
++ }
++
++ if (result && result != 1 && result != KDB_CMD_GO)
++ kdb_printf("\nUnexpected kdb_local return code %d\n", result);
++
++ KDB_DEBUG_STATE("kdb_main_loop 4", reason);
++ break;
++ }
++ if (KDB_STATE(DOING_SS))
++ KDB_STATE_CLEAR(SSBPT);
++ return result;
++}
++
++/* iapc_boot_arch was defined in ACPI 2.0, FADT revision 3 onwards. For any
++ * FADT prior to revision 3, we have to assume that we have an i8042 I/O
++ * device. ACPI initialises after KDB initialises but before using KDB, so
++ * check iapc_boot_arch on each entry to KDB.
++ */
++static void
++kdb_check_i8042(void)
++{
++ KDB_FLAG_CLEAR(NO_I8042);
++#ifdef CONFIG_ACPI
++ if (acpi_gbl_FADT.header.revision >= 3 &&
++ (acpi_gbl_FADT.boot_flags & BAF_8042_KEYBOARD_CONTROLLER) == 0)
++ KDB_FLAG_SET(NO_I8042);
++#endif /* CONFIG_ACPI */
++}
++
++/*
++ * kdb
++ *
++ * This function is the entry point for the kernel debugger. It
++ * provides a command parser and associated support functions to
++ * allow examination and control of an active kernel.
++ *
++ * The breakpoint trap code should invoke this function with
++ * one of KDB_REASON_BREAK (int 03) or KDB_REASON_DEBUG (debug register)
++ *
++ * the die_if_kernel function should invoke this function with
++ * KDB_REASON_OOPS.
++ *
++ * In single step mode, one cpu is released to run without
++ * breakpoints. Interrupts and NMI are reset to their original values,
++ * the cpu is allowed to do one instruction which causes a trap
++ * into kdb with KDB_REASON_DEBUG.
++ *
++ * Inputs:
++ * reason The reason KDB was invoked
++ * error The hardware-defined error code
++ * regs The exception frame at time of fault/breakpoint. If reason
++ * is SILENT or CPU_UP then regs is NULL, otherwise it
++ * should always be valid.
++ * Returns:
++ * 0 KDB was invoked for an event which it wasn't responsible
++ * 1 KDB handled the event for which it was invoked.
++ * Locking:
++ * none
++ * Remarks:
++ * No assumptions of system state. This function may be invoked
++ * with arbitrary locks held. It will stop all other processors
++ * in an SMP environment, disable all interrupts and does not use
++ * the operating systems keyboard driver.
++ *
++ * This code is reentrant but only for cpu switch. Any other
++ * reentrancy is an error, although kdb will attempt to recover.
++ *
++ * At the start of a kdb session the initial processor is running
++ * kdb() and the other processors can be doing anything. When the
++ * initial processor calls smp_kdb_stop() the other processors are
++ * driven through kdb_ipi which calls kdb() with reason SWITCH.
++ * That brings all processors into this routine, one with a "real"
++ * reason code, the other with SWITCH.
++ *
++ * Because the other processors are driven via smp_kdb_stop(),
++ * they enter here from the NMI handler. Until the other
++ * processors exit from here and exit from kdb_ipi, they will not
++ * take any more NMI requests. The initial cpu will still take NMI.
++ *
++ * Multiple race and reentrancy conditions, each with different
++ * avoidance mechanisms.
++ *
++ * Two cpus hit debug points at the same time.
++ *
++ * kdb_lock and kdb_initial_cpu ensure that only one cpu gets
++ * control of kdb. The others spin on kdb_initial_cpu until
++ * they are driven through NMI into kdb_ipi. When the initial
++ * cpu releases the others from NMI, they resume trying to get
++ * kdb_initial_cpu to start a new event.
++ *
++ * A cpu is released from kdb and starts a new event before the
++ * original event has completely ended.
++ *
++ * kdb_previous_event() prevents any cpu from entering
++ * kdb_initial_cpu state until the previous event has completely
++ * ended on all cpus.
++ *
++ * An exception occurs inside kdb.
++ *
++ * kdb_initial_cpu detects recursive entry to kdb and attempts
++ * to recover. The recovery uses longjmp() which means that
++ * recursive calls to kdb never return. Beware of assumptions
++ * like
++ *
++ * ++depth;
++ * kdb();
++ * --depth;
++ *
++ * If the kdb call is recursive then longjmp takes over and
++ * --depth is never executed.
++ *
++ * NMI handling.
++ *
++ * NMI handling is tricky. The initial cpu is invoked by some kdb event,
++ * this event could be NMI driven but usually is not. The other cpus are
++ * driven into kdb() via kdb_ipi which uses NMI so at the start the other
++ * cpus will not accept NMI. Some operations such as SS release one cpu
++ * but hold all the others. Releasing a cpu means it drops back to
++ * whatever it was doing before the kdb event, this means it drops out of
++ * kdb_ipi and hence out of NMI status. But the software watchdog uses
++ * NMI and we do not want spurious watchdog calls into kdb. kdba_read()
++ * resets the watchdog counters in its input polling loop, when a kdb
++ * command is running it is subject to NMI watchdog events.
++ *
++ * Another problem with NMI handling is the NMI used to drive the other
++ * cpus into kdb cannot be distinguished from the watchdog NMI. State
++ * flag WAIT_IPI indicates that a cpu is waiting for NMI via kdb_ipi,
++ * if not set then software NMI is ignored by kdb_ipi.
++ *
++ * Cpu switching.
++ *
++ * All cpus are in kdb (or they should be), all but one are
++ * spinning on KDB_STATE(HOLD_CPU). Only one cpu is not in
++ * HOLD_CPU state, only that cpu can handle commands.
++ *
++ * Go command entered.
++ *
++ * If necessary, go will switch to the initial cpu first. If the event
++ * was caused by a software breakpoint (assumed to be global) that
++ * requires single-step to get over the breakpoint then only release the
++ * initial cpu, after the initial cpu has single-stepped the breakpoint
++ * then release the rest of the cpus. If SSBPT is not required then
++ * release all the cpus at once.
++ */
++
++fastcall int
++kdb(kdb_reason_t reason, int error, struct pt_regs *regs)
++{
++ kdb_intstate_t int_state; /* Interrupt state */
++ kdb_reason_t reason2 = reason;
++ int result = 0; /* Default is kdb did not handle it */
++ int ss_event, old_regs_saved = 0;
++ struct pt_regs *old_regs = NULL;
++ kdb_dbtrap_t db_result=KDB_DB_NOBPT;
++ preempt_disable();
++ atomic_inc(&kdb_event);
++
++ switch(reason) {
++ case KDB_REASON_OOPS:
++ case KDB_REASON_NMI:
++ KDB_FLAG_SET(CATASTROPHIC); /* kernel state is dubious now */
++ break;
++ default:
++ break;
++ }
++ switch(reason) {
++ case KDB_REASON_ENTER:
++ case KDB_REASON_ENTER_SLAVE:
++ case KDB_REASON_BREAK:
++ case KDB_REASON_DEBUG:
++ case KDB_REASON_OOPS:
++ case KDB_REASON_SWITCH:
++ case KDB_REASON_KEYBOARD:
++ case KDB_REASON_NMI:
++ if (regs && regs != get_irq_regs()) {
++ old_regs = set_irq_regs(regs);
++ old_regs_saved = 1;
++ }
++ break;
++ default:
++ break;
++ }
++ if (kdb_continue_catastrophic > 2) {
++ kdb_printf("kdb_continue_catastrophic is out of range, setting to 2\n");
++ kdb_continue_catastrophic = 2;
++ }
++ if (!kdb_on && KDB_FLAG(CATASTROPHIC) && kdb_continue_catastrophic == 2) {
++ KDB_FLAG_SET(ONLY_DO_DUMP);
++ }
++ if (!kdb_on && !KDB_FLAG(ONLY_DO_DUMP))
++ goto out;
++
++ KDB_DEBUG_STATE("kdb 1", reason);
++ KDB_STATE_CLEAR(SUPPRESS);
++
++ /* Filter out userspace breakpoints first, no point in doing all
++ * the kdb smp fiddling when it is really a gdb trap.
++ * Save the single step status first, kdba_db_trap clears ss status.
++ * kdba_b[dp]_trap sets SSBPT if required.
++ */
++ ss_event = KDB_STATE(DOING_SS) || KDB_STATE(SSBPT);
++#ifdef CONFIG_CPU_XSCALE
++ if ( KDB_STATE(A_XSC_ICH) ) {
++ /* restore changed I_BIT */
++ KDB_STATE_CLEAR(A_XSC_ICH);
++ kdba_restore_retirq(regs, KDB_STATE(A_XSC_IRQ));
++ if ( !ss_event ) {
++ kdb_printf("Stranger!!! Why IRQ bit is changed====\n");
++ }
++ }
++#endif
++ if (reason == KDB_REASON_BREAK) {
++ db_result = kdba_bp_trap(regs, error); /* Only call this once */
++ }
++ if (reason == KDB_REASON_DEBUG) {
++ db_result = kdba_db_trap(regs, error); /* Only call this once */
++ }
++
++ if ((reason == KDB_REASON_BREAK || reason == KDB_REASON_DEBUG)
++ && db_result == KDB_DB_NOBPT) {
++ KDB_DEBUG_STATE("kdb 2", reason);
++ goto out; /* Not one of mine */
++ }
++
++ /* Turn off single step if it was being used */
++ if (ss_event) {
++ kdba_clearsinglestep(regs);
++ /* Single step after a breakpoint removes the need for a delayed reinstall */
++ if (reason == KDB_REASON_BREAK || reason == KDB_REASON_DEBUG)
++ KDB_STATE_CLEAR(SSBPT);
++ }
++
++ /* kdb can validly reenter but only for certain well defined conditions */
++ if (reason == KDB_REASON_DEBUG
++ && !KDB_STATE(HOLD_CPU)
++ && ss_event)
++ KDB_STATE_SET(REENTRY);
++ else
++ KDB_STATE_CLEAR(REENTRY);
++
++ /* Wait for previous kdb event to completely exit before starting
++ * a new event.
++ */
++ while (kdb_previous_event())
++ ;
++ KDB_DEBUG_STATE("kdb 3", reason);
++
++ /*
++ * If kdb is already active, print a message and try to recover.
++ * If recovery is not possible and recursion is allowed or
++ * forced recursion without recovery is set then try to recurse
++ * in kdb. Not guaranteed to work but it makes an attempt at
++ * debugging the debugger.
++ */
++ if (reason != KDB_REASON_SWITCH &&
++ reason != KDB_REASON_ENTER_SLAVE) {
++ if (KDB_IS_RUNNING() && !KDB_STATE(REENTRY)) {
++ int recover = 1;
++ unsigned long recurse = 0;
++ kdb_printf("kdb: Debugger re-entered on cpu %d, new reason = %d\n",
++ smp_processor_id(), reason);
++ /* Should only re-enter from released cpu */
++
++ if (KDB_STATE(HOLD_CPU)) {
++ kdb_printf(" Strange, cpu %d should not be running\n", smp_processor_id());
++ recover = 0;
++ }
++ if (!KDB_STATE(CMD)) {
++ kdb_printf(" Not executing a kdb command\n");
++ recover = 0;
++ }
++ if (!KDB_STATE(LONGJMP)) {
++ kdb_printf(" No longjmp available for recovery\n");
++ recover = 0;
++ }
++ kdbgetulenv("RECURSE", &recurse);
++ if (recurse > 1) {
++ kdb_printf(" Forced recursion is set\n");
++ recover = 0;
++ }
++ if (recover) {
++ kdb_printf(" Attempting to abort command and recover\n");
++#ifdef kdba_setjmp
++ kdba_longjmp(&kdbjmpbuf[smp_processor_id()], 0);
++#endif /* kdba_setjmp */
++ }
++ if (recurse) {
++ if (KDB_STATE(RECURSE)) {
++ kdb_printf(" Already in recursive mode\n");
++ } else {
++ kdb_printf(" Attempting recursive mode\n");
++ KDB_STATE_SET(RECURSE);
++ KDB_STATE_SET(REENTRY);
++ reason2 = KDB_REASON_RECURSE;
++ recover = 1;
++ }
++ }
++ if (!recover) {
++ kdb_printf(" Cannot recover, allowing event to proceed\n");
++ /*temp*/
++ while (KDB_IS_RUNNING())
++ cpu_relax();
++ goto out;
++ }
++ }
++ } else if (reason == KDB_REASON_SWITCH && !KDB_IS_RUNNING()) {
++ kdb_printf("kdb: CPU switch without kdb running, I'm confused\n");
++ goto out;
++ }
++
++ /*
++ * Disable interrupts, breakpoints etc. on this processor
++ * during kdb command processing
++ */
++ KDB_STATE_SET(KDB);
++ kdba_disableint(&int_state);
++ if (!KDB_STATE(KDB_CONTROL)) {
++ kdb_bp_remove_local();
++ KDB_STATE_SET(KDB_CONTROL);
++ }
++
++ /*
++ * If not entering the debugger due to CPU switch or single step
++ * reentry, serialize access here.
++ * The processors may race getting to this point - if,
++ * for example, more than one processor hits a breakpoint
++ * at the same time. We'll serialize access to kdb here -
++ * other processors will loop here, and the NMI from the stop
++ * IPI will take them into kdb as switch candidates. Once
++ * the initial processor releases the debugger, the rest of
++ * the processors will race for it.
++ *
++ * The above describes the normal state of affairs, where two or more
++ * cpus that are entering kdb at the "same" time are assumed to be for
++ * separate events. However some processes such as ia64 MCA/INIT will
++ * drive all the cpus into error processing at the same time. For that
++ * case, all of the cpus entering kdb at the "same" time are really a
++ * single event.
++ *
++ * That case is handled by the use of KDB_ENTER by one cpu (the
++ * monarch) and KDB_ENTER_SLAVE on the other cpus (the slaves).
++ * KDB_ENTER_SLAVE maps to KDB_REASON_ENTER_SLAVE. The slave events
++ * will be treated as if they had just responded to the kdb IPI, i.e.
++ * as if they were KDB_REASON_SWITCH.
++ *
++ * Because of races across multiple cpus, ENTER_SLAVE can occur before
++ * the main ENTER. Hold up ENTER_SLAVE here until the main ENTER
++ * arrives.
++ */
++
++ if (reason == KDB_REASON_ENTER_SLAVE) {
++ spin_lock(&kdb_lock);
++ while (!KDB_IS_RUNNING()) {
++ spin_unlock(&kdb_lock);
++ while (!KDB_IS_RUNNING())
++ cpu_relax();
++ spin_lock(&kdb_lock);
++ }
++ reason = KDB_REASON_SWITCH;
++ KDB_STATE_SET(HOLD_CPU);
++ spin_unlock(&kdb_lock);
++ }
++
++ if (reason == KDB_REASON_SWITCH || KDB_STATE(REENTRY))
++ ; /* drop through */
++ else {
++ KDB_DEBUG_STATE("kdb 4", reason);
++ spin_lock(&kdb_lock);
++ while (KDB_IS_RUNNING() || kdb_previous_event()) {
++ spin_unlock(&kdb_lock);
++ while (KDB_IS_RUNNING() || kdb_previous_event())
++ cpu_relax();
++ spin_lock(&kdb_lock);
++ }
++ KDB_DEBUG_STATE("kdb 5", reason);
++
++ kdb_initial_cpu = smp_processor_id();
++ ++kdb_seqno;
++ spin_unlock(&kdb_lock);
++ if (!kdb_quiet(reason))
++ notify_die(DIE_KDEBUG_ENTER, "KDEBUG ENTER", regs, error, 0, 0);
++ }
++
++ if (smp_processor_id() == kdb_initial_cpu
++ && !KDB_STATE(REENTRY)) {
++ KDB_STATE_CLEAR(HOLD_CPU);
++ KDB_STATE_CLEAR(WAIT_IPI);
++ kdb_check_i8042();
++ /*
++ * Remove the global breakpoints. This is only done
++ * once from the initial processor on initial entry.
++ */
++ if (!kdb_quiet(reason) || smp_processor_id() == 0)
++ kdb_bp_remove_global();
++
++ /*
++ * If SMP, stop other processors. The other processors
++ * will enter kdb() with KDB_REASON_SWITCH and spin in
++ * kdb_main_loop().
++ */
++ KDB_DEBUG_STATE("kdb 6", reason);
++ if (NR_CPUS > 1 && !kdb_quiet(reason)) {
++ int i;
++ for (i = 0; i < NR_CPUS; ++i) {
++ if (!cpu_online(i))
++ continue;
++ if (i != kdb_initial_cpu) {
++ KDB_STATE_SET_CPU(HOLD_CPU, i);
++ KDB_STATE_SET_CPU(WAIT_IPI, i);
++ }
++ }
++ KDB_DEBUG_STATE("kdb 7", reason);
++ smp_kdb_stop();
++ KDB_DEBUG_STATE("kdb 8", reason);
++ }
++ }
++
++ if (KDB_STATE(GO1)) {
++ kdb_bp_remove_global(); /* They were set for single-step purposes */
++ KDB_STATE_CLEAR(GO1);
++ reason = KDB_REASON_SILENT; /* Now silently go */
++ }
++
++ /* Set up a consistent set of process stacks before talking to the user */
++ KDB_DEBUG_STATE("kdb 9", result);
++ result = kdba_main_loop(reason, reason2, error, db_result, regs);
++
++ KDB_DEBUG_STATE("kdb 10", result);
++ kdba_adjust_ip(reason2, error, regs);
++ KDB_STATE_CLEAR(LONGJMP);
++ KDB_DEBUG_STATE("kdb 11", result);
++ /* go which requires single-step over a breakpoint must only release
++ * one cpu.
++ */
++ if (result == KDB_CMD_GO && KDB_STATE(SSBPT))
++ KDB_STATE_SET(GO1);
++
++ if (smp_processor_id() == kdb_initial_cpu &&
++ !KDB_STATE(DOING_SS) &&
++ !KDB_STATE(RECURSE)) {
++ /*
++ * (Re)install the global breakpoints and cleanup the cached
++ * symbol table. This is only done once from the initial
++ * processor on go.
++ */
++ KDB_DEBUG_STATE("kdb 12", reason);
++ if (!kdb_quiet(reason) || smp_processor_id() == 0) {
++ kdb_bp_install_global(regs);
++ kdbnearsym_cleanup();
++ debug_kusage();
++ }
++ if (!KDB_STATE(GO1)) {
++ /*
++ * Release all other cpus which will see KDB_STATE(LEAVING) is set.
++ */
++ int i;
++ for (i = 0; i < NR_CPUS; ++i) {
++ if (KDB_STATE_CPU(KDB, i))
++ KDB_STATE_SET_CPU(LEAVING, i);
++ KDB_STATE_CLEAR_CPU(WAIT_IPI, i);
++ KDB_STATE_CLEAR_CPU(HOLD_CPU, i);
++ }
++ /* Wait until all the other processors leave kdb */
++ while (kdb_previous_event() != 1)
++ ;
++ if (!kdb_quiet(reason))
++ notify_die(DIE_KDEBUG_LEAVE, "KDEBUG LEAVE", regs, error, 0, 0);
++ kdb_initial_cpu = -1; /* release kdb control */
++ KDB_DEBUG_STATE("kdb 13", reason);
++ }
++ }
++
++ KDB_DEBUG_STATE("kdb 14", result);
++ kdba_restoreint(&int_state);
++#ifdef CONFIG_CPU_XSCALE
++ if ( smp_processor_id() == kdb_initial_cpu &&
++ ( KDB_STATE(SSBPT) | KDB_STATE(DOING_SS) )
++ ) {
++ kdba_setsinglestep(regs);
++ // disable IRQ in stack frame
++ KDB_STATE_SET(A_XSC_ICH);
++ if ( kdba_disable_retirq(regs) ) {
++ KDB_STATE_SET(A_XSC_IRQ);
++ }
++ else {
++ KDB_STATE_CLEAR(A_XSC_IRQ);
++ }
++ }
++#endif
++
++ /* Only do this work if we are really leaving kdb */
++ if (!(KDB_STATE(DOING_SS) || KDB_STATE(SSBPT) || KDB_STATE(RECURSE))) {
++ KDB_DEBUG_STATE("kdb 15", result);
++ kdb_bp_install_local(regs);
++ if (old_regs_saved)
++ set_irq_regs(old_regs);
++ KDB_STATE_CLEAR(KDB_CONTROL);
++ }
++
++ KDB_DEBUG_STATE("kdb 16", result);
++ KDB_FLAG_CLEAR(CATASTROPHIC);
++ KDB_STATE_CLEAR(IP_ADJUSTED); /* Re-adjust ip next time in */
++ KDB_STATE_CLEAR(KEYBOARD);
++ KDB_STATE_CLEAR(KDB); /* Main kdb state has been cleared */
++ KDB_STATE_CLEAR(RECURSE);
++ KDB_STATE_CLEAR(LEAVING); /* No more kdb work after this */
++ KDB_DEBUG_STATE("kdb 17", reason);
++out:
++ atomic_dec(&kdb_event);
++ preempt_enable();
++ return result != 0;
++}
++
++/*
++ * kdb_mdr
++ *
++ * This function implements the guts of the 'mdr' command.
++ *
++ * mdr <addr arg>,<byte count>
++ *
++ * Inputs:
++ * addr Start address
++ * count Number of bytes
++ * Outputs:
++ * None.
++ * Returns:
++ * Always 0. Any errors are detected and printed by kdb_getarea.
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static int
++kdb_mdr(kdb_machreg_t addr, unsigned int count)
++{
++ unsigned char c;
++ while (count--) {
++ if (kdb_getarea(c, addr))
++ return 0;
++ kdb_printf("%02x", c);
++ addr++;
++ }
++ kdb_printf("\n");
++ return 0;
++}
++
++/*
++ * kdb_md
++ *
++ * This function implements the 'md', 'md1', 'md2', 'md4', 'md8'
++ * 'mdr' and 'mds' commands.
++ *
++ * md|mds [<addr arg> [<line count> [<radix>]]]
++ * mdWcN [<addr arg> [<line count> [<radix>]]]
++ * where W = is the width (1, 2, 4 or 8) and N is the count.
++ * for eg., md1c20 reads 20 bytes, 1 at a time.
++ * mdr <addr arg>,<byte count>
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static void
++kdb_md_line(const char *fmtstr, kdb_machreg_t addr,
++ int symbolic, int nosect, int bytesperword,
++ int num, int repeat, int phys)
++{
++ /* print just one line of data */
++ kdb_symtab_t symtab;
++ char cbuf[32];
++ char *c = cbuf;
++ int i;
++ unsigned long word;
++
++ memset(cbuf, '\0', sizeof(cbuf));
++ if (phys)
++ kdb_printf("phys " kdb_machreg_fmt0 " ", addr);
++ else
++ kdb_printf(kdb_machreg_fmt0 " ", addr);
++
++ for (i = 0; i < num && repeat--; i++) {
++ if (phys) {
++ if (kdb_getphysword(&word, addr, bytesperword))
++ break;
++ } else if (kdb_getword(&word, addr, bytesperword))
++ break;
++ kdb_printf(fmtstr, word);
++ if (symbolic)
++ kdbnearsym(word, &symtab);
++ else
++ memset(&symtab, 0, sizeof(symtab));
++ if (symtab.sym_name) {
++ kdb_symbol_print(word, &symtab, 0);
++ if (!nosect) {
++ kdb_printf("\n");
++ kdb_printf(" %s %s "
++ kdb_machreg_fmt " " kdb_machreg_fmt " " kdb_machreg_fmt,
++ symtab.mod_name,
++ symtab.sec_name,
++ symtab.sec_start,
++ symtab.sym_start,
++ symtab.sym_end);
++ }
++ addr += bytesperword;
++ } else {
++ union {
++ u64 word;
++ unsigned char c[8];
++ } wc;
++ unsigned char *cp;
++#ifdef __BIG_ENDIAN
++ cp = wc.c + 8 - bytesperword;
++#else
++ cp = wc.c;
++#endif
++ wc.word = word;
++#define printable_char(c) ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.';})
++ switch (bytesperword) {
++ case 8:
++ *c++ = printable_char(*cp++);
++ *c++ = printable_char(*cp++);
++ *c++ = printable_char(*cp++);
++ *c++ = printable_char(*cp++);
++ addr += 4;
++ case 4:
++ *c++ = printable_char(*cp++);
++ *c++ = printable_char(*cp++);
++ addr += 2;
++ case 2:
++ *c++ = printable_char(*cp++);
++ addr++;
++ case 1:
++ *c++ = printable_char(*cp++);
++ addr++;
++ break;
++ }
++#undef printable_char
++ }
++ }
++ kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1), " ", cbuf);
++}
++
++static int
++kdb_md(int argc, const char **argv)
++{
++ static kdb_machreg_t last_addr;
++ static int last_radix, last_bytesperword, last_repeat;
++ int radix = 16, mdcount = 8, bytesperword = KDB_WORD_SIZE, repeat;
++ int nosect = 0;
++ char fmtchar, fmtstr[64];
++ kdb_machreg_t addr;
++ unsigned long word;
++ long offset = 0;
++ int symbolic = 0;
++ int valid = 0;
++ int phys = 0;
++
++ kdbgetintenv("MDCOUNT", &mdcount);
++ kdbgetintenv("RADIX", &radix);
++ kdbgetintenv("BYTESPERWORD", &bytesperword);
++
++ /* Assume 'md <addr>' and start with environment values */
++ repeat = mdcount * 16 / bytesperword;
++
++ if (strcmp(argv[0], "mdr") == 0) {
++ if (argc != 2)
++ return KDB_ARGCOUNT;
++ valid = 1;
++ } else if (isdigit(argv[0][2])) {
++ bytesperword = (int)(argv[0][2] - '0');
++ if (bytesperword == 0) {
++ bytesperword = last_bytesperword;
++ if (bytesperword == 0) {
++ bytesperword = 4;
++ }
++ }
++ last_bytesperword = bytesperword;
++ repeat = mdcount * 16 / bytesperword;
++ if (!argv[0][3])
++ valid = 1;
++ else if (argv[0][3] == 'c' && argv[0][4]) {
++ char *p;
++ repeat = simple_strtoul(argv[0]+4, &p, 10);
++ mdcount = ((repeat * bytesperword) + 15) / 16;
++ valid = !*p;
++ }
++ last_repeat = repeat;
++ } else if (strcmp(argv[0], "md") == 0)
++ valid = 1;
++ else if (strcmp(argv[0], "mds") == 0)
++ valid = 1;
++ else if (strcmp(argv[0], "mdp") == 0) {
++ phys = valid = 1;
++ }
++ if (!valid)
++ return KDB_NOTFOUND;
++
++ if (argc == 0) {
++ if (last_addr == 0)
++ return KDB_ARGCOUNT;
++ addr = last_addr;
++ radix = last_radix;
++ bytesperword = last_bytesperword;
++ repeat = last_repeat;
++ mdcount = ((repeat * bytesperword) + 15) / 16;
++ }
++
++ if (argc) {
++ kdb_machreg_t val;
++ int diag, nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++ if (diag)
++ return diag;
++ if (argc > nextarg+2)
++ return KDB_ARGCOUNT;
++
++ if (argc >= nextarg) {
++ diag = kdbgetularg(argv[nextarg], &val);
++ if (!diag) {
++ mdcount = (int) val;
++ repeat = mdcount * 16 / bytesperword;
++ }
++ }
++ if (argc >= nextarg+1) {
++ diag = kdbgetularg(argv[nextarg+1], &val);
++ if (!diag)
++ radix = (int) val;
++ }
++ }
++
++ if (strcmp(argv[0], "mdr") == 0) {
++ return kdb_mdr(addr, mdcount);
++ }
++
++ switch (radix) {
++ case 10:
++ fmtchar = 'd';
++ break;
++ case 16:
++ fmtchar = 'x';
++ break;
++ case 8:
++ fmtchar = 'o';
++ break;
++ default:
++ return KDB_BADRADIX;
++ }
++
++ last_radix = radix;
++
++ if (bytesperword > KDB_WORD_SIZE)
++ return KDB_BADWIDTH;
++
++ switch (bytesperword) {
++ case 8:
++ sprintf(fmtstr, "%%16.16l%c ", fmtchar);
++ break;
++ case 4:
++ sprintf(fmtstr, "%%8.8l%c ", fmtchar);
++ break;
++ case 2:
++ sprintf(fmtstr, "%%4.4l%c ", fmtchar);
++ break;
++ case 1:
++ sprintf(fmtstr, "%%2.2l%c ", fmtchar);
++ break;
++ default:
++ return KDB_BADWIDTH;
++ }
++
++ last_repeat = repeat;
++ last_bytesperword = bytesperword;
++
++ if (strcmp(argv[0], "mds") == 0) {
++ symbolic = 1;
++ /* Do not save these changes as last_*, they are temporary mds
++ * overrides.
++ */
++ bytesperword = KDB_WORD_SIZE;
++ repeat = mdcount;
++ kdbgetintenv("NOSECT", &nosect);
++ }
++
++ /* Round address down modulo BYTESPERWORD */
++
++ addr &= ~(bytesperword-1);
++
++ while (repeat > 0) {
++ unsigned long a;
++ int n, z, num = (symbolic ? 1 : (16 / bytesperword));
++
++ for (a = addr, z = 0; z < repeat; a += bytesperword, ++z) {
++ if (phys) {
++ if (kdb_getphysword(&word, a, bytesperword)
++ || word)
++ break;
++ } else if (kdb_getword(&word, a, bytesperword) || word)
++ break;
++ }
++ n = min(num, repeat);
++ kdb_md_line(fmtstr, addr, symbolic, nosect, bytesperword, num, repeat, phys);
++ addr += bytesperword * n;
++ repeat -= n;
++ z = (z + num - 1) / num;
++ if (z > 2) {
++ int s = num * (z-2);
++ kdb_printf(kdb_machreg_fmt0 "-" kdb_machreg_fmt0 " zero suppressed\n",
++ addr, addr + bytesperword * s - 1);
++ addr += bytesperword * s;
++ repeat -= s;
++ }
++ }
++ last_addr = addr;
++
++ return 0;
++}
++
++/*
++ * kdb_mm
++ *
++ * This function implements the 'mm' command.
++ *
++ * mm address-expression new-value
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * mm works on machine words, mmW works on bytes.
++ */
++
++static int
++kdb_mm(int argc, const char **argv)
++{
++ int diag;
++ kdb_machreg_t addr;
++ long offset = 0;
++ unsigned long contents;
++ int nextarg;
++ int width;
++
++ if (argv[0][2] && !isdigit(argv[0][2]))
++ return KDB_NOTFOUND;
++
++ if (argc < 2) {
++ return KDB_ARGCOUNT;
++ }
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)))
++ return diag;
++
++ if (nextarg > argc)
++ return KDB_ARGCOUNT;
++
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &contents, NULL, NULL)))
++ return diag;
++
++ if (nextarg != argc + 1)
++ return KDB_ARGCOUNT;
++
++ width = argv[0][2] ? (argv[0][2] - '0') : (KDB_WORD_SIZE);
++ if ((diag = kdb_putword(addr, contents, width)))
++ return diag;
++
++ kdb_printf(kdb_machreg_fmt " = " kdb_machreg_fmt "\n", addr, contents);
++
++ return 0;
++}
++
++/*
++ * kdb_go
++ *
++ * This function implements the 'go' command.
++ *
++ * go [address-expression]
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * KDB_CMD_GO for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static int
++kdb_go(int argc, const char **argv)
++{
++ kdb_machreg_t addr;
++ int diag;
++ int nextarg;
++ long offset;
++ struct pt_regs *regs = get_irq_regs();
++
++ if (argc == 1) {
++ if (smp_processor_id() != kdb_initial_cpu) {
++ kdb_printf("go <address> must be issued from the initial cpu, do cpu %d first\n", kdb_initial_cpu);
++ return KDB_ARGCOUNT;
++ }
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg,
++ &addr, &offset, NULL);
++ if (diag)
++ return diag;
++
++ kdba_setpc(regs, addr);
++ } else if (argc)
++ return KDB_ARGCOUNT;
++
++ diag = KDB_CMD_GO;
++ if (KDB_FLAG(CATASTROPHIC)) {
++ kdb_printf("Catastrophic error detected\n");
++ kdb_printf("kdb_continue_catastrophic=%d, ",
++ kdb_continue_catastrophic);
++ if (kdb_continue_catastrophic == 0 && kdb_go_count++ == 0) {
++ kdb_printf("type go a second time if you really want to continue\n");
++ return 0;
++ }
++ if (kdb_continue_catastrophic == 2) {
++ kdb_do_dump();
++ kdb_printf("forcing reboot\n");
++ kdb_reboot(0, NULL);
++ }
++ kdb_printf("attempting to continue\n");
++ }
++ if (smp_processor_id() != kdb_initial_cpu) {
++ char buf[80];
++ kdb_printf("go was not issued from initial cpu, switching back to cpu %d\n", kdb_initial_cpu);
++ sprintf(buf, "cpu %d\n", kdb_initial_cpu);
++ /* Recursive use of kdb_parse, do not use argv after this point */
++ argv = NULL;
++ diag = kdb_parse(buf);
++ if (diag == KDB_CMD_CPU)
++ KDB_STATE_SET_CPU(GO_SWITCH, kdb_initial_cpu);
++ }
++ return diag;
++}
++
++/*
++ * kdb_rd
++ *
++ * This function implements the 'rd' command.
++ *
++ * rd display all general registers.
++ * rd c display all control registers.
++ * rd d display all debug registers.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static int
++kdb_rd(int argc, const char **argv)
++{
++ int diag;
++ if (argc == 0) {
++ if ((diag = kdb_check_regs()))
++ return diag;
++ return kdba_dumpregs(kdb_current_regs, NULL, NULL);
++ }
++
++ if (argc > 2) {
++ return KDB_ARGCOUNT;
++ }
++
++ if ((diag = kdb_check_regs()))
++ return diag;
++ return kdba_dumpregs(kdb_current_regs, argv[1], argc==2 ? argv[2]: NULL);
++}
++
++/*
++ * kdb_rm
++ *
++ * This function implements the 'rm' (register modify) command.
++ *
++ * rm register-name new-contents
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * Currently doesn't allow modification of control or
++ * debug registers.
++ */
++
++static int
++kdb_rm(int argc, const char **argv)
++{
++ int diag;
++ int ind = 0;
++ kdb_machreg_t contents;
++
++ if (argc != 2) {
++ return KDB_ARGCOUNT;
++ }
++
++ /*
++ * Allow presence or absence of leading '%' symbol.
++ */
++
++ if (argv[1][0] == '%')
++ ind = 1;
++
++ diag = kdbgetularg(argv[2], &contents);
++ if (diag)
++ return diag;
++
++ if ((diag = kdb_check_regs()))
++ return diag;
++ diag = kdba_setregcontents(&argv[1][ind], kdb_current_regs, contents);
++ if (diag)
++ return diag;
++
++ return 0;
++}
++
++#if defined(CONFIG_MAGIC_SYSRQ)
++/*
++ * kdb_sr
++ *
++ * This function implements the 'sr' (SYSRQ key) command which
++ * interfaces to the soi-disant MAGIC SYSRQ functionality.
++ *
++ * sr <magic-sysrq-code>
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * None.
++ */
++static int
++kdb_sr(int argc, const char **argv)
++{
++ extern int __sysrq_enabled;
++ if (argc != 1) {
++ return KDB_ARGCOUNT;
++ }
++ if (!__sysrq_enabled) {
++ kdb_printf("Auto activating sysrq\n");
++ __sysrq_enabled = 1;
++ }
++
++ handle_sysrq(*argv[1], NULL);
++
++ return 0;
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++/*
++ * kdb_ef
++ *
++ * Implement the 'regs' (display exception frame) command: interpret
++ * the given address as a struct pt_regs and pretty-print it.
++ *
++ *	regs address-expression
++ *
++ * Returns zero for success, otherwise a kdb diagnostic.
++ */
++
++static int
++kdb_ef(int argc, const char **argv)
++{
++	kdb_machreg_t addr;
++	long offset;
++	int nextarg = 1;
++	int rc;
++
++	if (argc != 1)
++		return KDB_ARGCOUNT;
++
++	rc = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++	if (rc)
++		return rc;
++
++	/* No validation is done on the address beyond expression parsing;
++	 * garbage in, garbage out.
++	 */
++	return kdba_dumpregs((struct pt_regs *)addr, NULL, NULL);
++}
++
++#if defined(CONFIG_MODULES)
++extern struct list_head *kdb_modules;
++extern void free_module(struct module *);
++
++/* modules using other modules */
++struct module_use
++{
++ struct list_head list;
++ struct module *module_which_uses;
++};
++
++/*
++ * kdb_lsmod
++ *
++ * This function implements the 'lsmod' command. Lists currently
++ * loaded kernel modules.
++ *
++ * Mostly taken from userland lsmod.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ *
++ */
++
++/* List every loaded module: name, size, struct module address, state,
++ * reference count and users.  NOTE(review): walks kdb_modules without
++ * module_mutex; presumably safe only because all other cpus are held
++ * inside kdb while this runs — confirm.
++ */
++static int
++kdb_lsmod(int argc, const char **argv)
++{
++	struct module *mod;
++
++	if (argc != 0)
++		return KDB_ARGCOUNT;
++
++	kdb_printf("Module Size modstruct Used by\n");
++	list_for_each_entry(mod, kdb_modules, list) {
++
++		kdb_printf("%-20s%8lu 0x%p ", mod->name,
++			   mod->core_size, (void *)mod);
++#ifdef CONFIG_MODULE_UNLOAD
++		kdb_printf("%4d ", module_refcount(mod));
++#endif
++		if (mod->state == MODULE_STATE_GOING)
++			kdb_printf(" (Unloading)");
++		else if (mod->state == MODULE_STATE_COMING)
++			kdb_printf(" (Loading)");
++		else
++			kdb_printf(" (Live)");
++
++#ifdef CONFIG_MODULE_UNLOAD
++		{
++			struct module_use *use;
++			kdb_printf(" [ ");
++			list_for_each_entry(use, &mod->modules_which_use_me, list)
++				kdb_printf("%s ", use->module_which_uses->name);
++			kdb_printf("]\n");
++		}
++#else
++		/* Fix: without CONFIG_MODULE_UNLOAD nothing terminated the
++		 * line, so every module ran together on one output line.
++		 */
++		kdb_printf("\n");
++#endif
++	}
++
++	return 0;
++}
++
++#endif /* CONFIG_MODULES */
++
++/*
++ * kdb_env
++ *
++ * Implement the 'env' command: print every defined kdb environment
++ * variable, one per line.  When any kdb debug flag is set, also show
++ * the internal flag word.  Always returns zero.
++ */
++
++static int
++kdb_env(int argc, const char **argv)
++{
++	int slot;
++
++	for (slot = 0; slot < __nenv; slot++) {
++		/* Unused slots are NULL; skip them silently. */
++		if (__env[slot])
++			kdb_printf("%s\n", __env[slot]);
++	}
++
++	if (KDB_DEBUG(MASK))
++		kdb_printf("KDBFLAGS=0x%x\n", kdb_flags);
++
++	return 0;
++}
++
++/*
++ * kdb_dmesg
++ *
++ * This function implements the 'dmesg' command to display the contents
++ * of the syslog buffer.
++ *
++ * dmesg [lines] [adjust]
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * None.
++ */
++
++static int
++kdb_dmesg(int argc, const char **argv)
++{
++ char *syslog_data[4], *start, *end, c = '\0', *p;
++ int diag, logging, logsize, lines = 0, adjust = 0, n;
++
++ if (argc > 2)
++ return KDB_ARGCOUNT;
++ if (argc) {
++ char *cp;
++ /* lines > 0: print the first 'lines' lines; lines < 0: print the
++ * last |lines| lines; unparsable input silently means "all".
++ */
++ lines = simple_strtol(argv[1], &cp, 0);
++ if (*cp)
++ lines = 0;
++ if (argc > 1) {
++ /* adjust: number of lines to additionally skip from the
++ * relevant end of the buffer.
++ */
++ adjust = simple_strtoul(argv[2], &cp, 0);
++ if (*cp || adjust < 0)
++ adjust = 0;
++ }
++ }
++
++ /* disable LOGGING if set */
++ diag = kdbgetintenv("LOGGING", &logging);
++ if (!diag && logging) {
++ const char *setargs[] = { "set", "LOGGING", "0" };
++ kdb_set(2, setargs);
++ }
++
++ /* syslog_data[0,1] physical start, end+1. syslog_data[2,3] logical start, end+1. */
++ kdb_syslog_data(syslog_data);
++ if (syslog_data[2] == syslog_data[3])
++ return 0;
++ logsize = syslog_data[1] - syslog_data[0];
++ start = syslog_data[2];
++ end = syslog_data[3];
++/* Map a logical position into the circular physical buffer. */
++#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
++ /* First pass: count newlines to get the total line count n. */
++ for (n = 0, p = start; p < end; ++p) {
++ if ((c = *KDB_WRAP(p)) == '\n')
++ ++n;
++ }
++ /* An unterminated final line still counts as a line. */
++ if (c != '\n')
++ ++n;
++ if (lines < 0) {
++ /* Tail mode: skip 'adjust' lines from the end, then walk 'lines'
++ * (negative, counting up to zero) lines backwards from there.
++ */
++ if (adjust >= n)
++ kdb_printf("buffer only contains %d lines, nothing printed\n", n);
++ else if (adjust - lines >= n)
++ kdb_printf("buffer only contains %d lines, last %d lines printed\n",
++ n, n - adjust);
++ if (adjust) {
++ for (; start < end && adjust; ++start) {
++ if (*KDB_WRAP(start) == '\n')
++ --adjust;
++ }
++ if (start < end)
++ ++start;
++ }
++ for (p = start; p < end && lines; ++p) {
++ if (*KDB_WRAP(p) == '\n')
++ ++lines;
++ }
++ end = p;
++ } else if (lines > 0) {
++ /* Head mode: skip everything before the requested window. */
++ int skip = n - (adjust + lines);
++ if (adjust >= n) {
++ kdb_printf("buffer only contains %d lines, nothing printed\n", n);
++ skip = n;
++ } else if (skip < 0) {
++ lines += skip;
++ skip = 0;
++ kdb_printf("buffer only contains %d lines, first %d lines printed\n",
++ n, lines);
++ }
++ for (; start < end && skip; ++start) {
++ if (*KDB_WRAP(start) == '\n')
++ --skip;
++ }
++ for (p = start; p < end && lines; ++p) {
++ if (*KDB_WRAP(p) == '\n')
++ --lines;
++ }
++ end = p;
++ }
++ /* Do a line at a time (max 200 chars) to reduce protocol overhead */
++ c = '\n';
++ while (start != end) {
++ char buf[201];
++ p = buf;
++ while (start < end && (c = *KDB_WRAP(start)) && (p - buf) < sizeof(buf)-1) {
++ ++start;
++ *p++ = c;
++ if (c == '\n')
++ break;
++ }
++ *p = '\0';
++ kdb_printf("%s", buf);
++ }
++ /* Make sure the output ends with a newline even if the log did not. */
++ if (c != '\n')
++ kdb_printf("\n");
++
++ return 0;
++}
++
++/*
++ * kdb_cpu
++ *
++ * This function implements the 'cpu' command.
++ *
++ * cpu [<cpunum>]
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * KDB_CMD_CPU for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * All cpu's should be spinning in kdb(). However just in case
++ * a cpu did not take the smp_kdb_stop NMI, check that a cpu
++ * entered kdb() before passing control to it.
++ */
++
++/* Print which cpus are available to the 'cpu' command, compressing
++ * consecutive cpus in the same state into ranges.  State codes:
++ * ' ' responding to kdb, 'I' responding but running the idle task,
++ * '+' some saved kdb data but not responding, '*' no kdb data,
++ * 'F' offline.
++ */
++static void
++kdb_cpu_status(void)
++{
++ int i, start_cpu, first_print = 1;
++ char state, prev_state = '?';
++
++ kdb_printf("Currently on cpu %d\n", smp_processor_id());
++ kdb_printf("Available cpus: ");
++ for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
++ if (!cpu_online(i))
++ state = 'F'; /* cpu is offline */
++ else {
++ struct kdb_running_process *krp = kdb_running_process+i;
++ if (KDB_STATE_CPU(KDB, i)) {
++ state = ' '; /* cpu is responding to kdb */
++ if (kdb_task_state_char(krp->p) == 'I')
++ state = 'I'; /* running the idle task */
++ } else if (krp->seqno && krp->p && krp->seqno >= kdb_seqno - 1)
++ state = '+'; /* some kdb data, but not responding */
++ else
++ state = '*'; /* no kdb data */
++ }
++ /* A state transition closes the previous run; print it as a
++ * single cpu or a range, annotated with its state code.
++ */
++ if (state != prev_state) {
++ if (prev_state != '?') {
++ if (!first_print)
++ kdb_printf(", ");
++ first_print = 0;
++ kdb_printf("%d", start_cpu);
++ if (start_cpu < i-1)
++ kdb_printf("-%d", i-1);
++ if (prev_state != ' ')
++ kdb_printf("(%c)", prev_state);
++ }
++ prev_state = state;
++ start_cpu = i;
++ }
++ }
++ /* print the trailing cpus, ignoring them if they are all offline */
++ if (prev_state != 'F') {
++ if (!first_print)
++ kdb_printf(", ");
++ kdb_printf("%d", start_cpu);
++ if (start_cpu < i-1)
++ kdb_printf("-%d", i-1);
++ if (prev_state != ' ')
++ kdb_printf("(%c)", prev_state);
++ }
++ kdb_printf("\n");
++}
++
++static int
++kdb_cpu(int argc, const char **argv)
++{
++	unsigned long cpunum;
++	int diag, i;
++
++	/* ask the other cpus if they are still active */
++	for (i=0; i<NR_CPUS; i++) {
++		if (cpu_online(i))
++			KDB_STATE_CLEAR_CPU(KDB, i);
++	}
++	KDB_STATE_SET(KDB);
++	barrier();
++	/* wait for the other cpus to notice and set state KDB again,
++	 * see kdb_main_loop
++	 */
++	udelay(1000);
++
++	if (argc == 0) {
++		kdb_cpu_status();
++		return 0;
++	}
++
++	if (argc != 1)
++		return KDB_ARGCOUNT;
++
++	diag = kdbgetularg(argv[1], &cpunum);
++	if (diag)
++		return diag;
++
++	/*
++	 * Validate cpunum.  Fix: cpu numbers run 0..NR_CPUS-1, so the
++	 * bound check must be >=, not >; with '>' a cpunum equal to
++	 * NR_CPUS reached cpu_online() and the per-cpu kdb state arrays
++	 * one past the end.
++	 */
++	if ((cpunum >= NR_CPUS)
++	 || !cpu_online(cpunum)
++	 || !KDB_STATE_CPU(KDB, cpunum))
++		return KDB_BADCPUNUM;
++
++	kdb_new_cpu = cpunum;
++
++	/*
++	 * Switch to other cpu
++	 */
++	return KDB_CMD_CPU;
++}
++
++/* The user may not realize that ps/bta with no parameters does not print idle
++ * or sleeping system daemon processes, so tell them how many were suppressed.
++ */
++void
++kdb_ps_suppressed(void)
++{
++ int idle = 0, daemon = 0;
++ unsigned long mask_I = kdb_task_state_string("I"),
++ mask_M = kdb_task_state_string("M");
++ unsigned long cpu;
++ const struct task_struct *p, *g;
++ /* Count idle tasks: one candidate per online cpu. */
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (!cpu_online(cpu))
++ continue;
++ p = kdb_curr_task(cpu);
++ if (kdb_task_state(p, mask_I))
++ ++idle;
++ }
++ /* Count sleeping system daemons across all threads. */
++ kdb_do_each_thread(g, p) {
++ if (kdb_task_state(p, mask_M))
++ ++daemon;
++ } kdb_while_each_thread(g, p);
++ if (idle || daemon) {
++ if (idle)
++ kdb_printf("%d idle process%s (state I)%s",
++ idle, idle == 1 ? "" : "es",
++ daemon ? " and " : "");
++ if (daemon)
++ kdb_printf("%d sleeping system daemon (state M) process%s",
++ daemon, daemon == 1 ? "" : "es");
++ kdb_printf(" suppressed\n");
++ }
++}
++
++/*
++ * kdb_ps
++ *
++ * This function implements the 'ps' command which shows
++ * a list of the active processes.
++ *
++ * ps [DRSTCZEUIMA] All processes, optionally filtered by state
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++/* Print one 'ps' line for task p: task address, pid, parent pid, a
++ * has-cpu flag, cpu number, state character, thread struct address,
++ * '*' when p is kdb's current task, and the command name.  For tasks
++ * that hold a cpu, cross-check the saved kdb_running_process entry
++ * and warn when it is missing, stale or inconsistent.
++ */
++void
++kdb_ps1(const struct task_struct *p)
++{
++ struct kdb_running_process *krp = kdb_running_process + kdb_process_cpu(p);
++ kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n",
++ (void *)p, p->pid, p->parent->pid,
++ kdb_task_has_cpu(p), kdb_process_cpu(p),
++ kdb_task_state_char(p),
++ (void *)(&p->thread),
++ p == kdb_curr_task(smp_processor_id()) ? '*': ' ',
++ p->comm);
++ if (kdb_task_has_cpu(p)) {
++ if (!krp->seqno || !krp->p)
++ kdb_printf(" Error: no saved data for this cpu\n");
++ else {
++ /* seqno lags by more than one kdb entry: data is old. */
++ if (krp->seqno < kdb_seqno - 1)
++ kdb_printf(" Warning: process state is stale\n");
++ if (krp->p != p)
++ kdb_printf(" Error: does not match running process table (0x%p)\n", krp->p);
++ }
++ }
++}
++
++static int
++kdb_ps(int argc, const char **argv)
++{
++ struct task_struct *g, *p;
++ unsigned long mask, cpu;
++
++ /* With no state filter, idle and daemon tasks are hidden; tell the
++ * user how many (see kdb_ps_suppressed).
++ */
++ if (argc == 0)
++ kdb_ps_suppressed();
++ kdb_printf("%-*s Pid Parent [*] cpu State %-*s Command\n",
++ (int)(2*sizeof(void *))+2, "Task Addr",
++ (int)(2*sizeof(void *))+2, "Thread");
++ /* An optional argument selects tasks by state letter(s). */
++ mask = kdb_task_state_string(argc ? argv[1] : NULL);
++ /* Run the active tasks first */
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (!cpu_online(cpu))
++ continue;
++ p = kdb_curr_task(cpu);
++ if (kdb_task_state(p, mask))
++ kdb_ps1(p);
++ }
++ kdb_printf("\n");
++ /* Now the real tasks */
++ kdb_do_each_thread(g, p) {
++ if (kdb_task_state(p, mask))
++ kdb_ps1(p);
++ } kdb_while_each_thread(g, p);
++
++ return 0;
++}
++
++/*
++ * kdb_pid
++ *
++ * This function implements the 'pid' command which switches
++ * the currently active process.
++ *
++ * pid [<pid> | R]
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++
++static int
++kdb_pid(int argc, const char **argv)
++{
++	struct task_struct *p;
++	unsigned long val;
++	int diag;
++
++	if (argc > 1)
++		return KDB_ARGCOUNT;
++
++	if (argc) {
++		if (strcmp(argv[1], "R") == 0) {
++			/* 'pid R' switches back to the process that was
++			 * running on the cpu that first entered kdb.
++			 */
++			p = KDB_RUNNING_PROCESS_ORIGINAL[kdb_initial_cpu].p;
++		} else {
++			diag = kdbgetularg(argv[1], &val);
++			if (diag)
++				/* Fix: pass the real diagnostic through
++				 * instead of a hardwired KDB_BADINT, for
++				 * consistency with the other commands.
++				 */
++				return diag;
++
++			p = find_task_by_pid((pid_t)val);
++			if (!p) {
++				kdb_printf("No task with pid=%d\n", (pid_t)val);
++				return 0;
++			}
++		}
++
++		kdba_set_current_task(p);
++	}
++
++	kdb_printf("KDB current process is %s(pid=%d)\n", kdb_current_task->comm,
++		   kdb_current_task->pid);
++
++	return 0;
++}
++
++/*
++ * kdb_ll
++ *
++ * This function implements the 'll' command which follows a linked
++ * list and executes an arbitrary command for each element.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static int
++kdb_ll(int argc, const char **argv)
++{
++	int diag;
++	kdb_machreg_t addr;
++	long offset = 0;
++	kdb_machreg_t va;
++	unsigned long linkoffset;
++	int nextarg;
++	const char *command;
++
++	if (argc != 3)
++		return KDB_ARGCOUNT;
++
++	nextarg = 1;
++	diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++	if (diag)
++		return diag;
++
++	diag = kdbgetularg(argv[2], &linkoffset);
++	if (diag)
++		return diag;
++
++	/*
++	 * Using the starting address as the first element in the list,
++	 * and assuming that the list ends with a null pointer.
++	 */
++
++	va = addr;
++	if (!(command = kdb_strdup(argv[3], GFP_KDB))) {
++		kdb_printf("%s: cannot duplicate command\n", __FUNCTION__);
++		return 0;
++	}
++	/* Recursive use of kdb_parse, do not use argv after this point */
++	argv = NULL;
++
++	while (va) {
++		char buf[80];
++
++		/* NOTE(review): command plus formatted address must fit in
++		 * 80 bytes; a long command would overflow — confirm callers.
++		 */
++		sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
++		diag = kdb_parse(buf);
++		if (diag) {
++			/* Fix: free the duplicated command on the error
++			 * path; this previously leaked.
++			 */
++			kfree(command);
++			return diag;
++		}
++
++		addr = va + linkoffset;
++		/* Fix: on a read failure fall through to the common kfree
++		 * instead of returning and leaking 'command'.
++		 */
++		if (kdb_getword(&va, addr, sizeof(va)))
++			break;
++	}
++	kfree(command);
++
++	return 0;
++}
++
++/*
++ * kdb_help
++ *
++ * Implement the 'help' and '?' commands: print name, usage and a one
++ * line description for every registered kdb command.  Always returns
++ * zero.
++ */
++
++static int
++kdb_help(int argc, const char **argv)
++{
++	int slot;
++
++	kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description");
++	kdb_printf("----------------------------------------------------------\n");
++	for (slot = 0; slot < kdb_max_commands; slot++) {
++		kdbtab_t *entry = kdb_commands + slot;
++		/* Free table slots have a NULL name; skip them. */
++		if (!entry->cmd_name)
++			continue;
++		kdb_printf("%-15.15s %-20.20s %s\n", entry->cmd_name,
++			   entry->cmd_usage, entry->cmd_help);
++	}
++	return 0;
++}
++
++extern int kdb_wake_up_process(struct task_struct * p);
++
++/*
++ * kdb_kill
++ *
++ * This function implements the 'kill' commands.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static int
++kdb_kill(int argc, const char **argv)
++{
++	long sig, pid;
++	char *endp;
++	struct task_struct *p;
++	struct siginfo info;
++
++	if (argc != 2)
++		return KDB_ARGCOUNT;
++
++	/* The signal is given negated, e.g. "kill -9 1234". */
++	sig = simple_strtol(argv[1], &endp, 0);
++	if (*endp)
++		return KDB_BADINT;
++	if (sig >= 0) {
++		kdb_printf("Invalid signal parameter.<-signal>\n");
++		return 0;
++	}
++	sig = -sig;
++
++	pid = simple_strtol(argv[2], &endp, 0);
++	if (*endp)
++		return KDB_BADINT;
++	if (pid <= 0) {
++		/* Fix: message typo, was "must be large than 0". */
++		kdb_printf("Process ID must be larger than 0.\n");
++		return 0;
++	}
++
++	/* Find the process. */
++	if (!(p = find_task_by_pid(pid))) {
++		kdb_printf("The specified process isn't found.\n");
++		return 0;
++	}
++	p = p->group_leader;
++	info.si_signo = sig;
++	info.si_errno = 0;
++	info.si_code = SI_USER;
++	info.si_pid = pid;	/* use same capabilities as process being signalled */
++	info.si_uid = 0;	/* kdb has root authority */
++	/* NOTE(review): kdb_seqno is passed along; delivery presumably
++	 * happens when the system resumes — confirm in kdbsupport.c.
++	 */
++	kdb_send_sig_info(p, &info, kdb_seqno);
++	return 0;
++}
++
++/* Minimal broken-down UTC time, a stripped-down struct tm. */
++struct kdb_tm {
++ int tm_sec; /* seconds */
++ int tm_min; /* minutes */
++ int tm_hour; /* hours */
++ int tm_mday; /* day of the month */
++ int tm_mon; /* month */
++ int tm_year; /* year */
++};
++
++/* Convert a timespec to broken-down UTC time without library help.
++ * The day count is rebased from 1970 to 1968 so every 4-year block
++ * starts with its leap year; 2100 (not a leap year) breaks this.
++ */
++static void
++kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
++{
++ /* This will work from 1970-2099, 2100 is not a leap year */
++ static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
++ memset(tm, 0, sizeof(*tm));
++ tm->tm_sec = tv->tv_sec % (24 * 60 * 60);
++ tm->tm_mday = tv->tv_sec / (24 * 60 * 60) + (2 * 365 + 1); /* shift base from 1970 to 1968 */
++ tm->tm_min = tm->tm_sec / 60 % 60;
++ tm->tm_hour = tm->tm_sec / 60 / 60;
++ tm->tm_sec = tm->tm_sec % 60;
++ tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1));
++ tm->tm_mday %= (4*365+1);
++ /* The first year of each 4-year block is the leap year. */
++ mon_day[1] = 29;
++ while (tm->tm_mday >= mon_day[tm->tm_mon]) {
++ tm->tm_mday -= mon_day[tm->tm_mon];
++ if (++tm->tm_mon == 12) {
++ tm->tm_mon = 0;
++ ++tm->tm_year;
++ /* Past the leap year, February has 28 days for the rest
++ * of the block.
++ */
++ mon_day[1] = 28;
++ }
++ }
++ ++tm->tm_mday; /* tm_mday is 1-based */
++}
++
++/*
++ * Gather uptime, load averages, process count and memory/swap data.
++ * Most of this code has been lifted from kernel/timer.c::sys_sysinfo();
++ * that routine cannot be called from kdb because it has an
++ * unconditional cli()/sti() and calls routines that take locks which
++ * can stop the debugger.
++ */
++
++static void
++kdb_sysinfo(struct sysinfo *val)
++{
++	struct timespec uptime;
++
++	do_posix_clock_monotonic_gettime(&uptime);
++	memset(val, 0, sizeof(*val));
++	val->uptime = uptime.tv_sec;
++	val->loads[0] = avenrun[0];
++	val->loads[1] = avenrun[1];
++	val->loads[2] = avenrun[2];
++	val->procs = nr_threads - 1;
++	si_meminfo(val);
++	kdb_si_swapinfo(val);
++}
++
++/*
++ * kdb_summary
++ *
++ * This function implements the 'summary' command.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static int
++kdb_summary(int argc, const char **argv)
++{
++ extern struct timespec xtime;
++ extern struct timezone sys_tz;
++ struct kdb_tm tm;
++ struct sysinfo val;
++
++ if (argc)
++ return KDB_ARGCOUNT;
++
++ kdb_printf("sysname %s\n", init_uts_ns.name.sysname);
++ kdb_printf("release %s\n", init_uts_ns.name.release);
++ kdb_printf("version %s\n", init_uts_ns.name.version);
++ kdb_printf("machine %s\n", init_uts_ns.name.machine);
++ kdb_printf("nodename %s\n", init_uts_ns.name.nodename);
++ kdb_printf("domainname %s\n", init_uts_ns.name.domainname);
++
++ /* NOTE(review): xtime is read without xtime_lock; presumably an
++ * occasional torn read is acceptable inside the debugger — confirm.
++ */
++ kdb_gmtime(&xtime, &tm);
++ kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d tz_minuteswest %d\n",
++ 1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday,
++ tm.tm_hour, tm.tm_min, tm.tm_sec,
++ sys_tz.tz_minuteswest);
++
++ kdb_sysinfo(&val);
++ kdb_printf("uptime ");
++ if (val.uptime > (24*60*60)) {
++ int days = val.uptime / (24*60*60);
++ val.uptime %= (24*60*60);
++ kdb_printf("%d day%s ", days, days == 1 ? "" : "s");
++ }
++ kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);
++
++ /* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */
++
++/* Fixed-point load averages: integer and two-digit fractional parts. */
++#define LOAD_INT(x) ((x) >> FSHIFT)
++#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
++ kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n",
++ LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),
++ LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),
++ LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));
++ kdb_printf("\n");
++#undef LOAD_INT
++#undef LOAD_FRAC
++
++ kdb_meminfo_read_proc(); /* in fs/proc/proc_misc.c */
++
++ return 0;
++}
++
++/*
++ * kdb_per_cpu
++ *
++ * This function implements the 'per_cpu' command.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++
++static int
++kdb_per_cpu(int argc, const char **argv)
++{
++	char buf[256], fmtstr[64];
++	kdb_symtab_t symtab;
++	cpumask_t suppress = CPU_MASK_NONE;
++	int cpu, diag;
++	unsigned long addr, val, bytesperword = 0, whichcpu = ~0UL;
++
++	if (argc < 1 || argc > 3)
++		return KDB_ARGCOUNT;
++
++	/* per_cpu variables carry a "per_cpu__" symbol prefix. */
++	snprintf(buf, sizeof(buf), "per_cpu__%s", argv[1]);
++	if (!kdbgetsymval(buf, &symtab)) {
++		kdb_printf("%s is not a per_cpu variable\n", argv[1]);
++		return KDB_BADADDR;
++	}
++	if (argc >=2 && (diag = kdbgetularg(argv[2], &bytesperword)))
++		return diag;
++	if (!bytesperword)
++		bytesperword = KDB_WORD_SIZE;
++	else if (bytesperword > KDB_WORD_SIZE)
++		return KDB_BADWIDTH;
++	sprintf(fmtstr, "%%0%dlx ", (int)(2*bytesperword));
++	if (argc >= 3) {
++		if ((diag = kdbgetularg(argv[3], &whichcpu)))
++			return diag;
++		/* Fix: range-check whichcpu before indexing the online map
++		 * (out of bounds for values >= NR_CPUS), and print the
++		 * unsigned long with %lu instead of %ld.
++		 */
++		if (whichcpu >= NR_CPUS || !cpu_online(whichcpu)) {
++			kdb_printf("cpu %lu is not online\n", whichcpu);
++			return KDB_BADCPUNUM;
++		}
++	}
++
++	/* Most architectures use __per_cpu_offset[cpu], some use
++	 * __per_cpu_offset(cpu), smp has no __per_cpu_offset.
++	 */
++#ifdef __per_cpu_offset
++#define KDB_PCU(cpu) __per_cpu_offset(cpu)
++#else
++#ifdef CONFIG_SMP
++#define KDB_PCU(cpu) __per_cpu_offset[cpu]
++#else
++#define KDB_PCU(cpu) 0
++#endif
++#endif
++
++	for_each_online_cpu(cpu) {
++		if (whichcpu != ~0UL && whichcpu != cpu)
++			continue;
++		addr = symtab.sym_start + KDB_PCU(cpu);
++		if ((diag = kdb_getword(&val, addr, bytesperword))) {
++			kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to read, diag=%d\n",
++				   cpu, addr, diag);
++			continue;
++		}
++#ifdef CONFIG_SMP
++		/* Collect zero-valued cpus and report them compactly below. */
++		if (!val) {
++			cpu_set(cpu, suppress);
++			continue;
++		}
++#endif /* CONFIG_SMP */
++		kdb_printf("%5d ", cpu);
++		kdb_md_line(fmtstr, addr,
++			    bytesperword == KDB_WORD_SIZE,
++			    1, bytesperword, 1, 1, 0);
++	}
++	if (cpus_weight(suppress) == 0)
++		return 0;
++	kdb_printf("Zero suppressed cpu(s):");
++	for (cpu = first_cpu(suppress); cpu < NR_CPUS; cpu = next_cpu(cpu, suppress)) {
++		kdb_printf(" %d", cpu);
++		if (cpu == NR_CPUS-1 || next_cpu(cpu, suppress) != cpu + 1)
++			continue;
++		/* Compress consecutive suppressed cpus into a range. */
++		while (cpu < NR_CPUS && next_cpu(cpu, suppress) == cpu + 1)
++			++cpu;
++		kdb_printf("-%d", cpu);
++	}
++	kdb_printf("\n");
++
++#undef KDB_PCU
++
++	return 0;
++}
++
++
++/*
++ * kdb_register_repeat
++ *
++ * This function is used to register a kernel debugger command.
++ *
++ * Inputs:
++ * cmd Command name
++ * func Function to execute the command
++ * usage A simple usage string showing arguments
++ * help A simple help string describing command
++ * repeat Does the command auto repeat on enter?
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, one if a duplicate command.
++ * Locking:
++ * none.
++ * Remarks:
++ *
++ */
++
++#define kdb_command_extend 50 /* arbitrary */
++int
++kdb_register_repeat(char *cmd,
++ kdb_func_t func,
++ char *usage,
++ char *help,
++ short minlen,
++ kdb_repeat_t repeat)
++{
++ int i;
++ kdbtab_t *kp;
++
++ /*
++ * Brute force method to determine duplicates
++ */
++ for (i=0, kp=kdb_commands; i<kdb_max_commands; i++, kp++) {
++ if (kp->cmd_name && (strcmp(kp->cmd_name, cmd)==0)) {
++ kdb_printf("Duplicate kdb command registered: '%s'\n",
++ cmd);
++ return 1;
++ }
++ }
++
++ /*
++ * Insert command into first available location in table
++ */
++ for (i=0, kp=kdb_commands; i<kdb_max_commands; i++, kp++) {
++ if (kp->cmd_name == NULL) {
++ break;
++ }
++ }
++
++ /* Table full (or not yet allocated): grow it by kdb_command_extend
++ * entries, copy the old contents, zero the new tail and point kp at
++ * the first fresh slot.  NOTE(review): no locking here; presumably
++ * registration only happens at init or under module load — confirm.
++ */
++ if (i >= kdb_max_commands) {
++ kdbtab_t *new = kmalloc((kdb_max_commands + kdb_command_extend) * sizeof(*new), GFP_KDB);
++ if (!new) {
++ kdb_printf("Could not allocate new kdb_command table\n");
++ return 1;
++ }
++ if (kdb_commands) {
++ memcpy(new, kdb_commands, kdb_max_commands * sizeof(*new));
++ kfree(kdb_commands);
++ }
++ memset(new + kdb_max_commands, 0, kdb_command_extend * sizeof(*new));
++ kdb_commands = new;
++ kp = kdb_commands + kdb_max_commands;
++ kdb_max_commands += kdb_command_extend;
++ }
++
++ /* The cmd/usage/help strings are stored by reference, not copied;
++ * callers must keep them alive until kdb_unregister().
++ */
++ kp->cmd_name = cmd;
++ kp->cmd_func = func;
++ kp->cmd_usage = usage;
++ kp->cmd_help = help;
++ kp->cmd_flags = 0;
++ kp->cmd_minlen = minlen;
++ kp->cmd_repeat = repeat;
++
++ return 0;
++}
++
++/*
++ * kdb_register
++ *
++ * Compatibility wrapper around kdb_register_repeat() for commands
++ * that never auto-repeat: registers cmd with KDB_REPEAT_NONE.
++ *
++ * Returns zero for success, one if the command is already registered.
++ */
++
++int
++kdb_register(char *cmd, kdb_func_t func, char *usage, char *help,
++	     short minlen)
++{
++	return kdb_register_repeat(cmd, func, usage, help, minlen,
++				   KDB_REPEAT_NONE);
++}
++
++/*
++ * kdb_unregister
++ *
++ * Remove a previously registered kernel debugger command, typically
++ * when a module that provides kdb commands is unloaded.
++ *
++ * Returns zero for success, one if the command was not registered.
++ */
++
++int
++kdb_unregister(char *cmd)
++{
++	int slot;
++	kdbtab_t *entry;
++
++	for (slot = 0, entry = kdb_commands; slot < kdb_max_commands;
++	     slot++, entry++) {
++		if (entry->cmd_name && strcmp(entry->cmd_name, cmd) == 0) {
++			/* Mark the slot free; kdb_register_repeat reuses it. */
++			entry->cmd_name = NULL;
++			return 0;
++		}
++	}
++
++	/* Command was never registered. */
++	return 1;
++}
++
++/*
++ * kdb_inittab
++ *
++ * This function is called by the kdb_init function to initialize
++ * the kdb command table. It must be called prior to any other
++ * call to kdb_register_repeat.
++ *
++ * Inputs:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ *
++ */
++
++/* Clear any pre-existing command slots, then register the built-in
++ * command set.  Must run before any other kdb_register_repeat caller.
++ */
++static void __init
++kdb_inittab(void)
++{
++ int i;
++ kdbtab_t *kp;
++
++ for(i=0, kp=kdb_commands; i < kdb_max_commands; i++,kp++) {
++ kp->cmd_name = NULL;
++ }
++
++ kdb_register_repeat("md", kdb_md, "<vaddr>", "Display Memory Contents, also mdWcN, e.g. md8c1", 1, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>", "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>", "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("mds", kdb_md, "<vaddr>", "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>", "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("id", kdb_id, "<vaddr>", "Display Instructions", 1, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("go", kdb_go, "[<vaddr>]", "Continue Execution", 1, KDB_REPEAT_NONE);
++ kdb_register_repeat("rd", kdb_rd, "", "Display Registers", 1, KDB_REPEAT_NONE);
++ kdb_register_repeat("rm", kdb_rm, "<reg> <contents>", "Modify Registers", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("ef", kdb_ef, "<vaddr>", "Display exception frame", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("bt", kdb_bt, "[<vaddr>]", "Stack traceback", 1, KDB_REPEAT_NONE);
++ kdb_register_repeat("btp", kdb_bt, "<pid>", "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]", "Display stack all processes", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("btc", kdb_bt, "", "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("btt", kdb_bt, "<vaddr>", "Backtrace process given its struct task address", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>", "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("env", kdb_env, "", "Show environment variables", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("set", kdb_set, "", "Set environment variables", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("help", kdb_help, "", "Display Help Message", 1, KDB_REPEAT_NONE);
++ kdb_register_repeat("?", kdb_help, "", "Display Help Message", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("cpu", kdb_cpu, "<cpunum>","Switch to new cpu", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("ps", kdb_ps, "", "Display active task list", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("pid", kdb_pid, "<pidnum>", "Switch to another task", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("reboot", kdb_reboot, "", "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
++#if defined(CONFIG_MODULES)
++ kdb_register_repeat("lsmod", kdb_lsmod, "", "List loaded kernel modules", 0, KDB_REPEAT_NONE);
++#endif
++#if defined(CONFIG_MAGIC_SYSRQ)
++ kdb_register_repeat("sr", kdb_sr, "<key>", "Magic SysRq key", 0, KDB_REPEAT_NONE);
++#endif
++ kdb_register_repeat("dmesg", kdb_dmesg, "[lines]", "Display syslog buffer", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"", "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>", "Send a signal to a process", 0, KDB_REPEAT_NONE);
++ kdb_register_repeat("summary", kdb_summary, "", "Summarize the system", 4, KDB_REPEAT_NONE);
++ kdb_register_repeat("per_cpu", kdb_per_cpu, "", "Display per_cpu variables", 3, KDB_REPEAT_NONE);
++}
++
++/*
++ * kdb_cmd_init
++ *
++ * Execute the startup commands compiled into kdb_cmds[].  Called once
++ * from kdb_init().  Each command is echoed unless it is part of a
++ * defcmd body; an unterminated defcmd set is forcibly closed at the
++ * end.
++ */
++
++static void __init
++kdb_cmd_init(void)
++{
++	int cmd, rc;
++
++	for (cmd = 0; kdb_cmds[cmd]; ++cmd) {
++		if (!defcmd_in_progress)
++			kdb_printf("kdb_cmd[%d]: %s", cmd, kdb_cmds[cmd]);
++		rc = kdb_parse(kdb_cmds[cmd]);
++		if (rc)
++			kdb_printf("command failed, kdb diag %d\n", rc);
++	}
++
++	if (defcmd_in_progress) {
++		kdb_printf("Incomplete 'defcmd' set, forcing endefcmd\n");
++		kdb_parse("endefcmd");
++	}
++}
++
++/*
++ * kdb_panic
++ *
++ * Invoked via the panic_notifier_list.
++ *
++ * Inputs:
++ * None.
++ * Outputs:
++ * None.
++ * Returns:
++ * Zero.
++ * Locking:
++ * None.
++ * Remarks:
++ * When this function is called from panic(), the other cpus have already
++ * been stopped.
++ *
++ */
++
++static int
++kdb_panic(struct notifier_block *self, unsigned long command, void *ptr)
++{
++ KDB_FLAG_SET(CATASTROPHIC); /* kernel state is dubious now */
++ /* Drop into the debugger on panic. */
++ KDB_ENTER();
++ /* NOTE(review): returns 0 (NOTIFY_DONE) so remaining panic
++ * notifiers still run — confirm intended.
++ */
++ return 0;
++}
++
++/* Registered on panic_notifier_list by kdb_init(). */
++static struct notifier_block kdb_block = { kdb_panic, NULL, 0 };
++
++#ifdef CONFIG_SYSCTL
++/*
++ * Sysctl handler for /proc/sys/kernel/kdb.  Behaves exactly like
++ * proc_dointvec() except that writes are refused once kdb has switched
++ * itself off for lack of a working console.
++ */
++static int proc_do_kdb(ctl_table *table, int write, struct file *filp,
++		       void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++	if (write && KDB_FLAG(NO_CONSOLE)) {
++		printk(KERN_ERR "kdb has no working console and has switched itself off\n");
++		return -EINVAL;
++	}
++	return proc_dointvec(table, write, filp, buffer, lenp, ppos);
++}
++
++/* /proc/sys/kernel/kdb: runtime enable/disable flag backed by kdb_on,
++ * filtered through proc_do_kdb() above.
++ */
++static ctl_table kdb_kern_table[] = {
++ {
++ .ctl_name = KERN_KDB,
++ .procname = "kdb",
++ .data = &kdb_on,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_do_kdb,
++ },
++ {}
++};
++
++/* Hook the "kdb" entry under the existing "kernel" sysctl directory. */
++static ctl_table kdb_root_table[] = {
++ {
++ .ctl_name = CTL_KERN,
++ .procname = "kernel",
++ .mode = 0555,
++ .child = kdb_kern_table,
++ },
++ {}
++};
++#endif /* CONFIG_SYSCTL */
++
++/* CPU hotplug notifier: run kdb's per-cpu setup on each cpu as it
++ * comes online.
++ */
++static int
++kdb_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
++{
++ if (action == CPU_ONLINE) {
++ int cpu =(unsigned long)hcpu;
++ cpumask_t save_cpus_allowed = current->cpus_allowed;
++ cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
++ /* Temporarily bind this task to the new cpu so the setup call
++ * executes there, then restore the original affinity.
++ */
++ set_cpus_allowed(current, new_cpus_allowed);
++ kdb(KDB_REASON_CPU_UP, 0, NULL); /* do kdb setup on this cpu */
++ set_cpus_allowed(current, save_cpus_allowed);
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block kdb_cpu_nfb = {
++ .notifier_call = kdb_cpu_callback
++};
++
++/*
++ * kdb_init
++ *
++ * Initialize the kernel debugger environment.
++ *
++ * Parameters:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * None.
++ * Remarks:
++ * None.
++ */
++
++void __init
++kdb_init(void)
++{
++ kdb_initial_cpu = smp_processor_id();
++ /*
++ * This must be called before any calls to kdb_printf.
++ */
++ kdb_io_init();
++
++ kdb_inittab(); /* Initialize Command Table */
++ kdb_initbptab(); /* Initialize Breakpoint Table */
++ kdb_id_init(); /* Initialize Disassembler */
++ kdba_init(); /* Architecture Dependent Initialization */
++
++ /*
++ * Use printk() to get message in log_buf[];
++ */
++ printk("kdb version %d.%d%s by Keith Owens, Scott Lurndal. "\
++ "Copyright SGI, All Rights Reserved\n",
++ KDB_MAJOR_VERSION, KDB_MINOR_VERSION, KDB_TEST_VERSION);
++
++ kdb_cmd_init(); /* Preset commands from kdb_cmds */
++ kdb_initial_cpu = -1; /* Avoid recursion problems */
++ kdb(KDB_REASON_CPU_UP, 0, NULL); /* do kdb setup on boot cpu */
++ kdb_initial_cpu = smp_processor_id();
++ /* From here on, panics drop into kdb and new cpus get per-cpu setup. */
++ atomic_notifier_chain_register(&panic_notifier_list, &kdb_block);
++ register_cpu_notifier(&kdb_cpu_nfb);
++
++#ifdef kdba_setjmp
++ /* One recovery jump buffer per possible cpu. */
++ kdbjmpbuf = vmalloc(NR_CPUS * sizeof(*kdbjmpbuf));
++ if (!kdbjmpbuf)
++ printk(KERN_ERR "Cannot allocate kdbjmpbuf, no kdb recovery will be possible\n");
++#endif /* kdba_setjmp */
++
++ kdb_initial_cpu = -1;
++ /* Give bigger machines more time for all cpus to respond. */
++ kdb_wait_for_cpus_secs = max(10, 2*num_online_cpus());
++}
++
++#ifdef CONFIG_SYSCTL
++/* Register /proc/sys/kernel/kdb once the sysctl core is available. */
++static int __init
++kdb_late_init(void)
++{
++ register_sysctl_table(kdb_root_table);
++ return 0;
++}
++
++__initcall(kdb_late_init);
++#endif
++
++/* Exports for out-of-tree kdb extension modules (see kdb/modules). */
++EXPORT_SYMBOL(kdb_register);
++EXPORT_SYMBOL(kdb_register_repeat);
++EXPORT_SYMBOL(kdb_unregister);
++EXPORT_SYMBOL(kdb_getarea_size);
++EXPORT_SYMBOL(kdb_putarea_size);
++EXPORT_SYMBOL(kdb_getuserarea_size);
++EXPORT_SYMBOL(kdb_putuserarea_size);
++EXPORT_SYMBOL(kdbgetularg);
++EXPORT_SYMBOL(kdbgetenv);
++EXPORT_SYMBOL(kdbgetintenv);
++EXPORT_SYMBOL(kdbgetaddrarg);
++EXPORT_SYMBOL(kdb);
++EXPORT_SYMBOL(kdb_on);
++EXPORT_SYMBOL(kdb_seqno);
++EXPORT_SYMBOL(kdb_initial_cpu);
++EXPORT_SYMBOL(kdbnearsym);
++EXPORT_SYMBOL(kdb_printf);
++EXPORT_SYMBOL(kdb_symbol_print);
++EXPORT_SYMBOL(kdb_running_process);
+diff -Nurp linux-2.6.22-590/kdb/kdbsupport.c linux-2.6.22-600/kdb/kdbsupport.c
+--- linux-2.6.22-590/kdb/kdbsupport.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/kdbsupport.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,1162 @@
++/*
++ * Kernel Debugger Architecture Independent Support Functions
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ * 03/02/13 added new 2.5 kallsyms <xavier.bru@bull.net>
++ */
++
++#include <stdarg.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/kallsyms.h>
++#include <linux/stddef.h>
++#include <linux/vmalloc.h>
++#include <linux/ptrace.h>
++#include <linux/module.h>
++#include <linux/highmem.h>
++#include <linux/hardirq.h>
++#include <linux/delay.h>
++
++#include <asm/uaccess.h>
++
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++
++/*
++ * Symbol table functions.
++ */
++
++/*
++ * kdbgetsymval
++ *
++ * Return the address of the given symbol.
++ *
++ * Parameters:
++ * symname Character string containing symbol name
++ * symtab Structure to receive results
++ * Outputs:
++ * Returns:
++ * 0 Symbol not found, symtab zero filled
++ * 1 Symbol mapped to module/symbol/section, data in symtab
++ * Locking:
++ * None.
++ * Remarks:
++ */
++
++int
++kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
++{
++ unsigned long addr;
++
++ if (KDB_DEBUG(AR))
++ kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname, symtab);
++ memset(symtab, 0, sizeof(*symtab));
++
++ /* Resolve the symbol; a zero address means kallsyms has no match. */
++ addr = kallsyms_lookup_name(symname);
++ symtab->sym_start = addr;
++ if (addr) {
++ if (KDB_DEBUG(AR))
++ kdb_printf("kdbgetsymval: returns 1, symtab->sym_start=0x%lx\n", symtab->sym_start);
++ return 1;
++ }
++ if (KDB_DEBUG(AR))
++ kdb_printf("kdbgetsymval: returns 0\n");
++ return 0;
++}
++
++/*
++ * kdbnearsym
++ *
++ * Return the name of the symbol with the nearest address
++ * less than 'addr'.
++ *
++ * Parameters:
++ * addr Address to check for symbol near
++ * symtab Structure to receive results
++ * Outputs:
++ * Returns:
++ * 0 No sections contain this address, symtab zero filled
++ * 1 Address mapped to module/symbol/section, data in symtab
++ * Locking:
++ * None.
++ * Remarks:
++ * 2.6 kallsyms has a "feature" where it unpacks the name into a string.
++ * If that string is reused before the caller expects it then the caller
++ * sees its string change without warning. To avoid cluttering up the
++ * main kdb code with lots of kdb_strdup, tests and kfree calls, kdbnearsym
++ * maintains an LRU list of the last few unique strings. The list is sized
++ * large enough to hold active strings, no kdb caller of kdbnearsym makes
++ * more than ~20 later calls before using a saved value.
++ */
++
++static char *kdb_name_table[100]; /* arbitrary size */
++
++int
++kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
++{
++ int ret = 0;
++ unsigned long symbolsize;
++ unsigned long offset;
++#define knt1_size 128 /* must be >= kallsyms table size */
++ char *knt1 = NULL;
++
++ if (KDB_DEBUG(AR))
++ kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
++ memset(symtab, 0, sizeof(*symtab));
++
++ /* Addresses in the NULL page never map to a symbol */
++ if (addr < 4096)
++ goto out;
++ knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC);
++ if (!knt1) {
++ kdb_printf("kdbnearsym: addr=0x%lx cannot kmalloc knt1\n", addr);
++ goto out;
++ }
++ symtab->sym_name = kallsyms_lookup(addr, &symbolsize , &offset, (char **)(&symtab->mod_name), knt1);
++ /* An offset over 8MB means kallsyms matched a symbol absurdly far
++ * away; discard the lookup rather than report a bogus symbol.
++ */
++ if (offset > 8*1024*1024) {
++ symtab->sym_name = NULL;
++ addr = offset = symbolsize = 0;
++ }
++ symtab->sym_start = addr - offset;
++ symtab->sym_end = symtab->sym_start + symbolsize;
++ ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0';
++
++ if (ret) {
++ int i;
++ /* Another 2.6 kallsyms "feature". Sometimes the sym_name is
++ * set but the buffer passed into kallsyms_lookup is not used,
++ * so it contains garbage. The caller has to work out which
++ * buffer needs to be saved.
++ *
++ * What was Rusty smoking when he wrote that code?
++ */
++ if (symtab->sym_name != knt1) {
++ strncpy(knt1, symtab->sym_name, knt1_size);
++ knt1[knt1_size-1] = '\0';
++ }
++ /* Is this name already in the LRU cache of recent strings? */
++ for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
++ if (kdb_name_table[i] && strcmp(kdb_name_table[i], knt1) == 0)
++ break;
++ }
++ if (i >= ARRAY_SIZE(kdb_name_table)) {
++ /* Not cached: evict the oldest entry (slot 0) */
++ debug_kfree(kdb_name_table[0]);
++ memcpy(kdb_name_table, kdb_name_table+1,
++ sizeof(kdb_name_table[0])*(ARRAY_SIZE(kdb_name_table)-1));
++ } else {
++ /* Cached: reuse the existing string, shift it to the
++ * most-recently-used end of the table.
++ */
++ debug_kfree(knt1);
++ knt1 = kdb_name_table[i];
++ memcpy(kdb_name_table+i, kdb_name_table+i+1,
++ sizeof(kdb_name_table[0])*(ARRAY_SIZE(kdb_name_table)-i-1));
++ }
++ i = ARRAY_SIZE(kdb_name_table) - 1;
++ kdb_name_table[i] = knt1;
++ symtab->sym_name = kdb_name_table[i];
++ knt1 = NULL; /* now owned by the cache; must not be freed below */
++ }
++
++ if (symtab->mod_name == NULL)
++ symtab->mod_name = "kernel";
++ if (KDB_DEBUG(AR))
++ kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret, symtab->sym_start, symtab->mod_name, symtab->sym_name, symtab->sym_name);
++
++out:
++ debug_kfree(knt1);
++ return ret;
++}
++
++void
++kdbnearsym_cleanup(void)
++{
++ /* Release every symbol-name string cached by kdbnearsym's LRU. */
++ int i;
++ for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
++ char *cached = kdb_name_table[i];
++ if (!cached)
++ continue;
++ debug_kfree(cached);
++ kdb_name_table[i] = NULL;
++ }
++}
++
++/*
++ * kallsyms_symbol_complete
++ *
++ * Parameters:
++ * prefix_name prefix of a symbol name to lookup
++ * max_len maximum length that can be returned
++ * Returns:
++ * Number of symbols which match the given prefix.
++ * Notes:
++ * prefix_name is changed to contain the longest unique prefix that
++ * starts with this prefix (tab completion).
++ */
++
++/* Static scratch buffers shared between calls; NOTE(review): assumes no
++ * concurrent callers - kdb command processing appears single threaded.
++ */
++static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1];
++
++int kallsyms_symbol_complete(char *prefix_name, int max_len)
++{
++ loff_t pos = 0;
++ int prefix_len = strlen(prefix_name), prev_len = 0;
++ int i, number = 0;
++ const char *name;
++
++ while ((name = kdb_walk_kallsyms(&pos))) {
++ if (strncmp(name, prefix_name, prefix_len) == 0) {
++ strcpy(ks_namebuf, name);
++ /* Work out the longest name that matches the prefix */
++ if (++number == 1) {
++ prev_len = min_t(int, max_len-1, strlen(ks_namebuf));
++ memcpy(ks_namebuf_prev, ks_namebuf, prev_len);
++ ks_namebuf_prev[prev_len] = '\0';
++ } else for (i = 0; i < prev_len; ++i) {
++ /* Shrink the common prefix at first mismatch */
++ if (ks_namebuf[i] != ks_namebuf_prev[i]) {
++ prev_len = i;
++ ks_namebuf_prev[i] = '\0';
++ break;
++ }
++ }
++ }
++ }
++ if (prev_len > prefix_len)
++ memcpy(prefix_name, ks_namebuf_prev, prev_len+1);
++ return number;
++}
++
++/*
++ * kallsyms_symbol_next
++ *
++ * Parameters:
++ * prefix_name prefix of a symbol name to lookup
++ * flag 0 means search from the head, 1 means continue search.
++ * Returns:
++ * 1 if a symbol matches the given prefix.
++ * 0 if no string found
++ */
++
++int kallsyms_symbol_next(char *prefix_name, int flag)
++{
++ int prefix_len = strlen(prefix_name);
++ static loff_t pos; /* persists across calls so flag=1 resumes the walk */
++ const char *name;
++
++ if (!flag)
++ pos = 0;
++
++ while ((name = kdb_walk_kallsyms(&pos))) {
++ if (strncmp(name, prefix_name, prefix_len) == 0) {
++ /* NOTE(review): the bound is taken from the source, so
++ * this is effectively strcpy(); assumes prefix_name can
++ * hold a full symbol name (KSYM_NAME_LEN) - confirm at
++ * the callers.
++ */
++ strncpy(prefix_name, name, strlen(name)+1);
++ return 1;
++ }
++ }
++ return 0;
++}
++
++#if defined(CONFIG_SMP)
++/*
++ * kdb_ipi
++ *
++ * This function is called from the non-maskable interrupt
++ * handler to handle a kdb IPI instruction.
++ *
++ * Inputs:
++ * regs = Exception frame pointer
++ * Outputs:
++ * None.
++ * Returns:
++ * 0 - Did not handle NMI
++ * 1 - Handled NMI
++ * Locking:
++ * None.
++ * Remarks:
++ * Initially one processor is invoked in the kdb() code. That
++ * processor sends an ipi which drives this routine on the other
++ * processors. All this does is call kdb() with reason SWITCH.
++ * This puts all processors into the kdb() routine and all the
++ * code for breakpoints etc. is in one place.
++ * One problem with the way the kdb NMI is sent, the NMI has no
++ * identification that says it came from kdb. If the cpu's kdb state is
++ * marked as "waiting for kdb_ipi" then the NMI is treated as coming from
++ * kdb, otherwise it is assumed to be for another reason and is ignored.
++ */
++
++int
++kdb_ipi(struct pt_regs *regs, void (*ack_interrupt)(void))
++{
++ /* Do not print before checking and clearing WAIT_IPI, IPIs are
++ * going all the time.
++ */
++ if (KDB_STATE(WAIT_IPI)) {
++ /*
++ * Stopping other processors via smp_kdb_stop().
++ */
++ if (ack_interrupt)
++ (*ack_interrupt)(); /* Acknowledge the interrupt */
++ /* Clear WAIT_IPI so a later NMI is not mistaken for kdb's */
++ KDB_STATE_CLEAR(WAIT_IPI);
++ KDB_DEBUG_STATE("kdb_ipi 1", 0);
++ kdb(KDB_REASON_SWITCH, 0, regs); /* Spin in kdb() */
++ KDB_DEBUG_STATE("kdb_ipi 2", 0);
++ return 1; /* NMI was ours */
++ }
++ return 0; /* not a kdb NMI */
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * kdb_symbol_print
++ *
++ * Standard method for printing a symbol name and offset.
++ * Inputs:
++ * addr Address to be printed.
++ * symtab Address of symbol data, if NULL this routine does its
++ * own lookup.
++ * punc Punctuation for string, bit field.
++ * Outputs:
++ * None.
++ * Returns:
++ * Always 0.
++ * Locking:
++ * none.
++ * Remarks:
++ * The string and its punctuation is only printed if the address
++ * is inside the kernel, except that the value is always printed
++ * when requested.
++ */
++
++void
++kdb_symbol_print(kdb_machreg_t addr, const kdb_symtab_t *symtab_p, unsigned int punc)
++{
++ kdb_symtab_t local;
++ kdb_symtab_t *st;
++
++ /* Use the caller's symtab if supplied, otherwise look addr up here. */
++ if (symtab_p) {
++ st = (kdb_symtab_t *)symtab_p;
++ } else {
++ st = &local;
++ kdbnearsym(addr, st);
++ }
++ /* Nothing to print unless a symbol was found or the raw value was
++ * explicitly requested.
++ */
++ if (!st->sym_name && !(punc & KDB_SP_VALUE))
++ return;
++ if (punc & KDB_SP_SPACEB)
++ kdb_printf(" ");
++ if (punc & KDB_SP_VALUE)
++ kdb_printf(kdb_machreg_fmt0, addr);
++ if (st->sym_name) {
++ if (punc & KDB_SP_VALUE)
++ kdb_printf(" ");
++ if (punc & KDB_SP_PAREN)
++ kdb_printf("(");
++ /* Module prefix only for symbols outside the core kernel */
++ if (strcmp(st->mod_name, "kernel"))
++ kdb_printf("[%s]", st->mod_name);
++ kdb_printf("%s", st->sym_name);
++ if (addr != st->sym_start)
++ kdb_printf("+0x%lx", addr - st->sym_start);
++ if (punc & KDB_SP_SYMSIZE)
++ kdb_printf("/0x%lx", st->sym_end - st->sym_start);
++ if (punc & KDB_SP_PAREN)
++ kdb_printf(")");
++ }
++ if (punc & KDB_SP_SPACEA)
++ kdb_printf(" ");
++ if (punc & KDB_SP_NEWLINE)
++ kdb_printf("\n");
++}
++
++/*
++ * kdb_strdup
++ *
++ * kdb equivalent of strdup, for disasm code.
++ * Inputs:
++ * str The string to duplicate.
++ * type Flags to kmalloc for the new string.
++ * Outputs:
++ * None.
++ * Returns:
++ * Address of the new string, NULL if storage could not be allocated.
++ * Locking:
++ * none.
++ * Remarks:
++ * This is not in lib/string.c because it uses kmalloc which is not
++ * available when string.o is used in boot loaders.
++ */
++
++char *kdb_strdup(const char *str, gfp_t type)
++{
++ /* Like strdup() but with an explicit gfp mask for the allocation;
++ * returns NULL if storage could not be obtained.
++ */
++ size_t len = strlen(str) + 1;
++ char *dup = kmalloc(len, type);
++ if (dup)
++ memcpy(dup, str, len);
++ return dup;
++}
++
++/*
++ * kdb_getarea_size
++ *
++ * Read an area of data. The kdb equivalent of copy_from_user, with
++ * kdb messages for invalid addresses.
++ * Inputs:
++ * res Pointer to the area to receive the result.
++ * addr Address of the area to copy.
++ * size Size of the area.
++ * Outputs:
++ * none.
++ * Returns:
++ * 0 for success, < 0 for error.
++ * Locking:
++ * none.
++ */
++
++int kdb_getarea_size(void *res, unsigned long addr, size_t size)
++{
++ /* Delegate to the arch fault-safe copy; report the first bad address
++ * only once per SUPPRESS window to avoid flooding the console.
++ */
++ int diag = kdba_getarea_size(res, addr, size);
++ if (!diag) {
++ KDB_STATE_CLEAR(SUPPRESS);
++ return 0;
++ }
++ if (!KDB_STATE(SUPPRESS)) {
++ kdb_printf("kdb_getarea: Bad address 0x%lx\n", addr);
++ KDB_STATE_SET(SUPPRESS);
++ }
++ return KDB_BADADDR;
++}
++
++/*
++ * kdb_putarea_size
++ *
++ * Write an area of data. The kdb equivalent of copy_to_user, with
++ * kdb messages for invalid addresses.
++ * Inputs:
++ * addr Address of the area to write to.
++ * res Pointer to the area holding the data.
++ * size Size of the area.
++ * Outputs:
++ * none.
++ * Returns:
++ * 0 for success, < 0 for error.
++ * Locking:
++ * none.
++ */
++
++int kdb_putarea_size(unsigned long addr, void *res, size_t size)
++{
++ /* Delegate to the arch fault-safe copy; report the first bad address
++ * only once per SUPPRESS window to avoid flooding the console.
++ */
++ int diag = kdba_putarea_size(addr, res, size);
++ if (!diag) {
++ KDB_STATE_CLEAR(SUPPRESS);
++ return 0;
++ }
++ if (!KDB_STATE(SUPPRESS)) {
++ kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr);
++ KDB_STATE_SET(SUPPRESS);
++ }
++ return KDB_BADADDR;
++}
++
++/*
++ * kdb_getphys
++ *
++ * Read data from a physical address. Validate the address is in range,
++ * use kmap_atomic() to get data
++ *
++ * Similar to kdb_getarea() - but for phys addresses
++ *
++ * Inputs:
++ * res Pointer to the word to receive the result
++ * addr Physical address of the area to copy
++ * size Size of the area
++ * Outputs:
++ * none.
++ * Returns:
++ * 0 for success, < 0 for error.
++ * Locking:
++ * none.
++ */
++static int kdb_getphys(void *res, unsigned long addr, size_t size)
++{
++ unsigned long pfn;
++ void *vaddr;
++ struct page *page;
++
++ pfn = (addr >> PAGE_SHIFT);
++ if (!pfn_valid(pfn))
++ return 1; /* no backing page; caller maps this to an error */
++ page = pfn_to_page(pfn);
++ /* Atomic kmap: kdb can be entered from interrupt/NMI context */
++ vaddr = kmap_atomic(page, KM_KDB);
++ memcpy(res, vaddr + (addr & (PAGE_SIZE -1)), size);
++ kunmap_atomic(vaddr, KM_KDB);
++
++ return 0;
++}
++
++/*
++ * kdb_getphysword
++ *
++ * Inputs:
++ * word Pointer to the word to receive the result.
++ * addr Address of the area to copy.
++ * size Size of the area.
++ * Outputs:
++ * none.
++ * Returns:
++ * 0 for success, < 0 for error.
++ * Locking:
++ * none.
++ */
++int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
++{
++ int diag;
++ __u8 w1;
++ __u16 w2;
++ __u32 w4;
++ __u64 w8;
++ *word = 0; /* Default value if addr or size is invalid */
++
++ switch (size) {
++ case 1:
++ if (!(diag = kdb_getphys(&w1, addr, sizeof(w1))))
++ *word = w1;
++ break;
++ case 2:
++ if (!(diag = kdb_getphys(&w2, addr, sizeof(w2))))
++ *word = w2;
++ break;
++ case 4:
++ if (!(diag = kdb_getphys(&w4, addr, sizeof(w4))))
++ *word = w4;
++ break;
++ case 8:
++ /* Only when long is 8 bytes; on 32-bit an 8-byte value cannot
++ * be returned in *word, so fall through to the BADWIDTH case.
++ */
++ if (size <= sizeof(*word)) {
++ if (!(diag = kdb_getphys(&w8, addr, sizeof(w8))))
++ *word = w8;
++ break;
++ }
++ /* drop through */
++ default:
++ diag = KDB_BADWIDTH;
++ kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
++ }
++ return(diag);
++}
++
++/*
++ * kdb_getword
++ *
++ * Read a binary value. Unlike kdb_getarea, this treats data as numbers.
++ * Inputs:
++ * word Pointer to the word to receive the result.
++ * addr Address of the area to copy.
++ * size Size of the area.
++ * Outputs:
++ * none.
++ * Returns:
++ * 0 for success, < 0 for error.
++ * Locking:
++ * none.
++ */
++
++int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
++{
++ int diag;
++ __u8 w1;
++ __u16 w2;
++ __u32 w4;
++ __u64 w8;
++ *word = 0; /* Default value if addr or size is invalid */
++ switch (size) {
++ case 1:
++ if (!(diag = kdb_getarea(w1, addr)))
++ *word = w1;
++ break;
++ case 2:
++ if (!(diag = kdb_getarea(w2, addr)))
++ *word = w2;
++ break;
++ case 4:
++ if (!(diag = kdb_getarea(w4, addr)))
++ *word = w4;
++ break;
++ case 8:
++ /* Only when long is 8 bytes; on 32-bit an 8-byte value cannot
++ * be returned in *word, so fall through to the BADWIDTH case.
++ */
++ if (size <= sizeof(*word)) {
++ if (!(diag = kdb_getarea(w8, addr)))
++ *word = w8;
++ break;
++ }
++ /* drop through */
++ default:
++ diag = KDB_BADWIDTH;
++ kdb_printf("kdb_getword: bad width %ld\n", (long) size);
++ }
++ return(diag);
++}
++
++/*
++ * kdb_putword
++ *
++ * Write a binary value. Unlike kdb_putarea, this treats data as numbers.
++ * Inputs:
++ * addr Address of the area to write to..
++ * word The value to set.
++ * size Size of the area.
++ * Outputs:
++ * none.
++ * Returns:
++ * 0 for success, < 0 for error.
++ * Locking:
++ * none.
++ */
++
++int kdb_putword(unsigned long addr, unsigned long word, size_t size)
++{
++ int diag;
++ __u8 w1;
++ __u16 w2;
++ __u32 w4;
++ __u64 w8;
++ switch (size) {
++ case 1:
++ w1 = word;
++ diag = kdb_putarea(addr, w1);
++ break;
++ case 2:
++ w2 = word;
++ diag = kdb_putarea(addr, w2);
++ break;
++ case 4:
++ w4 = word;
++ diag = kdb_putarea(addr, w4);
++ break;
++ case 8:
++ /* Only when long is 8 bytes; on 32-bit 'word' cannot carry an
++ * 8-byte value, so fall through to the BADWIDTH case.
++ */
++ if (size <= sizeof(word)) {
++ w8 = word;
++ diag = kdb_putarea(addr, w8);
++ break;
++ }
++ /* drop through */
++ default:
++ diag = KDB_BADWIDTH;
++ kdb_printf("kdb_putword: bad width %ld\n", (long) size);
++ }
++ return(diag);
++}
++
++/*
++ * kdb_task_state_string
++ *
++ * Convert a string containing any of the letters DRSTCZEUIMA to a mask
++ * for the process state field and return the value. If no argument is
++ * supplied, return the mask that corresponds to environment variable PS,
++ * DRSTCZEU by default.
++ * Inputs:
++ * s String to convert
++ * Outputs:
++ * none.
++ * Returns:
++ * Mask for process state.
++ * Locking:
++ * none.
++ * Notes:
++ * The mask folds data from several sources into a single long value, so
++ * be carefull not to overlap the bits. TASK_* bits are in the LSB,
++ * special cases like UNRUNNABLE are in the MSB. As of 2.6.10-rc1 there
++ * is no overlap between TASK_* and EXIT_* but that may not always be
++ * true, so EXIT_* bits are shifted left 16 bits before being stored in
++ * the mask.
++ */
++
++/* Special-case state bits kept in the MSBs of the mask, away from the
++ * TASK_* (LSB) and EXIT_*<<16 ranges - see the header comment above.
++ */
++#define UNRUNNABLE (1UL << (8*sizeof(unsigned long) - 1)) /* unrunnable is < 0 */
++#define RUNNING (1UL << (8*sizeof(unsigned long) - 2))
++#define IDLE (1UL << (8*sizeof(unsigned long) - 3))
++#define DAEMON (1UL << (8*sizeof(unsigned long) - 4))
++
++unsigned long
++kdb_task_state_string(const char *s)
++{
++ long res = 0;
++ if (!s && !(s = kdbgetenv("PS"))) {
++ s = "DRSTCZEU"; /* default value for ps */
++ }
++ while (*s) {
++ switch (*s) {
++ case 'D': res |= TASK_UNINTERRUPTIBLE; break;
++ case 'R': res |= RUNNING; break;
++ case 'S': res |= TASK_INTERRUPTIBLE; break;
++ case 'T': res |= TASK_STOPPED; break;
++ case 'C': res |= TASK_TRACED; break;
++ case 'Z': res |= EXIT_ZOMBIE << 16; break;
++ case 'E': res |= EXIT_DEAD << 16; break;
++ case 'U': res |= UNRUNNABLE; break;
++ case 'I': res |= IDLE; break;
++ case 'M': res |= DAEMON; break;
++ case 'A': res = ~0UL; break; /* 'A' matches every task */
++ default:
++ kdb_printf("%s: unknown flag '%c' ignored\n", __FUNCTION__, *s);
++ break;
++ }
++ ++s;
++ }
++ return res;
++}
++
++/*
++ * kdb_task_state_char
++ *
++ * Return the character that represents the task state.
++ * Inputs:
++ * p struct task for the process
++ * Outputs:
++ * none.
++ * Returns:
++ * One character to represent the task state.
++ * Locking:
++ * none.
++ */
++
++char
++kdb_task_state_char (const struct task_struct *p)
++{
++ int cpu = kdb_process_cpu(p);
++ struct kdb_running_process *krp = kdb_running_process + cpu;
++ /* Map the first matching state, highest priority first */
++ char state = (p->state == 0) ? 'R' :
++ (p->state < 0) ? 'U' :
++ (p->state & TASK_UNINTERRUPTIBLE) ? 'D' :
++ (p->state & TASK_STOPPED) ? 'T' :
++ (p->state & TASK_TRACED) ? 'C' :
++ (p->exit_state & EXIT_ZOMBIE) ? 'Z' :
++ (p->exit_state & EXIT_DEAD) ? 'E' :
++ (p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
++ if (p->pid == 0) {
++ /* Idle task. Is it really idle, apart from the kdb interrupt? */
++ if (!kdb_task_has_cpu(p) || krp->irq_depth == 1) {
++ /* There is a corner case when the idle task takes an
++ * interrupt and dies in the interrupt code. It has an
++ * interrupt count of 1 but that did not come from kdb.
++ * This corner case can only occur on the initial cpu,
++ * all the others were entered via the kdb IPI.
++ */
++ if (cpu != kdb_initial_cpu || KDB_STATE_CPU(KEYBOARD, cpu))
++ state = 'I'; /* idle task */
++ }
++ }
++ else if (!p->mm && state == 'S') {
++ state = 'M'; /* sleeping system daemon */
++ }
++ return state;
++}
++
++/*
++ * kdb_task_state
++ *
++ * Return true if a process has the desired state given by the mask.
++ * Inputs:
++ * p struct task for the process
++ * mask mask from kdb_task_state_string to select processes
++ * Outputs:
++ * none.
++ * Returns:
++ * True if the process matches at least one criteria defined by the mask.
++ * Locking:
++ * none.
++ */
++
++unsigned long
++kdb_task_state(const struct task_struct *p, unsigned long mask)
++{
++ /* Convert the task's one-letter state into a mask and test it. */
++ char state_str[2];
++ state_str[0] = kdb_task_state_char(p);
++ state_str[1] = '\0';
++ return (kdb_task_state_string(state_str) & mask) != 0;
++}
++
++/* Per-cpu snapshot of whatever was running when kdb took the cpu over */
++struct kdb_running_process kdb_running_process[NR_CPUS];
++
++/*
++ * kdb_save_running
++ *
++ * Save the state of a running process. This is invoked on the current
++ * process on each cpu (assuming the cpu is responding).
++ * Inputs:
++ * regs struct pt_regs for the process
++ * Outputs:
++ * Updates kdb_running_process[] for this cpu.
++ * Returns:
++ * none.
++ * Locking:
++ * none.
++ */
++
++void
++kdb_save_running(struct pt_regs *regs)
++{
++ struct kdb_running_process *krp = kdb_running_process + smp_processor_id();
++ krp->p = current;
++ krp->regs = regs;
++ krp->seqno = kdb_seqno;
++ /* hardirq nesting depth at entry; kdb_task_state_char uses this to
++ * tell a truly idle cpu from one that died inside an interrupt.
++ */
++ krp->irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
++ kdba_save_running(&(krp->arch), regs);
++}
++
++/*
++ * kdb_unsave_running
++ *
++ * Reverse the effect of kdb_save_running.
++ * Inputs:
++ * regs struct pt_regs for the process
++ * Outputs:
++ * Updates kdb_running_process[] for this cpu.
++ * Returns:
++ * none.
++ * Locking:
++ * none.
++ */
++
++void
++kdb_unsave_running(struct pt_regs *regs)
++{
++ struct kdb_running_process *krp = kdb_running_process + smp_processor_id();
++ kdba_unsave_running(&(krp->arch), regs);
++ krp->seqno = 0; /* mark this cpu's snapshot as stale */
++}
++
++
++/*
++ * kdb_print_nameval
++ *
++ * Print a name and its value, converting the value to a symbol lookup
++ * if possible.
++ * Inputs:
++ * name field name to print
++ * val value of field
++ * Outputs:
++ * none.
++ * Returns:
++ * none.
++ * Locking:
++ * none.
++ */
++
++void
++kdb_print_nameval(const char *name, unsigned long val)
++{
++ kdb_symtab_t symtab;
++ kdb_printf(" %-11.11s ", name);
++ /* Print the raw value unless it resolves to a kernel symbol. */
++ if (!kdbnearsym(val, &symtab)) {
++ kdb_printf("0x%lx\n", val);
++ return;
++ }
++ kdb_symbol_print(val, &symtab, KDB_SP_VALUE|KDB_SP_SYMSIZE|KDB_SP_NEWLINE);
++}
++
++/* Look up the single user page backing [start, start+len) in tsk's address
++ * space. Returns NULL when the range crosses a page boundary, is unmapped,
++ * is VM_IO, or lacks the requested (read/write) access rights.
++ */
++static struct page * kdb_get_one_user_page(const struct task_struct *tsk, unsigned long start,
++ int len, int write)
++{
++ struct mm_struct *mm = tsk->mm;
++ unsigned int flags;
++ struct vm_area_struct * vma;
++
++ /* shouldn't cross a page boundary. */
++ if ((start & PAGE_MASK) != ((start+len) & PAGE_MASK))
++ return NULL;
++
++ /* we need to align start address to the current page boundary, PAGE_ALIGN
++ * aligns to the next page boundary.
++ * FIXME: What about hugetlb?
++ */
++ start = start & PAGE_MASK;
++ flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
++
++ vma = find_extend_vma(mm, start);
++
++ /* maybe we can allow access to VM_IO pages inside KDB? */
++ if (!vma || (vma->vm_flags & VM_IO) || !(flags & vma->vm_flags))
++ return NULL;
++
++ return follow_page(vma, start, write ? FOLL_WRITE : 0);
++}
++
++/* Copy size bytes of the current task's user memory at 'from' into 'to'.
++ * Returns 0 on success, or the (uncopied) size if the page is unavailable.
++ */
++int kdb_getuserarea_size(void *to, unsigned long from, size_t size)
++{
++ struct page *page;
++ void *vaddr;
++
++ page = kdb_get_one_user_page(kdb_current_task, from, size, 0);
++ if (!page)
++ return size;
++
++ vaddr = kmap_atomic(page, KM_KDB);
++ memcpy(to, vaddr+ (from & (PAGE_SIZE - 1)), size);
++ kunmap_atomic(vaddr, KM_KDB);
++
++ return 0;
++}
++
++/* Copy size bytes from 'from' into the current task's user memory at 'to'.
++ * Returns 0 on success, or the (unwritten) size if the page is unavailable.
++ */
++int kdb_putuserarea_size(unsigned long to, void *from, size_t size)
++{
++ struct page *page;
++ void *vaddr;
++
++ page = kdb_get_one_user_page(kdb_current_task, to, size, 1);
++ if (!page)
++ return size;
++
++ vaddr = kmap_atomic(page, KM_KDB);
++ memcpy(vaddr+ (to & (PAGE_SIZE - 1)), from, size);
++ kunmap_atomic(vaddr, KM_KDB);
++
++ return 0;
++}
++
++/* Last ditch allocator for debugging, so we can still debug even when the
++ * GFP_ATOMIC pool has been exhausted. The algorithms are tuned for space
++ * usage, not for speed. One smallish memory pool, the free chain is always in
++ * ascending address order to allow coalescing, allocations are done in brute
++ * force best fit.
++ */
++
++struct debug_alloc_header {
++ u32 next; /* offset of next header from start of pool */
++ u32 size; /* usable bytes in this block, excluding the header */
++ void *caller; /* allocation site; NULL while on the free chain */
++};
++
++/* The memory returned by this allocator must be aligned, which means so must
++ * the header size. Do not assume that sizeof(struct debug_alloc_header) is a
++ * multiple of the alignment, explicitly calculate the overhead of this header,
++ * including the alignment. The rest of this code must not use sizeof() on any
++ * header or pointer to a header.
++ */
++#define dah_align 8
++#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align)
++
++static u64 debug_alloc_pool_aligned[128*1024/dah_align]; /* 128K pool */
++static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned;
++/* dah_first: offset of the first free header; dah_first_call: pool not yet
++ * initialized; dah_used/dah_used_max: current and high-water byte counts.
++ */
++static u32 dah_first, dah_first_call = 1, dah_used = 0, dah_used_max = 0;
++
++/* Locking is awkward. The debug code is called from all contexts, including
++ * non maskable interrupts. A normal spinlock is not safe in NMI context. Try
++ * to get the debug allocator lock, if it cannot be obtained after a second
++ * then give up. If the lock could not be previously obtained on this cpu then
++ * only try once.
++ *
++ * sparse has no annotation for "this function _sometimes_ acquires a lock", so
++ * fudge the acquire/release notation.
++ */
++static DEFINE_SPINLOCK(dap_lock);
++/* Try to take dap_lock, spinning for up to ~1 second. If this cpu already
++ * timed out on a previous attempt, try only once to avoid repeated stalls.
++ * Returns 1 with the lock held, 0 if it could not be taken.
++ */
++static int
++get_dap_lock(void)
++ __acquires(dap_lock)
++{
++ static int dap_locked = -1;
++ int count;
++ if (dap_locked == smp_processor_id())
++ count = 1;
++ else
++ count = 1000;
++ while (1) {
++ if (spin_trylock(&dap_lock)) {
++ dap_locked = -1;
++ return 1;
++ }
++ if (!count--)
++ break;
++ udelay(1000);
++ }
++ dap_locked = smp_processor_id();
++ __acquire(dap_lock);
++ return 0;
++}
++
++void
++*debug_kmalloc(size_t size, gfp_t flags)
++{
++ unsigned int rem, h_offset;
++ struct debug_alloc_header *best, *bestprev, *prev, *h;
++ void *p = NULL;
++ if (!get_dap_lock()) {
++ __release(dap_lock); /* we never actually got it */
++ return NULL;
++ }
++ h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
++ if (dah_first_call) {
++ /* Lazily initialize the pool as one big free block */
++ h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead;
++ dah_first_call = 0;
++ }
++ size = ALIGN(size, dah_align);
++ prev = best = bestprev = NULL;
++ /* Brute force best-fit walk of the ascending free chain */
++ while (1) {
++ if (h->size >= size && (!best || h->size < best->size)) {
++ best = h;
++ bestprev = prev;
++ if (h->size == size)
++ break;
++ }
++ if (!h->next)
++ break;
++ prev = h;
++ h = (struct debug_alloc_header *)(debug_alloc_pool + h->next);
++ }
++ if (!best)
++ goto out;
++ rem = best->size - size;
++ /* The pool must always contain at least one header */
++ if (best->next == 0 && bestprev == NULL && rem < dah_overhead)
++ goto out;
++ if (rem >= dah_overhead) {
++ /* Split: carve a new free header out of the remainder */
++ best->size = size;
++ h_offset = ((char *)best - debug_alloc_pool) +
++ dah_overhead + best->size;
++ h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset);
++ h->size = rem - dah_overhead;
++ h->next = best->next;
++ } else
++ h_offset = best->next;
++ best->caller = __builtin_return_address(0);
++ dah_used += best->size;
++ dah_used_max = max(dah_used, dah_used_max);
++ /* Unlink the allocated block from the free chain */
++ if (bestprev)
++ bestprev->next = h_offset;
++ else
++ dah_first = h_offset;
++ p = (char *)best + dah_overhead;
++ memset(p, POISON_INUSE, best->size - 1);
++ *((char *)p + best->size - 1) = POISON_END;
++out:
++ spin_unlock(&dap_lock);
++ return p;
++}
++
++void
++debug_kfree(void *p)
++{
++ struct debug_alloc_header *h;
++ unsigned int h_offset;
++ if (!p)
++ return;
++ /* Pointers outside the debug pool came from the normal allocator */
++ if ((char *)p < debug_alloc_pool ||
++ (char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) {
++ kfree(p);
++ return;
++ }
++ if (!get_dap_lock()) {
++ __release(dap_lock); /* we never actually got it */
++ return; /* memory leak, cannot be helped */
++ }
++ h = (struct debug_alloc_header *)((char *)p - dah_overhead);
++ memset(p, POISON_FREE, h->size - 1);
++ *((char *)p + h->size - 1) = POISON_END;
++ h->caller = NULL;
++ dah_used -= h->size;
++ h_offset = (char *)h - debug_alloc_pool;
++ /* Insert into the free chain, kept in ascending address order so
++ * adjacent blocks can be coalesced.
++ */
++ if (h_offset < dah_first) {
++ h->next = dah_first;
++ dah_first = h_offset;
++ } else {
++ struct debug_alloc_header *prev;
++ unsigned int prev_offset;
++ prev = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
++ while (1) {
++ if (!prev->next || prev->next > h_offset)
++ break;
++ prev = (struct debug_alloc_header *)
++ (debug_alloc_pool + prev->next);
++ }
++ prev_offset = (char *)prev - debug_alloc_pool;
++ /* Coalesce with the preceding free block if adjacent */
++ if (prev_offset + dah_overhead + prev->size == h_offset) {
++ prev->size += dah_overhead + h->size;
++ memset(h, POISON_FREE, dah_overhead - 1);
++ *((char *)h + dah_overhead - 1) = POISON_END;
++ h = prev;
++ h_offset = prev_offset;
++ } else {
++ h->next = prev->next;
++ prev->next = h_offset;
++ }
++ }
++ /* Coalesce with the following free block if adjacent */
++ if (h_offset + dah_overhead + h->size == h->next) {
++ struct debug_alloc_header *next;
++ next = (struct debug_alloc_header *)
++ (debug_alloc_pool + h->next);
++ h->size += dah_overhead + next->size;
++ h->next = next->next;
++ memset(next, POISON_FREE, dah_overhead - 1);
++ *((char *)next + dah_overhead - 1) = POISON_END;
++ }
++ spin_unlock(&dap_lock);
++}
++
++/* Report (once) any debug_kmalloc memory still allocated - blocks that sit
++ * in the gaps between free-chain entries are leaks, printed with the
++ * caller recorded at allocation time.
++ */
++void
++debug_kusage(void)
++{
++ struct debug_alloc_header *h_free, *h_used;
++#ifdef CONFIG_IA64
++ /* FIXME: using dah for ia64 unwind always results in a memory leak.
++ * Fix that memory leak first, then set debug_kusage_one_time = 1 for
++ * all architectures.
++ */
++ static int debug_kusage_one_time = 0;
++#else
++ static int debug_kusage_one_time = 1;
++#endif
++ if (!get_dap_lock()) {
++ __release(dap_lock); /* we never actually got it */
++ return;
++ }
++ /* Nothing leaked if the pool is one free block (or never used) */
++ h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
++ if (dah_first == 0 &&
++ (h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead ||
++ dah_first_call))
++ goto out;
++ if (!debug_kusage_one_time)
++ goto out;
++ debug_kusage_one_time = 0;
++ kdb_printf("%s: debug_kmalloc memory leak dah_first %d\n",
++ __FUNCTION__, dah_first);
++ if (dah_first) {
++ h_used = (struct debug_alloc_header *)debug_alloc_pool;
++ kdb_printf("%s: h_used %p size %d\n", __FUNCTION__, h_used, h_used->size);
++ }
++ /* Walk the free chain; anything between free blocks is in use */
++ do {
++ h_used = (struct debug_alloc_header *)
++ ((char *)h_free + dah_overhead + h_free->size);
++ kdb_printf("%s: h_used %p size %d caller %p\n",
++ __FUNCTION__, h_used, h_used->size, h_used->caller);
++ h_free = (struct debug_alloc_header *)
++ (debug_alloc_pool + h_free->next);
++ } while (h_free->next);
++ h_used = (struct debug_alloc_header *)
++ ((char *)h_free + dah_overhead + h_free->size);
++ if ((char *)h_used - debug_alloc_pool !=
++ sizeof(debug_alloc_pool_aligned))
++ kdb_printf("%s: h_used %p size %d caller %p\n",
++ __FUNCTION__, h_used, h_used->size, h_used->caller);
++out:
++ spin_unlock(&dap_lock);
++}
++
++/* Maintain a small stack of kdb_flags to allow recursion without disturbing
++ * the global kdb state.
++ */
++
++static int kdb_flags_stack[4], kdb_flags_index;
++
++void
++kdb_save_flags(void)
++{
++ BUG_ON(kdb_flags_index >= ARRAY_SIZE(kdb_flags_stack));
++ kdb_flags_stack[kdb_flags_index++] = kdb_flags;
++}
++
++void
++kdb_restore_flags(void)
++{
++ BUG_ON(kdb_flags_index <= 0);
++ kdb_flags = kdb_flags_stack[--kdb_flags_index];
++}
+diff -Nurp linux-2.6.22-590/kdb/Makefile linux-2.6.22-600/kdb/Makefile
+--- linux-2.6.22-590/kdb/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/Makefile 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,45 @@
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License. See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++# Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++#
++
++obj-y := kdb_bt.o kdb_bp.o kdb_id.o kdbsupport.o gen-kdb_cmds.o kdbmain.o kdb_io.o
++
++# The i386 and x86_64 backtrace commands are handled by common code.
++ifdef CONFIG_X86
++ obj-y += kdba_bt_x86.o
++ ifneq (,$(findstring -fno-optimize-sibling-calls,$(CFLAGS)))
++ CFLAGS_kdba_bt_x86.o += -DNO_SIBLINGS
++ endif
++ REGPARM := $(subst -mregparm=,,$(filter -mregparm=%,$(CFLAGS)))
++ ifeq (,$(REGPARM))
++ ifdef CONFIG_X86_64
++ REGPARM := 6
++ else
++ REGPARM := 0
++ endif
++ endif
++ CFLAGS_kdba_bt_x86.o += -DREGPARM=$(REGPARM)
++endif
++
++subdir-$(CONFIG_KDB_MODULES) := modules
++obj-y += $(addsuffix /built-in.o, $(subdir-y))
++
++clean-files := gen-kdb_cmds.c
++
++override CFLAGS := $(CFLAGS:%-pg=% )
++
++quiet_cmd_gen-kdb = GENKDB $@
++ cmd_gen-kdb = $(AWK) 'BEGIN {print "\#include <linux/stddef.h>"; print "\#include <linux/init.h>"} \
++ /^\#/{next} \
++ /^[ \t]*$$/{next} \
++ {gsub(/"/, "\\\"", $$0); \
++ print "static __initdata char kdb_cmd" cmds++ "[] = \"" $$0 "\\n\";"} \
++ END {print "extern char *kdb_cmds[]; char __initdata *kdb_cmds[] = {"; for (i = 0; i < cmds; ++i) {print " kdb_cmd" i ","}; print(" NULL\n};");}' \
++ $(filter-out %/Makefile,$^) > $@
++
++$(obj)/gen-kdb_cmds.c: $(src)/kdb_cmds $(wildcard $(TOPDIR)/arch/$(ARCH)/kdb/kdb_cmds) $(src)/Makefile
++ $(call cmd,gen-kdb)
+diff -Nurp linux-2.6.22-590/kdb/modules/kdbm_pg.c linux-2.6.22-600/kdb/modules/kdbm_pg.c
+--- linux-2.6.22-590/kdb/modules/kdbm_pg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/modules/kdbm_pg.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,645 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <linux/pagemap.h>
++#include <linux/fs.h>
++#include <linux/bio.h>
++#include <linux/buffer_head.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/blkdev.h>
++#include <linux/ctype.h>
++
++MODULE_AUTHOR("SGI");
++MODULE_DESCRIPTION("Debug page information");
++MODULE_LICENSE("GPL");
++
++/* Standard Linux page stuff */
++
++#ifndef CONFIG_DISCONTIGMEM
++/* From include/linux/page-flags.h */
++static char *pg_flag_vals[] = {
++ "PG_locked", "PG_error", "PG_referenced", "PG_uptodate",
++ "PG_dirty", "PG_lru", "PG_active", "PG_slab",
++ "PG_checked", "PG_arch_1", "PG_reserved", "PG_private",
++ "PG_writeback", "?? 13 ??", "PG_compound", "PG_swapcache",
++ "PG_mappedtodisk", "PG_reclaim", "?? 18 ??", "PG_buddy",
++ NULL };
++#endif
++
++/* From include/linux/buffer_head.h */
++static char *bh_state_vals[] = {
++ "Uptodate", "Dirty", "Lock", "Req",
++ "Uptodate_Lock", "Mapped", "New", "Async_read",
++ "Async_write", "Delay", "Boundary", "Write_EIO",
++ "Ordered", "Eopnotsupp", "Unwritten", "Private",
++ NULL };
++
++/* From include/linux/bio.h */
++static char *bio_flag_vals[] = {
++ "Uptodate", "RW_block", "EOF", "Seg_valid",
++ "Cloned", "Bounced", "User_mapped", "Eopnotsupp",
++ NULL };
++
++/* From include/linux/fs.h */
++static char *inode_flag_vals[] = {
++ "I_DIRTY_SYNC", "I_DIRTY_DATASYNC", "I_DIRTY_PAGES", "I_LOCK",
++ "I_FREEING", "I_CLEAR", "I_NEW", "I_WILL_FREE",
++ NULL };
++
++static char *map_flags(unsigned long flags, char *mapping[])
++{
++ static char buffer[256];
++ int index;
++ int offset = 12;
++
++ buffer[0] = '\0';
++
++ for (index = 0; flags && mapping[index]; flags >>= 1, index++) {
++ if (flags & 1) {
++ if ((offset + strlen(mapping[index]) + 1) >= 80) {
++ strcat(buffer, "\n ");
++ offset = 12;
++ } else if (offset > 12) {
++ strcat(buffer, " ");
++ offset++;
++ }
++ strcat(buffer, mapping[index]);
++ offset += strlen(mapping[index]);
++ }
++ }
++
++ return (buffer);
++}
++
++static int
++kdbm_buffers(int argc, const char **argv)
++{
++ struct buffer_head bh;
++ unsigned long addr;
++ long offset = 0;
++ int nextarg;
++ int diag;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) ||
++ (diag = kdb_getarea(bh, addr)))
++ return(diag);
++
++ kdb_printf("buffer_head at 0x%lx\n", addr);
++ kdb_printf(" bno %llu size %llu dev 0x%x\n",
++ (unsigned long long)bh.b_blocknr,
++ (unsigned long long)bh.b_size,
++ bh.b_bdev ? bh.b_bdev->bd_dev : 0);
++ kdb_printf(" count %d state 0x%lx [%s]\n",
++ bh.b_count.counter, bh.b_state,
++ map_flags(bh.b_state, bh_state_vals));
++ kdb_printf(" b_data 0x%p\n",
++ bh.b_data);
++ kdb_printf(" b_page 0x%p b_this_page 0x%p b_private 0x%p\n",
++ bh.b_page, bh.b_this_page, bh.b_private);
++ kdb_printf(" b_end_io ");
++ if (bh.b_end_io)
++ kdb_symbol_print(kdba_funcptr_value(bh.b_end_io), NULL, KDB_SP_VALUE);
++ else
++ kdb_printf("(NULL)");
++ kdb_printf("\n");
++
++ return 0;
++}
++
++static int
++print_biovec(struct bio_vec *vec, int vcount)
++{
++ struct bio_vec bvec;
++ unsigned long addr;
++ int diag;
++ int i;
++
++ if (vcount < 1 || vcount > BIO_MAX_PAGES) {
++ kdb_printf(" [skipped iovecs, vcnt is %d]\n", vcount);
++ return 0;
++ }
++
++ addr = (unsigned long)vec;
++ for (i = 0; i < vcount; i++) {
++ if ((diag = kdb_getarea(bvec, addr)))
++ return(diag);
++ addr += sizeof(bvec);
++ kdb_printf(" [%d] page 0x%p length=%u offset=%u\n",
++ i, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
++ }
++ return 0;
++}
++
++static int
++kdbm_bio(int argc, const char **argv)
++{
++ struct bio bio;
++ unsigned long addr;
++ long offset = 0;
++ int nextarg;
++ int diag;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) ||
++ (diag = kdb_getarea(bio, addr)))
++ return(diag);
++
++ kdb_printf("bio at 0x%lx\n", addr);
++ kdb_printf(" bno %llu next 0x%p dev 0x%x\n",
++ (unsigned long long)bio.bi_sector,
++ bio.bi_next, bio.bi_bdev ? bio.bi_bdev->bd_dev : 0);
++ kdb_printf(" vcnt %u vec 0x%p rw 0x%lx flags 0x%lx [%s]\n",
++ bio.bi_vcnt, bio.bi_io_vec, bio.bi_rw, bio.bi_flags,
++ map_flags(bio.bi_flags, bio_flag_vals));
++ print_biovec(bio.bi_io_vec, bio.bi_vcnt);
++ kdb_printf(" count %d private 0x%p\n",
++ atomic_read(&bio.bi_cnt), bio.bi_private);
++ kdb_printf(" bi_end_io ");
++ if (bio.bi_end_io)
++ kdb_symbol_print(kdba_funcptr_value(bio.bi_end_io), NULL, KDB_SP_VALUE);
++ else
++ kdb_printf("(NULL)");
++ kdb_printf("\n");
++
++ return 0;
++}
++
++#ifndef CONFIG_DISCONTIGMEM
++static char *page_flags(unsigned long flags)
++{
++ return(map_flags(flags, pg_flag_vals));
++}
++
++static int
++kdbm_page(int argc, const char **argv)
++{
++ struct page page;
++ unsigned long addr;
++ long offset = 0;
++ int nextarg;
++ int diag;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++ if (diag)
++ return diag;
++
++#ifdef __ia64__
++ if (rgn_index(addr) == 0)
++ addr = (unsigned long) &mem_map[addr]; /* assume region 0 is a page index, not an address */
++#else
++ if (addr < PAGE_OFFSET)
++ addr = (unsigned long) &mem_map[addr];
++#endif
++
++ if ((diag = kdb_getarea(page, addr)))
++ return(diag);
++
++ kdb_printf("struct page at 0x%lx\n", addr);
++ kdb_printf(" addr space 0x%p index %lu (offset 0x%llx)\n",
++ page.mapping, page.index,
++ (unsigned long long)page.index << PAGE_CACHE_SHIFT);
++ kdb_printf(" count %d flags %s\n",
++ page._count.counter, page_flags(page.flags));
++ kdb_printf(" virtual 0x%p\n", page_address((struct page *)addr));
++ if (page_has_buffers(&page))
++ kdb_printf(" buffers 0x%p\n", page_buffers(&page));
++ else
++ kdb_printf(" private 0x%lx\n", page_private(&page));
++
++ return 0;
++}
++#endif /* CONFIG_DISCONTIGMEM */
++
++static unsigned long
++print_request(unsigned long addr)
++{
++ struct request rq;
++
++ if (kdb_getarea(rq, addr))
++ return(0);
++
++ kdb_printf("struct request at 0x%lx\n", addr);
++ kdb_printf(" errors %d sector %llu nr_sectors %lu\n",
++ rq.errors,
++ (unsigned long long)rq.sector, rq.nr_sectors);
++
++ kdb_printf(" hsect %llu hnrsect %lu nrseg %u nrhwseg %u currnrsect %u\n",
++ (unsigned long long)rq.hard_sector, rq.hard_nr_sectors,
++ rq.nr_phys_segments, rq.nr_hw_segments,
++ rq.current_nr_sectors);
++
++ return (unsigned long) rq.queuelist.next;
++}
++
++static int
++kdbm_request(int argc, const char **argv)
++{
++ long offset = 0;
++ unsigned long addr;
++ int nextarg;
++ int diag;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++ if (diag)
++ return diag;
++
++ print_request(addr);
++ return 0;
++}
++
++
++static int
++kdbm_rqueue(int argc, const char **argv)
++{
++ struct request_queue rq;
++ unsigned long addr, head_addr, next;
++ long offset = 0;
++ int nextarg;
++ int i, diag;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) ||
++ (diag = kdb_getarea(rq, addr)))
++ return(diag);
++
++ kdb_printf("struct request_queue at 0x%lx\n", addr);
++ i = 0;
++ next = (unsigned long)rq.queue_head.next;
++ head_addr = addr + offsetof(struct request_queue, queue_head);
++ kdb_printf(" request queue: %s\n", next == head_addr ?
++ "empty" : "");
++ while (next != head_addr) {
++ i++;
++ next = print_request(next);
++ }
++
++ if (i)
++ kdb_printf("%d requests found\n", i);
++
++ return 0;
++}
++
++
++static void
++do_buffer(unsigned long addr)
++{
++ struct buffer_head bh;
++
++ if (kdb_getarea(bh, addr))
++ return;
++
++ kdb_printf("\tbh 0x%lx bno %8llu [%s]\n", addr,
++ (unsigned long long)bh.b_blocknr,
++ map_flags(bh.b_state, bh_state_vals));
++}
++
++static void
++kdbm_show_page(struct page *page, int first)
++{
++ if (first)
++ kdb_printf("page_struct index cnt zone nid flags\n");
++ kdb_printf("%p%s %6lu %5d %3d %3d 0x%lx",
++ page_address(page), sizeof(void *) == 4 ? " " : "",
++ page->index, atomic_read(&(page->_count)),
++ page_zonenum(page), page_to_nid(page),
++ page->flags & (~0UL >> ZONES_SHIFT));
++#define kdb_page_flags(page, type) if (Page ## type(page)) kdb_printf(" " #type);
++ kdb_page_flags(page, Locked);
++ kdb_page_flags(page, Error);
++ kdb_page_flags(page, Referenced);
++ kdb_page_flags(page, Uptodate);
++ kdb_page_flags(page, Dirty);
++ kdb_page_flags(page, LRU);
++ kdb_page_flags(page, Active);
++ kdb_page_flags(page, Slab);
++ kdb_page_flags(page, Checked);
++ if (page->flags & (1UL << PG_arch_1))
++ kdb_printf(" arch_1");
++ kdb_page_flags(page, Reserved);
++ kdb_page_flags(page, Private);
++ kdb_page_flags(page, Writeback);
++ kdb_page_flags(page, Compound);
++ kdb_page_flags(page, SwapCache);
++ kdb_page_flags(page, MappedToDisk);
++ kdb_page_flags(page, Reclaim);
++ kdb_page_flags(page, Buddy);
++
++ /* PageHighMem is not a flag any more, but treat it as one */
++ kdb_page_flags(page, HighMem);
++
++ if (page_has_buffers(page)) {
++ struct buffer_head *head, *bh;
++ kdb_printf("\n");
++ head = bh = page_buffers(page);
++ do {
++ do_buffer((unsigned long) bh);
++ } while ((bh = bh->b_this_page) != head);
++ } else if (page_private(page)) {
++ kdb_printf(" private= 0x%lx", page_private(page));
++ }
++ /* Cannot use page_mapping(page) here, it needs swapper_space which is
++ * not exported.
++ */
++ if (page->mapping)
++ kdb_printf(" mapping= %p", page->mapping);
++ kdb_printf("\n");
++#undef kdb_page_flags
++}
++
++static int
++kdbm_inode_pages(int argc, const char **argv)
++{
++ struct inode *inode = NULL;
++ struct address_space *ap = NULL;
++ unsigned long addr, addr1 = 0;
++ long offset = 0;
++ int nextarg;
++ int diag;
++ pgoff_t next = 0;
++ struct page *page;
++ int first;
++
++ nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++ if (diag)
++ goto out;
++
++ if (argc == 2) {
++ nextarg = 2;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr1,
++ &offset, NULL);
++ if (diag)
++ goto out;
++ kdb_printf("Looking for page index 0x%lx ... \n", addr1);
++ next = addr1;
++ }
++
++ if (!(inode = kmalloc(sizeof(*inode), GFP_ATOMIC))) {
++ kdb_printf("kdbm_inode_pages: cannot kmalloc inode\n");
++ goto out;
++ }
++ if (!(ap = kmalloc(sizeof(*ap), GFP_ATOMIC))) {
++ kdb_printf("kdbm_inode_pages: cannot kmalloc ap\n");
++ goto out;
++ }
++ if ((diag = kdb_getarea(*inode, addr)))
++ goto out;
++ if (!inode->i_mapping) {
++ kdb_printf("inode has no mapping\n");
++ goto out;
++ }
++ if ((diag = kdb_getarea(*ap, (unsigned long) inode->i_mapping)))
++ goto out;
++
++ /* Run the pages in the radix tree, printing the state of each page */
++ first = 1;
++ while (radix_tree_gang_lookup(&ap->page_tree, (void **)&page, next, 1)) {
++ kdbm_show_page(page, first);
++ if (addr1)
++ break;
++ first = 0;
++ next = page->index + 1;
++ }
++
++out:
++ if (inode)
++ kfree(inode);
++ if (ap)
++ kfree(ap);
++ return diag;
++}
++
++static int
++kdbm_inode(int argc, const char **argv)
++{
++ struct inode *inode = NULL;
++ unsigned long addr;
++ unsigned char *iaddr;
++ long offset = 0;
++ int nextarg;
++ int diag;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)))
++ goto out;
++ if (!(inode = kmalloc(sizeof(*inode), GFP_ATOMIC))) {
++ kdb_printf("kdbm_inode: cannot kmalloc inode\n");
++ goto out;
++ }
++ if ((diag = kdb_getarea(*inode, addr)))
++ goto out;
++
++ kdb_printf("struct inode at 0x%lx\n", addr);
++
++ kdb_printf(" i_ino = %lu i_count = %u i_size %Ld\n",
++ inode->i_ino, atomic_read(&inode->i_count),
++ inode->i_size);
++
++ kdb_printf(" i_mode = 0%o i_nlink = %d i_rdev = 0x%x\n",
++ inode->i_mode, inode->i_nlink,
++ inode->i_rdev);
++
++ kdb_printf(" i_hash.nxt = 0x%p i_hash.pprev = 0x%p\n",
++ inode->i_hash.next,
++ inode->i_hash.pprev);
++
++ kdb_printf(" i_list.nxt = 0x%p i_list.prv = 0x%p\n",
++ list_entry(inode->i_list.next, struct inode, i_list),
++ list_entry(inode->i_list.prev, struct inode, i_list));
++
++ kdb_printf(" i_dentry.nxt = 0x%p i_dentry.prv = 0x%p\n",
++ list_entry(inode->i_dentry.next, struct dentry, d_alias),
++ list_entry(inode->i_dentry.prev, struct dentry, d_alias));
++
++ kdb_printf(" i_sb = 0x%p i_op = 0x%p i_data = 0x%lx nrpages = %lu\n",
++ inode->i_sb, inode->i_op,
++ addr + offsetof(struct inode, i_data),
++ inode->i_data.nrpages);
++ kdb_printf(" i_fop= 0x%p i_flock = 0x%p i_mapping = 0x%p\n",
++ inode->i_fop, inode->i_flock, inode->i_mapping);
++
++ kdb_printf(" i_flags 0x%x i_state 0x%lx [%s]",
++ inode->i_flags, inode->i_state,
++ map_flags(inode->i_state, inode_flag_vals));
++
++ iaddr = (char *)addr;
++ iaddr += offsetof(struct inode, i_private);
++
++ kdb_printf(" fs specific info @ 0x%p\n", iaddr);
++out:
++ if (inode)
++ kfree(inode);
++ return diag;
++}
++
++static int
++kdbm_sb(int argc, const char **argv)
++{
++ struct super_block *sb = NULL;
++ unsigned long addr;
++ long offset = 0;
++ int nextarg;
++ int diag;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)))
++ goto out;
++ if (!(sb = kmalloc(sizeof(*sb), GFP_ATOMIC))) {
++ kdb_printf("kdbm_sb: cannot kmalloc sb\n");
++ goto out;
++ }
++ if ((diag = kdb_getarea(*sb, addr)))
++ goto out;
++
++ kdb_printf("struct super_block at 0x%lx\n", addr);
++ kdb_printf(" s_dev 0x%x blocksize 0x%lx\n", sb->s_dev, sb->s_blocksize);
++ kdb_printf(" s_flags 0x%lx s_root 0x%p\n", sb->s_flags, sb->s_root);
++ kdb_printf(" s_dirt %d s_dirty.next 0x%p s_dirty.prev 0x%p\n",
++ sb->s_dirt, sb->s_dirty.next, sb->s_dirty.prev);
++ kdb_printf(" s_frozen %d s_id [%s]\n", sb->s_frozen, sb->s_id);
++out:
++ if (sb)
++ kfree(sb);
++ return diag;
++}
++
++
++
++#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
++/* According to Steve Lord, this code is ix86 specific. Patches to extend it to
++ * other architectures will be gratefully accepted.
++ */
++static int
++kdbm_memmap(int argc, const char **argv)
++{
++ struct page page;
++ int i, page_count;
++ int slab_count = 0;
++ int dirty_count = 0;
++ int locked_count = 0;
++ int page_counts[9];
++ int buffered_count = 0;
++#ifdef buffer_delay
++ int delay_count = 0;
++#endif
++ int diag;
++ unsigned long addr;
++
++ addr = (unsigned long)mem_map;
++ page_count = max_mapnr;
++ memset(page_counts, 0, sizeof(page_counts));
++
++ for (i = 0; i < page_count; i++) {
++ if ((diag = kdb_getarea(page, addr)))
++ return(diag);
++ addr += sizeof(page);
++
++ if (PageSlab(&page))
++ slab_count++;
++ if (PageDirty(&page))
++ dirty_count++;
++ if (PageLocked(&page))
++ locked_count++;
++ if (page._count.counter < 8)
++ page_counts[page._count.counter]++;
++ else
++ page_counts[8]++;
++ if (page_has_buffers(&page)) {
++ buffered_count++;
++#ifdef buffer_delay
++ if (buffer_delay(page.buffers))
++ delay_count++;
++#endif
++ }
++
++ }
++
++ kdb_printf(" Total pages: %6d\n", page_count);
++ kdb_printf(" Slab pages: %6d\n", slab_count);
++ kdb_printf(" Dirty pages: %6d\n", dirty_count);
++ kdb_printf(" Locked pages: %6d\n", locked_count);
++ kdb_printf(" Buffer pages: %6d\n", buffered_count);
++#ifdef buffer_delay
++ kdb_printf(" Delalloc pages: %6d\n", delay_count);
++#endif
++ for (i = 0; i < 8; i++) {
++ kdb_printf(" %d page count: %6d\n",
++ i, page_counts[i]);
++ }
++ kdb_printf(" high page count: %6d\n", page_counts[8]);
++ return 0;
++}
++#endif /* CONFIG_X86 && !CONFIG_X86_64 */
++
++static int __init kdbm_pg_init(void)
++{
++#ifndef CONFIG_DISCONTIGMEM
++ kdb_register("page", kdbm_page, "<vaddr>", "Display page", 0);
++#endif
++ kdb_register("inode", kdbm_inode, "<vaddr>", "Display inode", 0);
++ kdb_register("sb", kdbm_sb, "<vaddr>", "Display super_block", 0);
++ kdb_register("bh", kdbm_buffers, "<buffer head address>", "Display buffer", 0);
++ kdb_register("bio", kdbm_bio, "<bio address>", "Display bio", 0);
++ kdb_register("inode_pages", kdbm_inode_pages, "<inode *>", "Display pages in an inode", 0);
++ kdb_register("req", kdbm_request, "<vaddr>", "dump request struct", 0);
++ kdb_register("rqueue", kdbm_rqueue, "<vaddr>", "dump request queue", 0);
++#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
++ kdb_register("memmap", kdbm_memmap, "", "page table summary", 0);
++#endif
++
++ return 0;
++}
++
++
++static void __exit kdbm_pg_exit(void)
++{
++#ifndef CONFIG_DISCONTIGMEM
++ kdb_unregister("page");
++#endif
++ kdb_unregister("inode");
++ kdb_unregister("sb");
++ kdb_unregister("bh");
++ kdb_unregister("bio");
++ kdb_unregister("inode_pages");
++ kdb_unregister("req");
++ kdb_unregister("rqueue");
++#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
++ kdb_unregister("memmap");
++#endif
++}
++
++module_init(kdbm_pg_init)
++module_exit(kdbm_pg_exit)
+diff -Nurp linux-2.6.22-590/kdb/modules/kdbm_sched.c linux-2.6.22-600/kdb/modules/kdbm_sched.c
+--- linux-2.6.22-590/kdb/modules/kdbm_sched.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/modules/kdbm_sched.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,57 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 2005 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++
++MODULE_AUTHOR("SGI");
++MODULE_DESCRIPTION("Debug scheduler information");
++MODULE_LICENSE("GPL");
++
++static int
++kdbm_runqueues(int argc, const char **argv)
++{
++ unsigned long cpu;
++ int ret = 0;
++
++ if (argc == 1) {
++ ret = kdbgetularg((char *)argv[1], &cpu);
++ if (!ret) {
++ if (!cpu_online(cpu)) {
++ kdb_printf("Invalid cpu number\n");
++ } else
++ kdb_runqueue(cpu, kdb_printf);
++ }
++ } else if (argc == 0) {
++ for_each_online_cpu(cpu)
++ kdb_runqueue(cpu, kdb_printf);
++ } else {
++ /* More than one arg */
++ kdb_printf("Specify one cpu number\n");
++ }
++ return ret;
++}
++
++static int __init kdbm_sched_init(void)
++{
++ kdb_register("rq", kdbm_runqueues, "<cpunum>", "Display runqueue for <cpunum>", 0);
++ kdb_register("rqa", kdbm_runqueues, "", "Display all runqueues", 0);
++ return 0;
++}
++
++static void __exit kdbm_sched_exit(void)
++{
++ kdb_unregister("rq");
++ kdb_unregister("rqa");
++}
++
++module_init(kdbm_sched_init)
++module_exit(kdbm_sched_exit)
+diff -Nurp linux-2.6.22-590/kdb/modules/kdbm_task.c linux-2.6.22-600/kdb/modules/kdbm_task.c
+--- linux-2.6.22-590/kdb/modules/kdbm_task.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/modules/kdbm_task.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,199 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/blkdev.h>
++#include <linux/types.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <asm/signal.h>
++
++MODULE_AUTHOR("SGI");
++MODULE_DESCRIPTION("Debug struct task and sigset information");
++MODULE_LICENSE("GPL");
++
++static char *
++kdb_cpus_allowed_string(struct task_struct *tp)
++{
++ static char maskbuf[NR_CPUS * 8];
++ if (cpus_equal(tp->cpus_allowed, cpu_online_map))
++ strcpy(maskbuf, "ALL");
++ else if (cpus_full(tp->cpus_allowed))
++ strcpy(maskbuf, "ALL(NR_CPUS)");
++ else if (cpus_empty(tp->cpus_allowed))
++ strcpy(maskbuf, "NONE");
++ else if (cpus_weight(tp->cpus_allowed) == 1)
++ snprintf(maskbuf, sizeof(maskbuf), "ONLY(%d)", first_cpu(tp->cpus_allowed));
++ else
++ cpulist_scnprintf(maskbuf, sizeof(maskbuf), tp->cpus_allowed);
++ return maskbuf;
++}
++
++static int
++kdbm_task(int argc, const char **argv)
++{
++ unsigned long addr;
++ long offset=0;
++ int nextarg;
++ int e = 0;
++ struct task_struct *tp = NULL, *tp1;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((e = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) != 0)
++ return(e);
++
++ if (!(tp = kmalloc(sizeof(*tp), GFP_ATOMIC))) {
++ kdb_printf("%s: cannot kmalloc tp\n", __FUNCTION__);
++ goto out;
++ }
++ if ((e = kdb_getarea(*tp, addr))) {
++ kdb_printf("%s: invalid task address\n", __FUNCTION__);
++ goto out;
++ }
++
++ tp1 = (struct task_struct *)addr;
++ kdb_printf(
++ "struct task at 0x%lx, pid=%d flags=0x%x state=%ld comm=\"%s\"\n",
++ addr, tp->pid, tp->flags, tp->state, tp->comm);
++
++ kdb_printf(" cpu=%d policy=%u ", kdb_process_cpu(tp), tp->policy);
++ kdb_printf(
++ "prio=%d static_prio=%d cpus_allowed=",
++ tp->prio, tp->static_prio);
++ {
++ /* The cpus allowed string may be longer than kdb_printf() can
++ * handle. Print it in chunks.
++ */
++ char c, *p;
++ p = kdb_cpus_allowed_string(tp);
++ while (1) {
++ if (strlen(p) < 100) {
++ kdb_printf("%s", p);
++ break;
++ }
++ c = p[100];
++ p[100] = '\0';
++ kdb_printf("%s", p);
++ p[100] = c;
++ p += 100;
++ }
++ }
++ kdb_printf(" &thread=0x%p\n", &tp1->thread);
++
++ kdb_printf(" need_resched=%d ",
++ test_tsk_thread_flag(tp, TIF_NEED_RESCHED));
++ kdb_printf(
++ "timestamp=%llu time_slice=%u",
++ tp->timestamp, tp->time_slice);
++ kdb_printf(" lock_depth=%d\n", tp->lock_depth);
++
++ kdb_printf(
++ " fs=0x%p files=0x%p mm=0x%p\n",
++ tp->fs, tp->files, tp->mm);
++
++ kdb_printf(
++ " uid=%d euid=%d suid=%d fsuid=%d gid=%d egid=%d sgid=%d fsgid=%d\n",
++ tp->uid, tp->euid, tp->suid, tp->fsuid, tp->gid, tp->egid, tp->sgid, tp->fsgid);
++
++ kdb_printf(
++ " user=0x%p\n",
++ tp->user);
++
++ if (tp->sysvsem.undo_list)
++ kdb_printf(
++ " sysvsem.sem_undo refcnt %d proc_list=0x%p\n",
++ atomic_read(&tp->sysvsem.undo_list->refcnt),
++ tp->sysvsem.undo_list->proc_list);
++
++ kdb_printf(
++ " signal=0x%p &blocked=0x%p &pending=0x%p\n",
++ tp->signal, &tp1->blocked, &tp1->pending);
++
++ kdb_printf(
++ " utime=%ld stime=%ld cutime=%ld cstime=%ld\n",
++ tp->utime, tp->stime,
++ tp->signal ? tp->signal->cutime : 0L,
++ tp->signal ? tp->signal->cstime : 0L);
++
++ kdb_printf(" thread_info=0x%p\n", task_thread_info(tp));
++ kdb_printf(" ti flags=0x%lx\n", (unsigned long)task_thread_info(tp)->flags);
++
++out:
++ if (tp)
++ kfree(tp);
++ return e;
++}
++
++static int
++kdbm_sigset(int argc, const char **argv)
++{
++ sigset_t *sp = NULL;
++ unsigned long addr;
++ long offset=0;
++ int nextarg;
++ int e = 0;
++ int i;
++ char fmt[32];
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++#ifndef _NSIG_WORDS
++ kdb_printf("unavailable on this platform, _NSIG_WORDS not defined.\n");
++#else
++ nextarg = 1;
++ if ((e = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) != 0)
++ return(e);
++
++ if (!(sp = kmalloc(sizeof(*sp), GFP_ATOMIC))) {
++ kdb_printf("%s: cannot kmalloc sp\n", __FUNCTION__);
++ goto out;
++ }
++ if ((e = kdb_getarea(*sp, addr))) {
++ kdb_printf("%s: invalid sigset address\n", __FUNCTION__);
++ goto out;
++ }
++
++ sprintf(fmt, "[%%d]=0x%%0%dlx ", (int)sizeof(sp->sig[0])*2);
++ kdb_printf("sigset at 0x%p : ", sp);
++ for (i=_NSIG_WORDS-1; i >= 0; i--) {
++ if (i == 0 || sp->sig[i]) {
++ kdb_printf(fmt, i, sp->sig[i]);
++ }
++ }
++ kdb_printf("\n");
++#endif /* _NSIG_WORDS */
++
++out:
++ if (sp)
++ kfree(sp);
++ return e;
++}
++
++static int __init kdbm_task_init(void)
++{
++ kdb_register("task", kdbm_task, "<vaddr>", "Display task_struct", 0);
++ kdb_register("sigset", kdbm_sigset, "<vaddr>", "Display sigset_t", 0);
++
++ return 0;
++}
++
++static void __exit kdbm_task_exit(void)
++{
++ kdb_unregister("task");
++ kdb_unregister("sigset");
++}
++
++module_init(kdbm_task_init)
++module_exit(kdbm_task_exit)
+diff -Nurp linux-2.6.22-590/kdb/modules/kdbm_vm.c linux-2.6.22-600/kdb/modules/kdbm_vm.c
+--- linux-2.6.22-590/kdb/modules/kdbm_vm.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/modules/kdbm_vm.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,841 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/blkdev.h>
++#include <linux/types.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/swapops.h>
++
++#include <scsi.h>
++#include <scsi/scsi_host.h>
++
++MODULE_AUTHOR("SGI");
++MODULE_DESCRIPTION("Debug VM information");
++MODULE_LICENSE("GPL");
++
++struct __vmflags {
++ unsigned long mask;
++ char *name;
++};
++
++static struct __vmflags vmflags[] = {
++ { VM_READ, "VM_READ " },
++ { VM_WRITE, "VM_WRITE " },
++ { VM_EXEC, "VM_EXEC " },
++ { VM_SHARED, "VM_SHARED " },
++ { VM_MAYREAD, "VM_MAYREAD " },
++ { VM_MAYWRITE, "VM_MAYWRITE " },
++ { VM_MAYEXEC, "VM_MAYEXEC " },
++ { VM_MAYSHARE, "VM_MAYSHARE " },
++ { VM_GROWSDOWN, "VM_GROWSDOWN " },
++ { VM_GROWSUP, "VM_GROWSUP " },
++ { VM_PFNMAP, "VM_PFNMAP " },
++ { VM_DENYWRITE, "VM_DENYWRITE " },
++ { VM_EXECUTABLE, "VM_EXECUTABLE " },
++ { VM_LOCKED, "VM_LOCKED " },
++ { VM_IO, "VM_IO " },
++ { VM_SEQ_READ, "VM_SEQ_READ " },
++ { VM_RAND_READ, "VM_RAND_READ " },
++ { VM_DONTCOPY, "VM_DONTCOPY " },
++ { VM_DONTEXPAND, "VM_DONTEXPAND " },
++ { VM_RESERVED, "VM_RESERVED " },
++ { VM_ACCOUNT, "VM_ACCOUNT " },
++ { VM_HUGETLB, "VM_HUGETLB " },
++ { VM_NONLINEAR, "VM_NONLINEAR " },
++ { VM_MAPPED_COPY, "VM_MAPPED_COPY " },
++ { VM_INSERTPAGE, "VM_INSERTPAGE " },
++ { 0, "" }
++};
++
++static int
++kdbm_print_vm(struct vm_area_struct *vp, unsigned long addr, int verbose_flg)
++{
++ struct __vmflags *tp;
++
++ kdb_printf("struct vm_area_struct at 0x%lx for %d bytes\n",
++ addr, (int) sizeof (struct vm_area_struct));
++
++ kdb_printf("vm_start = 0x%p vm_end = 0x%p\n", (void *) vp->vm_start,
++ (void *) vp->vm_end);
++ kdb_printf("vm_page_prot = 0x%lx\n", pgprot_val(vp->vm_page_prot));
++
++ kdb_printf("vm_flags: ");
++ for (tp = vmflags; tp->mask; tp++) {
++ if (vp->vm_flags & tp->mask) {
++ kdb_printf(" %s", tp->name);
++ }
++ }
++ kdb_printf("\n");
++
++ if (!verbose_flg)
++ return 0;
++
++ kdb_printf("vm_mm = 0x%p\n", (void *) vp->vm_mm);
++ kdb_printf("vm_next = 0x%p\n", (void *) vp->vm_next);
++ kdb_printf("shared.vm_set.list.next = 0x%p\n", (void *) vp->shared.vm_set.list.next);
++ kdb_printf("shared.vm_set.list.prev = 0x%p\n", (void *) vp->shared.vm_set.list.prev);
++ kdb_printf("shared.vm_set.parent = 0x%p\n", (void *) vp->shared.vm_set.parent);
++ kdb_printf("shared.vm_set.head = 0x%p\n", (void *) vp->shared.vm_set.head);
++ kdb_printf("anon_vma_node.next = 0x%p\n", (void *) vp->anon_vma_node.next);
++ kdb_printf("anon_vma_node.prev = 0x%p\n", (void *) vp->anon_vma_node.prev);
++ kdb_printf("vm_ops = 0x%p\n", (void *) vp->vm_ops);
++ if (vp->vm_ops != NULL) {
++ kdb_printf("vm_ops->open = 0x%p\n", vp->vm_ops->open);
++ kdb_printf("vm_ops->close = 0x%p\n", vp->vm_ops->close);
++ kdb_printf("vm_ops->nopage = 0x%p\n", vp->vm_ops->nopage);
++#ifdef HAVE_VMOP_MPROTECT
++ kdb_printf("vm_ops->mprotect = 0x%p\n", vp->vm_ops->mprotect);
++#endif
++ }
++ kdb_printf("vm_pgoff = 0x%lx\n", vp->vm_pgoff);
++ kdb_printf("vm_file = 0x%p\n", (void *) vp->vm_file);
++ kdb_printf("vm_private_data = 0x%p\n", vp->vm_private_data);
++
++ return 0;
++}
++
++static int
++kdbm_print_vmp(struct vm_area_struct *vp, int verbose_flg)
++{
++ struct __vmflags *tp;
++
++ if (verbose_flg) {
++ kdb_printf("0x%lx: ", (unsigned long) vp);
++ }
++
++ kdb_printf("0x%p 0x%p ", (void *) vp->vm_start, (void *) vp->vm_end);
++
++ for (tp = vmflags; tp->mask; tp++) {
++ if (vp->vm_flags & tp->mask) {
++ kdb_printf(" %s", tp->name);
++ }
++ }
++ kdb_printf("\n");
++
++ return 0;
++}
++
++/*
++ * kdbm_vm
++ *
++ * This function implements the 'vm' command. Print a vm_area_struct.
++ *
++ * vm [-v] <address> Print vm_area_struct at <address>
++ * vmp [-v] <pid> Print all vm_area_structs for <pid>
++ */
++
++static int
++kdbm_vm(int argc, const char **argv)
++{
++ unsigned long addr;
++ long offset = 0;
++ int nextarg;
++ int diag;
++ int verbose_flg = 0;
++
++ if (argc == 2) {
++ if (strcmp(argv[1], "-v") != 0) {
++ return KDB_ARGCOUNT;
++ }
++ verbose_flg = 1;
++ } else if (argc != 1) {
++ return KDB_ARGCOUNT;
++ }
++
++ if (strcmp(argv[0], "vmp") == 0) {
++ struct task_struct *g, *tp;
++ struct vm_area_struct *vp;
++ pid_t pid;
++
++ if ((diag = kdbgetularg(argv[argc], (unsigned long *) &pid)))
++ return diag;
++
++ kdb_do_each_thread(g, tp) {
++ if (tp->pid == pid) {
++ if (tp->mm != NULL) {
++ if (verbose_flg)
++ kdb_printf
++ ("vm_area_struct ");
++ kdb_printf
++ ("vm_start vm_end vm_flags\n");
++ vp = tp->mm->mmap;
++ while (vp != NULL) {
++ kdbm_print_vmp(vp, verbose_flg);
++ vp = vp->vm_next;
++ }
++ }
++ return 0;
++ }
++ } kdb_while_each_thread(g, tp);
++
++ kdb_printf("No process with pid == %d found\n", pid);
++
++ } else {
++ struct vm_area_struct v;
++
++ nextarg = argc;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset,
++ NULL))
++ || (diag = kdb_getarea(v, addr)))
++ return (diag);
++
++ kdbm_print_vm(&v, addr, verbose_flg);
++ }
++
++ return 0;
++}
++
++static int
++kdbm_print_pte(pte_t * pte)
++{
++ kdb_printf("0x%lx (", (unsigned long) pte_val(*pte));
++
++ if (pte_present(*pte)) {
++ if (pte_exec(*pte))
++ kdb_printf("X");
++ if (pte_write(*pte))
++ kdb_printf("W");
++ if (pte_read(*pte))
++ kdb_printf("R");
++ if (pte_young(*pte))
++ kdb_printf("A");
++ if (pte_dirty(*pte))
++ kdb_printf("D");
++
++ } else {
++ kdb_printf("OFFSET=0x%lx ", swp_offset(pte_to_swp_entry(*pte)));
++ kdb_printf("TYPE=0x%lx", (unsigned long) swp_type(pte_to_swp_entry(*pte)));
++ }
++
++ kdb_printf(")");
++
++ /* final newline is output by caller of kdbm_print_pte() */
++
++ return 0;
++}
++
++/*
++ * kdbm_pte
++ *
++ * This function implements the 'pte' command. Print all pte_t structures
++ * that map to the given virtual address range (<address> through <address>
++ * plus <nbytes>) for the given process. The default value for nbytes is
++ * one.
++ *
++ * pte -m <mm> <address> [<nbytes>] Print all pte_t structures for
++ * virtual <address> in address space
++ * of <mm> which is a pointer to a
++ * mm_struct
++ * pte -p <pid> <address> [<nbytes>] Print all pte_t structures for
++ * virtual <address> in address space
++ * of <pid>
++ */
++
++static int
++kdbm_pte(int argc, const char **argv)
++{
++ unsigned long addr;
++ long offset = 0;
++ int nextarg;
++ unsigned long nbytes = 1;
++ long npgs;
++ int diag;
++ int found;
++ pid_t pid;
++ struct task_struct *tp;
++ struct mm_struct *mm, copy_of_mm;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ if (argc < 3 || argc > 4) {
++ return KDB_ARGCOUNT;
++ }
++
++ if (strcmp(argv[1], "-p") == 0) {
++ if ((diag = kdbgetularg(argv[2], (unsigned long *) &pid))) {
++ return diag;
++ }
++
++ found = 0;
++ for_each_process(tp) {
++ if (tp->pid == pid) {
++ if (tp->mm != NULL) {
++ found = 1;
++ break;
++ }
++ kdb_printf("task structure's mm field is NULL\n");
++ return 0;
++ }
++ }
++
++ if (!found) {
++ kdb_printf("No process with pid == %d found\n", pid);
++ return 0;
++ }
++ mm = tp->mm;
++ } else if (strcmp(argv[1], "-m") == 0) {
++
++
++ nextarg = 2;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset,
++ NULL))
++ || (diag = kdb_getarea(copy_of_mm, addr)))
++ return (diag);
++ mm = &copy_of_mm;
++ } else {
++ return KDB_ARGCOUNT;
++ }
++
++ if ((diag = kdbgetularg(argv[3], &addr))) {
++ return diag;
++ }
++
++ if (argc == 4) {
++ if ((diag = kdbgetularg(argv[4], &nbytes))) {
++ return diag;
++ }
++ }
++
++ kdb_printf("vaddr pte\n");
++
++ npgs = ((((addr & ~PAGE_MASK) + nbytes) + ~PAGE_MASK) >> PAGE_SHIFT);
++ while (npgs-- > 0) {
++
++ kdb_printf("0x%p ", (void *) (addr & PAGE_MASK));
++
++ pgd = pgd_offset(mm, addr);
++ if (pgd_present(*pgd)) {
++ pud = pud_offset(pgd, addr);
++ if (pud_present(*pud)) {
++ pmd = pmd_offset(pud, addr);
++ if (pmd_present(*pmd)) {
++ pte = pte_offset_map(pmd, addr);
++ if (pte_present(*pte)) {
++ kdbm_print_pte(pte);
++ }
++ }
++ }
++ }
++
++ kdb_printf("\n");
++ addr += PAGE_SIZE;
++ }
++
++ return 0;
++}
++
++/*
++ * kdbm_rpte
++ *
++ * This function implements the 'rpte' command. Print all pte_t structures
++ * that contain the given physical page range (<pfn> through <pfn>
++ * plus <npages>) for the given process. The default value for npages is
++ * one.
++ *
++ * rpte -m <mm> <pfn> [<npages>] Print all pte_t structures for
++ * physical page <pfn> in address space
++ * of <mm> which is a pointer to a
++ * mm_struct
++ * rpte -p <pid> <pfn> [<npages>] Print all pte_t structures for
++ * physical page <pfn> in address space
++ * of <pid>
++ */
++
++static int
++kdbm_rpte(int argc, const char **argv)
++{
++ unsigned long addr;
++ unsigned long pfn;
++ long offset = 0;
++ int nextarg;
++ unsigned long npages = 1;
++ int diag;
++ int found;
++ pid_t pid;
++ struct task_struct *tp;
++ struct mm_struct *mm, copy_of_mm;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ unsigned long g, u, m, t;
++
++ if (argc < 3 || argc > 4) {
++ return KDB_ARGCOUNT;
++ }
++
++ if (strcmp(argv[1], "-p") == 0) {
++ if ((diag = kdbgetularg(argv[2], (unsigned long *) &pid))) {
++ return diag;
++ }
++
++ found = 0;
++ for_each_process(tp) {
++ if (tp->pid == pid) {
++ if (tp->mm != NULL) {
++ found = 1;
++ break;
++ }
++ kdb_printf("task structure's mm field is NULL\n");
++ return 0;
++ }
++ }
++
++ if (!found) {
++ kdb_printf("No process with pid == %d found\n", pid);
++ return 0;
++ }
++ mm = tp->mm;
++ } else if (strcmp(argv[1], "-m") == 0) {
++
++
++ nextarg = 2;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset,
++ NULL))
++ || (diag = kdb_getarea(copy_of_mm, addr)))
++ return (diag);
++ mm = &copy_of_mm;
++ } else {
++ return KDB_ARGCOUNT;
++ }
++
++ if ((diag = kdbgetularg(argv[3], &pfn))) {
++ return diag;
++ }
++
++ if (argc == 4) {
++ if ((diag = kdbgetularg(argv[4], &npages))) {
++ return diag;
++ }
++ }
++
++ /* spaces after vaddr depends on sizeof(unsigned long) */
++ kdb_printf("pfn vaddr%*s pte\n",
++ (int)(2*sizeof(unsigned long) + 2 - 5), " ");
++
++ for (g = 0, pgd = pgd_offset(mm, 0UL); g < PTRS_PER_PGD; ++g, ++pgd) {
++ if (pgd_none(*pgd) || pgd_bad(*pgd))
++ continue;
++ for (u = 0, pud = pud_offset(pgd, 0UL); u < PTRS_PER_PUD; ++u, ++pud) {
++ if (pud_none(*pud) || pud_bad(*pud))
++ continue;
++ for (m = 0, pmd = pmd_offset(pud, 0UL); m < PTRS_PER_PMD; ++m, ++pmd) {
++ if (pmd_none(*pmd) || pmd_bad(*pmd))
++ continue;
++ for (t = 0, pte = pte_offset_map(pmd, 0UL); t < PTRS_PER_PTE; ++t, ++pte) {
++ if (pte_none(*pte))
++ continue;
++ if (pte_pfn(*pte) < pfn || pte_pfn(*pte) >= (pfn + npages))
++ continue;
++ addr = g << PGDIR_SHIFT;
++#ifdef __ia64__
++ /* IA64 plays tricks with the pgd mapping to save space.
++ * This reverses pgd_index().
++ */
++ {
++ unsigned long region = g >> (PAGE_SHIFT - 6);
++ unsigned long l1index = g - (region << (PAGE_SHIFT - 6));
++ addr = (region << 61) + (l1index << PGDIR_SHIFT);
++ }
++#endif
++ addr += (m << PMD_SHIFT) + (t << PAGE_SHIFT);
++ kdb_printf("0x%-14lx " kdb_bfd_vma_fmt0 " ",
++ pte_pfn(*pte), addr);
++ kdbm_print_pte(pte);
++ kdb_printf("\n");
++ }
++ }
++ }
++ }
++
++ return 0;
++}
++
++static int
++kdbm_print_dentry(unsigned long daddr)
++{
++ struct dentry d;
++ int diag;
++ char buf[256];
++
++ kdb_printf("Dentry at 0x%lx\n", daddr);
++ if ((diag = kdb_getarea(d, (unsigned long)daddr)))
++ return diag;
++
++ if ((d.d_name.len > sizeof(buf)) || (diag = kdb_getarea_size(buf, (unsigned long)(d.d_name.name), d.d_name.len)))
++ kdb_printf(" d_name.len = %d d_name.name = 0x%p\n",
++ d.d_name.len, d.d_name.name);
++ else
++ kdb_printf(" d_name.len = %d d_name.name = 0x%p <%.*s>\n",
++ d.d_name.len, d.d_name.name,
++ (int)(d.d_name.len), d.d_name.name);
++
++ kdb_printf(" d_count = %d d_flags = 0x%x d_inode = 0x%p\n",
++ atomic_read(&d.d_count), d.d_flags, d.d_inode);
++
++ kdb_printf(" d_parent = 0x%p\n", d.d_parent);
++
++ kdb_printf(" d_hash.nxt = 0x%p d_hash.prv = 0x%p\n",
++ d.d_hash.next, d.d_hash.pprev);
++
++ kdb_printf(" d_lru.nxt = 0x%p d_lru.prv = 0x%p\n",
++ d.d_lru.next, d.d_lru.prev);
++
++ kdb_printf(" d_child.nxt = 0x%p d_child.prv = 0x%p\n",
++ d.d_u.d_child.next, d.d_u.d_child.prev);
++
++ kdb_printf(" d_subdirs.nxt = 0x%p d_subdirs.prv = 0x%p\n",
++ d.d_subdirs.next, d.d_subdirs.prev);
++
++ kdb_printf(" d_alias.nxt = 0x%p d_alias.prv = 0x%p\n",
++ d.d_alias.next, d.d_alias.prev);
++
++ kdb_printf(" d_op = 0x%p d_sb = 0x%p d_fsdata = 0x%p\n",
++ d.d_op, d.d_sb, d.d_fsdata);
++
++ kdb_printf(" d_iname = %s\n",
++ d.d_iname);
++
++ if (d.d_inode) {
++ struct inode i;
++ kdb_printf("\nInode Entry at 0x%p\n", d.d_inode);
++ if ((diag = kdb_getarea(i, (unsigned long)d.d_inode)))
++ return diag;
++ kdb_printf(" i_mode = 0%o i_nlink = %d i_rdev = 0x%x\n",
++ i.i_mode, i.i_nlink, i.i_rdev);
++
++ kdb_printf(" i_ino = %ld i_count = %d\n",
++ i.i_ino, atomic_read(&i.i_count));
++
++ kdb_printf(" i_hash.nxt = 0x%p i_hash.prv = 0x%p\n",
++ i.i_hash.next, i.i_hash.pprev);
++
++ kdb_printf(" i_list.nxt = 0x%p i_list.prv = 0x%p\n",
++ i.i_list.next, i.i_list.prev);
++
++ kdb_printf(" i_dentry.nxt = 0x%p i_dentry.prv = 0x%p\n",
++ i.i_dentry.next, i.i_dentry.prev);
++
++ }
++ kdb_printf("\n");
++ return 0;
++}
++
++static int
++kdbm_filp(int argc, const char **argv)
++{
++ struct file f;
++ int nextarg;
++ unsigned long addr;
++ long offset;
++ int diag;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) ||
++ (diag = kdb_getarea(f, addr)))
++ return diag;
++
++ kdb_printf("File Pointer at 0x%lx\n", addr);
++
++ kdb_printf(" fu_list.nxt = 0x%p fu_list.prv = 0x%p\n",
++ f.f_u.fu_list.next, f.f_u.fu_list.prev);
++
++ kdb_printf(" f_dentry = 0x%p f_vfsmnt = 0x%p f_op = 0x%p\n",
++ f.f_dentry, f.f_vfsmnt, f.f_op);
++
++ kdb_printf(" f_count = %d f_flags = 0x%x f_mode = 0x%x\n",
++ atomic_read(&f.f_count), f.f_flags, f.f_mode);
++
++ kdb_printf(" f_pos = %Ld\n", f.f_pos);
++#ifdef CONFIG_SECURITY
++ kdb_printf(" security = 0x%p\n", f.f_security);
++#endif
++
++ kdb_printf(" private_data = 0x%p f_mapping = 0x%p\n\n",
++ f.private_data, f.f_mapping);
++
++ return kdbm_print_dentry((unsigned long)f.f_dentry);
++}
++
++static int
++kdbm_fl(int argc, const char **argv)
++{
++ struct file_lock fl;
++ int nextarg;
++ unsigned long addr;
++ long offset;
++ int diag;
++
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) ||
++ (diag = kdb_getarea(fl, addr)))
++ return diag;
++
++ kdb_printf("File_lock at 0x%lx\n", addr);
++
++ kdb_printf(" fl_next = 0x%p fl_link.nxt = 0x%p fl_link.prv = 0x%p\n",
++ fl.fl_next, fl.fl_link.next, fl.fl_link.prev);
++ kdb_printf(" fl_block.nxt = 0x%p fl_block.prv = 0x%p\n",
++ fl.fl_block.next, fl.fl_block.prev);
++ kdb_printf(" fl_owner = 0x%p fl_pid = %d fl_wait = 0x%p\n",
++ fl.fl_owner, fl.fl_pid, &fl.fl_wait);
++ kdb_printf(" fl_file = 0x%p fl_flags = 0x%x\n",
++ fl.fl_file, fl.fl_flags);
++ kdb_printf(" fl_type = %d fl_start = 0x%llx fl_end = 0x%llx\n",
++ fl.fl_type, fl.fl_start, fl.fl_end);
++
++ kdb_printf(" file_lock_operations");
++ if (fl.fl_ops)
++ kdb_printf("\n fl_insert = 0x%p fl_remove = 0x%p fl_copy_lock = 0x%p fl_release_private = 0x%p\n",
++ fl.fl_ops->fl_insert, fl.fl_ops->fl_remove,
++ fl.fl_ops->fl_copy_lock, fl.fl_ops->fl_release_private);
++ else
++ kdb_printf(" empty\n");
++
++ kdb_printf(" lock_manager_operations");
++ if (fl.fl_lmops)
++ kdb_printf("\n fl_compare_owner = 0x%p fl_notify = 0x%p\n",
++ fl.fl_lmops->fl_compare_owner, fl.fl_lmops->fl_notify);
++ else
++ kdb_printf(" empty\n");
++
++ kdb_printf(" fl_fasync = 0x%p fl_break 0x%lx\n",
++ fl.fl_fasync, fl.fl_break_time);
++
++ return 0;
++}
++
++
++static int
++kdbm_dentry(int argc, const char **argv)
++{
++ int nextarg;
++ unsigned long addr;
++ long offset;
++ int diag;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)))
++ return diag;
++
++ return kdbm_print_dentry(addr);
++}
++
++static int
++kdbm_kobject(int argc, const char **argv)
++{
++ struct kobject k;
++ int nextarg;
++ unsigned long addr;
++ long offset;
++ int diag;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) ||
++ (diag = kdb_getarea(k, addr)))
++ return diag;
++
++
++ kdb_printf("kobject at 0x%lx\n", addr);
++
++ if (k.k_name) {
++ char c;
++ kdb_printf(" k_name 0x%p", k.k_name);
++ if (kdb_getarea(c, (unsigned long)k.k_name) == 0)
++ kdb_printf(" '%s'", k.k_name);
++ kdb_printf("\n");
++ }
++
++ if (k.k_name != ((struct kobject *)addr)->name)
++ kdb_printf(" name '%." __stringify(KOBJ_NAME_LEN) "s'\n", k.k_name);
++
++ kdb_printf(" kref.refcount %d'\n", atomic_read(&k.kref.refcount));
++
++ kdb_printf(" entry.next = 0x%p entry.prev = 0x%p\n",
++ k.entry.next, k.entry.prev);
++
++ kdb_printf(" parent = 0x%p kset = 0x%p ktype = 0x%p dentry = 0x%p\n",
++ k.parent, k.kset, k.ktype, k.dentry);
++
++ return 0;
++}
++
++static int
++kdbm_sh(int argc, const char **argv)
++{
++ int diag;
++ int nextarg;
++ unsigned long addr;
++ long offset = 0L;
++ struct Scsi_Host sh;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)) ||
++ (diag = kdb_getarea(sh, addr)))
++ return diag;
++
++ kdb_printf("Scsi_Host at 0x%lx\n", addr);
++ kdb_printf("host_queue = 0x%p\n", sh.__devices.next);
++ kdb_printf("ehandler = 0x%p eh_action = 0x%p\n",
++ sh.ehandler, sh.eh_action);
++ kdb_printf("host_wait = 0x%p hostt = 0x%p\n",
++ &sh.host_wait, sh.hostt);
++ kdb_printf("host_failed = %d host_no = %d resetting = %d\n",
++ sh.host_failed, sh.host_no, sh.resetting);
++ kdb_printf("max id/lun/channel = [%d/%d/%d] this_id = %d\n",
++ sh.max_id, sh.max_lun, sh.max_channel, sh.this_id);
++ kdb_printf("can_queue = %d cmd_per_lun = %d sg_tablesize = %d u_isa_dma = %d\n",
++ sh.can_queue, sh.cmd_per_lun, sh.sg_tablesize, sh.unchecked_isa_dma);
++ kdb_printf("host_blocked = %d reverse_ordering = %d \n",
++ sh.host_blocked, sh.reverse_ordering);
++
++ return 0;
++}
++
++static int
++kdbm_sd(int argc, const char **argv)
++{
++ int diag;
++ int nextarg;
++ unsigned long addr;
++ long offset = 0L;
++ struct scsi_device *sd = NULL;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)))
++ goto out;
++ if (!(sd = kmalloc(sizeof(*sd), GFP_ATOMIC))) {
++ kdb_printf("kdbm_sd: cannot kmalloc sd\n");
++ goto out;
++ }
++ if ((diag = kdb_getarea(*sd, addr)))
++ goto out;
++
++ kdb_printf("scsi_device at 0x%lx\n", addr);
++ kdb_printf("next = 0x%p prev = 0x%p host = 0x%p\n",
++ sd->siblings.next, sd->siblings.prev, sd->host);
++ kdb_printf("device_busy = %d current_cmnd 0x%p\n",
++ sd->device_busy, sd->current_cmnd);
++ kdb_printf("id/lun/chan = [%d/%d/%d] single_lun = %d device_blocked = %d\n",
++ sd->id, sd->lun, sd->channel, sd->single_lun, sd->device_blocked);
++ kdb_printf("queue_depth = %d current_tag = %d scsi_level = %d\n",
++ sd->queue_depth, sd->current_tag, sd->scsi_level);
++ kdb_printf("%8.8s %16.16s %4.4s\n", sd->vendor, sd->model, sd->rev);
++out:
++ if (sd)
++ kfree(sd);
++ return diag;
++}
++
++static int
++kdbm_sc(int argc, const char **argv)
++{
++ int diag;
++ int nextarg;
++ unsigned long addr;
++ long offset = 0L;
++ struct scsi_cmnd *sc = NULL;
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ nextarg = 1;
++ if ((diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL)))
++ goto out;
++ if (!(sc = kmalloc(sizeof(*sc), GFP_ATOMIC))) {
++ kdb_printf("kdbm_sc: cannot kmalloc sc\n");
++ goto out;
++ }
++ if ((diag = kdb_getarea(*sc, addr)))
++ goto out;
++
++ kdb_printf("scsi_cmnd at 0x%lx\n", addr);
++ kdb_printf("device = 0x%p next = 0x%p done = 0x%p\n",
++ sc->device, sc->list.next, sc->done);
++ kdb_printf("serial_number = %ld retries = %d\n",
++ sc->serial_number, sc->retries);
++ kdb_printf("cmd_len = %d\n", sc->cmd_len);
++ kdb_printf("cmnd = [%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x/%2.2x]\n",
++ sc->cmnd[0], sc->cmnd[1], sc->cmnd[2], sc->cmnd[3], sc->cmnd[4],
++ sc->cmnd[5], sc->cmnd[6], sc->cmnd[7], sc->cmnd[8], sc->cmnd[9],
++ sc->cmnd[10], sc->cmnd[11]);
++ kdb_printf("request_buffer = 0x%p request_bufflen = %d\n",
++ sc->request_buffer, sc->request_bufflen);
++ kdb_printf("use_sg = %d sglist_len = %d\n",
++ sc->use_sg, sc->sglist_len);
++ kdb_printf("underflow = %d transfersize = %d\n",
++ sc->underflow, sc->transfersize);
++ kdb_printf("tag = %d pid = %ld\n",
++ sc->tag, sc->pid);
++
++out:
++ if (sc)
++ kfree(sc);
++ return diag;
++}
++
++static int __init kdbm_vm_init(void)
++{
++ kdb_register("vm", kdbm_vm, "[-v] <vaddr>", "Display vm_area_struct", 0);
++ kdb_register("vmp", kdbm_vm, "[-v] <pid>", "Display all vm_area_struct for <pid>", 0);
++ kdb_register("pte", kdbm_pte, "( -m <mm> | -p <pid> ) <vaddr> [<nbytes>]", "Display pte_t for mm_struct or pid", 0);
++ kdb_register("rpte", kdbm_rpte, "( -m <mm> | -p <pid> ) <pfn> [<npages>]", "Find pte_t containing pfn for mm_struct or pid", 0);
++ kdb_register("dentry", kdbm_dentry, "<dentry>", "Display interesting dentry stuff", 0);
++ kdb_register("kobject", kdbm_kobject, "<kobject>", "Display interesting kobject stuff", 0);
++ kdb_register("filp", kdbm_filp, "<filp>", "Display interesting filp stuff", 0);
++ kdb_register("fl", kdbm_fl, "<fl>", "Display interesting file_lock stuff", 0);
++ kdb_register("sh", kdbm_sh, "<vaddr>", "Show scsi_host", 0);
++ kdb_register("sd", kdbm_sd, "<vaddr>", "Show scsi_device", 0);
++ kdb_register("sc", kdbm_sc, "<vaddr>", "Show scsi_cmnd", 0);
++
++ return 0;
++}
++
++static void __exit kdbm_vm_exit(void)
++{
++ kdb_unregister("vm");
++ kdb_unregister("vmp");
++ kdb_unregister("pte");
++ kdb_unregister("rpte");
++ kdb_unregister("dentry");
++ kdb_unregister("kobject");
++ kdb_unregister("filp");
++ kdb_unregister("fl");
++ kdb_unregister("sh");
++ kdb_unregister("sd");
++ kdb_unregister("sc");
++}
++
++module_init(kdbm_vm_init)
++module_exit(kdbm_vm_exit)
+diff -Nurp linux-2.6.22-590/kdb/modules/kdbm_x86.c linux-2.6.22-600/kdb/modules/kdbm_x86.c
+--- linux-2.6.22-590/kdb/modules/kdbm_x86.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/modules/kdbm_x86.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,1096 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Author: Vamsi Krishna S. <vamsi_krishna@in.ibm.com>
++ * (C) 2003 IBM Corporation.
++ * 2006-10-10 Keith Owens
++ * Reworked to include x86_64 support
++ * Copyright (c) 2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++#include <linux/interrupt.h>
++#include <linux/types.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++
++#include <asm/processor.h>
++#include <asm/uaccess.h>
++#include <asm/desc.h>
++#include <asm/debugreg.h>
++#if 0
++#include <asm/pgtable.h>
++#endif
++
++MODULE_AUTHOR("Vamsi Krishna S./IBM");
++MODULE_DESCRIPTION("x86 specific information (gdt/idt/ldt/page tables)");
++MODULE_LICENSE("GPL");
++
++/* Isolate as many of the i386/x86_64 differences as possible in one spot */
++
++#ifdef CONFIG_X86_64
++
++#define KDB_X86_64 1
++#define MOVLQ "movq"
++
++typedef struct desc_struct kdb_desc_t;
++typedef struct gate_struct kdb_gate_desc_t;
++
++#define KDB_SYS_DESC_OFFSET(d) ((unsigned long)d->offset_high << 32 | d->offset_middle << 16 | d->offset_low)
++#define KDB_SYS_DESC_CALLG_COUNT(d) 0
++
++#else /* !CONFIG_X86_64 */
++
++#define KDB_X86_64 0
++#define desc_ptr Xgt_desc_struct
++#define MOVLQ "movl"
++
++/* i386 has no detailed mapping for the 8 byte segment descriptor, copy the
++ * x86_64 one and merge the l and avl bits.
++ */
++struct kdb_desc {
++ u16 limit0;
++ u16 base0;
++ unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
++ unsigned limit : 4, avl : 2, d : 1, g : 1, base2 : 8;
++} __attribute__((packed));
++typedef struct kdb_desc kdb_desc_t;
++
++/* i386 has no detailed mapping for the 8 byte gate descriptor, base it on the
++ * x86_64 one.
++ */
++struct kdb_gate_desc {
++ u16 offset_low;
++ u16 segment;
++ unsigned res : 8, type : 4, s : 1, dpl : 2, p : 1;
++ u16 offset_middle;
++} __attribute__((packed));
++typedef struct kdb_gate_desc kdb_gate_desc_t;
++
++#define KDB_SYS_DESC_OFFSET(d) ((unsigned long)(d->offset_middle << 16 | d->offset_low))
++#define KDB_SYS_DESC_CALLG_COUNT(d) ((unsigned int)(d->res & 0x0F))
++
++#endif /* CONFIG_X86_64 */
++
++#define KDB_SEL_MAX 0x2000
++#define KDB_IDT_MAX 0x100
++#define KDB_SYS_DESC_TYPE_TSS16 0x01
++#define KDB_SYS_DESC_TYPE_LDT 0x02
++#define KDB_SYS_DESC_TYPE_TSSB16 0x03
++#define KDB_SYS_DESC_TYPE_CALLG16 0x04
++#define KDB_SYS_DESC_TYPE_TASKG 0x05
++#define KDB_SYS_DESC_TYPE_INTG16 0x06
++#define KDB_SYS_DESC_TYPE_TRAP16 0x07
++
++#define KDB_SYS_DESC_TYPE_TSS 0x09
++#define KDB_SYS_DESC_TYPE_TSSB 0x0b
++#define KDB_SYS_DESC_TYPE_CALLG 0x0c
++#define KDB_SYS_DESC_TYPE_INTG 0x0e
++#define KDB_SYS_DESC_TYPE_TRAPG 0x0f
++
++#define KDB_SEG_DESC_TYPE_CODE 0x08
++#define KDB_SEG_DESC_TYPE_CODE_R 0x02
++#define KDB_SEG_DESC_TYPE_DATA_W 0x02
++#define KDB_SEG_DESC_TYPE_CODE_C 0x02 /* conforming */
++#define KDB_SEG_DESC_TYPE_DATA_D 0x02 /* expand-down */
++#define KDB_SEG_DESC_TYPE_A 0x01 /* accessed */
++
++#define _LIMIT(d) ((unsigned long)((d)->limit << 16 | (d)->limit0))
++#define KDB_SEG_DESC_LIMIT(d) ((d)->g ? ((_LIMIT(d)+1) << 12) -1 : _LIMIT(d))
++
++static unsigned long kdb_seg_desc_base(kdb_desc_t *d)
++{
++ unsigned long base = d->base2 << 24 | d->base1 << 16 | d->base0;
++#ifdef CONFIG_X86_64
++ switch (d->type) {
++ case KDB_SYS_DESC_TYPE_TSS:
++ case KDB_SYS_DESC_TYPE_TSSB:
++ case KDB_SYS_DESC_TYPE_LDT:
++ base += (unsigned long)(((struct ldttss_desc *)d)->base3) << 32;
++ break;
++ }
++#endif
++ return base;
++}
++
++/* helper functions to display system registers in verbose mode */
++static void display_gdtr(void)
++{
++ struct desc_ptr gdtr;
++
++ __asm__ __volatile__ ("sgdt %0\n\t" : "=m"(gdtr));
++ kdb_printf("gdtr.address = " kdb_machreg_fmt0 ", gdtr.size = 0x%x\n",
++ gdtr.address, gdtr.size);
++
++ return;
++}
++
++static void display_ldtr(void)
++{
++ struct desc_ptr gdtr;
++ unsigned long ldtr;
++
++ __asm__ __volatile__ ("sgdt %0\n\t" : "=m"(gdtr));
++ __asm__ __volatile__ ("sldt %0\n\t" : "=m"(ldtr));
++ ldtr &= 0xfff8; /* extract the index */
++
++ kdb_printf("ldtr = " kdb_machreg_fmt0 " ", ldtr);
++
++ if (ldtr < gdtr.size) {
++ kdb_desc_t *ldt_desc =
++ (kdb_desc_t *)(gdtr.address + ldtr);
++ kdb_printf("base=" kdb_machreg_fmt0
++ ", limit=" kdb_machreg_fmt "\n",
++ kdb_seg_desc_base(ldt_desc),
++ KDB_SEG_DESC_LIMIT(ldt_desc));
++ } else {
++ kdb_printf("invalid\n");
++ }
++
++ return;
++}
++
++static void display_idtr(void)
++{
++ struct desc_ptr idtr;
++ __asm__ __volatile__ ("sidt %0\n\t" : "=m"(idtr));
++ kdb_printf("idtr.address = " kdb_machreg_fmt0 ", idtr.size = 0x%x\n",
++ idtr.address, idtr.size);
++ return;
++}
++
++static const char *cr0_flags[] = {
++ "pe", "mp", "em", "ts", "et", "ne", NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ "wp", NULL, "am", NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, "nw", "cd", "pg"};
++
++static void display_cr0(void)
++{
++ kdb_machreg_t cr0;
++ int i;
++ __asm__ (MOVLQ " %%cr0,%0\n\t":"=r"(cr0));
++ kdb_printf("cr0 = " kdb_machreg_fmt0, cr0);
++ for (i = 0; i < ARRAY_SIZE(cr0_flags); i++) {
++ if (test_bit(i, &cr0) && cr0_flags[i])
++ kdb_printf(" %s", cr0_flags[i]);
++ }
++ kdb_printf("\n");
++ return;
++}
++
++static void display_cr3(void)
++{
++ kdb_machreg_t cr3;
++ __asm__ (MOVLQ " %%cr3,%0\n\t":"=r"(cr3));
++ kdb_printf("cr3 = " kdb_machreg_fmt0 " ", cr3);
++ if (cr3 & 0x08)
++ kdb_printf("pwt ");
++ if (cr3 & 0x10)
++ kdb_printf("pcd ");
++ kdb_printf("%s=" kdb_machreg_fmt0 "\n",
++ KDB_X86_64 ? "pml4" : "pgdir", cr3 & PAGE_MASK);
++ return;
++}
++
++static const char *cr4_flags[] = {
++ "vme", "pvi", "tsd", "de",
++ "pse", "pae", "mce", "pge",
++ "pce", "osfxsr", "osxmmexcpt"};
++
++static void display_cr4(void)
++{
++ kdb_machreg_t cr4;
++ int i;
++ __asm__ (MOVLQ " %%cr4,%0\n\t":"=r"(cr4));
++ kdb_printf("cr4 = " kdb_machreg_fmt0, cr4);
++ for (i = 0; i < ARRAY_SIZE(cr4_flags); i++) {
++ if (test_bit(i, &cr4))
++ kdb_printf(" %s", cr4_flags[i]);
++ }
++ kdb_printf("\n");
++ return;
++}
++
++static void display_cr8(void)
++{
++#ifdef CONFIG_X86_64
++ kdb_machreg_t cr8;
++ __asm__ (MOVLQ " %%cr8,%0\n\t":"=r"(cr8));
++ kdb_printf("cr8 = " kdb_machreg_fmt0 "\n", cr8);
++ return;
++#endif /* CONFIG_X86_64 */
++}
++
++static char *dr_type_name[] = { "exec", "write", "io", "rw" };
++
++static void display_dr_status(int nr, int enabled, int local, int len, int type)
++{
++ if (!enabled) {
++ kdb_printf("\tdebug register %d: not enabled\n", nr);
++ return;
++ }
++
++ kdb_printf(" debug register %d: %s, len = %d, type = %s\n",
++ nr,
++ local? " local":"global",
++ len,
++ dr_type_name[type]);
++}
++
++static void display_dr(void)
++{
++ kdb_machreg_t dr0, dr1, dr2, dr3, dr6, dr7;
++ int dbnr, set;
++
++ __asm__ (MOVLQ " %%db0,%0\n\t":"=r"(dr0));
++ __asm__ (MOVLQ " %%db1,%0\n\t":"=r"(dr1));
++ __asm__ (MOVLQ " %%db2,%0\n\t":"=r"(dr2));
++ __asm__ (MOVLQ " %%db3,%0\n\t":"=r"(dr3));
++ __asm__ (MOVLQ " %%db6,%0\n\t":"=r"(dr6));
++ __asm__ (MOVLQ " %%db7,%0\n\t":"=r"(dr7));
++
++ kdb_printf("dr0 = " kdb_machreg_fmt0 " dr1 = " kdb_machreg_fmt0
++ " dr2 = " kdb_machreg_fmt0 " dr3 = " kdb_machreg_fmt0 "\n",
++ dr0, dr1, dr2, dr3);
++ kdb_printf("dr6 = " kdb_machreg_fmt0 " ", dr6);
++ dbnr = dr6 & DR6_DR_MASK;
++ if (dbnr) {
++ int nr;
++ switch(dbnr) {
++ case 1:
++ nr = 0; break;
++ case 2:
++ nr = 1; break;
++ case 4:
++ nr = 2; break;
++ default:
++ nr = 3; break;
++ }
++ kdb_printf("debug register hit = %d", nr);
++ } else if (dr6 & DR_STEP) {
++ kdb_printf("single step");
++ } else if (dr6 & DR_SWITCH) {
++ kdb_printf("task switch");
++ }
++ kdb_printf("\n");
++
++ kdb_printf("dr7 = " kdb_machreg_fmt0 "\n", dr7);
++ set = DR7_L0(dr7) || DR7_G0(dr7);
++ display_dr_status(0, set, DR7_L0(dr7), DR7_LEN0(dr7), DR7_RW0(dr7));
++ set = DR7_L1(dr7) || DR7_G1(dr7);
++ display_dr_status(1, set, DR7_L1(dr7), DR7_LEN1(dr7), DR7_RW1(dr7));
++ set = DR7_L2(dr7) || DR7_G2(dr7);
++ display_dr_status(2, set, DR7_L2(dr7), DR7_LEN2(dr7), DR7_RW2(dr7));
++ set = DR7_L3(dr7) || DR7_G3(dr7);
++ display_dr_status(3, set, DR7_L3(dr7), DR7_LEN3(dr7), DR7_RW3(dr7));
++}
++
++static char *set_eflags[] = {
++ "carry", NULL, "parity", NULL, "adjust", NULL, "zero", "sign",
++ "trace", "intr-on", "dir", "overflow", NULL, NULL, "nestedtask", NULL,
++ "resume", "vm", "align", "vif", "vip", "id"};
++
++static void display_eflags(unsigned long ef)
++{
++ int i, iopl;
++ kdb_printf("eflags = " kdb_machreg_fmt0 " ", ef);
++ for (i = 0; i < ARRAY_SIZE(set_eflags); i++) {
++ if (test_bit(i, &ef) && set_eflags[i])
++ kdb_printf("%s ", set_eflags[i]);
++ }
++
++ iopl = (ef & 0x00003000) >> 12;
++ kdb_printf("iopl=%d\n", iopl);
++ return;
++}
++
++static void display_tss(struct tss_struct *t)
++{
++#ifdef CONFIG_X86_64
++ int i;
++ kdb_printf(" rsp0 = 0x%016Lx, rsp1 = 0x%016Lx\n",
++ t->rsp0, t->rsp1);
++ kdb_printf(" rsp2 = 0x%016Lx\n", t->rsp2);
++ for (i = 0; i < ARRAY_SIZE(t->ist); ++i)
++ kdb_printf(" ist[%d] = 0x%016Lx\n",
++ i, t->ist[i]);
++ kdb_printf(" iomap = 0x%04x\n", t->io_bitmap_base);
++#else /* !CONFIG_X86_64 */
++ kdb_printf(" cs = %04x, eip = " kdb_machreg_fmt0 "\n",
++ t->x86_tss.cs, t->x86_tss.eip);
++ kdb_printf(" ss = %04x, esp = " kdb_machreg_fmt0 "\n",
++ t->x86_tss.ss, t->x86_tss.esp);
++ kdb_printf(" ss0 = %04x, esp0 = " kdb_machreg_fmt0 "\n",
++ t->x86_tss.ss0, t->x86_tss.esp0);
++ kdb_printf(" ss1 = %04x, esp1 = " kdb_machreg_fmt0 "\n",
++ t->x86_tss.ss1, t->x86_tss.esp1);
++ kdb_printf(" ss2 = %04x, esp2 = " kdb_machreg_fmt0 "\n",
++ t->x86_tss.ss2, t->x86_tss.esp2);
++ kdb_printf(" ldt = %04x, cr3 = " kdb_machreg_fmt0 "\n",
++ t->x86_tss.ldt, t->x86_tss.__cr3);
++ kdb_printf(" ds = %04x, es = %04x fs = %04x gs = %04x\n",
++ t->x86_tss.ds, t->x86_tss.es, t->x86_tss.fs, t->x86_tss.gs);
++ kdb_printf(" eax = " kdb_machreg_fmt0 ", ebx = " kdb_machreg_fmt0
++ " ecx = " kdb_machreg_fmt0 " edx = " kdb_machreg_fmt0 "\n",
++ t->x86_tss.eax, t->x86_tss.ebx, t->x86_tss.ecx, t->x86_tss.edx);
++ kdb_printf(" esi = " kdb_machreg_fmt0 ", edi = " kdb_machreg_fmt0
++ " ebp = " kdb_machreg_fmt0 "\n",
++ t->x86_tss.esi, t->x86_tss.edi, t->x86_tss.ebp);
++ kdb_printf(" trace = %d, iomap = 0x%04x\n", t->x86_tss.trace, t->x86_tss.io_bitmap_base);
++#endif /* CONFIG_X86_64 */
++}
++
++static char *gate_desc_types[] = {
++#ifdef CONFIG_X86_64
++ "reserved-0", "reserved-1", "ldt", "reserved-3",
++ "reserved-4", "reserved-5", "reserved-6", "reserved-7",
++ "reserved-8", "tss-avlb", "reserved-10", "tss-busy",
++ "callgate", "reserved-13", "intgate", "trapgate",
++#else /* CONFIG_X86_64 */
++ "reserved-0", "tss16-avlb", "ldt", "tss16-busy",
++ "callgate16", "taskgate", "intgate16", "trapgate16",
++ "reserved-8", "tss-avlb", "reserved-10", "tss-busy",
++ "callgate", "reserved-13", "intgate", "trapgate",
++#endif /* CONFIG_X86_64 */
++};
++
++static void
++display_gate_desc(kdb_gate_desc_t *d)
++{
++ kdb_printf("%-11s ", gate_desc_types[d->type]);
++
++ switch(d->type) {
++ case KDB_SYS_DESC_TYPE_LDT:
++ kdb_printf("base=");
++ kdb_symbol_print(kdb_seg_desc_base((kdb_desc_t *)d), NULL,
++ KDB_SP_DEFAULT);
++ kdb_printf(" limit=" kdb_machreg_fmt " dpl=%d\n",
++ KDB_SEG_DESC_LIMIT((kdb_desc_t *)d), d->dpl);
++ break;
++ case KDB_SYS_DESC_TYPE_TSS:
++ case KDB_SYS_DESC_TYPE_TSS16:
++ case KDB_SYS_DESC_TYPE_TSSB:
++ case KDB_SYS_DESC_TYPE_TSSB16:
++ {
++ struct tss_struct *tss =
++ (struct tss_struct *)
++ kdb_seg_desc_base((kdb_desc_t *)d);
++ kdb_printf("base=");
++ kdb_symbol_print((unsigned long)tss, NULL, KDB_SP_DEFAULT);
++ kdb_printf(" limit=" kdb_machreg_fmt " dpl=%d\n",
++ KDB_SEG_DESC_LIMIT((kdb_desc_t *)d), d->dpl);
++ display_tss(tss);
++ break;
++ }
++ case KDB_SYS_DESC_TYPE_CALLG16:
++ kdb_printf("segment=0x%4.4x off=", d->segment);
++ kdb_symbol_print(KDB_SYS_DESC_OFFSET(d), NULL, KDB_SP_DEFAULT);
++ kdb_printf(" dpl=%d wc=%d\n",
++ d->dpl, KDB_SYS_DESC_CALLG_COUNT(d));
++ break;
++ case KDB_SYS_DESC_TYPE_CALLG:
++ kdb_printf("segment=0x%4.4x off=", d->segment);
++ kdb_symbol_print(KDB_SYS_DESC_OFFSET(d), NULL, KDB_SP_DEFAULT);
++ kdb_printf(" dpl=%d\n", d->dpl);
++ break;
++ default:
++ kdb_printf("segment=0x%4.4x off=", d->segment);
++ if (KDB_SYS_DESC_OFFSET(d))
++ kdb_symbol_print(KDB_SYS_DESC_OFFSET(d), NULL,
++ KDB_SP_DEFAULT);
++ else
++ kdb_printf(kdb_machreg_fmt0, KDB_SYS_DESC_OFFSET(d));
++ kdb_printf(" dpl=%d", d->dpl);
++#ifdef CONFIG_X86_64
++ if (d->ist)
++ kdb_printf(" ist=%d", d->ist);
++#endif /* CONFIG_X86_64 */
++ kdb_printf("\n");
++ break;
++ }
++}
++
++static void
++display_seg_desc(kdb_desc_t *d)
++{
++ unsigned char type = d->type;
++
++ if (type & KDB_SEG_DESC_TYPE_CODE) {
++ kdb_printf("%-11s base=" kdb_machreg_fmt0 " limit="
++ kdb_machreg_fmt " dpl=%d %c%c%c %s %s %s \n",
++ "code",
++ kdb_seg_desc_base(d), KDB_SEG_DESC_LIMIT(d),
++ d->dpl,
++ (type & KDB_SEG_DESC_TYPE_CODE_R)?'r':'-',
++ '-', 'x',
++#ifdef CONFIG_X86_64
++ d->l ? "64b" : d->d ? "32b" : "16b",
++#else /* !CONFIG_X86_64 */
++ d->d ? "32b" : "16b",
++#endif /* CONFIG_X86_64 */
++ (type & KDB_SEG_DESC_TYPE_A)?"ac":"",
++ (type & KDB_SEG_DESC_TYPE_CODE_C)?"conf":"");
++ } else {
++ kdb_printf("%-11s base=" kdb_machreg_fmt0 " limit="
++ kdb_machreg_fmt " dpl=%d %c%c%c %s %s %s \n",
++ "data",
++ kdb_seg_desc_base(d), KDB_SEG_DESC_LIMIT(d),
++ d->dpl,
++ 'r',
++ (type & KDB_SEG_DESC_TYPE_DATA_W)?'w':'-',
++ '-',
++ d->d ? "32b" : "16b",
++ (type & KDB_SEG_DESC_TYPE_A)?"ac":"",
++ (type & KDB_SEG_DESC_TYPE_DATA_D)?"down":"");
++ }
++}
++
++static int
++kdb_parse_two_numbers(int argc, const char **argv, int *sel, int *count,
++ int *last_sel, int *last_count)
++{
++ int diag;
++
++ if (argc > 2)
++ return KDB_ARGCOUNT;
++
++ kdbgetintenv("MDCOUNT", count);
++
++ if (argc == 0) {
++ *sel = *last_sel;
++ if (*last_count)
++ *count = *last_count;
++ } else {
++ unsigned long val;
++
++ if (argc >= 1) {
++ diag = kdbgetularg(argv[1], &val);
++ if (diag)
++ return diag;
++ *sel = val;
++ }
++ if (argc >= 2) {
++ diag = kdbgetularg(argv[2], &val);
++ if (diag)
++ return diag;
++ *count = (int) val;
++ *last_count = (int) val;
++ } else if (*last_count) {
++ *count = *last_count;
++ }
++ }
++ return 0;
++}
++
++/*
++ * kdb_gdt
++ *
++ * This function implements the 'gdt' command.
++ *
++ * gdt [<selector> [<line count>]]
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++static int
++kdb_gdt(int argc, const char **argv)
++{
++ int sel = 0;
++ struct desc_ptr gdtr;
++ int diag, count = 8;
++ kdb_desc_t *gdt;
++ unsigned int max_sel;
++ static int last_sel = 0, last_count = 0;
++
++ diag = kdb_parse_two_numbers(argc, argv, &sel, &count,
++ &last_sel, &last_count);
++ if (diag)
++ return diag;
++
++ __asm__ __volatile__ ("sgdt %0\n\t" : "=m"(gdtr));
++ gdt = (kdb_desc_t *) gdtr.address;
++
++ max_sel = (gdtr.size + 1) / sizeof(kdb_desc_t);
++ if (sel >= max_sel) {
++ kdb_printf("Maximum selector (%d) reached\n", max_sel);
++ return 0;
++ }
++
++ if (sel + count > max_sel)
++ count = max_sel - sel;
++
++ while (count--) {
++ kdb_desc_t *d = &gdt[sel];
++ kdb_printf("0x%4.4x ", sel++);
++
++ if (!d->p) {
++ kdb_printf("not present\n");
++ continue;
++ }
++ if (d->s) {
++ display_seg_desc(d);
++ } else {
++ display_gate_desc((kdb_gate_desc_t *)d);
++ if (KDB_X86_64 && count) {
++ ++sel; /* this descriptor occupies two slots */
++ --count;
++ }
++ }
++ }
++
++ last_sel = sel;
++ return 0;
++}
++
++/*
++ * kdb_ldt
++ *
++ * This function implements the 'ldt' command.
++ *
++ * ldt [<selector> [<line count>]]
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++static int
++kdb_ldt(int argc, const char **argv)
++{
++ int sel = 0;
++ struct desc_ptr gdtr;
++ unsigned long ldtr = 0;
++ int diag, count = 8;
++ kdb_desc_t *ldt, *ldt_desc;
++ unsigned int max_sel;
++ static int last_sel = 0, last_count = 0;
++
++ diag = kdb_parse_two_numbers(argc, argv, &sel, &count,
++ &last_sel, &last_count);
++ if (diag)
++ return diag;
++
++ if (strcmp(argv[0], "ldtp") == 0) {
++ kdb_printf("pid=%d, process=%s\n",
++ kdb_current_task->pid, kdb_current_task->comm);
++ if (!kdb_current_task->mm ||
++ !kdb_current_task->mm->context.ldt) {
++ kdb_printf("no special LDT for this process\n");
++ return 0;
++ }
++ ldt = kdb_current_task->mm->context.ldt;
++ max_sel = kdb_current_task->mm->context.size;
++ } else {
++
++ /* sldt gives the GDT selector for the segment containing LDT */
++ __asm__ __volatile__ ("sgdt %0\n\t" : "=m"(gdtr));
++ __asm__ __volatile__ ("sldt %0\n\t" : "=m"(ldtr));
++ ldtr &= 0xfff8; /* extract the index */
++
++ if (ldtr > gdtr.size+1) {
++ kdb_printf("invalid ldtr\n");
++ return 0;
++ }
++
++ ldt_desc = (kdb_desc_t *)(gdtr.address + ldtr);
++ ldt = (kdb_desc_t *)kdb_seg_desc_base(ldt_desc);
++ max_sel = (KDB_SEG_DESC_LIMIT(ldt_desc)+1) / sizeof(kdb_desc_t);
++ }
++
++ if (sel >= max_sel) {
++ kdb_printf("Maximum selector (%d) reached\n", max_sel);
++ return 0;
++ }
++
++ if (sel + count > max_sel)
++ count = max_sel - sel;
++
++ while (count--) {
++ kdb_desc_t *d = &ldt[sel];
++ kdb_printf("0x%4.4x ", sel++);
++
++ if (!d->p) {
++ kdb_printf("not present\n");
++ continue;
++ }
++ if (d->s) {
++ display_seg_desc(d);
++ } else {
++ display_gate_desc((kdb_gate_desc_t *)d);
++ if (KDB_X86_64 && count) {
++ ++sel; /* this descriptor occupies two slots */
++ --count;
++ }
++ }
++ }
++
++ last_sel = sel;
++ return 0;
++}
++
++/*
++ * kdb_idt
++ *
++ * This function implements the 'idt' command.
++ *
++ * idt [<vector> [<line count>]]
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++static int
++kdb_idt(int argc, const char **argv)
++{
++ int vec = 0;
++ struct desc_ptr idtr;
++ int diag, count = 8;
++ kdb_gate_desc_t *idt;
++ unsigned int max_entries;
++ static int last_vec = 0, last_count = 0;
++
++ diag = kdb_parse_two_numbers(argc, argv, &vec, &count,
++ &last_vec, &last_count);
++ if (diag)
++ return diag;
++
++ __asm__ __volatile__ ("sidt %0\n\t" : "=m"(idtr));
++ idt = (kdb_gate_desc_t *)idtr.address;
++
++ max_entries = (idtr.size+1) / sizeof(kdb_gate_desc_t);
++ if (vec >= max_entries) {
++ kdb_printf("Maximum vector (%d) reached\n", max_entries);
++ return 0;
++ }
++
++ if (vec + count > max_entries)
++ count = max_entries - vec;
++
++ while (count--) {
++ kdb_gate_desc_t *d = &idt[vec];
++ kdb_printf("0x%4.4x ", vec++);
++ if (!d->p) {
++ kdb_printf("not present\n");
++ continue;
++ }
++#ifndef CONFIG_X86_64
++ if (d->s) {
++ kdb_printf("invalid\n");
++ continue;
++ }
++#endif /* CONFIG_X86_64 */
++ display_gate_desc(d);
++ }
++
++ last_vec = vec;
++
++ return 0;
++}
++
++#define _PAGE_PSE 0x080
++
++#if 0
++static int
++get_pagetables(unsigned long addr, pgd_t **pgdir, pmd_t **pgmiddle, pte_t **pte)
++{
++ pgd_t *d;
++ pmd_t *m;
++ pte_t *t;
++
++ if (addr > PAGE_OFFSET) {
++ d = pgd_offset_k(addr);
++ } else {
++ kdb_printf("pid=%d, process=%s\n", kdb_current_task->pid, kdb_current_task->comm);
++ d = pgd_offset(kdb_current_task->mm, addr);
++ }
++
++ if (pgd_none(*d) || pgd_bad(*d)) {
++ *pgdir = NULL;
++ *pgmiddle = NULL;
++ *pte = NULL;
++ return 0;
++ } else {
++ *pgdir = d;
++ }
++
++ /* if _PAGE_PSE is set, pgdir points directly to the page. */
++ if (pgd_val(*d) & _PAGE_PSE) {
++ *pgmiddle = NULL;
++ *pte = NULL;
++ return 0;
++ }
++
++ m = pmd_offset(d, addr);
++ if (pmd_none(*m) || pmd_bad(*m)) {
++ *pgmiddle = NULL;
++ *pte = NULL;
++ return 0;
++ } else {
++ *pgmiddle = m;
++ }
++
++ t = pte_offset(m, addr);
++ if (pte_none(*t)) {
++ *pte = NULL;
++ return 0;
++ } else {
++ *pte = t;
++ }
++ kdb_printf("\naddr=%08lx, pgd=%08lx, pmd=%08lx, pte=%08lx\n",
++ addr,
++ (unsigned long) pgd_val(*d),
++ (unsigned long) pmd_val(*m),
++ (unsigned long) pte_val(*t));
++ return 0;
++}
++#endif
++
++#define FORMAT_PGDIR(entry) \
++ kdb_printf("frame=%05lx %c %s %c %c %c %s %c %s %s \n",\
++ (entry >> PAGE_SHIFT), \
++ (entry & _PAGE_PRESENT)?'p':'n', \
++ (entry & _PAGE_RW)?"rw":"ro", \
++ (entry & _PAGE_USER)?'u':'s', \
++ (entry & _PAGE_ACCESSED)?'a':' ', \
++ ' ', \
++ (entry & _PAGE_PSE)?"4M":"4K", \
++ (entry & _PAGE_GLOBAL)?'g':' ', \
++ (entry & _PAGE_PWT)?"wt":"wb", \
++ (entry & _PAGE_PCD)?"cd":" ");
++
++#define FORMAT_PTE(p, entry) \
++ kdb_printf("frame=%05lx %c%c%c %c %c %c %s %c %s %s\n", \
++ (entry >> PAGE_SHIFT), \
++ (pte_read(p))? 'r':'-', \
++ (pte_write(p))? 'w':'-', \
++ (pte_exec(p))? 'x':'-', \
++ (pte_dirty(p))? 'd':' ', \
++ (pte_young(p))? 'a':' ', \
++ (entry & _PAGE_USER)? 'u':'s', \
++ " ", \
++ (entry & _PAGE_GLOBAL)? 'g':' ', \
++ (entry & _PAGE_PWT)? "wt":"wb", \
++ (entry & _PAGE_PCD)? "cd":" ");
++#if 0
++static int
++display_pgdir(unsigned long addr, pgd_t *pgdir, int count)
++{
++ unsigned long entry;
++ int i;
++ int index = pgdir - ((pgd_t *)(((unsigned long)pgdir) & PAGE_MASK));
++
++ count = min(count, PTRS_PER_PGD - index);
++ addr &= ~(PGDIR_SIZE-1);
++
++ for (i = 0; i < count; i++, pgdir++) {
++ entry = pgd_val(*pgdir);
++ kdb_printf("pgd: addr=%08lx ", addr);
++ if (pgd_none(*pgdir)) {
++ kdb_printf("pgdir not present\n");
++ } else {
++ FORMAT_PGDIR(entry);
++ }
++ addr += PGDIR_SIZE;
++ }
++ return i;
++}
++#endif
++
++#if 0 /* for now, let's not print pgmiddle. */
++static int
++display_pgmiddle(unsigned long addr, pmd_t *pgmiddle, int count)
++{
++ unsigned long entry;
++ int i;
++ int index = pgmiddle - ((pmd_t *)(((unsigned long)pgmiddle) & PAGE_MASK));
++
++ count = min(count, PTRS_PER_PMD - index);
++ addr &= ~(PMD_SIZE-1);
++
++ for (i = 0; i < count; i++, pgmiddle++) {
++ entry = pmd_val(*pgmiddle);
++ kdb_printf("pmd: addr=%08lx ", addr);
++ if (pmd_none(*pgmiddle)) {
++ kdb_printf("pgmiddle not present\n");
++ } else {
++ FORMAT_PGDIR(entry);
++ }
++ addr += PMD_SIZE;
++ }
++ return i;
++}
++#endif
++
++#if 0
++static int
++display_pte(unsigned long addr, pte_t *pte, int count)
++{
++ unsigned long entry;
++ int i;
++ int index = pte - ((pte_t *)(((unsigned long)pte) & PAGE_MASK));
++
++ count = min(count, PTRS_PER_PTE - index);
++ addr &= PAGE_MASK;
++
++ for (i = 0; i < count; i++, pte++) {
++ entry = pte_val(*pte);
++ kdb_printf("pte: addr=%08lx ", addr);
++ if (pte_none(*pte)) {
++ kdb_printf("pte not present\n");
++ } else if (!pte_present(*pte)) {
++ kdb_printf("page swapped out. swp_offset=%08lx ", SWP_OFFSET(pte_to_swp_entry(*pte)));
++ kdb_printf("swp_type=%8lx", SWP_TYPE(pte_to_swp_entry(*pte)));
++ } else {
++ FORMAT_PTE(*pte, entry);
++ }
++ addr += PAGE_SIZE;
++ }
++ return i;
++}
++
++
++/*
++ * kdb_pte
++ *
++ * This function implements the 'pte' command.
++ *
++ * pte <addr arg> [<line count>]
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ */
++static int
++kdb_pte(int argc, const char **argv)
++{
++ static unsigned long last_addr = 0, last_count = 0;
++ int count = 8;
++ unsigned long addr;
++ long offset = 0;
++ pgd_t *pgdir;
++ pmd_t *pgmiddle;
++ pte_t *pte;
++
++#ifdef CONFIG_X86_PAE
++ kdb_printf("This kernel is compiled with PAE support.");
++ return KDB_NOTIMP;
++#endif
++ kdbgetintenv("MDCOUNT", &count);
++
++ if (argc == 0) {
++ if (last_addr == 0)
++ return KDB_ARGCOUNT;
++ addr = last_addr;
++ if (last_count)
++ count = last_count;
++ } else {
++ kdb_machreg_t val;
++ int diag, nextarg = 1;
++ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++ if (diag)
++ return diag;
++ if (argc > nextarg+1)
++ return KDB_ARGCOUNT;
++
++ if (argc >= nextarg) {
++ diag = kdbgetularg(argv[nextarg], &val);
++ if (!diag) {
++ count = (int) val;
++ last_count = count;
++ } else if (last_count) {
++ count = last_count;
++ }
++ }
++ }
++
++ /*
++ * round off the addr to a page boundary.
++ */
++ addr &= PAGE_MASK;
++
++ get_pagetables(addr, &pgdir, &pgmiddle, &pte);
++
++ if (pgdir)
++ display_pgdir(addr, pgdir, 1);
++#if 0 /* for now, let's not print pgmiddle. */
++ if (pgmiddle)
++ display_pgmiddle(addr, pgmiddle, 1);
++#endif
++ if (pte) {
++ int displayed;
++ displayed = display_pte(addr, pte, count);
++ addr += (displayed << PAGE_SHIFT);
++ }
++ last_addr = addr;
++ return 0;
++}
++#else
++/*
++ * Todo - In 2.5 the pte_offset macro in asm/pgtable.h seems to be
++ * renamed to pte_offset_kernel.
++ */
++static int
++kdb_pte(int argc, const char **argv)
++{
++ kdb_printf("not supported.");
++ return KDB_NOTIMP;
++}
++#endif
++
++/*
++ * kdb_rdv
++ *
++ * This function implements the 'rdv' command.
++ * It displays all registers of the current processor
++ * including control registers in verbose mode.
++ *
++ * Inputs:
++ * argc argument count
++ * argv argument vector
++ * Outputs:
++ * None.
++ * Returns:
++ * zero for success, a kdb diagnostic if error
++ * Locking:
++ * none.
++ * Remarks:
++ * This should have been an option to rd command say "rd v",
++ * but it is here as it is a non-essential x86-only command,
++ * that need not clutter arch/i386/kdb/kdbasupport.c.
++ */
++static int
++kdb_rdv(int argc, const char **argv)
++{
++ struct pt_regs *regs = get_irq_regs();
++ kdba_dumpregs(regs, NULL, NULL);
++ kdb_printf("\n");
++ display_eflags(regs->eflags);
++ kdb_printf("\n");
++ display_gdtr();
++ display_idtr();
++ display_ldtr();
++ kdb_printf("\n");
++ display_cr0();
++ display_cr3();
++ display_cr4();
++ display_cr8();
++ kdb_printf("\n");
++ display_dr();
++ return 0;
++}
++
++static int
++kdb_rdmsr(int argc, const char **argv)
++{
++ unsigned long addr;
++ uint32_t l, h;
++ int diag;
++ struct cpuinfo_x86 *c = cpu_data + smp_processor_id();
++
++ if (argc != 1)
++ return KDB_ARGCOUNT;
++
++ if ((diag = kdbgetularg(argv[1], &addr)))
++ return diag;
++
++ if (!cpu_has(c, X86_FEATURE_MSR))
++ return KDB_NOTIMP;
++
++ kdb_printf("msr(0x%lx) = ", addr);
++ if ((diag = rdmsr_safe(addr, &l, &h))) {
++ kdb_printf("error %d\n", diag);
++ return KDB_BADINT;
++ } else {
++ kdb_printf("0x%08x_%08x\n", h, l);
++ }
++
++ return 0;
++}
++
++static int
++kdb_wrmsr(int argc, const char **argv)
++{
++ unsigned long addr;
++ unsigned long l, h;
++ int diag;
++ struct cpuinfo_x86 *c = cpu_data + smp_processor_id();
++
++ if (argc != 3)
++ return KDB_ARGCOUNT;
++
++ if ((diag = kdbgetularg(argv[1], &addr))
++ || (diag = kdbgetularg(argv[2], &h))
++ || (diag = kdbgetularg(argv[3], &l)))
++ return diag;
++
++ if (!cpu_has(c, X86_FEATURE_MSR))
++ return KDB_NOTIMP;
++
++ if ((diag = wrmsr_safe(addr, l, h))) {
++ kdb_printf("error %d\n", diag);
++ return KDB_BADINT;
++ }
++
++ return 0;
++}
++
++static int __init kdbm_x86_init(void)
++{
++ kdb_register("rdv", kdb_rdv, NULL, "Display registers in verbose mode", 0);
++ kdb_register_repeat("gdt", kdb_gdt, "<sel> [<count>]", "Display GDT", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("idt", kdb_idt, "<int> [<count>]", "Display IDT", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("ldt", kdb_ldt, "<sel> [<count>]", "Display LDT", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("ptex", kdb_pte, "<addr> [<count>]", "Display pagetables", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register_repeat("ldtp", kdb_ldt, "<sel> [<count>]", "Display Process LDT", 0, KDB_REPEAT_NO_ARGS);
++ kdb_register("rdmsr", kdb_rdmsr, "<maddr>", "Display Model Specific Register", 0);
++ kdb_register("wrmsr", kdb_wrmsr, "<maddr> <h> <l>", "Modify Model Specific Register", 0);
++ return 0;
++}
++
++static void __exit kdbm_x86_exit(void)
++{
++ kdb_unregister("rdv");
++ kdb_unregister("gdt");
++ kdb_unregister("ldt");
++ kdb_unregister("idt");
++ kdb_unregister("ptex");
++ kdb_unregister("ldtp");
++ kdb_unregister("rdmsr");
++ kdb_unregister("wrmsr");
++}
++
++module_init(kdbm_x86_init)
++module_exit(kdbm_x86_exit)
+diff -Nurp linux-2.6.22-590/kdb/modules/kdbm_xpc.c linux-2.6.22-600/kdb/modules/kdbm_xpc.c
+--- linux-2.6.22-590/kdb/modules/kdbm_xpc.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/modules/kdbm_xpc.c 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,1105 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (c) 2006 Silicon Graphics, Inc. All Rights Reserved.
++ */
++
++
++/*
++ * Cross Partition Communication (XPC) kdb support.
++ *
++ * This provides kdb commands for debugging XPC.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#include <asm/sn/sn_sal.h>
++#include <asm/sn/xpc.h>
++
++
++MODULE_AUTHOR("SGI");
++MODULE_DESCRIPTION("Debug XPC information");
++MODULE_LICENSE("GPL");
++
++
++static int
++kdbm_xpc_down(int argc, const char **argv)
++{
++ if (xpc_rsvd_page == NULL) {
++ kdb_printf("Reserved Page has not been initialized.\n");
++
++ } else if (xpc_kdebug_force_disengage()) {
++ kdb_printf("Unable to force XPC disengage.\n");
++ }
++ return 0;
++}
++
++
++static char *
++kdbm_xpc_get_ascii_reason_code(enum xpc_retval reason)
++{
++ switch (reason) {
++ case xpcSuccess: return "";
++ case xpcNotConnected: return "xpcNotConnected";
++ case xpcConnected: return "xpcConnected";
++ case xpcRETIRED1: return "xpcRETIRED1";
++ case xpcMsgReceived: return "xpcMsgReceived";
++ case xpcMsgDelivered: return "xpcMsgDelivered";
++ case xpcRETIRED2: return "xpcRETIRED2";
++ case xpcNoWait: return "xpcNoWait";
++ case xpcRetry: return "xpcRetry";
++ case xpcTimeout: return "xpcTimeout";
++ case xpcInterrupted: return "xpcInterrupted";
++ case xpcUnequalMsgSizes: return "xpcUnequalMsgSizes";
++ case xpcInvalidAddress: return "xpcInvalidAddress";
++ case xpcNoMemory: return "xpcNoMemory";
++ case xpcLackOfResources: return "xpcLackOfResources";
++ case xpcUnregistered: return "xpcUnregistered";
++ case xpcAlreadyRegistered: return "xpcAlreadyRegistered";
++ case xpcPartitionDown: return "xpcPartitionDown";
++ case xpcNotLoaded: return "xpcNotLoaded";
++ case xpcUnloading: return "xpcUnloading";
++ case xpcBadMagic: return "xpcBadMagic";
++ case xpcReactivating: return "xpcReactivating";
++ case xpcUnregistering: return "xpcUnregistering";
++ case xpcOtherUnregistering: return "xpcOtherUnregistering";
++ case xpcCloneKThread: return "xpcCloneKThread";
++ case xpcCloneKThreadFailed: return "xpcCloneKThreadFailed";
++ case xpcNoHeartbeat: return "xpcNoHeartbeat";
++ case xpcPioReadError: return "xpcPioReadError";
++ case xpcPhysAddrRegFailed: return "xpcPhysAddrRegFailed";
++ case xpcBteDirectoryError: return "xpcBteDirectoryError";
++ case xpcBtePoisonError: return "xpcBtePoisonError";
++ case xpcBteWriteError: return "xpcBteWriteError";
++ case xpcBteAccessError: return "xpcBteAccessError";
++ case xpcBtePWriteError: return "xpcBtePWriteError";
++ case xpcBtePReadError: return "xpcBtePReadError";
++ case xpcBteTimeOutError: return "xpcBteTimeOutError";
++ case xpcBteXtalkError: return "xpcBteXtalkError";
++ case xpcBteNotAvailable: return "xpcBteNotAvailable";
++ case xpcBteUnmappedError: return "xpcBteUnmappedError";
++ case xpcBadVersion: return "xpcBadVersion";
++ case xpcVarsNotSet: return "xpcVarsNotSet";
++ case xpcNoRsvdPageAddr: return "xpcNoRsvdPageAddr";
++ case xpcInvalidPartid: return "xpcInvalidPartid";
++ case xpcLocalPartid: return "xpcLocalPartid";
++ case xpcOtherGoingDown: return "xpcOtherGoingDown";
++ case xpcSystemGoingDown: return "xpcSystemGoingDown";
++ case xpcSystemHalt: return "xpcSystemHalt";
++ case xpcSystemReboot: return "xpcSystemReboot";
++ case xpcSystemPoweroff: return "xpcSystemPoweroff";
++ case xpcDisconnecting: return "xpcDisconnecting";
++ case xpcOpenCloseError: return "xpcOpenCloseError";
++ case xpcUnknownReason: return "xpcUnknownReason";
++ default: return "undefined reason code";
++ }
++}
++
++
++/*
++ * Display the reserved page used by XPC.
++ *
++ * xpcrp
++ */
++static int
++kdbm_xpc_rsvd_page(int argc, const char **argv)
++{
++ struct xpc_rsvd_page *rp = (struct xpc_rsvd_page *) xpc_rsvd_page;
++
++
++ if (argc > 0) {
++ return KDB_ARGCOUNT;
++ }
++
++ if (rp == NULL) {
++ kdb_printf("Reserved Page has not been initialized.\n");
++ return 0;
++ }
++
++ kdb_printf("struct xpc_rsvd_page @ (0x%p):\n", (void *) rp);
++ kdb_printf("\tSAL_signature=0x%lx\n", rp->SAL_signature);
++ kdb_printf("\tSAL_version=0x%lx\n", rp->SAL_version);
++ kdb_printf("\tpartid=%d\n", rp->partid);
++ kdb_printf("\tversion=0x%x %d.%d\n", rp->version,
++ XPC_VERSION_MAJOR(rp->version),
++ XPC_VERSION_MINOR(rp->version));
++ kdb_printf("\tvars_pa=0x%lx\n", rp->vars_pa);
++ kdb_printf("\tstamp=0x%lx:0x%lx\n",
++ rp->stamp.tv_sec, rp->stamp.tv_nsec);
++ kdb_printf("\tnasids_size=%ld\n", rp->nasids_size);
++
++ return 0;
++}
++
++
++static void
++kdbm_xpc_print_vars_part(struct xpc_vars_part *vars_part, partid_t partid)
++{
++ kdb_printf("struct xpc_vars_part @ (0x%p) [partid=%d]:\n",
++ (void *) vars_part, partid);
++ kdb_printf("\tmagic=0x%lx ", vars_part->magic);
++ if (vars_part->magic != 0) {
++ kdb_printf("%s", (char *) &vars_part->magic);
++ }
++ kdb_printf("\n");
++ kdb_printf("\tGPs_pa=0x%lx\n", vars_part->GPs_pa);
++ kdb_printf("\topenclose_args_pa=0x%lx\n",
++ vars_part->openclose_args_pa);
++ kdb_printf("\tIPI_amo_pa=0x%lx\n", vars_part->IPI_amo_pa);
++ kdb_printf("\tIPI_nasid=0x%x\n", vars_part->IPI_nasid);
++ kdb_printf("\tIPI_phys_cpuid=0x%x\n", vars_part->IPI_phys_cpuid);
++ kdb_printf("\tnchannels=%d\n", vars_part->nchannels);
++}
++
++
++/*
++ * Display XPC variables.
++ *
++ * xpcvars [ <partid> ]
++ *
++ * no partid - displays xpc_vars structure
++ * partid=0 - displays all initialized xpc_vars_part structures
++ * partid=i - displays xpc_vars_part structure for specified
++ * partition, if initialized
++ */
++static int
++kdbm_xpc_variables(int argc, const char **argv)
++{
++ int ret;
++ unsigned long ulong_partid;
++ partid_t partid;
++ struct xpc_vars_part *vars_part;
++
++
++ if (xpc_rsvd_page == NULL) {
++ kdb_printf("Reserved Page has not been initialized.\n");
++ return 0;
++ }
++ DBUG_ON(xpc_vars == NULL);
++
++ if (argc == 0) {
++
++ /* just display the xpc_vars structure */
++
++ kdb_printf("struct xpc_vars @ (0x%p):\n", (void *) xpc_vars);
++ kdb_printf("\tversion=0x%x %d.%d\n", xpc_vars->version,
++ XPC_VERSION_MAJOR(xpc_vars->version),
++ XPC_VERSION_MINOR(xpc_vars->version));
++ kdb_printf("\theartbeat=%ld\n", xpc_vars->heartbeat);
++ kdb_printf("\theartbeating_to_mask=0x%lx",
++ xpc_vars->heartbeating_to_mask);
++ for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
++ if (xpc_hb_allowed(partid, xpc_vars)) {
++ kdb_printf(" %d", partid);
++ }
++ }
++ kdb_printf("\n");
++ kdb_printf("\theartbeat_offline=0x%lx\n",
++ xpc_vars->heartbeat_offline);
++ kdb_printf("\tact_nasid=0x%x\n", xpc_vars->act_nasid);
++ kdb_printf("\tact_phys_cpuid=0x%x\n",
++ xpc_vars->act_phys_cpuid);
++ kdb_printf("\tvars_part_pa=0x%lx\n", xpc_vars->vars_part_pa);
++ kdb_printf("\tamos_page_pa=0x%lx\n", xpc_vars->amos_page_pa);
++ kdb_printf("\tamos_page=0x%p\n", (void *) xpc_vars->amos_page);
++ return 0;
++
++ } else if (argc != 1) {
++ return KDB_ARGCOUNT;
++ }
++
++ ret = kdbgetularg(argv[1], (unsigned long *) &ulong_partid);
++ if (ret) {
++ return ret;
++ }
++ partid = (partid_t) ulong_partid;
++ if (partid < 0 || partid >= XP_MAX_PARTITIONS) {
++ kdb_printf("invalid partid\n");
++ return KDB_BADINT;
++ }
++
++ vars_part = (struct xpc_vars_part *) __va(xpc_vars->vars_part_pa);
++ DBUG_ON(vars_part == NULL);
++
++ if (partid == 0) {
++
++ /* display all initialized xpc_vars_part structures */
++
++ for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
++ if (vars_part[partid].magic == 0) {
++ continue;
++ }
++ kdbm_xpc_print_vars_part(&vars_part[partid], partid);
++ }
++
++ } else {
++
++ /* display specified xpc_vars_part structure */
++
++ if (vars_part[partid].magic != 0) {
++ kdbm_xpc_print_vars_part(&vars_part[partid], partid);
++ } else {
++ kdb_printf("struct xpc_vars_part for partid %d not "
++ "initialized\n", partid);
++ }
++ }
++
++ return 0;
++}
++
++
++static void
++kdbm_xpc_print_engaged(char *string, u64 mask, int verbose)
++{
++ partid_t partid;
++
++
++ kdb_printf("%s=0x%lx", string, mask);
++
++ if (verbose) {
++ partid = 0;
++ while (mask != 0) {
++ if (mask & 1UL) {
++ kdb_printf(" %d", partid);
++ }
++ partid++;
++ mask >>= 1;
++ }
++ }
++ kdb_printf("\n");
++}
++
++
++/*
++ * Display XPC's 'engaged partitions' and 'disengage request' AMOs.
++ *
++ * xpcengaged [ -v ]
++ *
++ * -v - verbose mode, displays partition numbers.
++ */
++static int
++kdbm_xpc_engaged(int argc, const char **argv)
++{
++ int nextarg = 1;
++ int verbose = 0;
++ u64 mask;
++
++
++ if (argc > 1) {
++ return KDB_ARGCOUNT;
++ }
++ if (argc == 1) {
++ if (strcmp(argv[nextarg], "-v") != 0) {
++ return KDB_ARGCOUNT;
++ }
++ verbose = 1;
++ }
++
++ mask = xpc_partition_engaged(-1UL);
++ kdbm_xpc_print_engaged("engaged partitions", mask, verbose);
++
++ mask = xpc_partition_disengage_requested(-1UL);
++ kdbm_xpc_print_engaged("disengage request", mask, verbose);
++
++ return 0;
++}
++
++
++static void
++kdbm_xpc_print_IPI_flags_for_channel(u8 IPI_flags)
++{
++ if (IPI_flags & XPC_IPI_MSGREQUEST) kdb_printf(" MSGREQUEST");
++ if (IPI_flags & XPC_IPI_OPENREPLY) kdb_printf(" OPENREPLY");
++ if (IPI_flags & XPC_IPI_OPENREQUEST) kdb_printf(" OPENREQUEST");
++ if (IPI_flags & XPC_IPI_CLOSEREPLY) kdb_printf(" CLOSEREPLY");
++ if (IPI_flags & XPC_IPI_CLOSEREQUEST) kdb_printf(" CLOSEREQUEST");
++}
++
++
++static void
++kdbm_xpc_print_IPI_flags(u64 IPI_amo)
++{
++ int ch_number;
++ u8 IPI_flags;
++
++
++ for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
++
++ /* get the IPI flags for the specific channel */
++ IPI_flags = XPC_GET_IPI_FLAGS(IPI_amo, ch_number);
++ if (IPI_flags == 0) {
++ continue;
++ }
++
++ kdb_printf("\t channel_%d=0x%x", ch_number, IPI_flags);
++ kdbm_xpc_print_IPI_flags_for_channel(IPI_flags);
++ kdb_printf("\n");
++ }
++}
++
++
++static void
++kdbm_xpc_print_part(struct xpc_partition *part, partid_t partid)
++{
++ kdb_printf("xpc_partitions[partid=%d] (0x%p):\n", partid,
++ (void *) part);
++ kdb_printf("\tremote_rp_version=0x%x %d.%d\n", part->remote_rp_version,
++ XPC_VERSION_MAJOR(part->remote_rp_version),
++ XPC_VERSION_MINOR(part->remote_rp_version));
++ kdb_printf("\tremote_rp_stamp=0x%lx:0x%lx\n",
++ part->remote_rp_stamp.tv_sec, part->remote_rp_stamp.tv_nsec);
++ kdb_printf("\tremote_rp_pa=0x%lx\n", part->remote_rp_pa);
++ kdb_printf("\tremote_vars_pa=0x%lx\n", part->remote_vars_pa);
++ kdb_printf("\tremote_vars_part_pa=0x%lx\n", part->remote_vars_part_pa);
++ kdb_printf("\tlast_heartbeat=%ld\n", part->last_heartbeat);
++ kdb_printf("\tremote_amos_page_pa=0x%lx\n", part->remote_amos_page_pa);
++ kdb_printf("\tremote_act_nasid=0x%x\n", part->remote_act_nasid);
++ kdb_printf("\tremote_act_phys_cpuid=0x%x\n",
++ part->remote_act_phys_cpuid);
++ kdb_printf("\tact_IRQ_rcvd=%d\n", part->act_IRQ_rcvd);
++ kdb_printf("\tact_state=%d", part->act_state);
++ switch (part->act_state) {
++ case XPC_P_INACTIVE: kdb_printf(" INACTIVE\n"); break;
++ case XPC_P_ACTIVATION_REQ: kdb_printf(" ACTIVATION_REQ\n"); break;
++ case XPC_P_ACTIVATING: kdb_printf(" ACTIVATING\n"); break;
++ case XPC_P_ACTIVE: kdb_printf(" ACTIVE\n"); break;
++ case XPC_P_DEACTIVATING: kdb_printf(" DEACTIVATING\n"); break;
++ default: kdb_printf(" unknown\n");
++ }
++ kdb_printf("\tremote_vars_version=0x%x %d.%d\n",
++ part->remote_vars_version,
++ XPC_VERSION_MAJOR(part->remote_vars_version),
++ XPC_VERSION_MINOR(part->remote_vars_version));
++ kdb_printf("\treactivate_nasid=%d\n", part->reactivate_nasid);
++ kdb_printf("\treason=%d %s\n", part->reason,
++ kdbm_xpc_get_ascii_reason_code(part->reason));
++ kdb_printf("\treason_line=%d\n", part->reason_line);
++
++ kdb_printf("\tdisengage_request_timeout=0x%lx\n",
++ part->disengage_request_timeout);
++ kdb_printf("\t&disengage_request_timer=0x%p\n",
++ (void *) &part->disengage_request_timer);
++
++ kdb_printf("\tsetup_state=%d", part->setup_state);
++ switch (part->setup_state) {
++ case XPC_P_UNSET: kdb_printf(" UNSET\n"); break;
++ case XPC_P_SETUP: kdb_printf(" SETUP\n"); break;
++ case XPC_P_WTEARDOWN: kdb_printf(" WTEARDOWN\n"); break;
++ case XPC_P_TORNDOWN: kdb_printf(" TORNDOWN\n"); break;
++ default: kdb_printf(" unknown\n");
++ }
++ kdb_printf("\treferences=%d\n", atomic_read(&part->references));
++ kdb_printf("\tnchannels=%d\n", part->nchannels);
++ kdb_printf("\tnchannels_active=%d\n",
++ atomic_read(&part->nchannels_active));
++ kdb_printf("\tnchannels_engaged=%d\n",
++ atomic_read(&part->nchannels_engaged));
++ kdb_printf("\tchannels=0x%p\n", (void *) part->channels);
++ kdb_printf("\tlocal_GPs=0x%p\n", (void *) part->local_GPs);
++ kdb_printf("\tremote_GPs=0x%p\n", (void *) part->remote_GPs);
++ kdb_printf("\tremote_GPs_pa=0x%lx\n", part->remote_GPs_pa);
++ kdb_printf("\tlocal_openclose_args=0x%p\n",
++ (void *) part->local_openclose_args);
++ kdb_printf("\tremote_openclose_args=0x%p\n",
++ (void *) part->remote_openclose_args);
++ kdb_printf("\tremote_openclose_args_pa=0x%lx\n",
++ part->remote_openclose_args_pa);
++ kdb_printf("\tremote_IPI_nasid=0x%x\n", part->remote_IPI_nasid);
++ kdb_printf("\tremote_IPI_phys_cpuid=0x%x\n",
++ part->remote_IPI_phys_cpuid);
++ kdb_printf("\tremote_IPI_amo_va=0x%p\n",
++ (void *) part->remote_IPI_amo_va);
++ kdb_printf("\tlocal_IPI_amo_va=0x%p\n",
++ (void *) part->local_IPI_amo_va);
++ kdb_printf("\tlocal_IPI_amo=0x%lx\n", part->local_IPI_amo);
++ kdbm_xpc_print_IPI_flags(part->local_IPI_amo);
++ kdb_printf("\tIPI_owner=%s\n", part->IPI_owner);
++ kdb_printf("\t&dropped_IPI_timer=0x%p\n",
++ (void *) &part->dropped_IPI_timer);
++
++ kdb_printf("\tchannel_mgr_requests=%d\n", atomic_read(&part->
++ channel_mgr_requests));
++}
++
++
++/*
++ * Display XPC partitions.
++ *
++ * xpcpart [ <vaddr> | <partid> ]
++ */
++static int
++kdbm_xpc_partitions(int argc, const char **argv)
++{
++ int ret;
++ int nextarg = 1;
++ long offset = 0;
++ unsigned long addr;
++ struct xpc_partition *part;
++ partid_t partid;
++
++
++ if (argc > 1) {
++ return KDB_ARGCOUNT;
++
++ } else if (argc == 1) {
++ ret = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset,
++ NULL);
++ if (ret) {
++ return ret;
++ }
++ if (addr > 0 && addr < XP_MAX_PARTITIONS) {
++ partid = (partid_t) addr;
++ part = &xpc_partitions[partid];
++ } else {
++ part = (struct xpc_partition *) addr;
++ partid = part - &xpc_partitions[0];
++ if (partid <= 0 || partid >= XP_MAX_PARTITIONS ||
++ part != &xpc_partitions[partid]) {
++ kdb_printf("invalid partition entry address\n");
++ return KDB_BADADDR;
++ }
++ }
++ kdbm_xpc_print_part(part, partid);
++
++ } else {
++ for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
++ part = &xpc_partitions[partid];
++ if (part->setup_state == XPC_P_UNSET &&
++ part->reason == 0) {
++ continue;
++ }
++ kdbm_xpc_print_part(part, partid);
++ }
++ }
++ return 0;
++}
++
++
++static void
++kdbm_xpc_print_channel_flags(u32 flags)
++{
++ kdb_printf("\tflags=0x%x", flags);
++
++ if (flags & XPC_C_WDISCONNECT) kdb_printf(" WDISCONNECT");
++ if (flags & XPC_C_DISCONNECTINGCALLOUT_MADE) kdb_printf(" DISCONNECTINGCALLOUT_MADE");
++ if (flags & XPC_C_DISCONNECTINGCALLOUT) kdb_printf(" DISCONNECTINGCALLOUT");
++ if (flags & XPC_C_DISCONNECTING) kdb_printf(" DISCONNECTING");
++ if (flags & XPC_C_DISCONNECTED) kdb_printf(" DISCONNECTED");
++
++ if (flags & XPC_C_CLOSEREQUEST) kdb_printf(" CLOSEREQUEST");
++ if (flags & XPC_C_RCLOSEREQUEST) kdb_printf(" RCLOSEREQUEST");
++ if (flags & XPC_C_CLOSEREPLY) kdb_printf(" CLOSEREPLY");
++ if (flags & XPC_C_RCLOSEREPLY) kdb_printf(" RCLOSEREPLY");
++
++ if (flags & XPC_C_CONNECTING) kdb_printf(" CONNECTING");
++ if (flags & XPC_C_CONNECTED) kdb_printf(" CONNECTED");
++ if (flags & XPC_C_CONNECTEDCALLOUT_MADE) kdb_printf(" CONNECTEDCALLOUT_MADE");
++ if (flags & XPC_C_CONNECTEDCALLOUT) kdb_printf(" CONNECTEDCALLOUT");
++ if (flags & XPC_C_SETUP) kdb_printf(" SETUP");
++
++ if (flags & XPC_C_OPENREQUEST) kdb_printf(" OPENREQUEST");
++ if (flags & XPC_C_ROPENREQUEST) kdb_printf(" ROPENREQUEST");
++ if (flags & XPC_C_OPENREPLY) kdb_printf(" OPENREPLY");
++ if (flags & XPC_C_ROPENREPLY) kdb_printf(" ROPENREPLY");
++
++ if (flags & XPC_C_WASCONNECTED) kdb_printf(" WASCONNECTED");
++
++ kdb_printf("\n");
++}
++
++
++static void
++kdbm_xpc_print_channel(struct xpc_channel *ch)
++{
++ kdb_printf("channel %d (0x%p):\n", ch->number, (void *) ch);
++ kdb_printf("\tpartid=%d\n", ch->partid);
++
++ kdbm_xpc_print_channel_flags(ch->flags);
++
++ kdb_printf("\treason=%d %s\n", ch->reason,
++ kdbm_xpc_get_ascii_reason_code(ch->reason));
++ kdb_printf("\treason_line=%d\n", ch->reason_line);
++ kdb_printf("\tnumber=%d\n", ch->number);
++ kdb_printf("\tmsg_size=%d\n", ch->msg_size);
++ kdb_printf("\tlocal_nentries=%d\n", ch->local_nentries);
++ kdb_printf("\tremote_nentries=%d\n", ch->remote_nentries);
++ kdb_printf("\tlocal_msgqueue=0x%p\n", (void *) ch->local_msgqueue);
++ kdb_printf("\tremote_msgqueue_pa=0x%lx\n", ch->remote_msgqueue_pa);
++ kdb_printf("\tremote_msgqueue=0x%p\n",
++ (void *) ch->remote_msgqueue);
++ kdb_printf("\treferences=%d\n", atomic_read(&ch->references));
++ kdb_printf("\tn_on_msg_allocate_wq=%d\n",
++ atomic_read(&ch->n_on_msg_allocate_wq));
++ kdb_printf("\t&msg_allocate_wq=0x%p\n",
++ (void *) &ch->msg_allocate_wq);
++
++ kdb_printf("\tdelayed_IPI_flags=0x%x", ch->delayed_IPI_flags);
++ kdbm_xpc_print_IPI_flags_for_channel(ch->delayed_IPI_flags);
++ kdb_printf("\n");
++
++ kdb_printf("\tn_to_notify=%d\n", atomic_read(&ch->n_to_notify));
++ kdb_printf("\tnotify_queue=0x%p\n", (void *) ch->notify_queue);
++ kdb_printf("\tfunc=0x%p\n", (void *) ch->func);
++ kdb_printf("\tkey=0x%p\n", ch->key);
++ kdb_printf("\t&msg_to_pull_mutex=0x%p\n",
++ (void *) &ch->msg_to_pull_mutex);
++ kdb_printf("\t&wdisconnect_wait=0x%p\n",
++ (void *) &ch->wdisconnect_wait);
++ kdb_printf("\tlocal_GP=0x%p (%ld:%ld)\n", (void *) ch->local_GP,
++ ch->local_GP->get,
++ ch->local_GP->put);
++ kdb_printf("\tremote_GP=%ld:%ld\n", ch->remote_GP.get,
++ ch->remote_GP.put);
++ kdb_printf("\tw_local_GP=%ld:%ld\n", ch->w_local_GP.get,
++ ch->w_local_GP.put);
++ kdb_printf("\tw_remote_GP=%ld:%ld\n", ch->w_remote_GP.get,
++ ch->w_remote_GP.put);
++ kdb_printf("\tnext_msg_to_pull=%ld\n", ch->next_msg_to_pull);
++ kdb_printf("\tkthreads_assigned=%d\n",
++ atomic_read(&ch->kthreads_assigned));
++ kdb_printf("\tkthreads_assigned_limit=%d\n",
++ ch->kthreads_assigned_limit);
++ kdb_printf("\tkthreads_idle=%d\n",
++ atomic_read(&ch->kthreads_idle));
++ kdb_printf("\tkthreads_idle_limit=%d\n", ch->kthreads_idle_limit);
++ kdb_printf("\tkthreads_active=%d\n",
++ atomic_read(&ch->kthreads_active));
++ kdb_printf("\tkthreads_created=%d\n", ch->kthreads_created);
++ kdb_printf("\t&idle_wq=0x%p\n", (void *) &ch->idle_wq);
++
++ if (ch->flags & XPC_C_CONNECTED) {
++ kdb_printf("\n\t#of local msg queue entries available =%ld\n",
++ ch->local_nentries - (ch->w_local_GP.put -
++ ch->w_remote_GP.get));
++
++ kdb_printf("\t#of local msgs allocated !sent =%ld\n",
++ ch->w_local_GP.put - ch->local_GP->put);
++ kdb_printf("\t#of local msgs allocated sent !ACK'd =%ld\n",
++ ch->local_GP->put - ch->remote_GP.get);
++ kdb_printf("\t#of local msgs allocated sent ACK'd !notified ="
++ "%ld\n", ch->remote_GP.get - ch->w_remote_GP.get);
++
++ kdb_printf("\t#of remote msgs sent !pulled =%ld\n",
++ ch->w_remote_GP.put - ch->next_msg_to_pull);
++ kdb_printf("\t#of remote msgs sent !delivered =%ld\n",
++ ch->next_msg_to_pull - ch->w_local_GP.get);
++ kdb_printf("\t#of remote msgs sent delivered !received =%ld\n",
++ ch->w_local_GP.get - ch->local_GP->get);
++ }
++}
++
++
++/*
++ * Display a XPC partition's channels.
++ *
++ * xpcchan <vaddr> | <partid> [ <channel> ]
++ */
++static int
++kdbm_xpc_channels(int argc, const char **argv)
++{
++ int ret;
++ int nextarg = 1;
++ long offset = 0;
++ unsigned long addr;
++ partid_t partid;
++ struct xpc_partition *part;
++ int ch_number;
++ struct xpc_channel *ch;
++
++
++ if (argc < 1 || argc > 2) {
++ return KDB_ARGCOUNT;
++ }
++
++ ret = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++ if (ret) {
++ return ret;
++ }
++ if (addr > 0 && addr < XP_MAX_PARTITIONS) {
++ partid = (partid_t) addr;
++ part = &xpc_partitions[partid];
++ if (part->setup_state == XPC_P_UNSET) {
++ kdb_printf("partition is UNSET\n");
++ return 0;
++ }
++ if (part->setup_state == XPC_P_TORNDOWN) {
++ kdb_printf("partition is TORNDOWN\n");
++ return 0;
++ }
++
++ if (argc == 2) {
++ ret = kdbgetularg(argv[2],
++ (unsigned long *) &ch_number);
++ if (ret) {
++ return ret;
++ }
++ if (ch_number < 0 || ch_number >= part->nchannels) {
++ kdb_printf("invalid channel #\n");
++ return KDB_BADINT;
++ }
++ kdbm_xpc_print_channel(&part->channels[ch_number]);
++ } else {
++ for (ch_number = 0; ch_number < part->nchannels;
++ ch_number++) {
++ kdbm_xpc_print_channel(&part->
++ channels[ch_number]);
++ }
++ }
++
++ } else {
++ ch = (struct xpc_channel *) addr;
++
++ for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
++ part = &xpc_partitions[partid];
++ if (part->setup_state != XPC_P_UNSET &&
++ part->setup_state != XPC_P_TORNDOWN &&
++ ch >= part->channels) {
++ ch_number = ch - part->channels;
++ if (ch_number < part->nchannels &&
++ ch == &part->channels[ch_number]) {
++ break;
++ }
++ }
++ }
++ if (partid == XP_MAX_PARTITIONS) {
++ kdb_printf("invalid channel address\n");
++ return KDB_BADADDR;
++ }
++ kdbm_xpc_print_channel(ch);
++ }
++
++ return 0;
++}
++
++
++static void
++kdbm_xpc_print_local_msgqueue(struct xpc_channel *ch)
++{
++ int i;
++ char *prefix;
++ struct xpc_msg *msg = ch->local_msgqueue;
++ s64 w_remote_GP_get = ch->w_remote_GP.get % ch->local_nentries;
++ s64 remote_GP_get = ch->remote_GP.get % ch->local_nentries;
++ s64 local_GP_put = ch->local_GP->put % ch->local_nentries;
++ s64 w_local_GP_put = ch->w_local_GP.put % ch->local_nentries;
++
++
++ kdb_printf("local message queue (0x%p):\n\n", (void *) msg);
++
++ for (i = 0; i < ch->local_nentries; i++) {
++ kdb_printf("0x%p: flags=0x%x number=%ld", (void *) msg,
++ msg->flags, msg->number);
++
++ prefix = " <--";
++
++ if (i == w_remote_GP_get) {
++ kdb_printf("%s w_remote_GP.get", prefix);
++ prefix = ",";
++ }
++ if (i == remote_GP_get) {
++ kdb_printf("%s remote_GP.get", prefix);
++ prefix = ",";
++ }
++ if (i == local_GP_put) {
++ kdb_printf("%s local_GP->put", prefix);
++ prefix = ",";
++ }
++ if (i == w_local_GP_put) {
++ kdb_printf("%s w_local_GP.put", prefix);
++ }
++ kdb_printf("\n");
++
++ msg = (struct xpc_msg *) ((u64) msg + ch->msg_size);
++ }
++}
++
++
++static void
++kdbm_xpc_print_remote_msgqueue(struct xpc_channel *ch)
++{
++ int i;
++ char *prefix;
++ struct xpc_msg *msg = ch->remote_msgqueue;
++ s64 local_GP_get = ch->local_GP->get % ch->remote_nentries;
++ s64 w_local_GP_get = ch->w_local_GP.get % ch->remote_nentries;
++ s64 next_msg_to_pull = ch->next_msg_to_pull % ch->remote_nentries;
++ s64 w_remote_GP_put = ch->w_remote_GP.put % ch->remote_nentries;
++ s64 remote_GP_put = ch->remote_GP.put % ch->remote_nentries;
++
++
++ kdb_printf("cached remote message queue (0x%p):\n\n", (void *) msg);
++
++ for (i = 0; i < ch->remote_nentries; i++) {
++ kdb_printf("0x%p: flags=0x%x number=%ld", (void *) msg,
++ msg->flags, msg->number);
++
++ prefix = " <--";
++
++ if (i == local_GP_get) {
++ kdb_printf("%s local_GP->get", prefix);
++ prefix = ",";
++ }
++ if (i == w_local_GP_get) {
++ kdb_printf("%s w_local_GP.get", prefix);
++ prefix = ",";
++ }
++ if (i == next_msg_to_pull) {
++ kdb_printf("%s next_msg_to_pull", prefix);
++ prefix = ",";
++ }
++ if (i == w_remote_GP_put) {
++ kdb_printf("%s w_remote_GP.put", prefix);
++ prefix = ",";
++ }
++ if (i == remote_GP_put) {
++ kdb_printf("%s remote_GP.put", prefix);
++ }
++ kdb_printf("\n");
++
++ msg = (struct xpc_msg *) ((u64) msg + ch->msg_size);
++ }
++}
++
++
++/*
++ * Display XPC specified message queue.
++ *
++ * xpcmque <partid> <channel> local|remote
++ */
++static int
++kdbm_xpc_msgqueue(int argc, const char **argv)
++{
++ int ret, ch_number;
++ unsigned long ulong_partid;
++ partid_t partid;
++ struct xpc_partition *part;
++ struct xpc_channel *ch;
++
++
++ if (argc != 3) {
++ return KDB_ARGCOUNT;
++ }
++
++ ret = kdbgetularg(argv[1], (unsigned long *) &ulong_partid);
++ if (ret) {
++ return ret;
++ }
++ partid = (partid_t) ulong_partid;
++ if (partid <= 0 || partid >= XP_MAX_PARTITIONS) {
++ kdb_printf("invalid partid\n");
++ return KDB_BADINT;
++ }
++
++ ret = kdbgetularg(argv[2], (unsigned long *) &ch_number);
++ if (ret) {
++ return ret;
++ }
++ if (ch_number < 0 || ch_number >= XPC_NCHANNELS) {
++ kdb_printf("invalid channel #\n");
++ return KDB_BADINT;
++ }
++
++ part = &xpc_partitions[partid];
++
++ if (part->setup_state == XPC_P_UNSET) {
++ kdb_printf("partition is UNSET\n");
++ return 0;
++ }
++ if (part->setup_state == XPC_P_TORNDOWN) {
++ kdb_printf("partition is TORNDOWN\n");
++ return 0;
++ }
++
++ if (ch_number >= part->nchannels) {
++ kdb_printf("unsupported channel #\n");
++ return KDB_BADINT;
++ }
++
++ ch = &part->channels[ch_number];
++
++ if (!(ch->flags & XPC_C_SETUP)) {
++ kdb_printf("message queues are not SETUP\n");
++ return 0;
++ }
++
++ if (strcmp(argv[3], "r") == 0 || strcmp(argv[3], "remote") == 0) {
++ kdbm_xpc_print_remote_msgqueue(ch);
++ } else if (strcmp(argv[3], "l") == 0 || strcmp(argv[3], "local") == 0) {
++ kdbm_xpc_print_local_msgqueue(ch);
++ } else {
++ kdb_printf("unknown msg queue selected\n");
++ return KDB_BADINT;
++ }
++
++ return 0;
++}
++
++
++static void
++kdbm_xpc_print_msg_flags(u8 flags)
++{
++ kdb_printf("\tflags=0x%x", flags);
++
++ if (flags & XPC_M_INTERRUPT) kdb_printf(" INTERRUPT");
++ if (flags & XPC_M_READY) kdb_printf(" READY");
++ if (flags & XPC_M_DONE) kdb_printf(" DONE");
++
++ kdb_printf("\n");
++}
++
++
++/*
++ * Display XPC message.
++ *
++ * xpcmsg <vaddr>
++ */
++static int
++kdbm_xpc_msg(int argc, const char **argv)
++{
++ int ret, nextarg = argc;
++ long offset = 0;
++ unsigned long addr;
++ struct xpc_msg *msg;
++
++
++ if (argc != 1) {
++ return KDB_ARGCOUNT;
++ }
++
++ ret = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
++ if (ret) {
++ return ret;
++ }
++
++ msg = (struct xpc_msg *) addr;
++ kdb_printf("msg (0x%p):\n", (void *) msg);
++ kdbm_xpc_print_msg_flags(msg->flags);
++ kdb_printf("\tnumber=%ld\n", msg->number);
++ kdb_printf("\t&payload=0x%p\n", (void *) &msg->payload);
++
++ return 0;
++}
++
++
++static void
++kdbm_xpc_print_notify_queue(struct xpc_channel *ch)
++{
++ int i;
++ char *prefix;
++ struct xpc_notify *notify = ch->notify_queue;
++ s64 w_remote_GP_get = ch->w_remote_GP.get % ch->local_nentries;
++ s64 remote_GP_get = ch->remote_GP.get % ch->local_nentries;
++ s64 local_GP_put = ch->local_GP->put % ch->local_nentries;
++ s64 w_local_GP_put = ch->w_local_GP.put % ch->local_nentries;
++
++
++ kdb_printf("notify queue (0x%p):\n\n", (void *) notify);
++
++ for (i = 0; i < ch->local_nentries; i++) {
++ kdb_printf("0x%p: type=0x%x", (void *) notify, notify->type);
++
++ if (notify->type == XPC_N_CALL) {
++ kdb_printf(" CALL func=0x%p key=0x%p",
++ (void *) notify->func, notify->key);
++ }
++
++ prefix = " <--";
++
++ if (i == w_remote_GP_get) {
++ kdb_printf("%s w_remote_GP.get", prefix);
++ prefix = ",";
++ }
++ if (i == remote_GP_get) {
++ kdb_printf("%s remote_GP.get", prefix);
++ prefix = ",";
++ }
++ if (i == local_GP_put) {
++ kdb_printf("%s local_GP->put", prefix);
++ prefix = ",";
++ }
++ if (i == w_local_GP_put) {
++ kdb_printf("%s w_local_GP.put", prefix);
++ }
++ kdb_printf("\n");
++
++ notify++;
++ }
++}
++
++
++/*
++ * Display XPC specified notify queue.
++ *
++ * xpcnque <partid> <channel>
++ */
++static int
++kdbm_xpc_notify_queue(int argc, const char **argv)
++{
++ int ret, ch_number;
++ unsigned long ulong_partid;
++ partid_t partid;
++ struct xpc_partition *part;
++ struct xpc_channel *ch;
++
++
++ if (argc != 2) {
++ return KDB_ARGCOUNT;
++ }
++
++ ret = kdbgetularg(argv[1], (unsigned long *) &ulong_partid);
++ if (ret) {
++ return ret;
++ }
++ partid = (partid_t) ulong_partid;
++ if (partid <= 0 || partid >= XP_MAX_PARTITIONS) {
++ kdb_printf("invalid partid\n");
++ return KDB_BADINT;
++ }
++
++ ret = kdbgetularg(argv[2], (unsigned long *) &ch_number);
++ if (ret) {
++ return ret;
++ }
++ if (ch_number < 0 || ch_number >= XPC_NCHANNELS) {
++ kdb_printf("invalid channel #\n");
++ return KDB_BADINT;
++ }
++
++ part = &xpc_partitions[partid];
++
++ if (part->setup_state == XPC_P_UNSET) {
++ kdb_printf("partition is UNSET\n");
++ return 0;
++ }
++ if (part->setup_state == XPC_P_TORNDOWN) {
++ kdb_printf("partition is TORNDOWN\n");
++ return 0;
++ }
++
++ if (ch_number >= part->nchannels) {
++ kdb_printf("unsupported channel #\n");
++ return KDB_BADINT;
++ }
++
++ ch = &part->channels[ch_number];
++
++ if (!(ch->flags & XPC_C_SETUP)) {
++ kdb_printf("notify queue is not SETUP\n");
++ return 0;
++ }
++
++ kdbm_xpc_print_notify_queue(ch);
++
++ return 0;
++}
++
++
++static void
++kdbm_xpc_print_users(struct xpc_registration *registration, int ch_number)
++{
++ kdb_printf("xpc_registrations[channel=%d] (0x%p):\n", ch_number,
++ (void *) registration);
++
++ kdb_printf("\t&mutex=0x%p\n", (void *) &registration->mutex);
++ kdb_printf("\tfunc=0x%p\n", (void *) registration->func);
++ kdb_printf("\tkey=0x%p\n", registration->key);
++ kdb_printf("\tnentries=%d\n", registration->nentries);
++ kdb_printf("\tmsg_size=%d\n", registration->msg_size);
++ kdb_printf("\tassigned_limit=%d\n", registration->assigned_limit);
++ kdb_printf("\tidle_limit=%d\n", registration->idle_limit);
++}
++
++
++/*
++ * Display current XPC users who have registered via xpc_connect().
++ *
++ * xpcusers [ <channel> ]
++ */
++static int
++kdbm_xpc_users(int argc, const char **argv)
++{
++ int ret;
++ struct xpc_registration *registration;
++ int ch_number;
++
++
++ if (argc > 1) {
++ return KDB_ARGCOUNT;
++
++ } else if (argc == 1) {
++ ret = kdbgetularg(argv[1], (unsigned long *) &ch_number);
++ if (ret) {
++ return ret;
++ }
++ if (ch_number < 0 || ch_number >= XPC_NCHANNELS) {
++ kdb_printf("invalid channel #\n");
++ return KDB_BADINT;
++ }
++ registration = &xpc_registrations[ch_number];
++ kdbm_xpc_print_users(registration, ch_number);
++
++ } else {
++ for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) {
++ registration = &xpc_registrations[ch_number];
++
++ /* if !XPC_CHANNEL_REGISTERED(ch_number) */
++ if (registration->func == NULL) {
++ continue;
++ }
++ kdbm_xpc_print_users(registration, ch_number);
++ }
++ }
++ return 0;
++}
++
++
++static int __init
++kdbm_xpc_register(void)
++{
++ (void) kdb_register("xpcdown", kdbm_xpc_down, "",
++ "Mark this partition as being down", 0);
++ (void) kdb_register("xpcrp", kdbm_xpc_rsvd_page, "",
++ "Display XPC reserved page", 0);
++ (void) kdb_register("xpcvars", kdbm_xpc_variables, "[<partid>]",
++ "Display XPC variables", 0);
++ (void) kdb_register("xpcengaged", kdbm_xpc_engaged, "[-v]",
++ "Display XPC engaged partitions AMOs", 0);
++ (void) kdb_register("xpcpart", kdbm_xpc_partitions, "[<vaddr>|"
++ "<partid>]", "Display struct xpc_partition entries", 0);
++ (void) kdb_register("xpcchan", kdbm_xpc_channels, "<vaddr> | <partid> "
++ "[<channel>]", "Display struct xpc_channel entries", 0);
++ (void) kdb_register("xpcmque", kdbm_xpc_msgqueue, "<partid> <channel> "
++ "local|remote", "Display local or remote msg queue", 0);
++ (void) kdb_register("xpcmsg", kdbm_xpc_msg, "<vaddr>",
++ "Display struct xpc_msg", 0);
++ (void) kdb_register("xpcnque", kdbm_xpc_notify_queue, "<partid> "
++ "<channel>", "Display notify queue", 0);
++ (void) kdb_register("xpcusers", kdbm_xpc_users, "[ <channel> ]",
++ "Display struct xpc_registration entries", 0);
++ return 0;
++}
++
++
++static void __exit
++kdbm_xpc_unregister(void)
++{
++ (void) kdb_unregister("xpcdown");
++ (void) kdb_unregister("xpcrp");
++ (void) kdb_unregister("xpcvars");
++ (void) kdb_unregister("xpcengaged");
++ (void) kdb_unregister("xpcpart");
++ (void) kdb_unregister("xpcchan");
++ (void) kdb_unregister("xpcmque");
++ (void) kdb_unregister("xpcmsg");
++ (void) kdb_unregister("xpcnque");
++ (void) kdb_unregister("xpcusers");
++}
++
++
++module_init(kdbm_xpc_register);
++module_exit(kdbm_xpc_unregister);
++
+diff -Nurp linux-2.6.22-590/kdb/modules/Makefile linux-2.6.22-600/kdb/modules/Makefile
+--- linux-2.6.22-590/kdb/modules/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-2.6.22-600/kdb/modules/Makefile 2008-04-09 18:14:28.000000000 +0200
+@@ -0,0 +1,14 @@
++#
++# This file is subject to the terms and conditions of the GNU General Public
++# License. See the file "COPYING" in the main directory of this archive
++# for more details.
++#
++# Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
++#
++
++obj-$(CONFIG_KDB_MODULES) += kdbm_pg.o kdbm_task.o kdbm_vm.o kdbm_sched.o
++ifdef CONFIG_X86
++obj-$(CONFIG_KDB_MODULES) += kdbm_x86.o
++endif
++obj-$(CONFIG_KDB_MODULES_XP) += kdbm_xpc.o
++CFLAGS_kdbm_vm.o += -I $(srctree)/drivers/scsi
+diff -Nurp linux-2.6.22-590/kernel/exit.c linux-2.6.22-600/kernel/exit.c
+--- linux-2.6.22-590/kernel/exit.c 2008-04-09 18:10:53.000000000 +0200
++++ linux-2.6.22-600/kernel/exit.c 2008-04-09 18:14:28.000000000 +0200
+@@ -4,6 +4,9 @@
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#endif
+ #include <linux/mm.h>
+ #include <linux/slab.h>
+ #include <linux/interrupt.h>
+diff -Nurp linux-2.6.22-590/kernel/kallsyms.c linux-2.6.22-600/kernel/kallsyms.c
+--- linux-2.6.22-590/kernel/kallsyms.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/kernel/kallsyms.c 2008-04-09 18:14:28.000000000 +0200
+@@ -491,3 +491,25 @@ __initcall(kallsyms_init);
+
+ EXPORT_SYMBOL(__print_symbol);
+ EXPORT_SYMBOL_GPL(sprint_symbol);
++
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++
++const char *kdb_walk_kallsyms(loff_t *pos)
++{
++ static struct kallsym_iter kdb_walk_kallsyms_iter;
++ if (*pos == 0) {
++ memset(&kdb_walk_kallsyms_iter, 0, sizeof(kdb_walk_kallsyms_iter));
++ reset_iter(&kdb_walk_kallsyms_iter, 0);
++ }
++ while (1) {
++ if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
++ return NULL;
++ ++*pos;
++ /* Some debugging symbols have no name. Ignore them. */
++ if (kdb_walk_kallsyms_iter.name[0])
++ return kdb_walk_kallsyms_iter.name;
++ }
++}
++#endif /* CONFIG_KDB */
+diff -Nurp linux-2.6.22-590/kernel/module.c linux-2.6.22-600/kernel/module.c
+--- linux-2.6.22-590/kernel/module.c 2007-07-09 01:32:17.000000000 +0200
++++ linux-2.6.22-600/kernel/module.c 2008-04-09 18:14:28.000000000 +0200
+@@ -2176,12 +2176,23 @@ out:
+ return -ERANGE;
+ }
+
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
++#endif /* CONFIG_KDB */
++
+ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ char *name, char *module_name, int *exported)
+ {
+ struct module *mod;
++#ifdef CONFIG_KDB
++ int get_lock = !KDB_IS_RUNNING();
++#else
++#define get_lock 1
++#endif
+
+- mutex_lock(&module_mutex);
++ if (get_lock)
++ mutex_lock(&module_mutex);
+ list_for_each_entry(mod, &modules, list) {
+ if (symnum < mod->num_symtab) {
+ *value = mod->symtab[symnum].st_value;
+@@ -2190,12 +2201,14 @@ int module_get_kallsym(unsigned int symn
+ KSYM_NAME_LEN + 1);
+ strlcpy(module_name, mod->name, MODULE_NAME_LEN + 1);
+ *exported = is_exported(name, mod);
+- mutex_unlock(&module_mutex);
++ if (get_lock)
++ mutex_unlock(&module_mutex);
+ return 0;
+ }
+ symnum -= mod->num_symtab;
+ }
+- mutex_unlock(&module_mutex);
++ if (get_lock)
++ mutex_unlock(&module_mutex);
+ return -ERANGE;
+ }
+
+diff -Nurp linux-2.6.22-590/kernel/printk.c linux-2.6.22-600/kernel/printk.c
+--- linux-2.6.22-590/kernel/printk.c 2008-04-09 18:10:53.000000000 +0200
++++ linux-2.6.22-600/kernel/printk.c 2008-04-09 18:14:28.000000000 +0200
+@@ -313,6 +313,20 @@ asmlinkage long sys_syslog(int type, cha
+ return do_syslog(type, buf, len);
+ }
+
++#ifdef CONFIG_KDB
++/* kdb dmesg command needs access to the syslog buffer. do_syslog() uses locks
++ * so it cannot be used during debugging. Just tell kdb where the start and
++ * end of the physical and logical logs are. This is equivalent to do_syslog(3).
++ */
++void kdb_syslog_data(char *syslog_data[4])
++{
++ syslog_data[0] = log_buf;
++ syslog_data[1] = log_buf + log_buf_len;
++ syslog_data[2] = log_buf + log_end - (logged_chars < log_buf_len ? logged_chars : log_buf_len);
++ syslog_data[3] = log_buf + log_end;
++}
++#endif /* CONFIG_KDB */
++
+ /*
+ * Call the console drivers on a range of log_buf
+ */
+diff -Nurp linux-2.6.22-590/kernel/sched.c linux-2.6.22-600/kernel/sched.c
+--- linux-2.6.22-590/kernel/sched.c 2008-04-09 18:11:16.000000000 +0200
++++ linux-2.6.22-600/kernel/sched.c 2008-04-09 18:14:28.000000000 +0200
+@@ -7315,7 +7315,7 @@ void normalize_rt_tasks(void)
+
+ #endif /* CONFIG_MAGIC_SYSRQ */
+
+-#ifdef CONFIG_IA64
++#if defined(CONFIG_IA64) || defined(CONFIG_KDB)
+ /*
+ * These functions are only useful for the IA64 MCA handling.
+ *
+@@ -7364,3 +7364,80 @@ void (*rec_event)(void *,unsigned int);
+ EXPORT_SYMBOL(rec_event);
+ EXPORT_SYMBOL(in_sched_functions);
+ #endif
++
++#ifdef CONFIG_KDB
++
++#include <linux/kdb.h>
++
++static void
++kdb_prio(char *name, struct prio_array *array, kdb_printf_t xxx_printf)
++{
++ int pri;
++
++ xxx_printf(" %s nr_active:%d bitmap: 0x%lx 0x%lx 0x%lx\n",
++ name, array->nr_active,
++ array->bitmap[0], array->bitmap[1], array->bitmap[2]);
++
++ pri = sched_find_first_bit(array->bitmap);
++ if (pri != MAX_PRIO) {
++ xxx_printf(" bitmap priorities:");
++ while (pri != MAX_PRIO) {
++ xxx_printf(" %d", pri);
++ pri++;
++ pri = find_next_bit(array->bitmap, MAX_PRIO, pri);
++ }
++ xxx_printf("\n");
++ }
++
++ for (pri = 0; pri < MAX_PRIO; pri++) {
++ int printed_hdr = 0;
++ struct list_head *head, *curr;
++
++ head = array->queue + pri;
++ curr = head->next;
++ while(curr != head) {
++ struct task_struct *task;
++ if (!printed_hdr) {
++ xxx_printf(" queue at priority=%d\n", pri);
++ printed_hdr = 1;
++ }
++ task = list_entry(curr, struct task_struct, run_list);
++ xxx_printf(" 0x%p %d %s time_slice:%d\n",
++ task, task->pid, task->comm,
++ task->time_slice);
++ curr = curr->next;
++ }
++ }
++}
++
++/* This code must be in sched.c because struct rq is only defined in this
++ * source. To allow most of kdb to be modular, this code cannot call any kdb
++ * functions directly, any external functions that it needs must be passed in
++ * as parameters.
++ */
++
++void
++kdb_runqueue(unsigned long cpu, kdb_printf_t xxx_printf)
++{
++ struct rq *rq;
++
++ rq = cpu_rq(cpu);
++
++ xxx_printf("CPU%ld lock:%s curr:0x%p(%d)(%s)",
++ cpu, (spin_is_locked(&rq->lock))?"LOCKED":"free",
++ rq->curr, rq->curr->pid, rq->curr->comm);
++ if (rq->curr == rq->idle)
++ xxx_printf(" is idle");
++ xxx_printf("\n ");
++#ifdef CONFIG_SMP
++ xxx_printf(" cpu_load:%lu %lu %lu",
++ rq->cpu_load[0], rq->cpu_load[1], rq->cpu_load[2]);
++#endif
++ xxx_printf(" nr_running:%lu nr_switches:%llu\n",
++ rq->nr_running, rq->nr_switches);
++ kdb_prio("active", rq->active, xxx_printf);
++ kdb_prio("expired", rq->expired, xxx_printf);
++}
++EXPORT_SYMBOL(kdb_runqueue);
++
++#endif /* CONFIG_KDB */
+diff -Nurp linux-2.6.22-590/kernel/signal.c linux-2.6.22-600/kernel/signal.c
+--- linux-2.6.22-590/kernel/signal.c 2008-04-09 18:10:53.000000000 +0200
++++ linux-2.6.22-600/kernel/signal.c 2008-04-09 18:14:28.000000000 +0200
+@@ -2586,3 +2586,52 @@ void __init signals_init(void)
+ {
+ sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
+ }
++
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++/*
++ * kdb_send_sig_info
++ *
++ * Allows kdb to send signals without exposing signal internals.
++ *
++ * Inputs:
++ * t task
++ * siginfo signal information
++ * seqno current kdb sequence number (avoid including kdbprivate.h)
++ * Outputs:
++ * None.
++ * Returns:
++ * None.
++ * Locking:
++ * Checks if the required locks are available before calling the main
++ * signal code, to avoid kdb deadlocks.
++ * Remarks:
++ */
++void
++kdb_send_sig_info(struct task_struct *t, struct siginfo *info, int seqno)
++{
++ static struct task_struct *kdb_prev_t;
++ static int kdb_prev_seqno;
++ int sig, new_t;
++ if (!spin_trylock(&t->sighand->siglock)) {
++ kdb_printf("Can't do kill command now.\n"
++ "The sigmask lock is held somewhere else in kernel, try again later\n");
++ return;
++ }
++ spin_unlock(&t->sighand->siglock);
++ new_t = kdb_prev_t != t || kdb_prev_seqno != seqno;
++ kdb_prev_t = t;
++ kdb_prev_seqno = seqno;
++ if (t->state != TASK_RUNNING && new_t) {
++ kdb_printf("Process is not RUNNING, sending a signal from kdb risks deadlock\n"
++ "on the run queue locks. The signal has _not_ been sent.\n"
++ "Reissue the kill command if you want to risk the deadlock.\n");
++ return;
++ }
++ sig = info->si_signo;
++ if (send_sig_info(sig, info, t))
++ kdb_printf("Fail to deliver Signal %d to process %d.\n", sig, t->pid);
++ else
++ kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
++}
++#endif /* CONFIG_KDB */
+diff -Nurp linux-2.6.22-590/Makefile linux-2.6.22-600/Makefile
+--- linux-2.6.22-590/Makefile 2008-04-09 18:11:17.000000000 +0200
++++ linux-2.6.22-600/Makefile 2008-04-09 18:14:28.000000000 +0200
+@@ -560,6 +560,7 @@ export mod_strip_cmd
+
+ ifeq ($(KBUILD_EXTMOD),)
+ core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
++core-$(CONFIG_KDB) += kdb/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+diff -Nurp linux-2.6.22-590/mm/hugetlb.c linux-2.6.22-600/mm/hugetlb.c
+--- linux-2.6.22-590/mm/hugetlb.c 2008-04-09 18:10:53.000000000 +0200
++++ linux-2.6.22-600/mm/hugetlb.c 2008-04-09 18:14:28.000000000 +0200
+@@ -266,6 +266,25 @@ int hugetlb_sysctl_handler(struct ctl_ta
+ }
+ #endif /* CONFIG_SYSCTL */
+
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++/* Like hugetlb_report_meminfo() but using kdb_printf() */
++void
++kdb_hugetlb_report_meminfo(void)
++{
++ kdb_printf(
++ "HugePages_Total: %5lu\n"
++ "HugePages_Free: %5lu\n"
++ "HugePages_Rsvd: %5lu\n"
++ "Hugepagesize: %5lu kB\n",
++ nr_huge_pages,
++ free_huge_pages,
++ resv_huge_pages,
++ HPAGE_SIZE/1024);
++}
++#endif /* CONFIG_KDB */
++
+ int hugetlb_report_meminfo(char *buf)
+ {
+ return sprintf(buf,
+diff -Nurp linux-2.6.22-590/mm/swapfile.c linux-2.6.22-600/mm/swapfile.c
+--- linux-2.6.22-590/mm/swapfile.c 2008-04-09 18:10:53.000000000 +0200
++++ linux-2.6.22-600/mm/swapfile.c 2008-04-09 18:14:28.000000000 +0200
+@@ -13,6 +13,10 @@
+ #include <linux/swap.h>
+ #include <linux/vmalloc.h>
+ #include <linux/pagemap.h>
++#ifdef CONFIG_KDB
++#include <linux/kdb.h>
++#include <linux/kdbprivate.h>
++#endif /* CONFIG_KDB */
+ #include <linux/namei.h>
+ #include <linux/shm.h>
+ #include <linux/blkdev.h>
+@@ -1718,6 +1722,24 @@ void si_swapinfo(struct sysinfo *val)
+ vx_vsi_swapinfo(val);
+ }
+
++#ifdef CONFIG_KDB
++/* Like si_swapinfo() but without the locks */
++void kdb_si_swapinfo(struct sysinfo *val)
++{
++ unsigned int i;
++ unsigned long nr_to_be_unused = 0;
++
++ for (i = 0; i < nr_swapfiles; i++) {
++ if (!(swap_info[i].flags & SWP_USED) ||
++ (swap_info[i].flags & SWP_WRITEOK))
++ continue;
++ nr_to_be_unused += swap_info[i].inuse_pages;
++ }
++ val->freeswap = nr_swap_pages + nr_to_be_unused;
++ val->totalswap = total_swap_pages + nr_to_be_unused;
++}
++#endif /* CONFIG_KDB */
++
+ /*
+ * Verify that a swap entry is valid and increment its swap map count.
+ *