This commit was manufactured by cvs2svn to create branch 'vserver'.
authorPlanet-Lab Support <support@planet-lab.org>
Mon, 12 Jul 2004 21:57:25 +0000 (21:57 +0000)
committerPlanet-Lab Support <support@planet-lab.org>
Mon, 12 Jul 2004 21:57:25 +0000 (21:57 +0000)
116 files changed:
Documentation/arm/IXP4xx [new file with mode: 0644]
Documentation/cpu-freq/amd-powernow.txt [new file with mode: 0644]
Documentation/sound/alsa/Audigy-mixer.txt [new file with mode: 0644]
arch/arm/configs/ixp4xx_defconfig [new file with mode: 0644]
arch/arm/configs/mainstone_defconfig [new file with mode: 0644]
arch/arm/configs/smdk2410_defconfig [new file with mode: 0644]
arch/arm/mach-ixp4xx/Makefile [new file with mode: 0644]
arch/arm/mach-ixp4xx/common-pci.c [new file with mode: 0644]
arch/arm/mach-ixp4xx/common.c [new file with mode: 0644]
arch/arm/mach-ixp4xx/coyote-pci.c [new file with mode: 0644]
arch/arm/mach-ixp4xx/ixdp425-pci.c [new file with mode: 0644]
arch/arm/mach-ixp4xx/prpmc1100-pci.c [new file with mode: 0644]
arch/arm/mach-ixp4xx/prpmc1100-setup.c [new file with mode: 0644]
arch/arm/mach-s3c2410/mach-smdk2410.c [new file with mode: 0644]
arch/cris/arch-v10/drivers/ide.c [new file with mode: 0644]
arch/cris/kernel/crisksyms.c [new file with mode: 0644]
arch/i386/mach-generic/es7000.c [new file with mode: 0644]
arch/ia64/configs/sim_defconfig [new file with mode: 0644]
arch/ia64/dig/topology.c [new file with mode: 0644]
arch/ia64/lib/bitop.c [new file with mode: 0644]
arch/mips/au1000/common/cputable.c [new file with mode: 0644]
arch/parisc/kernel/unwind.c [new file with mode: 0644]
arch/ppc/kernel/dma-mapping.c [new file with mode: 0644]
arch/ppc/kernel/vecemu.c [new file with mode: 0644]
arch/ppc/kernel/vector.S [new file with mode: 0644]
arch/ppc/platforms/4xx/bubinga.c [new file with mode: 0644]
arch/ppc/platforms/4xx/bubinga.h [new file with mode: 0644]
arch/ppc/platforms/4xx/ibm405ep.c [new file with mode: 0644]
arch/ppc/platforms/4xx/ibm405ep.h [new file with mode: 0644]
arch/ppc/platforms/sbc82xx.c [new file with mode: 0644]
arch/ppc/platforms/sbc82xx.h [new file with mode: 0644]
arch/ppc/syslib/dcr.S [new file with mode: 0644]
arch/ppc/syslib/ibm440gx_common.c [new file with mode: 0644]
arch/ppc/syslib/ibm440gx_common.h [new file with mode: 0644]
arch/ppc/syslib/ibm44x_common.h [new file with mode: 0644]
arch/ppc/syslib/ocp.c [new file with mode: 0644]
arch/ppc64/lib/locks.c [new file with mode: 0644]
arch/s390/lib/string.c [new file with mode: 0644]
arch/sparc64/lib/find_bit.c [new file with mode: 0644]
arch/sparc64/lib/splock.S [new file with mode: 0644]
arch/x86_64/kernel/domain.c [new file with mode: 0644]
drivers/char/drm/drm_irq.h [new file with mode: 0644]
drivers/char/drm/drm_pciids.h [new file with mode: 0644]
drivers/char/watchdog/ixp4xx_wdt.c [new file with mode: 0644]
drivers/i2c/busses/i2c-ixp4xx.c [new file with mode: 0644]
drivers/i2c/chips/max1619.c [new file with mode: 0644]
drivers/i2c/chips/rtc8564.c [new file with mode: 0644]
drivers/i2c/chips/rtc8564.h [new file with mode: 0644]
drivers/ide/h8300/ide-h8300.c [new file with mode: 0644]
drivers/mtd/maps/ixp4xx.c [new file with mode: 0644]
drivers/mtd/maps/wr_sbc82xx_flash.c [new file with mode: 0644]
drivers/net/ibm_emac/ibm_emac.h [new file with mode: 0644]
drivers/net/ibm_emac/ibm_emac_core.h [new file with mode: 0644]
drivers/net/ibm_emac/ibm_emac_debug.c [new file with mode: 0644]
drivers/net/ibm_emac/ibm_emac_mal.c [new file with mode: 0644]
drivers/net/ibm_emac/ibm_emac_mal.h [new file with mode: 0644]
drivers/net/ibm_emac/ibm_emac_phy.c [new file with mode: 0644]
drivers/net/ibm_emac/ibm_emac_rgmii.h [new file with mode: 0644]
drivers/net/ibm_emac/ibm_emac_tah.h [new file with mode: 0644]
drivers/net/ibm_emac/ibm_emac_zmii.h [new file with mode: 0644]
drivers/net/ne-h8300.c [new file with mode: 0644]
drivers/pcmcia/pxa2xx_base.h [new file with mode: 0644]
drivers/scsi/ipr.c [new file with mode: 0644]
drivers/scsi/ipr.h [new file with mode: 0644]
drivers/scsi/pcmcia/sym53c500_cs.c [new file with mode: 0644]
drivers/scsi/qlogicfas408.h [new file with mode: 0644]
drivers/scsi/sata_promise.h [new file with mode: 0644]
drivers/usb/core/sysfs.c [new file with mode: 0644]
drivers/usb/input/touchkitusb.c [new file with mode: 0644]
drivers/usb/misc/phidgetservo.c [new file with mode: 0644]
drivers/video/asiliantfb.c [new file with mode: 0644]
drivers/video/gbefb.c [new file with mode: 0644]
drivers/video/pxafb.h [new file with mode: 0644]
fs/reiserfs/xattr.c [new file with mode: 0644]
fs/reiserfs/xattr_acl.c [new file with mode: 0644]
fs/xfs/linux-2.6/kmem.h [new file with mode: 0644]
fs/xfs/linux-2.6/mrlock.h [new file with mode: 0644]
fs/xfs/linux-2.6/sema.h [new file with mode: 0644]
fs/xfs/linux-2.6/sv.h [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_aops.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_buf.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_buf.h [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_file.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_fs_subr.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_globals.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_ioctl.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_iops.h [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_linux.h [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_lrw.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_lrw.h [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_stats.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_super.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_super.h [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_sysctl.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_sysctl.h [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_vfs.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_vfs.h [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_vnode.c [new file with mode: 0644]
fs/xfs/linux-2.6/xfs_vnode.h [new file with mode: 0644]
include/asm-arm/arch-ixp4xx/dma.h [new file with mode: 0644]
include/asm-arm/arch-ixp4xx/io.h [new file with mode: 0644]
include/asm-arm/arch-ixp4xx/irq.h [new file with mode: 0644]
include/asm-arm/arch-ixp4xx/memory.h [new file with mode: 0644]
include/asm-arm/arch-ixp4xx/param.h [new file with mode: 0644]
include/asm-arm/arch-ixp4xx/platform.h [new file with mode: 0644]
include/asm-arm/arch-ixp4xx/serial.h [new file with mode: 0644]
include/asm-arm/arch-ixp4xx/system.h [new file with mode: 0644]
include/asm-arm/arch-ixp4xx/time.h [new file with mode: 0644]
include/asm-arm/arch-ixp4xx/uncompress.h [new file with mode: 0644]
include/asm-arm/arch-pxa/pxafb.h [new file with mode: 0644]
include/linux/mempolicy.h [new file with mode: 0644]
include/linux/reiserfs_acl.h [new file with mode: 0644]
include/linux/reiserfs_xattr.h [new file with mode: 0644]
net/bridge/br_sysfs_br.c [new file with mode: 0644]
scripts/checkstack.pl [new file with mode: 0644]
scripts/reference_init.pl [new file with mode: 0644]

diff --git a/Documentation/arm/IXP4xx b/Documentation/arm/IXP4xx
new file mode 100644 (file)
index 0000000..d86d818
--- /dev/null
@@ -0,0 +1,155 @@
+
+-------------------------------------------------------------------------
+Release Notes for Linux on Intel's IXP4xx Network Processor
+
+Maintained by Deepak Saxena <dsaxena@plexity.net>
+-------------------------------------------------------------------------
+
+1. Overview
+
+Intel's IXP4xx network processor is a highly integrated SOC that
+is targeted for network applications, though it has become popular 
+in industrial control and other areas due to low cost and power
+consumption. The IXP4xx family currently consists of several processors
+that support different network offload functions such as encryption,
+routing, firewalling, etc. For more information on the various
+versions of the CPU, see:
+
+   http://developer.intel.com/design/network/products/npfamily/ixp4xx.htm
+
+Intel also made the IXCP1100 CPU for sometime which is an IXP4xx 
+stripped of much of the network intelligence.
+
+2. Linux Support
+
+Linux currently supports the following features on the IXP4xx chips:
+
+- Dual serial ports
+- PCI interface
+- Flash access (MTD/JFFS)
+- I2C through GPIO
+- GPIO for input/output/interrupts 
+  See include/asm-arm/arch-ixp4xx/platform.h for access functions.
+- Timers (watchdog, OS)
+
+The following components of the chips are not supported by Linux and
+require the use of Intel's proprietary CSR software:
+
+- USB device interface
+- Network interfaces (HSS, Utopia, NPEs, etc)
+- Network offload functionality
+
+If you need to use any of the above, you need to download Intel's
+software from:
+
+   http://developer.intel.com/design/network/products/npfamily/ixp425swr1.htm
+
+DO NOT POST QUESTIONS TO THE LINUX MAILING LISTS REGARDING THE PROPRIETARY
+SOFTWARE.
+
+There are several websites that provide directions/pointers on using
+Intel's software:
+
+http://ixp4xx-osdg.sourceforge.net/ 
+   Open Source Developer's Guide for using uClinux and the Intel libraries 
+
+http://gatewaymaker.sourceforge.net/ 
+   Simple one page summary of building a gateway using an IXP425 and Linux
+
+http://ixp425.sourceforge.net/
+   ATM device driver for IXP425 that relies on Intel's libraries
+
+3. Known Issues/Limitations
+
+3a. Limited inbound PCI window
+
+The IXP4xx family allows for up to 256MB of memory but the PCI interface
+can only expose 64MB of that memory to the PCI bus. This means that if
+you are running with > 64MB, all PCI buffers outside of the accessible
+range will be bounced using the routines in arch/arm/common/dmabounce.c.
+   
+3b. Limited outbound PCI window
+
+IXP4xx provides two methods of accessing PCI memory space:
+
+1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB).
+   To access PCI via this space, we simply ioremap() the BAR
+   into the kernel and we can use the standard read[bwl]/write[bwl]
+   macros. This is the preferred method due to speed but it
+   limits the system to just 64MB of PCI memory. This can be 
+   problematic if using video cards and other memory-heavy devices.
+          
+2) If > 64MB of memory space is required, the IXP4xx can be 
+   configured to use indirect registers to access PCI. This allows 
+   for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus. 
+   The disadvantage of this is that every PCI access requires 
+   three local register accesses plus a spinlock, but in some 
+   cases the performance hit is acceptable. In addition, you cannot 
+   mmap() PCI devices in this case due to the indirect nature
+   of the PCI window.
+
+By default, the direct method is used for performance reasons. If
+you need more PCI memory, enable the IXP4XX_INDIRECT_PCI config option.
+
+3c. GPIO as Interrupts
+
+Currently the code only handles level-sensitive GPIO interrupts 
+
+4. Supported platforms
+
+ADI Engineering Coyote Gateway Reference Platform
+http://www.adiengineering.com/productsCoyote.html
+
+   The ADI Coyote platform is a reference design for those building 
+   small residential/office gateways. One NPE is connected to a 10/100
+   interface, one to a 4-port 10/100 switch, and the third to an ADSL
+   interface. In addition, it also supports two POTS interfaces connected
+   via SLICs. Note that those are not supported by Linux ATM. Finally,
+   the platform has two mini-PCI slots used for 802.11[bga] cards.
+   Finally, there is an IDE port hanging off the expansion bus.
+
+Gateworks Avila Network Platform
+http://www.gateworks.com/avila_sbc.htm
+
+   The Avila platform is basically an IXDP425 with the 4 PCI slots
+   replaced with mini-PCI slots and a CF IDE interface hanging off
+   the expansion bus.
+
+Intel IXDP425 Development Platform
+http://developer.intel.com/design/network/products/npfamily/ixdp425.htm
+
+   This is Intel's standard reference platform for the IXDP425 and is 
+   also known as the Richfield board. It contains 4 PCI slots, 16MB
+   of flash, two 10/100 ports and one ADSL port.
+
+Motorola PrPMC1100 Processor Mezzanine Card
+http://www.fountainsys.com/datasheet/PrPMC1100.pdf
+
+   The PrPMC1100 is based on the IXCP1100 and is meant to plug into
+   an IXP2400/2800 system to act as the system controller. It simply
+   contains a CPU and 16MB of flash on the board and needs to be
+   plugged into a carrier board to function. Currently Linux only
+   supports the Motorola PrPMC carrier board for this platform.
+   See https://mcg.motorola.com/us/ds/pdf/ds0144.pdf for info
+   on the carrier board.
+
+5. TODO LIST
+
+- Add support for Coyote IDE
+- Add support for edge-based GPIO interrupts
+- Add support for CF IDE on expansion bus
+
+6. Thanks
+
+The IXP4xx work has been funded by Intel Corp. and MontaVista Software, Inc.
+
+The following people have contributed patches/comments/etc:
+
+Lutz Jaenicke
+Justin Mayfield
+Robert E. Ranslam
+[I know I've forgotten others, please email me to be added] 
+
+-------------------------------------------------------------------------
+
+Last Update: 5/13/2004
diff --git a/Documentation/cpu-freq/amd-powernow.txt b/Documentation/cpu-freq/amd-powernow.txt
new file mode 100644 (file)
index 0000000..254da15
--- /dev/null
@@ -0,0 +1,38 @@
+
+PowerNow! and Cool'n'Quiet are AMD names for frequency
+management capabilities in AMD processors. As the hardware
+implementation changes in new generations of the processors,
+there is a different cpu-freq driver for each generation.
+
+Note that the drivers will not load on the "wrong" hardware,
+so it is safe to try each driver in turn when in doubt as to
+which is the correct driver.
+
+Note that the functionality to change frequency (and voltage)
+is not available in all processors. The drivers will refuse
+to load on processors without this capability. The capability
+is detected with the cpuid instruction.
+
+The drivers use BIOS supplied tables to obtain frequency and
+voltage information appropriate for a particular platform.
+Frequency transitions will be unavailable if the BIOS does
+not supply these tables.
+
+6th Generation: powernow-k6
+
+7th Generation: powernow-k7: Athlon, Duron, Geode.
+
+8th Generation: powernow-k8: Athlon, Athlon 64, Opteron, Sempron.
+Documentation on this functionality in 8th generation processors
+is available in the "BIOS and Kernel Developer's Guide", publication
+26094, in chapter 9, available for download from www.amd.com. 
+
+BIOS supplied data, for powernow-k7 and for powernow-k8, may be
+from either the PSB table or from ACPI objects. The ACPI support
+is only available if the kernel config sets CONFIG_ACPI_PROCESSOR.
+The powernow-k8 driver will attempt to use ACPI if so configured,
+and fall back to PST if that fails.
+The powernow-k7 driver will try to use the PSB support first, and
+fall back to ACPI if the PSB support fails. A module parameter,
+acpi_force, is provided to force ACPI support to be used instead 
+of PSB support.
diff --git a/Documentation/sound/alsa/Audigy-mixer.txt b/Documentation/sound/alsa/Audigy-mixer.txt
new file mode 100644 (file)
index 0000000..5132fd9
--- /dev/null
@@ -0,0 +1,345 @@
+
+               Sound Blaster Audigy mixer / default DSP code
+               ===========================================
+
+This is based on SB-Live-mixer.txt.
+
+The EMU10K2 chips have a DSP part which can be programmed to support 
+various ways of sample processing, which is described here.
+(This article does not deal with the overall functionality of the 
+EMU10K2 chips. See the manuals section for further details.)
+
+The ALSA driver programs this portion of chip by default code
+(can be altered later) which offers the following functionality:
+
+
+1) Digital mixer controls
+-------------------------
+
+These controls are built using the DSP instructions. They offer extended
+functionality. Only the default built-in code in the ALSA driver is described
+here. Note that the controls work as attenuators: the maximum value is the 
+neutral position leaving the signal unchanged. Note that if the  same destination 
+is mentioned in multiple controls, the signal is accumulated and can be wrapped 
+(set to maximal or minimal value without checking of overflow).
+
+
+Explanation of used abbreviations:
+
+DAC    - digital to analog converter
+ADC    - analog to digital converter
+I2S    - one-way three wire serial bus for digital sound by Philips Semiconductors
+         (this standard is used for connecting standalone DAC and ADC converters)
+LFE    - low frequency effects (subwoofer signal)
+AC97   - a chip containing an analog mixer, DAC and ADC converters
+IEC958 - S/PDIF
+FX-bus - the EMU10K2 chip has an effect bus containing 64 accumulators.
+         Each of the synthesizer voices can feed its output to these accumulators
+         and the DSP microcontroller can operate with the resulting sum.
+
+name='PCM Front Playback Volume',index=0
+
+This control is used to attenuate samples for left and right front PCM FX-bus
+accumulators. ALSA uses accumulators 8 and 9 for left and right front PCM 
+samples for 5.1 playback. The result samples are forwarded to the front DAC PCM 
+slots of the Philips DAC.
+
+name='PCM Surround Playback Volume',index=0
+
+This control is used to attenuate samples for left and right surround PCM FX-bus
+accumulators. ALSA uses accumulators 2 and 3 for left and right surround PCM 
+samples for 5.1 playback. The result samples are forwarded to the surround DAC PCM 
+slots of the Philips DAC.
+
+name='PCM Center Playback Volume',index=0
+
+This control is used to attenuate samples for center PCM FX-bus accumulator.
+ALSA uses accumulator 6 for center PCM sample for 5.1 playback. The result sample
+is forwarded to the center DAC PCM slot of the Philips DAC.
+
+name='PCM LFE Playback Volume',index=0
+
+This control is used to attenuate sample for LFE PCM FX-bus accumulator. 
+ALSA uses accumulator 7 for LFE PCM sample for 5.1 playback. The result sample 
+is forwarded to the LFE DAC PCM slot of the Philips DAC.
+
+name='PCM Playback Volume',index=0
+
+This control is used to attenuate samples for left and right PCM FX-bus
+accumulators. ALSA uses accumulators 0 and 1 for left and right PCM samples for
+stereo playback. The result samples are forwarded to the front DAC PCM slots 
+of the Philips DAC.
+
+name='PCM Capture Volume',index=0
+
+This control is used to attenuate samples for left and right PCM FX-bus
+accumulator. ALSA uses accumulators 0 and 1 for left and right PCM.
+The result is forwarded to the ADC capture FIFO (thus to the standard capture
+PCM device).
+
+name='Music Playback Volume',index=0
+
+This control is used to attenuate samples for left and right MIDI FX-bus
+accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples.
+The result samples are forwarded to the front DAC PCM slots of the AC97 codec.
+
+name='Music Capture Volume',index=0
+
+These controls are used to attenuate samples for left and right MIDI FX-bus
+accumulator. ALSA uses accumulators 4 and 5 for left and right PCM.
+The result is forwarded to the ADC capture FIFO (thus to the standard capture
+PCM device).
+
+name='Mic Playback Volume',index=0
+
+This control is used to attenuate samples for left and right Mic input.
+For Mic input is used AC97 codec. The result samples are forwarded to 
+the front DAC PCM slots of the Philips DAC. Samples are forwarded to Mic
+capture FIFO (device 1 - 16bit/8KHz mono) too without volume control.
+
+name='Mic Capture Volume',index=0
+
+This control is used to attenuate samples for left and right Mic input.
+The result is forwarded to the ADC capture FIFO (thus to the standard capture
+PCM device).
+
+name='Audigy CD Playback Volume',index=0
+
+This control is used to attenuate samples from left and right IEC958 TTL
+digital inputs (usually used by a CDROM drive). The result samples are
+forwarded to the front DAC PCM slots of the Philips DAC.
+
+name='Audigy CD Capture Volume',index=0
+
+This control is used to attenuate samples from left and right IEC958 TTL
+digital inputs (usually used by a CDROM drive). The result samples are
+forwarded to the ADC capture FIFO (thus to the standard capture PCM device).
+
+name='IEC958 Optical Playback Volume',index=0
+
+This control is used to attenuate samples from left and right IEC958 optical
+digital input. The result samples are forwarded to the front DAC PCM slots
+of the Philips DAC.
+
+name='IEC958 Optical Capture Volume',index=0
+
+This control is used to attenuate samples from left and right IEC958 optical
+digital inputs. The result samples are forwarded to the ADC capture FIFO
+(thus to the standard capture PCM device).
+
+name='Line2 Playback Volume',index=0
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs (on the AudigyDrive). The result samples are forwarded to the front
+DAC PCM slots of the Philips DAC.
+
+name='Line2 Capture Volume',index=1
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs (on the AudigyDrive). The result samples are forwarded to the ADC
+capture FIFO (thus to the standard capture PCM device).
+
+name='Analog Mix Playback Volume',index=0
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs from Philips ADC. The result samples are forwarded to the front
+DAC PCM slots of the Philips DAC. This contains mix from analog sources
+like CD, Line In, Aux, ....
+
+name='Analog Mix Capture Volume',index=1
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs Philips ADC. The result samples are forwarded to the ADC
+capture FIFO (thus to the standard capture PCM device).
+
+name='Aux2 Playback Volume',index=0
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs (on the AudigyDrive). The result samples are forwarded to the front
+DAC PCM slots of the Philips DAC.
+
+name='Aux2 Capture Volume',index=1
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs (on the AudigyDrive). The result samples are forwarded to the ADC
+capture FIFO (thus to the standard capture PCM device).
+
+name='Front Playback Volume',index=0
+
+All stereo signals are mixed together and mirrored to surround, center and LFE.
+This control is used to attenuate samples for left and right front speakers of
+this mix.
+
+name='Surround Playback Volume',index=0
+
+All stereo signals are mixed together and mirrored to surround, center and LFE.
+This control is used to attenuate samples for left and right surround speakers of
+this mix.
+
+name='Center Playback Volume',index=0
+
+All stereo signals are mixed together and mirrored to surround, center and LFE.
+This control is used to attenuate sample for center speaker of this mix.
+
+name='LFE Playback Volume',index=0
+
+All stereo signals are mixed together and mirrored to surround, center and LFE.
+This control is used to attenuate sample for LFE speaker of this mix.
+
+name='Tone Control - Switch',index=0
+
+This control turns the tone control on or off. The samples for front, rear
+and center / LFE outputs are affected.
+
+name='Tone Control - Bass',index=0
+
+This control sets the bass intensity. There is no neutral value!!
+When the tone control code is activated, the samples are always modified.
+The closest value to pure signal is 20.
+
+name='Tone Control - Treble',index=0
+
+This control sets the treble intensity. There is no neutral value!!
+When the tone control code is activated, the samples are always modified.
+The closest value to pure signal is 20.
+
+name='Master Playback Volume',index=0
+
+This control is used to attenuate samples for front, surround, center and 
+LFE outputs.
+
+name='IEC958 Optical Raw Playback Switch',index=0
+
+If this switch is on, then the samples for the IEC958 (S/PDIF) digital
+output are taken only from the raw FX8010 PCM, otherwise standard front
+PCM samples are taken.
+
+
+2) PCM stream related controls
+------------------------------
+
+name='EMU10K1 PCM Volume',index 0-31
+
+Channel volume attenuation in range 0-0xffff. The maximum value (no
+attenuation) is default. The channel mapping for three values is
+as follows:
+
+       0 - mono, default 0xffff (no attenuation)
+       1 - left, default 0xffff (no attenuation)
+       2 - right, default 0xffff (no attenuation)
+
+name='EMU10K1 PCM Send Routing',index 0-31
+
+This control specifies the destination - FX-bus accumulators. There are 24
+values with this mapping:
+
+        0 -  mono, A destination (FX-bus 0-63), default 0
+        1 -  mono, B destination (FX-bus 0-63), default 1
+        2 -  mono, C destination (FX-bus 0-63), default 2
+        3 -  mono, D destination (FX-bus 0-63), default 3
+        4 -  mono, E destination (FX-bus 0-63), default 0
+        5 -  mono, F destination (FX-bus 0-63), default 0
+        6 -  mono, G destination (FX-bus 0-63), default 0
+        7 -  mono, H destination (FX-bus 0-63), default 0
+        8 -  left, A destination (FX-bus 0-63), default 0
+        9 -  left, B destination (FX-bus 0-63), default 1
+       10 -  left, C destination (FX-bus 0-63), default 2
+       11 -  left, D destination (FX-bus 0-63), default 3
+       12 -  left, E destination (FX-bus 0-63), default 0
+       13 -  left, F destination (FX-bus 0-63), default 0
+       14 -  left, G destination (FX-bus 0-63), default 0
+       15 -  left, H destination (FX-bus 0-63), default 0
+       16 - right, A destination (FX-bus 0-63), default 0
+       17 - right, B destination (FX-bus 0-63), default 1
+       18 - right, C destination (FX-bus 0-63), default 2
+       19 - right, D destination (FX-bus 0-63), default 3
+       20 - right, E destination (FX-bus 0-63), default 0
+       21 - right, F destination (FX-bus 0-63), default 0
+       22 - right, G destination (FX-bus 0-63), default 0
+       23 - right, H destination (FX-bus 0-63), default 0
+
+Don't forget that it's illegal to assign a channel to the same FX-bus accumulator 
+more than once (it means 0=0 && 1=0 is an invalid combination).
+name='EMU10K1 PCM Send Volume',index 0-31
+
+It specifies the attenuation (amount) for given destination in range 0-255.
+The channel mapping is as follows:
+
+        0 -  mono, A destination attn, default 255 (no attenuation)
+        1 -  mono, B destination attn, default 255 (no attenuation)
+        2 -  mono, C destination attn, default 0 (mute)
+        3 -  mono, D destination attn, default 0 (mute)
+        4 -  mono, E destination attn, default 0 (mute)
+        5 -  mono, F destination attn, default 0 (mute)
+        6 -  mono, G destination attn, default 0 (mute)
+        7 -  mono, H destination attn, default 0 (mute)
+        8 -  left, A destination attn, default 255 (no attenuation)
+        9 -  left, B destination attn, default 0 (mute)
+       10 -  left, C destination attn, default 0 (mute)
+       11 -  left, D destination attn, default 0 (mute)
+       12 -  left, E destination attn, default 0 (mute)
+       13 -  left, F destination attn, default 0 (mute)
+       14 -  left, G destination attn, default 0 (mute)
+       15 -  left, H destination attn, default 0 (mute)
+       16 - right, A destination attn, default 0 (mute)
+       17 - right, B destination attn, default 255 (no attenuation)
+       18 - right, C destination attn, default 0 (mute)
+       19 - right, D destination attn, default 0 (mute)
+       20 - right, E destination attn, default 0 (mute)
+       21 - right, F destination attn, default 0 (mute)
+       22 - right, G destination attn, default 0 (mute)
+       23 - right, H destination attn, default 0 (mute)
+
+
+
+4) MANUALS/PATENTS:
+-------------------
+
+ftp://opensource.creative.com/pub/doc
+-------------------------------------
+
+        Files:
+        LM4545.pdf      AC97 Codec
+
+        m2049.pdf       The EMU10K1 Digital Audio Processor
+
+        hog63.ps        FX8010 - A DSP Chip Architecture for Audio Effects
+
+
+WIPO Patents
+------------
+        Patent numbers:
+        WO 9901813 (A1) Audio Effects Processor with multiple asynchronous (Jan. 14, 1999)
+                        streams
+
+        WO 9901814 (A1) Processor with Instruction Set for Audio Effects (Jan. 14, 1999)
+
+        WO 9901953 (A1) Audio Effects Processor having Decoupled Instruction
+                        Execution and Audio Data Sequencing (Jan. 14, 1999)
+
+
+US Patents (http://www.uspto.gov/)
+----------------------------------
+
+        US 5925841      Digital Sampling Instrument employing cache memory (Jul. 20, 1999)
+
+        US 5928342      Audio Effects Processor integrated on a single chip (Jul. 27, 1999)
+                        with a multiport memory onto which multiple asynchronous
+                        digital sound samples can be concurrently loaded
+
+        US 5930158      Processor with Instruction Set for Audio Effects (Jul. 27, 1999)
+
+        US 6032235      Memory initialization circuit (Tram) (Feb. 29, 2000)
+
+        US 6138207      Interpolation looping of audio samples in cache connected to    (Oct. 24, 2000)
+                        system bus with prioritization and modification of bus transfers
+                        in accordance with loop ends and minimum block sizes
+
+        US 6151670      Method for conserving memory storage using a (Nov. 21, 2000)
+                        pool of  short term memory registers
+
+        US 6195715      Interrupt control for multiple programs communicating with      (Feb. 27, 2001)
+                        a common interrupt by associating programs to GP registers,
+                        defining interrupt register, polling GP registers, and invoking
+                        callback routine associated with defined interrupt register
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
new file mode 100644 (file)
index 0000000..fd95f39
--- /dev/null
@@ -0,0 +1,1081 @@
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_EMBEDDED=y
+CONFIG_KALLSYMS=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODULE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+CONFIG_MODVERSIONS=y
+CONFIG_KMOD=y
+
+#
+# System Type
+#
+# CONFIG_ARCH_ADIFCC is not set
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+CONFIG_ARCH_IXP4XX=y
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_VERSATILE_PB is not set
+
+#
+# CLPS711X/EP721X Implementations
+#
+
+#
+# Epxa10db
+#
+
+#
+# Footbridge Implementations
+#
+
+#
+# IOP3xx Implementation Options
+#
+# CONFIG_ARCH_IOP310 is not set
+# CONFIG_ARCH_IOP321 is not set
+
+#
+# IOP3xx Chipset Features
+#
+CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y
+
+#
+# Intel IXP4xx Implementation Options
+#
+
+#
+# IXP4xx Platforms
+#
+CONFIG_ARCH_IXDP425=y
+CONFIG_ARCH_IXCDP1100=y
+CONFIG_ARCH_PRPMC1100=y
+CONFIG_ARCH_ADI_COYOTE=y
+# CONFIG_ARCH_AVILA is not set
+CONFIG_ARCH_IXDP4XX=y
+
+#
+# IXP4xx Options
+#
+# CONFIG_IXP4XX_INDIRECT_PCI is not set
+
+#
+# Intel PXA250/210 Implementations
+#
+
+#
+# SA11x0 Implementations
+#
+
+#
+# TI OMAP Implementations
+#
+
+#
+# OMAP Core Type
+#
+
+#
+# OMAP Board Type
+#
+
+#
+# OMAP Feature Selections
+#
+
+#
+# S3C2410 Implementations
+#
+
+#
+# LH7A40X Implementations
+#
+CONFIG_DMABOUNCE=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_XSCALE=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5T=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_MINICACHE=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+CONFIG_CPU_BIG_ENDIAN=y
+CONFIG_XSCALE_PMU=y
+
+#
+# General setup
+#
+CONFIG_PCI=y
+# CONFIG_ZBOOT_ROM is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_PCI_LEGACY_PROC=y
+CONFIG_PCI_NAMES=y
+
+#
+# At least one math emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Generic Driver Options
+#
+# CONFIG_DEBUG_DRIVER is not set
+CONFIG_PM=y
+# CONFIG_PREEMPT is not set
+CONFIG_APM=y
+# CONFIG_ARTHUR is not set
+CONFIG_CMDLINE="console=ttyS0,115200 ip=bootp root=/dev/nfs"
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_REDBOOT_PARTS=y
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+CONFIG_MTD_IXP4XX=y
+# CONFIG_MTD_EDB7312 is not set
+# CONFIG_MTD_PCI is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+CONFIG_MTD_NAND=m
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+CONFIG_MTD_NAND_IDS=m
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_CARMEL is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_INITRD=y
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=m
+CONFIG_PACKET_MMAP=y
+CONFIG_NETLINK_DEV=m
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_FWMARK=y
+CONFIG_IP_ROUTE_NAT=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_TOS=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+
+#
+# IP: Virtual Server Configuration
+#
+CONFIG_IP_VS=m
+CONFIG_IP_VS_DEBUG=y
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+# CONFIG_IP_VS_PROTO_TCP is not set
+# CONFIG_IP_VS_PROTO_UDP is not set
+# CONFIG_IP_VS_PROTO_ESP is not set
+# CONFIG_IP_VS_PROTO_AH is not set
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+# CONFIG_IP_VS_SED is not set
+# CONFIG_IP_VS_NQ is not set
+
+#
+# IPVS application helper
+#
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_NF_CONNTRACK=m
+CONFIG_IP_NF_FTP=m
+CONFIG_IP_NF_IRC=m
+# CONFIG_IP_NF_TFTP is not set
+# CONFIG_IP_NF_AMANDA is not set
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_LIMIT=m
+# CONFIG_IP_NF_MATCH_IPRANGE is not set
+CONFIG_IP_NF_MATCH_MAC=m
+# CONFIG_IP_NF_MATCH_PKTTYPE is not set
+CONFIG_IP_NF_MATCH_MARK=m
+CONFIG_IP_NF_MATCH_MULTIPORT=m
+CONFIG_IP_NF_MATCH_TOS=m
+# CONFIG_IP_NF_MATCH_RECENT is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_DSCP is not set
+CONFIG_IP_NF_MATCH_AH_ESP=m
+CONFIG_IP_NF_MATCH_LENGTH=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_MATCH_TCPMSS=m
+# CONFIG_IP_NF_MATCH_HELPER is not set
+CONFIG_IP_NF_MATCH_STATE=m
+# CONFIG_IP_NF_MATCH_CONNTRACK is not set
+CONFIG_IP_NF_MATCH_OWNER=m
+# CONFIG_IP_NF_MATCH_PHYSDEV is not set
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_NAT_NEEDED=y
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+# CONFIG_IP_NF_TARGET_SAME is not set
+CONFIG_IP_NF_NAT_LOCAL=y
+CONFIG_IP_NF_NAT_SNMP_BASIC=m
+CONFIG_IP_NF_NAT_IRC=m
+CONFIG_IP_NF_NAT_FTP=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_TOS=m
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_DSCP is not set
+CONFIG_IP_NF_TARGET_MARK=m
+# CONFIG_IP_NF_TARGET_CLASSIFY is not set
+CONFIG_IP_NF_TARGET_LOG=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_TARGET_TCPMSS=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+# CONFIG_IP_NF_ARP_MANGLE is not set
+CONFIG_IP_NF_COMPAT_IPCHAINS=m
+CONFIG_IP_NF_COMPAT_IPFWADM=m
+# CONFIG_IP_NF_RAW is not set
+
+#
+# Bridge: Netfilter Configuration
+#
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+CONFIG_ATM=y
+CONFIG_ATM_CLIP=y
+# CONFIG_ATM_CLIP_NO_ICMP is not set
+CONFIG_ATM_LANE=m
+CONFIG_ATM_MPOA=m
+CONFIG_ATM_BR2684=m
+# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+CONFIG_IPX=m
+# CONFIG_IPX_INTERN is not set
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=y
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_X25=m
+CONFIG_LAPB=m
+# CONFIG_NET_DIVERT is not set
+CONFIG_ECONET=m
+CONFIG_ECONET_AUNUDP=y
+CONFIG_ECONET_NATIVE=y
+CONFIG_WAN_ROUTER=m
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+# CONFIG_NET_SCH_HFSC is not set
+CONFIG_NET_SCH_CSZ=m
+# CONFIG_NET_SCH_ATM is not set
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+# CONFIG_NET_SCH_DELAY is not set
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_QOS=y
+CONFIG_NET_ESTIMATOR=y
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_ROUTE=y
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_POLICE=y
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_ETHERTAP is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+CONFIG_EEPRO100=y
+# CONFIG_EEPRO100_PIO is not set
+# CONFIG_E100 is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_TIGON3 is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+CONFIG_NET_RADIO=y
+
+#
+# Obsolete Wireless cards support (pre-802.11)
+#
+# CONFIG_STRIP is not set
+
+#
+# Wireless 802.11b ISA/PCI cards support
+#
+# CONFIG_AIRO is not set
+CONFIG_HERMES=y
+# CONFIG_PLX_HERMES is not set
+# CONFIG_TMD_HERMES is not set
+CONFIG_PCI_HERMES=y
+# CONFIG_ATMEL is not set
+
+#
+# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support
+#
+CONFIG_NET_WIRELESS=y
+
+#
+# Wan interfaces
+#
+CONFIG_WAN=y
+# CONFIG_DSCC4 is not set
+# CONFIG_LANMEDIA is not set
+# CONFIG_SYNCLINK_SYNCPPP is not set
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=y
+# CONFIG_HDLC_RAW_ETH is not set
+CONFIG_HDLC_CISCO=y
+CONFIG_HDLC_FR=y
+CONFIG_HDLC_PPP=y
+CONFIG_HDLC_X25=y
+# CONFIG_PCI200SYN is not set
+# CONFIG_WANXL is not set
+# CONFIG_PC300 is not set
+# CONFIG_FARSYNC is not set
+CONFIG_DLCI=m
+CONFIG_DLCI_COUNT=24
+CONFIG_DLCI_MAX=8
+CONFIG_WAN_ROUTER_DRIVERS=y
+# CONFIG_CYCLADES_SYNC is not set
+# CONFIG_LAPBETHER is not set
+# CONFIG_X25_ASY is not set
+
+#
+# ATM drivers
+#
+CONFIG_ATM_TCP=m
+# CONFIG_ATM_LANAI is not set
+# CONFIG_ATM_ENI is not set
+# CONFIG_ATM_FIRESTREAM is not set
+# CONFIG_ATM_ZATM is not set
+# CONFIG_ATM_NICSTAR is not set
+# CONFIG_ATM_IDT77252 is not set
+# CONFIG_ATM_AMBASSADOR is not set
+# CONFIG_ATM_HORIZON is not set
+# CONFIG_ATM_IA is not set
+# CONFIG_ATM_FORE200E_MAYBE is not set
+# CONFIG_ATM_HE is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_RCPCI is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_IDEDISK_STROKE is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+# CONFIG_IDE_TASKFILE_IO is not set
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_IDEPCI=y
+# CONFIG_IDEPCI_SHARE_IRQ is not set
+# CONFIG_BLK_DEV_OFFBOARD is not set
+# CONFIG_BLK_DEV_GENERIC is not set
+# CONFIG_BLK_DEV_OPTI621 is not set
+# CONFIG_BLK_DEV_SL82C105 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_IDEDMA_FORCED is not set
+# CONFIG_IDEDMA_PCI_AUTO is not set
+CONFIG_BLK_DEV_ADMA=y
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+CONFIG_BLK_DEV_CMD64X=y
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CY82C693 is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT34X is not set
+CONFIG_BLK_DEV_HPT366=y
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+CONFIG_BLK_DEV_PDC202XX_NEW=y
+# CONFIG_PDC202XX_FORCE is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+CONFIG_BLK_DEV_IDEDMA=y
+# CONFIG_IDEDMA_IVB is not set
+# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+# CONFIG_SCSI is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+# CONFIG_SERIO is not set
+# CONFIG_SERIO_I8042 is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_IXP4XX_WATCHDOG=y
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_ALGOPCF is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_ISA is not set
+CONFIG_I2C_IXP4XX=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+
+#
+# Hardware Sensors Chip support
+#
+CONFIG_I2C_SENSOR=y
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+
+#
+# Other I2C Chip support
+#
+CONFIG_SENSORS_EEPROM=y
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+# CONFIG_EXT2_FS_SECURITY is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_FAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+# CONFIG_JFFS2_FS_NAND is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# Misc devices
+#
+
+#
+# USB support
+#
+# CONFIG_USB is not set
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# Kernel hacking
+#
+CONFIG_FRAME_POINTER=y
+# CONFIG_DEBUG_USER is not set
+# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_WAITQ is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_ERRORS=y
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+# CONFIG_DEBUG_BDI2000_XSCALE is not set
+
+#
+# Security options
+#
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
diff --git a/arch/arm/configs/mainstone_defconfig b/arch/arm/configs/mainstone_defconfig
new file mode 100644 (file)
index 0000000..925b277
--- /dev/null
@@ -0,0 +1,743 @@
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_HOTPLUG=y
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+# CONFIG_MODULE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_KMOD is not set
+
+#
+# System Type
+#
+# CONFIG_ARCH_ADIFCC is not set
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+CONFIG_ARCH_PXA=y
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_VERSATILE_PB is not set
+
+#
+# CLPS711X/EP721X Implementations
+#
+
+#
+# Epxa10db
+#
+
+#
+# Footbridge Implementations
+#
+
+#
+# IOP3xx Implementation Options
+#
+# CONFIG_ARCH_IOP310 is not set
+# CONFIG_ARCH_IOP321 is not set
+
+#
+# IOP3xx Chipset Features
+#
+
+#
+# Intel PXA2xx Implementations
+#
+# CONFIG_ARCH_LUBBOCK is not set
+CONFIG_MACH_MAINSTONE=y
+# CONFIG_ARCH_PXA_IDP is not set
+CONFIG_PXA27x=y
+CONFIG_IWMMXT=y
+
+#
+# SA11x0 Implementations
+#
+
+#
+# TI OMAP Implementations
+#
+
+#
+# OMAP Core Type
+#
+
+#
+# OMAP Board Type
+#
+
+#
+# OMAP Feature Selections
+#
+
+#
+# S3C2410 Implementations
+#
+
+#
+# LH7A40X Implementations
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_XSCALE=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5T=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_MINICACHE=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+CONFIG_XSCALE_PMU=y
+
+#
+# General setup
+#
+# CONFIG_ZBOOT_ROM is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+
+#
+# PCMCIA/CardBus support
+#
+CONFIG_PCMCIA=y
+# CONFIG_PCMCIA_DEBUG is not set
+# CONFIG_TCIC is not set
+CONFIG_PCMCIA_PXA2XX=y
+
+#
+# At least one math emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Generic Driver Options
+#
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_PM is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_ARTHUR is not set
+CONFIG_CMDLINE="root=/dev/nfs ip=bootp console=ttyS0,115200 mem=64M"
+CONFIG_LEDS=y
+CONFIG_LEDS_TIMER=y
+CONFIG_LEDS_CPU=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_REDBOOT_PARTS=y
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_NOSWAP=y
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
+CONFIG_MTD_CFI_GEOMETRY=y
+# CONFIG_MTD_CFI_B1 is not set
+# CONFIG_MTD_CFI_B2 is not set
+CONFIG_MTD_CFI_B4=y
+# CONFIG_MTD_CFI_B8 is not set
+# CONFIG_MTD_CFI_I1 is not set
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_EDB7312 is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_PACKET is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_SMC91X=y
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDE=y
+
+#
+# Please see Documentation/ide.txt for help/info on IDE drives
+#
+CONFIG_BLK_DEV_IDEDISK=y
+# CONFIG_IDEDISK_MULTI_MODE is not set
+# CONFIG_IDEDISK_STROKE is not set
+CONFIG_BLK_DEV_IDECS=y
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_BLK_DEV_IDEFLOPPY is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+# CONFIG_IDE_TASKFILE_IO is not set
+
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_IDE_GENERIC is not set
+# CONFIG_BLK_DEV_IDEDMA is not set
+# CONFIG_IDEDMA_AUTO is not set
+# CONFIG_BLK_DEV_HD is not set
+
+#
+# SCSI device support
+#
+# CONFIG_SCSI is not set
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_SERIO_CT82C710 is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_PXA=y
+CONFIG_SERIAL_PXA_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+# CONFIG_JFFS2_FS_NAND is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+
+#
+# Native Language Support
+#
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# Misc devices
+#
+
+#
+# USB support
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# Kernel hacking
+#
+CONFIG_FRAME_POINTER=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_WAITQ is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_ERRORS=y
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
diff --git a/arch/arm/configs/smdk2410_defconfig b/arch/arm/configs/smdk2410_defconfig
new file mode 100644 (file)
index 0000000..a88724f
--- /dev/null
@@ -0,0 +1,667 @@
+#
+# Automatically generated make config: don't edit
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_STANDALONE=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_HOTPLUG is not set
+# CONFIG_IKCONFIG is not set
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+
+#
+# Loadable module support
+#
+# CONFIG_MODULES is not set
+
+#
+# System Type
+#
+# CONFIG_ARCH_ADIFCC is not set
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_SHARK is not set
+CONFIG_ARCH_S3C2410=y
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_VERSATILE_PB is not set
+
+#
+# CLPS711X/EP721X Implementations
+#
+
+#
+# Epxa10db
+#
+
+#
+# Footbridge Implementations
+#
+
+#
+# IOP3xx Implementation Options
+#
+# CONFIG_ARCH_IOP310 is not set
+# CONFIG_ARCH_IOP321 is not set
+
+#
+# IOP3xx Chipset Features
+#
+
+#
+# Intel PXA250/210 Implementations
+#
+
+#
+# SA11x0 Implementations
+#
+
+#
+# TI OMAP Implementations
+#
+
+#
+# OMAP Core Type
+#
+
+#
+# OMAP Board Type
+#
+
+#
+# OMAP Feature Selections
+#
+
+#
+# S3C2410 Implementations
+#
+# CONFIG_ARCH_BAST is not set
+# CONFIG_ARCH_H1940 is not set
+CONFIG_ARCH_SMDK2410=y
+# CONFIG_MACH_VR1000 is not set
+
+#
+# LH7A40X Implementations
+#
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+
+#
+# General setup
+#
+# CONFIG_ZBOOT_ROM is not set
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+
+#
+# At least one math emulation must be selected
+#
+# CONFIG_FPE_NWFPE is not set
+# CONFIG_FPE_FASTFPE is not set
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_AOUT=y
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Generic Driver Options
+#
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_PM is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_ARTHUR is not set
+CONFIG_CMDLINE="root=1f04 mem=32M"
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_PARTITIONS is not set
+# CONFIG_MTD_CONCAT is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_EDB7312 is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_INITRD is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+# CONFIG_PACKET is not set
+# CONFIG_NETLINK_DEV is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_SCSI is not set
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_S3C2410=y
+CONFIG_SERIAL_S3C2410_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+CONFIG_ROMFS_FS=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_FAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+# CONFIG_TMPFS is not set
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+# CONFIG_EXPORTFS is not set
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_INTERMEZZO_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_PCI_CONSOLE=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+
+#
+# Logo configuration
+#
+# CONFIG_LOGO is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# Misc devices
+#
+
+#
+# USB support
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# Kernel hacking
+#
+CONFIG_FRAME_POINTER=y
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_INFO is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_WAITQ is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_ERRORS is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+CONFIG_DEBUG_LL_PRINTK=y
+CONFIG_DEBUG_S3C2410_PORT=y
+CONFIG_DEBUG_S3C2410_UART=0
+
+#
+# Security options
+#
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Library routines
+#
+CONFIG_CRC32=y
+CONFIG_LIBCRC32C=y
diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile
new file mode 100644 (file)
index 0000000..f656397
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y  += common.o common-pci.o 
+
+obj-$(CONFIG_ARCH_IXDP4XX)     += ixdp425-pci.o ixdp425-setup.o
+obj-$(CONFIG_ARCH_ADI_COYOTE)  += coyote-pci.o coyote-setup.o
+obj-$(CONFIG_ARCH_PRPMC1100)   += prpmc1100-pci.o prpmc1100-setup.o
+
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
new file mode 100644 (file)
index 0000000..c20dc32
--- /dev/null
@@ -0,0 +1,543 @@
+/*
+ * arch/arm/mach-ixp4xx/common-pci.c 
+ *
+ * IXP4XX PCI routines for all platforms
+ *
+ * Maintainer: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright (C) 2002 Intel Corporation.
+ * Copyright (C) 2003 Greg Ungerer <gerg@snapgear.com>
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <asm/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/sizes.h>
+#include <asm/system.h>
+#include <asm/mach/pci.h>
+#include <asm/hardware.h>
+#include <asm/sizes.h>
+
+
+/*
+ * IXP4xx PCI read function is dependent on whether we are 
+ * running A0 or B0 (AppleGate) silicon.
+ */
+int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
+
+/*
+ * Base address for PCI regsiter region
+ */
+unsigned long ixp4xx_pci_reg_base = 0;
+
+/*
+ * PCI cfg an I/O routines are done by programming a 
+ * command/byte enable register, and then read/writing
+ * the data from a data regsiter. We need to ensure
+ * these transactions are atomic or we will end up
+ * with corrupt data on the bus or in a driver.
+ */
+static spinlock_t ixp4xx_pci_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * Read from PCI config space
+ */
+static void crp_read(u32 ad_cbe, u32 *data)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+       *PCI_CRP_AD_CBE = ad_cbe;
+       *data = *PCI_CRP_RDATA;
+       spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+}
+
+/*
+ * Write to PCI config space
+ */
+static void crp_write(u32 ad_cbe, u32 data)
+{ 
+       unsigned long flags;
+       spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+       *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe;
+       *PCI_CRP_WDATA = data;
+       spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+}
+
+static inline int check_master_abort(void)
+{
+       /* check Master Abort bit after access */
+       unsigned long isr = *PCI_ISR;
+
+       if (isr & PCI_ISR_PFE) {
+               /* make sure the Master Abort bit is reset */    
+               *PCI_ISR = PCI_ISR_PFE;
+               pr_debug("%s failed\n", __FUNCTION__);
+               return 1;
+       }
+
+       return 0;
+}
+
+int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data)
+{
+       unsigned long flags;
+       int retval = 0;
+       int i;
+
+       spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+
+       *PCI_NP_AD = addr;
+
+       /* 
+        * PCI workaround  - only works if NP PCI space reads have 
+        * no side effects!!! Read 8 times. last one will be good.
+        */
+       for (i = 0; i < 8; i++) {
+               *PCI_NP_CBE = cmd;
+               *data = *PCI_NP_RDATA;
+               *data = *PCI_NP_RDATA;
+       }
+
+       if(check_master_abort())
+               retval = 1;
+
+       spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+       return retval;
+}
+
+int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data)
+{
+       unsigned long flags;
+       int retval = 0;
+
+       spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+
+       *PCI_NP_AD = addr;
+
+       /* set up and execute the read */    
+       *PCI_NP_CBE = cmd;
+
+       /* the result of the read is now in NP_RDATA */
+       *data = *PCI_NP_RDATA; 
+
+       if(check_master_abort())
+               retval = 1;
+
+       spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+       return retval;
+}
+
+int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data)
+{    
+       unsigned long flags;
+       int retval = 0;
+
+       spin_lock_irqsave(&ixp4xx_pci_lock, flags);
+
+       *PCI_NP_AD = addr;
+
+       /* set up the write */
+       *PCI_NP_CBE = cmd;
+
+       /* execute the write by writing to NP_WDATA */
+       *PCI_NP_WDATA = data;
+
+       if(check_master_abort())
+               retval = 1;
+
+       spin_unlock_irqrestore(&ixp4xx_pci_lock, flags);
+       return retval;
+}
+
+static u32 ixp4xx_config_addr(u8 bus_num, u16 devfn, int where)
+{
+       u32 addr;
+       if (!bus_num) {
+               /* type 0 */
+               addr = BIT(32-PCI_SLOT(devfn)) | ((PCI_FUNC(devfn)) << 8) | 
+                   (where & ~3);       
+       } else {
+               /* type 1 */
+               addr = (bus_num << 16) | ((PCI_SLOT(devfn)) << 11) | 
+                       ((PCI_FUNC(devfn)) << 8) | (where & ~3) | 1;
+       }
+       return addr;
+}
+
+/*
+ * Mask table, bits to mask for quantity of size 1, 2 or 4 bytes.
+ * 0 and 3 are not valid indexes...
+ */
+static u32 bytemask[] = {
+       /*0*/   0,
+       /*1*/   0xff,
+       /*2*/   0xffff,
+       /*3*/   0,
+       /*4*/   0xffffffff,
+};
+
+static u32 local_byte_lane_enable_bits(u32 n, int size)
+{
+       if (size == 1)
+               return (0xf & ~BIT(n)) << CRP_AD_CBE_BESL;
+       if (size == 2)
+               return (0xf & ~(BIT(n) | BIT(n+1))) << CRP_AD_CBE_BESL;
+       if (size == 4)
+               return 0;
+       return 0xffffffff;
+}
+
+static int local_read_config(int where, int size, u32 *value)
+{ 
+       u32 n, data;
+       pr_debug("local_read_config from %d size %d\n", where, size);
+       n = where % 4;
+       crp_read(where & ~3, &data);
+       *value = (data >> (8*n)) & bytemask[size];
+       pr_debug("local_read_config read %#x\n", *value);
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int local_write_config(int where, int size, u32 value)
+{
+       u32 n, byte_enables, data;
+       pr_debug("local_write_config %#x to %d size %d\n", value, where, size);
+       n = where % 4;
+       byte_enables = local_byte_lane_enable_bits(n, size);
+       if (byte_enables == 0xffffffff)
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+       data = value << (8*n);
+       crp_write((where & ~3) | byte_enables, data);
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static u32 byte_lane_enable_bits(u32 n, int size)
+{
+       if (size == 1)
+               return (0xf & ~BIT(n)) << 4;
+       if (size == 2)
+               return (0xf & ~(BIT(n) | BIT(n+1))) << 4;
+       if (size == 4)
+               return 0;
+       return 0xffffffff;
+}
+
+static int read_config(u8 bus_num, u16 devfn, int where, int size, u32 *value)
+{
+       u32 n, byte_enables, addr, data;
+
+       pr_debug("read_config from %d size %d dev %d:%d:%d\n", where, size,
+               bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+       *value = 0xffffffff;
+       n = where % 4;
+       byte_enables = byte_lane_enable_bits(n, size);
+       if (byte_enables == 0xffffffff)
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+
+       addr = ixp4xx_config_addr(bus_num, devfn, where);
+       if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_CONFIGREAD, &data))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       *value = (data >> (8*n)) & bytemask[size];
+       pr_debug("read_config_byte read %#x\n", *value);
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int write_config(u8 bus_num, u16 devfn, int where, int size, u32 value)
+{
+       u32 n, byte_enables, addr, data;
+
+       pr_debug("write_config_byte %#x to %d size %d dev %d:%d:%d\n", value, where,
+               size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+       n = where % 4;
+       byte_enables = byte_lane_enable_bits(n, size);
+       if (byte_enables == 0xffffffff)
+               return PCIBIOS_BAD_REGISTER_NUMBER;
+
+       addr = ixp4xx_config_addr(bus_num, devfn, where);
+       data = value << (8*n);
+       if (ixp4xx_pci_write(addr, byte_enables | NP_CMD_CONFIGWRITE, data))
+               return PCIBIOS_DEVICE_NOT_FOUND;
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ *     Generalized PCI config access functions.
+ */
+static int ixp4xx_read_config(struct pci_bus *bus, unsigned int devfn,
+       int where, int size, u32 *value)
+{
+       if (bus->number && !PCI_SLOT(devfn))
+               return local_read_config(where, size, value);
+       return read_config(bus->number, devfn, where, size, value);
+}
+
+static int ixp4xx_write_config(struct pci_bus *bus, unsigned int devfn,
+       int where, int size, u32 value)
+{
+       if (bus->number && !PCI_SLOT(devfn))
+               return local_write_config(where, size, value);
+       return write_config(bus->number, devfn, where, size, value);
+}
+
+struct pci_ops ixp4xx_ops = {
+       .read =  ixp4xx_read_config,
+       .write = ixp4xx_write_config,
+};
+
+
+/*
+ * PCI abort handler
+ */
+static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+       u32 isr, status;
+
+       isr = *PCI_ISR;
+       local_read_config(PCI_STATUS, 2, &status);
+       pr_debug("PCI: abort_handler addr = %#lx, isr = %#x, "
+               "status = %#x\n", addr, isr, status);
+
+       /* make sure the Master Abort bit is reset */    
+       *PCI_ISR = PCI_ISR_PFE;
+       status |= PCI_STATUS_REC_MASTER_ABORT;
+       local_write_config(PCI_STATUS, 2, status);
+
+       /*
+        * If it was an imprecise abort, then we need to correct the
+        * return address to be _after_ the instruction.
+        */
+       if (fsr & (1 << 10))
+               regs->ARM_pc += 4;
+
+       return 0;
+}
+
+
+/*
+ * Setup DMA mask to 64MB on PCI devices. Ignore all other devices.
+ */
+static int ixp4xx_pci_platform_notify(struct device *dev)
+{
+       if(dev->bus == &pci_bus_type) {
+               *dev->dma_mask =  SZ_64M - 1;
+               dev->coherent_dma_mask = SZ_64M - 1;
+               dmabounce_register_dev(dev, 2048, 4096);
+       }
+       return 0;
+}
+
+static int ixp4xx_pci_platform_notify_remove(struct device *dev)
+{
+       if(dev->bus == &pci_bus_type) {
+               dmabounce_unregister_dev(dev);
+       }
+       return 0;
+}
+
+int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+       return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
+}
+
+void __init ixp4xx_pci_preinit(void)
+{  
+       unsigned long processor_id;
+
+       asm("mrc p15, 0, %0, cr0, cr0, 0;" : "=r"(processor_id) :);
+
+       /*
+        * Determine which PCI read method to use
+        */
+       if (!(processor_id & 0xf)) {
+               printk("PCI: IXP4xx A0 silicon detected - "
+                       "PCI Non-Prefetch Workaround Enabled\n");
+               ixp4xx_pci_read = ixp4xx_pci_read_errata;
+       } else
+               ixp4xx_pci_read = ixp4xx_pci_read_no_errata;
+
+
+       /* hook in our fault handler for PCI errors */
+       hook_fault_code(16+6, abort_handler, SIGBUS, "imprecise external abort");
+
+       pr_debug("setup PCI-AHB(inbound) and AHB-PCI(outbound) address mappings\n");
+
+       /* 
+        * We use identity AHB->PCI address translation
+        * in the 0x48000000 to 0x4bffffff address space
+        */
+       *PCI_PCIMEMBASE = 0x48494A4B;
+
+       /* 
+        * We also use identity PCI->AHB address translation
+        * in 4 16MB BARs that begin at the physical memory start
+        */
+       *PCI_AHBMEMBASE = (PHYS_OFFSET & 0xFF000000) + 
+               ((PHYS_OFFSET & 0xFF000000) >> 8) +
+               ((PHYS_OFFSET & 0xFF000000) >> 16) +
+               ((PHYS_OFFSET & 0xFF000000) >> 24) +
+               0x00010203;
+
+       if (*PCI_CSR & PCI_CSR_HOST) {
+               printk("PCI: IXP4xx is host\n");
+
+               pr_debug("setup BARs in controller\n");
+
+               /*
+                * We configure the PCI inbound memory windows to be 
+                * 1:1 mapped to SDRAM
+                */
+               local_write_config(PCI_BASE_ADDRESS_0, 4, PHYS_OFFSET + 0x00000000);
+               local_write_config(PCI_BASE_ADDRESS_1, 4, PHYS_OFFSET + 0x01000000);
+               local_write_config(PCI_BASE_ADDRESS_2, 4, PHYS_OFFSET + 0x02000000);
+               local_write_config(PCI_BASE_ADDRESS_3, 4, PHYS_OFFSET + 0x03000000);
+
+               /*
+                * Enable CSR window at 0xff000000.
+                */
+               local_write_config(PCI_BASE_ADDRESS_4, 4, 0xff000008);
+
+               /*
+                * Enable the IO window to be way up high, at 0xfffffc00
+                */
+               local_write_config(PCI_BASE_ADDRESS_5, 4, 0xfffffc01);
+       } else {
+               printk("PCI: IXP4xx is target - No bus scan performed\n");
+       }
+
+       printk("PCI: IXP4xx Using %s access for memory space\n",
+#ifndef CONFIG_IXP4XX_INDIRECT_PCI
+                       "direct"
+#else
+                       "indirect"
+#endif
+               );
+
+       pr_debug("clear error bits in ISR\n");
+       *PCI_ISR = PCI_ISR_PSE | PCI_ISR_PFE | PCI_ISR_PPE | PCI_ISR_AHBE;
+
+       /*
+        * Set Initialize Complete in PCI Control Register: allow IXP4XX to
+        * respond to PCI configuration cycles. Specify that the AHB bus is
+        * operating in big endian mode. Set up byte lane swapping between 
+        * little-endian PCI and the big-endian AHB bus 
+        */
+#ifdef __ARMEB__
+       *PCI_CSR = PCI_CSR_IC | PCI_CSR_ABE | PCI_CSR_PDS | PCI_CSR_ADS;
+#else
+       *PCI_CSR = PCI_CSR_IC;
+#endif
+
+       pr_debug("DONE\n");
+}
+
+int ixp4xx_setup(int nr, struct pci_sys_data *sys)
+{
+       struct resource *res;
+
+       if (nr >= 1)
+               return 0;
+
+       res = kmalloc(sizeof(*res) * 2, GFP_KERNEL);
+       if (res == NULL) {
+               /* 
+                * If we're out of memory this early, something is wrong,
+                * so we might as well catch it here.
+                */
+               panic("PCI: unable to allocate resources?\n");
+       }
+       memset(res, 0, sizeof(*res) * 2);
+
+       local_write_config(PCI_COMMAND, 2, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
+
+       res[0].name = "PCI I/O Space";
+       res[0].start = 0x00001000;
+       res[0].end = 0xffff0000;
+       res[0].flags = IORESOURCE_IO;
+
+       res[1].name = "PCI Memory Space";
+       res[1].start = 0x48000000;
+#ifndef CONFIG_IXP4XX_INDIRECT_PCI
+       res[1].end = 0x4bffffff;
+#else
+       res[1].end = 0x4fffffff;
+#endif
+       res[1].flags = IORESOURCE_MEM;
+
+       request_resource(&ioport_resource, &res[0]);
+       request_resource(&iomem_resource, &res[1]);
+
+       sys->resource[0] = &res[0];
+       sys->resource[1] = &res[1];
+       sys->resource[2] = NULL;
+
+       platform_notify = ixp4xx_pci_platform_notify;
+       platform_notify_remove = ixp4xx_pci_platform_notify_remove;
+
+       return 1;
+}
+
+struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys)
+{
+       return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys);
+}
+
+/*
+ * We override these so we properly do dmabounce otherwise drivers
+ * are able to set the dma_mask to 0xffffffff and we can no longer
+ * trap bounces. :(
+ *
+ * We just return true on everyhing except for < 64MB in which case 
+ * we will fail miseralby and die since we can't handle that case.
+ */
+int
+pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+{
+       if (mask >= SZ_64M - 1 )
+               return 0;
+
+       return -EIO;
+}
+    
+int
+pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
+{
+       if (mask >= SZ_64M - 1 )
+               return 0;
+
+       return -EIO;
+}
+
+int
+pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+       if (mask >= SZ_64M - 1 )
+               return 0;
+
+       return -EIO;
+}
+
+EXPORT_SYMBOL(pci_set_dma_mask);
+EXPORT_SYMBOL(pci_dac_set_dma_mask);
+EXPORT_SYMBOL(pci_set_consistent_dma_mask);
+
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
new file mode 100644 (file)
index 0000000..f016650
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * arch/arm/mach-ixp4xx/common.c
+ *
+ * Generic code shared across all IXP4XX platforms
+ *
+ * Maintainer: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2002 (c) Intel Corporation
+ * Copyright 2003-2004 (c) MontaVista, Software, Inc. 
+ * 
+ * This file is licensed under  the terms of the GNU General Public 
+ * License version 2. This program is licensed "as is" without any 
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/sched.h>
+#include <linux/tty.h>
+#include <linux/serial_core.h>
+#include <linux/bootmem.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+
+#include <asm/hardware.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/irq.h>
+
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+
+/*************************************************************************
+ * GPIO access functions
+ *************************************************************************/
+
+/*
+ * Configure GPIO line for input, interrupt, or output operation
+ *
+ * TODO: Enable/disable the irq_desc based on interrupt or output mode.
+ * TODO: Should these be named ixp4xx_gpio_?
+ */
+void gpio_line_config(u8 line, u32 style)
+{
+       u32 enable;
+       volatile u32 *int_reg;
+       u32 int_style;
+
+       /* Read-modify-write of the GPIO output-enable register */
+       enable = *IXP4XX_GPIO_GPOER;
+
+       if (style & IXP4XX_GPIO_OUT) {
+               /* clear bit -> line driven as output */
+               enable &= ~((1) << line);
+       } else if (style & IXP4XX_GPIO_IN) {
+               /* set bit -> line is an input */
+               enable |= ((1) << line);
+
+               /* Map the requested interrupt style to the HW encoding */
+               switch (style & IXP4XX_GPIO_INTSTYLE_MASK)
+               {
+               case (IXP4XX_GPIO_ACTIVE_HIGH):
+                       int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
+                       break;
+               case (IXP4XX_GPIO_ACTIVE_LOW):
+                       int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
+                       break;
+               case (IXP4XX_GPIO_RISING_EDGE):
+                       int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
+                       break;
+               case (IXP4XX_GPIO_FALLING_EDGE):
+                       int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
+                       break;
+               case (IXP4XX_GPIO_TRANSITIONAL):
+                       int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
+                       break;
+               default:
+                       int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
+                       break;
+               }
+
+               /* Each GPITxR register holds style fields for 8 pins */
+               if (line >= 8) {        /* pins 8-15 */ 
+                       line -= 8;
+                       int_reg = IXP4XX_GPIO_GPIT2R;
+               }
+               else {                  /* pins 0-7 */
+                       int_reg = IXP4XX_GPIO_GPIT1R;
+               }
+
+               /* Clear the style for the appropriate pin */
+               *int_reg &= ~(IXP4XX_GPIO_STYLE_CLEAR << 
+                               (line * IXP4XX_GPIO_STYLE_SIZE));
+
+               /* Set the new style */
+               *int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
+       }
+
+       /* Commit the direction change last */
+       *IXP4XX_GPIO_GPOER = enable;
+}
+
+EXPORT_SYMBOL(gpio_line_config);
+
+/*************************************************************************
+ * IXP4xx chipset I/O mapping
+ *************************************************************************/
+/* Static virtual->physical mappings common to all IXP4xx boards */
+static struct map_desc ixp4xx_io_desc[] __initdata = {
+       {       /* UART, Interrupt ctrl, GPIO, timers, NPEs, MACs, USB .... */
+               .virtual        = IXP4XX_PERIPHERAL_BASE_VIRT,
+               .physical       = IXP4XX_PERIPHERAL_BASE_PHYS,
+               .length         = IXP4XX_PERIPHERAL_REGION_SIZE,
+               .type           = MT_DEVICE
+       }, {    /* Expansion Bus Config Registers */
+               .virtual        = IXP4XX_EXP_CFG_BASE_VIRT,
+               .physical       = IXP4XX_EXP_CFG_BASE_PHYS,
+               .length         = IXP4XX_EXP_CFG_REGION_SIZE,
+               .type           = MT_DEVICE
+       }, {    /* PCI Registers */
+               .virtual        = IXP4XX_PCI_CFG_BASE_VIRT,
+               .physical       = IXP4XX_PCI_CFG_BASE_PHYS,
+               .length         = IXP4XX_PCI_CFG_REGION_SIZE,
+               .type           = MT_DEVICE
+       }
+};
+
+/* Install the static chipset I/O mappings defined above */
+void __init ixp4xx_map_io(void)
+{
+       iotable_init(ixp4xx_io_desc, ARRAY_SIZE(ixp4xx_io_desc));
+}
+
+
+/*************************************************************************
+ * IXP4xx chipset IRQ handling
+ *
+ * TODO: GPIO IRQs should be marked invalid until the user of the IRQ
+ *       (be it PCI or something else) configures that GPIO line
+ *       as an IRQ. Also, we should use a different chip structure for 
+ *       level-based GPIO vs edge-based GPIO. Currently nobody needs this as 
+ *       all HW that's publicly available uses level IRQs, so we'll
+ *       worry about it if/when we have HW to test.
+ **************************************************************************/
+/* Mask an interrupt source by clearing its bit in the interrupt mask reg */
+static void ixp4xx_irq_mask(unsigned int irq)
+{
+       *IXP4XX_ICMR &= ~(1 << irq);
+}
+
+/* Ack == mask here; level IRQs are cleared at the source (see unmask) */
+static void ixp4xx_irq_mask_ack(unsigned int irq)
+{
+       ixp4xx_irq_mask(irq);
+}
+
+/*
+ * Unmask an interrupt source.  For GPIO-backed IRQs the GPIO interrupt
+ * status is cleared first so a stale event does not re-fire immediately.
+ */
+static void ixp4xx_irq_unmask(unsigned int irq)
+{
+       /* IRQ number -> GPIO line; -1 for non-GPIO interrupt sources */
+       static int irq2gpio[NR_IRQS] = {
+               -1, -1, -1, -1, -1, -1,  0,  1,
+               -1, -1, -1, -1, -1, -1, -1, -1,
+               -1, -1, -1,  2,  3,  4,  5,  6,
+                7,  8,  9, 10, 11, 12, -1, -1,
+       };
+       int line = irq2gpio[irq];
+
+       /*
+        * This only works for LEVEL gpio IRQs as per the IXP4xx developer's
+        * manual. If edge-triggered, need to move it to the mask_ack.
+        * Nobody seems to be using the edge-triggered mode on the GPIOs. 
+        */
+       if (line >= 0)
+               gpio_line_isr_clear(line);
+
+       *IXP4XX_ICMR |= (1 << irq);
+}
+
+/* Single irqchip used for every IXP4xx interrupt source */
+static struct irqchip ixp4xx_irq_chip = {
+       .ack    = ixp4xx_irq_mask_ack,
+       .mask   = ixp4xx_irq_mask,
+       .unmask = ixp4xx_irq_unmask,
+};
+
+/* Initialise the interrupt controller: all sources to IRQ, all masked,
+ * every line handled as a level interrupt by ixp4xx_irq_chip. */
+void __init ixp4xx_init_irq(void)
+{
+       int i = 0;
+
+       /* Route all sources to IRQ instead of FIQ */
+       *IXP4XX_ICLR = 0x0;
+
+       /* Disable all interrupts */
+       *IXP4XX_ICMR = 0x0; 
+
+       for(i = 0; i < NR_IRQS; i++)
+       {
+               set_irq_chip(i, &ixp4xx_irq_chip);
+               set_irq_handler(i, do_level_IRQ);
+               set_irq_flags(i, IRQF_VALID);
+       }
+}
+
+
+/*************************************************************************
+ * IXP4xx timer tick
+ * We use OS timer1 on the CPU for the timer tick and the timestamp 
+ * counter as a source of real clock ticks to account for missed jiffies.
+ *************************************************************************/
+
+static unsigned volatile last_jiffy_time;
+
+#define CLOCK_TICKS_PER_USEC   (CLOCK_TICK_RATE / USEC_PER_SEC)
+
+/* IRQs are disabled before entering here from do_gettimeofday() */
+/* Microseconds elapsed since the last timer tick, derived from the
+ * free-running timestamp counter (IRQs are already off here). */
+static unsigned long ixp4xx_gettimeoffset(void)
+{
+       u32 elapsed;
+
+       elapsed = *IXP4XX_OSTS - last_jiffy_time;
+
+       return elapsed / CLOCK_TICKS_PER_USEC;
+}
+
+/* Timer tick handler: ack the timer and account one do_timer() call per
+ * LATCH ticks of the timestamp counter, catching up on missed jiffies. */
+static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+       /* Clear Pending Interrupt by writing '1' to it */
+       *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND;
+
+       /*
+        * Catch up with the real idea of time
+        */
+       do {    
+               do_timer(regs);
+               last_jiffy_time += LATCH;
+       } while((*IXP4XX_OSTS - last_jiffy_time) > LATCH);
+
+       return IRQ_HANDLED;
+}
+
+extern unsigned long (*gettimeoffset)(void);
+
+/* Timer tick irqaction; .handler is filled in by time_init() */
+static struct irqaction timer_irq = {
+       .name   = "IXP4xx Timer Tick",
+       .flags  = SA_INTERRUPT
+};
+
+/* Set up OS timer 1 as the periodic tick source and the timestamp
+ * counter as the gettimeoffset reference, then hook the tick IRQ. */
+void __init time_init(void)
+{
+       gettimeoffset = ixp4xx_gettimeoffset;
+       timer_irq.handler = ixp4xx_timer_interrupt;
+
+       /* Clear Pending Interrupt by writing '1' to it */
+       *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND;
+
+       /* Setup the Timer counter value */
+       *IXP4XX_OSRT1 = (LATCH & ~IXP4XX_OST_RELOAD_MASK) | IXP4XX_OST_ENABLE;
+
+       /* Reset time-stamp counter */
+       *IXP4XX_OSTS = 0;
+       last_jiffy_time = 0;
+
+       /* Connect the interrupt handler and enable the interrupt */
+       setup_irq(IRQ_IXP4XX_TIMER1, &timer_irq);
+}
+
+
diff --git a/arch/arm/mach-ixp4xx/coyote-pci.c b/arch/arm/mach-ixp4xx/coyote-pci.c
new file mode 100644 (file)
index 0000000..b46c743
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * arch/arm/mach-ixp4xx/coyote-pci.c
+ *
+ * PCI setup routines for ADI Engineering Coyote platform
+ *
+ * Copyright (C) 2002 Jungo Software Technologies.
+ * Copyright (C) 2003 MontaVista Software, Inc.
+ *
+ * Maintainer: Deepak Saxena <dsaxena@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/mach-types.h>
+#include <asm/hardware.h>
+#include <asm/irq.h>
+
+#include <asm/mach/pci.h>
+
+extern void ixp4xx_pci_preinit(void);
+extern int ixp4xx_setup(int nr, struct pci_sys_data *sys);
+extern struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys);
+
+/* Configure the two Coyote PCI slot GPIOs as active-low level interrupt
+ * inputs, clear any stale status, then run the common IXP4xx preinit. */
+void __init coyote_pci_preinit(void)
+{
+       gpio_line_config(COYOTE_PCI_SLOT0_PIN,
+                       IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+
+       gpio_line_config(COYOTE_PCI_SLOT1_PIN,
+                       IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+
+       gpio_line_isr_clear(COYOTE_PCI_SLOT0_PIN);
+       gpio_line_isr_clear(COYOTE_PCI_SLOT1_PIN);
+
+       ixp4xx_pci_preinit();
+}
+
+/*
+ * Map a PCI device (IDSEL) number to the GPIO-based interrupt wired to
+ * that slot on the Coyote board; returns -1 for unknown slots.
+ */
+static int __init coyote_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+       switch (slot) {
+       case COYOTE_PCI_SLOT0_DEVID:
+               return IRQ_COYOTE_PCI_SLOT0;
+       case COYOTE_PCI_SLOT1_DEVID:
+               return IRQ_COYOTE_PCI_SLOT1;
+       default:
+               return -1;
+       }
+}
+
+/* Coyote PCI controller description handed to pci_common_init() */
+struct hw_pci coyote_pci __initdata = {
+       .nr_controllers = 1,
+       .preinit =        coyote_pci_preinit,
+       .swizzle =        pci_std_swizzle,
+       .setup =          ixp4xx_setup,
+       .scan =           ixp4xx_scan_bus,
+       .map_irq =        coyote_map_irq,
+};
+
+/* Register the PCI controller, but only when running on Coyote HW */
+int __init coyote_pci_init(void)
+{
+       if (machine_is_adi_coyote())
+               pci_common_init(&coyote_pci);
+       return 0;
+}
+
+subsys_initcall(coyote_pci_init);
diff --git a/arch/arm/mach-ixp4xx/ixdp425-pci.c b/arch/arm/mach-ixp4xx/ixdp425-pci.c
new file mode 100644 (file)
index 0000000..7baa60c
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * arch/arm/mach-ixp4xx/ixdp425-pci.c 
+ *
+ * IXDP425 board-level PCI initialization
+ *
+ * Copyright (C) 2002 Intel Corporation.
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * Maintainer: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/mach/pci.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+
+/* Configure the four IXDP425 PCI INTA-INTD GPIOs as active-low level
+ * inputs, clear stale status, then run the common IXP4xx preinit. */
+void __init ixdp425_pci_preinit(void)
+{
+       gpio_line_config(IXDP425_PCI_INTA_PIN,
+                               IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+       gpio_line_config(IXDP425_PCI_INTB_PIN, 
+                               IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+       gpio_line_config(IXDP425_PCI_INTC_PIN, 
+                               IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+       gpio_line_config(IXDP425_PCI_INTD_PIN, 
+                               IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+
+       gpio_line_isr_clear(IXDP425_PCI_INTA_PIN);
+       gpio_line_isr_clear(IXDP425_PCI_INTB_PIN);
+       gpio_line_isr_clear(IXDP425_PCI_INTC_PIN);
+       gpio_line_isr_clear(IXDP425_PCI_INTD_PIN);
+
+       ixp4xx_pci_preinit();
+}
+
+/* Rotating interrupt swizzle: slot N pin P maps to table[(N+P-2) % 4].
+ * Returns -1 for slots/pins outside the supported range. */
+static int __init ixdp425_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+       static int pci_irq_table[IXDP425_PCI_IRQ_LINES] = {
+               IRQ_IXDP425_PCI_INTA,
+               IRQ_IXDP425_PCI_INTB,
+               IRQ_IXDP425_PCI_INTC,
+               IRQ_IXDP425_PCI_INTD
+       };
+
+       int irq = -1;
+
+       if (slot >= 1 && slot <= IXDP425_PCI_MAX_DEV && 
+               pin >= 1 && pin <= IXDP425_PCI_IRQ_LINES) {
+               irq = pci_irq_table[(slot + pin - 2) % 4];
+       }
+
+       return irq;
+}
+
+/* IXDP425 PCI controller description handed to pci_common_init() */
+struct hw_pci ixdp425_pci __initdata = {
+       .nr_controllers = 1,
+       .preinit        = ixdp425_pci_preinit,
+       .swizzle        = pci_std_swizzle,
+       .setup          = ixp4xx_setup,
+       .scan           = ixp4xx_scan_bus,
+       .map_irq        = ixdp425_map_irq,
+};
+
+/* Register the PCI controller on the IXDP425 and its close relatives */
+int __init ixdp425_pci_init(void)
+{
+       if (machine_is_ixdp425() || 
+               machine_is_ixcdp1100() || 
+               machine_is_avila())
+               pci_common_init(&ixdp425_pci);
+       return 0;
+}
+
+subsys_initcall(ixdp425_pci_init);
+
diff --git a/arch/arm/mach-ixp4xx/prpmc1100-pci.c b/arch/arm/mach-ixp4xx/prpmc1100-pci.c
new file mode 100644 (file)
index 0000000..a0aed9c
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * arch/arm/mach-ixp4xx/prpmc1100-pci.c 
+ *
+ * PrPMC1100 PCI initialization
+ *
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ * Based on IXDP425 code originally (C) Intel Corporation
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * PrPMC1100 PCI init code.  GPIO usage is similar to that on 
+ * IXDP425, but the IRQ routing is completely different and
+ * depends on what carrier you are using. This code is written
+ * to work on the Motorola PrPMC800 ATX carrier board.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+#include <asm/hardware.h>
+
+#include <asm/mach/pci.h>
+
+
+/* Configure the four PrPMC1100 PCI INTA-INTD GPIOs as active-low level
+ * inputs, clear stale status, then run the common IXP4xx preinit. */
+void __init prpmc1100_pci_preinit(void)
+{
+       gpio_line_config(PRPMC1100_PCI_INTA_PIN, 
+                               IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+       gpio_line_config(PRPMC1100_PCI_INTB_PIN, 
+                               IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+       gpio_line_config(PRPMC1100_PCI_INTC_PIN, 
+                               IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+       gpio_line_config(PRPMC1100_PCI_INTD_PIN, 
+                               IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+
+       gpio_line_isr_clear(PRPMC1100_PCI_INTA_PIN);
+       gpio_line_isr_clear(PRPMC1100_PCI_INTB_PIN);
+       gpio_line_isr_clear(PRPMC1100_PCI_INTC_PIN);
+       gpio_line_isr_clear(PRPMC1100_PCI_INTD_PIN);
+
+       ixp4xx_pci_preinit();
+}
+
+
+/*
+ * IRQ routing for the Motorola PrPMC800 ATX carrier: the table is
+ * indexed by [IDSEL - PRPMC1100_PCI_MIN_DEVID][pin - 1].  Returns -1
+ * (and 0 for the unused IDSEL 19) when no interrupt is wired.
+ */
+static int __init prpmc1100_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+       int irq = -1;
+
+       static int pci_irq_table[][4] = { 
+               {       /* IDSEL 16 - PMC A1 */
+                       IRQ_PRPMC1100_PCI_INTD, 
+                       IRQ_PRPMC1100_PCI_INTA, 
+                       IRQ_PRPMC1100_PCI_INTB, 
+                       IRQ_PRPMC1100_PCI_INTC
+               }, {    /* IDSEL 17 - PRPMC-A-B */
+                       IRQ_PRPMC1100_PCI_INTD, 
+                       IRQ_PRPMC1100_PCI_INTA, 
+                       IRQ_PRPMC1100_PCI_INTB, 
+                       IRQ_PRPMC1100_PCI_INTC
+               }, {    /* IDSEL 18 - PMC A1-B */
+                       IRQ_PRPMC1100_PCI_INTA, 
+                       IRQ_PRPMC1100_PCI_INTB, 
+                       IRQ_PRPMC1100_PCI_INTC, 
+                       IRQ_PRPMC1100_PCI_INTD
+               }, {    /* IDSEL 19 - Unused */
+                       0, 0, 0, 0 
+               }, {    /* IDSEL 20 - P2P Bridge */
+                       IRQ_PRPMC1100_PCI_INTA, 
+                       IRQ_PRPMC1100_PCI_INTB, 
+                       IRQ_PRPMC1100_PCI_INTC, 
+                       IRQ_PRPMC1100_PCI_INTD
+               }, {    /* IDSEL 21 - PMC A2 */
+                       IRQ_PRPMC1100_PCI_INTC, 
+                       IRQ_PRPMC1100_PCI_INTD, 
+                       IRQ_PRPMC1100_PCI_INTA, 
+                       IRQ_PRPMC1100_PCI_INTB
+               }, {    /* IDSEL 22 - PMC A2-B */
+                       IRQ_PRPMC1100_PCI_INTD, 
+                       IRQ_PRPMC1100_PCI_INTA, 
+                       IRQ_PRPMC1100_PCI_INTB, 
+                       IRQ_PRPMC1100_PCI_INTC
+               },
+       };
+
+       if (slot >= PRPMC1100_PCI_MIN_DEVID && slot <= PRPMC1100_PCI_MAX_DEVID 
+               && pin >= 1 && pin <= PRPMC1100_PCI_IRQ_LINES) {
+               irq = pci_irq_table[slot - PRPMC1100_PCI_MIN_DEVID][pin - 1];
+       }
+
+       return irq;
+}
+
+
+/* PrPMC1100 PCI controller description handed to pci_common_init() */
+struct hw_pci prpmc1100_pci __initdata = {
+       .nr_controllers = 1,
+       .preinit =        prpmc1100_pci_preinit,
+       .swizzle =        pci_std_swizzle,
+       .setup =          ixp4xx_setup,
+       .scan =           ixp4xx_scan_bus,
+       .map_irq =        prpmc1100_map_irq,
+};
+
+/* Register the PCI controller, but only when running on a PrPMC1100 */
+int __init prpmc1100_pci_init(void)
+{
+       if (machine_is_prpmc1100())
+               pci_common_init(&prpmc1100_pci);
+       return 0;
+}
+
+subsys_initcall(prpmc1100_pci_init);
+
diff --git a/arch/arm/mach-ixp4xx/prpmc1100-setup.c b/arch/arm/mach-ixp4xx/prpmc1100-setup.c
new file mode 100644 (file)
index 0000000..b060320
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * arch/arm/mach-ixp4xx/prpmc1100-setup.c
+ *
+ * Motorola PrPMC1100 board setup
+ *
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/serial.h>
+#include <linux/tty.h>
+#include <linux/serial_core.h>
+
+#include <asm/types.h>
+#include <asm/setup.h>
+#include <asm/memory.h>
+#include <asm/hardware.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/flash.h>
+
+#ifdef __ARMEB__
+#define        REG_OFFSET      3
+#else
+#define        REG_OFFSET      0
+#endif
+
+/*
+ * Only one serial port is connected on the PrPMC1100
+ */
+static struct uart_port prpmc1100_serial_port = {
+       .membase        = (char*)(IXP4XX_UART1_BASE_VIRT + REG_OFFSET), /* REG_OFFSET selects the byte lane */
+       .mapbase        = (IXP4XX_UART1_BASE_PHYS),
+       .irq            = IRQ_IXP4XX_UART1,
+       .flags          = UPF_SKIP_TEST,
+       .iotype         = UPIO_MEM,     
+       .regshift       = 2,            /* registers are 32-bit spaced */
+       .uartclk        = IXP4XX_UART_XTAL,
+       .line           = 0,
+       .type           = PORT_XSCALE,
+       .fifosize       = 32
+};
+
+/* Register the console UART early, then map the common chipset I/O */
+void __init prpmc1100_map_io(void)
+{
+       early_serial_setup(&prpmc1100_serial_port);
+
+       ixp4xx_map_io();
+}
+
+/* On-board CFI flash, exposed as an IXP4XX-Flash platform device */
+static struct flash_platform_data prpmc1100_flash_data = {
+       .map_name       = "cfi_probe",
+       .width          = 2,            /* 16-bit wide bus */
+};
+
+static struct resource prpmc1100_flash_resource = {
+       .start          = PRPMC1100_FLASH_BASE,
+       .end            = PRPMC1100_FLASH_BASE + PRPMC1100_FLASH_SIZE,
+       .flags          = IORESOURCE_MEM,
+};
+
+static struct platform_device prpmc1100_flash_device = {
+       .name           = "IXP4XX-Flash",
+       .id             = 0,
+       .dev            = {
+               .platform_data = &prpmc1100_flash_data,
+       },
+       .num_resources  = 1,
+       .resource       = &prpmc1100_flash_resource,
+};
+
+/* Board init hook: register the on-board flash device */
+static void __init prpmc1100_init(void)
+{
+       platform_add_device(&prpmc1100_flash_device);
+}
+
+/* Machine descriptor tying the board hooks to machine ID PRPMC1100 */
+MACHINE_START(PRPMC1100, "Motorola PrPMC1100")
+        MAINTAINER("MontaVista Software, Inc.")
+        BOOT_MEM(PHYS_OFFSET, IXP4XX_PERIPHERAL_BASE_PHYS,
+                IXP4XX_PERIPHERAL_BASE_VIRT)
+        MAPIO(prpmc1100_map_io)
+        INITIRQ(ixp4xx_init_irq)
+        BOOT_PARAMS(0x0100)
+       INIT_MACHINE(prpmc1100_init)
+MACHINE_END
+
diff --git a/arch/arm/mach-s3c2410/mach-smdk2410.c b/arch/arm/mach-s3c2410/mach-smdk2410.c
new file mode 100644 (file)
index 0000000..4e0282b
--- /dev/null
@@ -0,0 +1,109 @@
+/***********************************************************************
+ *
+ * linux/arch/arm/mach-s3c2410/mach-smdk2410.c
+ *
+ * Copyright (C) 2004 by FS Forth-Systeme GmbH
+ * All rights reserved.
+ *
+ * $Id: mach-smdk2410.c,v 1.1 2004/05/11 14:15:38 mpietrek Exp $
+ * @Author: Jonas Dietsche
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * @History:
+ * derived from linux/arch/arm/mach-s3c2410/mach-bast.c, written by
+ * Ben Dooks <ben@simtec.co.uk>
+ ***********************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#include <asm/arch/regs-serial.h>
+
+#include "s3c2410.h"
+
+
+/* Board-specific static I/O mappings (none needed beyond s3c2410_map_io) */
+static struct map_desc smdk2410_iodesc[] __initdata = {
+  /* nothing here yet */
+};
+
+#define UCON S3C2410_UCON_DEFAULT
+#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB
+#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE
+
+/* base baud rate for all our UARTs */
+static unsigned long smdk2410_serial_clock = 24*1000*1000;
+
+/* All three on-chip UARTs, identically configured (8N1, shared clock) */
+static struct s3c2410_uartcfg smdk2410_uartcfgs[] = {
+       [0] = {
+               .hwport      = 0,
+               .flags       = 0,
+               .clock       = &smdk2410_serial_clock,
+               .ucon        = UCON,
+               .ulcon       = ULCON,
+               .ufcon       = UFCON,
+       },
+       [1] = {
+               .hwport      = 1,
+               .flags       = 0,
+               .clock       = &smdk2410_serial_clock,
+               .ucon        = UCON,
+               .ulcon       = ULCON,
+               .ufcon       = UFCON,
+       },
+       [2] = {
+               .hwport      = 2,
+               .flags       = 0,
+               .clock       = &smdk2410_serial_clock,
+               .ucon        = UCON,
+               .ulcon       = ULCON,
+               .ufcon       = UFCON,
+       }
+};
+
+
+/* Map SoC I/O and hand the UART configuration to the s3c2410 core */
+void __init smdk2410_map_io(void)
+{
+       s3c2410_map_io(smdk2410_iodesc, ARRAY_SIZE(smdk2410_iodesc));
+       s3c2410_uartcfgs = smdk2410_uartcfgs;
+}
+
+/* No board-specific IRQs; defer entirely to the SoC IRQ setup */
+void __init smdk2410_init_irq(void)
+{
+       s3c2410_init_irq();
+}
+
+/* Machine descriptor for the Samsung SMDK2410 evaluation board */
+MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switch
+                                   * to SMDK2410 */
+     MAINTAINER("Jonas Dietsche")
+     BOOT_MEM(S3C2410_SDRAM_PA, S3C2410_PA_UART, S3C2410_VA_UART)
+     BOOT_PARAMS(S3C2410_SDRAM_PA + 0x100)
+     MAPIO(smdk2410_map_io)
+     INITIRQ(smdk2410_init_irq)
+MACHINE_END
diff --git a/arch/cris/arch-v10/drivers/ide.c b/arch/cris/arch-v10/drivers/ide.c
new file mode 100644 (file)
index 0000000..335473c
--- /dev/null
@@ -0,0 +1,945 @@
+/* $Id: ide.c,v 1.1 2004/01/22 08:22:58 starvik Exp $
+ *
+ * Etrax specific IDE functions, like init and PIO-mode setting etc.
+ * Almost the entire ide.c is used for the rest of the Etrax ATA driver.
+ * Copyright (c) 2000-2004 Axis Communications AB
+ *
+ * Authors:    Bjorn Wesen        (initial version)
+ *             Mikael Starvik     (pio setup stuff, Linux 2.6 port)
+ */
+
+/* Regarding DMA:
+ *
+ * There are two forms of DMA - "DMA handshaking" between the interface and the drive,
+ * and DMA between the memory and the interface. We can ALWAYS use the latter, since it's
+ * something built-in in the Etrax. However only some drives support the DMA-mode handshaking
+ * on the ATA-bus. The normal PC driver and Triton interface disables memory-if DMA when the
+ * device can't do DMA handshaking for some stupid reason. We don't need to do that.
+ */
+
+#undef REALLY_SLOW_IO           /* most systems can safely undef this */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/ide.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/arch/svinto.h>
+#include <asm/dma.h>
+
+/* number of Etrax DMA descriptors */
+#define MAX_DMA_DESCRS 64
+
+/* number of times to retry busy-flags when reading/writing IDE-registers
+ * this can't be too high because a hung harddisk might cause the watchdog
+ * to trigger (sometimes INB and OUTB are called with irq's disabled)
+ */
+
+#define IDE_REGISTER_TIMEOUT 300
+
+#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET
+/* address where the memory-mapped IDE reset bit lives, if used */
+static volatile unsigned long *reset_addr;
+#endif
+
+static int e100_read_command = 0;
+
+#define LOWDB(x)
+#define D(x)
+
+/*
+ * Write a 16-bit value to an ATA register.  Register address and data
+ * travel together in one write to R_ATA_CTRL_DATA; the busy and
+ * transmitter-ready flags are polled with a bounded spin before/after.
+ */
+void
+etrax100_ide_outw(unsigned short data, ide_ioreg_t reg) {
+       int timeleft;
+       LOWDB(printk("ow: data 0x%x, reg 0x%x\n", data, reg));
+
+       /* note the lack of handling any timeouts. we stop waiting, but we don't
+        * really notify anybody.
+        */
+
+       timeleft = IDE_REGISTER_TIMEOUT;
+       /* wait for busy flag */
+       while(timeleft && (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)))
+               timeleft--;
+
+       /*
+        * Fall through at a timeout, so the ongoing command will be
+        * aborted by the write below, which is expected to be a dummy
+        * command to the command register.  This happens when a faulty
+        * drive times out on a command.  See comment on timeout in
+        * INB.
+        */
+       if(!timeleft)
+               printk("ATA timeout reg 0x%lx := 0x%x\n", reg, data);
+
+       *R_ATA_CTRL_DATA = reg | data; /* write data to the drive's register */
+
+       timeleft = IDE_REGISTER_TIMEOUT;
+       /* wait for transmitter ready */
+       while(timeleft && !(*R_ATA_STATUS_DATA &
+                           IO_MASK(R_ATA_STATUS_DATA, tr_rdy)))
+               timeleft--;
+}
+
+/* 8-bit register write; the interface always transfers 16 bits */
+void
+etrax100_ide_outb(unsigned char data, ide_ioreg_t reg)
+{
+       etrax100_ide_outw(data, reg);
+}
+
+/* Synchronous byte write hook for the IDE core; same path as outw */
+void
+etrax100_ide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port)
+{
+       etrax100_ide_outw(addr, port);
+}
+
+/*
+ * Read a 16-bit value from an ATA register via R_ATA_CTRL_DATA.
+ * On a stuck-busy interface a read of the status register fakes
+ * BUSY_STAT so the IDE core will time out and reset the drive.
+ */
+unsigned short
+etrax100_ide_inw(ide_ioreg_t reg) {
+       int status;
+       int timeleft;
+
+       timeleft = IDE_REGISTER_TIMEOUT;
+       /* wait for busy flag */
+       while(timeleft && (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)))
+               timeleft--;
+
+       if(!timeleft) {
+               /*
+                * If we're asked to read the status register, like for
+                * example when a command does not complete for an
+                * extended time, but the ATA interface is stuck in a
+                * busy state at the *ETRAX* ATA interface level (as has
+                * happened repeatedly with at least one bad disk), then
+                * the best thing to do is to pretend that we read
+                * "busy" in the status register, so the IDE driver will
+                * time-out, abort the ongoing command and perform a
+                * reset sequence.  Note that the subsequent OUT_BYTE
+                * call will also timeout on busy, but as long as the
+                * write is still performed, everything will be fine.
+                */
+               if ((reg & IO_MASK (R_ATA_CTRL_DATA, addr))
+                   == IO_FIELD (R_ATA_CTRL_DATA, addr, IDE_STATUS_OFFSET))
+                       return BUSY_STAT;
+               else
+                       /* For other rare cases we assume 0 is good enough.  */
+                       return 0;
+       }
+
+       *R_ATA_CTRL_DATA = reg | IO_STATE(R_ATA_CTRL_DATA, rw, read); /* read data */
+
+       timeleft = IDE_REGISTER_TIMEOUT;
+       /* wait for available */
+       while(timeleft && !((status = *R_ATA_STATUS_DATA) &
+                           IO_MASK(R_ATA_STATUS_DATA, dav)))
+               timeleft--;
+
+       /* data never became available: give up and return 0 */
+       if(!timeleft)
+               return 0;
+
+       LOWDB(printk("inb: 0x%x from reg 0x%x\n", status & 0xff, reg));
+
+        return (unsigned short)status;
+}
+
+/* 8-bit register read; low byte of the 16-bit transfer */
+unsigned char
+etrax100_ide_inb(ide_ioreg_t reg)
+{
+       return (unsigned char)etrax100_ide_inw(reg);
+}
+
+/* PIO timing (in R_ATA_CONFIG)
+ *
+ *                        _____________________________
+ * ADDRESS :     ________/
+ *
+ *                            _______________
+ * DIOR    :     ____________/               \__________
+ *
+ *                               _______________
+ * DATA    :     XXXXXXXXXXXXXXXX_______________XXXXXXXX
+ *
+ *
+ * DIOR is unbuffered while address and data is buffered.
+ * This creates two problems:
+ * 1. The DIOR pulse is too early (because it is unbuffered)
+ * 2. The rise time of DIOR is long
+ *
+ * There are at least three different plausible solutions
+ * 1. Use a pad capable of larger currents in Etrax
+ * 2. Use an external buffer
+ * 3. Make the strobe pulse longer
+ *
+ * Some of the strobe timings below are modified to compensate
+ * for this. This implies a slight performance decrease.
+ *
+ * THIS SHOULD NEVER BE CHANGED!
+ *
+ * TODO: Is this true for the latest LX boards still ?
+ */
+
+#define ATA_DMA2_STROBE  4
+#define ATA_DMA2_HOLD    0
+#define ATA_DMA1_STROBE  4
+#define ATA_DMA1_HOLD    1
+#define ATA_DMA0_STROBE 12
+#define ATA_DMA0_HOLD    9
+#define ATA_PIO4_SETUP   1
+#define ATA_PIO4_STROBE  5
+#define ATA_PIO4_HOLD    0
+#define ATA_PIO3_SETUP   1
+#define ATA_PIO3_STROBE  5
+#define ATA_PIO3_HOLD    1
+#define ATA_PIO2_SETUP   1
+#define ATA_PIO2_STROBE  6
+#define ATA_PIO2_HOLD    2
+#define ATA_PIO1_SETUP   2
+#define ATA_PIO1_STROBE 11
+#define ATA_PIO1_HOLD    4
+#define ATA_PIO0_SETUP   4
+#define ATA_PIO0_STROBE 19
+#define ATA_PIO0_HOLD    4
+
+static int e100_dma_check (ide_drive_t *drive);
+static int e100_dma_begin (ide_drive_t *drive);
+static int e100_dma_end (ide_drive_t *drive);
+static int e100_dma_read (ide_drive_t *drive);
+static int e100_dma_write (ide_drive_t *drive);
+static void e100_ide_input_data (ide_drive_t *drive, void *, unsigned int);
+static void e100_ide_output_data (ide_drive_t *drive, void *, unsigned int);
+static void e100_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
+static void e100_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
+static int e100_dma_off (ide_drive_t *drive);
+static int e100_dma_verbose (ide_drive_t *drive);
+
+
+/*
+ * good_dma_drives() lists the model names (from "hdparm -i")
+ * of drives which do not support mword2 DMA but which are
+ * known to work fine with this interface under Linux.
+ */
+
+const char *good_dma_drives[] = {"Micropolis 2112A",
+                                "CONNER CTMA 4000",
+                                "CONNER CTT8000-A",
+                                NULL};
+
+/*
+ * Program the interface timing for the given PIO mode by rewriting
+ * R_ATA_CONFIG.  The DMA strobe/hold fields are always set for
+ * multiword DMA mode 2, independent of the PIO mode chosen.
+ *
+ * NOTE(review): the requested mode is deliberately overridden to
+ * PIO 4 below (the ide_get_best_pio_mode() call is commented out),
+ * so the switch arms for modes 0-3 are currently unreachable.
+ */
+static void tune_e100_ide(ide_drive_t *drive, byte pio)
+{
+       pio = 4; /* force PIO mode 4 regardless of the caller's request */
+       /* pio = ide_get_best_pio_mode(drive, pio, 4, NULL); */
+
+       /* set pio mode! */
+
+       switch(pio) {
+               case 0:
+                       *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable,     1 ) |
+                                         IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
+                                         IO_FIELD( R_ATA_CONFIG, dma_hold,   ATA_DMA2_HOLD ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_setup,  ATA_PIO0_SETUP ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO0_STROBE ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_hold,   ATA_PIO0_HOLD ) );
+                       break;
+               case 1:
+                       *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable,     1 ) |
+                                         IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
+                                         IO_FIELD( R_ATA_CONFIG, dma_hold,   ATA_DMA2_HOLD ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_setup,  ATA_PIO1_SETUP ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO1_STROBE ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_hold,   ATA_PIO1_HOLD ) );
+                       break;
+               case 2:
+                       *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable,     1 ) |
+                                         IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
+                                         IO_FIELD( R_ATA_CONFIG, dma_hold,   ATA_DMA2_HOLD ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_setup,  ATA_PIO2_SETUP ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO2_STROBE ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_hold,   ATA_PIO2_HOLD ) );
+                       break;
+               case 3:
+                       *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable,     1 ) |
+                                         IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
+                                         IO_FIELD( R_ATA_CONFIG, dma_hold,   ATA_DMA2_HOLD ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_setup,  ATA_PIO3_SETUP ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO3_STROBE ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_hold,   ATA_PIO3_HOLD ) );
+                       break;
+               case 4:
+                       *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable,     1 ) |
+                                         IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
+                                         IO_FIELD( R_ATA_CONFIG, dma_hold,   ATA_DMA2_HOLD ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_setup,  ATA_PIO4_SETUP ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO4_STROBE ) |
+                                         IO_FIELD( R_ATA_CONFIG, pio_hold,   ATA_PIO4_HOLD ) );
+                       break;
+       }
+}
+
+/*
+ * Boot-time initialisation of the ETRAX 100LX on-chip ATA interface.
+ *
+ * Fills in the hook tables of every ide_hwifs[] entry, resets and
+ * configures the controller, toggles the board-specific /reset line,
+ * enables the four ATA interrupts and finally resets the two DMA
+ * channels (TX/RX) used for ATA transfers.
+ */
+void __init
+init_e100_ide (void)
+{
+       volatile unsigned int dummy;
+       int h;
+
+       printk("ide: ETRAX 100LX built-in ATA DMA controller\n");
+
+       /* first fill in some stuff in the ide_hwifs fields */
+
+       for(h = 0; h < MAX_HWIFS; h++) {
+               ide_hwif_t *hwif = &ide_hwifs[h];
+               hwif->mmio = 2;
+               hwif->chipset = ide_etrax100;
+               hwif->tuneproc = &tune_e100_ide;
+                hwif->ata_input_data = &e100_ide_input_data;
+                hwif->ata_output_data = &e100_ide_output_data;
+                hwif->atapi_input_bytes = &e100_atapi_input_bytes;
+                hwif->atapi_output_bytes = &e100_atapi_output_bytes;
+                hwif->ide_dma_check = &e100_dma_check;
+                hwif->ide_dma_end = &e100_dma_end;
+               hwif->ide_dma_write = &e100_dma_write;
+               hwif->ide_dma_read = &e100_dma_read;
+               hwif->ide_dma_begin = &e100_dma_begin;
+               hwif->OUTB = &etrax100_ide_outb;
+               hwif->OUTW = &etrax100_ide_outw;
+               hwif->OUTBSYNC = &etrax100_ide_outbsync;
+               hwif->INB = &etrax100_ide_inb;
+               hwif->INW = &etrax100_ide_inw;
+               hwif->ide_dma_off_quietly = &e100_dma_off;
+               hwif->ide_dma_verbose = &e100_dma_verbose;
+               /* NOTE(review): the kmalloc result is not checked for
+                * NULL here -- TODO confirm a failed allocation is
+                * caught before e100_ide_build_dmatable() uses it */
+               hwif->sg_table =
+                 kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES, GFP_KERNEL);
+       }
+
+       /* actually reset and configure the etrax100 ide/ata interface */
+
+       *R_ATA_CTRL_DATA = 0;
+       *R_ATA_TRANSFER_CNT = 0;
+       *R_ATA_CONFIG = 0;
+
+       /* route DMA channels 2 (TX) and 3 (RX) to the ATA interface and
+        * select the ATA function on the shared pins, via the shadow of
+        * the write-only R_GEN_CONFIG register */
+       genconfig_shadow = (genconfig_shadow &
+                           ~IO_MASK(R_GEN_CONFIG, dma2) &
+                           ~IO_MASK(R_GEN_CONFIG, dma3) &
+                           ~IO_MASK(R_GEN_CONFIG, ata)) |
+               ( IO_STATE( R_GEN_CONFIG, dma3, ata    ) |
+                 IO_STATE( R_GEN_CONFIG, dma2, ata    ) |
+                 IO_STATE( R_GEN_CONFIG, ata,  select ) );
+
+       *R_GEN_CONFIG = genconfig_shadow;
+
+        /* pull the chosen /reset-line low */
+
+#ifdef CONFIG_ETRAX_IDE_G27_RESET
+        REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, 0);
+#endif
+#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET
+        REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, 0);
+#endif
+#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET
+        REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, 0);
+#endif
+#ifdef CONFIG_ETRAX_IDE_PB7_RESET
+       port_pb_dir_shadow = port_pb_dir_shadow |
+               IO_STATE(R_PORT_PB_DIR, dir7, output);
+       *R_PORT_PB_DIR = port_pb_dir_shadow;
+       REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 7, 1);
+#endif
+
+       /* wait some */
+
+       udelay(25);
+
+       /* de-assert bus-reset */
+
+#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET
+       REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, 1);
+#endif
+#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET
+       REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, 1);
+#endif
+#ifdef CONFIG_ETRAX_IDE_G27_RESET
+       REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, 1);
+#endif
+
+       /* make a dummy read to set the ata controller in a proper state */
+       dummy = *R_ATA_STATUS_DATA;
+
+       /* enable the interface with mword DMA mode 2 / PIO mode 4 timings */
+       *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable,     1 ) |
+                         IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) |
+                         IO_FIELD( R_ATA_CONFIG, dma_hold,   ATA_DMA2_HOLD ) |
+                         IO_FIELD( R_ATA_CONFIG, pio_setup,  ATA_PIO4_SETUP ) |
+                         IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO4_STROBE ) |
+                         IO_FIELD( R_ATA_CONFIG, pio_hold,   ATA_PIO4_HOLD ) );
+
+       *R_ATA_CTRL_DATA = ( IO_STATE( R_ATA_CTRL_DATA, rw,   read) |
+                            IO_FIELD( R_ATA_CTRL_DATA, addr, 1   ) );
+
+       while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); /* wait for busy flag*/
+
+       *R_IRQ_MASK0_SET = ( IO_STATE( R_IRQ_MASK0_SET, ata_irq0, set ) |
+                            IO_STATE( R_IRQ_MASK0_SET, ata_irq1, set ) |
+                            IO_STATE( R_IRQ_MASK0_SET, ata_irq2, set ) |
+                            IO_STATE( R_IRQ_MASK0_SET, ata_irq3, set ) );
+
+       printk("ide: waiting %d seconds for drives to regain consciousness\n",
+              CONFIG_ETRAX_IDE_DELAY);
+
+       /* boot-time busy-wait for the configured number of seconds */
+       h = jiffies + (CONFIG_ETRAX_IDE_DELAY * HZ);
+       while(time_before(jiffies, h)) /* nothing */ ;
+
+       /* reset the dma channels we will use */
+
+       RESET_DMA(ATA_TX_DMA_NBR);
+       RESET_DMA(ATA_RX_DMA_NBR);
+       WAIT_DMA(ATA_TX_DMA_NBR);
+       WAIT_DMA(ATA_RX_DMA_NBR);
+
+}
+
+/* hwif->ide_dma_off_quietly hook: nothing to do on this controller,
+ * always reports success. */
+static int e100_dma_off (ide_drive_t *drive)
+{
+       return 0;
+}
+
+/* hwif->ide_dma_verbose hook: appends the DMA mode to the drive probe
+ * line; this driver always runs multiword DMA mode 2 (cf. the
+ * ATA_DMA2_* timings above). */
+static int e100_dma_verbose (ide_drive_t *drive)
+{
+       printk(", DMA(mode 2)");
+       return 0;
+}
+
+/* Single statically allocated DMA descriptor shared by the ATAPI
+ * PIO-handshake transfer helpers below. */
+static etrax_dma_descr mydescr;
+
+/*
+ * The following routines are mainly used by the ATAPI drivers.
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd bytecount is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
+ */
+/*
+ * Transfer 'bytecount' bytes from the drive's data register into
+ * 'buffer' using DMA channel 3 (RX) with PIO handshaking.  An odd
+ * bytecount is rounded up, so the buffer must have one spare byte.
+ * Busy-waits until the DMA channel has finished.
+ */
+static void
+e100_atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
+{
+       ide_ioreg_t data_reg = IDE_DATA_REG;
+
+       D(printk("atapi_input_bytes, dreg 0x%x, buffer 0x%x, count %d\n",
+                data_reg, buffer, bytecount));
+
+       if(bytecount & 1) {
+               printk("warning, odd bytecount in cdrom_in_bytes = %d.\n", bytecount);
+               bytecount++; /* to round off */
+       }
+
+       /* make sure the DMA channel is available */
+       RESET_DMA(ATA_RX_DMA_NBR);
+       WAIT_DMA(ATA_RX_DMA_NBR);
+
+       /* setup DMA descriptor (single shared static descriptor, see
+        * mydescr above; d_eol marks it as the last in the chain) */
+
+       mydescr.sw_len = bytecount;
+       mydescr.ctrl   = d_eol;
+       mydescr.buf    = virt_to_phys(buffer);
+
+       /* start the dma channel */
+
+       *R_DMA_CH3_FIRST = virt_to_phys(&mydescr);
+       *R_DMA_CH3_CMD   = IO_STATE(R_DMA_CH3_CMD, cmd, start);
+
+       /* initiate a multi word dma read using PIO handshaking;
+        * the transfer count register is in 16-bit words */
+
+       *R_ATA_TRANSFER_CNT = IO_FIELD(R_ATA_TRANSFER_CNT, count, bytecount >> 1);
+
+       *R_ATA_CTRL_DATA = data_reg |
+               IO_STATE(R_ATA_CTRL_DATA, rw,       read) |
+               IO_STATE(R_ATA_CTRL_DATA, src_dst,  dma) |
+               IO_STATE(R_ATA_CTRL_DATA, handsh,   pio) |
+               IO_STATE(R_ATA_CTRL_DATA, multi,    on) |
+               IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
+
+       /* wait for completion (busy-wait with the disk LED on) */
+
+       LED_DISK_READ(1);
+       WAIT_DMA(ATA_RX_DMA_NBR);
+       LED_DISK_READ(0);
+
+#if 0
+        /* old polled transfer code
+        * this should be moved into a new function that can do polled
+        * transfers if DMA is not available
+        */
+
+        /* initiate a multi word read */
+
+        *R_ATA_TRANSFER_CNT = wcount << 1;
+
+        *R_ATA_CTRL_DATA = data_reg |
+                IO_STATE(R_ATA_CTRL_DATA, rw,       read) |
+                IO_STATE(R_ATA_CTRL_DATA, src_dst,  register) |
+                IO_STATE(R_ATA_CTRL_DATA, handsh,   pio) |
+                IO_STATE(R_ATA_CTRL_DATA, multi,    on) |
+                IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
+
+        /* svinto has a latency until the busy bit actually is set */
+
+        nop(); nop();
+        nop(); nop();
+        nop(); nop();
+        nop(); nop();
+        nop(); nop();
+
+        /* unit should be busy during multi transfer */
+        while((status = *R_ATA_STATUS_DATA) & IO_MASK(R_ATA_STATUS_DATA, busy)) {
+                while(!(status & IO_MASK(R_ATA_STATUS_DATA, dav)))
+                        status = *R_ATA_STATUS_DATA;
+                *ptr++ = (unsigned short)(status & 0xffff);
+        }
+#endif
+}
+
+/*
+ * Transfer 'bytecount' bytes from 'buffer' to the drive's data
+ * register using DMA channel 2 (TX) with PIO handshaking.  An odd
+ * bytecount is rounded up, so the buffer must have one spare byte.
+ * Busy-waits until the DMA channel has finished.
+ */
+static void
+e100_atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
+{
+       ide_ioreg_t data_reg = IDE_DATA_REG;
+
+       D(printk("atapi_output_bytes, dreg 0x%x, buffer 0x%x, count %d\n",
+                data_reg, buffer, bytecount));
+
+       if(bytecount & 1) {
+               printk("odd bytecount %d in atapi_out_bytes!\n", bytecount);
+               bytecount++;
+       }
+
+       /* make sure the DMA channel is available */
+       RESET_DMA(ATA_TX_DMA_NBR);
+       WAIT_DMA(ATA_TX_DMA_NBR);
+
+       /* setup DMA descriptor (single shared static descriptor, see
+        * mydescr above; d_eol marks it as the last in the chain) */
+
+       mydescr.sw_len = bytecount;
+       mydescr.ctrl   = d_eol;
+       mydescr.buf    = virt_to_phys(buffer);
+
+       /* start the dma channel */
+
+       *R_DMA_CH2_FIRST = virt_to_phys(&mydescr);
+       *R_DMA_CH2_CMD   = IO_STATE(R_DMA_CH2_CMD, cmd, start);
+
+       /* initiate a multi word dma write using PIO handshaking;
+        * the transfer count register is in 16-bit words */
+
+       *R_ATA_TRANSFER_CNT = IO_FIELD(R_ATA_TRANSFER_CNT, count, bytecount >> 1);
+
+       *R_ATA_CTRL_DATA = data_reg |
+               IO_STATE(R_ATA_CTRL_DATA, rw,       write) |
+               IO_STATE(R_ATA_CTRL_DATA, src_dst,  dma) |
+               IO_STATE(R_ATA_CTRL_DATA, handsh,   pio) |
+               IO_STATE(R_ATA_CTRL_DATA, multi,    on) |
+               IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
+
+       /* wait for completion (busy-wait with the disk LED on) */
+
+       LED_DISK_WRITE(1);
+       WAIT_DMA(ATA_TX_DMA_NBR);
+       LED_DISK_WRITE(0);
+
+#if 0
+        /* old polled write code - see comment in input_bytes */
+
+       /* wait for busy flag */
+        while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy));
+
+        /* initiate a multi word write */
+
+        *R_ATA_TRANSFER_CNT = bytecount >> 1;
+
+        ctrl = data_reg |
+                IO_STATE(R_ATA_CTRL_DATA, rw,       write) |
+                IO_STATE(R_ATA_CTRL_DATA, src_dst,  register) |
+                IO_STATE(R_ATA_CTRL_DATA, handsh,   pio) |
+                IO_STATE(R_ATA_CTRL_DATA, multi,    on) |
+                IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
+
+        LED_DISK_WRITE(1);
+
+        /* Etrax will set busy = 1 until the multi pio transfer has finished
+         * and tr_rdy = 1 after each successful word transfer.
+         * When the last byte has been transferred Etrax will first set tr_tdy = 1
+         * and then busy = 0 (not in the same cycle). If we read busy before it
+         * has been set to 0 we will think that we should transfer more bytes
+         * and then tr_rdy would be 0 forever. This is solved by checking busy
+         * in the inner loop.
+         */
+
+        do {
+                *R_ATA_CTRL_DATA = ctrl | *ptr++;
+                while(!(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy)) &&
+                      (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)));
+        } while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy));
+
+        LED_DISK_WRITE(0);
+#endif
+
+}
+
+/*
+ * This is used for most PIO data transfers *from* the IDE interface
+ */
+/* hwif->ata_input_data hook.
+ * NOTE(review): wcount appears to be in 32-bit units here
+ * (bytecount = wcount << 2) -- confirm against the ide core's
+ * calling convention. */
+static void
+e100_ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
+{
+       e100_atapi_input_bytes(drive, buffer, wcount << 2);
+}
+
+/*
+ * This is used for most PIO data transfers *to* the IDE interface
+ */
+/* hwif->ata_output_data hook.
+ * NOTE(review): wcount appears to be in 32-bit units here
+ * (bytecount = wcount << 2) -- confirm against the ide core's
+ * calling convention. */
+static void
+e100_ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
+{
+       e100_atapi_output_bytes(drive, buffer, wcount << 2);
+}
+
+/* we only have one DMA channel on the chip for ATA, so we can keep these statically */
+static etrax_dma_descr ata_descrs[MAX_DMA_DESCRS];
+/* total byte size of the transfer described by ata_descrs[], filled in
+ * by e100_ide_build_dmatable() and consumed by e100_start_dma() */
+static unsigned int ata_tot_size;
+
+/*
+ * e100_ide_build_dmatable() prepares a dma request.
+ * Returns 0 if all went okay, returns 1 otherwise.
+ */
+/*
+ * Build the etrax DMA descriptor chain (ata_descrs[]) for the current
+ * request of 'drive' and record the total transfer size in
+ * ata_tot_size.  Returns 0 on success, 1 on failure (too many
+ * descriptors, request too large, or empty table) in which case the
+ * caller should fall back to PIO.
+ */
+static int e100_ide_build_dmatable (ide_drive_t *drive)
+{
+       ide_hwif_t *hwif = HWIF(drive);
+       struct scatterlist* sg;
+       struct request *rq  = HWGROUP(drive)->rq;
+       unsigned long size, addr;
+       unsigned int count = 0;
+       int i = 0;
+
+       sg = hwif->sg_table;
+
+       ata_tot_size = 0;
+
+       if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE) {
+               /* taskfile requests carry a plain virtual buffer instead
+                * of a bio, so build a single-entry scatterlist by hand */
+               u8 *virt_addr = rq->buffer;
+               int sector_count = rq->nr_sectors;
+               memset(&sg[0], 0, sizeof(*sg));
+               sg[0].page = virt_to_page(virt_addr);
+               sg[0].offset = offset_in_page(virt_addr);
+               sg[0].length =  sector_count  * SECTOR_SIZE;
+               hwif->sg_nents = i = 1;
+       }
+       else
+       {
+               hwif->sg_nents = i = blk_rq_map_sg(drive->queue, rq, hwif->sg_table);
+       }
+
+
+       while(i) {
+               /*
+                * Determine addr and size of next buffer area.  We assume that
+                * individual virtual buffers are always composed linearly in
+                * physical memory.  For example, we assume that any 8kB buffer
+                * is always composed of two adjacent physical 4kB pages rather
+                * than two possibly non-adjacent physical 4kB pages.
+                */
+               /* group sequential buffers into one large buffer */
+               addr = page_to_phys(sg->page) + sg->offset;
+               size = sg_dma_len(sg);
+               while (sg++, --i) {
+                       if ((addr + size) != page_to_phys(sg->page) + sg->offset)
+                               break;
+                       size += sg_dma_len(sg);
+               }
+
+               /* did we run out of descriptors?  a merged area larger than
+                  64 kB consumes TWO descriptors (see the split below), so
+                  account for that here to avoid overrunning ata_descrs[] */
+
+               if(count + (size > 65536 ? 1 : 0) >= MAX_DMA_DESCRS) {
+                       printk("%s: too few DMA descriptors\n", drive->name);
+                       return 1;
+               }
+
+               /* however, this case is more difficult - R_ATA_TRANSFER_CNT cannot be more
+                  than 65536 words per transfer, so in that case we need to either
+                  1) use a DMA interrupt to re-trigger R_ATA_TRANSFER_CNT and continue with
+                     the descriptors, or
+                  2) simply do the request here, and get dma_intr to only ide_end_request on
+                     those blocks that were actually set-up for transfer.
+               */
+
+               if(ata_tot_size + size > 131072) {
+                       printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, (int)size);
+                       return 1;
+               }
+
+               /* If size > 65536 it has to be split into new descriptors.  Since we
+                  don't handle size > 131072 only one split is necessary */
+
+               if(size > 65536) {
+                       /* set up a descriptor for the first 64 kB of the area */
+                        ata_descrs[count].sw_len = 0;  /* 0 means 65536, this is a 16-bit field */
+                        ata_descrs[count].ctrl = 0;
+                        ata_descrs[count].buf = addr;
+                        ata_descrs[count].next = virt_to_phys(&ata_descrs[count + 1]);
+                        count++;
+                        ata_tot_size += 65536;
+                        /* size and addr should refer to the not yet handled data */
+                        size -= 65536;
+                        addr += 65536;
+                }
+               /* ok we want to do IO at addr, size bytes. set up a new descriptor entry */
+                if(size == 65536) {
+                       ata_descrs[count].sw_len = 0;  /* 0 means 65536, this is a 16-bit field */
+               } else {
+                       ata_descrs[count].sw_len = size;
+               }
+               ata_descrs[count].ctrl = 0;
+               ata_descrs[count].buf = addr;
+               ata_descrs[count].next = virt_to_phys(&ata_descrs[count + 1]);
+               count++;
+               ata_tot_size += size;
+       }
+
+       if (count) {
+               /* set the end-of-list flag on the last descriptor */
+               ata_descrs[count - 1].ctrl |= d_eol;
+               /* return and say all is ok */
+               return 0;
+       }
+
+       printk("%s: empty DMA table?\n", drive->name);
+       return 1;       /* let the PIO routines handle this weirdness */
+}
+
+/*
+ * Decide whether DMA should be enabled for 'drive' based on its
+ * identify data: any drive advertising multiword DMA mode 2, or any
+ * model on the good_dma_drives[] whitelist, gets drive->using_dma set.
+ * Returns 0 if DMA was enabled, 1 otherwise.
+ */
+static int config_drive_for_dma (ide_drive_t *drive)
+{
+        struct hd_driveid *id = drive->id;
+        const char **model;
+
+        /* no identify data or no DMA capability bit -> PIO only */
+        if (!id || !(id->capability & 1))
+                return 1;
+
+        /* Enable DMA on any drive that supports mword2 DMA */
+        if ((id->field_valid & 2) && (id->dma_mword & 0x404) == 0x404) {
+                drive->using_dma = 1;
+                return 0;               /* DMA enabled */
+        }
+
+        /* Consult the list of known "good" drives */
+        for (model = good_dma_drives; *model != NULL; model++) {
+                if (strcmp(*model, id->model) == 0) {
+                        drive->using_dma = 1;
+                        return 0;       /* DMA enabled */
+                }
+        }
+
+        return 1;       /* DMA not enabled */
+}
+
+/*
+ * etrax_dma_intr() is the handler for disk read/write DMA interrupts
+ */
+/* Interrupt handler installed by e100_start_dma() for disk read/write
+ * DMA: turns the activity LEDs off, stops the DMA engine, checks the
+ * drive status and either completes all sectors of the request or
+ * reports an error to the ide core. */
+static ide_startstop_t etrax_dma_intr (ide_drive_t *drive)
+{
+       int i, dma_stat;
+       byte stat;
+
+       LED_DISK_READ(0);
+       LED_DISK_WRITE(0);
+
+       dma_stat = HWIF(drive)->ide_dma_end(drive);
+       stat = HWIF(drive)->INB(IDE_STATUS_REG);                /* get drive status */
+       if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
+               if (!dma_stat) {
+                       struct request *rq;
+                       rq = HWGROUP(drive)->rq;
+                       /* end the request in current_nr_sectors-sized
+                        * chunks until all nr_sectors are accounted for */
+                       for (i = rq->nr_sectors; i > 0;) {
+                               i -= rq->current_nr_sectors;
+                               DRIVER(drive)->end_request(drive, 1, rq->nr_sectors);
+                       }
+                       return ide_stopped;
+               }
+               printk("%s: bad DMA status\n", drive->name);
+       }
+       return DRIVER(drive)->error(drive, "dma_intr", stat);
+}
+
+/*
+ * Functions below initiates/aborts DMA read/write operations on a drive.
+ *
+ * The caller is assumed to have selected the drive and programmed the drive's
+ * sector address using CHS or LBA.  All that remains is to prepare for DMA
+ * and then issue the actual read/write DMA/PIO command to the drive.
+ *
+ * For ATAPI devices, we just prepare for DMA and return. The caller should
+ * then issue the packet command to the drive and call us again with
+ * ide_dma_begin afterwards.
+ *
+ * Returns 0 if all went well.
+ * Returns 1 if DMA read/write could not be started, in which case
+ * the caller should revert to PIO for the current request.
+ */
+
+/* hwif->ide_dma_check hook: delegates the decision to
+ * config_drive_for_dma(); 0 means DMA was enabled. */
+static int e100_dma_check(ide_drive_t *drive)
+{
+       return config_drive_for_dma (drive);
+}
+
+/* hwif->ide_dma_end hook: always reports success.
+ * TODO: check if something went wrong with the DMA */
+static int e100_dma_end(ide_drive_t *drive)
+{
+       return 0;
+}
+
+/*
+ * Core DMA start helper shared by e100_dma_read/write/begin.
+ * 'reading' selects between the RX (channel 3) and TX (channel 2) DMA
+ * paths; 'atapi' suppresses issuing the READ/WRITE DMA command to the
+ * drive (ATAPI devices issue their own packet command first).
+ * Returns 0 on success, 1 if the descriptor table could not be built
+ * (caller should fall back to PIO).
+ */
+static int e100_start_dma(ide_drive_t *drive, int atapi, int reading)
+{
+       if(reading) {
+
+               RESET_DMA(ATA_RX_DMA_NBR); /* sometimes the DMA channel get stuck so we need to do this */
+               WAIT_DMA(ATA_RX_DMA_NBR);
+
+               /* set up the Etrax DMA descriptors */
+
+               if(e100_ide_build_dmatable (drive))
+                       return 1;
+
+               if(!atapi) {
+                       /* set the irq handler which will finish the request when DMA is done */
+
+                       ide_set_handler(drive, &etrax_dma_intr, WAIT_CMD, NULL);
+
+                       /* issue cmd to drive */
+                        if ((HWGROUP(drive)->rq->cmd == IDE_DRIVE_TASKFILE) &&
+                           (drive->addressing == 1)) {
+                               ide_task_t *args = HWGROUP(drive)->rq->special;
+                               etrax100_ide_outb(args->tfRegister[IDE_COMMAND_OFFSET], IDE_COMMAND_REG);
+                       } else if (drive->addressing) {
+                               /* LBA48 */
+                               etrax100_ide_outb(WIN_READDMA_EXT, IDE_COMMAND_REG);
+                       } else {
+                               etrax100_ide_outb(WIN_READDMA, IDE_COMMAND_REG);
+                       }
+               }
+
+               /* begin DMA */
+
+               /* need to do this before RX DMA due to a chip bug
+                * it is enough to just flush the part of the cache that
+                * corresponds to the buffers we start, but since HD transfers
+                * usually are more than 8 kB, it is easier to optimize for the
+                * normal case and just flush the entire cache. its the only
+                * way to be sure! (OB movie quote)
+                */
+               flush_etrax_cache();
+               *R_DMA_CH3_FIRST = virt_to_phys(ata_descrs);
+               *R_DMA_CH3_CMD   = IO_STATE(R_DMA_CH3_CMD, cmd, start);
+
+               /* initiate a multi word dma read using DMA handshaking;
+                * the transfer count register is in 16-bit words */
+
+               *R_ATA_TRANSFER_CNT =
+                       IO_FIELD(R_ATA_TRANSFER_CNT, count, ata_tot_size >> 1);
+
+               *R_ATA_CTRL_DATA =
+                       IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) |
+                       IO_STATE(R_ATA_CTRL_DATA, rw,       read) |
+                       IO_STATE(R_ATA_CTRL_DATA, src_dst,  dma)  |
+                       IO_STATE(R_ATA_CTRL_DATA, handsh,   dma)  |
+                       IO_STATE(R_ATA_CTRL_DATA, multi,    on)   |
+                       IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
+
+               LED_DISK_READ(1);
+
+               D(printk("dma read of %d bytes.\n", ata_tot_size));
+
+       } else {
+               /* writing */
+
+               RESET_DMA(ATA_TX_DMA_NBR); /* sometimes the DMA channel get stuck so we need to do this */
+               WAIT_DMA(ATA_TX_DMA_NBR);
+
+               /* set up the Etrax DMA descriptors */
+
+               if(e100_ide_build_dmatable (drive))
+                       return 1;
+
+               if(!atapi) {
+                       /* set the irq handler which will finish the request when DMA is done */
+
+                       ide_set_handler(drive, &etrax_dma_intr, WAIT_CMD, NULL);
+
+                       /* issue cmd to drive */
+                       if ((HWGROUP(drive)->rq->cmd == IDE_DRIVE_TASKFILE) &&
+                           (drive->addressing == 1)) {
+                               ide_task_t *args = HWGROUP(drive)->rq->special;
+                               etrax100_ide_outb(args->tfRegister[IDE_COMMAND_OFFSET], IDE_COMMAND_REG);
+                       } else if (drive->addressing) {
+                               /* LBA48 */
+                               etrax100_ide_outb(WIN_WRITEDMA_EXT, IDE_COMMAND_REG);
+                       } else {
+                               etrax100_ide_outb(WIN_WRITEDMA, IDE_COMMAND_REG);
+                       }
+               }
+
+               /* begin DMA */
+
+               *R_DMA_CH2_FIRST = virt_to_phys(ata_descrs);
+               *R_DMA_CH2_CMD   = IO_STATE(R_DMA_CH2_CMD, cmd, start);
+
+               /* initiate a multi word dma write using DMA handshaking;
+                * the transfer count register is in 16-bit words */
+
+               *R_ATA_TRANSFER_CNT =
+                       IO_FIELD(R_ATA_TRANSFER_CNT, count, ata_tot_size >> 1);
+
+               *R_ATA_CTRL_DATA =
+                       IO_FIELD(R_ATA_CTRL_DATA, data,     IDE_DATA_REG) |
+                       IO_STATE(R_ATA_CTRL_DATA, rw,       write) |
+                       IO_STATE(R_ATA_CTRL_DATA, src_dst,  dma) |
+                       IO_STATE(R_ATA_CTRL_DATA, handsh,   dma) |
+                       IO_STATE(R_ATA_CTRL_DATA, multi,    on) |
+                       IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
+
+               LED_DISK_WRITE(1);
+
+               D(printk("dma write of %d bytes.\n", ata_tot_size));
+       }
+       return 0;
+}
+
+/* hwif->ide_dma_write hook: records the transfer direction in the
+ * module-level flag e100_read_command (declared earlier in this file,
+ * read back by e100_dma_begin) and starts the DMA for disks. */
+static int e100_dma_write(ide_drive_t *drive)
+{
+       e100_read_command = 0;
+       /* ATAPI-devices (not disks) first call ide_dma_read/write to set the direction
+        * then they call ide_dma_begin after they have issued the appropriate drive command
+        * themselves to actually start the chipset DMA. so we just return here if we're
+        * not a diskdrive.
+        */
+       if (drive->media != ide_disk)
+                return 0;
+       return e100_start_dma(drive, 0, 0);
+}
+
+/* hwif->ide_dma_read hook: records the transfer direction in the
+ * module-level flag e100_read_command (declared earlier in this file,
+ * read back by e100_dma_begin) and starts the DMA for disks. */
+static int e100_dma_read(ide_drive_t *drive)
+{
+       e100_read_command = 1;
+       /* ATAPI-devices (not disks) first call ide_dma_read/write to set the direction
+        * then they call ide_dma_begin after they have issued the appropriate drive command
+        * themselves to actually start the chipset DMA. so we just return here if we're
+        * not a diskdrive.
+        */
+       if (drive->media != ide_disk)
+                return 0;
+       return e100_start_dma(drive, 0, 1);
+}
+
+/* hwif->ide_dma_begin hook, used by the ATAPI path. */
+static int e100_dma_begin(ide_drive_t *drive)
+{
+       /* begin DMA, used by ATAPI devices which want to issue the
+        * appropriate IDE command themselves.
+        *
+        * they have already called ide_dma_read/write to set the
+        * static reading flag, now they call ide_dma_begin to do
+        * the real stuff. we tell our code below not to issue
+        * any IDE commands itself and jump into it.
+        */
+        return e100_start_dma(drive, 1, e100_read_command);
+}
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
new file mode 100644 (file)
index 0000000..6ded633
--- /dev/null
@@ -0,0 +1,104 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/sched.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/pm.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+
+#include <asm/semaphore.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+#include <asm/delay.h>
+#include <asm/irq.h>
+#include <asm/pgtable.h>
+#include <asm/fasttimer.h>
+
+/* Prototypes for symbols implemented elsewhere (assembly or library
+ * code) that are exported to modules below. */
+extern void dump_thread(struct pt_regs *, struct user *);
+extern unsigned long get_cmos_time(void);
+extern void __Udiv(void);
+extern void __Umod(void);
+extern void __Div(void);
+extern void __Mod(void);
+extern void __ashrdi3(void);
+extern void iounmap(void *addr);
+
+/* Platform dependent support */
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(get_cmos_time);
+EXPORT_SYMBOL(loops_per_usec);
+
+/* String functions */
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strncpy);
+
+/* Math functions (compiler-support routines) */
+EXPORT_SYMBOL(__Udiv);
+EXPORT_SYMBOL(__Umod);
+EXPORT_SYMBOL(__Div);
+EXPORT_SYMBOL(__Mod);
+EXPORT_SYMBOL(__ashrdi3);
+
+/* Memory functions */
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+
+/* Semaphore functions */
+EXPORT_SYMBOL(__up);
+EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_interruptible);
+EXPORT_SYMBOL(__down_trylock);
+
+/* Export shadow registers for the CPU I/O pins */
+EXPORT_SYMBOL(genconfig_shadow);
+EXPORT_SYMBOL(port_pa_data_shadow);
+EXPORT_SYMBOL(port_pa_dir_shadow);
+EXPORT_SYMBOL(port_pb_data_shadow);
+EXPORT_SYMBOL(port_pb_dir_shadow);
+EXPORT_SYMBOL(port_pb_config_shadow);
+EXPORT_SYMBOL(port_g_data_shadow);
+
+/* Userspace access functions */
+EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(__copy_user);
+
+/* Cache flush functions */
+EXPORT_SYMBOL(flush_etrax_cache);
+EXPORT_SYMBOL(prepare_rx_descriptor);
+
+/* memcpy/memset may be defined as macros; undefine them and export the
+ * real functions.  NOTE(review): NOVERS exports them without symbol
+ * versioning -- presumably so modules link against the arch
+ * implementations regardless of version checksums; confirm. */
+#undef memcpy
+#undef memset
+extern void * memset(void *, int, __kernel_size_t);
+extern void * memcpy(void *, const void *, __kernel_size_t);
+EXPORT_SYMBOL_NOVERS(memcpy);
+EXPORT_SYMBOL_NOVERS(memset);
+
+#ifdef CONFIG_ETRAX_FAST_TIMER
+/* Fast timer functions */
+EXPORT_SYMBOL(fast_timer_list);
+EXPORT_SYMBOL(start_one_shot_timer);
+EXPORT_SYMBOL(del_fast_timer);
+EXPORT_SYMBOL(schedule_usleep);
+#endif
+
diff --git a/arch/i386/mach-generic/es7000.c b/arch/i386/mach-generic/es7000.c
new file mode 100644 (file)
index 0000000..48d3ec3
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * APIC driver for the Unisys ES7000 chipset.
+ */
+#define APIC_DEFINITION 1
+#include <linux/config.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/genapic.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <asm/mach-es7000/mach_apicdef.h>
+#include <asm/mach-es7000/mach_apic.h>
+#include <asm/mach-es7000/mach_ipi.h>
+#include <asm/mach-es7000/mach_mpparse.h>
+#include <asm/mach-es7000/mach_wakecpu.h>
+
+/* Probe hook for the generic-subarch framework.  Always returns 0
+ * (no match) here; ES7000 detection happens later from the
+ * mptable/ACPI parsing hooks. */
+static __init int probe_es7000(void)
+{
+       /* probed later in mptable/ACPI hooks */
+       return 0;
+}
+
+/* APIC driver descriptor registered with the generic-arch code. */
+struct genapic apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/ia64/configs/sim_defconfig b/arch/ia64/configs/sim_defconfig
new file mode 100644 (file)
index 0000000..5744b59
--- /dev/null
@@ -0,0 +1,535 @@
+#
+# Automatically generated make config: don't edit
+#
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+# CONFIG_CLEAN_COMPILE is not set
+# CONFIG_STANDALONE is not set
+CONFIG_BROKEN=y
+CONFIG_BROKEN_ON_SMP=y
+
+#
+# General setup
+#
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_LOG_BUF_SHIFT=16
+# CONFIG_HOTPLUG is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_OBSOLETE_MODPARM=y
+CONFIG_MODVERSIONS=y
+CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
+
+#
+# Processor type and features
+#
+CONFIG_IA64=y
+CONFIG_64BIT=y
+CONFIG_MMU=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_TIME_INTERPOLATION=y
+CONFIG_EFI=y
+# CONFIG_IA64_GENERIC is not set
+# CONFIG_IA64_DIG is not set
+# CONFIG_IA64_HP_ZX1 is not set
+# CONFIG_IA64_SGI_SN2 is not set
+CONFIG_IA64_HP_SIM=y
+# CONFIG_ITANIUM is not set
+CONFIG_MCKINLEY=y
+# CONFIG_IA64_PAGE_SIZE_4KB is not set
+# CONFIG_IA64_PAGE_SIZE_8KB is not set
+# CONFIG_IA64_PAGE_SIZE_16KB is not set
+CONFIG_IA64_PAGE_SIZE_64KB=y
+CONFIG_IA64_L1_CACHE_SHIFT=7
+# CONFIG_MCKINLEY_ASTEP_SPECIFIC is not set
+# CONFIG_VIRTUAL_MEM_MAP is not set
+# CONFIG_IA64_CYCLONE is not set
+CONFIG_FORCE_MAX_ZONEORDER=18
+CONFIG_SMP=y
+CONFIG_NR_CPUS=64
+CONFIG_PREEMPT=y
+CONFIG_HAVE_DEC_LOCK=y
+CONFIG_IA32_SUPPORT=y
+CONFIG_COMPAT=y
+# CONFIG_PERFMON is not set
+CONFIG_IA64_PALINFO=m
+
+#
+# Firmware Drivers
+#
+CONFIG_EFI_VARS=y
+# CONFIG_SMBIOS is not set
+CONFIG_BINFMT_ELF=y
+CONFIG_BINFMT_MISC=y
+
+#
+# Power management and ACPI
+#
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=4096
+# CONFIG_BLK_DEV_INITRD is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+
+#
+# SCSI Transport Attributes
+#
+CONFIG_SCSI_SPI_ATTRS=y
+# CONFIG_SCSI_FC_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_EATA_PIO is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+
+#
+# Networking support
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+# CONFIG_NETLINK_DEV is not set
+# CONFIG_UNIX is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+# CONFIG_IP_PNP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_FASTROUTE is not set
+# CONFIG_NET_HW_FLOWCONTROL is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_NETDEVICES is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input I/O drivers
+#
+# CONFIG_GAMEPORT is not set
+CONFIG_SOUND_GAMEPORT=y
+CONFIG_SERIO=y
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_QIC02_TAPE is not set
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+# CONFIG_WATCHDOG is not set
+CONFIG_EFI_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_FTAPE is not set
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# I2C support
+#
+# CONFIG_I2C is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_MDA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_FAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+# CONFIG_DEVFS_FS is not set
+# CONFIG_DEVPTS_FS_XATTR is not set
+# CONFIG_TMPFS is not set
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_RAMFS=y
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+# CONFIG_NFS_V3 is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_NFS_DIRECTIO=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V4 is not set
+# CONFIG_NFSD_TCP is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_NEC98_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+
+#
+# HP Simulator drivers
+#
+CONFIG_HP_SIMETH=y
+CONFIG_HP_SIMSERIAL=y
+CONFIG_HP_SIMSERIAL_CONSOLE=y
+CONFIG_HP_SIMSCSI=y
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_IA64_GRANULE_16MB is not set
+CONFIG_IA64_GRANULE_64MB=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_IA64_PRINT_HAZARDS is not set
+# CONFIG_DISABLE_VHPT is not set
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_IA64_DEBUG_CMPXCHG is not set
+# CONFIG_IA64_DEBUG_IRQ is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_SYSVIPC_COMPAT=y
+
+#
+# Security options
+#
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
diff --git a/arch/ia64/dig/topology.c b/arch/ia64/dig/topology.c
new file mode 100644 (file)
index 0000000..8dc3137
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * arch/ia64/dig/topology.c
+ *     Populate driverfs with topology information.
+ *     Derived entirely from i386/mach-default.c
+ *  Intel Corporation - Ashok Raj
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <asm/cpu.h>
+
+static DEFINE_PER_CPU(struct ia64_cpu, cpu_devices);
+
+/*
+ * First Pass: simply borrowed code for now. Later should hook into
+ * hotplug notification for node/cpu/memory as applicable
+ */
+
+/*
+ * Register one logical CPU with the driver model.
+ * @num: logical CPU number.
+ * Returns whatever register_cpu() returns.
+ */
+static int arch_register_cpu(int num)
+{
+       struct node *parent = NULL;
+
+#ifdef CONFIG_NUMA
+       /* NOTE(review): the NUMA parent lookup is stubbed out, so CPUs
+        * register without a parent node even on NUMA builds — confirm
+        * whether that is intentional for this first pass. */
+       //parent = &node_devices[cpu_to_node(num)].node;
+#endif
+
+       return register_cpu(&per_cpu(cpu_devices,num).cpu, num, parent);
+}
+
+/*
+ * Boot-time initcall: walk every CPU and register it with driverfs.
+ * Return values from arch_register_cpu() are ignored here.
+ */
+static int __init topology_init(void)
+{
+    int i;
+
+    for_each_cpu(i) {
+        arch_register_cpu(i);
+       }
+    return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/ia64/lib/bitop.c b/arch/ia64/lib/bitop.c
new file mode 100644 (file)
index 0000000..1c6ee49
--- /dev/null
@@ -0,0 +1,88 @@
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/intrinsics.h>
+#include <linux/module.h>
+#include <asm/bitops.h>
+
+/*
+ * Find next zero bit in a bitmap reasonably efficiently..
+ *
+ * Returns the index of the first zero bit at or after @offset in the
+ * @size-bit bitmap at @addr, or @size when every remaining bit is set.
+ * Operates a 64-bit word at a time (the 64 / 63UL constants assume
+ * 64-bit unsigned longs, as on ia64).
+ */
+
+int __find_next_zero_bit (void *addr, unsigned long size, unsigned long offset)
+{
+       unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
+       unsigned long result = offset & ~63UL;
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset &= 63UL;
+       if (offset) {
+               /* Partial first word: force the bits below offset to 1 so
+                * they can never look like a zero bit. */
+               tmp = *(p++);
+               tmp |= ~0UL >> (64-offset);
+               if (size < 64)
+                       goto found_first;
+               if (~tmp)
+                       goto found_middle;
+               size -= 64;
+               result += 64;
+       }
+       /* Full-word scan: any word that is not all-ones has a zero bit. */
+       while (size & ~63UL) {
+               if (~(tmp = *(p++)))
+                       goto found_middle;
+               result += 64;
+               size -= 64;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+found_first:
+       /* Trailing partial word: force the bits above size to 1. */
+       tmp |= ~0UL << size;
+       if (tmp == ~0UL)                /* any bits zero? */
+               return result + size;   /* nope */
+found_middle:
+       return result + ffz(tmp);
+}
+EXPORT_SYMBOL(__find_next_zero_bit);
+
+/*
+ * Find next bit in a bitmap reasonably efficiently..
+ *
+ * Returns the index of the first set bit at or after @offset in the
+ * @size-bit bitmap at @addr, or @size when no set bit remains.  This
+ * mirrors __find_next_zero_bit above with the bit polarity inverted.
+ */
+int __find_next_bit(const void *addr, unsigned long size, unsigned long offset)
+{
+       unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
+       unsigned long result = offset & ~63UL;
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset &= 63UL;
+       if (offset) {
+               /* Partial first word: clear the bits below offset. */
+               tmp = *(p++);
+               tmp &= ~0UL << offset;
+               if (size < 64)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= 64;
+               result += 64;
+       }
+       /* Full-word scan: any non-zero word contains a set bit. */
+       while (size & ~63UL) {
+               if ((tmp = *(p++)))
+                       goto found_middle;
+               result += 64;
+               size -= 64;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+  found_first:
+       /* Trailing partial word: clear the bits above size. */
+       tmp &= ~0UL >> (64-size);
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size; /* Nope. */
+  found_middle:
+       return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(__find_next_bit);
diff --git a/arch/mips/au1000/common/cputable.c b/arch/mips/au1000/common/cputable.c
new file mode 100644 (file)
index 0000000..26744b3
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ *  arch/mips/au1000/common/cputable.c
+ *
+ *  Copyright (C) 2004 Dan Malek (dan@embeddededge.com)
+ *     Copied from PowerPC and updated for Alchemy Au1xxx processors.
+ *
+ *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <linux/init.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/* Per-CPU pointer to the matched spec (layout copied from PowerPC);
+ * only slot 0 is ever written, by set_cpuspec() below. */
+struct cpu_spec* cur_cpu_spec[NR_CPUS];
+
+/* With some thought, we can probably use the mask to reduce the
+ * size of the table.
+ */
+/* Each row is { prid_mask, prid_value, name, ?, ? }, matched against
+ * the CP0 PRId register in set_cpuspec().  The final mask==0 row
+ * matches any PRId and so terminates the linear scan.
+ * NOTE(review): the two trailing integers look like per-revision
+ * feature/errata flags — confirm against the struct cpu_spec
+ * definition in the au1000 headers. */
+struct cpu_spec        cpu_specs[] = {
+    { 0xffffffff, 0x00030100, "Au1000 DA", 1, 0 },
+    { 0xffffffff, 0x00030201, "Au1000 HA", 1, 0 },
+    { 0xffffffff, 0x00030202, "Au1000 HB", 1, 0 },
+    { 0xffffffff, 0x00030203, "Au1000 HC", 1, 1 },
+    { 0xffffffff, 0x00030204, "Au1000 HD", 1, 1 },
+    { 0xffffffff, 0x01030200, "Au1500 AB", 1, 1 },
+    { 0xffffffff, 0x01030201, "Au1500 AC", 0, 1 },
+    { 0xffffffff, 0x01030202, "Au1500 AD", 0, 1 },
+    { 0xffffffff, 0x02030200, "Au1100 AB", 1, 1 },
+    { 0xffffffff, 0x02030201, "Au1100 BA", 1, 1 },
+    { 0xffffffff, 0x02030202, "Au1100 BC", 1, 1 },
+    { 0xffffffff, 0x02030203, "Au1100 BD", 0, 1 },
+    { 0xffffffff, 0x02030204, "Au1100 BE", 0, 1 },
+    { 0xffffffff, 0x03030200, "Au1550 AA", 0, 1 },
+    { 0x00000000, 0x00000000, "Unknown Au1xxx", 1, 0 },
+};
+
+/*
+ * Read the CP0 PRId register and point cur_cpu_spec[0] at the matching
+ * row of cpu_specs[].  The table's catch-all terminator (mask 0)
+ * guarantees the scan stops even for unknown revisions.
+ */
+void
+set_cpuspec(void)
+{
+       struct  cpu_spec *sp;
+       u32     prid;
+
+       prid = read_c0_prid();
+       sp = cpu_specs;
+       while ((prid & sp->prid_mask) != sp->prid_value)
+               sp++;
+       cur_cpu_spec[0] = sp;
+}
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
new file mode 100644 (file)
index 0000000..ccfd5fe
--- /dev/null
@@ -0,0 +1,295 @@
+/*
+ * Kernel unwinding support
+ *
+ * (c) 2002-2004 Randolph Chung <tausq@debian.org>
+ *
+ * Derived partially from the IA64 implementation. The PA-RISC
+ * Runtime Architecture Document is also a useful reference to
+ * understand what is happening here
+ */
+
+/*
+ * J. David Anglin writes:
+ *
+ * "You have to adjust the current sp to that at the beginning of the function.
+ * There can be up to two stack additions to allocate the frame in the
+ * prologue.  Similar things happen in the epilogue.  In the presence of
+ * interrupts, you have to be concerned about where you are in the function
+ * and what stack adjustments have taken place."
+ *
+ * For now these cases are not handled, but they should be!
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+
+#include <asm/unwind.h>
+
+/* #define DEBUG 1 */
+#ifdef DEBUG
+#define dbg(x...) printk(x)
+#else
+#define dbg(x...)
+#endif
+
+extern const struct unwind_table_entry __start___unwind[];
+extern const struct unwind_table_entry __stop___unwind[];
+
+static spinlock_t unwind_lock;
+/*
+ * the kernel unwind block is not dynamically allocated so that
+ * we can call unwind_init as early in the bootup process as 
+ * possible (before the slab allocator is initialized)
+ */
+static struct unwind_table kernel_unwind_table;
+static struct unwind_table *unwind_tables, *unwind_tables_end;
+
+
+/*
+ * Binary-search @table for the entry whose [region_start, region_end]
+ * range covers @addr (taken relative to the table's base address).
+ *
+ * NOTE(review): on a miss the loop exits with lo >= hi and the LAST
+ * PROBED entry is returned instead of NULL — callers appear to guard
+ * by checking the table's overall start/end first; confirm this is
+ * sufficient for addresses in gaps between regions.
+ */
+static inline const struct unwind_table_entry *
+find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
+{
+       const struct unwind_table_entry *e = 0;
+       unsigned long lo, hi, mid;
+
+       addr -= table->base_addr;
+
+       for (lo = 0, hi = table->length; lo < hi; )
+       {
+               mid = (lo + hi) / 2;
+               e = &table->table[mid];
+               if (addr < e->region_start)
+                       hi = mid;
+               else if (addr > e->region_end)
+                       lo = mid + 1;
+               else
+                       break;
+       }
+
+       return e;
+}
+
+/*
+ * Locate the unwind entry covering @addr: the static kernel table is
+ * checked first, then the linked list of module tables.  Returns NULL
+ * when no table's range covers the address.
+ */
+static inline const struct unwind_table_entry *
+find_unwind_entry(unsigned long addr)
+{
+       struct unwind_table *table = unwind_tables;
+       const struct unwind_table_entry *e = NULL;
+
+       if (addr >= kernel_unwind_table.start && 
+           addr <= kernel_unwind_table.end)
+               e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
+       else
+               for (; table; table = table->next)
+               {
+                       if (addr >= table->start && 
+                           addr <= table->end)
+                               e = find_unwind_entry_in_table(table, addr);
+                       if (e)
+                               break;
+               }
+
+       return e;
+}
+
+/*
+ * Fill in @table to describe the unwind entries in
+ * [table_start, table_end).  The entries are NOT copied —
+ * table->table aliases @table_start — and the covered start/end
+ * addresses are made absolute by adding @base_addr.
+ */
+static void
+unwind_table_init(struct unwind_table *table, const char *name,
+                 unsigned long base_addr, unsigned long gp,
+                 const void *table_start, const void *table_end)
+{
+       const struct unwind_table_entry *start = table_start;
+       const struct unwind_table_entry *end = table_end - 1;
+
+       table->name = name;
+       table->base_addr = base_addr;
+       table->gp = gp;
+       table->start = base_addr + start->region_start;
+       table->end = base_addr + end->region_end;
+       table->table = (struct unwind_table_entry *)table_start;
+       table->length = end - start;
+       table->next = NULL;
+}
+
+/*
+ * Allocate, initialize and publish an unwind table (used for loadable
+ * modules).  The new table is appended to the global list under
+ * unwind_lock.  Returns the table pointer, or 0 (NULL) if the
+ * allocation failed.
+ *
+ * NOTE(review): unwind_lock is declared above without an explicit
+ * initializer — confirm it is set to SPIN_LOCK_UNLOCKED before first
+ * use on this kernel version.
+ */
+void *
+unwind_table_add(const char *name, unsigned long base_addr, 
+                unsigned long gp,
+                 const void *start, const void *end)
+{
+       struct unwind_table *table;
+       unsigned long flags;
+
+       table = kmalloc(sizeof(struct unwind_table), GFP_USER);
+       if (table == NULL)
+               return 0;
+       unwind_table_init(table, name, base_addr, gp, start, end);
+       spin_lock_irqsave(&unwind_lock, flags);
+       if (unwind_tables)
+       {
+               unwind_tables_end->next = table;
+               unwind_tables_end = table;
+       }
+       else
+       {
+               unwind_tables = unwind_tables_end = table;
+       }
+       spin_unlock_irqrestore(&unwind_lock, flags);
+
+       return table;
+}
+
+/* Called from setup_arch to import the kernel unwind info */
+/*
+ * Build the static kernel_unwind_table from the linker-provided
+ * __start___unwind/__stop___unwind section bounds.  Kept static (not
+ * kmalloc'd) so it can run before the slab allocator is up; the
+ * global pointer is read directly from register r27.
+ */
+static int unwind_init(void)
+{
+       long start, stop;
+       register unsigned long gp __asm__ ("r27");
+
+       start = (long)&__start___unwind[0];
+       stop = (long)&__stop___unwind[0];
+
+       printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", 
+           start, stop,
+           (stop - start) / sizeof(struct unwind_table_entry));
+
+       unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
+                         gp, 
+                         &__start___unwind[0], &__stop___unwind[0]);
+#if 0
+       {
+               int i;
+               for (i = 0; i < 10; i++)
+               {
+                       printk("region 0x%x-0x%x\n", 
+                               __start___unwind[i].region_start, 
+                               __start___unwind[i].region_end);
+               }
+       }
+#endif
+       return 0;
+}
+
+/*
+ * Compute the previous frame (prev_sp/prev_ip) for the frame in @info.
+ * If no unwind entry covers info->ip, the stack is walked blindly in
+ * 64-byte-aligned frames until a saved rp inside kernel text turns up;
+ * otherwise the function prologue is decoded instruction-by-
+ * instruction to recover the frame size and the rp save slot.
+ */
+static void unwind_frame_regs(struct unwind_frame_info *info)
+{
+       const struct unwind_table_entry *e;
+       unsigned long npc;
+       unsigned int insn;
+       long frame_size = 0;
+       int looking_for_rp, rpoffset = 0;
+
+       e = find_unwind_entry(info->ip);
+       if (!e) {
+               unsigned long sp;
+               extern char _stext[], _etext[];
+
+               dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);
+
+               /* Since we are doing the unwinding blind, we don't know if
+                  we are adjusting the stack correctly or extracting the rp
+                  correctly. The rp is checked to see if it belongs to the
+                  kernel text section, if not we assume we don't have a 
+                  correct stack frame and we continue to unwind the stack.
+                  This is not quite correct, and will fail for loadable
+                  modules. */
+               sp = info->sp & ~63;
+               do {
+                       info->prev_sp = sp - 64;
+
+                       /* FIXME: what happens if we unwind too far so that 
+                          sp no longer falls in a mapped kernel page? */
+                       /* rp save slot per ABI: sp-20 (32-bit) / sp-16 (64-bit). */
+#ifndef __LP64__
+                       info->prev_ip = *(unsigned long *)(info->prev_sp - 20);
+#else
+                       info->prev_ip = *(unsigned long *)(info->prev_sp - 16);
+#endif
+
+                       sp = info->prev_sp;
+               } while (info->prev_ip < (unsigned long)_stext ||
+                        info->prev_ip > (unsigned long)_etext);
+       } else {
+
+               dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, Save_RP = %d size = %u\n",
+                               e->region_start, e->region_end, e->Save_SP, e->Save_RP, e->Total_frame_size);
+
+               looking_for_rp = e->Save_RP;
+
+               /* Scan the prologue (never past info->ip) until the
+                * advertised frame size (Total_frame_size is in 8-byte
+                * units, hence << 3) is accounted for and, if the entry
+                * says rp was saved, its save slot has been seen. */
+               for (npc = e->region_start; 
+                    (frame_size < (e->Total_frame_size << 3) || looking_for_rp) && 
+                    npc < info->ip; 
+                    npc += 4) {
+
+                       insn = *(unsigned int *)npc;
+
+                       if ((insn & 0xffffc000) == 0x37de0000 ||
+                           (insn & 0xffe00000) == 0x6fc00000) {
+                               /* ldo X(sp), sp, or stwm X,D(sp) */
+                               frame_size += (insn & 0x1 ? -1 << 13 : 0) | 
+                                       ((insn & 0x3fff) >> 1);
+                       } else if ((insn & 0xffe00008) == 0x7ec00008) {
+                               /* std,ma X,D(sp) */
+                               frame_size += (insn & 0x1 ? -1 << 13 : 0) | 
+                                       (((insn >> 4) & 0x3ff) << 3);
+                       } else if (insn == 0x6bc23fd9) { 
+                               /* stw rp,-20(sp) */
+                               rpoffset = 20;
+                               looking_for_rp = 0;
+                       } else if (insn == 0x0fc212c1) {
+                               /* std rp,-16(sr0,sp) */
+                               rpoffset = 16;
+                               looking_for_rp = 0;
+                       }
+               }
+
+               info->prev_sp = info->sp - frame_size;
+               if (rpoffset)
+                       info->prev_ip = *(unsigned long *)(info->prev_sp - rpoffset);
+       }
+}
+
+/*
+ * Seed an unwind: zero @info and start from the kernel sp/pc saved in
+ * @regs for task @t.  @regs is accepted but only ksp/kpc are used.
+ */
+void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t, 
+                      struct pt_regs *regs)
+{
+       memset(info, 0, sizeof(struct unwind_frame_info));
+       info->t = t;
+       info->sp = regs->ksp;
+       info->ip = regs->kpc;
+
+       dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n", (int)t->pid, info->sp, info->ip);
+}
+
+/* Start an unwind of a sleeping task from its saved thread registers. */
+void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
+{
+       struct pt_regs *regs = &t->thread.regs;
+       unwind_frame_init(info, t, regs);
+}
+
+/*
+ * Step @next_frame one frame up the stack.  On success sp/ip are
+ * advanced to the previous frame and 0 is returned; returns -1 when
+ * no previous frame could be determined (prev_sp or prev_ip is 0).
+ */
+int unwind_once(struct unwind_frame_info *next_frame)
+{
+       unwind_frame_regs(next_frame);
+
+       if (next_frame->prev_sp == 0 ||
+           next_frame->prev_ip == 0)
+               return -1;
+
+       next_frame->sp = next_frame->prev_sp;
+       next_frame->ip = next_frame->prev_ip;
+       next_frame->prev_sp = 0;
+       next_frame->prev_ip = 0;
+
+       dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n", (int)next_frame->t->pid, next_frame->sp, next_frame->ip);
+
+       return 0;
+}
+
+/*
+ * Keep unwinding until a step fails or ip stops looking like a kernel
+ * address.  Returns the last unwind_once() result.
+ * NOTE(review): the (ip & 3) test appears to rely on the PA-RISC
+ * convention that user-mode IAOQ values carry a non-zero privilege
+ * level in the low two bits — confirm.
+ */
+int unwind_to_user(struct unwind_frame_info *info)
+{
+       int ret;
+       
+       do {
+               ret = unwind_once(info);
+       } while (!ret && !(info->ip & 3));
+
+       return ret;
+}
+
+module_init(unwind_init);
diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c
new file mode 100644 (file)
index 0000000..c859f11
--- /dev/null
@@ -0,0 +1,439 @@
+/*
+ *  PowerPC version derived from arch/arm/mm/consistent.c
+ *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
+ *
+ *  Copyright (C) 2000 Russell King
+ *
+ * Consistent memory allocators.  Used for DMA devices that want to
+ * share uncached memory with the processor core.  The function return
+ * is the virtual address and 'dma_handle' is the physical address.
+ * Mostly stolen from the ARM port, with some changes for PowerPC.
+ *                                             -- Dan
+ *
+ * Reorganized to get rid of the arch-specific consistent_* functions
+ * and provide non-coherent implementations for the DMA API. -Matt
+ *
+ * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
+ * implementation. This is pulled straight from ARM and barely
+ * modified. -Matt
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <asm/io.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/mmu.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+
+int map_page(unsigned long va, phys_addr_t pa, int flags);
+
+#include <asm/tlbflush.h>
+
+/*
+ * This address range defaults to a value that is safe for all
+ * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
+ * can be further configured for specific applications under
+ * the "Advanced Setup" menu. -Matt
+ */
+#define CONSISTENT_BASE        (CONFIG_CONSISTENT_START)
+#define CONSISTENT_END (CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
+#define CONSISTENT_OFFSET(x)   (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+
+/*
+ * This is the page table (2MB) covering uncached, DMA consistent allocations
+ */
+static pte_t *consistent_pte;
+static spinlock_t consistent_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * VM region handling support.
+ *
+ * This should become something generic, handling VM region allocations for
+ * vmalloc and similar (ioremap, module space, etc).
+ *
+ * I envisage vmalloc()'s supporting vm_struct becoming:
+ *
+ *  struct vm_struct {
+ *    struct vm_region region;
+ *    unsigned long    flags;
+ *    struct page      **pages;
+ *    unsigned int     nr_pages;
+ *    unsigned long    phys_addr;
+ *  };
+ *
+ * get_vm_area() would then call vm_region_alloc with an appropriate
+ * struct vm_region head (eg):
+ *
+ *  struct vm_region vmalloc_head = {
+ *     .vm_list        = LIST_HEAD_INIT(vmalloc_head.vm_list),
+ *     .vm_start       = VMALLOC_START,
+ *     .vm_end         = VMALLOC_END,
+ *  };
+ *
+ * However, vmalloc_head.vm_start is variable (typically, it is dependent on
+ * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
+ * would have to initialise this each time prior to calling vm_region_alloc().
+ */
+struct vm_region {
+       struct list_head        vm_list;
+       unsigned long           vm_start;
+       unsigned long           vm_end;
+};
+
+static struct vm_region consistent_head = {
+       .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
+       .vm_start       = CONSISTENT_BASE,
+       .vm_end         = CONSISTENT_END,
+};
+
/*
 * Carve a `size'-byte hole out of the address window described by
 * `head', using a first-fit scan of the (address-ordered) region list,
 * and record it in a freshly kmalloc'd struct vm_region.
 *
 * Takes consistent_lock itself.  Returns the new region, or NULL when
 * the bookkeeping structure cannot be allocated or no gap is large
 * enough.
 */
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, int gfp)
{
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
        struct vm_region *c, *new;

        new = kmalloc(sizeof(struct vm_region), gfp);
        if (!new)
                goto out;

        spin_lock_irqsave(&consistent_lock, flags);

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if ((addr + size) < addr)       /* wrapped around the address space */
                        goto nospc;
                if ((addr + size) <= c->vm_start)
                        goto found;             /* the gap before this region fits */
                addr = c->vm_end;               /* otherwise try just after it */
                if (addr > end)
                        goto nospc;
        }
        /* NOTE: if the loop runs off the end, we fall into `found' with `c'
         * aliasing the list head, so list_add_tail() appends the new region
         * at the tail -- intentional (same pattern as the ARM original this
         * file is derived from). */

 found:
        /*
         * Insert this entry _before_ the one we found.
         */
        list_add_tail(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;

        spin_unlock_irqrestore(&consistent_lock, flags);
        return new;

 nospc:
        spin_unlock_irqrestore(&consistent_lock, flags);
        kfree(new);
 out:
        return NULL;
}
+
+static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+{
+       struct vm_region *c;
+
+       list_for_each_entry(c, &head->vm_list, vm_list) {
+               if (c->vm_start == addr)
+                       goto out;
+       }
+       c = NULL;
+ out:
+       return c;
+}
+
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 *
 * The backing pages come from alloc_pages(); the page-aligned `size'
 * bytes are then remapped uncached into the CONSISTENT_BASE window and
 * any surplus pages of the 2^order allocation are freed straight back.
 * Returns NULL on any failure.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp)
{
        struct page *page;
        struct vm_region *c;
        unsigned long order;
        u64 mask = 0x00ffffff, limit; /* ISA default */

        if (!consistent_pte) {
                /* dma_alloc_init() has not run yet */
                printk(KERN_ERR "%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
        }

        size = PAGE_ALIGN(size);
        limit = (mask + 1) & ~mask;
        if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
                printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
                       size, mask);
                return NULL;
        }

        order = get_order(size);

        /* NOTE(review): `mask' is hard-wired to the 24-bit ISA value above,
         * so this test always forces GFP_DMA -- confirm that is intended. */
        if (mask != 0xffffffff)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                goto no_page;

        /*
         * Invalidate any data that might be lurking in the
         * kernel direct-mapped region for device DMA.
         */
        {
                unsigned long kaddr = (unsigned long)page_address(page);
                memset(page_address(page), 0, size);
                flush_dcache_range(kaddr, kaddr + size);
        }

        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = vm_region_alloc(&consistent_head, size,
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
                struct page *end = page + (1 << order);

                /*
                 * Set the "dma handle"
                 */
                *handle = page_to_bus(page);

                /* map each page uncached; note `size' counts down to 0 here */
                do {
                        BUG_ON(!pte_none(*pte));

                        set_page_count(page, 1);
                        SetPageReserved(page);  /* keep the VM away from it */
                        set_pte(pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
                        page++;
                        pte++;
                } while (size -= PAGE_SIZE);

                /*
                 * Free the otherwise unused pages.
                 */
                while (page < end) {
                        set_page_count(page, 1);
                        __free_page(page);
                        page++;
                }

                return (void *)c->vm_start;
        }

        if (page)
                __free_pages(page, order);
 no_page:
        return NULL;
}
+
/*
 * free a page as defined by the above mapping.
 *
 * `vaddr' must be exactly what __dma_alloc_coherent() returned: the
 * uncached PTEs are torn down, the pages behind them released, and the
 * vm_region bookkeeping entry removed.  A mismatched `size' is repaired
 * from the region record (with a warning) rather than trusted.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
        struct vm_region *c;
        unsigned long flags;
        pte_t *ptep;

        size = PAGE_ALIGN(size);

        spin_lock_irqsave(&consistent_lock, flags);

        c = vm_region_find(&consistent_head, (unsigned long)vaddr);
        if (!c)
                goto no_area;

        if ((c->vm_end - c->vm_start) != size) {
                /* caller's size disagrees with the region: believe the region */
                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
        }

        ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
        /* clear each PTE and hand its page back to the allocator */
        do {
                pte_t pte = ptep_get_and_clear(ptep);
                unsigned long pfn;

                ptep++;

                if (!pte_none(pte) && pte_present(pte)) {
                        pfn = pte_pfn(pte);

                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);
                                ClearPageReserved(page);

                                __free_page(page);
                                continue;
                        }
                }

                /* a hole or bogus pfn inside a supposedly-mapped region */
                printk(KERN_CRIT "%s: bad page in kernel page table\n",
                       __func__);
        } while (size -= PAGE_SIZE);

        flush_tlb_kernel_range(c->vm_start, c->vm_end);

        list_del(&c->vm_list);

        spin_unlock_irqrestore(&consistent_lock, flags);

        kfree(c);
        return;

 no_area:
        spin_unlock_irqrestore(&consistent_lock, flags);
        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
               __func__, vaddr);
        dump_stack();
}
/* NOTE(review): this exports `dma_free_coherent' while the function defined
 * in this file is `__dma_free_coherent' -- confirm a wrapper with that name
 * exists (e.g. an inline in a header makes this export pointless, and a
 * missing definition would fail to link). */
EXPORT_SYMBOL(dma_free_coherent);
+
/*
 * Initialise the consistent memory allocation.
 *
 * Pre-allocates the single kernel page table that maps the (2MB)
 * CONSISTENT_BASE window; __dma_alloc_coherent() refuses to run until
 * consistent_pte has been set here.
 */
static int __init dma_alloc_init(void)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int ret = 0;

        spin_lock(&init_mm.page_table_lock);

        do {
                pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
                pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
                if (!pmd) {
                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
                /* the consistent window is expected to be unmapped so far */
                WARN_ON(!pmd_none(*pmd));

                pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }

                consistent_pte = pte;
        } while (0);    /* do/while(0) exists only so `break' can bail out */

        spin_unlock(&init_mm.page_table_lock);

        return ret;
}

core_initcall(dma_alloc_init);
+
+/*
+ * make an area consistent.
+ */
+void __dma_sync(void *vaddr, size_t size, int direction)
+{
+       unsigned long start = (unsigned long)vaddr;
+       unsigned long end   = start + size;
+
+       switch (direction) {
+       case DMA_NONE:
+               BUG();
+       case DMA_FROM_DEVICE:   /* invalidate only */
+               invalidate_dcache_range(start, end);
+               break;
+       case DMA_TO_DEVICE:             /* writeback only */
+               clean_dcache_range(start, end);
+               break;
+       case DMA_BIDIRECTIONAL: /* writeback and invalidate */
+               flush_dcache_range(start, end);
+               break;
+       }
+}
+
+#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 *
 * NOTE(review): the first-segment arithmetic assumes offset < PAGE_SIZE
 * (seg_size would go negative otherwise) -- confirm callers guarantee it.
 */
static inline void __dma_sync_page_highmem(struct page *page,
                unsigned long offset, size_t size, int direction)
{
        /* bytes of the first page that actually belong to the buffer */
        size_t seg_size = min((size_t)PAGE_SIZE, size) - offset;
        /* running total of bytes handled so far */
        size_t cur_size = seg_size;
        unsigned long flags, start, seg_offset = offset;
        /* number of pages the buffer touches */
        int nr_segs = PAGE_ALIGN(size + (PAGE_SIZE - offset))/PAGE_SIZE;
        int seg_nr = 0;

        /* irqs off across the loop -- presumably to protect the per-cpu
         * KM_PPC_SYNC_PAGE kmap slot from interrupt context; verify */
        local_irq_save(flags);

        do {
                start = (unsigned long)kmap_atomic(page + seg_nr,
                                KM_PPC_SYNC_PAGE) + seg_offset;

                /* Sync this buffer segment */
                __dma_sync((void *)start, seg_size, direction);
                kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
                seg_nr++;

                /* Calculate next buffer segment size */
                seg_size = min((size_t)PAGE_SIZE, size - cur_size);

                /* Add the segment size to our running total */
                cur_size += seg_size;
                seg_offset = 0; /* pages after the first start at offset 0 */
        } while (seg_nr < nr_segs);

        local_irq_restore(flags);
}
+#endif /* CONFIG_HIGHMEM */
+
/*
 * __dma_sync_page makes memory consistent. identical to __dma_sync, but
 * takes a struct page instead of a virtual address
 */
void __dma_sync_page(struct page *page, unsigned long offset,
        size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
        /* the page may have no permanent mapping: kmap each page in turn */
        __dma_sync_page_highmem(page, offset, size, direction);
#else
        /* lowmem is always direct-mapped: sync the linear range */
        unsigned long start = (unsigned long)page_address(page) + offset;
        __dma_sync((void *)start, size, direction);
#endif
}
diff --git a/arch/ppc/kernel/vecemu.c b/arch/ppc/kernel/vecemu.c
new file mode 100644 (file)
index 0000000..1430ef5
--- /dev/null
@@ -0,0 +1,346 @@
+/*
+ * Routines to emulate some Altivec/VMX instructions, specifically
+ * those that can trap when given denormalized operands in Java mode.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+
+/* Functions in vector.S */
+extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
+extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b);
+extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
+extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c);
+extern void vrefp(vector128 *dst, vector128 *src);
+extern void vrsqrtefp(vector128 *dst, vector128 *src);
+extern void vexptep(vector128 *dst, vector128 *src);
+
/* exp2s[i] = 2^(i/8) * 2^23: mantissa lookup table for eexp2() below */
static unsigned int exp2s[8] = {
        0x800000,
        0x8b95c2,
        0x9837f0,
        0xa5fed7,
        0xb504f3,
        0xc5672a,
        0xd744fd,
        0xeac0c7
};
+
/*
 * Computes an estimate of 2^x.  The `s' argument is the 32-bit
 * single-precision floating-point representation of x.
 *
 * Method: convert x into 9.23 fixed point; the integer part becomes the
 * result exponent, the top 3 fraction bits index exp2s[], and the
 * remaining 20 bits are used for linear interpolation.
 */
static unsigned int eexp2(unsigned int s)
{
        int exp, pwr;
        unsigned int mant, frac;

        /* extract exponent field from input */
        exp = ((s >> 23) & 0xff) - 127;
        if (exp > 7) {
                /* check for NaN input */
                if (exp == 128 && (s & 0x7fffff) != 0)
                        return s | 0x400000;    /* return QNaN */
                /* 2^-big = 0, 2^+big = +Inf */
                return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */
        }
        if (exp < -23)
                return 0x3f800000;      /* 1.0 */

        /* convert to fixed point integer in 9.23 representation */
        pwr = (s & 0x7fffff) | 0x800000;
        if (exp > 0)
                pwr <<= exp;
        else
                pwr >>= -exp;
        if (s & 0x80000000)
                pwr = -pwr;

        /* extract integer part, which becomes exponent part of result */
        exp = (pwr >> 23) + 126;
        if (exp >= 254)
                return 0x7f800000;      /* overflow: +Inf */
        if (exp < -23)
                return 0;               /* underflows even the denormals */

        /* table lookup on top 3 bits of fraction to get mantissa */
        mant = exp2s[(pwr >> 20) & 7];

        /* linear interpolation using remaining 20 bits of fraction;
         * mulhwu yields the high word of the 32x32 unsigned product,
         * 0x172b83ff ~ (2^(1/8) - 1) * 2^32 (the table step ratio) */
        asm("mulhwu %0,%1,%2" : "=r" (frac)
            : "r" (pwr << 12), "r" (0x172b83ff));
        asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant));
        mant += frac;

        if (exp >= 0)
                return mant + (exp << 23);

        /* denormalized result: shift down, rounding to nearest */
        exp = -exp;
        mant += 1 << (exp - 1);
        return mant >> exp;
}
+
/*
 * Computes an estimate of log_2(x).  The `s' argument is the 32-bit
 * single-precision floating-point representation of x.
 *
 * Method: normalise the mantissa into [1, 2); extract three further
 * exponent bits by conditionally multiplying with 2^-0.5, 2^-0.25 and
 * 2^-0.125; linearly interpolate what is left; then re-encode the
 * fixed-point log as an IEEE single.
 */
static unsigned int elog2(unsigned int s)
{
        int exp, mant, lz, frac;

        exp = s & 0x7f800000;
        mant = s & 0x7fffff;
        if (exp == 0x7f800000) {        /* Inf or NaN */
                if (mant != 0)
                        s |= 0x400000;  /* turn NaN into QNaN */
                return s;
        }
        if ((exp | mant) == 0)          /* +0 or -0 */
                return 0xff800000;      /* return -Inf */

        if (exp == 0) {
                /* denormalized: normalise mantissa, adjust exponent */
                asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant));
                mant <<= lz - 8;
                exp = (-118 - lz) << 23;
        } else {
                mant |= 0x800000;
                exp -= 127 << 23;
        }

        /* peel three more bits of the log out of the mantissa */
        if (mant >= 0xb504f3) {                         /* 2^0.5 * 2^23 */
                exp |= 0x400000;                        /* 0.5 * 2^23 */
                asm("mulhwu %0,%1,%2" : "=r" (mant)
                    : "r" (mant), "r" (0xb504f334));    /* 2^-0.5 * 2^32 */
        }
        if (mant >= 0x9837f0) {                         /* 2^0.25 * 2^23 */
                exp |= 0x200000;                        /* 0.25 * 2^23 */
                asm("mulhwu %0,%1,%2" : "=r" (mant)
                    : "r" (mant), "r" (0xd744fccb));    /* 2^-0.25 * 2^32 */
        }
        if (mant >= 0x8b95c2) {                         /* 2^0.125 * 2^23 */
                exp |= 0x100000;                        /* 0.125 * 2^23 */
                asm("mulhwu %0,%1,%2" : "=r" (mant)
                    : "r" (mant), "r" (0xeac0c6e8));    /* 2^-0.125 * 2^32 */
        }
        if (mant > 0x800000) {                          /* 1.0 * 2^23 */
                /* calculate (mant - 1) * 1.381097463 */
                /* 1.381097463 == 0.125 / (2^0.125 - 1) */
                asm("mulhwu %0,%1,%2" : "=r" (frac)
                    : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a));
                exp += frac;
        }
        /* re-encode the 9.23 fixed-point log as an IEEE single */
        s = exp & 0x80000000;
        if (exp != 0) {
                if (s)
                        exp = -exp;
                asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp));
                lz = 8 - lz;
                if (lz > 0)
                        exp >>= lz;
                else if (lz < 0)
                        exp <<= -lz;
                s += ((lz + 126) << 23) + exp;
        }
        return s;
}
+
#define VSCR_SAT       1        /* saturation status bit of the VSCR */

/*
 * vctsxs helper: convert the IEEE single given by raw bits `x', scaled
 * by 2^scale, to a signed 32-bit integer with truncation toward zero.
 * Out-of-range values saturate and set VSCR_SAT in *vscrp, except that
 * exactly -2^31 is representable and does not saturate.
 */
static int ctsxs(unsigned int x, int scale, unsigned int *vscrp)
{
        int e = (x >> 23) & 0xff;
        int frac = x & 0x7fffff;

        if (e == 255 && frac != 0)
                return 0;                       /* NaN converts to 0 */
        e = e - 127 + scale;
        if (e < 0)
                return 0;                       /* |value| < 1: truncate to 0 */
        if (e >= 31) {
                /* saturate, unless the result would be -2^31 */
                if (x + (scale << 23) != 0xcf000000)
                        *vscrp |= VSCR_SAT;
                return (x & 0x80000000)? 0x80000000: 0x7fffffff;
        }
        frac |= 0x800000;                       /* restore the implicit bit */
        frac = (frac << 7) >> (30 - e);         /* align to the binary point */
        return (x & 0x80000000)? -frac: frac;
}

/*
 * vctuxs helper: as above but to an unsigned 32-bit integer.  Negative
 * inputs of magnitude >= 1 clamp to 0 and overlarge ones to 0xffffffff,
 * both setting VSCR_SAT.
 */
static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp)
{
        unsigned int frac = x & 0x7fffff;
        int raw = (x >> 23) & 0xff;
        int e;

        if (raw == 255 && frac != 0)
                return 0;                       /* NaN converts to 0 */
        e = raw - 127 + scale;
        if (e < 0)
                return 0;                       /* |value| < 1: truncate to 0 */
        if (x & 0x80000000) {
                *vscrp |= VSCR_SAT;             /* negative: clamp to 0 */
                return 0;
        }
        if (e >= 32) {
                *vscrp |= VSCR_SAT;             /* too large: clamp to max */
                return 0xffffffff;
        }
        frac |= 0x800000;                       /* restore the implicit bit */
        return (frac << 8) >> (31 - e);
}
+
/*
 * Round to floating integer, toward zero: returns the raw bits of `x'
 * with its fraction bits chopped off.  NaNs come back quieted; Inf and
 * already-integral values are returned unchanged.
 */
static unsigned int rfiz(unsigned int x)
{
        unsigned int frac_mask;
        int e = ((x >> 23) & 0xff) - 127;

        if (e == 128 && (x & 0x7fffff) != 0)
                return x | 0x400000;            /* NaN: quiet it */
        if (e < 0)
                return x & 0x80000000;          /* |x| < 1.0: +/-0 */
        if (e >= 23)
                return x;                       /* integral already (or Inf) */
        frac_mask = 0x7fffff >> e;
        return x & ~frac_mask;                  /* drop the fraction bits */
}
+
/*
 * Round to floating integer, toward +/- infinity (i.e. away from zero
 * for finite non-zero fractions).  NaNs are quieted; Inf, +/-0 and
 * integral values are returned unchanged.
 */
static unsigned int rfii(unsigned int x)
{
        unsigned int frac_mask;
        int e = ((x >> 23) & 0xff) - 127;

        if (e == 128 && (x & 0x7fffff) != 0)
                return x | 0x400000;            /* NaN -> QNaN */
        if (e >= 23 || (x & 0x7fffffff) == 0)
                return x;                       /* integral, Inf, or +/-0 */
        if (e < 0)
                /* 0 < |x| < 1.0 rounds to +/- 1.0 */
                return (x & 0x80000000) | 0x3f800000;
        frac_mask = 0x7fffff >> e;
        /* the add may carry into the exponent - that's OK,
           it can't overflow into the sign bit */
        return (x + frac_mask) & ~frac_mask;
}
+
/*
 * Round to floating integer, to nearest (halves round away from zero:
 * the magnitude gets 0.5 added, then the fraction is chopped).
 * NaNs are quieted; Inf and integral values come back unchanged.
 */
static unsigned int rfin(unsigned int x)
{
        unsigned int half;
        int e = ((x >> 23) & 0xff) - 127;

        if (e == 128 && (x & 0x7fffff) != 0)
                return x | 0x400000;            /* NaN -> QNaN */
        if (e >= 23)
                return x;                       /* integral already (or Inf) */
        if (e == -1)
                /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */
                return (x & 0x80000000) | 0x3f800000;
        if (e < -1)
                return x & 0x80000000;          /* |x| < 0.5 -> +/-0 */
        half = 0x400000 >> e;
        return (x + half) & ~(0x7fffff >> e);
}
+
/*
 * Emulate one AltiVec instruction at regs->nip for the current task.
 *
 * Only the floating-point vector ops that can trap on denormalised
 * operands in Java mode are handled.  Returns 0 on success, -EFAULT if
 * the instruction cannot be read from user memory, and -EINVAL for
 * anything this emulator does not handle (caller decides what to do).
 */
int
emulate_altivec(struct pt_regs *regs)
{
        unsigned int instr, i;
        unsigned int va, vb, vc, vd;
        vector128 *vrs;

        if (get_user(instr, (unsigned int *) regs->nip))
                return -EFAULT;
        if ((instr >> 26) != 4)
                return -EINVAL;         /* not an altivec instruction */
        /* decode the four 5-bit register fields of the VX/VA forms */
        vd = (instr >> 21) & 0x1f;
        va = (instr >> 16) & 0x1f;
        vb = (instr >> 11) & 0x1f;
        vc = (instr >> 6) & 0x1f;

        vrs = current->thread.vr;
        /* dispatch on the minor opcode (low 6 bits) */
        switch (instr & 0x3f) {
        case 10:
                /* VX-form estimates/conversions: `vc' selects the op */
                switch (vc) {
                case 0: /* vaddfp */
                        vaddfp(&vrs[vd], &vrs[va], &vrs[vb]);
                        break;
                case 1: /* vsubfp */
                        vsubfp(&vrs[vd], &vrs[va], &vrs[vb]);
                        break;
                case 4: /* vrefp */
                        vrefp(&vrs[vd], &vrs[vb]);
                        break;
                case 5: /* vrsqrtefp */
                        vrsqrtefp(&vrs[vd], &vrs[vb]);
                        break;
                case 6: /* vexptefp */
                        for (i = 0; i < 4; ++i)
                                vrs[vd].u[i] = eexp2(vrs[vb].u[i]);
                        break;
                case 7: /* vlogefp */
                        for (i = 0; i < 4; ++i)
                                vrs[vd].u[i] = elog2(vrs[vb].u[i]);
                        break;
                case 8:         /* vrfin - round to nearest */
                        for (i = 0; i < 4; ++i)
                                vrs[vd].u[i] = rfin(vrs[vb].u[i]);
                        break;
                case 9:         /* vrfiz - round toward zero */
                        for (i = 0; i < 4; ++i)
                                vrs[vd].u[i] = rfiz(vrs[vb].u[i]);
                        break;
                case 10:        /* vrfip - round toward +Inf */
                        for (i = 0; i < 4; ++i) {
                                u32 x = vrs[vb].u[i];
                                x = (x & 0x80000000)? rfiz(x): rfii(x);
                                vrs[vd].u[i] = x;
                        }
                        break;
                case 11:        /* vrfim - round toward -Inf */
                        for (i = 0; i < 4; ++i) {
                                u32 x = vrs[vb].u[i];
                                x = (x & 0x80000000)? rfii(x): rfiz(x);
                                vrs[vd].u[i] = x;
                        }
                        break;
                case 14:        /* vctuxs; `va' is the UIMM scale field here,
                                 * vscr.u[3] the VSCR word carrying SAT */
                        for (i = 0; i < 4; ++i)
                                vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
                                                &current->thread.vscr.u[3]);
                        break;
                case 15:        /* vctsxs; `va' is the UIMM scale field */
                        for (i = 0; i < 4; ++i)
                                vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
                                                &current->thread.vscr.u[3]);
                        break;
                default:
                        return -EINVAL;
                }
                break;
        case 46:        /* vmaddfp */
                /* NOTE(review): AltiVec vmaddfp computes (vA*vC)+vB; check
                 * this argument order against the vmaddfp helper in
                 * vector.S, which multiplies its 2nd and 3rd operands */
                vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
                break;
        case 47:        /* vnmsubfp */
                vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}
diff --git a/arch/ppc/kernel/vector.S b/arch/ppc/kernel/vector.S
new file mode 100644 (file)
index 0000000..d8fe6b5
--- /dev/null
@@ -0,0 +1,217 @@
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+
+/*
+ * The routines below are in assembler so we can closely control the
+ * usage of floating-point registers.  These routines must be called
+ * with preempt disabled.
+ */
+       .data
+fpzero:
+       .long   0
+fpone:
+       .long   0x3f800000      /* 1.0 in single-precision FP */
+fphalf:
+       .long   0x3f000000      /* 0.5 in single-precision FP */
+
+       .text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 *
 * Saves fr0/fr1/fr31 into the CALLER's stack frame (offsets 24/16/8),
 * keeps the old MSR in r10 and the old FPSCR in fr31.  Must be paired
 * with fpdisable below, with r10 left untouched in between.
 */
fpenable:
	mfmsr	r10		/* remember MSR for fpdisable */
	ori	r11,r10,MSR_FP
	mtmsr	r11		/* turn the FPU on */
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	lis	r11,fpzero@ha
	mffs	fr31		/* stash the current FPSCR in fr31 */
	lfs	fr1,fpzero@l(r11)
	mtfsf	0xff,fr1	/* FPSCR := 0 */
	blr
+
/*
 * Undo fpenable: restore FPSCR from fr31, reload fr0/fr1/fr31 from the
 * caller's frame, and put back the MSR that fpenable saved in r10.
 */
fpdisable:
	mtfsf	0xff,fr31	/* restore the caller's FPSCR */
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10		/* MSR as saved by fpenable */
	isync
	blr
+
/*
 * Vector add, floating point.
 * dst[i] = a[i] + b[i] for the four 32-bit elements, done one at a
 * time with scalar fadds.  r3 -> dst, r4/r5 -> sources.
 */
	.globl	vaddfp
vaddfp:
	stwu	r1,-32(r1)	/* frame for fpenable's fr0/fr1/fr31 saves */
	mflr	r0
	stw	r0,36(r1)	/* LR save word in the caller's frame */
	bl	fpenable
	li	r0,4
	mtctr	r0		/* four elements */
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4		/* next word */
	bdnz	1b
	bl	fpdisable
	lwz	r0,36(r1)
	mtlr	r0
	addi	r1,r1,32
	blr
+
/*
 * Vector subtract, floating point.
 * dst[i] = a[i] - b[i]; r3 -> dst, r4/r5 -> sources.
 */
	.globl	vsubfp
vsubfp:
	stwu	r1,-32(r1)	/* frame for fpenable's saves */
	mflr	r0
	stw	r0,36(r1)	/* LR save word in the caller's frame */
	bl	fpenable
	li	r0,4
	mtctr	r0		/* four elements */
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4		/* next word */
	bdnz	1b
	bl	fpdisable
	lwz	r0,36(r1)
	mtlr	r0
	addi	r1,r1,32
	blr
+
/*
 * Vector multiply and add, floating point.
 * Computes dst[i] = arg2[i] * arg3[i] + arg4[i] with r3 -> dst and
 * r4/r5/r6 -> the three sources.
 *
 * NOTE(review): the AltiVec vmaddfp instruction is (vA*vC)+vB, and the
 * caller in vecemu.c passes (vd, va, vb, vc) -- so this computes
 * a*b+c where a*c+b looks required; verify the fmadds operand order.
 */
	.globl	vmaddfp
vmaddfp:
	stwu	r1,-48(r1)	/* extra room: fr2 is saved at 32(r1) */
	mflr	r0
	stw	r0,52(r1)	/* LR save word in the caller's frame */
	bl	fpenable
	stfd	fr2,32(r1)	/* need a third scratch FPR */
	li	r0,4
	mtctr	r0		/* four elements */
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr1,fr2	/* fr0*fr1 + fr2 */
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	bl	fpdisable
	lwz	r0,52(r1)
	mtlr	r0
	addi	r1,r1,48
	blr
+
/*
 * Vector negative multiply and subtract, floating point.
 * Computes dst[i] = -(arg2[i] * arg3[i] - arg4[i]) with r3 -> dst and
 * r4/r5/r6 -> the three sources.
 *
 * NOTE(review): AltiVec vnmsubfp is -((vA*vC)-vB); as with vmaddfp
 * above, verify the fnmsubs operand order against the vecemu.c caller.
 */
	.globl	vnmsubfp
vnmsubfp:
	stwu	r1,-48(r1)	/* extra room: fr2 is saved at 32(r1) */
	mflr	r0
	stw	r0,52(r1)	/* LR save word in the caller's frame */
	bl	fpenable
	stfd	fr2,32(r1)	/* need a third scratch FPR */
	li	r0,4
	mtctr	r0		/* four elements */
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr1,fr2	/* -(fr0*fr1 - fr2) */
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	bl	fpdisable
	lwz	r0,52(r1)
	mtlr	r0
	addi	r1,r1,48
	blr
+
/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
	.globl	vrefp
vrefp:
	stwu	r1,-32(r1)	/* frame for fpenable's saves */
	mflr	r0
	stw	r0,36(r1)	/* LR save word in the caller's frame */
	bl	fpenable
	lis	r9,fpone@ha
	li	r0,4
	lfs	fr1,fpone@l(r9)	/* fr1 = 1.0 */
	mtctr	r0		/* four elements */
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0	/* a real divide, not just an estimate */
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	bl	fpdisable
	lwz	r0,36(r1)
	mtlr	r0
	addi	r1,r1,32
	blr
+
/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 *
 * Fix: fr2-fr5 are saved at 32..56(r1), so the frame must be 64 bytes.
 * The previous 48-byte frame put fr4/fr5 on top of the caller's
 * back-chain word and the LR save at 52(r1), and the epilogue then
 * reloaded LR from the wrong slot (36) and popped only 32 bytes,
 * corrupting the stack.
 */
	.globl	vrsqrtefp
vrsqrtefp:
	stwu	r1,-64(r1)	/* 64-byte frame: saves live at 8..56(r1) */
	mflr	r0
	stw	r0,68(r1)	/* LR save word in the caller's frame */
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	lis	r9,fpone@ha
	lis	r8,fphalf@ha
	li	r0,4
	lfs	fr4,fpone@l(r9)		/* fr4 = 1.0 */
	lfs	fr5,fphalf@l(r8)	/* fr5 = 0.5 */
	mtctr	r0			/* four elements */
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	bl	fpdisable
	lwz	r0,68(r1)	/* matches the stw above */
	mtlr	r0
	addi	r1,r1,64	/* matches the stwu above */
	blr
diff --git a/arch/ppc/platforms/4xx/bubinga.c b/arch/ppc/platforms/4xx/bubinga.c
new file mode 100644 (file)
index 0000000..3678abf
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * Support for IBM PPC 405EP evaluation board (Bubinga).
+ *
+ * Author: SAW (IBM), derived from walnut.c.
+ *         Maintained by MontaVista Software <source@mvista.com>
+ *
+ * 2003 (c) MontaVista Software Inc.  This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/blkdev.h>
+#include <linux/pci.h>
+#include <linux/rtc.h>
+#include <linux/tty.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+
+#include <asm/system.h>
+#include <asm/pci-bridge.h>
+#include <asm/processor.h>
+#include <asm/machdep.h>
+#include <asm/page.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/todc.h>
+#include <asm/kgdb.h>
+#include <asm/ocp.h>
+#include <asm/ibm_ocp_pci.h>
+
+#include <platforms/4xx/ibm405ep.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+extern bd_t __res;
+
+void *bubinga_rtc_base;
+
+/* Some IRQs unique to the board
+ * Used by the generic 405 PCI setup functions in ppc4xx_pci.c
+ */
+int __init
+ppc405_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+{
+       /* All four interrupt pins (A-D) of each slot are wired to a
+        * single UIC line: slot 1 -> 28 ... slot 4 -> 31.
+        */
+       static char pci_irq_table[][4] =
+           /*
+            *      PCI IDSEL/INTPIN->INTLINE
+            *      A       B       C       D
+            */
+       {
+               {28, 28, 28, 28},       /* IDSEL 1 - PCI slot 1 */
+               {29, 29, 29, 29},       /* IDSEL 2 - PCI slot 2 */
+               {30, 30, 30, 30},       /* IDSEL 3 - PCI slot 3 */
+               {31, 31, 31, 31},       /* IDSEL 4 - PCI slot 4 */
+       };
+
+       const long min_idsel = 1, max_idsel = 4, irqs_per_slot = 4;
+       return PCI_IRQ_TABLE_LOOKUP;
+}
+
+/* The serial clock for the chip is an internal clock determined by
+ * different clock speeds/dividers.
+ * Calculate the proper input baud rate and setup the serial driver.
+ */
+static void __init
+bubinga_early_serial_map(void)
+{
+       u32 uart_div;
+       int uart_clock;
+       struct uart_port port;
+
+         /* Calculate the serial clock input frequency
+          *
+          * The base baud is the PLL OUTA (provided in the board info
+          * structure) divided by the external UART Divisor, divided
+          * by 16.
+          */
+       /* NOTE(review): a zero U0DIV field would divide by zero below --
+        * presumably the boot ROM always programs a non-zero divisor;
+        * confirm.
+        */
+       uart_div = (mfdcr(DCRN_CPC0_UCR_BASE) & DCRN_CPC0_UCR_U0DIV);
+       uart_clock = __res.bi_pllouta_freq / uart_div;
+
+       /* Setup serial port access */
+       memset(&port, 0, sizeof(port));
+       port.membase = (void*)ACTING_UART0_IO_BASE;
+       port.irq = ACTING_UART0_INT;
+       port.uartclk = uart_clock;
+       port.regshift = 0;
+       port.iotype = SERIAL_IO_MEM;
+       port.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
+       port.line = 0;
+
+       if (early_serial_setup(&port) != 0) {
+               printk("Early serial init of port 0 failed\n");
+       }
+
+       /* Port 1 reuses the port 0 settings except base, irq and line */
+       port.membase = (void*)ACTING_UART1_IO_BASE;
+       port.irq = ACTING_UART1_INT;
+       port.line = 1;
+
+       if (early_serial_setup(&port) != 0) {
+               printk("Early serial init of port 1 failed\n");
+       }
+}
+
+/* Re-program the 405EP PCI bridge windows (PMM/PTM registers) to the
+ * mapping documented below, undoing whatever the boot ROM set up, and
+ * zero the bridge's config BARs 1 and 2.
+ */
+void __init
+bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip)
+{
+
+       unsigned int bar_response, bar;
+       /*
+        * Expected PCI mapping:
+        *
+        *  PLB addr             PCI memory addr
+        *  ---------------------       ---------------------
+        *  0000'0000 - 7fff'ffff <---  0000'0000 - 7fff'ffff
+        *  8000'0000 - Bfff'ffff --->  8000'0000 - Bfff'ffff
+        *
+        *  PLB addr             PCI io addr
+        *  ---------------------       ---------------------
+        *  e800'0000 - e800'ffff --->  0000'0000 - 0001'0000
+        *
+        * The following code is simplified by assuming that the bootrom
+        * has been well behaved in following this mapping.
+        */
+
+#ifdef DEBUG
+       int i;
+
+       printk("ioremap PCLIO_BASE = 0x%x\n", pcip);
+       printk("PCI bridge regs before fixup \n");
+       for (i = 0; i <= 3; i++) {
+               printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].ma)));
+               printk(" pmm%dla\t0x%x\n", i, in_le32(&(pcip->pmm[i].la)));
+               printk(" pmm%dpcila\t0x%x\n", i, in_le32(&(pcip->pmm[i].pcila)));
+               printk(" pmm%dpciha\t0x%x\n", i, in_le32(&(pcip->pmm[i].pciha)));
+       }
+       printk(" ptm1ms\t0x%x\n", in_le32(&(pcip->ptm1ms)));
+       printk(" ptm1la\t0x%x\n", in_le32(&(pcip->ptm1la)));
+       printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms)));
+       printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la)));
+
+#endif
+
+       /* added for IBM boot rom version 1.15 bios bar changes  -AK */
+
+       /* Disable region first */
+       out_le32((void *) &(pcip->pmm[0].ma), 0x00000000);
+       /* PLB starting addr, PCI: 0x80000000 */
+       out_le32((void *) &(pcip->pmm[0].la), 0x80000000);
+       /* PCI start addr, 0x80000000 */
+       out_le32((void *) &(pcip->pmm[0].pcila), PPC405_PCI_MEM_BASE);
+       /* 512MB range of PLB to PCI */
+       out_le32((void *) &(pcip->pmm[0].pciha), 0x00000000);
+       /* Enable no pre-fetch, enable region */
+       out_le32((void *) &(pcip->pmm[0].ma), ((0xffffffff -
+                                               (PPC405_PCI_UPPER_MEM -
+                                                PPC405_PCI_MEM_BASE)) | 0x01));
+
+       /* Disable region one */
+       out_le32((void *) &(pcip->pmm[1].ma), 0x00000000);
+       out_le32((void *) &(pcip->pmm[1].la), 0x00000000);
+       out_le32((void *) &(pcip->pmm[1].pcila), 0x00000000);
+       out_le32((void *) &(pcip->pmm[1].pciha), 0x00000000);
+       out_le32((void *) &(pcip->pmm[1].ma), 0x00000000);
+       out_le32((void *) &(pcip->ptm1ms), 0x00000001);
+
+       /* Disable region two */
+       out_le32((void *) &(pcip->pmm[2].ma), 0x00000000);
+       out_le32((void *) &(pcip->pmm[2].la), 0x00000000);
+       out_le32((void *) &(pcip->pmm[2].pcila), 0x00000000);
+       out_le32((void *) &(pcip->pmm[2].pciha), 0x00000000);
+       out_le32((void *) &(pcip->pmm[2].ma), 0x00000000);
+       out_le32((void *) &(pcip->ptm2ms), 0x00000000);
+       out_le32((void *) &(pcip->ptm2la), 0x00000000);
+
+       /* Zero config bars */
+       /* NOTE(review): the devfn argument below is derived from
+        * hose->first_busno via PCI_FUNC(), which looks like it was
+        * meant to be a device/function number -- confirm against the
+        * early_*_config_dword() calling convention.
+        */
+       for (bar = PCI_BASE_ADDRESS_1; bar <= PCI_BASE_ADDRESS_2; bar += 4) {
+               early_write_config_dword(hose, hose->first_busno,
+                                        PCI_FUNC(hose->first_busno), bar,
+                                        0x00000000);
+               early_read_config_dword(hose, hose->first_busno,
+                                       PCI_FUNC(hose->first_busno), bar,
+                                       &bar_response);
+               DBG("BUS %d, device %d, Function %d bar 0x%8.8x is 0x%8.8x\n",
+                   hose->first_busno, PCI_SLOT(hose->first_busno),
+                   PCI_FUNC(hose->first_busno), bar, bar_response);
+       }
+       /* end workaround */
+
+#ifdef DEBUG
+       printk("PCI bridge regs after fixup \n");
+       for (i = 0; i <= 3; i++) {
+               printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].ma)));
+               printk(" pmm%dla\t0x%x\n", i, in_le32(&(pcip->pmm[i].la)));
+               printk(" pmm%dpcila\t0x%x\n", i, in_le32(&(pcip->pmm[i].pcila)));
+               printk(" pmm%dpciha\t0x%x\n", i, in_le32(&(pcip->pmm[i].pciha)));
+       }
+       printk(" ptm1ms\t0x%x\n", in_le32(&(pcip->ptm1ms)));
+       printk(" ptm1la\t0x%x\n", in_le32(&(pcip->ptm1la)));
+       printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms)));
+       printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la)));
+
+#endif
+}
+
+/* Board-level setup: run the generic 4xx setup, select the EMACs,
+ * map the early serial ports and point the TODC layer at the DS1743
+ * RTC/NVRAM.
+ */
+void __init
+bubinga_setup_arch(void)
+{
+       ppc4xx_setup_arch();
+
+       ibm_ocp_set_emac(0, 1);
+
+        bubinga_early_serial_map();
+
+        /* RTC step for the evb405ep */
+        bubinga_rtc_base = (void *) BUBINGA_RTC_VADDR;
+        TODC_INIT(TODC_TYPE_DS1743, bubinga_rtc_base, bubinga_rtc_base,
+                  bubinga_rtc_base, 8);
+        /* Identify the system */
+        printk("IBM Bubinga port (MontaVista Software, Inc. <source@mvista.com>)\n");
+}
+
+/* Create the board I/O mappings: the generic 4xx ones plus a
+ * _PAGE_IO block mapping for the on-board RTC/NVRAM.
+ */
+void __init
+bubinga_map_io(void)
+{
+       ppc4xx_map_io();
+       io_block_mapping(BUBINGA_RTC_VADDR,
+                         BUBINGA_RTC_PADDR, BUBINGA_RTC_SIZE, _PAGE_IO);
+}
+
+/* Early platform hook: run generic 4xx init, then install the
+ * Bubinga-specific ppc_md callbacks (arch setup, I/O mappings and,
+ * when configured, the TODC-based RTC/NVRAM and KGDB serial hooks).
+ */
+void __init
+platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+             unsigned long r6, unsigned long r7)
+{
+       ppc4xx_init(r3, r4, r5, r6, r7);
+
+       ppc_md.setup_arch = bubinga_setup_arch;
+       ppc_md.setup_io_mappings = bubinga_map_io;
+
+#ifdef CONFIG_GEN_RTC
+       ppc_md.time_init = todc_time_init;
+       ppc_md.set_rtc_time = todc_set_rtc_time;
+       ppc_md.get_rtc_time = todc_get_rtc_time;
+       ppc_md.nvram_read_val = todc_direct_read_val;
+       ppc_md.nvram_write_val = todc_direct_write_val;
+#endif
+#ifdef CONFIG_KGDB
+       ppc_md.early_serial_map = bubinga_early_serial_map;
+#endif
+}
+
diff --git a/arch/ppc/platforms/4xx/bubinga.h b/arch/ppc/platforms/4xx/bubinga.h
new file mode 100644 (file)
index 0000000..b1df856
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Support for IBM PPC 405EP evaluation board (Bubinga).
+ *
+ * Author: SAW (IBM), derived from walnut.h.
+ *         Maintained by MontaVista Software <source@mvista.com>
+ *
+ * 2003 (c) MontaVista Software Inc.  This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifdef __KERNEL__
+#ifndef __BUBINGA_H__
+#define __BUBINGA_H__
+
+/* 405EP */
+#include <platforms/4xx/ibm405ep.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * Data structure defining board information maintained by the boot
+ * ROM on IBM's evaluation board. An effort has been made to
+ * keep the field names consistent with the 8xx 'bd_t' board info
+ * structures.
+ */
+
+typedef struct board_info {
+        unsigned char    bi_s_version[4];       /* Version of this structure */
+        unsigned char    bi_r_version[30];      /* Version of the IBM ROM */
+        unsigned int     bi_memsize;            /* DRAM installed, in bytes */
+        unsigned char    bi_enetaddr[2][6];     /* Local Ethernet MAC address */
+        unsigned char    bi_pci_enetaddr[6];    /* PCI Ethernet MAC address */
+        unsigned int     bi_intfreq;            /* Processor speed, in Hz */
+        unsigned int     bi_busfreq;            /* PLB Bus speed, in Hz */
+        unsigned int     bi_pci_busfreq;        /* PCI Bus speed, in Hz */
+        unsigned int     bi_opb_busfreq;        /* OPB Bus speed, in Hz */
+        unsigned int     bi_pllouta_freq;       /* PLL OUTA speed, in Hz */
+} bd_t;
+
+/* Some 4xx parts use a different timebase frequency from the internal clock.
+*/
+#define bi_tbfreq bi_intfreq
+
+
+/* Memory map for the Bubinga board.
+ * Generic 4xx plus RTC.
+ */
+
+extern void *bubinga_rtc_base;
+#define BUBINGA_RTC_PADDR      ((uint)0xf0000000)
+#define BUBINGA_RTC_VADDR      BUBINGA_RTC_PADDR
+#define BUBINGA_RTC_SIZE       ((uint)8*1024)
+
+/* The UART clock is based off an internal clock -
+ * define BASE_BAUD based on the internal clock and divider(s).
+ * Since BASE_BAUD must be a constant, we will initialize it
+ * using clock/divider values which OpenBIOS initializes
+ * for typical configurations at various CPU speeds.
+ * The base baud is calculated as (FWDA / EXT UART DIV / 16)
+ */
+#define BASE_BAUD       0
+
+#define BUBINGA_FPGA_BASE      0xF0300000
+
+#define PPC4xx_MACHINE_NAME     "IBM Bubinga"
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __BUBINGA_H__ */
+#endif /* __KERNEL__ */
diff --git a/arch/ppc/platforms/4xx/ibm405ep.c b/arch/ppc/platforms/4xx/ibm405ep.c
new file mode 100644 (file)
index 0000000..fb48e82
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ * arch/ppc/platforms/ibm405ep.c
+ *
+ * Support for IBM PPC 405EP processors.
+ *
+ * Author: SAW (IBM), derived from ibmnp405l.c.
+ *         Maintained by MontaVista Software <source@mvista.com>
+ *
+ * 2003 (c) MontaVista Software Inc.  This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/param.h>
+#include <linux/string.h>
+
+#include <asm/ibm4xx.h>
+#include <asm/ocp.h>
+
+#include <platforms/4xx/ibm405ep.h>
+
+/* MAL configuration: DMA channels and interrupt numbers used by the
+ * two on-chip EMACs below (both reference MAL index 0).
+ */
+static struct ocp_func_mal_data ibm405ep_mal0_def = {
+       .num_tx_chans   = 4,            /* Number of TX channels */
+       .num_rx_chans   = 2,            /* Number of RX channels */
+       .txeob_irq      = 11,           /* TX End Of Buffer IRQ  */
+       .rxeob_irq      = 12,           /* RX End Of Buffer IRQ  */
+       .txde_irq       = 13,           /* TX Descriptor Error IRQ */
+       .rxde_irq       = 14,           /* RX Descriptor Error IRQ */
+       .serr_irq       = 10,           /* MAL System Error IRQ    */
+};
+OCP_SYSFS_MAL_DATA()
+
+static struct ocp_func_emac_data ibm405ep_emac0_def = {
+       .rgmii_idx      = -1,           /* No RGMII */
+       .rgmii_mux      = -1,           /* No RGMII */
+       .zmii_idx       = -1,           /* ZMII device index */
+       .zmii_mux       = 0,            /* ZMII input of this EMAC */
+       .mal_idx        = 0,            /* MAL device index */
+       .mal_rx_chan    = 0,            /* MAL rx channel number */
+       .mal_tx_chan    = 0,            /* MAL tx channel number */
+       .wol_irq        = 9,            /* WOL interrupt number */
+       .mdio_idx       = 0,            /* MDIO via EMAC0 */
+       .tah_idx        = -1,           /* No TAH */
+};
+
+static struct ocp_func_emac_data ibm405ep_emac1_def = {
+       .rgmii_idx      = -1,           /* No RGMII */
+       .rgmii_mux      = -1,           /* No RGMII */
+       .zmii_idx       = -1,           /* ZMII device index */
+       .zmii_mux       = 0,            /* ZMII input of this EMAC */
+       .mal_idx        = 0,            /* MAL device index */
+       .mal_rx_chan    = 1,            /* MAL rx channel number */
+       .mal_tx_chan    = 2,            /* MAL tx channel number */
+       .wol_irq        = 9,            /* WOL interrupt number */
+       .mdio_idx       = 0,            /* MDIO via EMAC0 */
+       .tah_idx        = -1,           /* No TAH */
+};
+OCP_SYSFS_EMAC_DATA()
+
+static struct ocp_func_iic_data ibm405ep_iic0_def = {
+       .fast_mode      = 0,            /* Use standard mode (100 kHz) */
+};
+OCP_SYSFS_IIC_DATA()
+
+/* On-chip peripheral (OCP) device table for the 405EP; the list is
+ * terminated by the OCP_VENDOR_INVALID sentinel entry.
+ */
+struct ocp_def core_ocp[] = {
+       { .vendor       = OCP_VENDOR_IBM,
+         .function     = OCP_FUNC_OPB,
+         .index        = 0,
+         .paddr        = 0xEF600000,
+         .irq          = OCP_IRQ_NA,
+         .pm           = OCP_CPM_NA,
+       },
+       { .vendor       = OCP_VENDOR_IBM,
+         .function     = OCP_FUNC_16550,
+         .index        = 0,
+         .paddr        = UART0_IO_BASE,
+         .irq          = UART0_INT,
+         .pm           = IBM_CPM_UART0
+       },
+       { .vendor       = OCP_VENDOR_IBM,
+         .function     = OCP_FUNC_16550,
+         .index        = 1,
+         .paddr        = UART1_IO_BASE,
+         .irq          = UART1_INT,
+         .pm           = IBM_CPM_UART1
+       },
+       { .vendor       = OCP_VENDOR_IBM,
+         .function     = OCP_FUNC_IIC,
+         .paddr        = 0xEF600500,
+         .irq          = 2,
+         .pm           = IBM_CPM_IIC0,
+         .additions    = &ibm405ep_iic0_def,
+         .show         = &ocp_show_iic_data
+       },
+       { .vendor       = OCP_VENDOR_IBM,
+         .function     = OCP_FUNC_GPIO,
+         .paddr        = 0xEF600700,
+         .irq          = OCP_IRQ_NA,
+         .pm           = IBM_CPM_GPIO0
+       },
+       { .vendor       = OCP_VENDOR_IBM,
+         .function     = OCP_FUNC_MAL,
+         .paddr        = OCP_PADDR_NA,
+         .irq          = OCP_IRQ_NA,
+         .pm           = OCP_CPM_NA,
+         .additions    = &ibm405ep_mal0_def,
+         .show         = &ocp_show_mal_data
+       },
+       { .vendor       = OCP_VENDOR_IBM,
+         .function     = OCP_FUNC_EMAC,
+         .index        = 0,
+         .paddr        = EMAC0_BASE,
+         .irq          = 15,
+         .pm           = OCP_CPM_NA,
+         .additions    = &ibm405ep_emac0_def,
+         .show         = &ocp_show_emac_data
+       },
+       { .vendor       = OCP_VENDOR_IBM,
+         .function     = OCP_FUNC_EMAC,
+         .index        = 1,
+         .paddr        = 0xEF600900,
+         .irq          = 17,
+         .pm           = OCP_CPM_NA,
+         .additions    = &ibm405ep_emac1_def,
+         .show         = &ocp_show_emac_data
+       },
+       { .vendor       = OCP_VENDOR_INVALID
+       }
+};
diff --git a/arch/ppc/platforms/4xx/ibm405ep.h b/arch/ppc/platforms/4xx/ibm405ep.h
new file mode 100644 (file)
index 0000000..e051e3f
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * arch/ppc/platforms/4xx/ibm405ep.h
+ *
+ * IBM PPC 405EP processor defines.
+ *
+ * Author: SAW (IBM), derived from ibm405gp.h.
+ *         Maintained by MontaVista Software <source@mvista.com>
+ *
+ * 2003 (c) MontaVista Software Inc.  This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASM_IBM405EP_H__
+#define __ASM_IBM405EP_H__
+
+#include <linux/config.h>
+
+/* ibm405.h at bottom of this file */
+
+/* PCI
+ * PCI Bridge config reg definitions
+ * see 17-19 of manual
+ */
+
+#define PPC405_PCI_CONFIG_ADDR 0xeec00000
+#define PPC405_PCI_CONFIG_DATA 0xeec00004
+
+#define PPC405_PCI_PHY_MEM_BASE        0x80000000      /* hose_a->pci_mem_offset */
+                                               /* setbat */
+#define PPC405_PCI_MEM_BASE    PPC405_PCI_PHY_MEM_BASE /* setbat */
+#define PPC405_PCI_PHY_IO_BASE 0xe8000000      /* setbat */
+#define PPC405_PCI_IO_BASE     PPC405_PCI_PHY_IO_BASE  /* setbat */
+
+#define PPC405_PCI_LOWER_MEM   0x80000000      /* hose_a->mem_space.start */
+#define PPC405_PCI_UPPER_MEM   0xBfffffff      /* hose_a->mem_space.end */
+#define PPC405_PCI_LOWER_IO    0x00000000      /* hose_a->io_space.start */
+#define PPC405_PCI_UPPER_IO    0x0000ffff      /* hose_a->io_space.end */
+
+#define PPC405_ISA_IO_BASE     PPC405_PCI_IO_BASE
+
+#define PPC4xx_PCI_IO_PADDR    ((uint)PPC405_PCI_PHY_IO_BASE)
+#define PPC4xx_PCI_IO_VADDR    PPC4xx_PCI_IO_PADDR
+#define PPC4xx_PCI_IO_SIZE     ((uint)64*1024)
+#define PPC4xx_PCI_CFG_PADDR   ((uint)PPC405_PCI_CONFIG_ADDR)
+#define PPC4xx_PCI_CFG_VADDR   PPC4xx_PCI_CFG_PADDR
+#define PPC4xx_PCI_CFG_SIZE    ((uint)4*1024)
+#define PPC4xx_PCI_LCFG_PADDR  ((uint)0xef400000)
+#define PPC4xx_PCI_LCFG_VADDR  PPC4xx_PCI_LCFG_PADDR
+#define PPC4xx_PCI_LCFG_SIZE   ((uint)4*1024)
+#define PPC4xx_ONB_IO_PADDR    ((uint)0xef600000)
+#define PPC4xx_ONB_IO_VADDR    PPC4xx_ONB_IO_PADDR
+#define PPC4xx_ONB_IO_SIZE     ((uint)4*1024)
+
+/* serial port defines */
+#define RS_TABLE_SIZE  2
+
+#define UART0_INT      0
+#define UART1_INT      1
+
+#define PCIL0_BASE     0xEF400000
+#define UART0_IO_BASE  0xEF600300
+#define UART1_IO_BASE  0xEF600400
+#define EMAC0_BASE     0xEF600800
+
+#define BD_EMAC_ADDR(e,i) bi_enetaddr[e][i]
+
+#if defined(CONFIG_UART0_TTYS0)
+#define ACTING_UART0_IO_BASE   UART0_IO_BASE
+#define ACTING_UART1_IO_BASE   UART1_IO_BASE
+#define ACTING_UART0_INT       UART0_INT
+#define ACTING_UART1_INT       UART1_INT
+#else
+#define ACTING_UART0_IO_BASE   UART1_IO_BASE
+#define ACTING_UART1_IO_BASE   UART0_IO_BASE
+#define ACTING_UART0_INT       UART1_INT
+#define ACTING_UART1_INT       UART0_INT
+#endif
+
+#define STD_UART_OP(num)                                       \
+       { 0, BASE_BAUD, 0, ACTING_UART##num##_INT,                      \
+               (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST),        \
+               iomem_base: (u8 *)ACTING_UART##num##_IO_BASE,           \
+               io_type: SERIAL_IO_MEM},
+
+#define SERIAL_DEBUG_IO_BASE   ACTING_UART0_IO_BASE
+#define SERIAL_PORT_DFNS       \
+       STD_UART_OP(0)          \
+       STD_UART_OP(1)
+
+/* DCR defines */
+#define DCRN_CPMSR_BASE         0x0BA
+#define DCRN_CPMFR_BASE         0x0B9
+
+#define DCRN_CPC0_PLLMR0_BASE   0x0F0
+#define DCRN_CPC0_BOOT_BASE     0x0F1
+#define DCRN_CPC0_CR1_BASE      0x0F2
+#define DCRN_CPC0_EPRCSR_BASE   0x0F3
+#define DCRN_CPC0_PLLMR1_BASE   0x0F4
+#define DCRN_CPC0_UCR_BASE      0x0F5
+#define DCRN_CPC0_UCR_U0DIV     0x07F
+#define DCRN_CPC0_SRR_BASE      0x0F6
+#define DCRN_CPC0_JTAGID_BASE   0x0F7
+#define DCRN_CPC0_SPARE_BASE    0x0F8
+#define DCRN_CPC0_PCI_BASE      0x0F9
+
+
+#define IBM_CPM_GPT             0x80000000      /* GPT interface */
+#define IBM_CPM_PCI             0x40000000      /* PCI bridge */
+#define IBM_CPM_UIC             0x00010000      /* Universal Int Controller */
+#define IBM_CPM_CPU             0x00008000      /* processor core */
+#define IBM_CPM_EBC             0x00002000      /* EBC controller */
+#define IBM_CPM_SDRAM0          0x00004000      /* SDRAM memory controller */
+#define IBM_CPM_GPIO0           0x00001000      /* General Purpose IO */
+#define IBM_CPM_TMRCLK          0x00000400      /* CPU timers */
+#define IBM_CPM_PLB             0x00000100      /* PLB bus arbiter */
+#define IBM_CPM_OPB             0x00000080      /* PLB to OPB bridge */
+#define IBM_CPM_DMA             0x00000040      /* DMA controller */
+#define IBM_CPM_IIC0            0x00000010      /* IIC interface */
+#define IBM_CPM_UART1           0x00000002      /* serial port 0 */
+#define IBM_CPM_UART0           0x00000001      /* serial port 1 */
+#define DFLT_IBM4xx_PM          ~(IBM_CPM_PCI | IBM_CPM_CPU | IBM_CPM_DMA \
+                                        | IBM_CPM_OPB | IBM_CPM_EBC \
+                                        | IBM_CPM_SDRAM0 | IBM_CPM_PLB \
+                                        | IBM_CPM_UIC | IBM_CPM_TMRCLK)
+#define DCRN_DMA0_BASE          0x100
+#define DCRN_DMA1_BASE          0x108
+#define DCRN_DMA2_BASE          0x110
+#define DCRN_DMA3_BASE          0x118
+#define DCRNCAP_DMA_SG          1       /* have DMA scatter/gather capability */
+#define DCRN_DMASR_BASE         0x120
+#define DCRN_EBC_BASE           0x012
+#define DCRN_DCP0_BASE          0x014
+#define DCRN_MAL_BASE           0x180
+#define DCRN_OCM0_BASE          0x018
+#define DCRN_PLB0_BASE          0x084
+#define DCRN_PLLMR_BASE         0x0B0
+#define DCRN_POB0_BASE          0x0A0
+#define DCRN_SDRAM0_BASE        0x010
+#define DCRN_UIC0_BASE          0x0C0
+#define UIC0 DCRN_UIC0_BASE
+
+#include <asm/ibm405.h>
+
+#endif                         /* __ASM_IBM405EP_H__ */
+#endif                         /* __KERNEL__ */
diff --git a/arch/ppc/platforms/sbc82xx.c b/arch/ppc/platforms/sbc82xx.c
new file mode 100644 (file)
index 0000000..0da699d
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * arch/ppc/platforms/sbc82xx.c
+ *
+ * SBC82XX platform support
+ *
+ * Author: Guy Streeter <streeter@redhat.com>
+ *
+ * Derived from: est8260_setup.c by Allen Curtis, ONZ
+ *
+ * Copyright 2004 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/seq_file.h>
+#include <linux/stddef.h>
+
+#include <asm/mpc8260.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/todc.h>
+#include <asm/immap_8260.h>
+
+static void (*callback_setup_arch)(void);
+
+extern unsigned char __res[sizeof(bd_t)];
+
+extern void m8260_init(unsigned long r3, unsigned long r4,
+       unsigned long r5, unsigned long r6, unsigned long r7);
+
+extern void (*late_time_init)(void);
+
+/* /proc/cpuinfo callback: report board vendor, machine, memory size
+ * and console baud rate from the board-info block.
+ */
+static int
+sbc82xx_show_cpuinfo(struct seq_file *m)
+{
+       bd_t    *binfo = (bd_t *)__res;
+
+       seq_printf(m, "vendor\t\t: Wind River\n"
+                     "machine\t\t: SBC PowerQUICC II\n"
+                     "\n"
+                     "mem size\t\t: 0x%08lx\n"
+                     "console baud\t\t: %ld\n"
+                     "\n",
+                     binfo->bi_memsize,
+                     binfo->bi_baudrate);
+       return 0;
+}
+
+/* Board banner plus chain to whatever setup_arch hook m8260_init()
+ * installed before platform_init() saved it.
+ */
+static void __init
+sbc82xx_setup_arch(void)
+{
+       printk("SBC PowerQUICC II Port\n");
+       /* Guard against a NULL saved hook so a missing generic
+        * setup_arch cannot oops the boot.
+        */
+       if (callback_setup_arch)
+               callback_setup_arch();
+}
+
+TODC_ALLOC();
+
+/*
+ * Timer init happens before mem_init but after paging init, so we cannot
+ * directly use ioremap() at that time.
+ * late_time_init() runs later in boot, once ioremap() is usable.
+ */
+#ifdef CONFIG_GEN_RTC
+static void sbc82xx_time_init(void)
+{
+       volatile memctl8260_t *mc = &immr->im_memctl;
+       TODC_INIT(TODC_TYPE_MK48T59, 0, 0, SBC82xx_TODC_NVRAM_ADDR, 0);
+
+       /* Set up CS11 for RTC chip */
+       mc->memc_br11=0;
+       mc->memc_or11=0xffff0836;
+       mc->memc_br11=0x80000801;
+
+       /* Map the NVRAM and patch the virtual address back into
+        * todc_info before handing control to the generic TODC code.
+        */
+       todc_info->nvram_data =
+               (unsigned int)ioremap(todc_info->nvram_data, 0x2000);
+       BUG_ON(!todc_info->nvram_data);
+       ppc_md.get_rtc_time     = todc_get_rtc_time;
+       ppc_md.set_rtc_time     = todc_set_rtc_time;
+       ppc_md.nvram_read_val   = todc_direct_read_val;
+       ppc_md.nvram_write_val  = todc_direct_write_val;
+       todc_time_init();
+}
+#endif /* CONFIG_GEN_RTC */
+
+/* Early platform hook: generic 8260 init plus SBC82xx-specific
+ * ppc_md overrides.  With CONFIG_GEN_RTC the RTC/NVRAM hooks are
+ * cleared here and installed later from sbc82xx_time_init(), which
+ * runs via late_time_init once ioremap() works.
+ */
+void __init
+platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
+             unsigned long r6, unsigned long r7)
+{
+       /* Generic 8260 platform initialization */
+       m8260_init(r3, r4, r5, r6, r7);
+
+       /* u-boot may be using one of the FCC Ethernet devices.
+          Use the MAC address to the SCC. */
+       /* NOTE(review): this clears the low two bits of the last MAC
+        * octet in the board-info block -- presumably to derive the
+        * SCC's address from the FCC one; confirm.
+        */
+       __res[offsetof(bd_t, bi_enetaddr[5])] &= ~3;
+
+       /* Anything special for this platform */
+       ppc_md.show_cpuinfo     = sbc82xx_show_cpuinfo;
+
+       callback_setup_arch     = ppc_md.setup_arch;
+       ppc_md.setup_arch       = sbc82xx_setup_arch;
+#ifdef CONFIG_GEN_RTC
+       ppc_md.time_init        = NULL;
+       ppc_md.get_rtc_time     = NULL;
+       ppc_md.set_rtc_time     = NULL;
+       ppc_md.nvram_read_val   = NULL;
+       ppc_md.nvram_write_val  = NULL;
+       late_time_init          = sbc82xx_time_init;
+#endif /* CONFIG_GEN_RTC */
+}
diff --git a/arch/ppc/platforms/sbc82xx.h b/arch/ppc/platforms/sbc82xx.h
new file mode 100644 (file)
index 0000000..b9d1c8d
--- /dev/null
@@ -0,0 +1,24 @@
+/* Board information for the SBCPowerQUICCII, which should be generic for
+ * all 8260 boards.  The IMMR is now given to us so the hard define
+ * will soon be removed.  All of the clock values are computed from
+ * the configuration SCMR and the Power-On-Reset word.
+ */
+
+#ifndef __PPC_SBC82xx_H__
+#define __PPC_SBC82xx_H__
+
+#include <asm/ppcboot.h>
+
+#define IMAP_ADDR                      0xf0000000
+#define CPM_MAP_ADDR                   0xf0000000
+
+#define SBC82xx_TODC_NVRAM_ADDR                0x80000000
+
+#define SBC82xx_MACADDR_NVRAM_FCC1     0x220000c9      /* JP6B */
+#define SBC82xx_MACADDR_NVRAM_SCC1     0x220000cf      /* JP6A */
+#define SBC82xx_MACADDR_NVRAM_FCC2     0x220000d5      /* JP7A */
+#define SBC82xx_MACADDR_NVRAM_FCC3     0x220000db      /* JP7B */
+
+#define BOOTROM_RESTART_ADDR      ((uint)0x40000104)
+
+#endif /* __PPC_SBC82xx_H__ */
diff --git a/arch/ppc/syslib/dcr.S b/arch/ppc/syslib/dcr.S
new file mode 100644 (file)
index 0000000..895f102
--- /dev/null
@@ -0,0 +1,41 @@
+/*
+ * arch/ppc/syslib/dcr.S
+ *
+ * "Indirect" DCR access
+ *
+ * Copyright (c) 2004 Eugene Surovegin <ebs@ebshome.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under  the terms of  the GNU General Public License as published by the
+ * Free Software Foundation;  either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/processor.h>
+
+/* Branch into a jump table indexed by DCR number.  Each DCR owns a
+ * 16-byte table slot holding "mfdcr rD,N; blr; mtdcr N,rS; blr", so
+ * the DCR number is scaled by 16 (rlwinm ...,4,18,27 == dcrn << 4,
+ * masked) and added to the table base.
+ */
+#define DCR_ACCESS_PROLOG(table) \
+       rlwinm  r3,r3,4,18,27;   \
+       lis     r5,table@h;      \
+       ori     r5,r5,table@l;   \
+       add     r3,r3,r5;        \
+       mtctr   r3;              \
+       bctr
+
+/* __mfdcr(dcrn): read DCR 'dcrn', result in r3 */
+_GLOBAL(__mfdcr)
+       DCR_ACCESS_PROLOG(__mfdcr_table)
+
+/* __mtdcr(dcrn, val): write r4 to DCR 'dcrn' */
+_GLOBAL(__mtdcr)
+       DCR_ACCESS_PROLOG(__mtdcr_table)
+
+/* Slot for DCR 0.  __mtdcr_table sits 8 bytes after __mfdcr_table,
+ * so the same 16-byte stride reaches the mtdcr half of each slot.
+ */
+__mfdcr_table:
+       mfdcr  r3,0; blr
+__mtdcr_table:
+       mtdcr  0,r4; blr
+
+/* Generate the remaining slots for DCRs 1..1023 */
+dcr     = 1
+        .rept   1023
+       mfdcr   r3,dcr; blr
+       mtdcr   dcr,r4; blr
+       dcr     = dcr + 1
+       .endr
diff --git a/arch/ppc/syslib/ibm440gx_common.c b/arch/ppc/syslib/ibm440gx_common.c
new file mode 100644 (file)
index 0000000..5da7bca
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * arch/ppc/kernel/ibm440gx_common.c
+ *
+ * PPC440GX system library
+ *
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ * Copyright (c) 2003 Zultys Technologies
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <asm/ibm44x.h>
+#include <asm/mmu.h>
+#include <asm/processor.h>
+#include <syslib/ibm440gx_common.h>
+
+/*
+ * Calculate 440GX clocks
+ */
+static inline u32 __fix_zero(u32 v, u32 def){
+       return v ? v : def;
+}
+
+void __init ibm440gx_get_clocks(struct ibm44x_clocks* p, unsigned int sys_clk,
+       unsigned int ser_clk)
+{
+       u32 pllc  = CPR_READ(DCRN_CPR_PLLC);
+       u32 plld  = CPR_READ(DCRN_CPR_PLLD);
+       u32 uart0 = SDR_READ(DCRN_SDR_UART0);
+       u32 uart1 = SDR_READ(DCRN_SDR_UART1);
+
+       /* Dividers */
+       u32 fbdv   = __fix_zero((plld >> 24) & 0x1f, 32);
+       u32 fwdva  = __fix_zero((plld >> 16) & 0xf, 16);
+       u32 fwdvb  = __fix_zero((plld >> 8) & 7, 8);
+       u32 lfbdv  = __fix_zero(plld & 0x3f, 64);
+       u32 pradv0 = __fix_zero((CPR_READ(DCRN_CPR_PRIMAD) >> 24) & 7, 8);
+       u32 prbdv0 = __fix_zero((CPR_READ(DCRN_CPR_PRIMBD) >> 24) & 7, 8);
+       u32 opbdv0 = __fix_zero((CPR_READ(DCRN_CPR_OPBD) >> 24) & 3, 4);
+       u32 perdv0 = __fix_zero((CPR_READ(DCRN_CPR_PERD) >> 24) & 3, 4);
+
+       /* Input clocks for primary dividers */
+       u32 clk_a, clk_b;
+
+       if (pllc & 0x40000000){
+               u32 m;
+
+               /* Feedback path */
+               switch ((pllc >> 24) & 7){
+               case 0:
+                       /* PLLOUTx */
+                       m = ((pllc & 0x20000000) ? fwdvb : fwdva) * lfbdv;
+                       break;
+               case 1:
+                       /* CPU */
+                       m = fwdva * pradv0;
+                       break;
+               case 5:
+                       /* PERClk */
+                       m = fwdvb * prbdv0 * opbdv0 * perdv0;
+                       break;
+               default:
+                       printk(KERN_EMERG "invalid PLL feedback source\n");
+                       goto bypass;
+               }
+               m *= fbdv;
+               p->vco = sys_clk * m;
+               clk_a = p->vco / fwdva;
+               clk_b = p->vco / fwdvb;
+       }
+       else {
+bypass:
+               /* Bypass system PLL */
+               p->vco = 0;
+               clk_a = clk_b = sys_clk;
+       }
+
+       p->cpu = clk_a / pradv0;
+       p->plb = clk_b / prbdv0;
+       p->opb = p->plb / opbdv0;
+       p->ebc = p->opb / perdv0;
+
+       /* UARTs clock */
+       if (uart0 & 0x00800000)
+               p->uart0 = ser_clk;
+       else
+               p->uart0 = p->plb / __fix_zero(uart0 & 0xff, 256);
+
+       if (uart1 & 0x00800000)
+               p->uart1 = ser_clk;
+       else
+               p->uart1 = p->plb / __fix_zero(uart1 & 0xff, 256);
+}
+
+/* Enable L2 cache (call with IRQs disabled) */
+void __init ibm440gx_l2c_enable(void){
+       u32 r;
+
+       asm volatile ("sync" ::: "memory");
+
+       /* Disable SRAM */
+       mtdcr(DCRN_SRAM0_DPC,   mfdcr(DCRN_SRAM0_DPC)   & ~SRAM_DPC_ENABLE);
+       mtdcr(DCRN_SRAM0_SB0CR, mfdcr(DCRN_SRAM0_SB0CR) & ~SRAM_SBCR_BU_MASK);
+       mtdcr(DCRN_SRAM0_SB1CR, mfdcr(DCRN_SRAM0_SB1CR) & ~SRAM_SBCR_BU_MASK);
+       mtdcr(DCRN_SRAM0_SB2CR, mfdcr(DCRN_SRAM0_SB2CR) & ~SRAM_SBCR_BU_MASK);
+       mtdcr(DCRN_SRAM0_SB3CR, mfdcr(DCRN_SRAM0_SB3CR) & ~SRAM_SBCR_BU_MASK);
+
+       /* Enable L2_MODE without ICU/DCU */
+       r = mfdcr(DCRN_L2C0_CFG) & ~(L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_SS_MASK);
+       r |= L2C_CFG_L2M | L2C_CFG_SS_256;
+       mtdcr(DCRN_L2C0_CFG, r);
+
+       mtdcr(DCRN_L2C0_ADDR, 0);
+
+       /* Hardware Clear Command */
+       mtdcr(DCRN_L2C0_CMD, L2C_CMD_HCC);
+       while (!(mfdcr(DCRN_L2C0_SR) & L2C_SR_CC)) ;
+
+       /* Clear Cache Parity and Tag Errors */
+       mtdcr(DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE);
+
+       /* Enable 64G snoop region starting at 0 */
+       r = mfdcr(DCRN_L2C0_SNP0) & ~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK);
+       r |= L2C_SNP_SSR_32G | L2C_SNP_ESR;
+       mtdcr(DCRN_L2C0_SNP0, r);
+
+       r = mfdcr(DCRN_L2C0_SNP1) & ~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK);
+       r |= 0x80000000 | L2C_SNP_SSR_32G | L2C_SNP_ESR;
+       mtdcr(DCRN_L2C0_SNP1, r);
+
+       asm volatile ("sync" ::: "memory");
+
+       /* Enable ICU/DCU ports */
+       r = mfdcr(DCRN_L2C0_CFG);
+       r &= ~(L2C_CFG_DCW_MASK | L2C_CFG_CPIM | L2C_CFG_TPIM | L2C_CFG_LIM
+               | L2C_CFG_PMUX_MASK | L2C_CFG_PMIM | L2C_CFG_TPEI | L2C_CFG_CPEI
+               | L2C_CFG_NAM | L2C_CFG_NBRM);
+       r |= L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_TPC | L2C_CFG_CPC | L2C_CFG_FRAN
+               | L2C_CFG_SMCM;
+       mtdcr(DCRN_L2C0_CFG, r);
+
+       asm volatile ("sync; isync" ::: "memory");
+}
+
+/* Disable L2 cache (call with IRQs disabled) */
+void __init ibm440gx_l2c_disable(void){
+       u32 r;
+
+       asm volatile ("sync" ::: "memory");
+
+       /* Disable L2C mode */
+       r = mfdcr(DCRN_L2C0_CFG) & ~(L2C_CFG_L2M | L2C_CFG_ICU | L2C_CFG_DCU);
+       mtdcr(DCRN_L2C0_CFG, r);
+
+       /* Enable SRAM */
+       mtdcr(DCRN_SRAM0_DPC, mfdcr(DCRN_SRAM0_DPC) | SRAM_DPC_ENABLE);
+       mtdcr(DCRN_SRAM0_SB0CR,
+             SRAM_SBCR_BAS0 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW);
+       mtdcr(DCRN_SRAM0_SB1CR,
+             SRAM_SBCR_BAS1 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW);
+       mtdcr(DCRN_SRAM0_SB2CR,
+             SRAM_SBCR_BAS2 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW);
+       mtdcr(DCRN_SRAM0_SB3CR,
+             SRAM_SBCR_BAS3 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW);
+
+       asm volatile ("sync; isync" ::: "memory");
+}
+
+int __init ibm440gx_get_eth_grp(void)
+{
+       return (SDR_READ(DCRN_SDR_PFC1) & DCRN_SDR_PFC1_EPS) >> DCRN_SDR_PFC1_EPS_SHIFT;
+}
+
+void __init ibm440gx_set_eth_grp(int group)
+{
+       SDR_WRITE(DCRN_SDR_PFC1, (SDR_READ(DCRN_SDR_PFC1) & ~DCRN_SDR_PFC1_EPS) | (group << DCRN_SDR_PFC1_EPS_SHIFT));
+}
+
+void __init ibm440gx_tah_enable(void)
+{
+       /* Enable TAH0 and TAH1 */
+       SDR_WRITE(DCRN_SDR_MFR,SDR_READ(DCRN_SDR_MFR) &
+                       ~DCRN_SDR_MFR_TAH0);
+       SDR_WRITE(DCRN_SDR_MFR,SDR_READ(DCRN_SDR_MFR) &
+                       ~DCRN_SDR_MFR_TAH1);
+}
+
+int ibm440gx_show_cpuinfo(struct seq_file *m){
+
+       u32 l2c_cfg = mfdcr(DCRN_L2C0_CFG);
+       const char* s;
+       if (l2c_cfg & L2C_CFG_L2M){
+           switch (l2c_cfg & (L2C_CFG_ICU | L2C_CFG_DCU)){
+               case L2C_CFG_ICU: s = "I-Cache only";    break;
+               case L2C_CFG_DCU: s = "D-Cache only";    break;
+               default:          s = "I-Cache/D-Cache"; break;
+           }
+       }
+       else
+           s = "disabled";
+
+       seq_printf(m, "L2-Cache\t: %s (0x%08x 0x%08x)\n", s,
+           l2c_cfg, mfdcr(DCRN_L2C0_SR));
+
+       return 0;
+}
+
diff --git a/arch/ppc/syslib/ibm440gx_common.h b/arch/ppc/syslib/ibm440gx_common.h
new file mode 100644 (file)
index 0000000..5dbca98
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * arch/ppc/kernel/ibm440gx_common.h
+ *
+ * PPC440GX system library
+ *
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ * Copyright (c) 2003 Zultys Technologies
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#ifdef __KERNEL__
+#ifndef __PPC_SYSLIB_IBM440GX_COMMON_H
+#define __PPC_SYSLIB_IBM440GX_COMMON_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <syslib/ibm44x_common.h>
+
+/*
+ * Please, refer to the Figure 14.1 in 440GX user manual
+ *
+ * if internal UART clock is used, ser_clk is ignored
+ */
+void ibm440gx_get_clocks(struct ibm44x_clocks*, unsigned int sys_clk,
+       unsigned int ser_clk) __init;
+
+/* Enable L2 cache */
+void ibm440gx_l2c_enable(void) __init;
+
+/* Disable L2 cache */
+void ibm440gx_l2c_disable(void) __init;
+
+/* Get Ethernet Group */
+int ibm440gx_get_eth_grp(void) __init;
+
+/* Set Ethernet Group */
+void ibm440gx_set_eth_grp(int group) __init;
+
+/* Enable TAH devices */
+void ibm440gx_tah_enable(void) __init;
+
+/* Add L2C info to /proc/cpuinfo */
+int ibm440gx_show_cpuinfo(struct seq_file*);
+
+#endif /* __ASSEMBLY__ */
+#endif /* __PPC_SYSLIB_IBM440GX_COMMON_H */
+#endif /* __KERNEL__ */
diff --git a/arch/ppc/syslib/ibm44x_common.h b/arch/ppc/syslib/ibm44x_common.h
new file mode 100644 (file)
index 0000000..ee1053a
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * arch/ppc/kernel/ibm44x_common.h
+ *
+ * PPC44x system library
+ *
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ * Copyright (c) 2003 Zultys Technologies
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#ifdef __KERNEL__
+#ifndef __PPC_SYSLIB_IBM44x_COMMON_H
+#define __PPC_SYSLIB_IBM44x_COMMON_H
+
+#ifndef __ASSEMBLY__
+
+/*
+ * All clocks are in Hz
+ */
+struct ibm44x_clocks {
+       unsigned int vco;       /* VCO, 0 if system PLL is bypassed */
+       unsigned int cpu;       /* CPUCoreClk */
+       unsigned int plb;       /* PLBClk */
+       unsigned int opb;       /* OPBClk */
+       unsigned int ebc;       /* PerClk */
+       unsigned int uart0;
+       unsigned int uart1;
+};
+
+#endif /* __ASSEMBLY__ */
+#endif /* __PPC_SYSLIB_IBM44x_COMMON_H */
+#endif /* __KERNEL__ */
diff --git a/arch/ppc/syslib/ocp.c b/arch/ppc/syslib/ocp.c
new file mode 100644 (file)
index 0000000..a5156c5
--- /dev/null
@@ -0,0 +1,485 @@
+/*
+ * ocp.c
+ *
+ *      (c) Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *          Mipsys - France
+ *
+ *          Derived from work (c) Armin Kuster akuster@pacbell.net
+ *
+ *          Additional support and port to 2.6 LDM/sysfs by
+ *          Matt Porter <mporter@kernel.crashing.org>
+ *          Copyright 2004 MontaVista Software, Inc.
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ *
+ *  OCP (On Chip Peripheral) is a software emulated "bus" with a
+ *  pseudo discovery method for dumb peripherals. Usually these type
+ *  of peripherals are found on embedded SoC (System On a Chip)
+ *  processors or highly integrated system controllers that have
+ *  a host bridge and many peripherals.  Common examples where
+ *  this is already used include the PPC4xx, PPC85xx, MPC52xx,
+ *  and MV64xxx parts.
+ *
+ *  This subsystem creates a standard OCP bus type within the
+ *  device model.  The devices on the OCP bus are seeded by an
+ *  initial OCP device array created by the arch-specific code.
+ *  Device entries can be added/removed/modified through OCP
+ *  helper functions to accommodate system and board-specific
+ *  parameters commonly found in embedded systems. OCP also
+ *  provides a standard method for devices to describe extended
+ *  attributes about themselves to the system.  A standard access
+ *  method allows OCP drivers to obtain the information, both
+ *  SoC-specific and system/board-specific, needed for operation.
+ */
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <linux/bootmem.h>
+#include <linux/device.h>
+
+#include <asm/io.h>
+#include <asm/ocp.h>
+#include <asm/errno.h>
+#include <asm/rwsem.h>
+#include <asm/semaphore.h>
+
+//#define DBG(x)       printk x
+#define DBG(x)
+
+extern int mem_init_done;
+
+extern struct ocp_def core_ocp[];      /* Static list of devices, provided by
+                                          CPU core */
+
+LIST_HEAD(ocp_devices);                        /* List of all OCP devices */
+DECLARE_RWSEM(ocp_devices_sem);                /* Global semaphores for those lists */
+
+static int ocp_inited;
+
+/* Sysfs support */
+#define OCP_DEF_ATTR(field, format_string)                             \
+static ssize_t                                                         \
+show_##field(struct device *dev, char *buf)                            \
+{                                                                      \
+       struct ocp_device *odev = to_ocp_dev(dev);                      \
+                                                                       \
+       return sprintf(buf, format_string, odev->def->field);           \
+}                                                                      \
+static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
+
+OCP_DEF_ATTR(vendor, "0x%04x\n");
+OCP_DEF_ATTR(function, "0x%04x\n");
+OCP_DEF_ATTR(index, "0x%04x\n");
+#ifdef CONFIG_PTE_64BIT
+OCP_DEF_ATTR(paddr, "0x%016Lx\n");
+#else
+OCP_DEF_ATTR(paddr, "0x%08lx\n");
+#endif
+OCP_DEF_ATTR(irq, "%d\n");
+OCP_DEF_ATTR(pm, "%lu\n");
+
+void ocp_create_sysfs_dev_files(struct ocp_device *odev)
+{
+       struct device *dev = &odev->dev;
+
+       /* Current OCP device def attributes */
+       device_create_file(dev, &dev_attr_vendor);
+       device_create_file(dev, &dev_attr_function);
+       device_create_file(dev, &dev_attr_index);
+       device_create_file(dev, &dev_attr_paddr);
+       device_create_file(dev, &dev_attr_irq);
+       device_create_file(dev, &dev_attr_pm);
+       /* Current OCP device additions attributes */
+       if (odev->def->additions && odev->def->show)
+               odev->def->show(dev);
+}
+
+/**
+ *     ocp_device_match        -       Match one driver to one device
+ *     @drv: driver to match
+ *     @dev: device to match
+ *
+ *     This function returns 0 if the driver and device don't match
+ */
+static int
+ocp_device_match(struct device *dev, struct device_driver *drv)
+{
+       struct ocp_device *ocp_dev = to_ocp_dev(dev);
+       struct ocp_driver *ocp_drv = to_ocp_drv(drv);
+       const struct ocp_device_id *ids = ocp_drv->id_table;
+
+       if (!ids)
+               return 0;
+
+       while (ids->vendor || ids->function) {
+               if ((ids->vendor == OCP_ANY_ID
+                    || ids->vendor == ocp_dev->def->vendor)
+                   && (ids->function == OCP_ANY_ID
+                       || ids->function == ocp_dev->def->function))
+                       return 1;
+               ids++;
+       }
+       return 0;
+}
+
+static int
+ocp_device_probe(struct device *dev)
+{
+       int error = 0;
+       struct ocp_driver *drv;
+       struct ocp_device *ocp_dev;
+
+       drv = to_ocp_drv(dev->driver);
+       ocp_dev = to_ocp_dev(dev);
+
+       if (drv->probe) {
+               error = drv->probe(ocp_dev);
+               if (error >= 0) {
+                       ocp_dev->driver = drv;
+                       error = 0;
+               }
+       }
+       return error;
+}
+
+static int
+ocp_device_remove(struct device *dev)
+{
+       struct ocp_device *ocp_dev = to_ocp_dev(dev);
+
+       if (ocp_dev->driver) {
+               if (ocp_dev->driver->remove)
+                       ocp_dev->driver->remove(ocp_dev);
+               ocp_dev->driver = NULL;
+       }
+       return 0;
+}
+
+static int
+ocp_device_suspend(struct device *dev, u32 state)
+{
+       struct ocp_device *ocp_dev = to_ocp_dev(dev);
+       struct ocp_driver *ocp_drv = to_ocp_drv(dev->driver);
+
+       if (dev->driver && ocp_drv->suspend)
+               return ocp_drv->suspend(ocp_dev, state);
+       return 0;
+}
+
+static int
+ocp_device_resume(struct device *dev)
+{
+       struct ocp_device *ocp_dev = to_ocp_dev(dev);
+       struct ocp_driver *ocp_drv = to_ocp_drv(dev->driver);
+
+       if (dev->driver && ocp_drv->resume)
+               return ocp_drv->resume(ocp_dev);
+       return 0;
+}
+
+struct bus_type ocp_bus_type = {
+       .name = "ocp",
+       .match = ocp_device_match,
+       .suspend = ocp_device_suspend,
+       .resume = ocp_device_resume,
+};
+
+/**
+ *     ocp_register_driver     -       Register an OCP driver
+ *     @drv: pointer to statically defined ocp_driver structure
+ *
+ *     The driver's probe() callback is called either recursively
+ *     by this function or upon later call of ocp_driver_init
+ *
+ *     NOTE: Detection of devices is a 2 pass step on this implementation,
+ *     hotswap isn't supported. First, all OCP devices are put in the device
+ *     list, _then_ all drivers are probed on each match.
+ */
+int
+ocp_register_driver(struct ocp_driver *drv)
+{
+       /* initialize common driver fields */
+       drv->driver.name = drv->name;
+       drv->driver.bus = &ocp_bus_type;
+       drv->driver.probe = ocp_device_probe;
+       drv->driver.remove = ocp_device_remove;
+
+       /* register with core */
+       return driver_register(&drv->driver);
+}
+
+/**
+ *     ocp_unregister_driver   -       Unregister an OCP driver
+ *     @drv: pointer to statically defined ocp_driver structure
+ *
+ *     The driver's remove() callback is called recursively
+ *     by this function for any device already registered
+ */
+void
+ocp_unregister_driver(struct ocp_driver *drv)
+{
+       DBG(("ocp: ocp_unregister_driver(%s)...\n", drv->name));
+
+       driver_unregister(&drv->driver);
+
+       DBG(("ocp: ocp_unregister_driver(%s)... done.\n", drv->name));
+}
+
+/* Core of ocp_find_device(). Caller must hold ocp_devices_sem */
+static struct ocp_device *
+__ocp_find_device(unsigned int vendor, unsigned int function, int index)
+{
+       struct list_head        *entry;
+       struct ocp_device       *dev, *found = NULL;
+
+       DBG(("ocp: __ocp_find_device(vendor: %x, function: %x, index: %d)...\n", vendor, function, index));
+
+       list_for_each(entry, &ocp_devices) {
+               dev = list_entry(entry, struct ocp_device, link);
+               if (vendor != OCP_ANY_ID && vendor != dev->def->vendor)
+                       continue;
+               if (function != OCP_ANY_ID && function != dev->def->function)
+                       continue;
+               if (index != OCP_ANY_INDEX && index != dev->def->index)
+                       continue;
+               found = dev;
+               break;
+       }
+
+       DBG(("ocp: __ocp_find_device(vendor: %x, function: %x, index: %d)... done\n", vendor, function, index));
+
+       return found;
+}
+
+/**
+ *     ocp_find_device -       Find a device by function & index
+ *      @vendor: vendor ID of the device (or OCP_ANY_ID)
+ *     @function: function code of the device (or OCP_ANY_ID)
+ *     @idx: index of the device (or OCP_ANY_INDEX)
+ *
+ *     This function allows a lookup of a given function by its
+ *     index; it's typically used to find the MAL or ZMII associated
+ *     with an EMAC or similar horrors.
+ *      You can pass vendor, though you usually want OCP_ANY_ID there...
+ */
+struct ocp_device *
+ocp_find_device(unsigned int vendor, unsigned int function, int index)
+{
+       struct ocp_device       *dev;
+
+       down_read(&ocp_devices_sem);
+       dev = __ocp_find_device(vendor, function, index);
+       up_read(&ocp_devices_sem);
+
+       return dev;
+}
+
+/**
+ *     ocp_get_one_device -    Find a def by function & index
+ *      @vendor: vendor ID of the device (or OCP_ANY_ID)
+ *     @function: function code of the device (or OCP_ANY_ID)
+ *     @idx: index of the device (or OCP_ANY_INDEX)
+ *
+ *     This function allows a lookup of a given ocp_def by its
+ *     vendor, function, and index.  The main purpose for this is to
+ *     allow modification of the def before binding to the driver
+ */
+struct ocp_def *
+ocp_get_one_device(unsigned int vendor, unsigned int function, int index)
+{
+       struct ocp_device       *dev;
+       struct ocp_def          *found = NULL;
+
+       DBG(("ocp: ocp_get_one_device(vendor: %x, function: %x, index: %d)...\n",
+               vendor, function, index));
+
+       dev = ocp_find_device(vendor, function, index);
+
+       if (dev)
+               found = dev->def;
+
+       DBG(("ocp: ocp_get_one_device(vendor: %x, function: %x, index: %d)... done.\n",
+               vendor, function, index));
+
+       return found;
+}
+
+/**
+ *     ocp_add_one_device      -       Add a device
+ *     @def: static device definition structure
+ *
+ *     This function adds a device definition to the
+ *     device list. It may only be called before
+ *     ocp_driver_init() and will return an error
+ *     otherwise.
+ */
+int
+ocp_add_one_device(struct ocp_def *def)
+{
+       struct  ocp_device      *dev;
+
+       DBG(("ocp: ocp_add_one_device()...\n"));
+
+       /* Can't be called after ocp driver init */
+       if (ocp_inited)
+               return 1;
+
+       if (mem_init_done)
+               dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+       else
+               dev = alloc_bootmem(sizeof(*dev));
+
+       if (dev == NULL)
+               return 1;
+       memset(dev, 0, sizeof(*dev));
+       dev->def = def;
+       dev->current_state = 4;
+       sprintf(dev->name, "OCP device %04x:%04x:%04x",
+               dev->def->vendor, dev->def->function, dev->def->index);
+       down_write(&ocp_devices_sem);
+       list_add_tail(&dev->link, &ocp_devices);
+       up_write(&ocp_devices_sem);
+
+       DBG(("ocp: ocp_add_one_device()...done\n"));
+
+       return 0;
+}
+
+/**
+ *     ocp_remove_one_device - Remove a device by function & index
+ *      @vendor: vendor ID of the device (or OCP_ANY_ID)
+ *     @function: function code of the device (or OCP_ANY_ID)
+ *     @idx: index of the device (or OCP_ANY_INDEX)
+ *
+ *     This function allows removal of a given function by its
+ *     index. It may only be called before ocp_driver_init()
+ *     and will return an error otherwise.
+ */
+int
+ocp_remove_one_device(unsigned int vendor, unsigned int function, int index)
+{
+       struct ocp_device *dev;
+
+       DBG(("ocp: ocp_remove_one_device(vendor: %x, function: %x, index: %d)...\n", vendor, function, index));
+
+       /* Can't be called after ocp driver init */
+       if (ocp_inited)
+               return 1;
+
+       down_write(&ocp_devices_sem);
+       dev = __ocp_find_device(vendor, function, index);
+       list_del((struct list_head *)dev);
+       up_write(&ocp_devices_sem);
+
+       DBG(("ocp: ocp_remove_one_device(vendor: %x, function: %x, index: %d)... done.\n", vendor, function, index));
+
+       return 0;
+}
+
+/**
+ *     ocp_for_each_device     -       Iterate over OCP devices
+ *     @callback: routine to execute for each ocp device.
+ *     @arg: user data to be passed to callback routine.
+ *
+ *     This routine holds the ocp_device semaphore, so the
+ *     callback routine cannot modify the ocp_device list.
+ */
+void
+ocp_for_each_device(void(*callback)(struct ocp_device *, void *arg), void *arg)
+{
+       struct list_head *entry;
+
+       if (callback) {
+               down_read(&ocp_devices_sem);
+               list_for_each(entry, &ocp_devices)
+                       callback(list_entry(entry, struct ocp_device, link),
+                               arg);
+               up_read(&ocp_devices_sem);
+       }
+}
+
+/**
+ *     ocp_early_init  -       Init OCP device management
+ *
+ *     This function builds the list of devices before setup_arch.
+ *     This allows platform code to modify the device lists before
+ *     they are bound to drivers (changes to paddr, removing devices
+ *     etc)
+ */
+int __init
+ocp_early_init(void)
+{
+       struct ocp_def  *def;
+
+       DBG(("ocp: ocp_early_init()...\n"));
+
+       /* Fill the devices list */
+       for (def = core_ocp; def->vendor != OCP_VENDOR_INVALID; def++)
+               ocp_add_one_device(def);
+
+       DBG(("ocp: ocp_early_init()... done.\n"));
+
+       return 0;
+}
+
+/**
+ *     ocp_driver_init -       Init OCP device management
+ *
+ *     This function is meant to be called via OCP bus registration.
+ */
+static int __init
+ocp_driver_init(void)
+{
+       int ret = 0, index = 0;
+       struct device *ocp_bus;
+       struct list_head *entry;
+       struct ocp_device *dev;
+
+       if (ocp_inited)
+               return ret;
+       ocp_inited = 1;
+
+       DBG(("ocp: ocp_driver_init()...\n"));
+
+       /* Allocate/register primary OCP bus */
+       ocp_bus = kmalloc(sizeof(struct device), GFP_KERNEL);
+       if (ocp_bus == NULL)
+               return 1;
+       memset(ocp_bus, 0, sizeof(struct device));
+       strcpy(ocp_bus->bus_id, "ocp");
+
+       bus_register(&ocp_bus_type);
+
+       device_register(ocp_bus);
+
+       /* Put each OCP device into global device list */
+       list_for_each(entry, &ocp_devices) {
+               dev = list_entry(entry, struct ocp_device, link);
+               sprintf(dev->dev.bus_id, "%2.2x", index);
+               dev->dev.parent = ocp_bus;
+               dev->dev.bus = &ocp_bus_type;
+               device_register(&dev->dev);
+               ocp_create_sysfs_dev_files(dev);
+               index++;
+       }
+
+       DBG(("ocp: ocp_driver_init()... done.\n"));
+
+       return 0;
+}
+
+postcore_initcall(ocp_driver_init);
+
+EXPORT_SYMBOL(ocp_bus_type);
+EXPORT_SYMBOL(ocp_find_device);
+EXPORT_SYMBOL(ocp_register_driver);
+EXPORT_SYMBOL(ocp_unregister_driver);
diff --git a/arch/ppc64/lib/locks.c b/arch/ppc64/lib/locks.c
new file mode 100644 (file)
index 0000000..4610cb9
--- /dev/null
@@ -0,0 +1,285 @@
+/*
+ * Spin and read/write lock operations.
+ *
+ * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
+ *   Rework to support virtual processors
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <asm/hvcall.h>
+#include <asm/iSeries/HvCall.h>
+
+#ifndef CONFIG_SPINLINE
+
+/*
+ * On a system with shared processors (that is, where a physical
+ * processor is multiplexed between several virtual processors),
+ * there is no point spinning on a lock if the holder of the lock
+ * isn't currently scheduled on a physical processor.  Instead
+ * we detect this situation and ask the hypervisor to give the
+ * rest of our timeslice to the lock holder.
+ *
+ * So that we can tell which virtual processor is holding a lock,
+ * we put 0x80000000 | smp_processor_id() in the lock when it is
+ * held.  Conveniently, we have a word in the paca that holds this
+ * value.
+ */
+
+/* waiting for a spinlock... */
+#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+void __spin_yield(spinlock_t *lock)
+{
+       unsigned int lock_value, holder_cpu, yield_count;
+       struct paca_struct *holder_paca;
+
+       lock_value = lock->lock;
+       if (lock_value == 0)
+               return;
+       holder_cpu = lock_value & 0xffff;
+       BUG_ON(holder_cpu >= NR_CPUS);
+       holder_paca = &paca[holder_cpu];
+       yield_count = holder_paca->xLpPaca.xYieldCount;
+       if ((yield_count & 1) == 0)
+               return;         /* virtual cpu is currently running */
+       rmb();
+       if (lock->lock != lock_value)
+               return;         /* something has changed */
+#ifdef CONFIG_PPC_ISERIES
+       HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
+               ((u64)holder_cpu << 32) | yield_count);
+#else
+       plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
+#endif
+}
+
+#else /* SPLPAR || ISERIES */
+#define __spin_yield(x)        barrier()
+#endif
+
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
+{
+       unsigned long tmp, tmp2;
+
+       __asm__ __volatile__(
+"      lwz             %1,24(13)               # __spin_trylock\n\
+1:     lwarx           %0,0,%2\n\
+       cmpwi           0,%0,0\n\
+       bne-            2f\n\
+       stwcx.          %1,0,%2\n\
+       bne-            1b\n\
+       isync\n\
+2:"    : "=&r" (tmp), "=&r" (tmp2)
+       : "r" (&lock->lock)
+       : "cr0", "memory");
+
+       return tmp;
+}
+
+int _raw_spin_trylock(spinlock_t *lock)
+{
+       return __spin_trylock(lock) == 0;
+}
+
+EXPORT_SYMBOL(_raw_spin_trylock);
+
+void _raw_spin_lock(spinlock_t *lock)
+{
+       while (1) {
+               if (likely(__spin_trylock(lock) == 0))
+                       break;
+               do {
+                       HMT_low();
+                       __spin_yield(lock);
+               } while (likely(lock->lock != 0));
+               HMT_medium();
+       }
+}
+
+EXPORT_SYMBOL(_raw_spin_lock);
+
+void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
+{
+       unsigned long flags_dis;
+
+       while (1) {
+               if (likely(__spin_trylock(lock) == 0))
+                       break;
+               local_save_flags(flags_dis);
+               local_irq_restore(flags);
+               do {
+                       HMT_low();
+                       __spin_yield(lock);
+               } while (likely(lock->lock != 0));
+               HMT_medium();
+               local_irq_restore(flags_dis);
+       }
+}
+
+EXPORT_SYMBOL(_raw_spin_lock_flags);
+
+void spin_unlock_wait(spinlock_t *lock)
+{
+       while (lock->lock)
+               __spin_yield(lock);
+}
+
+EXPORT_SYMBOL(spin_unlock_wait);
+
+/*
+ * Waiting for a read lock or a write lock on a rwlock...
+ * This turns out to be the same for read and write locks, since
+ * we only know the holder if it is write-locked.
+ */
+#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+void __rw_yield(rwlock_t *rw)
+{
+       int lock_value;
+       unsigned int holder_cpu, yield_count;
+       struct paca_struct *holder_paca;
+
+       lock_value = rw->lock;
+       if (lock_value >= 0)
+               return;         /* no write lock at present */
+       holder_cpu = lock_value & 0xffff;
+       BUG_ON(holder_cpu >= NR_CPUS);
+       holder_paca = &paca[holder_cpu];
+       yield_count = holder_paca->xLpPaca.xYieldCount;
+       if ((yield_count & 1) == 0)
+               return;         /* virtual cpu is currently running */
+       rmb();
+       if (rw->lock != lock_value)
+               return;         /* something has changed */
+#ifdef CONFIG_PPC_ISERIES
+       HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
+               ((u64)holder_cpu << 32) | yield_count);
+#else
+       plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
+#endif
+}
+
+#else /* SPLPAR || ISERIES */
+#define __rw_yield(x)  barrier()
+#endif
+
+/*
+ * This returns the old value in the lock + 1,
+ * so we got a read lock if the return value is > 0.
+ */
+static __inline__ long __read_trylock(rwlock_t *rw)
+{
+       long tmp;
+
+       __asm__ __volatile__(
+"1:    lwarx           %0,0,%1         # read_trylock\n\
+       extsw           %0,%0\n\
+       addic.          %0,%0,1\n\
+       ble-            2f\n\
+       stwcx.          %0,0,%1\n\
+       bne-            1b\n\
+       isync\n\
+2:"    : "=&r" (tmp)
+       : "r" (&rw->lock)
+       : "cr0", "xer", "memory");
+
+       return tmp;
+}
+
+int _raw_read_trylock(rwlock_t *rw)
+{
+       return __read_trylock(rw) > 0;
+}
+
+EXPORT_SYMBOL(_raw_read_trylock);
+
+void _raw_read_lock(rwlock_t *rw)
+{
+       while (1) {
+               if (likely(__read_trylock(rw) > 0))
+                       break;
+               do {
+                       HMT_low();
+                       __rw_yield(rw);
+               } while (likely(rw->lock < 0));
+               HMT_medium();
+       }
+}
+
+EXPORT_SYMBOL(_raw_read_lock);
+
+void _raw_read_unlock(rwlock_t *rw)
+{
+       long tmp;
+
+       __asm__ __volatile__(
+       "eieio                          # read_unlock\n\
+1:     lwarx           %0,0,%1\n\
+       addic           %0,%0,-1\n\
+       stwcx.          %0,0,%1\n\
+       bne-            1b"
+       : "=&r"(tmp)
+       : "r"(&rw->lock)
+       : "cr0", "memory");
+}
+
+EXPORT_SYMBOL(_raw_read_unlock);
+
+/*
+ * This returns the old value in the lock,
+ * so we got the write lock if the return value is 0.
+ */
+static __inline__ long __write_trylock(rwlock_t *rw)
+{
+       long tmp, tmp2;
+
+       __asm__ __volatile__(
+"      lwz             %1,24(13)               # write_trylock\n\
+1:     lwarx           %0,0,%2\n\
+       cmpwi           0,%0,0\n\
+       bne-            2f\n\
+       stwcx.          %1,0,%2\n\
+       bne-            1b\n\
+       isync\n\
+2:"    : "=&r" (tmp), "=&r" (tmp2)
+       : "r" (&rw->lock)
+       : "cr0", "memory");
+
+       return tmp;
+}
+
+int _raw_write_trylock(rwlock_t *rw)
+{
+       return __write_trylock(rw) == 0;
+}
+
+EXPORT_SYMBOL(_raw_write_trylock);
+
+void _raw_write_lock(rwlock_t *rw)
+{
+       while (1) {
+               if (likely(__write_trylock(rw) == 0))
+                       break;
+               do {
+                       HMT_low();
+                       __rw_yield(rw);
+               } while (likely(rw->lock != 0));
+               HMT_medium();
+       }
+}
+
+EXPORT_SYMBOL(_raw_write_lock);
+
+#endif /* CONFIG_SPINLINE */
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
new file mode 100644 (file)
index 0000000..dea4957
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ *  arch/s390/lib/string.c
+ *    Optimized string functions
+ *
+ *  S390 version
+ *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#define IN_ARCH_STRING_C 1
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+/*
+ * Helper functions to find the end of a string
+ */
+/*
+ * Return the address of the terminating '\0' of @s.
+ * SRST searches from the address in %1 for the byte held in r0
+ * (zero here) and leaves the match address in %0; "jo 0b" resumes
+ * the scan when the instruction stops early (cc 3, cpu-determined).
+ * The initial %0 value acts as the search limit -- here 0, i.e.
+ * effectively unlimited wrap-around for a NUL-terminated string.
+ */
+static inline char *__strend(const char *s)
+{
+       register unsigned long r0 asm("0") = 0;
+
+       asm volatile ("0: srst  %0,%1\n"
+                     "   jo    0b"
+                     : "+d" (r0), "+a" (s) :  : "cc" );
+       return (char *) r0;
+}
+
+/*
+ * Return the address of the first '\0' within the first @n bytes of
+ * @s, or @s + @n if there is none: @p starts as the scan limit, and
+ * SRST stops either at the NUL byte (r0 == 0) or at that limit,
+ * leaving the stop address in @p.
+ */
+static inline char *__strnend(const char *s, size_t n)
+{
+       register unsigned long r0 asm("0") = 0;
+       const char *p = s + n;
+
+       asm volatile ("0: srst  %0,%1\n"
+                     "   jo    0b"
+                     : "+d" (p), "+a" (s) : "d" (r0) : "cc" );
+       return (char *) p;
+}
+
+/**
+ * strlen - Find the length of a string
+ * @s: The string to be sized
+ *
+ * returns the length of @s
+ */
+size_t strlen(const char *s)
+{
+       return __strend(s) - s;
+}
+EXPORT_SYMBOL_NOVERS(strlen);
+
+/**
+ * strnlen - Find the length of a length-limited string
+ * @s: The string to be sized
+ * @n: The maximum number of bytes to search
+ *
+ * returns the minimum of the length of @s and @n
+ */
+size_t strnlen(const char * s, size_t n)
+{
+       return __strnend(s, n) - s;
+}
+EXPORT_SYMBOL_NOVERS(strnlen);
+
+/**
+ * strcpy - Copy a %NUL terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ *
+ * returns a pointer to @dest
+ */
+char *strcpy(char *dest, const char *src)
+{
+       register int r0 asm("0") = 0;
+       char *ret = dest;
+
+       asm volatile ("0: mvst  %0,%1\n"
+                     "   jo    0b"
+                     : "+&a" (dest), "+&a" (src) : "d" (r0)
+                     : "cc", "memory" );
+       return ret;
+}
+EXPORT_SYMBOL_NOVERS(strcpy);
+
+/**
+ * strlcpy - Copy a %NUL terminated string into a sized buffer
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ *
+ * Compatible with *BSD: the result is always a valid
+ * NUL-terminated string that fits in the buffer (unless,
+ * of course, the buffer size is zero). It does not pad
+ * out the result like strncpy() does.
+ */
+size_t strlcpy(char *dest, const char *src, size_t size)
+{
+       size_t ret = __strend(src) - src;
+
+       if (size) {
+               size_t len = (ret >= size) ? size-1 : ret;
+               dest[len] = '\0';
+               __builtin_memcpy(dest, src, len);
+       }
+       return ret;
+}
+EXPORT_SYMBOL_NOVERS(strlcpy);
+
+/**
+ * strncpy - Copy a length-limited, %NUL-terminated string
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @n: The maximum number of bytes to copy
+ *
+ * The result is not %NUL-terminated if the source exceeds
+ * @n bytes.
+ */
+char *strncpy(char *dest, const char *src, size_t n)
+{
+       size_t len = __strnend(src, n) - src;
+       __builtin_memset(dest + len, 0, n - len);
+       __builtin_memcpy(dest, src, len);
+       return dest;
+}
+EXPORT_SYMBOL_NOVERS(strncpy);
+
+/**
+ * strcat - Append one %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ *
+ * returns a pointer to @dest
+ */
+char *strcat(char *dest, const char *src)
+{
+       register int r0 asm("0") = 0;
+       unsigned long dummy;
+       char *ret = dest;
+
+       asm volatile ("0: srst  %0,%1\n"
+                     "   jo    0b\n"
+                     "1: mvst  %0,%2\n"
+                     "   jo    1b"
+                     : "=&a" (dummy), "+a" (dest), "+a" (src)
+                     : "d" (r0), "0" (0UL) : "cc", "memory" );
+       return ret;
+}
+EXPORT_SYMBOL_NOVERS(strcat);
+
+/**
+ * strlcat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @n: The size of the destination buffer.
+ */
+size_t strlcat(char *dest, const char *src, size_t n)
+{
+       size_t dsize = __strend(dest) - dest;
+       size_t len = __strend(src) - src;
+       size_t res = dsize + len;
+
+       if (dsize < n) {
+               dest += dsize;
+               n -= dsize;
+               if (len >= n)
+                       len = n - 1;
+               dest[len] = '\0';
+               __builtin_memcpy(dest, src, len);
+       }
+       return res;
+}
+EXPORT_SYMBOL_NOVERS(strlcat);
+
+/**
+ * strncat - Append a length-limited, %NUL-terminated string to another
+ * @dest: The string to be appended to
+ * @src: The string to append to it
+ * @n: The maximum numbers of bytes to copy
+ *
+ * returns a pointer to @dest
+ *
+ * Note that in contrast to strncpy, strncat ensures the result is
+ * terminated.
+ */
+char *strncat(char *dest, const char *src, size_t n)
+{
+       size_t len = __strnend(src, n) - src;
+       char *p = __strend(dest);
+
+       p[len] = '\0';
+       __builtin_memcpy(p, src, len);
+       return dest;
+}
+EXPORT_SYMBOL_NOVERS(strncat);
+
+/**
+ * strcmp - Compare two strings
+ * @cs: One string
+ * @ct: Another string
+ *
+ * returns   0 if @cs and @ct are equal,
+ *         < 0 if @cs is less than @ct
+ *         > 0 if @cs is greater than @ct
+ */
+int strcmp(const char *cs, const char *ct)
+{
+       register int r0 asm("0") = 0;
+       int ret = 0;
+
+       asm volatile ("0: clst %2,%3\n"
+                     "   jo   0b\n"
+                     "   je   1f\n"
+                     "   ic   %0,0(%2)\n"
+                     "   ic   %1,0(%3)\n"
+                     "   sr   %0,%1\n"
+                     "1:"
+                     : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct)
+                     : : "cc" );
+       return ret;
+}
+EXPORT_SYMBOL_NOVERS(strcmp);
+
+/**
+ * strrchr - Find the last occurrence of a character in a string
+ * @s: The string to be searched
+ * @c: The character to search for
+ */
+char * strrchr(const char * s, int c)
+{
+       /* Scan from the terminating '\0' (index len) down to index 0.
+        * The previous loop guarded with "if (len)" and stopped at
+        * "--len > 0", so s[0] was never compared and the empty string
+        * never matched c == '\0'; scanning down to index 0 inclusive
+        * (signed index) fixes both, while strrchr(s, 0) still finds
+        * the terminator first.
+        */
+       ssize_t len = __strend(s) - s;
+
+       do {
+               if (s[len] == (char) c)
+                       return (char *) s + len;
+       } while (--len >= 0);
+       return 0;
+}
+EXPORT_SYMBOL_NOVERS(strrchr);
+
+/**
+ * strstr - Find the first substring in a %NUL terminated string
+ * @s1: The string to be searched
+ * @s2: The string to search for
+ */
+char * strstr(const char * s1,const char * s2)
+{
+       int l1, l2;
+
+       l2 = __strend(s2) - s2;
+       if (!l2)
+               return (char *) s1;     /* empty needle matches at the start */
+       l1 = __strend(s1) - s1;
+       /* Naive scan: at each haystack position compare the next l2
+        * bytes with the needle using CLCLE, which takes even/odd
+        * register pairs: 2/3 = haystack position + length,
+        * 4/5 = needle + length (both lengths are l2).
+        */
+       while (l1-- >= l2) {
+               register unsigned long r2 asm("2") = (unsigned long) s1;
+               register unsigned long r3 asm("3") = (unsigned long) l2;
+               register unsigned long r4 asm("4") = (unsigned long) s2;
+               register unsigned long r5 asm("5") = (unsigned long) l2;
+               int cc;
+
+               /* ipm + srl extract the condition code into cc. */
+               asm volatile ("0: clcle %1,%3,0\n"
+                             "   jo    0b\n"
+                             "   ipm   %0\n"
+                             "   srl   %0,28"
+                             : "=&d" (cc), "+a" (r2), "+a" (r3),
+                               "+a" (r4), "+a" (r5) : : "cc" );
+               if (!cc)
+                       return (char *) s1;     /* cc 0: operands equal */
+               s1++;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_NOVERS(strstr);
+
+/**
+ * memchr - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or %NULL
+ * if @c is not found
+ */
+void *memchr(const void *s, int c, size_t n)
+{
+       register int r0 asm("0") = (char) c;    /* only the low byte is matched */
+       const void *ret = s + n;                /* scan limit for SRST */
+
+       /* SRST searches [s, s+n) for the byte in r0.  On a hit the
+        * condition code is 1 ("jl 1f" keeps the match address in
+        * %0); otherwise "la %0,0" turns the result into NULL.
+        */
+       asm volatile ("0: srst  %0,%1\n"
+                     "   jo    0b\n"
+                     "   jl    1f\n"
+                     "   la    %0,0\n"
+                     "1:"
+                     : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+       return (void *) ret;
+}
+EXPORT_SYMBOL_NOVERS(memchr);
+
+/**
+ * memcmp - Compare two areas of memory
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @n: The size of the area.
+ */
+int memcmp(const void *cs, const void *ct, size_t n)
+{
+       /* CLCLE takes even/odd register pairs: 2/3 = first operand +
+        * length, 4/5 = second operand + length.
+        */
+       register unsigned long r2 asm("2") = (unsigned long) cs;
+       register unsigned long r3 asm("3") = (unsigned long) n;
+       register unsigned long r4 asm("4") = (unsigned long) ct;
+       register unsigned long r5 asm("5") = (unsigned long) n;
+       int ret;
+
+       asm volatile ("0: clcle %1,%3,0\n"
+                     "   jo    0b\n"
+                     "   ipm   %0\n"
+                     "   srl   %0,28"
+                     : "=&d" (ret), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5)
+                     : : "cc" );
+       /* On mismatch r2/r4 point at the first differing bytes; the
+        * extracted condition code only encodes the ordering, so
+        * recompute the conventional byte-difference return value.
+        */
+       if (ret)
+               ret = *(char *) r2 - *(char *) r4;
+       return ret;
+}
+EXPORT_SYMBOL_NOVERS(memcmp);
+
+/**
+ * memscan - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first occurrence of @c, or 1 byte past
+ * the area if @c is not found
+ */
+void *memscan(void *s, int c, size_t n)
+{
+       register int r0 asm("0") = (char) c;
+       const void *ret = s + n;
+
+       asm volatile ("0: srst  %0,%1\n"
+                     "   jo    0b\n"
+                     : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" );
+       return (void *) ret;
+}
+EXPORT_SYMBOL_NOVERS(memscan);
+
+/**
+ * memcpy - Copy one area of memory to another
+ * @dest: Where to copy to
+ * @src: Where to copy from
+ * @n: The size of the area.
+ *
+ * returns a pointer to @dest
+ */
+void *memcpy(void *dest, const void *src, size_t n)
+{
+       return __builtin_memcpy(dest, src, n);
+}
+EXPORT_SYMBOL_NOVERS(memcpy);
+
+/**
+ * bcopy - Copy one area of memory to another
+ * @src: Where to copy from
+ * @dest: Where to copy to
+ * @n: The size of the area.
+ *
+ * Note that this is the same as memcpy(), with the arguments reversed.
+ * memcpy() is the standard, bcopy() is a legacy BSD function.
+ */
+void bcopy(const void *srcp, void *destp, size_t n)
+{
+       __builtin_memcpy(destp, srcp, n);
+}
+EXPORT_SYMBOL_NOVERS(bcopy);
+
+/**
+ * memset - Fill a region of memory with the given value
+ * @s: Pointer to the start of the area.
+ * @c: The byte to fill the area with
+ * @n: The size of the area.
+ *
+ * returns a pointer to @s
+ */
+void *memset(void *s, int c, size_t n)
+{
+       char *xs;
+
+       /* Zero-fill is the common case: hand it to the compiler
+        * builtin so gcc can emit its optimized clearing sequence.
+        */
+       if (c == 0)
+               return __builtin_memset(s, 0, n);
+
+       /* Generic byte-store loop; only the low byte of c is stored. */
+       xs = (char *) s;
+       if (n > 0)
+               do {
+                       *xs++ = c;
+               } while (--n > 0);
+       return s;
+}
+EXPORT_SYMBOL_NOVERS(memset);
+
+/*
+ * missing exports for string functions defined in lib/string.c
+ */
+EXPORT_SYMBOL_NOVERS(memmove);
+EXPORT_SYMBOL_NOVERS(strchr);
+EXPORT_SYMBOL_NOVERS(strnchr);
+EXPORT_SYMBOL_NOVERS(strncmp);
+EXPORT_SYMBOL_NOVERS(strpbrk);
diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c
new file mode 100644 (file)
index 0000000..420dfba
--- /dev/null
@@ -0,0 +1,125 @@
+#include <asm/bitops.h>
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The maximum size to search
+ * @offset: The bitnumber to start searching at
+ */
+unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
+{
+       unsigned long *p = addr + (offset >> 6);        /* 64-bit word containing 'offset' */
+       unsigned long result = offset & ~63UL;          /* bit index of that word's bit 0 */
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;    /* out of range: report 'not found' as size */
+       size -= result;
+       offset &= 63UL;
+       if (offset) {
+               /* Partial first word: clear bits below 'offset'. */
+               tmp = *(p++);
+               tmp &= (~0UL << offset);
+               if (size < 64)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= 64;
+               result += 64;
+       }
+       /* Whole aligned words. */
+       while (size & ~63UL) {
+               if ((tmp = *(p++)))
+                       goto found_middle;
+               result += 64;
+               size -= 64;
+       }
+       if (!size)
+               return result;
+       tmp = *p;       /* partial last word */
+
+found_first:
+       tmp &= (~0UL >> (64 - size));   /* clear bits at or beyond 'size' */
+       if (tmp == 0UL)        /* Are any bits set? */
+               return result + size; /* Nope. */
+found_middle:
+       return result + __ffs(tmp);
+}
+
+/* find_next_zero_bit() finds the first zero bit in a bit string of length
+ * 'size' bits, starting the search at bit 'offset'. This is largely based
+ * on Linus's ALPHA routines, which are pretty portable BTW.
+ */
+
+unsigned long find_next_zero_bit(unsigned long *addr, unsigned long size, unsigned long offset)
+{
+       unsigned long *p = addr + (offset >> 6);
+       unsigned long result = offset & ~63UL;
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset &= 63UL;
+       if (offset) {
+               tmp = *(p++);
+               tmp |= ~0UL >> (64-offset);
+               if (size < 64)
+                       goto found_first;
+               if (~tmp)
+                       goto found_middle;
+               size -= 64;
+               result += 64;
+       }
+       while (size & ~63UL) {
+               if (~(tmp = *(p++)))
+                       goto found_middle;
+               result += 64;
+               size -= 64;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+
+found_first:
+       tmp |= ~0UL << size;
+       if (tmp == ~0UL)        /* Are any bits zero? */
+               return result + size; /* Nope. */
+found_middle:
+       return result + ffz(tmp);
+}
+
+unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset)
+{
+       unsigned long *p = addr + (offset >> 6);
+       unsigned long result = offset & ~63UL;
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset &= 63UL;
+       if(offset) {
+               tmp = __swab64p(p++);
+               tmp |= (~0UL >> (64-offset));
+               if(size < 64)
+                       goto found_first;
+               if(~tmp)
+                       goto found_middle;
+               size -= 64;
+               result += 64;
+       }
+       while(size & ~63) {
+               if(~(tmp = __swab64p(p++)))
+                       goto found_middle;
+               result += 64;
+               size -= 64;
+       }
+       if(!size)
+               return result;
+       tmp = __swab64p(p);
+found_first:
+       tmp |= (~0UL << size);
+       if (tmp == ~0UL)        /* Are any bits zero? */
+               return result + size; /* Nope. */
+found_middle:
+       return result + ffz(tmp);
+}
diff --git a/arch/sparc64/lib/splock.S b/arch/sparc64/lib/splock.S
new file mode 100644 (file)
index 0000000..d17a3ba
--- /dev/null
@@ -0,0 +1,23 @@
+/* splock.S: Spinlock primitives too large to inline.
+ *
+ * Copyright (C) 2004 David S. Miller (davem@redhat.com)
+ */
+
+       .text
+       .align  64
+
+       .globl  _raw_spin_lock_flags
+_raw_spin_lock_flags:  /* %o0 = lock_ptr, %o1 = irq_flags */
+1:     ldstub          [%o0], %g7
+       brnz,pn         %g7, 2f
+        membar         #StoreLoad | #StoreStore
+       retl
+        nop
+
+2:     rdpr            %pil, %g2               ! Save PIL
+       wrpr            %o1, %pil               ! Set previous PIL
+3:     ldub            [%o0], %g7              ! Spin on lock set
+       brnz,pt         %g7, 3b
+        membar         #LoadLoad
+       ba,pt           %xcc, 1b                ! Retry lock acquire
+        wrpr           %g2, %pil               ! Restore PIL
diff --git a/arch/x86_64/kernel/domain.c b/arch/x86_64/kernel/domain.c
new file mode 100644 (file)
index 0000000..0694958
--- /dev/null
@@ -0,0 +1,93 @@
+#include <linux/init.h>
+#include <linux/sched.h>
+
+/* Don't do any NUMA setup on Opteron right now. They seem to be
+   better off with flat scheduling. This is just for SMT. */
+
+#ifdef CONFIG_SCHED_SMT
+
+static struct sched_group sched_group_cpus[NR_CPUS];
+static struct sched_group sched_group_phys[NR_CPUS];
+static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
+static DEFINE_PER_CPU(struct sched_domain, phys_domains);
+__init void arch_init_sched_domains(void)
+{
+       int i;
+       struct sched_group *first = NULL, *last = NULL;
+
+       /* Set up domains */
+       for_each_cpu(i) {
+               struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
+               struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
+
+               *cpu_domain = SD_SIBLING_INIT;
+               /* Disable SMT NICE for CMP */
+               /* RED-PEN use a generic flag */ 
+               if (cpu_data[i].x86_vendor == X86_VENDOR_AMD) 
+                       cpu_domain->flags &= ~SD_SHARE_CPUPOWER; 
+               cpu_domain->span = cpu_sibling_map[i];
+               cpu_domain->parent = phys_domain;
+               cpu_domain->groups = &sched_group_cpus[i];
+
+               *phys_domain = SD_CPU_INIT;
+               phys_domain->span = cpu_possible_map;
+               phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
+       }
+
+       /* Set up CPU (sibling) groups */
+       for_each_cpu(i) {
+               struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
+               int j;
+               first = last = NULL;
+
+               if (i != first_cpu(cpu_domain->span))
+                       continue;
+
+               for_each_cpu_mask(j, cpu_domain->span) {
+                       struct sched_group *cpu = &sched_group_cpus[j];
+
+                       cpus_clear(cpu->cpumask);
+                       cpu_set(j, cpu->cpumask);
+                       cpu->cpu_power = SCHED_LOAD_SCALE;
+
+                       if (!first)
+                               first = cpu;
+                       if (last)
+                               last->next = cpu;
+                       last = cpu;
+               }
+               last->next = first;
+       }
+
+       first = last = NULL;
+       /* Set up physical groups */
+       for_each_cpu(i) {
+               struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
+               struct sched_group *cpu = &sched_group_phys[i];
+
+               if (i != first_cpu(cpu_domain->span))
+                       continue;
+
+               cpu->cpumask = cpu_domain->span;
+               /*
+                * Make each extra sibling increase power by 10% of
+                * the basic CPU. This is very arbitrary.
+                */
+               cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
+
+               if (!first)
+                       first = cpu;
+               if (last)
+                       last->next = cpu;
+               last = cpu;
+       }
+       last->next = first;
+
+       mb();
+       for_each_cpu(i) {
+               struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
+               cpu_attach_domain(cpu_domain, i);
+       }
+}
+
+#endif
diff --git a/drivers/char/drm/drm_irq.h b/drivers/char/drm/drm_irq.h
new file mode 100644 (file)
index 0000000..1d1d951
--- /dev/null
@@ -0,0 +1,371 @@
+/**
+ * \file drm_irq.h 
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#include <linux/interrupt.h>   /* For task queue support */
+
+#ifndef __HAVE_SHARED_IRQ
+#define __HAVE_SHARED_IRQ      0
+#endif
+
+#if __HAVE_SHARED_IRQ
+#define DRM_IRQ_TYPE           SA_SHIRQ
+#else
+#define DRM_IRQ_TYPE           0
+#endif
+
+/**
+ * Get interrupt from bus id.
+ * 
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ * 
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device that this DRM instance attached to.
+ */
+int DRM(irq_by_busid)(struct inode *inode, struct file *filp,
+                  unsigned int cmd, unsigned long arg)
+{
+       drm_file_t *priv = filp->private_data;
+       drm_device_t *dev = priv->dev;
+       drm_irq_busid_t p;
+
+       if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p)))
+               return -EFAULT;
+
+       if ((p.busnum >> 8) != dev->pci_domain ||
+           (p.busnum & 0xff) != dev->pci_bus ||
+           p.devnum != dev->pci_slot ||
+           p.funcnum != dev->pci_func)
+               return -EINVAL;
+
+       p.irq = dev->irq;
+
+       DRM_DEBUG("%d:%d:%d => IRQ %d\n",
+                 p.busnum, p.devnum, p.funcnum, p.irq);
+       if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p)))
+               return -EFAULT;
+       return 0;
+}
+
+#if __HAVE_IRQ
+
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ * \param irq IRQ number.
+ *
+ * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
+ * \c DRM(driver_irq_preinstall)() and \c DRM(driver_irq_postinstall)() functions
+ * before and after the installation.
+ */
+int DRM(irq_install)( drm_device_t *dev )
+{
+       int ret;
+       if ( dev->irq == 0 )
+               return -EINVAL;
+
+       down( &dev->struct_sem );
+
+       /* Driver must have been initialized */
+       if ( !dev->dev_private ) {
+               up( &dev->struct_sem );
+               return -EINVAL;
+       }
+
+       if ( dev->irq_enabled ) {
+               up( &dev->struct_sem );
+               return -EBUSY;
+       }
+       dev->irq_enabled = 1;
+       up( &dev->struct_sem );
+
+       DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
+
+#if __HAVE_DMA
+       dev->dma->next_buffer = NULL;
+       dev->dma->next_queue = NULL;
+       dev->dma->this_buffer = NULL;
+#endif
+
+#if __HAVE_IRQ_BH
+       INIT_WORK(&dev->work, DRM(irq_immediate_bh), dev);
+#endif
+
+#if __HAVE_VBL_IRQ
+       init_waitqueue_head(&dev->vbl_queue);
+
+       spin_lock_init( &dev->vbl_lock );
+
+       INIT_LIST_HEAD( &dev->vbl_sigs.head );
+
+       dev->vbl_pending = 0;
+#endif
+
+                               /* Before installing handler */
+       DRM(driver_irq_preinstall)(dev);
+
+                               /* Install handler */
+       ret = request_irq( dev->irq, DRM(irq_handler),
+                          DRM_IRQ_TYPE, dev->devname, dev );
+       if ( ret < 0 ) {
+               down( &dev->struct_sem );
+               dev->irq_enabled = 0;
+               up( &dev->struct_sem );
+               return ret;
+       }
+
+                               /* After installing handler */
+       DRM(driver_irq_postinstall)(dev);
+
+       return 0;
+}
+
+/**
+ * Uninstall the IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Calls the driver's \c DRM(driver_irq_uninstall)() function, and stops the irq.
+ */
+int DRM(irq_uninstall)( drm_device_t *dev )
+{
+       int irq_enabled;
+
+       down( &dev->struct_sem );
+       irq_enabled = dev->irq_enabled;
+       dev->irq_enabled = 0;
+       up( &dev->struct_sem );
+
+       if ( !irq_enabled )
+               return -EINVAL;
+
+       DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
+
+       DRM(driver_irq_uninstall)( dev );
+
+       free_irq( dev->irq, dev );
+
+       return 0;
+}
+
+/**
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_control structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls irq_install() or irq_uninstall() according to \p arg.
+ */
+int DRM(control)( struct inode *inode, struct file *filp,
+                 unsigned int cmd, unsigned long arg )
+{
+       drm_file_t *priv = filp->private_data;
+       drm_device_t *dev = priv->dev;
+       drm_control_t ctl;
+
+       if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
+               return -EFAULT;
+
+       switch ( ctl.func ) {
+       case DRM_INST_HANDLER:
+               if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+                   ctl.irq != dev->irq)
+                       return -EINVAL;
+               return DRM(irq_install)( dev );
+       case DRM_UNINST_HANDLER:
+               return DRM(irq_uninstall)( dev );
+       default:
+               return -EINVAL;
+       }
+}
+
+#if __HAVE_VBL_IRQ
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the IRQ is installed. 
+ *
+ * If a signal is requested checks if this task has already scheduled the same signal
+ * for the same vblank sequence number - nothing to be done in
+ * that case. If the number of tasks waiting for the interrupt exceeds 100 the
+ * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
+ * task.
+ *
+ * If a signal is not requested, then calls vblank_wait().
+ */
+int DRM(wait_vblank)( DRM_IOCTL_ARGS )
+{
+       drm_file_t *priv = filp->private_data;
+       drm_device_t *dev = priv->dev;
+       drm_wait_vblank_t vblwait;
+       struct timeval now;
+       int ret = 0;
+       unsigned int flags;
+
+       if (!dev->irq)
+               return -EINVAL;
+
+       DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
+                                 sizeof(vblwait) );
+
+       switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
+       case _DRM_VBLANK_RELATIVE:
+               /* Convert a relative request into an absolute one. */
+               vblwait.request.sequence += atomic_read( &dev->vbl_received );
+               vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
+               /* fall through */
+       case _DRM_VBLANK_ABSOLUTE:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+       
+       if ( flags & _DRM_VBLANK_SIGNAL ) {
+               unsigned long irqflags;
+               drm_vbl_sig_t *vbl_sig;
+               
+               vblwait.reply.sequence = atomic_read( &dev->vbl_received );
+
+               spin_lock_irqsave( &dev->vbl_lock, irqflags );
+
+               /* Check if this task has already scheduled the same signal
+                * for the same vblank sequence number; nothing to be done in
+                * that case
+                */
+               list_for_each_entry( vbl_sig, &dev->vbl_sigs.head, head ) {
+                       if (vbl_sig->sequence == vblwait.request.sequence
+                           && vbl_sig->info.si_signo == vblwait.request.signal
+                           && vbl_sig->task == current)
+                       {
+                               spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+                               goto done;
+                       }
+               }
+
+               /* Cap outstanding entries to bound kernel allocations
+                * that userspace can trigger.
+                */
+               if ( dev->vbl_pending >= 100 ) {
+                       spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+                       return -EBUSY;
+               }
+
+               dev->vbl_pending++;
+
+               spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+
+               if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) {
+                       return -ENOMEM;
+               }
+
+               memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );
+
+               vbl_sig->sequence = vblwait.request.sequence;
+               vbl_sig->info.si_signo = vblwait.request.signal;
+               vbl_sig->task = current;
+
+               spin_lock_irqsave( &dev->vbl_lock, irqflags );
+
+               /* NOTE(review): the cast relies on 'head' being the first
+                * member of drm_vbl_sig_t -- verify against its definition.
+                */
+               list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );
+
+               spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
+       } else {
+               /* No signal requested: block until the sequence arrives. */
+               ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );
+
+               do_gettimeofday( &now );
+               vblwait.reply.tval_sec = now.tv_sec;
+               vblwait.reply.tval_usec = now.tv_usec;
+       }
+
+done:
+       DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
+                               sizeof(vblwait) );
+
+       return ret;
+}
+
+/**
+ * Send the VBLANK signals.
+ *
+ * \param dev DRM device.
+ *
+ * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
+ *
+ * A signal is sent once the received vblank counter has reached the
+ * sequence a task registered for (the comparison is wrap-safe).
+ */
+void DRM(vbl_send_signals)( drm_device_t *dev )
+{
+       struct list_head *list, *tmp;
+       drm_vbl_sig_t *vbl_sig;
+       unsigned int vbl_seq = atomic_read( &dev->vbl_received );
+       unsigned long flags;
+
+       spin_lock_irqsave( &dev->vbl_lock, flags );
+
+       list_for_each_safe( list, tmp, &dev->vbl_sigs.head ) {
+               vbl_sig = list_entry( list, drm_vbl_sig_t, head );
+               if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
+                       vbl_sig->info.si_code = vbl_seq;
+                       send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );
+
+                       list_del( list );
+
+                       DRM_FREE( vbl_sig, sizeof(*vbl_sig) );
+
+                       dev->vbl_pending--;
+               }
+       }
+
+       spin_unlock_irqrestore( &dev->vbl_lock, flags );
+}
+
+#endif /* __HAVE_VBL_IRQ */
+
+#endif /* __HAVE_IRQ */
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
new file mode 100644 (file)
index 0000000..3b5f8d3
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+   This file is auto-generated from the drm_pciids.txt in the DRM CVS
+   Please contact dri-devel@lists.sf.net to add new cards to this list
+*/
+/* Each entry below is a 7-field initializer -- presumably
+ * {vendor, device, subvendor, subdevice, class, class_mask, driver_data}
+ * in struct pci_device_id order (TODO confirm) -- and every table is
+ * terminated by an all-zero sentinel entry.
+ *
+ * NOTE(review): the radeon table lists 0x1002/0x4242 twice (two adjacent
+ * identical lines).  The second occurrence is redundant and may be a typo
+ * for a neighbouring device id -- confirm against upstream drm_pciids.txt
+ * before changing it. */
+#define radeon_PCI_IDS \
+       {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x514A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x514B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x514E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x514F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5963, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5968, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define r128_PCI_IDS \
+       {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define mga_PCI_IDS \
+       {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define mach64_PCI_IDS \
+       {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define sisdrv_PCI_IDS \
+       {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define tdfx_PCI_IDS \
+       {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define viadrv_PCI_IDS \
+       {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define i810_PCI_IDS \
+       {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define i830_PCI_IDS \
+       {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define gamma_PCI_IDS \
+       {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+#define savage_PCI_IDS \
+       {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
+/* ffb carries no PCI IDs here: the table is just the terminator. */
+#define ffb_PCI_IDS \
+       {0, 0, 0}
+
diff --git a/drivers/char/watchdog/ixp4xx_wdt.c b/drivers/char/watchdog/ixp4xx_wdt.c
new file mode 100644 (file)
index 0000000..7949365
--- /dev/null
@@ -0,0 +1,233 @@
+/*
+ * drivers/watchdog/ixp4xx_wdt.c
+ *
+ * Watchdog driver for Intel IXP4xx network processors
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright 2004 (c) MontaVista Software, Inc.
+ * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+
+#include <asm/hardware.h>
+#include <asm/bitops.h>
+#include <asm/uaccess.h>
+
+/* Module state.  With CONFIG_WATCHDOG_NOWAYOUT the driver never honours
+ * the magic-close character, so a started watchdog cannot be stopped
+ * (see ixp4xx_wdt_write/ixp4xx_wdt_release below). */
+#ifdef CONFIG_WATCHDOG_NOWAYOUT
+static int nowayout = 1;
+#else
+static int nowayout = 0;
+#endif
+static int heartbeat = 60;     /* (secs) Default is 1 minute */
+static unsigned long wdt_status;       /* bit field: WDT_IN_USE, WDT_OK_TO_CLOSE */
+static unsigned long boot_status;      /* WDIOF_CARDRESET if last reset was a WDT warm reset */
+
+/* Watchdog ticks per second: the counter runs at the peripheral bus clock. */
+#define WDT_TICK_RATE (IXP4XX_PERIPHERAL_BUS_CLOCK * 1000000UL)
+
+#define        WDT_IN_USE              0
+#define        WDT_OK_TO_CLOSE         1
+
+/*
+ * Arm (or re-arm) the watchdog for 'heartbeat' seconds.
+ * The OSWK/OSWE/OSWT write sequence follows the IXP4xx register
+ * protocol: write the key, stop the counter, reload it, enable
+ * count + reset, then clear the key (TODO confirm against the IXP4xx
+ * developer's manual).
+ */
+static void
+wdt_enable(void)
+{
+       *IXP4XX_OSWK = IXP4XX_WDT_KEY;
+       *IXP4XX_OSWE = 0;
+       *IXP4XX_OSWT = WDT_TICK_RATE * heartbeat;
+       *IXP4XX_OSWE = IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE;
+       *IXP4XX_OSWK = 0;
+}
+
+/*
+ * Stop the watchdog: same key sequence as wdt_enable(), but the enable
+ * register is left cleared so the counter no longer runs.
+ */
+static void
+wdt_disable(void)
+{
+       *IXP4XX_OSWK = IXP4XX_WDT_KEY;
+       *IXP4XX_OSWE = 0;
+       *IXP4XX_OSWK = 0;
+}
+
+/*
+ * Open /dev/watchdog.  Only one opener is allowed at a time
+ * (enforced atomically via test_and_set_bit); opening arms the timer
+ * immediately and clears any stale magic-close state.
+ */
+static int
+ixp4xx_wdt_open(struct inode *inode, struct file *file)
+{
+       if (test_and_set_bit(WDT_IN_USE, &wdt_status))
+               return -EBUSY;
+
+       clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+       wdt_enable();
+
+       return 0;
+}
+
+/*
+ * Any write pings (re-arms) the watchdog.  Unless nowayout is set, the
+ * data is also scanned for the magic character 'V', which marks the
+ * device as safe to stop on the next close (standard watchdog
+ * magic-close protocol).  Returns the full length on success.
+ */
+static ssize_t
+ixp4xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
+{
+       /* Can't seek (pwrite) on this device  */
+       if (ppos != &file->f_pos)
+               return -ESPIPE;
+
+       if (len) {
+               if (!nowayout) {
+                       size_t i;
+
+                       /* Any non-'V' write invalidates a previous 'V'. */
+                       clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+                       for (i = 0; i != len; i++) {
+                               char c;
+
+                               if (get_user(c, data + i))
+                                       return -EFAULT;
+                               if (c == 'V')
+                                       set_bit(WDT_OK_TO_CLOSE, &wdt_status);
+                       }
+               }
+               wdt_enable();
+       }
+
+       return len;
+}
+
+/* Capability record returned by WDIOC_GETSUPPORT. */
+static struct watchdog_info ident = {
+       .options        = WDIOF_CARDRESET | WDIOF_MAGICCLOSE |
+                         WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+       .identity       = "IXP4xx Watchdog",
+};
+
+
+/*
+ * Standard watchdog ioctl interface.
+ *
+ * GETSUPPORT copies 'ident' out; GETSTATUS always reports 0;
+ * GETBOOTSTATUS reports whether the last reset came from the watchdog;
+ * SETTIMEOUT validates 1..60 seconds, re-arms, then falls through to
+ * GETTIMEOUT so the caller gets the effective value back; KEEPALIVE
+ * just re-arms.  Unknown commands return -ENOIOCTLCMD.
+ */
+static int
+ixp4xx_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+                       unsigned long arg)
+{
+       int ret = -ENOIOCTLCMD;
+       int time;
+
+       switch (cmd) {
+       case WDIOC_GETSUPPORT:
+               ret = copy_to_user((struct watchdog_info *)arg, &ident,
+                                  sizeof(ident)) ? -EFAULT : 0;
+               break;
+
+       case WDIOC_GETSTATUS:
+               ret = put_user(0, (int *)arg);
+               break;
+
+       case WDIOC_GETBOOTSTATUS:
+               /* boot_status is 0 or WDIOF_CARDRESET (set at init time). */
+               ret = put_user(boot_status, (int *)arg);
+               break;
+
+       case WDIOC_SETTIMEOUT:
+               ret = get_user(time, (int *)arg);
+               if (ret)
+                       break;
+
+               /* 60 s is the upper bound this hardware/driver accepts. */
+               if (time <= 0 || time > 60) {
+                       ret = -EINVAL;
+                       break;
+               }
+
+               heartbeat = time;
+               wdt_enable();
+               /* Fall through */
+
+       case WDIOC_GETTIMEOUT:
+               ret = put_user(heartbeat, (int *)arg);
+               break;
+
+       case WDIOC_KEEPALIVE:
+               wdt_enable();
+               ret = 0;
+               break;
+       }
+       return ret;
+}
+
+/*
+ * Last close of /dev/watchdog.  The timer is stopped only if the magic
+ * close character 'V' was written since open (WDT_OK_TO_CLOSE); any
+ * other close leaves the watchdog running so a crashed daemon still
+ * triggers a board reset.  Always clears both state bits so the device
+ * can be re-opened.
+ *
+ * Fix: corrected the typo "unexpectdly" in the log message.
+ */
+static int
+ixp4xx_wdt_release(struct inode *inode, struct file *file)
+{
+       if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
+               wdt_disable();
+       } else {
+               printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
+                                       "timer will not stop\n");
+       }
+
+       clear_bit(WDT_IN_USE, &wdt_status);
+       clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+       return 0;
+}
+
+
+/* Character-device entry points for the watchdog misc device. */
+static struct file_operations ixp4xx_wdt_fops =
+{
+       .owner          = THIS_MODULE,
+       .write          = ixp4xx_wdt_write,
+       .ioctl          = ixp4xx_wdt_ioctl,
+       .open           = ixp4xx_wdt_open,
+       .release        = ixp4xx_wdt_release,
+};
+
+/* Registered as the standard watchdog misc device (minor WATCHDOG_MINOR). */
+static struct miscdevice ixp4xx_wdt_miscdev =
+{
+       .minor          = WATCHDOG_MINOR,
+       .name           = "IXP4xx Watchdog",
+       .fops           = &ixp4xx_wdt_fops,
+};
+
+/*
+ * Module init: refuse to run on rev. A0 IXP4xx silicon (the original
+ * message says the watchdog must stay disabled there), record whether
+ * the last reset was caused by the watchdog, and register the misc
+ * device.
+ *
+ * Fixes over the original:
+ *  - "IXP4XXX" typo in the A0 message, and explicit printk log levels;
+ *  - boot_status is now computed *before* misc_register(), so
+ *    WDIOC_GETBOOTSTATUS is valid as soon as the device is visible
+ *    (and is not touched pointlessly when registration fails).
+ */
+static int __init ixp4xx_wdt_init(void)
+{
+       int ret;
+       unsigned long processor_id;
+
+       /* Read the CP15 ID register; the low nibble holds the revision. */
+       asm("mrc p15, 0, %0, cr0, cr0, 0;" : "=r"(processor_id) :);
+       if (!(processor_id & 0xf)) {
+               printk(KERN_WARNING "IXP4xx Watchdog: Rev. A0 CPU detected - "
+                       "watchdog disabled\n");
+
+               return -ENODEV;
+       }
+
+       /* Did the watchdog cause the last (warm) reset? */
+       boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ?
+                       WDIOF_CARDRESET : 0;
+
+       ret = misc_register(&ixp4xx_wdt_miscdev);
+       if (ret == 0)
+               printk(KERN_INFO "IXP4xx Watchdog Timer: heartbeat %d sec\n",
+                       heartbeat);
+
+       return ret;
+}
+
+/* Module teardown: unregister the misc device.  Note this does NOT
+ * disable a running watchdog -- consistent with the nowayout policy. */
+static void __exit ixp4xx_wdt_exit(void)
+{
+       misc_deregister(&ixp4xx_wdt_miscdev);
+}
+
+
+module_init(ixp4xx_wdt_init);
+module_exit(ixp4xx_wdt_exit);
+
+/* Fix: the original read  MODULE_AUTHOR("...<dsaxena@plexity.net">);
+ * with the closing '>' outside the string literal -- a syntax error.
+ * The quote is now correctly placed after the '>'. */
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
+MODULE_DESCRIPTION("IXP4xx Network Processor Watchdog");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 60s)");
+
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+
diff --git a/drivers/i2c/busses/i2c-ixp4xx.c b/drivers/i2c/busses/i2c-ixp4xx.c
new file mode 100644 (file)
index 0000000..d8bfd59
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * drivers/i2c/i2c-adap-ixp4xx.c
+ *
+ * Intel's IXP4xx XScale NPU chipsets (IXP420, 421, 422, 425) do not have
+ * an on board I2C controller but provide 16 GPIO pins that are often
+ * used to create an I2C bus. This driver provides an i2c_adapter 
+ * interface that plugs in under algo_bit and drives the GPIO pins
+ * as instructed by the algorithm driver.
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright (c) 2003-2004 MontaVista Software Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public 
+ * License version 2. This program is licensed "as is" without any 
+ * warranty of any kind, whether express or implied.
+ *
+ * NOTE: Since different platforms will use different GPIO pins for
+ *       I2C, this driver uses an IXP4xx-specific platform_data
+ *       pointer to pass the GPIO numbers to the driver. This 
+ *       allows us to support all the different IXP4xx platforms
+ *       w/o having to put #ifdefs in this driver.
+ *
+ *       See arch/arm/mach-ixp4xx/ixdp425.c for an example of building a 
+ *       device list and filling in the ixp4xx_i2c_pins data structure 
+ *       that is passed as the platform_data to this driver.
+ */
+
+#include <linux/config.h>
+#ifdef CONFIG_I2C_DEBUG_BUS
+#define DEBUG  1
+#endif
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
+#include <asm/hardware.h>      /* Pick up IXP4xx-specific bits */
+
+/* The opaque algo_data->data pointer is a struct ixp4xx_i2c_pins
+ * supplied via platform_data; these helpers extract the GPIO numbers. */
+static inline int ixp4xx_scl_pin(void *data)
+{
+       return ((struct ixp4xx_i2c_pins*)data)->scl_pin;
+}
+
+static inline int ixp4xx_sda_pin(void *data)
+{
+       return ((struct ixp4xx_i2c_pins*)data)->sda_pin;
+}
+
+/* Open-drain emulation: the line is pre-driven low, and a logical "1"
+ * is produced by switching the pin to input so it floats high
+ * (presumably via the bus pull-up -- standard I2C; the code never
+ * actively drives the line high). */
+static void ixp4xx_bit_setscl(void *data, int val)
+{
+       gpio_line_set(ixp4xx_scl_pin(data), 0);
+       gpio_line_config(ixp4xx_scl_pin(data),
+               val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT );
+}
+
+/* Same open-drain scheme for SDA. */
+static void ixp4xx_bit_setsda(void *data, int val)
+{
+       gpio_line_set(ixp4xx_sda_pin(data), 0);
+       gpio_line_config(ixp4xx_sda_pin(data),
+               val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT );
+}
+
+/* Read back SCL: make sure the pin is an input, then sample it.
+ * Used by the bit-bang algorithm for clock-stretching detection. */
+static int ixp4xx_bit_getscl(void *data)
+{
+       int scl;
+
+       gpio_line_config(ixp4xx_scl_pin(data), IXP4XX_GPIO_IN );
+       gpio_line_get(ixp4xx_scl_pin(data), &scl);
+
+       return scl;
+}      
+
+/* Read back SDA the same way. */
+static int ixp4xx_bit_getsda(void *data)
+{
+       int sda;
+
+       gpio_line_config(ixp4xx_sda_pin(data), IXP4XX_GPIO_IN );
+       gpio_line_get(ixp4xx_sda_pin(data), &sda);
+
+       return sda;
+}      
+
+/* Per-bus instance state, kmalloc'ed in probe and stored as drvdata. */
+struct ixp4xx_i2c_data {
+       struct ixp4xx_i2c_pins *gpio_pins;      /* GPIO numbers from platform_data */
+       struct i2c_adapter adapter;             /* registered with i2c core */
+       struct i2c_algo_bit_data algo_data;     /* callbacks + timing for algo_bit */
+};
+
+/*
+ * Unbind: detach the drvdata first, unregister the bit-bang bus from
+ * the i2c core, then free the per-bus state allocated in probe.
+ */
+static int ixp4xx_i2c_remove(struct device *dev)
+{
+       struct platform_device *plat_dev = to_platform_device(dev);
+       struct ixp4xx_i2c_data *drv_data = dev_get_drvdata(&plat_dev->dev);
+
+       dev_set_drvdata(&plat_dev->dev, NULL);
+
+       i2c_bit_del_bus(&drv_data->adapter);
+
+       kfree(drv_data);
+
+       return 0;
+}
+
+/*
+ * Bind the GPIO bit-bang adapter to a platform device.  The GPIO pin
+ * numbers arrive through platform_data (struct ixp4xx_i2c_pins).
+ *
+ * Fixes over the original:
+ *  - operator-precedence bug: "if ((err = i2c_bit_add_bus(...) != 0))"
+ *    assigned the *comparison result* (0/1) to err, so a failed
+ *    registration returned 1 instead of the real error code;
+ *  - the adapter.id / adapter.algo_data assignments were terminated
+ *    with ',' (comma operator) instead of ';' -- harmless at runtime
+ *    but clearly accidental.
+ */
+static int ixp4xx_i2c_probe(struct device *dev)
+{
+       int err;
+       struct platform_device *plat_dev = to_platform_device(dev);
+       struct ixp4xx_i2c_pins *gpio = plat_dev->dev.platform_data;
+       struct ixp4xx_i2c_data *drv_data =
+               kmalloc(sizeof(struct ixp4xx_i2c_data), GFP_KERNEL);
+
+       if (!drv_data)
+               return -ENOMEM;
+
+       memzero(drv_data, sizeof(struct ixp4xx_i2c_data));
+       drv_data->gpio_pins = gpio;
+
+       /*
+        * We could make a lot of these structures static, but
+        * certain platforms may have multiple GPIO-based I2C
+        * buses for various device domains, so we need per-device
+        * algo_data->data.
+        */
+       drv_data->algo_data.data = gpio;
+       drv_data->algo_data.setsda = ixp4xx_bit_setsda;
+       drv_data->algo_data.setscl = ixp4xx_bit_setscl;
+       drv_data->algo_data.getsda = ixp4xx_bit_getsda;
+       drv_data->algo_data.getscl = ixp4xx_bit_getscl;
+       drv_data->algo_data.udelay = 10;
+       drv_data->algo_data.mdelay = 10;
+       drv_data->algo_data.timeout = 100;
+
+       drv_data->adapter.id = I2C_HW_B_IXP4XX;
+       drv_data->adapter.algo_data = &drv_data->algo_data;
+
+       drv_data->adapter.dev.parent = &plat_dev->dev;
+
+       /* Park both lines released (input, value pre-set low) before the
+        * bit-bang algorithm takes over. */
+       gpio_line_config(gpio->scl_pin, IXP4XX_GPIO_IN);
+       gpio_line_config(gpio->sda_pin, IXP4XX_GPIO_IN);
+       gpio_line_set(gpio->scl_pin, 0);
+       gpio_line_set(gpio->sda_pin, 0);
+
+       if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) {
+               printk(KERN_ERR "ERROR: Could not install %s\n", dev->bus_id);
+
+               kfree(drv_data);
+               return err;
+       }
+
+       dev_set_drvdata(&plat_dev->dev, drv_data);
+
+       return 0;
+}
+
+/* Platform-bus driver glue: matched by name against the platform
+ * device registered by the board-setup code. */
+static struct device_driver ixp4xx_i2c_driver = {
+       .name           = "IXP4XX-I2C",
+       .bus            = &platform_bus_type,
+       .probe          = ixp4xx_i2c_probe,
+       .remove         = ixp4xx_i2c_remove,
+};
+
+/* Module entry: register with the driver core. */
+static int __init ixp4xx_i2c_init(void)
+{
+       return driver_register(&ixp4xx_i2c_driver);
+}
+
+/* Module exit: unregister (probe'd buses are torn down via remove). */
+static void __exit ixp4xx_i2c_exit(void)
+{
+       driver_unregister(&ixp4xx_i2c_driver);
+}
+
+module_init(ixp4xx_i2c_init);
+module_exit(ixp4xx_i2c_exit);
+
+MODULE_DESCRIPTION("GPIO-based I2C adapter for IXP4xx systems");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
+
diff --git a/drivers/i2c/chips/max1619.c b/drivers/i2c/chips/max1619.c
new file mode 100644 (file)
index 0000000..0f8a5ac
--- /dev/null
@@ -0,0 +1,378 @@
+/*
+ * max1619.c - Part of lm_sensors, Linux kernel modules for hardware
+ *             monitoring
+ * Copyright (C) 2003-2004 Alexey Fisher <fishor@mail.ru>
+ *                         Jean Delvare <khali@linux-fr.org>
+ *
+ * Based on the lm90 driver. The MAX1619 is a sensor chip made by Maxim.
+ * It reports up to two temperatures (its own plus up to
+ * one external one). Complete datasheet can be
+ * obtained from Maxim's website at:
+ *   http://pdfserv.maxim-ic.com/en/ds/MAX1619.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/i2c-sensor.h>
+
+
+/* Addresses to probe: no individual addresses, three small I2C address
+ * ranges, and no ISA probing at all.  Consumed by SENSORS_INSMOD_1 /
+ * i2c_detect() via the generated addr_data. */
+static unsigned short normal_i2c[] = { I2C_CLIENT_END };
+static unsigned short normal_i2c_range[] = { 0x18, 0x1a, 0x29, 0x2b,
+                                               0x4c, 0x4e, I2C_CLIENT_END };
+static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END };
+static unsigned int normal_isa_range[] = { I2C_CLIENT_ISA_END };
+
+/*
+ * Insmod parameters
+ */
+
+SENSORS_INSMOD_1(max1619);
+
+/*
+ * The MAX1619 registers
+ */
+
+#define MAX1619_REG_R_MAN_ID           0xFE
+#define MAX1619_REG_R_CHIP_ID          0xFF
+#define MAX1619_REG_R_CONFIG           0x03
+#define MAX1619_REG_W_CONFIG           0x09
+#define MAX1619_REG_R_CONVRATE         0x04
+#define MAX1619_REG_W_CONVRATE         0x0A
+#define MAX1619_REG_R_STATUS           0x02
+#define MAX1619_REG_R_LOCAL_TEMP       0x00
+#define MAX1619_REG_R_REMOTE_TEMP      0x01
+#define MAX1619_REG_R_REMOTE_HIGH      0x07
+#define MAX1619_REG_W_REMOTE_HIGH      0x0D
+#define MAX1619_REG_R_REMOTE_LOW       0x08
+#define MAX1619_REG_W_REMOTE_LOW       0x0E
+#define MAX1619_REG_R_REMOTE_CRIT      0x10
+#define MAX1619_REG_W_REMOTE_CRIT      0x12
+#define MAX1619_REG_R_TCRIT_HYST       0x11
+#define MAX1619_REG_W_TCRIT_HYST       0x13
+
+/*
+ * Conversions and various macros
+ *
+ * Registers hold signed 8-bit degrees C; sysfs wants millidegrees.
+ * Fix: macro arguments are now parenthesized so expressions such as
+ * TEMP_TO_REG(a + b) expand with the intended precedence.
+ */
+
+#define TEMP_FROM_REG(val)     (((val) & 0x80 ? (val) - 0x100 : (val)) * 1000)
+#define TEMP_TO_REG(val)       (((val) < 0 ? (val) + 0x100 * 1000 : (val)) / 1000)
+
+/*
+ * Functions declaration
+ */
+
+static int max1619_attach_adapter(struct i2c_adapter *adapter);
+static int max1619_detect(struct i2c_adapter *adapter, int address,
+       int kind);
+static void max1619_init_client(struct i2c_client *client);
+static int max1619_detach_client(struct i2c_client *client);
+static struct max1619_data *max1619_update_device(struct device *dev);
+
+/*
+ * Driver data (common to all clients)
+ */
+
+/* i2c driver registration record; I2C_DF_NOTIFY requests attach
+ * callbacks for newly added adapters. */
+static struct i2c_driver max1619_driver = {
+       .owner          = THIS_MODULE,
+       .name           = "max1619",
+       .flags          = I2C_DF_NOTIFY,
+       .attach_adapter = max1619_attach_adapter,
+       .detach_client  = max1619_detach_client,
+};
+
+/*
+ * Client data (each client gets its own)
+ */
+
+/* Per-client state; the embedded i2c_client must stay the first member
+ * usable via i2c_set_clientdata/i2c_get_clientdata. */
+struct max1619_data {
+       struct i2c_client client;
+       struct semaphore update_lock;   /* serializes register cache refresh */
+       char valid; /* zero until following fields are valid */
+       unsigned long last_updated; /* in jiffies */
+
+       /* registers values */
+       u8 temp_input1; /* local */
+       u8 temp_input2, temp_low2, temp_high2; /* remote */
+       u8 temp_crit2;
+       u8 temp_hyst2;
+       u8 alarms; 
+};
+
+/*
+ * Internal variables
+ */
+
+/* Next sequential client id, assigned in max1619_detect(). */
+static int max1619_id = 0;
+
+/*
+ * Sysfs stuff
+ */
+
+/* Generates show_<field>(): refreshes the register cache via
+ * max1619_update_device() and prints the field in millidegrees C. */
+#define show_temp(value) \
+static ssize_t show_##value(struct device *dev, char *buf) \
+{ \
+       struct max1619_data *data = max1619_update_device(dev); \
+       return sprintf(buf, "%d\n", TEMP_FROM_REG(data->value)); \
+}
+show_temp(temp_input1);
+show_temp(temp_input2);
+show_temp(temp_low2);
+show_temp(temp_high2);
+show_temp(temp_crit2);
+show_temp(temp_hyst2);
+
+/* Generates set_<field>(): parses the sysfs string, updates the cached
+ * value and writes it to the given chip register.
+ * NOTE(review): no strtol error checking and no update_lock is taken
+ * around the cache write -- matches the era's style, but worth
+ * confirming against later versions of this driver. */
+#define set_temp2(value, reg) \
+static ssize_t set_##value(struct device *dev, const char *buf, \
+       size_t count) \
+{ \
+       struct i2c_client *client = to_i2c_client(dev); \
+       struct max1619_data *data = i2c_get_clientdata(client); \
+       data->value = TEMP_TO_REG(simple_strtol(buf, NULL, 10)); \
+       i2c_smbus_write_byte_data(client, reg, data->value); \
+       return count; \
+}
+
+set_temp2(temp_low2, MAX1619_REG_W_REMOTE_LOW);
+set_temp2(temp_high2, MAX1619_REG_W_REMOTE_HIGH);
+set_temp2(temp_crit2, MAX1619_REG_W_REMOTE_CRIT);
+set_temp2(temp_hyst2, MAX1619_REG_W_TCRIT_HYST);
+
+/* sysfs "alarms": raw status-register byte, printed as decimal. */
+static ssize_t show_alarms(struct device *dev, char *buf)
+{
+       struct max1619_data *data = max1619_update_device(dev);
+       return sprintf(buf, "%d\n", data->alarms);
+}
+
+/* sysfs attribute wiring: read-only inputs/alarms, read-write limits.
+ * Handler names come from the show_temp()/set_temp2() macro expansions. */
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL);
+static DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input2, NULL);
+static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_low2,
+       set_temp_low2);
+static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_high2,
+       set_temp_high2);
+static DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit2,
+       set_temp_crit2);
+static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp_hyst2,
+       set_temp_hyst2);
+static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+
+/*
+ * Real code
+ */
+
+/* Called by the i2c core for each adapter: probe only hwmon-class
+ * buses, delegating the address scan to i2c_detect(). */
+static int max1619_attach_adapter(struct i2c_adapter *adapter)
+{
+       if (!(adapter->class & I2C_CLASS_HWMON))
+               return 0;
+       return i2c_detect(adapter, &addr_data, max1619_detect);
+}
+
+/*
+ * The following function does more than just detection. If detection
+ * succeeds, it also registers the new chip.
+ */
+static int max1619_detect(struct i2c_adapter *adapter, int address, int kind)
+{
+       struct i2c_client *new_client;
+       struct max1619_data *data;
+       int err = 0;
+       const char *name = "";  
+       u8 reg_config=0, reg_convrate=0, reg_status=0;
+       u8 man_id, chip_id;
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+               goto exit;
+
+       if (!(data = kmalloc(sizeof(struct max1619_data), GFP_KERNEL))) {
+               err = -ENOMEM;
+               goto exit;
+       }
+       memset(data, 0, sizeof(struct max1619_data));
+
+       /* The common I2C client data is placed right before the
+          MAX1619-specific data. */
+       new_client = &data->client;
+       i2c_set_clientdata(new_client, data);
+       new_client->addr = address;
+       new_client->adapter = adapter;
+       new_client->driver = &max1619_driver;
+       new_client->flags = 0;
+
+       /*
+        * Now we do the remaining detection. A negative kind means that
+        * the driver was loaded with no force parameter (default), so we
+        * must both detect and identify the chip. A zero kind means that
+        * the driver was loaded with the force parameter, the detection
+        * step shall be skipped. A positive kind means that the driver
+        * was loaded with the force parameter and a given kind of chip is
+        * requested, so both the detection and the identification steps
+        * are skipped.
+        */
+       if (kind < 0) { /* detection */
+               reg_config = i2c_smbus_read_byte_data(new_client,
+                             MAX1619_REG_R_CONFIG);
+               reg_convrate = i2c_smbus_read_byte_data(new_client,
+                              MAX1619_REG_R_CONVRATE);
+               reg_status = i2c_smbus_read_byte_data(new_client,
+                               MAX1619_REG_R_STATUS);
+               /* Plausibility check on reserved config bits, the
+                * conversion-rate range and reserved status bits. */
+               if ((reg_config & 0x03) != 0x00
+                || reg_convrate > 0x07 || (reg_status & 0x61 ) !=0x00) {
+                       dev_dbg(&adapter->dev,
+                               "MAX1619 detection failed at 0x%02x.\n",
+                               address);
+                       goto exit_free;
+               }
+       }
+
+       /* NOTE(review): the indentation of the two blocks below is
+        * misleading but the braces are balanced and the behavior is
+        * correct -- the first "kind <= 0" block ends right after the
+        * man_id/chip_id check, and the "identification failed" test
+        * runs at function scope.  Only the layout is wrong; kept
+        * byte-identical here. */
+       if (kind <= 0) { /* identification */
+       
+               man_id = i2c_smbus_read_byte_data(new_client,
+                        MAX1619_REG_R_MAN_ID);
+               chip_id = i2c_smbus_read_byte_data(new_client,
+                         MAX1619_REG_R_CHIP_ID);
+               
+               if ((man_id == 0x4D) && (chip_id == 0x04)){  
+                               kind = max1619;
+                       }
+               }
+
+               if (kind <= 0) { /* identification failed */
+                       dev_info(&adapter->dev,
+                           "Unsupported chip (man_id=0x%02X, "
+                           "chip_id=0x%02X).\n", man_id, chip_id);
+                       goto exit_free;
+               }
+       
+
+       if (kind == max1619){
+               name = "max1619";
+       }
+
+       /* We can fill in the remaining client fields */
+       strlcpy(new_client->name, name, I2C_NAME_SIZE);
+       new_client->id = max1619_id++;
+       data->valid = 0;
+       init_MUTEX(&data->update_lock);
+
+       /* Tell the I2C layer a new client has arrived */
+       if ((err = i2c_attach_client(new_client)))
+               goto exit_free;
+
+       /* Initialize the MAX1619 chip */
+       max1619_init_client(new_client);
+
+       /* Register sysfs hooks */
+       device_create_file(&new_client->dev, &dev_attr_temp1_input);
+       device_create_file(&new_client->dev, &dev_attr_temp2_input);
+       device_create_file(&new_client->dev, &dev_attr_temp2_min);
+       device_create_file(&new_client->dev, &dev_attr_temp2_max);
+       device_create_file(&new_client->dev, &dev_attr_temp2_crit);
+       device_create_file(&new_client->dev, &dev_attr_temp2_crit_hyst);
+       device_create_file(&new_client->dev, &dev_attr_alarms);
+
+       return 0;
+
+exit_free:
+       kfree(data);
+exit:
+       return err;
+}
+
+/*
+ * Bring the MAX1619 into a running state: program a 2 Hz conversion
+ * rate, then clear the STOP bit (bit 6) of the configuration register
+ * if it is set so that conversions actually start.
+ */
+static void max1619_init_client(struct i2c_client *client)
+{
+       u8 cfg;
+
+       i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONVRATE,
+                                 5); /* 2 Hz */
+       cfg = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG);
+       if (!(cfg & 0x40))
+               return; /* already running */
+       i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONFIG,
+                                 cfg & 0xBF); /* run */
+}
+
+/*
+ * Detach callback: deregister the client from the i2c core, then free
+ * the per-client state.  If deregistration fails the state is kept,
+ * since the core may still reference the client.
+ */
+static int max1619_detach_client(struct i2c_client *client)
+{
+       struct max1619_data *data = i2c_get_clientdata(client);
+       int err = i2c_detach_client(client);
+
+       if (err) {
+               dev_err(&client->dev, "Client deregistration failed, "
+                       "client not detached.\n");
+               return err;
+       }
+
+       kfree(data);
+       return 0;
+}
+
+/*
+ * Return the driver's cached register snapshot, refreshing it from the
+ * chip over SMBus when the cache is stale.  The cache is refreshed
+ * after 2 seconds, on jiffies wraparound, or before the first update
+ * (!data->valid).  Access is serialized by data->update_lock.
+ */
+static struct max1619_data *max1619_update_device(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct max1619_data *data = i2c_get_clientdata(client);
+
+       down(&data->update_lock);
+
+       /* "jiffies < last_updated" handles counter wraparound; the
+        * modern idiom would be time_after(). */
+       if ((jiffies - data->last_updated > HZ * 2) ||
+           (jiffies < data->last_updated) ||
+           !data->valid) {
+               
+               dev_dbg(&client->dev, "Updating max1619 data.\n");
+               data->temp_input1 = i2c_smbus_read_byte_data(client,
+                                       MAX1619_REG_R_LOCAL_TEMP);
+               data->temp_input2 = i2c_smbus_read_byte_data(client,
+                                       MAX1619_REG_R_REMOTE_TEMP);
+               data->temp_high2 = i2c_smbus_read_byte_data(client,
+                                       MAX1619_REG_R_REMOTE_HIGH);
+               data->temp_low2 = i2c_smbus_read_byte_data(client,
+                                       MAX1619_REG_R_REMOTE_LOW);
+               data->temp_crit2 = i2c_smbus_read_byte_data(client,
+                                       MAX1619_REG_R_REMOTE_CRIT);
+               data->temp_hyst2 = i2c_smbus_read_byte_data(client,
+                                       MAX1619_REG_R_TCRIT_HYST);
+               data->alarms = i2c_smbus_read_byte_data(client,
+                                       MAX1619_REG_R_STATUS);
+
+               data->last_updated = jiffies;
+               data->valid = 1;
+       }
+
+       up(&data->update_lock);
+
+       return data;
+}
+
+/* Module entry point: register the driver with the i2c core. */
+static int __init sensors_max1619_init(void)
+{
+       return i2c_add_driver(&max1619_driver);
+}
+
+/* Module exit point: unregister the driver from the i2c core. */
+static void __exit sensors_max1619_exit(void)
+{
+       i2c_del_driver(&max1619_driver);
+}
+
+/* Note: the adjacent string literals are concatenated by the compiler;
+ * the original was missing the space before "Jean", yielding
+ * "...andJean Delvare...". */
+MODULE_AUTHOR("Alexey Fisher <fishor@mail.ru> and "
+       "Jean Delvare <khali@linux-fr.org>");
+MODULE_DESCRIPTION("MAX1619 sensor driver");
+MODULE_LICENSE("GPL");
+
+module_init(sensors_max1619_init);
+module_exit(sensors_max1619_exit);
diff --git a/drivers/i2c/chips/rtc8564.c b/drivers/i2c/chips/rtc8564.c
new file mode 100644 (file)
index 0000000..0fa55d4
--- /dev/null
@@ -0,0 +1,396 @@
+/*
+ *  linux/drivers/i2c/chips/rtc8564.c
+ *
+ *  Copyright (C) 2002-2004 Stefan Eletzhofer
+ *
+ *     based on linux/drivers/acron/char/pcf8583.c
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Driver for system3's EPSON RTC 8564 chip
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/rtc.h>         /* get the user-level API */
+#include <linux/init.h>
+
+#include "rtc8564.h"
+
+/*
+ * Debug helper: _DBG(level, fmt, ...) logs when the "debug" module
+ * parameter is >= level.  The do/while(0) wrappers deliberately carry
+ * no trailing semicolon so "if (x) _DBG(...); else ..." parses as
+ * expected (the original macros ended in ';', injecting an empty
+ * statement at every expansion).  The duplicated <linux/init.h>
+ * include is also dropped.
+ */
+#ifdef DEBUG
+# define _DBG(x, fmt, args...) do{ if (debug>=x) printk(KERN_DEBUG"%s: " fmt "\n", __FUNCTION__, ##args); } while(0)
+#else
+# define _DBG(x, fmt, args...) do { } while(0)
+#endif
+
+#define _DBGRTCTM(x, rtctm) if (debug>=x) printk("%s: secs=%d, mins=%d, hours=%d, mday=%d, " \
+                       "mon=%d, year=%d, wday=%d VL=%d\n", __FUNCTION__, \
+                       (rtctm).secs, (rtctm).mins, (rtctm).hours, (rtctm).mday, \
+                       (rtctm).mon, (rtctm).year, (rtctm).wday, (rtctm).vl);
+
+/* Per-client driver state: the embedded i2c client plus a cached copy
+ * of the chip's two 8-bit control registers packed into one u16
+ * (CTRL1 in the low byte, CTRL2 in the high byte). */
+struct rtc8564_data {
+       struct i2c_client client;
+       u16 ctrl;
+};
+
+/* Low byte of the cached control word (control register 1). */
+static inline u8 _rtc8564_ctrl1(struct i2c_client *client)
+{
+       struct rtc8564_data *data = i2c_get_clientdata(client);
+       return data->ctrl & 0xff;
+}
+/* High byte of the cached control word (control register 2). */
+static inline u8 _rtc8564_ctrl2(struct i2c_client *client)
+{
+       struct rtc8564_data *data = i2c_get_clientdata(client);
+       return (data->ctrl & 0xff00) >> 8;
+}
+
+#define CTRL1(c) _rtc8564_ctrl1(c)
+#define CTRL2(c) _rtc8564_ctrl2(c)
+
+#define BCD_TO_BIN(val) (((val)&15) + ((val)>>4)*10)
+#define BIN_TO_BCD(val) ((((val)/10)<<4) + (val)%10)
+
+static int debug = 0;
+MODULE_PARM(debug, "i");
+
+static struct i2c_driver rtc8564_driver;
+
+static unsigned short ignore[] = { I2C_CLIENT_END };
+static unsigned short normal_addr[] = { 0x51, I2C_CLIENT_END };
+
+static struct i2c_client_address_data addr_data = {
+       .normal_i2c             = normal_addr,
+       .normal_i2c_range       = ignore,
+       .probe                  = ignore,
+       .probe_range            = ignore,
+       .ignore                 = ignore,
+       .ignore_range           = ignore,
+       .force                  = ignore,
+};
+
+static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem);
+static int rtc8564_write_mem(struct i2c_client *client, struct mem *mem);
+
+/*
+ * Read len bytes starting at chip register adr into buf, using a
+ * write-register-address / read-data message pair.
+ * Returns 0 on success, -EINVAL on NULL arguments, otherwise the
+ * i2c_transfer() result (negative errno, or a partial message count).
+ */
+static int rtc8564_read(struct i2c_client *client, unsigned char adr,
+                       unsigned char *buf, unsigned char len)
+{
+       unsigned char reg[1] = { adr };
+       struct i2c_msg msgs[2] = {
+               {client->addr, 0, 1, reg},
+               {client->addr, I2C_M_RD, len, buf}
+       };
+       int rc;
+
+       _DBG(1, "client=%p, adr=%d, buf=%p, len=%d", client, adr, buf, len);
+
+       if (!buf || !client)
+               return -EINVAL;
+
+       rc = i2c_transfer(client->adapter, msgs, 2);
+       return (rc == 2) ? 0 : rc;
+}
+
+/*
+ * Write len bytes (max 15) from data to the chip, starting at register
+ * adr.  A single i2c message is built whose first byte is the register
+ * address followed by the payload.
+ * Returns 0 on success, -EINVAL on bad arguments, otherwise the
+ * i2c_transfer() result.
+ */
+static int rtc8564_write(struct i2c_client *client, unsigned char adr,
+                        unsigned char *data, unsigned char len)
+{
+       unsigned char msgbuf[16];
+       struct i2c_msg wr;
+       int i, rc;
+
+       if (!client || !data || len > 15)
+               return -EINVAL;
+
+       _DBG(1, "client=%p, adr=%d, buf=%p, len=%d", client, adr, data, len);
+
+       /* Register address first, then the payload bytes. */
+       msgbuf[0] = adr;
+       for (i = 0; i < len; i++) {
+               msgbuf[i + 1] = data[i];
+               _DBG(5, "data[%d] = 0x%02x (%d)", i, data[i], data[i]);
+       }
+
+       wr.addr = client->addr;
+       wr.flags = 0;
+       wr.len = len + 1;
+       wr.buf = msgbuf;
+
+       rc = i2c_transfer(client->adapter, &wr, 1);
+       return (rc == 1) ? 0 : rc;
+}
+
+/*
+ * Attach callback invoked by i2c_probe() for each detected address.
+ * Allocates the per-client state, initializes control register 1 to 0,
+ * reads back CTRL1/CTRL2 into the cached control word, and registers
+ * the client with the i2c core.  On any failure the allocated state is
+ * freed and a negative errno is returned.
+ */
+static int rtc8564_attach(struct i2c_adapter *adap, int addr, int kind)
+{
+       int ret;
+       struct i2c_client *new_client;
+       struct rtc8564_data *d;
+       unsigned char data[10];
+       unsigned char ad[1] = { 0 };
+       /* write: register 0 (CTRL1) := 0; read: CTRL1 and CTRL2 */
+       struct i2c_msg ctrl_wr[1] = {
+               {addr, 0, 2, data}
+       };
+       struct i2c_msg ctrl_rd[2] = {
+               {addr, 0, 1, ad},
+               {addr, I2C_M_RD, 2, data}
+       };
+
+       d = kmalloc(sizeof(struct rtc8564_data), GFP_KERNEL);
+       if (!d) {
+               ret = -ENOMEM;
+               goto done;
+       }
+       memset(d, 0, sizeof(struct rtc8564_data));
+       new_client = &d->client;
+
+       strlcpy(new_client->name, "RTC8564", I2C_NAME_SIZE);
+       i2c_set_clientdata(new_client, d);
+       new_client->id = rtc8564_driver.id;
+       new_client->flags = I2C_CLIENT_ALLOW_USE | I2C_DF_NOTIFY;
+       new_client->addr = addr;
+       new_client->adapter = adap;
+       new_client->driver = &rtc8564_driver;
+
+       _DBG(1, "client=%p", new_client);
+       _DBG(1, "client.id=%d", new_client->id);
+
+       /* init ctrl1 reg */
+       data[0] = 0;
+       data[1] = 0;
+       ret = i2c_transfer(new_client->adapter, ctrl_wr, 1);
+       if (ret != 1) {
+               printk(KERN_INFO "rtc8564: cant init ctrl1\n");
+               ret = -ENODEV;
+               goto done;
+       }
+
+       /* read back ctrl1 and ctrl2 */
+       ret = i2c_transfer(new_client->adapter, ctrl_rd, 2);
+       if (ret != 2) {
+               printk(KERN_INFO "rtc8564: cant read ctrl\n");
+               ret = -ENODEV;
+               goto done;
+       }
+
+       /* cache: CTRL1 low byte, CTRL2 high byte */
+       d->ctrl = data[0] | (data[1] << 8);
+
+       _DBG(1, "RTC8564_REG_CTRL1=%02x, RTC8564_REG_CTRL2=%02x",
+            data[0], data[1]);
+
+       ret = i2c_attach_client(new_client);
+done:
+       if (ret) {
+               kfree(d);
+       }
+       return ret;
+}
+
+static int rtc8564_probe(struct i2c_adapter *adap)
+{
+       return i2c_probe(adap, &addr_data, rtc8564_attach);
+}
+
+/*
+ * Detach callback: deregister the client from the i2c core and free
+ * the per-client state.  Unlike the original, a failed
+ * i2c_detach_client() is now propagated and the state is NOT freed in
+ * that case, since the core may still reference it (this matches the
+ * max1619 driver's detach handling).
+ */
+static int rtc8564_detach(struct i2c_client *client)
+{
+       int err;
+
+       err = i2c_detach_client(client);
+       if (err) {
+               printk(KERN_ERR "rtc8564: client deregistration failed\n");
+               return err;
+       }
+
+       kfree(i2c_get_clientdata(client));
+       return 0;
+}
+
+/*
+ * Read the full date/time from the chip into *dt.
+ * Registers 0..14 are read in one transfer; BCD fields are converted
+ * to binary and masked to their valid bits.  The century count is kept
+ * in the (otherwise unused) minute-alarm register.  dt->vl reflects
+ * the voltage-low flag (bit 7 of the seconds register), meaning the
+ * clock data may be invalid.
+ * Returns 0 on success, -EINVAL on NULL arguments, or the error from
+ * rtc8564_read().
+ */
+static int rtc8564_get_datetime(struct i2c_client *client, struct rtc_tm *dt)
+{
+       int ret = -EIO;
+       unsigned char buf[15];
+
+       _DBG(1, "client=%p, dt=%p", client, dt);
+
+       if (!dt || !client)
+               return -EINVAL;
+
+       memset(buf, 0, sizeof(buf));
+
+       ret = rtc8564_read(client, 0, buf, 15);
+       if (ret)
+               return ret;
+
+       /* century stored in minute alarm reg */
+       dt->year = BCD_TO_BIN(buf[RTC8564_REG_YEAR]);
+       dt->year += 100 * BCD_TO_BIN(buf[RTC8564_REG_AL_MIN] & 0x3f);
+       dt->mday = BCD_TO_BIN(buf[RTC8564_REG_DAY] & 0x3f);
+       dt->wday = BCD_TO_BIN(buf[RTC8564_REG_WDAY] & 7);
+       dt->mon = BCD_TO_BIN(buf[RTC8564_REG_MON_CENT] & 0x1f);
+
+       dt->secs = BCD_TO_BIN(buf[RTC8564_REG_SEC] & 0x7f);
+       dt->vl = (buf[RTC8564_REG_SEC] & 0x80) == 0x80;
+       dt->mins = BCD_TO_BIN(buf[RTC8564_REG_MIN] & 0x7f);
+       dt->hours = BCD_TO_BIN(buf[RTC8564_REG_HR] & 0x3f);
+
+       _DBGRTCTM(2, *dt);
+
+       return 0;
+}
+
+/*
+ * Write time (and, when datetoo is non-zero, the date) to the chip.
+ * The clock is stopped (CTRL1 STOP bit) for the duration of the data
+ * write and restarted afterwards.  The century count is stored in the
+ * minute-alarm register, as in rtc8564_get_datetime().
+ *
+ * Returns 0 on success or the first error encountered.  The original
+ * returned only the result of the final (restart) write, so a failed
+ * data write could be silently masked by a successful restart.
+ */
+static int
+rtc8564_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo)
+{
+       int ret, err, len = 5;
+       unsigned char buf[15];
+
+       _DBG(1, "client=%p, dt=%p", client, dt);
+
+       if (!dt || !client)
+               return -EINVAL;
+
+       _DBGRTCTM(2, *dt);
+
+       /* Stop the clock while updating it. */
+       buf[RTC8564_REG_CTRL1] = CTRL1(client) | RTC8564_CTRL1_STOP;
+       buf[RTC8564_REG_CTRL2] = CTRL2(client);
+       buf[RTC8564_REG_SEC] = BIN_TO_BCD(dt->secs);
+       buf[RTC8564_REG_MIN] = BIN_TO_BCD(dt->mins);
+       buf[RTC8564_REG_HR] = BIN_TO_BCD(dt->hours);
+
+       if (datetoo) {
+               len += 5;
+               buf[RTC8564_REG_DAY] = BIN_TO_BCD(dt->mday);
+               buf[RTC8564_REG_WDAY] = BIN_TO_BCD(dt->wday);
+               buf[RTC8564_REG_MON_CENT] = BIN_TO_BCD(dt->mon) & 0x1f;
+               /* century stored in minute alarm reg */
+               buf[RTC8564_REG_YEAR] = BIN_TO_BCD(dt->year % 100);
+               buf[RTC8564_REG_AL_MIN] = BIN_TO_BCD(dt->year / 100);
+       }
+
+       ret = rtc8564_write(client, 0, buf, len);
+       if (ret) {
+               _DBG(1, "error writing data! %d", ret);
+       }
+
+       /* Restart the clock even if the data write failed. */
+       buf[RTC8564_REG_CTRL1] = CTRL1(client);
+       err = rtc8564_write(client, 0, buf, 1);
+       if (err) {
+               _DBG(1, "error writing data! %d", err);
+       }
+
+       /* Report the first failure, if any. */
+       return ret ? ret : err;
+}
+
+/*
+ * Copy the driver's cached control word into *ctrl.
+ * Returns 0 on success or -EINVAL on a NULL argument.  (The original
+ * returned the bare value -1, i.e. -EPERM; -EINVAL matches every other
+ * entry point in this driver.)
+ */
+static int rtc8564_get_ctrl(struct i2c_client *client, unsigned int *ctrl)
+{
+       struct rtc8564_data *data = i2c_get_clientdata(client);
+
+       if (!ctrl || !client)
+               return -EINVAL;
+
+       *ctrl = data->ctrl;
+       return 0;
+}
+
+/*
+ * Update the cached control word and write both control registers to
+ * the chip (CTRL1 = low byte, CTRL2 = high byte).
+ * Returns 0 on success, -EINVAL on a NULL argument (the original
+ * returned the bare value -1, i.e. -EPERM), or the rtc8564_write()
+ * error.
+ */
+static int rtc8564_set_ctrl(struct i2c_client *client, unsigned int *ctrl)
+{
+       struct rtc8564_data *data = i2c_get_clientdata(client);
+       unsigned char buf[2];
+
+       if (!ctrl || !client)
+               return -EINVAL;
+
+       buf[0] = *ctrl & 0xff;
+       buf[1] = (*ctrl & 0xff00) >> 8;
+       data->ctrl = *ctrl;
+
+       return rtc8564_write(client, 0, buf, 2);
+}
+
+/* Read mem->nr bytes from chip offset mem->loc into mem->data. */
+static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem)
+{
+       if (!client || !mem)
+               return -EINVAL;
+
+       return rtc8564_read(client, mem->loc, mem->data, mem->nr);
+}
+
+/* Write mem->nr bytes from mem->data to chip offset mem->loc. */
+static int rtc8564_write_mem(struct i2c_client *client, struct mem *mem)
+{
+       if (!client || !mem)
+               return -EINVAL;
+
+       return rtc8564_write(client, mem->loc, mem->data, mem->nr);
+}
+
+/*
+ * Command dispatcher for the i2c client interface: maps the RTC_* and
+ * MEM_* command codes (see rtc8564.h) onto the corresponding driver
+ * operations.  arg is interpreted per command (struct rtc_tm *,
+ * unsigned int *, or struct mem *).  Unknown commands yield -EINVAL.
+ */
+static int
+rtc8564_command(struct i2c_client *client, unsigned int cmd, void *arg)
+{
+
+       _DBG(1, "cmd=%d", cmd);
+
+       switch (cmd) {
+       case RTC_GETDATETIME:
+               return rtc8564_get_datetime(client, arg);
+
+       case RTC_SETTIME:
+               return rtc8564_set_datetime(client, arg, 0);
+
+       case RTC_SETDATETIME:
+               return rtc8564_set_datetime(client, arg, 1);
+
+       case RTC_GETCTRL:
+               return rtc8564_get_ctrl(client, arg);
+
+       case RTC_SETCTRL:
+               return rtc8564_set_ctrl(client, arg);
+
+       case MEM_READ:
+               return rtc8564_read_mem(client, arg);
+
+       case MEM_WRITE:
+               return rtc8564_write_mem(client, arg);
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static struct i2c_driver rtc8564_driver = {
+       .owner          = THIS_MODULE,
+       .name           = "RTC8564",
+       .id             = I2C_DRIVERID_RTC8564,
+       .flags          = I2C_DF_NOTIFY,
+       .attach_adapter = rtc8564_probe,
+       .detach_client  = rtc8564_detach,
+       .command        = rtc8564_command
+};
+
+static __init int rtc8564_init(void)
+{
+       return i2c_add_driver(&rtc8564_driver);
+}
+
+static __exit void rtc8564_exit(void)
+{
+       i2c_del_driver(&rtc8564_driver);
+}
+
+MODULE_AUTHOR("Stefan Eletzhofer <Stefan.Eletzhofer@eletztrick.de>");
+MODULE_DESCRIPTION("EPSON RTC8564 Driver");
+MODULE_LICENSE("GPL");
+
+module_init(rtc8564_init);
+module_exit(rtc8564_exit);
diff --git a/drivers/i2c/chips/rtc8564.h b/drivers/i2c/chips/rtc8564.h
new file mode 100644 (file)
index 0000000..e5342d1
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ *  linux/drivers/i2c/chips/rtc8564.h
+ *
+ *  Copyright (C) 2002-2004 Stefan Eletzhofer
+ *
+ *     based on linux/drivers/acron/char/pcf8583.h
+ *  Copyright (C) 2000 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/* Broken-down date/time exchanged through the driver's command
+ * interface (RTC_GETDATETIME / RTC_SETDATETIME). */
+struct rtc_tm {
+       unsigned char   secs;
+       unsigned char   mins;
+       unsigned char   hours;
+       unsigned char   mday;
+       unsigned char   mon;
+       unsigned short  year; /* xxxx 4 digits :) */
+       unsigned char   wday;
+       unsigned char   vl;   /* voltage-low flag: clock data may be invalid */
+};
+
+/* Memory access request for MEM_READ / MEM_WRITE: nr bytes at chip
+ * offset loc, transferred to/from data. */
+struct mem {
+       unsigned int    loc;
+       unsigned int    nr;
+       unsigned char   *data;
+};
+
+#define RTC_GETDATETIME        0
+#define RTC_SETTIME    1
+#define RTC_SETDATETIME        2
+#define RTC_GETCTRL    3
+#define RTC_SETCTRL    4
+#define MEM_READ       5
+#define MEM_WRITE      6
+
+#define RTC8564_REG_CTRL1              0x0 /* T  0 S 0 | T 0 0 0 */
+#define RTC8564_REG_CTRL2              0x1 /* 0  0 0 TI/TP | AF TF AIE TIE */
+#define RTC8564_REG_SEC                        0x2 /* VL 4 2 1 | 8 4 2 1 */
+#define RTC8564_REG_MIN                        0x3 /* x  4 2 1 | 8 4 2 1 */
+#define RTC8564_REG_HR                 0x4 /* x  x 2 1 | 8 4 2 1 */
+#define RTC8564_REG_DAY                        0x5 /* x  x 2 1 | 8 4 2 1 */
+#define RTC8564_REG_WDAY               0x6 /* x  x x x | x 4 2 1 */
+#define RTC8564_REG_MON_CENT   0x7 /* C  x x 1 | 8 4 2 1 */
+#define RTC8564_REG_YEAR               0x8 /* 8  4 2 1 | 8 4 2 1 */
+#define RTC8564_REG_AL_MIN             0x9 /* AE 4 2 1 | 8 4 2 1 */
+#define RTC8564_REG_AL_HR              0xa /* AE 4 2 1 | 8 4 2 1 */
+#define RTC8564_REG_AL_DAY             0xb /* AE x 2 1 | 8 4 2 1 */
+#define RTC8564_REG_AL_WDAY            0xc /* AE x x x | x 4 2 1 */
+#define RTC8564_REG_CLKOUT             0xd /* FE x x x | x x FD1 FD0 */
+#define RTC8564_REG_TCTL               0xe /* TE x x x | x x FD1 FD0 */
+#define RTC8564_REG_TIMER              0xf /* 8 bit binary */
+
+/* Control reg */
+#define RTC8564_CTRL1_TEST1            (1<<3)
+#define RTC8564_CTRL1_STOP             (1<<5)
+#define RTC8564_CTRL1_TEST2            (1<<7)
+
+#define RTC8564_CTRL2_TIE              (1<<0)
+#define RTC8564_CTRL2_AIE              (1<<1)
+#define RTC8564_CTRL2_TF               (1<<2)
+#define RTC8564_CTRL2_AF               (1<<3)
+#define RTC8564_CTRL2_TI_TP            (1<<4)
+
+/* CLKOUT frequencies */
+#define RTC8564_FD_32768HZ             (0x0)
+#define RTC8564_FD_1024HZ              (0x1)
+#define RTC8564_FD_32                  (0x2)
+#define RTC8564_FD_1HZ                 (0x3)
+
+/* Timer CTRL */
+#define RTC8564_TD_4096HZ              (0x0)
+#define RTC8564_TD_64HZ                        (0x1)
+#define RTC8564_TD_1HZ                 (0x2)
+#define RTC8564_TD_1_60HZ              (0x3)
+
+#define I2C_DRIVERID_RTC8564 0xf000
diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c
new file mode 100644 (file)
index 0000000..fb91cb8
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * drivers/ide/ide-h8300.c
+ * H8/300 generic IDE interface
+ */
+
+#include <linux/init.h>
+#include <linux/ide.h>
+#include <linux/config.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+/* Swap the two bytes of a 16-bit value using H8/300 assembly
+ * (moves the low/high bytes through r1 and recombines them). */
+#define bswap(d) \
+({                                     \
+       u16 r;                          \
+       __asm__("mov.b %w1,r1h\n\t"     \
+               "mov.b %x1,r1l\n\t"     \
+               "mov.w r1,%0"           \
+               :"=r"(r)                \
+               :"r"(d)                 \
+               :"er1");                \
+       (r);                            \
+})
+
+/* Write the 16-bit value d to memory-mapped address a with its bytes
+ * swapped (staged through r2 in H8/300 assembly). */
+static void mm_outw(u16 d, unsigned long a)
+{
+       __asm__("mov.b %w0,r2h\n\t"
+               "mov.b %x0,r2l\n\t"
+               "mov.w r2,@%1"
+               :
+               :"r"(d),"r"(a)
+               :"er2");
+}
+
+/* Read a 16-bit value from memory-mapped address a, returning it with
+ * its bytes swapped (staged through r2 in H8/300 assembly). */
+static u16 mm_inw(unsigned long a)
+{
+       register u16 r __asm__("er0");
+       __asm__("mov.w @%1,r2\n\t"
+               "mov.b r2l,%x0\n\t"
+               "mov.b r2h,%w0"
+               :"=r"(r)
+               :"r"(a)
+               :"er2");
+       return r;
+}
+
+/* Write len 16-bit words from buf to the fixed memory-mapped address,
+ * byte-swapping each word on the way out. */
+static void mm_outsw(unsigned long addr, void *buf, u32 len)
+{
+       u16 *p = buf;
+
+       while (len-- > 0)
+               *(volatile u16 *)addr = bswap(*p++);
+}
+
+/* Read len 16-bit words from the fixed memory-mapped address into buf,
+ * byte-swapping each word on the way in. */
+static void mm_insw(unsigned long addr, void *buf, u32 len)
+{
+       u16 *p = buf;
+
+       while (len-- > 0)
+               *p++ = bswap(*(volatile u16 *)addr);
+}
+
+#define H8300_IDE_GAP (2)
+
+/*
+ * Fill in the hw_regs_t describing the board's IDE window: task-file
+ * registers at CONFIG_H8300_IDE_BASE spaced H8300_IDE_GAP bytes apart,
+ * the control register at CONFIG_H8300_IDE_ALT, and the configured
+ * external IRQ.  No DMA on this interface.
+ */
+static inline void hw_setup(hw_regs_t *hw)
+{
+       int i;
+
+       memset(hw, 0, sizeof(hw_regs_t));
+       for (i = 0; i <= IDE_STATUS_OFFSET; i++)
+               hw->io_ports[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
+       hw->io_ports[IDE_CONTROL_OFFSET] = CONFIG_H8300_IDE_ALT;
+       hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
+       hw->dma = NO_DMA;
+       hw->chipset = ide_generic;
+}
+
+/*
+ * Install the byte-swapping memory-mapped 16-bit accessors on the
+ * hwif.  32-bit accessors are explicitly disabled (NULL) since the
+ * bus only supports 16-bit transfers here.
+ */
+static inline void hwif_setup(ide_hwif_t *hwif)
+{
+       default_hwif_iops(hwif);
+
+       hwif->mmio  = 2;
+       hwif->OUTW  = mm_outw;
+       hwif->OUTSW = mm_outsw;
+       hwif->INW   = mm_inw;
+       hwif->INSW  = mm_insw;
+       hwif->OUTL  = NULL;
+       hwif->INL   = NULL;
+       hwif->OUTSL = NULL;
+       hwif->INSL  = NULL;
+}
+
+/*
+ * Register the H8/300 memory-mapped IDE interface.
+ *
+ * Claims the command-block and control I/O regions, registers the
+ * interface with the IDE core and installs the byte-swapping mmio
+ * accessors.  On registration failure the previously requested regions
+ * are now released again (the original code leaked them).
+ */
+void __init h8300_ide_init(void)
+{
+       hw_regs_t hw;
+       ide_hwif_t *hwif;
+       int idx;
+
+       if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300"))
+               goto out_busy;
+       if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) {
+               release_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8);
+               goto out_busy;
+       }
+
+       hw_setup(&hw);
+
+       /* register if */
+       idx = ide_register_hw(&hw, &hwif);
+       if (idx == -1) {
+               printk(KERN_ERR "ide-h8300: IDE I/F register failed\n");
+               /* Fix: do not leak the I/O regions on failure. */
+               release_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP);
+               release_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8);
+               return;
+       }
+
+       hwif_setup(hwif);
+       printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", idx);
+       return;
+
+out_busy:
+       printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
+}
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
new file mode 100644 (file)
index 0000000..a10f921
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * $Id: ixp4xx.c,v 1.1 2004/05/13 22:21:26 dsaxena Exp $
+ *
+ * drivers/mtd/maps/ixp4xx.c
+ *
+ * MTD Map file for IXP4XX based systems. Please do not make per-board
+ * changes in here. If your board needs special setup, do it in your
+ * platform level code in arch/arm/mach-ixp4xx/board-setup.c
+ *
+ * Original Author: Intel Corporation
+ * Maintainer: Deepak Saxena <dsaxena@mvista.com>
+ *
+ * Copyright (C) 2002 Intel Corporation
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/ioport.h>
+#include <linux/device.h>
+#include <asm/io.h>
+#include <asm/mach-types.h>
+#include <asm/mach/flash.h>
+
+#include <linux/reboot.h>
+
+#ifndef __ARMEB__
+#define        BYTE0(h)        ((h) & 0xFF)
+#define        BYTE1(h)        (((h) >> 8) & 0xFF)
+#else
+#define        BYTE0(h)        (((h) >> 8) & 0xFF)
+#define        BYTE1(h)        ((h) & 0xFF)
+#endif
+
+static __u16
+ixp4xx_read16(struct map_info *map, unsigned long ofs)
+{
+       return *(__u16 *) (map->map_priv_1 + ofs);
+}
+
+/*
+ * The IXP4xx expansion bus only allows 16-bit wide acceses
+ * when attached to a 16-bit wide device (such as the 28F128J3A),
+ * so we can't just memcpy_fromio().
+ */
+/*
+ * Copy len bytes out of the flash window one 16-bit word at a time,
+ * splitting each word into bytes via BYTE0/BYTE1 (which encode the
+ * CPU endianness, see the #ifdef __ARMEB__ block above).  A trailing
+ * odd byte is taken from the low half of the next word.
+ */
+static void
+ixp4xx_copy_from(struct map_info *map, void *to,
+                unsigned long from, ssize_t len)
+{
+       int i;
+       u8 *dest = (u8 *) to;
+       u16 *src = (u16 *) (map->map_priv_1 + from);
+       u16 data;
+
+       for (i = 0; i < (len / 2); i++) {
+               data = src[i];
+               dest[i * 2] = BYTE0(data);
+               dest[i * 2 + 1] = BYTE1(data);
+       }
+
+       if (len & 1)
+               dest[len - 1] = BYTE0(src[i]);
+}
+
+static void
+ixp4xx_write16(struct map_info *map, __u16 d, unsigned long adr)
+{
+       *(__u16 *) (map->map_priv_1 + adr) = d;
+}
+
+struct ixp4xx_flash_info {
+       struct mtd_info *mtd;
+       struct map_info map;
+       struct mtd_partition *partitions;
+       struct resource *res;
+};
+
+static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
+
+/*
+ * Tear down a probed flash device: unregister partitions and the mtd
+ * map, unmap the window, free the partition table and the mem region,
+ * run the board-specific exit hook, and disable flash writes on chip
+ * select 0.  Also used as the error-unwind path of
+ * ixp4xx_flash_probe(), so every step tolerates partially-initialized
+ * state.
+ */
+static int
+ixp4xx_flash_remove(struct device *_dev)
+{
+       struct platform_device *dev = to_platform_device(_dev);
+       struct flash_platform_data *plat = dev->dev.platform_data;
+       struct ixp4xx_flash_info *info = dev_get_drvdata(&dev->dev);
+
+       dev_set_drvdata(&dev->dev, NULL);
+
+       if(!info)
+               return 0;
+
+       /*
+        * This is required for a soft reboot to work.
+        */
+       ixp4xx_write16(&info->map, 0xff, 0x55 * 0x2);
+
+       if (info->mtd) {
+               del_mtd_partitions(info->mtd);
+               map_destroy(info->mtd);
+       }
+       if (info->map.map_priv_1)
+               iounmap((void *) info->map.map_priv_1);
+
+       if (info->partitions)
+               kfree(info->partitions);
+
+       if (info->res) {
+               release_resource(info->res);
+               kfree(info->res);
+       }
+
+       if (plat->exit)
+               plat->exit();
+
+       /* Disable flash write */
+       *IXP4XX_EXP_CS0 &= ~IXP4XX_FLASH_WRITABLE;
+
+       return 0;
+}
+
+/*
+ * Probe: map the flash window described by the platform resource,
+ * probe for a chip with the board-supplied map type and register its
+ * partitions.  Returns 0 on success or a negative errno; on failure
+ * all partially-initialized state is torn down via
+ * ixp4xx_flash_remove().
+ */
+static int ixp4xx_flash_probe(struct device *_dev)
+{
+       struct platform_device *dev = to_platform_device(_dev);
+       struct flash_platform_data *plat = dev->dev.platform_data;
+       struct ixp4xx_flash_info *info;
+       int err = -1;
+
+       if (!plat)
+               return -ENODEV;
+
+       if (plat->init) {
+               err = plat->init();
+               if (err)
+                       return err;
+       }
+
+       info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL);
+       if(!info) {
+               err = -ENOMEM;
+               goto Error;
+       }
+       memzero(info, sizeof(struct ixp4xx_flash_info));
+
+       dev_set_drvdata(&dev->dev, info);
+
+       /*
+        * Enable flash write
+        * TODO: Move this out to board specific code
+        */
+       *IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE;
+
+       /*
+        * Tell the MTD layer we're not 1:1 mapped so that it does
+        * not attempt to do a direct access on us.
+        */
+       info->map.phys = NO_XIP;
+       info->map.size = dev->resource->end - dev->resource->start + 1;
+
+       /*
+        * We only support 16-bit accesses for now. If and when
+        * any board use 8-bit access, we'll fixup the driver to
+        * handle that.
+        *
+        * The accessor assignments below were originally terminated
+        * with the comma operator, chaining them and the following
+        * request_mem_region() assignment into a single statement --
+        * legal C, but misleading; use proper semicolons.
+        */
+       info->map.buswidth = 2;
+       info->map.name = dev->dev.bus_id;
+       info->map.read16 = ixp4xx_read16;
+       info->map.write16 = ixp4xx_write16;
+       info->map.copy_from = ixp4xx_copy_from;
+
+       info->res = request_mem_region(dev->resource->start,
+                       dev->resource->end - dev->resource->start + 1,
+                       "IXP4XXFlash");
+       if (!info->res) {
+               printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n");
+               err = -ENOMEM;
+               goto Error;
+       }
+
+       info->map.map_priv_1 =
+           (unsigned long) ioremap(dev->resource->start,
+                                   dev->resource->end - dev->resource->start + 1);
+       if (!info->map.map_priv_1) {
+               printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n");
+               err = -EIO;
+               goto Error;
+       }
+
+       info->mtd = do_map_probe(plat->map_name, &info->map);
+       if (!info->mtd) {
+               printk(KERN_ERR "IXP4XXFlash: map_probe failed\n");
+               err = -ENXIO;
+               goto Error;
+       }
+       info->mtd->owner = THIS_MODULE;
+
+       err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
+       if (err > 0) {
+               err = add_mtd_partitions(info->mtd, info->partitions, err);
+               if(err)
+                       printk(KERN_ERR "Could not parse partitions\n");
+       }
+
+       if (err)
+               goto Error;
+
+       return 0;
+
+Error:
+       ixp4xx_flash_remove(_dev);
+       return err;
+}
+
+static struct device_driver ixp4xx_flash_driver = {
+       .name           = "IXP4XX-Flash",
+       .bus            = &platform_bus_type,
+       .probe          = ixp4xx_flash_probe,
+       .remove         = ixp4xx_flash_remove,
+};
+
+static int __init ixp4xx_flash_init(void)
+{
+       return driver_register(&ixp4xx_flash_driver);
+}
+
+static void __exit ixp4xx_flash_exit(void)
+{
+       driver_unregister(&ixp4xx_flash_driver);
+}
+
+
+module_init(ixp4xx_flash_init);
+module_exit(ixp4xx_flash_exit);
+
+MODULE_LICENSE("GPL");
+/* The original MODULE_DESCRIPTION() line lacked its terminating
+ * semicolon, which breaks the build of the following line. */
+MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
+MODULE_AUTHOR("Deepak Saxena");
+
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
new file mode 100644 (file)
index 0000000..1901302
--- /dev/null
@@ -0,0 +1,167 @@
+/*
+ * $Id: wr_sbc82xx_flash.c,v 1.1 2004/06/07 10:21:32 dwmw2 Exp $
+ *
+ * Map for flash chips on Wind River PowerQUICC II SBC82xx board.
+ *
+ * Copyright (C) 2004 Red Hat, Inc.
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/config.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/immap_8260.h>
+
+static struct mtd_info *sbcmtd[3];
+static struct mtd_partition *sbcmtd_parts[3];
+
+struct map_info sbc82xx_flash_map[3] = {
+       {.name = "Boot flash"},
+       {.name = "Alternate boot flash"},
+       {.name = "User flash"}
+};
+
+static struct mtd_partition smallflash_parts[] = {
+       {
+               .name =         "space",
+               .size =         0x100000,
+               .offset =       0,
+       }, {
+               .name =         "bootloader",
+               .size =         MTDPART_SIZ_FULL,
+               .offset =       MTDPART_OFS_APPEND,
+       }
+};
+
+static struct mtd_partition bigflash_parts[] = {
+       {
+               .name =         "bootloader",
+               .size =         0x80000,
+               .offset =       0,
+       }, {
+               .name =         "file system",
+               .size =         MTDPART_SIZ_FULL,
+               .offset =       MTDPART_OFS_APPEND,
+       }
+};
+
+static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
+
+/*
+ * Probe and register the three flash windows of the WindRiver SBC82xx
+ * board.  Which of CS0/CS6 carries the "big" 64MiB boot flash is
+ * decided from the BR0 bus-controller register; the user flash sits on
+ * CS1.  Physical bases come from the BRx registers; windows whose base
+ * is 0 are treated as disabled by the bootloader.  Each probed chip is
+ * partitioned from the command line / RedBoot table when present,
+ * falling back to the static big/small layouts above.
+ *
+ * NOTE(review): returns the positive value 1 on an unrecognized BR0
+ * configuration; as a module_init return value a negative errno such
+ * as -ENODEV would be conventional -- confirm before changing.
+ */
+int __init init_sbc82xx_flash(void)
+{
+       volatile  memctl8260_t *mc = &immr->im_memctl;
+       int bigflash;
+       int i;
+
+       /* First, register the boot flash, whichever we're booting from */
+       if ((mc->memc_br0 & 0x00001800) == 0x00001800) {
+               bigflash = 0;
+       } else if ((mc->memc_br0 & 0x00001800) == 0x00000800) {
+               bigflash = 1;
+       } else {
+               printk(KERN_WARNING "Bus Controller register BR0 is %08x. Cannot determine flash configuration\n", mc->memc_br0);
+               return 1;
+       }
+
+       /* Set parameters for the big flash chip (CS6 or CS0) */
+       sbc82xx_flash_map[bigflash].buswidth = 4;
+       sbc82xx_flash_map[bigflash].size = 0x4000000;
+
+       /* Set parameters for the small flash chip (CS0 or CS6) */
+       sbc82xx_flash_map[!bigflash].buswidth = 1;
+       sbc82xx_flash_map[!bigflash].size = 0x200000;
+
+       /* Set parameters for the user flash chip (CS1) */
+       sbc82xx_flash_map[2].buswidth = 4;
+       sbc82xx_flash_map[2].size = 0x4000000;
+
+       sbc82xx_flash_map[0].phys = mc->memc_br0 & 0xffff8000;
+       sbc82xx_flash_map[1].phys = mc->memc_br6 & 0xffff8000;
+       sbc82xx_flash_map[2].phys = mc->memc_br1 & 0xffff8000;
+
+       for (i=0; i<3; i++) {
+               int8_t flashcs[3] = { 0, 6, 1 };
+               int nr_parts;
+
+               printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
+                      sbc82xx_flash_map[i].name, sbc82xx_flash_map[i].size >> 20, flashcs[i]);
+               if (!sbc82xx_flash_map[i].phys) {
+                       /* We know it can't be at zero. */
+                       printk("): disabled by bootloader.\n");
+                       continue;
+               }
+               printk(" at %08lx)\n",  sbc82xx_flash_map[i].phys);
+
+               sbc82xx_flash_map[i].virt = (unsigned long)ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size);
+
+               if (!sbc82xx_flash_map[i].virt) {
+                       printk("Failed to ioremap\n");
+                       continue;
+               }
+
+               simple_map_init(&sbc82xx_flash_map[i]);
+
+               sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]);
+
+               if (!sbcmtd[i])
+                       continue;
+
+               sbcmtd[i]->owner = THIS_MODULE;
+
+               nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
+                                               &sbcmtd_parts[i], 0);
+               if (nr_parts > 0) {
+                       add_mtd_partitions (sbcmtd[i], sbcmtd_parts[i], nr_parts);
+                       continue;
+               }
+
+               /* No partitioning detected. Use default */
+               if (i == 2) {
+                       add_mtd_device(sbcmtd[i]);
+               } else if (i == bigflash) {
+                       add_mtd_partitions (sbcmtd[i], bigflash_parts, ARRAY_SIZE(bigflash_parts));
+               } else {
+                       add_mtd_partitions (sbcmtd[i], smallflash_parts, ARRAY_SIZE(smallflash_parts));
+               }
+       }
+       return 0;
+}
+
+/*
+ * Undo init_sbc82xx_flash(): for every chip that was probed, remove
+ * its partitions (the first two maps are always partitioned via the
+ * static defaults; the user flash may have been added as a bare
+ * device), free any parsed partition table, destroy the map and unmap
+ * the window.
+ */
+static void __exit cleanup_sbc82xx_flash(void)
+{
+       int i;
+
+       for (i=0; i<3; i++) {
+               if (!sbcmtd[i])
+                       continue;
+
+               if (i<2 || sbcmtd_parts[i])
+                       del_mtd_partitions(sbcmtd[i]);
+               else
+                       del_mtd_device(sbcmtd[i]);
+                       
+               kfree(sbcmtd_parts[i]);
+               map_destroy(sbcmtd[i]);
+               
+               iounmap((void *)sbc82xx_flash_map[i].virt);
+               sbc82xx_flash_map[i].virt = 0;
+       }
+}
+
+module_init(init_sbc82xx_flash);
+module_exit(cleanup_sbc82xx_flash);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II");
diff --git a/drivers/net/ibm_emac/ibm_emac.h b/drivers/net/ibm_emac/ibm_emac.h
new file mode 100644 (file)
index 0000000..5310033
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * ibm_emac.h
+ *
+ *
+ *      Armin Kuster akuster@mvista.com
+ *      June, 2002
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _IBM_EMAC_H_
+#define _IBM_EMAC_H_
+/* General defines needed for the driver */
+
+/* Emac */
+typedef struct emac_regs {
+       u32 em0mr0;
+       u32 em0mr1;
+       u32 em0tmr0;
+       u32 em0tmr1;
+       u32 em0rmr;
+       u32 em0isr;
+       u32 em0iser;
+       u32 em0iahr;
+       u32 em0ialr;
+       u32 em0vtpid;
+       u32 em0vtci;
+       u32 em0ptr;
+       u32 em0iaht1;
+       u32 em0iaht2;
+       u32 em0iaht3;
+       u32 em0iaht4;
+       u32 em0gaht1;
+       u32 em0gaht2;
+       u32 em0gaht3;
+       u32 em0gaht4;
+       u32 em0lsah;
+       u32 em0lsal;
+       u32 em0ipgvr;
+       u32 em0stacr;
+       u32 em0trtr;
+       u32 em0rwmr;
+} emac_t;
+
+/* MODE REG 0 */
+#define EMAC_M0_RXI                    0x80000000
+#define EMAC_M0_TXI                    0x40000000
+#define EMAC_M0_SRST                   0x20000000
+#define EMAC_M0_TXE                    0x10000000
+#define EMAC_M0_RXE                    0x08000000
+#define EMAC_M0_WKE                    0x04000000
+
+/* MODE Reg 1 */
+#define EMAC_M1_FDE                    0x80000000
+#define EMAC_M1_ILE                    0x40000000
+#define EMAC_M1_VLE                    0x20000000
+#define EMAC_M1_EIFC                   0x10000000
+#define EMAC_M1_APP                    0x08000000
+#define EMAC_M1_AEMI                   0x02000000
+#define EMAC_M1_IST                    0x01000000
+#define EMAC_M1_MF_1000GPCS            0x00c00000      /* Internal GPCS */
+#define EMAC_M1_MF_1000MBPS            0x00800000      /* External GPCS */
+#define EMAC_M1_MF_100MBPS             0x00400000
+#define EMAC_M1_RFS_16K                 0x00280000     /* 000 for 512 byte */
+#define EMAC_M1_TR                     0x00008000
+#ifdef CONFIG_IBM_EMAC4
+#define EMAC_M1_RFS_8K                  0x00200000
+#define EMAC_M1_RFS_4K                  0x00180000
+#define EMAC_M1_RFS_2K                  0x00100000
+#define EMAC_M1_RFS_1K                  0x00080000
+#define EMAC_M1_TX_FIFO_16K             0x00050000     /* 0's for 512 byte */
+#define EMAC_M1_TX_FIFO_8K              0x00040000
+#define EMAC_M1_TX_FIFO_4K              0x00030000
+#define EMAC_M1_TX_FIFO_2K              0x00020000
+#define EMAC_M1_TX_FIFO_1K              0x00010000
+#define EMAC_M1_TX_TR                   0x00008000
+#define EMAC_M1_TX_MWSW                 0x00001000     /* 0 wait for status */
+#define EMAC_M1_JUMBO_ENABLE            0x00000800     /* Up to 9K frames */
+#define EMAC_M1_OPB_CLK_66              0x00000008     /* 66Mhz */
+#define EMAC_M1_OPB_CLK_83              0x00000010     /* 83Mhz */
+#define EMAC_M1_OPB_CLK_100             0x00000018     /* 100Mhz */
+#define EMAC_M1_OPB_CLK_100P            0x00000020     /* 100Mhz+ */
+#else                          /* CONFIG_IBM_EMAC4 */
+#define EMAC_M1_RFS_4K                 0x00300000      /* ~4k for 512 byte */
+#define EMAC_M1_RFS_2K                 0x00200000
+#define EMAC_M1_RFS_1K                 0x00100000
+#define EMAC_M1_TX_FIFO_2K             0x00080000      /* 0's for 512 byte */
+#define EMAC_M1_TX_FIFO_1K             0x00040000
+#define EMAC_M1_TR0_DEPEND             0x00010000      /* 0'x for single packet */
+#define EMAC_M1_TR1_DEPEND             0x00004000
+#define EMAC_M1_TR1_MULTI              0x00002000
+#define EMAC_M1_JUMBO_ENABLE           0x00001000
+#endif                         /* CONFIG_IBM_EMAC4 */
+#define EMAC_M1_BASE                   (EMAC_M1_TX_FIFO_2K | \
+                                       EMAC_M1_APP | \
+                                       EMAC_M1_TR)
+
+/* Transmit Mode Register 0 */
+#define EMAC_TMR0_GNP0                 0x80000000
+#define EMAC_TMR0_GNP1                 0x40000000
+#define EMAC_TMR0_GNPD                 0x20000000
+#define EMAC_TMR0_FC                   0x10000000
+#define EMAC_TMR0_TFAE_2_32            0x00000001
+#define EMAC_TMR0_TFAE_4_64            0x00000002
+#define EMAC_TMR0_TFAE_8_128           0x00000003
+#define EMAC_TMR0_TFAE_16_256          0x00000004
+#define EMAC_TMR0_TFAE_32_512          0x00000005
+#define EMAC_TMR0_TFAE_64_1024         0x00000006
+#define EMAC_TMR0_TFAE_128_2048                0x00000007
+
+/* Receive Mode Register */
+#define EMAC_RMR_SP                    0x80000000
+#define EMAC_RMR_SFCS                  0x40000000
+#define EMAC_RMR_ARRP                  0x20000000
+#define EMAC_RMR_ARP                   0x10000000
+#define EMAC_RMR_AROP                  0x08000000
+#define EMAC_RMR_ARPI                  0x04000000
+#define EMAC_RMR_PPP                   0x02000000
+#define EMAC_RMR_PME                   0x01000000
+#define EMAC_RMR_PMME                  0x00800000
+#define EMAC_RMR_IAE                   0x00400000
+#define EMAC_RMR_MIAE                  0x00200000
+#define EMAC_RMR_BAE                   0x00100000
+#define EMAC_RMR_MAE                   0x00080000
+#define EMAC_RMR_RFAF_2_32             0x00000001
+#define EMAC_RMR_RFAF_4_64             0x00000002
+#define EMAC_RMR_RFAF_8_128            0x00000003
+#define EMAC_RMR_RFAF_16_256           0x00000004
+#define EMAC_RMR_RFAF_32_512           0x00000005
+#define EMAC_RMR_RFAF_64_1024          0x00000006
+#define EMAC_RMR_RFAF_128_2048         0x00000007
+#define EMAC_RMR_BASE                  (EMAC_RMR_IAE | EMAC_RMR_BAE)
+
+/* Interrupt Status & enable Regs */
+#define EMAC_ISR_OVR                   0x02000000
+#define EMAC_ISR_PP                    0x01000000
+#define EMAC_ISR_BP                    0x00800000
+#define EMAC_ISR_RP                    0x00400000
+#define EMAC_ISR_SE                    0x00200000
+#define EMAC_ISR_ALE                   0x00100000
+#define EMAC_ISR_BFCS                  0x00080000
+#define EMAC_ISR_PTLE                  0x00040000
+#define EMAC_ISR_ORE                   0x00020000
+#define EMAC_ISR_IRE                   0x00010000
+#define EMAC_ISR_DBDM                  0x00000200
+#define EMAC_ISR_DB0                   0x00000100
+#define EMAC_ISR_SE0                   0x00000080
+#define EMAC_ISR_TE0                   0x00000040
+#define EMAC_ISR_DB1                   0x00000020
+#define EMAC_ISR_SE1                   0x00000010
+#define EMAC_ISR_TE1                   0x00000008
+#define EMAC_ISR_MOS                   0x00000002
+#define EMAC_ISR_MOF                   0x00000001
+
+/* STA CONTROL REG */
+#define EMAC_STACR_OC                  0x00008000
+#define EMAC_STACR_PHYE                        0x00004000
+#define EMAC_STACR_WRITE               0x00002000
+#define EMAC_STACR_READ                        0x00001000
+#define EMAC_STACR_CLK_83MHZ           0x00000800      /* 0's for 50Mhz */
+#define EMAC_STACR_CLK_66MHZ           0x00000400
+#define EMAC_STACR_CLK_100MHZ          0x00000C00
+
+/* Transmit Request Threshold Register */
+#define EMAC_TRTR_1600                 0x18000000      /* 0's for 64 Bytes */
+#define EMAC_TRTR_1024                 0x0f000000
+#define EMAC_TRTR_512                  0x07000000
+#define EMAC_TRTR_256                  0x03000000
+#define EMAC_TRTR_192                  0x10000000
+#define EMAC_TRTR_128                  0x01000000
+
+#define EMAC_TX_CTRL_GFCS              0x0200
+#define EMAC_TX_CTRL_GP                        0x0100
+#define EMAC_TX_CTRL_ISA               0x0080
+#define EMAC_TX_CTRL_RSA               0x0040
+#define EMAC_TX_CTRL_IVT               0x0020
+#define EMAC_TX_CTRL_RVT               0x0010
+#define EMAC_TX_CTRL_TAH_CSUM          0x000e  /* TAH only */
+#define EMAC_TX_CTRL_TAH_SEG4          0x000a  /* TAH only */
+#define EMAC_TX_CTRL_TAH_SEG3          0x0008  /* TAH only */
+#define EMAC_TX_CTRL_TAH_SEG2          0x0006  /* TAH only */
+#define EMAC_TX_CTRL_TAH_SEG1          0x0004  /* TAH only */
+#define EMAC_TX_CTRL_TAH_SEG0          0x0002  /* TAH only */
+#define EMAC_TX_CTRL_TAH_DIS           0x0000  /* TAH only */
+
+#define EMAC_TX_CTRL_DFLT ( \
+       MAL_TX_CTRL_INTR | EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP )
+
+/* madmal transmit status / Control bits */
+#define EMAC_TX_ST_BFCS                        0x0200
+#define EMAC_TX_ST_BPP                 0x0100
+#define EMAC_TX_ST_LCS                 0x0080
+#define EMAC_TX_ST_ED                  0x0040
+#define EMAC_TX_ST_EC                  0x0020
+#define EMAC_TX_ST_LC                  0x0010
+#define EMAC_TX_ST_MC                  0x0008
+#define EMAC_TX_ST_SC                  0x0004
+#define EMAC_TX_ST_UR                  0x0002
+#define EMAC_TX_ST_SQE                 0x0001
+
+/* madmal receive status / Control bits */
+#define EMAC_RX_ST_OE                  0x0200
+#define EMAC_RX_ST_PP                  0x0100
+#define EMAC_RX_ST_BP                  0x0080
+#define EMAC_RX_ST_RP                  0x0040
+#define EMAC_RX_ST_SE                  0x0020
+#define EMAC_RX_ST_AE                  0x0010
+#define EMAC_RX_ST_BFCS                        0x0008
+#define EMAC_RX_ST_PTL                 0x0004
+#define EMAC_RX_ST_ORE                 0x0002
+#define EMAC_RX_ST_IRE                 0x0001
+#define EMAC_BAD_RX_PACKET             0x02ff
+#define EMAC_CSUM_VER_ERROR            0x0003
+
+/* identify a bad rx packet dependent on emac features */
+#ifdef CONFIG_IBM_EMAC4
+#define EMAC_IS_BAD_RX_PACKET(desc) \
+       (((desc & (EMAC_BAD_RX_PACKET & ~EMAC_CSUM_VER_ERROR)) || \
+       ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_ORE) || \
+       ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_IRE)))
+#else
+#define EMAC_IS_BAD_RX_PACKET(desc) \
+        (desc & EMAC_BAD_RX_PACKET)
+#endif
+
+/* Revision specific EMAC register defaults */
+#ifdef CONFIG_IBM_EMAC4
+#define EMAC_M1_DEFAULT                        (EMAC_M1_BASE | \
+                                       EMAC_M1_OPB_CLK_83 | \
+                                       EMAC_M1_TX_MWSW)
+#define EMAC_RMR_DEFAULT               (EMAC_RMR_BASE | \
+                                       EMAC_RMR_RFAF_128_2048)
+#define EMAC_TMR0_XMIT                 (EMAC_TMR0_GNP0 | \
+                                       EMAC_TMR0_TFAE_128_2048)
+#define EMAC_TRTR_DEFAULT              EMAC_TRTR_1024
+#else                          /* !CONFIG_IBM_EMAC4 */
+#define EMAC_M1_DEFAULT                        EMAC_M1_BASE
+#define EMAC_RMR_DEFAULT               EMAC_RMR_BASE
+#define EMAC_TMR0_XMIT                 EMAC_TMR0_GNP0
+#define EMAC_TRTR_DEFAULT              EMAC_TRTR_1600
+#endif                         /* CONFIG_IBM_EMAC4 */
+
+/* SoC implementation specific EMAC register defaults */
+#if defined(CONFIG_440GP)
+#define EMAC_RWMR_DEFAULT              0x80009000
+#define EMAC_TMR0_DEFAULT              0x00000000
+#define EMAC_TMR1_DEFAULT              0xf8640000
+#elif defined(CONFIG_440GX)
+#define EMAC_RWMR_DEFAULT              0x1000a200
+#define EMAC_TMR0_DEFAULT              EMAC_TMR0_TFAE_128_2048
+#define EMAC_TMR1_DEFAULT              0x88810000
+#else
+#define EMAC_RWMR_DEFAULT              0x0f002000
+#define EMAC_TMR0_DEFAULT              0x00000000
+#define EMAC_TMR1_DEFAULT              0x380f0000
+#endif                         /* CONFIG_440GP */
+
+#endif
diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h
new file mode 100644 (file)
index 0000000..691ce4e
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * ibm_emac_core.h
+ *
+ * Ethernet driver for the built in ethernet on the IBM 405 PowerPC
+ * processor.
+ *
+ *      Armin Kuster akuster@mvista.com
+ *      Sept, 2001
+ *
+ *      Original driver
+ *         Johnnie Peters
+ *         jpeters@mvista.com
+ *
+ * Copyright 2000 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _IBM_EMAC_CORE_H_
+#define _IBM_EMAC_CORE_H_
+
+#include <linux/netdevice.h>
+#include <asm/ocp.h>
+#include <asm/mmu.h>           /* For phys_addr_t */
+
+#include "ibm_emac.h"
+#include "ibm_emac_phy.h"
+#include "ibm_emac_rgmii.h"
+#include "ibm_emac_zmii.h"
+#include "ibm_emac_mal.h"
+#include "ibm_emac_tah.h"
+
+#ifndef CONFIG_IBM_EMAC_TXB
+#define NUM_TX_BUFF            64
+#define NUM_RX_BUFF            64
+#else
+#define NUM_TX_BUFF            CONFIG_IBM_EMAC_TXB
+#define NUM_RX_BUFF            CONFIG_IBM_EMAC_RXB
+#endif
+
+/* This does 16 byte alignment, exactly what we need.
+ * The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+#ifndef CONFIG_IBM_EMAC_SKBRES
+#define SKB_RES 2
+#else
+#define SKB_RES CONFIG_IBM_EMAC_SKBRES
+#endif
+
+/* Note about alignment. alloc_skb() returns a cache line
+ * aligned buffer. However, dev_alloc_skb() will add 16 more
+ * bytes and "reserve" them, so our buffer will actually end
+ * on a half cache line. What we do is to use directly
+ * alloc_skb, allocate 16 more bytes to match the total amount
+ * allocated by dev_alloc_skb(), but we don't reserve.
+ */
+#define MAX_NUM_BUF_DESC       255
+#define DESC_BUF_SIZE          4080    /* max 4096-16 */
+#define DESC_BUF_SIZE_REG      (DESC_BUF_SIZE / 16)
+
+/* Transmitter timeout. */
+#define TX_TIMEOUT             (2*HZ)
+
+/* MDIO latency delay */
+#define MDIO_DELAY             50
+
+/* Power management shift registers */
+#define IBM_CPM_EMMII  0       /* Shift value for MII */
+#define IBM_CPM_EMRX   1       /* Shift value for recv */
+#define IBM_CPM_EMTX   2       /* Shift value for MAC */
+#define IBM_CPM_EMAC(x)        (((x)>>IBM_CPM_EMMII) | ((x)>>IBM_CPM_EMRX) | ((x)>>IBM_CPM_EMTX))
+
+#define ENET_HEADER_SIZE       14
+#define ENET_FCS_SIZE          4
+#define ENET_DEF_MTU_SIZE      1500
+#define ENET_DEF_BUF_SIZE      (ENET_DEF_MTU_SIZE + ENET_HEADER_SIZE + ENET_FCS_SIZE)
+#define EMAC_MIN_FRAME         64
+#define EMAC_MAX_FRAME         9018
+#define EMAC_MIN_MTU           (EMAC_MIN_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE)
+#define EMAC_MAX_MTU           (EMAC_MAX_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE)
+
+#ifdef CONFIG_IBM_EMAC_ERRMSG
+void emac_serr_dump_0(struct net_device *dev);
+void emac_serr_dump_1(struct net_device *dev);
+void emac_err_dump(struct net_device *dev, int em0isr);
+void emac_phy_dump(struct net_device *);
+void emac_desc_dump(struct net_device *);
+void emac_mac_dump(struct net_device *);
+void emac_mal_dump(struct net_device *);
+#else
+#define emac_serr_dump_0(dev) do { } while (0)
+#define emac_serr_dump_1(dev) do { } while (0)
+#define emac_err_dump(dev,x) do { } while (0)
+#define emac_phy_dump(dev) do { } while (0)
+#define emac_desc_dump(dev) do { } while (0)
+#define emac_mac_dump(dev) do { } while (0)
+#define emac_mal_dump(dev) do { } while (0)
+#endif
+
+struct ocp_enet_private {
+       struct sk_buff *tx_skb[NUM_TX_BUFF];
+       struct sk_buff *rx_skb[NUM_RX_BUFF];
+       struct mal_descriptor *tx_desc;
+       struct mal_descriptor *rx_desc;
+       struct mal_descriptor *rx_dirty;
+       struct net_device_stats stats;
+       int tx_cnt;
+       int rx_slot;
+       int dirty_rx;
+       int tx_slot;
+       int ack_slot;
+       int rx_buffer_size;
+
+       struct mii_phy phy_mii;
+       int mii_phy_addr;
+       int want_autoneg;
+       int timer_ticks;
+       struct timer_list link_timer;
+       struct net_device *mdio_dev;
+
+       struct ocp_device *rgmii_dev;
+       int rgmii_input;
+
+       struct ocp_device *zmii_dev;
+       int zmii_input;
+
+       struct ibm_ocp_mal *mal;
+       int mal_tx_chan, mal_rx_chan;
+       struct mal_commac commac;
+
+       struct ocp_device *tah_dev;
+
+       int opened;
+       int going_away;
+       int wol_irq;
+       emac_t *emacp;
+       struct ocp_device *ocpdev;
+       struct net_device *ndev;
+       spinlock_t lock;
+};
+#endif                         /* _IBM_EMAC_CORE_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c
new file mode 100644 (file)
index 0000000..c851204
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * ibm_ocp_debug.c
+ *
+ * This has all the debug routines that were in *_enet.c
+ *
+ *      Armin Kuster akuster@mvista.com
+ *      April , 2002
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <asm/io.h>
+#include "ibm_ocp_mal.h"
+#include "ibm_ocp_zmii.h"
+#include "ibm_ocp_enet.h"
+
+extern int emac_phy_read(struct net_device *dev, int mii_id, int reg);
+
+void emac_phy_dump(struct net_device *dev)
+{
+       struct ocp_enet_private *fep = dev->priv;
+       unsigned long i;
+       uint data;
+
+       printk(KERN_DEBUG " Prepare for Phy dump....\n");
+       for (i = 0; i < 0x1A; i++) {
+               data = emac_phy_read(dev, fep->mii_phy_addr, i);
+               printk(KERN_DEBUG "Phy reg 0x%lx ==> %4x\n", i, data);
+               if (i == 0x07)
+                       i = 0x0f;
+       }
+}
+
+void emac_desc_dump(struct net_device *dev)
+{
+       struct ocp_enet_private *fep = dev->priv;
+       int curr_slot;
+
+       printk(KERN_DEBUG
+              "dumping the receive descriptors:  current slot is %d\n",
+              fep->rx_slot);
+       for (curr_slot = 0; curr_slot < NUM_RX_BUFF; curr_slot++) {
+               printk(KERN_DEBUG
+                      "Desc %02d: status 0x%04x, length %3d, addr 0x%x\n",
+                      curr_slot, fep->rx_desc[curr_slot].ctrl,
+                      fep->rx_desc[curr_slot].data_len,
+                      (unsigned int)fep->rx_desc[curr_slot].data_ptr);
+       }
+}
+
+void emac_mac_dump(struct net_device *dev)
+{
+       struct ocp_enet_private *fep = dev->priv;
+       volatile emac_t *emacp = fep->emacp;
+
+       printk(KERN_DEBUG "EMAC DEBUG ********** \n");
+       printk(KERN_DEBUG "EMAC_M0  ==> 0x%x\n", in_be32(&emacp->em0mr0));
+       printk(KERN_DEBUG "EMAC_M1  ==> 0x%x\n", in_be32(&emacp->em0mr1));
+       printk(KERN_DEBUG "EMAC_TXM0==> 0x%x\n", in_be32(&emacp->em0tmr0));
+       printk(KERN_DEBUG "EMAC_TXM1==> 0x%x\n", in_be32(&emacp->em0tmr1));
+       printk(KERN_DEBUG "EMAC_RXM ==> 0x%x\n", in_be32(&emacp->em0rmr));
+       printk(KERN_DEBUG "EMAC_ISR ==> 0x%x\n", in_be32(&emacp->em0isr));
+       printk(KERN_DEBUG "EMAC_IER ==> 0x%x\n", in_be32(&emacp->em0iser));
+       printk(KERN_DEBUG "EMAC_IAH ==> 0x%x\n", in_be32(&emacp->em0iahr));
+       printk(KERN_DEBUG "EMAC_IAL ==> 0x%x\n", in_be32(&emacp->em0ialr));
+       printk(KERN_DEBUG "EMAC_VLAN_TPID_REG ==> 0x%x\n",
+              in_be32(&emacp->em0vtpid));
+}
+
+void emac_mal_dump(struct net_device *dev)
+{
+       struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal;
+
+       printk(KERN_DEBUG " MAL DEBUG ********** \n");
+       printk(KERN_DEBUG " MCR      ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALCR));
+       printk(KERN_DEBUG " ESR      ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALESR));
+       printk(KERN_DEBUG " IER      ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALIER));
+#ifdef CONFIG_40x
+       printk(KERN_DEBUG " DBR      ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALDBR));
+#endif                         /* CONFIG_40x */
+       printk(KERN_DEBUG " TXCASR   ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCASR));
+       printk(KERN_DEBUG " TXCARR   ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCARR));
+       printk(KERN_DEBUG " TXEOBISR ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALTXEOBISR));
+       printk(KERN_DEBUG " TXDEIR   ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALTXDEIR));
+       printk(KERN_DEBUG " RXCASR   ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCASR));
+       printk(KERN_DEBUG " RXCARR   ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCARR));
+       printk(KERN_DEBUG " RXEOBISR ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALRXEOBISR));
+       printk(KERN_DEBUG " RXDEIR   ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALRXDEIR));
+       printk(KERN_DEBUG " TXCTP0R  ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP0R));
+       printk(KERN_DEBUG " TXCTP1R  ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP1R));
+       printk(KERN_DEBUG " TXCTP2R  ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP2R));
+       printk(KERN_DEBUG " TXCTP3R  ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP3R));
+       printk(KERN_DEBUG " RXCTP0R  ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP0R));
+       printk(KERN_DEBUG " RXCTP1R  ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP1R));
+       printk(KERN_DEBUG " RCBS0    ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS0));
+       printk(KERN_DEBUG " RCBS1    ==> 0x%x\n",
+              (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS1));
+}
+
+void emac_serr_dump_0(struct net_device *dev)
+{
+       struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal;
+       unsigned long int mal_error, plb_error, plb_addr;
+
+       mal_error = get_mal_dcrn(mal, DCRN_MALESR);
+       printk(KERN_DEBUG "ppc405_eth_serr: %s channel %ld \n",
+              (mal_error & 0x40000000) ? "Receive" :
+              "Transmit", (mal_error & 0x3e000000) >> 25);
+       printk(KERN_DEBUG "  -----  latched error  -----\n");
+       if (mal_error & MALESR_DE)
+               printk(KERN_DEBUG "  DE: descriptor error\n");
+       if (mal_error & MALESR_OEN)
+               printk(KERN_DEBUG "  ONE: OPB non-fullword error\n");
+       if (mal_error & MALESR_OTE)
+               printk(KERN_DEBUG "  OTE: OPB timeout error\n");
+       if (mal_error & MALESR_OSE)
+               printk(KERN_DEBUG "  OSE: OPB slave error\n");
+
+       if (mal_error & MALESR_PEIN) {
+               plb_error = mfdcr(DCRN_PLB0_BESR);
+               printk(KERN_DEBUG
+                      "  PEIN: PLB error, PLB0_BESR is 0x%x\n",
+                      (unsigned int)plb_error);
+               plb_addr = mfdcr(DCRN_PLB0_BEAR);
+               printk(KERN_DEBUG
+                      "  PEIN: PLB error, PLB0_BEAR is 0x%x\n",
+                      (unsigned int)plb_addr);
+       }
+}
+
+void emac_serr_dump_1(struct net_device *dev)
+{
+       struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal;
+       int mal_error = get_mal_dcrn(mal, DCRN_MALESR);
+
+       printk(KERN_DEBUG "  -----  cumulative errors  -----\n");
+       if (mal_error & MALESR_DEI)
+               printk(KERN_DEBUG "  DEI: descriptor error interrupt\n");
+       if (mal_error & MALESR_ONEI)
+               printk(KERN_DEBUG "  OPB non-fullword error interrupt\n");
+       if (mal_error & MALESR_OTEI)
+               printk(KERN_DEBUG "  OTEI: timeout error interrupt\n");
+       if (mal_error & MALESR_OSEI)
+               printk(KERN_DEBUG "  OSEI: slave error interrupt\n");
+       if (mal_error & MALESR_PBEI)
+               printk(KERN_DEBUG "  PBEI: PLB bus error interrupt\n");
+}
+
+void emac_err_dump(struct net_device *dev, int em0isr)
+{
+       printk(KERN_DEBUG "%s: on-chip ethernet error:\n", dev->name);
+
+       if (em0isr & EMAC_ISR_OVR)
+               printk(KERN_DEBUG "  OVR: overrun\n");
+       if (em0isr & EMAC_ISR_PP)
+               printk(KERN_DEBUG "  PP: control pause packet\n");
+       if (em0isr & EMAC_ISR_BP)
+               printk(KERN_DEBUG "  BP: packet error\n");
+       if (em0isr & EMAC_ISR_RP)
+               printk(KERN_DEBUG "  RP: runt packet\n");
+       if (em0isr & EMAC_ISR_SE)
+               printk(KERN_DEBUG "  SE: short event\n");
+       if (em0isr & EMAC_ISR_ALE)
+               printk(KERN_DEBUG "  ALE: odd number of nibbles in packet\n");
+       if (em0isr & EMAC_ISR_BFCS)
+               printk(KERN_DEBUG "  BFCS: bad FCS\n");
+       if (em0isr & EMAC_ISR_PTLE)
+               printk(KERN_DEBUG "  PTLE: oversized packet\n");
+       if (em0isr & EMAC_ISR_ORE)
+               printk(KERN_DEBUG
+                      "  ORE: packet length field > max allowed LLC\n");
+       if (em0isr & EMAC_ISR_IRE)
+               printk(KERN_DEBUG "  IRE: In Range error\n");
+       if (em0isr & EMAC_ISR_DBDM)
+               printk(KERN_DEBUG "  DBDM: xmit error or SQE\n");
+       if (em0isr & EMAC_ISR_DB0)
+               printk(KERN_DEBUG "  DB0: xmit error or SQE on TX channel 0\n");
+       if (em0isr & EMAC_ISR_SE0)
+               printk(KERN_DEBUG
+                      "  SE0: Signal Quality Error test failure from TX channel 0\n");
+       if (em0isr & EMAC_ISR_TE0)
+               printk(KERN_DEBUG "  TE0: xmit channel 0 aborted\n");
+       if (em0isr & EMAC_ISR_DB1)
+               printk(KERN_DEBUG "  DB1: xmit error or SQE on TX channel \n");
+       if (em0isr & EMAC_ISR_SE1)
+               printk(KERN_DEBUG
+                      "  SE1: Signal Quality Error test failure from TX channel 1\n");
+       if (em0isr & EMAC_ISR_TE1)
+               printk(KERN_DEBUG "  TE1: xmit channel 1 aborted\n");
+       if (em0isr & EMAC_ISR_MOS)
+               printk(KERN_DEBUG "  MOS\n");
+       if (em0isr & EMAC_ISR_MOF)
+               printk(KERN_DEBUG "  MOF\n");
+
+       emac_mac_dump(dev);
+       emac_mal_dump(dev);
+}
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c
new file mode 100644 (file)
index 0000000..02d847c
--- /dev/null
@@ -0,0 +1,467 @@
+/*
+ * ibm_emac_mal.c
+ *
+ *      Armin Kuster akuster@mvista.com
+ *      Juen, 2002
+ *
+ * Copyright 2002 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/ocp.h>
+
+#include "ibm_emac_mal.h"
+
+// Locking: Should we share a lock with the client ? The client could provide
+// a lock pointer (optionally) in the commac structure... I don't think this is
+// really necessary though
+
+/* This lock protects the commac list. On today's UP implementations, it's
+ * really only used as IRQ protection in mal_{register,unregister}_commac()
+ */
+static rwlock_t mal_list_lock = RW_LOCK_UNLOCKED;
+
+int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
+{
+       unsigned long flags;
+
+       write_lock_irqsave(&mal_list_lock, flags);
+
+       /* Don't let multiple commacs claim the same channel */
+       if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
+           (mal->rx_chan_mask & commac->rx_chan_mask)) {
+               write_unlock_irqrestore(&mal_list_lock, flags);
+               return -EBUSY;
+       }
+
+       mal->tx_chan_mask |= commac->tx_chan_mask;
+       mal->rx_chan_mask |= commac->rx_chan_mask;
+
+       list_add(&commac->list, &mal->commac);
+
+       write_unlock_irqrestore(&mal_list_lock, flags);
+
+       MOD_INC_USE_COUNT;
+
+       return 0;
+}
+
+int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
+{
+       unsigned long flags;
+
+       write_lock_irqsave(&mal_list_lock, flags);
+
+       mal->tx_chan_mask &= ~commac->tx_chan_mask;
+       mal->rx_chan_mask &= ~commac->rx_chan_mask;
+
+       list_del_init(&commac->list);
+
+       write_unlock_irqrestore(&mal_list_lock, flags);
+
+       MOD_DEC_USE_COUNT;
+
+       return 0;
+}
+
+int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
+{
+       switch (channel) {
+       case 0:
+               set_mal_dcrn(mal, DCRN_MALRCBS0, size);
+               break;
+#ifdef DCRN_MALRCBS1
+       case 1:
+               set_mal_dcrn(mal, DCRN_MALRCBS1, size);
+               break;
+#endif
+#ifdef DCRN_MALRCBS2
+       case 2:
+               set_mal_dcrn(mal, DCRN_MALRCBS2, size);
+               break;
+#endif
+#ifdef DCRN_MALRCBS3
+       case 3:
+               set_mal_dcrn(mal, DCRN_MALRCBS3, size);
+               break;
+#endif
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
+{
+       struct ibm_ocp_mal *mal = dev_instance;
+       unsigned long mal_error;
+
+       /*
+        * This SERR applies to one of the devices on the MAL, here we charge
+        * it against the first EMAC registered for the MAL.
+        */
+
+       mal_error = get_mal_dcrn(mal, DCRN_MALESR);
+
+       printk(KERN_ERR "%s: System Error (MALESR=%lx)\n",
+              "MAL" /* FIXME: get the name right */ , mal_error);
+
+       /* FIXME: decipher error */
+       /* FIXME: distribute to commacs, if possible */
+
+       /* Clear the error status register */
+       set_mal_dcrn(mal, DCRN_MALESR, mal_error);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
+{
+       struct ibm_ocp_mal *mal = dev_instance;
+       struct list_head *l;
+       unsigned long isr;
+
+       isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR);
+       set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr);
+
+       read_lock(&mal_list_lock);
+       list_for_each(l, &mal->commac) {
+               struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+
+               if (isr & mc->tx_chan_mask) {
+                       mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask);
+               }
+       }
+       read_unlock(&mal_list_lock);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
+{
+       struct ibm_ocp_mal *mal = dev_instance;
+       struct list_head *l;
+       unsigned long isr;
+
+       isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR);
+       set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr);
+
+       read_lock(&mal_list_lock);
+       list_for_each(l, &mal->commac) {
+               struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+
+               if (isr & mc->rx_chan_mask) {
+                       mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask);
+               }
+       }
+       read_unlock(&mal_list_lock);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
+{
+       struct ibm_ocp_mal *mal = dev_instance;
+       struct list_head *l;
+       unsigned long deir;
+
+       deir = get_mal_dcrn(mal, DCRN_MALTXDEIR);
+
+       /* FIXME: print which MAL correctly */
+       printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
+              "MAL", deir);
+
+       read_lock(&mal_list_lock);
+       list_for_each(l, &mal->commac) {
+               struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+
+               if (deir & mc->tx_chan_mask) {
+                       mc->ops->txde(mc->dev, deir & mc->tx_chan_mask);
+               }
+       }
+       read_unlock(&mal_list_lock);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * This interrupt should be very rare at best.  This occurs when
+ * the hardware has a problem with the receive descriptors.  The manual
+ * states that it occurs when the hardware finds that a receive descriptor's
+ * empty bit is not set.  The recovery mechanism will be to
+ * traverse through the descriptors, handle any that are marked to be
+ * handled and reinitialize each along the way.  At that point the driver
+ * will be restarted.
+ */
+static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
+{
+       struct ibm_ocp_mal *mal = dev_instance;
+       struct list_head *l;
+       unsigned long deir;
+
+       deir = get_mal_dcrn(mal, DCRN_MALRXDEIR);
+
+       /*
+        * This really is needed.  This case encountered in stress testing.
+        */
+       if (deir == 0)
+               return IRQ_HANDLED;
+
+       /* FIXME: print which MAL correctly */
+       printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n",
+              "MAL", deir);
+
+       read_lock(&mal_list_lock);
+       list_for_each(l, &mal->commac) {
+               struct mal_commac *mc = list_entry(l, struct mal_commac, list);
+
+               if (deir & mc->rx_chan_mask) {
+                       mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask);
+               }
+       }
+       read_unlock(&mal_list_lock);
+
+       return IRQ_HANDLED;
+}
+
+static int __init mal_probe(struct ocp_device *ocpdev)
+{
+       struct ibm_ocp_mal *mal = NULL;
+       struct ocp_func_mal_data *maldata;
+       int err = 0;
+
+       maldata = (struct ocp_func_mal_data *)ocpdev->def->additions;
+       if (maldata == NULL) {
+               printk(KERN_ERR "mal%d: Missing additional datas !\n",
+                      ocpdev->def->index);
+               return -ENODEV;
+       }
+
+       mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
+       if (mal == NULL) {
+               printk(KERN_ERR
+                      "mal%d: Out of memory allocating MAL structure !\n",
+                      ocpdev->def->index);
+               return -ENOMEM;
+       }
+       memset(mal, 0, sizeof(*mal));
+
+       switch (ocpdev->def->index) {
+       case 0:
+               mal->dcrbase = DCRN_MAL_BASE;
+               break;
+#ifdef DCRN_MAL1_BASE
+       case 1:
+               mal->dcrbase = DCRN_MAL1_BASE;
+               break;
+#endif
+       default:
+               BUG();
+       }
+
+       /**************************/
+
+       INIT_LIST_HEAD(&mal->commac);
+
+       set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF);
+       set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);
+
+       set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR);      /* 384 */
+       /* FIXME: Add delay */
+
+       /* Set the MAL configuration register */
+       set_mal_dcrn(mal, DCRN_MALCR,
+                    MALCR_PLBB | MALCR_OPBBL | MALCR_LEA |
+                    MALCR_PLBLT_DEFAULT);
+
+       /* It would be nice to allocate buffers separately for each
+        * channel, but we can't because the channels share the upper
+        * 13 bits of address lines.  Each channels buffer must also
+        * be 4k aligned, so we allocate 4k for each channel.  This is
+        * inefficient FIXME: do better, if possible */
+       mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
+                                              MAL_DT_ALIGN *
+                                              maldata->num_tx_chans,
+                                              &mal->tx_phys_addr, GFP_KERNEL);
+       if (mal->tx_virt_addr == NULL) {
+               printk(KERN_ERR
+                      "mal%d: Out of memory allocating MAL descriptors !\n",
+                      ocpdev->def->index);
+               err = -ENOMEM;
+               goto fail;
+       }
+
+       /* God, oh, god, I hate DCRs */
+       set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr);
+#ifdef DCRN_MALTXCTP1R
+       if (maldata->num_tx_chans > 1)
+               set_mal_dcrn(mal, DCRN_MALTXCTP1R,
+                            mal->tx_phys_addr + MAL_DT_ALIGN);
+#endif                         /* DCRN_MALTXCTP1R */
+#ifdef DCRN_MALTXCTP2R
+       if (maldata->num_tx_chans > 2)
+               set_mal_dcrn(mal, DCRN_MALTXCTP2R,
+                            mal->tx_phys_addr + 2 * MAL_DT_ALIGN);
+#endif                         /* DCRN_MALTXCTP2R */
+#ifdef DCRN_MALTXCTP3R
+       if (maldata->num_tx_chans > 3)
+               set_mal_dcrn(mal, DCRN_MALTXCTP3R,
+                            mal->tx_phys_addr + 3 * MAL_DT_ALIGN);
+#endif                         /* DCRN_MALTXCTP3R */
+#ifdef DCRN_MALTXCTP4R
+       if (maldata->num_tx_chans > 4)
+               set_mal_dcrn(mal, DCRN_MALTXCTP4R,
+                            mal->tx_phys_addr + 4 * MAL_DT_ALIGN);
+#endif                         /* DCRN_MALTXCTP4R */
+#ifdef DCRN_MALTXCTP5R
+       if (maldata->num_tx_chans > 5)
+               set_mal_dcrn(mal, DCRN_MALTXCTP5R,
+                            mal->tx_phys_addr + 5 * MAL_DT_ALIGN);
+#endif                         /* DCRN_MALTXCTP5R */
+#ifdef DCRN_MALTXCTP6R
+       if (maldata->num_tx_chans > 6)
+               set_mal_dcrn(mal, DCRN_MALTXCTP6R,
+                            mal->tx_phys_addr + 6 * MAL_DT_ALIGN);
+#endif                         /* DCRN_MALTXCTP6R */
+#ifdef DCRN_MALTXCTP7R
+       if (maldata->num_tx_chans > 7)
+               set_mal_dcrn(mal, DCRN_MALTXCTP7R,
+                            mal->tx_phys_addr + 7 * MAL_DT_ALIGN);
+#endif                         /* DCRN_MALTXCTP7R */
+
+       mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
+                                              MAL_DT_ALIGN *
+                                              maldata->num_rx_chans,
+                                              &mal->rx_phys_addr, GFP_KERNEL);
+
+       set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
+#ifdef DCRN_MALRXCTP1R
+       if (maldata->num_rx_chans > 1)
+               set_mal_dcrn(mal, DCRN_MALRXCTP1R,
+                            mal->rx_phys_addr + MAL_DT_ALIGN);
+#endif                         /* DCRN_MALRXCTP1R */
+#ifdef DCRN_MALRXCTP2R
+       if (maldata->num_rx_chans > 2)
+               set_mal_dcrn(mal, DCRN_MALRXCTP2R,
+                            mal->rx_phys_addr + 2 * MAL_DT_ALIGN);
+#endif                         /* DCRN_MALRXCTP2R */
+#ifdef DCRN_MALRXCTP3R
+       if (maldata->num_rx_chans > 3)
+               set_mal_dcrn(mal, DCRN_MALRXCTP3R,
+                            mal->rx_phys_addr + 3 * MAL_DT_ALIGN);
+#endif                         /* DCRN_MALRXCTP3R */
+
+       err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
+       if (err)
+               goto fail;
+       err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE ", mal);
+       if (err)
+               goto fail;
+       err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
+       if (err)
+               goto fail;
+       err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
+       if (err)
+               goto fail;
+       err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
+       if (err)
+               goto fail;
+
+       set_mal_dcrn(mal, DCRN_MALIER,
+                    MALIER_DE | MALIER_NE | MALIER_TE |
+                    MALIER_OPBE | MALIER_PLBE);
+
+       /* Advertise me to the rest of the world */
+       ocp_set_drvdata(ocpdev, mal);
+
+       printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n",
+              ocpdev->def->index, maldata->num_tx_chans,
+              maldata->num_rx_chans);
+
+       return 0;
+
+      fail:
+       /* FIXME: dispose requested IRQs ! */
+       if (err && mal)
+               kfree(mal);
+       return err;
+}
+
+static void __exit mal_remove(struct ocp_device *ocpdev)
+{
+       struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
+       struct ocp_func_mal_data *maldata = ocpdev->def->additions;
+
+       BUG_ON(!maldata);
+
+       ocp_set_drvdata(ocpdev, NULL);
+
+       /* FIXME: shut down the MAL, deal with dependency with emac */
+       free_irq(maldata->serr_irq, mal);
+       free_irq(maldata->txde_irq, mal);
+       free_irq(maldata->txeob_irq, mal);
+       free_irq(maldata->rxde_irq, mal);
+       free_irq(maldata->rxeob_irq, mal);
+
+       if (mal->tx_virt_addr)
+               dma_free_coherent(&ocpdev->dev,
+                                 MAL_DT_ALIGN * maldata->num_tx_chans,
+                                 mal->tx_virt_addr, mal->tx_phys_addr);
+
+       if (mal->rx_virt_addr)
+               dma_free_coherent(&ocpdev->dev,
+                                 MAL_DT_ALIGN * maldata->num_rx_chans,
+                                 mal->rx_virt_addr, mal->rx_phys_addr);
+
+       kfree(mal);
+}
+
+/* Structure for a device driver */
+static struct ocp_device_id mal_ids[] = {
+       {.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL},
+       {.vendor = OCP_VENDOR_INVALID}
+};
+
+static struct ocp_driver mal_driver = {
+       .name = "mal",
+       .id_table = mal_ids,
+
+       .probe = mal_probe,
+       .remove = mal_remove,
+};
+
+static int __init init_mals(void)
+{
+       int rc;
+
+       rc = ocp_register_driver(&mal_driver);
+       if (rc < 0) {
+               ocp_unregister_driver(&mal_driver);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static void __exit exit_mals(void)
+{
+       ocp_unregister_driver(&mal_driver);
+}
+
+module_init(init_mals);
+module_exit(exit_mals);
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
new file mode 100644 (file)
index 0000000..8e456ce
--- /dev/null
@@ -0,0 +1,130 @@
+#ifndef _IBM_EMAC_MAL_H
+#define _IBM_EMAC_MAL_H
+
+#include <linux/list.h>
+
+#define MAL_DT_ALIGN   (4096)  /* Alignment for each channel's descriptor table */
+
+#define MAL_CHAN_MASK(chan)    (0x80000000 >> (chan))
+
+/* MAL Buffer Descriptor structure */
+struct mal_descriptor {
+       unsigned short ctrl;    /* MAL / Commac status control bits */
+       short data_len;         /* Max length is 4K-1 (12 bits)     */
+       unsigned char *data_ptr;        /* pointer to actual data buffer    */
+} __attribute__ ((packed));
+
+/* the following defines are for the MadMAL status and control registers. */
+/* MADMAL transmit and receive status/control bits  */
+#define MAL_RX_CTRL_EMPTY              0x8000
+#define MAL_RX_CTRL_WRAP               0x4000
+#define MAL_RX_CTRL_CM                 0x2000
+#define MAL_RX_CTRL_LAST               0x1000
+#define MAL_RX_CTRL_FIRST              0x0800
+#define MAL_RX_CTRL_INTR               0x0400
+
+#define MAL_TX_CTRL_READY              0x8000
+#define MAL_TX_CTRL_WRAP               0x4000
+#define MAL_TX_CTRL_CM                 0x2000
+#define MAL_TX_CTRL_LAST               0x1000
+#define MAL_TX_CTRL_INTR               0x0400
+
+struct mal_commac_ops {
+       void (*txeob) (void *dev, u32 chanmask);
+       void (*txde) (void *dev, u32 chanmask);
+       void (*rxeob) (void *dev, u32 chanmask);
+       void (*rxde) (void *dev, u32 chanmask);
+};
+
+struct mal_commac {
+       struct mal_commac_ops *ops;
+       void *dev;
+       u32 tx_chan_mask, rx_chan_mask;
+       struct list_head list;
+};
+
+struct ibm_ocp_mal {
+       int dcrbase;
+
+       struct list_head commac;
+       u32 tx_chan_mask, rx_chan_mask;
+
+       dma_addr_t tx_phys_addr;
+       struct mal_descriptor *tx_virt_addr;
+
+       dma_addr_t rx_phys_addr;
+       struct mal_descriptor *rx_virt_addr;
+};
+
+#define GET_MAL_STANZA(base,dcrn) \
+       case base: \
+               x = mfdcr(dcrn(base)); \
+               break;
+
+#define SET_MAL_STANZA(base,dcrn, val) \
+       case base: \
+               mtdcr(dcrn(base), (val)); \
+               break;
+
+#define GET_MAL0_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL_BASE,dcrn)
+#define SET_MAL0_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL_BASE,dcrn,val)
+
+#ifdef DCRN_MAL1_BASE
+#define GET_MAL1_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL1_BASE,dcrn)
+#define SET_MAL1_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL1_BASE,dcrn,val)
+#else                          /* ! DCRN_MAL1_BASE */
+#define GET_MAL1_STANZA(dcrn)
+#define SET_MAL1_STANZA(dcrn,val)
+#endif
+
+#define get_mal_dcrn(mal, dcrn) ({ \
+       u32 x; \
+       switch ((mal)->dcrbase) { \
+               GET_MAL0_STANZA(dcrn) \
+               GET_MAL1_STANZA(dcrn) \
+       default: \
+               BUG(); \
+       } \
+x; })
+
+#define set_mal_dcrn(mal, dcrn, val) do { \
+       switch ((mal)->dcrbase) { \
+               SET_MAL0_STANZA(dcrn,val) \
+               SET_MAL1_STANZA(dcrn,val) \
+       default: \
+               BUG(); \
+       } } while (0)
+
+static inline void mal_enable_tx_channels(struct ibm_ocp_mal *mal, u32 chanmask)
+{
+       set_mal_dcrn(mal, DCRN_MALTXCASR,
+                    get_mal_dcrn(mal, DCRN_MALTXCASR) | chanmask);
+}
+
+static inline void mal_disable_tx_channels(struct ibm_ocp_mal *mal,
+                                          u32 chanmask)
+{
+       set_mal_dcrn(mal, DCRN_MALTXCARR, chanmask);
+}
+
+static inline void mal_enable_rx_channels(struct ibm_ocp_mal *mal, u32 chanmask)
+{
+       set_mal_dcrn(mal, DCRN_MALRXCASR,
+                    get_mal_dcrn(mal, DCRN_MALRXCASR) | chanmask);
+}
+
+static inline void mal_disable_rx_channels(struct ibm_ocp_mal *mal,
+                                          u32 chanmask)
+{
+       set_mal_dcrn(mal, DCRN_MALRXCARR, chanmask);
+}
+
+extern int mal_register_commac(struct ibm_ocp_mal *mal,
+                              struct mal_commac *commac);
+extern int mal_unregister_commac(struct ibm_ocp_mal *mal,
+                                struct mal_commac *commac);
+
+extern int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel,
+                       unsigned long size);
+
+#endif                         /* _IBM_EMAC_MAL_H */
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c
new file mode 100644 (file)
index 0000000..b439087
--- /dev/null
@@ -0,0 +1,297 @@
+/*
+ * ibm_emac_phy.c
+ *
+ * PHY drivers for the ibm ocp ethernet driver. Borrowed
+ * from sungem_phy.c, though I only kept the generic MII
+ * driver for now.
+ * 
+ * This file should be shared with other drivers or eventually
+ * merged as the "low level" part of miilib
+ * 
+ * (c) 2003, Benjamin Herrenschmidt (benh@kernel.crashing.org)
+ *
+ */
+
+#include <linux/config.h>
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+
+#include "ibm_emac_phy.h"
+
+static int reset_one_mii_phy(struct mii_phy *phy, int phy_id)
+{
+       u16 val;
+       int limit = 10000;
+
+       val = __phy_read(phy, phy_id, MII_BMCR);
+       val &= ~BMCR_ISOLATE;
+       val |= BMCR_RESET;
+       __phy_write(phy, phy_id, MII_BMCR, val);
+
+       udelay(100);
+
+       while (limit--) {
+               val = __phy_read(phy, phy_id, MII_BMCR);
+               if ((val & BMCR_RESET) == 0)
+                       break;
+               udelay(10);
+       }
+       if ((val & BMCR_ISOLATE) && limit > 0)
+               __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE);
+
+       return (limit <= 0);
+}
+
+static int cis8201_init(struct mii_phy *phy)
+{
+       u16 epcr;
+
+       epcr = phy_read(phy, MII_CIS8201_EPCR);
+       epcr &= ~EPCR_MODE_MASK;
+
+       switch (phy->mode) {
+       case PHY_MODE_TBI:
+               epcr |= EPCR_TBI_MODE;
+               break;
+       case PHY_MODE_RTBI:
+               epcr |= EPCR_RTBI_MODE;
+               break;
+       case PHY_MODE_GMII:
+               epcr |= EPCR_GMII_MODE;
+               break;
+       case PHY_MODE_RGMII:
+       default:
+               epcr |= EPCR_RGMII_MODE;
+       }
+
+       phy_write(phy, MII_CIS8201_EPCR, epcr);
+
+       return 0;
+}
+
+static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
+{
+       u16 ctl, adv;
+
+       phy->autoneg = 1;
+       phy->speed = SPEED_10;
+       phy->duplex = DUPLEX_HALF;
+       phy->pause = 0;
+       phy->advertising = advertise;
+
+       /* Setup standard advertise */
+       adv = phy_read(phy, MII_ADVERTISE);
+       adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+       if (advertise & ADVERTISED_10baseT_Half)
+               adv |= ADVERTISE_10HALF;
+       if (advertise & ADVERTISED_10baseT_Full)
+               adv |= ADVERTISE_10FULL;
+       if (advertise & ADVERTISED_100baseT_Half)
+               adv |= ADVERTISE_100HALF;
+       if (advertise & ADVERTISED_100baseT_Full)
+               adv |= ADVERTISE_100FULL;
+       phy_write(phy, MII_ADVERTISE, adv);
+
+       /* Start/Restart aneg */
+       ctl = phy_read(phy, MII_BMCR);
+       ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+       phy_write(phy, MII_BMCR, ctl);
+
+       return 0;
+}
+
+static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
+{
+       u16 ctl;
+
+       phy->autoneg = 0;
+       phy->speed = speed;
+       phy->duplex = fd;
+       phy->pause = 0;
+
+       ctl = phy_read(phy, MII_BMCR);
+       ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
+
+       /* First reset the PHY */
+       phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
+
+       /* Select speed & duplex */
+       switch (speed) {
+       case SPEED_10:
+               break;
+       case SPEED_100:
+               ctl |= BMCR_SPEED100;
+               break;
+       case SPEED_1000:
+       default:
+               return -EINVAL;
+       }
+       if (fd == DUPLEX_FULL)
+               ctl |= BMCR_FULLDPLX;
+       phy_write(phy, MII_BMCR, ctl);
+
+       return 0;
+}
+
+static int genmii_poll_link(struct mii_phy *phy)
+{
+       u16 status;
+
+       (void)phy_read(phy, MII_BMSR);
+       status = phy_read(phy, MII_BMSR);
+       if ((status & BMSR_LSTATUS) == 0)
+               return 0;
+       if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE))
+               return 0;
+       return 1;
+}
+
+#define        MII_CIS8201_ACSR        0x1c
+#define  ACSR_DUPLEX_STATUS    0x0020
+#define  ACSR_SPEED_1000BASET  0x0010
+#define  ACSR_SPEED_100BASET   0x0008
+
+static int cis8201_read_link(struct mii_phy *phy)
+{
+       u16 acsr;
+
+       if (phy->autoneg) {
+               acsr = phy_read(phy, MII_CIS8201_ACSR);
+
+               if (acsr & ACSR_DUPLEX_STATUS)
+                       phy->duplex = DUPLEX_FULL;
+               else
+                       phy->duplex = DUPLEX_HALF;
+               if (acsr & ACSR_SPEED_1000BASET) {
+                       phy->speed = SPEED_1000;
+               } else if (acsr & ACSR_SPEED_100BASET)
+                       phy->speed = SPEED_100;
+               else
+                       phy->speed = SPEED_10;
+               phy->pause = 0;
+       }
+       /* On non-aneg, we assume what we put in BMCR is the speed,
+        * though magic-aneg shouldn't prevent this case from occurring
+        */
+
+       return 0;
+}
+
+static int genmii_read_link(struct mii_phy *phy)
+{
+       u16 lpa;
+
+       if (phy->autoneg) {
+               lpa = phy_read(phy, MII_LPA);
+
+               if (lpa & (LPA_10FULL | LPA_100FULL))
+                       phy->duplex = DUPLEX_FULL;
+               else
+                       phy->duplex = DUPLEX_HALF;
+               if (lpa & (LPA_100FULL | LPA_100HALF))
+                       phy->speed = SPEED_100;
+               else
+                       phy->speed = SPEED_10;
+               phy->pause = 0;
+       }
+       /* On non-aneg, we assume what we put in BMCR is the speed,
+        * though magic-aneg shouldn't prevent this case from occurring
+        */
+
+       return 0;
+}
+
+#define MII_BASIC_FEATURES     (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
+                                SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
+                                SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII)
+#define MII_GBIT_FEATURES      (MII_BASIC_FEATURES | \
+                                SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
+
+/* CIS8201 phy ops */
+static struct mii_phy_ops cis8201_phy_ops = {
+       init:cis8201_init,
+       setup_aneg:genmii_setup_aneg,
+       setup_forced:genmii_setup_forced,
+       poll_link:genmii_poll_link,
+       read_link:cis8201_read_link
+};
+
+/* Generic implementation for most 10/100 PHYs */
+static struct mii_phy_ops generic_phy_ops = {
+       setup_aneg:genmii_setup_aneg,
+       setup_forced:genmii_setup_forced,
+       poll_link:genmii_poll_link,
+       read_link:genmii_read_link
+};
+
+static struct mii_phy_def cis8201_phy_def = {
+       phy_id:0x000fc410,
+       phy_id_mask:0x000ffff0,
+       name:"CIS8201 Gigabit Ethernet",
+       features:MII_GBIT_FEATURES,
+       magic_aneg:0,
+       ops:&cis8201_phy_ops
+};
+
+static struct mii_phy_def genmii_phy_def = {
+       phy_id:0x00000000,
+       phy_id_mask:0x00000000,
+       name:"Generic MII",
+       features:MII_BASIC_FEATURES,
+       magic_aneg:0,
+       ops:&generic_phy_ops
+};
+
+static struct mii_phy_def *mii_phy_table[] = {
+       &cis8201_phy_def,
+       &genmii_phy_def,
+       NULL
+};
+
+int mii_phy_probe(struct mii_phy *phy, int mii_id)
+{
+       int rc;
+       u32 id;
+       struct mii_phy_def *def;
+       int i;
+
+       phy->autoneg = 0;
+       phy->advertising = 0;
+       phy->mii_id = mii_id;
+       phy->speed = 0;
+       phy->duplex = 0;
+       phy->pause = 0;
+
+       /* Take PHY out of isolate mode and reset it. */
+       rc = reset_one_mii_phy(phy, mii_id);
+       if (rc)
+               return -ENODEV;
+
+       /* Read ID and find matching entry */
+       id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2))
+           & 0xfffffff0;
+       for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
+               if ((id & def->phy_id_mask) == def->phy_id)
+                       break;
+       /* Should never be NULL (we have a generic entry), but... */
+       if (def == NULL)
+               return -ENODEV;
+
+       phy->def = def;
+
+       /* Setup default advertising */
+       phy->advertising = def->features;
+
+       return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h
new file mode 100644 (file)
index 0000000..49f188f
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Defines for the IBM RGMII bridge
+ *
+ * Based on ocp_zmii.h/ibm_emac_zmii.h
+ * Armin Kuster akuster@mvista.com
+ *
+ * Copyright 2004 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _IBM_EMAC_RGMII_H_
+#define _IBM_EMAC_RGMII_H_
+
+#include <linux/config.h>
+
+/* RGMII bridge */
+typedef struct rgmii_regs {
+       u32 fer;                /* Function enable register */
+       u32 ssr;                /* Speed select register */
+} rgmii_t;
+
+#define RGMII_INPUTS                   4
+
+/* RGMII device */
+struct ibm_ocp_rgmii {
+       struct rgmii_regs *base;
+       int mode[RGMII_INPUTS];
+       int users;              /* number of EMACs using this RGMII bridge */
+};
+
+/* Functional Enable Reg */
+#define RGMII_FER_MASK(x)              (0x00000007 << (4*x))
+#define RGMII_RTBI                     0x00000004
+#define RGMII_RGMII                    0x00000005
+#define RGMII_TBI                      0x00000006
+#define RGMII_GMII                     0x00000007
+
+/* Speed Selection reg */
+
+#define RGMII_SP2_100  0x00000002
+#define RGMII_SP2_1000 0x00000004
+#define RGMII_SP3_100  0x00000200
+#define RGMII_SP3_1000 0x00000400
+
+#define RGMII_MII2_SPDMASK      0x00000007
+#define RGMII_MII3_SPDMASK      0x00000700
+
+#define RGMII_MII2_100MB        RGMII_SP2_100 & ~RGMII_SP2_1000
+#define RGMII_MII2_1000MB       RGMII_SP2_1000 & ~RGMII_SP2_100
+#define RGMII_MII2_10MB                 ~(RGMII_SP2_100 | RGMII_SP2_1000)
+#define RGMII_MII3_100MB        RGMII_SP3_100 & ~RGMII_SP3_1000
+#define RGMII_MII3_1000MB       RGMII_SP3_1000 & ~RGMII_SP3_100
+#define RGMII_MII3_10MB                 ~(RGMII_SP3_100 | RGMII_SP3_1000)
+
+#define RTBI           0
+#define RGMII          1
+#define TBI            2
+#define GMII           3
+
+#endif                         /* _IBM_EMAC_RGMII_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.h b/drivers/net/ibm_emac/ibm_emac_tah.h
new file mode 100644 (file)
index 0000000..ecfc698
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Defines for the IBM TAH
+ *
+ * Copyright 2004 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _IBM_EMAC_TAH_H
+#define _IBM_EMAC_TAH_H
+
+/* TAH */
+typedef struct tah_regs {
+       u32 tah_revid;
+       u32 pad[3];
+       u32 tah_mr;
+       u32 tah_ssr0;
+       u32 tah_ssr1;
+       u32 tah_ssr2;
+       u32 tah_ssr3;
+       u32 tah_ssr4;
+       u32 tah_ssr5;
+       u32 tah_tsr;
+} tah_t;
+
+/* TAH engine */
+#define TAH_MR_CVR                     0x80000000
+#define TAH_MR_SR                      0x40000000
+#define TAH_MR_ST_256                  0x01000000
+#define TAH_MR_ST_512                  0x02000000
+#define TAH_MR_ST_768                  0x03000000
+#define TAH_MR_ST_1024                 0x04000000
+#define TAH_MR_ST_1280                 0x05000000
+#define TAH_MR_ST_1536                 0x06000000
+#define TAH_MR_TFS_16KB                        0x00000000
+#define TAH_MR_TFS_2KB                 0x00200000
+#define TAH_MR_TFS_4KB                 0x00400000
+#define TAH_MR_TFS_6KB                 0x00600000
+#define TAH_MR_TFS_8KB                 0x00800000
+#define TAH_MR_TFS_10KB                        0x00a00000
+#define TAH_MR_DTFP                    0x00100000
+#define TAH_MR_DIG                     0x00080000
+
+#endif                         /* _IBM_EMAC_TAH_H */
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h
new file mode 100644 (file)
index 0000000..6f6cd2a
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * ibm_emac_zmii.h
+ *
+ * Defines for the IBM ZMII bridge
+ *
+ *      Armin Kuster akuster@mvista.com
+ *      Dec, 2001
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _IBM_EMAC_ZMII_H_
+#define _IBM_EMAC_ZMII_H_
+
+#include <linux/config.h>
+
+/* ZMII bridge registers */
+struct zmii_regs {
+       u32 fer;                /* Function enable reg */
+       u32 ssr;                /* Speed select reg */
+       u32 smiirs;             /* SMII status reg */
+};
+
+#define ZMII_INPUTS    4
+
+/* ZMII device */
+struct ibm_ocp_zmii {
+       struct zmii_regs *base;
+       int mode[ZMII_INPUTS];
+       int users;              /* number of EMACs using this ZMII bridge */
+};
+
+/* Functional Enable Reg: per-input nibble mask; parenthesize the macro
+   argument so an expression such as ZMII_FER_MASK(i + 1) expands correctly. */
+#define ZMII_FER_MASK(x)       (0xf0000000 >> (4*(x)))
+
+#define ZMII_MDI0      0x80000000
+#define ZMII_SMII0     0x40000000
+#define ZMII_RMII0     0x20000000
+#define ZMII_MII0      0x10000000
+#define ZMII_MDI1      0x08000000
+#define ZMII_SMII1     0x04000000
+#define ZMII_RMII1     0x02000000
+#define ZMII_MII1      0x01000000
+#define ZMII_MDI2      0x00800000
+#define ZMII_SMII2     0x00400000
+#define ZMII_RMII2     0x00200000
+#define ZMII_MII2      0x00100000
+#define ZMII_MDI3      0x00080000
+#define ZMII_SMII3     0x00040000
+#define ZMII_RMII3     0x00020000
+#define ZMII_MII3      0x00010000
+
+/* Speed Selection reg */
+
+#define ZMII_SCI0      0x40000000
+#define ZMII_FSS0      0x20000000
+#define ZMII_SP0       0x10000000
+#define ZMII_SCI1      0x04000000
+#define ZMII_FSS1      0x02000000
+#define ZMII_SP1       0x01000000
+#define ZMII_SCI2      0x00400000
+#define ZMII_FSS2      0x00200000
+#define ZMII_SP2       0x00100000
+#define ZMII_SCI3      0x00040000
+#define ZMII_FSS3      0x00020000
+#define ZMII_SP3       0x00010000
+
+#define ZMII_MII0_100MB        ZMII_SP0
+#define ZMII_MII0_10MB ~ZMII_SP0
+#define ZMII_MII1_100MB        ZMII_SP1
+#define ZMII_MII1_10MB ~ZMII_SP1
+#define ZMII_MII2_100MB        ZMII_SP2
+#define ZMII_MII2_10MB ~ZMII_SP2
+#define ZMII_MII3_100MB        ZMII_SP3
+#define ZMII_MII3_10MB ~ZMII_SP3
+
+/* SMII Status reg */
+
+#define ZMII_STS0 0xFF000000   /* EMAC0 smii status mask */
+#define ZMII_STS1 0x00FF0000   /* EMAC1 smii status mask */
+
+#define SMII   0
+#define RMII   1
+#define MII    2
+#define MDI    3
+
+#endif                         /* _IBM_EMAC_ZMII_H_ */
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
new file mode 100644 (file)
index 0000000..86f34b5
--- /dev/null
@@ -0,0 +1,666 @@
+/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */
+/*
+    original ne.c
+    Written 1992-94 by Donald Becker.
+
+    Copyright 1993 United States Government as represented by the
+    Director, National Security Agency.
+
+    This software may be used and distributed according to the terms
+    of the GNU General Public License, incorporated herein by reference.
+
+    The author may be reached as becker@scyld.com, or C/O
+    Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
+
+    H8/300 modified
+    Yoshinori Sato <ysato@users.sourceforge.jp>
+*/
+
+static const char version1[] =
+"ne-h8300.c:v1.00 2004/04/11 ysato\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "8390.h"
+
+/* Some defines that people can play with if so inclined. */
+
+/* Do we perform extra sanity checks on stuff ? */
+/* #define NE_SANITY_CHECK */
+
+/* Do we implement the read before write bugfix ? */
+/* #define NE_RW_BUGFIX */
+
+/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+/* #define PACKETBUF_MEMSIZE   0x40 */
+
+/* A zero-terminated list of I/O addresses to be probed at boot. */
+
+/* ---- No user-serviceable parts below ---- */
+
+#define NE_BASE         (dev->base_addr)
+#define NE_CMD         0x00
+#define NE_DATAPORT    (ei_status.word16?0x20:0x10)    /* NatSemi-defined port window offset. */
+#define NE_RESET       (ei_status.word16?0x3f:0x1f)    /* Issue a read to reset, a write to clear. */
+#define NE_IO_EXTENT   (ei_status.word16?0x40:0x20)
+
+#define NESM_START_PG  0x40    /* First page of TX buffer */
+#define NESM_STOP_PG   0x80    /* Last page +1 of RX ring */
+
+static int ne_probe1(struct net_device *dev, int ioaddr);
+
+static int ne_open(struct net_device *dev);
+static int ne_close(struct net_device *dev);
+
+static void ne_reset_8390(struct net_device *dev);
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+                         int ring_page);
+static void ne_block_input(struct net_device *dev, int count,
+                         struct sk_buff *skb, int ring_offset);
+static void ne_block_output(struct net_device *dev, const int count,
+               const unsigned char *buf, const int start_page);
+
+
+static u32 reg_offset[16];
+
+static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
+{
+       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       int i;
+       unsigned char bus_width;
+
+       bus_width = *(volatile unsigned char *)ABWCR;
+       bus_width &= 1 << ((base_addr >> 21) & 7);
+
+       for (i = 0; i < sizeof(reg_offset) / sizeof(u32); i++)
+               if (bus_width == 0)
+                       reg_offset[i] = i * 2 + 1;
+               else
+                       reg_offset[i] = i;
+
+       ei_local->reg_offset = reg_offset;
+       return 0;
+}
+
+static int __initdata h8300_ne_count = 0;
+#ifdef CONFIG_H8300H_H8MAX
+static unsigned long __initdata h8300_ne_base[] = { 0x800600 };
+static int h8300_ne_irq[] = {EXT_IRQ4};
+#endif
+#ifdef CONFIG_H8300H_AKI3068NET
+static unsigned long __initdata h8300_ne_base[] = { 0x200000 };
+static int h8300_ne_irq[] = {EXT_IRQ5};
+#endif
+
+static inline int init_dev(struct net_device *dev)
+{
+       if (h8300_ne_count < (sizeof(h8300_ne_base) / sizeof(unsigned long))) {
+               dev->base_addr = h8300_ne_base[h8300_ne_count];
+               dev->irq       = h8300_ne_irq[h8300_ne_count];
+               h8300_ne_count++;
+               return 0;
+       } else
+               return -ENODEV;
+}
+
+/*  Probe for various non-shared-memory ethercards.
+
+   NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
+   buffer memory space.  NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
+   the SAPROM, while other supposed NE2000 clones must be detected by their
+   SA prefix.
+
+   Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+   mode results in doubled values, which can be detected and compensated for.
+
+   The probe is also responsible for initializing the card and filling
+   in the 'dev' and 'ei_status' structures.
+
+   We use the minimum memory size for some ethercard product lines, iff we can't
+   distinguish models.  You can increase the packet buffer size by setting
+   PACKETBUF_MEMSIZE.  Reported Cabletron packet buffer locations are:
+       E1010   starts at 0x100 and ends at 0x2000.
+       E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
+       E2010    starts at 0x100 and ends at 0x4000.
+       E2010-x starts at 0x100 and ends at 0xffff.  */
+
+static int __init do_ne_probe(struct net_device *dev)
+{
+       unsigned int base_addr = dev->base_addr;
+
+       SET_MODULE_OWNER(dev);
+
+       /* First check any supplied i/o locations. User knows best. <cough> */
+       if (base_addr > 0x1ff)  /* Check a single specified location. */
+               return ne_probe1(dev, base_addr);
+       else if (base_addr != 0)        /* Don't probe at all. */
+               return -ENXIO;
+
+       return -ENODEV;         /* base_addr == 0: no autoprobing here */
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+       free_irq(dev->irq, dev);
+       release_region(dev->base_addr, NE_IO_EXTENT);
+}
+
+struct net_device * __init ne_probe(int unit)
+{
+       struct net_device *dev = alloc_ei_netdev();
+       int err;
+
+       if (!dev)
+               return ERR_PTR(-ENOMEM);
+
+       if (init_dev(dev))
+               return ERR_PTR(-ENODEV);
+
+       sprintf(dev->name, "eth%d", unit);
+       netdev_boot_setup_check(dev);
+
+       err = init_reg_offset(dev, dev->base_addr);
+       if (err)
+               goto out;
+
+       err = do_ne_probe(dev);
+       if (err)
+               goto out;
+       err = register_netdev(dev);
+       if (err)
+               goto out1;
+       return dev;
+out1:
+       cleanup_card(dev);
+out:
+       free_netdev(dev);
+       return ERR_PTR(err);
+}
+
+static int __init ne_probe1(struct net_device *dev, int ioaddr)
+{
+       int i;
+       unsigned char SA_prom[16];
+       int wordlength = 2;
+       const char *name = NULL;
+       int start_page, stop_page;
+       int reg0, ret;
+       static unsigned version_printed;
+       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       unsigned char bus_width;
+
+       if (!request_region(ioaddr, NE_IO_EXTENT, dev->name))
+               return -EBUSY;
+
+       reg0 = inb_p(ioaddr);
+       if (reg0 == 0xFF) {
+               ret = -ENODEV;
+               goto err_out;
+       }
+
+       /* Do a preliminary verification that we have a 8390. */
+       {
+               int regd;
+               outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+               regd = inb_p(ioaddr + EI_SHIFT(0x0d));
+               outb_p(0xff, ioaddr + EI_SHIFT(0x0d));
+               outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+               inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
+               if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
+                       outb_p(reg0, ioaddr + EI_SHIFT(0));
+                       outb_p(regd, ioaddr + EI_SHIFT(0x0d));  /* Restore the old values. */
+                       ret = -ENODEV;
+                       goto err_out;
+               }
+       }
+
+       if (ei_debug  &&  version_printed++ == 0)
+               printk(KERN_INFO "%s", version1);
+
+       printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr);
+
+       /* Read the 16 bytes of station address PROM.
+          We must first initialize registers, similar to NS8390_init(eifdev, 0).
+          We can't reliably read the SAPROM address without this.
+          (I learned the hard way!). */
+       {
+               struct {unsigned char value, offset; } program_seq[] =
+               {
+                       {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
+                       {0x48,  EN0_DCFG},      /* Set byte-wide (0x48) access. */
+                       {0x00,  EN0_RCNTLO},    /* Clear the count regs. */
+                       {0x00,  EN0_RCNTHI},
+                       {0x00,  EN0_IMR},       /* Mask completion irq. */
+                       {0xFF,  EN0_ISR},
+                       {E8390_RXOFF, EN0_RXCR},        /* 0x20  Set to monitor */
+                       {E8390_TXOFF, EN0_TXCR},        /* 0x02  and loopback mode. */
+                       {32,    EN0_RCNTLO},
+                       {0x00,  EN0_RCNTHI},
+                       {0x00,  EN0_RSARLO},    /* DMA starting at 0x0000. */
+                       {0x00,  EN0_RSARHI},
+                       {E8390_RREAD+E8390_START, E8390_CMD},
+               };
+
+               for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++)
+                       outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
+
+       }
+       bus_width = *(volatile unsigned char *)ABWCR;
+       bus_width &= 1 << ((ioaddr >> 21) & 7);
+       ei_status.word16 = (bus_width == 0); /* temporary setting */
+       for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) {
+               SA_prom[i] = inb_p(ioaddr + NE_DATAPORT);
+               inb_p(ioaddr + NE_DATAPORT); /* dummy read */
+       }
+
+       start_page = NESM_START_PG;
+       stop_page = NESM_STOP_PG;
+
+       if (bus_width)
+               wordlength = 1;
+       else
+               outb_p(0x49, ioaddr + EN0_DCFG);
+
+       /* Set up the rest of the parameters. */
+       name = (wordlength == 2) ? "NE2000" : "NE1000";
+
+       if (! dev->irq) {
+               printk(" failed to detect IRQ line.\n");
+               ret = -EAGAIN;
+               goto err_out;
+       }
+
+       /* Snarf the interrupt now.  There's no point in waiting since we cannot
+          share and the board will usually be enabled. */
+       ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
+       if (ret) {
+               printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+               goto err_out;
+       }
+
+       dev->base_addr = ioaddr;
+
+       for(i = 0; i < ETHER_ADDR_LEN; i++) {
+               printk(" %2.2x", SA_prom[i]);
+               dev->dev_addr[i] = SA_prom[i];
+       }
+
+       printk("\n%s: %s found at %#x, using IRQ %d.\n",
+               dev->name, name, ioaddr, dev->irq);
+
+       ei_status.name = name;
+       ei_status.tx_start_page = start_page;
+       ei_status.stop_page = stop_page;
+       ei_status.word16 = (wordlength == 2);
+
+       ei_status.rx_start_page = start_page + TX_PAGES;
+#ifdef PACKETBUF_MEMSIZE
+        /* Allow the packet buffer size to be overridden by know-it-alls. */
+       ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+#endif
+
+       ei_status.reset_8390 = &ne_reset_8390;
+       ei_status.block_input = &ne_block_input;
+       ei_status.block_output = &ne_block_output;
+       ei_status.get_8390_hdr = &ne_get_8390_hdr;
+       ei_status.priv = 0;
+       dev->open = &ne_open;
+       dev->stop = &ne_close;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       dev->poll_controller = ei_poll;
+#endif
+       NS8390_init(dev, 0);
+       return 0;
+
+err_out:
+       release_region(ioaddr, NE_IO_EXTENT);
+       return ret;
+}
+
+static int ne_open(struct net_device *dev)
+{
+       ei_open(dev);
+       return 0;
+}
+
+static int ne_close(struct net_device *dev)
+{
+       if (ei_debug > 1)
+               printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
+       ei_close(dev);
+       return 0;
+}
+
+/* Hard reset the card.  This used to pause for the same period that a
+   8390 reset command required, but that shouldn't be necessary. */
+
+static void ne_reset_8390(struct net_device *dev)
+{
+       unsigned long reset_start_time = jiffies;
+       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+
+       if (ei_debug > 1)
+               printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+
+       /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
+       outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
+
+       ei_status.txing = 0;
+       ei_status.dmaing = 0;
+
+       /* This check _should_not_ be necessary, omit eventually. */
+       while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+               if (jiffies - reset_start_time > 2*HZ/100) {    /* ~20 ms timeout */
+                       printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
+                       break;
+               }
+       outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+}
+
+/* Grab the 8390 specific header. Similar to the block_input routine, but
+   we don't need to be concerned with ring wrap as the header will be at
+   the start of a page, so we optimize accordingly. */
+
+static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+{
+       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+
+       if (ei_status.dmaing)
+       {
+               printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
+                       "[DMAstat:%d][irqlock:%d].\n",
+                       dev->name, ei_status.dmaing, ei_status.irqlock);
+               return;
+       }
+
+       ei_status.dmaing |= 0x01;
+       outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
+       outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO);
+       outb_p(0, NE_BASE + EN0_RCNTHI);
+       outb_p(0, NE_BASE + EN0_RSARLO);                /* On page boundary */
+       outb_p(ring_page, NE_BASE + EN0_RSARHI);
+       outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
+
+       if (ei_status.word16) {
+               int len;
+               unsigned short *p = (unsigned short *)hdr;
+               for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--)
+                       *p++ = inw(NE_BASE + NE_DATAPORT);
+       } else
+               insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+
+       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);   /* Ack intr. */
+       ei_status.dmaing &= ~0x01;
+
+       le16_to_cpus(&hdr->count);
+}
+
+/* Block input and output, similar to the Crynwr packet driver.  If you
+   are porting to a new ethercard, look at the packet driver source for hints.
+   The NEx000 doesn't share the on-board packet memory -- you have to put
+   the packet out through the "remote DMA" dataport using outb. */
+
+static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+{
+       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+#ifdef NE_SANITY_CHECK
+       int xfer_count = count;
+#endif
+       char *buf = skb->data;
+
+       /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+       if (ei_status.dmaing)
+       {
+               printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
+                       "[DMAstat:%d][irqlock:%d].\n",
+                       dev->name, ei_status.dmaing, ei_status.irqlock);
+               return;
+       }
+       ei_status.dmaing |= 0x01;
+       outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
+       outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
+       outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
+       outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO);
+       outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI);
+       outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
+       if (ei_status.word16)
+       {
+               int len;
+               unsigned short *p = (unsigned short *)buf;
+               for (len = count>>1; len > 0; len--)
+                       *p++ = inw(NE_BASE + NE_DATAPORT);
+               if (count & 0x01)
+               {
+                       buf[count-1] = inb(NE_BASE + NE_DATAPORT);
+#ifdef NE_SANITY_CHECK
+                       xfer_count++;
+#endif
+               }
+       } else {
+               insb(NE_BASE + NE_DATAPORT, buf, count);
+       }
+
+#ifdef NE_SANITY_CHECK
+       /* This was for the ALPHA version only, but enough people have
+          been encountering problems so it is still here.  If you see
+          this message you either 1) have a slightly incompatible clone
+          or 2) have noise/speed problems with your bus. */
+
+       if (ei_debug > 1)
+       {
+               /* DMA termination address check... */
+               int addr, tries = 20;
+               do {
+                       /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
+                          -- it's broken for Rx on some cards! */
+                       int high = inb_p(NE_BASE + EN0_RSARHI);
+                       int low = inb_p(NE_BASE + EN0_RSARLO);
+                       addr = (high << 8) + low;
+                       if (((ring_offset + xfer_count) & 0xff) == low)
+                               break;
+               } while (--tries > 0);
+               if (tries <= 0)
+                       printk(KERN_WARNING "%s: RX transfer address mismatch,"
+                               "%#4.4x (expected) vs. %#4.4x (actual).\n",
+                               dev->name, ring_offset + xfer_count, addr);
+       }
+#endif
+       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);   /* Ack intr. */
+       ei_status.dmaing &= ~0x01;
+}
+
+static void ne_block_output(struct net_device *dev, int count,
+               const unsigned char *buf, const int start_page)
+{
+       struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+       unsigned long dma_start;
+#ifdef NE_SANITY_CHECK
+       int retries = 0;
+#endif
+
+       /* Round the count up for word writes.  Do we need to do this?
+          What effect will an odd byte count have on the 8390?
+          I should check someday. */
+
+       if (ei_status.word16 && (count & 0x01))
+               count++;
+
+       /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+       if (ei_status.dmaing)
+       {
+               printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
+                       "[DMAstat:%d][irqlock:%d]\n",
+                       dev->name, ei_status.dmaing, ei_status.irqlock);
+               return;
+       }
+       ei_status.dmaing |= 0x01;
+       /* We should already be in page 0, but to be safe... */
+       outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD);
+
+#ifdef NE_SANITY_CHECK
+retry:
+#endif
+
+#ifdef NE8390_RW_BUGFIX
+       /* Handle the read-before-write bug the same way as the
+          Crynwr packet driver -- the NatSemi method doesn't work.
+          Actually this doesn't always work either, but if you have
+          problems with your NEx000 this is better than nothing! */
+
+       outb_p(0x42, NE_BASE + EN0_RCNTLO);
+       outb_p(0x00, NE_BASE + EN0_RCNTHI);
+       outb_p(0x42, NE_BASE + EN0_RSARLO);
+       outb_p(0x00, NE_BASE + EN0_RSARHI);
+       outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
+       /* Make certain that the dummy read has occurred. */
+       udelay(6);
+#endif
+
+       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);
+
+       /* Now the normal output. */
+       outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
+       outb_p(count >> 8,   NE_BASE + EN0_RCNTHI);
+       outb_p(0x00, NE_BASE + EN0_RSARLO);
+       outb_p(start_page, NE_BASE + EN0_RSARHI);
+
+       outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD);
+       if (ei_status.word16) {
+               int len;
+               unsigned short *p = (unsigned short *)buf;
+               for (len = count>>1; len > 0; len--)
+                       outw(*p++, NE_BASE + NE_DATAPORT);
+       } else {
+               outsb(NE_BASE + NE_DATAPORT, buf, count);
+       }
+
+       dma_start = jiffies;
+
+#ifdef NE_SANITY_CHECK
+       /* This was for the ALPHA version only, but enough people have
+          been encountering problems so it is still here. */
+
+       if (ei_debug > 1)
+       {
+               /* DMA termination address check... */
+               int addr, tries = 20;
+               do {
+                       int high = inb_p(NE_BASE + EN0_RSARHI);
+                       int low = inb_p(NE_BASE + EN0_RSARLO);
+                       addr = (high << 8) + low;
+                       if ((start_page << 8) + count == addr)
+                               break;
+               } while (--tries > 0);
+
+               if (tries <= 0)
+               {
+                       printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
+                               "%#4.4x (expected) vs. %#4.4x (actual).\n",
+                               dev->name, (start_page << 8) + count, addr);
+                       if (retries++ == 0)
+                               goto retry;
+               }
+       }
+#endif
+
+       while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0)
+               if (jiffies - dma_start > 2*HZ/100) {           /* 20ms */
+                       printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+                       ne_reset_8390(dev);
+                       NS8390_init(dev,1);
+                       break;
+               }
+
+       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);   /* Ack intr. */
+       ei_status.dmaing &= ~0x01;
+       return;
+}
+
+\f
+#ifdef MODULE
+#define MAX_NE_CARDS   1       /* Max number of NE cards per module */
+static struct net_device *dev_ne[MAX_NE_CARDS];
+static int io[MAX_NE_CARDS];
+static int irq[MAX_NE_CARDS];
+static int bad[MAX_NE_CARDS];  /* 0xbad = bad sig or no reset ack */
+
+MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
+MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
+MODULE_PARM(bad, "1-" __MODULE_STRING(MAX_NE_CARDS) "i");
+MODULE_PARM_DESC(io, "I/O base address(es)");
+MODULE_PARM_DESC(irq, "IRQ number(s)");
+MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* This is set up so that no ISA autoprobe takes place. We can't guarantee
+that the ne2k probe is the last 8390 based probe to take place (as it
+is at boot) and so the probe will get confused by any other 8390 cards.
+ISA device autoprobes on a running machine are not recommended anyway. */
+
+int init_module(void)
+{
+       int this_dev, found = 0;
+       int err;
+
+       for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+               struct net_device *dev = alloc_ei_netdev();
+               if (!dev)
+                       break;
+               if (io[this_dev]) {
+                       dev->irq = irq[this_dev];
+                       dev->mem_end = bad[this_dev];
+                       dev->base_addr = io[this_dev];
+               } else {
+                       dev->base_addr = h8300_ne_base[this_dev];
+                       dev->irq = h8300_ne_irq[this_dev];
+               }
+               err = init_reg_offset(dev, dev->base_addr);
+               if (!err) {
+                       if (do_ne_probe(dev) == 0) {
+                               if (register_netdev(dev) == 0) {
+                                       dev_ne[found++] = dev;
+                                       continue;
+                               }
+                               cleanup_card(dev);
+                       }
+               }
+               free_netdev(dev);       /* dev must not be dereferenced below */
+               if (found)
+                       break;
+               if (io[this_dev] != 0)
+                       printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", io[this_dev]);
+               else
+                       printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n");
+               return -ENXIO;
+       }
+       if (found)
+               return 0;
+       return -ENODEV;
+}
+
+void cleanup_module(void)
+{
+       int this_dev;
+
+       for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
+               struct net_device *dev = dev_ne[this_dev];
+               if (dev) {      /* slots never filled by init_module stay NULL */
+                       unregister_netdev(dev);
+                       cleanup_card(dev);
+                       free_netdev(dev);
+               }
+       }
+}
+#endif /* MODULE */
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
new file mode 100644 (file)
index 0000000..e46cff3
--- /dev/null
@@ -0,0 +1,3 @@
+/* temporary measure */
+extern int pxa2xx_drv_pcmcia_probe(struct device *);
+
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
new file mode 100644 (file)
index 0000000..e9c5098
--- /dev/null
@@ -0,0 +1,6021 @@
+/*
+ * ipr.c -- driver for IBM Power Linux RAID adapters
+ *
+ * Written By: Brian King, IBM Corporation
+ *
+ * Copyright (C) 2003, 2004 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+/*
+ * Notes:
+ *
+ * This driver is used to control the following SCSI adapters:
+ *
+ * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
+ *
+ * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
+ *              PCI-X Dual Channel Ultra 320 SCSI Adapter
+ *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
+ *              Embedded SCSI adapter on p615 and p655 systems
+ *
+ * Supported Hardware Features:
+ *     - Ultra 320 SCSI controller
+ *     - PCI-X host interface
+ *     - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
+ *     - Non-Volatile Write Cache
+ *     - Supports attachment of non-RAID disks, tape, and optical devices
+ *     - RAID Levels 0, 5, 10
+ *     - Hot spare
+ *     - Background Parity Checking
+ *     - Background Data Scrubbing
+ *     - Ability to increase the capacity of an existing RAID 5 disk array
+ *             by adding disks
+ *
+ * Driver Features:
+ *     - Tagged command queuing
+ *     - Adapter microcode download
+ *     - PCI hot plug
+ *     - SCSI device hot plug
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/processor.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_request.h>
+#include "ipr.h"
+
+/*
+ *   Global Data
+ */
+static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
+static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
+static unsigned int ipr_max_speed = 1;
+static int ipr_testmode = 0;
+static spinlock_t ipr_driver_lock = SPIN_LOCK_UNLOCKED;
+
+/* This table describes the differences between DMA controller chips */
+static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
+       { /* Gemstone */
+               .mailbox = 0x0042C,
+               .cache_line_size = 0x20,
+               {
+                       .set_interrupt_mask_reg = 0x0022C,
+                       .clr_interrupt_mask_reg = 0x00230,
+                       .sense_interrupt_mask_reg = 0x0022C,
+                       .clr_interrupt_reg = 0x00228,
+                       .sense_interrupt_reg = 0x00224,
+                       .ioarrin_reg = 0x00404,
+                       .sense_uproc_interrupt_reg = 0x00214,
+                       .set_uproc_interrupt_reg = 0x00214,
+                       .clr_uproc_interrupt_reg = 0x00218
+               }
+       },
+       { /* Snipe */
+               .mailbox = 0x0052C,
+               .cache_line_size = 0x20,
+               {
+                       .set_interrupt_mask_reg = 0x00288,
+                       .clr_interrupt_mask_reg = 0x0028C,
+                       .sense_interrupt_mask_reg = 0x00288,
+                       .clr_interrupt_reg = 0x00284,
+                       .sense_interrupt_reg = 0x00280,
+                       .ioarrin_reg = 0x00504,
+                       .sense_uproc_interrupt_reg = 0x00290,
+                       .set_uproc_interrupt_reg = 0x00290,
+                       .clr_uproc_interrupt_reg = 0x00294
+               }
+       },
+};
+
+static int ipr_max_bus_speeds [] = {
+       IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
+};
+
+MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
+MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
+module_param_named(max_speed, ipr_max_speed, uint, 0);
+MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
+module_param_named(log_level, ipr_log_level, uint, 0);
+MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
+module_param_named(testmode, ipr_testmode, int, 0);
+MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(IPR_DRIVER_VERSION);
+
+static const char *ipr_gpdd_dev_end_states[] = {
+       "Command complete",
+       "Terminated by host",
+       "Terminated by device reset",
+       "Terminated by bus reset",
+       "Unknown",
+       "Command not started"
+};
+
+static const char *ipr_gpdd_dev_bus_phases[] = {
+       "Bus free",
+       "Arbitration",
+       "Selection",
+       "Message out",
+       "Command",
+       "Message in",
+       "Data out",
+       "Data in",
+       "Status",
+       "Reselection",
+       "Unknown"
+};
+
+/*  A constant array of IOASCs/URCs/Error Messages */
+static const
+struct ipr_error_table_t ipr_error_table[] = {
+       {0x00000000, 1, 1,
+       "8155: An unknown error was received"},
+       {0x00330000, 0, 0,
+       "Soft underlength error"},
+       {0x005A0000, 0, 0,
+       "Command to be cancelled not found"},
+       {0x00808000, 0, 0,
+       "Qualified success"},
+       {0x01080000, 1, 1,
+       "FFFE: Soft device bus error recovered by the IOA"},
+       {0x01170600, 0, 1,
+       "FFF9: Device sector reassign successful"},
+       {0x01170900, 0, 1,
+       "FFF7: Media error recovered by device rewrite procedures"},
+       {0x01180200, 0, 1,
+       "7001: IOA sector reassignment successful"},
+       {0x01180500, 0, 1,
+       "FFF9: Soft media error. Sector reassignment recommended"},
+       {0x01180600, 0, 1,
+       "FFF7: Media error recovered by IOA rewrite procedures"},
+       {0x01418000, 0, 1,
+       "FF3D: Soft PCI bus error recovered by the IOA"},
+       {0x01440000, 1, 1,
+       "FFF6: Device hardware error recovered by the IOA"},
+       {0x01448100, 0, 1,
+       "FFF6: Device hardware error recovered by the device"},
+       {0x01448200, 1, 1,
+       "FF3D: Soft IOA error recovered by the IOA"},
+       {0x01448300, 0, 1,
+       "FFFA: Undefined device response recovered by the IOA"},
+       {0x014A0000, 1, 1,
+       "FFF6: Device bus error, message or command phase"},
+       {0x015D0000, 0, 1,
+       "FFF6: Failure prediction threshold exceeded"},
+       {0x015D9200, 0, 1,
+       "8009: Impending cache battery pack failure"},
+       {0x02040400, 0, 0,
+       "34FF: Disk device format in progress"},
+       {0x023F0000, 0, 0,
+       "Synchronization required"},
+       {0x024E0000, 0, 0,
+       "No ready, IOA shutdown"},
+       {0x02670100, 0, 1,
+       "3020: Storage subsystem configuration error"},
+       {0x03110B00, 0, 0,
+       "FFF5: Medium error, data unreadable, recommend reassign"},
+       {0x03110C00, 0, 0,
+       "7000: Medium error, data unreadable, do not reassign"},
+       {0x03310000, 0, 1,
+       "FFF3: Disk media format bad"},
+       {0x04050000, 0, 1,
+       "3002: Addressed device failed to respond to selection"},
+       {0x04080000, 1, 1,
+       "3100: Device bus error"},
+       {0x04080100, 0, 1,
+       "3109: IOA timed out a device command"},
+       {0x04088000, 0, 0,
+       "3120: SCSI bus is not operational"},
+       {0x04118000, 0, 1,
+       "9000: IOA reserved area data check"},
+       {0x04118100, 0, 1,
+       "9001: IOA reserved area invalid data pattern"},
+       {0x04118200, 0, 1,
+       "9002: IOA reserved area LRC error"},
+       {0x04320000, 0, 1,
+       "102E: Out of alternate sectors for disk storage"},
+       {0x04330000, 1, 1,
+       "FFF4: Data transfer underlength error"},
+       {0x04338000, 1, 1,
+       "FFF4: Data transfer overlength error"},
+       {0x043E0100, 0, 1,
+       "3400: Logical unit failure"},
+       {0x04408500, 0, 1,
+       "FFF4: Device microcode is corrupt"},
+       {0x04418000, 1, 1,
+       "8150: PCI bus error"},
+       {0x04430000, 1, 0,
+       "Unsupported device bus message received"},
+       {0x04440000, 1, 1,
+       "FFF4: Disk device problem"},
+       {0x04448200, 1, 1,
+       "8150: Permanent IOA failure"},
+       {0x04448300, 0, 1,
+       "3010: Disk device returned wrong response to IOA"},
+       {0x04448400, 0, 1,
+       "8151: IOA microcode error"},
+       {0x04448500, 0, 0,
+       "Device bus status error"},
+       {0x04448600, 0, 1,
+       "8157: IOA error requiring IOA reset to recover"},
+       {0x04490000, 0, 0,
+       "Message reject received from the device"},
+       {0x04449200, 0, 1,
+       "8008: A permanent cache battery pack failure occurred"},
+       {0x0444A000, 0, 1,
+       "9090: Disk unit has been modified after the last known status"},
+       {0x0444A200, 0, 1,
+       "9081: IOA detected device error"},
+       {0x0444A300, 0, 1,
+       "9082: IOA detected device error"},
+       {0x044A0000, 1, 1,
+       "3110: Device bus error, message or command phase"},
+       {0x04670400, 0, 1,
+       "9091: Incorrect hardware configuration change has been detected"},
+       {0x046E0000, 0, 1,
+       "FFF4: Command to logical unit failed"},
+       {0x05240000, 1, 0,
+       "Illegal request, invalid request type or request packet"},
+       {0x05250000, 0, 0,
+       "Illegal request, invalid resource handle"},
+       {0x05260000, 0, 0,
+       "Illegal request, invalid field in parameter list"},
+       {0x05260100, 0, 0,
+       "Illegal request, parameter not supported"},
+       {0x05260200, 0, 0,
+       "Illegal request, parameter value invalid"},
+       {0x052C0000, 0, 0,
+       "Illegal request, command sequence error"},
+       {0x06040500, 0, 1,
+       "9031: Array protection temporarily suspended, protection resuming"},
+       {0x06040600, 0, 1,
+       "9040: Array protection temporarily suspended, protection resuming"},
+       {0x06290000, 0, 1,
+       "FFFB: SCSI bus was reset"},
+       {0x06290500, 0, 0,
+       "FFFE: SCSI bus transition to single ended"},
+       {0x06290600, 0, 0,
+       "FFFE: SCSI bus transition to LVD"},
+       {0x06298000, 0, 1,
+       "FFFB: SCSI bus was reset by another initiator"},
+       {0x063F0300, 0, 1,
+       "3029: A device replacement has occurred"},
+       {0x064C8000, 0, 1,
+       "9051: IOA cache data exists for a missing or failed device"},
+       {0x06670100, 0, 1,
+       "9025: Disk unit is not supported at its physical location"},
+       {0x06670600, 0, 1,
+       "3020: IOA detected a SCSI bus configuration error"},
+       {0x06678000, 0, 1,
+       "3150: SCSI bus configuration error"},
+       {0x06690200, 0, 1,
+       "9041: Array protection temporarily suspended"},
+       {0x066B0200, 0, 1,
+       "9030: Array no longer protected due to missing or failed disk unit"},
+       {0x07270000, 0, 0,
+       "Failure due to other device"},
+       {0x07278000, 0, 1,
+       "9008: IOA does not support functions expected by devices"},
+       {0x07278100, 0, 1,
+       "9010: Cache data associated with attached devices cannot be found"},
+       {0x07278200, 0, 1,
+       "9011: Cache data belongs to devices other than those attached"},
+       {0x07278400, 0, 1,
+       "9020: Array missing 2 or more devices with only 1 device present"},
+       {0x07278500, 0, 1,
+       "9021: Array missing 2 or more devices with 2 or more devices present"},
+       {0x07278600, 0, 1,
+       "9022: Exposed array is missing a required device"},
+       {0x07278700, 0, 1,
+       "9023: Array member(s) not at required physical locations"},
+       {0x07278800, 0, 1,
+       "9024: Array not functional due to present hardware configuration"},
+       {0x07278900, 0, 1,
+       "9026: Array not functional due to present hardware configuration"},
+       {0x07278A00, 0, 1,
+       "9027: Array is missing a device and parity is out of sync"},
+       {0x07278B00, 0, 1,
+       "9028: Maximum number of arrays already exist"},
+       {0x07278C00, 0, 1,
+       "9050: Required cache data cannot be located for a disk unit"},
+       {0x07278D00, 0, 1,
+       "9052: Cache data exists for a device that has been modified"},
+       {0x07278F00, 0, 1,
+       "9054: IOA resources not available due to previous problems"},
+       {0x07279100, 0, 1,
+       "9092: Disk unit requires initialization before use"},
+       {0x07279200, 0, 1,
+       "9029: Incorrect hardware configuration change has been detected"},
+       {0x07279600, 0, 1,
+       "9060: One or more disk pairs are missing from an array"},
+       {0x07279700, 0, 1,
+       "9061: One or more disks are missing from an array"},
+       {0x07279800, 0, 1,
+       "9062: One or more disks are missing from an array"},
+       {0x07279900, 0, 1,
+       "9063: Maximum number of functional arrays has been exceeded"},
+       {0x0B260000, 0, 0,
+       "Aborted command, invalid descriptor"},
+       {0x0B5A0000, 0, 0,
+       "Command terminated by host"}
+};
+
+static const struct ipr_ses_table_entry ipr_ses_table[] = {
+       { "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
+       { "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
+       { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
+       { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
+       { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
+       { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
+       { "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
+       { "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
+       { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
+       { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
+       { "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
+       { "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
+       { "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
+};
+
+/*
+ *  Function Prototypes
+ */
+static int ipr_reset_alert(struct ipr_cmnd *);
+static void ipr_process_ccn(struct ipr_cmnd *);
+static void ipr_process_error(struct ipr_cmnd *);
+static void ipr_reset_ioa_job(struct ipr_cmnd *);
+static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
+                                  enum ipr_shutdown_type);
+
+#ifdef CONFIG_SCSI_IPR_TRACE
+/**
+ * ipr_trc_hook - Add a trace entry to the driver trace
+ * @ipr_cmd:   ipr command struct
+ * @type:              trace type
+ * @add_data:  additional data
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
+                        u8 type, u32 add_data)
+{
+       struct ipr_trace_entry *trace_entry;
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+       trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
+       trace_entry->time = jiffies;
+       trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
+       trace_entry->type = type;
+       trace_entry->cmd_index = ipr_cmd->cmd_index;
+       trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
+       trace_entry->u.add_data = add_data;
+}
+#else
+#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
+#endif
+
+/**
+ * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
+ * @ipr_cmd:   ipr command struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+       struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+
+       memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+       ioarcb->write_data_transfer_length = 0;
+       ioarcb->read_data_transfer_length = 0;
+       ioarcb->write_ioadl_len = 0;
+       ioarcb->read_ioadl_len = 0;
+       ioasa->ioasc = 0;
+       ioasa->residual_data_len = 0;
+
+       ipr_cmd->scsi_cmd = NULL;
+       ipr_cmd->sense_buffer[0] = 0;
+       ipr_cmd->dma_use_sg = 0;
+}
+
+/**
+ * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
+ * @ipr_cmd:   ipr command struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
+{
+       ipr_reinit_ipr_cmnd(ipr_cmd);
+       ipr_cmd->u.scratch = 0;
+       init_timer(&ipr_cmd->timer);
+}
+
+/**
+ * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
+ * @ioa_cfg:   ioa config struct
+ *
+ * Return value:
+ *     pointer to ipr command struct
+ **/
+static
+struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+{
+       struct ipr_cmnd *ipr_cmd;
+
+       ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
+       list_del(&ipr_cmd->queue);
+       ipr_init_ipr_cmnd(ipr_cmd);
+
+       return ipr_cmd;
+}
+
+/**
+ * ipr_unmap_sglist - Unmap scatterlist if mapped
+ * @ioa_cfg:   ioa config struct
+ * @ipr_cmd:   ipr command struct
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
+                            struct ipr_cmnd *ipr_cmd)
+{
+       struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+
+       if (ipr_cmd->dma_use_sg) {
+               if (scsi_cmd->use_sg > 0) {
+                       pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
+                                    scsi_cmd->use_sg,
+                                    scsi_cmd->sc_data_direction);
+               } else {
+                       pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
+                                        scsi_cmd->request_bufflen,
+                                        scsi_cmd->sc_data_direction);
+               }
+       }
+}
+
+/**
+ * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
+ * @ioa_cfg:   ioa config struct
+ * @clr_ints:     interrupts to clear
+ *
+ * This function masks all interrupts on the adapter, then clears the
+ * interrupts specified in the mask
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
+                                         u32 clr_ints)
+{
+       volatile u32 int_reg;
+
+       /* Stop new interrupts */
+       ioa_cfg->allow_interrupts = 0;
+
+       /* Set interrupt mask to stop all new interrupts */
+       writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
+
+       /* Clear any pending interrupts */
+       writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
+       int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+}
+
+/**
+ * ipr_save_pcix_cmd_reg - Save PCI-X command register
+ * @ioa_cfg:   ioa config struct
+ *
+ * Return value:
+ *     0 on success / -EIO on failure
+ **/
+static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
+
+       if (pcix_cmd_reg == 0) {
+               dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
+               return -EIO;
+       }
+
+       if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg,
+                                &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
+               dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
+               return -EIO;
+       }
+
+       ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
+       return 0;
+}
+
+/**
+ * ipr_set_pcix_cmd_reg - Setup PCI-X command register
+ * @ioa_cfg:   ioa config struct
+ *
+ * Return value:
+ *     0 on success / -EIO on failure
+ **/
+static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
+
+       if (pcix_cmd_reg) {
+               if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg,
+                                         ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
+                       dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
+                       return -EIO;
+               }
+       } else {
+               dev_err(&ioa_cfg->pdev->dev,
+                       "Failed to setup PCI-X command register\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * ipr_scsi_eh_done - mid-layer done function for aborted ops
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function is invoked by the interrupt handler for
+ * ops generated by the SCSI mid-layer which are being aborted.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+
+       scsi_cmd->result |= (DID_ERROR << 16);
+
+       ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+       scsi_cmd->scsi_done(scsi_cmd);
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+}
+
+/**
+ * ipr_fail_all_ops - Fails all outstanding ops.
+ * @ioa_cfg:   ioa config struct
+ *
+ * This function fails all outstanding ops.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
+{
+       struct ipr_cmnd *ipr_cmd, *temp;
+
+       ENTER;
+       list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
+               list_del(&ipr_cmd->queue);
+
+               ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+               ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
+
+               if (ipr_cmd->scsi_cmd)
+                       ipr_cmd->done = ipr_scsi_eh_done;
+
+               ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
+               del_timer(&ipr_cmd->timer);
+               ipr_cmd->done(ipr_cmd);
+       }
+
+       LEAVE;
+}
+
+/**
+ * ipr_do_req -  Send driver initiated requests.
+ * @ipr_cmd:           ipr command struct
+ * @done:                      done function
+ * @timeout_func:      timeout function
+ * @timeout:           timeout value
+ *
+ * This function sends the specified command to the adapter with the
+ * timeout given. The done function is invoked on command completion.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
+                      void (*done) (struct ipr_cmnd *),
+                      void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+       ipr_cmd->done = done;
+
+       ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+       ipr_cmd->timer.expires = jiffies + timeout;
+       ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
+
+       add_timer(&ipr_cmd->timer);
+
+       ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
+
+       mb();
+       writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
+              ioa_cfg->regs.ioarrin_reg);
+}
+
+/**
+ * ipr_internal_cmd_done - Op done function for an internally generated op.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function is the op done function for an internally generated,
+ * blocking op. It simply wakes the sleeping thread.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
+{
+       if (ipr_cmd->u.sibling)
+               ipr_cmd->u.sibling = NULL;
+       else
+               complete(&ipr_cmd->completion);
+}
+
+/**
+ * ipr_send_blocking_cmd - Send command and sleep on its completion.
+ * @ipr_cmd:   ipr command struct
+ * @timeout_func:      function to invoke if command times out
+ * @timeout:   timeout
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
+                                 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
+                                 u32 timeout)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+       init_completion(&ipr_cmd->completion);
+       ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
+
+       spin_unlock_irq(ioa_cfg->host->host_lock);
+       wait_for_completion(&ipr_cmd->completion);
+       spin_lock_irq(ioa_cfg->host->host_lock);
+}
+
+/**
+ * ipr_send_hcam - Send an HCAM to the adapter.
+ * @ioa_cfg:   ioa config struct
+ * @type:              HCAM type
+ * @hostrcb:   hostrcb struct
+ *
+ * This function will send a Host Controlled Async command to the adapter.
+ * If HCAMs are currently not allowed to be issued to the adapter, it will
+ * place the hostrcb on the free queue.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
+                         struct ipr_hostrcb *hostrcb)
+{
+       struct ipr_cmnd *ipr_cmd;
+       struct ipr_ioarcb *ioarcb;
+
+       if (ioa_cfg->allow_cmds) {
+               ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+               list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+               list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
+
+               ipr_cmd->u.hostrcb = hostrcb;
+               ioarcb = &ipr_cmd->ioarcb;
+
+               ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+               ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
+               ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
+               ioarcb->cmd_pkt.cdb[1] = type;
+               ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
+               ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
+
+               ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
+               ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+               ipr_cmd->ioadl[0].flags_and_data_len =
+                       cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
+               ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
+
+               if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
+                       ipr_cmd->done = ipr_process_ccn;
+               else
+                       ipr_cmd->done = ipr_process_error;
+
+               ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
+
+               mb();
+               writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
+                      ioa_cfg->regs.ioarrin_reg);
+       } else {
+               list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
+       }
+}
+
+/**
+ * ipr_init_res_entry - Initialize a resource entry struct.
+ * @res:       resource entry struct
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_init_res_entry(struct ipr_resource_entry *res)
+{
+       res->needs_sync_complete = 1;
+       res->in_erp = 0;
+       res->add_to_ml = 0;
+       res->del_from_ml = 0;
+       res->resetting_device = 0;
+       res->tcq_active = 0;
+       res->qdepth = IPR_MAX_CMD_PER_LUN;
+       res->sdev = NULL;
+}
+
+/**
+ * ipr_handle_config_change - Handle a config change from the adapter
+ * @ioa_cfg:   ioa config struct
+ * @hostrcb:   hostrcb
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
+                             struct ipr_hostrcb *hostrcb)
+{
+       struct ipr_resource_entry *res = NULL;
+       struct ipr_config_table_entry *cfgte;
+       u32 is_ndn = 1;
+
+       cfgte = &hostrcb->hcam.u.ccn.cfgte;
+
+       list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+               if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
+                           sizeof(cfgte->res_addr))) {
+                       is_ndn = 0;
+                       break;
+               }
+       }
+
+       if (is_ndn) {
+               if (list_empty(&ioa_cfg->free_res_q)) {
+                       ipr_send_hcam(ioa_cfg,
+                                     IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
+                                     hostrcb);
+                       return;
+               }
+
+               res = list_entry(ioa_cfg->free_res_q.next,
+                                struct ipr_resource_entry, queue);
+
+               list_del(&res->queue);
+               ipr_init_res_entry(res);
+               list_add_tail(&res->queue, &ioa_cfg->used_res_q);
+       }
+
+       memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
+
+       if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
+               if (res->sdev) {
+                       res->sdev->hostdata = NULL;
+                       res->del_from_ml = 1;
+                       if (ioa_cfg->allow_ml_add_del)
+                               schedule_work(&ioa_cfg->work_q);
+               } else
+                       list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+       } else if (!res->sdev) {
+               res->add_to_ml = 1;
+               if (ioa_cfg->allow_ml_add_del)
+                       schedule_work(&ioa_cfg->work_q);
+       }
+
+       ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
+}
+
+/**
+ * ipr_process_ccn - Op done function for a CCN.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function is the op done function for a configuration
+ * change notification host controlled async from the adapter.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
+       u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+       list_del(&hostrcb->queue);
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+
+       if (ioasc) {
+               if (ioasc != IPR_IOASC_IOA_WAS_RESET)
+                       dev_err(&ioa_cfg->pdev->dev,
+                               "Host RCB failed with IOASC: 0x%08X\n", ioasc);
+
+               ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
+       } else {
+               ipr_handle_config_change(ioa_cfg, hostrcb);
+       }
+}
+
+/**
+ * ipr_log_vpd - Log the passed VPD to the error log.
+ * @vpids:                     vendor/product id struct
+ * @serial_num:                serial number string
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_log_vpd(struct ipr_std_inq_vpids *vpids, u8 *serial_num)
+{
+       char buffer[max_t(int, sizeof(struct ipr_std_inq_vpids),
+                         IPR_SERIAL_NUM_LEN) + 1];
+
+       memcpy(buffer, vpids, sizeof(struct ipr_std_inq_vpids));
+       buffer[sizeof(struct ipr_std_inq_vpids)] = '\0';
+       ipr_err("Vendor/Product ID: %s\n", buffer);
+
+       memcpy(buffer, serial_num, IPR_SERIAL_NUM_LEN);
+       buffer[IPR_SERIAL_NUM_LEN] = '\0';
+       ipr_err("    Serial Number: %s\n", buffer);
+}
+
+/**
+ * ipr_log_cache_error - Log a cache error.
+ * @ioa_cfg:   ioa config struct
+ * @hostrcb:   hostrcb struct
+ *
+ * Decodes a type 02 error overlay: prints the current and the
+ * expected cache-card/adapter-card pairings, then the trailing
+ * IOA debug words.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
+                               struct ipr_hostrcb *hostrcb)
+{
+       struct ipr_hostrcb_type_02_error *err =
+               &hostrcb->hcam.u.error.u.type_02_error;
+
+       /* What is attached right now */
+       ipr_err("-----Current Configuration-----\n");
+       ipr_err("Cache Directory Card Information:\n");
+       ipr_log_vpd(&err->ioa_vpids, err->ioa_sn);
+       ipr_err("Adapter Card Information:\n");
+       ipr_log_vpd(&err->cfc_vpids, err->cfc_sn);
+
+       /* What each card was last attached to */
+       ipr_err("-----Expected Configuration-----\n");
+       ipr_err("Cache Directory Card Information:\n");
+       ipr_log_vpd(&err->ioa_last_attached_to_cfc_vpids,
+                   err->ioa_last_attached_to_cfc_sn);
+       ipr_err("Adapter Card Information:\n");
+       ipr_log_vpd(&err->cfc_last_attached_to_ioa_vpids,
+                   err->cfc_last_attached_to_ioa_sn);
+
+       ipr_err("Additional IOA Data: %08X %08X %08X\n",
+               be32_to_cpu(err->ioa_data[0]),
+               be32_to_cpu(err->ioa_data[1]),
+               be32_to_cpu(err->ioa_data[2]));
+}
+
+/**
+ * ipr_log_config_error - Log a configuration error.
+ * @ioa_cfg:   ioa config struct
+ * @hostrcb:   hostrcb struct
+ *
+ * Walks the device data entries of a type 03 error overlay and logs
+ * each one: its address (or "missing"), old and new VPD, the cards
+ * it was last attached to, and the trailing IOA debug words.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
+                                struct ipr_hostrcb *hostrcb)
+{
+       struct ipr_hostrcb_type_03_error *err =
+               &hostrcb->hcam.u.error.u.type_03_error;
+       struct ipr_hostrcb_device_data_entry *entry = err->dev_entry;
+       int logged = be32_to_cpu(err->errors_logged);
+       int i;
+
+       ipr_err("Device Errors Detected/Logged: %d/%d\n",
+               be32_to_cpu(err->errors_detected), logged);
+
+       for (i = 0; i < logged; i++, entry++) {
+               ipr_err_separator;
+
+               /* A bus number past the maximum marks a missing device */
+               if (entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
+                       ipr_err("Device %d: missing\n", i + 1);
+               } else {
+                       ipr_err("Device %d: %d:%d:%d:%d\n", i + 1,
+                               ioa_cfg->host->host_no, entry->dev_res_addr.bus,
+                               entry->dev_res_addr.target, entry->dev_res_addr.lun);
+               }
+               ipr_log_vpd(&entry->dev_vpids, entry->dev_sn);
+
+               ipr_err("-----New Device Information-----\n");
+               ipr_log_vpd(&entry->new_dev_vpids, entry->new_dev_sn);
+
+               ipr_err("Cache Directory Card Information:\n");
+               ipr_log_vpd(&entry->ioa_last_with_dev_vpids,
+                           entry->ioa_last_with_dev_sn);
+
+               ipr_err("Adapter Card Information:\n");
+               ipr_log_vpd(&entry->cfc_last_with_dev_vpids,
+                           entry->cfc_last_with_dev_sn);
+
+               ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
+                       be32_to_cpu(entry->ioa_data[0]),
+                       be32_to_cpu(entry->ioa_data[1]),
+                       be32_to_cpu(entry->ioa_data[2]),
+                       be32_to_cpu(entry->ioa_data[3]),
+                       be32_to_cpu(entry->ioa_data[4]));
+       }
+}
+
+/**
+ * ipr_log_array_error - Log an array configuration error.
+ * @ioa_cfg:   ioa config struct
+ * @hostrcb:   hostrcb struct
+ *
+ * Logs the RAID array described by a type 04/06 error overlay. The
+ * overlay holds up to 18 member entries split across two fixed
+ * arrays: array_member covers indices 0-9, array_member2 the rest.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
+                               struct ipr_hostrcb *hostrcb)
+{
+       int i;
+       struct ipr_hostrcb_type_04_error *error;
+       struct ipr_hostrcb_array_data_entry *array_entry;
+       u8 zero_sn[IPR_SERIAL_NUM_LEN];
+
+       memset(zero_sn, '0', IPR_SERIAL_NUM_LEN);
+
+       error = &hostrcb->hcam.u.error.u.type_04_error;
+
+       ipr_err_separator;
+
+       ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
+               error->protection_level,
+               ioa_cfg->host->host_no,
+               error->last_func_vset_res_addr.bus,
+               error->last_func_vset_res_addr.target,
+               error->last_func_vset_res_addr.lun);
+
+       ipr_err_separator;
+
+       array_entry = error->array_member;
+
+       for (i = 0; i < 18; i++) {
+               /* An all-'0' serial number marks an unused slot. Skip the
+                  entry but still fall through to the pointer advance at
+                  the bottom of the loop. The previous code used
+                  "continue" here, which left array_entry stuck on the
+                  same element and could miss the switch to
+                  array_member2. */
+               if (memcmp(array_entry->serial_num, zero_sn,
+                          IPR_SERIAL_NUM_LEN)) {
+                       if (error->exposed_mode_adn == i) {
+                               ipr_err("Exposed Array Member %d:\n", i);
+                       } else {
+                               ipr_err("Array Member %d:\n", i);
+                       }
+
+                       ipr_log_vpd(&array_entry->vpids, array_entry->serial_num);
+
+                       if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) {
+                               ipr_err("Current Location: unknown\n");
+                       } else {
+                               ipr_err("Current Location: %d:%d:%d:%d\n",
+                                       ioa_cfg->host->host_no,
+                                       array_entry->dev_res_addr.bus,
+                                       array_entry->dev_res_addr.target,
+                                       array_entry->dev_res_addr.lun);
+                       }
+
+                       /* Fix: validate the expected address that is about
+                          to be printed, not the current one (the old code
+                          checked dev_res_addr here). */
+                       if (array_entry->expected_dev_res_addr.bus >=
+                           IPR_MAX_NUM_BUSES) {
+                               ipr_err("Expected Location: unknown\n");
+                       } else {
+                               ipr_err("Expected Location: %d:%d:%d:%d\n",
+                                       ioa_cfg->host->host_no,
+                                       array_entry->expected_dev_res_addr.bus,
+                                       array_entry->expected_dev_res_addr.target,
+                                       array_entry->expected_dev_res_addr.lun);
+                       }
+
+                       ipr_err_separator;
+               }
+
+               /* Advance to the next member; entries 10-17 live in the
+                  second fixed-size array. */
+               if (i == 9)
+                       array_entry = error->array_member2;
+               else
+                       array_entry++;
+       }
+}
+
+/**
+ * ipr_log_generic_error - Log an adapter error.
+ * @ioa_cfg:   ioa config struct
+ * @hostrcb:   hostrcb struct
+ *
+ * Hex-dumps the raw hostrcb payload, four big-endian 32-bit words
+ * (16 bytes) per line, each line prefixed with its byte offset.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
+                                 struct ipr_hostrcb *hostrcb)
+{
+       int len = be32_to_cpu(hostrcb->hcam.length);
+       int word;
+
+       /* Nothing to dump */
+       if (!len)
+               return;
+
+       ipr_err("IOA Error Data:\n");
+       ipr_err("Offset    0 1 2 3  4 5 6 7  8 9 A B  C D E F\n");
+
+       /* NOTE(review): assumes hcam.length is a byte count covering
+          u.raw.data - confirm against the hostrcb layout. */
+       for (word = 0; word < len / 4; word += 4) {
+               ipr_err("%08X: %08X %08X %08X %08X\n", word * 4,
+                       be32_to_cpu(hostrcb->hcam.u.raw.data[word]),
+                       be32_to_cpu(hostrcb->hcam.u.raw.data[word + 1]),
+                       be32_to_cpu(hostrcb->hcam.u.raw.data[word + 2]),
+                       be32_to_cpu(hostrcb->hcam.u.raw.data[word + 3]));
+       }
+}
+
+/**
+ * ipr_get_error - Find the specified IOASC in the ipr_error_table.
+ * @ioasc:     IOASC
+ *
+ * Looks up @ioasc in ipr_error_table and returns its index. Entry 0
+ * is the catch-all used for unknown errors, so an unmatched IOASC
+ * maps there.
+ *
+ * Return value:
+ *     index into the ipr_error_table
+ **/
+static u32 ipr_get_error(u32 ioasc)
+{
+       u32 index;
+
+       for (index = 0; index < ARRAY_SIZE(ipr_error_table); index++)
+               if (ipr_error_table[index].ioasc == ioasc)
+                       return index;
+
+       /* Not found: entry 0 describes an unknown error */
+       return 0;
+}
+
+/**
+ * ipr_handle_log_data - Log an adapter error.
+ * @ioa_cfg:   ioa config struct
+ * @hostrcb:   hostrcb struct
+ *
+ * This function logs an adapter error to the system.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
+                               struct ipr_hostrcb *hostrcb)
+{
+       u32 ioasc;
+       int error_index;
+
+       /* Only error-log notifications carry data worth decoding */
+       if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
+               return;
+
+       /* The adapter dropped notifications because hostrcbs were not
+          returned fast enough; note it but keep going. */
+       if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
+               dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
+
+       ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+
+       if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
+           ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
+               /* Tell the midlayer we had a bus reset so it will handle the UA properly */
+               scsi_report_bus_reset(ioa_cfg->host,
+                                     hostrcb->hcam.u.error.failing_dev_res_addr.bus);
+       }
+
+       /* Unmatched IOASCs map to index 0, the unknown-error entry */
+       error_index = ipr_get_error(ioasc);
+
+       /* Some IOASCs are deliberately configured not to be logged */
+       if (!ipr_error_table[error_index].log_hcam)
+               return;
+
+       if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
+               ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
+                           "%s\n", ipr_error_table[error_index].error);
+       } else {
+               dev_err(&ioa_cfg->pdev->dev, "%s\n",
+                       ipr_error_table[error_index].error);
+       }
+
+       /* Set indication we have logged an error */
+       ioa_cfg->errors_logged++;
+
+       /* Detailed overlay decoding only at elevated log levels */
+       if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
+               return;
+
+       /* The overlay id selects how the error payload is laid out */
+       switch (hostrcb->hcam.overlay_id) {
+       case IPR_HOST_RCB_OVERLAY_ID_1:
+               ipr_log_generic_error(ioa_cfg, hostrcb);
+               break;
+       case IPR_HOST_RCB_OVERLAY_ID_2:
+               ipr_log_cache_error(ioa_cfg, hostrcb);
+               break;
+       case IPR_HOST_RCB_OVERLAY_ID_3:
+               ipr_log_config_error(ioa_cfg, hostrcb);
+               break;
+       case IPR_HOST_RCB_OVERLAY_ID_4:
+       case IPR_HOST_RCB_OVERLAY_ID_6:
+               ipr_log_array_error(ioa_cfg, hostrcb);
+               break;
+       case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
+               ipr_log_generic_error(ioa_cfg, hostrcb);
+               break;
+       default:
+               dev_err(&ioa_cfg->pdev->dev,
+                       "Unknown error received. Overlay ID: %d\n",
+                       hostrcb->hcam.overlay_id);
+               break;
+       }
+}
+
+/**
+ * ipr_process_error - Op done function for an adapter error log.
+ * @ipr_cmd:   ipr command struct
+ *
+ * Completion handler for the error-log HCAM sent to the adapter.
+ * Logs the returned error data on success, complains about any
+ * unexpected failure, and always re-posts the hostrcb so the
+ * adapter can report the next error.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
+       u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+       list_del(&hostrcb->queue);
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+
+       if (ioasc) {
+               /* An IOA reset aborts outstanding HCAMs; that is
+                  expected and not worth logging. */
+               if (ioasc != IPR_IOASC_IOA_WAS_RESET)
+                       dev_err(&ioa_cfg->pdev->dev,
+                               "Host RCB failed with IOASC: 0x%08X\n", ioasc);
+       } else {
+               ipr_handle_log_data(ioa_cfg, hostrcb);
+       }
+
+       /* Hand the hostrcb back to the adapter */
+       ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
+}
+
+/**
+ * ipr_timeout -  An internally generated op has timed out.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function blocks host requests and initiates an
+ * adapter reset.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
+{
+       unsigned long lock_flags = 0;
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+       ENTER;
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+       ioa_cfg->errors_logged++;
+       dev_err(&ioa_cfg->pdev->dev,
+               "Adapter being reset due to command timeout.\n");
+
+       /* If a dump was pending, escalate so the reset path collects it */
+       if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+               ioa_cfg->sdt_state = GET_DUMP;
+
+       /* Do not start a second reset while one is already in flight,
+          unless the timed-out command is itself the reset command. */
+       if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
+               ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       LEAVE;
+}
+
+/**
+ * ipr_reset_reload - Reset/Reload the IOA
+ * @ioa_cfg:           ioa config struct
+ * @shutdown_type:     shutdown type
+ *
+ * This function resets the adapter and re-initializes it.
+ * This function assumes that all new host commands have been stopped.
+ * Return value:
+ *     SUCCESS / FAILED
+ **/
+static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
+                           enum ipr_shutdown_type shutdown_type)
+{
+       /* Start a reset only if one is not already in progress */
+       if (!ioa_cfg->in_reset_reload)
+               ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
+
+       /* The host lock is held on entry; drop it while sleeping on
+          reset completion, then retake it for the caller. */
+       spin_unlock_irq(ioa_cfg->host->host_lock);
+       wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+       spin_lock_irq(ioa_cfg->host->host_lock);
+
+       /* If we got hit with a host reset while we were already resetting
+        the adapter for some reason, and the reset failed. */
+       if (ioa_cfg->ioa_is_dead) {
+               ipr_trace;
+               return FAILED;
+       }
+
+       return SUCCESS;
+}
+
+/**
+ * ipr_find_ses_entry - Find matching SES in SES table
+ * @res:       resource entry struct of SES
+ *
+ * Scans ipr_ses_table for an entry matching the resource's product
+ * id. Only positions whose compare_product_id_byte is 'X' are
+ * actually compared; every other position is a wildcard.
+ *
+ * Return value:
+ *     pointer to SES table entry / NULL on failure
+ **/
+static const struct ipr_ses_table_entry *
+ipr_find_ses_entry(struct ipr_resource_entry *res)
+{
+       const struct ipr_ses_table_entry *ste = ipr_ses_table;
+       int i, j;
+
+       for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
+               for (j = 0; j < IPR_PROD_ID_LEN; j++) {
+                       /* Stop at the first compared byte that differs */
+                       if (ste->compare_product_id_byte[j] == 'X' &&
+                           res->cfgte.std_inq_data.vpids.product_id[j] !=
+                           ste->product_id[j])
+                               break;
+               }
+
+               /* Ran off the end => every compared byte matched */
+               if (j == IPR_PROD_ID_LEN)
+                       return ste;
+       }
+
+       return NULL;
+}
+
+/**
+ * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
+ * @ioa_cfg:   ioa config struct
+ * @bus:               SCSI bus
+ * @bus_width: bus width
+ *
+ * Return value:
+ *     SCSI bus speed in units of 100KHz, 1600 is 160 MHz
+ *     For a 2-byte wide SCSI bus, the maximum transfer speed is
+ *     twice the maximum transfer rate (e.g. for a wide enabled bus,
+ *     max 160MHz = max 320MB/sec).
+ **/
+static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
+{
+       u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
+       const struct ipr_ses_table_entry *ste;
+       struct ipr_resource_entry *res;
+
+       /* A recognized SES on this bus may lower the default limit */
+       list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+               if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
+                       continue;
+               if (res->cfgte.res_addr.bus != bus)
+                       continue;
+
+               ste = ipr_find_ses_entry(res);
+               if (ste)
+                       max_xfer_rate = (ste->max_bus_speed_limit * 10) /
+                                       (bus_width / 8);
+       }
+
+       return max_xfer_rate;
+}
+
+/**
+ * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
+ * @ioa_cfg:           ioa config struct
+ * @max_delay:         max delay in micro-seconds to wait
+ *
+ * Waits for an IODEBUG ACK from the IOA, doing busy looping.
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
+{
+       volatile u32 pcii_reg;
+       /* Poll interval in microseconds; doubled each iteration
+          (exponential backoff) */
+       int delay = 1;
+
+       /* Read interrupt reg until IOA signals IO Debug Acknowledge */
+       while (delay < max_delay) {
+               pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+
+               if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
+                       return 0;
+
+               /* udelay cannot be used if delay is more than a few milliseconds */
+               if ((delay / 1000) > MAX_UDELAY_MS)
+                       mdelay(delay / 1000);
+               else
+                       udelay(delay);
+
+               delay += delay;
+       }
+       /* Timed out without seeing the acknowledge */
+       return -EIO;
+}
+
+/**
+ * ipr_get_ldump_data_section - Dump IOA memory
+ * @ioa_cfg:                   ioa config struct
+ * @start_addr:                        adapter address to dump
+ * @dest:                              destination kernel buffer
+ * @length_in_words:   length to dump in 4 byte words
+ *
+ * Return value:
+ *     0 on success / -EIO on failure
+ **/
+static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
+                                     u32 start_addr,
+                                     u32 *dest, u32 length_in_words)
+{
+       volatile u32 temp_pcii_reg;
+       int i, delay = 0;
+
+       /* Write IOA interrupt reg starting LDUMP state  */
+       writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
+              ioa_cfg->regs.set_uproc_interrupt_reg);
+
+       /* Wait for IO debug acknowledge */
+       if (ipr_wait_iodbg_ack(ioa_cfg,
+                              IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
+               dev_err(&ioa_cfg->pdev->dev,
+                       "IOA dump long data transfer timeout\n");
+               return -EIO;
+       }
+
+       /* Signal LDUMP interlocked - clear IO debug ack */
+       writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
+              ioa_cfg->regs.clr_interrupt_reg);
+
+       /* Write Mailbox with starting address */
+       writel(start_addr, ioa_cfg->ioa_mailbox);
+
+       /* Signal address valid - clear IOA Reset alert */
+       writel(IPR_UPROCI_RESET_ALERT,
+              ioa_cfg->regs.clr_uproc_interrupt_reg);
+
+       /* One handshake (wait-ack, read mailbox, clear ack) per word */
+       for (i = 0; i < length_in_words; i++) {
+               /* Wait for IO debug acknowledge */
+               if (ipr_wait_iodbg_ack(ioa_cfg,
+                                      IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
+                       dev_err(&ioa_cfg->pdev->dev,
+                               "IOA dump short data transfer timeout\n");
+                       return -EIO;
+               }
+
+               /* Read data from mailbox and increment destination pointer */
+               /* Dump data is stored big-endian in the buffer */
+               *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
+               dest++;
+
+               /* For all but the last word of data, signal data received */
+               if (i < (length_in_words - 1)) {
+                       /* Signal dump data received - Clear IO debug Ack */
+                       writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
+                              ioa_cfg->regs.clr_interrupt_reg);
+               }
+       }
+
+       /* Signal end of block transfer. Set reset alert then clear IO debug ack */
+       writel(IPR_UPROCI_RESET_ALERT,
+              ioa_cfg->regs.set_uproc_interrupt_reg);
+
+       writel(IPR_UPROCI_IO_DEBUG_ALERT,
+              ioa_cfg->regs.clr_uproc_interrupt_reg);
+
+       /* Signal dump data received - Clear IO debug Ack */
+       writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
+              ioa_cfg->regs.clr_interrupt_reg);
+
+       /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
+       while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
+               temp_pcii_reg =
+                   readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
+
+               if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
+                       return 0;
+
+               udelay(10);
+               delay += 10;
+       }
+
+       /* NOTE(review): returns success even if the reset alert never
+          cleared - presumably intentional best-effort; confirm. */
+       return 0;
+}
+
+#ifdef CONFIG_SCSI_IPR_DUMP
+/**
+ * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
+ * @ioa_cfg:           ioa config struct
+ * @pci_address:       adapter address
+ * @length:                    length of data to copy
+ *
+ * Copy data from PCI adapter to kernel buffer.
+ * Note: length MUST be a 4 byte multiple
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
+                       unsigned long pci_address, u32 length)
+{
+       int bytes_copied = 0;
+       int cur_len, rc, rem_len, rem_page_len;
+       u32 *page;
+       unsigned long lock_flags = 0;
+       struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
+
+       while (bytes_copied < length &&
+              (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
+               /* Allocate a fresh page when none exists yet or the
+                  current one is full */
+               if (ioa_dump->page_offset >= PAGE_SIZE ||
+                   ioa_dump->page_offset == 0) {
+                       page = (u32 *)__get_free_page(GFP_ATOMIC);
+
+                       if (!page) {
+                               ipr_trace;
+                               /* Out of memory: return what we have */
+                               return bytes_copied;
+                       }
+
+                       ioa_dump->page_offset = 0;
+                       ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
+                       ioa_dump->next_page_index++;
+               } else
+                       page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
+
+               /* Copy at most to the end of the request or the page */
+               rem_len = length - bytes_copied;
+               rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
+               cur_len = min(rem_len, rem_page_len);
+
+               /* Hold the host lock around each section transfer and
+                  re-check for an abort request each time */
+               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+               if (ioa_cfg->sdt_state == ABORT_DUMP) {
+                       rc = -EIO;
+               } else {
+                       rc = ipr_get_ldump_data_section(ioa_cfg,
+                                                       pci_address + bytes_copied,
+                                                       &page[ioa_dump->page_offset / 4],
+                                                       (cur_len / sizeof(u32)));
+               }
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+               if (!rc) {
+                       ioa_dump->page_offset += cur_len;
+                       bytes_copied += cur_len;
+               } else {
+                       ipr_trace;
+                       break;
+               }
+               /* Yield the CPU between sections of a long dump */
+               schedule();
+       }
+
+       return bytes_copied;
+}
+
+/**
+ * ipr_init_dump_entry_hdr - Initialize a dump entry header.
+ * @hdr:       dump entry header struct
+ *
+ * Fills in the fields common to every dump entry; callers then set
+ * id, data_type and len for their specific entry.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
+{
+       hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
+       hdr->status = IPR_DUMP_STATUS_SUCCESS;
+       hdr->num_elems = 1;
+       hdr->offset = sizeof(*hdr);
+}
+
+/**
+ * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
+ * @ioa_cfg:   ioa config struct
+ * @driver_dump:       driver dump struct
+ *
+ * Records the adapter type and a packed firmware version (major,
+ * card type, minor bytes) built from the page 3 inquiry VPD.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
+                                  struct ipr_driver_dump *driver_dump)
+{
+       struct ipr_inquiry_page3 *vpd = &ioa_cfg->vpd_cbs->page3_data;
+       struct ipr_dump_ioa_type_entry *entry = &driver_dump->ioa_type_entry;
+
+       ipr_init_dump_entry_hdr(&entry->hdr);
+       entry->hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
+       entry->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
+       entry->hdr.len = sizeof(struct ipr_dump_ioa_type_entry) -
+               sizeof(struct ipr_dump_entry_header);
+       entry->type = ioa_cfg->type;
+       entry->fw_version = (vpd->major_release << 24) |
+               (vpd->card_type << 16) |
+               (vpd->minor_release[0] << 8) |
+               vpd->minor_release[1];
+       driver_dump->hdr.num_entries++;
+}
+
+/**
+ * ipr_dump_version_data - Fill in the driver version in the dump.
+ * @ioa_cfg:   ioa config struct
+ * @driver_dump:       driver dump struct
+ *
+ * Records the driver version string as an ASCII dump entry.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
+                                 struct ipr_driver_dump *driver_dump)
+{
+       struct ipr_dump_version_entry *entry = &driver_dump->version_entry;
+
+       ipr_init_dump_entry_hdr(&entry->hdr);
+       entry->hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
+       entry->hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
+       entry->hdr.len = sizeof(struct ipr_dump_version_entry) -
+               sizeof(struct ipr_dump_entry_header);
+       strcpy(entry->version, IPR_DRIVER_VERSION);
+       driver_dump->hdr.num_entries++;
+}
+
+/**
+ * ipr_dump_trace_data - Fill in the IOA trace in the dump.
+ * @ioa_cfg:   ioa config struct
+ * @driver_dump:       driver dump struct
+ *
+ * Snapshots the driver's trace buffer into the dump's trace entry.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
+                                  struct ipr_driver_dump *driver_dump)
+{
+       struct ipr_dump_trace_entry *entry = &driver_dump->trace_entry;
+
+       ipr_init_dump_entry_hdr(&entry->hdr);
+       entry->hdr.id = IPR_DUMP_TRACE_ID;
+       entry->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
+       entry->hdr.len = sizeof(struct ipr_dump_trace_entry) -
+               sizeof(struct ipr_dump_entry_header);
+       memcpy(entry->trace, ioa_cfg->trace, IPR_TRACE_SIZE);
+       driver_dump->hdr.num_entries++;
+}
+
+/**
+ * ipr_dump_location_data - Fill in the IOA location in the dump.
+ * @ioa_cfg:   ioa config struct
+ * @driver_dump:       driver dump struct
+ *
+ * Records the PCI device's bus id string as an ASCII dump entry.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
+                                  struct ipr_driver_dump *driver_dump)
+{
+       struct ipr_dump_location_entry *entry = &driver_dump->location_entry;
+
+       ipr_init_dump_entry_hdr(&entry->hdr);
+       entry->hdr.id = IPR_DUMP_LOCATION_ID;
+       entry->hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
+       entry->hdr.len = sizeof(struct ipr_dump_location_entry) -
+               sizeof(struct ipr_dump_entry_header);
+       strcpy(entry->location, ioa_cfg->pdev->dev.bus_id);
+       driver_dump->hdr.num_entries++;
+}
+
+/**
+ * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
+ * @ioa_cfg:   ioa config struct
+ * @dump:              dump struct
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
+{
+       unsigned long start_addr, sdt_word;
+       unsigned long lock_flags = 0;
+       struct ipr_driver_dump *driver_dump = &dump->driver_dump;
+       struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
+       u32 num_entries, start_off, end_off;
+       u32 bytes_to_copy, bytes_copied, rc;
+       struct ipr_sdt *sdt;
+       int i;
+
+       ENTER;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+       /* Only proceed when a dump has actually been requested */
+       if (ioa_cfg->sdt_state != GET_DUMP) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return;
+       }
+
+       /* The mailbox holds the adapter-side address of the dump table */
+       start_addr = readl(ioa_cfg->ioa_mailbox);
+
+       if (!ipr_sdt_is_fmt2(start_addr)) {
+               dev_err(&ioa_cfg->pdev->dev,
+                       "Invalid dump table format: %lx\n", start_addr);
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return;
+       }
+
+       dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
+
+       driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
+
+       /* Initialize the overall dump header */
+       driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
+       driver_dump->hdr.num_entries = 1;
+       driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
+       driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
+       driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
+       driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
+
+       /* Driver-side entries: version, location, type, trace */
+       ipr_dump_version_data(ioa_cfg, driver_dump);
+       ipr_dump_location_data(ioa_cfg, driver_dump);
+       ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
+       ipr_dump_trace_data(ioa_cfg, driver_dump);
+
+       /* Update dump_header */
+       driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
+
+       /* IOA Dump entry */
+       ipr_init_dump_entry_hdr(&ioa_dump->hdr);
+       ioa_dump->format = IPR_SDT_FMT2;
+       ioa_dump->hdr.len = 0;
+       ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
+       ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
+
+       /* First entries in sdt are actually a list of dump addresses and
+        lengths to gather the real dump data.  sdt represents the pointer
+        to the ioa generated dump table.  Dump data will be extracted based
+        on entries in this table */
+       sdt = &ioa_dump->sdt;
+
+       rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (u32 *)sdt,
+                                       sizeof(struct ipr_sdt) / sizeof(u32));
+
+       /* Smart Dump table is ready to use and the first entry is valid */
+       if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
+               dev_err(&ioa_cfg->pdev->dev,
+                       "Dump of IOA failed. Dump table not valid: %d, %X.\n",
+                       rc, be32_to_cpu(sdt->hdr.state));
+               driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
+               ioa_cfg->sdt_state = DUMP_OBTAINED;
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return;
+       }
+
+       num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
+
+       /* Clamp to the number of entries we have room for */
+       if (num_entries > IPR_NUM_SDT_ENTRIES)
+               num_entries = IPR_NUM_SDT_ENTRIES;
+
+       /* Drop the lock for the long copy; ipr_sdt_copy retakes it
+          around each section transfer */
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+       for (i = 0; i < num_entries; i++) {
+               if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
+                       driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
+                       break;
+               }
+
+               if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
+                       sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
+                       start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
+                       end_off = be32_to_cpu(sdt->entry[i].end_offset);
+
+                       if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
+                               bytes_to_copy = end_off - start_off;
+                               /* Skip entries too large to ever fit */
+                               if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
+                                       sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
+                                       continue;
+                               }
+
+                               /* Copy data from adapter to driver buffers */
+                               bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
+                                                           bytes_to_copy);
+
+                               ioa_dump->hdr.len += bytes_copied;
+
+                               /* Short copy: abort request or OOM upstream */
+                               if (bytes_copied != bytes_to_copy) {
+                                       driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
+
+       /* Update dump_header */
+       driver_dump->hdr.len += ioa_dump->hdr.len;
+       /* Order the length update before flagging completion */
+       wmb();
+       ioa_cfg->sdt_state = DUMP_OBTAINED;
+       LEAVE;
+}
+
+#else
+#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
+#endif
+
+/**
+ * ipr_worker_thread - Worker thread
+ * @data:              ioa config struct
+ *
+ * Called at task level from a work thread. This function takes care
+ * of adding and removing device from the mid-layer as configuration
+ * changes are detected by the adapter.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_worker_thread(void *data)
+{
+       unsigned long lock_flags;
+       struct ipr_resource_entry *res;
+       struct scsi_device *sdev;
+       struct ipr_dump *dump;
+       struct ipr_ioa_cfg *ioa_cfg = data;
+       u8 bus, target, lun;
+       int did_work;
+
+       ENTER;
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+       /* A pending dump request takes priority over config changes.
+        * Take a kobject reference on the dump so it cannot be freed
+        * while the host lock is dropped for the long-running copy. */
+       if (ioa_cfg->sdt_state == GET_DUMP) {
+               dump = ioa_cfg->dump;
+               if (!dump || !kobject_get(&dump->kobj)) {
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                       return;
+               }
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               ipr_get_ioa_dump(ioa_cfg, dump);
+               kobject_put(&dump->kobj);
+
+               /* A completed dump sets sdt_state to DUMP_OBTAINED; kick
+                * off the adapter reset that was deferred for it. */
+               spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+               if (ioa_cfg->sdt_state == DUMP_OBTAINED)
+                       ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return;
+       }
+
+restart:
+       /* Removal pass: scsi_remove_device() can sleep, so the host lock
+        * is dropped around it; restart the list walk after each removal
+        * because the list may have changed while unlocked. */
+       do {
+               did_work = 0;
+               if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                       return;
+               }
+
+               list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+                       if (res->del_from_ml && res->sdev) {
+                               did_work = 1;
+                               sdev = res->sdev;
+                               if (!scsi_device_get(sdev)) {
+                                       res->sdev = NULL;
+                                       list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+                                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                                       scsi_remove_device(sdev);
+                                       scsi_device_put(sdev);
+                                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+                               }
+                               break;
+                       }
+               }
+       } while(did_work);
+
+       /* Addition pass: surface new devices to the mid-layer. Adding
+        * also drops the lock, so redo the removal pass afterwards. */
+       list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+               if (res->add_to_ml) {
+                       bus = res->cfgte.res_addr.bus;
+                       target = res->cfgte.res_addr.target;
+                       lun = res->cfgte.res_addr.lun;
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                       scsi_add_device(ioa_cfg->host, bus, target, lun);
+                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+                       goto restart;
+               }
+       }
+
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       LEAVE;
+}
+
+#ifdef CONFIG_SCSI_IPR_TRACE
+/**
+ * ipr_read_trace - Dump the adapter trace
+ * @kobj:              kobject struct
+ * @buf:               buffer
+ * @off:               offset
+ * @count:             buffer size
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
+                             loff_t off, size_t count)
+{
+       struct class_device *cdev = container_of(kobj,struct class_device,kobj);
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+       unsigned long lock_flags = 0;
+       int size = IPR_TRACE_SIZE;
+       char *src = (char *)ioa_cfg->trace;
+
+       /* Clamp the request to the fixed-size trace buffer: reads past
+        * the end return 0 (EOF), straddling reads are truncated. */
+       if (off > size)
+               return 0;
+       if (off + count > size) {
+               size -= off;
+               count = size;
+       }
+
+       /* Copy under the host lock so the snapshot is consistent. */
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       memcpy(buf, &src[off], count);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       return count;
+}
+
+/* sysfs binary attribute "trace" (read-only). .size = 0 leaves the
+ * size unbounded from sysfs's view; bounds are enforced in
+ * ipr_read_trace() itself. */
+static struct bin_attribute ipr_trace_attr = {
+       .attr = {
+               .name = "trace",
+               .mode = S_IRUGO,
+       },
+       .size = 0,
+       .read = ipr_read_trace,
+};
+#endif
+
+/**
+ * ipr_show_fw_version - Show the firmware version
+ * @class_dev: class device struct
+ * @buf:               buffer
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(class_dev);
+       struct ipr_ioa_cfg *cfg = (struct ipr_ioa_cfg *)host->hostdata;
+       struct ipr_inquiry_page3 *vpd = &cfg->vpd_cbs->page3_data;
+       unsigned long flags = 0;
+       ssize_t n;
+
+       /* Snapshot the ucode VPD fields under the host lock. */
+       spin_lock_irqsave(cfg->host->host_lock, flags);
+       n = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
+                    vpd->major_release, vpd->card_type,
+                    vpd->minor_release[0], vpd->minor_release[1]);
+       spin_unlock_irqrestore(cfg->host->host_lock, flags);
+       return n;
+}
+
+/* sysfs host attribute "fw_version" (read-only). */
+static struct class_device_attribute ipr_fw_version_attr = {
+       .attr = {
+               .name =         "fw_version",
+               .mode =         S_IRUGO,
+       },
+       .show = ipr_show_fw_version,
+};
+
+/**
+ * ipr_show_log_level - Show the adapter's error logging level
+ * @class_dev: class device struct
+ * @buf:               buffer
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
+{
+       struct Scsi_Host *host = class_to_shost(class_dev);
+       struct ipr_ioa_cfg *cfg = (struct ipr_ioa_cfg *)host->hostdata;
+       unsigned long flags = 0;
+       ssize_t n;
+
+       /* Read the current logging level under the host lock. */
+       spin_lock_irqsave(cfg->host->host_lock, flags);
+       n = snprintf(buf, PAGE_SIZE, "%d\n", cfg->log_level);
+       spin_unlock_irqrestore(cfg->host->host_lock, flags);
+       return n;
+}
+
+/**
+ * ipr_store_log_level - Change the adapter's error logging level
+ * @class_dev: class device struct
+ * @buf:               buffer
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ipr_store_log_level(struct class_device *class_dev,
+                                  const char *buf, size_t count)
+{
+       struct Scsi_Host *host = class_to_shost(class_dev);
+       struct ipr_ioa_cfg *cfg = (struct ipr_ioa_cfg *)host->hostdata;
+       unsigned long new_level = simple_strtoul(buf, NULL, 10);
+       unsigned long flags = 0;
+
+       /* Store the parsed level under the host lock. */
+       spin_lock_irqsave(cfg->host->host_lock, flags);
+       cfg->log_level = new_level;
+       spin_unlock_irqrestore(cfg->host->host_lock, flags);
+       return strlen(buf);
+}
+
+/* sysfs host attribute "log_level" (world-readable, root-writable). */
+static struct class_device_attribute ipr_log_level_attr = {
+       .attr = {
+               .name =         "log_level",
+               .mode =         S_IRUGO | S_IWUSR,
+       },
+       .show = ipr_show_log_level,
+       .store = ipr_store_log_level
+};
+
+/**
+ * ipr_store_diagnostics - IOA Diagnostics interface
+ * @class_dev: class_device struct
+ * @buf:               buffer
+ * @count:             buffer size
+ *
+ * This function will reset the adapter and wait a reasonable
+ * amount of time for any errors that the adapter might log.
+ *
+ * Return value:
+ *     count on success / -EACCES, -EIO on failure
+ **/
+static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
+                                    const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(class_dev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+       unsigned long lock_flags = 0;
+       int rc = count;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       /* Let any in-flight reset finish before starting a fresh one. */
+       wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       ioa_cfg->errors_logged = 0;
+       ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+
+       if (ioa_cfg->in_reset_reload) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+
+               /* Wait for a second for any errors to be logged.
+                * schedule_timeout() only sleeps when the task state is
+                * not TASK_RUNNING, so set the state first -- the bare
+                * call returned immediately and gave no delay at all. */
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               schedule_timeout(HZ);
+       } else {
+               /* The reset never started (e.g. not permitted). */
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return -EIO;
+       }
+
+       /* Fail if the adapter is resetting again or logged errors. */
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
+               rc = -EIO;
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+       return rc;
+}
+
+/* sysfs host attribute "run_diagnostics" (write-only, root). */
+static struct class_device_attribute ipr_diagnostics_attr = {
+       .attr = {
+               .name =         "run_diagnostics",
+               .mode =         S_IWUSR,
+       },
+       .store = ipr_store_diagnostics
+};
+
+/**
+ * ipr_store_reset_adapter - Reset the adapter
+ * @class_dev: class_device struct
+ * @buf:               buffer
+ * @count:             buffer size
+ *
+ * This function will reset the adapter.
+ *
+ * Return value:
+ *     count on success / other on failure
+ **/
+static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
+                                      const char *buf, size_t count)
+{
+       struct Scsi_Host *host = class_to_shost(class_dev);
+       struct ipr_ioa_cfg *cfg = (struct ipr_ioa_cfg *)host->hostdata;
+       unsigned long flags;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       /* Start a normal-shutdown reset unless one is already running. */
+       spin_lock_irqsave(cfg->host->host_lock, flags);
+       if (!cfg->in_reset_reload)
+               ipr_initiate_ioa_reset(cfg, IPR_SHUTDOWN_NORMAL);
+       spin_unlock_irqrestore(cfg->host->host_lock, flags);
+
+       /* Block until the reset/reload has fully completed. */
+       wait_event(cfg->reset_wait_q, !cfg->in_reset_reload);
+
+       return count;
+}
+
+/* sysfs host attribute "reset_host" (write-only, root). */
+static struct class_device_attribute ipr_ioa_reset_attr = {
+       .attr = {
+               .name =         "reset_host",
+               .mode =         S_IWUSR,
+       },
+       .store = ipr_store_reset_adapter
+};
+
+/**
+ * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
+ * @buf_len:           buffer length
+ *
+ * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
+ * list to use for microcode download
+ *
+ * Return value:
+ *     pointer to sglist / NULL on failure
+ **/
+static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
+{
+       int sg_size, order, bsize_elem, num_elem, i, j;
+       struct ipr_sglist *sglist;
+       struct scatterlist *scatterlist;
+       struct page *page;
+
+       /* Get the minimum size per scatter/gather element */
+       sg_size = buf_len / (IPR_MAX_SGLIST - 1);
+
+       /* Get the actual size per element (rounded up to a page order) */
+       order = get_order(sg_size);
+
+       /* Determine the actual number of bytes per element */
+       bsize_elem = PAGE_SIZE * (1 << order);
+
+       /* Determine the actual number of sg entries needed,
+        * rounding up for a trailing partial chunk */
+       if (buf_len % bsize_elem)
+               num_elem = (buf_len / bsize_elem) + 1;
+       else
+               num_elem = buf_len / bsize_elem;
+
+       /* Allocate a scatter/gather list for the DMA; "num_elem - 1"
+        * because struct ipr_sglist embeds one scatterlist entry */
+       sglist = kmalloc(sizeof(struct ipr_sglist) +
+                        (sizeof(struct scatterlist) * (num_elem - 1)),
+                        GFP_KERNEL);
+
+       if (sglist == NULL) {
+               ipr_trace;
+               return NULL;
+       }
+
+       memset(sglist, 0, sizeof(struct ipr_sglist) +
+              (sizeof(struct scatterlist) * (num_elem - 1)));
+
+       scatterlist = sglist->scatterlist;
+
+       sglist->order = order;
+       sglist->num_sg = num_elem;
+
+       /* Allocate a bunch of sg elements */
+       for (i = 0; i < num_elem; i++) {
+               page = alloc_pages(GFP_KERNEL, order);
+               if (!page) {
+                       ipr_trace;
+
+                       /* Free up what we already allocated */
+                       for (j = i - 1; j >= 0; j--)
+                               __free_pages(scatterlist[j].page, order);
+                       kfree(sglist);
+                       return NULL;
+               }
+
+               scatterlist[i].page = page;
+       }
+
+       return sglist;
+}
+
+/**
+ * ipr_free_ucode_buffer - Frees a microcode download buffer
+ * @p_dnld:            scatter/gather list pointer
+ *
+ * Free a DMA'able ucode download buffer previously allocated with
+ * ipr_alloc_ucode_buffer
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
+{
+       int idx;
+
+       /* Release every page chunk, then the descriptor itself. */
+       for (idx = 0; idx < sglist->num_sg; idx++)
+               __free_pages(sglist->scatterlist[idx].page, sglist->order);
+
+       kfree(sglist);
+}
+
+/**
+ * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
+ * @sglist:            scatter/gather list pointer
+ * @buffer:            buffer pointer
+ * @len:               buffer length
+ *
+ * Copy a microcode image into the page chunks allocated by
+ * ipr_alloc_ucode_buffer and record each chunk's length.
+ * (An unreachable "result != 0" check left over from a failing-copy
+ * variant has been removed: memcpy() cannot fail and result was
+ * never reassigned.)
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
+                                u8 *buffer, u32 len)
+{
+       int bsize_elem, i;
+       struct scatterlist *scatterlist;
+       void *kaddr;
+
+       /* Determine the actual number of bytes per element */
+       bsize_elem = PAGE_SIZE * (1 << sglist->order);
+
+       scatterlist = sglist->scatterlist;
+
+       /* Copy all of the full-sized chunks */
+       for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
+               kaddr = kmap(scatterlist[i].page);
+               memcpy(kaddr, buffer, bsize_elem);
+               kunmap(scatterlist[i].page);
+
+               scatterlist[i].length = bsize_elem;
+       }
+
+       /* Copy the trailing partial chunk, if any */
+       if (len % bsize_elem) {
+               kaddr = kmap(scatterlist[i].page);
+               memcpy(kaddr, buffer, len % bsize_elem);
+               kunmap(scatterlist[i].page);
+
+               scatterlist[i].length = len % bsize_elem;
+       }
+
+       sglist->buffer_len = len;
+       return 0;
+}
+
+/**
+ * ipr_map_ucode_buffer - Map a microcode download buffer
+ * @ipr_cmd:   ipr command struct
+ * @sglist:            scatter/gather list
+ * @len:               total length of download buffer
+ *
+ * Maps a microcode download scatter/gather list for DMA and
+ * builds the IOADL.
+ *
+ * Return value:
+ *     0 on success / -EIO on failure
+ **/
+static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd,
+                               struct ipr_sglist *sglist, int len)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+       struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+       struct scatterlist *scatterlist = sglist->scatterlist;
+       int i;
+
+       ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist,
+                                        sglist->num_sg, DMA_TO_DEVICE);
+
+       /* Describe an outbound (write) transfer of len bytes. */
+       ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+       ioarcb->write_data_transfer_length = cpu_to_be32(len);
+       ioarcb->write_ioadl_len =
+               cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+
+       /* One IOADL descriptor per mapped sg element. */
+       for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+               ioadl[i].flags_and_data_len =
+                       cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
+               ioadl[i].address =
+                       cpu_to_be32(sg_dma_address(&scatterlist[i]));
+       }
+
+       /* Flag the final descriptor; a zero mapping count means
+        * pci_map_sg() failed. */
+       if (likely(ipr_cmd->dma_use_sg)) {
+               ioadl[i-1].flags_and_data_len |=
+                       cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+       }
+       else {
+               dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * ipr_store_update_fw - Update the firmware on the adapter
+ * @class_dev: class_device struct
+ * @buf:               buffer holding the firmware file name
+ * @count:             buffer size
+ *
+ * This function will update the firmware on the adapter.
+ *
+ * Return value:
+ *     count on success / other on failure
+ **/
+static ssize_t ipr_store_update_fw(struct class_device *class_dev,
+                                      const char *buf, size_t count)
+{
+       struct Scsi_Host *shost = class_to_shost(class_dev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+       struct ipr_ucode_image_header *image_hdr;
+       const struct firmware *fw_entry;
+       struct ipr_sglist *sglist;
+       unsigned long lock_flags;
+       char fname[100];
+       u8 *src;
+       int len, result, dnld_size;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       /* Copy the file name into a bounded local buffer. snprintf()
+        * returns the length the string *would* have had, so clamp it
+        * before indexing: the old "fname[len - 1]" write could land
+        * before the array (empty input) or past it (oversized input).
+        * Strip a trailing newline only when one is actually present. */
+       len = snprintf(fname, sizeof(fname), "%s", buf);
+       if (len >= (int)sizeof(fname))
+               len = sizeof(fname) - 1;
+       if (len > 0 && fname[len - 1] == '\n')
+               fname[len - 1] = '\0';
+
+       if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
+               dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
+               return -EIO;
+       }
+
+       image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
+
+       /* Sanity check the image: the header must fit in the file and
+        * the card type must match this adapter (VPD 0 = wildcard). */
+       if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
+           (ioa_cfg->vpd_cbs->page3_data.card_type &&
+            ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
+               dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
+               release_firmware(fw_entry);
+               return -EINVAL;
+       }
+
+       /* The payload follows the image header. */
+       src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
+       dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
+       sglist = ipr_alloc_ucode_buffer(dnld_size);
+
+       if (!sglist) {
+               dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
+               release_firmware(fw_entry);
+               return -ENOMEM;
+       }
+
+       result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
+
+       if (result) {
+               dev_err(&ioa_cfg->pdev->dev,
+                       "Microcode buffer copy to DMA buffer failed\n");
+               ipr_free_ucode_buffer(sglist);
+               release_firmware(fw_entry);
+               return result;
+       }
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+       /* Only one download may be staged at a time. */
+       if (ioa_cfg->ucode_sglist) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               dev_err(&ioa_cfg->pdev->dev,
+                       "Microcode download already in progress\n");
+               ipr_free_ucode_buffer(sglist);
+               release_firmware(fw_entry);
+               return -EIO;
+       }
+
+       /* Stage the buffer and reset the adapter; the reset path
+        * performs the actual download. Wait for it to finish. */
+       ioa_cfg->ucode_sglist = sglist;
+       ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       ioa_cfg->ucode_sglist = NULL;
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+       ipr_free_ucode_buffer(sglist);
+       release_firmware(fw_entry);
+
+       return count;
+}
+
+/* sysfs host attribute "update_fw" (write-only, root): write a
+ * firmware file name to download that image to the adapter. */
+static struct class_device_attribute ipr_update_fw_attr = {
+       .attr = {
+               .name =         "update_fw",
+               .mode =         S_IWUSR,
+       },
+       .store = ipr_store_update_fw
+};
+
+/* NULL-terminated list of all host (IOA) sysfs attributes. */
+static struct class_device_attribute *ipr_ioa_attrs[] = {
+       &ipr_fw_version_attr,
+       &ipr_log_level_attr,
+       &ipr_diagnostics_attr,
+       &ipr_ioa_reset_attr,
+       &ipr_update_fw_attr,
+       NULL,
+};
+
+#ifdef CONFIG_SCSI_IPR_DUMP
+/**
+ * ipr_read_dump - Dump the adapter
+ * @kobj:              kobject struct
+ * @buf:               buffer
+ * @off:               offset
+ * @count:             buffer size
+ *
+ * Copies the requested window of the dump: first the driver dump
+ * structures, then the IOA dump header, then the captured IOA data
+ * pages.
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
+                             loff_t off, size_t count)
+{
+       struct class_device *cdev = container_of(kobj,struct class_device,kobj);
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+       struct ipr_dump *dump;
+       unsigned long lock_flags = 0;
+       char *src;
+       int len;
+       size_t rc = count;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       /* Pin the dump with a kobject reference so it cannot be freed
+        * while we copy with the host lock dropped. */
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       dump = ioa_cfg->dump;
+
+       if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump || !kobject_get(&dump->kobj)) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return 0;
+       }
+
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+       /* Clamp the request to the total dump length. */
+       if (off > dump->driver_dump.hdr.len) {
+               kobject_put(&dump->kobj);
+               return 0;
+       }
+
+       if (off + count > dump->driver_dump.hdr.len) {
+               count = dump->driver_dump.hdr.len - off;
+               rc = count;
+       }
+
+       /* Section 1: the driver dump structures. */
+       if (count && off < sizeof(dump->driver_dump)) {
+               if (off + count > sizeof(dump->driver_dump))
+                       len = sizeof(dump->driver_dump) - off;
+               else
+                       len = count;
+               src = (u8 *)&dump->driver_dump + off;
+               memcpy(buf, src, len);
+               buf += len;
+               off += len;
+               count -= len;
+       }
+
+       off -= sizeof(dump->driver_dump);
+
+       /* Section 2: the IOA dump header (everything before ioa_data). */
+       if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
+               if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
+                       len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
+               else
+                       len = count;
+               src = (u8 *)&dump->ioa_dump + off;
+               memcpy(buf, src, len);
+               buf += len;
+               off += len;
+               count -= len;
+       }
+
+       off -= offsetof(struct ipr_ioa_dump, ioa_data);
+
+       /* Section 3: the captured IOA data, stored as an array of pages.
+        * Copy at most up to the end of the current page per iteration.
+        * (The previous "PAGE_ALIGN(off) - off" length evaluated to 0 --
+        * an endless loop -- whenever off was already page aligned.) */
+       while (count) {
+               len = PAGE_SIZE - (off & ~PAGE_MASK);
+               if (len > count)
+                       len = count;
+               src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
+               src += off & ~PAGE_MASK;
+               memcpy(buf, src, len);
+               buf += len;
+               off += len;
+               count -= len;
+       }
+
+       kobject_put(&dump->kobj);
+       return rc;
+}
+
+/**
+ * ipr_release_dump - Free adapter dump memory
+ * @kobj:      kobject struct
+ *
+ * kobject release callback: runs when the last reference to the dump
+ * is dropped. Detaches the dump from the config struct, then frees
+ * the captured pages and the dump structure itself.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_release_dump(struct kobject *kobj)
+{
+       struct ipr_dump *dump = container_of(kobj,struct ipr_dump,kobj);
+       struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
+       unsigned long lock_flags = 0;
+       int i;
+
+       ENTER;
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       ioa_cfg->dump = NULL;
+       ioa_cfg->sdt_state = INACTIVE;
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+       /* Only pages actually captured (next_page_index) were allocated. */
+       for (i = 0; i < dump->ioa_dump.next_page_index; i++)
+               free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
+
+       kfree(dump);
+       LEAVE;
+}
+
+/* ktype wiring ipr_release_dump() as the dump kobject destructor. */
+static struct kobj_type ipr_dump_kobj_type = {
+       .release = ipr_release_dump,
+};
+
+/**
+ * ipr_alloc_dump - Prepare for adapter dump
+ * @ioa_cfg:   ioa config struct
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
+{
+       struct ipr_dump *dump;
+       unsigned long lock_flags = 0;
+
+       ENTER;
+       dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL);
+
+       if (!dump) {
+               ipr_err("Dump memory allocation failed\n");
+               return -ENOMEM;
+       }
+
+       /* The kobject refcount governs the dump's lifetime; the ktype's
+        * release callback frees it when the last reference drops. */
+       memset(dump, 0, sizeof(struct ipr_dump));
+       kobject_init(&dump->kobj);
+       dump->kobj.ktype = &ipr_dump_kobj_type;
+       dump->ioa_cfg = ioa_cfg;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+       /* A dump is already pending or collected: keep it and quietly
+        * discard the new allocation (treated as success). */
+       if (INACTIVE != ioa_cfg->sdt_state) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               kfree(dump);
+               return 0;
+       }
+
+       ioa_cfg->dump = dump;
+       ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+       /* Adapter already dead: kick the work queue once -- presumably
+        * so the dump is collected without waiting for a reset; confirm
+        * against the worker's GET_DUMP handling. */
+       if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
+               ioa_cfg->dump_taken = 1;
+               schedule_work(&ioa_cfg->work_q);
+       }
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+       LEAVE;
+       return 0;
+}
+
+/**
+ * ipr_free_dump - Free adapter dump memory
+ * @ioa_cfg:   ioa config struct
+ *
+ * Return value:
+ *     0 on success / other on failure
+ **/
+static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
+{
+       unsigned long flags = 0;
+       struct ipr_dump *dump;
+
+       ENTER;
+
+       /* Detach the dump from the config under the host lock; if none
+        * is attached there is nothing to release. */
+       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+       dump = ioa_cfg->dump;
+       if (!dump) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+               return 0;
+       }
+       ioa_cfg->dump = NULL;
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+
+       /* Drop the creation reference; the kobject release callback
+        * frees the dump when the last reference goes away. */
+       kobject_put(&dump->kobj);
+
+       LEAVE;
+       return 0;
+}
+
+/**
+ * ipr_write_dump - Setup dump state of adapter
+ * @kobj:              kobject struct
+ * @buf:               buffer
+ * @off:               offset
+ * @count:             buffer size
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
+                             loff_t off, size_t count)
+{
+       struct class_device *cdev = container_of(kobj,struct class_device,kobj);
+       struct Scsi_Host *shost = class_to_shost(cdev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+       int rc;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       /* '1' arms dump collection, '0' releases a collected dump. */
+       switch (buf[0]) {
+       case '1':
+               rc = ipr_alloc_dump(ioa_cfg);
+               break;
+       case '0':
+               rc = ipr_free_dump(ioa_cfg);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return rc ? rc : count;
+}
+
+/* sysfs binary attribute "dump": write '1'/'0' to arm/release a dump,
+ * read to fetch the collected dump. .size = 0 leaves the size
+ * unbounded from sysfs's view; bounds are enforced in the handlers. */
+static struct bin_attribute ipr_dump_attr = {
+       .attr = {
+               .name = "dump",
+               .mode = S_IRUSR | S_IWUSR,
+       },
+       .size = 0,
+       .read = ipr_read_dump,
+       .write = ipr_write_dump
+};
+#else
+/* Dump support compiled out: freeing is a successful no-op. */
+static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
+#endif
+
+/**
+ * ipr_store_queue_depth - Change the device's queue depth
+ * @dev:       device struct
+ * @buf:       buffer
+ * @count:     buffer size
+ *
+ * Return value:
+ *     number of bytes written on success / -ENXIO if the resource
+ *     entry for the device no longer exists
+ **/
+static ssize_t ipr_store_queue_depth(struct device *dev,
+                                   const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+       struct ipr_resource_entry *res;
+       int qdepth = simple_strtoul(buf, NULL, 10);
+       int tagged = 0;
+       unsigned long lock_flags = 0;
+       ssize_t len = -ENXIO;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       res = (struct ipr_resource_entry *)sdev->hostdata;
+       if (res) {
+               res->qdepth = qdepth;
+
+               /* Ordered tags only for generic SCSI with TCQ active. */
+               if (ipr_is_gscsi(res) && res->tcq_active)
+                       tagged = MSG_ORDERED_TAG;
+
+               len = strlen(buf);
+       }
+
+       /* NOTE(review): the new depth is applied to the sdev even when
+        * the resource lookup failed (len == -ENXIO); confirm intended. */
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       scsi_adjust_queue_depth(sdev, tagged, qdepth);
+       return len;
+}
+
+/* sysfs device attribute "queue_depth" (root read/write). */
+static struct device_attribute ipr_queue_depth_attr = {
+       .attr = {
+               .name =         "queue_depth",
+               .mode =         S_IRUSR | S_IWUSR,
+       },
+       .store = ipr_store_queue_depth
+};
+
+/**
+ * ipr_show_tcq_enable - Show if the device is enabled for tcqing
+ * @dev:       device struct
+ * @buf:       buffer
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_tcq_enable(struct device *dev, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ipr_ioa_cfg *cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+       struct ipr_resource_entry *res;
+       unsigned long flags = 0;
+       ssize_t n = -ENXIO;
+
+       /* Look the resource up under the host lock; it may be gone. */
+       spin_lock_irqsave(cfg->host->host_lock, flags);
+       res = (struct ipr_resource_entry *)sdev->hostdata;
+       if (res != NULL)
+               n = snprintf(buf, PAGE_SIZE, "%d\n", res->tcq_active);
+       spin_unlock_irqrestore(cfg->host->host_lock, flags);
+       return n;
+}
+
+/**
+ * ipr_store_tcq_enable - Change the device's TCQing state
+ * @dev:       device struct
+ * @buf:       buffer
+ * @count:     buffer size
+ *
+ * Return value:
+ *     number of bytes written on success / -EINVAL if TCQ cannot be
+ *     enabled for this device / -ENXIO if the resource is gone
+ **/
+static ssize_t ipr_store_tcq_enable(struct device *dev,
+                                   const char *buf, size_t count)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+       struct ipr_resource_entry *res;
+       unsigned long lock_flags = 0;
+       int tcq_active = simple_strtoul(buf, NULL, 10);
+       int qdepth = IPR_MAX_CMD_PER_LUN;
+       int tagged = 0;
+       ssize_t len = -ENXIO;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+       res = (struct ipr_resource_entry *)sdev->hostdata;
+
+       if (res) {
+               /* Default to TCQ off, then re-enable only for generic
+                * SCSI devices that advertise tagged queuing support. */
+               res->tcq_active = 0;
+               qdepth = res->qdepth;
+
+               if (ipr_is_gscsi(res) && sdev->tagged_supported) {
+                       if (tcq_active) {
+                               tagged = MSG_ORDERED_TAG;
+                               res->tcq_active = 1;
+                       }
+
+                       len = strlen(buf);
+               } else if (tcq_active) {
+                       /* Enabling on an unsupported device is an error. */
+                       len = -EINVAL;
+               }
+       }
+
+       /* Apply the (possibly unchanged) depth outside the lock. */
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       scsi_adjust_queue_depth(sdev, tagged, qdepth);
+       return len;
+}
+
+/* sysfs device attribute "tcq_enable" (root read/write). */
+static struct device_attribute ipr_tcqing_attr = {
+       .attr = {
+               .name =         "tcq_enable",
+               .mode =         S_IRUSR | S_IWUSR,
+       },
+       .store = ipr_store_tcq_enable,
+       .show = ipr_show_tcq_enable
+};
+
+/**
+ * ipr_show_adapter_handle - Show the adapter's resource handle for this device
+ * @dev:       device struct
+ * @buf:       buffer
+ *
+ * Return value:
+ *     number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_adapter_handle(struct device *dev, char *buf)
+{
+       struct scsi_device *sdev = to_scsi_device(dev);
+       struct ipr_ioa_cfg *cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+       struct ipr_resource_entry *res;
+       unsigned long flags = 0;
+       ssize_t n = -ENXIO;
+
+       /* The resource entry can disappear under us; look it up under
+        * the host lock and report -ENXIO if it is already gone. */
+       spin_lock_irqsave(cfg->host->host_lock, flags);
+       res = (struct ipr_resource_entry *)sdev->hostdata;
+       if (res != NULL)
+               n = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
+       spin_unlock_irqrestore(cfg->host->host_lock, flags);
+       return n;
+}
+
+/* sysfs device attribute "adapter_handle" (read-only, root). */
+static struct device_attribute ipr_adapter_handle_attr = {
+       .attr = {
+               .name =         "adapter_handle",
+               .mode =         S_IRUSR,
+       },
+       .show = ipr_show_adapter_handle
+};
+
+/* NULL-terminated list of all per-device sysfs attributes. */
+static struct device_attribute *ipr_dev_attrs[] = {
+       &ipr_queue_depth_attr,
+       &ipr_tcqing_attr,
+       &ipr_adapter_handle_attr,
+       NULL,
+};
+
+/**
+ * ipr_biosparam - Return the HSC mapping
+ * @sdev:                      scsi device struct
+ * @block_device:      block device pointer
+ * @capacity:          capacity of the device
+ * @parm:                      Array containing returned HSC values.
+ *
+ * This function generates the HSC parms that fdisk uses.
+ * We want to make sure we return something that places partitions
+ * on 4k boundaries for best performance with the IOA.
+ *
+ * Return value:
+ *     0 on success
+ **/
+static int ipr_biosparam(struct scsi_device *sdev,
+                        struct block_device *block_device,
+                        sector_t capacity, int *parm)
+{
+       const int heads = 128;
+       const int sectors = 32;
+       int cylinders = capacity;
+
+       /* 128 heads * 32 sectors = 4096 sectors per cylinder, so
+        * cylinder-aligned partitions also land on 4k boundaries. */
+       sector_div(cylinders, heads * sectors);
+
+       parm[0] = heads;
+       parm[1] = sectors;
+       parm[2] = cylinders;
+
+       return 0;
+}
+
+/**
+ * ipr_slave_destroy - Unconfigure a SCSI device
+ * @sdev:      scsi device struct
+ *
+ * Severs the link between the scsi device and its ipr resource
+ * entry in both directions, under the host lock.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_slave_destroy(struct scsi_device *sdev)
+{
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
+       struct ipr_resource_entry *resource;
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+       resource = (struct ipr_resource_entry *) sdev->hostdata;
+       if (resource != NULL) {
+               /* break both directions of the sdev <-> resource link */
+               resource->sdev = NULL;
+               sdev->hostdata = NULL;
+       }
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+}
+
+/**
+ * ipr_slave_configure - Configure a SCSI device
+ * @sdev:      scsi device struct
+ *
+ * This function configures the specified scsi device.
+ *
+ * Return value:
+ *     0 on success
+ **/
+static int ipr_slave_configure(struct scsi_device *sdev)
+{
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
+       struct ipr_resource_entry *res;
+       unsigned long lock_flags = 0;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       res = sdev->hostdata;
+       if (res) {
+               /* AF DASD devices are presented to the midlayer as RAID */
+               if (ipr_is_af_dasd_device(res))
+                       sdev->type = TYPE_RAID;
+               if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res))
+                       sdev->scsi_level = 4;
+               /* volume sets get a longer read/write timeout */
+               if (ipr_is_vset_device(res))
+                       sdev->timeout = IPR_VSET_RW_TIMEOUT;
+
+               sdev->allow_restart = 1;
+               /* start untagged at the resource's configured queue depth */
+               scsi_adjust_queue_depth(sdev, 0, res->qdepth);
+       }
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       return 0;
+}
+
+/**
+ * ipr_slave_alloc - Prepare for commands to a device.
+ * @sdev:      scsi device struct
+ *
+ * This function saves a pointer to the resource entry
+ * in the scsi device struct if the device exists. We
+ * can then use this pointer in ipr_queuecommand when
+ * handling new commands.
+ *
+ * Return value:
+ *     0 on success
+ **/
+static int ipr_slave_alloc(struct scsi_device *sdev)
+{
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
+       struct ipr_resource_entry *res;
+       unsigned long lock_flags;
+
+       sdev->hostdata = NULL;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+       /* find the resource whose bus/target/lun address matches this sdev */
+       list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+               if ((res->cfgte.res_addr.bus == sdev->channel) &&
+                   (res->cfgte.res_addr.target == sdev->id) &&
+                   (res->cfgte.res_addr.lun == sdev->lun)) {
+                       res->sdev = sdev;
+                       res->add_to_ml = 0;     /* now known to the midlayer */
+                       sdev->hostdata = res;
+                       res->needs_sync_complete = 1;
+                       break;
+               }
+       }
+
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+       return 0;
+}
+
+/**
+ * ipr_eh_host_reset - Reset the host adapter
+ * @scsi_cmd:  scsi command struct
+ *
+ * Return value:
+ *     SUCCESS / FAILED
+ **/
+static int ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg;
+       int rc;
+
+       ENTER;
+       ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+
+       dev_err(&ioa_cfg->pdev->dev,
+               "Adapter being reset as a result of error recovery.\n");
+
+       /* if a dump was being waited for, collect it as part of this reset */
+       if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+               ioa_cfg->sdt_state = GET_DUMP;
+
+       rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
+
+       LEAVE;
+       return rc;
+}
+
+/**
+ * ipr_eh_dev_reset - Reset the device
+ * @scsi_cmd:  scsi command struct
+ *
+ * This function issues a device reset to the affected device.
+ * A LUN reset will be sent to the device first. If that does
+ * not work, a target reset will be sent.
+ *
+ * Return value:
+ *     SUCCESS / FAILED
+ **/
+static int ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
+{
+       struct ipr_cmnd *ipr_cmd;
+       struct ipr_ioa_cfg *ioa_cfg;
+       struct ipr_resource_entry *res;
+       struct ipr_cmd_pkt *cmd_pkt;
+       u32 ioasc;
+
+       ENTER;
+       ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+       res = scsi_cmd->device->hostdata;
+
+       /* only generic SCSI and volume set devices take a device reset */
+       if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
+               return FAILED;
+
+       /*
+        * If we are currently going through reset/reload, return failed. This will force the
+        * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
+        * reset to complete
+        */
+       if (ioa_cfg->in_reset_reload)
+               return FAILED;
+       if (ioa_cfg->ioa_is_dead)
+               return FAILED;
+
+       /* steer completion of our pending ops on this device to the eh
+        * done routine so the midlayer sees them finish */
+       list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
+               if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
+                       if (ipr_cmd->scsi_cmd)
+                               ipr_cmd->done = ipr_scsi_eh_done;
+               }
+       }
+
+       res->resetting_device = 1;
+
+       ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+
+       ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
+       cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+       cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+       cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
+
+       ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
+       /* block until the IOA completes the reset op (or it times out) */
+       ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
+
+       ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+       res->resetting_device = 0;
+
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+
+       LEAVE;
+       /* any sense key in the IOASC means the reset itself failed */
+       return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
+}
+
+/**
+ * ipr_bus_reset_done - Op done function for bus reset.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function is the op done function for a bus reset
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_resource_entry *res;
+
+       ENTER;
+       /* tell the midlayer which bus was reset (looked up via res_handle) */
+       list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+               if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
+                           sizeof(res->cfgte.res_handle))) {
+                       scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
+                       break;
+               }
+       }
+
+       /*
+        * If abort has not completed, indicate the reset has, else call the
+        * abort's done function to wake the sleeping eh thread
+        */
+       if (ipr_cmd->u.sibling->u.sibling)
+               ipr_cmd->u.sibling->u.sibling = NULL;
+       else
+               ipr_cmd->u.sibling->done(ipr_cmd->u.sibling);
+
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       LEAVE;
+}
+
+/**
+ * ipr_abort_timeout - An abort task has timed out
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function handles when an abort task times out. If this
+ * happens we issue a bus reset since we have resources tied
+ * up that must be freed before returning to the midlayer.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_cmnd *reset_cmd;
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_cmd_pkt *cmd_pkt;
+       unsigned long lock_flags = 0;
+
+       ENTER;
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       /* bail if the abort completed after all, or a reset is in progress */
+       if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return;
+       }
+
+       ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
+       reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+       /* link abort and reset so ipr_bus_reset_done can wake the eh thread */
+       ipr_cmd->u.sibling = reset_cmd;
+       reset_cmd->u.sibling = ipr_cmd;
+       reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
+       cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
+       cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+       cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
+       cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
+
+       ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       LEAVE;
+}
+
+/**
+ * ipr_cancel_op - Cancel specified op
+ * @scsi_cmd:  scsi command struct
+ *
+ * This function cancels specified op.
+ *
+ * Return value:
+ *     SUCCESS / FAILED
+ **/
+static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
+{
+       struct ipr_cmnd *ipr_cmd;
+       struct ipr_ioa_cfg *ioa_cfg;
+       struct ipr_resource_entry *res;
+       struct ipr_cmd_pkt *cmd_pkt;
+       u32 ioasc, ioarcb_addr;
+       int op_found = 0;
+
+       ENTER;
+       ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
+       res = scsi_cmd->device->hostdata;
+
+       /* only generic SCSI and volume set devices take an abort task */
+       if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
+               return FAILED;
+
+       list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
+               if (ipr_cmd->scsi_cmd == scsi_cmd) {
+                       ipr_cmd->done = ipr_scsi_eh_done;
+                       op_found = 1;
+                       break;
+               }
+       }
+
+       /* op already completed -- nothing left to abort */
+       if (!op_found)
+               return SUCCESS;
+
+       /* the abort CDB identifies the target op by its IOARCB PCI address */
+       ioarcb_addr = be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr);
+
+       ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+       ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
+       cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+       cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+       cmd_pkt->cdb[0] = IPR_ABORT_TASK;
+       cmd_pkt->cdb[2] = (ioarcb_addr >> 24) & 0xff;
+       cmd_pkt->cdb[3] = (ioarcb_addr >> 16) & 0xff;
+       cmd_pkt->cdb[4] = (ioarcb_addr >> 8) & 0xff;
+       cmd_pkt->cdb[5] = ioarcb_addr & 0xff;
+       ipr_cmd->u.sdev = scsi_cmd->device;
+
+       ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
+       ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_ABORT_TASK_TIMEOUT);
+       ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+       /*
+        * If the abort task timed out and we sent a bus reset, we will get
+        * one the following responses to the abort
+        */
+       if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
+               ioasc = 0;
+               ipr_trace;
+       }
+
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       res->needs_sync_complete = 1;
+
+       LEAVE;
+       return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
+}
+
+/**
+ * ipr_eh_abort - Abort a single op
+ * @scsi_cmd:  scsi command struct
+ *
+ * Return value:
+ *     SUCCESS / FAILED
+ **/
+static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg;
+
+       ENTER;
+       ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+
+       /* If we are currently going through reset/reload, return failed. This will force the
+          mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
+          reset to complete */
+       if (ioa_cfg->in_reset_reload)
+               return FAILED;
+       if (ioa_cfg->ioa_is_dead)
+               return FAILED;
+       if (!scsi_cmd->device->hostdata)
+               return FAILED;
+
+       /* note: the early FAILED returns above skip the LEAVE trace */
+       LEAVE;
+       return ipr_cancel_op(scsi_cmd);
+}
+
+/**
+ * ipr_handle_other_interrupt - Handle "other" interrupts
+ * @ioa_cfg:   ioa config struct
+ * @int_reg:   interrupt register
+ *
+ * Return value:
+ *     IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
+                                             volatile u32 int_reg)
+{
+       irqreturn_t rc = IRQ_HANDLED;
+
+       if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
+               /* IOA transitioned to operational: continue the reset job */
+               /* Mask the interrupt */
+               writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
+
+               /* Clear the interrupt */
+               writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
+               int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+
+               list_del(&ioa_cfg->reset_cmd->queue);
+               del_timer(&ioa_cfg->reset_cmd->timer);
+               ipr_reset_ioa_job(ioa_cfg->reset_cmd);
+       } else {
+               /* any other "other" interrupt is treated as an IOA failure */
+               if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
+                       ioa_cfg->ioa_unit_checked = 1;
+               else
+                       dev_err(&ioa_cfg->pdev->dev,
+                               "Permanent IOA failure. 0x%08X\n", int_reg);
+
+               if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+                       ioa_cfg->sdt_state = GET_DUMP;
+
+               ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
+               ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+       }
+
+       return rc;
+}
+
+/**
+ * ipr_isr - Interrupt service routine
+ * @irq:       irq number
+ * @devp:      pointer to ioa config struct
+ * @regs:      pt_regs struct
+ *
+ * Return value:
+ *     IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
+{
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
+       unsigned long lock_flags = 0;
+       volatile u32 int_reg, int_mask_reg;
+       u32 ioasc;
+       u16 cmd_index;
+       struct ipr_cmnd *ipr_cmd;
+       irqreturn_t rc = IRQ_NONE;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+       /* If interrupts are disabled, ignore the interrupt */
+       if (!ioa_cfg->allow_interrupts) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return IRQ_NONE;
+       }
+
+       int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+       int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+
+       /* If an interrupt on the adapter did not occur, ignore it */
+       if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
+               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+               return IRQ_NONE;
+       }
+
+       /* drain completed commands from the host request response queue */
+       while (1) {
+               ipr_cmd = NULL;
+
+               /* entries are valid while their toggle bit matches ours */
+               while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+                      ioa_cfg->toggle_bit) {
+
+                       cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
+                                    IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
+
+                       if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
+                               ioa_cfg->errors_logged++;
+                               dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
+
+                               if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+                                       ioa_cfg->sdt_state = GET_DUMP;
+
+                               ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+                               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                               return IRQ_HANDLED;
+                       }
+
+                       ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
+
+                       ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+                       ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
+
+                       list_del(&ipr_cmd->queue);
+                       del_timer(&ipr_cmd->timer);
+                       ipr_cmd->done(ipr_cmd);
+
+                       rc = IRQ_HANDLED;
+
+                       if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
+                               ioa_cfg->hrrq_curr++;
+                       } else {
+                               /* wrap the queue pointer and flip the expected toggle bit */
+                               ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
+                               ioa_cfg->toggle_bit ^= 1u;
+                       }
+               }
+
+               if (ipr_cmd != NULL) {
+                       /* Clear the PCI interrupt */
+                       writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
+                       int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+               } else
+                       break;
+       }
+
+       /* no HRRQ work found: must be an "other" (error/transition) interrupt */
+       if (unlikely(rc == IRQ_NONE))
+               rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
+
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       return rc;
+}
+
+/**
+ * ipr_build_ioadl - Build a scatter/gather list and map the buffer
+ * @ioa_cfg:   ioa config struct
+ * @ipr_cmd:   ipr command struct
+ *
+ * Return value:
+ *     0 on success / -1 on failure
+ **/
+static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
+                          struct ipr_cmnd *ipr_cmd)
+{
+       int i;
+       struct scatterlist *sglist;
+       u32 length;
+       u32 ioadl_flags = 0;
+       struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+       struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+       struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+
+       length = scsi_cmd->request_bufflen;
+
+       /* no data transfer -- nothing to map */
+       if (length == 0)
+               return 0;
+
+       if (scsi_cmd->use_sg) {
+               /* scatter/gather path: DMA-map the midlayer's sg list */
+               ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
+                                                scsi_cmd->request_buffer,
+                                                scsi_cmd->use_sg,
+                                                scsi_cmd->sc_data_direction);
+
+               if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+                       ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+                       ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+                       ioarcb->write_data_transfer_length = cpu_to_be32(length);
+                       ioarcb->write_ioadl_len =
+                               cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+               } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+                       ioadl_flags = IPR_IOADL_FLAGS_READ;
+                       ioarcb->read_data_transfer_length = cpu_to_be32(length);
+                       ioarcb->read_ioadl_len =
+                               cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+               }
+
+               sglist = scsi_cmd->request_buffer;
+
+               /* translate each mapped sg element into an IOADL descriptor */
+               for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+                       ioadl[i].flags_and_data_len =
+                               cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
+                       ioadl[i].address =
+                               cpu_to_be32(sg_dma_address(&sglist[i]));
+               }
+
+               if (likely(ipr_cmd->dma_use_sg)) {
+                       /* the final descriptor must carry the LAST flag */
+                       ioadl[i-1].flags_and_data_len |=
+                               cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+                       return 0;
+               } else
+                       dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+       } else {
+               /* single linear buffer path */
+               if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+                       ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+                       ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+                       ioarcb->write_data_transfer_length = cpu_to_be32(length);
+                       ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+               } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+                       ioadl_flags = IPR_IOADL_FLAGS_READ;
+                       ioarcb->read_data_transfer_length = cpu_to_be32(length);
+                       ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+               }
+
+               ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
+                                                    scsi_cmd->request_buffer, length,
+                                                    scsi_cmd->sc_data_direction);
+
+               if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
+                       ipr_cmd->dma_use_sg = 1;
+                       ioadl[0].flags_and_data_len =
+                               cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
+                       ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
+                       return 0;
+               } else
+                       dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
+       }
+
+       return -1;
+}
+
+/**
+ * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
+ * @scsi_cmd:  scsi command struct
+ *
+ * Maps the SCSI tag message byte produced by scsi_populate_tag_msg
+ * to the corresponding IPR task attribute flags. Commands with no
+ * tag message (or an unrecognized tag type) are sent untagged.
+ *
+ * Return value:
+ *     task attributes
+ **/
+static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
+{
+       u8 tag[2];
+       u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
+
+       if (scsi_populate_tag_msg(scsi_cmd, tag)) {
+               switch (tag[0]) {
+               case MSG_SIMPLE_TAG:
+                       rc = IPR_FLAGS_LO_SIMPLE_TASK;
+                       break;
+               case MSG_HEAD_TAG:
+                       rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
+                       break;
+               case MSG_ORDERED_TAG:
+                       rc = IPR_FLAGS_LO_ORDERED_TASK;
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+/**
+ * ipr_erp_done - Process completion of ERP for a device
+ * @ipr_cmd:           ipr command struct
+ *
+ * This function copies the sense buffer into the scsi_cmd
+ * struct and pushes the scsi_done function.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
+{
+       struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+       struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+       if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
+               /* the request sense itself failed -- report DID_ERROR */
+               scsi_cmd->result |= (DID_ERROR << 16);
+               ipr_sdev_err(scsi_cmd->device,
+                            "Request Sense failed with IOASC: 0x%08X\n", ioasc);
+       } else {
+               /* hand the gathered sense data back to the midlayer */
+               memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
+                      SCSI_SENSE_BUFFERSIZE);
+       }
+
+       if (res)
+               res->needs_sync_complete = 1;
+       ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       scsi_cmd->scsi_done(scsi_cmd);
+}
+
+/**
+ * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
+ * @ipr_cmd:   ipr command struct
+ *
+ * Zeroes the command packet, the read/write transfer lengths and
+ * IOADL lengths in the IOARCB, and the status fields of the IOASA,
+ * so the block can carry a fresh ERP request.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+       struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+
+       memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+       ioarcb->write_data_transfer_length = 0;
+       ioarcb->write_ioadl_len = 0;
+       ioarcb->read_data_transfer_length = 0;
+       ioarcb->read_ioadl_len = 0;
+       ioasa->residual_data_len = 0;
+       ioasa->ioasc = 0;
+}
+
+/**
+ * ipr_erp_request_sense - Send request sense to a device
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function sends a request sense to a device as a result
+ * of a check condition.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+
+       /* reuse the same command block for the REQUEST SENSE */
+       ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
+
+       cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
+       cmd_pkt->cdb[0] = REQUEST_SENSE;
+       cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;        /* allocation length */
+       cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
+       cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
+       cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
+
+       /* single IOADL descriptor pointing at the per-command sense buffer */
+       ipr_cmd->ioadl[0].flags_and_data_len =
+               cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
+       ipr_cmd->ioadl[0].address =
+               cpu_to_be32(ipr_cmd->sense_buffer_dma);
+
+       ipr_cmd->ioarcb.read_ioadl_len =
+               cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+       ipr_cmd->ioarcb.read_data_transfer_length =
+               cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
+
+       /* completion continues the ERP chain in ipr_erp_done */
+       ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
+                  IPR_REQUEST_SENSE_TIMEOUT * 2);
+}
+
+/**
+ * ipr_erp_cancel_all - Send cancel all to a device
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function sends a cancel all to a device to clear the
+ * queue. If we are running TCQ on the device, QERR is set to 1,
+ * which means all outstanding ops have been dropped on the floor.
+ * Cancel all will return them to us.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
+{
+       struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+       struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
+       struct ipr_cmd_pkt *cmd_pkt;
+
+       /* NOTE(review): res is dereferenced without a NULL check;
+        * callers appear to guarantee hostdata is set -- confirm */
+       res->in_erp = 1;
+
+       ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
+
+       cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+       cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+       cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
+
+       /* once the cancel all completes, follow up with a request sense */
+       ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
+                  IPR_CANCEL_ALL_TIMEOUT);
+}
+
+/**
+ * ipr_dump_ioasa - Dump contents of IOASA
+ * @ioa_cfg:   ioa config struct
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function is invoked by the interrupt handler when ops
+ * fail. It will log the IOASA if appropriate. Only called
+ * for GPDD ops.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
+                          struct ipr_cmnd *ipr_cmd)
+{
+       int i;
+       u16 data_len;
+       u32 ioasc;
+       struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+       u32 *ioasa_data = (u32 *)ioasa;
+       int error_index;
+
+       ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
+
+       /* nothing to log on success */
+       if (0 == ioasc)
+               return;
+
+       if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
+               return;
+
+       error_index = ipr_get_error(ioasc);
+
+       if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
+               /* Don't log an error if the IOA already logged one */
+               if (ioasa->ilid != 0)
+                       return;
+
+               /* skip errors the table marks as not worth logging */
+               if (ipr_error_table[error_index].log_ioasa == 0)
+                       return;
+       }
+
+       ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
+                    ipr_error_table[error_index].error);
+
+       /* log end state/bus phase only when both indices are in range */
+       if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
+           (ioasa->u.gpdd.bus_phase <=  ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
+               ipr_sdev_err(ipr_cmd->scsi_cmd->device,
+                            "Device End state: %s Phase: %s\n",
+                            ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
+                            ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
+       }
+
+       /* cap the dump length at the size of the IOASA structure */
+       if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
+               data_len = sizeof(struct ipr_ioasa);
+       else
+               data_len = be16_to_cpu(ioasa->ret_stat_len);
+
+       ipr_err("IOASA Dump:\n");
+
+       /* print four 32-bit words per line, prefixed by the byte offset */
+       for (i = 0; i < data_len / 4; i += 4) {
+               ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
+                       be32_to_cpu(ioasa_data[i]),
+                       be32_to_cpu(ioasa_data[i+1]),
+                       be32_to_cpu(ioasa_data[i+2]),
+                       be32_to_cpu(ioasa_data[i+3]));
+       }
+}
+
+/**
+ * ipr_gen_sense - Generate SCSI sense data from an IOASA
+ * @ipr_cmd:   ipr command struct
+ *
+ * Builds sense data in the scsi command's sense buffer from the
+ * IOASC returned by the adapter. Descriptor format (0x72) is used
+ * for vset media errors carrying a 64-bit failing LBA; fixed
+ * format (0x70) is used otherwise.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
+{
+       u32 failing_lba;
+       u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
+       struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
+       struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+       u32 ioasc = be32_to_cpu(ioasa->ioasc);
+
+       memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
+
+       /* no sense data is generated for driver-internal IOASCs */
+       if (ioasc >= IPR_FIRST_DRIVER_IOASC)
+               return;
+
+       ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
+
+       if (ipr_is_vset_device(res) &&
+           ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
+           ioasa->u.vset.failing_lba_hi != 0) {
+               /* descriptor-format sense with a 64-bit failing LBA */
+               sense_buf[0] = 0x72;
+               sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
+               sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
+               sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
+
+               sense_buf[7] = 12;      /* additional sense length */
+               sense_buf[8] = 0;       /* descriptor type */
+               sense_buf[9] = 0x0A;    /* descriptor length */
+               sense_buf[10] = 0x80;   /* information field valid */
+
+               failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
+
+               sense_buf[12] = (failing_lba & 0xff000000) >> 24;
+               sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
+               sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
+               sense_buf[15] = failing_lba & 0x000000ff;
+
+               failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
+
+               sense_buf[16] = (failing_lba & 0xff000000) >> 24;
+               sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
+               sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
+               sense_buf[19] = failing_lba & 0x000000ff;
+       } else {
+               /* fixed-format sense */
+               sense_buf[0] = 0x70;
+               sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
+               sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
+               sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
+
+               /* Illegal request */
+               if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
+                   (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
+                       sense_buf[7] = 10;      /* additional length */
+
+                       /* IOARCB was in error */
+                       if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
+                               sense_buf[15] = 0xC0;
+                       else    /* Parameter data was invalid */
+                               sense_buf[15] = 0x80;
+
+                       /* field pointer locates the offending CDB/parameter byte */
+                       sense_buf[16] =
+                           ((IPR_FIELD_POINTER_MASK &
+                             be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
+                       sense_buf[17] =
+                           (IPR_FIELD_POINTER_MASK &
+                            be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
+               } else {
+                       if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
+                               if (ipr_is_vset_device(res))
+                                       failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
+                               else
+                                       failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
+
+                               sense_buf[0] |= 0x80;   /* Or in the Valid bit */
+                               sense_buf[3] = (failing_lba & 0xff000000) >> 24;
+                               sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
+                               sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
+                               sense_buf[6] = failing_lba & 0x000000ff;
+                       }
+
+                       sense_buf[7] = 6;       /* additional length */
+               }
+       }
+}
+
+/**
+ * ipr_erp_start - Process an error response for a SCSI op
+ * @ioa_cfg:   ioa config struct
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function determines whether or not to initiate ERP
+ * on the affected device.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
+                             struct ipr_cmnd *ipr_cmd)
+{
+       struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+       struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
+       u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+       /* No resource entry: device is gone, just complete the op */
+       if (!res) {
+               ipr_scsi_eh_done(ipr_cmd);
+               return;
+       }
+
+       /* Generic SCSI devices supply their own sense data; for all
+        * other device types synthesize sense data from the IOASC */
+       if (ipr_is_gscsi(res))
+               ipr_dump_ioasa(ioa_cfg, ipr_cmd);
+       else
+               ipr_gen_sense(ipr_cmd);
+
+       /* Map the masked IOASC onto a mid-layer result code */
+       switch (ioasc & IPR_IOASC_IOASC_MASK) {
+       case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
+               scsi_cmd->result |= (DID_ERROR << 16);
+               break;
+       case IPR_IOASC_IR_RESOURCE_HANDLE:
+               scsi_cmd->result |= (DID_NO_CONNECT << 16);
+               break;
+       case IPR_IOASC_HW_SEL_TIMEOUT:
+               scsi_cmd->result |= (DID_NO_CONNECT << 16);
+               res->needs_sync_complete = 1;
+               break;
+       case IPR_IOASC_SYNC_REQUIRED:
+               if (!res->in_erp)
+                       res->needs_sync_complete = 1;
+               scsi_cmd->result |= (DID_IMM_RETRY << 16);
+               break;
+       case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
+               scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+               break;
+       case IPR_IOASC_BUS_WAS_RESET:
+       case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
+               /*
+                * Report the bus reset and ask for a retry. The device
+                * will give CC/UA the next command.
+                */
+               if (!res->resetting_device)
+                       scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
+               scsi_cmd->result |= (DID_ERROR << 16);
+               res->needs_sync_complete = 1;
+               break;
+       case IPR_IOASC_HW_DEV_BUS_STATUS:
+               scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
+               if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
+                       /* Kick off ERP; completion of this op is deferred
+                        * until the cancel-all sequence finishes */
+                       ipr_erp_cancel_all(ipr_cmd);
+                       return;
+               }
+               break;
+       case IPR_IOASC_NR_INIT_CMD_REQUIRED:
+               break;
+       default:
+               scsi_cmd->result |= (DID_ERROR << 16);
+               if (!ipr_is_vset_device(res))
+                       res->needs_sync_complete = 1;
+               break;
+       }
+
+       /* Release DMA mappings, return the command block to the free
+        * list, and complete the request back to the mid-layer */
+       ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       scsi_cmd->scsi_done(scsi_cmd);
+}
+
+/**
+ * ipr_scsi_done - mid-layer done function
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function is invoked by the interrupt handler for
+ * ops generated by the SCSI mid-layer
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+       u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+       /* Propagate the residual byte count reported by the adapter */
+       scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
+
+       /* Fast path: sense key 0 means success, complete immediately;
+        * anything else goes through error recovery processing */
+       if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
+               ipr_unmap_sglist(ioa_cfg, ipr_cmd);
+               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+               scsi_cmd->scsi_done(scsi_cmd);
+       } else
+               ipr_erp_start(ioa_cfg, ipr_cmd);
+}
+
+/**
+ * ipr_save_ioafp_mode_select - Save adapters mode select data
+ * @ioa_cfg:   ioa config struct
+ * @scsi_cmd:  scsi command struct
+ *
+ * This function saves mode select data for the adapter to
+ * use following an adapter reset.
+ *
+ * Return value:
+ *     0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
+ **/
+static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
+                                      struct scsi_cmnd *scsi_cmd)
+{
+       /* Lazily allocate the save buffer on first use; GFP_ATOMIC since
+        * this is called from the queuecommand path */
+       if (!ioa_cfg->saved_mode_pages) {
+               ioa_cfg->saved_mode_pages  = kmalloc(sizeof(struct ipr_mode_pages),
+                                                    GFP_ATOMIC);
+               if (!ioa_cfg->saved_mode_pages) {
+                       dev_err(&ioa_cfg->pdev->dev,
+                               "IOA mode select buffer allocation failed\n");
+                       return SCSI_MLQUEUE_HOST_BUSY;
+               }
+       }
+
+       /* cmnd[4] is the MODE SELECT(6) parameter list length */
+       memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
+       ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
+       return 0;
+}
+
+/**
+ * ipr_queuecommand - Queue a mid-layer request
+ * @scsi_cmd:  scsi command struct
+ * @done:              done function
+ *
+ * This function queues a request generated by the mid-layer.
+ *
+ * Return value:
+ *     0 on success
+ *     SCSI_MLQUEUE_DEVICE_BUSY if device is busy
+ *     SCSI_MLQUEUE_HOST_BUSY if host is busy
+ **/
+static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
+                           void (*done) (struct scsi_cmnd *))
+{
+       struct ipr_ioa_cfg *ioa_cfg;
+       struct ipr_resource_entry *res;
+       struct ipr_ioarcb *ioarcb;
+       struct ipr_cmnd *ipr_cmd;
+       int rc = 0;
+
+       scsi_cmd->scsi_done = done;
+       ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
+       res = scsi_cmd->device->hostdata;
+       scsi_cmd->result = (DID_OK << 16);
+
+       /*
+        * We are currently blocking all devices due to a host reset
+        * We have told the host to stop giving us new requests, but
+        * ERP ops don't count. FIXME
+        */
+       if (unlikely(!ioa_cfg->allow_cmds))
+               return SCSI_MLQUEUE_HOST_BUSY;
+
+       /*
+        * FIXME - Create scsi_set_host_offline interface
+        *  and the ioa_is_dead check can be removed
+        */
+       if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
+               /* Dead adapter or unknown device: fail the op immediately */
+               memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+               scsi_cmd->result = (DID_NO_CONNECT << 16);
+               scsi_cmd->scsi_done(scsi_cmd);
+               return 0;
+       }
+
+       ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+       ioarcb = &ipr_cmd->ioarcb;
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+       memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
+       ipr_cmd->scsi_cmd = scsi_cmd;
+       ioarcb->res_handle = res->cfgte.res_handle;
+       ipr_cmd->done = ipr_scsi_done;
+       ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
+
+       if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
+               if (scsi_cmd->underflow == 0)
+                       ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
+
+               /* One-shot flag: tell the adapter to sync this device,
+                * then clear the request */
+               if (res->needs_sync_complete) {
+                       ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
+                       res->needs_sync_complete = 0;
+               }
+
+               ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
+               ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
+               ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
+               ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
+       }
+
+       /* Opcodes 0xC0 and above are vendor specific; route them to the
+        * IOA as adapter commands for non generic-SCSI devices */
+       if (!ipr_is_gscsi(res) && scsi_cmd->cmnd[0] >= 0xC0)
+               ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+
+       if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
+               rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
+
+       if (likely(rc == 0))
+               rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
+
+       if (likely(rc == 0)) {
+               /* Barrier: make sure the IOARCB is fully visible in memory
+                * before ringing the adapter doorbell */
+               mb();
+               writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
+                      ioa_cfg->regs.ioarrin_reg);
+       } else {
+                /* Could not issue the op: give the command block back */
+                list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+                return SCSI_MLQUEUE_HOST_BUSY;
+       }
+
+       return 0;
+}
+
+/**
+ * ipr_ioa_info - Get information about the card/driver
+ * @host:      scsi host struct
+ *
+ * Return value:
+ *     pointer to buffer with description string
+ **/
+static const char * ipr_ioa_info(struct Scsi_Host *host)
+{
+       /* NOTE(review): static buffer is shared by all adapters; the host
+        * lock only serializes the sprintf, and a later call can overwrite
+        * the buffer while a caller still holds the returned pointer */
+       static char buffer[512];
+       struct ipr_ioa_cfg *ioa_cfg;
+       unsigned long lock_flags = 0;
+
+       ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
+
+       spin_lock_irqsave(host->host_lock, lock_flags);
+       sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
+       spin_unlock_irqrestore(host->host_lock, lock_flags);
+
+       return buffer;
+}
+
+/* SCSI mid-layer host template: entry points, error handlers and
+ * queueing limits for the ipr driver */
+static struct scsi_host_template driver_template = {
+       .module = THIS_MODULE,
+       .name = "IPR",
+       .info = ipr_ioa_info,
+       .queuecommand = ipr_queuecommand,
+       .eh_abort_handler = ipr_eh_abort,
+       .eh_device_reset_handler = ipr_eh_dev_reset,
+       .eh_host_reset_handler = ipr_eh_host_reset,
+       .slave_alloc = ipr_slave_alloc,
+       .slave_configure = ipr_slave_configure,
+       .slave_destroy = ipr_slave_destroy,
+       .bios_param = ipr_biosparam,
+       .can_queue = IPR_MAX_COMMANDS,
+       .this_id = -1,
+       .sg_tablesize = IPR_MAX_SGLIST,
+       .max_sectors = IPR_MAX_SECTORS,
+       .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
+       .use_clustering = ENABLE_CLUSTERING,
+       .shost_attrs = ipr_ioa_attrs,
+       .sdev_attrs = ipr_dev_attrs,
+       .proc_name = IPR_NAME
+};
+
+#ifdef CONFIG_PPC_PSERIES
+/* PVR values of pSeries processors on which old Gemstone adapters
+ * are known to misbehave */
+static const u16 ipr_blocked_processors[] = {
+       PV_NORTHSTAR,
+       PV_PULSAR,
+       PV_POWER4,
+       PV_ICESTAR,
+       PV_SSTAR,
+       PV_POWER4p,
+       PV_630,
+       PV_630p
+};
+
+/**
+ * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
+ * @ioa_cfg:   ioa cfg struct
+ *
+ * Adapters that use Gemstone revision < 3.1 do not work reliably on
+ * certain pSeries hardware. This function determines if the given
+ * adapter is in one of these confgurations or not.
+ *
+ * Return value:
+ *     1 if adapter is not supported / 0 if adapter is supported
+ **/
+static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
+{
+       u8 rev_id;
+       int i;
+
+       /* Only 5702-type adapters are affected */
+       if (ioa_cfg->type == 0x5702) {
+               /* NOTE(review): PCI rev_id < 4 is presumably the encoding of
+                * "Gemstone revision < 3.1" mentioned above — confirm */
+               if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
+                                        &rev_id) == PCIBIOS_SUCCESSFUL) {
+                       if (rev_id < 4) {
+                               for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
+                                       if (__is_processor(ipr_blocked_processors[i]))
+                                               return 1;
+                               }
+                       }
+               }
+       }
+       return 0;
+}
+#else
+#define ipr_invalid_adapter(ioa_cfg) 0
+#endif
+
+/**
+ * ipr_ioa_bringdown_done - IOA bring down completion.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function processes the completion of an adapter bring down.
+ * It wakes any reset sleepers.
+ *
+ * Return value:
+ *     IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+       ENTER;
+       ioa_cfg->in_reset_reload = 0;
+       ioa_cfg->reset_retries = 0;
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       wake_up_all(&ioa_cfg->reset_wait_q);
+
+       /* Drop the host lock around scsi_unblock_requests: presumably it
+        * can re-enter queuecommand, which runs under the same lock */
+       spin_unlock_irq(ioa_cfg->host->host_lock);
+       scsi_unblock_requests(ioa_cfg->host);
+       spin_lock_irq(ioa_cfg->host->host_lock);
+       LEAVE;
+
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioa_reset_done - IOA reset completion.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function processes the completion of an adapter reset.
+ * It schedules any necessary mid-layer add/removes and
+ * wakes any reset sleepers.
+ *
+ * Return value:
+ *     IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_resource_entry *res;
+       struct ipr_hostrcb *hostrcb, *temp;
+       int i = 0;
+
+       ENTER;
+       ioa_cfg->in_reset_reload = 0;
+       ioa_cfg->allow_cmds = 1;
+       ioa_cfg->reset_cmd = NULL;
+
+       /* If any device needs to be added to or removed from the
+        * mid-layer, kick the worker thread once */
+       list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+               if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
+                       ipr_trace;
+                       schedule_work(&ioa_cfg->work_q);
+                       break;
+               }
+       }
+
+       /* Re-arm the HCAMs: the first IPR_NUM_LOG_HCAMS buffers go out as
+        * log-data requests, the rest as config-change requests */
+       list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
+               list_del(&hostrcb->queue);
+               if (i++ < IPR_NUM_LOG_HCAMS)
+                       ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
+               else
+                       ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
+       }
+
+       dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
+
+       ioa_cfg->reset_retries = 0;
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       wake_up_all(&ioa_cfg->reset_wait_q);
+
+       /* Unblock with the host lock dropped, then re-check allow_cmds in
+        * case another reset started while the lock was released */
+       spin_unlock_irq(ioa_cfg->host->host_lock);
+       scsi_unblock_requests(ioa_cfg->host);
+       spin_lock_irq(ioa_cfg->host->host_lock);
+
+       if (!ioa_cfg->allow_cmds)
+               scsi_block_requests(ioa_cfg->host);
+
+       LEAVE;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
+ * @sup_dev:   supported device buffer to initialize
+ * @vpids:     vendor/product id to copy into the buffer
+ *
+ * Zeroes the buffer, then fills in the vendor/product id and the
+ * fixed single-record header fields.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_set_sup_dev_dflt(struct ipr_supported_device *sup_dev,
+                                struct ipr_std_inq_vpids *vpids)
+{
+       memset(sup_dev, 0, sizeof(*sup_dev));
+       memcpy(&sup_dev->vpids, vpids, sizeof(*vpids));
+       sup_dev->data_length = cpu_to_be16(sizeof(*sup_dev));
+       sup_dev->num_records = 1;
+       sup_dev->reserved = 0;
+}
+
+/**
+ * ipr_set_supported_devs - Send Set Supported Devices for a device
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function send a Set Supported Devices to the adapter
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
+       struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+       struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+       struct ipr_resource_entry *res = ipr_cmd->u.res;
+
+       /* Default next step; overridden below if another device remains */
+       ipr_cmd->job_step = ipr_ioa_reset_done;
+
+       /* Resume the walk from the resource saved in ipr_cmd->u.res —
+        * one Set Supported Devices is issued per invocation */
+       list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
+               if (!ipr_is_af_dasd_device(res))
+                       continue;
+
+               /* Remember where to resume on the next invocation */
+               ipr_cmd->u.res = res;
+               ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
+
+               ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+               ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+               ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+
+               ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
+               ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
+               ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
+
+               /* Single-element write IOADL pointing at the shared VPD
+                * control block's supp_dev area */
+               ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
+                                                       sizeof(struct ipr_supported_device));
+               ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
+                                            offsetof(struct ipr_misc_cbs, supp_dev));
+               ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+               ioarcb->write_data_transfer_length =
+                       cpu_to_be32(sizeof(struct ipr_supported_device));
+
+               ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+                          IPR_SET_SUP_DEVICE_TIMEOUT);
+
+               /* Re-arm this step so the remaining devices get processed */
+               ipr_cmd->job_step = ipr_set_supported_devs;
+               return IPR_RC_JOB_RETURN;
+       }
+
+       return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_get_mode_page - Locate specified mode page
+ * @mode_pages:        mode page buffer
+ * @page_code: page code to find
+ * @len:               minimum required length for mode page
+ *
+ * Return value:
+ *     pointer to mode page / NULL on failure
+ **/
+static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
+                              u32 page_code, u32 len)
+{
+       struct ipr_mode_page_hdr *mode_hdr;
+       u32 page_length;
+       u32 length;
+
+       if (!mode_pages || (mode_pages->hdr.length == 0))
+               return NULL;
+
+       /* Bytes of page data remaining after the 4-byte mode parameter
+        * header and the block descriptors */
+       length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
+       mode_hdr = (struct ipr_mode_page_hdr *)
+               (mode_pages->data + mode_pages->hdr.block_desc_len);
+
+       while (length) {
+               if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
+                       /* Found the page, but return it only if it is at
+                        * least as long as the caller requires */
+                       if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
+                               return mode_hdr;
+                       break;
+               } else {
+                       /* Skip to the next page header */
+                       page_length = (sizeof(struct ipr_mode_page_hdr) +
+                                      mode_hdr->page_length);
+                       length -= page_length;
+                       mode_hdr = (struct ipr_mode_page_hdr *)
+                               ((unsigned long)mode_hdr + page_length);
+               }
+       }
+       return NULL;
+}
+
+/**
+ * ipr_check_term_power - Check for term power errors
+ * @ioa_cfg:   ioa config struct
+ * @mode_pages:        IOAFP mode pages buffer
+ *
+ * Check the IOAFP's mode page 28 for term power errors
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
+                                struct ipr_mode_pages *mode_pages)
+{
+       int i;
+       int entry_length;
+       struct ipr_dev_bus_entry *bus;
+       struct ipr_mode_page28 *mode_page;
+
+       mode_page = ipr_get_mode_page(mode_pages, 0x28,
+                                     sizeof(struct ipr_mode_page28));
+
+       /* ipr_get_mode_page() returns NULL when the page is absent or
+        * shorter than required — bail out instead of dereferencing it */
+       if (!mode_page)
+               return;
+
+       entry_length = mode_page->entry_length;
+
+       bus = mode_page->bus;
+
+       /* Warn once per device bus entry that reports no term power */
+       for (i = 0; i < mode_page->num_entries; i++) {
+               if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
+                       dev_err(&ioa_cfg->pdev->dev,
+                               "Term power is absent on scsi bus %d\n",
+                               bus->res_addr.bus);
+               }
+
+               /* Entries are variable length; advance by the size the
+                * adapter reported */
+               bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
+       }
+}
+
+/**
+ * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
+ * @ioa_cfg:   ioa config struct
+ *
+ * For every SCSI bus on the adapter, query the SES table for the
+ * maximum speed allowed at the bus's width and clamp the configured
+ * transfer rate down to that limit if it is currently higher.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int bus_num;
+
+       for (bus_num = 0; bus_num < IPR_MAX_NUM_BUSES; bus_num++) {
+               struct ipr_bus_attributes *bus_attr = &ioa_cfg->bus_attr[bus_num];
+               u32 ses_limit = ipr_get_max_scsi_speed(ioa_cfg, bus_num,
+                                                      bus_attr->bus_width);
+
+               if (bus_attr->max_xfer_rate > ses_limit)
+                       bus_attr->max_xfer_rate = ses_limit;
+       }
+}
+
+/**
+ * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
+ * @ioa_cfg:   ioa config struct
+ * @mode_pages:        mode page 28 buffer
+ *
+ * Updates mode page 28 based on driver configuration
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
+                                               struct ipr_mode_pages *mode_pages)
+{
+       int i, entry_length;
+       struct ipr_dev_bus_entry *bus;
+       struct ipr_bus_attributes *bus_attr;
+       struct ipr_mode_page28 *mode_page;
+
+       mode_page = ipr_get_mode_page(mode_pages, 0x28,
+                                     sizeof(struct ipr_mode_page28));
+
+       /* ipr_get_mode_page() returns NULL when the page is absent or
+        * shorter than required — bail out instead of dereferencing it */
+       if (!mode_page)
+               return;
+
+       entry_length = mode_page->entry_length;
+
+       /* Loop for each device bus entry */
+       for (i = 0, bus = mode_page->bus;
+            i < mode_page->num_entries;
+            i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
+               if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
+                       dev_err(&ioa_cfg->pdev->dev,
+                               "Invalid resource address reported: 0x%08X\n",
+                               IPR_GET_PHYS_LOC(bus->res_addr));
+                       continue;
+               }
+
+               /* NOTE(review): bus_attr is indexed by the entry position i,
+                * not by bus->res_addr.bus — confirm the two always agree */
+               bus_attr = &ioa_cfg->bus_attr[i];
+               bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
+               bus->bus_width = bus_attr->bus_width;
+               bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
+               bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
+               if (bus_attr->qas_enabled)
+                       bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
+               else
+                       bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
+       }
+}
+
+/**
+ * ipr_build_mode_select - Build a mode select command
+ * @ipr_cmd:   ipr command struct
+ * @res_handle:        resource handle to send command to
+ * @parm:              Byte 2 of Mode Sense command
+ * @dma_addr:  DMA buffer address
+ * @xfer_len:  data transfer length
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
+                                 u32 res_handle, u8 parm, u32 dma_addr,
+                                 u8 xfer_len)
+{
+       struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+       struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+
+       /* MODE SELECT(6) CDB: cdb[1] carries PF/SP bits, cdb[4] is the
+        * parameter list length */
+       ioarcb->res_handle = res_handle;
+       ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
+       ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+       ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
+       ioarcb->cmd_pkt.cdb[1] = parm;
+       ioarcb->cmd_pkt.cdb[4] = xfer_len;
+
+       /* Single-element write IOADL describing the parameter buffer */
+       ioadl->flags_and_data_len =
+               cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
+       ioadl->address = cpu_to_be32(dma_addr);
+       ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+       ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
+}
+
+/**
+ * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function sets up the SCSI bus attributes and sends
+ * a Mode Select for Page 28 to activate them.
+ *
+ * Return value:
+ *     IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
+       int length;
+
+       ENTER;
+       if (ioa_cfg->saved_mode_pages) {
+               /* Replay the mode select data saved before the reset */
+               memcpy(mode_pages, ioa_cfg->saved_mode_pages,
+                      ioa_cfg->saved_mode_page_len);
+               length = ioa_cfg->saved_mode_page_len;
+       } else {
+               /* Otherwise build page 28 from the driver's bus attributes */
+               ipr_scsi_bus_speed_limit(ioa_cfg);
+               ipr_check_term_power(ioa_cfg, mode_pages);
+               ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
+               length = mode_pages->hdr.length + 1;
+               /* The mode data length field is reserved on MODE SELECT */
+               mode_pages->hdr.length = 0;
+       }
+
+       ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
+                             ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
+                             length);
+
+       /* Next job step walks used_res_q starting from its first entry */
+       ipr_cmd->job_step = ipr_set_supported_devs;
+       ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
+                                   struct ipr_resource_entry, queue);
+
+       ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+       LEAVE;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_build_mode_sense - Builds a mode sense command
+ * @ipr_cmd:   ipr command struct
+ * @res_handle:        resource handle to send command to
+ * @parm:              Byte 2 of mode sense command
+ * @dma_addr:  DMA address of mode sense buffer
+ * @xfer_len:  Size of DMA buffer
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
+                                u32 res_handle,
+                                u8 parm, u32 dma_addr, u8 xfer_len)
+{
+       struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+       struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+
+       /* MODE SENSE(6) CDB: cdb[2] carries PC/page code, cdb[4] is the
+        * allocation length */
+       ioarcb->res_handle = res_handle;
+       ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
+       ioarcb->cmd_pkt.cdb[2] = parm;
+       ioarcb->cmd_pkt.cdb[4] = xfer_len;
+       ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
+
+       /* Single-element read IOADL describing the response buffer */
+       ioadl->flags_and_data_len =
+               cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
+       ioadl->address = cpu_to_be32(dma_addr);
+       ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+       ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
+}
+
+/**
+ * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function send a Page 28 mode sense to the IOA to
+ * retrieve SCSI bus attributes.
+ *
+ * Return value:
+ *     IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+       ENTER;
+       /* Response lands in the shared VPD control block's mode_pages area */
+       ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
+                            0x28, ioa_cfg->vpd_cbs_dma +
+                            offsetof(struct ipr_misc_cbs, mode_pages),
+                            sizeof(struct ipr_mode_pages));
+
+       ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
+
+       ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+       LEAVE;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_init_res_table - Initialize the resource table
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function looks through the existing resource table, comparing
+ * it with the config table. This function will take care of old/new
+ * devices and schedule adding/removing them from the mid-layer
+ * as appropriate.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_resource_entry *res, *temp;
+       struct ipr_config_table_entry *cfgte;
+       int found, i;
+       LIST_HEAD(old_res);
+
+       ENTER;
+       if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
+               dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
+
+       /* Park every known resource on old_res; entries the adapter still
+        * reports will be moved back below */
+       list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
+               list_move_tail(&res->queue, &old_res);
+
+       for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
+               cfgte = &ioa_cfg->cfg_table->dev[i];
+               found = 0;
+
+               /* Match existing entries by resource address */
+               list_for_each_entry_safe(res, temp, &old_res, queue) {
+                       if (!memcmp(&res->cfgte.res_addr,
+                                   &cfgte->res_addr, sizeof(cfgte->res_addr))) {
+                               list_move_tail(&res->queue, &ioa_cfg->used_res_q);
+                               found = 1;
+                               break;
+                       }
+               }
+
+               /* New device: claim a free entry and flag it for mid-layer add */
+               if (!found) {
+                       if (list_empty(&ioa_cfg->free_res_q)) {
+                               dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
+                               break;
+                       }
+
+                       found = 1;
+                       res = list_entry(ioa_cfg->free_res_q.next,
+                                        struct ipr_resource_entry, queue);
+                       list_move_tail(&res->queue, &ioa_cfg->used_res_q);
+                       ipr_init_res_entry(res);
+                       res->add_to_ml = 1;
+               }
+
+               /* Refresh the cached config table entry in either case */
+               if (found)
+                       memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
+       }
+
+       /* Whatever is left on old_res disappeared from the config table:
+        * schedule mid-layer removal if it has an sdev, else free it */
+       list_for_each_entry_safe(res, temp, &old_res, queue) {
+               if (res->sdev) {
+                       res->del_from_ml = 1;
+                       list_move_tail(&res->queue, &ioa_cfg->used_res_q);
+               } else {
+                       list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+               }
+       }
+
+       ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
+
+       LEAVE;
+       return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function sends a Query IOA Configuration command
+ * to the adapter to retrieve the IOA configuration table.
+ *
+ * Return value:
+ *     IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+       struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+       struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
+
+       ENTER;
+       /* Log the firmware level retrieved by the preceding page 3 inquiry */
+       dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
+                ucode_vpd->major_release, ucode_vpd->card_type,
+                ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
+       ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+       ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+
+       /* cdb[7..8] carry the allocation length, big-endian */
+       ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
+       ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
+       ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
+
+       /* Single-element read IOADL targeting the config table buffer */
+       ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+       ioarcb->read_data_transfer_length =
+               cpu_to_be32(sizeof(struct ipr_config_table));
+
+       ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
+       ioadl->flags_and_data_len =
+               cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
+
+       ipr_cmd->job_step = ipr_init_res_table;
+
+       ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+       LEAVE;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
+ * @ipr_cmd:   ipr command struct
+ * @flags:     INQUIRY CDB byte 1 (e.g. EVPD bit)
+ * @page:      VPD page code (CDB byte 2)
+ * @dma_addr:  DMA address of the inquiry response buffer
+ * @xfer_len:  size of the inquiry response buffer
+ *
+ * This utility function sends an inquiry to the adapter.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
+                             u32 dma_addr, u8 xfer_len)
+{
+       struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+       struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+
+       ENTER;
+       ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
+       ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+
+       ioarcb->cmd_pkt.cdb[0] = INQUIRY;
+       ioarcb->cmd_pkt.cdb[1] = flags;
+       ioarcb->cmd_pkt.cdb[2] = page;
+       ioarcb->cmd_pkt.cdb[4] = xfer_len;
+
+       /* Single-element read IOADL describing the response buffer */
+       ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+       ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
+
+       ioadl->address = cpu_to_be32(dma_addr);
+       ioadl->flags_and_data_len =
+               cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
+
+       ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+       LEAVE;
+}
+
+/**
+ * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function sends a Page 3 inquiry to the adapter
+ * to retrieve software VPD information. It also parses the
+ * standard inquiry data (fetched by the previous job step) to
+ * record the adapter type.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       char type[5];
+
+       ENTER;
+
+       /* Grab the type out of the VPD and store it away */
+       /* First 4 product-id characters are interpreted as a hex adapter type */
+       memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
+       type[4] = '\0';
+       ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
+
+       ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
+
+       /* flags = 1 sets the EVPD bit, page 3 selects the software VPD page */
+       ipr_ioafp_inquiry(ipr_cmd, 1, 3,
+                         ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
+                         sizeof(struct ipr_inquiry_page3));
+
+       LEAVE;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function sends a standard inquiry to the adapter.
+ * The response lands in the ioa_vpd area of the misc control blocks
+ * and is parsed by the next job step (page 3 inquiry).
+ *
+ * Return value:
+ *     IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+       ENTER;
+       ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
+
+       /* flags = 0, page = 0: standard inquiry data */
+       ipr_ioafp_inquiry(ipr_cmd, 0, 0,
+                         ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
+                         sizeof(struct ipr_ioa_vpd));
+
+       LEAVE;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function sends an Identify Host Request Response Queue
+ * command to establish the HRRQ with the adapter.
+ *
+ * NOTE(review): "indentify" is a typo for "identify"; the name is kept
+ * as-is since it is referenced by other job steps in this file.
+ *
+ * Return value:
+ *     IPR_RC_JOB_RETURN
+ **/
+static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+
+       ENTER;
+       dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
+
+       ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
+       ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+
+       ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+       /* cdb[2..5]: host RRQ DMA address, most significant byte first */
+       ioarcb->cmd_pkt.cdb[2] =
+               ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
+       ioarcb->cmd_pkt.cdb[3] =
+               ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
+       ioarcb->cmd_pkt.cdb[4] =
+               ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
+       ioarcb->cmd_pkt.cdb[5] =
+               ((u32) ioa_cfg->host_rrq_dma) & 0xff;
+       /* cdb[7..8]: length of the RRQ in bytes (one u32 entry per cmd block) */
+       ioarcb->cmd_pkt.cdb[7] =
+               ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
+       ioarcb->cmd_pkt.cdb[8] =
+               (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
+
+       ipr_cmd->job_step = ipr_ioafp_std_inquiry;
+
+       ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+
+       LEAVE;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_timer_done - Adapter reset timer function
+ * @ipr_cmd:   ipr command struct
+ *
+ * Description: This function is used in adapter reset processing
+ * for timing events. If the reset_cmd pointer in the IOA
+ * config struct is not this adapter's we are doing nested
+ * resets and fail_all_ops will take care of freeing the
+ * command block.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       unsigned long lock_flags = 0;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+       /* Only advance the job when this command is still the active reset;
+        * otherwise a nested reset owns cleanup of this command block. */
+       if (ioa_cfg->reset_cmd == ipr_cmd) {
+               list_del(&ipr_cmd->queue);
+               ipr_cmd->done(ipr_cmd);
+       }
+
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+}
+
+/**
+ * ipr_reset_start_timer - Start a timer for adapter reset job
+ * @ipr_cmd:   ipr command struct
+ * @timeout:   timeout value (in jiffies)
+ *
+ * Description: This function is used in adapter reset processing
+ * for timing events. If the reset_cmd pointer in the IOA
+ * config struct is not this adapter's we are doing nested
+ * resets and fail_all_ops will take care of freeing the
+ * command block.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
+                                 unsigned long timeout)
+{
+       /* Queue the command before arming the timer so the expiry handler
+        * (ipr_reset_timer_done -> ipr_reset_ioa_job) finds it pending */
+       list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+       ipr_cmd->done = ipr_reset_ioa_job;
+
+       ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+       ipr_cmd->timer.expires = jiffies + timeout;
+       ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
+       add_timer(&ipr_cmd->timer);
+}
+
+/**
+ * ipr_init_ioa_mem - Initialize ioa_cfg control block
+ * @ioa_cfg:   ioa cfg struct
+ *
+ * Resets the host RRQ (one u32 entry per command block) and the
+ * config table to their post-reset state.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
+{
+       memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
+
+       /* Initialize Host RRQ pointers */
+       ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
+       ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
+       ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
+       /* Toggle bit distinguishes new entries after the queue wraps */
+       ioa_cfg->toggle_bit = 1;
+
+       /* Zero out config table */
+       memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
+}
+
+/**
+ * ipr_reset_enable_ioa - Enable the IOA following a reset.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function reinitializes some control blocks and
+ * enables destructive diagnostics on the adapter.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       volatile u32 int_reg;
+
+       ENTER;
+       ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
+       ipr_init_ioa_mem(ioa_cfg);
+
+       /* Interrupts may fire as soon as they are unmasked below */
+       ioa_cfg->allow_interrupts = 1;
+       int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+
+       /* Already operational: unmask and fall straight through to the
+        * next job step without waiting on the operational interrupt */
+       if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
+               writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
+                      ioa_cfg->regs.clr_interrupt_mask_reg);
+               int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+               return IPR_RC_JOB_CONTINUE;
+       }
+
+       /* Enable destructive diagnostics on IOA */
+       writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg);
+
+       writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
+       int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+
+       dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
+
+       /* Guard the wait for the adapter to become operational */
+       ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+       ipr_cmd->timer.expires = jiffies + IPR_OPERATIONAL_TIMEOUT;
+       ipr_cmd->timer.function = (void (*)(unsigned long))ipr_timeout;
+       add_timer(&ipr_cmd->timer);
+       list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+       LEAVE;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_wait_for_dump - Wait for a dump to timeout.
+ * @ipr_cmd:   ipr command struct
+ *
+ * This function is invoked when an adapter dump has run out
+ * of processing time. Any dump still being fetched is aborted
+ * and the reset job proceeds to alerting the adapter.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+       if (ioa_cfg->sdt_state == GET_DUMP)
+               ioa_cfg->sdt_state = ABORT_DUMP;
+
+       ipr_cmd->job_step = ipr_reset_alert;
+
+       return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_unit_check_no_data - Log a unit check/no data error log
+ * @ioa_cfg:           ioa config struct
+ *
+ * Logs an error indicating the adapter unit checked, but for some
+ * reason, we were unable to fetch the unit check buffer.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
+{
+       /* Counted so userspace/sysfs can observe error activity */
+       ioa_cfg->errors_logged++;
+       dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
+}
+
+/**
+ * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
+ * @ioa_cfg:           ioa config struct
+ *
+ * Fetches the unit check buffer from the adapter by clocking the data
+ * through the mailbox register. The mailbox holds the address of a
+ * format 2 SDT; its first entry locates the unit check buffer, which
+ * is read into a host RCB and logged like an HCAM error.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
+{
+       unsigned long mailbox;
+       struct ipr_hostrcb *hostrcb;
+       struct ipr_uc_sdt sdt;
+       int rc, length;
+
+       mailbox = readl(ioa_cfg->ioa_mailbox);
+
+       /* Without a format 2 SDT there is nothing we can fetch */
+       if (!ipr_sdt_is_fmt2(mailbox)) {
+               ipr_unit_check_no_data(ioa_cfg);
+               return;
+       }
+
+       memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
+       rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (u32 *) &sdt,
+                                       (sizeof(struct ipr_uc_sdt)) / sizeof(u32));
+
+       if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
+           !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
+               ipr_unit_check_no_data(ioa_cfg);
+               return;
+       }
+
+       /* Find length of the first sdt entry (UC buffer) */
+       length = (be32_to_cpu(sdt.entry[0].end_offset) -
+                 be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
+
+       /* Borrow a free host RCB to hold the unit check data
+        * (assumes hostrcb_free_q is non-empty here — TODO confirm) */
+       hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
+                            struct ipr_hostrcb, queue);
+       list_del(&hostrcb->queue);
+       memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
+
+       rc = ipr_get_ldump_data_section(ioa_cfg,
+                                       be32_to_cpu(sdt.entry[0].bar_str_offset),
+                                       (u32 *)&hostrcb->hcam,
+                                       min(length, (int)sizeof(hostrcb->hcam)) / sizeof(u32));
+
+       if (!rc)
+               ipr_handle_log_data(ioa_cfg, hostrcb);
+       else
+               ipr_unit_check_no_data(ioa_cfg);
+
+       list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
+}
+
+/**
+ * ipr_reset_restore_cfg_space - Restore PCI config space.
+ * @ipr_cmd:   ipr command struct
+ *
+ * Description: This function restores the saved PCI config space of
+ * the adapter, fails all outstanding ops back to the callers, and
+ * fetches the dump/unit check if applicable to this reset.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       int rc;
+
+       ENTER;
+       rc = pci_restore_state(ioa_cfg->pdev, ioa_cfg->pci_cfg_buf);
+
+       if (rc != PCIBIOS_SUCCESSFUL) {
+               ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+               return IPR_RC_JOB_CONTINUE;
+       }
+
+       if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
+               ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+               return IPR_RC_JOB_CONTINUE;
+       }
+
+       ipr_fail_all_ops(ioa_cfg);
+
+       /* After fetching the unit check buffer, alert and reset again */
+       if (ioa_cfg->ioa_unit_checked) {
+               ioa_cfg->ioa_unit_checked = 0;
+               ipr_get_unit_check_buffer(ioa_cfg);
+               ipr_cmd->job_step = ipr_reset_alert;
+               ipr_reset_start_timer(ipr_cmd, 0);
+               return IPR_RC_JOB_RETURN;
+       }
+
+       if (ioa_cfg->in_ioa_bringdown) {
+               ipr_cmd->job_step = ipr_ioa_bringdown_done;
+       } else {
+               ipr_cmd->job_step = ipr_reset_enable_ioa;
+
+               /* Let the dump worker run before re-enabling the adapter */
+               if (GET_DUMP == ioa_cfg->sdt_state) {
+                       ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
+                       ipr_cmd->job_step = ipr_reset_wait_for_dump;
+                       schedule_work(&ioa_cfg->work_q);
+                       return IPR_RC_JOB_RETURN;
+               }
+       }
+
+       LEAVE;  /* was ENTER; — exit trace macro mismatched at function end */
+       return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_start_bist - Run BIST on the adapter.
+ * @ipr_cmd:   ipr command struct
+ *
+ * Description: This function runs BIST on the adapter, then delays 2 seconds.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       int rc;
+
+       ENTER;
+       /* Kick off BIST by writing the PCI BIST register */
+       rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
+
+       if (rc != PCIBIOS_SUCCESSFUL) {
+               ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+               rc = IPR_RC_JOB_CONTINUE;
+       } else {
+               /* Give the adapter time to complete BIST before restoring
+                * config space in the next job step */
+               ipr_cmd->job_step = ipr_reset_restore_cfg_space;
+               ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
+               rc = IPR_RC_JOB_RETURN;
+       }
+
+       LEAVE;
+       return rc;
+}
+
+/**
+ * ipr_reset_allowed - Query whether or not IOA can be reset
+ * @ioa_cfg:   ioa config struct
+ *
+ * The adapter raises IPR_PCII_CRITICAL_OPERATION while it must not
+ * be reset (e.g. while flashing); reset is allowed when clear.
+ *
+ * Return value:
+ *     0 if reset not allowed / non-zero if reset is allowed
+ **/
+static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
+{
+       volatile u32 temp_reg;
+
+       temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+       return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
+}
+
+/**
+ * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
+ * @ipr_cmd:   ipr command struct
+ *
+ * Description: This function waits for adapter permission to run BIST,
+ * then runs BIST. If the adapter does not give permission after a
+ * reasonable time, we will reset the adapter anyway. The impact of
+ * resetting the adapter without warning the adapter is the risk of
+ * losing the persistent error log on the adapter. If the adapter is
+ * reset while it is writing to the flash on the adapter, the flash
+ * segment will have bad ECC and be zeroed.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       int rc = IPR_RC_JOB_RETURN;
+
+       /* Poll until permission is granted or u.time_left is exhausted */
+       if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
+               ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
+               ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
+       } else {
+               ipr_cmd->job_step = ipr_reset_start_bist;
+               rc = IPR_RC_JOB_CONTINUE;
+       }
+
+       return rc;
+}
+
+/**
+ * ipr_reset_alert - Alert the adapter of a pending reset
+ * @ipr_cmd:   ipr command struct
+ *
+ * Description: This function alerts the adapter that it will be reset.
+ * If memory space is not currently enabled, proceed directly
+ * to running BIST on the adapter. The timer must always be started
+ * so we guarantee we do not run BIST from ipr_isr.
+ *
+ * Return value:
+ *     IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       u16 cmd_reg;
+       int rc;
+
+       ENTER;
+       rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
+
+       if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
+               /* MMIO reachable: raise the reset alert doorbell and wait
+                * for the adapter to grant permission to run BIST */
+               ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
+               writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
+               ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
+       } else {
+               ipr_cmd->job_step = ipr_reset_start_bist;
+       }
+
+       ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
+       ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
+
+       LEAVE;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_ucode_download_done - Microcode download completion
+ * @ipr_cmd:   ipr command struct
+ *
+ * Description: This function unmaps the microcode download buffer.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
+
+       /* NOTE(review): pci_unmap_sg conventionally takes PCI_DMA_TODEVICE;
+        * DMA_TO_DEVICE is the generic-DMA-API constant — confirm intended */
+       pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
+                    sglist->num_sg, DMA_TO_DEVICE);
+
+       ipr_cmd->job_step = ipr_reset_alert;
+       return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_ucode_download - Download microcode to the adapter
+ * @ipr_cmd:   ipr command struct
+ *
+ * Description: This function checks to see if it there is microcode
+ * to download to the adapter. If there is, a download is performed
+ * via a SCSI WRITE BUFFER (download and save) command.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
+
+       ENTER;
+       /* Default next step if no download is needed or mapping fails */
+       ipr_cmd->job_step = ipr_reset_alert;
+
+       if (!sglist)
+               return IPR_RC_JOB_CONTINUE;
+
+       ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+       ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
+       ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
+       ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
+       /* cdb[6..8]: 24-bit parameter list length, most significant first */
+       ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
+       ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
+       ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
+
+       if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) {
+               dev_err(&ioa_cfg->pdev->dev,
+                       "Failed to map microcode download buffer\n");
+               return IPR_RC_JOB_CONTINUE;
+       }
+
+       ipr_cmd->job_step = ipr_reset_ucode_download_done;
+
+       ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+                  IPR_WRITE_BUFFER_TIMEOUT);
+
+       LEAVE;
+       return IPR_RC_JOB_RETURN;
+}
+
+/**
+ * ipr_reset_shutdown_ioa - Shutdown the adapter
+ * @ipr_cmd:   ipr command struct
+ *
+ * Description: This function issues an adapter shutdown of the
+ * specified type to the specified adapter as part of the
+ * adapter reset job.
+ *
+ * Return value:
+ *     IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
+{
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+       enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
+       unsigned long timeout;
+       int rc = IPR_RC_JOB_CONTINUE;
+
+       ENTER;
+       /* A dead adapter cannot process a shutdown command; skip to alert */
+       if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
+               ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+               ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+               ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
+               ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
+
+               /* Timeout scales with how much work the shutdown type implies */
+               if (shutdown_type == IPR_SHUTDOWN_ABBREV)
+                       timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
+               else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
+                       timeout = IPR_INTERNAL_TIMEOUT;
+               else
+                       timeout = IPR_SHUTDOWN_TIMEOUT;
+
+               ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
+
+               rc = IPR_RC_JOB_RETURN;
+               ipr_cmd->job_step = ipr_reset_ucode_download;
+       } else
+               ipr_cmd->job_step = ipr_reset_alert;
+
+       LEAVE;
+       return rc;
+}
+
+/**
+ * ipr_reset_ioa_job - Adapter reset job
+ * @ipr_cmd:   ipr command struct
+ *
+ * Description: This function is the job router for the adapter reset job.
+ * It repeatedly invokes the current job_step until one of the steps
+ * returns IPR_RC_JOB_RETURN (i.e. goes asynchronous).
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
+{
+       u32 rc, ioasc;
+       unsigned long scratch = ipr_cmd->u.scratch;
+       struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+       do {
+               ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+               if (ioa_cfg->reset_cmd != ipr_cmd) {
+                       /*
+                        * We are doing nested adapter resets and this is
+                        * not the current reset job.
+                        */
+                       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+                       return;
+               }
+
+               /* Any sense-key carrying IOASC aborts the job and triggers
+                * a fresh reset of the adapter */
+               if (IPR_IOASC_SENSE_KEY(ioasc)) {
+                       dev_err(&ioa_cfg->pdev->dev,
+                               "0x%02X failed with IOASC: 0x%08X\n",
+                               ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
+
+                       ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+                       list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+                       return;
+               }
+
+               /* Reinit clobbers u.scratch; preserve it across job steps */
+               ipr_reinit_ipr_cmnd(ipr_cmd);
+               ipr_cmd->u.scratch = scratch;
+               rc = ipr_cmd->job_step(ipr_cmd);
+       } while(rc == IPR_RC_JOB_CONTINUE);
+}
+
+/**
+ * _ipr_initiate_ioa_reset - Initiate an adapter reset
+ * @ioa_cfg:           ioa config struct
+ * @job_step:          first job step of reset job
+ * @shutdown_type:     shutdown type
+ *
+ * Description: This function will initiate the reset of the given adapter
+ * starting at the selected job step.
+ * If the caller needs to wait on the completion of the reset,
+ * the caller must sleep on the reset_wait_q.
+ *
+ * Return value:
+ *     none
+ **/
+static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
+                                   int (*job_step) (struct ipr_cmnd *),
+                                   enum ipr_shutdown_type shutdown_type)
+{
+       struct ipr_cmnd *ipr_cmd;
+
+       /* Stop accepting new commands while the reset is in flight */
+       ioa_cfg->in_reset_reload = 1;
+       ioa_cfg->allow_cmds = 0;
+       scsi_block_requests(ioa_cfg->host);
+
+       ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+       /* Mark this command as the active reset before running the job */
+       ioa_cfg->reset_cmd = ipr_cmd;
+       ipr_cmd->job_step = job_step;
+       ipr_cmd->u.shutdown_type = shutdown_type;
+
+       ipr_reset_ioa_job(ipr_cmd);
+}
+
+/**
+ * ipr_initiate_ioa_reset - Initiate an adapter reset
+ * @ioa_cfg:           ioa config struct
+ * @shutdown_type:     shutdown type
+ *
+ * Description: This function will initiate the reset of the given adapter.
+ * If the caller needs to wait on the completion of the reset,
+ * the caller must sleep on the reset_wait_q.
+ * Called with the host lock held.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
+                                  enum ipr_shutdown_type shutdown_type)
+{
+       if (ioa_cfg->ioa_is_dead)
+               return;
+
+       /* A reset during dump collection abandons the dump */
+       if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
+               ioa_cfg->sdt_state = ABORT_DUMP;
+
+       /* Too many consecutive failed resets: take the adapter offline */
+       if (ioa_cfg->reset_retries++ > IPR_NUM_RESET_RELOAD_RETRIES) {
+               dev_err(&ioa_cfg->pdev->dev,
+                       "IOA taken offline - error recovery failed\n");
+
+               ioa_cfg->reset_retries = 0;
+               ioa_cfg->ioa_is_dead = 1;
+
+               if (ioa_cfg->in_ioa_bringdown) {
+                       ioa_cfg->reset_cmd = NULL;
+                       ioa_cfg->in_reset_reload = 0;
+                       ipr_fail_all_ops(ioa_cfg);
+                       wake_up_all(&ioa_cfg->reset_wait_q);
+
+                       /* Drop the lock: scsi_unblock_requests may not be
+                        * called with the host lock held */
+                       spin_unlock_irq(ioa_cfg->host->host_lock);
+                       scsi_unblock_requests(ioa_cfg->host);
+                       spin_lock_irq(ioa_cfg->host->host_lock);
+                       return;
+               } else {
+                       ioa_cfg->in_ioa_bringdown = 1;
+                       shutdown_type = IPR_SHUTDOWN_NONE;
+               }
+       }
+
+       _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
+                               shutdown_type);
+}
+
+/**
+ * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
+ * @ioa_cfg:   ioa cfg struct
+ *
+ * Description: This is the second phase of adapter initialization
+ * This function takes care of initializing the adapter to the point
+ * where it can accept new commands.
+ *
+ * Return value:
+ *     0 on success / -EIO on failure
+ **/
+static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int rc = 0;
+       unsigned long host_lock_flags = 0;
+
+       ENTER;
+       spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+       dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
+       _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
+
+       /* Sleep (lock dropped) until the reset job signals completion */
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
+       wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+       spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+
+       if (ioa_cfg->ioa_is_dead) {
+               rc = -EIO;
+       } else if (ipr_invalid_adapter(ioa_cfg)) {
+               /* In test mode an unsupported adapter is tolerated */
+               if (!ipr_testmode)
+                       rc = -EIO;
+
+               dev_err(&ioa_cfg->pdev->dev,
+                       "Adapter not supported in this hardware configuration.\n");
+       }
+
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
+
+       LEAVE;
+       return rc;
+}
+
+/**
+ * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
+ * @ioa_cfg:   ioa config struct
+ *
+ * Safe to call on a partially-allocated adapter: NULL entries and a
+ * NULL pool are skipped.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int i;
+
+       for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
+               if (ioa_cfg->ipr_cmnd_list[i])
+                       pci_pool_free(ioa_cfg->ipr_cmd_pool,
+                                     ioa_cfg->ipr_cmnd_list[i],
+                                     ioa_cfg->ipr_cmnd_list_dma[i]);
+
+               ioa_cfg->ipr_cmnd_list[i] = NULL;
+       }
+
+       if (ioa_cfg->ipr_cmd_pool)
+               pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
+
+       ioa_cfg->ipr_cmd_pool = NULL;
+}
+
+/**
+ * ipr_free_mem - Frees memory allocated for an adapter
+ * @ioa_cfg:   ioa cfg struct
+ *
+ * Releases everything ipr_alloc_mem allocated: resource entries,
+ * VPD control blocks, command blocks, host RRQ, config table,
+ * host RCBs, dump buffers, saved mode pages and the trace buffer.
+ * Also used as the error-unwind path of ipr_alloc_mem.
+ *
+ * Return value:
+ *     nothing
+ **/
+static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int i;
+
+       kfree(ioa_cfg->res_entries);
+       pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
+                           ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
+       ipr_free_cmd_blks(ioa_cfg);
+       pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
+                           ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+       pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
+                           ioa_cfg->cfg_table,
+                           ioa_cfg->cfg_table_dma);
+
+       for (i = 0; i < IPR_NUM_HCAMS; i++) {
+               pci_free_consistent(ioa_cfg->pdev,
+                                   sizeof(struct ipr_hostrcb),
+                                   ioa_cfg->hostrcb[i],
+                                   ioa_cfg->hostrcb_dma[i]);
+       }
+
+       ipr_free_dump(ioa_cfg);
+       kfree(ioa_cfg->saved_mode_pages);
+       kfree(ioa_cfg->trace);
+}
+
+/**
+ * ipr_free_all_resources - Free all allocated resources for an adapter.
+ * @ioa_cfg:   ioa config struct
+ *
+ * This function frees all allocated resources for the
+ * specified adapter: IRQ, MMIO mapping and region, adapter memory,
+ * and the SCSI host reference.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
+{
+       ENTER;
+       free_irq(ioa_cfg->pdev->irq, ioa_cfg);
+       iounmap((void *) ioa_cfg->hdw_dma_regs);
+       release_mem_region(ioa_cfg->hdw_dma_regs_pci,
+                          pci_resource_len(ioa_cfg->pdev, 0));
+       ipr_free_mem(ioa_cfg);
+       /* Drops the reference taken at scsi_host_alloc time */
+       scsi_host_put(ioa_cfg->host);
+       LEAVE;
+}
+
+/**
+ * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
+ * @ioa_cfg:   ioa config struct
+ *
+ * Creates a PCI pool and carves IPR_NUM_CMD_BLKS command blocks from
+ * it, pre-filling each IOARCB with the DMA addresses of its own
+ * IOADL, IOASA and sense buffer so they never change at runtime.
+ * On partial failure all blocks allocated so far are freed.
+ *
+ * Return value:
+ *     0 on success / -ENOMEM on allocation failure
+ **/
+static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
+{
+       struct ipr_cmnd *ipr_cmd;
+       struct ipr_ioarcb *ioarcb;
+       u32 dma_addr;
+       int i;
+
+       ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
+                                                sizeof(struct ipr_cmnd), 8, 0);
+
+       if (!ioa_cfg->ipr_cmd_pool)
+               return -ENOMEM;
+
+       for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
+               ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
+
+               if (!ipr_cmd) {
+                       ipr_free_cmd_blks(ioa_cfg);
+                       return -ENOMEM;
+               }
+
+               memset(ipr_cmd, 0, sizeof(*ipr_cmd));
+               ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
+               ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
+
+               ioarcb = &ipr_cmd->ioarcb;
+               ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
+               /* Host response handle encodes the command index (<< 2) */
+               ioarcb->host_response_handle = cpu_to_be32(i << 2);
+               ioarcb->write_ioadl_addr =
+                       cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
+               ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+               ioarcb->ioasa_host_pci_addr =
+                       cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+               ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
+               ipr_cmd->cmd_index = i;
+               ipr_cmd->ioa_cfg = ioa_cfg;
+               ipr_cmd->sense_buffer_dma = dma_addr +
+                       offsetof(struct ipr_cmnd, sense_buffer);
+
+               list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+       }
+
+       return 0;
+}
+
+/**
+ * ipr_alloc_mem - Allocate memory for an adapter
+ * @ioa_cfg:   ioa config struct
+ *
+ * Allocates resource entries, VPD control blocks, command blocks,
+ * the host RRQ, the config table, the host RCBs and the trace
+ * buffer. Any failure unwinds everything via ipr_free_mem.
+ *
+ * Return value:
+ *     0 on success / non-zero for error
+ **/
+static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int i;
+
+       ENTER;
+       ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) *
+                                      IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
+
+       if (!ioa_cfg->res_entries)
+               goto cleanup;
+
+       memset(ioa_cfg->res_entries, 0,
+              sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS);
+
+       for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
+               list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
+
+       ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
+                                               sizeof(struct ipr_misc_cbs),
+                                               &ioa_cfg->vpd_cbs_dma);
+
+       if (!ioa_cfg->vpd_cbs)
+               goto cleanup;
+
+       if (ipr_alloc_cmd_blks(ioa_cfg))
+               goto cleanup;
+
+       /* Host RRQ: one u32 response handle slot per command block */
+       ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
+                                                sizeof(u32) * IPR_NUM_CMD_BLKS,
+                                                &ioa_cfg->host_rrq_dma);
+
+       if (!ioa_cfg->host_rrq)
+               goto cleanup;
+
+       ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
+                                                 sizeof(struct ipr_config_table),
+                                                 &ioa_cfg->cfg_table_dma);
+
+       if (!ioa_cfg->cfg_table)
+               goto cleanup;
+
+       for (i = 0; i < IPR_NUM_HCAMS; i++) {
+               ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
+                                                          sizeof(struct ipr_hostrcb),
+                                                          &ioa_cfg->hostrcb_dma[i]);
+
+               if (!ioa_cfg->hostrcb[i])
+                       goto cleanup;
+
+               memset(ioa_cfg->hostrcb[i], 0, sizeof(struct ipr_hostrcb));
+               /* DMA address of the hcam payload within the host RCB */
+               ioa_cfg->hostrcb[i]->hostrcb_dma =
+                       ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
+               list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
+       }
+
+       ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) *
+                                IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
+
+       if (!ioa_cfg->trace)
+               goto cleanup;
+
+       memset(ioa_cfg->trace, 0,
+              sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES);
+
+       LEAVE;
+       return 0;
+
+cleanup:
+       /* ipr_free_mem tolerates the partially-allocated state */
+       ipr_free_mem(ioa_cfg);
+
+       LEAVE;
+       return -ENOMEM;
+}
+
+/**
+ * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
+ * @ioa_cfg:   ioa config struct
+ *
+ * Return value:
+ *     none
+ **/
+static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int i;  /* SCSI bus index */
+
+       for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
+               ioa_cfg->bus_attr[i].bus = i;
+               ioa_cfg->bus_attr[i].qas_enabled = 0;   /* QAS disabled by default */
+               ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
+               if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
+                       ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
+               else    /* out-of-range module parameter: fall back to U160 rate */
+                       ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
+       }
+}
+
+/**
+ * ipr_init_ioa_cfg - Initialize IOA config struct
+ * @ioa_cfg:   ioa config struct
+ * @host:              scsi host struct
+ * @pdev:              PCI dev struct
+ *
+ * Return value:
+ *     none
+ **/
+static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
+                                      struct Scsi_Host *host, struct pci_dev *pdev)
+{
+       ioa_cfg->host = host;
+       ioa_cfg->pdev = pdev;
+       ioa_cfg->log_level = ipr_log_level;     /* module parameter */
+       sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);  /* labels below mark regions when inspecting adapter dumps */
+       sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
+       sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
+       sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
+       sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
+       sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
+       sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
+       sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
+
+       INIT_LIST_HEAD(&ioa_cfg->free_q);       /* free/pending command blocks */
+       INIT_LIST_HEAD(&ioa_cfg->pending_q);
+       INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);       /* HCAM buffers */
+       INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
+       INIT_LIST_HEAD(&ioa_cfg->free_res_q);   /* resource table entries */
+       INIT_LIST_HEAD(&ioa_cfg->used_res_q);
+       INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);        /* deferred work runs ipr_worker_thread */
+       init_waitqueue_head(&ioa_cfg->reset_wait_q);    /* woken when reset/reload completes */
+       ioa_cfg->sdt_state = INACTIVE;  /* no dump in progress */
+
+       ipr_initialize_bus_attr(ioa_cfg);
+
+       host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
+       host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
+       host->max_channel = IPR_MAX_BUS_TO_SCAN;
+       host->unique_id = host->host_no;
+       host->max_cmd_len = IPR_MAX_CDB_LEN;
+       pci_set_drvdata(pdev, ioa_cfg);
+
+       memcpy(&ioa_cfg->regs, &ioa_cfg->chip_cfg->regs, sizeof(ioa_cfg->regs));
+
+       ioa_cfg->regs.set_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;  /* bias chip register offsets by the ioremapped base */
+       ioa_cfg->regs.clr_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
+       ioa_cfg->regs.sense_interrupt_mask_reg += ioa_cfg->hdw_dma_regs;
+       ioa_cfg->regs.clr_interrupt_reg += ioa_cfg->hdw_dma_regs;
+       ioa_cfg->regs.sense_interrupt_reg += ioa_cfg->hdw_dma_regs;
+       ioa_cfg->regs.ioarrin_reg += ioa_cfg->hdw_dma_regs;
+       ioa_cfg->regs.sense_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
+       ioa_cfg->regs.set_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
+       ioa_cfg->regs.clr_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs;
+}
+
+/**
+ * ipr_probe_ioa - Allocates memory and does first stage of initialization
+ * @pdev:              PCI device struct
+ * @dev_id:            PCI device id struct
+ *
+ * Return value:
+ *     0 on success / non-zero on failure
+ **/
+static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
+                                  const struct pci_device_id *dev_id)
+{
+       struct ipr_ioa_cfg *ioa_cfg;
+       struct Scsi_Host *host;
+       unsigned long ipr_regs, ipr_regs_pci;   /* mapped base / PCI bus address of BAR 0 */
+       u32 rc = PCIBIOS_SUCCESSFUL;
+
+       ENTER;
+
+       if ((rc = pci_enable_device(pdev))) {
+               dev_err(&pdev->dev, "Cannot enable adapter\n");
+               return rc;
+       }
+
+       dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
+
+       host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));     /* ioa_cfg lives in host->hostdata */
+
+       if (!host) {
+               dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
+               return -ENOMEM;
+       }
+
+       ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
+       memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
+
+       ioa_cfg->chip_cfg = (const struct ipr_chip_cfg_t *)dev_id->driver_data; /* per-chip config from the PCI id table */
+
+       ipr_regs_pci = pci_resource_start(pdev, 0);
+
+       if (!request_mem_region(ipr_regs_pci,
+                               pci_resource_len(pdev, 0), IPR_NAME)) {
+               dev_err(&pdev->dev,
+                       "Couldn't register memory range of registers\n");
+               scsi_host_put(host);
+               return -ENOMEM;
+       }
+
+       ipr_regs = (unsigned long)ioremap(ipr_regs_pci,
+                                         pci_resource_len(pdev, 0));
+
+       if (!ipr_regs) {
+               dev_err(&pdev->dev,
+                       "Couldn't map memory range of registers\n");
+               release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0));
+               scsi_host_put(host);
+               return -ENOMEM;
+       }
+
+       ioa_cfg->hdw_dma_regs = ipr_regs;
+       ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
+       ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;   /* mailbox offset biased by mapped base */
+
+       ipr_init_ioa_cfg(ioa_cfg, host, pdev);
+
+       pci_set_master(pdev);
+       rc = pci_set_dma_mask(pdev, 0xffffffff);        /* 32-bit DMA addressing */
+
+       if (rc != PCIBIOS_SUCCESSFUL) {
+               dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
+               rc = -EIO;
+               goto cleanup_nomem;
+       }
+
+       rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
+                                  ioa_cfg->chip_cfg->cache_line_size);
+
+       if (rc != PCIBIOS_SUCCESSFUL) {
+               dev_err(&pdev->dev, "Write of cache line size failed\n");
+               rc = -EIO;
+               goto cleanup_nomem;
+       }
+
+       /* Save away PCI config space for use following IOA reset */
+       rc = pci_save_state(pdev, ioa_cfg->pci_cfg_buf);
+
+       if (rc != PCIBIOS_SUCCESSFUL) {
+               dev_err(&pdev->dev, "Failed to save PCI config space\n");
+               rc = -EIO;
+               goto cleanup_nomem;
+       }
+
+       if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
+               goto cleanup_nomem;
+
+       if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
+               goto cleanup_nomem;
+
+       if ((rc = ipr_alloc_mem(ioa_cfg)))
+               goto cleanup;   /* NOTE(review): ipr_alloc_mem() already ran ipr_free_mem() on failure; this path frees again -- confirm ipr_free_mem() tolerates that */
+
+       ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+       rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
+
+       if (rc) {
+               dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
+                       pdev->irq, rc);
+               goto cleanup_nolog;
+       }
+
+       spin_lock(&ipr_driver_lock);
+       list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);  /* publish on the global adapter list */
+       spin_unlock(&ipr_driver_lock);
+
+       LEAVE;
+       return 0;
+
+cleanup:
+       dev_err(&pdev->dev, "Couldn't allocate enough memory for device driver!\n");
+cleanup_nolog:
+       ipr_free_mem(ioa_cfg);
+cleanup_nomem:
+       iounmap((void *) ipr_regs);
+       release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0));
+       scsi_host_put(host);
+
+       return rc;
+}
+
+/**
+ * ipr_scan_vsets - Scans for VSET devices
+ * @ioa_cfg:   ioa config struct
+ *
+ * Description: Since the VSET resources do not follow SAM in that we can have
+ * sparse LUNs with no LUN 0, we have to scan for these ourselves.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
+{
+       int target, lun;
+
+       for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
+               for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
+                       scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);      /* probe every target/lun on the VSET bus; LUNs may be sparse */
+}
+
+/**
+ * ipr_initiate_ioa_bringdown - Bring down an adapter
+ * @ioa_cfg:           ioa config struct
+ * @shutdown_type:     shutdown type
+ *
+ * Description: This function will initiate bringing down the adapter.
+ * This consists of issuing an IOA shutdown to the adapter
+ * to flush the cache, and running BIST.
+ * If the caller needs to wait on the completion of the reset,
+ * the caller must sleep on the reset_wait_q.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
+                                      enum ipr_shutdown_type shutdown_type)
+{
+       ENTER;
+       if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
+               ioa_cfg->sdt_state = ABORT_DUMP;        /* a pending dump cannot proceed across bringdown */
+       ioa_cfg->reset_retries = 0;
+       ioa_cfg->in_ioa_bringdown = 1;  /* flag distinguishes bringdown from a reset/reload */
+       ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
+       LEAVE;
+}
+
+/**
+ * __ipr_remove - Remove a single adapter
+ * @pdev:      pci device struct
+ *
+ * Adapter hot plug remove entry point.
+ *
+ * Return value:
+ *     none
+ **/
+static void __ipr_remove(struct pci_dev *pdev)
+{
+       unsigned long host_lock_flags = 0;
+       struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+       ENTER;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+       ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
+       wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);   /* lock dropped: wait_event may sleep */
+       spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
+
+       spin_lock(&ipr_driver_lock);
+       list_del(&ioa_cfg->queue);      /* unpublish from the global adapter list */
+       spin_unlock(&ipr_driver_lock);
+
+       if (ioa_cfg->sdt_state == ABORT_DUMP)
+               ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
+
+       ipr_free_all_resources(ioa_cfg);
+
+       LEAVE;
+}
+
+/**
+ * ipr_remove - IOA hot plug remove entry point
+ * @pdev:      pci device struct
+ *
+ * Adapter hot plug remove entry point.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_remove(struct pci_dev *pdev)
+{
+       struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+
+       ENTER;
+
+       ioa_cfg->allow_cmds = 0;        /* stop accepting new commands before teardown */
+       flush_scheduled_work();         /* drain any queued work_q items */
+       ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
+                             &ipr_trace_attr);         /* remove sysfs files before the host goes away */
+       ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
+                            &ipr_dump_attr);
+       scsi_remove_host(ioa_cfg->host);
+
+       __ipr_remove(pdev);
+
+       LEAVE;
+}
+
+/**
+ * ipr_probe - Adapter hot plug add entry point
+ * @pdev:      PCI device struct
+ * @dev_id:    PCI device id struct
+ * Return value: 0 on success / non-zero on failure
+ **/
+static int __devinit ipr_probe(struct pci_dev *pdev,
+                              const struct pci_device_id *dev_id)
+{
+       struct ipr_ioa_cfg *ioa_cfg;
+       int rc;
+
+       rc = ipr_probe_ioa(pdev, dev_id);       /* stage 1: allocation and PCI/MMIO setup */
+
+       if (rc)
+               return rc;
+
+       ioa_cfg = pci_get_drvdata(pdev);
+       rc = ipr_probe_ioa_part2(ioa_cfg);      /* stage 2 of initialization */
+
+       if (rc) {
+               __ipr_remove(pdev);
+               return rc;
+       }
+
+       rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
+
+       if (rc) {
+               __ipr_remove(pdev);
+               return rc;
+       }
+
+       rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
+                                  &ipr_trace_attr);
+
+       if (rc) {
+               scsi_remove_host(ioa_cfg->host);
+               __ipr_remove(pdev);
+               return rc;
+       }
+
+       rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
+                                  &ipr_dump_attr);
+
+       if (rc) {       /* unwind in reverse order of setup */
+               ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
+                                     &ipr_trace_attr);
+               scsi_remove_host(ioa_cfg->host);
+               __ipr_remove(pdev);
+               return rc;
+       }
+
+       scsi_scan_host(ioa_cfg->host);
+       ipr_scan_vsets(ioa_cfg);        /* VSET LUNs may be sparse; scan them explicitly */
+       scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);       /* expose the IOA itself as a device */
+       ioa_cfg->allow_ml_add_del = 1;
+       schedule_work(&ioa_cfg->work_q);
+       return 0;
+}
+
+/**
+ * ipr_shutdown - Shutdown handler.
+ * @dev:       device struct
+ *
+ * This function is invoked upon system shutdown/reboot. It will issue
+ * an adapter shutdown to the adapter to flush the write cache.
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_shutdown(struct device *dev)
+{
+       struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(to_pci_dev(dev));
+       unsigned long lock_flags = 0;
+
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+       ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);   /* block until the cache-flush shutdown completes */
+}
+
+static struct pci_device_id ipr_pci_table[] __devinitdata = {
+       { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
+               0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },       /* Gemstone-based adapters use chip config 0 */
+       { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
+               0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
+             0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
+             0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
+       { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
+               PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
+               0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },       /* Snipe uses chip config 1 */
+       { }     /* terminator */
+};
+MODULE_DEVICE_TABLE(pci, ipr_pci_table);
+
+static struct pci_driver ipr_driver = {
+       .name = IPR_NAME,
+       .id_table = ipr_pci_table,
+       .probe = ipr_probe,
+       .remove = ipr_remove,
+       .driver = {
+               .shutdown = ipr_shutdown,       /* flush adapter write cache on reboot/halt */
+       },
+};
+
+/**
+ * ipr_init - Module entry point
+ *
+ * Return value:
+ *     0 on success / non-zero on failure
+ **/
+static int __init ipr_init(void)
+{
+       int rc;         /* result of PCI driver registration */
+       ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
+                IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
+       rc = pci_register_driver(&ipr_driver);
+       /* Propagate failure so a failed registration fails the module load;
+          a positive return (devices claimed, 2.6-era API) is success. */
+       return rc < 0 ? rc : 0;
+}
+
+/**
+ * ipr_exit - Module unload
+ *
+ * Module unload entry point.
+ *
+ * Return value:
+ *     none
+ **/
+static void __exit ipr_exit(void)
+{
+       pci_unregister_driver(&ipr_driver);     /* detaches every bound adapter via .remove (ipr_remove) */
+}
+
+module_init(ipr_init);
+module_exit(ipr_exit);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
new file mode 100644 (file)
index 0000000..468c807
--- /dev/null
@@ -0,0 +1,1252 @@
+/*
+ * ipr.h -- driver for IBM Power Linux RAID adapters
+ *
+ * Written By: Brian King, IBM Corporation
+ *
+ * Copyright (C) 2003, 2004 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef _IPR_H
+#define _IPR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#endif
+
+/*
+ * Literals
+ */
+#define IPR_DRIVER_VERSION "2.0.7"
+#define IPR_DRIVER_DATE "(May 21, 2004)"
+
+/*
+ * IPR_DBG_TRACE: Setting this to 1 will turn on some general function tracing
+ *                     resulting in a bunch of extra debugging printks to the console
+ *
+ * IPR_DEBUG:  Setting this to 1 will turn on some error path tracing.
+ *                     Enables the ipr_trace macro.
+ */
+#ifdef IPR_DEBUG_ALL
+#define IPR_DEBUG                              1
+#define IPR_DBG_TRACE                  1
+#else
+#define IPR_DEBUG                              0
+#define IPR_DBG_TRACE                  0
+#endif
+
+/*
+ * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
+ *     ops per device for devices not running tagged command queuing.
+ *     This can be adjusted at runtime through sysfs device attributes.
+ */
+#define IPR_MAX_CMD_PER_LUN                            6
+
+/*
+ * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of
+ *     ops the mid-layer can send to the adapter.
+ */
+#define IPR_NUM_BASE_CMD_BLKS                          100
+
+#define IPR_SUBS_DEV_ID_2780   0x0264  /* PCI subsystem device IDs of supported adapters */
+#define IPR_SUBS_DEV_ID_5702   0x0266
+#define IPR_SUBS_DEV_ID_5703   0x0278
+#define IPR_SUBS_DEV_ID_572E  0x02D3
+#define IPR_SUBS_DEV_ID_573D  0x02D4
+
+#define IPR_NAME                               "ipr"   /* used for resource and IRQ registration */
+
+/*
+ * Return codes
+ */
+#define IPR_RC_JOB_CONTINUE            1       /* internal multi-stage job return codes */
+#define IPR_RC_JOB_RETURN              2
+
+/*
+ * IOASCs
+ */
+#define IPR_IOASC_NR_INIT_CMD_REQUIRED         0x02040200
+#define IPR_IOASC_SYNC_REQUIRED                        0x023f0000
+#define IPR_IOASC_MED_DO_NOT_REALLOC           0x03110C00
+#define IPR_IOASC_HW_SEL_TIMEOUT                       0x04050000
+#define IPR_IOASC_HW_DEV_BUS_STATUS                    0x04448500
+#define        IPR_IOASC_IOASC_MASK                    0xFFFFFF00
+#define        IPR_IOASC_SCSI_STATUS_MASK              0x000000FF
+#define IPR_IOASC_IR_RESOURCE_HANDLE           0x05250000
+#define IPR_IOASC_BUS_WAS_RESET                        0x06290000
+#define IPR_IOASC_BUS_WAS_RESET_BY_OTHER               0x06298000
+#define IPR_IOASC_ABORTED_CMD_TERM_BY_HOST     0x0B5A0000
+
+#define IPR_FIRST_DRIVER_IOASC                 0x10000000      /* IOASCs at/above this are driver-generated, not adapter-generated */
+#define IPR_IOASC_IOA_WAS_RESET                        0x10000001
+#define IPR_IOASC_PCI_ACCESS_ERROR                     0x10000002
+
+#define IPR_NUM_LOG_HCAMS                              2
+#define IPR_NUM_CFG_CHG_HCAMS                          2
+#define IPR_NUM_HCAMS  (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
+#define IPR_MAX_NUM_TARGETS_PER_BUS                    0x10
+#define IPR_MAX_NUM_LUNS_PER_TARGET                    256
+#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET       8
+#define IPR_VSET_BUS                                   0xff    /* virtual bus scanned explicitly by ipr_scan_vsets() */
+#define IPR_IOA_BUS                                            0xff
+#define IPR_IOA_TARGET                                 0xff
+#define IPR_IOA_LUN                                            0xff
+#define IPR_MAX_NUM_BUSES                              4
+#define IPR_MAX_BUS_TO_SCAN                            IPR_MAX_NUM_BUSES
+
+#define IPR_NUM_RESET_RELOAD_RETRIES           3
+
+/* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */
+#define IPR_NUM_INTERNAL_CMD_BLKS      (IPR_NUM_HCAMS + \
+                                     ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 3)
+
+#define IPR_MAX_COMMANDS               IPR_NUM_BASE_CMD_BLKS
+#define IPR_NUM_CMD_BLKS               (IPR_NUM_BASE_CMD_BLKS + \
+                                               IPR_NUM_INTERNAL_CMD_BLKS)
+
+#define IPR_MAX_PHYSICAL_DEVS                          192     /* sizes the config table and resource entry pool */
+
+#define IPR_MAX_SGLIST                                 64
+#define IPR_MAX_SECTORS                                        512
+#define IPR_MAX_CDB_LEN                                        16
+
+#define IPR_DEFAULT_BUS_WIDTH                          16
+#define IPR_80MBs_SCSI_RATE            ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8))
+#define IPR_U160_SCSI_RATE     ((160 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8))
+#define IPR_U320_SCSI_RATE     ((320 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8))
+#define IPR_MAX_SCSI_RATE(width) ((320 * 10) / ((width) / 8))
+
+#define IPR_IOA_RES_HANDLE                             0xffffffff      /* handle/address of the IOA itself */
+#define IPR_IOA_RES_ADDR                               0x00ffffff
+
+/*
+ * Adapter Commands
+ */
+#define IPR_RESET_DEVICE                               0xC3    /* adapter-specific CDB opcodes */
+#define        IPR_RESET_TYPE_SELECT                           0x80
+#define        IPR_LUN_RESET                                   0x40
+#define        IPR_TARGET_RESET                                        0x20
+#define        IPR_BUS_RESET                                   0x10
+#define IPR_ID_HOST_RR_Q                               0xC4
+#define IPR_QUERY_IOA_CONFIG                           0xC5
+#define IPR_ABORT_TASK                                 0xC7
+#define IPR_CANCEL_ALL_REQUESTS                        0xCE
+#define IPR_HOST_CONTROLLED_ASYNC                      0xCF
+#define        IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE      0x01
+#define        IPR_HCAM_CDB_OP_CODE_LOG_DATA           0x02
+#define IPR_SET_SUPPORTED_DEVICES                      0xFB
+#define IPR_IOA_SHUTDOWN                               0xF7
+#define        IPR_WR_BUF_DOWNLOAD_AND_SAVE                    0x05
+
+/*
+ * Timeouts
+ */
+#define IPR_SHUTDOWN_TIMEOUT                   (10 * 60 * HZ)  /* all timeouts below are in jiffies */
+#define IPR_VSET_RW_TIMEOUT                    (2 * 60 * HZ)
+#define IPR_ABBREV_SHUTDOWN_TIMEOUT            (10 * HZ)
+#define IPR_DEVICE_RESET_TIMEOUT               (30 * HZ)
+#define IPR_CANCEL_ALL_TIMEOUT         (30 * HZ)
+#define IPR_ABORT_TASK_TIMEOUT         (30 * HZ)
+#define IPR_INTERNAL_TIMEOUT                   (30 * HZ)
+#define IPR_WRITE_BUFFER_TIMEOUT               (10 * 60 * HZ)
+#define IPR_SET_SUP_DEVICE_TIMEOUT             (2 * 60 * HZ)
+#define IPR_REQUEST_SENSE_TIMEOUT              (10 * HZ)
+#define IPR_OPERATIONAL_TIMEOUT                (5 * 60 * HZ)
+#define IPR_WAIT_FOR_RESET_TIMEOUT             (2 * HZ)
+#define IPR_CHECK_FOR_RESET_TIMEOUT            (HZ / 10)
+#define IPR_WAIT_FOR_BIST_TIMEOUT              (2 * HZ)
+#define IPR_DUMP_TIMEOUT                       (15 * HZ)
+
+/*
+ * SCSI Literals
+ */
+#define IPR_VENDOR_ID_LEN                      8
+#define IPR_PROD_ID_LEN                                16
+#define IPR_SERIAL_NUM_LEN                     8
+
+/*
+ * Hardware literals
+ */
+#define IPR_FMT2_MBX_ADDR_MASK                         0x0fffffff      /* format 2 mailbox encodes BAR select + offset */
+#define IPR_FMT2_MBX_BAR_SEL_MASK                      0xf0000000
+#define IPR_FMT2_MKR_BAR_SEL_SHIFT                     28
+#define IPR_GET_FMT2_BAR_SEL(mbx) \
+(((mbx) & IPR_FMT2_MBX_BAR_SEL_MASK) >> IPR_FMT2_MKR_BAR_SEL_SHIFT)
+#define IPR_SDT_FMT2_BAR0_SEL                          0x0
+#define IPR_SDT_FMT2_BAR1_SEL                          0x1
+#define IPR_SDT_FMT2_BAR2_SEL                          0x2
+#define IPR_SDT_FMT2_BAR3_SEL                          0x3
+#define IPR_SDT_FMT2_BAR4_SEL                          0x4
+#define IPR_SDT_FMT2_BAR5_SEL                          0x5
+#define IPR_SDT_FMT2_EXP_ROM_SEL                       0x8
+#define IPR_FMT2_SDT_READY_TO_USE                      0xC4D4E3F2
+#define IPR_DOORBELL                                   0x82800000
+
+#define IPR_PCII_IOA_TRANS_TO_OPER                     (0x80000000 >> 0)       /* interrupt register bits, numbered MSB-first */
+#define IPR_PCII_IOARCB_XFER_FAILED                    (0x80000000 >> 3)
+#define IPR_PCII_IOA_UNIT_CHECKED                      (0x80000000 >> 4)
+#define IPR_PCII_NO_HOST_RRQ                           (0x80000000 >> 5)
+#define IPR_PCII_CRITICAL_OPERATION                    (0x80000000 >> 6)
+#define IPR_PCII_IO_DEBUG_ACKNOWLEDGE          (0x80000000 >> 7)
+#define IPR_PCII_IOARRIN_LOST                          (0x80000000 >> 27)
+#define IPR_PCII_MMIO_ERROR                            (0x80000000 >> 28)
+#define IPR_PCII_PROC_ERR_STATE                        (0x80000000 >> 29)
+#define IPR_PCII_HRRQ_UPDATED                          (0x80000000 >> 30)
+#define IPR_PCII_CORE_ISSUED_RST_REQ           (0x80000000 >> 31)
+
+#define IPR_PCII_ERROR_INTERRUPTS \
+(IPR_PCII_IOARCB_XFER_FAILED | IPR_PCII_IOA_UNIT_CHECKED | \
+IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
+
+#define IPR_PCII_OPER_INTERRUPTS \
+(IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED | IPR_PCII_IOA_TRANS_TO_OPER)
+
+#define IPR_UPROCI_RESET_ALERT                 (0x80000000 >> 7)
+#define IPR_UPROCI_IO_DEBUG_ALERT                      (0x80000000 >> 9)
+
+#define IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC           200000  /* 200 ms */
+#define IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC          200000  /* 200 ms */
+
+/*
+ * Dump literals
+ */
+#define IPR_MAX_IOA_DUMP_SIZE                          (4 * 1024 * 1024)
+#define IPR_NUM_SDT_ENTRIES                            511
+#define IPR_MAX_NUM_DUMP_PAGES ((IPR_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
+
+/*
+ * Misc literals
+ */
+#define IPR_NUM_IOADL_ENTRIES                  IPR_MAX_SGLIST
+
+/*
+ * Adapter interface types
+ */
+
+struct ipr_res_addr {  /* bus/target/lun address of a resource */
+       u8 reserved;
+       u8 bus;
+       u8 target;
+       u8 lun;
+#define IPR_GET_PHYS_LOC(res_addr) \
+       (((res_addr).bus << 16) | ((res_addr).target << 8) | (res_addr).lun)
+}__attribute__((packed, aligned (4)));
+
+struct ipr_std_inq_vpids {     /* vendor/product IDs from standard inquiry data */
+       u8 vendor_id[IPR_VENDOR_ID_LEN];
+       u8 product_id[IPR_PROD_ID_LEN];
+}__attribute__((packed));
+
+struct ipr_std_inq_data {      /* standard SCSI inquiry data, wire layout */
+       u8 peri_qual_dev_type;
+#define IPR_STD_INQ_PERI_QUAL(peri) ((peri) >> 5)
+#define IPR_STD_INQ_PERI_DEV_TYPE(peri) ((peri) & 0x1F)
+
+       u8 removeable_medium_rsvd;
+#define IPR_STD_INQ_REMOVEABLE_MEDIUM 0x80
+
+#define IPR_IS_DASD_DEVICE(std_inq) \
+((IPR_STD_INQ_PERI_DEV_TYPE((std_inq).peri_qual_dev_type) == TYPE_DISK) && \
+!(((std_inq).removeable_medium_rsvd) & IPR_STD_INQ_REMOVEABLE_MEDIUM))
+
+#define IPR_IS_SES_DEVICE(std_inq) \
+(IPR_STD_INQ_PERI_DEV_TYPE((std_inq).peri_qual_dev_type) == TYPE_ENCLOSURE)
+
+       u8 version;
+       u8 aen_naca_fmt;
+       u8 additional_len;
+       u8 sccs_rsvd;
+       u8 bq_enc_multi;
+       u8 sync_cmdq_flags;
+
+       struct ipr_std_inq_vpids vpids;
+
+       u8 ros_rsvd_ram_rsvd[4];
+
+       u8 serial_num[IPR_SERIAL_NUM_LEN];
+}__attribute__ ((packed));
+
+struct ipr_config_table_entry {        /* one device entry in the IOA config table */
+       u8 service_level;
+       u8 array_id;
+       u8 flags;
+#define IPR_IS_IOA_RESOURCE    0x80
+#define IPR_IS_ARRAY_MEMBER 0x20
+#define IPR_IS_HOT_SPARE       0x10
+
+       u8 rsvd_subtype;
+#define IPR_RES_SUBTYPE(res) (((res)->cfgte.rsvd_subtype) & 0x0f)
+#define IPR_SUBTYPE_AF_DASD                    0
+#define IPR_SUBTYPE_GENERIC_SCSI       1
+#define IPR_SUBTYPE_VOLUME_SET         2
+
+       struct ipr_res_addr res_addr;
+       u32 res_handle;
+       u32 reserved4[2];
+       struct ipr_std_inq_data std_inq_data;
+}__attribute__ ((packed, aligned (4)));
+
+struct ipr_config_table_hdr {
+       u8 num_entries;
+       u8 flags;
+#define IPR_UCODE_DOWNLOAD_REQ 0x10
+       u16 reserved;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_config_table {      /* header plus up to IPR_MAX_PHYSICAL_DEVS entries */
+       struct ipr_config_table_hdr hdr;
+       struct ipr_config_table_entry dev[IPR_MAX_PHYSICAL_DEVS];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_cfg_ch_not {        /* config change notification HCAM payload */
+       struct ipr_config_table_entry cfgte;
+       u8 reserved[936];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_supported_device {
+       u16 data_length;
+       u8 reserved;
+       u8 num_records;
+       struct ipr_std_inq_vpids vpids;
+       u8 reserved2[16];
+}__attribute__((packed, aligned (4)));
+
+/* Command packet structure */
+struct ipr_cmd_pkt {
+       u16 reserved;           /* Reserved by IOA */
+       u8 request_type;
+#define IPR_RQTYPE_SCSICDB             0x00
+#define IPR_RQTYPE_IOACMD              0x01
+#define IPR_RQTYPE_HCAM                        0x02
+
+       u8 luntar_luntrn;
+
+       u8 flags_hi;
+#define IPR_FLAGS_HI_WRITE_NOT_READ            0x80
+#define IPR_FLAGS_HI_NO_ULEN_CHK               0x20
+#define IPR_FLAGS_HI_SYNC_OVERRIDE             0x10
+#define IPR_FLAGS_HI_SYNC_COMPLETE             0x08
+#define IPR_FLAGS_HI_NO_LINK_DESC              0x04
+
+       u8 flags_lo;
+#define IPR_FLAGS_LO_ALIGNED_BFR               0x20
+#define IPR_FLAGS_LO_DELAY_AFTER_RST   0x10
+#define IPR_FLAGS_LO_UNTAGGED_TASK             0x00
+#define IPR_FLAGS_LO_SIMPLE_TASK               0x02
+#define IPR_FLAGS_LO_ORDERED_TASK              0x04
+#define IPR_FLAGS_LO_HEAD_OF_Q_TASK            0x06
+#define IPR_FLAGS_LO_ACA_TASK                  0x08
+
+       u8 cdb[16];
+       u16 timeout;
+}__attribute__ ((packed, aligned(4)));
+
+/* IOA Request Control Block    128 bytes  */
+struct ipr_ioarcb {
+       u32 ioarcb_host_pci_addr;       /* DMA address of this IOARCB */
+       u32 reserved;
+       u32 res_handle;
+       u32 host_response_handle;
+       u32 reserved1;
+       u32 reserved2;
+       u32 reserved3;
+
+       u32 write_data_transfer_length;
+       u32 read_data_transfer_length;
+       u32 write_ioadl_addr;
+       u32 write_ioadl_len;
+       u32 read_ioadl_addr;
+       u32 read_ioadl_len;
+
+       u32 ioasa_host_pci_addr;        /* DMA address where the IOA writes status */
+       u16 ioasa_len;
+       u16 reserved4;
+
+       struct ipr_cmd_pkt cmd_pkt;
+
+       u32 add_cmd_parms_len;
+       u32 add_cmd_parms[10];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioadl_desc {        /* IOA data list (scatter/gather) element */
+       u32 flags_and_data_len;
+#define IPR_IOADL_FLAGS_MASK           0xff000000
+#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK)
+#define IPR_IOADL_DATA_LEN_MASK                0x00ffffff
+#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK)
+#define IPR_IOADL_FLAGS_READ           0x48000000
+#define IPR_IOADL_FLAGS_READ_LAST      0x49000000
+#define IPR_IOADL_FLAGS_WRITE          0x68000000
+#define IPR_IOADL_FLAGS_WRITE_LAST     0x69000000
+#define IPR_IOADL_FLAGS_LAST           0x01000000
+
+       u32 address;
+}__attribute__((packed, aligned (8)));
+
+struct ipr_ioasa_vset {        /* device-specific status: volume set */
+       u32 failing_lba_hi;
+       u32 failing_lba_lo;
+       u32 ioa_data[22];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa_af_dasd {     /* device-specific status: advanced-function DASD */
+       u32 failing_lba;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa_gpdd {        /* device-specific status: generic SCSI device */
+       u8 end_state;
+       u8 bus_phase;
+       u16 reserved;
+       u32 ioa_data[23];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa_raw {
+       u32 ioa_data[24];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa {     /* IOA status area returned for each command */
+       u32 ioasc;
+#define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24)
+#define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16)
+#define IPR_IOASC_SENSE_QUAL(ioasc) (((ioasc) & 0x0000ff00) >> 8)
+#define IPR_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff)
+
+       u16 ret_stat_len;       /* Length of the returned IOASA */
+
+       u16 avail_stat_len;     /* Total Length of status available. */
+
+       u32 residual_data_len;  /* number of bytes in the host data */
+       /* buffers that were not used by the IOARCB command. */
+
+       u32 ilid;
+#define IPR_NO_ILID                    0
+#define IPR_DRIVER_ILID                0xffffffff
+
+       u32 fd_ioasc;   /* failing device IOASC */
+
+       u32 fd_phys_locator;
+
+       u32 fd_res_handle;
+
+       u32 ioasc_specific;     /* status code specific field */
+#define IPR_IOASC_SPECIFIC_MASK                0x00ffffff
+#define IPR_FIELD_POINTER_VALID                (0x80000000 >> 8)
+#define IPR_FIELD_POINTER_MASK         0x0000ffff
+
+       union {
+               struct ipr_ioasa_vset vset;
+               struct ipr_ioasa_af_dasd dasd;
+               struct ipr_ioasa_gpdd gpdd;
+               struct ipr_ioasa_raw raw;
+       } u;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_mode_parm_hdr {     /* SCSI mode sense/select parameter header */
+       u8 length;
+       u8 medium_type;
+       u8 device_spec_parms;
+       u8 block_desc_len;
+}__attribute__((packed));
+
+struct ipr_mode_pages {
+       struct ipr_mode_parm_hdr hdr;
+       u8 data[255 - sizeof(struct ipr_mode_parm_hdr)];
+}__attribute__((packed));
+
+struct ipr_mode_page_hdr {
+       u8 ps_page_code;
+#define IPR_MODE_PAGE_PS       0x80
+#define IPR_GET_MODE_PAGE_CODE(hdr) ((hdr)->ps_page_code & 0x3F)
+       u8 page_length;
+}__attribute__ ((packed));
+
+struct ipr_dev_bus_entry {     /* per-SCSI-bus settings carried in mode page 0x28 */
+       struct ipr_res_addr res_addr;
+       u8 flags;
+#define IPR_SCSI_ATTR_ENABLE_QAS                       0x80
+#define IPR_SCSI_ATTR_DISABLE_QAS                      0x40
+#define IPR_SCSI_ATTR_QAS_MASK                         0xC0
+#define IPR_SCSI_ATTR_ENABLE_TM                                0x20
+#define IPR_SCSI_ATTR_NO_TERM_PWR                      0x10
+#define IPR_SCSI_ATTR_TM_SUPPORTED                     0x08
+#define IPR_SCSI_ATTR_LVD_TO_SE_NOT_ALLOWED    0x04
+
+       u8 scsi_id;
+       u8 bus_width;
+       u8 extended_reset_delay;
+#define IPR_EXTENDED_RESET_DELAY       7
+
+       u32 max_xfer_rate;
+
+       u8 spinup_delay;
+       u8 reserved3;
+       u16 reserved4;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_mode_page28 {       /* mode page 0x28: variable-length array of bus entries */
+       struct ipr_mode_page_hdr hdr;
+       u8 num_entries;
+       u8 entry_length;
+       struct ipr_dev_bus_entry bus[0];
+}__attribute__((packed));
+
+struct ipr_ioa_vpd {   /* IOA vital product data */
+       struct ipr_std_inq_data std_inq_data;
+       u8 ascii_part_num[12];
+       u8 reserved[40];
+       u8 ascii_plant_code[4];
+}__attribute__((packed));
+struct ipr_inquiry_page3 {
+       u8 peri_qual_dev_type;
+       u8 page_code;
+       u8 reserved1;
+       u8 page_length;
+       u8 ascii_len;
+       u8 reserved2[3];
+       u8 load_id[4];
+       u8 major_release;
+       u8 card_type;
+       u8 minor_release[2];
+       u8 ptf_number[4];
+       u8 patch_number[4];
+}__attribute__((packed));
+
+struct ipr_hostrcb_device_data_entry {
+       struct ipr_std_inq_vpids dev_vpids;
+       u8 dev_sn[IPR_SERIAL_NUM_LEN];
+       struct ipr_res_addr dev_res_addr;
+       struct ipr_std_inq_vpids new_dev_vpids;
+       u8 new_dev_sn[IPR_SERIAL_NUM_LEN];
+       struct ipr_std_inq_vpids ioa_last_with_dev_vpids;
+       u8 ioa_last_with_dev_sn[IPR_SERIAL_NUM_LEN];
+       struct ipr_std_inq_vpids cfc_last_with_dev_vpids;
+       u8 cfc_last_with_dev_sn[IPR_SERIAL_NUM_LEN];
+       u32 ioa_data[5];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_array_data_entry {
+       struct ipr_std_inq_vpids vpids;
+       u8 serial_num[IPR_SERIAL_NUM_LEN];
+       struct ipr_res_addr expected_dev_res_addr;
+       struct ipr_res_addr dev_res_addr;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_ff_error {
+       u32 ioa_data[246];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_01_error {
+       u32 seek_counter;
+       u32 read_counter;
+       u8 sense_data[32];
+       u32 ioa_data[236];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_02_error {
+       struct ipr_std_inq_vpids ioa_vpids;
+       u8 ioa_sn[IPR_SERIAL_NUM_LEN];
+       struct ipr_std_inq_vpids cfc_vpids;
+       u8 cfc_sn[IPR_SERIAL_NUM_LEN];
+       struct ipr_std_inq_vpids ioa_last_attached_to_cfc_vpids;
+       u8 ioa_last_attached_to_cfc_sn[IPR_SERIAL_NUM_LEN];
+       struct ipr_std_inq_vpids cfc_last_attached_to_ioa_vpids;
+       u8 cfc_last_attached_to_ioa_sn[IPR_SERIAL_NUM_LEN];
+       u32 ioa_data[3];
+       u8 reserved[844];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_03_error {
+       struct ipr_std_inq_vpids ioa_vpids;
+       u8 ioa_sn[IPR_SERIAL_NUM_LEN];
+       struct ipr_std_inq_vpids cfc_vpids;
+       u8 cfc_sn[IPR_SERIAL_NUM_LEN];
+       u32 errors_detected;
+       u32 errors_logged;
+       u8 ioa_data[12];
+       struct ipr_hostrcb_device_data_entry dev_entry[3];
+       u8 reserved[444];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_type_04_error {
+       struct ipr_std_inq_vpids ioa_vpids;
+       u8 ioa_sn[IPR_SERIAL_NUM_LEN];
+       struct ipr_std_inq_vpids cfc_vpids;
+       u8 cfc_sn[IPR_SERIAL_NUM_LEN];
+       u8 ioa_data[12];
+       struct ipr_hostrcb_array_data_entry array_member[10];
+       u32 exposed_mode_adn;
+       u32 array_id;
+       struct ipr_std_inq_vpids incomp_dev_vpids;
+       u8 incomp_dev_sn[IPR_SERIAL_NUM_LEN];
+       u32 ioa_data2;
+       struct ipr_hostrcb_array_data_entry array_member2[8];
+       struct ipr_res_addr last_func_vset_res_addr;
+       u8 vset_serial_num[IPR_SERIAL_NUM_LEN];
+       u8 protection_level[8];
+       u8 reserved[124];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_error {
+       u32 failing_dev_ioasc;
+       struct ipr_res_addr failing_dev_res_addr;
+       u32 failing_dev_res_handle;
+       u32 prc;
+       union {
+               struct ipr_hostrcb_type_ff_error type_ff_error;
+               struct ipr_hostrcb_type_01_error type_01_error;
+               struct ipr_hostrcb_type_02_error type_02_error;
+               struct ipr_hostrcb_type_03_error type_03_error;
+               struct ipr_hostrcb_type_04_error type_04_error;
+       } u;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb_raw {
+       u32 data[sizeof(struct ipr_hostrcb_error)/sizeof(u32)];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hcam {
+       u8 op_code;
+#define IPR_HOST_RCB_OP_CODE_CONFIG_CHANGE                     0xE1
+#define IPR_HOST_RCB_OP_CODE_LOG_DATA                          0xE2
+
+       u8 notify_type;
+#define IPR_HOST_RCB_NOTIF_TYPE_EXISTING_CHANGED       0x00
+#define IPR_HOST_RCB_NOTIF_TYPE_NEW_ENTRY                      0x01
+#define IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY                      0x02
+#define IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY                0x10
+#define IPR_HOST_RCB_NOTIF_TYPE_INFORMATION_ENTRY      0x11
+
+       u8 notifications_lost;
+#define IPR_HOST_RCB_NO_NOTIFICATIONS_LOST                     0
+#define IPR_HOST_RCB_NOTIFICATIONS_LOST                                0x80
+
+       u8 flags;
+#define IPR_HOSTRCB_INTERNAL_OPER      0x80
+#define IPR_HOSTRCB_ERR_RESP_SENT      0x40
+
+       u8 overlay_id;
+#define IPR_HOST_RCB_OVERLAY_ID_1                              0x01
+#define IPR_HOST_RCB_OVERLAY_ID_2                              0x02
+#define IPR_HOST_RCB_OVERLAY_ID_3                              0x03
+#define IPR_HOST_RCB_OVERLAY_ID_4                              0x04
+#define IPR_HOST_RCB_OVERLAY_ID_6                              0x06
+#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT                        0xFF
+
+       u8 reserved1[3];
+       u32 ilid;
+       u32 time_since_last_ioa_reset;
+       u32 reserved2;
+       u32 length;
+
+       union {
+               struct ipr_hostrcb_error error;
+               struct ipr_hostrcb_cfg_ch_not ccn;
+               struct ipr_hostrcb_raw raw;
+       } u;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_hostrcb {
+       struct ipr_hcam hcam;
+       u32 hostrcb_dma;
+       struct list_head queue;
+};
+
+/* IPR smart dump table structures */
+struct ipr_sdt_entry {
+       u32 bar_str_offset;
+       u32 end_offset;
+       u8 entry_byte;
+       u8 reserved[3];
+
+       u8 flags;
+#define IPR_SDT_ENDIAN         0x80
+#define IPR_SDT_VALID_ENTRY    0x20
+
+       u8 resv;
+       u16 priority;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_sdt_header {
+       u32 state;
+       u32 num_entries;
+       u32 num_entries_used;
+       u32 dump_size;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_sdt {
+       struct ipr_sdt_header hdr;
+       struct ipr_sdt_entry entry[IPR_NUM_SDT_ENTRIES];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_uc_sdt {
+       struct ipr_sdt_header hdr;
+       struct ipr_sdt_entry entry[1];
+}__attribute__((packed, aligned (4)));
+
+/*
+ * Driver types
+ */
+struct ipr_bus_attributes {
+       u8 bus;
+       u8 qas_enabled;
+       u8 bus_width;
+       u8 reserved;
+       u32 max_xfer_rate;
+};
+
+struct ipr_resource_entry {
+       struct ipr_config_table_entry cfgte;
+       u8 needs_sync_complete:1;
+       u8 in_erp:1;
+       u8 add_to_ml:1;
+       u8 del_from_ml:1;
+       u8 resetting_device:1;
+       u8 tcq_active:1;
+
+       int qdepth;
+       struct scsi_device *sdev;
+       struct list_head queue;
+};
+
+struct ipr_resource_hdr {
+       u16 num_entries;
+       u16 reserved;
+};
+
+struct ipr_resource_table {
+       struct ipr_resource_hdr hdr;
+       struct ipr_resource_entry dev[IPR_MAX_PHYSICAL_DEVS];
+};
+
+struct ipr_misc_cbs {
+       struct ipr_ioa_vpd ioa_vpd;
+       struct ipr_inquiry_page3 page3_data;
+       struct ipr_mode_pages mode_pages;
+       struct ipr_supported_device supp_dev;
+};
+
+struct ipr_interrupts {
+       unsigned long set_interrupt_mask_reg;
+       unsigned long clr_interrupt_mask_reg;
+       unsigned long sense_interrupt_mask_reg;
+       unsigned long clr_interrupt_reg;
+
+       unsigned long sense_interrupt_reg;
+       unsigned long ioarrin_reg;
+       unsigned long sense_uproc_interrupt_reg;
+       unsigned long set_uproc_interrupt_reg;
+       unsigned long clr_uproc_interrupt_reg;
+};
+
+struct ipr_chip_cfg_t {
+       u32 mailbox;
+       u8 cache_line_size;
+       struct ipr_interrupts regs;
+};
+
+enum ipr_shutdown_type {
+       IPR_SHUTDOWN_NORMAL = 0x00,
+       IPR_SHUTDOWN_PREPARE_FOR_NORMAL = 0x40,
+       IPR_SHUTDOWN_ABBREV = 0x80,
+       IPR_SHUTDOWN_NONE = 0x100
+};
+
+struct ipr_trace_entry {
+       u32 time;
+
+       u8 op_code;
+       u8 type;
+#define IPR_TRACE_START                        0x00
+#define IPR_TRACE_FINISH               0xff
+       u16 cmd_index;
+
+       u32 res_handle;
+       union {
+               u32 ioasc;
+               u32 add_data;
+               u32 res_addr;
+       } u;
+};
+
+struct ipr_sglist {
+       u32 order;
+       u32 num_sg;
+       u32 buffer_len;
+       struct scatterlist scatterlist[1];
+};
+
+enum ipr_sdt_state {
+       INACTIVE,
+       WAIT_FOR_DUMP,
+       GET_DUMP,
+       ABORT_DUMP,
+       DUMP_OBTAINED
+};
+
+/* Per-controller data */
+struct ipr_ioa_cfg {
+       char eye_catcher[8];
+#define IPR_EYECATCHER                 "iprcfg"
+
+       struct list_head queue;
+
+       u8 allow_interrupts:1;
+       u8 in_reset_reload:1;
+       u8 in_ioa_bringdown:1;
+       u8 ioa_unit_checked:1;
+       u8 ioa_is_dead:1;
+       u8 dump_taken:1;
+       u8 allow_cmds:1;
+       u8 allow_ml_add_del:1;
+
+       u16 type; /* CCIN of the card */
+
+       u8 log_level;
+#define IPR_MAX_LOG_LEVEL                      4
+#define IPR_DEFAULT_LOG_LEVEL          2
+
+#define IPR_NUM_TRACE_INDEX_BITS       8
+#define IPR_NUM_TRACE_ENTRIES          (1 << IPR_NUM_TRACE_INDEX_BITS)
+#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
+       char trace_start[8];
+#define IPR_TRACE_START_LABEL                  "trace"
+       struct ipr_trace_entry *trace;
+       u32 trace_index:IPR_NUM_TRACE_INDEX_BITS;
+
+       /*
+        * Queue for free command blocks
+        */
+       char ipr_free_label[8];
+#define IPR_FREEQ_LABEL                        "free-q"
+       struct list_head free_q;
+
+       /*
+        * Queue for command blocks outstanding to the adapter
+        */
+       char ipr_pending_label[8];
+#define IPR_PENDQ_LABEL                        "pend-q"
+       struct list_head pending_q;
+
+       char cfg_table_start[8];
+#define IPR_CFG_TBL_START              "cfg"
+       struct ipr_config_table *cfg_table;
+       u32 cfg_table_dma;
+
+       char resource_table_label[8];
+#define IPR_RES_TABLE_LABEL            "res_tbl"
+       struct ipr_resource_entry *res_entries;
+       struct list_head free_res_q;
+       struct list_head used_res_q;
+
+       char ipr_hcam_label[8];
+#define IPR_HCAM_LABEL                 "hcams"
+       struct ipr_hostrcb *hostrcb[IPR_NUM_HCAMS];
+       u32 hostrcb_dma[IPR_NUM_HCAMS];
+       struct list_head hostrcb_free_q;
+       struct list_head hostrcb_pending_q;
+
+       u32 *host_rrq;
+       u32 host_rrq_dma;
+#define IPR_HRRQ_REQ_RESP_HANDLE_MASK  0xfffffffc
+#define IPR_HRRQ_RESP_BIT_SET                  0x00000002
+#define IPR_HRRQ_TOGGLE_BIT                            0x00000001
+#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
+       volatile u32 *hrrq_start;
+       volatile u32 *hrrq_end;
+       volatile u32 *hrrq_curr;
+       volatile u32 toggle_bit;
+
+       struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
+
+       const struct ipr_chip_cfg_t *chip_cfg;
+
+       unsigned long hdw_dma_regs;     /* iomapped PCI memory space */
+       unsigned long hdw_dma_regs_pci; /* raw PCI memory space */
+       unsigned long ioa_mailbox;
+       struct ipr_interrupts regs;
+
+       u32 pci_cfg_buf[64];
+       u16 saved_pcix_cmd_reg;
+       u16 reset_retries;
+
+       u32 errors_logged;
+
+       struct Scsi_Host *host;
+       struct pci_dev *pdev;
+       struct ipr_sglist *ucode_sglist;
+       struct ipr_mode_pages *saved_mode_pages;
+       u8 saved_mode_page_len;
+
+       struct work_struct work_q;
+
+       wait_queue_head_t reset_wait_q;
+
+       struct ipr_dump *dump;
+       enum ipr_sdt_state sdt_state;
+
+       struct ipr_misc_cbs *vpd_cbs;
+       u32 vpd_cbs_dma;
+
+       struct pci_pool *ipr_cmd_pool;
+
+       struct ipr_cmnd *reset_cmd;
+
+       char ipr_cmd_label[8];
+#define IPR_CMD_LABEL          "ipr_cmnd"
+       struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
+       u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
+};
+
+struct ipr_cmnd {
+       struct ipr_ioarcb ioarcb;
+       struct ipr_ioasa ioasa;
+       struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
+       struct list_head queue;
+       struct scsi_cmnd *scsi_cmd;
+       struct completion completion;
+       struct timer_list timer;
+       void (*done) (struct ipr_cmnd *);
+       int (*job_step) (struct ipr_cmnd *);
+       u16 cmd_index;
+       u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
+       dma_addr_t sense_buffer_dma;
+       unsigned short dma_use_sg;
+       dma_addr_t dma_handle;
+       union {
+               enum ipr_shutdown_type shutdown_type;
+               struct ipr_hostrcb *hostrcb;
+               unsigned long time_left;
+               unsigned long scratch;
+               struct ipr_resource_entry *res;
+               struct ipr_cmnd *sibling;
+               struct scsi_device *sdev;
+       } u;
+
+       struct ipr_ioa_cfg *ioa_cfg;
+};
+
+struct ipr_ses_table_entry {
+       char product_id[17];
+       char compare_product_id_byte[17];
+       u32 max_bus_speed_limit;        /* MB/sec limit for this backplane */
+};
+
+struct ipr_dump_header {
+       u32 eye_catcher;
+#define IPR_DUMP_EYE_CATCHER           0xC5D4E3F2
+       u32 len;
+       u32 num_entries;
+       u32 first_entry_offset;
+       u32 status;
+#define IPR_DUMP_STATUS_SUCCESS                        0
+#define IPR_DUMP_STATUS_QUAL_SUCCESS           2
+#define IPR_DUMP_STATUS_FAILED                 0xffffffff
+       u32 os;
+#define IPR_DUMP_OS_LINUX      0x4C4E5558
+       u32 driver_name;
+#define IPR_DUMP_DRIVER_NAME   0x49505232
+}__attribute__((packed, aligned (4)));
+
+struct ipr_dump_entry_header {
+       u32 eye_catcher;
+#define IPR_DUMP_EYE_CATCHER           0xC5D4E3F2
+       u32 len;
+       u32 num_elems;
+       u32 offset;
+       u32 data_type;
+#define IPR_DUMP_DATA_TYPE_ASCII       0x41534349
+#define IPR_DUMP_DATA_TYPE_BINARY      0x42494E41
+       u32 id;
+#define IPR_DUMP_IOA_DUMP_ID           0x494F4131
+#define IPR_DUMP_LOCATION_ID           0x4C4F4341
+#define IPR_DUMP_TRACE_ID              0x54524143
+#define IPR_DUMP_DRIVER_VERSION_ID     0x44525652
+#define IPR_DUMP_DRIVER_TYPE_ID        0x54595045
+#define IPR_DUMP_IOA_CTRL_BLK          0x494F4342
+#define IPR_DUMP_PEND_OPS              0x414F5053
+       u32 status;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_dump_location_entry {
+       struct ipr_dump_entry_header hdr;
+       u8 location[BUS_ID_SIZE];
+}__attribute__((packed));
+
+struct ipr_dump_trace_entry {
+       struct ipr_dump_entry_header hdr;
+       u32 trace[IPR_TRACE_SIZE / sizeof(u32)];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_dump_version_entry {
+       struct ipr_dump_entry_header hdr;
+       u8 version[sizeof(IPR_DRIVER_VERSION)];
+};
+
+struct ipr_dump_ioa_type_entry {
+       struct ipr_dump_entry_header hdr;
+       u32 type;
+       u32 fw_version;
+};
+
+struct ipr_driver_dump {
+       struct ipr_dump_header hdr;
+       struct ipr_dump_version_entry version_entry;
+       struct ipr_dump_location_entry location_entry;
+       struct ipr_dump_ioa_type_entry ioa_type_entry;
+       struct ipr_dump_trace_entry trace_entry;
+}__attribute__((packed));
+
+struct ipr_ioa_dump {
+       struct ipr_dump_entry_header hdr;
+       struct ipr_sdt sdt;
+       u32 *ioa_data[IPR_MAX_NUM_DUMP_PAGES];
+       u32 reserved;
+       u32 next_page_index;
+       u32 page_offset;
+       u32 format;
+#define IPR_SDT_FMT2           2
+#define IPR_SDT_UNKNOWN                3
+}__attribute__((packed, aligned (4)));
+
+struct ipr_dump {
+       struct kobject kobj;
+       struct ipr_ioa_cfg *ioa_cfg;
+       struct ipr_driver_dump driver_dump;
+       struct ipr_ioa_dump ioa_dump;
+};
+
+struct ipr_error_table_t {
+       u32 ioasc;
+       int log_ioasa;
+       int log_hcam;
+       char *error;
+};
+
+struct ipr_software_inq_lid_info {
+    u32  load_id;
+    u32  timestamp[3];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ucode_image_header {
+    u32 header_length;
+    u32 lid_table_offset;
+    u8 major_release;
+    u8 card_type;
+    u8 minor_release[2];
+    u8 reserved[20];
+    char eyecatcher[16];
+    u32 num_lids;
+    struct ipr_software_inq_lid_info lid[1];
+}__attribute__((packed, aligned (4)));
+
+/*
+ * Macros
+ */
+#if IPR_DEBUG
+#define IPR_DBG_CMD(CMD) do { CMD; } while (0)
+#else
+#define IPR_DBG_CMD(CMD)
+#endif
+
+#define ipr_breakpoint_data KERN_ERR IPR_NAME\
+": %s: %s: Line: %d ioa_cfg: %p\n", __FILE__, \
+__FUNCTION__, __LINE__, ioa_cfg
+
+#if defined(CONFIG_KDB) && !defined(CONFIG_PPC_ISERIES)
+#define ipr_breakpoint {printk(ipr_breakpoint_data); KDB_ENTER();}
+#define ipr_breakpoint_or_die {printk(ipr_breakpoint_data); KDB_ENTER();}
+#else
+#define ipr_breakpoint
+#define ipr_breakpoint_or_die panic(ipr_breakpoint_data)
+#endif
+
+#ifdef CONFIG_SCSI_IPR_TRACE
+#define ipr_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
+#define ipr_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
+#else
+#define ipr_create_trace_file(kobj, attr) 0
+#define ipr_remove_trace_file(kobj, attr) do { } while(0)
+#endif
+
+#ifdef CONFIG_SCSI_IPR_DUMP
+#define ipr_create_dump_file(kobj, attr) sysfs_create_bin_file(kobj, attr)
+#define ipr_remove_dump_file(kobj, attr) sysfs_remove_bin_file(kobj, attr)
+#else
+#define ipr_create_dump_file(kobj, attr) 0
+#define ipr_remove_dump_file(kobj, attr) do { } while(0)
+#endif
+
+/*
+ * Error logging macros
+ */
+#define ipr_err(...) printk(KERN_ERR IPR_NAME ": "__VA_ARGS__)
+#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)
+#define ipr_crit(...) printk(KERN_CRIT IPR_NAME ": "__VA_ARGS__)
+#define ipr_warn(...) printk(KERN_WARNING IPR_NAME": "__VA_ARGS__)
+#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__))
+
+#define ipr_sdev_printk(level, sdev, fmt, ...) \
+       printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, sdev->host->host_no, \
+               sdev->channel, sdev->id, sdev->lun, ##__VA_ARGS__)
+
+#define ipr_sdev_err(sdev, fmt, ...) \
+       ipr_sdev_printk(KERN_ERR, sdev, fmt, ##__VA_ARGS__)
+
+#define ipr_sdev_info(sdev, fmt, ...) \
+       ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__)
+
+#define ipr_sdev_dbg(sdev, fmt, ...) \
+       IPR_DBG_CMD(ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__))
+
+#define ipr_res_printk(level, ioa_cfg, res, fmt, ...) \
+       printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, ioa_cfg->host->host_no, \
+               res.bus, res.target, res.lun, ##__VA_ARGS__)
+
+#define ipr_res_err(ioa_cfg, res, fmt, ...) \
+       ipr_res_printk(KERN_ERR, ioa_cfg, res, fmt, ##__VA_ARGS__)
+#define ipr_res_dbg(ioa_cfg, res, fmt, ...) \
+       IPR_DBG_CMD(ipr_res_printk(KERN_INFO, ioa_cfg, res, fmt, ##__VA_ARGS__))
+
+#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
+       __FILE__, __FUNCTION__, __LINE__)
+
+#if IPR_DBG_TRACE
+#define ENTER printk(KERN_INFO IPR_NAME": Entering %s\n", __FUNCTION__)
+#define LEAVE printk(KERN_INFO IPR_NAME": Leaving %s\n", __FUNCTION__)
+#else
+#define ENTER
+#define LEAVE
+#endif
+
+#define ipr_err_separator \
+ipr_err("----------------------------------------------------------\n")
+
+
+/*
+ * Inlines
+ */
+
+/**
+ * ipr_is_ioa_resource - Determine if a resource is the IOA
+ * @res:       resource entry struct
+ *
+ * Return value:
+ *     1 if IOA / 0 if not IOA
+ **/
+static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res)
+{
+       return (res->cfgte.flags & IPR_IS_IOA_RESOURCE) ? 1 : 0;
+}
+
+/**
+ * ipr_is_af_dasd_device - Determine if a resource is an AF DASD
+ * @res:       resource entry struct
+ *
+ * Return value:
+ *     1 if AF DASD / 0 if not AF DASD
+ **/
+static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res)
+{
+       if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) &&
+           !ipr_is_ioa_resource(res) &&
+           IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_AF_DASD)
+               return 1;
+       else
+               return 0;
+}
+
+/**
+ * ipr_is_vset_device - Determine if a resource is a VSET
+ * @res:       resource entry struct
+ *
+ * Return value:
+ *     1 if VSET / 0 if not VSET
+ **/
+static inline int ipr_is_vset_device(struct ipr_resource_entry *res)
+{
+       if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) &&
+           !ipr_is_ioa_resource(res) &&
+           IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_VOLUME_SET)
+               return 1;
+       else
+               return 0;
+}
+
+/**
+ * ipr_is_gscsi - Determine if a resource is a generic scsi resource
+ * @res:       resource entry struct
+ *
+ * Return value:
+ *     1 if GSCSI / 0 if not GSCSI
+ **/
+static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
+{
+       if (!ipr_is_ioa_resource(res) &&
+           IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_SCSI)
+               return 1;
+       else
+               return 0;
+}
+
+/**
+ * ipr_is_device - Determine if resource address is that of a device
+ * @res_addr:  resource address struct
+ *
+ * Return value:
+ *     1 if AF / 0 if not AF
+ **/
+static inline int ipr_is_device(struct ipr_res_addr *res_addr)
+{
+       if ((res_addr->bus < IPR_MAX_NUM_BUSES) &&
+           (res_addr->target < IPR_MAX_NUM_TARGETS_PER_BUS))
+               return 1;
+
+       return 0;
+}
+
+/**
+ * ipr_sdt_is_fmt2 - Determine if a SDT address is in format 2
+ * @sdt_word:  SDT address
+ *
+ * Return value:
+ *     1 if format 2 / 0 if not
+ **/
+static inline int ipr_sdt_is_fmt2(u32 sdt_word)
+{
+       u32 bar_sel = IPR_GET_FMT2_BAR_SEL(sdt_word);
+
+       switch (bar_sel) {
+       case IPR_SDT_FMT2_BAR0_SEL:
+       case IPR_SDT_FMT2_BAR1_SEL:
+       case IPR_SDT_FMT2_BAR2_SEL:
+       case IPR_SDT_FMT2_BAR3_SEL:
+       case IPR_SDT_FMT2_BAR4_SEL:
+       case IPR_SDT_FMT2_BAR5_SEL:
+       case IPR_SDT_FMT2_EXP_ROM_SEL:
+               return 1;
+       };
+
+       return 0;
+}
+
+#endif
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
new file mode 100644 (file)
index 0000000..4277db3
--- /dev/null
@@ -0,0 +1,1042 @@
+/*
+*  sym53c500_cs.c      Bob Tracy (rct@frus.com)
+*
+*  A rewrite of the pcmcia-cs add-on driver for newer (circa 1997)
+*  New Media Bus Toaster PCMCIA SCSI cards using the Symbios Logic
+*  53c500 controller: intended for use with 2.6 and later kernels.
+*  The pcmcia-cs add-on version of this driver is not supported
+*  beyond 2.4.  It consisted of three files with history/copyright
+*  information as follows:
+*
+*  SYM53C500.h
+*      Bob Tracy (rct@frus.com)
+*      Original by Tom Corner (tcorner@via.at).
+*      Adapted from NCR53c406a.h which is Copyrighted (C) 1994
+*      Normunds Saumanis (normunds@rx.tech.swh.lv)
+*
+*  SYM53C500.c
+*      Bob Tracy (rct@frus.com)
+*      Original driver by Tom Corner (tcorner@via.at) was adapted
+*      from NCR53c406a.c which is Copyrighted (C) 1994, 1995, 1996 
+*      Normunds Saumanis (normunds@fi.ibm.com)
+*
+*  sym53c500.c
+*      Bob Tracy (rct@frus.com)
+*      Original by Tom Corner (tcorner@via.at) was adapted from a
+*      driver for the Qlogic SCSI card written by
+*      David Hinds (dhinds@allegro.stanford.edu).
+* 
+*  This program is free software; you can redistribute it and/or modify it
+*  under the terms of the GNU General Public License as published by the
+*  Free Software Foundation; either version 2, or (at your option) any
+*  later version.
+*
+*  This program is distributed in the hope that it will be useful, but
+*  WITHOUT ANY WARRANTY; without even the implied warranty of
+*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+*  General Public License for more details.
+*/
+
+#define SYM53C500_DEBUG 0
+#define VERBOSE_SYM53C500_DEBUG 0
+
+/*
+*  Set this to 0 if you encounter kernel lockups while transferring 
+*  data in PIO mode.  Note this can be changed via "sysfs".
+*/
+#define USE_FAST_PIO 1
+
+/* =============== End of user configurable parameters ============== */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+#include <pcmcia/ciscode.h>
+
+/* ================================================================== */
+
+#ifdef PCMCIA_DEBUG
+static int pc_debug = PCMCIA_DEBUG;
+module_param(pc_debug, int, 0);
+#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
+static char *version =
+"sym53c500_cs.c 0.9b 2004/05/10 (Bob Tracy)";
+#else
+#define DEBUG(n, args...)
+#endif
+
+/* ================================================================== */
+
+/* Parameters that can be set with 'insmod' */
+
+/* Bit map of interrupts to choose from */
+static unsigned int irq_mask = 0xdeb8; /* 3, 6, 7, 9-12, 14, 15 */
+static int irq_list[4] = { -1 };
+static int num_irqs = 1;
+
+module_param(irq_mask, int, 0);
+MODULE_PARM_DESC(irq_mask, "IRQ mask bits (default: 0xdeb8)");
+module_param_array(irq_list, int, num_irqs, 0);
+MODULE_PARM_DESC(irq_list, "Comma-separated list of up to 4 IRQs to try (default: auto select).");
+
+/* ================================================================== */
+
+#define SYNC_MODE 0            /* Synchronous transfer mode */
+
+/* Default configuration */
+#define C1_IMG   0x07          /* ID=7 */
+#define C2_IMG   0x48          /* FE SCSI2 */
+#define C3_IMG   0x20          /* CDB */
+#define C4_IMG   0x04          /* ANE */
+#define C5_IMG   0xa4          /* ? changed from b6= AA PI SIE POL */
+#define C7_IMG   0x80          /* added for SYM53C500 t. corner */
+
+/* Hardware Registers: offsets from io_port (base) */
+
+/* Control Register Set 0 */
+#define TC_LSB         0x00            /* transfer counter lsb */
+#define TC_MSB         0x01            /* transfer counter msb */
+#define SCSI_FIFO      0x02            /* scsi fifo register */
+#define CMD_REG                0x03            /* command register */
+#define STAT_REG       0x04            /* status register */
+#define DEST_ID                0x04            /* selection/reselection bus id */
+#define INT_REG                0x05            /* interrupt status register */
+#define SRTIMOUT       0x05            /* select/reselect timeout reg */
+#define SEQ_REG                0x06            /* sequence step register */
+#define SYNCPRD                0x06            /* synchronous transfer period */
+#define FIFO_FLAGS     0x07            /* indicates # of bytes in fifo */
+#define SYNCOFF                0x07            /* synchronous offset register */
+#define CONFIG1                0x08            /* configuration register */
+#define CLKCONV                0x09            /* clock conversion register */
+/* #define TESTREG     0x0A */         /* test mode register */
+#define CONFIG2                0x0B            /* configuration 2 register */
+#define CONFIG3                0x0C            /* configuration 3 register */
+#define CONFIG4                0x0D            /* configuration 4 register */
+#define TC_HIGH                0x0E            /* transfer counter high */
+/* #define FIFO_BOTTOM 0x0F */         /* reserve FIFO byte register */
+
+/* Control Register Set 1 */
+/* #define JUMPER_SENSE        0x00 */         /* jumper sense port reg (r/w) */
+/* #define SRAM_PTR    0x01 */         /* SRAM address pointer reg (r/w) */
+/* #define SRAM_DATA   0x02 */         /* SRAM data register (r/w) */
+#define PIO_FIFO       0x04            /* PIO FIFO registers (r/w) */
+/* #define PIO_FIFO1   0x05 */         /*  */
+/* #define PIO_FIFO2   0x06 */         /*  */
+/* #define PIO_FIFO3   0x07 */         /*  */
+#define PIO_STATUS     0x08            /* PIO status (r/w) */
+/* #define ATA_CMD     0x09 */         /* ATA command/status reg (r/w) */
+/* #define ATA_ERR     0x0A */         /* ATA features/error reg (r/w) */
+#define PIO_FLAG       0x0B            /* PIO flag interrupt enable (r/w) */
+#define CONFIG5                0x09            /* configuration 5 register */
+/* #define SIGNATURE   0x0E */         /* signature register (r) */
+/* #define CONFIG6     0x0F */         /* configuration 6 register (r) */
+#define CONFIG7                0x0d
+
+/* select register set 0 */
+#define REG0(x)                (outb(C4_IMG, (x) + CONFIG4))
+/* select register set 1 */
+#define REG1(x)                outb(C7_IMG, (x) + CONFIG7); outb(C5_IMG, (x) + CONFIG5)
+
+#if SYM53C500_DEBUG
+#define DEB(x) x
+#else
+#define DEB(x)
+#endif
+
+#if VERBOSE_SYM53C500_DEBUG
+#define VDEB(x) x
+#else
+#define VDEB(x)
+#endif
+
+#define LOAD_DMA_COUNT(x, count) \
+  outb(count & 0xff, (x) + TC_LSB); \
+  outb((count >> 8) & 0xff, (x) + TC_MSB); \
+  outb((count >> 16) & 0xff, (x) + TC_HIGH);
+
+/* Chip commands */
+#define DMA_OP               0x80
+
+#define SCSI_NOP             0x00
+#define FLUSH_FIFO           0x01
+#define CHIP_RESET           0x02
+#define SCSI_RESET           0x03
+#define RESELECT             0x40
+#define SELECT_NO_ATN        0x41
+#define SELECT_ATN           0x42
+#define SELECT_ATN_STOP      0x43
+#define ENABLE_SEL           0x44
+#define DISABLE_SEL          0x45
+#define SELECT_ATN3          0x46
+#define RESELECT3            0x47
+#define TRANSFER_INFO        0x10
+#define INIT_CMD_COMPLETE    0x11
+#define MSG_ACCEPT           0x12
+#define TRANSFER_PAD         0x18
+#define SET_ATN              0x1a
+#define RESET_ATN            0x1b
+#define SEND_MSG             0x20
+#define SEND_STATUS          0x21
+#define SEND_DATA            0x22
+#define DISCONN_SEQ          0x23
+#define TERMINATE_SEQ        0x24
+#define TARG_CMD_COMPLETE    0x25
+#define DISCONN              0x27
+#define RECV_MSG             0x28
+#define RECV_CMD             0x29
+#define RECV_DATA            0x2a
+#define RECV_CMD_SEQ         0x2b
+#define TARGET_ABORT_DMA     0x04
+
+/* ================================================================== */
+
+/*
+*  Per-card PCMCIA bookkeeping: the Card Services link/node plus the
+*  SCSI host registered for this socket.
+*/
+struct scsi_info_t {
+       dev_link_t link;
+       dev_node_t node;
+       struct Scsi_Host *host;
+       unsigned short manf_id;         /* CISTPL_MANFID, for chip quirks */
+};
+
+/*
+*  Repository for per-instance host data.
+*/
+struct sym53c500_data {
+       struct scsi_cmnd *current_SC;   /* single outstanding command (can_queue == 1) */
+       int fast_pio;                   /* nonzero: use 32-bit insl/outsl bursts */
+};
+
+/* SCSI bus phase currently tracked in curSC->SCp.phase */
+enum Phase {
+    idle,
+    data_out,
+    data_in,
+    command_ph,
+    status_ph,
+    message_out,
+    message_in
+};
+
+/* ================================================================== */
+
+/*
+*  Global (within this module) variables other than
+*  sym53c500_driver_template (the scsi_host_template).
+*/
+static dev_link_t *dev_list;
+static dev_info_t dev_info = "sym53c500_cs";
+
+/* ================================================================== */
+
+/*
+*  Program the chip to its initial state: clear the PIO status/flag
+*  registers in register set 1, then load the CONFIG1-4 images and
+*  the clock/timeout/sync timing registers in register set 0.
+*/
+static void
+chip_init(int io_port)
+{
+       REG1(io_port);
+       outb(0x01, io_port + PIO_STATUS);
+       outb(0x00, io_port + PIO_FLAG);
+
+       outb(C4_IMG, io_port + CONFIG4);        /* REG0(io_port); */
+       outb(C3_IMG, io_port + CONFIG3);
+       outb(C2_IMG, io_port + CONFIG2);
+       outb(C1_IMG, io_port + CONFIG1);
+
+       outb(0x05, io_port + CLKCONV);  /* clock conversion factor */
+       outb(0x9C, io_port + SRTIMOUT); /* Selection timeout */
+       outb(0x05, io_port + SYNCPRD);  /* Synchronous transfer period */
+       outb(SYNC_MODE, io_port + SYNCOFF);     /* synchronous mode */  
+}
+
+/*
+*  Hard-reset the chip and the SCSI bus, then reprogram the chip via
+*  chip_init().  A SCSI_NOP must follow CHIP_RESET before any other
+*  command is issued.
+*/
+static void
+SYM53C500_int_host_reset(int io_port)
+{
+       outb(C4_IMG, io_port + CONFIG4);        /* REG0(io_port); */
+       outb(CHIP_RESET, io_port + CMD_REG);
+       outb(SCSI_NOP, io_port + CMD_REG);      /* required after reset */
+       outb(SCSI_RESET, io_port + CMD_REG);
+       chip_init(io_port);
+}
+
+/*
+*  Read "reqlen" bytes from the chip's PIO fifo into "request".
+*  Each burst is sized from the fifo-level bits of PIO_STATUS; with
+*  fast_pio, 32-bit insl() is used when 4+ bytes are available.
+*  Always returns 0, including on the error (bit 0x80) path.
+*/
+static __inline__ int
+SYM53C500_pio_read(int fast_pio, int base, unsigned char *request, unsigned int reqlen)
+{
+       int i;
+       int len;        /* current scsi fifo size */
+
+       REG1(base);
+       while (reqlen) {
+               i = inb(base + PIO_STATUS);
+               /* VDEB(printk("pio_status=%x\n", i)); */
+               if (i & 0x80)   /* error bit set: bail out */
+                       return 0;
+
+               switch (i & 0x1e) {
+               default:
+               case 0x10:      /* fifo empty */
+                       len = 0;
+                       break;
+               case 0x0:
+                       len = 1;
+                       break; 
+               case 0x8:       /* fifo 1/3 full */
+                       len = 42;
+                       break;
+               case 0xc:       /* fifo 2/3 full */
+                       len = 84;
+                       break;
+               case 0xe:       /* fifo full */
+                       len = 128;
+                       break;
+               }
+
+               if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */
+                       return 0;
+               }
+
+               if (len) {
+                       if (len > reqlen) 
+                               len = reqlen;
+
+                       if (fast_pio && len > 3) {
+                               /* whole 32-bit words; the 1-3 leftover bytes
+                                  are picked up on the next pass */
+                               insl(base + PIO_FIFO, request, len >> 2);
+                               request += len & 0xfc; 
+                               reqlen -= len & 0xfc; 
+                       } else {
+                               while (len--) {
+                                       *request++ = inb(base + PIO_FIFO);
+                                       reqlen--;
+                               }
+                       } 
+               }
+       }
+       return 0;
+}
+
+/*
+*  Write "reqlen" bytes from "request" into the chip's PIO fifo.
+*  Burst size is derived from the fifo-level bits of PIO_STATUS
+*  (inverse of the read table: emptier fifo -> bigger burst).  The
+*  loop also stops once the interrupt bit (0x40) is seen.  Always
+*  returns 0, including on the error (0x80) path.
+*/
+static __inline__ int
+SYM53C500_pio_write(int fast_pio, int base, unsigned char *request, unsigned int reqlen)
+{
+       int i = 0;
+       int len;        /* current scsi fifo size */
+
+       REG1(base);
+       while (reqlen && !(i & 0x40)) {
+               i = inb(base + PIO_STATUS);
+               /* VDEB(printk("pio_status=%x\n", i)); */
+               if (i & 0x80)   /* error */
+                       return 0;
+
+               switch (i & 0x1e) {
+               case 0x10:      /* fifo empty: room for a full burst */
+                       len = 128;
+                       break;
+               case 0x0:
+                       len = 84;
+                       break;
+               case 0x8:
+                       len = 42;
+                       break;
+               case 0xc:
+                       len = 1;
+                       break;
+               default:
+               case 0xe:       /* fifo full: nothing to do this pass */
+                       len = 0;
+                       break;
+               }
+
+               if (len) {
+                       if (len > reqlen)
+                               len = reqlen;
+
+                       if (fast_pio && len > 3) {
+                               outsl(base + PIO_FIFO, request, len >> 2);
+                               request += len & 0xfc;
+                               reqlen -= len & 0xfc;
+                       } else {
+                               while (len--) {
+                                       outb(*request++, base + PIO_FIFO);
+                                       reqlen--;
+                               }
+                       }
+               }
+       }
+       return 0;
+}
+
+/*
+*  Interrupt handler.  Reads the chip and PIO status, completes the
+*  current command on reset/PIO/parity/gross errors or on disconnect,
+*  and otherwise drives the SCSI phase engine: data in/out via the
+*  PIO helpers, status, and message in/out.  Runs under host_lock.
+*  NOTE(review): assumes data->current_SC is non-NULL whenever the
+*  chip interrupts -- a spurious interrupt before a command is queued
+*  would dereference NULL; confirm against the era's qlogicfas.c.
+*/
+static irqreturn_t
+SYM53C500_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+       unsigned long flags;
+       struct Scsi_Host *dev = dev_id;
+       DEB(unsigned char fifo_size;)
+       DEB(unsigned char seq_reg;)
+       unsigned char status, int_reg;
+       unsigned char pio_status;
+       struct scatterlist *sglist;
+       unsigned int sgcount;
+       int port_base = dev->io_port;
+       struct sym53c500_data *data =
+           (struct sym53c500_data *)dev->hostdata;
+       struct scsi_cmnd *curSC = data->current_SC;
+       int fast_pio = data->fast_pio;
+
+       spin_lock_irqsave(dev->host_lock, flags);
+
+       VDEB(printk("SYM53C500_intr called\n"));
+
+       REG1(port_base);
+       pio_status = inb(port_base + PIO_STATUS);
+       REG0(port_base);
+       status = inb(port_base + STAT_REG);
+       DEB(seq_reg = inb(port_base + SEQ_REG));
+       int_reg = inb(port_base + INT_REG);
+       DEB(fifo_size = inb(port_base + FIFO_FLAGS) & 0x1f);
+
+#if SYM53C500_DEBUG
+       printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x", 
+           status, seq_reg, int_reg, fifo_size);
+       printk(", pio=%02x\n", pio_status);
+#endif /* SYM53C500_DEBUG */
+
+       if (int_reg & 0x80) {   /* SCSI reset intr */
+               DEB(printk("SYM53C500: reset intr received\n"));
+               curSC->result = DID_RESET << 16;
+               goto idle_out;
+       }
+
+       if (pio_status & 0x80) {
+               printk("SYM53C500: Warning: PIO error!\n");
+               curSC->result = DID_ERROR << 16;
+               goto idle_out;
+       }
+
+       if (status & 0x20) {            /* Parity error */
+               printk("SYM53C500: Warning: parity error!\n");
+               curSC->result = DID_PARITY << 16;
+               goto idle_out;
+       }
+
+       if (status & 0x40) {            /* Gross error */
+               printk("SYM53C500: Warning: gross error!\n");
+               curSC->result = DID_ERROR << 16;
+               goto idle_out;
+       }
+
+       if (int_reg & 0x20) {           /* Disconnect */
+               DEB(printk("SYM53C500: disconnect intr received\n"));
+               if (curSC->SCp.phase != message_in) {   /* Unexpected disconnect */
+                       curSC->result = DID_NO_CONNECT << 16;
+               } else {        /* Command complete, return status and message */
+                       curSC->result = (curSC->SCp.Status & 0xff)
+                           | ((curSC->SCp.Message & 0xff) << 8) | (DID_OK << 16);
+               }
+               goto idle_out;
+       }
+
+       switch (status & 0x07) {        /* scsi phase */
+       case 0x00:                      /* DATA-OUT */
+               if (int_reg & 0x10) {   /* Target requesting info transfer */
+                       curSC->SCp.phase = data_out;
+                       VDEB(printk("SYM53C500: Data-Out phase\n"));
+                       outb(FLUSH_FIFO, port_base + CMD_REG);
+                       LOAD_DMA_COUNT(port_base, curSC->request_bufflen);      /* Max transfer size */
+                       outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
+                       if (!curSC->use_sg)     /* Don't use scatter-gather */
+                               SYM53C500_pio_write(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen);
+                       else {  /* use scatter-gather */
+                               sgcount = curSC->use_sg;
+                               sglist = curSC->request_buffer;
+                               while (sgcount--) {
+                                       SYM53C500_pio_write(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length);
+                                       sglist++;
+                               }
+                       }
+                       REG0(port_base);        /* PIO helpers leave register set 1 selected */
+               }
+               break;
+
+       case 0x01:              /* DATA-IN */
+               if (int_reg & 0x10) {   /* Target requesting info transfer */
+                       curSC->SCp.phase = data_in;
+                       VDEB(printk("SYM53C500: Data-In phase\n"));
+                       outb(FLUSH_FIFO, port_base + CMD_REG);
+                       LOAD_DMA_COUNT(port_base, curSC->request_bufflen);      /* Max transfer size */
+                       outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
+                       if (!curSC->use_sg)     /* Don't use scatter-gather */
+                               SYM53C500_pio_read(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen);
+                       else {  /* Use scatter-gather */
+                               sgcount = curSC->use_sg;
+                               sglist = curSC->request_buffer;
+                               while (sgcount--) {
+                                       SYM53C500_pio_read(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length);
+                                       sglist++;
+                               }
+                       }
+                       REG0(port_base);
+               }
+               break;
+
+       case 0x02:              /* COMMAND */
+               curSC->SCp.phase = command_ph;
+               printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n");
+               break;
+
+       case 0x03:              /* STATUS */
+               curSC->SCp.phase = status_ph;
+               VDEB(printk("SYM53C500: Status phase\n"));
+               outb(FLUSH_FIFO, port_base + CMD_REG);
+               outb(INIT_CMD_COMPLETE, port_base + CMD_REG);
+               break;
+
+       case 0x04:              /* Reserved */
+       case 0x05:              /* Reserved */
+               printk("SYM53C500: WARNING: Reserved phase!!!\n");
+               break;
+
+       case 0x06:              /* MESSAGE-OUT */
+               DEB(printk("SYM53C500: Message-Out phase\n"));
+               curSC->SCp.phase = message_out;
+               outb(SET_ATN, port_base + CMD_REG);     /* Reject the message */
+               outb(MSG_ACCEPT, port_base + CMD_REG);
+               break;
+
+       case 0x07:              /* MESSAGE-IN */
+               VDEB(printk("SYM53C500: Message-In phase\n"));
+               curSC->SCp.phase = message_in;
+
+               /* status byte then message byte arrive in the SCSI fifo */
+               curSC->SCp.Status = inb(port_base + SCSI_FIFO);
+               curSC->SCp.Message = inb(port_base + SCSI_FIFO);
+
+               VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f));
+               DEB(printk("Status = %02x  Message = %02x\n", curSC->SCp.Status, curSC->SCp.Message));
+
+               if (curSC->SCp.Message == SAVE_POINTERS || curSC->SCp.Message == DISCONNECT) {
+                       outb(SET_ATN, port_base + CMD_REG);     /* Reject message */
+                       DEB(printk("Discarding SAVE_POINTERS message\n"));
+               }
+               outb(MSG_ACCEPT, port_base + CMD_REG);
+               break;
+       }
+out:
+       spin_unlock_irqrestore(dev->host_lock, flags);
+       return IRQ_HANDLED;
+
+idle_out:
+       /* command finished (or failed): mark idle and complete it */
+       curSC->SCp.phase = idle;
+       curSC->scsi_done(curSC);
+       goto out;
+}
+
+/*
+*  Tear down one card instance: unregister the SCSI host, free the
+*  IRQ/DMA/IO resources, release the PCMCIA configuration, and drop
+*  the host reference.
+*/
+static void
+SYM53C500_release(dev_link_t *link)
+{
+       struct scsi_info_t *info = link->priv;
+       struct Scsi_Host *shost = info->host;
+
+       DEBUG(0, "SYM53C500_release(0x%p)\n", link);
+
+       /*
+       *  Do this before releasing/freeing resources.
+       */
+       scsi_remove_host(shost);
+
+       /*
+       *  Interrupts getting hosed on card removal.  Try
+       *  the following code, mostly from qlogicfas.c.
+       */
+       if (shost->irq)
+               free_irq(shost->irq, shost);
+       if (shost->dma_channel != 0xff)
+               free_dma(shost->dma_channel);
+       if (shost->io_port && shost->n_io_port)
+               release_region(shost->io_port, shost->n_io_port);
+
+       link->dev = NULL;
+
+       pcmcia_release_configuration(link->handle);
+       pcmcia_release_io(link->handle, &link->io);
+       pcmcia_release_irq(link->handle, &link->irq);
+
+       link->state &= ~DEV_CONFIG;
+
+       scsi_host_put(shost);
+} /* SYM53C500_release */
+
+/*
+*  scsi_host_template .info hook: one-line description of the host.
+*  Formats into a static buffer, so concurrent callers would share
+*  (and could clobber) the same string.
+*/
+static const char*
+SYM53C500_info(struct Scsi_Host *SChost)
+{
+       static char info_msg[256];
+       struct sym53c500_data *data =
+           (struct sym53c500_data *)SChost->hostdata;
+
+       DEB(printk("SYM53C500_info called\n"));
+       (void)snprintf(info_msg, sizeof(info_msg),
+           "SYM53C500 at 0x%lx, IRQ %d, %s PIO mode.", 
+           SChost->io_port, SChost->irq, data->fast_pio ? "fast" : "slow");
+       return (info_msg);
+}
+
+/*
+*  scsi_host_template .queuecommand hook.  Records the command as the
+*  single outstanding one (can_queue == 1), loads the CDB into the
+*  SCSI fifo and starts selection; the rest of the command is driven
+*  from the interrupt handler.  Always returns 0 (accepted).
+*/
+static int 
+SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+       int i;
+       int port_base = SCpnt->device->host->io_port;
+       struct sym53c500_data *data =
+           (struct sym53c500_data *)SCpnt->device->host->hostdata;
+
+       VDEB(printk("SYM53C500_queue called\n"));
+
+       DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", 
+           SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id, 
+           SCpnt->device->lun,  SCpnt->request_bufflen));
+
+       VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
+           printk("cmd[%d]=%02x  ", i, SCpnt->cmnd[i]));
+       VDEB(printk("\n"));
+
+       data->current_SC = SCpnt;
+       data->current_SC->scsi_done = done;
+       data->current_SC->SCp.phase = command_ph;
+       data->current_SC->SCp.Status = 0;
+       data->current_SC->SCp.Message = 0;
+
+       /* We are locked here already by the mid layer */
+       REG0(port_base);
+       outb(SCpnt->device->id, port_base + DEST_ID);   /* set destination */
+       outb(FLUSH_FIFO, port_base + CMD_REG);  /* reset the fifos */
+
+       for (i = 0; i < SCpnt->cmd_len; i++) {
+               outb(SCpnt->cmnd[i], port_base + SCSI_FIFO);
+       }
+       outb(SELECT_NO_ATN, port_base + CMD_REG);
+
+       return 0;
+}
+
+/*
+*  scsi_host_template .eh_host_reset_handler hook: hard-reset the
+*  chip and bus.  Always reports SUCCESS.
+*/
+static int 
+SYM53C500_host_reset(struct scsi_cmnd *SCpnt)
+{
+       int port_base = SCpnt->device->host->io_port;
+
+       DEB(printk("SYM53C500_host_reset called\n"));
+       SYM53C500_int_host_reset(port_base);
+
+       return SUCCESS;
+}
+
+/*
+*  scsi_host_template .bios_param hook: fabricate a CHS geometry
+*  (64/32, or 255/63 for "big" disks).  Note the sector_t capacity is
+*  truncated to int here, which caps the usable capacity computed for
+*  very large disks.
+*/
+static int 
+SYM53C500_biosparm(struct scsi_device *disk,
+    struct block_device *dev,
+    sector_t capacity, int *info_array)
+{
+       int size;
+
+       DEB(printk("SYM53C500_biosparm called\n"));
+
+       size = capacity;
+       info_array[0] = 64;             /* heads */
+       info_array[1] = 32;             /* sectors */
+       info_array[2] = size >> 11;     /* cylinders */
+       if (info_array[2] > 1024) {     /* big disk */
+               info_array[0] = 255;
+               info_array[1] = 63;
+               info_array[2] = size / (255 * 63);
+       }
+       return 0;
+}
+
+/*
+*  sysfs "fast_pio" show handler: prints 0 or 1 plus newline.
+*/
+static ssize_t
+SYM53C500_show_pio(struct class_device *cdev, char *buf)
+{
+       struct Scsi_Host *SHp = class_to_shost(cdev);
+       struct sym53c500_data *data =
+           (struct sym53c500_data *)SHp->hostdata;
+
+       return snprintf(buf, 4, "%d\n", data->fast_pio);
+}
+
+/*
+*  sysfs "fast_pio" store handler: accepts exactly "0" or "1";
+*  anything else is rejected with -EINVAL.
+*/
+static ssize_t
+SYM53C500_store_pio(struct class_device *cdev, const char *buf, size_t count)
+{
+       int pio;
+       struct Scsi_Host *SHp = class_to_shost(cdev);
+       struct sym53c500_data *data =
+           (struct sym53c500_data *)SHp->hostdata;
+
+       pio = simple_strtoul(buf, NULL, 0);
+       if (pio == 0 || pio == 1) {
+               data->fast_pio = pio;
+               return count;
+       }
+       else
+               return -EINVAL;
+}
+
+/*
+*  SCSI HBA device attributes we want to
+*  make available via sysfs.
+*/
+static struct class_device_attribute SYM53C500_pio_attr = {
+       .attr = {
+               .name = "fast_pio",
+               .mode = (S_IRUGO | S_IWUSR),
+       },
+       .show = SYM53C500_show_pio,
+       .store = SYM53C500_store_pio,
+};
+
+static struct class_device_attribute *SYM53C500_shost_attrs[] = {
+       &SYM53C500_pio_attr,
+       NULL,
+};
+
+/*
+*  scsi_host_template initializer
+*/
+static struct scsi_host_template sym53c500_driver_template = {
+     .module                   = THIS_MODULE,
+     .name                     = "SYM53C500",
+     .info                     = SYM53C500_info,
+     .queuecommand             = SYM53C500_queue,
+     .eh_host_reset_handler    = SYM53C500_host_reset,
+     .bios_param               = SYM53C500_biosparm,
+     .proc_name                        = "SYM53C500",
+     .can_queue                        = 1,
+     .this_id                  = 7,
+     .sg_tablesize             = 32,
+     .cmd_per_lun              = 1,
+     .use_clustering           = ENABLE_CLUSTERING,
+     .shost_attrs              = SYM53C500_shost_attrs
+};
+
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+/*
+*  Configure a freshly inserted card: walk the CIS for a usable I/O
+*  window, request IRQ and configuration, apply manufacturer-specific
+*  quirks, initialize the chip, then allocate and register the SCSI
+*  host.  On CS failure jumps to cs_failed -> SYM53C500_release().
+*/
+static void
+SYM53C500_config(dev_link_t *link)
+{
+       client_handle_t handle = link->handle;
+       struct scsi_info_t *info = link->priv;
+       tuple_t tuple;
+       cisparse_t parse;
+       int i, last_ret, last_fn;
+       int irq_level, port_base;
+       unsigned short tuple_data[32];  /* 64 bytes, matches TupleDataMax */
+       struct Scsi_Host *host;
+       struct scsi_host_template *tpnt = &sym53c500_driver_template;
+       struct sym53c500_data *data;
+
+       DEBUG(0, "SYM53C500_config(0x%p)\n", link);
+
+       tuple.TupleData = (cisdata_t *)tuple_data;
+       tuple.TupleDataMax = 64;
+       tuple.TupleOffset = 0;
+       tuple.DesiredTuple = CISTPL_CONFIG;
+       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+       CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
+       CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
+       link->conf.ConfigBase = parse.config.base;
+
+       /* Manufacturer ID is optional; quirks below key off it. */
+       tuple.DesiredTuple = CISTPL_MANFID;
+       if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) &&
+           (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS))
+               info->manf_id = le16_to_cpu(tuple.TupleData[0]);
+
+       /* Configure card */
+       link->state |= DEV_CONFIG;
+
+       /* Take the first CFTABLE entry whose I/O window can be granted. */
+       tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+       CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
+       while (1) {
+               if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
+                   pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
+                       goto next_entry;
+               link->conf.ConfigIndex = parse.cftable_entry.index;
+               link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
+               link->io.NumPorts1 = parse.cftable_entry.io.win[0].len;
+
+               if (link->io.BasePort1 != 0) {
+                       i = pcmcia_request_io(handle, &link->io);
+                       if (i == CS_SUCCESS)
+                               break;
+               }
+next_entry:
+               CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
+       }
+
+       CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
+       CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
+
+       /*
+       *  That's the trouble with copying liberally from another driver.
+       *  Some things probably aren't relevant, and I suspect this entire
+       *  section dealing with manufacturer IDs can be scrapped.       --rct
+       */
+       if ((info->manf_id == MANFID_MACNICA) ||
+           (info->manf_id == MANFID_PIONEER) ||
+           (info->manf_id == 0x0098)) {
+               /* set ATAcmd */
+               outb(0xb4, link->io.BasePort1 + 0xd);
+               outb(0x24, link->io.BasePort1 + 0x9);
+               outb(0x04, link->io.BasePort1 + 0xd);
+       }
+
+       /*
+       *  irq_level == 0 implies tpnt->can_queue == 0, which
+       *  is not supported in 2.6.  Thus, only irq_level > 0
+       *  will be allowed.
+       *
+       *  Possible port_base values are as follows:
+       *
+       *       0x130, 0x230, 0x280, 0x290,
+       *       0x320, 0x330, 0x340, 0x350
+       */
+       port_base = link->io.BasePort1;
+       irq_level = link->irq.AssignedIRQ;
+
+       DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n",
+           port_base, irq_level, USE_FAST_PIO);)
+
+       chip_init(port_base);
+
+       host = scsi_host_alloc(tpnt, sizeof(struct sym53c500_data));
+       if (!host) {
+               printk("SYM53C500: Unable to register host, giving up.\n");
+               goto err_release;
+       }
+
+       data = (struct sym53c500_data *)host->hostdata;
+
+       if (irq_level > 0) {
+               if (request_irq(irq_level, SYM53C500_intr, 0, "SYM53C500", host)) {
+                       printk("SYM53C500: unable to allocate IRQ %d\n", irq_level);
+                       goto err_free_scsi;
+               }
+               DEB(printk("SYM53C500: allocated IRQ %d\n", irq_level));
+       } else if (irq_level == 0) {
+               DEB(printk("SYM53C500: No interrupts detected\n"));
+               goto err_free_scsi;
+       } else {
+               DEB(printk("SYM53C500: Shouldn't get here!\n"));
+               goto err_free_scsi;
+       }
+
+       host->unique_id = port_base;
+       host->irq = irq_level;
+       host->io_port = port_base;
+       host->n_io_port = 0x10;
+       host->dma_channel = -1;
+
+       /*
+       *  Note fast_pio is set to USE_FAST_PIO by
+       *  default, but can be changed via "sysfs".
+       */
+       data->fast_pio = USE_FAST_PIO;
+
+       sprintf(info->node.dev_name, "scsi%d", host->host_no);
+       link->dev = &info->node;
+       info->host = host;
+
+       if (scsi_add_host(host, NULL))
+               goto err_free_irq;
+
+       scsi_scan_host(host);
+
+       goto out;       /* SUCCESS */
+
+err_free_irq:
+       free_irq(irq_level, host);
+err_free_scsi:
+       scsi_host_put(host);
+err_release:
+       release_region(port_base, 0x10);
+       printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n");
+
+out:
+       link->state &= ~DEV_CONFIG_PENDING;
+       return;
+
+cs_failed:
+       cs_error(link->handle, last_fn, last_ret);
+       SYM53C500_release(link);
+       return;
+} /* SYM53C500_config */
+
+/*
+*  Card Services event callback: handles insertion/removal,
+*  suspend/resume and physical reset.  On resume/reset the PCMCIA
+*  configuration is restored, quirks reapplied, and the chip reset.
+*  Always returns 0.
+*/
+static int
+SYM53C500_event(event_t event, int priority, event_callback_args_t *args)
+{
+       dev_link_t *link = args->client_data;
+       struct scsi_info_t *info = link->priv;
+
+       DEBUG(1, "SYM53C500_event(0x%06x)\n", event);
+
+       switch (event) {
+       case CS_EVENT_CARD_REMOVAL:
+               link->state &= ~DEV_PRESENT;
+               if (link->state & DEV_CONFIG)
+                       SYM53C500_release(link);
+               break;
+       case CS_EVENT_CARD_INSERTION:
+               link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+               SYM53C500_config(link);
+               break;
+       case CS_EVENT_PM_SUSPEND:
+               link->state |= DEV_SUSPEND;
+               /* Fall through... */
+       case CS_EVENT_RESET_PHYSICAL:
+               if (link->state & DEV_CONFIG)
+                       pcmcia_release_configuration(link->handle);
+               break;
+       case CS_EVENT_PM_RESUME:
+               link->state &= ~DEV_SUSPEND;
+               /* Fall through... */
+       case CS_EVENT_CARD_RESET:
+               if (link->state & DEV_CONFIG) {
+                       pcmcia_request_configuration(link->handle, &link->conf);
+                       /* See earlier comment about manufacturer IDs. */
+                       if ((info->manf_id == MANFID_MACNICA) ||
+                           (info->manf_id == MANFID_PIONEER) ||
+                           (info->manf_id == 0x0098)) {
+                               outb(0x80, link->io.BasePort1 + 0xd);
+                               outb(0x24, link->io.BasePort1 + 0x9);
+                               outb(0x04, link->io.BasePort1 + 0xd);
+                       }
+                       /*
+                       *  If things don't work after a "resume",
+                       *  this is a good place to start looking.
+                       */
+                       SYM53C500_int_host_reset(link->io.BasePort1);
+               }
+               break;
+       }
+       return 0;
+} /* SYM53C500_event */
+
+/*
+*  Detach an instance: release resources if still configured,
+*  deregister from Card Services, unlink from dev_list and free the
+*  scsi_info_t allocated by SYM53C500_attach().
+*/
+static void
+SYM53C500_detach(dev_link_t *link)
+{
+       dev_link_t **linkp;
+
+       DEBUG(0, "SYM53C500_detach(0x%p)\n", link);
+
+       /* Locate device structure */
+       for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+               if (*linkp == link)
+                       break;
+       if (*linkp == NULL)
+               return;
+
+       if (link->state & DEV_CONFIG)
+               SYM53C500_release(link);
+
+       if (link->handle)
+               pcmcia_deregister_client(link->handle);
+
+       /* Unlink device structure, free bits. */
+       *linkp = link->next;
+       kfree(link->priv);
+       link->priv = NULL;
+} /* SYM53C500_detach */
+
+/*
+*  Create a new instance: allocate and zero the scsi_info_t, fill in
+*  the default I/O, IRQ and configuration requests, link it into
+*  dev_list and register with Card Services.  Returns the new link,
+*  or NULL on allocation/registration failure.
+*/
+static dev_link_t *
+SYM53C500_attach(void)
+{
+       struct scsi_info_t *info;
+       client_reg_t client_reg;
+       dev_link_t *link;
+       int i, ret;
+
+       DEBUG(0, "SYM53C500_attach()\n");
+
+       /* Create new SCSI device */
+       info = kmalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return NULL;
+       memset(info, 0, sizeof(*info));
+       link = &info->link;
+       link->priv = info;
+       link->io.NumPorts1 = 16;
+       link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+       link->io.IOAddrLines = 10;
+       link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
+       link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID;
+       if (irq_list[0] == -1)
+               link->irq.IRQInfo2 = irq_mask;
+       else
+               for (i = 0; i < 4; i++)
+                       link->irq.IRQInfo2 |= 1 << irq_list[i];
+       link->conf.Attributes = CONF_ENABLE_IRQ;
+       link->conf.Vcc = 50;
+       link->conf.IntType = INT_MEMORY_AND_IO;
+       link->conf.Present = PRESENT_OPTION;
+
+       /* Register with Card Services */
+       link->next = dev_list;
+       dev_list = link;
+       client_reg.dev_info = &dev_info;
+       client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE;
+       client_reg.event_handler = &SYM53C500_event;
+       client_reg.EventMask = CS_EVENT_RESET_REQUEST | CS_EVENT_CARD_RESET |
+           CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL |
+           CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME;
+       client_reg.Version = 0x0210;
+       client_reg.event_callback_args.client_data = link;
+       ret = pcmcia_register_client(&link->handle, &client_reg);
+       if (ret != 0) {
+               cs_error(link->handle, RegisterClient, ret);
+               SYM53C500_detach(link);
+               return NULL;
+       }
+
+       return link;
+} /* SYM53C500_attach */
+
+MODULE_AUTHOR("Bob Tracy <rct@frus.com>");
+MODULE_DESCRIPTION("SYM53C500 PCMCIA SCSI driver");
+MODULE_LICENSE("GPL");
+
+static struct pcmcia_driver sym53c500_cs_driver = {
+       .owner          = THIS_MODULE,
+       .drv            = {
+               .name   = "sym53c500_cs",
+       },
+       .attach         = SYM53C500_attach,
+       .detach         = SYM53C500_detach,
+};
+
+/* Module init: register the PCMCIA driver; attach/detach callbacks
+   do the per-card work. */
+static int __init
+init_sym53c500_cs(void)
+{
+       return pcmcia_register_driver(&sym53c500_cs_driver);
+}
+
+/* Module exit: unregister the PCMCIA driver. */
+static void __exit
+exit_sym53c500_cs(void)
+{
+       pcmcia_unregister_driver(&sym53c500_cs_driver);
+}
+
+module_init(init_sym53c500_cs);
+module_exit(exit_sym53c500_cs);
diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h
new file mode 100644 (file)
index 0000000..f01cbd6
--- /dev/null
@@ -0,0 +1,120 @@
+/* to be used by qlogicfas and qlogic_cs */
+#ifndef __QLOGICFAS408_H
+#define __QLOGICFAS408_H
+
+/*----------------------------------------------------------------*/
+/* Configuration */
+
+/* Set the following to max out the speed of the PIO PseudoDMA transfers,
+   again, 0 tends to be slower, but more stable.  */
+
+#define QL_TURBO_PDMA 1
+
+/* This should be 1 to enable parity detection */
+
+#define QL_ENABLE_PARITY 1
+
+/* This will reset all devices when the driver is initialized (during bootup).
+   The other linux drivers don't do this, but the DOS drivers do, and after
+   using DOS or some kind of crash or lockup this will bring things back
+   without requiring a cold boot.  It does take some time to recover from a
+   reset, so it is slower, and I have seen timeouts so that devices weren't
+   recognized when this was set. */
+
+#define QL_RESET_AT_START 0
+
+/* crystal frequency in megahertz (for offset 5 and 9)
+   Please set this for your card.  Most Qlogic cards are 40 Mhz.  The
+   Control Concepts ISA (not VLB) is 24 Mhz */
+
+#define XTALFREQ       40
+
+/**********/
+/* DANGER! modify these at your own risk */
+/* SLOWCABLE can usually be reset to zero if you have a clean setup and
+   proper termination.  The rest are for synchronous transfers and other
+   advanced features if your device can transfer faster than 5Mb/sec.
+   If you are really curious, email me for a quick howto until I have
+   something official */
+/**********/
+
+/*****/
+/* config register 1 (offset 8) options */
+/* This needs to be set to 1 if your cabling is long or noisy */
+#define SLOWCABLE 1
+
+/*****/
+/* offset 0xc */
+/* This will set fast (10Mhz) synchronous timing when set to 1
+   For this to have an effect, FASTCLK must also be 1 */
+#define FASTSCSI 0
+
+/* This when set to 1 will set a faster sync transfer rate */
+#define FASTCLK 0      /*(XTALFREQ>25?1:0)*/
+
+/*****/
+/* offset 6 */
+/* This is the sync transfer divisor, XTALFREQ/X will be the maximum
+   achievable data rate (assuming the rest of the system is capable
+   and set properly) */
+#define SYNCXFRPD 5    /*(XTALFREQ/5)*/
+
+/*****/
+/* offset 7 */
+/* This is the count of how many synchronous transfers can take place
+       i.e. how many reqs can occur before an ack is given.
+       The maximum value for this is 15, the upper bits can modify
+       REQ/ACK assertion and deassertion during synchronous transfers
+       If this is 0, the bus will only transfer asynchronously */
+#define SYNCOFFST 0
+/* for the curious, bits 7&6 control the deassertion delay in 1/2 cycles
+       of the 40Mhz clock. If FASTCLK is 1, specifying 01 (1/2) will
+       cause the deassertion to be early by 1/2 clock.  Bits 5&4 control
+       the assertion delay, also in 1/2 clocks (FASTCLK is ignored here). */
+
+/*----------------------------------------------------------------*/
+
+/* Per-host private state shared by qlogicfas and qlogic_cs. */
+struct qlogicfas408_priv {
+        int            qbase;          /* Port */
+        int            qinitid;        /* initiator ID */
+        int            qabort;         /* Flag to cause an abort */
+        int            qlirq;          /* IRQ being used */
+        int            int_type;       /* type of irq, 2 for ISA board, 0 for PCMCIA */
+        char           qinfo[80];      /* description */
+        Scsi_Cmnd      *qlcmd;         /* current command being processed */
+        struct Scsi_Host *shost;       /* pointer back to host */
+        struct qlogicfas408_priv *next; /* next private struct */
+};
+
+/* The qlogic card uses two register maps - These macros select which one */
+#define REG0 ( outb( inb( qbase + 0xd ) & 0x7f , qbase + 0xd ), outb( 4 , qbase + 0xd ))
+#define REG1 ( outb( inb( qbase + 0xd ) | 0x80 , qbase + 0xd ), outb( 0xb4 | int_type, qbase + 0xd ))
+
+/* following is watchdog timeout in microseconds */
+#define WATCHDOG 5000000
+
+/*----------------------------------------------------------------*/
+/* the following will set the monitor border color (useful to find
+   where something crashed or gets stuck at and as a simple profiler) */
+
+#define rtrc(i) {}
+
+#define get_priv_by_cmd(x) (struct qlogicfas408_priv *)&((x)->device->host->hostdata[0])
+#define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0])
+
+irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id, struct pt_regs *regs);
+int qlogicfas408_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *));
+int qlogicfas408_biosparam(struct scsi_device * disk,
+                       struct block_device *dev,
+                       sector_t capacity, int ip[]);
+int qlogicfas408_abort(Scsi_Cmnd * cmd);
+int qlogicfas408_bus_reset(Scsi_Cmnd * cmd);
+int qlogicfas408_host_reset(Scsi_Cmnd * cmd);
+int qlogicfas408_device_reset(Scsi_Cmnd * cmd);
+const char *qlogicfas408_info(struct Scsi_Host *host);
+int qlogicfas408_get_chip_type(int qbase, int int_type);
+void qlogicfas408_setup(int qbase, int id, int int_type);
+int qlogicfas408_detect(int qbase, int int_type);
+void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv);
+#endif /* __QLOGICFAS408_H */
+
diff --git a/drivers/scsi/sata_promise.h b/drivers/scsi/sata_promise.h
new file mode 100644 (file)
index 0000000..6e7e96b
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ *  sata_promise.h - Promise SATA common definitions and inline funcs
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.
+ *
+ *  The contents of this file are subject to the Open
+ *  Software License version 1.1 that can be found at
+ *  http://www.opensource.org/licenses/osl-1.1.txt and is included herein
+ *  by reference.
+ *
+ *  Alternatively, the contents of this file may be used under the terms
+ *  of the GNU General Public License version 2 (the "GPL") as distributed
+ *  in the kernel source COPYING file, in which case the provisions of
+ *  the GPL are applicable instead of the above.  If you wish to allow
+ *  the use of your version of this file only under the terms of the
+ *  GPL and not to allow others to use your version of this file under
+ *  the OSL, indicate your decision by deleting the provisions above and
+ *  replace them with the notice and other provisions required by the GPL.
+ *  If you do not delete the provisions above, a recipient may use your
+ *  version of this file under either the OSL or the GPL.
+ *
+ */
+
+#ifndef __SATA_PROMISE_H__
+#define __SATA_PROMISE_H__
+
+#include <linux/ata.h>
+
+/*
+ * Bit layout of Promise ATA packet bytes.  Byte 0 carries the transfer
+ * flags (read / no-data); subsequent "register select" bytes encode a
+ * count in bits 7:5 (see PDC_PKT_SIZEMASK and the "(count << 5)" notes
+ * in the helpers below) plus flags and an ATA register index.
+ */
+enum pdc_packet_bits {
+       PDC_PKT_READ            = (1 << 2),
+       PDC_PKT_NODATA          = (1 << 3),
+
+       PDC_PKT_SIZEMASK        = (1 << 7) | (1 << 6) | (1 << 5),
+       PDC_PKT_CLEAR_BSY       = (1 << 4),
+       PDC_PKT_WAIT_DRDY       = (1 << 3) | (1 << 4),
+       PDC_LAST_REG            = (1 << 3),
+
+       PDC_REG_DEVCTL          = (1 << 3) | (1 << 2) | (1 << 1),
+};
+
+/*
+ * pdc_pkt_header - assemble the fixed 16-byte head of a Promise packet
+ * @tf:       taskfile supplying protocol, write flag and control byte
+ * @sg_table: bus address of the scatter/gather table
+ * @devno:    device number on the port (0 selects DEV0, nonzero DEV1)
+ * @buf:      output buffer the packet is built into
+ *
+ * Writes the control word, S/G table address, next-packet pointer
+ * (always zero: no chaining), a device-select register write and a
+ * device-control register write.  BUG()s on any protocol other than
+ * ATA_PROT_DMA or ATA_PROT_NODATA.
+ *
+ * Returns the offset (16) at which the caller appends taskfile regs.
+ */
+static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf,
+                                         dma_addr_t sg_table,
+                                         unsigned int devno, u8 *buf)
+{
+       u8 dev_reg;
+       u32 *buf32 = (u32 *) buf;
+
+       /* set control bits (byte 0), zero delay seq id (byte 3),
+        * and seq id (byte 2)
+        */
+       switch (tf->protocol) {
+       case ATA_PROT_DMA:
+               if (!(tf->flags & ATA_TFLAG_WRITE))
+                       buf32[0] = cpu_to_le32(PDC_PKT_READ);
+               else
+                       buf32[0] = 0;
+               break;
+
+       case ATA_PROT_NODATA:
+               buf32[0] = cpu_to_le32(PDC_PKT_NODATA);
+               break;
+
+       default:
+               BUG();
+               break;
+       }
+
+       buf32[1] = cpu_to_le32(sg_table);       /* S/G table addr */
+       buf32[2] = 0;                           /* no next-packet */
+
+       if (devno == 0)
+               dev_reg = ATA_DEVICE_OBS;
+       else
+               dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
+
+       /* select device */
+       buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
+       buf[13] = dev_reg;
+
+       /* device control register */
+       buf[14] = (1 << 5) | PDC_REG_DEVCTL;
+       buf[15] = tf->ctl;
+
+       return 16;      /* offset of next byte */
+}
+
+/*
+ * pdc_pkt_footer - terminate a Promise packet at offset @i in @buf
+ *
+ * Emits an optional device-register write (only when ATA_TFLAG_DEVICE
+ * is set in @tf->flags) followed by the command register write, which
+ * carries PDC_LAST_REG as the end-of-packet marker.
+ *
+ * Returns the new offset, i.e. the total packet length so far.
+ */
+static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf,
+                                 unsigned int i)
+{
+       if (tf->flags & ATA_TFLAG_DEVICE) {
+               buf[i++] = (1 << 5) | ATA_REG_DEVICE;
+               buf[i++] = tf->device;
+       }
+
+       /* and finally the command itself; also includes end-of-pkt marker */
+       buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD;
+       buf[i++] = tf->command;
+
+       return i;
+}
+
+/*
+ * pdc_prep_lba28 - append LBA28 taskfile registers to a packet
+ *
+ * Writes one value per register (count 1 in bits 7:5 of each select
+ * byte): feature, sector count, and the three LBA bytes from @tf.
+ * Returns the updated offset into @buf.
+ */
+static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i)
+{
+       /* the "(1 << 5)" should be read "(count << 5)" */
+
+       /* ATA command block registers */
+       buf[i++] = (1 << 5) | ATA_REG_FEATURE;
+       buf[i++] = tf->feature;
+
+       buf[i++] = (1 << 5) | ATA_REG_NSECT;
+       buf[i++] = tf->nsect;
+
+       buf[i++] = (1 << 5) | ATA_REG_LBAL;
+       buf[i++] = tf->lbal;
+
+       buf[i++] = (1 << 5) | ATA_REG_LBAM;
+       buf[i++] = tf->lbam;
+
+       buf[i++] = (1 << 5) | ATA_REG_LBAH;
+       buf[i++] = tf->lbah;
+
+       return i;
+}
+
+/*
+ * pdc_prep_lba48 - append LBA48 taskfile registers to a packet
+ *
+ * Like pdc_prep_lba28() but writes two values per register (count 2 in
+ * bits 7:5): the HOB (high-order byte) value first, then the current
+ * value, for feature, sector count and each LBA byte.
+ * Returns the updated offset into @buf.
+ */
+static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i)
+{
+       /* the "(2 << 5)" should be read "(count << 5)" */
+
+       /* ATA command block registers */
+       buf[i++] = (2 << 5) | ATA_REG_FEATURE;
+       buf[i++] = tf->hob_feature;
+       buf[i++] = tf->feature;
+
+       buf[i++] = (2 << 5) | ATA_REG_NSECT;
+       buf[i++] = tf->hob_nsect;
+       buf[i++] = tf->nsect;
+
+       buf[i++] = (2 << 5) | ATA_REG_LBAL;
+       buf[i++] = tf->hob_lbal;
+       buf[i++] = tf->lbal;
+
+       buf[i++] = (2 << 5) | ATA_REG_LBAM;
+       buf[i++] = tf->hob_lbam;
+       buf[i++] = tf->lbam;
+
+       buf[i++] = (2 << 5) | ATA_REG_LBAH;
+       buf[i++] = tf->hob_lbah;
+       buf[i++] = tf->lbah;
+
+       return i;
+}
+
+
+#endif /* __SATA_PROMISE_H__ */
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
new file mode 100644 (file)
index 0000000..38c64f6
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * drivers/usb/core/sysfs.c
+ *
+ * (C) Copyright 2002 David Brownell
+ * (C) Copyright 2002 Greg Kroah-Hartman
+ * (C) Copyright 2002 IBM Corp.
+ *
+ * All of the sysfs file attributes for usb devices and interfaces.
+ *
+ */
+
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+
+#ifdef CONFIG_USB_DEBUG
+       #define DEBUG
+#else
+       #undef DEBUG
+#endif
+#include <linux/usb.h>
+
+#include "usb.h"
+
+/* Active configuration fields */
+#define usb_actconfig_show(field, multiplier, format_string)           \
+static ssize_t  show_##field (struct device *dev, char *buf)           \
+{                                                                      \
+       struct usb_device *udev;                                        \
+                                                                       \
+       udev = to_usb_device (dev);                                     \
+       if (udev->actconfig)                                            \
+               return sprintf (buf, format_string,                     \
+                               udev->actconfig->desc.field * multiplier);      \
+       else                                                            \
+               return 0;                                               \
+}                                                                      \
+
+#define usb_actconfig_attr(field, multiplier, format_string)           \
+usb_actconfig_show(field, multiplier, format_string)                   \
+static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
+
+usb_actconfig_attr (bNumInterfaces, 1, "%2d\n")
+usb_actconfig_attr (bmAttributes, 1, "%2x\n")
+usb_actconfig_attr (bMaxPower, 2, "%3dmA\n")
+
+/* configuration value is always present, and r/w */
+usb_actconfig_show(bConfigurationValue, 1, "%u\n");
+
+/*
+ * Store handler for the writable bConfigurationValue attribute: parse a
+ * configuration number from the user buffer and apply it with
+ * usb_set_configuration() while holding the device's serialize lock.
+ * Returns @count on success or the negative error from the set call.
+ */
+static ssize_t
+set_bConfigurationValue (struct device *dev, const char *buf, size_t count)
+{
+       /* NOTE(review): "udev = udev =" is a redundant self-assignment
+        * in the original; only the single initialization takes effect. */
+       struct usb_device       *udev = udev = to_usb_device (dev);
+       int                     config, value;
+
+       if (sscanf (buf, "%u", &config) != 1 || config > 255)
+               return -EINVAL;
+       down(&udev->serialize);
+       value = usb_set_configuration (udev, config);
+       up(&udev->serialize);
+       return (value < 0) ? value : count;
+}
+
+static DEVICE_ATTR(bConfigurationValue, S_IRUGO | S_IWUSR, 
+               show_bConfigurationValue, set_bConfigurationValue);
+
+/* String fields */
+#define usb_string_attr(name, field)           \
+static ssize_t  show_##name(struct device *dev, char *buf)             \
+{                                                                      \
+       struct usb_device *udev;                                        \
+       int len;                                                        \
+                                                                       \
+       udev = to_usb_device (dev);                                     \
+       len = usb_string(udev, udev->descriptor.field, buf, PAGE_SIZE); \
+       if (len < 0)                                                    \
+               return 0;                                               \
+       buf[len] = '\n';                                                \
+       buf[len+1] = 0;                                                 \
+       return len+1;                                                   \
+}                                                                      \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+
+usb_string_attr(product, iProduct);
+usb_string_attr(manufacturer, iManufacturer);
+usb_string_attr(serial, iSerialNumber);
+
+/*
+ * Show handler for the "speed" attribute: report the device's bus
+ * speed as a string ("1.5", "12", "480", in Mbit/s) derived from
+ * udev->speed.  USB_SPEED_UNKNOWN is deliberately reported as "12".
+ */
+static ssize_t
+show_speed (struct device *dev, char *buf)
+{
+       struct usb_device *udev;
+       char *speed;
+
+       udev = to_usb_device (dev);
+
+       switch (udev->speed) {
+       case USB_SPEED_LOW:
+               speed = "1.5";
+               break;
+       case USB_SPEED_UNKNOWN:
+       case USB_SPEED_FULL:
+               speed = "12";
+               break;
+       case USB_SPEED_HIGH:
+               speed = "480";
+               break;
+       default:
+               speed = "unknown";
+       }
+       return sprintf (buf, "%s\n", speed);
+}
+static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL);
+
+/* Show handler for "devnum": the device's address on its bus. */
+static ssize_t
+show_devnum (struct device *dev, char *buf)
+{
+       struct usb_device *udev;
+
+       udev = to_usb_device (dev);
+       return sprintf (buf, "%d\n", udev->devnum);
+}
+static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL);
+
+/*
+ * Show handler for "version": the BCD-encoded USB spec release number
+ * (descriptor bcdUSB), printed as major.minor.
+ */
+static ssize_t
+show_version (struct device *dev, char *buf)
+{
+       struct usb_device *udev;
+
+       udev = to_usb_device (dev);
+       return sprintf (buf, "%2x.%02x\n", udev->descriptor.bcdUSB >> 8, 
+                       udev->descriptor.bcdUSB & 0xff);
+}
+static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+
+/* Show handler for "maxchild": number of ports if the device is a hub. */
+static ssize_t
+show_maxchild (struct device *dev, char *buf)
+{
+       struct usb_device *udev;
+
+       udev = to_usb_device (dev);
+       return sprintf (buf, "%d\n", udev->maxchild);
+}
+static DEVICE_ATTR(maxchild, S_IRUGO, show_maxchild, NULL);
+
+/* Descriptor fields */
+#define usb_descriptor_attr(field, format_string)                      \
+static ssize_t                                                         \
+show_##field (struct device *dev, char *buf)                           \
+{                                                                      \
+       struct usb_device *udev;                                        \
+                                                                       \
+       udev = to_usb_device (dev);                                     \
+       return sprintf (buf, format_string, udev->descriptor.field);    \
+}                                                                      \
+static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
+
+usb_descriptor_attr (idVendor, "%04x\n")
+usb_descriptor_attr (idProduct, "%04x\n")
+usb_descriptor_attr (bcdDevice, "%04x\n")
+usb_descriptor_attr (bDeviceClass, "%02x\n")
+usb_descriptor_attr (bDeviceSubClass, "%02x\n")
+usb_descriptor_attr (bDeviceProtocol, "%02x\n")
+usb_descriptor_attr (bNumConfigurations, "%d\n")
+
+
+/*
+ * usb_create_sysfs_dev_files - populate a USB device's sysfs directory
+ * @udev: the newly added USB device
+ *
+ * Creates the attribute files for the active configuration, the device
+ * descriptor fields, the bus speed, and — only when the corresponding
+ * string descriptor index is nonzero — the manufacturer/product/serial
+ * string attributes.
+ */
+void usb_create_sysfs_dev_files (struct usb_device *udev)
+{
+       struct device *dev = &udev->dev;
+
+       /* current configuration's attributes */
+       device_create_file (dev, &dev_attr_bNumInterfaces);
+       device_create_file (dev, &dev_attr_bConfigurationValue);
+       device_create_file (dev, &dev_attr_bmAttributes);
+       device_create_file (dev, &dev_attr_bMaxPower);
+
+       /* device attributes */
+       device_create_file (dev, &dev_attr_idVendor);
+       device_create_file (dev, &dev_attr_idProduct);
+       device_create_file (dev, &dev_attr_bcdDevice);
+       device_create_file (dev, &dev_attr_bDeviceClass);
+       device_create_file (dev, &dev_attr_bDeviceSubClass);
+       device_create_file (dev, &dev_attr_bDeviceProtocol);
+       device_create_file (dev, &dev_attr_bNumConfigurations);
+
+       /* speed varies depending on how you connect the device */
+       device_create_file (dev, &dev_attr_speed);
+       // FIXME iff there are other speed configs, show how many
+
+       if (udev->descriptor.iManufacturer)
+               device_create_file (dev, &dev_attr_manufacturer);
+       if (udev->descriptor.iProduct)
+               device_create_file (dev, &dev_attr_product);
+       if (udev->descriptor.iSerialNumber)
+               device_create_file (dev, &dev_attr_serial);
+
+       device_create_file (dev, &dev_attr_devnum);
+       device_create_file (dev, &dev_attr_version);
+       device_create_file (dev, &dev_attr_maxchild);
+}
+
+/* Interface fields */
+#define usb_intf_attr(field, format_string)                            \
+static ssize_t                                                         \
+show_##field (struct device *dev, char *buf)                           \
+{                                                                      \
+       struct usb_interface *intf = to_usb_interface (dev);            \
+                                                                       \
+       return sprintf (buf, format_string, intf->cur_altsetting->desc.field); \
+}                                                                      \
+static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
+
+usb_intf_attr (bInterfaceNumber, "%02x\n")
+usb_intf_attr (bAlternateSetting, "%2d\n")
+usb_intf_attr (bNumEndpoints, "%02x\n")
+usb_intf_attr (bInterfaceClass, "%02x\n")
+usb_intf_attr (bInterfaceSubClass, "%02x\n")
+usb_intf_attr (bInterfaceProtocol, "%02x\n")
+usb_intf_attr (iInterface, "%02x\n")
+
+/*
+ * usb_create_sysfs_intf_files - populate an interface's sysfs directory
+ * @intf: the USB interface being registered
+ *
+ * Creates the per-interface descriptor attribute files (taken from the
+ * interface's current altsetting by the usb_intf_attr show handlers).
+ */
+void usb_create_sysfs_intf_files (struct usb_interface *intf)
+{
+       device_create_file (&intf->dev, &dev_attr_bInterfaceNumber);
+       device_create_file (&intf->dev, &dev_attr_bAlternateSetting);
+       device_create_file (&intf->dev, &dev_attr_bNumEndpoints);
+       device_create_file (&intf->dev, &dev_attr_bInterfaceClass);
+       device_create_file (&intf->dev, &dev_attr_bInterfaceSubClass);
+       device_create_file (&intf->dev, &dev_attr_bInterfaceProtocol);
+       device_create_file (&intf->dev, &dev_attr_iInterface);
+}
diff --git a/drivers/usb/input/touchkitusb.c b/drivers/usb/input/touchkitusb.c
new file mode 100644 (file)
index 0000000..4917b04
--- /dev/null
@@ -0,0 +1,310 @@
+/******************************************************************************
+ * touchkitusb.c  --  Driver for eGalax TouchKit USB Touchscreens
+ *
+ * Copyright (C) 2004 by Daniel Ritz
+ * Copyright (C) by Todd E. Johnson (mtouchusb.c)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Based upon mtouchusb.c
+ *
+ *****************************************************************************/
+
+//#define DEBUG
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#if !defined(DEBUG) && defined(CONFIG_USB_DEBUG)
+#define DEBUG
+#endif
+#include <linux/usb.h>
+
+
+#define TOUCHKIT_MIN_XC                        0x0
+#define TOUCHKIT_MAX_XC                        0x07ff
+#define TOUCHKIT_XC_FUZZ               0x0
+#define TOUCHKIT_XC_FLAT               0x0
+#define TOUCHKIT_MIN_YC                        0x0
+#define TOUCHKIT_MAX_YC                        0x07ff
+#define TOUCHKIT_YC_FUZZ               0x0
+#define TOUCHKIT_YC_FLAT               0x0
+#define TOUCHKIT_REPORT_DATA_SIZE      8
+
+#define TOUCHKIT_DOWN                  0x01
+#define TOUCHKIT_POINT_TOUCH           0x81
+#define TOUCHKIT_POINT_NOTOUCH         0x80
+
+#define TOUCHKIT_GET_TOUCHED(dat)      ((((dat)[0]) & TOUCHKIT_DOWN) ? 1 : 0)
+#define TOUCHKIT_GET_X(dat)            (((dat)[3] << 7) | (dat)[4])
+#define TOUCHKIT_GET_Y(dat)            (((dat)[1] << 7) | (dat)[2])
+
+#define DRIVER_VERSION                 "v0.1"
+#define DRIVER_AUTHOR                  "Daniel Ritz <daniel.ritz@gmx.ch>"
+#define DRIVER_DESC                    "eGalax TouchKit USB HID Touchscreen Driver"
+
+struct touchkit_usb {
+       unsigned char *data;
+       dma_addr_t data_dma;
+       struct urb *irq;
+       struct usb_device *udev;
+       struct input_dev input;
+       int open;
+       char name[128];
+       char phys[64];
+};
+
+static struct usb_device_id touchkit_devices[] = {
+       {USB_DEVICE(0x3823, 0x0001)},
+       {USB_DEVICE(0x0eef, 0x0001)},
+       {}
+};
+
+/*
+ * touchkit_irq - interrupt-URB completion handler
+ *
+ * On success, decodes the 8-byte report in touchkit->data (touch state
+ * plus X/Y via the TOUCHKIT_GET_* macros) and feeds it to the input
+ * layer.  Fatal URB statuses (-ECONNRESET/-ENOENT/-ESHUTDOWN and
+ * -ETIMEDOUT) return without resubmitting; any other status falls
+ * through to resubmission so polling continues.
+ */
+static void touchkit_irq(struct urb *urb, struct pt_regs *regs)
+{
+       struct touchkit_usb *touchkit = urb->context;
+       int retval;
+
+       switch (urb->status) {
+       case 0:
+               /* success */
+               break;
+       case -ETIMEDOUT:
+               /* this urb is timing out - was the device unplugged?
+                * do not resubmit */
+               dbg("%s - urb timed out - was the device unplugged?",
+                   __FUNCTION__);
+               return;
+       case -ECONNRESET:
+       case -ENOENT:
+       case -ESHUTDOWN:
+               /* this urb is terminated, clean up */
+               dbg("%s - urb shutting down with status: %d",
+                   __FUNCTION__, urb->status);
+               return;
+       default:
+               /* transient error: skip the report but keep polling */
+               dbg("%s - nonzero urb status received: %d",
+                   __FUNCTION__, urb->status);
+               goto exit;
+       }
+
+       input_regs(&touchkit->input, regs);
+       input_report_key(&touchkit->input, BTN_TOUCH,
+                        TOUCHKIT_GET_TOUCHED(touchkit->data));
+       input_report_abs(&touchkit->input, ABS_X,
+                        TOUCHKIT_GET_X(touchkit->data));
+       input_report_abs(&touchkit->input, ABS_Y,
+                        TOUCHKIT_GET_Y(touchkit->data));
+       input_sync(&touchkit->input);
+
+exit:
+       retval = usb_submit_urb(urb, GFP_ATOMIC);
+       if (retval)
+               err("%s - usb_submit_urb failed with result: %d",
+                   __FUNCTION__, retval);
+}
+
+/*
+ * touchkit_open - input-device open callback
+ *
+ * Reference-counted via touchkit->open: only the first opener submits
+ * the interrupt URB.  Returns -EIO (and undoes the count) if the
+ * submission fails.
+ */
+static int touchkit_open(struct input_dev *input)
+{
+       struct touchkit_usb *touchkit = input->private;
+
+       if (touchkit->open++)
+               return 0;
+
+       touchkit->irq->dev = touchkit->udev;
+
+       if (usb_submit_urb(touchkit->irq, GFP_ATOMIC)) {
+               touchkit->open--;
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/*
+ * touchkit_close - input-device close callback; the last closer
+ * unlinks the interrupt URB to stop polling.
+ */
+static void touchkit_close(struct input_dev *input)
+{
+       struct touchkit_usb *touchkit = input->private;
+
+       if (!--touchkit->open)
+               usb_unlink_urb(touchkit->irq);
+}
+
+/*
+ * touchkit_alloc_buffers - allocate the DMA-consistent report buffer
+ * (TOUCHKIT_REPORT_DATA_SIZE bytes) used by the interrupt URB.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static int touchkit_alloc_buffers(struct usb_device *udev,
+                                 struct touchkit_usb *touchkit)
+{
+       touchkit->data = usb_buffer_alloc(udev, TOUCHKIT_REPORT_DATA_SIZE,
+                                         SLAB_ATOMIC, &touchkit->data_dma);
+
+       if (!touchkit->data)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * touchkit_free_buffers - release the report buffer allocated by
+ * touchkit_alloc_buffers(); safe to call when allocation failed.
+ */
+static void touchkit_free_buffers(struct usb_device *udev,
+                                 struct touchkit_usb *touchkit)
+{
+       if (touchkit->data)
+               usb_buffer_free(udev, TOUCHKIT_REPORT_DATA_SIZE,
+                               touchkit->data, touchkit->data_dma);
+}
+
+/*
+ * touchkit_probe - bind to a TouchKit touchscreen interface
+ *
+ * Allocates and zeroes the driver state, sets up the input device
+ * (absolute X/Y axes with the TOUCHKIT_* ranges, BTN_TOUCH key),
+ * builds a display name from the device's string descriptors, and
+ * prepares the interrupt-IN URB on endpoint 0x81.  On success the
+ * state is stored as interface data; on failure all partially
+ * allocated resources are released via the out_* labels.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int touchkit_probe(struct usb_interface *intf,
+                         const struct usb_device_id *id)
+{
+       int ret;
+       struct touchkit_usb *touchkit;
+       struct usb_host_interface *interface;
+       struct usb_endpoint_descriptor *endpoint;
+       struct usb_device *udev = interface_to_usbdev(intf);
+       char path[64];
+       char *buf;
+
+       interface = intf->cur_altsetting;
+       endpoint = &interface->endpoint[0].desc;
+
+       touchkit = kmalloc(sizeof(struct touchkit_usb), GFP_KERNEL);
+       if (!touchkit)
+               return -ENOMEM;
+
+       memset(touchkit, 0, sizeof(struct touchkit_usb));
+       touchkit->udev = udev;
+
+       if (touchkit_alloc_buffers(udev, touchkit)) {
+               ret = -ENOMEM;
+               goto out_free;
+       }
+
+       touchkit->input.private = touchkit;
+       touchkit->input.open = touchkit_open;
+       touchkit->input.close = touchkit_close;
+
+       usb_make_path(udev, path, 64);
+       sprintf(touchkit->phys, "%s/input0", path);
+
+       touchkit->input.name = touchkit->name;
+       touchkit->input.phys = touchkit->phys;
+       touchkit->input.id.bustype = BUS_USB;
+       touchkit->input.id.vendor = udev->descriptor.idVendor;
+       touchkit->input.id.product = udev->descriptor.idProduct;
+       touchkit->input.id.version = udev->descriptor.bcdDevice;
+       touchkit->input.dev = &intf->dev;
+
+       touchkit->input.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+       touchkit->input.absbit[0] = BIT(ABS_X) | BIT(ABS_Y);
+       touchkit->input.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
+
+       /* Used to Scale Compensated Data */
+       touchkit->input.absmin[ABS_X] = TOUCHKIT_MIN_XC;
+       touchkit->input.absmax[ABS_X] = TOUCHKIT_MAX_XC;
+       touchkit->input.absfuzz[ABS_X] = TOUCHKIT_XC_FUZZ;
+       touchkit->input.absflat[ABS_X] = TOUCHKIT_XC_FLAT;
+       touchkit->input.absmin[ABS_Y] = TOUCHKIT_MIN_YC;
+       touchkit->input.absmax[ABS_Y] = TOUCHKIT_MAX_YC;
+       touchkit->input.absfuzz[ABS_Y] = TOUCHKIT_YC_FUZZ;
+       touchkit->input.absflat[ABS_Y] = TOUCHKIT_YC_FLAT;
+
+       /* scratch buffer for reading the manufacturer/product strings */
+       buf = kmalloc(63, GFP_KERNEL);
+       if (!buf) {
+               ret = -ENOMEM;
+               goto out_free_buffers;
+       }
+
+       if (udev->descriptor.iManufacturer &&
+           usb_string(udev, udev->descriptor.iManufacturer, buf, 63) > 0)
+               strcat(touchkit->name, buf);
+       if (udev->descriptor.iProduct &&
+           usb_string(udev, udev->descriptor.iProduct, buf, 63) > 0)
+               sprintf(touchkit->name, "%s %s", touchkit->name, buf);
+
+       /* fall back to a generic name when no strings were available */
+       if (!strlen(touchkit->name))
+               sprintf(touchkit->name, "USB Touchscreen %04x:%04x",
+                       touchkit->input.id.vendor, touchkit->input.id.product);
+
+       kfree(buf);
+
+       touchkit->irq = usb_alloc_urb(0, GFP_KERNEL);
+       if (!touchkit->irq) {
+               dbg("%s - usb_alloc_urb failed: touchkit->irq", __FUNCTION__);
+               ret = -ENOMEM;
+               goto out_free_buffers;
+       }
+
+       /* interrupt-IN endpoint 0x81 delivers the 8-byte touch reports */
+       usb_fill_int_urb(touchkit->irq, touchkit->udev,
+                        usb_rcvintpipe(touchkit->udev, 0x81),
+                        touchkit->data, TOUCHKIT_REPORT_DATA_SIZE,
+                        touchkit_irq, touchkit, endpoint->bInterval);
+
+       input_register_device(&touchkit->input);
+
+       printk(KERN_INFO "input: %s on %s\n", touchkit->name, path);
+       usb_set_intfdata(intf, touchkit);
+
+       return 0;
+
+out_free_buffers:
+       touchkit_free_buffers(udev, touchkit);
+out_free:
+       kfree(touchkit);
+       return ret;
+}
+
+/*
+ * touchkit_disconnect - unbind from the interface: unregister the
+ * input device, stop and free the interrupt URB, and release the
+ * report buffer and driver state.  A NULL intfdata means probe never
+ * completed, so there is nothing to tear down.
+ */
+static void touchkit_disconnect(struct usb_interface *intf)
+{
+       struct touchkit_usb *touchkit = usb_get_intfdata(intf);
+
+       dbg("%s - called", __FUNCTION__);
+
+       if (!touchkit)
+               return;
+
+       dbg("%s - touchkit is initialized, cleaning up", __FUNCTION__);
+       usb_set_intfdata(intf, NULL);
+       input_unregister_device(&touchkit->input);
+       usb_unlink_urb(touchkit->irq);
+       usb_free_urb(touchkit->irq);
+       touchkit_free_buffers(interface_to_usbdev(intf), touchkit);
+       kfree(touchkit);
+}
+
+MODULE_DEVICE_TABLE(usb, touchkit_devices);
+
+static struct usb_driver touchkit_driver = {
+       .owner          = THIS_MODULE,
+       .name           = "touchkitusb",
+       .probe          = touchkit_probe,
+       .disconnect     = touchkit_disconnect,
+       .id_table       = touchkit_devices,
+};
+
+/* Module entry point: register the USB driver. */
+static int __init touchkit_init(void)
+{
+       return usb_register(&touchkit_driver);
+}
+
+/* Module exit point: deregister the USB driver. */
+static void __exit touchkit_cleanup(void)
+{
+       usb_deregister(&touchkit_driver);
+}
+
+module_init(touchkit_init);
+module_exit(touchkit_cleanup);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/misc/phidgetservo.c b/drivers/usb/misc/phidgetservo.c
new file mode 100644 (file)
index 0000000..9018774
--- /dev/null
@@ -0,0 +1,327 @@
+/*
+ * USB PhidgetServo driver 1.0
+ *
+ * Copyright (C) 2004 Sean Young <sean@mess.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This is a driver for the USB PhidgetServo version 2.0 and 3.0 servo 
+ * controllers available at: http://www.phidgets.com/ 
+ *
+ * Note that the driver takes input as: degrees.minutes
+ * -23 < degrees < 203
+ * 0 < minutes < 59
+ *
+ * CAUTION: Generally you should use 0 < degrees < 180 as anything else
+ * is probably beyond the range of your servo and may damage it.
+ */
+
+#include <linux/config.h>
+#ifdef CONFIG_USB_DEBUG
+#define DEBUG  1
+#endif
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#define DRIVER_AUTHOR "Sean Young <sean@mess.org>"
+#define DRIVER_DESC "USB PhidgetServo Driver"
+
+#define VENDOR_ID_GLAB                 0x06c2
+#define DEVICE_ID_4MOTOR_SERVO_30      0x0038
+#define DEVICE_ID_1MOTOR_SERVO_30      0x0039
+
+#define VENDOR_ID_WISEGROUP            0x0925
+#define DEVICE_ID_1MOTOR_SERVO_20      0x8101
+#define DEVICE_ID_4MOTOR_SERVO_20      0x8104
+
+static struct usb_device_id id_table[] = {
+       {USB_DEVICE(VENDOR_ID_GLAB, DEVICE_ID_4MOTOR_SERVO_30)},
+       {USB_DEVICE(VENDOR_ID_GLAB, DEVICE_ID_1MOTOR_SERVO_30)},
+       {USB_DEVICE(VENDOR_ID_WISEGROUP, DEVICE_ID_4MOTOR_SERVO_20)},
+       {USB_DEVICE(VENDOR_ID_WISEGROUP, DEVICE_ID_1MOTOR_SERVO_20)},
+       {}
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+struct phidget_servo {
+       struct usb_device *udev;
+       int version;
+       int quad_servo;
+       int pulse[4];
+       int degrees[4];
+       int minutes[4];
+};
+
+/*
+ * change_position_v30 - move one servo on a v3.0 PhidgetServo
+ * @servo:    driver state (pulse/degrees/minutes caches are updated)
+ * @servo_no: servo index 0..3
+ * @degrees:  target angle in degrees
+ * @minutes:  fractional part of the angle in arc minutes
+ *
+ * Converts the angle to a 12-bit pulse value, packs the four cached
+ * pulse values into the 6-byte report described below, and sends it
+ * with a class-specific control request (bRequest 0x09 SET_REPORT,
+ * bmRequestType 0x21).  Errors are logged, not returned.
+ */
+static void
+change_position_v30(struct phidget_servo *servo, int servo_no, int degrees, 
+                                                               int minutes)
+{
+       int retval;
+       unsigned char *buffer;
+
+       buffer = kmalloc(6, GFP_KERNEL);
+       if (!buffer) {
+               dev_err(&servo->udev->dev, "%s - out of memory\n",
+                       __FUNCTION__);
+               return;
+       }
+
+       /*
+        * pulse = 0 - 4095
+        * angle = 0 - 180 degrees
+        *
+        * pulse = angle * 10.6 + 243.8 
+        * (computed below in integer arithmetic, with minutes folded in)
+        */
+       servo->pulse[servo_no] = ((degrees*60 + minutes)*106 + 2438*60)/600;    
+       servo->degrees[servo_no]= degrees;
+       servo->minutes[servo_no]= minutes;      
+
+       /* 
+        * The PhidgetServo v3.0 is controlled by sending 6 bytes,
+        * 4 * 12 bits for each servo.
+        *
+        * low = lower 8 bits pulse
+        * high = higher 4 bits pulse
+        *
+        * offset     bits
+        * +---+-----------------+
+        * | 0 |      low 0      |
+        * +---+--------+--------+
+        * | 1 | high 1 | high 0 |
+        * +---+--------+--------+
+        * | 2 |      low 1      |
+        * +---+-----------------+
+        * | 3 |      low 2      |
+        * +---+--------+--------+
+        * | 4 | high 3 | high 2 |
+        * +---+--------+--------+
+        * | 5 |      low 3      |
+        * +---+-----------------+
+        */
+
+       buffer[0] = servo->pulse[0] & 0xff;
+       buffer[1] = (servo->pulse[0] >> 8 & 0x0f)
+           | (servo->pulse[1] >> 4 & 0xf0);
+       buffer[2] = servo->pulse[1] & 0xff;
+       buffer[3] = servo->pulse[2] & 0xff;
+       buffer[4] = (servo->pulse[2] >> 8 & 0x0f)
+           | (servo->pulse[3] >> 4 & 0xf0);
+       buffer[5] = servo->pulse[3] & 0xff;
+
+       dev_dbg(&servo->udev->dev,
+               "data: %02x %02x %02x %02x %02x %02x\n",
+               buffer[0], buffer[1], buffer[2],
+               buffer[3], buffer[4], buffer[5]);
+
+       retval = usb_control_msg(servo->udev,
+                                usb_sndctrlpipe(servo->udev, 0),
+                                0x09, 0x21, 0x0200, 0x0000, buffer, 6, 2 * HZ);
+       if (retval != 6)
+               dev_err(&servo->udev->dev, "retval = %d\n", retval);
+       kfree(buffer);
+}
+
+/*
+ * change_position_v20 - move one servo on a v2.0 PhidgetServo
+ * @servo:    driver state (pulse/degrees caches are updated)
+ * @servo_no: servo index 0..3
+ * @degrees:  target angle in degrees
+ * @minutes:  ignored — the v2.0 protocol has whole-degree resolution,
+ *            so the cached minutes are reset to 0
+ *
+ * Sends the 2-byte report described below via the same class-specific
+ * control request as the v3.0 path.  Errors are logged, not returned.
+ */
+static void
+change_position_v20(struct phidget_servo *servo, int servo_no, int degrees,
+                                                               int minutes)
+{
+       int retval;
+       unsigned char *buffer;
+
+       buffer = kmalloc(2, GFP_KERNEL);
+       if (!buffer) {
+               dev_err(&servo->udev->dev, "%s - out of memory\n",
+                       __FUNCTION__);
+               return;
+       }
+
+       /*
+        * angle = 0 - 180 degrees
+        * pulse = angle + 23
+        */
+       servo->pulse[servo_no]= degrees + 23;
+       servo->degrees[servo_no]= degrees;
+       servo->minutes[servo_no]= 0;
+
+       /*
+        * The PhidgetServo v2.0 is controlled by sending two bytes. The
+        * first byte is the servo number xor'ed with 2:
+        *
+        * servo 0 = 2
+        * servo 1 = 3
+        * servo 2 = 0
+        * servo 3 = 1
+        *
+        * The second byte is the position.
+        */
+
+       buffer[0] = servo_no ^ 2;
+       buffer[1] = servo->pulse[servo_no];
+
+       dev_dbg(&servo->udev->dev, "data: %02x %02x\n", buffer[0], buffer[1]);
+
+       retval = usb_control_msg(servo->udev,
+                                usb_sndctrlpipe(servo->udev, 0),
+                                0x09, 0x21, 0x0200, 0x0000, buffer, 2, 2 * HZ);
+       if (retval != 2)
+               dev_err(&servo->udev->dev, "retval = %d\n", retval);
+       kfree(buffer);
+}
+
+/*
+ * show_set(value) expands to the sysfs accessor pair for servo channel
+ * <value>:
+ *
+ *   set_servoN()  - parses "degrees[.minutes]" from userspace, range
+ *                   checks it (-23 .. 180+23 degrees, 0..59 minutes) and
+ *                   sends the new position using the wire format matching
+ *                   the device firmware (v3.0 vs v2.0 protocol).
+ *   show_servoN() - prints the last position written as "degrees.minutes".
+ *
+ * DEVICE_ATTR() then binds both into a "servoN" attribute that is
+ * world-readable and world-writable (S_IWUGO | S_IRUGO).
+ */
+#define show_set(value)        \
+static ssize_t set_servo##value (struct device *dev,                   \
+                                       const char *buf, size_t count)  \
+{                                                                      \
+       int degrees, minutes;                                           \
+       struct usb_interface *intf = to_usb_interface (dev);            \
+       struct phidget_servo *servo = usb_get_intfdata (intf);          \
+                                                                       \
+       minutes = 0;                                                    \
+       /* must at least convert degrees */                             \
+       if (sscanf (buf, "%d.%d", &degrees, &minutes) < 1) {            \
+               return -EINVAL;                                         \
+       }                                                               \
+                                                                       \
+       if (degrees < -23 || degrees > (180 + 23) ||                    \
+           minutes < 0 || minutes > 59) {                              \
+               return -EINVAL;                                         \
+       }                                                               \
+                                                                       \
+       if (servo->version >= 3)                                        \
+               change_position_v30 (servo, value, degrees, minutes);   \
+       else                                                            \
+               change_position_v20 (servo, value, degrees, minutes);   \
+                                                                       \
+       return count;                                                   \
+}                                                                      \
+                                                                       \
+static ssize_t show_servo##value (struct device *dev, char *buf)       \
+{                                                                      \
+       struct usb_interface *intf = to_usb_interface (dev);            \
+       struct phidget_servo *servo = usb_get_intfdata (intf);          \
+                                                                       \
+       return sprintf (buf, "%d.%02d\n", servo->degrees[value],        \
+                               servo->minutes[value]);                 \
+}                                                                      \
+static DEVICE_ATTR(servo##value, S_IWUGO | S_IRUGO,                    \
+         show_servo##value, set_servo##value);
+
+/* Channels 0-3; servo1..servo3 are only registered for quad devices. */
+show_set(0);
+show_set(1);
+show_set(2);
+show_set(3);
+
+/*
+ * servo_probe - bind a PhidgetServo device and create its sysfs controls.
+ *
+ * Allocates the per-device state, derives the protocol version from the
+ * USB vendor id and the motor count from the product id, then exposes one
+ * "servoN" attribute per motor on the interface device.
+ *
+ * Returns 0 on success, -ENOMEM if the state allocation fails.
+ */
+static int
+servo_probe(struct usb_interface *interface, const struct usb_device_id *id)
+{
+       struct usb_device *udev = interface_to_usbdev(interface);
+       struct phidget_servo *dev = NULL;
+
+       dev = kmalloc(sizeof (struct phidget_servo), GFP_KERNEL);
+       if (dev == NULL) {
+               dev_err(&interface->dev, "%s - out of memory\n", __FUNCTION__);
+               return -ENOMEM;
+       }
+       memset(dev, 0x00, sizeof (*dev));
+
+       /* Reference on the usb_device; dropped in servo_disconnect(). */
+       dev->udev = usb_get_dev(udev);
+       /* NOTE(review): an id matched by id_table but absent from these
+        * switches would leave version/quad_servo at 0; relies on the
+        * match table staying in sync with the cases below. */
+       switch (udev->descriptor.idVendor) {
+       case VENDOR_ID_WISEGROUP:
+               dev->version = 2;
+               break;
+       case VENDOR_ID_GLAB:
+               dev->version = 3;
+               break;
+       }
+       switch (udev->descriptor.idProduct) {
+       case DEVICE_ID_4MOTOR_SERVO_20:
+       case DEVICE_ID_4MOTOR_SERVO_30:
+               dev->quad_servo = 1;
+               break;
+       case DEVICE_ID_1MOTOR_SERVO_20:
+       case DEVICE_ID_1MOTOR_SERVO_30:
+               dev->quad_servo = 0;
+               break;
+       }
+
+       usb_set_intfdata(interface, dev);
+
+       /* NOTE(review): device_create_file() return values are ignored,
+        * so a failed attribute creation goes unreported. */
+       device_create_file(&interface->dev, &dev_attr_servo0);
+       if (dev->quad_servo) {
+               device_create_file(&interface->dev, &dev_attr_servo1);
+               device_create_file(&interface->dev, &dev_attr_servo2);
+               device_create_file(&interface->dev, &dev_attr_servo3);
+       }
+
+       dev_info(&interface->dev, "USB %d-Motor PhidgetServo v%d.0 attached\n",
+                dev->quad_servo ? 4 : 1, dev->version);
+       if (dev->version == 2) 
+               dev_info(&interface->dev,
+                        "WARNING: v2.0 not tested! Please report if it works.\n");
+
+       return 0;
+}
+
+/*
+ * servo_disconnect - tear down state created by servo_probe().
+ *
+ * Removes the sysfs attributes, logs the detach, then drops the
+ * usb_device reference taken in probe and frees the per-device state.
+ *
+ * Fix: the detach message used to be printed *after* kfree(dev), reading
+ * dev->quad_servo and dev->version from freed memory.  Log first, free
+ * last.
+ */
+static void
+servo_disconnect(struct usb_interface *interface)
+{
+       struct phidget_servo *dev;
+
+       dev = usb_get_intfdata(interface);
+       usb_set_intfdata(interface, NULL);
+
+       device_remove_file(&interface->dev, &dev_attr_servo0);
+       if (dev->quad_servo) {
+               device_remove_file(&interface->dev, &dev_attr_servo1);
+               device_remove_file(&interface->dev, &dev_attr_servo2);
+               device_remove_file(&interface->dev, &dev_attr_servo3);
+       }
+
+       dev_info(&interface->dev, "USB %d-Motor PhidgetServo v%d.0 detached\n",
+                dev->quad_servo ? 4 : 1, dev->version);
+
+       usb_put_dev(dev->udev);
+
+       kfree(dev);
+}
+
+/* USB driver glue: matches ids in id_table, binds via servo_probe(). */
+static struct usb_driver servo_driver = {
+       .owner = THIS_MODULE,
+       .name = "phidgetservo",
+       .probe = servo_probe,
+       .disconnect = servo_disconnect,
+       .id_table = id_table
+};
+
+/* Module entry point: register the USB driver with the core; any
+ * failure is logged and the error code is propagated to insmod. */
+static int __init
+phidget_servo_init(void)
+{
+       int result = usb_register(&servo_driver);
+
+       if (result)
+               err("usb_register failed. Error number %d", result);
+
+       return result;
+}
+
+/* Module exit point: unregister the USB driver. */
+static void __exit
+phidget_servo_exit(void)
+{
+       usb_deregister(&servo_driver);
+}
+
+module_init(phidget_servo_init);
+module_exit(phidget_servo_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c
new file mode 100644 (file)
index 0000000..034ec29
--- /dev/null
@@ -0,0 +1,620 @@
+/*
+ * drivers/video/asiliantfb.c
+ *  frame buffer driver for Asiliant 69000 chip
+ *  Copyright (C) 2001-2003 Saito.K & Jeanne
+ *
+ *  from driver/video/chipsfb.c and,
+ *
+ *  drivers/video/asiliantfb.c -- frame buffer device for
+ *  Asiliant 69030 chip (formerly Intel, formerly Chips & Technologies)
+ *  Author: apc@agelectronics.co.uk
+ *  Copyright (C) 2000 AG Electronics
+ *  Note: the data sheets don't seem to be available from Asiliant.
+ *  They are available by searching developer.intel.com, but are not otherwise
+ *  linked to.
+ *
+ *  This driver should be portable with minimal effort to the 69000 display
+ *  chip, and to the twin-display mode of the 69030.
+ *  Contains code from Thomas Hhenleitner <th@visuelle-maschinen.de> (thanks)
+ *
+ *  Derived from the CT65550 driver chipsfb.c:
+ *  Copyright (C) 1998 Paul Mackerras
+ *  ...which was derived from the Powermac "chips" driver:
+ *  Copyright (C) 1997 Fabio Riccardi.
+ *  And from the frame buffer device for Open Firmware-initialized devices:
+ *  Copyright (C) 1997 Geert Uytterhoeven.
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License. See the file COPYING in the main directory of this archive for
+ *  more details.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+/* Built in clock of the 69030 */
+const unsigned Fref = 14318180;
+
+/* The chip's MMIO registers sit 4MB above the start of the mapped
+ * aperture; every helper below expects 'p' (struct fb_info *) in scope. */
+#define mmio_base (p->screen_base + 0x400000)
+
+/* Indexed register write: select register 'num' via address port 'ap',
+ * then write 'val' to data port 'dp'. */
+#define mm_write_ind(num, val, ap, dp) do { \
+       writeb((num), mmio_base + (ap)); writeb((val), mmio_base + (dp)); \
+} while (0)
+
+/* Extension registers (XR), ports 0x7ac/0x7ad. */
+static void mm_write_xr(struct fb_info *p, u8 reg, u8 data)
+{
+       mm_write_ind(reg, data, 0x7ac, 0x7ad);
+}
+#define write_xr(num, val)     mm_write_xr(p, num, val)
+
+/* Flat panel registers (FR), ports 0x7a0/0x7a1. */
+static void mm_write_fr(struct fb_info *p, u8 reg, u8 data)
+{
+       mm_write_ind(reg, data, 0x7a0, 0x7a1);
+}
+#define write_fr(num, val)     mm_write_fr(p, num, val)
+
+/* CRT controller registers (CR), ports 0x7a8/0x7a9. */
+static void mm_write_cr(struct fb_info *p, u8 reg, u8 data)
+{
+       mm_write_ind(reg, data, 0x7a8, 0x7a9);
+}
+#define write_cr(num, val)     mm_write_cr(p, num, val)
+
+/* Graphics controller registers (GR), ports 0x79c/0x79d. */
+static void mm_write_gr(struct fb_info *p, u8 reg, u8 data)
+{
+       mm_write_ind(reg, data, 0x79c, 0x79d);
+}
+#define write_gr(num, val)     mm_write_gr(p, num, val)
+
+/* Sequencer registers (SR), ports 0x788/0x789. */
+static void mm_write_sr(struct fb_info *p, u8 reg, u8 data)
+{
+       mm_write_ind(reg, data, 0x788, 0x789);
+}
+#define write_sr(num, val)     mm_write_sr(p, num, val)
+
+/* Attribute controller (AR): index and data share port 0x780; the read
+ * of 0x7b4 first resets the address/data flip-flop (VGA-style). */
+static void mm_write_ar(struct fb_info *p, u8 reg, u8 data)
+{
+       readb(mmio_base + 0x7b4);
+       mm_write_ind(reg, data, 0x780, 0x780);
+}
+#define write_ar(num, val)     mm_write_ar(p, num, val)
+
+/*
+ * Exported functions
+ */
+int asiliantfb_init(void);
+
+static int asiliantfb_pci_init(struct pci_dev *dp, const struct pci_device_id *);
+static int asiliantfb_check_var(struct fb_var_screeninfo *var,
+                               struct fb_info *info);
+static int asiliantfb_set_par(struct fb_info *info);
+static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+                               u_int transp, struct fb_info *info);
+
+/* fbdev entry points; drawing is delegated to the generic cfb_* helpers
+ * (no hardware acceleration is used). */
+static struct fb_ops asiliantfb_ops = {
+       .owner          = THIS_MODULE,
+       .fb_check_var   = asiliantfb_check_var,
+       .fb_set_par     = asiliantfb_set_par,
+       .fb_setcolreg   = asiliantfb_setcolreg,
+       .fb_fillrect    = cfb_fillrect,
+       .fb_copyarea    = cfb_copyarea,
+       .fb_imageblit   = cfb_imageblit,
+       .fb_cursor      = soft_cursor,
+};
+
+/* Calculate the ratios for the dot clocks without using a single long long
+ * value: pick PLL multiplier m, divider n and post-divisor bits so that
+ * Fref * m / n best approximates the dot clock requested in *ppixclock
+ * (picoseconds).  Results are the raw register values (m-2, n-2, divisor).
+ *
+ * Fix: the error terms were computed as "(a - b) >= 0 ? a - b : b - a"
+ * on unsigned operands; an unsigned expression is always >= 0, so a
+ * wrapped (huge) difference was used whenever b > a.  Compare first,
+ * then subtract the smaller from the larger. */
+static void asiliant_calc_dclk2(u32 *ppixclock, u8 *dclk2_m, u8 *dclk2_n, u8 *dclk2_div)
+{
+       unsigned pixclock = *ppixclock;
+       unsigned Ftarget;
+       unsigned n;
+       unsigned best_error = 0xffffffff;
+       unsigned best_m = 0xffffffff,
+                best_n = 0xffffffff;
+       unsigned ratio;
+       unsigned remainder;
+       unsigned char divisor = 0;
+
+       /* Calculate the frequency required. This is hard enough. */
+       ratio = 1000000 / pixclock;
+       remainder = 1000000 % pixclock;
+       Ftarget = 1000000 * ratio + (1000000 * remainder) / pixclock;
+
+       /* Raise the VCO target into its working range, recording each
+        * doubling in the post-divisor field (upper nibble). */
+       while (Ftarget < 100000000) {
+               divisor += 0x10;
+               Ftarget <<= 1;
+       }
+
+       ratio = Ftarget / Fref;
+       remainder = Ftarget % Fref;
+
+       /* This expresses the constraint that 150kHz <= Fref/n <= 5Mhz,
+        * together with 3 <= n <= 257. */
+       for (n = 3; n <= 257; n++) {
+               unsigned m = n * ratio + (n * remainder) / Fref;
+
+               /* 3 <= m <= 257 */
+               if (m >= 3 && m <= 257) {
+                       unsigned new_error = (Ftarget * n) >= (Fref * m) ?
+                                              (Ftarget * n) - (Fref * m) :
+                                              (Fref * m) - (Ftarget * n);
+                       if (new_error < best_error) {
+                               best_n = n;
+                               best_m = m;
+                               best_error = new_error;
+                       }
+               }
+               /* But if VLD = 4, then 4m <= 1028 */
+               else if (m <= 1028) {
+                       /* remember there are still only 8-bits of precision in m, so
+                        * avoid over-optimistic error calculations */
+                       unsigned new_error = (Ftarget * n) >= (Fref * (m & ~3)) ?
+                                              (Ftarget * n) - (Fref * (m & ~3)) :
+                                              (Fref * (m & ~3)) - (Ftarget * n);
+                       if (new_error < best_error) {
+                               best_n = n;
+                               best_m = m;
+                               best_error = new_error;
+                       }
+               }
+       }
+       /* NOTE(review): if no candidate matched, best_m/best_n are still
+        * 0xffffffff and the subtractions below wrap; callers rely on the
+        * check_var() frequency limits to prevent that - confirm. */
+       if (best_m > 257)
+               best_m >>= 2;   /* divide m by 4, and leave VCO loop divide at 4 */
+       else
+               divisor |= 4;   /* or set VCO loop divide to 1 */
+       *dclk2_m = best_m - 2;
+       *dclk2_n = best_n - 2;
+       *dclk2_div = divisor;
+       *ppixclock = pixclock;
+       return;
+}
+
+/* Program the CRT controller timing registers from the mode currently in
+ * p->var.  Horizontal values are in character clocks (pixels / 8),
+ * vertical values in scan lines.  Register write order follows the
+ * original bring-up sequence - do not reorder. */
+static void asiliant_set_timing(struct fb_info *p)
+{
+       unsigned hd = p->var.xres / 8;
+       unsigned hs = (p->var.xres + p->var.right_margin) / 8;
+               unsigned he = (p->var.xres + p->var.right_margin + p->var.hsync_len) / 8;
+       unsigned ht = (p->var.left_margin + p->var.xres + p->var.right_margin + p->var.hsync_len) / 8;
+       unsigned vd = p->var.yres;
+       unsigned vs = p->var.yres + p->var.lower_margin;
+       unsigned ve = p->var.yres + p->var.lower_margin + p->var.vsync_len;
+       unsigned vt = p->var.upper_margin + p->var.yres + p->var.lower_margin + p->var.vsync_len;
+       unsigned wd = (p->var.xres_virtual * ((p->var.bits_per_pixel+7)/8)) / 8;
+
+       /* 640x480@39722ps is the one mode routed to the flat panel. */
+       if ((p->var.xres == 640) && (p->var.yres == 480) && (p->var.pixclock == 39722)) {
+         write_fr(0x01, 0x02);  /* LCD */
+       } else {
+         write_fr(0x01, 0x01);  /* CRT */
+       }
+
+       write_cr(0x11, (ve - 1) & 0x0f);
+       write_cr(0x00, (ht - 5) & 0xff);
+       write_cr(0x01, hd - 1);
+       write_cr(0x02, hd);
+       write_cr(0x03, ((ht - 1) & 0x1f) | 0x80);
+       write_cr(0x04, hs);
+       write_cr(0x05, (((ht - 1) & 0x20) <<2) | (he & 0x1f));
+       write_cr(0x3c, (ht - 1) & 0xc0);
+       write_cr(0x06, (vt - 2) & 0xff);
+       write_cr(0x30, (vt - 2) >> 8);
+       write_cr(0x07, 0x00);
+       write_cr(0x08, 0x00);
+       write_cr(0x09, 0x00);
+       write_cr(0x10, (vs - 1) & 0xff);
+       write_cr(0x32, ((vs - 1) >> 8) & 0xf);
+       write_cr(0x11, ((ve - 1) & 0x0f) | 0x80);
+       write_cr(0x12, (vd - 1) & 0xff);
+       write_cr(0x31, ((vd - 1) & 0xf00) >> 8);
+       write_cr(0x13, wd & 0xff);
+       write_cr(0x41, (wd & 0xf00) >> 8);
+       write_cr(0x15, (vs - 1) & 0xff);
+       write_cr(0x33, ((vs - 1) >> 8) & 0xf);
+       write_cr(0x38, ((ht - 5) & 0x100) >> 8);
+       write_cr(0x16, (vt - 1) & 0xff);
+       write_cr(0x18, 0x00);
+
+       if (p->var.xres == 640) {
+         writeb(0xc7, mmio_base + 0x784);      /* set misc output reg */
+       } else {
+         writeb(0x07, mmio_base + 0x784);      /* set misc output reg */
+       }
+}
+
+/* Validate and normalise a mode requested through FBIOPUT_VSCREENINFO:
+ * reject dot clocks outside the PLL range and fill in the RGB bitfield
+ * layout for the supported depths (8, 16 and 24 bpp).
+ *
+ * Fix: guard against var->pixclock == 0 before dividing by it - a blank
+ * user-supplied mode would otherwise cause a division by zero in the
+ * kernel. */
+static int asiliantfb_check_var(struct fb_var_screeninfo *var,
+                            struct fb_info *p)
+{
+       unsigned long Ftarget, ratio, remainder;
+
+       if (var->pixclock == 0)
+               return -EINVAL;
+
+       ratio = 1000000 / var->pixclock;
+       remainder = 1000000 % var->pixclock;
+       Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock;
+
+       /* First check the constraint that the maximum post-VCO divisor is 32,
+        * and the maximum Fvco is 220MHz */
+       if (Ftarget > 220000000 || Ftarget < 3125000) {
+               printk(KERN_ERR "asiliantfb dotclock must be between 3.125 and 220MHz\n");
+               return -ENXIO;
+       }
+       /* No panning support: virtual resolution tracks the visible one. */
+       var->xres_virtual = var->xres;
+       var->yres_virtual = var->yres;
+
+       if (var->bits_per_pixel == 24) {
+               var->red.offset = 16;
+               var->green.offset = 8;
+               var->blue.offset = 0;
+               var->red.length = var->blue.length = var->green.length = 8;
+       } else if (var->bits_per_pixel == 16) {
+               /* 16bpp is either 5-6-5 or 5-5-5, chosen by the red offset
+                * the caller asked for. */
+               switch (var->red.offset) {
+                       case 11:
+                               var->green.length = 6;
+                               break;
+                       case 10:
+                               var->green.length = 5;
+                               break;
+                       default:
+                               return -EINVAL;
+               }
+               var->green.offset = 5;
+               var->blue.offset = 0;
+               var->red.length = var->blue.length = 5;
+       } else if (var->bits_per_pixel == 8) {
+               var->red.offset = var->green.offset = var->blue.offset = 0;
+               var->red.length = var->green.length = var->blue.length = 8;
+       }
+       /* NOTE(review): any other depth falls through and is accepted with
+        * the caller's bitfields untouched - confirm that is intended. */
+       return 0;
+}
+
+/* Apply the mode in p->var to the hardware: program the dot-clock PLL,
+ * the pixel pipeline / blitter depth, and finally the CRT timing.
+ * Always returns 0. */
+static int asiliantfb_set_par(struct fb_info *p)
+{
+       u8 dclk2_m;             /* Holds m-2 value for register */
+       u8 dclk2_n;             /* Holds n-2 value for register */
+       u8 dclk2_div;           /* Holds divisor bitmask */
+
+       /* Set pixclock */
+       asiliant_calc_dclk2(&p->var.pixclock, &dclk2_m, &dclk2_n, &dclk2_div);
+
+       /* Set color depth */
+       if (p->var.bits_per_pixel == 24) {
+               write_xr(0x81, 0x16);   /* 24 bit packed color mode */
+               write_xr(0x82, 0x00);   /* Disable palettes */
+               write_xr(0x20, 0x20);   /* 24 bit blitter mode */
+       } else if (p->var.bits_per_pixel == 16) {
+               if (p->var.red.offset == 11)
+                       write_xr(0x81, 0x15);   /* 16 bit color mode */
+               else
+                       write_xr(0x81, 0x14);   /* 15 bit color mode */
+               write_xr(0x82, 0x00);   /* Disable palettes */
+               write_xr(0x20, 0x10);   /* 16 bit blitter mode */
+       } else if (p->var.bits_per_pixel == 8) {
+               write_xr(0x0a, 0x02);   /* Linear */
+               write_xr(0x81, 0x12);   /* 8 bit color mode */
+               write_xr(0x82, 0x00);   /* Graphics gamma enable */
+               write_xr(0x20, 0x00);   /* 8 bit blitter mode */
+       }
+       p->fix.line_length = p->var.xres * (p->var.bits_per_pixel >> 3);
+       p->fix.visual = (p->var.bits_per_pixel == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+       /* Load the PLL values computed above. */
+       write_xr(0xc4, dclk2_m);
+       write_xr(0xc5, dclk2_n);
+       write_xr(0xc7, dclk2_div);
+       /* Set up the CR registers */
+       asiliant_set_timing(p);
+       return 0;
+}
+
+/* Load one palette entry into the DAC (8-bit components) and, for the
+ * first entries of truecolor modes, mirror the packed pixel value into
+ * the software pseudo palette used by the console.  Returns 1 for
+ * out-of-range register numbers, 0 otherwise. */
+static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+                            u_int transp, struct fb_info *p)
+{
+       if (regno > 255)
+               return 1;
+       /* fbdev hands us 16-bit components; the DAC takes 8. */
+       red >>= 8;
+       green >>= 8;
+       blue >>= 8;
+
+        /* Set hardware palete */
+       writeb(regno, mmio_base + 0x790);
+       udelay(1);
+       writeb(red, mmio_base + 0x791);
+       writeb(green, mmio_base + 0x791);
+       writeb(blue, mmio_base + 0x791);
+
+       switch(p->var.bits_per_pixel) {
+       /* NOTE(review): check_var() reports 15bpp modes as
+        * bits_per_pixel == 16, so this case looks unreachable - confirm. */
+       case 15:
+               if (regno < 16) {
+                       ((u32 *)(p->pseudo_palette))[regno] =
+                               ((red & 0xf8) << 7) |
+                               ((green & 0xf8) << 2) |
+                               ((blue & 0xf8) >> 3);
+               }
+               break;
+       case 16:
+               if (regno < 16) {
+                       ((u32 *)(p->pseudo_palette))[regno] =
+                               ((red & 0xf8) << 8) |
+                               ((green & 0xfc) << 3) |
+                               ((blue & 0xf8) >> 3);
+               }
+               break;
+       case 24:
+               /* NOTE(review): the other depths cap at 16 entries; "24"
+                * here is likely a typo for 16 (harmless - the pseudo
+                * palette holds 256 u32s) - confirm. */
+               if (regno < 24) {
+                       ((u32 *)(p->pseudo_palette))[regno] =
+                               (red << 16)  |
+                               (green << 8) |
+                               (blue);
+               }
+               break;
+       }
+       return 0;
+}
+
+/* One (index, value) pair for the register-block init tables below. */
+struct chips_init_reg {
+       unsigned char addr;
+       unsigned char data;
+};
+
+/* Element count of a fixed array (equivalent to the kernel's ARRAY_SIZE). */
+#define N_ELTS(x)      (sizeof(x) / sizeof(x[0]))
+
+/* Sequencer (SR) power-on defaults. */
+static struct chips_init_reg chips_init_sr[] =
+{
+       {0x00, 0x03},           /* Reset register */
+       {0x01, 0x01},           /* Clocking mode */
+       {0x02, 0x0f},           /* Plane mask */
+       {0x04, 0x0e}            /* Memory mode */
+};
+
+/* Graphics controller (GR) power-on defaults. */
+static struct chips_init_reg chips_init_gr[] =
+{
+        {0x03, 0x00},          /* Data rotate */
+       {0x05, 0x00},           /* Graphics mode */
+       {0x06, 0x01},           /* Miscellaneous */
+       {0x08, 0x00}            /* Bit mask */
+};
+
+/* Attribute controller (AR) power-on defaults. */
+static struct chips_init_reg chips_init_ar[] =
+{
+       {0x10, 0x01},           /* Mode control */
+       {0x11, 0x00},           /* Overscan */
+       {0x12, 0x0f},           /* Memory plane enable */
+       {0x13, 0x00}            /* Horizontal pixel panning */
+};
+
+/* CRT controller (CR) power-on defaults. */
+static struct chips_init_reg chips_init_cr[] =
+{
+       {0x0c, 0x00},           /* Start address high */
+       {0x0d, 0x00},           /* Start address low */
+       {0x40, 0x00},           /* Extended Start Address */
+       {0x41, 0x00},           /* Extended Start Address */
+       {0x14, 0x00},           /* Underline location */
+       {0x17, 0xe3},           /* CRT mode control */
+       {0x70, 0x00}            /* Interlace control */
+};
+
+
+/* Flat panel (FR) power-on defaults (values from the original bring-up;
+ * the individual registers are not documented here). */
+static struct chips_init_reg chips_init_fr[] =
+{
+       {0x01, 0x02},
+       {0x03, 0x08},
+       {0x08, 0xcc},
+       {0x0a, 0x08},
+       {0x18, 0x00},
+       {0x1e, 0x80},
+       {0x40, 0x83},
+       {0x41, 0x00},
+       {0x48, 0x13},
+       {0x4d, 0x60},
+       {0x4e, 0x0f},
+
+       {0x0b, 0x01},
+
+       {0x21, 0x51},
+       {0x22, 0x1d},
+       {0x23, 0x5f},
+       {0x20, 0x4f},
+       {0x34, 0x00},
+       {0x24, 0x51},
+       {0x25, 0x00},
+       {0x27, 0x0b},
+       {0x26, 0x00},
+       {0x37, 0x80},
+       {0x33, 0x0b},
+       {0x35, 0x11},
+       {0x36, 0x02},
+       {0x31, 0xea},
+       {0x32, 0x0c},
+       {0x30, 0xdf},
+       {0x10, 0x0c},
+       {0x11, 0xe0},
+       {0x12, 0x50},
+       {0x13, 0x00},
+       {0x16, 0x03},
+       {0x17, 0xbd},
+       {0x1a, 0x00},
+};
+
+
+/* Extension register (XR) power-on defaults: memory clock PLL, default
+ * dot clock, and pixel-pipeline configuration. */
+static struct chips_init_reg chips_init_xr[] =
+{
+       {0xce, 0x00},           /* set default memory clock */
+       {0xcc, 200 },           /* MCLK ratio M */
+       {0xcd, 18  },           /* MCLK ratio N */
+       {0xce, 0x90},           /* MCLK divisor = 2 */
+
+       {0xc4, 209 },
+       {0xc5, 118 },
+       {0xc7, 32  },
+       {0xcf, 0x06},
+       {0x09, 0x01},           /* IO Control - CRT controller extensions */
+       {0x0a, 0x02},           /* Frame buffer mapping */
+       {0x0b, 0x01},           /* PCI burst write */
+       {0x40, 0x03},           /* Memory access control */
+       {0x80, 0x82},           /* Pixel pipeline configuration 0 */
+       {0x81, 0x12},           /* Pixel pipeline configuration 1 */
+       {0x82, 0x08},           /* Pixel pipeline configuration 2 */
+
+       {0xd0, 0x0f},
+       {0xd1, 0x01},
+};
+
+/* One-time hardware bring-up: replay all register-block init tables in
+ * the required order (XR first, then SR/GR/AR, enable video output, then
+ * CR and FR). */
+static void __init chips_hw_init(struct fb_info *p)
+{
+       int i;
+
+       for (i = 0; i < N_ELTS(chips_init_xr); ++i)
+               write_xr(chips_init_xr[i].addr, chips_init_xr[i].data);
+       /* Force the 8bpp pixel pipeline before touching the VGA core. */
+       write_xr(0x81, 0x12);
+       write_xr(0x82, 0x08);
+       write_xr(0x20, 0x00);
+       for (i = 0; i < N_ELTS(chips_init_sr); ++i)
+               write_sr(chips_init_sr[i].addr, chips_init_sr[i].data);
+       for (i = 0; i < N_ELTS(chips_init_gr); ++i)
+               write_gr(chips_init_gr[i].addr, chips_init_gr[i].data);
+       for (i = 0; i < N_ELTS(chips_init_ar); ++i)
+               write_ar(chips_init_ar[i].addr, chips_init_ar[i].data);
+       /* Enable video output in attribute index register */
+       writeb(0x20, mmio_base + 0x780);
+       for (i = 0; i < N_ELTS(chips_init_cr); ++i)
+               write_cr(chips_init_cr[i].addr, chips_init_cr[i].data);
+       for (i = 0; i < N_ELTS(chips_init_fr); ++i)
+               write_fr(chips_init_fr[i].addr, chips_init_fr[i].data);
+}
+
+/* Fixed framebuffer properties; smem_start is filled in at probe time. */
+static struct fb_fix_screeninfo asiliantfb_fix __initdata = {
+       .id =           "Asiliant 69000",
+       .type =         FB_TYPE_PACKED_PIXELS,
+       .visual =       FB_VISUAL_PSEUDOCOLOR,
+       .accel =        FB_ACCEL_NONE,
+       .line_length =  640,
+       .smem_len =     0x200000,       /* 2MB */
+};
+
+/* Default mode: 640x480-8bpp at the panel's 39722ps dot clock. */
+static struct fb_var_screeninfo asiliantfb_var __initdata = {
+       .xres           = 640,
+       .yres           = 480,
+       .xres_virtual   = 640,
+       .yres_virtual   = 480,
+       .bits_per_pixel = 8,
+       .red            = { .length = 8 },
+       .green          = { .length = 8 },
+       .blue           = { .length = 8 },
+       .height         = -1,
+       .width          = -1,
+       .vmode          = FB_VMODE_NONINTERLACED,
+       .pixclock       = 39722,
+       .left_margin    = 48,
+       .right_margin   = 16,
+       .upper_margin   = 33,
+       .lower_margin   = 10,
+       .hsync_len      = 96,
+       .vsync_len      = 2,
+};
+
+/* Fill in the fb_info for the device at physical address 'addr', register
+ * it with the fbdev core and run the hardware bring-up sequence.
+ *
+ * Fix: the fb_alloc_cmap() result used to be ignored, and a failed
+ * register_framebuffer() leaked the colour map.  Both paths now bail out
+ * cleanly (the probe still "succeeds" without a usable console, matching
+ * the original error behaviour). */
+static void __init init_asiliant(struct fb_info *p, unsigned long addr)
+{
+       p->fix                  = asiliantfb_fix;
+       p->fix.smem_start       = addr;
+       p->var                  = asiliantfb_var;
+       p->fbops                = &asiliantfb_ops;
+       p->flags                = FBINFO_FLAG_DEFAULT;
+
+       if (fb_alloc_cmap(&p->cmap, 256, 0) < 0) {
+               printk(KERN_ERR "C&T 69000 framebuffer failed to allocate cmap\n");
+               return;
+       }
+
+       if (register_framebuffer(p) < 0) {
+               fb_dealloc_cmap(&p->cmap);
+               printk(KERN_ERR "C&T 69000 framebuffer failed to register\n");
+               return;
+       }
+
+       printk(KERN_INFO "fb%d: Asiliant 69000 frame buffer (%dK RAM detected)\n",
+               p->node, p->fix.smem_len / 1024);
+
+       writeb(0xff, mmio_base + 0x78c);
+       chips_hw_init(p);
+}
+
+/* PCI probe: claim BAR0, map the 8MB aperture (2MB framebuffer + MMIO
+ * registers at +4MB), allocate the fb_info with a 256-entry pseudo
+ * palette, and bring the chip up.
+ *
+ * Fix: the register write that follows the PCI command-register setup
+ * used the *physical* BAR address ('addr') as if it were a virtual
+ * pointer; it must go through the ioremapped p->screen_base. */
+static int __devinit
+asiliantfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+{
+       unsigned long addr, size;
+       struct fb_info *p;
+
+       if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
+               return -ENODEV;
+       addr = pci_resource_start(dp, 0);
+       size = pci_resource_len(dp, 0);
+       if (addr == 0)
+               return -ENODEV;
+       if (!request_mem_region(addr, size, "asiliantfb"))
+               return -EBUSY;
+
+       /* Extra space after fb_info holds the 256-entry pseudo palette. */
+       p = framebuffer_alloc(sizeof(u32) * 256, &dp->dev);
+       if (!p) {
+               release_mem_region(addr, size);
+               return -ENOMEM;
+       }
+       p->pseudo_palette = p->par;
+       p->par = NULL;
+
+       p->screen_base = ioremap(addr, 0x800000);
+       if (p->screen_base == NULL) {
+               release_mem_region(addr, size);
+               framebuffer_release(p);
+               return -ENOMEM;
+       }
+
+       /* Enable memory space, bus mastering and write combining. */
+       pci_write_config_dword(dp, 4, 0x02800083);
+       writeb(3, p->screen_base + 0x400784);
+
+       init_asiliant(p, addr);
+
+       /* Clear the entire framebuffer */
+       memset(p->screen_base, 0, 0x200000);
+
+       pci_set_drvdata(dp, p);
+       return 0;
+}
+
+/* PCI remove: undo everything asiliantfb_pci_init()/init_asiliant() set
+ * up, in reverse order.
+ *
+ * Fix: the colour map allocated by fb_alloc_cmap() in init_asiliant()
+ * was never deallocated, leaking it on every unbind. */
+static void __devexit asiliantfb_remove(struct pci_dev *dp)
+{
+       struct fb_info *p = pci_get_drvdata(dp);
+
+       unregister_framebuffer(p);
+       fb_dealloc_cmap(&p->cmap);
+       iounmap(p->screen_base);
+       release_mem_region(pci_resource_start(dp, 0), pci_resource_len(dp, 0));
+       pci_set_drvdata(dp, NULL);
+       framebuffer_release(p);
+}
+
+/* Bind to the Chips & Technologies 69000 only. */
+static struct pci_device_id asiliantfb_pci_tbl[] __devinitdata = {
+       { PCI_VENDOR_ID_CT, PCI_DEVICE_ID_CT_69000, PCI_ANY_ID, PCI_ANY_ID },
+       { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, asiliantfb_pci_tbl);
+
+static struct pci_driver asiliantfb_driver = {
+       .name =         "asiliantfb",
+       .id_table =     asiliantfb_pci_tbl,
+       .probe =        asiliantfb_pci_init,
+       .remove =       __devexit_p(asiliantfb_remove),
+};
+
+/* Module/built-in entry point: register the PCI driver. */
+int __init asiliantfb_init(void)
+{
+       return pci_module_init(&asiliantfb_driver);
+}
+
+/* Module exit point: unregister the PCI driver. */
+static void __exit asiliantfb_exit(void)
+{
+       pci_unregister_driver(&asiliantfb_driver);
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c
new file mode 100644 (file)
index 0000000..2afc414
--- /dev/null
@@ -0,0 +1,1200 @@
+/*
+ *  SGI GBE frame buffer driver
+ *
+ *  Copyright (C) 1999 Silicon Graphics, Inc. - Jeffrey Newquist
+ *  Copyright (C) 2002 Vivien Chappelier <vivien.chappelier@linux-mips.org>
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License. See the file COPYING in the main directory of this archive for
+ *  more details.
+ */
+
+#include <linux/config.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_X86
+#include <asm/mtrr.h>
+#endif
+#ifdef CONFIG_MIPS
+#include <asm/addrspace.h>
+#endif
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/tlbflush.h>
+
+#include <video/gbe.h>
+
+/* Mapped GBE register block (single instance; set up at probe time). */
+static struct sgi_gbe *gbe;
+
+/* Per-mode state cached between check_var and set_par. */
+struct gbefb_par {
+       struct fb_var_screeninfo var;
+       struct gbe_timing_info timing;
+       int valid;      /* nonzero once 'timing' matches 'var' */
+};
+
+#ifdef CONFIG_SGI_IP32
+#define GBE_BASE       0x16000000 /* SGI O2 */
+#endif
+
+#ifdef CONFIG_X86_VISWS
+#define GBE_BASE       0xd0000000 /* SGI Visual Workstation */
+#endif
+
+/* macro for fastest write-though access to the framebuffer */
+#ifdef CONFIG_MIPS
+#ifdef CONFIG_CPU_R10000
+#define pgprot_fb(_prot) (((_prot) & (~_CACHE_MASK)) | _CACHE_UNCACHED_ACCELERATED)
+#else
+#define pgprot_fb(_prot) (((_prot) & (~_CACHE_MASK)) | _CACHE_CACHABLE_NO_WA)
+#endif
+#endif
+#ifdef CONFIG_X86
+#define pgprot_fb(_prot) ((_prot) | _PAGE_PCD)
+#endif
+
+/*
+ *  RAM we reserve for the frame buffer. This defines the maximum screen
+ *  size
+ */
+#if CONFIG_FB_GBE_MEM > 8
+#error GBE Framebuffer cannot use more than 8MB of memory
+#endif
+
+/* The framebuffer is handed to the hardware as 64KB tiles. */
+#define TILE_SHIFT 16
+#define TILE_SIZE (1 << TILE_SHIFT)
+#define TILE_MASK (TILE_SIZE - 1)
+
+static unsigned int gbe_mem_size = CONFIG_FB_GBE_MEM * 1024*1024;
+static void *gbe_mem;           /* CPU address of the framebuffer */
+static dma_addr_t gbe_dma_addr; /* device address of the framebuffer */
+unsigned long gbe_mem_phys;     /* physical address of the framebuffer */
+
+/* Tile table: one 16-bit entry per 64KB tile, in CPU and DMA views. */
+static struct {
+       uint16_t *cpu;
+       dma_addr_t dma;
+} gbe_tiles;
+
+static int gbe_revision;
+
+static struct fb_info fb_info;
+static int ypan, ywrap;
+
+static uint32_t pseudo_palette[256];
+
+/* "video=gbefb:..." mode string, consumed during init. */
+static char *mode_option __initdata = NULL;
+
+/* default CRT mode */
+static struct fb_var_screeninfo default_var_CRT __initdata = {
+       /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
+       .xres           = 640,
+       .yres           = 480,
+       .xres_virtual   = 640,
+       .yres_virtual   = 480,
+       .xoffset        = 0,
+       .yoffset        = 0,
+       .bits_per_pixel = 8,
+       .grayscale      = 0,
+       .red            = { 0, 8, 0 },
+       .green          = { 0, 8, 0 },
+       .blue           = { 0, 8, 0 },
+       .transp         = { 0, 0, 0 },
+       .nonstd         = 0,
+       .activate       = 0,
+       .height         = -1,
+       .width          = -1,
+       .accel_flags    = 0,
+       .pixclock       = 39722,        /* picoseconds */
+       .left_margin    = 48,
+       .right_margin   = 16,
+       .upper_margin   = 33,
+       .lower_margin   = 10,
+       .hsync_len      = 96,
+       .vsync_len      = 2,
+       .sync           = 0,
+       .vmode          = FB_VMODE_NONINTERLACED,
+};
+
+/* default LCD mode */
+static struct fb_var_screeninfo default_var_LCD __initdata = {
+       /* 1600x1024, 8 bpp */
+       .xres           = 1600,
+       .yres           = 1024,
+       .xres_virtual   = 1600,
+       .yres_virtual   = 1024,
+       .xoffset        = 0,
+       .yoffset        = 0,
+       .bits_per_pixel = 8,
+       .grayscale      = 0,
+       .red            = { 0, 8, 0 },
+       .green          = { 0, 8, 0 },
+       .blue           = { 0, 8, 0 },
+       .transp         = { 0, 0, 0 },
+       .nonstd         = 0,
+       .activate       = 0,
+       .height         = -1,
+       .width          = -1,
+       .accel_flags    = 0,
+       .pixclock       = 9353,
+       .left_margin    = 20,
+       .right_margin   = 30,
+       .upper_margin   = 37,
+       .lower_margin   = 3,
+       .hsync_len      = 20,
+       .vsync_len      = 3,
+       .sync           = 0,
+       .vmode          = FB_VMODE_NONINTERLACED
+};
+
+/* default modedb mode */
+/* 640x480, 60 Hz, Non-Interlaced (25.172 MHz dotclock) */
+static struct fb_videomode default_mode_CRT __initdata = {
+       .refresh        = 60,
+       .xres           = 640,
+       .yres           = 480,
+       .pixclock       = 39722,
+       .left_margin    = 48,
+       .right_margin   = 16,
+       .upper_margin   = 33,
+       .lower_margin   = 10,
+       .hsync_len      = 96,
+       .vsync_len      = 2,
+       .sync           = 0,
+       .vmode          = FB_VMODE_NONINTERLACED,
+};
+/* 1600x1024 SGI flatpanel 1600sw */
+static struct fb_videomode default_mode_LCD __initdata = {
+       /* 1600x1024, 8 bpp */
+       .xres           = 1600,
+       .yres           = 1024,
+       .pixclock       = 9353,
+       .left_margin    = 20,
+       .right_margin   = 30,
+       .upper_margin   = 37,
+       .lower_margin   = 3,
+       .hsync_len      = 20,
+       .vsync_len      = 3,
+       .vmode          = FB_VMODE_NONINTERLACED,
+};
+
+/* CRT defaults unless flat_panel_enabled switches these to the LCD set. */
+struct fb_videomode *default_mode = &default_mode_CRT;
+struct fb_var_screeninfo *default_var = &default_var_CRT;
+
+static int flat_panel_enabled = 0;
+
+static struct gbefb_par par_current;
+
+/* Bring the GBE out of reset by enabling the dotclock PLL via the
+ * control/status register. */
+static void gbe_reset(void)
+{
+       /* Turn on dotclock PLL */
+       gbe->ctrlstat = 0x300aa000;
+}
+
+
+/*
+ * Function:   gbe_turn_off
+ * Parameters: (None)
+ * Description:        This should turn off the monitor and gbe.  This is used
+ *              when switching between the serial console and the graphics
+ *              console.
+ *
+ * Shutdown order matters: overlay/frame/DID DMA are stopped first, then
+ * we wait for the DMA engines and the beam position before freezing the
+ * pixel counter, stopping the dot clock and resetting the frame FIFO.
+ * Each step is polled with a bounded busy-wait and logs on timeout.
+ */
+
+void gbe_turn_off(void)
+{
+       int i;
+       unsigned int val, x, y, vpixen_off;
+
+       /* check if pixel counter is on */
+       val = gbe->vt_xy;
+       if (GET_GBE_FIELD(VT_XY, FREEZE, val) == 1)
+               return;
+
+       /* turn off DMA */
+       val = gbe->ovr_control;
+       SET_GBE_FIELD(OVR_CONTROL, OVR_DMA_ENABLE, val, 0);
+       gbe->ovr_control = val;
+       udelay(1000);
+       val = gbe->frm_control;
+       SET_GBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, val, 0);
+       gbe->frm_control = val;
+       udelay(1000);
+       val = gbe->did_control;
+       SET_GBE_FIELD(DID_CONTROL, DID_DMA_ENABLE, val, 0);
+       gbe->did_control = val;
+       udelay(1000);
+
+       /* We have to wait through two vertical retrace periods before
+        * the pixel DMA is turned off for sure. */
+       for (i = 0; i < 10000; i++) {
+               val = gbe->frm_inhwctrl;
+               if (GET_GBE_FIELD(FRM_INHWCTRL, FRM_DMA_ENABLE, val)) {
+                       udelay(10);
+               } else {
+                       val = gbe->ovr_inhwctrl;
+                       if (GET_GBE_FIELD(OVR_INHWCTRL, OVR_DMA_ENABLE, val)) {
+                               udelay(10);
+                       } else {
+                               val = gbe->did_inhwctrl;
+                               if (GET_GBE_FIELD(DID_INHWCTRL, DID_DMA_ENABLE, val)) {
+                                       udelay(10);
+                               } else
+                                       break;
+                       }
+               }
+       }
+       if (i == 10000)
+               printk(KERN_ERR "gbefb: turn off DMA timed out\n");
+
+       /* wait for vpixen_off */
+       val = gbe->vt_vpixen;
+       vpixen_off = GET_GBE_FIELD(VT_VPIXEN, VPIXEN_OFF, val);
+
+       /* wait for the beam to pass above vpixen_off ...
+        * NOTE(review): x is read back here and below but never used. */
+       for (i = 0; i < 100000; i++) {
+               val = gbe->vt_xy;
+               x = GET_GBE_FIELD(VT_XY, X, val);
+               y = GET_GBE_FIELD(VT_XY, Y, val);
+               if (y < vpixen_off)
+                       break;
+               udelay(1);
+       }
+       if (i == 100000)
+               printk(KERN_ERR
+                      "gbefb: wait for vpixen_off timed out\n");
+       /* ... then below it again, i.e. one full pass through the line */
+       for (i = 0; i < 10000; i++) {
+               val = gbe->vt_xy;
+               x = GET_GBE_FIELD(VT_XY, X, val);
+               y = GET_GBE_FIELD(VT_XY, Y, val);
+               if (y > vpixen_off)
+                       break;
+               udelay(1);
+       }
+       if (i == 10000)
+               printk(KERN_ERR "gbefb: wait for vpixen_off timed out\n");
+
+       /* turn off pixel counter */
+       val = 0;
+       SET_GBE_FIELD(VT_XY, FREEZE, val, 1);
+       gbe->vt_xy = val;
+       udelay(10000);
+       for (i = 0; i < 10000; i++) {
+               val = gbe->vt_xy;
+               if (GET_GBE_FIELD(VT_XY, FREEZE, val) != 1)
+                       udelay(10);
+               else
+                       break;
+       }
+       if (i == 10000)
+               printk(KERN_ERR "gbefb: turn off pixel clock timed out\n");
+
+       /* turn off dot clock */
+       val = gbe->dotclock;
+       SET_GBE_FIELD(DOTCLK, RUN, val, 0);
+       gbe->dotclock = val;
+       udelay(10000);
+       for (i = 0; i < 10000; i++) {
+               val = gbe->dotclock;
+               if (GET_GBE_FIELD(DOTCLK, RUN, val))
+                       udelay(10);
+               else
+                       break;
+       }
+       if (i == 10000)
+               printk(KERN_ERR "gbefb: turn off dotclock timed out\n");
+
+       /* reset the frame DMA FIFO */
+       val = gbe->frm_size_tile;
+       SET_GBE_FIELD(FRM_SIZE_TILE, FRM_FIFO_RESET, val, 1);
+       gbe->frm_size_tile = val;
+       SET_GBE_FIELD(FRM_SIZE_TILE, FRM_FIFO_RESET, val, 0);
+       gbe->frm_size_tile = val;
+}
+
+/*
+ * Reverse of gbe_turn_off(): start the dot clock, unfreeze the pixel
+ * counter and re-enable frame DMA, polling each step with a bounded
+ * busy-wait and logging on timeout.
+ */
+static void gbe_turn_on(void)
+{
+       unsigned int val, i;
+
+       /*
+        * Check if pixel counter is off, for unknown reason this
+        * code hangs Visual Workstations
+        */
+       if (gbe_revision < 2) {
+               val = gbe->vt_xy;
+               if (GET_GBE_FIELD(VT_XY, FREEZE, val) == 0)
+                       return;
+       }
+
+       /* turn on dot clock */
+       val = gbe->dotclock;
+       SET_GBE_FIELD(DOTCLK, RUN, val, 1);
+       gbe->dotclock = val;
+       udelay(10000);
+       for (i = 0; i < 10000; i++) {
+               val = gbe->dotclock;
+               if (GET_GBE_FIELD(DOTCLK, RUN, val) != 1)
+                       udelay(10);
+               else
+                       break;
+       }
+       if (i == 10000)
+               printk(KERN_ERR "gbefb: turn on dotclock timed out\n");
+
+       /* turn on pixel counter */
+       val = 0;
+       SET_GBE_FIELD(VT_XY, FREEZE, val, 0);
+       gbe->vt_xy = val;
+       udelay(10000);
+       for (i = 0; i < 10000; i++) {
+               val = gbe->vt_xy;
+               if (GET_GBE_FIELD(VT_XY, FREEZE, val))
+                       udelay(10);
+               else
+                       break;
+       }
+       if (i == 10000)
+               printk(KERN_ERR "gbefb: turn on pixel clock timed out\n");
+
+       /* turn on DMA */
+       val = gbe->frm_control;
+       SET_GBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, val, 1);
+       gbe->frm_control = val;
+       udelay(1000);
+       for (i = 0; i < 10000; i++) {
+               val = gbe->frm_inhwctrl;
+               if (GET_GBE_FIELD(FRM_INHWCTRL, FRM_DMA_ENABLE, val) != 1)
+                       udelay(10);
+               else
+                       break;
+       }
+       if (i == 10000)
+               printk(KERN_ERR "gbefb: turn on DMA timed out\n");
+}
+
+/*
+ *  Blank the display.
+ *
+ *  Only full on/off is implemented; the intermediate VESA levels
+ *  (no vsync / no hsync / powerdown) are accepted but ignored.
+ *  Always reports success.
+ */
+static int gbefb_blank(int blank, struct fb_info *info)
+{
+       /* 0 unblank, 1 blank, 2 no vsync, 3 no hsync, 4 off */
+       if (blank == 0)
+               gbe_turn_on();          /* unblank */
+       else if (blank == 1)
+               gbe_turn_off();         /* blank */
+       /* levels 2..4: nothing to do */
+
+       return 0;
+}
+
+/*
+ *  Setup flatpanel related registers.
+ *
+ *  Programs the H/V drive polarities from the mode flags, then
+ *  hard-codes the geometry and PLL divisors for the 1600x1024 SGI
+ *  1600SW panel (the only flat panel this driver drives).
+ */
+static void gbefb_setup_flatpanel(struct gbe_timing_info *timing)
+{
+       int fp_wid, fp_hgt, fp_vbs, fp_vbe;
+       u32 outputVal = 0;
+
+       SET_GBE_FIELD(VT_FLAGS, HDRV_INVERT, outputVal,
+               (timing->flags & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1);
+       SET_GBE_FIELD(VT_FLAGS, VDRV_INVERT, outputVal,
+               (timing->flags & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1);
+       gbe->vt_flags = outputVal;
+
+       /* Turn on the flat panel */
+       fp_wid = 1600;
+       fp_hgt = 1024;
+       fp_vbs = 0;
+       /* NOTE(review): fp_vbe is set to the panel width (1600), not a
+        * vertical quantity - confirm against GBE FP_DE semantics. */
+       fp_vbe = 1600;
+       timing->pll_m = 4;
+       timing->pll_n = 1;
+       timing->pll_p = 0;
+
+       outputVal = 0;
+       SET_GBE_FIELD(FP_DE, ON, outputVal, fp_vbs);
+       SET_GBE_FIELD(FP_DE, OFF, outputVal, fp_vbe);
+       gbe->fp_de = outputVal;
+       outputVal = 0;
+       SET_GBE_FIELD(FP_HDRV, OFF, outputVal, fp_wid);
+       gbe->fp_hdrv = outputVal;
+       outputVal = 0;
+       SET_GBE_FIELD(FP_VDRV, ON, outputVal, 1);
+       SET_GBE_FIELD(FP_VDRV, OFF, outputVal, fp_hgt + 1);
+       gbe->fp_vdrv = outputVal;
+}
+
+/* Dot clock PLL parameters (see compute_gbe_timing()). */
+struct gbe_pll_info {
+       int clock_rate;         /* crystal frequency, MHz */
+       int fvco_min;           /* minimum VCO frequency, MHz */
+       int fvco_max;           /* maximum VCO frequency, MHz */
+};
+
+/* [0]: 20 MHz crystal (gbe_revision < 2), [1]: 27 MHz crystal. */
+static struct gbe_pll_info gbe_pll_table[2] = {
+       { 20, 80, 220 },
+       { 27, 80, 220 },
+};
+
+/*
+ * Find the PLL divisors (m, n, p) whose output period is closest to
+ * var->pixclock, and, when 'timing' is non-NULL, derive the complete
+ * video timing from 'var'.
+ *
+ * Returns the achieved pixel clock period (same unit as var->pixclock)
+ * or -EINVAL when no divisor combination fits the VCO constraints.
+ */
+static int compute_gbe_timing(struct fb_var_screeninfo *var,
+                             struct gbe_timing_info *timing)
+{
+       int pll_m, pll_n, pll_p, error, best_m, best_n, best_p, best_error;
+       int pixclock;
+       struct gbe_pll_info *gbe_pll;
+
+       if (gbe_revision < 2)
+               gbe_pll = &gbe_pll_table[0];
+       else
+               gbe_pll = &gbe_pll_table[1];
+
+       /* Determine valid resolution and timing
+        * GBE crystal runs at 20Mhz or 27Mhz
+        * pll_m, pll_n, pll_p define the following frequencies
+        * fvco = pll_m * 20Mhz / pll_n
+        * fout = fvco / (2**pll_p) */
+       best_error = 1000000000;
+       best_n = best_m = best_p = 0;
+       for (pll_p = 0; pll_p < 4; pll_p++)
+               for (pll_m = 1; pll_m < 256; pll_m++)
+                       for (pll_n = 1; pll_n < 64; pll_n++) {
+                               pixclock = (1000000 / gbe_pll->clock_rate) *
+                                               (pll_n << pll_p) / pll_m;
+
+                               error = var->pixclock - pixclock;
+
+                               if (error < 0)
+                                       error = -error;
+
+                               /* NOTE(review): both sides of the VCO range
+                                * check use integer division, making it
+                                * coarse - confirm against
+                                * fvco = pll_m * clock_rate / pll_n. */
+                               if (error < best_error &&
+                                   pll_m / pll_n >
+                                   gbe_pll->fvco_min / gbe_pll->clock_rate &&
+                                   pll_m / pll_n <
+                                   gbe_pll->fvco_max / gbe_pll->clock_rate) {
+                                       best_error = error;
+                                       best_m = pll_m;
+                                       best_n = pll_n;
+                                       best_p = pll_p;
+                               }
+                       }
+
+       if (!best_n || !best_m)
+               return -EINVAL; /* Resolution too high */
+
+       pixclock = (1000000 / gbe_pll->clock_rate) *
+               (best_n << best_p) / best_m;
+
+       /* set video timing information */
+       if (timing) {
+               timing->width = var->xres;
+               timing->height = var->yres;
+               timing->pll_m = best_m;
+               timing->pll_n = best_n;
+               timing->pll_p = best_p;
+               timing->cfreq = gbe_pll->clock_rate * 1000 * timing->pll_m /
+                       (timing->pll_n << timing->pll_p);
+               timing->htotal = var->left_margin + var->xres +
+                               var->right_margin + var->hsync_len;
+               timing->vtotal = var->upper_margin + var->yres +
+                               var->lower_margin + var->vsync_len;
+               timing->fields_sec = 1000 * timing->cfreq / timing->htotal *
+                               1000 / timing->vtotal;
+               timing->hblank_start = var->xres;
+               timing->vblank_start = var->yres;
+               timing->hblank_end = timing->htotal;
+               timing->hsync_start = var->xres + var->right_margin + 1;
+               timing->hsync_end = timing->hsync_start + var->hsync_len;
+               timing->vblank_end = timing->vtotal;
+               timing->vsync_start = var->yres + var->lower_margin + 1;
+               timing->vsync_end = timing->vsync_start + var->vsync_len;
+       }
+
+       return pixclock;
+}
+
+/*
+ * Program the video timing generator from a filled-in gbe_timing_info:
+ * dot clock PLL divisors (left stopped), pixel counter limits,
+ * sync/blank windows, and the hardware start offsets for DID, cursor
+ * and VC.  Also disables sync-on-green.
+ */
+static void gbe_set_timing_info(struct gbe_timing_info *timing)
+{
+       int temp;
+       unsigned int val;
+
+       /* setup dot clock PLL */
+       val = 0;
+       SET_GBE_FIELD(DOTCLK, M, val, timing->pll_m - 1);
+       SET_GBE_FIELD(DOTCLK, N, val, timing->pll_n - 1);
+       SET_GBE_FIELD(DOTCLK, P, val, timing->pll_p);
+       SET_GBE_FIELD(DOTCLK, RUN, val, 0);     /* do not start yet */
+       gbe->dotclock = val;
+       udelay(10000);
+
+       /* setup pixel counter */
+       val = 0;
+       SET_GBE_FIELD(VT_XYMAX, MAXX, val, timing->htotal);
+       SET_GBE_FIELD(VT_XYMAX, MAXY, val, timing->vtotal);
+       gbe->vt_xymax = val;
+
+       /* setup video timing signals */
+       val = 0;
+       SET_GBE_FIELD(VT_VSYNC, VSYNC_ON, val, timing->vsync_start);
+       SET_GBE_FIELD(VT_VSYNC, VSYNC_OFF, val, timing->vsync_end);
+       gbe->vt_vsync = val;
+       val = 0;
+       SET_GBE_FIELD(VT_HSYNC, HSYNC_ON, val, timing->hsync_start);
+       SET_GBE_FIELD(VT_HSYNC, HSYNC_OFF, val, timing->hsync_end);
+       gbe->vt_hsync = val;
+       val = 0;
+       SET_GBE_FIELD(VT_VBLANK, VBLANK_ON, val, timing->vblank_start);
+       SET_GBE_FIELD(VT_VBLANK, VBLANK_OFF, val, timing->vblank_end);
+       gbe->vt_vblank = val;
+       val = 0;
+       SET_GBE_FIELD(VT_HBLANK, HBLANK_ON, val,
+                     timing->hblank_start - 5);
+       SET_GBE_FIELD(VT_HBLANK, HBLANK_OFF, val,
+                     timing->hblank_end - 3);
+       gbe->vt_hblank = val;
+
+       /* setup internal timing signals */
+       val = 0;
+       SET_GBE_FIELD(VT_VCMAP, VCMAP_ON, val, timing->vblank_start);
+       SET_GBE_FIELD(VT_VCMAP, VCMAP_OFF, val, timing->vblank_end);
+       gbe->vt_vcmap = val;
+       val = 0;
+       SET_GBE_FIELD(VT_HCMAP, HCMAP_ON, val, timing->hblank_start);
+       SET_GBE_FIELD(VT_HCMAP, HCMAP_OFF, val, timing->hblank_end);
+       gbe->vt_hcmap = val;
+
+       /* NOTE(review): temp appears to be a start-of-frame Y offset,
+        * forced non-positive before being written to the *_STARTY
+        * fields - confirm against GBE register docs. */
+       val = 0;
+       temp = timing->vblank_start - timing->vblank_end - 1;
+       if (temp > 0)
+               temp = -temp;
+
+       if (flat_panel_enabled)
+               gbefb_setup_flatpanel(timing);
+
+       SET_GBE_FIELD(DID_START_XY, DID_STARTY, val, (u32) temp);
+       if (timing->hblank_end >= 20)
+               SET_GBE_FIELD(DID_START_XY, DID_STARTX, val,
+                             timing->hblank_end - 20);
+       else
+               SET_GBE_FIELD(DID_START_XY, DID_STARTX, val,
+                             timing->htotal - (20 - timing->hblank_end));
+       gbe->did_start_xy = val;
+
+       val = 0;
+       SET_GBE_FIELD(CRS_START_XY, CRS_STARTY, val, (u32) (temp + 1));
+       if (timing->hblank_end >= GBE_CRS_MAGIC)
+               SET_GBE_FIELD(CRS_START_XY, CRS_STARTX, val,
+                             timing->hblank_end - GBE_CRS_MAGIC);
+       else
+               SET_GBE_FIELD(CRS_START_XY, CRS_STARTX, val,
+                             timing->htotal - (GBE_CRS_MAGIC -
+                                               timing->hblank_end));
+       gbe->crs_start_xy = val;
+
+       val = 0;
+       SET_GBE_FIELD(VC_START_XY, VC_STARTY, val, (u32) temp);
+       SET_GBE_FIELD(VC_START_XY, VC_STARTX, val, timing->hblank_end - 4);
+       gbe->vc_start_xy = val;
+
+       val = 0;
+       temp = timing->hblank_end - GBE_PIXEN_MAGIC_ON;
+       if (temp < 0)
+               temp += timing->htotal; /* allow blank to wrap around */
+
+       SET_GBE_FIELD(VT_HPIXEN, HPIXEN_ON, val, temp);
+       SET_GBE_FIELD(VT_HPIXEN, HPIXEN_OFF, val,
+                     ((temp + timing->width -
+                       GBE_PIXEN_MAGIC_OFF) % timing->htotal));
+       gbe->vt_hpixen = val;
+
+       val = 0;
+       SET_GBE_FIELD(VT_VPIXEN, VPIXEN_ON, val, timing->vblank_end);
+       SET_GBE_FIELD(VT_VPIXEN, VPIXEN_OFF, val, timing->vblank_start);
+       gbe->vt_vpixen = val;
+
+       /* turn off sync on green */
+       val = 0;
+       SET_GBE_FIELD(VT_FLAGS, SYNC_LOW, val, 1);
+       gbe->vt_flags = val;
+}
+
+/*
+ *  Set the hardware according to 'par'.
+ *
+ *  Recomputes the timing from info->var, shuts the GBE down, programs
+ *  the timing generator, window IDs, tile table and framebuffer size,
+ *  then restarts the GBE and reloads the gamma and color maps.
+ *  NOTE(review): the compute_gbe_timing() return value is ignored here;
+ *  presumably gbefb_check_var() already validated the mode - confirm.
+ */
+
+static int gbefb_set_par(struct fb_info *info)
+{
+       int i;
+       unsigned int val;
+       int wholeTilesX, partTilesX, maxPixelsPerTileX;
+       int height_pix;
+       int xpmax, ypmax;       /* Monitor resolution */
+       int bytesPerPixel;      /* Bytes per pixel */
+       struct gbefb_par *par = (struct gbefb_par *) info->par;
+
+       compute_gbe_timing(&info->var, &par->timing);
+
+       bytesPerPixel = info->var.bits_per_pixel / 8;
+       info->fix.line_length = info->var.xres_virtual * bytesPerPixel;
+       xpmax = par->timing.width;
+       ypmax = par->timing.height;
+
+       /* turn off GBE */
+       gbe_turn_off();
+
+       /* set timing info */
+       gbe_set_timing_info(&par->timing);
+
+       /* initialize DIDs */
+       val = 0;
+       switch (bytesPerPixel) {
+       case 1:
+               SET_GBE_FIELD(WID, TYP, val, GBE_CMODE_I8);
+               break;
+       case 2:
+               SET_GBE_FIELD(WID, TYP, val, GBE_CMODE_ARGB5);
+               break;
+       case 4:
+               SET_GBE_FIELD(WID, TYP, val, GBE_CMODE_RGB8);
+               break;
+       }
+       SET_GBE_FIELD(WID, BUF, val, GBE_BMODE_BOTH);
+
+       for (i = 0; i < 32; i++)
+               gbe->mode_regs[i] = val;
+
+       /* Initialize interrupts */
+       gbe->vt_intr01 = 0xffffffff;
+       gbe->vt_intr23 = 0xffffffff;
+
+       /* HACK:
+          The GBE hardware uses a tiled memory to screen mapping. Tiles are
+          blocks of 512x128, 256x128 or 128x128 pixels, respectively for 8bit,
+          16bit and 32 bit modes (64 kB). They cover the screen with partial
+          tiles on the right and/or bottom of the screen if needed.
+          For example in 640x480 8 bit mode the mapping is:
+
+          <-------- 640 ----->
+          <---- 512 ----><128|384 offscreen>
+          ^  ^
+          | 128    [tile 0]        [tile 1]
+          |  v
+          ^
+          4 128    [tile 2]        [tile 3]
+          8  v
+          0  ^
+          128    [tile 4]        [tile 5]
+          |  v
+          |  ^
+          v  96    [tile 6]        [tile 7]
+          32 offscreen
+
+          Tiles have the advantage that they can be allocated individually in
+          memory. However, this mapping is not linear at all, which is not
+          really convenient. In order to support linear addressing, the GBE
+          DMA hardware is fooled into thinking the screen is only one tile
+          large but has a greater height, so that the DMA transfer covers
+          the same region.
+          Tiles are still allocated as independent chunks of 64KB of
+          continuous physical memory and remapped so that the kernel sees the
+          framebuffer as a continuous virtual memory. The GBE tile table is
+          set up so that each tile references one of these 64k blocks:
+
+          GBE -> tile list    framebuffer           TLB   <------------ CPU
+                 [ tile 0 ] -> [ 64KB ]  <- [ 16x 4KB page entries ]     ^
+                    ...           ...              ...       linear virtual FB
+                 [ tile n ] -> [ 64KB ]  <- [ 16x 4KB page entries ]     v
+
+
+          The GBE hardware is then told that the buffer is 512*tweaked_height,
+          with tweaked_height = real_width*real_height/pixels_per_tile.
+          Thus the GBE hardware will scan the first tile, filling the first 64k
+          covered region of the screen, and then will proceed to the next
+          tile, until the whole screen is covered.
+
+          Here is what would happen at 640x480 8bit:
+
+          normal tiling               linear
+          ^   11111111111111112222    11111111111111111111  ^
+          128 11111111111111112222    11111111111111111111 102 lines
+              11111111111111112222    11111111111111111111  v
+          V   11111111111111112222    11111111222222222222
+              33333333333333334444    22222222222222222222
+              33333333333333334444    22222222222222222222
+              <      512     >        <  256 >               102*640+256 = 64k
+
+          NOTE: The only mode for which this is not working is 800x600 8bit,
+          as 800*600/512 = 937.5 which is not integer and thus causes
+          flickering.
+          I guess this is not so important as one can use 640x480 8bit or
+          800x600 16bit anyway.
+        */
+
+       /* Tell gbe about the tiles table location */
+       /* tile_ptr -> [ tile 1 ] -> FB mem */
+       /*             [ tile 2 ] -> FB mem */
+       /*               ...                */
+       val = 0;
+       SET_GBE_FIELD(FRM_CONTROL, FRM_TILE_PTR, val, gbe_tiles.dma >> 9);
+       SET_GBE_FIELD(FRM_CONTROL, FRM_DMA_ENABLE, val, 0); /* do not start */
+       SET_GBE_FIELD(FRM_CONTROL, FRM_LINEAR, val, 0);
+       gbe->frm_control = val;
+
+       /* one whole tile wide, no partial tile: the linear trick above */
+       maxPixelsPerTileX = 512 / bytesPerPixel;
+       wholeTilesX = 1;
+       partTilesX = 0;
+
+       /* Initialize the framebuffer */
+       val = 0;
+       SET_GBE_FIELD(FRM_SIZE_TILE, FRM_WIDTH_TILE, val, wholeTilesX);
+       SET_GBE_FIELD(FRM_SIZE_TILE, FRM_RHS, val, partTilesX);
+
+       switch (bytesPerPixel) {
+       case 1:
+               SET_GBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, val,
+                             GBE_FRM_DEPTH_8);
+               break;
+       case 2:
+               SET_GBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, val,
+                             GBE_FRM_DEPTH_16);
+               break;
+       case 4:
+               SET_GBE_FIELD(FRM_SIZE_TILE, FRM_DEPTH, val,
+                             GBE_FRM_DEPTH_32);
+               break;
+       }
+       gbe->frm_size_tile = val;
+
+       /* compute tweaked height */
+       height_pix = xpmax * ypmax / maxPixelsPerTileX;
+
+       val = 0;
+       SET_GBE_FIELD(FRM_SIZE_PIXEL, FB_HEIGHT_PIX, val, height_pix);
+       gbe->frm_size_pixel = val;
+
+       /* turn off DID and overlay DMA */
+       gbe->did_control = 0;
+       gbe->ovr_width_tile = 0;
+
+       /* Turn off mouse cursor */
+       gbe->crs_ctl = 0;
+
+       /* Turn on GBE */
+       gbe_turn_on();
+
+       /* Initialize the gamma map to identity (linear ramp) */
+       udelay(10);
+       for (i = 0; i < 256; i++)
+               gbe->gmap[i] = (i << 24) | (i << 16) | (i << 8);
+
+       /* Initialize the color map to a grayscale ramp */
+       for (i = 0; i < 256; i++) {
+               int j;
+
+               /* wait for a free entry in the color map FIFO */
+               for (j = 0; j < 1000 && gbe->cm_fifo >= 63; j++)
+                       udelay(10);
+               if (j == 1000)
+                       printk(KERN_ERR "gbefb: cmap FIFO timeout\n");
+
+               gbe->cmap[i] = (i << 8) | (i << 16) | (i << 24);
+       }
+
+       return 0;
+}
+
+/*
+ * Fill in the fixed screen parameters for the given variable settings.
+ * Everything except the visual and line length is mode-independent;
+ * the visual is pseudocolor at 8 bpp and truecolor otherwise.
+ */
+static void gbefb_encode_fix(struct fb_fix_screeninfo *fix,
+                            struct fb_var_screeninfo *var)
+{
+       memset(fix, 0, sizeof(*fix));
+
+       strcpy(fix->id, "SGI GBE");
+
+       /* framebuffer memory and register apertures */
+       fix->smem_start = (unsigned long) gbe_mem;
+       fix->smem_len = gbe_mem_size;
+       fix->mmio_start = GBE_BASE;
+       fix->mmio_len = sizeof(struct sgi_gbe);
+
+       /* packed pixels, no acceleration, no panning/wrapping */
+       fix->type = FB_TYPE_PACKED_PIXELS;
+       fix->type_aux = 0;
+       fix->accel = FB_ACCEL_NONE;
+       fix->ywrapstep = 0;
+       fix->xpanstep = 0;
+       fix->ypanstep = 0;
+
+       fix->visual = (var->bits_per_pixel == 8) ?
+               FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+
+       fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
+}
+
+/*
+ *  Set a single color register. The values supplied are already
+ *  rounded down to the hardware's capabilities (according to the
+ *  entries in the var structure). Return != 0 for invalid regno.
+ *
+ *  At 8 bpp the entry goes into the hardware color map (after waiting
+ *  for FIFO space); at 15/16/32 bpp it only fills the software
+ *  pseudo_palette used by the cfb_* drawing helpers.
+ */
+
+static int gbefb_setcolreg(unsigned regno, unsigned red, unsigned green,
+                            unsigned blue, unsigned transp,
+                            struct fb_info *info)
+{
+       int i;
+
+       if (regno > 255)
+               return 1;
+       /* scale 16-bit fb values down to 8 bits per component */
+       red >>= 8;
+       green >>= 8;
+       blue >>= 8;
+
+       switch (info->var.bits_per_pixel) {
+       case 8:
+               /* wait for the color map FIFO to have a free entry */
+               for (i = 0; i < 1000 && gbe->cm_fifo >= 63; i++)
+                       udelay(10);
+               if (i == 1000) {
+                       printk(KERN_ERR "gbefb: cmap FIFO timeout\n");
+                       return 1;
+               }
+               gbe->cmap[regno] = (red << 24) | (green << 16) | (blue << 8);
+               break;
+       case 15:
+       case 16:
+               /* 5 bits per component (ARGB 1555 layout from check_var) */
+               red >>= 3;
+               green >>= 3;
+               blue >>= 3;
+               pseudo_palette[regno] =
+                       (red << info->var.red.offset) |
+                       (green << info->var.green.offset) |
+                       (blue << info->var.blue.offset);
+               break;
+       case 32:
+               pseudo_palette[regno] =
+                       (red << info->var.red.offset) |
+                       (green << info->var.green.offset) |
+                       (blue << info->var.blue.offset);
+               break;
+       }
+
+       return 0;
+}
+
+/*
+ *  Check video mode validity, eventually modify var to best match.
+ *
+ *  Rounds bits_per_pixel up to 8/16/32, verifies the mode is mappable
+ *  with the tile-table trick, computes the achievable pixel clock and
+ *  fills in the color field layout and margin timings.
+ *  Returns 0 on success, -EINVAL for an unsupported mode, -ENOMEM when
+ *  the virtual resolution does not fit in framebuffer memory.
+ */
+static int gbefb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+       unsigned int line_length;
+       struct gbe_timing_info timing;
+       int pixclock;
+
+       /* Limit bpp to 8, 16, and 32 */
+       if (var->bits_per_pixel <= 8)
+               var->bits_per_pixel = 8;
+       else if (var->bits_per_pixel <= 16)
+               var->bits_per_pixel = 16;
+       else if (var->bits_per_pixel <= 32)
+               var->bits_per_pixel = 32;
+       else
+               return -EINVAL;
+
+       /* Check the mode can be mapped linearly with the tile table trick. */
+       /* This requires width x height x bytes/pixel be a multiple of 512 */
+       if ((var->xres * var->yres * var->bits_per_pixel) & 4095)
+               return -EINVAL;
+
+       var->grayscale = 0;     /* No grayscale for now */
+
+       /* var->pixclock is unsigned (__u32), so the historical
+        * "(var->pixclock = compute_gbe_timing(...)) < 0" test could
+        * never detect a failure.  Use a signed temporary instead. */
+       pixclock = compute_gbe_timing(var, &timing);
+       if (pixclock < 0)
+               return -EINVAL;
+       var->pixclock = pixclock;
+
+       /* Adjust virtual resolution, if necessary */
+       if (var->xres > var->xres_virtual || (!ywrap && !ypan))
+               var->xres_virtual = var->xres;
+       if (var->yres > var->yres_virtual || (!ywrap && !ypan))
+               var->yres_virtual = var->yres;
+
+       if (var->vmode & FB_VMODE_CONUPDATE) {
+               var->vmode |= FB_VMODE_YWRAP;
+               var->xoffset = info->var.xoffset;
+               var->yoffset = info->var.yoffset;
+       }
+
+       /* Memory limit */
+       line_length = var->xres_virtual * var->bits_per_pixel / 8;
+       if (line_length * var->yres_virtual > gbe_mem_size)
+               return -ENOMEM; /* Virtual resolution too high */
+
+       /* Color field layout per depth (16 bpp is RGB 1555) */
+       switch (var->bits_per_pixel) {
+       case 8:
+               var->red.offset = 0;
+               var->red.length = 8;
+               var->green.offset = 0;
+               var->green.length = 8;
+               var->blue.offset = 0;
+               var->blue.length = 8;
+               var->transp.offset = 0;
+               var->transp.length = 0;
+               break;
+       case 16:                /* RGB 1555 */
+               var->red.offset = 10;
+               var->red.length = 5;
+               var->green.offset = 5;
+               var->green.length = 5;
+               var->blue.offset = 0;
+               var->blue.length = 5;
+               var->transp.offset = 0;
+               var->transp.length = 0;
+               break;
+       case 32:                /* RGB 8888 */
+               var->red.offset = 24;
+               var->red.length = 8;
+               var->green.offset = 16;
+               var->green.length = 8;
+               var->blue.offset = 8;
+               var->blue.length = 8;
+               var->transp.offset = 0;
+               var->transp.length = 8;
+               break;
+       }
+       var->red.msb_right = 0;
+       var->green.msb_right = 0;
+       var->blue.msb_right = 0;
+       var->transp.msb_right = 0;
+
+       /* Report the margins back from the achieved timing */
+       var->left_margin = timing.htotal - timing.hsync_end;
+       var->right_margin = timing.hsync_start - timing.width;
+       var->upper_margin = timing.vtotal - timing.vsync_end;
+       var->lower_margin = timing.vsync_start - timing.height;
+       var->hsync_len = timing.hsync_end - timing.hsync_start;
+       var->vsync_len = timing.vsync_end - timing.vsync_start;
+
+       return 0;
+}
+
+/*
+ * Map the framebuffer into user space.  The framebuffer is physically
+ * made of independent 64KB tiles (see the tiling HACK comment in
+ * gbefb_set_par), so each tile is remapped separately, translating the
+ * linear fb offset through the tile table.
+ */
+static int gbefb_mmap(struct fb_info *info, struct file *file,
+                       struct vm_area_struct *vma)
+{
+       unsigned long size = vma->vm_end - vma->vm_start;
+       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+       unsigned long addr;
+       unsigned long phys_addr, phys_size;
+       u16 *tile;
+
+       /* check range */
+       if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
+               return -EINVAL;
+       if (offset + size > gbe_mem_size)
+               return -EINVAL;
+
+       /* remap using the fastest write-through mode on architecture */
+       /* try not polluting the cache when possible */
+       pgprot_val(vma->vm_page_prot) =
+               pgprot_fb(pgprot_val(vma->vm_page_prot));
+
+       vma->vm_flags |= VM_IO | VM_RESERVED;
+       vma->vm_file = file;
+
+       /* look for the starting tile */
+       tile = &gbe_tiles.cpu[offset >> TILE_SHIFT];
+       addr = vma->vm_start;
+       offset &= TILE_MASK;    /* offset within the first tile */
+
+       /* remap each tile separately */
+       do {
+               /* tile entry holds the tile's physical page frame */
+               phys_addr = (((unsigned long) (*tile)) << TILE_SHIFT) + offset;
+               if ((offset + size) < TILE_SIZE)
+                       phys_size = size;
+               else
+                       phys_size = TILE_SIZE - offset;
+
+               if (remap_page_range
+                   (vma, addr, phys_addr, phys_size, vma->vm_page_prot))
+                       return -EAGAIN;
+
+               /* subsequent tiles are mapped from their start */
+               offset = 0;
+               size -= phys_size;
+               addr += phys_size;
+               tile++;
+       } while (size);
+
+       return 0;
+}
+
+/* Framebuffer operations: mode/palette handling is GBE-specific,
+ * drawing uses the generic unaccelerated cfb_* helpers plus a software
+ * cursor. */
+static struct fb_ops gbefb_ops = {
+       .owner          = THIS_MODULE,
+       .fb_check_var   = gbefb_check_var,
+       .fb_set_par     = gbefb_set_par,
+       .fb_setcolreg   = gbefb_setcolreg,
+       .fb_mmap        = gbefb_mmap,
+       .fb_blank       = gbefb_blank,
+       .fb_fillrect    = cfb_fillrect,
+       .fb_copyarea    = cfb_copyarea,
+       .fb_imageblit   = cfb_imageblit,
+       .fb_cursor      = soft_cursor,
+};
+
+/*
+ * Initialization
+ */
+
+/*
+ * Parse the comma-separated driver options: "monitor:{crt|lcd|1600sw}"
+ * selects CRT vs. flat panel defaults, "mem:<size>" clamps the
+ * framebuffer memory size, anything else is kept as the mode option.
+ * Always returns 0.
+ */
+int __init gbefb_setup(char *options)
+{
+       char *opt;
+
+       if (!options || !*options)
+               return 0;
+
+       while ((opt = strsep(&options, ",")) != NULL) {
+               if (strncmp(opt, "monitor:", 8) == 0) {
+                       const char *type = opt + 8;
+
+                       if (strncmp(type, "crt", 3) == 0) {
+                               flat_panel_enabled = 0;
+                               default_var = &default_var_CRT;
+                               default_mode = &default_mode_CRT;
+                       } else if (strncmp(type, "1600sw", 6) == 0 ||
+                                  strncmp(type, "lcd", 3) == 0) {
+                               flat_panel_enabled = 1;
+                               default_var = &default_var_LCD;
+                               default_mode = &default_mode_LCD;
+                       }
+               } else if (strncmp(opt, "mem:", 4) == 0) {
+                       /* clamp to [TILE_SIZE, CONFIG_FB_GBE_MEM MB] */
+                       gbe_mem_size = memparse(opt + 4, &opt);
+                       if (gbe_mem_size > CONFIG_FB_GBE_MEM * 1024 * 1024)
+                               gbe_mem_size = CONFIG_FB_GBE_MEM * 1024 * 1024;
+                       if (gbe_mem_size < TILE_SIZE)
+                               gbe_mem_size = TILE_SIZE;
+               } else {
+                       mode_option = opt;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Probe and register the GBE framebuffer: map the register aperture,
+ * allocate the tile table and framebuffer memory, pick the startup
+ * video mode and register with the fb layer.  Error paths unwind in
+ * reverse order via the goto labels at the bottom.
+ */
+int __init gbefb_init(void)
+{
+       int i, ret = 0;
+
+       if (!request_mem_region(GBE_BASE, sizeof(struct sgi_gbe), "GBE")) {
+               printk(KERN_ERR "gbefb: couldn't reserve mmio region\n");
+               return -EBUSY;
+       }
+
+       gbe = (struct sgi_gbe *) ioremap(GBE_BASE, sizeof(struct sgi_gbe));
+       if (!gbe) {
+               printk(KERN_ERR "gbefb: couldn't map mmio region\n");
+               ret = -ENXIO;
+               goto out_release_mem_region;
+       }
+       /* low 4 bits of ctrlstat hold the chip revision */
+       gbe_revision = gbe->ctrlstat & 15;
+
+       gbe_tiles.cpu =
+               dma_alloc_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
+                                  &gbe_tiles.dma, GFP_KERNEL);
+       if (!gbe_tiles.cpu) {
+               printk(KERN_ERR "gbefb: couldn't allocate tiles table\n");
+               ret = -ENOMEM;
+               goto out_unmap;
+       }
+
+
+       if (gbe_mem_phys) {
+               /* memory was allocated at boot time */
+               gbe_mem = ioremap_nocache(gbe_mem_phys, gbe_mem_size);
+               gbe_dma_addr = 0;
+       } else {
+               /* try to allocate memory with the classical allocator
+                * this has high chance to fail on low memory machines */
+               gbe_mem = dma_alloc_coherent(NULL, gbe_mem_size, &gbe_dma_addr,
+                                            GFP_KERNEL);
+               gbe_mem_phys = (unsigned long) gbe_dma_addr;
+       }
+
+/* NOTE(review): CONFIG_X86 path looks vestigial for SGI IP32 hardware -
+ * confirm whether this driver is ever built for x86 (Visual Workstation). */
+#ifdef CONFIG_X86
+       mtrr_add(gbe_mem_phys, gbe_mem_size, MTRR_TYPE_WRCOMB, 1);
+#endif
+
+       if (!gbe_mem) {
+               printk(KERN_ERR "gbefb: couldn't map framebuffer\n");
+               ret = -ENXIO;
+               goto out_tiles_free;
+       }
+
+       /* map framebuffer memory into tiles table */
+       for (i = 0; i < (gbe_mem_size >> TILE_SHIFT); i++)
+               gbe_tiles.cpu[i] = (gbe_mem_phys >> TILE_SHIFT) + i;
+
+       fb_info.currcon = -1;
+       fb_info.fbops = &gbefb_ops;
+       fb_info.pseudo_palette = pseudo_palette;
+       fb_info.flags = FBINFO_FLAG_DEFAULT;
+       fb_info.screen_base = gbe_mem;
+       fb_alloc_cmap(&fb_info.cmap, 256, 0);
+
+       /* reset GBE */
+       gbe_reset();
+
+       /* turn on default video mode */
+       if (fb_find_mode(&par_current.var, &fb_info, mode_option, NULL, 0,
+                        default_mode, 8) == 0)
+               par_current.var = *default_var;
+       fb_info.var = par_current.var;
+       gbefb_check_var(&par_current.var, &fb_info);
+       gbefb_encode_fix(&fb_info.fix, &fb_info.var);
+       fb_info.par = &par_current;
+
+       if (register_framebuffer(&fb_info) < 0) {
+               ret = -ENXIO;
+               printk(KERN_ERR "gbefb: couldn't register framebuffer\n");
+               goto out_gbe_unmap;
+       }
+
+       printk(KERN_INFO "fb%d: %s rev %d @ 0x%08x using %dkB memory\n",
+              fb_info.node, fb_info.fix.id, gbe_revision, (unsigned) GBE_BASE,
+              gbe_mem_size >> 10);
+
+       return 0;
+
+out_gbe_unmap:
+       /* gbe_dma_addr is zero when the memory came from the boot-time
+        * reservation and was ioremapped instead of DMA-allocated */
+       if (gbe_dma_addr)
+               dma_free_coherent(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
+       else
+               iounmap(gbe_mem);
+out_tiles_free:
+       dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
+                         (void *)gbe_tiles.cpu, gbe_tiles.dma);
+out_unmap:
+       iounmap(gbe);
+out_release_mem_region:
+       release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
+       return ret;
+}
+
+/*
+ * Tear down in reverse order of gbefb_init(): unregister, quiesce the
+ * hardware, then release framebuffer memory, tile table, mem region
+ * and the register mapping.
+ */
+void __exit gbefb_exit(void)
+{
+       unregister_framebuffer(&fb_info);
+       gbe_turn_off();
+       /* gbe_dma_addr is zero when memory was reserved at boot time */
+       if (gbe_dma_addr)
+               dma_free_coherent(NULL, gbe_mem_size, gbe_mem, gbe_mem_phys);
+       else
+               iounmap(gbe_mem);
+       dma_free_coherent(NULL, GBE_TLB_SIZE * sizeof(uint16_t),
+                         (void *)gbe_tiles.cpu, gbe_tiles.dma);
+       release_mem_region(GBE_BASE, sizeof(struct sgi_gbe));
+       iounmap(gbe);
+}
+
+/* NOTE(review): init/exit hooks are only registered for modular builds;
+ * when built in, gbefb_init() is presumably invoked by the fbdev core -
+ * confirm against fbmem.c of this kernel. */
+#ifdef MODULE
+module_init(gbefb_init);
+module_exit(gbefb_exit);
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h
new file mode 100644 (file)
index 0000000..de15fec
--- /dev/null
@@ -0,0 +1,129 @@
+#ifndef __PXAFB_H__
+#define __PXAFB_H__
+
+/*
+ * linux/drivers/video/pxafb.h
+ *    -- Intel PXA250/210 LCD Controller Frame Buffer Device
+ *
+ *  Copyright (C) 1999 Eric A. Thomas.
+ *  Copyright (C) 2004 Jean-Frederic Clere.
+ *  Copyright (C) 2004 Ian Campbell.
+ *  Copyright (C) 2004 Jeff Lackey.
+ *   Based on sa1100fb.c Copyright (C) 1999 Eric A. Thomas
+ *  which in turn is
+ *   Based on acornfb.c Copyright (C) Russell King.
+ *
+ *  2001-08-03: Cliff Brake <cbrake@acclent.com>
+ *      - ported SA1100 code to PXA
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/* Shadows for LCD controller registers */
+struct pxafb_lcd_reg {
+       unsigned int lccr0;     /* LCD controller control register 0 */
+       unsigned int lccr1;     /* LCD controller control register 1 */
+       unsigned int lccr2;     /* LCD controller control register 2 */
+       unsigned int lccr3;     /* LCD controller control register 3 */
+};
+
+/* PXA LCD DMA descriptor */
+struct pxafb_dma_descriptor {
+       unsigned int fdadr;     /* next frame descriptor address */
+       unsigned int fsadr;     /* frame source (data) address */
+       unsigned int fidr;      /* frame ID */
+       unsigned int ldcmd;     /* LCD DMA command word */
+};
+
+/* Per-device driver state for the PXA LCD controller framebuffer. */
+struct pxafb_info {
+       struct fb_info          fb;             /* generic framebuffer state */
+       struct device           *dev;
+
+       /* hardware limits -- presumably from platform data; confirm in pxafb.c */
+       u_int                   max_bpp;
+       u_int                   max_xres;
+       u_int                   max_yres;
+
+       /*
+        * These are the addresses we mapped
+        * the framebuffer memory region to.
+        */
+       /* raw memory addresses */
+       dma_addr_t              map_dma;        /* physical */
+       u_char *                map_cpu;        /* virtual */
+       u_int                   map_size;
+
+       /* addresses of pieces placed in raw buffer */
+       u_char *                screen_cpu;     /* virtual address of frame buffer */
+       dma_addr_t              screen_dma;     /* physical address of frame buffer */
+       u16 *                   palette_cpu;    /* virtual address of palette memory */
+       dma_addr_t              palette_dma;    /* physical address of palette memory */
+       u_int                   palette_size;
+
+       /* DMA descriptors */
+       struct pxafb_dma_descriptor *   dmadesc_fblow_cpu;
+       dma_addr_t              dmadesc_fblow_dma;
+       struct pxafb_dma_descriptor *   dmadesc_fbhigh_cpu;
+       dma_addr_t              dmadesc_fbhigh_dma;
+       struct pxafb_dma_descriptor *   dmadesc_palette_cpu;
+       dma_addr_t              dmadesc_palette_dma;
+
+       /* first-descriptor addresses for the two DMA channels --
+        * NOTE(review): presumably loaded into FDADR0/FDADR1; confirm */
+       dma_addr_t              fdadr0;
+       dma_addr_t              fdadr1;
+
+       u_int                   lccr0;
+       u_int                   lccr3;
+       u_int                   cmap_inverse:1,
+                               cmap_static:1,
+                               unused:30;
+
+       /* shadow copies of the LCD controller registers (see pxafb_lcd_reg) */
+       u_int                   reg_lccr0;
+       u_int                   reg_lccr1;
+       u_int                   reg_lccr2;
+       u_int                   reg_lccr3;
+
+       /* controller state machine; see the C_* actions below */
+       volatile u_char         state;
+       volatile u_char         task_state;
+       struct semaphore        ctrlr_sem;
+       wait_queue_head_t       ctrlr_wait;
+       struct work_struct      task;
+
+#ifdef CONFIG_CPU_FREQ
+       /* cpufreq transition/policy notifiers */
+       struct notifier_block   freq_transition;
+       struct notifier_block   freq_policy;
+#endif
+};
+
+/* Map a pointer to an embedded member back to its enclosing pxafb_info */
+#define TO_INF(ptr,member) container_of(ptr,struct pxafb_info,member)
+
+/*
+ * These are the actions for set_ctrlr_state
+ */
+#define C_DISABLE              (0)
+#define C_ENABLE               (1)
+#define C_DISABLE_CLKCHANGE    (2)
+#define C_ENABLE_CLKCHANGE     (3)
+#define C_REENABLE             (4)
+#define C_DISABLE_PM           (5)
+#define C_ENABLE_PM            (6)
+#define C_STARTUP              (7)
+
+#define PXA_NAME       "PXA"
+
+/*
+ *  Debug macros
+ */
+#if DEBUG
+#  define DPRINTK(fmt, args...)        printk("%s: " fmt, __FUNCTION__ , ## args)
+#else
+#  define DPRINTK(fmt, args...)
+#endif
+
+/*
+ * Minimum X and Y resolutions
+ */
+#define MIN_XRES       64
+#define MIN_YRES       64
+
+#endif /* __PXAFB_H__ */
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
new file mode 100644 (file)
index 0000000..7ef7b45
--- /dev/null
@@ -0,0 +1,1441 @@
+/*
+ * linux/fs/reiserfs/xattr.c
+ *
+ * Copyright (c) 2002 by Jeff Mahoney, <jeffm@suse.com>
+ *
+ */
+
+/*
+ * In order to implement EA/ACLs in a clean, backwards compatible manner,
+ * they are implemented as files in a "private" directory.
+ * Each EA is in it's own file, with the directory layout like so (/ is assumed
+ * to be relative to fs root). Inside the /.reiserfs_priv/xattrs directory,
+ * directories named using the capital-hex form of the objectid and
+ * generation number are used. Inside each directory are individual files
+ * named with the name of the extended attribute.
+ *
+ * So, for objectid 12648430, we could have:
+ * /.reiserfs_priv/xattrs/C0FFEE.0/system.posix_acl_access
+ * /.reiserfs_priv/xattrs/C0FFEE.0/system.posix_acl_default
+ * /.reiserfs_priv/xattrs/C0FFEE.0/user.Content-Type
+ * .. or similar.
+ *
+ * The file contents are the text of the EA. The size is known based on the
+ * stat data describing the file.
+ *
+ * In the case of system.posix_acl_access and system.posix_acl_default, since
+ * these are special cases for filesystem ACLs, they are interpreted by the
+ * kernel, in addition, they are negatively and positively cached and attached
+ * to the inode so that unnecessary lookups are avoided.
+ */
+
+#include <linux/reiserfs_fs.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/xattr.h>
+#include <linux/reiserfs_xattr.h>
+#include <linux/reiserfs_acl.h>
+#include <linux/mbcache.h>
+#include <asm/uaccess.h>
+#include <asm/checksum.h>
+#include <linux/smp_lock.h>
+#include <linux/stat.h>
+#include <asm/semaphore.h>
+
+#define FL_READONLY 128
+#define FL_DIR_SEM_HELD 256
+#define PRIVROOT_NAME ".reiserfs_priv"
+#define XAROOT_NAME   "xattrs"
+
+static struct reiserfs_xattr_handler *find_xattr_handler_prefix (const char *prefix);
+
+/* Create the xattrs directory under the private root if it does not yet
+ * exist, caching it on the superblock.  Returns a referenced dentry for
+ * it, or a pointer-encoded error. */
+static struct dentry *
+create_xa_root (struct super_block *sb)
+{
+    struct dentry *privroot = dget (REISERFS_SB(sb)->priv_root);
+    struct dentry *xaroot;
+
+    /* This needs to be created at mount-time */
+    if (!privroot)
+        return ERR_PTR(-EOPNOTSUPP);
+
+    xaroot = lookup_one_len (XAROOT_NAME, privroot, strlen (XAROOT_NAME));
+    if (IS_ERR (xaroot)) {
+        goto out;
+    } else if (!xaroot->d_inode) {
+        int err;
+        /* parent's i_sem serializes concurrent creators of this directory */
+        down (&privroot->d_inode->i_sem);
+        err = privroot->d_inode->i_op->mkdir (privroot->d_inode, xaroot, 0700);
+        up (&privroot->d_inode->i_sem);
+
+        if (err) {
+            dput (xaroot);
+            dput (privroot);
+            return ERR_PTR (err);
+        }
+        /* cache the new root; the extra reference is held by the super */
+        REISERFS_SB(sb)->xattr_root = dget (xaroot);
+    }
+
+out:
+    dput (privroot);
+    return xaroot;
+}
+
+/* Look up the xa root directory on disk and cache it on the superblock.
+ * Returns NULL when it does not exist yet (negative dentry), a
+ * pointer-encoded error on lookup failure, or the referenced dentry. */
+static struct dentry *
+__get_xa_root (struct super_block *s)
+{
+    struct dentry *privroot = dget (REISERFS_SB(s)->priv_root);
+    struct dentry *xaroot = NULL;
+
+    if (IS_ERR (privroot) || !privroot)
+        return privroot;
+
+    xaroot = lookup_one_len (XAROOT_NAME, privroot, strlen (XAROOT_NAME));
+    if (IS_ERR (xaroot)) {
+        goto out;
+    } else if (!xaroot->d_inode) {
+        /* exists only as a negative dentry -- report "not present" */
+        dput (xaroot);
+        xaroot = NULL;
+        goto out;
+    }
+
+    /* cache for later get_xa_root() calls; reference held by the super */
+    REISERFS_SB(s)->xattr_root = dget (xaroot);
+
+out:
+    dput (privroot);
+    return xaroot;
+}
+
+/* Returns the dentry (or NULL) referring to the root of the extended
+ * attribute directory tree.  A cached copy on the superblock is used
+ * when available; otherwise we attempt to retrieve it from disk.  May
+ * also return a pointer-encoded error. */
+static inline struct dentry *
+get_xa_root (struct super_block *s)
+{
+    struct dentry *root = dget (REISERFS_SB(s)->xattr_root);
+
+    return root ? root : __get_xa_root (s);
+}
+
+/* Opens the directory corresponding to the inode's extended attribute store.
+ * If flags allow, the tree to the directory may be created. If creation is
+ * prohibited, -ENODATA is returned. */
+static struct dentry *
+open_xa_dir (const struct inode *inode, int flags)
+{
+    struct dentry *xaroot, *xadir;
+    /* "%X.%X" of two 32-bit values: at most 8 + 1 + 8 chars plus NUL */
+    char namebuf[17];
+
+    xaroot = get_xa_root (inode->i_sb);
+    if (IS_ERR (xaroot)) {
+        return xaroot;
+    } else if (!xaroot) {
+        if (flags == 0 || flags & XATTR_CREATE) {
+            xaroot = create_xa_root (inode->i_sb);
+            if (IS_ERR (xaroot))
+                return xaroot;
+        }
+        if (!xaroot)
+            return ERR_PTR (-ENODATA);
+    }
+
+    /* ok, we have xaroot open */
+
+    /* per-inode directory is named <objectid>.<generation> in capital hex */
+    snprintf (namebuf, sizeof (namebuf), "%X.%X",
+              le32_to_cpu (INODE_PKEY (inode)->k_objectid),
+              inode->i_generation);
+    xadir = lookup_one_len (namebuf, xaroot, strlen (namebuf));
+    if (IS_ERR (xadir)) {
+        dput (xaroot);
+        return xadir;
+    }
+
+    if (!xadir->d_inode) {
+        int err;
+        if (flags == 0 || flags & XATTR_CREATE) {
+            /* Although there is nothing else trying to create this directory,
+             * another directory with the same hash may be created, so we need
+             * to protect against that */
+            err = xaroot->d_inode->i_op->mkdir (xaroot->d_inode, xadir, 0700);
+            if (err) {
+                dput (xaroot);
+                dput (xadir);
+                return ERR_PTR (err);
+            }
+        }
+        if (!xadir->d_inode) {
+            dput (xaroot);
+            dput (xadir);
+            return ERR_PTR (-ENODATA);
+        }
+        /* Newly created object.. Need to mark it private */
+        REISERFS_I(xadir->d_inode)->i_flags |= i_priv_object;
+    }
+
+    dput (xaroot);
+    return xadir;
+}
+
+/* Returns a dentry corresponding to a specific extended attribute file
+ * for the inode. If flags allow, the file is created. Otherwise, a
+ * valid or negative dentry, or an error is returned. */
+static struct dentry *
+get_xa_file_dentry (const struct inode *inode, const char *name, int flags)
+{
+    struct dentry *xadir, *xafile;
+    int err = 0;
+
+    xadir = open_xa_dir (inode, flags);
+    if (IS_ERR (xadir)) {
+        return ERR_PTR (PTR_ERR (xadir));
+    } else if (xadir && !xadir->d_inode) {
+        dput (xadir);
+        return ERR_PTR (-ENODATA);
+    }
+
+    xafile = lookup_one_len (name, xadir, strlen (name));
+    if (IS_ERR (xafile)) {
+        dput (xadir);
+        return ERR_PTR (PTR_ERR (xafile));
+    }
+
+    if (xafile->d_inode) { /* file exists */
+        if (flags & XATTR_CREATE) {
+            err = -EEXIST;
+            dput (xafile);
+            goto out;
+        }
+    } else if (flags & XATTR_REPLACE || flags & FL_READONLY) {
+        /* caller requires an existing attribute (or read-only access);
+         * hand back the negative dentry untouched */
+        goto out;
+    } else {
+        /* inode->i_sem is down, so nothing else can try to create
+         * the same xattr */
+        err = xadir->d_inode->i_op->create (xadir->d_inode, xafile,
+                                            0700|S_IFREG, NULL);
+
+        if (err) {
+            dput (xafile);
+            goto out;
+        }
+        /* Newly created object.. Need to mark it private */
+        REISERFS_I(xafile->d_inode)->i_flags |= i_priv_object;
+    }
+
+out:
+    dput (xadir);
+    if (err)
+        xafile = ERR_PTR (err);
+    return xafile;
+}
+
+
+/* Opens a file pointer to the attribute associated with inode */
+static struct file *
+open_xa_file (const struct inode *inode, const char *name, int flags)
+{
+    struct dentry *dentry;
+
+    dentry = get_xa_file_dentry (inode, name, flags);
+    if (IS_ERR (dentry))
+        return ERR_PTR (PTR_ERR (dentry));
+
+    if (!dentry->d_inode) {
+        /* negative dentry: the attribute does not exist */
+        dput (dentry);
+        return ERR_PTR (-ENODATA);
+    }
+
+    /* dentry_open dputs the dentry if it fails */
+    return dentry_open (dentry, NULL, O_RDWR);
+}
+
+
+/*
+ * this is very similar to fs/reiserfs/dir.c:reiserfs_readdir, but
+ * we need to drop the path before calling the filldir struct.  That
+ * would be a big performance hit to the non-xattr case, so I've copied
+ * the whole thing for now. --clm
+ *
+ * the big difference is that I go backwards through the directory,
+ * and don't mess with f->f_pos, but the idea is the same.  Do some
+ * action on each and every entry in the directory.
+ *
+ * we're called with i_sem held, so there are no worries about the directory
+ * changing underneath us.
+ */
+static int __xattr_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+    struct inode *inode = filp->f_dentry->d_inode;
+    struct cpu_key pos_key;    /* key of current position in the directory (key of directory entry) */
+    INITIALIZE_PATH (path_to_entry);
+    struct buffer_head * bh;
+    int entry_num;
+    struct item_head * ih, tmp_ih;
+    int search_res;
+    char * local_buf;
+    loff_t next_pos;
+    char small_buf[32] ; /* avoid kmalloc if we can */
+    struct reiserfs_de_head *deh;
+    int d_reclen;
+    char * d_name;
+    off_t d_off;
+    ino_t d_ino;
+    struct reiserfs_dir_entry de;
+
+
+    /* form key for search the next directory entry using f_pos field of
+       file structure */
+    next_pos = max_reiserfs_offset(inode);
+
+    while (1) {
+research:
+       if (next_pos <= DOT_DOT_OFFSET)
+           break;
+       make_cpu_key (&pos_key, inode, next_pos, TYPE_DIRENTRY, 3);
+
+       search_res = search_by_entry_key(inode->i_sb, &pos_key, &path_to_entry, &de);
+       if (search_res == IO_ERROR) {
+           // FIXME: we could just skip part of directory which could
+           // not be read
+           pathrelse(&path_to_entry);
+           return -EIO;
+       }
+
+       /* no exact match: step back to the preceding entry */
+       if (search_res == NAME_NOT_FOUND)
+           de.de_entry_num--;
+
+       set_de_name_and_namelen(&de);
+       entry_num = de.de_entry_num;
+       deh = &(de.de_deh[entry_num]);
+
+       bh = de.de_bh;
+       ih = de.de_ih;
+
+       if (!is_direntry_le_ih(ih)) {
+            reiserfs_warning(inode->i_sb, "not direntry %h", ih);
+           break;
+        }
+       copy_item_head(&tmp_ih, ih);
+
+       /* we must have found item, that is item of this directory, */
+       RFALSE( COMP_SHORT_KEYS (&(ih->ih_key), &pos_key),
+               "vs-9000: found item %h does not match to dir we readdir %K",
+               ih, &pos_key);
+
+       if (deh_offset(deh) <= DOT_DOT_OFFSET) {
+           break;
+       }
+
+       /* look for the previous entry in the directory */
+       next_pos = deh_offset (deh) - 1;
+
+       if (!de_visible (deh))
+           /* it is hidden entry */
+           continue;
+
+       d_reclen = entry_length(bh, ih, entry_num);
+       d_name = B_I_DEH_ENTRY_FILE_NAME (bh, ih, deh);
+       d_off = deh_offset (deh);
+       d_ino = deh_objectid (deh);
+
+       /* the entry may be NUL-padded; use the string length if so */
+       if (!d_name[d_reclen - 1])
+           d_reclen = strlen (d_name);
+
+       if (d_reclen > REISERFS_MAX_NAME(inode->i_sb->s_blocksize)){
+           /* too big to send back to VFS */
+           continue ;
+       }
+
+        /* Ignore the .reiserfs_priv entry */
+        if (reiserfs_xattrs (inode->i_sb) &&
+            !old_format_only(inode->i_sb) &&
+            deh_objectid (deh) == le32_to_cpu (INODE_PKEY(REISERFS_SB(inode->i_sb)->priv_root->d_inode)->k_objectid))
+          continue;
+
+       if (d_reclen <= 32) {
+         local_buf = small_buf ;
+       } else {
+           local_buf = reiserfs_kmalloc(d_reclen, GFP_NOFS, inode->i_sb) ;
+           if (!local_buf) {
+               pathrelse (&path_to_entry);
+               return -ENOMEM ;
+           }
+           if (item_moved (&tmp_ih, &path_to_entry)) {
+               reiserfs_kfree(local_buf, d_reclen, inode->i_sb) ;
+
+               /* sigh, must retry.  Do this same offset again */
+               next_pos = d_off;
+               goto research;
+           }
+       }
+
+       // Note, that we copy name to user space via temporary
+       // buffer (local_buf) because filldir will block if
+       // user space buffer is swapped out. At that time
+       // entry can move to somewhere else
+       memcpy (local_buf, d_name, d_reclen);
+
+       /* the filldir function might need to start transactions,
+        * or do who knows what.  Release the path now that we've
+        * copied all the important stuff out of the deh
+        */
+       pathrelse (&path_to_entry);
+
+       if (filldir (dirent, local_buf, d_reclen, d_off, d_ino,
+                    DT_UNKNOWN) < 0) {
+           if (local_buf != small_buf) {
+               reiserfs_kfree(local_buf, d_reclen, inode->i_sb) ;
+           }
+           goto end;
+       }
+       if (local_buf != small_buf) {
+           reiserfs_kfree(local_buf, d_reclen, inode->i_sb) ;
+       }
+    } /* while */
+
+end:
+    pathrelse (&path_to_entry);
+    return 0;
+}
+
+/*
+ * this could be done with dedicated readdir ops for the xattr files,
+ * but I want to get something working asap
+ * this is stolen from vfs_readdir
+ *
+ */
+static
+int xattr_readdir(struct file *file, filldir_t filler, void *buf)
+{
+        struct inode *inode = file->f_dentry->d_inode;
+        int res = -ENOTDIR;
+        if (!file->f_op || !file->f_op->readdir)
+                goto out;
+        /* i_sem serializes against concurrent directory modification */
+        down(&inode->i_sem);
+//        down(&inode->i_zombie);
+        res = -ENOENT;
+        if (!IS_DEADDIR(inode)) {
+                lock_kernel();
+                res = __xattr_readdir(file, buf, filler);
+                unlock_kernel();
+        }
+//        up(&inode->i_zombie);
+        up(&inode->i_sem);
+out:
+        return res;
+}
+
+
+/* Internal operations on file data */
+/* Release a page obtained from reiserfs_get_page: undo the kmap and
+ * drop the page cache reference. */
+static inline void
+reiserfs_put_page(struct page *page)
+{
+        kunmap(page);
+        page_cache_release(page);
+}
+
+/* Read page @n of @dir through its page cache, returning it kmapped and
+ * up to date, or ERR_PTR(-EIO).  Pair with reiserfs_put_page(). */
+static struct page *
+reiserfs_get_page(struct inode *dir, unsigned long n)
+{
+        struct address_space *mapping = dir->i_mapping;
+        struct page *page;
+        /* We can deadlock if we try to free dentries,
+           and an unlink/rmdir has just occured - GFP_NOFS avoids this */
+        mapping->flags = (mapping->flags & ~__GFP_BITS_MASK) | GFP_NOFS;
+        page = read_cache_page (mapping, n,
+                                (filler_t*)mapping->a_ops->readpage, NULL);
+        if (!IS_ERR(page)) {
+                wait_on_page_locked(page);
+                kmap(page);
+                if (!PageUptodate(page))
+                        goto fail;
+
+                if (PageError(page))
+                        goto fail;
+        }
+        return page;
+
+fail:
+        reiserfs_put_page(page);
+        return ERR_PTR(-EIO);
+}
+
+/* Checksum of an xattr value; stored in the on-disk xattr header and
+ * verified by reiserfs_xattr_get(). */
+static inline __u32
+xattr_hash (const char *msg, int len)
+{
+    return csum_partial (msg, len, 0);
+}
+
+/* Generic extended attribute operations that can be used by xa plugins */
+
+/*
+ * Write the value of an extended attribute into its backing file:
+ * a reiserfs_xattr_header (magic + hash) followed by the raw value,
+ * copied page by page through the page cache.  Returns 0 or -errno.
+ *
+ * inode->i_sem: down
+ */
+int
+reiserfs_xattr_set (struct inode *inode, const char *name, const void *buffer,
+                    size_t buffer_size, int flags)
+{
+    int err = 0;
+    struct file *fp;
+    struct page *page;
+    char *data;
+    struct address_space *mapping;
+    size_t file_pos = 0;
+    size_t buffer_pos = 0;
+    struct inode *xinode;
+    struct iattr newattrs;
+    __u32 xahash = 0;
+
+    if (IS_RDONLY (inode))
+        return -EROFS;
+
+    if (IS_IMMUTABLE (inode) || IS_APPEND (inode))
+        return -EPERM;
+
+    /* v1 stat data has no generation number, so no xattr dir name */
+    if (get_inode_sd_version (inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    /* Empty xattrs are ok, they're just empty files, no hash */
+    if (buffer && buffer_size)
+        xahash = xattr_hash (buffer, buffer_size);
+
+open_file:
+    fp = open_xa_file (inode, name, flags);
+    if (IS_ERR (fp)) {
+        err = PTR_ERR (fp);
+        goto out;
+    }
+
+    xinode = fp->f_dentry->d_inode;
+    REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
+
+    /* we need to copy it off.. */
+    if (xinode->i_nlink > 1) {
+       fput(fp);
+        err = reiserfs_xattr_del (inode, name);
+        if (err < 0)
+            goto out;
+        /* We just killed the old one, we're not replacing anymore */
+        if (flags & XATTR_REPLACE)
+            flags &= ~XATTR_REPLACE;
+        goto open_file;
+    }
+
+    /* Resize it so we're ok to write there */
+    newattrs.ia_size = buffer_size;
+    newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+    down (&xinode->i_sem);
+    err = notify_change(fp->f_dentry, &newattrs);
+    if (err)
+        goto out_filp;
+
+    mapping = xinode->i_mapping;
+    /* "|| buffer_pos == 0" forces at least one pass so the header is
+     * written even for a zero-length value */
+    while (buffer_pos < buffer_size || buffer_pos == 0) {
+        size_t chunk;
+        size_t skip = 0;
+        size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1));
+        if (buffer_size - buffer_pos > PAGE_CACHE_SIZE)
+            chunk = PAGE_CACHE_SIZE;
+        else
+            chunk = buffer_size - buffer_pos;
+
+        page = reiserfs_get_page (xinode, file_pos >> PAGE_CACHE_SHIFT);
+        if (IS_ERR (page)) {
+            err = PTR_ERR (page);
+            goto out_filp;
+        }
+
+        lock_page (page);
+        data = page_address (page);
+
+        if (file_pos == 0) {
+            /* first page: reserve room for and fill in the header */
+            struct reiserfs_xattr_header *rxh;
+            skip = file_pos = sizeof (struct reiserfs_xattr_header);
+            if (chunk + skip > PAGE_CACHE_SIZE)
+                chunk = PAGE_CACHE_SIZE - skip;
+            rxh = (struct reiserfs_xattr_header *)data;
+            rxh->h_magic = cpu_to_le32 (REISERFS_XATTR_MAGIC);
+            rxh->h_hash = cpu_to_le32 (xahash);
+        }
+
+        err = mapping->a_ops->prepare_write (fp, page, page_offset,
+                                             page_offset + chunk + skip);
+        if (!err) {
+           if (buffer)
+               memcpy (data + skip, buffer + buffer_pos, chunk);
+            err = mapping->a_ops->commit_write (fp, page, page_offset,
+                                                page_offset + chunk + skip);
+       }
+        unlock_page (page);
+        reiserfs_put_page (page);
+        buffer_pos += chunk;
+        file_pos += chunk;
+        skip = 0;
+        if (err || buffer_size == 0 || !buffer)
+            break;
+    }
+
+    inode->i_ctime = CURRENT_TIME;
+    mark_inode_dirty (inode);
+
+out_filp:
+    up (&xinode->i_sem);
+    fput(fp);
+
+out:
+    return err;
+}
+
+/*
+ * Read the value of an extended attribute into @buffer (or, when @buffer
+ * is NULL, just report the value's size).  The on-disk header's magic and
+ * hash are verified; a mismatch yields -EIO.  Returns the value size or a
+ * negative errno.
+ *
+ * inode->i_sem: down
+ *
+ * NOTE(review): err is declared ssize_t but the function returns int --
+ * confirm values cannot exceed INT_MAX here.
+ */
+int
+reiserfs_xattr_get (const struct inode *inode, const char *name, void *buffer,
+                    size_t buffer_size)
+{
+    ssize_t err = 0;
+    struct file *fp;
+    size_t isize;
+    size_t file_pos = 0;
+    size_t buffer_pos = 0;
+    struct page *page;
+    struct inode *xinode;
+    __u32 hash = 0;
+
+    if (name == NULL)
+        return -EINVAL;
+
+    /* We can't have xattrs attached to v1 items since they don't have
+     * generation numbers */
+    if (get_inode_sd_version (inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    fp = open_xa_file (inode, name, FL_READONLY);
+    if (IS_ERR (fp)) {
+        err = PTR_ERR (fp);
+        goto out;
+    }
+
+    xinode = fp->f_dentry->d_inode;
+    isize = xinode->i_size;
+    REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
+
+    /* Just return the size needed */
+    if (buffer == NULL) {
+        err = isize - sizeof (struct reiserfs_xattr_header);
+        goto out_dput;
+    }
+
+    if (buffer_size < isize - sizeof (struct reiserfs_xattr_header)) {
+        err = -ERANGE;
+        goto out_dput;
+    }
+
+    while (file_pos < isize) {
+        size_t chunk;
+        char *data;
+        size_t skip = 0;
+        if (isize - file_pos > PAGE_CACHE_SIZE)
+            chunk = PAGE_CACHE_SIZE;
+        else
+            chunk = isize - file_pos;
+
+        page = reiserfs_get_page (xinode, file_pos >> PAGE_CACHE_SHIFT);
+        if (IS_ERR (page)) {
+            err = PTR_ERR (page);
+            goto out_dput;
+        }
+
+        lock_page (page);
+        data = page_address (page);
+        if (file_pos == 0) {
+            /* first page starts with the header: validate the magic and
+             * remember the hash for the final integrity check */
+            struct reiserfs_xattr_header *rxh =
+                                        (struct reiserfs_xattr_header *)data;
+            skip = file_pos = sizeof (struct reiserfs_xattr_header);
+            chunk -= skip;
+            /* Magic doesn't match up.. */
+            if (rxh->h_magic != cpu_to_le32 (REISERFS_XATTR_MAGIC)) {
+                unlock_page (page);
+                reiserfs_put_page (page);
+                reiserfs_warning (inode->i_sb, "Invalid magic for xattr (%s) "
+                                  "associated with %k", name,
+                                  INODE_PKEY (inode));
+                err = -EIO;
+                goto out_dput;
+            }
+            hash = le32_to_cpu (rxh->h_hash);
+        }
+        memcpy (buffer + buffer_pos, data + skip, chunk);
+        unlock_page (page);
+        reiserfs_put_page (page);
+        file_pos += chunk;
+        buffer_pos += chunk;
+        skip = 0;
+    }
+    err = isize - sizeof (struct reiserfs_xattr_header);
+
+    /* recompute the hash over what we copied and compare to the header */
+    if (xattr_hash (buffer, isize - sizeof (struct reiserfs_xattr_header)) != hash) {
+        reiserfs_warning (inode->i_sb, "Invalid hash for xattr (%s) associated "
+                          "with %k", name, INODE_PKEY (inode));
+        err = -EIO;
+    }
+
+out_dput:
+    fput(fp);
+
+out:
+    return err;
+}
+
+/* Unlink one xattr file @name (of length @namelen) from the per-inode
+ * xattr directory @xadir.  Refuses to touch anything not flagged as a
+ * reiserfs private object, returning -EIO instead. */
+static int
+__reiserfs_xattr_del (struct dentry *xadir, const char *name, int namelen)
+{
+    struct dentry *dentry;
+    struct inode *dir = xadir->d_inode;
+    int err = 0;
+
+    dentry = lookup_one_len (name, xadir, namelen);
+    if (IS_ERR (dentry)) {
+        err = PTR_ERR (dentry);
+        goto out;
+    } else if (!dentry->d_inode) {
+        err = -ENODATA;
+        goto out_file;
+    }
+
+    /* Skip directories.. */
+    if (S_ISDIR (dentry->d_inode->i_mode))
+        goto out_file;
+
+    if (!is_reiserfs_priv_object (dentry->d_inode)) {
+        reiserfs_warning (dir->i_sb, "OID %08x [%.*s/%.*s] doesn't have "
+                                     "priv flag set [parent is %sset].",
+                        le32_to_cpu (INODE_PKEY (dentry->d_inode)->k_objectid),
+                        xadir->d_name.len, xadir->d_name.name, namelen, name,
+                        is_reiserfs_priv_object (xadir->d_inode) ? "" : "not ");
+        dput (dentry);
+        return -EIO;
+    }
+
+    err = dir->i_op->unlink (dir, dentry);
+    if (!err)
+        d_delete (dentry);
+
+out_file:
+    dput (dentry);
+
+out:
+    return err;
+}
+
+
+/* Remove the named extended attribute from @inode.  Returns 0 on success
+ * or a negative errno (-EROFS on a read-only fs). */
+int
+reiserfs_xattr_del (struct inode *inode, const char *name)
+{
+    struct dentry *xadir;
+    int err;
+
+    if (IS_RDONLY (inode))
+        return -EROFS;
+
+    xadir = open_xa_dir (inode, FL_READONLY);
+    if (IS_ERR (xadir))
+        return PTR_ERR (xadir);
+
+    err = __reiserfs_xattr_del (xadir, name, strlen (name));
+    dput (xadir);
+    return err;
+}
+
+/* The following are side effects of other operations that aren't explicitly
+ * modifying extended attributes. This includes operations such as permissions
+ * or ownership changes, object deletions, etc. */
+
+/* filldir callback for reiserfs_delete_xattrs: @buf is the dentry of the
+ * per-inode xattr directory; unlink each entry handed to us. */
+static int
+reiserfs_delete_xattrs_filler (void *buf, const char *name, int namelen,
+                               loff_t offset, ino_t ino, unsigned int d_type)
+{
+    return __reiserfs_xattr_del ((struct dentry *)buf, name, namelen);
+}
+
+/* This is called w/ inode->i_sem downed */
+/* Delete every extended attribute belonging to @inode, then remove its
+ * (now empty) xattr directory.  Quietly succeeds when the inode cannot
+ * have xattrs or has none. */
+int
+reiserfs_delete_xattrs (struct inode *inode)
+{
+    struct file *fp;
+    struct dentry *dir, *root;
+    int err = 0;
+
+    /* Skip out, an xattr has no xattrs associated with it */
+    if (is_reiserfs_priv_object (inode) ||
+        get_inode_sd_version (inode) == STAT_DATA_V1 ||
+        !reiserfs_xattrs(inode->i_sb))
+    {
+        return 0;
+    }
+    reiserfs_read_lock_xattrs (inode->i_sb);
+    dir = open_xa_dir (inode, FL_READONLY);
+    reiserfs_read_unlock_xattrs (inode->i_sb);
+    if (IS_ERR (dir)) {
+        err = PTR_ERR (dir);
+        goto out;
+    } else if (!dir->d_inode) {
+        dput (dir);
+        return 0;
+    }
+
+    fp = dentry_open (dir, NULL, O_RDWR);
+    if (IS_ERR (fp)) {
+        err = PTR_ERR (fp);
+        /* dentry_open dputs the dentry if it fails */
+        goto out;
+    }
+
+    lock_kernel ();
+    err = xattr_readdir (fp, reiserfs_delete_xattrs_filler, dir);
+    if (err) {
+        unlock_kernel ();
+        goto out_dir;
+    }
+
+    /* i_nlink <= 2 means only "." and ".." remain, so the directory is
+     * empty and can be removed; leftovers besides . and .. are not good. */
+    if (dir->d_inode->i_nlink <= 2) {
+        root = get_xa_root (inode->i_sb);
+        reiserfs_write_lock_xattrs (inode->i_sb);
+        err = vfs_rmdir (root->d_inode, dir);
+        reiserfs_write_unlock_xattrs (inode->i_sb);
+        dput (root);
+    } else {
+        reiserfs_warning (inode->i_sb,
+                          "Couldn't remove all entries in directory");
+    }
+    unlock_kernel ();
+
+out_dir:
+    fput(fp);
+
+out:
+    if (!err)
+        REISERFS_I(inode)->i_flags = REISERFS_I(inode)->i_flags & ~i_has_xattr_dir;
+    return err;
+}
+
+/* Context passed to reiserfs_chown_xattrs_filler via xattr_readdir */
+struct reiserfs_chown_buf {
+    struct inode *inode;       /* inode whose xattrs are being chowned */
+    struct dentry *xadir;      /* per-inode xattr directory */
+    struct iattr *attrs;       /* attrs restricted to uid/gid/ctime bits */
+};
+
+/* XXX: If there is a better way to do this, I'd love to hear about it */
+/* filldir callback: apply the ownership change in buf->attrs to one
+ * xattr file; directories are skipped (the caller chowns the dir). */
+static int
+reiserfs_chown_xattrs_filler (void *buf, const char *name, int namelen,
+                               loff_t offset, ino_t ino, unsigned int d_type)
+{
+    struct reiserfs_chown_buf *chown_buf = (struct reiserfs_chown_buf *)buf;
+    struct dentry *xafile, *xadir = chown_buf->xadir;
+    struct iattr *attrs = chown_buf->attrs;
+    int err = 0;
+
+    xafile = lookup_one_len (name, xadir, namelen);
+    if (IS_ERR (xafile))
+        return PTR_ERR (xafile);
+    else if (!xafile->d_inode) {
+        dput (xafile);
+        return -ENODATA;
+    }
+
+    if (!S_ISDIR (xafile->d_inode->i_mode))
+        err = notify_change (xafile, attrs);
+    dput (xafile);
+
+    return err;
+}
+
+/* Propagate an ownership change on @inode to all of its xattr files and
+ * to the xattr directory itself.  Only UID/GID/CTIME bits are applied;
+ * attrs->ia_valid is restored before returning. */
+int
+reiserfs_chown_xattrs (struct inode *inode, struct iattr *attrs)
+{
+    struct file *fp;
+    struct dentry *dir;
+    int err = 0;
+    struct reiserfs_chown_buf buf;
+    unsigned int ia_valid = attrs->ia_valid;
+
+    /* Skip out, an xattr has no xattrs associated with it */
+    if (is_reiserfs_priv_object (inode) ||
+        get_inode_sd_version (inode) == STAT_DATA_V1 ||
+        !reiserfs_xattrs(inode->i_sb))
+    {
+        return 0;
+    }
+    reiserfs_read_lock_xattrs (inode->i_sb);
+    dir = open_xa_dir (inode, FL_READONLY);
+    reiserfs_read_unlock_xattrs (inode->i_sb);
+    if (IS_ERR (dir)) {
+        if (PTR_ERR (dir) != -ENODATA)
+            err = PTR_ERR (dir);
+        goto out;
+    } else if (!dir->d_inode) {
+        dput (dir);
+        goto out;
+    }
+
+    fp = dentry_open (dir, NULL, O_RDWR);
+    if (IS_ERR (fp)) {
+        err = PTR_ERR (fp);
+        /* dentry_open dputs the dentry if it fails */
+        goto out;
+    }
+
+    lock_kernel ();
+
+    /* only ownership and ctime may propagate to the xattr files */
+    attrs->ia_valid &= (ATTR_UID | ATTR_GID | ATTR_CTIME);
+    buf.xadir = dir;
+    buf.attrs = attrs;
+    buf.inode = inode;
+
+    err = xattr_readdir (fp, reiserfs_chown_xattrs_filler, &buf);
+    if (err) {
+        unlock_kernel ();
+        goto out_dir;
+    }
+
+    /* finally chown the xattr directory itself */
+    err = notify_change (dir, attrs);
+    unlock_kernel ();
+
+out_dir:
+    fput(fp);
+
+out:
+    attrs->ia_valid = ia_valid;
+    return err;
+}
+
+
+/* Actual operations that are exported to VFS-land */
+
+/*
+ * Inode operation getxattr()
+ * Preliminary locking: we down dentry->d_inode->i_sem
+ *
+ * Dispatches to the handler registered for the attribute's prefix,
+ * holding the per-inode and per-super xattr read locks.
+ */
+ssize_t
+reiserfs_getxattr (struct dentry *dentry, const char *name, void *buffer,
+                   size_t size)
+{
+    struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix (name);
+    int err;
+
+    if (!xah || !reiserfs_xattrs(dentry->d_sb) ||
+        get_inode_sd_version (dentry->d_inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    reiserfs_read_lock_xattr_i (dentry->d_inode);
+    reiserfs_read_lock_xattrs (dentry->d_sb);
+    err = xah->get (dentry->d_inode, name, buffer, size);
+    reiserfs_read_unlock_xattrs (dentry->d_sb);
+    reiserfs_read_unlock_xattr_i (dentry->d_inode);
+    return err;
+}
+
+
+/*
+ * Inode operation setxattr()
+ *
+ * dentry->d_inode->i_sem down
+ */
+/* Dispatch setxattr to the handler registered for the name's prefix.
+ * NOTE(review): immutable/append-only inodes return -EROFS here while
+ * reiserfs_removexattr returns -EPERM for the same case — confirm which
+ * is intended. */
+int
+reiserfs_setxattr (struct dentry *dentry, const char *name, const void *value,
+                   size_t size, int flags)
+{
+    struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix (name);
+    int err;
+    int lock;
+
+    if (!xah || !reiserfs_xattrs(dentry->d_sb) ||
+        get_inode_sd_version (dentry->d_inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    if (IS_RDONLY (dentry->d_inode))
+        return -EROFS;
+
+    if (IS_IMMUTABLE (dentry->d_inode) || IS_APPEND (dentry->d_inode))
+        return -EROFS;
+
+    /* If the xattr dir doesn't exist yet the set may create it, which
+     * needs the superblock-wide write lock; otherwise a read lock is
+     * enough to serialize against directory creation/removal. */
+    reiserfs_write_lock_xattr_i (dentry->d_inode);
+    lock = !has_xattr_dir (dentry->d_inode);
+    if (lock)
+        reiserfs_write_lock_xattrs (dentry->d_sb);
+    else
+        reiserfs_read_lock_xattrs (dentry->d_sb);
+    err = xah->set (dentry->d_inode, name, value, size, flags);
+    if (lock)
+        reiserfs_write_unlock_xattrs (dentry->d_sb);
+    else
+        reiserfs_read_unlock_xattrs (dentry->d_sb);
+    reiserfs_write_unlock_xattr_i (dentry->d_inode);
+    return err;
+}
+
+/*
+ * Inode operation removexattr()
+ *
+ * dentry->d_inode->i_sem down
+ */
+/* Remove the named xattr: run the handler's optional deletion pre-op
+ * (e.g. to drop a cached ACL), then delete the backing xattr file and
+ * update the inode's ctime. */
+int
+reiserfs_removexattr (struct dentry *dentry, const char *name)
+{
+    int err;
+    struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix (name);
+
+    if (!xah || !reiserfs_xattrs(dentry->d_sb) ||
+        get_inode_sd_version (dentry->d_inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    if (IS_RDONLY (dentry->d_inode))
+        return -EROFS;
+
+    if (IS_IMMUTABLE (dentry->d_inode) || IS_APPEND (dentry->d_inode))
+        return -EPERM;
+
+    reiserfs_write_lock_xattr_i (dentry->d_inode);
+    reiserfs_read_lock_xattrs (dentry->d_sb);
+
+    /* Deletion pre-operation */
+    if (xah->del) {
+        err = xah->del (dentry->d_inode, name);
+        if (err)
+            goto out;
+    }
+
+    err = reiserfs_xattr_del (dentry->d_inode, name);
+
+    dentry->d_inode->i_ctime = CURRENT_TIME;
+    mark_inode_dirty (dentry->d_inode);
+
+out:
+    reiserfs_read_unlock_xattrs (dentry->d_sb);
+    reiserfs_write_unlock_xattr_i (dentry->d_inode);
+    return err;
+}
+
+
+/* This is what filldir will use:
+ * r_pos will always contain the amount of space required for the entire
+ * list. If r_pos becomes larger than r_size, we need more space and we
+ * return an error indicating this. If r_pos is less than r_size, then we've
+ * filled the buffer successfully and we return success */
+struct reiserfs_listxattr_buf {
+    int r_pos;             /* bytes required so far (names + NUL bytes) */
+    int r_size;            /* capacity of r_buf; 0 when only sizing */
+    char *r_buf;           /* output buffer; may be NULL for a size query */
+    struct inode *r_inode; /* inode whose xattrs are being listed */
+};
+
+/* filldir callback for listxattr: for each xattr-dir entry (skipping
+ * "." and ".."), ask the matching handler for the listed name length,
+ * copy it into the buffer if there is room, and always advance r_pos so
+ * the total required size is known even when the buffer is too small. */
+static int
+reiserfs_listxattr_filler (void *buf, const char *name, int namelen,
+                           loff_t offset, ino_t ino, unsigned int d_type)
+{
+    struct reiserfs_listxattr_buf *b = (struct reiserfs_listxattr_buf *)buf;
+    int len = 0;
+    if (name[0] != '.' || (namelen != 1 && (name[1] != '.' || namelen != 2))) {
+        struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix (name);
+        if (!xah) return 0; /* Unsupported xattr name, skip it */
+
+        /* We call ->list() twice because the operation isn't required to just
+         * return the name back - we want to make sure we have enough space */
+        len += xah->list (b->r_inode, name, namelen, NULL);
+
+        if (len) {
+            if (b->r_pos + len + 1 <= b->r_size) {
+                char *p = b->r_buf + b->r_pos;
+                p += xah->list (b->r_inode, name, namelen, p);
+                *p++ = '\0'; /* names in the list are NUL-separated */
+            }
+            b->r_pos += len + 1;
+        }
+    }
+
+    return 0;
+}
+/*
+ * Inode operation listxattr()
+ *
+ * Preliminary locking: we down dentry->d_inode->i_sem
+ */
+/* List all xattr names for the dentry's inode by reading its hidden
+ * xattr directory.  Returns the number of bytes used (or required, when
+ * 'buffer' is NULL), -ERANGE if 'buffer' is too small, or a negative
+ * errno.  NOTE(review): unlike reiserfs_chown_xattrs, there is no
+ * !dir->d_inode check before dentry_open — confirm open_xa_dir with
+ * FL_READONLY cannot return a negative dentry here. */
+ssize_t
+reiserfs_listxattr (struct dentry *dentry, char *buffer, size_t size)
+{
+    struct file *fp;
+    struct dentry *dir;
+    int err = 0;
+    struct reiserfs_listxattr_buf buf;
+
+    if (!dentry->d_inode)
+        return -EINVAL;
+
+    if (!reiserfs_xattrs(dentry->d_sb) ||
+        get_inode_sd_version (dentry->d_inode) == STAT_DATA_V1)
+        return -EOPNOTSUPP;
+
+    reiserfs_read_lock_xattr_i (dentry->d_inode);
+    reiserfs_read_lock_xattrs (dentry->d_sb);
+    dir = open_xa_dir (dentry->d_inode, FL_READONLY);
+    reiserfs_read_unlock_xattrs (dentry->d_sb);
+    if (IS_ERR (dir)) {
+        err = PTR_ERR (dir);
+        if (err == -ENODATA)
+            err = 0; /* Not an error if there aren't any xattrs */
+        goto out;
+    }
+
+    fp = dentry_open (dir, NULL, O_RDWR);
+    if (IS_ERR (fp)) {
+        err = PTR_ERR (fp);
+        /* dentry_open dputs the dentry if it fails */
+        goto out;
+    }
+
+    buf.r_buf = buffer;
+    buf.r_size = buffer ? size : 0;
+    buf.r_pos = 0;
+    buf.r_inode = dentry->d_inode;
+
+    /* Remember that this inode has an xattr directory */
+    REISERFS_I(dentry->d_inode)->i_flags |= i_has_xattr_dir;
+
+    err = xattr_readdir (fp, reiserfs_listxattr_filler, &buf);
+    if (err)
+        goto out_dir;
+
+    /* r_pos > r_size means the caller's buffer was too small */
+    if (buf.r_pos > buf.r_size && buffer != NULL)
+        err = -ERANGE;
+    else
+        err = buf.r_pos;
+
+out_dir:
+    fput(fp);
+
+out:
+    reiserfs_read_unlock_xattr_i (dentry->d_inode);
+    return err;
+}
+
+/* This is the implementation for the xattr plugin infrastructure */
+
+/* Global list of registered xattr handlers, guarded by handler_lock */
+static struct list_head xattr_handlers = LIST_HEAD_INIT (xattr_handlers);
+static rwlock_t handler_lock = RW_LOCK_UNLOCKED;
+
+/* Find the registered handler whose prefix matches the start of
+ * 'prefix' (a full xattr name).  Returns NULL if no handler matches. */
+static struct reiserfs_xattr_handler *
+find_xattr_handler_prefix (const char *prefix)
+{
+    struct reiserfs_xattr_handler *xah = NULL;
+    struct list_head *p;
+
+    read_lock (&handler_lock);
+    list_for_each (p, &xattr_handlers) {
+        xah = list_entry (p, struct reiserfs_xattr_handler, handlers);
+        /* Compare only the handler's prefix against the name's start */
+        if (strncmp (xah->prefix, prefix, strlen (xah->prefix)) == 0)
+            break;
+        xah = NULL; /* reset so a non-match at list end returns NULL */
+    }
+
+    read_unlock (&handler_lock);
+    return xah;
+}
+
+/* Run each handler's exit hook and empty the handler list.
+ * Caller must hold handler_lock for writing. */
+static void
+__unregister_handlers (void)
+{
+    struct reiserfs_xattr_handler *xah;
+    struct list_head *p, *tmp;
+
+    /* _safe variant: entries are deleted while walking the list */
+    list_for_each_safe (p, tmp, &xattr_handlers) {
+        xah = list_entry (p, struct reiserfs_xattr_handler, handlers);
+        if (xah->exit)
+            xah->exit();
+
+        list_del_init (p);
+    }
+    INIT_LIST_HEAD (&xattr_handlers);
+}
+
+/* Register the built-in xattr handlers (user, trusted, and optionally
+ * security and POSIX ACLs) and run their init hooks.  Idempotent: a
+ * non-empty list means we are already initialized.  If any init fails,
+ * all handlers are unregistered again and the error is returned. */
+int __init
+reiserfs_xattr_register_handlers (void)
+{
+    int err = 0;
+    struct reiserfs_xattr_handler *xah;
+    struct list_head *p;
+
+    write_lock (&handler_lock);
+
+    /* If we're already initialized, nothing to do */
+    if (!list_empty (&xattr_handlers)) {
+        write_unlock (&handler_lock);
+        return 0;
+    }
+
+    /* Add the handlers */
+    list_add_tail (&user_handler.handlers, &xattr_handlers);
+    list_add_tail (&trusted_handler.handlers, &xattr_handlers);
+#ifdef CONFIG_REISERFS_FS_SECURITY
+    list_add_tail (&security_handler.handlers, &xattr_handlers);
+#endif
+#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+    list_add_tail (&posix_acl_access_handler.handlers, &xattr_handlers);
+    list_add_tail (&posix_acl_default_handler.handlers, &xattr_handlers);
+#endif
+
+    /* Run initializers, if available */
+    list_for_each (p, &xattr_handlers) {
+        xah = list_entry (p, struct reiserfs_xattr_handler, handlers);
+        if (xah->init) {
+            err = xah->init ();
+            if (err) {
+                /* Drop the failed handler; the rest are cleaned below */
+                list_del_init (p);
+                break;
+            }
+        }
+    }
+
+    /* Clean up other handlers, if any failed */
+    if (err)
+        __unregister_handlers ();
+
+    write_unlock (&handler_lock);
+    return err;
+}
+
+/* Tear down all registered xattr handlers (module unload path). */
+void
+reiserfs_xattr_unregister_handlers (void)
+{
+    write_lock (&handler_lock);
+    __unregister_handlers ();
+    write_unlock (&handler_lock);
+}
+
+/* This will catch lookups from the fs root to .reiserfs_priv */
+/* d_compare for the fs root: report "no match" (-ENOENT semantics via
+ * nonzero return) when the looked-up name is the private xattr root,
+ * hiding .reiserfs_priv from ordinary lookups. */
+static int
+xattr_lookup_poison (struct dentry *dentry, struct qstr *q1, struct qstr *name)
+{
+    struct dentry *priv_root = REISERFS_SB(dentry->d_sb)->priv_root;
+    /* Cheap checks (len, hash) first, memcmp only on a likely match */
+    if (name->len == priv_root->d_name.len &&
+        name->hash == priv_root->d_name.hash &&
+        !memcmp (name->name, priv_root->d_name.name, name->len)) {
+            return -ENOENT;
+    }
+    return 0;
+}
+
+/* Installed on s_root so lookups of .reiserfs_priv fail (see above) */
+static struct dentry_operations xattr_lookup_poison_ops = {
+    .d_compare = xattr_lookup_poison,
+};
+
+
+/* We need to take a copy of the mount flags since things like
+ * MS_RDONLY don't get set until *after* we're called.
+ * mount_flags != mount_options */
+/* Per-mount xattr setup: decide whether xattrs are supported (v3.6+
+ * format only), locate or create the .reiserfs_priv directory, and
+ * install the root d_compare hook that hides it.  On any failure all
+ * xattr/ACL mount options are cleared; MS_POSIXACL on the superblock is
+ * synced with the final acl option either way. */
+int
+reiserfs_xattr_init (struct super_block *s, int mount_flags)
+{
+  int err = 0;
+
+  /* We need generation numbers to ensure that the oid mapping is correct
+   * v3.5 filesystems don't have them. */
+  if (!old_format_only (s)) {
+    set_bit (REISERFS_XATTRS, &(REISERFS_SB(s)->s_mount_opt));
+  } else if (reiserfs_xattrs_optional (s)) {
+    /* Old format filesystem, but optional xattrs have been enabled
+     * at mount time. Error out. */
+    reiserfs_warning (s, "xattrs/ACLs not supported on pre v3.6 "
+                      "format filesystem. Failing mount.");
+    err = -EOPNOTSUPP;
+    goto error;
+  } else {
+    /* Old format filesystem, but no optional xattrs have been enabled. This
+     * means we silently disable xattrs on the filesystem. */
+    clear_bit (REISERFS_XATTRS, &(REISERFS_SB(s)->s_mount_opt));
+  }
+
+  /* If we don't have the privroot located yet - go find it */
+  if (reiserfs_xattrs (s) && !REISERFS_SB(s)->priv_root) {
+      struct dentry *dentry;
+      dentry = lookup_one_len (PRIVROOT_NAME, s->s_root,
+                               strlen (PRIVROOT_NAME));
+      if (!IS_ERR (dentry)) {
+        if (!(mount_flags & MS_RDONLY) && !dentry->d_inode) {
+            /* Read-write mount and no privroot yet: create it now */
+            struct inode *inode = dentry->d_parent->d_inode;
+            down (&inode->i_sem);
+            err = inode->i_op->mkdir (inode, dentry, 0700);
+            up (&inode->i_sem);
+            if (err) {
+                dput (dentry);
+                dentry = NULL;
+            }
+
+            if (dentry && dentry->d_inode)
+                reiserfs_warning (s, "Created %s on %s - reserved for "
+                                  "xattr storage.", PRIVROOT_NAME,
+                                  reiserfs_bdevname (inode->i_sb));
+        } else if (!dentry->d_inode) {
+            /* Read-only and the dir doesn't exist; nothing to attach */
+            dput (dentry);
+            dentry = NULL;
+        }
+      } else
+        err = PTR_ERR (dentry);
+
+      if (!err && dentry) {
+          s->s_root->d_op = &xattr_lookup_poison_ops;
+          REISERFS_I(dentry->d_inode)->i_flags |= i_priv_object;
+          REISERFS_SB(s)->priv_root = dentry;
+      } else if (!(mount_flags & MS_RDONLY)) { /* xattrs are unavailable */
+          /* If we're read-only it just means that the dir hasn't been
+           * created. Not an error -- just no xattrs on the fs. We'll
+           * check again if we go read-write */
+          reiserfs_warning (s, "xattrs/ACLs enabled and couldn't "
+                            "find/create .reiserfs_priv. Failing mount.");
+          err = -EOPNOTSUPP;
+      }
+  }
+
+error:
+   /* This is only nonzero if there was an error initializing the xattr
+    * directory or if there is a condition where we don't support them. */
+    if (err) {
+          clear_bit (REISERFS_XATTRS, &(REISERFS_SB(s)->s_mount_opt));
+          clear_bit (REISERFS_XATTRS_USER, &(REISERFS_SB(s)->s_mount_opt));
+          clear_bit (REISERFS_POSIXACL, &(REISERFS_SB(s)->s_mount_opt));
+    }
+
+    /* The super_block MS_POSIXACL must mirror the (no)acl mount option. */
+    s->s_flags = s->s_flags & ~MS_POSIXACL;
+    if (reiserfs_posixacl (s))
+       s->s_flags |= MS_POSIXACL;
+
+    return err;
+}
+
+/* Core permission check: classic owner/group/other mode bits, extended
+ * with POSIX ACL evaluation for non-owners when ACLs are enabled.
+ * 'need_lock' selects whether the xattr locks are taken around
+ * reiserfs_get_acl (0 when the caller already holds them).  Falls back
+ * to capability overrides (CAP_DAC_OVERRIDE / CAP_DAC_READ_SEARCH)
+ * when the DAC bits deny access. */
+static int
+__reiserfs_permission (struct inode *inode, int mask, struct nameidata *nd,
+                       int need_lock)
+{
+       umode_t                 mode = inode->i_mode;
+
+       if (mask & MAY_WRITE) {
+               /*
+                * Nobody gets write access to a read-only fs.
+                */
+               if (IS_RDONLY(inode) &&
+                   (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
+                       return -EROFS;
+
+               /*
+                * Nobody gets write access to an immutable file.
+                */
+               if (IS_IMMUTABLE(inode))
+                       return -EACCES;
+       }
+
+       /* We don't do permission checks on the internal objects.
+       * Permissions are determined by the "owning" object. */
+        if (is_reiserfs_priv_object (inode))
+               return 0;
+
+       if (current->fsuid == inode->i_uid) {
+               mode >>= 6; /* owner: use the high permission triplet */
+#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+       } else if (reiserfs_posixacl(inode->i_sb) &&
+                   get_inode_sd_version (inode) != STAT_DATA_V1) {
+                struct posix_acl *acl;
+
+               /* ACL can't contain additional permissions if
+                  the ACL_MASK entry is 0 */
+               if (!(mode & S_IRWXG))
+                       goto check_groups;
+
+                if (need_lock) {
+                   reiserfs_read_lock_xattr_i (inode);
+                    reiserfs_read_lock_xattrs (inode->i_sb);
+               }
+                acl = reiserfs_get_acl (inode, ACL_TYPE_ACCESS);
+                if (need_lock) {
+                    reiserfs_read_unlock_xattrs (inode->i_sb);
+                   reiserfs_read_unlock_xattr_i (inode);
+               }
+                if (IS_ERR (acl)) {
+                    /* No ACL stored: fall back to plain group bits */
+                    if (PTR_ERR (acl) == -ENODATA)
+                        goto check_groups;
+                    return PTR_ERR (acl);
+                }
+
+                if (acl) {
+                    int err = posix_acl_permission (inode, acl, mask);
+                    posix_acl_release (acl);
+                    if (err == -EACCES) {
+                        /* ACL denied: capabilities may still allow it */
+                        goto check_capabilities;
+                    }
+                    return err;
+               } else {
+                       goto check_groups;
+                }
+#endif
+       } else {
+check_groups:
+               /* Note: label is jumped to from the ACL branch above */
+               if (in_group_p(inode->i_gid))
+                       mode >>= 3; /* group: use the middle triplet */
+       }
+
+       /*
+        * If the DACs are ok we don't need any capability check.
+        */
+       if (((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask))
+               return 0;
+
+check_capabilities:
+       /*
+        * Read/write DACs are always overridable.
+        * Executable DACs are overridable if at least one exec bit is set.
+        */
+       if (!(mask & MAY_EXEC) ||
+           (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
+               if (capable(CAP_DAC_OVERRIDE))
+                       return 0;
+
+       /*
+        * Searching includes executable on directories, else just read.
+        */
+       if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
+               if (capable(CAP_DAC_READ_SEARCH))
+                       return 0;
+
+       return -EACCES;
+}
+
+/* Permission check that takes the xattr locks itself (need_lock = 1) */
+int
+reiserfs_permission (struct inode *inode, int mask, struct nameidata *nd)
+{
+    return __reiserfs_permission (inode, mask, nd, 1);
+}
+
+/* Permission check for callers already holding the xattr locks */
+int
+reiserfs_permission_locked (struct inode *inode, int mask, struct nameidata *nd)
+{
+    return __reiserfs_permission (inode, mask, nd, 0);
+}
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
new file mode 100644 (file)
index 0000000..623139f
--- /dev/null
@@ -0,0 +1,563 @@
+#include <linux/fs.h>
+#include <linux/posix_acl.h>
+#include <linux/reiserfs_fs.h>
+#include <linux/errno.h>
+#include <linux/pagemap.h>
+#include <linux/xattr.h>
+#include <linux/xattr_acl.h>
+#include <linux/reiserfs_xattr.h>
+#include <linux/reiserfs_acl.h>
+#include <asm/uaccess.h>
+
+/* Validate a raw xattr ACL value from userspace (owner or CAP_FOWNER
+ * required) and store it via reiserfs_set_acl.  A NULL value removes
+ * the ACL of the given type. */
+static int
+xattr_set_acl(struct inode *inode, int type, const void *value, size_t size)
+{
+       struct posix_acl *acl;
+       int error;
+
+       if (!reiserfs_posixacl(inode->i_sb))
+               return -EOPNOTSUPP;
+       if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
+               return -EPERM;
+
+       if (value) {
+               acl = posix_acl_from_xattr(value, size);
+               if (IS_ERR(acl)) {
+                       return PTR_ERR(acl);
+               } else if (acl) {
+                       error = posix_acl_valid(acl);
+                       if (error)
+                               goto release_and_out;
+               }
+       } else
+               acl = NULL;
+
+       error = reiserfs_set_acl (inode, type, acl);
+
+release_and_out:
+       /* posix_acl_release tolerates NULL (and here, non-ERR acl only) */
+       posix_acl_release(acl);
+       return error;
+}
+
+
+/* Fetch the ACL of the given type and serialize it into the caller's
+ * buffer in xattr format.  -ENODATA when no ACL is set. */
+static int
+xattr_get_acl(struct inode *inode, int type, void *buffer, size_t size)
+{
+       struct posix_acl *acl;
+       int error;
+
+       if (!reiserfs_posixacl(inode->i_sb))
+               return -EOPNOTSUPP;
+
+       acl = reiserfs_get_acl (inode, type);
+       if (IS_ERR(acl))
+               return PTR_ERR(acl);
+       if (acl == NULL)
+               return -ENODATA;
+       error = posix_acl_to_xattr(acl, buffer, size);
+       posix_acl_release(acl);
+
+       return error;
+}
+
+
+/*
+ * Convert from filesystem to in-memory representation.
+ */
+/* Parse the on-disk ACL blob (version header + packed entries, short
+ * form for OBJ/MASK/OTHER tags, long form with e_id for USER/GROUP)
+ * into an in-memory posix_acl.  Bounds-checks every entry against the
+ * end of the buffer and rejects trailing garbage. */
+static struct posix_acl *
+posix_acl_from_disk(const void *value, size_t size)
+{
+       const char *end = (char *)value + size;
+       int n, count;
+       struct posix_acl *acl;
+
+       if (!value)
+               return NULL;
+       if (size < sizeof(reiserfs_acl_header))
+                return ERR_PTR(-EINVAL);
+       if (((reiserfs_acl_header *)value)->a_version !=
+           cpu_to_le32(REISERFS_ACL_VERSION))
+               return ERR_PTR(-EINVAL);
+       value = (char *)value + sizeof(reiserfs_acl_header);
+       count = reiserfs_acl_count(size);
+       if (count < 0)
+               return ERR_PTR(-EINVAL);
+       if (count == 0)
+               return NULL;
+       acl = posix_acl_alloc(count, GFP_NOFS);
+       if (!acl)
+               return ERR_PTR(-ENOMEM);
+       for (n=0; n < count; n++) {
+               reiserfs_acl_entry *entry =
+                       (reiserfs_acl_entry *)value;
+               if ((char *)value + sizeof(reiserfs_acl_entry_short) > end)
+                       goto fail;
+               acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
+               acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
+               switch(acl->a_entries[n].e_tag) {
+                       case ACL_USER_OBJ:
+                       case ACL_GROUP_OBJ:
+                       case ACL_MASK:
+                       case ACL_OTHER:
+                               /* short entry: no qualifier id stored */
+                               value = (char *)value +
+                                       sizeof(reiserfs_acl_entry_short);
+                               acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
+                               break;
+
+                       case ACL_USER:
+                       case ACL_GROUP:
+                               /* long entry: carries a uid/gid qualifier */
+                               value = (char *)value + sizeof(reiserfs_acl_entry);
+                               if ((char *)value > end)
+                                       goto fail;
+                               acl->a_entries[n].e_id =
+                                       le32_to_cpu(entry->e_id);
+                               break;
+
+                       default:
+                               goto fail;
+               }
+       }
+       if (value != end)
+               goto fail;
+       return acl;
+
+fail:
+       posix_acl_release(acl);
+       return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Convert from in-memory to filesystem representation.
+ */
+/* Serialize an in-memory posix_acl into the on-disk blob format
+ * (little-endian header + packed entries).  *size is set to the final
+ * blob size; the returned buffer is kmalloc'd and owned by the caller.
+ * NOTE(review): the allocation uses the full long-entry size per entry,
+ * which can over-allocate relative to *size — confirm intentional. */
+static void *
+posix_acl_to_disk(const struct posix_acl *acl, size_t *size)
+{
+       reiserfs_acl_header *ext_acl;
+       char *e;
+       int n;
+
+       *size = reiserfs_acl_size(acl->a_count);
+       ext_acl = (reiserfs_acl_header *)kmalloc(sizeof(reiserfs_acl_header) +
+               acl->a_count * sizeof(reiserfs_acl_entry), GFP_NOFS);
+       if (!ext_acl)
+               return ERR_PTR(-ENOMEM);
+       ext_acl->a_version = cpu_to_le32(REISERFS_ACL_VERSION);
+       e = (char *)ext_acl + sizeof(reiserfs_acl_header);
+       for (n=0; n < acl->a_count; n++) {
+               reiserfs_acl_entry *entry = (reiserfs_acl_entry *)e;
+               entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
+               entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
+               switch(acl->a_entries[n].e_tag) {
+                       case ACL_USER:
+                       case ACL_GROUP:
+                               entry->e_id =
+                                       cpu_to_le32(acl->a_entries[n].e_id);
+                               e += sizeof(reiserfs_acl_entry);
+                               break;
+
+                       case ACL_USER_OBJ:
+                       case ACL_GROUP_OBJ:
+                       case ACL_MASK:
+                       case ACL_OTHER:
+                               e += sizeof(reiserfs_acl_entry_short);
+                               break;
+
+                       default:
+                               goto fail;
+               }
+       }
+       return (char *)ext_acl;
+
+fail:
+       kfree(ext_acl);
+       return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Inode operation get_posix_acl().
+ *
+ * inode->i_sem: down
+ * BKL held [before 2.5.x]
+ */
+/* Return the inode's ACL of the given type, using the per-inode cache
+ * (i_acl_access / i_acl_default) when populated.  A cached
+ * ERR_PTR(-ENODATA) means "known absent".  On a cache miss the ACL is
+ * read via reiserfs_xattr_get, decoded, and cached.  Returns NULL when
+ * no ACL exists, an ERR_PTR on failure, or a dup'd ACL the caller must
+ * release. */
+struct posix_acl *
+reiserfs_get_acl(struct inode *inode, int type)
+{
+       char *name, *value;
+       struct posix_acl *acl, **p_acl;
+       size_t size;
+       int retval;
+        struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+
+        switch (type) {
+            case ACL_TYPE_ACCESS:
+                name = XATTR_NAME_ACL_ACCESS;
+                p_acl = &reiserfs_i->i_acl_access;
+                break;
+            case ACL_TYPE_DEFAULT:
+                name = XATTR_NAME_ACL_DEFAULT;
+                p_acl = &reiserfs_i->i_acl_default;
+                break;
+            default:
+                return ERR_PTR (-EINVAL);
+        }
+
+        if (IS_ERR (*p_acl)) {
+            /* Cached "no ACL" marker */
+            if (PTR_ERR (*p_acl) == -ENODATA)
+                return NULL;
+        } else if (*p_acl != NULL)
+            return posix_acl_dup (*p_acl);
+
+        /* First call sizes the xattr; negative size is an errno */
+        size = reiserfs_xattr_get (inode, name, NULL, 0);
+        if ((int)size < 0) {
+            if (size == -ENODATA || size == -ENOSYS) {
+               *p_acl = ERR_PTR (-ENODATA);
+               return NULL;
+            }
+            return ERR_PTR (size);
+        }
+
+        value = kmalloc (size, GFP_NOFS);
+        if (!value)
+            return ERR_PTR (-ENOMEM);
+
+       retval = reiserfs_xattr_get(inode, name, value, size);
+       if (retval == -ENODATA || retval == -ENOSYS) {
+               /* This shouldn't actually happen as it should have
+                  been caught above.. but just in case */
+               acl = NULL;
+               *p_acl = ERR_PTR (-ENODATA);
+        } else if (retval < 0) {
+               acl = ERR_PTR(retval);
+       } else {
+               acl = posix_acl_from_disk(value, retval);
+               /* NOTE(review): if posix_acl_from_disk returned an
+                  ERR_PTR it is passed to posix_acl_dup here — confirm
+                  corrupt on-disk ACLs cannot reach this path */
+               *p_acl = posix_acl_dup (acl);
+        }
+
+       kfree(value);
+       return acl;
+}
+
+/*
+ * Inode operation set_posix_acl().
+ *
+ * inode->i_sem: down
+ * BKL held [before 2.5.x]
+ */
+/* Store (or remove, when acl is NULL) the inode's ACL of the given
+ * type, and refresh the per-inode ACL cache on success.  For ACCESS
+ * ACLs that are fully representable by mode bits, the ACL is folded
+ * into i_mode and dropped (posix_acl_equiv_mode).  DEFAULT ACLs are
+ * only meaningful on directories. */
+int
+reiserfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
+{
+        char *name;
+       void *value = NULL;
+       struct posix_acl **p_acl;
+       size_t size;
+       int error;
+        struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+
+       if (S_ISLNK(inode->i_mode))
+               return -EOPNOTSUPP;
+
+        switch (type) {
+            case ACL_TYPE_ACCESS:
+                name = XATTR_NAME_ACL_ACCESS;
+                p_acl = &reiserfs_i->i_acl_access;
+                if (acl) {
+                    mode_t mode = inode->i_mode;
+                    error = posix_acl_equiv_mode (acl, &mode);
+                    if (error < 0)
+                        return error;
+                    else {
+                        inode->i_mode = mode;
+                        /* 0 means the ACL is equivalent to the mode bits */
+                        if (error == 0)
+                            acl = NULL;
+                    }
+                }
+                break;
+            case ACL_TYPE_DEFAULT:
+                name = XATTR_NAME_ACL_DEFAULT;
+                p_acl = &reiserfs_i->i_acl_default;
+                if (!S_ISDIR (inode->i_mode))
+                    return acl ? -EACCES : 0;
+                break;
+            default:
+                return -EINVAL;
+        }
+
+       if (acl) {
+            value = posix_acl_to_disk(acl, &size);
+            if (IS_ERR(value))
+                return (int)PTR_ERR(value);
+            error = reiserfs_xattr_set(inode, name, value, size, 0);
+       } else {
+            error = reiserfs_xattr_del (inode, name);
+            /* Deleting a nonexistent ACL is not an error */
+            if (error == -ENODATA)
+                error = 0;
+        }
+
+       if (value)
+               kfree(value);
+
+        if (!error) {
+            /* Release the old one */
+            if (!IS_ERR (*p_acl) && *p_acl)
+                posix_acl_release (*p_acl);
+
+            /* Cache the new state: absence marker or a dup'd copy */
+            if (acl == NULL)
+                *p_acl = ERR_PTR (-ENODATA);
+            else
+                *p_acl = posix_acl_dup (acl);
+        }
+
+       return error;
+}
+
+/* dir->i_sem: down,
+ * inode is new and not released into the wild yet */
+/* At create time: inherit the parent directory's default ACL into the
+ * new inode (as its DEFAULT ACL for directories, and as its ACCESS ACL
+ * after masking against the requested mode), or apply the umask when
+ * no default ACL exists. */
+int
+reiserfs_inherit_default_acl (struct inode *dir, struct dentry *dentry, struct inode *inode)
+{
+    struct posix_acl *acl;
+    int err = 0;
+
+    /* ACLs only get applied to files and directories */
+    if (S_ISLNK (inode->i_mode))
+        return 0;
+
+    /* ACLs can only be used on "new" objects, so if it's an old object
+     * there is nothing to inherit from */
+    if (get_inode_sd_version (dir) == STAT_DATA_V1)
+        goto apply_umask;
+
+    /* Don't apply ACLs to objects in the .reiserfs_priv tree.. This
+     * would be useless since permissions are ignored, and a pain because
+     * it introduces locking cycles */
+    if (is_reiserfs_priv_object (dir)) {
+        REISERFS_I(inode)->i_flags |= i_priv_object;
+        goto apply_umask;
+    }
+
+    acl = reiserfs_get_acl (dir, ACL_TYPE_DEFAULT);
+    if (IS_ERR (acl)) {
+        if (PTR_ERR (acl) == -ENODATA)
+            goto apply_umask;
+        return PTR_ERR (acl);
+    }
+
+    if (acl) {
+        struct posix_acl *acl_copy;
+        mode_t mode = inode->i_mode;
+        int need_acl;
+
+        /* Copy the default ACL to the default ACL of a new directory */
+        if (S_ISDIR (inode->i_mode)) {
+            err = reiserfs_set_acl (inode, ACL_TYPE_DEFAULT, acl);
+            if (err)
+                goto cleanup;
+        }
+
+        /* Now we reconcile the new ACL and the mode,
+           potentially modifying both */
+        acl_copy = posix_acl_clone (acl, GFP_NOFS);
+        if (!acl_copy) {
+            err = -ENOMEM;
+            goto cleanup;
+        }
+
+
+        need_acl = posix_acl_create_masq (acl_copy, &mode);
+        if (need_acl >= 0) {
+            if (mode != inode->i_mode) {
+                inode->i_mode = mode;
+            }
+
+            /* If we need an ACL.. */
+            if (need_acl > 0) {
+                err = reiserfs_set_acl (inode, ACL_TYPE_ACCESS, acl_copy);
+                if (err)
+                    goto cleanup_copy;
+            }
+        }
+cleanup_copy:
+        posix_acl_release (acl_copy);
+cleanup:
+        posix_acl_release (acl);
+    } else {
+apply_umask:
+        /* no ACL, apply umask.  (Note: this label is entered by gotos
+         * from outside the else branch above.) */
+        inode->i_mode &= ~current->fs->umask;
+    }
+
+    return err;
+}
+
+/* Looks up and caches the result of the default ACL.
+ * We do this so that we don't need to carry the xattr_sem into
+ * reiserfs_new_inode if we don't need to */
+/* Warm the default-ACL cache for 'inode' under the xattr locks and
+ * report whether a default ACL exists (1) or not (0), so callers can
+ * avoid taking the xattr locks again inside reiserfs_new_inode. */
+int
+reiserfs_cache_default_acl (struct inode *inode)
+{
+    int ret = 0;
+    if (reiserfs_posixacl (inode->i_sb) &&
+        !is_reiserfs_priv_object (inode)) {
+        struct posix_acl *acl;
+        reiserfs_read_lock_xattr_i (inode);
+        reiserfs_read_lock_xattrs (inode->i_sb);
+        acl = reiserfs_get_acl (inode, ACL_TYPE_DEFAULT);
+        reiserfs_read_unlock_xattrs (inode->i_sb);
+        reiserfs_read_unlock_xattr_i (inode);
+        ret = acl ? 1 : 0;
+        posix_acl_release (acl);
+    }
+
+    return ret;
+}
+
+/* After a chmod: rebuild the ACCESS ACL so it reflects the new mode
+ * bits (posix_acl_chmod_masq on a clone) and write it back.  No-op when
+ * the inode has no ACCESS ACL or ACLs are unsupported. */
+int
+reiserfs_acl_chmod (struct inode *inode)
+{
+        struct posix_acl *acl, *clone;
+        int error;
+
+        if (S_ISLNK(inode->i_mode))
+                return -EOPNOTSUPP;
+
+       if (get_inode_sd_version (inode) == STAT_DATA_V1 ||
+           !reiserfs_posixacl(inode->i_sb))
+        {
+           return 0;
+       }
+
+        reiserfs_read_lock_xattrs (inode->i_sb);
+        acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
+        reiserfs_read_unlock_xattrs (inode->i_sb);
+        if (!acl)
+                return 0;
+        if (IS_ERR(acl))
+                return PTR_ERR(acl);
+        clone = posix_acl_clone(acl, GFP_NOFS);
+        posix_acl_release(acl);
+        if (!clone)
+                return -ENOMEM;
+        error = posix_acl_chmod_masq(clone, inode->i_mode);
+        if (!error) {
+                /* Write lock needed only if the xattr dir must be made */
+                int lock = !has_xattr_dir (inode);
+                reiserfs_write_lock_xattr_i (inode);
+                if (lock)
+                    reiserfs_write_lock_xattrs (inode->i_sb);
+                else
+                    reiserfs_read_lock_xattrs (inode->i_sb);
+                error = reiserfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
+                if (lock)
+                    reiserfs_write_unlock_xattrs (inode->i_sb);
+                else
+                    reiserfs_read_unlock_xattrs (inode->i_sb);
+                reiserfs_write_unlock_xattr_i (inode);
+        }
+        posix_acl_release(clone);
+        return error;
+}
+
+/* Handler ->get for "system.posix_acl_access": exact-name check, then
+ * serialize the ACCESS ACL into the caller's buffer. */
+static int
+posix_acl_access_get(struct inode *inode, const char *name,
+                         void *buffer, size_t size)
+{
+       if (strlen(name) != sizeof(XATTR_NAME_ACL_ACCESS)-1)
+               return -EINVAL;
+       return xattr_get_acl(inode, ACL_TYPE_ACCESS, buffer, size);
+}
+
+/* Handler ->set for "system.posix_acl_access" */
+static int
+posix_acl_access_set(struct inode *inode, const char *name,
+                         const void *value, size_t size, int flags)
+{
+       if (strlen(name) != sizeof(XATTR_NAME_ACL_ACCESS)-1)
+               return -EINVAL;
+       return xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
+}
+
+/* Handler ->del pre-op: invalidate the cached ACCESS ACL, replacing it
+ * with the "known absent" marker.  The xattr file itself is removed by
+ * the caller (reiserfs_removexattr). */
+static int
+posix_acl_access_del (struct inode *inode, const char *name)
+{
+    struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+    struct posix_acl **acl = &reiserfs_i->i_acl_access;
+    if (strlen(name) != sizeof(XATTR_NAME_ACL_ACCESS)-1)
+       return -EINVAL;
+    if (!IS_ERR (*acl) && *acl) {
+        posix_acl_release (*acl);
+        *acl = ERR_PTR (-ENODATA);
+    }
+
+    return 0;
+}
+
+/* Handler ->list: report the name length (copying it to 'out' if
+ * non-NULL), or 0 to hide the name when ACLs are disabled. */
+static int
+posix_acl_access_list (struct inode *inode, const char *name, int namelen, char *out)
+{
+    int len = namelen;
+    if (!reiserfs_posixacl (inode->i_sb))
+        return 0;
+    if (out)
+        memcpy (out, name, len);
+
+    return len;
+}
+
+/* Handler for the "system.posix_acl_access" xattr.
+ * (Uses old GNU "label:" designated-initializer syntax.) */
+struct reiserfs_xattr_handler posix_acl_access_handler = {
+    prefix: XATTR_NAME_ACL_ACCESS,
+    get: posix_acl_access_get,
+    set: posix_acl_access_set,
+    del: posix_acl_access_del,
+    list: posix_acl_access_list,
+};
+
+/* Handler ->get for "system.posix_acl_default" */
+static int
+posix_acl_default_get (struct inode *inode, const char *name,
+                          void *buffer, size_t size)
+{
+       if (strlen(name) != sizeof(XATTR_NAME_ACL_DEFAULT)-1)
+               return -EINVAL;
+       return xattr_get_acl(inode, ACL_TYPE_DEFAULT, buffer, size);
+}
+
+/* Handler ->set for "system.posix_acl_default" */
+static int
+posix_acl_default_set(struct inode *inode, const char *name,
+                          const void *value, size_t size, int flags)
+{
+       if (strlen(name) != sizeof(XATTR_NAME_ACL_DEFAULT)-1)
+               return -EINVAL;
+       return xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
+}
+
+/* Handler ->del pre-op: invalidate the cached DEFAULT ACL (mirror of
+ * posix_acl_access_del for the default type). */
+static int
+posix_acl_default_del (struct inode *inode, const char *name)
+{
+    struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+    struct posix_acl **acl = &reiserfs_i->i_acl_default;
+    if (strlen(name) != sizeof(XATTR_NAME_ACL_DEFAULT)-1)
+       return -EINVAL;
+    if (!IS_ERR (*acl) && *acl) {
+        posix_acl_release (*acl);
+        *acl = ERR_PTR (-ENODATA);
+    }
+
+    return 0;
+}
+
+/* Handler ->list for "system.posix_acl_default" (see access_list) */
+static int
+posix_acl_default_list (struct inode *inode, const char *name, int namelen, char *out)
+{
+    int len = namelen;
+    if (!reiserfs_posixacl (inode->i_sb))
+        return 0;
+    if (out)
+        memcpy (out, name, len);
+
+    return len;
+}
+
+/* Handler for the "system.posix_acl_default" xattr */
+struct reiserfs_xattr_handler posix_acl_default_handler = {
+    prefix: XATTR_NAME_ACL_DEFAULT,
+    get: posix_acl_default_get,
+    set: posix_acl_default_set,
+    del: posix_acl_default_del,
+    list: posix_acl_default_list,
+};
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
new file mode 100644 (file)
index 0000000..13e6dcd
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_KMEM_H__
+#define __XFS_SUPPORT_KMEM_H__
+
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Cutoff point to use vmalloc instead of kmalloc.
+ */
+#define MAX_SLAB_SIZE  0x20000
+
+/*
+ * XFS uses slightly different names for these due to the
+ * IRIX heritage.
+ */
+#define        kmem_zone       kmem_cache_s
+#define kmem_zone_t    kmem_cache_t
+
+#define KM_SLEEP       0x0001
+#define KM_NOSLEEP     0x0002
+#define KM_NOFS                0x0004
+#define KM_MAYFAIL     0x0005
+
+typedef unsigned long xfs_pflags_t;
+
+#define PFLAGS_TEST_FSTRANS()           (current->flags & PF_FSTRANS)
+
+/* these could be nested, so we save state */
+#define PFLAGS_SET_FSTRANS(STATEP) do {        \
+       *(STATEP) = current->flags;     \
+       current->flags |= PF_FSTRANS;   \
+} while (0)
+
+#define PFLAGS_CLEAR_FSTRANS(STATEP) do { \
+       *(STATEP) = current->flags;     \
+       current->flags &= ~PF_FSTRANS;  \
+} while (0)
+
+/* Restore the PF_FSTRANS state to what was saved in STATEP */
+#define PFLAGS_RESTORE_FSTRANS(STATEP) do {                    \
+       current->flags = ((current->flags & ~PF_FSTRANS) |      \
+                         (*(STATEP) & PF_FSTRANS));            \
+} while (0)
+
+#define PFLAGS_DUP(OSTATEP, NSTATEP) do { \
+       *(NSTATEP) = *(OSTATEP);        \
+} while (0)
+
+/*
+ * Translate XFS KM_* allocation flags into Linux GFP_* flags.
+ * KM_NOSLEEP maps to GFP_ATOMIC.  Otherwise GFP_KERNEL is used, with
+ * __GFP_FS stripped while inside a transaction (or when KM_NOFS is
+ * set) and __GFP_NOFAIL added unless the caller tolerates failure.
+ */
+static __inline unsigned int
+kmem_flags_convert(int flags)
+{
+       int lflags;
+
+#if DEBUG
+       /* NOTE(review): KM_MAYFAIL is 0x0005, i.e. KM_SLEEP|KM_NOFS rather
+        * than a distinct bit -- confirm this mask check is as intended. */
+       if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL))) {
+               printk(KERN_WARNING
+                   "XFS: memory allocation with wrong flags (%x)\n", flags);
+               BUG();
+       }
+#endif
+
+       if (flags & KM_NOSLEEP) {
+               lflags = GFP_ATOMIC;
+       } else {
+               lflags = GFP_KERNEL;
+
+               /* avoid recursive callbacks to filesystem during transactions */
+               if (PFLAGS_TEST_FSTRANS() || (flags & KM_NOFS))
+                       lflags &= ~__GFP_FS;
+
+               if (!(flags & KM_MAYFAIL))
+                       lflags |= __GFP_NOFAIL;
+       }
+
+       return lflags;
+}
+
+/*
+ * Allocate @size bytes with XFS KM_* @flags.  Requests larger than
+ * MAX_SLAB_SIZE go through __vmalloc (kmalloc cannot serve them);
+ * smaller ones use kmalloc.  GFP flags come from kmem_flags_convert().
+ */
+static __inline void *
+kmem_alloc(size_t size, int flags)
+{
+       if (unlikely(MAX_SLAB_SIZE < size))
+               /* Avoid doing filesystem sensitive stuff to get this */
+               return __vmalloc(size, kmem_flags_convert(flags), PAGE_KERNEL);
+       return kmalloc(size, kmem_flags_convert(flags));
+}
+
+/*
+ * Zeroed variant of kmem_alloc(): same size/flag semantics, memory is
+ * memset to zero on success.  Returns NULL when allocation fails.
+ */
+static __inline void *
+kmem_zalloc(size_t size, int flags)
+{
+       void *ptr = kmem_alloc(size, flags);
+       if (likely(ptr != NULL))
+               memset(ptr, 0, size);
+       return ptr;
+}
+
+/*
+ * Free memory obtained from kmem_alloc()/kmem_zalloc().  Whether the
+ * pointer lies in the vmalloc address range decides between vfree()
+ * and kfree(); @size exists for IRIX API compatibility and is unused.
+ */
+static __inline void
+kmem_free(void *ptr, size_t size)
+{
+       if (unlikely((unsigned long)ptr < VMALLOC_START ||
+                    (unsigned long)ptr >= VMALLOC_END))
+               kfree(ptr);
+       else
+               vfree(ptr);
+}
+
+/*
+ * Grow/shrink @ptr from @oldsize to @newsize bytes, copying the
+ * smaller of the two sizes into the fresh buffer.
+ * NOTE(review): the old buffer is freed even when the new allocation
+ * fails (NULL return) -- callers must not touch @ptr afterwards in
+ * either case.  Confirm all callers expect that.
+ */
+static __inline void *
+kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags)
+{
+       void *new = kmem_alloc(newsize, flags);
+
+       if (likely(ptr != NULL)) {
+               if (likely(new != NULL))
+                       memcpy(new, ptr, min(oldsize, newsize));
+               kmem_free(ptr, oldsize);
+       }
+
+       return new;
+}
+
+/*
+ * Create a slab cache ("zone" in IRIX terms) of @size-byte objects
+ * with default alignment/flags and no constructor/destructor.
+ */
+static __inline kmem_zone_t *
+kmem_zone_init(int size, char *zone_name)
+{
+       return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
+}
+
+/*
+ * Allocate one object from @zone, converting KM_* @flags to GFP_*.
+ */
+static __inline void *
+kmem_zone_alloc(kmem_zone_t *zone, int flags)
+{
+       return kmem_cache_alloc(zone, kmem_flags_convert(flags));
+}
+
+/*
+ * Zeroed variant of kmem_zone_alloc(): the whole object (per the
+ * cache's object size) is cleared on success.
+ */
+static __inline void *
+kmem_zone_zalloc(kmem_zone_t *zone, int flags)
+{
+       void *ptr = kmem_zone_alloc(zone, flags);
+       if (likely(ptr != NULL))
+               memset(ptr, 0, kmem_cache_size(zone));
+       return ptr;
+}
+
+/*
+ * Return @ptr to its slab cache @zone.
+ */
+static __inline void
+kmem_zone_free(kmem_zone_t *zone, void *ptr)
+{
+       kmem_cache_free(zone, ptr);
+}
+
+typedef struct shrinker *kmem_shaker_t;
+typedef int (*kmem_shake_func_t)(int, unsigned int);
+
+/*
+ * Register @sfunc as a VM shrinker callback with default seek cost;
+ * the returned handle is passed to kmem_shake_deregister() later.
+ */
+static __inline kmem_shaker_t
+kmem_shake_register(kmem_shake_func_t sfunc)
+{
+       return set_shrinker(DEFAULT_SEEKS, sfunc);
+}
+
+/*
+ * Unregister a shrinker previously set up by kmem_shake_register().
+ */
+static __inline void
+kmem_shake_deregister(kmem_shaker_t shrinker)
+{
+       remove_shrinker(shrinker);
+}
+
+/*
+ * Nonzero when the reclaim context may sleep (__GFP_WAIT set) and a
+ * shake callback is therefore allowed to do real work.
+ */
+static __inline int
+kmem_shake_allow(unsigned int gfp_mask)
+{
+       return (gfp_mask & __GFP_WAIT);
+}
+
+#endif /* __XFS_SUPPORT_KMEM_H__ */
diff --git a/fs/xfs/linux-2.6/mrlock.h b/fs/xfs/linux-2.6/mrlock.h
new file mode 100644 (file)
index 0000000..d2c11a0
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_MRLOCK_H__
+#define __XFS_SUPPORT_MRLOCK_H__
+
+#include <linux/rwsem.h>
+
+enum { MR_NONE, MR_ACCESS, MR_UPDATE };
+
+typedef struct {
+       struct rw_semaphore     mr_lock;
+       int                     mr_writer;
+} mrlock_t;
+
+#define mrinit(mrp, name)      \
+       ( (mrp)->mr_writer = 0, init_rwsem(&(mrp)->mr_lock) )
+#define mrlock_init(mrp, t,n,s)        mrinit(mrp, n)
+#define mrfree(mrp)            do { } while (0)
+#define mraccess(mrp)          mraccessf(mrp, 0)
+#define mrupdate(mrp)          mrupdatef(mrp, 0)
+
+/*
+ * Acquire the mrlock shared (reader side).  @flags is accepted for
+ * IRIX API compatibility and ignored on Linux.
+ */
+static inline void mraccessf(mrlock_t *mrp, int flags)
+{
+       down_read(&mrp->mr_lock);
+}
+
+/*
+ * Acquire the mrlock exclusive (writer side) and record writer state
+ * so mrunlock() knows which release path to take.  @flags is ignored.
+ */
+static inline void mrupdatef(mrlock_t *mrp, int flags)
+{
+       down_write(&mrp->mr_lock);
+       mrp->mr_writer = 1;
+}
+
+/*
+ * Non-blocking shared acquire; returns nonzero on success.
+ */
+static inline int mrtryaccess(mrlock_t *mrp)
+{
+       return down_read_trylock(&mrp->mr_lock);
+}
+
+/*
+ * Non-blocking exclusive acquire; records writer state on success.
+ * Returns 1 when the lock was taken, 0 otherwise.
+ */
+static inline int mrtryupdate(mrlock_t *mrp)
+{
+       if (!down_write_trylock(&mrp->mr_lock))
+               return 0;
+       mrp->mr_writer = 1;
+       return 1;
+}
+
+/*
+ * Release the mrlock in whichever mode mr_writer records: clears the
+ * writer flag before up_write() for a writer, up_read() otherwise.
+ */
+static inline void mrunlock(mrlock_t *mrp)
+{
+       if (mrp->mr_writer) {
+               mrp->mr_writer = 0;
+               up_write(&mrp->mr_lock);
+       } else {
+               up_read(&mrp->mr_lock);
+       }
+}
+
+/*
+ * Downgrade a held write lock to a read lock without dropping it;
+ * writer state must be cleared first so mrunlock() releases as reader.
+ */
+static inline void mrdemote(mrlock_t *mrp)
+{
+       mrp->mr_writer = 0;
+       downgrade_write(&mrp->mr_lock);
+}
+
+#ifdef DEBUG
+/*
+ * Debug-only routine, without some platform-specific asm code, we can
+ * now only answer requests regarding whether we hold the lock for write
+ * (reader state is outside our visibility, we only track writer state).
+ * Note: means !ismrlocked would give false positives, so don't do that.
+ */
+static inline int ismrlocked(mrlock_t *mrp, int type)
+{
+       /* only writer state is tracked; any non-write query answers "yes" */
+       if (mrp && type == MR_UPDATE)
+               return mrp->mr_writer;
+       return 1;
+}
+#endif
+
+#endif /* __XFS_SUPPORT_MRLOCK_H__ */
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
new file mode 100644 (file)
index 0000000..30b67b4
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_SEMA_H__
+#define __XFS_SUPPORT_SEMA_H__
+
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+
+/*
+ * sema_t structure just maps to struct semaphore in Linux kernel.
+ */
+
+typedef struct semaphore sema_t;
+
+#define init_sema(sp, val, c, d)       sema_init(sp, val)
+#define initsema(sp, val)              sema_init(sp, val)
+#define initnsema(sp, val, name)       sema_init(sp, val)
+#define psema(sp, b)                   down(sp)
+#define vsema(sp)                      up(sp)
+#define valusema(sp)                   (atomic_read(&(sp)->count))
+#define freesema(sema)
+
+/*
+ * Map cpsema (try to get the sema) to down_trylock. We need to switch
+ * the return values since cpsema returns 1 (acquired) 0 (failed) and
+ * down_trylock returns the reverse 0 (acquired) 1 (failed).
+ */
+
+#define cpsema(sp)                     (down_trylock(sp) ? 0 : 1)
+
+/*
+ * Didn't do cvsema(sp). Not sure how to map this to up/down/...
+ * It does a vsema if the value is < 0, otherwise nothing.
+ */
+
+#endif /* __XFS_SUPPORT_SEMA_H__ */
diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h
new file mode 100644 (file)
index 0000000..821d316
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPPORT_SV_H__
+#define __XFS_SUPPORT_SV_H__
+
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+/*
+ * Synchronisation variables.
+ *
+ * (Parameters "pri", "svf" and "rts" are not implemented)
+ */
+
+typedef struct sv_s {
+       wait_queue_head_t waiters;
+} sv_t;
+
+#define SV_FIFO                0x0             /* sv_t is FIFO type */
+#define SV_LIFO                0x2             /* sv_t is LIFO type */
+#define SV_PRIO                0x4             /* sv_t is PRIO type */
+#define SV_KEYED       0x6             /* sv_t is KEYED type */
+#define SV_DEFAULT      SV_FIFO
+
+
+/*
+ * Sleep on the sync variable: queue exclusively, set the task @state,
+ * drop @lock, then wait up to @timeout jiffies for sv_signal()/
+ * sv_broadcast() (or the timeout).  NOTE: returns with @lock NOT
+ * re-held -- callers must retake it themselves if they need it.
+ */
+static inline void _sv_wait(sv_t *sv, spinlock_t *lock, int state,
+                            unsigned long timeout)
+{
+       DECLARE_WAITQUEUE(wait, current);
+
+       add_wait_queue_exclusive(&sv->waiters, &wait);
+       __set_current_state(state);
+       spin_unlock(lock);
+
+       schedule_timeout(timeout);
+
+       remove_wait_queue(&sv->waiters, &wait);
+}
+
+#define init_sv(sv,type,name,flag) \
+       init_waitqueue_head(&(sv)->waiters)
+#define sv_init(sv,flag,name) \
+       init_waitqueue_head(&(sv)->waiters)
+#define sv_destroy(sv) \
+       /*NOTHING*/
+#define sv_wait(sv, pri, lock, s) \
+       _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
+#define sv_wait_sig(sv, pri, lock, s)   \
+       _sv_wait(sv, lock, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT)
+#define sv_timedwait(sv, pri, lock, s, svf, ts, rts) \
+       _sv_wait(sv, lock, TASK_UNINTERRUPTIBLE, timespec_to_jiffies(ts))
+#define sv_timedwait_sig(sv, pri, lock, s, svf, ts, rts) \
+       _sv_wait(sv, lock, TASK_INTERRUPTIBLE, timespec_to_jiffies(ts))
+#define sv_signal(sv) \
+       wake_up(&(sv)->waiters)
+#define sv_broadcast(sv) \
+       wake_up_all(&(sv)->waiters)
+
+#endif /* __XFS_SUPPORT_SV_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
new file mode 100644 (file)
index 0000000..200159f
--- /dev/null
@@ -0,0 +1,1284 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.         Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_trans.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_error.h"
+#include "xfs_rw.h"
+#include "xfs_iomap.h"
+#include <linux/mpage.h>
+#include <linux/writeback.h>
+
+STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
+STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
+               struct writeback_control *wbc, void *, int, int);
+
+#if defined(XFS_RW_TRACE)
+/*
+ * Record a page-level I/O event in the inode's rw ktrace buffer
+ * (compiled only under XFS_RW_TRACE; a no-op macro otherwise).  Splits
+ * the 64-bit sizes/offsets into 32-bit halves for the void* slots.
+ * NOTE(review): "page->index << PAGE_CACHE_SHIFT" shifts an unsigned
+ * long before widening to loff_t -- may truncate for large files on
+ * 32-bit; trace-only, so harmless to correctness.  Confirm.
+ */
+void
+xfs_page_trace(
+       int             tag,
+       struct inode    *inode,
+       struct page     *page,
+       int             mask)
+{
+       xfs_inode_t     *ip;
+       bhv_desc_t      *bdp;
+       vnode_t         *vp = LINVFS_GET_VP(inode);
+       loff_t          isize = i_size_read(inode);
+       loff_t          offset = page->index << PAGE_CACHE_SHIFT;
+       int             delalloc = -1, unmapped = -1, unwritten = -1;
+
+       if (page_has_buffers(page))
+               xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
+
+       bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
+       ip = XFS_BHVTOI(bdp);
+       if (!ip->i_rwtrace)
+               return;
+
+       ktrace_enter(ip->i_rwtrace,
+               (void *)((unsigned long)tag),
+               (void *)ip,
+               (void *)inode,
+               (void *)page,
+               (void *)((unsigned long)mask),
+               (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
+               (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
+               (void *)((unsigned long)((isize >> 32) & 0xffffffff)),
+               (void *)((unsigned long)(isize & 0xffffffff)),
+               (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
+               (void *)((unsigned long)(offset & 0xffffffff)),
+               (void *)((unsigned long)delalloc),
+               (void *)((unsigned long)unmapped),
+               (void *)((unsigned long)unwritten),
+               (void *)NULL,
+               (void *)NULL);
+}
+#else
+#define xfs_page_trace(tag, inode, page, mask)
+#endif
+
+/*
+ * Buffer end_io callback for writes over unwritten extents: clear the
+ * unwritten flag, propagate an I/O error to the controlling pagebuf,
+ * drop one pb_io_remaining reference (the last drop fires
+ * pagebuf_iodone, which schedules the extent conversion), then run the
+ * normal async-write completion.
+ */
+void
+linvfs_unwritten_done(
+       struct buffer_head      *bh,
+       int                     uptodate)
+{
+       xfs_buf_t               *pb = (xfs_buf_t *)bh->b_private;
+
+       ASSERT(buffer_unwritten(bh));
+       bh->b_end_io = NULL;
+       clear_buffer_unwritten(bh);
+       if (!uptodate)
+               pagebuf_ioerror(pb, EIO);
+       /* NOTE(review): atomic_dec_and_test() returns nonzero only when the
+        * counter hits zero; the "== 1" comparison is redundant -- confirm. */
+       if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
+               pagebuf_iodone(pb, 1, 1);
+       }
+       end_buffer_async_write(bh, uptodate);
+}
+
+/*
+ * Issue transactions to convert a buffer range from unwritten
+ * to written extents (buffered IO).
+ */
+STATIC void
+linvfs_unwritten_convert(
+       xfs_buf_t       *bp)
+{
+       vnode_t         *vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
+       int             error;
+
+       BUG_ON(atomic_read(&bp->pb_hold) < 1);
+       /* convert the covered range from unwritten to written extents */
+       VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
+                       BMAPI_UNWRITTEN, NULL, NULL, error);
+       XFS_BUF_SET_FSPRIVATE(bp, NULL);
+       XFS_BUF_CLR_IODONE_FUNC(bp);
+       XFS_BUF_UNDATAIO(bp);
+       /* drop the inode reference taken when the unwritten I/O was set up */
+       iput(LINVFS_GET_IP(vp));
+       pagebuf_iodone(bp, 0, 0);
+}
+
+/*
+ * Issue transactions to convert a buffer range from unwritten
+ * to written extents (direct IO).
+ */
+STATIC void
+linvfs_unwritten_convert_direct(
+       struct inode    *inode,
+       loff_t          offset,
+       ssize_t         size,
+       void            *private)
+{
+       ASSERT(!private || inode == (struct inode *)private);
+
+       /* private indicates an unwritten extent lay beneath this IO,
+        * see linvfs_get_block_core.
+        */
+       if (private && size > 0) {
+               vnode_t *vp = LINVFS_GET_VP(inode);
+               int     error;
+
+               /* error is captured by the VOP_BMAP macro but not
+                * propagated -- direct I/O completion has no way to
+                * report it from here */
+               VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
+       }
+}
+
+/*
+ * Map a file range to disk blocks via VOP_BMAP, filling *@iomapp.
+ * Direct writes at/past EOF have their length rounded up (to
+ * XFS_WRITE_IO_LOG) to encourage contiguous allocation.  A direct
+ * write that lands on a delayed-allocation extent retries in
+ * BMAPI_ALLOCATE mode to convert the reservation into real blocks.
+ * Returns a negated errno, 0 on success.
+ */
+STATIC int
+xfs_map_blocks(
+       struct inode            *inode,
+       loff_t                  offset,
+       ssize_t                 count,
+       xfs_iomap_t             *iomapp,
+       int                     flags)
+{
+       vnode_t                 *vp = LINVFS_GET_VP(inode);
+       int                     error, niomaps = 1;
+
+       if (((flags & (BMAPI_DIRECT|BMAPI_SYNC)) == BMAPI_DIRECT) &&
+           (offset >= i_size_read(inode)))
+               count = max_t(ssize_t, count, XFS_WRITE_IO_LOG);
+retry:
+       VOP_BMAP(vp, offset, count, flags, iomapp, &niomaps, error);
+       if ((error == EAGAIN) || (error == EIO))
+               return -error;
+       if (unlikely((flags & (BMAPI_WRITE|BMAPI_DIRECT)) ==
+                                       (BMAPI_WRITE|BMAPI_DIRECT) && niomaps &&
+                                       (iomapp->iomap_flags & IOMAP_DELAY))) {
+               flags = BMAPI_ALLOCATE;
+               goto retry;
+       }
+       if (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
+               VMODIFY(vp);
+       }
+       return -error;
+}
+
+/*
+ * Finds the corresponding mapping in block @map array of the
+ * given @offset within a @page.
+ */
+STATIC xfs_iomap_t *
+xfs_offset_to_map(
+       struct page             *page,
+       xfs_iomap_t             *iomapp,
+       unsigned long           offset)
+{
+       loff_t                  full_offset;    /* offset from start of file */
+
+       ASSERT(offset < PAGE_CACHE_SIZE);
+
+       full_offset = page->index;              /* NB: using 64bit number */
+       full_offset <<= PAGE_CACHE_SHIFT;       /* offset from file start */
+       full_offset += offset;                  /* offset from page start */
+
+       /* return the iomap only if the absolute file offset falls inside
+        * [iomap_offset, iomap_offset + iomap_bsize - 1]; NULL otherwise */
+       if (full_offset < iomapp->iomap_offset)
+               return NULL;
+       if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset)
+               return iomapp;
+       return NULL;
+}
+
+/*
+ * Fill in @bh's on-disk block number and block device from @iomapp for
+ * the buffer at byte @offset within @page: compute the fs-block delta
+ * from the iomap start, convert the iomap's BB (512-byte) start block
+ * to fs-block units, and mark the buffer mapped / not delayed.  The
+ * buffer is left locked -- the caller is responsible for unlocking.
+ */
+STATIC void
+xfs_map_at_offset(
+       struct page             *page,
+       struct buffer_head      *bh,
+       unsigned long           offset,
+       int                     block_bits,
+       xfs_iomap_t             *iomapp)
+{
+       xfs_daddr_t             bn;
+       loff_t                  delta;
+       int                     sector_shift;
+
+       ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
+       ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
+       ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
+
+       delta = page->index;
+       delta <<= PAGE_CACHE_SHIFT;
+       delta += offset;
+       delta -= iomapp->iomap_offset;
+       delta >>= block_bits;
+
+       sector_shift = block_bits - BBSHIFT;
+       bn = iomapp->iomap_bn >> sector_shift;
+       bn += delta;
+       ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
+
+       lock_buffer(bh);
+       bh->b_blocknr = bn;
+       bh->b_bdev = iomapp->iomap_target->pbr_bdev;
+       set_buffer_mapped(bh);
+       clear_buffer_delay(bh);
+}
+
+/*
+ * Look for a page at index which is unlocked and contains our
+ * unwritten extent flagged buffers at its head.  Returns page
+ * locked and with an extra reference count, and length of the
+ * unwritten extent component on this page that we can write,
+ * in units of filesystem blocks.
+ */
+STATIC struct page *
+xfs_probe_unwritten_page(
+       struct address_space    *mapping,
+       pgoff_t                 index,
+       xfs_iomap_t             *iomapp,
+       xfs_buf_t               *pb,
+       unsigned long           max_offset,
+       unsigned long           *fsbs,
+       unsigned int            bbits)
+{
+       struct page             *page;
+
+       page = find_trylock_page(mapping, index);
+       if (!page)
+               /* literal 0 used as a NULL page pointer here */
+               return 0;
+       if (PageWriteback(page))
+               goto out;
+
+       if (page->mapping && page_has_buffers(page)) {
+               struct buffer_head      *bh, *head;
+               unsigned long           p_offset = 0;
+
+               /* map leading unwritten buffers covered by the iomap,
+                * wiring each to @pb; stop at the first mismatch */
+               *fsbs = 0;
+               bh = head = page_buffers(page);
+               do {
+                       if (!buffer_unwritten(bh))
+                               break;
+                       if (!xfs_offset_to_map(page, iomapp, p_offset))
+                               break;
+                       if (p_offset >= max_offset)
+                               break;
+                       xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
+                       set_buffer_unwritten_io(bh);
+                       bh->b_private = pb;
+                       p_offset += bh->b_size;
+                       (*fsbs)++;
+               } while ((bh = bh->b_this_page) != head);
+
+               /* success: page returned still locked with its reference */
+               if (p_offset)
+                       return page;
+       }
+
+out:
+       unlock_page(page);
+       return NULL;
+}
+
+/*
+ * Look for a page at index which is unlocked and not mapped
+ * yet - clustering for mmap write case.
+ */
+STATIC unsigned int
+xfs_probe_unmapped_page(
+       struct address_space    *mapping,
+       pgoff_t                 index,
+       unsigned int            pg_offset)
+{
+       struct page             *page;
+       int                     ret = 0;
+
+       page = find_trylock_page(mapping, index);
+       if (!page)
+               return 0;
+       if (PageWriteback(page))
+               goto out;
+
+       if (page->mapping && PageDirty(page)) {
+               if (page_has_buffers(page)) {
+                       struct buffer_head      *bh, *head;
+
+                       /* sum bytes of leading unmapped-but-uptodate
+                        * buffers, capped at @pg_offset */
+                       bh = head = page_buffers(page);
+                       do {
+                               if (buffer_mapped(bh) || !buffer_uptodate(bh))
+                                       break;
+                               ret += bh->b_size;
+                               if (ret >= pg_offset)
+                                       break;
+                       } while ((bh = bh->b_this_page) != head);
+               } else
+                       /* no buffers: whole dirty page counts */
+                       ret = PAGE_CACHE_SIZE;
+       }
+
+out:
+       unlock_page(page);
+       return ret;
+}
+
+/*
+ * Count the bytes of contiguous unmapped buffers starting at @bh in
+ * @startpage, continuing into following pages (pruned to at most 64
+ * pages past the start) up to EOF.  The total is a sizing hint for how
+ * large a delayed allocation to attempt in one go.
+ */
+STATIC unsigned int
+xfs_probe_unmapped_cluster(
+       struct inode            *inode,
+       struct page             *startpage,
+       struct buffer_head      *bh,
+       struct buffer_head      *head)
+{
+       pgoff_t                 tindex, tlast, tloff;
+       unsigned int            pg_offset, len, total = 0;
+       struct address_space    *mapping = inode->i_mapping;
+
+       /* First sum forwards in this page */
+       do {
+               if (buffer_mapped(bh))
+                       break;
+               total += bh->b_size;
+       } while ((bh = bh->b_this_page) != head);
+
+       /* If we reached the end of the page, sum forwards in
+        * following pages.
+        */
+       if (bh == head) {
+               tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+               /* Prune this back to avoid pathological behavior */
+               tloff = min(tlast, startpage->index + 64);
+               for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
+                       len = xfs_probe_unmapped_page(mapping, tindex,
+                                                       PAGE_CACHE_SIZE);
+                       if (!len)
+                               return total;
+                       total += len;
+               }
+               /* partial page at EOF: only count up to the file size */
+               if (tindex == tlast &&
+                   (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
+                       total += xfs_probe_unmapped_page(mapping,
+                                                       tindex, pg_offset);
+               }
+       }
+       return total;
+}
+
+/*
+ * Probe for a given page (index) in the inode and test if it is delayed
+ * and without unwritten buffers.  Returns page locked and with an extra
+ * reference count.
+ */
+STATIC struct page *
+xfs_probe_delalloc_page(
+       struct inode            *inode,
+       pgoff_t                 index)
+{
+       struct page             *page;
+
+       page = find_trylock_page(inode->i_mapping, index);
+       if (!page)
+               return NULL;
+       if (PageWriteback(page))
+               goto out;
+
+       if (page->mapping && page_has_buffers(page)) {
+               struct buffer_head      *bh, *head;
+               int                     acceptable = 0;
+
+               /* acceptable iff at least one delayed buffer is seen and
+                * no unwritten buffer occurs before it */
+               bh = head = page_buffers(page);
+               do {
+                       if (buffer_unwritten(bh)) {
+                               acceptable = 0;
+                               break;
+                       } else if (buffer_delay(bh)) {
+                               acceptable = 1;
+                       }
+               } while ((bh = bh->b_this_page) != head);
+
+               /* success: page returned still locked with its reference */
+               if (acceptable)
+                       return page;
+       }
+
+out:
+       unlock_page(page);
+       return NULL;
+}
+
+/*
+ * Map the run of unwritten-extent buffers starting at @curr on
+ * @start_page, then walk forward through any following pages covered
+ * by the same extent, attaching every buffer to a single pagebuf
+ * (@pb) so that unwritten-extent conversion runs exactly once when
+ * all of the associated I/O has completed.
+ *
+ * Returns 0 on success, or -EAGAIN when the pagebuf or the inode
+ * reference cannot be obtained (callers may retry later).
+ */
+STATIC int
+xfs_map_unwritten(
+       struct inode            *inode,
+       struct page             *start_page,
+       struct buffer_head      *head,
+       struct buffer_head      *curr,
+       unsigned long           p_offset,
+       int                     block_bits,
+       xfs_iomap_t             *iomapp,
+       struct writeback_control *wbc,
+       int                     startio,
+       int                     all_bh)
+{
+       struct buffer_head      *bh = curr;
+       xfs_iomap_t             *tmp;
+       xfs_buf_t               *pb;
+       loff_t                  offset, size;
+       unsigned long           nblocks = 0;
+
+       /* absolute byte offset of the first buffer in this run */
+       offset = start_page->index;
+       offset <<= PAGE_CACHE_SHIFT;
+       offset += p_offset;
+
+       /* get an "empty" pagebuf to manage IO completion
+        * Proper values will be set before returning */
+       pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
+       if (!pb)
+               return -EAGAIN;
+
+       /* Take a reference to the inode to prevent it from
+        * being reclaimed while we have outstanding unwritten
+        * extent IO on it.
+        */
+       if ((igrab(inode)) != inode) {
+               pagebuf_free(pb);
+               return -EAGAIN;
+       }
+
+       /* Set the count to 1 initially, this will stop an I/O
+        * completion callout which happens before we have started
+        * all the I/O from calling pagebuf_iodone too early.
+        */
+       atomic_set(&pb->pb_io_remaining, 1);
+
+       /* First map forwards in the page consecutive buffers
+        * covering this unwritten extent
+        */
+       do {
+               if (!buffer_unwritten(bh))
+                       break;
+               tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
+               if (!tmp)
+                       break;
+               xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
+               set_buffer_unwritten_io(bh);
+               bh->b_private = pb;
+               p_offset += bh->b_size;
+               nblocks++;
+       } while ((bh = bh->b_this_page) != head);
+
+       atomic_add(nblocks, &pb->pb_io_remaining);
+
+       /* If we reached the end of the page, map forwards in any
+        * following pages which are also covered by this extent.
+        */
+       if (bh == head) {
+               struct address_space    *mapping = inode->i_mapping;
+               pgoff_t                 tindex, tloff, tlast;
+               unsigned long           bs;
+               unsigned int            pg_offset, bbits = inode->i_blkbits;
+               struct page             *page;
+
+               tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+               tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
+               tloff = min(tlast, tloff);
+               for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
+                       page = xfs_probe_unwritten_page(mapping,
+                                               tindex, iomapp, pb,
+                                               PAGE_CACHE_SIZE, &bs, bbits);
+                       if (!page)
+                               break;
+                       nblocks += bs;
+                       atomic_add(bs, &pb->pb_io_remaining);
+                       xfs_convert_page(inode, page, iomapp, wbc, pb,
+                                                       startio, all_bh);
+                       /* stop if converting the next page might add
+                        * enough blocks that the corresponding byte
+                        * count won't fit in our ulong page buf length */
+                       if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
+                               goto enough;
+               }
+
+               /* Handle a final, partial page at EOF separately. */
+               if (tindex == tlast &&
+                   (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
+                       page = xfs_probe_unwritten_page(mapping,
+                                                       tindex, iomapp, pb,
+                                                       pg_offset, &bs, bbits);
+                       if (page) {
+                               nblocks += bs;
+                               atomic_add(bs, &pb->pb_io_remaining);
+                               xfs_convert_page(inode, page, iomapp, wbc, pb,
+                                                       startio, all_bh);
+                               if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
+                                       goto enough;
+                       }
+               }
+       }
+
+enough:
+       size = nblocks;         /* NB: using 64bit number here */
+       size <<= block_bits;    /* convert fsb's to byte range */
+
+       XFS_BUF_DATAIO(pb);
+       XFS_BUF_ASYNC(pb);
+       XFS_BUF_SET_SIZE(pb, size);
+       XFS_BUF_SET_COUNT(pb, size);
+       XFS_BUF_SET_OFFSET(pb, offset);
+       XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
+       XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);
+
+       /* Drop the initial bias reference taken above.  Note that
+        * atomic_dec_and_test() only guarantees a *true* (nonzero)
+        * return when the counter reaches zero -- it does not promise
+        * the value 1, so test truth rather than comparing against 1.
+        */
+       if (atomic_dec_and_test(&pb->pb_io_remaining)) {
+               pagebuf_iodone(pb, 1, 1);
+       }
+
+       return 0;
+}
+
+/*
+ * Mark @page under writeback and push out the @cnt locked buffers in
+ * @bh_arr[].  With no buffers to write, writeback on the page is
+ * ended immediately.  The page lock is dropped before any I/O is
+ * issued.
+ */
+STATIC void
+xfs_submit_page(
+       struct page             *page,
+       struct buffer_head      *bh_arr[],
+       int                     cnt)
+{
+       struct buffer_head      *bh;
+       int                     i;
+
+       BUG_ON(PageWriteback(page));
+       set_page_writeback(page);
+       clear_page_dirty(page);
+       unlock_page(page);
+
+       if (cnt) {
+               /* Flip the state of every buffer before submitting any
+                * of them -- presumably so that async completion never
+                * sees a half-prepared set of buffers on this page.
+                */
+               for (i = 0; i < cnt; i++) {
+                       bh = bh_arr[i];
+                       mark_buffer_async_write(bh);
+                       if (buffer_unwritten(bh))
+                               set_buffer_unwritten_io(bh);
+                       set_buffer_uptodate(bh);
+                       clear_buffer_dirty(bh);
+               }
+
+               for (i = 0; i < cnt; i++)
+                       submit_bh(WRITE, bh_arr[i]);
+       } else
+               end_page_writeback(page);
+}
+
+/*
+ * Allocate & map buffers for page given the extent map. Write it out.
+ * except for the original page of a writepage, this is called on
+ * delalloc/unwritten pages only, for the original page it is possible
+ * that the page has no mapping at all.
+ *
+ * @private is the pagebuf supplied by an unwritten-extent caller (may
+ * be NULL on the delalloc path); @startio selects writing the buffers
+ * out versus merely marking them dirty.
+ */
+STATIC void
+xfs_convert_page(
+       struct inode            *inode,
+       struct page             *page,
+       xfs_iomap_t             *iomapp,
+       struct writeback_control *wbc,
+       void                    *private,
+       int                     startio,
+       int                     all_bh)
+{
+       struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
+       xfs_iomap_t             *mp = iomapp, *tmp;
+       unsigned long           end, offset;
+       pgoff_t                 end_index;
+       int                     i = 0, index = 0;
+       int                     bbits = inode->i_blkbits;
+
+       /* Number of valid bytes on this page: a full page unless this
+        * is the page containing EOF.
+        */
+       end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+       if (page->index < end_index) {
+               end = PAGE_CACHE_SIZE;
+       } else {
+               end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
+       }
+       bh = head = page_buffers(page);
+       do {
+               /* byte offset of this buffer within the page; note a
+                * continue below still advances i and bh through the
+                * while clause at the bottom of the loop.
+                */
+               offset = i << bbits;
+               if (!(PageUptodate(page) || buffer_uptodate(bh)))
+                       continue;
+               if (buffer_mapped(bh) && all_bh &&
+                   !buffer_unwritten(bh) && !buffer_delay(bh)) {
+                       if (startio && (offset < end)) {
+                               lock_buffer(bh);
+                               bh_arr[index++] = bh;
+                       }
+                       continue;
+               }
+               tmp = xfs_offset_to_map(page, mp, offset);
+               if (!tmp)
+                       continue;
+               ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
+               ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));
+
+               /* If this is a new unwritten extent buffer (i.e. one
+                * that we haven't passed in private data for, we must
+                * now map this buffer too.
+                */
+               if (buffer_unwritten(bh) && !bh->b_end_io) {
+                       ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
+                       xfs_map_unwritten(inode, page, head, bh, offset,
+                                       bbits, tmp, wbc, startio, all_bh);
+               } else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
+                       xfs_map_at_offset(page, bh, offset, bbits, tmp);
+                       if (buffer_unwritten(bh)) {
+                               set_buffer_unwritten_io(bh);
+                               bh->b_private = private;
+                               ASSERT(private);
+                       }
+               }
+               if (startio && (offset < end)) {
+                       bh_arr[index++] = bh;
+               } else {
+                       set_buffer_dirty(bh);
+                       unlock_buffer(bh);
+                       mark_buffer_dirty(bh);
+               }
+       } while (i++, (bh = bh->b_this_page) != head);
+
+       if (startio) {
+               wbc->nr_to_write--;
+               xfs_submit_page(page, bh_arr, index);
+       } else {
+               unlock_page(page);
+       }
+}
+
+/*
+ * Convert & write out a cluster of pages in the same extent as defined
+ * by iomapp, starting with the page at index tindex.  Stops at the
+ * first page that is not delalloc or at the end of the mapping.
+ */
+STATIC void
+xfs_cluster_write(
+       struct inode            *inode,
+       pgoff_t                 tindex,
+       xfs_iomap_t             *iomapp,
+       struct writeback_control *wbc,
+       int                     startio,
+       int                     all_bh)
+{
+       pgoff_t                 last_index;
+       struct page             *cluster_page;
+
+       /* First page index past the end of this mapping (exclusive). */
+       last_index = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
+       while (tindex < last_index) {
+               cluster_page = xfs_probe_delalloc_page(inode, tindex);
+               if (!cluster_page)
+                       break;
+               xfs_convert_page(inode, cluster_page, iomapp, wbc, NULL,
+                               startio, all_bh);
+               tindex++;
+       }
+}
+
+/*
+ * Calling this without startio set means we are being asked to make a dirty
+ * page ready for freeing it's buffers.  When called with startio set then
+ * we are coming from writepage.
+ *
+ * When called with startio set it is important that we write the WHOLE
+ * page if possible.
+ * The bh->b_state's cannot know if any of the blocks or which block for
+ * that matter are dirty due to mmap writes, and therefore bh uptodate is
+ * only vaild if the page itself isn't completely uptodate.  Some layers
+ * may clear the page dirty flag prior to calling write page, under the
+ * assumption the entire page will be written out; by not writing out the
+ * whole page the page can be reused before all valid dirty data is
+ * written out.  Note: in the case of a page that has been dirty'd by
+ * mapwrite and but partially setup by block_prepare_write the
+ * bh->b_states's will not agree and only ones setup by BPW/BCW will have
+ * valid state, thus the whole page must be written out thing.
+ *
+ * Returns the final value of page_dirty (zero once any buffer has been
+ * set up for write-out) or a negative errno on failure.
+ */
+
+STATIC int
+xfs_page_state_convert(
+       struct inode    *inode,
+       struct page     *page,
+       struct writeback_control *wbc,
+       int             startio,
+       int             unmapped) /* also implies page uptodate */
+{
+       struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
+       xfs_iomap_t             *iomp, iomap;
+       unsigned long           p_offset = 0;
+       pgoff_t                 end_index;
+       loff_t                  offset;
+       unsigned long long      end_offset;
+       int                     len, err, i, cnt = 0, uptodate = 1;
+       int                     flags = startio ? 0 : BMAPI_TRYLOCK;
+       int                     page_dirty = 1;
+
+
+       /* Are we off the end of the file ? */
+       end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+       if (page->index >= end_index) {
+               if ((page->index >= end_index + 1) ||
+                   !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
+                       err = -EIO;
+                       goto error;
+               }
+       }
+
+       offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+       end_offset = min_t(unsigned long long,
+                       offset + PAGE_CACHE_SIZE, i_size_read(inode));
+
+       bh = head = page_buffers(page);
+       iomp = NULL;
+
+       /* All buffers on a page share one size. */
+       len = bh->b_size;
+       do {
+               if (offset >= end_offset)
+                       break;
+               if (!buffer_uptodate(bh))
+                       uptodate = 0;
+               /* NB: continue here still advances offset/p_offset/bh
+                * through the while clause at the bottom of the loop.
+                */
+               if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
+                       continue;
+
+               /* Re-check that the mapping obtained on an earlier
+                * iteration still covers this offset; a fresh mapping
+                * is fetched below when it does not.
+                */
+               if (iomp) {
+                       iomp = xfs_offset_to_map(page, &iomap, p_offset);
+               }
+
+               /*
+                * First case, map an unwritten extent and prepare for
+                * extent state conversion transaction on completion.
+                */
+               if (buffer_unwritten(bh)) {
+                       if (!iomp) {
+                               err = xfs_map_blocks(inode, offset, len, &iomap,
+                                               BMAPI_READ|BMAPI_IGNSTATE);
+                               if (err) {
+                                       goto error;
+                               }
+                               iomp = xfs_offset_to_map(page, &iomap,
+                                                               p_offset);
+                       }
+                       if (iomp && startio) {
+                               if (!bh->b_end_io) {
+                                       err = xfs_map_unwritten(inode, page,
+                                                       head, bh, p_offset,
+                                                       inode->i_blkbits, iomp,
+                                                       wbc, startio, unmapped);
+                                       if (err) {
+                                               goto error;
+                                       }
+                               }
+                               bh_arr[cnt++] = bh;
+                               page_dirty = 0;
+                       }
+               /*
+                * Second case, allocate space for a delalloc buffer.
+                * We can return EAGAIN here in the release page case.
+                */
+               } else if (buffer_delay(bh)) {
+                       if (!iomp) {
+                               err = xfs_map_blocks(inode, offset, len, &iomap,
+                                               BMAPI_ALLOCATE | flags);
+                               if (err) {
+                                       goto error;
+                               }
+                               iomp = xfs_offset_to_map(page, &iomap,
+                                                               p_offset);
+                       }
+                       if (iomp) {
+                               xfs_map_at_offset(page, bh, p_offset,
+                                               inode->i_blkbits, iomp);
+                               if (startio) {
+                                       bh_arr[cnt++] = bh;
+                               } else {
+                                       set_buffer_dirty(bh);
+                                       unlock_buffer(bh);
+                                       mark_buffer_dirty(bh);
+                               }
+                               page_dirty = 0;
+                       }
+               } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
+                          (unmapped || startio)) {
+
+                       if (!buffer_mapped(bh)) {
+                               int     size;
+
+                               /*
+                                * Getting here implies an unmapped buffer
+                                * was found, and we are in a path where we
+                                * need to write the whole page out.
+                                */
+                               if (!iomp) {
+                                       size = xfs_probe_unmapped_cluster(
+                                                       inode, page, bh, head);
+                                       err = xfs_map_blocks(inode, offset,
+                                                       size, &iomap,
+                                                       BMAPI_WRITE|BMAPI_MMAP);
+                                       if (err) {
+                                               goto error;
+                                       }
+                                       iomp = xfs_offset_to_map(page, &iomap,
+                                                                    p_offset);
+                               }
+                               if (iomp) {
+                                       xfs_map_at_offset(page,
+                                                       bh, p_offset,
+                                                       inode->i_blkbits, iomp);
+                                       if (startio) {
+                                               bh_arr[cnt++] = bh;
+                                       } else {
+                                               set_buffer_dirty(bh);
+                                               unlock_buffer(bh);
+                                               mark_buffer_dirty(bh);
+                                       }
+                                       page_dirty = 0;
+                               }
+                       } else if (startio) {
+                               if (buffer_uptodate(bh) &&
+                                   !test_and_set_bit(BH_Lock, &bh->b_state)) {
+                                       bh_arr[cnt++] = bh;
+                                       page_dirty = 0;
+                               }
+                       }
+               }
+       } while (offset += len, p_offset += len,
+               ((bh = bh->b_this_page) != head));
+
+       if (uptodate && bh == head)
+               SetPageUptodate(page);
+
+       if (startio)
+               xfs_submit_page(page, bh_arr, cnt);
+
+       /* Push out any following pages covered by the same mapping. */
+       if (iomp) {
+               xfs_cluster_write(inode, page->index + 1, iomp, wbc,
+                               startio, unmapped);
+       }
+
+       return page_dirty;
+
+error:
+       for (i = 0; i < cnt; i++) {
+               unlock_buffer(bh_arr[i]);
+       }
+
+       /*
+        * If it's delalloc and we have nowhere to put it,
+        * throw it away, unless the lower layers told
+        * us to try again.
+        */
+       if (err != -EAGAIN) {
+               if (!unmapped) {
+                       block_invalidatepage(page, 0);
+               }
+               ClearPageUptodate(page);
+       }
+       return err;
+}
+
+/*
+ * Shared implementation behind the linvfs_get_block* variants: map
+ * @iblock (and up to @blocks blocks, when @blocks is non-zero) via
+ * VOP_BMAP and translate the resulting xfs_iomap_t into state bits on
+ * @bh_result.  @direct flags direct I/O; @flags selects the BMAPI
+ * behaviour on @create.  Returns 0 or a negative errno.
+ */
+STATIC int
+linvfs_get_block_core(
+       struct inode            *inode,
+       sector_t                iblock,
+       unsigned long           blocks,
+       struct buffer_head      *bh_result,
+       int                     create,
+       int                     direct,
+       bmapi_flags_t           flags)
+{
+       vnode_t                 *vp = LINVFS_GET_VP(inode);
+       xfs_iomap_t             iomap;
+       int                     retpbbm = 1;
+       int                     error;
+       ssize_t                 size;
+       loff_t                  offset = (loff_t)iblock << inode->i_blkbits;
+
+       if (blocks)
+               size = blocks << inode->i_blkbits;
+       else
+               size = 1 << inode->i_blkbits;
+
+       VOP_BMAP(vp, offset, size,
+               create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
+       if (error)
+               /* XFS errors are positive; negate for the Linux caller */
+               return -error;
+
+       /* no mapping returned: treat as a hole */
+       if (retpbbm == 0)
+               return 0;
+
+       if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
+               xfs_daddr_t             bn;
+               loff_t                  delta;
+
+               /* For unwritten extents do not report a disk address on
+                * the read case (treat as if we're reading into a hole).
+                */
+               if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
+                       delta = offset - iomap.iomap_offset;
+                       delta >>= inode->i_blkbits;
+
+                       bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
+                       bn += delta;
+
+                       bh_result->b_blocknr = bn;
+                       bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
+                       set_buffer_mapped(bh_result);
+               }
+               if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
+                       if (direct)
+                               bh_result->b_private = inode;
+                       set_buffer_unwritten(bh_result);
+                       set_buffer_delay(bh_result);
+               }
+       }
+
+       /* If this is a realtime file, data might be on a new device */
+       bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
+
+       /* If we previously allocated a block out beyond eof and
+        * we are now coming back to use it then we will need to
+        * flag it as new even if it has a disk address.
+        */
+       if (create &&
+           ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
+            (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
+               set_buffer_new(bh_result);
+       }
+
+       /* Delayed allocation: never legal for direct I/O. */
+       if (iomap.iomap_flags & IOMAP_DELAY) {
+               if (unlikely(direct))
+                       BUG();
+               if (create) {
+                       set_buffer_mapped(bh_result);
+                       set_buffer_uptodate(bh_result);
+               }
+               bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
+               set_buffer_delay(bh_result);
+       }
+
+       /* Multi-block request: report how much of it this mapping covers. */
+       if (blocks) {
+               loff_t iosize;
+               iosize = (iomap.iomap_bsize - iomap.iomap_delta);
+               bh_result->b_size =
+                   (ssize_t)min(iosize, (loff_t)(blocks << inode->i_blkbits));
+       }
+
+       return 0;
+}
+
+/*
+ * get_block callback for ordinary buffered I/O: map one block,
+ * using (delayed) allocation when create is set.
+ */
+int
+linvfs_get_block(
+       struct inode            *inode,
+       sector_t                iblock,
+       struct buffer_head      *bh_result,
+       int                     create)
+{
+       return linvfs_get_block_core(inode, iblock, 0,
+                       bh_result, create, 0, BMAPI_WRITE);
+}
+
+/*
+ * get_block callback for O_SYNC writers: identical to
+ * linvfs_get_block but asks for synchronous allocation.
+ */
+STATIC int
+linvfs_get_block_sync(
+       struct inode            *inode,
+       sector_t                iblock,
+       struct buffer_head      *bh_result,
+       int                     create)
+{
+       return linvfs_get_block_core(inode, iblock, 0,
+                       bh_result, create, 0, BMAPI_SYNC|BMAPI_WRITE);
+}
+
+/*
+ * get_blocks callback for direct I/O: may map up to max_blocks
+ * blocks in one call, flagged as direct.
+ */
+STATIC int
+linvfs_get_blocks_direct(
+       struct inode            *inode,
+       sector_t                iblock,
+       unsigned long           max_blocks,
+       struct buffer_head      *bh_result,
+       int                     create)
+{
+       return linvfs_get_block_core(inode, iblock, max_blocks,
+                       bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
+}
+
+/*
+ * Direct I/O entry point: look up the target block device for this
+ * file via a BMAPI_DEVICE bmap, then hand the transfer to the generic
+ * blockdev direct-I/O engine with our unwritten-extent conversion as
+ * the completion callback.  Returns bytes transferred or a negative
+ * errno.
+ */
+STATIC ssize_t
+linvfs_direct_IO(
+       int                     rw,
+       struct kiocb            *iocb,
+       const struct iovec      *iov,
+       loff_t                  offset,
+       unsigned long           nr_segs)
+{
+       struct file     *file = iocb->ki_filp;
+       struct inode    *inode = file->f_mapping->host;
+       vnode_t         *vp = LINVFS_GET_VP(inode);
+       xfs_iomap_t     iomap;
+       int             maps = 1;
+       int             error;
+
+       VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
+       if (error)
+               /* XFS errors are positive; negate for the Linux caller */
+               return -error;
+
+       return blockdev_direct_IO_no_locking(rw, iocb, inode,
+               iomap.iomap_target->pbr_bdev,
+               iov, offset, nr_segs,
+               linvfs_get_blocks_direct,
+               linvfs_unwritten_convert_direct);
+}
+
+
+/*
+ * bmap address-space operation: flush any delalloc/dirty pages first
+ * so the generic block mapping sees real allocations, then delegate
+ * to generic_block_bmap.
+ */
+STATIC sector_t
+linvfs_bmap(
+       struct address_space    *mapping,
+       sector_t                block)
+{
+       struct inode            *inode = (struct inode *)mapping->host;
+       vnode_t                 *vp = LINVFS_GET_VP(inode);
+       int                     error;
+
+       vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);
+
+       VOP_RWLOCK(vp, VRWLOCK_READ);
+       /* NOTE(review): error from VOP_FLUSH_PAGES is ignored here --
+        * the flush appears to be best-effort before mapping, but
+        * confirm that dropping the error is intentional.
+        */
+       VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
+       VOP_RWUNLOCK(vp, VRWLOCK_READ);
+       return generic_block_bmap(mapping, block, linvfs_get_block);
+}
+
+/*
+ * readpage address-space operation: delegate a single-page read to
+ * the generic mpage engine using our block-mapping callback.
+ */
+STATIC int
+linvfs_readpage(
+       struct file             *unused,
+       struct page             *page)
+{
+       return mpage_readpage(page, linvfs_get_block);
+}
+
+/*
+ * readpages address-space operation: hand the whole batch of pages
+ * to the generic mpage readahead engine.
+ */
+STATIC int
+linvfs_readpages(
+       struct file             *unused,
+       struct address_space    *mapping,
+       struct list_head        *pages,
+       unsigned                nr_pages)
+{
+       return mpage_readpages(mapping, pages, nr_pages,
+                       linvfs_get_block);
+}
+
+/*
+ * Classify the buffers on @page: set *unmapped if any uptodate buffer
+ * lacks a mapping, *unwritten if any unwritten buffer remains, and
+ * *delalloc if any buffer is delayed-allocated.  The else-if ordering
+ * is load-bearing: an unwritten buffer that is NOT also delayed has
+ * its unwritten bit cleared instead of being counted (NOTE(review):
+ * presumably clearing stale state -- confirm).
+ */
+STATIC void
+xfs_count_page_state(
+       struct page             *page,
+       int                     *delalloc,
+       int                     *unmapped,
+       int                     *unwritten)
+{
+       struct buffer_head      *bh, *head;
+
+       *delalloc = *unmapped = *unwritten = 0;
+
+       bh = head = page_buffers(page);
+       do {
+               if (buffer_uptodate(bh) && !buffer_mapped(bh))
+                       (*unmapped) = 1;
+               else if (buffer_unwritten(bh) && !buffer_delay(bh))
+                       clear_buffer_unwritten(bh);
+               else if (buffer_unwritten(bh))
+                       (*unwritten) = 1;
+               else if (buffer_delay(bh))
+                       (*delalloc) = 1;
+       } while ((bh = bh->b_this_page) != head);
+}
+
+
+/*
+ * writepage: Called from one of two places:
+ *
+ * 1. we are flushing a delalloc buffer head.
+ *
+ * 2. we are writing out a dirty page. Typically the page dirty
+ *    state is cleared before we get here. In this case is it
+ *    conceivable we have no buffer heads.
+ *
+ * For delalloc space on the page we need to allocate space and
+ * flush it. For unmapped buffer heads on the page we should
+ * allocate space if the page is uptodate. For any other dirty
+ * buffer heads on the page we should flush them.
+ *
+ * If we detect that a transaction would be required to flush
+ * the page, we have to check the process flags first, if we
+ * are already in a transaction or disk I/O during allocations
+ * is off, we need to fail the writepage and redirty the page.
+ */
+
+STATIC int
+linvfs_writepage(
+       struct page             *page,
+       struct writeback_control *wbc)
+{
+       int                     error;
+       int                     need_trans;
+       int                     delalloc, unmapped, unwritten;
+       struct inode            *inode = page->mapping->host;
+
+       xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);
+
+       /*
+        * We need a transaction if:
+        *  1. There are delalloc buffers on the page
+        *  2. The page is uptodate and we have unmapped buffers
+        *  3. The page is uptodate and we have no buffers
+        *  4. There are unwritten buffers on the page
+        */
+
+       /* NB: delalloc/unwritten are only meaningful when the page had
+        * buffers; only unmapped is consulted further below.
+        */
+       if (!page_has_buffers(page)) {
+               unmapped = 1;
+               need_trans = 1;
+       } else {
+               xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
+               if (!PageUptodate(page))
+                       unmapped = 0;
+               need_trans = delalloc + unmapped + unwritten;
+       }
+
+       /*
+        * If we need a transaction and the process flags say
+        * we are already in a transaction, or no IO is allowed
+        * then mark the page dirty again and leave the page
+        * as is.
+        */
+       if (PFLAGS_TEST_FSTRANS() && need_trans)
+               goto out_fail;
+
+       /*
+        * Delay hooking up buffer heads until we have
+        * made our go/no-go decision.
+        */
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+
+       /*
+        * Convert delayed allocate, unwritten or unmapped space
+        * to real space and flush out to disk.
+        */
+       error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
+       if (error == -EAGAIN)
+               goto out_fail;
+       if (unlikely(error < 0))
+               goto out_unlock;
+
+       return 0;
+
+out_fail:
+       /* Could not write now: redirty the page and report success so
+        * that the VM retries the writeback later.
+        */
+       set_page_dirty(page);
+       unlock_page(page);
+       return 0;
+out_unlock:
+       unlock_page(page);
+       return error;
+}
+
+/*
+ * Called to move a page into cleanable state - and from there
+ * to be released. Possibly the page is already clean. We always
+ * have buffer heads in this call.
+ *
+ * Returns 0 if the page is ok to release, 1 otherwise.
+ *
+ * Possible scenarios are:
+ *
+ * 1. We are being called to release a page which has been written
+ *    to via regular I/O. buffer heads will be dirty and possibly
+ *    delalloc. If no delalloc buffer heads in this case then we
+ *    can just return zero.
+ *
+ * 2. We are called to release a page which has been written via
+ *    mmap, all we need to do is ensure there is no delalloc
+ *    state in the buffer heads, if not we can let the caller
+ *    free them and we should come back later via writepage.
+ */
+STATIC int
+linvfs_release_page(
+       struct page             *page,
+       int                     gfp_mask)
+{
+       struct inode            *inode = page->mapping->host;
+       int                     dirty, delalloc, unmapped, unwritten;
+       struct writeback_control wbc = {
+               .sync_mode = WB_SYNC_ALL,
+               .nr_to_write = 1,
+       };
+
+       xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);
+
+       xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
+       if (!delalloc && !unwritten)
+               goto free_buffers;
+
+       /* Caller cannot re-enter the filesystem; conversion needs to. */
+       if (!(gfp_mask & __GFP_FS))
+               return 0;
+
+       /* If we are already inside a transaction or the thread cannot
+        * do I/O, we cannot release this page.
+        */
+       if (PFLAGS_TEST_FSTRANS())
+               return 0;
+
+       /*
+        * Convert delalloc space to real space, do not flush the
+        * data out to disk, that will be done by the caller.
+        * Never need to allocate space here - we will always
+        * come back to writepage in that case.
+        */
+       dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
+       if (dirty == 0 && !unwritten)
+               goto free_buffers;
+       return 0;
+
+free_buffers:
+       return try_to_free_buffers(page);
+}
+
+/*
+ * prepare_write address-space operation: O_SYNC files get the
+ * synchronous block-mapping variant, everyone else the normal one.
+ */
+STATIC int
+linvfs_prepare_write(
+       struct file             *file,
+       struct page             *page,
+       unsigned int            from,
+       unsigned int            to)
+{
+       if (file && (file->f_flags & O_SYNC))
+               return block_prepare_write(page, from, to,
+                                               linvfs_get_block_sync);
+       return block_prepare_write(page, from, to,
+                                       linvfs_get_block);
+}
+
+/* Address-space operations wiring the Linux page cache into XFS. */
+struct address_space_operations linvfs_aops = {
+       .readpage               = linvfs_readpage,
+       .readpages              = linvfs_readpages,
+       .writepage              = linvfs_writepage,
+       .sync_page              = block_sync_page,
+       .releasepage            = linvfs_release_page,
+       .prepare_write          = linvfs_prepare_write,
+       .commit_write           = generic_commit_write,
+       .bmap                   = linvfs_bmap,
+       .direct_IO              = linvfs_direct_IO,
+};
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
new file mode 100644 (file)
index 0000000..b6dc7d9
--- /dev/null
@@ -0,0 +1,1812 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ *     The xfs_buf.c code provides an abstract buffer cache model on top
+ *     of the Linux page cache.  Cached metadata blocks for a file system
+ *     are hashed to the inode for the block device.  xfs_buf.c assembles
+ *     buffers (xfs_buf_t) on demand to aggregate such cached pages for I/O.
+ *
+ *      Written by Steve Lord, Jim Mostek, Russell Cattelan
+ *                 and Rajagopal Ananthanarayanan ("ananth") at SGI.
+ *
+ */
+
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/bio.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+#include <linux/suspend.h>
+#include <linux/percpu.h>
+
+#include "xfs_linux.h"
+
+#ifndef GFP_READAHEAD
+#define GFP_READAHEAD  (__GFP_NOWARN|__GFP_NORETRY)
+#endif
+
+/*
+ * File wide globals
+ */
+
+/* slab cache from which all xfs_buf_t objects are allocated */
+STATIC kmem_cache_t *pagebuf_cache;
+/* forward declarations for the delayed-write daemon helpers */
+STATIC void pagebuf_daemon_wakeup(void);
+STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
+/* separate workqueues for log I/O and data I/O completion processing */
+STATIC struct workqueue_struct *pagebuf_logio_workqueue;
+STATIC struct workqueue_struct *pagebuf_dataio_workqueue;
+
+/*
+ * Pagebuf debugging
+ */
+
+#ifdef PAGEBUF_TRACE
+/*
+ * Record one event for buffer "pb" in the global ktrace ring buffer:
+ * the event id, current flags/hold/sema counts, the calling task, the
+ * caller-supplied data pointer, the return address, and the buffer's
+ * file offset (split into two 32-bit halves) and length.
+ */
+void
+pagebuf_trace(
+       xfs_buf_t       *pb,
+       char            *id,
+       void            *data,
+       void            *ra)
+{
+       ktrace_enter(pagebuf_trace_buf,
+               pb, id,
+               (void *)(unsigned long)pb->pb_flags,
+               (void *)(unsigned long)pb->pb_hold.counter,
+               (void *)(unsigned long)pb->pb_sema.count.counter,
+               (void *)current,
+               data, ra,
+               (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
+               (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
+               (void *)(unsigned long)pb->pb_buffer_length,
+               NULL, NULL, NULL, NULL, NULL);
+}
+ktrace_t *pagebuf_trace_buf;
+#define PAGEBUF_TRACE_SIZE     4096
+#define PB_TRACE(pb, id, data) \
+       pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
+#else
+/* tracing compiled out: PB_TRACE becomes a no-op */
+#define PB_TRACE(pb, id, data) do { } while (0)
+#endif
+
+#ifdef PAGEBUF_LOCK_TRACKING
+/* remember which task last took/released the buffer semaphore */
+# define PB_SET_OWNER(pb)      ((pb)->pb_last_holder = current->pid)
+# define PB_CLEAR_OWNER(pb)    ((pb)->pb_last_holder = -1)
+# define PB_GET_OWNER(pb)      ((pb)->pb_last_holder)
+#else
+# define PB_SET_OWNER(pb)      do { } while (0)
+# define PB_CLEAR_OWNER(pb)    do { } while (0)
+# define PB_GET_OWNER(pb)      do { } while (0)
+#endif
+
+/*
+ * Pagebuf allocation / freeing.
+ */
+
+/* map pagebuf flags to a page-allocation gfp mask: readahead must
+ * never block or warn, DONT_BLOCK forbids fs recursion, otherwise
+ * normal kernel allocation is fine */
+#define pb_to_gfp(flags) \
+       (((flags) & PBF_READ_AHEAD) ? GFP_READAHEAD : \
+        ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL)
+
+/* same mapping for the kmem_* allocator flag space */
+#define pb_to_km(flags) \
+        (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+
+
+#define pagebuf_allocate(flags) \
+       kmem_zone_alloc(pagebuf_cache, pb_to_km(flags))
+#define pagebuf_deallocate(pb) \
+       kmem_zone_free(pagebuf_cache, (pb));
+
+/*
+ * Pagebuf hashing
+ */
+
+/* 2^NBITS hash buckets, each with its own lock */
+#define NBITS  8
+#define NHASH  (1<<NBITS)
+
+typedef struct {
+       struct list_head        pb_hash;        /* chain of xfs_buf_t, MRU first */
+       spinlock_t              pb_hash_lock;   /* protects pb_hash and hold counts */
+} pb_hash_t;
+
+STATIC pb_hash_t       pbhash[NHASH];
+/* bucket for a buffer, using the index stored at insert time */
+#define pb_hash(pb)    &pbhash[pb->pb_hash_index]
+
+/*
+ * Hash a (block device, byte offset) pair into [0, NHASH).  The offset
+ * is reduced to a 512-byte sector number, mixed with the bdev pointer
+ * (scaled by L1_CACHE_BYTES to discard alignment zeros), then folded
+ * NBITS at a time by XOR until the value is exhausted.
+ */
+STATIC int
+_bhash(
+       struct block_device *bdev,
+       loff_t          base)
+{
+       int             bit, hval;
+
+       base >>= 9;
+       base ^= (unsigned long)bdev / L1_CACHE_BYTES;
+       for (bit = hval = 0; base && bit < sizeof(base) * 8; bit += NBITS) {
+               hval ^= (int)base & (NHASH-1);
+               base >>= NBITS;
+       }
+       return hval;
+}
+
+/*
+ * Mapping of multi-page buffers into contiguous virtual space
+ */
+
+/* singly-linked list node holding one vmap'ed address awaiting vunmap */
+typedef struct a_list {
+       void            *vm_addr;
+       struct a_list   *next;
+} a_list_t;
+
+STATIC a_list_t                *as_free_head;  /* pending-vunmap list head */
+STATIC int             as_list_len;            /* number of pending entries */
+STATIC spinlock_t      as_lock = SPIN_LOCK_UNLOCKED;   /* protects both of the above */
+
+/*
+ * Try to batch vunmaps because they are costly.
+ */
+STATIC void
+free_address(
+       void            *addr)
+{
+       a_list_t        *aentry;
+
+       aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC);
+       if (aentry) {
+               spin_lock(&as_lock);
+               aentry->next = as_free_head;
+               aentry->vm_addr = addr;
+               as_free_head = aentry;
+               as_list_len++;
+               spin_unlock(&as_lock);
+       } else {
+               vunmap(addr);
+       }
+}
+
+/*
+ * Drain the deferred-unmap list: detach the whole list under the lock,
+ * then vunmap and free each entry outside it.  The initial unlocked
+ * NULL check is only a cheap early-out; the authoritative read happens
+ * under as_lock.
+ */
+STATIC void
+purge_addresses(void)
+{
+       a_list_t        *aentry, *old;
+
+       if (as_free_head == NULL)
+               return;
+
+       spin_lock(&as_lock);
+       aentry = as_free_head;
+       as_free_head = NULL;
+       as_list_len = 0;
+       spin_unlock(&as_lock);
+
+       while ((old = aentry) != NULL) {
+               vunmap(aentry->vm_addr);
+               aentry = aentry->next;
+               kfree(old);
+       }
+}
+
+/*
+ *     Internal pagebuf object manipulation
+ */
+
+/*
+ * Reset "pb" to a pristine buffer covering [range_base,
+ * range_base + range_length) on "target".  The buffer starts with one
+ * hold, its semaphore held by the caller, no disk address, and the
+ * PBF_NONE (no valid data) state.
+ */
+STATIC void
+_pagebuf_initialize(
+       xfs_buf_t               *pb,
+       xfs_buftarg_t           *target,
+       loff_t                  range_base,
+       size_t                  range_length,
+       page_buf_flags_t        flags)
+{
+       /*
+        * We don't want certain flags to appear in pb->pb_flags.
+        */
+       flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
+
+       memset(pb, 0, sizeof(xfs_buf_t));
+       atomic_set(&pb->pb_hold, 1);
+       init_MUTEX_LOCKED(&pb->pb_iodonesema);
+       INIT_LIST_HEAD(&pb->pb_list);
+       INIT_LIST_HEAD(&pb->pb_hash_list);
+       init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
+       PB_SET_OWNER(pb);
+       pb->pb_target = target;
+       pb->pb_file_offset = range_base;
+       /*
+        * Set buffer_length and count_desired to the same value initially.
+        * I/O routines should use count_desired, which will be the same in
+        * most cases but may be reset (e.g. XFS recovery).
+        */
+       pb->pb_buffer_length = pb->pb_count_desired = range_length;
+       pb->pb_flags = flags | PBF_NONE;
+       pb->pb_bn = XFS_BUF_DADDR_NULL;
+       atomic_set(&pb->pb_pin_count, 0);
+       init_waitqueue_head(&pb->pb_waiters);
+
+       XFS_STATS_INC(pb_create);
+       PB_TRACE(pb, "initialize", target);
+}
+
+/*
+ * Allocate a page array capable of holding a specified number
+ * of pages, and point the page buf at it.
+ */
+/*
+ * Allocate a page array capable of holding a specified number
+ * of pages, and point the page buf at it.
+ *
+ * Small buffers (<= PB_PAGES pages) reuse the inline pb_page_array
+ * to avoid an allocation; larger ones get a kmem_alloc'ed array.
+ * Returns 0 on success or -ENOMEM.  A no-op if a page list already
+ * exists.
+ */
+STATIC int
+_pagebuf_get_pages(
+       xfs_buf_t               *pb,
+       int                     page_count,
+       page_buf_flags_t        flags)
+{
+       /* Make sure that we have a page list */
+       if (pb->pb_pages == NULL) {
+               pb->pb_offset = page_buf_poff(pb->pb_file_offset);
+               pb->pb_page_count = page_count;
+               if (page_count <= PB_PAGES) {
+                       pb->pb_pages = pb->pb_page_array;
+               } else {
+                       pb->pb_pages = kmem_alloc(sizeof(struct page *) *
+                                       page_count, pb_to_km(flags));
+                       if (pb->pb_pages == NULL)
+                               return -ENOMEM;
+               }
+               memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
+       }
+       return 0;
+}
+
+/*
+ *     Frees pb_pages if it was malloced.
+ */
+/*
+ *     Frees pb_pages if it was malloced.
+ *
+ *     Buffers small enough to use the inline pb_page_array need no
+ *     freeing, hence the pointer comparison.
+ */
+STATIC void
+_pagebuf_free_pages(
+       xfs_buf_t       *bp)
+{
+       if (bp->pb_pages != bp->pb_page_array) {
+               kmem_free(bp->pb_pages,
+                         bp->pb_page_count * sizeof(struct page *));
+       }
+}
+
+/*
+ *     Releases the specified buffer.
+ *
+ *     The modification state of any associated pages is left unchanged.
+ *     The buffer most not be on any hash - use pagebuf_rele instead for
+ *     hashed and refcounted buffers
+ */
+/*
+ *     Releases the specified buffer.
+ *
+ *     The modification state of any associated pages is left unchanged.
+ *     The buffer most not be on any hash - use pagebuf_rele instead for
+ *     hashed and refcounted buffers.
+ *
+ *     Page-cache backed buffers drop their vmap mapping (batched via
+ *     free_address) and page references; kmem-backed buffers free the
+ *     memory directly.  Finally the xfs_buf_t itself goes back to the
+ *     slab cache.
+ */
+void
+pagebuf_free(
+       xfs_buf_t               *bp)
+{
+       PB_TRACE(bp, "free", 0);
+
+       ASSERT(list_empty(&bp->pb_hash_list));
+
+       if (bp->pb_flags & _PBF_PAGE_CACHE) {
+               uint            i;
+
+               /* multi-page mappings were vmap'ed; single pages were not */
+               if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
+                       free_address(bp->pb_addr - bp->pb_offset);
+
+               for (i = 0; i < bp->pb_page_count; i++)
+                       page_cache_release(bp->pb_pages[i]);
+               _pagebuf_free_pages(bp);
+       } else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
+                /*
+                 * XXX(hch): bp->pb_count_desired might be incorrect (see
+                 * pagebuf_associate_memory for details), but fortunately
+                 * the Linux version of kmem_free ignores the len argument..
+                 */
+               kmem_free(bp->pb_addr, bp->pb_count_desired);
+               _pagebuf_free_pages(bp);
+       }
+
+       pagebuf_deallocate(bp);
+}
+
+/*
+ *     Finds all pages for buffer in question and builds it's page list.
+ */
+/*
+ *     Finds all pages for buffer in question and builds it's page list.
+ *
+ *     Each page in the buffer's range is looked up (or created) in the
+ *     block device's page cache.  On allocation failure, readahead
+ *     callers bail out with -ENOMEM while ordinary callers wake the
+ *     pagebuf daemon and retry indefinitely.  While scanning,
+ *     "page_count" is repurposed as a tally of uptodate pages, which
+ *     finally determines the PBF_NONE/PBF_PARTIAL state of the buffer.
+ */
+STATIC int
+_pagebuf_lookup_pages(
+       xfs_buf_t               *bp,
+       uint                    flags)
+{
+       struct address_space    *mapping = bp->pb_target->pbr_mapping;
+       unsigned int            sectorshift = bp->pb_target->pbr_sshift;
+       size_t                  blocksize = bp->pb_target->pbr_bsize;
+       size_t                  size = bp->pb_count_desired;
+       size_t                  nbytes, offset;
+       int                     gfp_mask = pb_to_gfp(flags);
+       unsigned short          page_count, i;
+       pgoff_t                 first;
+       loff_t                  end;
+       int                     error;
+
+       end = bp->pb_file_offset + bp->pb_buffer_length;
+       page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
+
+       error = _pagebuf_get_pages(bp, page_count, flags);
+       if (unlikely(error))
+               return error;
+       bp->pb_flags |= _PBF_PAGE_CACHE;
+
+       offset = bp->pb_offset;
+       first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;
+
+       for (i = 0; i < bp->pb_page_count; i++) {
+               struct page     *page;
+               uint            retries = 0;
+
+             retry:
+               page = find_or_create_page(mapping, first + i, gfp_mask);
+               if (unlikely(page == NULL)) {
+                       if (flags & PBF_READ_AHEAD) {
+                               /* readahead is best-effort: release the
+                                * pages gathered so far and give up */
+                               bp->pb_page_count = i;
+                               for (i = 0; i < bp->pb_page_count; i++)
+                                       unlock_page(bp->pb_pages[i]);
+                               return -ENOMEM;
+                       }
+
+                       /*
+                        * This could deadlock.
+                        *
+                        * But until all the XFS lowlevel code is revamped to
+                        * handle buffer allocation failures we can't do much.
+                        */
+                       if (!(++retries % 100)) {
+                               printk(KERN_ERR "possibly deadlocking in %s\n",
+                                               __FUNCTION__);
+                       }
+
+                       XFS_STATS_INC(pb_page_retries);
+                       pagebuf_daemon_wakeup();
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       schedule_timeout(10);
+                       goto retry;
+               }
+
+               XFS_STATS_INC(pb_page_found);
+
+               /* bytes of this page actually covered by the buffer */
+               nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
+               size -= nbytes;
+
+               if (!PageUptodate(page)) {
+                       page_count--;
+                       if (blocksize == PAGE_CACHE_SIZE) {
+                               if (flags & PBF_READ)
+                                       bp->pb_locked = 1;
+                       } else if (!PagePrivate(page)) {
+                               unsigned long   j, range;
+
+                               /*
+                                * In this case page->private holds a bitmap
+                                * of uptodate sectors within the page
+                                */
+                               ASSERT(blocksize < PAGE_CACHE_SIZE);
+                               range = (offset + nbytes) >> sectorshift;
+                               for (j = offset >> sectorshift; j < range; j++)
+                                       if (!test_bit(j, &page->private))
+                                               break;
+                               /* every covered sector uptodate: count the
+                                * page as uptodate after all */
+                               if (j == range)
+                                       page_count++;
+                       }
+               }
+
+               bp->pb_pages[i] = page;
+               /* only the first page has a non-zero intra-page offset */
+               offset = 0;
+       }
+
+       if (!bp->pb_locked) {
+               for (i = 0; i < bp->pb_page_count; i++)
+                       unlock_page(bp->pb_pages[i]);
+       }
+
+       if (page_count) {
+               /* if we have any uptodate pages, mark that in the buffer */
+               bp->pb_flags &= ~PBF_NONE;
+
+               /* if some pages aren't uptodate, mark that in the buffer */
+               if (page_count != bp->pb_page_count)
+                       bp->pb_flags |= PBF_PARTIAL;
+       }
+
+       PB_TRACE(bp, "lookup_pages", (long)page_count);
+       return error;
+}
+
+/*
+ *     Map buffer into kernel address-space if nessecary.
+ */
+/*
+ *     Map buffer into kernel address-space if nessecary.
+ *
+ *     Single-page buffers are addressed directly via page_address;
+ *     multi-page buffers are vmap'ed only when the caller asked for
+ *     PBF_MAPPED.  Deferred vunmaps are flushed first once the batch
+ *     grows past 64 entries.  Returns 0 or -ENOMEM.
+ */
+STATIC int
+_pagebuf_map_pages(
+       xfs_buf_t               *bp,
+       uint                    flags)
+{
+       /* A single page buffer is always mappable */
+       if (bp->pb_page_count == 1) {
+               bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
+               bp->pb_flags |= PBF_MAPPED;
+       } else if (flags & PBF_MAPPED) {
+               if (as_list_len > 64)
+                       purge_addresses();
+               bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
+                               VM_MAP, PAGE_KERNEL);
+               if (unlikely(bp->pb_addr == NULL))
+                       return -ENOMEM;
+               /* point pb_addr at the requested offset within page 0 */
+               bp->pb_addr += bp->pb_offset;
+               bp->pb_flags |= PBF_MAPPED;
+       }
+
+       return 0;
+}
+
+/*
+ *     Finding and Reading Buffers
+ */
+
+/*
+ *     _pagebuf_find
+ *
+ *     Looks up, and creates if absent, a lockable buffer for
+ *     a given range of an inode.  The buffer is returned
+ *     locked.  If other overlapping buffers exist, they are
+ *     released before the new buffer is created and locked,
+ *     which may imply that this call will block until those buffers
+ *     are unlocked.  No I/O is implied by this call.
+ */
+/*
+ *     Looks up, and creates if absent, a lockable buffer for
+ *     a given range of an inode.  The buffer is returned
+ *     locked.  If other overlapping buffers exist, they are
+ *     released before the new buffer is created and locked,
+ *     which may imply that this call will block until those buffers
+ *     are unlocked.  No I/O is implied by this call.
+ *
+ *     On a hash hit the entry is moved to the bucket front (MRU) and a
+ *     hold is taken before the bucket lock is dropped; the buffer lock
+ *     is then acquired outside the spinlock.  On a miss, "new_pb" (if
+ *     supplied) is initialized and inserted while still holding the
+ *     bucket lock, so the new buffer is never visible uninitialized.
+ */
+STATIC xfs_buf_t *
+_pagebuf_find(                         /* find buffer for block        */
+       xfs_buftarg_t           *target,/* target for block             */
+       loff_t                  ioff,   /* starting offset of range     */
+       size_t                  isize,  /* length of range              */
+       page_buf_flags_t        flags,  /* PBF_TRYLOCK                  */
+       xfs_buf_t               *new_pb)/* newly allocated buffer       */
+{
+       loff_t                  range_base;
+       size_t                  range_length;
+       int                     hval;
+       pb_hash_t               *h;
+       xfs_buf_t               *pb, *n;
+       int                     not_locked;
+
+       /* convert basic blocks to bytes */
+       range_base = (ioff << BBSHIFT);
+       range_length = (isize << BBSHIFT);
+
+       /* Ensure we never do IOs smaller than the sector size */
+       BUG_ON(range_length < (1 << target->pbr_sshift));
+
+       /* Ensure we never do IOs that are not sector aligned */
+       BUG_ON(range_base & (loff_t)target->pbr_smask);
+
+       hval = _bhash(target->pbr_bdev, range_base);
+       h = &pbhash[hval];
+
+       spin_lock(&h->pb_hash_lock);
+       list_for_each_entry_safe(pb, n, &h->pb_hash, pb_hash_list) {
+               if (pb->pb_target == target &&
+                   pb->pb_file_offset == range_base &&
+                   pb->pb_buffer_length == range_length) {
+                       /* If we look at something bring it to the
+                        * front of the list for next time
+                        */
+                       atomic_inc(&pb->pb_hold);
+                       list_move(&pb->pb_hash_list, &h->pb_hash);
+                       goto found;
+               }
+       }
+
+       /* No match found */
+       if (new_pb) {
+               _pagebuf_initialize(new_pb, target, range_base,
+                               range_length, flags);
+               new_pb->pb_hash_index = hval;
+               list_add(&new_pb->pb_hash_list, &h->pb_hash);
+       } else {
+               XFS_STATS_INC(pb_miss_locked);
+       }
+
+       spin_unlock(&h->pb_hash_lock);
+       return (new_pb);
+
+found:
+       spin_unlock(&h->pb_hash_lock);
+
+       /* Attempt to get the semaphore without sleeping,
+        * if this does not work then we need to drop the
+        * spinlock and do a hard attempt on the semaphore.
+        */
+       not_locked = down_trylock(&pb->pb_sema);
+       if (not_locked) {
+               if (!(flags & PBF_TRYLOCK)) {
+                       /* wait for buffer ownership */
+                       PB_TRACE(pb, "get_lock", 0);
+                       pagebuf_lock(pb);
+                       XFS_STATS_INC(pb_get_locked_waited);
+               } else {
+                       /* We asked for a trylock and failed, no need
+                        * to look at file offset and length here, we
+                        * know that this pagebuf at least overlaps our
+                        * pagebuf and is locked, therefore our buffer
+                        * either does not exist, or is this buffer
+                        */
+
+                       pagebuf_rele(pb);
+                       XFS_STATS_INC(pb_busy_locked);
+                       return (NULL);
+               }
+       } else {
+               /* trylock worked */
+               PB_SET_OWNER(pb);
+       }
+
+       /* reusing a stale buffer: keep only the MAPPED bit of its flags */
+       if (pb->pb_flags & PBF_STALE)
+               pb->pb_flags &= PBF_MAPPED;
+       PB_TRACE(pb, "got_lock", 0);
+       XFS_STATS_INC(pb_get_locked);
+       return (pb);
+}
+
+
+/*
+ *     pagebuf_find
+ *
+ *     pagebuf_find returns a buffer matching the specified range of
+ *     data for the specified target, if any of the relevant blocks
+ *     are in memory.  The buffer may have unallocated holes, if
+ *     some, but not all, of the blocks are in memory.  Even where
+ *     pages are present in the buffer, not all of every page may be
+ *     valid.
+ */
+/*
+ *     pagebuf_find returns a buffer matching the specified range of
+ *     data for the specified target, if any of the relevant blocks
+ *     are in memory.  The buffer may have unallocated holes, if
+ *     some, but not all, of the blocks are in memory.  Even where
+ *     pages are present in the buffer, not all of every page may be
+ *     valid.
+ *
+ *     Lookup-only wrapper: passing a NULL new_pb means no buffer is
+ *     created on a miss and NULL is returned instead.
+ */
+xfs_buf_t *
+pagebuf_find(                          /* find buffer for block        */
+                                       /* if the block is in memory    */
+       xfs_buftarg_t           *target,/* target for block             */
+       loff_t                  ioff,   /* starting offset of range     */
+       size_t                  isize,  /* length of range              */
+       page_buf_flags_t        flags)  /* PBF_TRYLOCK                  */
+{
+       return _pagebuf_find(target, ioff, isize, flags, NULL);
+}
+
+/*
+ *     pagebuf_get
+ *
+ *     pagebuf_get assembles a buffer covering the specified range.
+ *     Some or all of the blocks in the range may be valid.  Storage
+ *     in memory for all portions of the buffer will be allocated,
+ *     although backing storage may not be.  If PBF_READ is set in
+ *     flags, pagebuf_iostart is called also.
+ */
+/*
+ *     pagebuf_get assembles a buffer covering the specified range.
+ *     Some or all of the blocks in the range may be valid.  Storage
+ *     in memory for all portions of the buffer will be allocated,
+ *     although backing storage may not be.  If PBF_READ is set in
+ *     flags, pagebuf_iostart is called also.
+ *
+ *     A candidate buffer is pre-allocated so the hash insert in
+ *     _pagebuf_find can happen atomically; if an existing buffer was
+ *     found instead, the candidate is returned to the slab.  Returns
+ *     NULL on allocation failure, failed trylock, or when a satisfied
+ *     async read-ahead makes the buffer unnecessary.
+ */
+xfs_buf_t *
+pagebuf_get(                           /* allocate a buffer            */
+       xfs_buftarg_t           *target,/* target for buffer            */
+       loff_t                  ioff,   /* starting offset of range     */
+       size_t                  isize,  /* length of range              */
+       page_buf_flags_t        flags)  /* PBF_TRYLOCK                  */
+{
+       xfs_buf_t               *pb, *new_pb;
+       int                     error = 0, i;
+
+       new_pb = pagebuf_allocate(flags);
+       if (unlikely(!new_pb))
+               return NULL;
+
+       pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
+       if (pb == new_pb) {
+               /* our candidate was inserted: populate its pages */
+               error = _pagebuf_lookup_pages(pb, flags);
+               if (unlikely(error)) {
+                       printk(KERN_WARNING
+                              "pagebuf_get: failed to lookup pages\n");
+                       goto no_buffer;
+               }
+       } else {
+               /* an existing buffer (or NULL) came back: drop ours */
+               pagebuf_deallocate(new_pb);
+               if (unlikely(pb == NULL))
+                       return NULL;
+       }
+
+       for (i = 0; i < pb->pb_page_count; i++)
+               mark_page_accessed(pb->pb_pages[i]);
+
+       if (!(pb->pb_flags & PBF_MAPPED)) {
+               error = _pagebuf_map_pages(pb, flags);
+               if (unlikely(error)) {
+                       printk(KERN_WARNING
+                              "pagebuf_get: failed to map pages\n");
+                       goto no_buffer;
+               }
+       }
+
+       XFS_STATS_INC(pb_get);
+
+       /*
+        * Always fill in the block number now, the mapped cases can do
+        * their own overlay of this later.
+        */
+       pb->pb_bn = ioff;
+       pb->pb_count_desired = pb->pb_buffer_length;
+
+       if (flags & PBF_READ) {
+               if (PBF_NOT_DONE(pb)) {
+                       PB_TRACE(pb, "get_read", (unsigned long)flags);
+                       XFS_STATS_INC(pb_get_read);
+                       pagebuf_iostart(pb, flags);
+               } else if (flags & PBF_ASYNC) {
+                       PB_TRACE(pb, "get_read_async", (unsigned long)flags);
+                       /*
+                        * Read ahead call which is already satisfied,
+                        * drop the buffer
+                        */
+                       goto no_buffer;
+               } else {
+                       PB_TRACE(pb, "get_read_done", (unsigned long)flags);
+                       /* We do not want read in the flags */
+                       pb->pb_flags &= ~PBF_READ;
+               }
+       } else {
+               PB_TRACE(pb, "get_write", (unsigned long)flags);
+       }
+
+       return pb;
+
+no_buffer:
+       /* drop the lock only if we actually hold it */
+       if (flags & (PBF_LOCK | PBF_TRYLOCK))
+               pagebuf_unlock(pb);
+       pagebuf_rele(pb);
+       return NULL;
+}
+
+/*
+ * Create a skeletal pagebuf (no pages associated with it).
+ */
+/*
+ * Create a skeletal pagebuf (no pages associated with it).
+ *
+ * The buffer is initialized for the given range but not hashed and
+ * has no page list; returns NULL if the slab allocation fails.
+ */
+xfs_buf_t *
+pagebuf_lookup(
+       xfs_buftarg_t           *target,
+       loff_t                  ioff,
+       size_t                  isize,
+       page_buf_flags_t        flags)
+{
+       xfs_buf_t               *pb;
+
+       pb = pagebuf_allocate(flags);
+       if (pb) {
+               _pagebuf_initialize(pb, target, ioff, isize, flags);
+       }
+       return pb;
+}
+
+/*
+ * If we are not low on memory then do the readahead in a deadlock
+ * safe manner.
+ */
+/*
+ * If we are not low on memory then do the readahead in a deadlock
+ * safe manner.
+ *
+ * Skips the readahead entirely when the target's backing device is
+ * congested in either direction; otherwise issues an async, trylock,
+ * read-ahead pagebuf_get and discards the result.
+ */
+void
+pagebuf_readahead(
+       xfs_buftarg_t           *target,
+       loff_t                  ioff,
+       size_t                  isize,
+       page_buf_flags_t        flags)
+{
+       struct backing_dev_info *bdi;
+
+       bdi = target->pbr_mapping->backing_dev_info;
+       if (bdi_read_congested(bdi))
+               return;
+       if (bdi_write_congested(bdi))
+               return;
+
+       flags |= (PBF_TRYLOCK|PBF_READ|PBF_ASYNC|PBF_READ_AHEAD);
+       pagebuf_get(target, ioff, isize, flags);
+}
+
+/*
+ * Allocate an uninitialized, unhashed buffer of "len" bytes for
+ * "target" with no flags and no pages; returns NULL on allocation
+ * failure.
+ */
+xfs_buf_t *
+pagebuf_get_empty(
+       size_t                  len,
+       xfs_buftarg_t           *target)
+{
+       xfs_buf_t               *pb;
+
+       pb = pagebuf_allocate(0);
+       if (pb)
+               _pagebuf_initialize(pb, target, 0, len, 0);
+       return pb;
+}
+
+/*
+ * Translate a kernel virtual address to its struct page, handling
+ * both directly-mapped memory (virt_to_page) and addresses inside
+ * the vmalloc region (vmalloc_to_page).
+ */
+static inline struct page *
+mem_to_page(
+       void                    *addr)
+{
+       if (((unsigned long)addr < VMALLOC_START) ||
+           ((unsigned long)addr >= VMALLOC_END)) {
+               return virt_to_page(addr);
+       } else {
+               return vmalloc_to_page(addr);
+       }
+}
+
+/*
+ * Point an existing pagebuf at caller-supplied memory instead of
+ * page-cache pages.  The page list is rebuilt from the pages spanned
+ * by [mem, mem + len); the buffer becomes mapped with pb_addr = mem
+ * and pb_offset set to mem's offset within its first page.  Returns 0
+ * or a negative errno from the page-array allocation.
+ */
+int
+pagebuf_associate_memory(
+       xfs_buf_t               *pb,
+       void                    *mem,
+       size_t                  len)
+{
+       int                     rval;
+       int                     i = 0;
+       size_t                  ptr;
+       size_t                  end, end_cur;
+       off_t                   offset;
+       int                     page_count;
+
+       page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
+       offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
+       /* an unaligned start can push the range into one extra page */
+       if (offset && (len > PAGE_CACHE_SIZE))
+               page_count++;
+
+       /* Free any previous set of page pointers */
+       if (pb->pb_pages)
+               _pagebuf_free_pages(pb);
+
+       pb->pb_pages = NULL;
+       pb->pb_addr = mem;
+
+       rval = _pagebuf_get_pages(pb, page_count, 0);
+       if (rval)
+               return rval;
+
+       pb->pb_offset = offset;
+       ptr = (size_t) mem & PAGE_CACHE_MASK;
+       end = PAGE_CACHE_ALIGN((size_t) mem + len);
+       end_cur = end;
+       /* set up first page */
+       pb->pb_pages[0] = mem_to_page(mem);
+
+       ptr += PAGE_CACHE_SIZE;
+       pb->pb_page_count = ++i;
+       /* walk the remaining page-aligned addresses in the range */
+       while (ptr < end) {
+               pb->pb_pages[i] = mem_to_page((void *)ptr);
+               pb->pb_page_count = ++i;
+               ptr += PAGE_CACHE_SIZE;
+       }
+       pb->pb_locked = 0;
+
+       pb->pb_count_desired = pb->pb_buffer_length = len;
+       pb->pb_flags |= PBF_MAPPED;
+
+       return 0;
+}
+
+/*
+ * Allocate an unhashed buffer of "len" bytes backed by kmem memory
+ * rather than the page cache (no disk address attached yet).  The
+ * allocation is retried with a doubled size until the returned
+ * address satisfies the target's sector alignment mask.  The buffer
+ * is returned unlocked; NULL on any failure.
+ */
+xfs_buf_t *
+pagebuf_get_no_daddr(
+       size_t                  len,
+       xfs_buftarg_t           *target)
+{
+       size_t                  malloc_len = len;
+       xfs_buf_t               *bp;
+       void                    *data;
+       int                     error;
+
+       bp = pagebuf_allocate(0);
+       if (unlikely(bp == NULL))
+               goto fail;
+       _pagebuf_initialize(bp, target, 0, len, PBF_FORCEIO);
+
+ try_again:
+       data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
+       if (unlikely(data == NULL))
+               goto fail_free_buf;
+
+       /* check whether alignment matches.. */
+       if ((__psunsigned_t)data !=
+           ((__psunsigned_t)data & ~target->pbr_smask)) {
+               /* .. else double the size and try again */
+               kmem_free(data, malloc_len);
+               malloc_len <<= 1;
+               goto try_again;
+       }
+
+       error = pagebuf_associate_memory(bp, data, len);
+       if (error)
+               goto fail_free_mem;
+       /* remember the memory must be kmem_free'd, not page-released */
+       bp->pb_flags |= _PBF_KMEM_ALLOC;
+
+       pagebuf_unlock(bp);
+
+       PB_TRACE(bp, "no_daddr", data);
+       return bp;
+ fail_free_mem:
+       kmem_free(data, malloc_len);
+ fail_free_buf:
+       pagebuf_free(bp);
+ fail:
+       return NULL;
+}
+
+/*
+ *     pagebuf_hold
+ *
+ *     Increment reference count on buffer, to hold the buffer concurrently
+ *     with another thread which may release (free) the buffer asynchronously.
+ *
+ *     Must hold the buffer already to call this function.
+ */
+/*
+ *     Increment reference count on buffer, to hold the buffer concurrently
+ *     with another thread which may release (free) the buffer asynchronously.
+ *
+ *     Must hold the buffer already to call this function.
+ */
+void
+pagebuf_hold(
+       xfs_buf_t               *pb)
+{
+       atomic_inc(&pb->pb_hold);
+       PB_TRACE(pb, "hold", 0);
+}
+
+/*
+ *     pagebuf_rele
+ *
+ *     pagebuf_rele releases a hold on the specified buffer.  If the
+ *     the hold count is 1, pagebuf_rele calls pagebuf_free.
+ */
+/*
+ *     pagebuf_rele releases a hold on the specified buffer.  If the
+ *     the hold count is 1, pagebuf_rele calls pagebuf_free.
+ *
+ *     The drop-to-zero test and any resulting teardown happen under
+ *     the hash-bucket lock (atomic_dec_and_lock).  Release callbacks,
+ *     pending delayed writes, and FS-managed buffers each veto the
+ *     free; the first two re-take a hold before handing the buffer on.
+ */
+void
+pagebuf_rele(
+       xfs_buf_t               *pb)
+{
+       pb_hash_t               *hash = pb_hash(pb);
+
+       PB_TRACE(pb, "rele", pb->pb_relse);
+
+       if (atomic_dec_and_lock(&pb->pb_hold, &hash->pb_hash_lock)) {
+               int             do_free = 1;
+
+               if (pb->pb_relse) {
+                       /* hand ownership to the release callback */
+                       atomic_inc(&pb->pb_hold);
+                       spin_unlock(&hash->pb_hash_lock);
+                       (*(pb->pb_relse)) (pb);
+                       spin_lock(&hash->pb_hash_lock);
+                       do_free = 0;
+               }
+
+               if (pb->pb_flags & PBF_DELWRI) {
+                       /* still dirty: queue for delayed write instead */
+                       pb->pb_flags |= PBF_ASYNC;
+                       atomic_inc(&pb->pb_hold);
+                       pagebuf_delwri_queue(pb, 0);
+                       do_free = 0;
+               } else if (pb->pb_flags & PBF_FS_MANAGED) {
+                       do_free = 0;
+               }
+
+               if (do_free) {
+                       list_del_init(&pb->pb_hash_list);
+                       spin_unlock(&hash->pb_hash_lock);
+                       pagebuf_free(pb);
+               } else {
+                       spin_unlock(&hash->pb_hash_lock);
+               }
+       }
+}
+
+
+/*
+ *     Mutual exclusion on buffers.  Locking model:
+ *
+ *     Buffers associated with inodes for which buffer locking
+ *     is not enabled are not protected by semaphores, and are
+ *     assumed to be exclusively owned by the caller.  There is a
+ *     spinlock in the buffer, used by the caller when concurrent
+ *     access is possible.
+ */
+
+/*
+ *     pagebuf_cond_lock
+ *
+ *     pagebuf_cond_lock locks a buffer object, if it is not already locked.
+ *     Note that this in no way
+ *     locks the underlying pages, so it is only useful for synchronizing
+ *     concurrent use of page buffer objects, not for synchronizing independent
+ *     access to the underlying pages.
+ */
+/*
+ *     pagebuf_cond_lock locks a buffer object, if it is not already locked.
+ *     Note that this in no way
+ *     locks the underlying pages, so it is only useful for synchronizing
+ *     concurrent use of page buffer objects, not for synchronizing independent
+ *     access to the underlying pages.
+ *
+ *     Returns 0 if the lock was acquired, -EBUSY if it was already held.
+ */
+int
+pagebuf_cond_lock(                     /* lock buffer, if not locked   */
+                                       /* returns -EBUSY if locked)    */
+       xfs_buf_t               *pb)
+{
+       int                     locked;
+
+       locked = down_trylock(&pb->pb_sema) == 0;
+       if (locked) {
+               PB_SET_OWNER(pb);
+       }
+       PB_TRACE(pb, "cond_lock", (long)locked);
+       return(locked ? 0 : -EBUSY);
+}
+
+/*
+ *     pagebuf_lock_value
+ *
+ *     Return lock value for a pagebuf
+ */
+/*
+ *     Return lock value for a pagebuf: the raw counter of the buffer's
+ *     semaphore (for diagnostics/assertions).
+ */
+int
+pagebuf_lock_value(
+       xfs_buf_t               *pb)
+{
+       return(atomic_read(&pb->pb_sema.count));
+}
+
+/*
+ *     pagebuf_lock
+ *
+ *     pagebuf_lock locks a buffer object.  Note that this in no way
+ *     locks the underlying pages, so it is only useful for synchronizing
+ *     concurrent use of page buffer objects, not for synchronizing independent
+ *     access to the underlying pages.
+ */
+/*
+ *     pagebuf_lock locks a buffer object.  Note that this in no way
+ *     locks the underlying pages, so it is only useful for synchronizing
+ *     concurrent use of page buffer objects, not for synchronizing independent
+ *     access to the underlying pages.
+ *
+ *     If I/O is outstanding on the buffer, the block device queue is
+ *     kicked first so the down() does not wait behind unplugged I/O.
+ *     Always returns 0.
+ */
+int
+pagebuf_lock(
+       xfs_buf_t               *pb)
+{
+       PB_TRACE(pb, "lock", 0);
+       if (atomic_read(&pb->pb_io_remaining))
+               blk_run_address_space(pb->pb_target->pbr_mapping);
+       down(&pb->pb_sema);
+       PB_SET_OWNER(pb);
+       PB_TRACE(pb, "locked", 0);
+       return 0;
+}
+
+/*
+ *     pagebuf_unlock
+ *
+ *     pagebuf_unlock releases the lock on the buffer object created by
+ *     pagebuf_lock or pagebuf_cond_lock (not any
+ *     pinning of underlying pages created by pagebuf_pin).
+ */
+void
+pagebuf_unlock(                                /* unlock buffer                */
+       xfs_buf_t               *pb)    /* buffer to unlock             */
+{
+       /* Clear the owner before the up() makes the lock takeable again */
+       PB_CLEAR_OWNER(pb);
+       up(&pb->pb_sema);
+       PB_TRACE(pb, "unlock", 0);
+}
+
+
+/*
+ *     Pinning Buffer Storage in Memory
+ */
+
+/*
+ *     pagebuf_pin
+ *
+ *     pagebuf_pin locks all of the memory represented by a buffer in
+ *     memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
+ *     the same or different buffers affecting a given page, will
+ *     properly count the number of outstanding "pin" requests.  The
+ *     buffer may be released after the pagebuf_pin and a different
+ *     buffer used when calling pagebuf_unpin, if desired.
+ *     pagebuf_pin should be used by the file system when it wants be
+ *     assured that no attempt will be made to force the affected
+ *     memory to disk.  It does not assure that a given logical page
+ *     will not be moved to a different physical page.
+ */
+void
+pagebuf_pin(
+       xfs_buf_t               *pb)
+{
+       atomic_inc(&pb->pb_pin_count);
+       /* Raw .counter read is a racy snapshot, acceptable for tracing only */
+       PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
+}
+
+/*
+ *     pagebuf_unpin
+ *
+ *     pagebuf_unpin reverses the locking of memory performed by
+ *     pagebuf_pin.  Note that both functions affected the logical
+ *     pages associated with the buffer, not the buffer itself.
+ */
+void
+pagebuf_unpin(
+       xfs_buf_t               *pb)
+{
+       /* Last unpin wakes anyone blocked in _pagebuf_wait_unpin() */
+       if (atomic_dec_and_test(&pb->pb_pin_count)) {
+               wake_up_all(&pb->pb_waiters);
+       }
+       PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
+}
+
+/*
+ *     pagebuf_ispin
+ *
+ *     Return non-zero if the buffer is currently pinned.
+ */
+int
+pagebuf_ispin(
+       xfs_buf_t               *pb)
+{
+       return atomic_read(&pb->pb_pin_count);
+}
+
+/*
+ *     _pagebuf_wait_unpin
+ *
+ *     _pagebuf_wait_unpin waits until all of the memory associated
+ *     with the buffer is no longer pinned.  It returns
+ *     immediately if none of the affected pages are pinned.
+ */
+static inline void
+_pagebuf_wait_unpin(
+       xfs_buf_t               *pb)
+{
+       DECLARE_WAITQUEUE       (wait, current);
+
+       if (atomic_read(&pb->pb_pin_count) == 0)
+               return;
+
+       add_wait_queue(&pb->pb_waiters, &wait);
+       for (;;) {
+               /*
+                * Standard sleep pattern: mark ourselves non-runnable
+                * *before* re-checking the condition, so a wake-up
+                * between the check and schedule() is not lost.
+                */
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               if (atomic_read(&pb->pb_pin_count) == 0)
+                       break;
+               if (atomic_read(&pb->pb_io_remaining))
+                       blk_run_address_space(pb->pb_target->pbr_mapping);
+               schedule();
+       }
+       remove_wait_queue(&pb->pb_waiters, &wait);
+       set_current_state(TASK_RUNNING);
+}
+
+/*
+ *     Buffer Utility Routines
+ */
+
+/*
+ *     pagebuf_iodone
+ *
+ *     pagebuf_iodone marks a buffer for which I/O is in progress
+ *     done with respect to that I/O.  The pb_iodone routine, if
+ *     present, will be called as a side-effect.
+ */
+/*
+ *     Workqueue completion handler: run the buffer's iodone callback,
+ *     or release an async buffer that has no callback.
+ */
+void
+pagebuf_iodone_work(
+       void                    *v)
+{
+       xfs_buf_t               *bp = (xfs_buf_t *)v;
+
+       if (bp->pb_iodone)
+               (*(bp->pb_iodone))(bp);
+       else if (bp->pb_flags & PBF_ASYNC)
+               xfs_buf_relse(bp);
+}
+
+void
+pagebuf_iodone(
+       xfs_buf_t               *pb,
+       int                     dataio,         /* route to the data I/O workqueue */
+       int                     schedule)       /* defer completion to a workqueue */
+{
+       pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
+       /* A successful I/O means the buffer contents are now valid */
+       if (pb->pb_error == 0) {
+               pb->pb_flags &= ~(PBF_PARTIAL | PBF_NONE);
+       }
+
+       PB_TRACE(pb, "iodone", pb->pb_iodone);
+
+       if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
+               /* Callback work either runs here or is punted to a daemon */
+               if (schedule) {
+                       INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
+                       queue_work(dataio ? pagebuf_dataio_workqueue :
+                               pagebuf_logio_workqueue, &pb->pb_iodone_work);
+               } else {
+                       pagebuf_iodone_work(pb);
+               }
+       } else {
+               /* No callback and synchronous: wake the pagebuf_iowait() caller */
+               up(&pb->pb_iodonesema);
+       }
+}
+
+/*
+ *     pagebuf_ioerror
+ *
+ *     pagebuf_ioerror sets the error code for a buffer.
+ */
+void
+pagebuf_ioerror(                       /* mark/clear buffer error flag */
+       xfs_buf_t               *pb,    /* buffer to mark               */
+       int                     error)  /* error to store (0 if none)   */
+{
+       /* Errors are stored as positive errno values in a 16-bit field */
+       ASSERT(error >= 0 && error <= 0xffff);
+       pb->pb_error = (unsigned short)error;
+       PB_TRACE(pb, "ioerror", (unsigned long)error);
+}
+
+/*
+ *     pagebuf_iostart
+ *
+ *     pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
+ *     If necessary, it will arrange for any disk space allocation required,
+ *     and it will break up the request if the block mappings require it.
+ *     The pb_iodone routine in the buffer supplied will only be called
+ *     when all of the subsidiary I/O requests, if any, have been completed.
+ *     pagebuf_iostart calls the pagebuf_ioinitiate routine or
+ *     pagebuf_iorequest, if the former routine is not defined, to start
+ *     the I/O on a given low-level request.
+ */
+int
+pagebuf_iostart(                       /* start I/O on a buffer          */
+       xfs_buf_t               *pb,    /* buffer to start                */
+       page_buf_flags_t        flags)  /* PBF_LOCK, PBF_ASYNC, PBF_READ, */
+                                       /* PBF_WRITE, PBF_DELWRI,         */
+                                       /* PBF_DONT_BLOCK                 */
+{
+       int                     status = 0;
+
+       PB_TRACE(pb, "iostart", (unsigned long)flags);
+
+       /*
+        * Delayed writes are not issued now; the buffer is queued for
+        * the xfsbufd daemon (which also unlocks it) and we are done.
+        */
+       if (flags & PBF_DELWRI) {
+               pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
+               pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
+               pagebuf_delwri_queue(pb, 1);
+               return status;
+       }
+
+       /* Transfer the caller's I/O mode flags onto the buffer */
+       pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
+                       PBF_READ_AHEAD | _PBF_RUN_QUEUES);
+       pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
+                       PBF_READ_AHEAD | _PBF_RUN_QUEUES);
+
+       BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
+
+       /* For writes allow an alternate strategy routine to precede
+        * the actual I/O request (which may not be issued at all in
+        * a shutdown situation, for example).
+        */
+       status = (flags & PBF_WRITE) ?
+               pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
+
+       /* Wait for I/O if we are not an async request.
+        * Note: async I/O request completion will release the buffer,
+        * and that can already be done by this point.  So using the
+        * buffer pointer from here on, after async I/O, is invalid.
+        */
+       if (!status && !(flags & PBF_ASYNC))
+               status = pagebuf_iowait(pb);
+
+       return status;
+}
+
+/*
+ * Helper routine for pagebuf_iorequest
+ */
+
+/*
+ *     Return non-zero if this buffer's pages are page-locked for the
+ *     duration of the I/O; only reads ever hold pb_locked here.
+ */
+STATIC __inline__ int
+_pagebuf_iolocked(
+       xfs_buf_t               *pb)
+{
+       ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
+       if (pb->pb_flags & PBF_READ)
+               return pb->pb_locked;
+       return 0;
+}
+
+/*
+ *     Drop one reference from the in-flight I/O count; when the last
+ *     reference goes, release the page-lock state and run completion.
+ */
+STATIC __inline__ void
+_pagebuf_iodone(
+       xfs_buf_t               *pb,
+       int                     schedule)
+{
+       /*
+        * atomic_dec_and_test() only guarantees a *non-zero* return when
+        * the counter reaches zero, not the exact value 1 -- comparing
+        * the result against 1 would break on implementations that
+        * return any other truthy value.
+        */
+       if (atomic_dec_and_test(&pb->pb_io_remaining)) {
+               pb->pb_locked = 0;
+               pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), schedule);
+       }
+}
+
+/*
+ *     bio completion handler for I/O issued by _pagebuf_ioapply().
+ */
+STATIC int
+bio_end_io_pagebuf(
+       struct bio              *bio,
+       unsigned int            bytes_done,
+       int                     error)
+{
+       xfs_buf_t               *pb = (xfs_buf_t *)bio->bi_private;
+       unsigned int            i, blocksize = pb->pb_target->pbr_bsize;
+       unsigned int            sectorshift = pb->pb_target->pbr_sshift;
+       struct bio_vec          *bvec = bio->bi_io_vec;
+
+       /* Partial completion: more segments outstanding, come back later */
+       if (bio->bi_size)
+               return 1;
+
+       /* Record the failure as a positive errno on the buffer */
+       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+               pb->pb_error = EIO;
+
+       for (i = 0; i < bio->bi_vcnt; i++, bvec++) {
+               struct page     *page = bvec->bv_page;
+
+               if (pb->pb_error) {
+                       SetPageError(page);
+               } else if (blocksize == PAGE_CACHE_SIZE) {
+                       SetPageUptodate(page);
+               } else if (!PagePrivate(page) &&
+                               (pb->pb_flags & _PBF_PAGE_CACHE)) {
+                       unsigned long   j, range;
+
+                       /* Track per-sector validity in page->private bits */
+                       ASSERT(blocksize < PAGE_CACHE_SIZE);
+                       range = (bvec->bv_offset + bvec->bv_len) >> sectorshift;
+                       for (j = bvec->bv_offset >> sectorshift; j < range; j++)
+                               set_bit(j, &page->private);
+                       /*
+                        * NOTE(review): this "all sectors uptodate" test looks
+                        * suspect -- with one bit per sector the full mask
+                        * would be (1 << (PAGE_CACHE_SIZE >> sectorshift)) - 1,
+                        * not PAGE_CACHE_SIZE-1.  Confirm against the
+                        * corresponding check in the mapping code.
+                        */
+                       if (page->private == (unsigned long)(PAGE_CACHE_SIZE-1))
+                               SetPageUptodate(page);
+               }
+
+               /* Reads that locked their pages unlock them on completion */
+               if (_pagebuf_iolocked(pb)) {
+                       unlock_page(page);
+               }
+       }
+
+       _pagebuf_iodone(pb, 1);
+       bio_put(bio);
+       return 0;
+}
+
+/*
+ *     Map the buffer's pages onto one or more bios and submit them.
+ *     Each submitted bio holds one reference on pb_io_remaining.
+ */
+void
+_pagebuf_ioapply(
+       xfs_buf_t               *pb)
+{
+       int                     i, map_i, total_nr_pages, nr_pages;
+       struct bio              *bio;
+       int                     offset = pb->pb_offset;
+       int                     size = pb->pb_count_desired;
+       sector_t                sector = pb->pb_bn;
+       unsigned int            blocksize = pb->pb_target->pbr_bsize;
+       int                     locking = _pagebuf_iolocked(pb);
+
+       total_nr_pages = pb->pb_page_count;
+       map_i = 0;
+
+       /* Special code path for reading a sub page size pagebuf in --
+        * we populate up the whole page, and hence the other metadata
+        * in the same page.  This optimization is only valid when the
+        * filesystem block size and the page size are equal.
+        */
+       if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
+           (pb->pb_flags & PBF_READ) && locking &&
+           (blocksize == PAGE_CACHE_SIZE)) {
+               bio = bio_alloc(GFP_NOIO, 1);
+
+               bio->bi_bdev = pb->pb_target->pbr_bdev;
+               /* Rewind to the page-aligned start of the buffer */
+               bio->bi_sector = sector - (offset >> BBSHIFT);
+               bio->bi_end_io = bio_end_io_pagebuf;
+               bio->bi_private = pb;
+
+               bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
+               /* size = 0 so submit_io will not loop back to next_chunk */
+               size = 0;
+
+               atomic_inc(&pb->pb_io_remaining);
+
+               goto submit_io;
+       }
+
+       /* Lock down the pages which we need to for the request */
+       if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
+               for (i = 0; size; i++) {
+                       int             nbytes = PAGE_CACHE_SIZE - offset;
+                       struct page     *page = pb->pb_pages[i];
+
+                       if (nbytes > size)
+                               nbytes = size;
+
+                       lock_page(page);
+
+                       size -= nbytes;
+                       offset = 0;
+               }
+               /* Reset the walk state consumed by the locking loop above */
+               offset = pb->pb_offset;
+               size = pb->pb_count_desired;
+       }
+
+next_chunk:
+       atomic_inc(&pb->pb_io_remaining);
+       /* Cap each bio at the maximum number of pages one request can carry */
+       nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
+       if (nr_pages > total_nr_pages)
+               nr_pages = total_nr_pages;
+
+       bio = bio_alloc(GFP_NOIO, nr_pages);
+       bio->bi_bdev = pb->pb_target->pbr_bdev;
+       bio->bi_sector = sector;
+       bio->bi_end_io = bio_end_io_pagebuf;
+       bio->bi_private = pb;
+
+       for (; size && nr_pages; nr_pages--, map_i++) {
+               int     nbytes = PAGE_CACHE_SIZE - offset;
+
+               if (nbytes > size)
+                       nbytes = size;
+
+               /* bio full: submit it and start another chunk */
+               if (bio_add_page(bio, pb->pb_pages[map_i],
+                                       nbytes, offset) < nbytes)
+                       break;
+
+               offset = 0;
+               sector += nbytes >> BBSHIFT;
+               size -= nbytes;
+               total_nr_pages--;
+       }
+
+submit_io:
+       if (likely(bio->bi_size)) {
+               submit_bio((pb->pb_flags & PBF_READ) ? READ : WRITE, bio);
+               if (size)
+                       goto next_chunk;
+       } else {
+               /* Could not add even a single page to the bio */
+               bio_put(bio);
+               pagebuf_ioerror(pb, EIO);
+       }
+
+       if (pb->pb_flags & _PBF_RUN_QUEUES) {
+               pb->pb_flags &= ~_PBF_RUN_QUEUES;
+               /* > 1 because the caller still holds its own io_remaining ref */
+               if (atomic_read(&pb->pb_io_remaining) > 1)
+                       blk_run_address_space(pb->pb_target->pbr_mapping);
+       }
+}
+
+/*
+ *     pagebuf_iorequest -- the core I/O request routine.
+ */
+int
+pagebuf_iorequest(                     /* start real I/O               */
+       xfs_buf_t               *pb)    /* buffer to convey to device   */
+{
+       PB_TRACE(pb, "iorequest", 0);
+
+       /* Delayed writes never hit the device from here */
+       if (pb->pb_flags & PBF_DELWRI) {
+               pagebuf_delwri_queue(pb, 1);
+               return 0;
+       }
+
+       /* Writes must wait until the buffer contents are unpinned */
+       if (pb->pb_flags & PBF_WRITE) {
+               _pagebuf_wait_unpin(pb);
+       }
+
+       /* Hold a reference across I/O submission and completion */
+       pagebuf_hold(pb);
+
+       /* Set the count to 1 initially, this will stop an I/O
+        * completion callout which happens before we have started
+        * all the I/O from calling pagebuf_iodone too early.
+        */
+       atomic_set(&pb->pb_io_remaining, 1);
+       _pagebuf_ioapply(pb);
+       /* Drop our guard reference; runs completion if all bios finished */
+       _pagebuf_iodone(pb, 0);
+
+       pagebuf_rele(pb);
+       return 0;
+}
+
+/*
+ *     pagebuf_iowait
+ *
+ *     pagebuf_iowait waits for I/O to complete on the buffer supplied.
+ *     It returns immediately if no I/O is pending.  In any case, it returns
+ *     the error code, if any, or 0 if there is no error.
+ */
+int
+pagebuf_iowait(
+       xfs_buf_t               *pb)
+{
+       PB_TRACE(pb, "iowait", 0);
+       /* Make sure queued requests are actually being serviced */
+       if (atomic_read(&pb->pb_io_remaining))
+               blk_run_address_space(pb->pb_target->pbr_mapping);
+       /* Sleep until pagebuf_iodone() ups the semaphore */
+       down(&pb->pb_iodonesema);
+       PB_TRACE(pb, "iowaited", (long)pb->pb_error);
+       return pb->pb_error;
+}
+
+/*
+ *     pagebuf_offset
+ *
+ *     Return a kernel virtual address for the given byte offset
+ *     within the buffer.
+ */
+caddr_t
+pagebuf_offset(
+       xfs_buf_t               *pb,
+       size_t                  offset)
+{
+       struct page             *page;
+
+       /* Bias by the buffer's start offset within its first page */
+       offset += pb->pb_offset;
+
+       page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
+       return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
+}
+
+/*
+ *     pagebuf_iomove
+ *
+ *     Move data into or out of a buffer.
+ */
+void
+pagebuf_iomove(
+       xfs_buf_t               *pb,    /* buffer to process            */
+       size_t                  boff,   /* starting buffer offset       */
+       size_t                  bsize,  /* length to copy               */
+       caddr_t                 data,   /* data address                 */
+       page_buf_rw_t           mode)   /* read/write flag              */
+{
+       size_t                  bend, cpoff, csize;
+       struct page             *page;
+
+       bend = boff + bsize;
+       /* Walk the request one page at a time */
+       while (boff < bend) {
+               page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
+               cpoff = page_buf_poff(boff + pb->pb_offset);
+               /* Clamp to both the page boundary and the buffer's data end */
+               csize = min_t(size_t,
+                             PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);
+
+               ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
+
+               switch (mode) {
+               case PBRW_ZERO:
+                       memset(page_address(page) + cpoff, 0, csize);
+                       break;
+               case PBRW_READ:
+                       memcpy(data, page_address(page) + cpoff, csize);
+                       break;
+               case PBRW_WRITE:
+                       memcpy(page_address(page) + cpoff, data, csize);
+               }
+
+               boff += csize;
+               data += csize;
+       }
+}
+
+/*
+ *     Handling of buftargs.
+ */
+
+/*
+ *     Flush and free a buffer target; drops the block device
+ *     reference too when it was separately opened ("external").
+ */
+void
+xfs_free_buftarg(
+       xfs_buftarg_t           *btp,
+       int                     external)
+{
+       /* Force out any remaining delayed-write buffers first */
+       xfs_flush_buftarg(btp, 1);
+       if (external)
+               xfs_blkdev_put(btp->pbr_bdev);
+       kmem_free(btp, sizeof(*btp));
+}
+
+void
+xfs_incore_relse(
+       xfs_buftarg_t           *btp,
+       int                     delwri_only,    /* unused here; kept for the caller API */
+       int                     wait)           /* unused here; kept for the caller API */
+{
+       /* Drop all cached state for this device unconditionally */
+       invalidate_bdev(btp->pbr_bdev, 1);
+       truncate_inode_pages(btp->pbr_mapping, 0LL);
+}
+
+/*
+ *     Record the filesystem block size and device sector geometry on
+ *     the target, and push the sector size down to the block device.
+ */
+void
+xfs_setsize_buftarg(
+       xfs_buftarg_t           *btp,
+       unsigned int            blocksize,
+       unsigned int            sectorsize)
+{
+       btp->pbr_bsize = blocksize;
+       /* sectorsize is a power of two; derive its shift and mask */
+       btp->pbr_sshift = ffs(sectorsize) - 1;
+       btp->pbr_smask = sectorsize - 1;
+
+       if (set_blocksize(btp->pbr_bdev, sectorsize)) {
+               printk(KERN_WARNING
+                       "XFS: Cannot set_blocksize to %u on device %s\n",
+                       sectorsize, XFS_BUFTARG_NAME(btp));
+       }
+}
+
+/*
+ *     Allocate and initialise a buffer target for the given block
+ *     device.  KM_SLEEP means allocation cannot fail (it sleeps).
+ */
+xfs_buftarg_t *
+xfs_alloc_buftarg(
+       struct block_device     *bdev)
+{
+       xfs_buftarg_t           *btp;
+
+       btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
+
+       btp->pbr_dev =  bdev->bd_dev;
+       btp->pbr_bdev = bdev;
+       btp->pbr_mapping = bdev->bd_inode->i_mapping;
+       xfs_setsize_buftarg(btp, PAGE_CACHE_SIZE, bdev_hardsect_size(bdev));
+
+       return btp;
+}
+
+
+/*
+ * Pagebuf delayed write buffer handling
+ */
+
+STATIC LIST_HEAD(pbd_delwrite_queue);
+STATIC spinlock_t pbd_delwrite_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ *     Queue a buffer on the delayed-write list (moving it to the tail
+ *     if already queued), optionally unlocking it afterwards.
+ */
+STATIC void
+pagebuf_delwri_queue(
+       xfs_buf_t               *pb,
+       int                     unlock)
+{
+       PB_TRACE(pb, "delwri_q", (long)unlock);
+       ASSERT(pb->pb_flags & PBF_DELWRI);
+
+       spin_lock(&pbd_delwrite_lock);
+       /* If already in the queue, dequeue and place at tail */
+       if (!list_empty(&pb->pb_list)) {
+               if (unlock) {
+                       /* Re-queue: drop the extra hold taken by the caller */
+                       atomic_dec(&pb->pb_hold);
+               }
+               list_del(&pb->pb_list);
+       }
+
+       list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
+       /* Timestamp used by the daemon's ageing check */
+       pb->pb_queuetime = jiffies;
+       spin_unlock(&pbd_delwrite_lock);
+
+       if (unlock)
+               pagebuf_unlock(pb);
+}
+
+/*
+ *     Remove a buffer from the delayed-write list and clear its
+ *     delayed-write state.
+ */
+void
+pagebuf_delwri_dequeue(
+       xfs_buf_t               *pb)
+{
+       PB_TRACE(pb, "delwri_uq", 0);
+       spin_lock(&pbd_delwrite_lock);
+       list_del_init(&pb->pb_list);
+       pb->pb_flags &= ~PBF_DELWRI;
+       spin_unlock(&pbd_delwrite_lock);
+}
+
+/*
+ *     Wait for all pending work on the given workqueue to finish.
+ */
+STATIC void
+pagebuf_runall_queues(
+       struct workqueue_struct *queue)
+{
+       flush_workqueue(queue);
+}
+
+/* Defines for pagebuf daemon */
+STATIC DECLARE_COMPLETION(pagebuf_daemon_done);
+STATIC struct task_struct *pagebuf_daemon_task;
+STATIC int pagebuf_daemon_active;
+STATIC int force_flush;
+
+STATIC void
+pagebuf_daemon_wakeup(void)
+{
+       force_flush = 1;
+       /* Compiler barrier: make the flag visible before the wake-up */
+       barrier();
+       wake_up_process(pagebuf_daemon_task);
+}
+
+/*
+ *     xfsbufd: kernel thread that periodically writes out aged
+ *     delayed-write buffers (or all of them when force_flush is set).
+ */
+STATIC int
+pagebuf_daemon(
+       void                    *data)
+{
+       struct list_head        tmp;
+       unsigned long           age;
+       xfs_buf_t               *pb, *n;
+
+       /*  Set up the thread  */
+       daemonize("xfsbufd");
+       /* Allowed to dip into reserves: we write data out under pressure */
+       current->flags |= PF_MEMALLOC;
+
+       pagebuf_daemon_task = current;
+       pagebuf_daemon_active = 1;
+       barrier();
+
+       INIT_LIST_HEAD(&tmp);
+       do {
+               /* swsusp */
+               if (current->flags & PF_FREEZE)
+                       refrigerator(PF_FREEZE);
+
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout((xfs_buf_timer_centisecs * HZ) / 100);
+
+               age = (xfs_buf_age_centisecs * HZ) / 100;
+               spin_lock(&pbd_delwrite_lock);
+               list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
+                       PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
+                       ASSERT(pb->pb_flags & PBF_DELWRI);
+
+                       if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
+                               /*
+                                * Queue is ordered by queue time, so once we
+                                * hit a buffer that is too young we can stop.
+                                */
+                               if (!force_flush &&
+                                   time_before(jiffies,
+                                               pb->pb_queuetime + age)) {
+                                       pagebuf_unlock(pb);
+                                       break;
+                               }
+
+                               pb->pb_flags &= ~PBF_DELWRI;
+                               pb->pb_flags |= PBF_WRITE;
+                               /* Collect locked buffers for I/O outside the lock */
+                               list_move(&pb->pb_list, &tmp);
+                       }
+               }
+               spin_unlock(&pbd_delwrite_lock);
+
+               while (!list_empty(&tmp)) {
+                       pb = list_entry(tmp.next, xfs_buf_t, pb_list);
+                       list_del_init(&pb->pb_list);
+                       pagebuf_iostrategy(pb);
+                       blk_run_address_space(pb->pb_target->pbr_mapping);
+               }
+
+               if (as_list_len > 0)
+                       purge_addresses();
+
+               force_flush = 0;
+       } while (pagebuf_daemon_active);
+
+       /* Signals pagebuf_daemon_stop() and terminates the thread */
+       complete_and_exit(&pagebuf_daemon_done, 0);
+}
+
+/*
+ * Go through all incore buffers, and release buffers if they belong to
+ * the given device. This is used in filesystem error handling to
+ * preserve the consistency of its metadata.
+ */
+int
+xfs_flush_buftarg(
+       xfs_buftarg_t           *target,
+       int                     wait)
+{
+       struct list_head        tmp;
+       xfs_buf_t               *pb, *n;
+       int                     pincount = 0;   /* pinned buffers we had to skip */
+
+       /* Drain any completion work still queued for the daemons */
+       pagebuf_runall_queues(pagebuf_dataio_workqueue);
+       pagebuf_runall_queues(pagebuf_logio_workqueue);
+
+       INIT_LIST_HEAD(&tmp);
+       spin_lock(&pbd_delwrite_lock);
+       /* Pull this target's delayed-write buffers onto a private list */
+       list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
+
+               if (pb->pb_target != target)
+                       continue;
+
+               ASSERT(pb->pb_flags & PBF_DELWRI);
+               PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
+               if (pagebuf_ispin(pb)) {
+                       /* Cannot write a pinned buffer; report it instead */
+                       pincount++;
+                       continue;
+               }
+
+               pb->pb_flags &= ~PBF_DELWRI;
+               pb->pb_flags |= PBF_WRITE;
+               list_move(&pb->pb_list, &tmp);
+       }
+       spin_unlock(&pbd_delwrite_lock);
+
+       /*
+        * Dropped the delayed write list lock, now walk the temporary list
+        */
+       list_for_each_entry_safe(pb, n, &tmp, pb_list) {
+               /* Synchronous flush: keep the buffer on tmp so we can wait below */
+               if (wait)
+                       pb->pb_flags &= ~PBF_ASYNC;
+               else
+                       list_del_init(&pb->pb_list);
+
+               pagebuf_lock(pb);
+               pagebuf_iostrategy(pb);
+       }
+
+       /*
+        * Remaining list items must be flushed before returning
+        */
+       while (!list_empty(&tmp)) {
+               pb = list_entry(tmp.next, xfs_buf_t, pb_list);
+
+               list_del_init(&pb->pb_list);
+               xfs_iowait(pb);
+               xfs_buf_relse(pb);
+       }
+
+       if (wait)
+               blk_run_address_space(target->pbr_mapping);
+
+       /* Non-zero means some buffers could not be flushed (still pinned) */
+       return pincount;
+}
+
+/*
+ *     Create the log/data I/O completion workqueues and start the
+ *     xfsbufd thread.  Returns the thread pid (> 0) on success or a
+ *     negative errno; note it does NOT return 0 on success.
+ */
+STATIC int
+pagebuf_daemon_start(void)
+{
+       int             rval;
+
+       pagebuf_logio_workqueue = create_workqueue("xfslogd");
+       if (!pagebuf_logio_workqueue)
+               return -ENOMEM;
+
+       pagebuf_dataio_workqueue = create_workqueue("xfsdatad");
+       if (!pagebuf_dataio_workqueue) {
+               destroy_workqueue(pagebuf_logio_workqueue);
+               return -ENOMEM;
+       }
+
+       rval = kernel_thread(pagebuf_daemon, NULL, CLONE_FS|CLONE_FILES);
+       if (rval < 0) {
+               /* Thread creation failed: tear the workqueues back down */
+               destroy_workqueue(pagebuf_logio_workqueue);
+               destroy_workqueue(pagebuf_dataio_workqueue);
+       }
+
+       return rval;
+}
+
+/*
+ * pagebuf_daemon_stop
+ *
+ * Note: do not mark as __exit, it is called from pagebuf_terminate.
+ */
+STATIC void
+pagebuf_daemon_stop(void)
+{
+       /* Tell the daemon loop to exit, then wait for it to signal us */
+       pagebuf_daemon_active = 0;
+       barrier();
+       wait_for_completion(&pagebuf_daemon_done);
+
+       destroy_workqueue(pagebuf_logio_workqueue);
+       destroy_workqueue(pagebuf_dataio_workqueue);
+}
+
+/*
+ *     Initialization and Termination
+ */
+
+int __init
+pagebuf_init(void)
+{
+       int                     i;
+
+       pagebuf_cache = kmem_cache_create("xfs_buf_t", sizeof(xfs_buf_t), 0,
+                       SLAB_HWCACHE_ALIGN, NULL, NULL);
+       if (pagebuf_cache == NULL) {
+               printk("pagebuf: couldn't init pagebuf cache\n");
+               pagebuf_terminate();
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < NHASH; i++) {
+               spin_lock_init(&pbhash[i].pb_hash_lock);
+               INIT_LIST_HEAD(&pbhash[i].pb_hash);
+       }
+
+#ifdef PAGEBUF_TRACE
+       pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
+#endif
+
+       pagebuf_daemon_start();
+       return 0;
+}
+
+
+/*
+ *     pagebuf_terminate.
+ *
+ *     Note: do not mark as __exit, this is also called from the __init code.
+ */
+void
+pagebuf_terminate(void)
+{
+       /* Stops the xfsbufd thread and destroys both workqueues */
+       pagebuf_daemon_stop();
+
+#ifdef PAGEBUF_TRACE
+       ktrace_free(pagebuf_trace_buf);
+#endif
+
+       kmem_cache_destroy(pagebuf_cache);
+}
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
new file mode 100644 (file)
index 0000000..f97e6c0
--- /dev/null
@@ -0,0 +1,594 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * Written by Steve Lord, Jim Mostek, Russell Cattelan at SGI
+ */
+
+#ifndef __XFS_BUF_H__
+#define __XFS_BUF_H__
+
+#include <linux/config.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <asm/system.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/uio.h>
+
+/*
+ *     Base types
+ */
+
+#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
+
+#define page_buf_ctob(pp)      ((pp) * PAGE_CACHE_SIZE)
+#define page_buf_btoc(dd)      (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
+#define page_buf_btoct(dd)     ((dd) >> PAGE_CACHE_SHIFT)
+#define page_buf_poff(aa)      ((aa) & ~PAGE_CACHE_MASK)
+
+typedef enum page_buf_rw_e {
+       PBRW_READ = 1,                  /* transfer into target memory */
+       PBRW_WRITE = 2,                 /* transfer from target memory */
+       PBRW_ZERO = 3                   /* Zero target memory */
+} page_buf_rw_t;
+
+
+typedef enum page_buf_flags_e {                /* pb_flags values */
+       PBF_READ = (1 << 0),    /* buffer intended for reading from device */
+       PBF_WRITE = (1 << 1),   /* buffer intended for writing to device   */
+       PBF_MAPPED = (1 << 2),  /* buffer mapped (pb_addr valid)           */
+       PBF_PARTIAL = (1 << 3), /* buffer partially read                   */
+       PBF_ASYNC = (1 << 4),   /* initiator will not wait for completion  */
+       PBF_NONE = (1 << 5),    /* buffer not read at all                  */
+       PBF_DELWRI = (1 << 6),  /* buffer has dirty pages                  */
+       PBF_STALE = (1 << 7),   /* buffer has been staled, do not find it  */
+       PBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory  */
+       PBF_FS_DATAIOD = (1 << 9),  /* schedule IO completion on fs datad  */
+       PBF_FORCEIO = (1 << 10),    /* ignore any cache state              */
+       PBF_FLUSH = (1 << 11),      /* flush disk write cache              */
+       PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead             */
+
+       /* flags used only as arguments to access routines */
+       PBF_LOCK = (1 << 14),       /* lock requested                      */
+       PBF_TRYLOCK = (1 << 15),    /* lock requested, but do not wait     */
+       PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread      */
+
+       /* flags used only internally */
+       _PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache                 */
+       _PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc()              */
+       _PBF_RUN_QUEUES = (1 << 19),/* run block device task queue         */
+} page_buf_flags_t;
+
+#define PBF_UPDATE (PBF_READ | PBF_WRITE)
+#define PBF_NOT_DONE(pb) (((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) != 0)
+#define PBF_DONE(pb) (((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) == 0)
+
+/*
+ * Buffer target: identifies the block device that buffers perform
+ * their I/O against.  pbr_bsize/pbr_sshift/pbr_smask appear to cache
+ * the device's basic I/O size, its log2 and its alignment mask
+ * (presumably set by xfs_setsize_buftarg() -- confirm); pbr_mapping
+ * is the address_space that metadata pages are hashed into.
+ */
+typedef struct xfs_buftarg {
+       dev_t                   pbr_dev;        /* device number            */
+       struct block_device     *pbr_bdev;      /* underlying block device  */
+       struct address_space    *pbr_mapping;   /* pagecache for metadata   */
+       unsigned int            pbr_bsize;
+       unsigned int            pbr_sshift;
+       size_t                  pbr_smask;
+} xfs_buftarg_t;
+
+/*
+ *     xfs_buf_t:  Buffer structure for page cache-based buffers
+ *
+ * This buffer structure is used by the page cache buffer management routines
+ * to refer to an assembly of pages forming a logical buffer.  The actual
+ * I/O is performed with buffer_head or bio structures, as required by drivers,
+ * for drivers which do not understand this structure.  The buffer structure is
+ * used on temporary basis only, and discarded when released.
+ *
+ * The real data storage is recorded in the page cache.  Metadata is
+ * hashed to the inode for the block device on which the file system resides.
+ * File data is hashed to the inode for the file.  Pages which are only
+ * partially filled with data have bits set in their block_map entry
+ * to indicate which disk blocks in the page are not valid.
+ */
+
+struct xfs_buf;
+typedef void (*page_buf_iodone_t)(struct xfs_buf *);
+                       /* call-back function on I/O completion */
+typedef void (*page_buf_relse_t)(struct xfs_buf *);
+                       /* call-back function on I/O completion */
+typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);
+
+#define PB_PAGES       4
+
+typedef struct xfs_buf {
+       struct semaphore        pb_sema;        /* semaphore for lockables  */
+       unsigned long           pb_queuetime;   /* time buffer was queued   */
+       atomic_t                pb_pin_count;   /* pin count                */
+       wait_queue_head_t       pb_waiters;     /* unpin waiters            */
+       struct list_head        pb_list;        /* delwri queue linkage --
+                                                * see xfs_buf_undelay()    */
+       page_buf_flags_t        pb_flags;       /* status flags */
+       struct list_head        pb_hash_list;   /* hash chain linkage       */
+       xfs_buftarg_t           *pb_target;     /* logical object */
+       atomic_t                pb_hold;        /* reference count */
+       xfs_daddr_t             pb_bn;          /* block number for I/O */
+       loff_t                  pb_file_offset; /* offset in file */
+       size_t                  pb_buffer_length; /* size of buffer in bytes */
+       size_t                  pb_count_desired; /* desired transfer size */
+       void                    *pb_addr;       /* virtual address of buffer */
+       struct work_struct      pb_iodone_work; /* deferred I/O completion  */
+       atomic_t                pb_io_remaining;/* #outstanding I/O requests */
+       page_buf_iodone_t       pb_iodone;      /* I/O completion function */
+       page_buf_relse_t        pb_relse;       /* releasing function */
+       page_buf_bdstrat_t      pb_strat;       /* pre-write function */
+       struct semaphore        pb_iodonesema;  /* Semaphore for I/O waiters */
+       void                    *pb_fspriv;     /* filesystem private data  */
+       void                    *pb_fspriv2;    /* filesystem private data  */
+       void                    *pb_fspriv3;    /* fs private; holds mount
+                                                * pointer in xfs_bawrite() /
+                                                * xfs_bdwrite()            */
+       unsigned short          pb_error;       /* error code on I/O */
+       unsigned short          pb_page_count;  /* size of page array */
+       unsigned short          pb_offset;      /* page offset in first page */
+       unsigned char           pb_locked;      /* page array is locked */
+       unsigned char           pb_hash_index;  /* hash table index     */
+       struct page             **pb_pages;     /* array of page pointers */
+       struct page             *pb_page_array[PB_PAGES]; /* inline pages */
+#ifdef PAGEBUF_LOCK_TRACKING
+       int                     pb_last_holder; /* last lock holder (debug) */
+#endif
+} xfs_buf_t;
+
+
+/* Finding and Reading Buffers */
+
+extern xfs_buf_t *pagebuf_find(        /* find buffer for block if     */
+                                       /* the block is in memory       */
+               xfs_buftarg_t *,        /* inode for block              */
+               loff_t,                 /* starting offset of range     */
+               size_t,                 /* length of range              */
+               page_buf_flags_t);      /* PBF_LOCK                     */
+
+extern xfs_buf_t *pagebuf_get(         /* allocate a buffer            */
+               xfs_buftarg_t *,        /* inode for buffer             */
+               loff_t,                 /* starting offset of range     */
+               size_t,                 /* length of range              */
+               page_buf_flags_t);      /* PBF_LOCK, PBF_READ,          */
+                                       /* PBF_ASYNC                    */
+
+extern xfs_buf_t *pagebuf_lookup(
+               xfs_buftarg_t *,
+               loff_t,                 /* starting offset of range     */
+               size_t,                 /* length of range              */
+               page_buf_flags_t);      /* PBF_READ, PBF_WRITE,         */
+                                       /* PBF_FORCEIO,                 */
+
+extern xfs_buf_t *pagebuf_get_empty(   /* allocate pagebuf struct with */
+                                       /*  no memory or disk address   */
+               size_t len,
+               xfs_buftarg_t *);       /* mount point "fake" inode     */
+
+extern xfs_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct      */
+                                       /* without disk address         */
+               size_t len,
+               xfs_buftarg_t *);       /* mount point "fake" inode     */
+
+extern int pagebuf_associate_memory(
+               xfs_buf_t *,
+               void *,
+               size_t);
+
+extern void pagebuf_hold(              /* increment reference count    */
+               xfs_buf_t *);           /* buffer to hold               */
+
+extern void pagebuf_readahead(         /* read ahead into cache        */
+               xfs_buftarg_t  *,       /* target for buffer (or NULL)  */
+               loff_t,                 /* starting offset of range     */
+               size_t,                 /* length of range              */
+               page_buf_flags_t);      /* additional read flags        */
+
+/* Releasing Buffers */
+
+extern void pagebuf_free(              /* deallocate a buffer          */
+               xfs_buf_t *);           /* buffer to deallocate         */
+
+extern void pagebuf_rele(              /* release hold on a buffer     */
+               xfs_buf_t *);           /* buffer to release            */
+
+/* Locking and Unlocking Buffers */
+
+extern int pagebuf_cond_lock(          /* lock buffer, if not locked   */
+                                       /* (returns -EBUSY if locked)   */
+               xfs_buf_t *);           /* buffer to lock               */
+
+extern int pagebuf_lock_value(         /* return count on lock         */
+               xfs_buf_t *);          /* buffer to check              */
+
+extern int pagebuf_lock(               /* lock buffer                  */
+               xfs_buf_t *);          /* buffer to lock               */
+
+extern void pagebuf_unlock(            /* unlock buffer                */
+               xfs_buf_t *);           /* buffer to unlock             */
+
+/* Buffer Read and Write Routines */
+
+extern void pagebuf_iodone(            /* mark buffer I/O complete     */
+               xfs_buf_t *,            /* buffer to mark               */
+               int,                    /* use data/log helper thread.  */
+               int);                   /* run completion locally, or in
+                                        * a helper thread.             */
+
+extern void pagebuf_ioerror(           /* mark buffer in error (or not) */
+               xfs_buf_t *,            /* buffer to mark               */
+               int);                   /* error to store (0 if none)   */
+
+extern int pagebuf_iostart(            /* start I/O on a buffer        */
+               xfs_buf_t *,            /* buffer to start              */
+               page_buf_flags_t);      /* PBF_LOCK, PBF_ASYNC,         */
+                                       /* PBF_READ, PBF_WRITE,         */
+                                       /* PBF_DELWRI                   */
+
+extern int pagebuf_iorequest(          /* start real I/O               */
+               xfs_buf_t *);           /* buffer to convey to device   */
+
+extern int pagebuf_iowait(             /* wait for buffer I/O done     */
+               xfs_buf_t *);           /* buffer to wait on            */
+
+extern void pagebuf_iomove(            /* move data in/out of pagebuf  */
+               xfs_buf_t *,            /* buffer to manipulate         */
+               size_t,                 /* starting buffer offset       */
+               size_t,                 /* length in buffer             */
+               caddr_t,                /* data pointer                 */
+               page_buf_rw_t);         /* direction                    */
+
+/* Start I/O on a buffer: through the pre-write strategy routine when
+ * one is installed, otherwise directly via pagebuf_iorequest(). */
+static inline int pagebuf_iostrategy(xfs_buf_t *pb)
+{
+       return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb);
+}
+
+/* Error code recorded on the buffer; a NULL buffer reads as ENOMEM. */
+static inline int pagebuf_geterror(xfs_buf_t *pb)
+{
+       return pb ? pb->pb_error : ENOMEM;
+}
+
+/* Buffer Utility Routines */
+
+extern caddr_t pagebuf_offset(         /* pointer at offset in buffer  */
+               xfs_buf_t *,            /* buffer to offset into        */
+               size_t);                /* offset                       */
+
+/* Pinning Buffer Storage in Memory */
+
+extern void pagebuf_pin(               /* pin buffer in memory         */
+               xfs_buf_t *);           /* buffer to pin                */
+
+extern void pagebuf_unpin(             /* unpin buffered data          */
+               xfs_buf_t *);           /* buffer to unpin              */
+
+extern int pagebuf_ispin(              /* check if buffer is pinned    */
+               xfs_buf_t *);           /* buffer to check              */
+
+/* Delayed Write Buffer Routines */
+
+extern void pagebuf_delwri_dequeue(xfs_buf_t *);
+
+/* Buffer Daemon Setup Routines */
+
+extern int pagebuf_init(void);
+extern void pagebuf_terminate(void);
+
+
+#ifdef PAGEBUF_TRACE
+extern ktrace_t *pagebuf_trace_buf;
+extern void pagebuf_trace(
+               xfs_buf_t *,            /* buffer being traced          */
+               char *,                 /* description of operation     */
+               void *,                 /* arbitrary diagnostic value   */
+               void *);                /* return address               */
+#else
+# define pagebuf_trace(pb, id, ptr, ra)        do { } while (0)
+#endif
+
+#define pagebuf_target_name(target)    \
+       ({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; })
+
+
+
+
+
+/* These are just for xfs_syncsub... it sets an internal variable
+ * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
+ */
+#define XFS_B_ASYNC            PBF_ASYNC
+#define XFS_B_DELWRI           PBF_DELWRI
+#define XFS_B_READ             PBF_READ
+#define XFS_B_WRITE            PBF_WRITE
+#define XFS_B_STALE            PBF_STALE
+
+#define XFS_BUF_TRYLOCK                PBF_TRYLOCK
+#define XFS_INCORE_TRYLOCK     PBF_TRYLOCK
+#define XFS_BUF_LOCK           PBF_LOCK
+#define XFS_BUF_MAPPED         PBF_MAPPED
+
+#define BUF_BUSY               PBF_DONT_BLOCK
+
+#define XFS_BUF_BFLAGS(x)      ((x)->pb_flags)
+#define XFS_BUF_ZEROFLAGS(x)   \
+       ((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI))
+
+#define XFS_BUF_STALE(x)       ((x)->pb_flags |= XFS_B_STALE)
+#define XFS_BUF_UNSTALE(x)     ((x)->pb_flags &= ~XFS_B_STALE)
+#define XFS_BUF_ISSTALE(x)     ((x)->pb_flags & XFS_B_STALE)
+#define XFS_BUF_SUPER_STALE(x) do {                            \
+                                       XFS_BUF_STALE(x);       \
+                                       xfs_buf_undelay(x);     \
+                                       XFS_BUF_DONE(x);        \
+                               } while (0)
+
+#define XFS_BUF_MANAGE         PBF_FS_MANAGED
+#define XFS_BUF_UNMANAGE(x)    ((x)->pb_flags &= ~PBF_FS_MANAGED)
+
+/*
+ * Cancel a pending delayed write.  If the buffer is actually linked on
+ * a delwri list (pb_list non-empty), dequeue it and release a reference
+ * (presumably the one the delwri queue held -- confirm); otherwise just
+ * clear the PBF_DELWRI flag.
+ */
+static inline void xfs_buf_undelay(xfs_buf_t *pb)
+{
+       if (pb->pb_flags & PBF_DELWRI) {
+               if (pb->pb_list.next != &pb->pb_list) {         /* queued? */
+                       pagebuf_delwri_dequeue(pb);
+                       pagebuf_rele(pb);
+               } else {
+                       pb->pb_flags &= ~PBF_DELWRI;
+               }
+       }
+}
+
+#define XFS_BUF_DELAYWRITE(x)   ((x)->pb_flags |= PBF_DELWRI)
+#define XFS_BUF_UNDELAYWRITE(x)         xfs_buf_undelay(x)
+#define XFS_BUF_ISDELAYWRITE(x)         ((x)->pb_flags & PBF_DELWRI)
+
+#define XFS_BUF_ERROR(x,no)     pagebuf_ioerror(x,no)
+#define XFS_BUF_GETERROR(x)     pagebuf_geterror(x)
+#define XFS_BUF_ISERROR(x)      (pagebuf_geterror(x)?1:0)
+
+#define XFS_BUF_DONE(x)                 ((x)->pb_flags &= ~(PBF_PARTIAL|PBF_NONE))
+#define XFS_BUF_UNDONE(x)       ((x)->pb_flags |= PBF_PARTIAL|PBF_NONE)
+#define XFS_BUF_ISDONE(x)       (!(PBF_NOT_DONE(x)))
+
+#define XFS_BUF_BUSY(x)                 ((x)->pb_flags |= PBF_FORCEIO)
+#define XFS_BUF_UNBUSY(x)       ((x)->pb_flags &= ~PBF_FORCEIO)
+#define XFS_BUF_ISBUSY(x)       (1)
+
+#define XFS_BUF_ASYNC(x)        ((x)->pb_flags |= PBF_ASYNC)
+#define XFS_BUF_UNASYNC(x)      ((x)->pb_flags &= ~PBF_ASYNC)
+#define XFS_BUF_ISASYNC(x)      ((x)->pb_flags & PBF_ASYNC)
+
+#define XFS_BUF_FLUSH(x)        ((x)->pb_flags |= PBF_FLUSH)
+#define XFS_BUF_UNFLUSH(x)      ((x)->pb_flags &= ~PBF_FLUSH)
+#define XFS_BUF_ISFLUSH(x)      ((x)->pb_flags & PBF_FLUSH)
+
+#define XFS_BUF_SHUT(x)                 printk("XFS_BUF_SHUT not implemented yet\n")
+#define XFS_BUF_UNSHUT(x)       printk("XFS_BUF_UNSHUT not implemented yet\n")
+#define XFS_BUF_ISSHUT(x)       (0)
+
+#define XFS_BUF_HOLD(x)                pagebuf_hold(x)
+#define XFS_BUF_READ(x)                ((x)->pb_flags |= PBF_READ)
+#define XFS_BUF_UNREAD(x)      ((x)->pb_flags &= ~PBF_READ)
+#define XFS_BUF_ISREAD(x)      ((x)->pb_flags & PBF_READ)
+
+#define XFS_BUF_WRITE(x)       ((x)->pb_flags |= PBF_WRITE)
+#define XFS_BUF_UNWRITE(x)     ((x)->pb_flags &= ~PBF_WRITE)
+#define XFS_BUF_ISWRITE(x)     ((x)->pb_flags & PBF_WRITE)
+
+#define XFS_BUF_ISUNINITIAL(x)  (0)
+#define XFS_BUF_UNUNINITIAL(x)  (0)
+
+#define XFS_BUF_BP_ISMAPPED(bp)         1
+
+#define XFS_BUF_DATAIO(x)      ((x)->pb_flags |= PBF_FS_DATAIOD)
+#define XFS_BUF_UNDATAIO(x)    ((x)->pb_flags &= ~PBF_FS_DATAIOD)
+
+#define XFS_BUF_IODONE_FUNC(buf)       (buf)->pb_iodone
+#define XFS_BUF_SET_IODONE_FUNC(buf, func)     \
+                       (buf)->pb_iodone = (func)
+#define XFS_BUF_CLR_IODONE_FUNC(buf)           \
+                       (buf)->pb_iodone = NULL
+#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func)    \
+                       (buf)->pb_strat = (func)
+#define XFS_BUF_CLR_BDSTRAT_FUNC(buf)          \
+                       (buf)->pb_strat = NULL
+
+#define XFS_BUF_FSPRIVATE(buf, type)           \
+                       ((type)(buf)->pb_fspriv)
+#define XFS_BUF_SET_FSPRIVATE(buf, value)      \
+                       (buf)->pb_fspriv = (void *)(value)
+#define XFS_BUF_FSPRIVATE2(buf, type)          \
+                       ((type)(buf)->pb_fspriv2)
+#define XFS_BUF_SET_FSPRIVATE2(buf, value)     \
+                       (buf)->pb_fspriv2 = (void *)(value)
+#define XFS_BUF_FSPRIVATE3(buf, type)          \
+                       ((type)(buf)->pb_fspriv3)
+#define XFS_BUF_SET_FSPRIVATE3(buf, value)     \
+                       (buf)->pb_fspriv3  = (void *)(value)
+#define XFS_BUF_SET_START(buf)
+
+#define XFS_BUF_SET_BRELSE_FUNC(buf, value) \
+                       (buf)->pb_relse = (value)
+
+#define XFS_BUF_PTR(bp)                (xfs_caddr_t)((bp)->pb_addr)
+
+/* Address of byte 'offset' within the buffer: plain pointer arithmetic
+ * when the buffer is virtually mapped (PBF_MAPPED), otherwise resolved
+ * through the page array by pagebuf_offset(). */
+extern inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
+{
+       if (bp->pb_flags & PBF_MAPPED)
+               return XFS_BUF_PTR(bp) + offset;
+       return (xfs_caddr_t) pagebuf_offset(bp, offset);
+}
+
+#define XFS_BUF_SET_PTR(bp, val, count)                \
+                               pagebuf_associate_memory(bp, val, count)
+#define XFS_BUF_ADDR(bp)       ((bp)->pb_bn)
+#define XFS_BUF_SET_ADDR(bp, blk)              \
+                       ((bp)->pb_bn = (blk))
+#define XFS_BUF_OFFSET(bp)     ((bp)->pb_file_offset)
+#define XFS_BUF_SET_OFFSET(bp, off)            \
+                       ((bp)->pb_file_offset = (off))
+#define XFS_BUF_COUNT(bp)      ((bp)->pb_count_desired)
+#define XFS_BUF_SET_COUNT(bp, cnt)             \
+                       ((bp)->pb_count_desired = (cnt))
+#define XFS_BUF_SIZE(bp)       ((bp)->pb_buffer_length)
+#define XFS_BUF_SET_SIZE(bp, cnt)              \
+                       ((bp)->pb_buffer_length = (cnt))
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
+#define XFS_BUF_SET_VTYPE(bp, type)
+#define XFS_BUF_SET_REF(bp, ref)
+
+#define XFS_BUF_ISPINNED(bp)   pagebuf_ispin(bp)
+
+#define XFS_BUF_VALUSEMA(bp)   pagebuf_lock_value(bp)
+#define XFS_BUF_CPSEMA(bp)     (pagebuf_cond_lock(bp) == 0)
+#define XFS_BUF_VSEMA(bp)      pagebuf_unlock(bp)
+#define XFS_BUF_PSEMA(bp,x)    pagebuf_lock(bp)
+#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema);
+
+/* setup the buffer target from a buftarg structure */
+#define XFS_BUF_SET_TARGET(bp, target) \
+               (bp)->pb_target = (target)
+#define XFS_BUF_TARGET(bp)     ((bp)->pb_target)
+#define XFS_BUFTARG_NAME(target)       \
+               pagebuf_target_name(target)
+
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
+#define XFS_BUF_SET_VTYPE(bp, type)
+#define XFS_BUF_SET_REF(bp, ref)
+
+#define xfs_buf_read(target, blkno, len, flags) \
+               pagebuf_get((target), (blkno), (len), \
+                       PBF_LOCK | PBF_READ | PBF_MAPPED)
+#define xfs_buf_get(target, blkno, len, flags) \
+               pagebuf_get((target), (blkno), (len), \
+                       PBF_LOCK | PBF_MAPPED)
+
+#define xfs_buf_read_flags(target, blkno, len, flags) \
+               pagebuf_get((target), (blkno), (len), PBF_READ | (flags))
+#define xfs_buf_get_flags(target, blkno, len, flags) \
+               pagebuf_get((target), (blkno), (len), (flags))
+
+/* Asynchronous buffer write: record the mount pointer in pb_fspriv3,
+ * route the I/O through xfs_bdstrat_cb, cancel any pending delayed
+ * write, then start an async write that also runs the device queues. */
+static inline int      xfs_bawrite(void *mp, xfs_buf_t *bp)
+{
+       bp->pb_fspriv3 = mp;
+       bp->pb_strat = xfs_bdstrat_cb;
+       xfs_buf_undelay(bp);
+       return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES);
+}
+
+/* Release a buffer: unlock it first unless a private release callback
+ * (pb_relse) is installed, then drop the hold reference. */
+static inline void     xfs_buf_relse(xfs_buf_t *bp)
+{
+       if (!bp->pb_relse)
+               pagebuf_unlock(bp);
+       pagebuf_rele(bp);
+}
+
+#define xfs_bpin(bp)           pagebuf_pin(bp)
+#define xfs_bunpin(bp)         pagebuf_unpin(bp)
+
+#define xfs_buftrace(id, bp)   \
+           pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
+
+#define xfs_biodone(pb)                    \
+           pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), 0)
+
+#define xfs_incore(buftarg,blkno,len,lockit) \
+           pagebuf_find(buftarg, blkno ,len, lockit)
+
+
+#define xfs_biomove(pb, off, len, data, rw) \
+           pagebuf_iomove((pb), (off), (len), (data), \
+               ((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ)
+
+#define xfs_biozero(pb, off, len) \
+           pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO)
+
+
+/*
+ * Write a buffer.  Synchronous callers (PBF_ASYNC clear) wait for the
+ * I/O, get the error code back, and the buffer is released here; async
+ * callers get 0 and completion handling does the rest.
+ */
+static inline int      XFS_bwrite(xfs_buf_t *pb)
+{
+       int     iowait = (pb->pb_flags & PBF_ASYNC) == 0;
+       int     error = 0;
+
+       if (!iowait)
+               pb->pb_flags |= _PBF_RUN_QUEUES;        /* async: kick queues */
+
+       xfs_buf_undelay(pb);
+       pagebuf_iostrategy(pb);
+       if (iowait) {
+               error = pagebuf_iowait(pb);
+               xfs_buf_relse(pb);
+       }
+       return error;
+}
+
+#define XFS_bdwrite(pb)                     \
+           pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)
+
+/* Queue a delayed (async) write through xfs_bdstrat_cb, recording the
+ * mount pointer in pb_fspriv3. */
+static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
+{
+       bp->pb_strat = xfs_bdstrat_cb;
+       bp->pb_fspriv3 = mp;
+
+       return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);
+}
+
+#define XFS_bdstrat(bp) pagebuf_iorequest(bp)
+
+#define xfs_iowait(pb) pagebuf_iowait(pb)
+
+#define xfs_baread(target, rablkno, ralen)  \
+       pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK)
+
+#define xfs_buf_get_empty(len, target) pagebuf_get_empty((len), (target))
+#define xfs_buf_get_noaddr(len, target)        pagebuf_get_no_daddr((len), (target))
+#define xfs_buf_free(bp)               pagebuf_free(bp)
+
+
+/*
+ *     Handling of buftargs.
+ */
+
+extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *);
+extern void xfs_free_buftarg(xfs_buftarg_t *, int);
+extern void xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
+extern void xfs_incore_relse(xfs_buftarg_t *, int, int);
+extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
+
+#define xfs_getsize_buftarg(buftarg) \
+       block_size((buftarg)->pbr_bdev)
+#define xfs_readonly_buftarg(buftarg) \
+       bdev_read_only((buftarg)->pbr_bdev)
+#define xfs_binval(buftarg) \
+       xfs_flush_buftarg(buftarg, 1)
+#define XFS_bflush(buftarg) \
+       xfs_flush_buftarg(buftarg, 1)
+
+#endif /* __XFS_BUF_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
new file mode 100644 (file)
index 0000000..aaa74d2
--- /dev/null
@@ -0,0 +1,546 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_trans.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_error.h"
+#include "xfs_rw.h"
+
+#include <linux/dcache.h>
+
+static struct vm_operations_struct linvfs_file_vm_ops;
+
+
+/*
+ * Common aio read path: wrap the single user buffer in an iovec, tag
+ * O_DIRECT opens with IO_ISDIRECT, and hand off to the vnode VOP_READ
+ * operation.  'rval' is filled in by the VOP_READ macro.
+ */
+STATIC inline ssize_t
+__linvfs_read(
+       struct kiocb            *iocb,
+       char __user             *buf,
+       int                     ioflags,
+       size_t                  count,
+       loff_t                  pos)
+{
+       struct iovec            iov = {buf, count};
+       struct file             *file = iocb->ki_filp;
+       vnode_t                 *vp = LINVFS_GET_VP(file->f_dentry->d_inode);
+       ssize_t                 rval;
+
+       BUG_ON(iocb->ki_pos != pos);
+
+       if (unlikely(file->f_flags & O_DIRECT))
+               ioflags |= IO_ISDIRECT;
+       VOP_READ(vp, iocb, &iov, 1, &iocb->ki_pos, ioflags, NULL, rval);
+       return rval;
+}
+
+
+/* ->aio_read entry point: a plain read, no extra ioflags. */
+STATIC ssize_t
+linvfs_read(
+       struct kiocb            *iocb,
+       char __user             *buf,
+       size_t                  count,
+       loff_t                  pos)
+{
+       return __linvfs_read(iocb, buf, 0, count, pos);
+}
+
+/* "Invisible" read: passes IO_INVIS down to VOP_READ (presumably a
+ * DMAPI-style read that leaves inode state untouched -- confirm). */
+STATIC ssize_t
+linvfs_read_invis(
+       struct kiocb            *iocb,
+       char __user             *buf,
+       size_t                  count,
+       loff_t                  pos)
+{
+       return __linvfs_read(iocb, buf, IO_INVIS, count, pos);
+}
+
+
+/*
+ * Common aio write path.  O_DIRECT writes go down without taking
+ * i_sem (locking presumably handled inside the VOP layer -- confirm);
+ * buffered writes are serialized under the inode semaphore.  'rval'
+ * is set by the VOP_WRITE macro.
+ */
+STATIC inline ssize_t
+__linvfs_write(
+       struct kiocb    *iocb,
+       const char      *buf,
+       int             ioflags,
+       size_t          count,
+       loff_t          pos)
+{
+       struct iovec    iov = {(void *)buf, count};
+       struct file     *file = iocb->ki_filp;
+       struct inode    *inode = file->f_mapping->host;
+       vnode_t         *vp = LINVFS_GET_VP(inode);
+       ssize_t         rval;
+
+       BUG_ON(iocb->ki_pos != pos);
+       if (unlikely(file->f_flags & O_DIRECT)) {
+               ioflags |= IO_ISDIRECT;
+               VOP_WRITE(vp, iocb, &iov, 1, &iocb->ki_pos,
+                               ioflags, NULL, rval);
+       } else {
+               down(&inode->i_sem);
+               VOP_WRITE(vp, iocb, &iov, 1, &iocb->ki_pos,
+                               ioflags, NULL, rval);
+               up(&inode->i_sem);
+       }
+
+       return rval;
+}
+
+
+/* ->aio_write entry point: a plain write, no extra ioflags. */
+STATIC ssize_t
+linvfs_write(
+       struct kiocb            *iocb,
+       const char __user       *buf,
+       size_t                  count,
+       loff_t                  pos)
+{
+       return __linvfs_write(iocb, buf, 0, count, pos);
+}
+
+/* "Invisible" write: passes IO_INVIS down to VOP_WRITE (see the
+ * matching note on linvfs_read_invis). */
+STATIC ssize_t
+linvfs_write_invis(
+       struct kiocb            *iocb,
+       const char __user       *buf,
+       size_t                  count,
+       loff_t                  pos)
+{
+       return __linvfs_write(iocb, buf, IO_INVIS, count, pos);
+}
+
+
+/*
+ * Common vectored-read path: build a synchronous kiocb, call VOP_READ,
+ * and if the operation was queued (-EIOCBQUEUED) wait for it to finish.
+ * The updated file position is copied back to *ppos.
+ */
+STATIC inline ssize_t
+__linvfs_readv(
+       struct file             *file,
+       const struct iovec      *iov,
+       int                     ioflags,
+       unsigned long           nr_segs,
+       loff_t                  *ppos)
+{
+       struct inode    *inode = file->f_mapping->host;
+       vnode_t         *vp = LINVFS_GET_VP(inode);
+       struct          kiocb kiocb;
+       ssize_t         rval;
+
+       init_sync_kiocb(&kiocb, file);
+       kiocb.ki_pos = *ppos;
+
+       if (unlikely(file->f_flags & O_DIRECT))
+               ioflags |= IO_ISDIRECT;
+       VOP_READ(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, ioflags, NULL, rval);
+       if (rval == -EIOCBQUEUED)
+               rval = wait_on_sync_kiocb(&kiocb);
+
+       *ppos = kiocb.ki_pos;
+       return rval;
+}
+
+/* ->readv entry point: a plain vectored read, no extra ioflags. */
+STATIC ssize_t
+linvfs_readv(
+       struct file             *file,
+       const struct iovec      *iov,
+       unsigned long           nr_segs,
+       loff_t                  *ppos)
+{
+       return __linvfs_readv(file, iov, 0, nr_segs, ppos);
+}
+
+/* "Invisible" vectored read: passes IO_INVIS down to VOP_READ. */
+STATIC ssize_t
+linvfs_readv_invis(
+       struct file             *file,
+       const struct iovec      *iov,
+       unsigned long           nr_segs,
+       loff_t                  *ppos)
+{
+       return __linvfs_readv(file, iov, IO_INVIS, nr_segs, ppos);
+}
+
+
+/*
+ * Common vectored-write path: build a synchronous kiocb and call
+ * VOP_WRITE -- without i_sem for O_DIRECT (locking presumably handled
+ * in the VOP layer -- confirm), under i_sem for buffered writes.  If
+ * the operation was queued (-EIOCBQUEUED), wait for completion, then
+ * copy the updated position back to *ppos.
+ */
+STATIC inline ssize_t
+__linvfs_writev(
+       struct file             *file,
+       const struct iovec      *iov,
+       int                     ioflags,
+       unsigned long           nr_segs,
+       loff_t                  *ppos)
+{
+       struct inode    *inode = file->f_mapping->host;
+       vnode_t         *vp = LINVFS_GET_VP(inode);
+       struct          kiocb kiocb;
+       ssize_t         rval;
+
+       init_sync_kiocb(&kiocb, file);
+       kiocb.ki_pos = *ppos;
+       if (unlikely(file->f_flags & O_DIRECT)) {
+               ioflags |= IO_ISDIRECT;
+               VOP_WRITE(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos,
+                               ioflags, NULL, rval);
+       } else {
+               down(&inode->i_sem);
+               VOP_WRITE(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos,
+                               ioflags, NULL, rval);
+               up(&inode->i_sem);
+       }
+
+       if (rval == -EIOCBQUEUED)
+               rval = wait_on_sync_kiocb(&kiocb);
+
+       *ppos = kiocb.ki_pos;
+       return rval;
+}
+
+
+STATIC ssize_t
+linvfs_writev(
+       struct file             *file,
+       const struct iovec      *iov,
+       unsigned long           nr_segs,
+       loff_t                  *ppos)
+{
+       return __linvfs_writev(file, iov, 0, nr_segs, ppos);
+}
+
+STATIC ssize_t
+linvfs_writev_invis(
+       struct file             *file,
+       const struct iovec      *iov,
+       unsigned long           nr_segs,
+       loff_t                  *ppos)
+{
+       return __linvfs_writev(file, iov, IO_INVIS, nr_segs, ppos);
+}
+
+STATIC ssize_t
+linvfs_sendfile(
+       struct file             *filp,
+       loff_t                  *ppos,
+       size_t                  count,
+       read_actor_t            actor,
+       void                    *target)
+{
+       vnode_t                 *vp = LINVFS_GET_VP(filp->f_dentry->d_inode);
+       ssize_t                 rval;
+
+       VOP_SENDFILE(vp, filp, ppos, 0, count, actor, target, NULL, rval);
+       return rval;
+}
+
+
+STATIC int
+linvfs_open(
+       struct inode    *inode,
+       struct file     *filp)
+{
+       vnode_t         *vp = LINVFS_GET_VP(inode);
+       int             error;
+
+       if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
+               return -EFBIG;
+
+       ASSERT(vp);
+       VOP_OPEN(vp, NULL, error);
+       return -error;
+}
+
+
+STATIC int
+linvfs_release(
+       struct inode    *inode,
+       struct file     *filp)
+{
+       vnode_t         *vp = LINVFS_GET_VP(inode);
+       int             error = 0;
+
+       if (vp)
+               VOP_RELEASE(vp, error);
+       return -error;
+}
+
+
+STATIC int
+linvfs_fsync(
+       struct file     *filp,
+       struct dentry   *dentry,
+       int             datasync)
+{
+       struct inode    *inode = dentry->d_inode;
+       vnode_t         *vp = LINVFS_GET_VP(inode);
+       int             error;
+       int             flags = FSYNC_WAIT;
+
+       if (datasync)
+               flags |= FSYNC_DATA;
+
+       ASSERT(vp);
+       VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error);
+       return -error;
+}
+
+/*
+ * linvfs_readdir maps to VOP_READDIR().
+ * We need to build a uio, cred, ...
+ */
+
+#define nextdp(dp)      ((struct xfs_dirent *)((char *)(dp) + (dp)->d_reclen))
+
+STATIC int
+linvfs_readdir(
+       struct file     *filp,
+       void            *dirent,
+       filldir_t       filldir)
+{
+       int             error = 0;
+       vnode_t         *vp;
+       uio_t           uio;
+       iovec_t         iov;
+       int             eof = 0;
+       caddr_t         read_buf;
+       int             namelen, size = 0;
+       size_t          rlen = PAGE_CACHE_SIZE;
+       xfs_off_t       start_offset, curr_offset;
+       xfs_dirent_t    *dbp = NULL;
+
+       vp = LINVFS_GET_VP(filp->f_dentry->d_inode);
+       ASSERT(vp);
+
+       /* Try fairly hard to get memory */
+       do {
+               if ((read_buf = (caddr_t)kmalloc(rlen, GFP_KERNEL)))
+                       break;
+               rlen >>= 1;
+       } while (rlen >= 1024);
+
+       if (read_buf == NULL)
+               return -ENOMEM;
+
+       uio.uio_iov = &iov;
+       uio.uio_segflg = UIO_SYSSPACE;
+       curr_offset = filp->f_pos;
+       if (filp->f_pos != 0x7fffffff)
+               uio.uio_offset = filp->f_pos;
+       else
+               uio.uio_offset = 0xffffffff;
+
+       while (!eof) {
+               uio.uio_resid = iov.iov_len = rlen;
+               iov.iov_base = read_buf;
+               uio.uio_iovcnt = 1;
+
+               start_offset = uio.uio_offset;
+
+               VOP_READDIR(vp, &uio, NULL, &eof, error);
+               if ((uio.uio_offset == start_offset) || error) {
+                       size = 0;
+                       break;
+               }
+
+               size = rlen - uio.uio_resid;
+               dbp = (xfs_dirent_t *)read_buf;
+               while (size > 0) {
+                       namelen = strlen(dbp->d_name);
+
+                       if (filldir(dirent, dbp->d_name, namelen,
+                                       (loff_t) curr_offset & 0x7fffffff,
+                                       (ino_t) dbp->d_ino,
+                                       DT_UNKNOWN)) {
+                               goto done;
+                       }
+                       size -= dbp->d_reclen;
+                       curr_offset = (loff_t)dbp->d_off /* & 0x7fffffff */;
+                       dbp = nextdp(dbp);
+               }
+       }
+done:
+       if (!error) {
+               if (size == 0)
+                       filp->f_pos = uio.uio_offset & 0x7fffffff;
+               else if (dbp)
+                       filp->f_pos = curr_offset;
+       }
+
+       kfree(read_buf);
+       return -error;
+}
+
+
+STATIC int
+linvfs_file_mmap(
+       struct file     *filp,
+       struct vm_area_struct *vma)
+{
+       struct inode    *ip = filp->f_dentry->d_inode;
+       vnode_t         *vp = LINVFS_GET_VP(ip);
+       vattr_t         va = { .va_mask = XFS_AT_UPDATIME };
+       int             error;
+
+       if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) {
+               xfs_mount_t     *mp = XFS_VFSTOM(vp->v_vfsp);
+
+               error = -XFS_SEND_MMAP(mp, vma, 0);
+               if (error)
+                       return error;
+       }
+
+       vma->vm_ops = &linvfs_file_vm_ops;
+
+       VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error);
+       return 0;
+}
+
+
+STATIC int
+linvfs_ioctl(
+       struct inode    *inode,
+       struct file     *filp,
+       unsigned int    cmd,
+       unsigned long   arg)
+{
+       int             error;
+       vnode_t         *vp = LINVFS_GET_VP(inode);
+
+       ASSERT(vp);
+       VOP_IOCTL(vp, inode, filp, 0, cmd, arg, error);
+       VMODIFY(vp);
+
+       /* NOTE:  some of the ioctl's return positive #'s as a
+        *        byte count indicating success, such as
+        *        readlink_by_handle.  So we don't "sign flip"
+        *        like most other routines.  This means true
+        *        errors need to be returned as a negative value.
+        */
+       return error;
+}
+
+STATIC int
+linvfs_ioctl_invis(
+       struct inode    *inode,
+       struct file     *filp,
+       unsigned int    cmd,
+       unsigned long   arg)
+{
+       int             error;
+       vnode_t         *vp = LINVFS_GET_VP(inode);
+
+       ASSERT(vp);
+       VOP_IOCTL(vp, inode, filp, IO_INVIS, cmd, arg, error);
+       VMODIFY(vp);
+
+       /* NOTE:  some of the ioctl's return positive #'s as a
+        *        byte count indicating success, such as
+        *        readlink_by_handle.  So we don't "sign flip"
+        *        like most other routines.  This means true
+        *        errors need to be returned as a negative value.
+        */
+       return error;
+}
+
+#ifdef HAVE_VMOP_MPROTECT
+STATIC int
+linvfs_mprotect(
+       struct vm_area_struct *vma,
+       unsigned int    newflags)
+{
+       vnode_t         *vp = LINVFS_GET_VP(vma->vm_file->f_dentry->d_inode);
+       int             error = 0;
+
+       if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) {
+               if ((vma->vm_flags & VM_MAYSHARE) &&
+                   (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) {
+                       xfs_mount_t     *mp = XFS_VFSTOM(vp->v_vfsp);
+
+                       error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
+                   }
+       }
+       return error;
+}
+#endif /* HAVE_VMOP_MPROTECT */
+
+
+struct file_operations linvfs_file_operations = {
+       .llseek         = generic_file_llseek,
+       .read           = do_sync_read,
+       .write          = do_sync_write,
+       .readv          = linvfs_readv,
+       .writev         = linvfs_writev,
+       .aio_read       = linvfs_read,
+       .aio_write      = linvfs_write,
+       .sendfile       = linvfs_sendfile,
+       .ioctl          = linvfs_ioctl,
+       .mmap           = linvfs_file_mmap,
+       .open           = linvfs_open,
+       .release        = linvfs_release,
+       .fsync          = linvfs_fsync,
+};
+
+struct file_operations linvfs_invis_file_operations = {
+       .llseek         = generic_file_llseek,
+       .read           = do_sync_read,
+       .write          = do_sync_write,
+       .readv          = linvfs_readv_invis,
+       .writev         = linvfs_writev_invis,
+       .aio_read       = linvfs_read_invis,
+       .aio_write      = linvfs_write_invis,
+       .sendfile       = linvfs_sendfile,
+       .ioctl          = linvfs_ioctl_invis,
+       .mmap           = linvfs_file_mmap,
+       .open           = linvfs_open,
+       .release        = linvfs_release,
+       .fsync          = linvfs_fsync,
+};
+
+
+struct file_operations linvfs_dir_operations = {
+       .read           = generic_read_dir,
+       .readdir        = linvfs_readdir,
+       .ioctl          = linvfs_ioctl,
+       .fsync          = linvfs_fsync,
+};
+
+static struct vm_operations_struct linvfs_file_vm_ops = {
+       .nopage         = filemap_nopage,
+#ifdef HAVE_VMOP_MPROTECT
+       .mprotect       = linvfs_mprotect,
+#endif
+};
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
new file mode 100644 (file)
index 0000000..afad970
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+/*
+ * Stub for no-op vnode operations that return error status.
+ */
+int
+fs_noerr()
+{
+       return 0;
+}
+
+/*
+ * Operation unsupported under this file system.
+ */
+int
+fs_nosys()
+{
+       return ENOSYS;
+}
+
+/*
+ * Stub for inactive, strategy, and read/write lock/unlock.  Does nothing.
+ */
+/* ARGSUSED */
+void
+fs_noval()
+{
+}
+
+/*
+ * vnode pcache layer for vnode_tosspages.
+ * 'last' parameter unused but left in for IRIX compatibility
+ */
+void
+fs_tosspages(
+       bhv_desc_t      *bdp,
+       xfs_off_t       first,
+       xfs_off_t       last,
+       int             fiopt)
+{
+       vnode_t         *vp = BHV_TO_VNODE(bdp);
+       struct inode    *ip = LINVFS_GET_IP(vp);
+
+       if (VN_CACHED(vp))
+               truncate_inode_pages(ip->i_mapping, first);
+}
+
+
+/*
+ * vnode pcache layer for vnode_flushinval_pages.
+ * 'last' parameter unused but left in for IRIX compatibility
+ */
+void
+fs_flushinval_pages(
+       bhv_desc_t      *bdp,
+       xfs_off_t       first,
+       xfs_off_t       last,
+       int             fiopt)
+{
+       vnode_t         *vp = BHV_TO_VNODE(bdp);
+       struct inode    *ip = LINVFS_GET_IP(vp);
+
+       if (VN_CACHED(vp)) {
+               filemap_fdatawrite(ip->i_mapping);
+               filemap_fdatawait(ip->i_mapping);
+
+               truncate_inode_pages(ip->i_mapping, first);
+       }
+}
+
+/*
+ * vnode pcache layer for vnode_flush_pages.
+ * 'last' parameter unused but left in for IRIX compatibility
+ */
+int
+fs_flush_pages(
+       bhv_desc_t      *bdp,
+       xfs_off_t       first,
+       xfs_off_t       last,
+       uint64_t        flags,
+       int             fiopt)
+{
+       vnode_t         *vp = BHV_TO_VNODE(bdp);
+       struct inode    *ip = LINVFS_GET_IP(vp);
+
+       if (VN_CACHED(vp)) {
+               filemap_fdatawrite(ip->i_mapping);
+               filemap_fdatawait(ip->i_mapping);
+       }
+
+       return 0;
+}
diff --git a/fs/xfs/linux-2.6/xfs_globals.c b/fs/xfs/linux-2.6/xfs_globals.c
new file mode 100644 (file)
index 0000000..cbcdec4
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+/*
+ * This file contains globals needed by XFS that were normally defined
+ * somewhere else in IRIX.
+ */
+
+#include "xfs.h"
+#include "xfs_cred.h"
+#include "xfs_sysctl.h"
+
+/*
+ * System memory size - used to scale certain data structures in XFS.
+ */
+unsigned long xfs_physmem;
+
+/*
+ * Tunable XFS parameters.  xfs_params is required even when CONFIG_SYSCTL=n,
+ * other XFS code uses these values.  Times are measured in centisecs (i.e.
+ * 100ths of a second).
+ */
+xfs_param_t xfs_params = {
+                         /*    MIN             DFLT            MAX     */
+       .restrict_chown = {     0,              1,              1       },
+       .sgid_inherit   = {     0,              0,              1       },
+       .symlink_mode   = {     0,              0,              1       },
+       .panic_mask     = {     0,              0,              127     },
+       .error_level    = {     0,              3,              11      },
+       .syncd_timer    = {     1*100,          30*100,         7200*100},
+       .stats_clear    = {     0,              0,              1       },
+       .inherit_sync   = {     0,              1,              1       },
+       .inherit_nodump = {     0,              1,              1       },
+       .inherit_noatim = {     0,              1,              1       },
+       .xfs_buf_timer  = {     100/2,          1*100,          30*100  },
+       .xfs_buf_age    = {     1*100,          15*100,         7200*100},
+};
+
+/*
+ * Global system credential structure.
+ */
+cred_t sys_cred_val, *sys_cred = &sys_cred_val;
+
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
new file mode 100644 (file)
index 0000000..e348ef8
--- /dev/null
@@ -0,0 +1,1246 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+#include "xfs_dfrag.h"
+#include "xfs_fsops.h"
+
+#include <linux/dcache.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+
+/*
+ * ioctl commands that are used by Linux filesystems
+ */
+#define XFS_IOC_GETXFLAGS      _IOR('f', 1, long)
+#define XFS_IOC_SETXFLAGS      _IOW('f', 2, long)
+#define XFS_IOC_GETVERSION     _IOR('v', 1, long)
+
+
+/*
+ * xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
+ * a file or fs handle.
+ *
+ * XFS_IOC_PATH_TO_FSHANDLE
+ *    returns fs handle for a mount point or path within that mount point
+ * XFS_IOC_FD_TO_HANDLE
+ *    returns full handle for a FD opened in user space
+ * XFS_IOC_PATH_TO_HANDLE
+ *    returns full handle for a path
+ */
+STATIC int
+xfs_find_handle(
+       unsigned int            cmd,
+       unsigned long           arg)
+{
+       int                     hsize;
+       xfs_handle_t            handle;
+       xfs_fsop_handlereq_t    hreq;
+       struct inode            *inode;
+       struct vnode            *vp;
+
+       if (copy_from_user(&hreq, (xfs_fsop_handlereq_t *)arg, sizeof(hreq)))
+               return -XFS_ERROR(EFAULT);
+
+       memset((char *)&handle, 0, sizeof(handle));
+
+       switch (cmd) {
+       case XFS_IOC_PATH_TO_FSHANDLE:
+       case XFS_IOC_PATH_TO_HANDLE: {
+               struct nameidata        nd;
+               int                     error;
+
+               error = user_path_walk_link(hreq.path, &nd);
+               if (error)
+                       return error;
+
+               ASSERT(nd.dentry);
+               ASSERT(nd.dentry->d_inode);
+               inode = igrab(nd.dentry->d_inode);
+               path_release(&nd);
+               break;
+       }
+
+       case XFS_IOC_FD_TO_HANDLE: {
+               struct file     *file;
+
+               file = fget(hreq.fd);
+               if (!file)
+                   return -EBADF;
+
+               ASSERT(file->f_dentry);
+               ASSERT(file->f_dentry->d_inode);
+               inode = igrab(file->f_dentry->d_inode);
+               fput(file);
+               break;
+       }
+
+       default:
+               ASSERT(0);
+               return -XFS_ERROR(EINVAL);
+       }
+
+       if (inode->i_sb->s_magic != XFS_SB_MAGIC) {
+               /* we're not in XFS anymore, Toto */
+               iput(inode);
+               return -XFS_ERROR(EINVAL);
+       }
+
+       /* we need the vnode */
+       vp = LINVFS_GET_VP(inode);
+       if (vp->v_type != VREG && vp->v_type != VDIR && vp->v_type != VLNK) {
+               iput(inode);
+               return -XFS_ERROR(EBADF);
+       }
+
+       /* now we can grab the fsid */
+       memcpy(&handle.ha_fsid, vp->v_vfsp->vfs_altfsid, sizeof(xfs_fsid_t));
+       hsize = sizeof(xfs_fsid_t);
+
+       if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
+               xfs_inode_t     *ip;
+               bhv_desc_t      *bhv;
+               int             lock_mode;
+
+               /* need to get access to the xfs_inode to read the generation */
+               bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
+               ASSERT(bhv);
+               ip = XFS_BHVTOI(bhv);
+               ASSERT(ip);
+               lock_mode = xfs_ilock_map_shared(ip);
+
+               /* fill in fid section of handle from inode */
+               handle.ha_fid.xfs_fid_len = sizeof(xfs_fid_t) -
+                                           sizeof(handle.ha_fid.xfs_fid_len);
+               handle.ha_fid.xfs_fid_pad = 0;
+               handle.ha_fid.xfs_fid_gen = ip->i_d.di_gen;
+               handle.ha_fid.xfs_fid_ino = ip->i_ino;
+
+               xfs_iunlock_map_shared(ip, lock_mode);
+
+               hsize = XFS_HSIZE(handle);
+       }
+
+       /* now copy our handle into the user buffer & write out the size */
+       if (copy_to_user((xfs_handle_t *)hreq.ohandle, &handle, hsize) ||
+           copy_to_user(hreq.ohandlen, &hsize, sizeof(__s32))) {
+               iput(inode);
+               return -XFS_ERROR(EFAULT);
+       }
+
+       iput(inode);
+       return 0;
+}
+
+
+/*
+ * Convert userspace handle data into vnode (and inode).
+ * We [ab]use the fact that all the fsop_handlereq ioctl calls
+ * have a data structure argument whose first component is always
+ * a xfs_fsop_handlereq_t, so we can cast to and from this type.
+ * This allows us to optimise the copy_from_user calls and gives
+ * a handy, shared routine.
+ *
+ * If no error, caller must always VN_RELE the returned vp.
+ */
+STATIC int
+xfs_vget_fsop_handlereq(
+       xfs_mount_t             *mp,
+       struct inode            *parinode,      /* parent inode pointer    */
+       int                     cap,            /* capability level for op */
+       unsigned long           arg,            /* userspace data pointer  */
+       unsigned long           size,           /* size of expected struct */
+       /* output arguments */
+       xfs_fsop_handlereq_t    *hreq,
+       vnode_t                 **vp,
+       struct inode            **inode)
+{
+       void                    *hanp;
+       size_t                  hlen;
+       xfs_fid_t               *xfid;
+       xfs_handle_t            *handlep;
+       xfs_handle_t            handle;
+       xfs_inode_t             *ip;
+       struct inode            *inodep;
+       vnode_t                 *vpp;
+       xfs_ino_t               ino;
+       __u32                   igen;
+       int                     error;
+
+       if (!capable(cap))
+               return XFS_ERROR(EPERM);
+
+       /*
+        * Only allow handle opens under a directory.
+        */
+       if (!S_ISDIR(parinode->i_mode))
+               return XFS_ERROR(ENOTDIR);
+
+       /*
+        * Copy the handle down from the user and validate
+        * that it looks to be in the correct format.
+        */
+       if (copy_from_user(hreq, (struct xfs_fsop_handlereq *)arg, size))
+               return XFS_ERROR(EFAULT);
+
+       hanp = hreq->ihandle;
+       hlen = hreq->ihandlen;
+       handlep = &handle;
+
+       if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
+               return XFS_ERROR(EINVAL);
+       if (copy_from_user(handlep, hanp, hlen))
+               return XFS_ERROR(EFAULT);
+       if (hlen < sizeof(*handlep))
+               memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
+       if (hlen > sizeof(handlep->ha_fsid)) {
+               if (handlep->ha_fid.xfs_fid_len !=
+                               (hlen - sizeof(handlep->ha_fsid)
+                                       - sizeof(handlep->ha_fid.xfs_fid_len))
+                   || handlep->ha_fid.xfs_fid_pad)
+                       return XFS_ERROR(EINVAL);
+       }
+
+       /*
+        * Crack the handle, obtain the inode # & generation #
+        */
+       xfid = (struct xfs_fid *)&handlep->ha_fid;
+       if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
+               ino  = xfid->xfs_fid_ino;
+               igen = xfid->xfs_fid_gen;
+       } else {
+               return XFS_ERROR(EINVAL);
+       }
+
+       /*
+        * Get the XFS inode, building a vnode to go with it.
+        */
+       error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0);
+       if (error)
+               return error;
+       if (ip == NULL)
+               return XFS_ERROR(EIO);
+       if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
+               xfs_iput_new(ip, XFS_ILOCK_SHARED);
+               return XFS_ERROR(ENOENT);
+       }
+
+       vpp = XFS_ITOV(ip);
+       inodep = LINVFS_GET_IP(vpp);
+       xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+       *vp = vpp;
+       *inode = inodep;
+       return 0;
+}
+
+STATIC int
+xfs_open_by_handle(
+       xfs_mount_t             *mp,
+       unsigned long           arg,
+       struct file             *parfilp,
+       struct inode            *parinode)
+{
+       int                     error;
+       int                     new_fd;
+       int                     permflag;
+       struct file             *filp;
+       struct inode            *inode;
+       struct dentry           *dentry;
+       vnode_t                 *vp;
+       xfs_fsop_handlereq_t    hreq;
+
+       error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
+                                       sizeof(xfs_fsop_handlereq_t),
+                                       &hreq, &vp, &inode);
+       if (error)
+               return -error;
+
+       /* Restrict xfs_open_by_handle to directories & regular files. */
+       if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
+               iput(inode);
+               return -XFS_ERROR(EINVAL);
+       }
+
+#if BITS_PER_LONG != 32
+       hreq.oflags |= O_LARGEFILE;
+#endif
+       /* Put open permission in namei format. */
+       permflag = hreq.oflags;
+       if ((permflag+1) & O_ACCMODE)
+               permflag++;
+       if (permflag & O_TRUNC)
+               permflag |= 2;
+
+       if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
+           (permflag & FMODE_WRITE) && IS_APPEND(inode)) {
+               iput(inode);
+               return -XFS_ERROR(EPERM);
+       }
+
+       if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
+               iput(inode);
+               return -XFS_ERROR(EACCES);
+       }
+
+       /* Can't write directories. */
+       if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
+               iput(inode);
+               return -XFS_ERROR(EISDIR);
+       }
+
+       if ((new_fd = get_unused_fd()) < 0) {
+               iput(inode);
+               return new_fd;
+       }
+
+       dentry = d_alloc_anon(inode);
+       if (dentry == NULL) {
+               iput(inode);
+               put_unused_fd(new_fd);
+               return -XFS_ERROR(ENOMEM);
+       }
+
+       /* Ensure umount returns EBUSY on umounts while this file is open. */
+       mntget(parfilp->f_vfsmnt);
+
+       /* Create file pointer. */
+       filp = dentry_open(dentry, parfilp->f_vfsmnt, hreq.oflags);
+       if (IS_ERR(filp)) {
+               put_unused_fd(new_fd);
+               return -XFS_ERROR(-PTR_ERR(filp));
+       }
+       if (inode->i_mode & S_IFREG)
+               filp->f_op = &linvfs_invis_file_operations;
+
+       fd_install(new_fd, filp);
+       return new_fd;
+}
+
+STATIC int
+xfs_readlink_by_handle(
+       xfs_mount_t             *mp,
+       unsigned long           arg,
+       struct file             *parfilp,
+       struct inode            *parinode)
+{
+       int                     error;
+       struct iovec            aiov;
+       struct uio              auio;
+       struct inode            *inode;
+       xfs_fsop_handlereq_t    hreq;
+       vnode_t                 *vp;
+       __u32                   olen;
+
+       error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
+                                       sizeof(xfs_fsop_handlereq_t),
+                                       &hreq, &vp, &inode);
+       if (error)
+               return -error;
+
+       /* Restrict this handle operation to symlinks only. */
+       if (vp->v_type != VLNK) {
+               VN_RELE(vp);
+               return -XFS_ERROR(EINVAL);
+       }
+
+       if (copy_from_user(&olen, hreq.ohandlen, sizeof(__u32))) {
+               VN_RELE(vp);
+               return -XFS_ERROR(EFAULT);
+       }
+       aiov.iov_len    = olen;
+       aiov.iov_base   = hreq.ohandle;
+
+       auio.uio_iov    = &aiov;
+       auio.uio_iovcnt = 1;
+       auio.uio_offset = 0;
+       auio.uio_segflg = UIO_USERSPACE;
+       auio.uio_resid  = olen;
+
+       VOP_READLINK(vp, &auio, IO_INVIS, NULL, error);
+
+       VN_RELE(vp);
+       return (olen - auio.uio_resid);
+}
+
+STATIC int
+xfs_fssetdm_by_handle(
+       xfs_mount_t             *mp,
+       unsigned long           arg,
+       struct file             *parfilp,
+       struct inode            *parinode)
+{
+       int                     error;
+       struct fsdmidata        fsd;
+       xfs_fsop_setdm_handlereq_t dmhreq;
+       struct inode            *inode;
+       bhv_desc_t              *bdp;
+       vnode_t                 *vp;
+
+       error = xfs_vget_fsop_handlereq(mp, parinode, CAP_MKNOD, arg,
+                                       sizeof(xfs_fsop_setdm_handlereq_t),
+                                       (xfs_fsop_handlereq_t *)&dmhreq,
+                                       &vp, &inode);
+       if (error)
+               return -error;
+
+       if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
+               VN_RELE(vp);
+               return -XFS_ERROR(EPERM);
+       }
+
+       if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) {
+               VN_RELE(vp);
+               return -XFS_ERROR(EFAULT);
+       }
+
+       bdp = bhv_base_unlocked(VN_BHV_HEAD(vp));
+       error = xfs_set_dmattrs(bdp, fsd.fsd_dmevmask, fsd.fsd_dmstate, NULL);
+
+       VN_RELE(vp);
+       if (error)
+               return -error;
+       return 0;
+}
+
+/*
+ * XFS_IOC_ATTRLIST_BY_HANDLE: list the extended attributes of the
+ * inode named by a userspace file handle.
+ *
+ * 'arg' points at an xfs_fsop_attrlist_handlereq_t whose embedded
+ * handle is resolved (requires CAP_SYS_ADMIN); the attribute list is
+ * written into the caller-supplied buffer and the resume cursor kept
+ * in al_hreq.pos.  Returns 0 or a negated errno.
+ */
+STATIC int
+xfs_attrlist_by_handle(
+       xfs_mount_t             *mp,
+       unsigned long           arg,
+       struct file             *parfilp,
+       struct inode            *parinode)
+{
+       int                     error;
+       attrlist_cursor_kern_t  *cursor;
+       xfs_fsop_attrlist_handlereq_t al_hreq;
+       struct inode            *inode;
+       vnode_t                 *vp;
+
+       error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
+                                       sizeof(xfs_fsop_attrlist_handlereq_t),
+                                       (xfs_fsop_handlereq_t *)&al_hreq,
+                                       &vp, &inode);
+       if (error)
+               return -error;
+
+       /*
+        * buflen comes straight from userspace; cap it before handing it
+        * to VOP_ATTR_LIST so an absurd length cannot drive the listing
+        * code past any sane buffer (65536 mirrors XATTR_LIST_MAX).
+        */
+       if (al_hreq.buflen > 65536) {
+               VN_RELE(vp);
+               return -XFS_ERROR(EINVAL);
+       }
+
+       /* Resume cursor lives inside the request structure itself. */
+       cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
+       VOP_ATTR_LIST(vp, al_hreq.buffer, al_hreq.buflen, al_hreq.flags,
+                       cursor, NULL, error);
+       VN_RELE(vp);
+       if (error)
+               return -error;
+       return 0;
+}
+
+/*
+ * XFS_IOC_ATTRMULTI_BY_HANDLE: perform a batch of extended-attribute
+ * get/set/remove operations against the inode named by a userspace
+ * file handle.  Per-operation status is returned in each op's
+ * am_error field; the ioctl itself returns 0 or a negated errno.
+ */
+STATIC int
+xfs_attrmulti_by_handle(
+       xfs_mount_t             *mp,
+       unsigned long           arg,
+       struct file             *parfilp,
+       struct inode            *parinode)
+{
+       int                     error;
+       xfs_attr_multiop_t      *ops;
+       xfs_fsop_attrmulti_handlereq_t am_hreq;
+       struct inode            *inode;
+       vnode_t                 *vp;
+       int                     i, size;
+
+       error = xfs_vget_fsop_handlereq(mp, parinode, CAP_SYS_ADMIN, arg,
+                                       sizeof(xfs_fsop_attrmulti_handlereq_t),
+                                       (xfs_fsop_handlereq_t *)&am_hreq,
+                                       &vp, &inode);
+       if (error)
+               return -error;
+
+       /*
+        * opcount comes straight from userspace: reject non-positive or
+        * huge values before they feed the kmalloc size computation
+        * below (a large opcount would overflow the int 'size').
+        */
+       if (am_hreq.opcount <= 0 || am_hreq.opcount > 65536) {
+               VN_RELE(vp);
+               return -XFS_ERROR(EINVAL);
+       }
+
+       /* Size the array by the type we actually allocate and index. */
+       size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
+       ops = (xfs_attr_multiop_t *)kmalloc(size, GFP_KERNEL);
+       if (!ops) {
+               VN_RELE(vp);
+               return -XFS_ERROR(ENOMEM);
+       }
+
+       if (copy_from_user(ops, am_hreq.ops, size)) {
+               kfree(ops);
+               VN_RELE(vp);
+               return -XFS_ERROR(EFAULT);
+       }
+
+       /*
+        * Execute each op; failures are recorded per-op in am_error so
+        * the batch always runs to completion.
+        */
+       for (i = 0; i < am_hreq.opcount; i++) {
+               switch(ops[i].am_opcode) {
+               case ATTR_OP_GET:
+                       VOP_ATTR_GET(vp,ops[i].am_attrname, ops[i].am_attrvalue,
+                                       &ops[i].am_length, ops[i].am_flags,
+                                       NULL, ops[i].am_error);
+                       break;
+               case ATTR_OP_SET:
+                       if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
+                               ops[i].am_error = EPERM;
+                               break;
+                       }
+                       VOP_ATTR_SET(vp,ops[i].am_attrname, ops[i].am_attrvalue,
+                                       ops[i].am_length, ops[i].am_flags,
+                                       NULL, ops[i].am_error);
+                       break;
+               case ATTR_OP_REMOVE:
+                       if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
+                               ops[i].am_error = EPERM;
+                               break;
+                       }
+                       VOP_ATTR_REMOVE(vp, ops[i].am_attrname, ops[i].am_flags,
+                                       NULL, ops[i].am_error);
+                       break;
+               default:
+                       ops[i].am_error = EINVAL;
+               }
+       }
+
+       /* Copy per-op results (am_error, am_length) back to the caller. */
+       if (copy_to_user(am_hreq.ops, ops, size))
+               error = -XFS_ERROR(EFAULT);
+
+       kfree(ops);
+       VN_RELE(vp);
+       return error;
+}
+
+/*
+ * Prototypes for a few of the stack-hungry ioctl cases that have their
+ * own functions.  The functions are defined after their use so gcc
+ * doesn't get fancy and inline them with -O3, which would put their
+ * large locals back on xfs_ioctl()'s stack frame.
+ */
+
+STATIC int
+xfs_ioc_space(
+       bhv_desc_t              *bdp,
+       vnode_t                 *vp,
+       struct file             *filp,
+       int                     flags,
+       unsigned int            cmd,
+       unsigned long           arg);
+
+STATIC int
+xfs_ioc_bulkstat(
+       xfs_mount_t             *mp,
+       unsigned int            cmd,
+       unsigned long           arg);
+
+STATIC int
+xfs_ioc_fsgeometry_v1(
+       xfs_mount_t             *mp,
+       unsigned long           arg);
+
+STATIC int
+xfs_ioc_fsgeometry(
+       xfs_mount_t             *mp,
+       unsigned long           arg);
+
+STATIC int
+xfs_ioc_xattr(
+       vnode_t                 *vp,
+       xfs_inode_t             *ip,
+       struct file             *filp,
+       unsigned int            cmd,
+       unsigned long           arg);
+
+STATIC int
+xfs_ioc_getbmap(
+       bhv_desc_t              *bdp,
+       struct file             *filp,
+       int                     flags,
+       unsigned int            cmd,
+       unsigned long           arg);
+
+STATIC int
+xfs_ioc_getbmapx(
+       bhv_desc_t              *bdp,
+       unsigned long           arg);
+
+/*
+ * Top-level dispatcher for XFS-specific ioctls on files and
+ * directories.  Small cases are handled inline; the stack-hungry ones
+ * are farmed out to the xfs_ioc_* helpers declared above.
+ *
+ * Returns 0 on success, a negated errno on failure, and -ENOTTY for
+ * commands XFS does not recognize.  Internal XFS routines return
+ * positive errnos, hence the pervasive 'return -error'.
+ */
+int
+xfs_ioctl(
+       bhv_desc_t              *bdp,
+       struct inode            *inode,
+       struct file             *filp,
+       int                     ioflags,
+       unsigned int            cmd,
+       unsigned long           arg)
+{
+       int                     error;
+       vnode_t                 *vp;
+       xfs_inode_t             *ip;
+       xfs_mount_t             *mp;
+
+       vp = LINVFS_GET_VP(inode);
+
+       vn_trace_entry(vp, "xfs_ioctl", (inst_t *)__return_address);
+
+       ip = XFS_BHVTOI(bdp);
+       mp = ip->i_mount;
+
+       switch (cmd) {
+
+       case XFS_IOC_ALLOCSP:
+       case XFS_IOC_FREESP:
+       case XFS_IOC_RESVSP:
+       case XFS_IOC_UNRESVSP:
+       case XFS_IOC_ALLOCSP64:
+       case XFS_IOC_FREESP64:
+       case XFS_IOC_RESVSP64:
+       case XFS_IOC_UNRESVSP64:
+               /*
+                * Only allow the sys admin to reserve space unless
+                * unwritten extents are enabled.
+                */
+               if (!XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb) &&
+                   !capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               return xfs_ioc_space(bdp, vp, filp, ioflags, cmd, arg);
+
+       case XFS_IOC_DIOINFO: {
+               /* Report the alignment/size constraints for direct I/O. */
+               struct dioattr  da;
+               xfs_buftarg_t   *target =
+                       (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
+                       mp->m_rtdev_targp : mp->m_ddev_targp;
+
+               da.d_mem = da.d_miniosz = 1 << target->pbr_sshift;
+               /* The size dio will do in one go */
+               da.d_maxiosz = 64 * PAGE_CACHE_SIZE;
+
+               if (copy_to_user((struct dioattr *)arg, &da, sizeof(da)))
+                       return -XFS_ERROR(EFAULT);
+               return 0;
+       }
+
+       case XFS_IOC_FSBULKSTAT_SINGLE:
+       case XFS_IOC_FSBULKSTAT:
+       case XFS_IOC_FSINUMBERS:
+               return xfs_ioc_bulkstat(mp, cmd, arg);
+
+       case XFS_IOC_FSGEOMETRY_V1:
+               return xfs_ioc_fsgeometry_v1(mp, arg);
+
+       case XFS_IOC_FSGEOMETRY:
+               return xfs_ioc_fsgeometry(mp, arg);
+
+       case XFS_IOC_GETVERSION:
+       case XFS_IOC_GETXFLAGS:
+       case XFS_IOC_SETXFLAGS:
+       case XFS_IOC_FSGETXATTR:
+       case XFS_IOC_FSSETXATTR:
+       case XFS_IOC_FSGETXATTRA:
+               return xfs_ioc_xattr(vp, ip, filp, cmd, arg);
+
+       case XFS_IOC_FSSETDM: {
+               /* Set DMAPI event mask/state on this open file's inode. */
+               struct fsdmidata        dmi;
+
+               if (copy_from_user(&dmi, (struct fsdmidata *)arg, sizeof(dmi)))
+                       return -XFS_ERROR(EFAULT);
+
+               error = xfs_set_dmattrs(bdp, dmi.fsd_dmevmask, dmi.fsd_dmstate,
+                                                       NULL);
+               return -error;
+       }
+
+       case XFS_IOC_GETBMAP:
+       case XFS_IOC_GETBMAPA:
+               return xfs_ioc_getbmap(bdp, filp, ioflags, cmd, arg);
+
+       case XFS_IOC_GETBMAPX:
+               return xfs_ioc_getbmapx(bdp, arg);
+
+       case XFS_IOC_FD_TO_HANDLE:
+       case XFS_IOC_PATH_TO_HANDLE:
+       case XFS_IOC_PATH_TO_FSHANDLE:
+               return xfs_find_handle(cmd, arg);
+
+       case XFS_IOC_OPEN_BY_HANDLE:
+               return xfs_open_by_handle(mp, arg, filp, inode);
+
+       case XFS_IOC_FSSETDM_BY_HANDLE:
+               return xfs_fssetdm_by_handle(mp, arg, filp, inode);
+
+       case XFS_IOC_READLINK_BY_HANDLE:
+               return xfs_readlink_by_handle(mp, arg, filp, inode);
+
+       case XFS_IOC_ATTRLIST_BY_HANDLE:
+               return xfs_attrlist_by_handle(mp, arg, filp, inode);
+
+       case XFS_IOC_ATTRMULTI_BY_HANDLE:
+               return xfs_attrmulti_by_handle(mp, arg, filp, inode);
+
+       case XFS_IOC_SWAPEXT: {
+               error = xfs_swapext((struct xfs_swapext *)arg);
+               return -error;
+       }
+
+       case XFS_IOC_FSCOUNTS: {
+               /* Snapshot of free-space / inode counters. */
+               xfs_fsop_counts_t out;
+
+               error = xfs_fs_counts(mp, &out);
+               if (error)
+                       return -error;
+
+               if (copy_to_user((char *)arg, &out, sizeof(out)))
+                       return -XFS_ERROR(EFAULT);
+               return 0;
+       }
+
+       case XFS_IOC_SET_RESBLKS: {
+               xfs_fsop_resblks_t inout;
+               __uint64_t         in;
+
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               if (copy_from_user(&inout, (char *)arg, sizeof(inout)))
+                       return -XFS_ERROR(EFAULT);
+
+               /* input parameter is passed in resblks field of structure */
+               in = inout.resblks;
+               error = xfs_reserve_blocks(mp, &in, &inout);
+               if (error)
+                       return -error;
+
+               if (copy_to_user((char *)arg, &inout, sizeof(inout)))
+                       return -XFS_ERROR(EFAULT);
+               return 0;
+       }
+
+       case XFS_IOC_GET_RESBLKS: {
+               xfs_fsop_resblks_t out;
+
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               /* NULL input pointer means "query only, don't change". */
+               error = xfs_reserve_blocks(mp, NULL, &out);
+               if (error)
+                       return -error;
+
+               if (copy_to_user((char *)arg, &out, sizeof(out)))
+                       return -XFS_ERROR(EFAULT);
+
+               return 0;
+       }
+
+       case XFS_IOC_FSGROWFSDATA: {
+               xfs_growfs_data_t in;
+
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               if (copy_from_user(&in, (char *)arg, sizeof(in)))
+                       return -XFS_ERROR(EFAULT);
+
+               error = xfs_growfs_data(mp, &in);
+               return -error;
+       }
+
+       case XFS_IOC_FSGROWFSLOG: {
+               xfs_growfs_log_t in;
+
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               if (copy_from_user(&in, (char *)arg, sizeof(in)))
+                       return -XFS_ERROR(EFAULT);
+
+               error = xfs_growfs_log(mp, &in);
+               return -error;
+       }
+
+       case XFS_IOC_FSGROWFSRT: {
+               xfs_growfs_rt_t in;
+
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               if (copy_from_user(&in, (char *)arg, sizeof(in)))
+                       return -XFS_ERROR(EFAULT);
+
+               error = xfs_growfs_rt(mp, &in);
+               return -error;
+       }
+
+       case XFS_IOC_FREEZE:
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               /* Block-device level freeze; paired with XFS_IOC_THAW. */
+               freeze_bdev(inode->i_sb->s_bdev);
+               return 0;
+
+       case XFS_IOC_THAW:
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+               thaw_bdev(inode->i_sb->s_bdev, inode->i_sb);
+               return 0;
+
+       case XFS_IOC_GOINGDOWN: {
+               /* Force-shutdown the filesystem (test/emergency hook). */
+               __uint32_t in;
+
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               if (get_user(in, (__uint32_t *)arg))
+                       return -XFS_ERROR(EFAULT);
+
+               error = xfs_fs_goingdown(mp, in);
+               return -error;
+       }
+
+       case XFS_IOC_ERROR_INJECTION: {
+               xfs_error_injection_t in;
+
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               if (copy_from_user(&in, (char *)arg, sizeof(in)))
+                       return -XFS_ERROR(EFAULT);
+
+               error = xfs_errortag_add(in.errtag, mp);
+               return -error;
+       }
+
+       case XFS_IOC_ERROR_CLEARALL:
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               error = xfs_errortag_clearall(mp);
+               return -error;
+
+       default:
+               return -ENOTTY;
+       }
+}
+
+/*
+ * Space preallocation / reservation ioctls (XFS_IOC_ALLOCSP,
+ * FREESP, RESVSP, UNRESVSP and their 64-bit variants).
+ *
+ * Copies an xfs_flock64_t from 'arg' and passes it on to
+ * xfs_change_file_space.  The file must be a regular file, open for
+ * writing, and neither immutable nor append-only.  Returns 0 or a
+ * negated errno.
+ */
+STATIC int
+xfs_ioc_space(
+       bhv_desc_t              *bdp,
+       vnode_t                 *vp,
+       struct file             *filp,
+       int                     ioflags,
+       unsigned int            cmd,
+       unsigned long           arg)
+{
+       xfs_flock64_t           bf;
+       int                     attr_flags = 0;
+       int                     error;
+
+       if (vp->v_inode.i_flags & (S_IMMUTABLE|S_APPEND))
+               return -XFS_ERROR(EPERM);
+
+       /*
+        * FMODE_WRITE is an f_mode bit, not an f_flags bit.  Testing it
+        * against f_flags wrongly rejected O_WRONLY opens (O_WRONLY is
+        * bit 0 of f_flags while FMODE_WRITE is bit 1 of f_mode).
+        */
+       if (!(filp->f_mode & FMODE_WRITE))
+               return -XFS_ERROR(EBADF);
+
+       if (vp->v_type != VREG)
+               return -XFS_ERROR(EINVAL);
+
+       if (copy_from_user(&bf, (xfs_flock64_t *)arg, sizeof(bf)))
+               return -XFS_ERROR(EFAULT);
+
+       /* Non-blocking opens must not sleep waiting for DMAPI events. */
+       if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
+               attr_flags |= ATTR_NONBLOCK;
+       if (ioflags & IO_INVIS)
+               attr_flags |= ATTR_DMI;
+
+       error = xfs_change_file_space(bdp, cmd, &bf, filp->f_pos,
+                                             NULL, attr_flags);
+       return -error;
+}
+
+/*
+ * Bulk inode stat ioctls: XFS_IOC_FSBULKSTAT, XFS_IOC_FSBULKSTAT_SINGLE
+ * and XFS_IOC_FSINUMBERS.  Copies an xfs_fsop_bulkreq_t from 'arg',
+ * walks inodes starting after *lastip, and fills the user-supplied
+ * record buffer.  Requires CAP_SYS_ADMIN.  Returns 0 or a negated
+ * errno.
+ */
+STATIC int
+xfs_ioc_bulkstat(
+       xfs_mount_t             *mp,
+       unsigned int            cmd,
+       unsigned long           arg)
+{
+       xfs_fsop_bulkreq_t      bulkreq;
+       int                     count;  /* # of records returned */
+       xfs_ino_t               inlast; /* last inode number */
+       int                     done;
+       int                     error;
+
+       /* done = 1 if there are more stats to get and if bulkstat */
+       /* should be called again (unused here, but used in dmapi) */
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -XFS_ERROR(EIO);
+
+       if (copy_from_user(&bulkreq, (xfs_fsop_bulkreq_t *)arg,
+                                       sizeof(xfs_fsop_bulkreq_t)))
+               return -XFS_ERROR(EFAULT);
+
+       /* The restart cursor is read through a user pointer, not inline. */
+       if (copy_from_user(&inlast, (__s64 *)bulkreq.lastip,
+                                               sizeof(__s64)))
+               return -XFS_ERROR(EFAULT);
+
+       if ((count = bulkreq.icount) <= 0)
+               return -XFS_ERROR(EINVAL);
+
+       if (cmd == XFS_IOC_FSINUMBERS)
+               error = xfs_inumbers(mp, &inlast, &count,
+                                               bulkreq.ubuffer);
+       else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
+               error = xfs_bulkstat_single(mp, &inlast,
+                                               bulkreq.ubuffer, &done);
+       else {  /* XFS_IOC_FSBULKSTAT */
+               /* A one-record request resuming mid-scan is served by the
+                * cheaper single-inode path on the next inode number. */
+               if (count == 1 && inlast != 0) {
+                       inlast++;
+                       error = xfs_bulkstat_single(mp, &inlast,
+                                       bulkreq.ubuffer, &done);
+               } else {
+                       error = xfs_bulkstat(mp, &inlast, &count,
+                               (bulkstat_one_pf)xfs_bulkstat_one, NULL,
+                               sizeof(xfs_bstat_t), bulkreq.ubuffer,
+                               BULKSTAT_FG_QUICK, &done);
+               }
+       }
+
+       if (error)
+               return -error;
+
+       /*
+        * NOTE(review): the updated cursor (lastip) is only copied back
+        * when the caller supplied an ocount pointer -- confirm callers
+        * never pass ocount == NULL while expecting lastip to advance.
+        */
+       if (bulkreq.ocount != NULL) {
+               if (copy_to_user((xfs_ino_t *)bulkreq.lastip, &inlast,
+                                               sizeof(xfs_ino_t)))
+                       return -XFS_ERROR(EFAULT);
+
+               if (copy_to_user((__s32 *)bulkreq.ocount, &count,
+                                               sizeof(count)))
+                       return -XFS_ERROR(EFAULT);
+       }
+
+       return 0;
+}
+
+/*
+ * XFS_IOC_FSGEOMETRY_V1: legacy geometry query.  Fetches geometry at
+ * compatibility level 3 and copies the smaller v1 structure back to
+ * userspace.  Returns 0 or a negated errno.
+ */
+STATIC int
+xfs_ioc_fsgeometry_v1(
+       xfs_mount_t             *mp,
+       unsigned long           arg)
+{
+       xfs_fsop_geom_v1_t      geo_v1;
+       int                     err;
+
+       err = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&geo_v1, 3);
+       if (err)
+               return -err;
+       if (copy_to_user((xfs_fsop_geom_t *)arg, &geo_v1, sizeof(geo_v1)))
+               return -XFS_ERROR(EFAULT);
+       return 0;
+}
+
+/*
+ * XFS_IOC_FSGEOMETRY: current (v4) geometry query.  Fills an
+ * xfs_fsop_geom_t and copies it to userspace.  Returns 0 or a
+ * negated errno.
+ */
+STATIC int
+xfs_ioc_fsgeometry(
+       xfs_mount_t             *mp,
+       unsigned long           arg)
+{
+       xfs_fsop_geom_t         geo;
+       int                     err;
+
+       err = xfs_fs_geometry(mp, &geo, 4);
+       if (err)
+               return -err;
+       if (copy_to_user((xfs_fsop_geom_t *)arg, &geo, sizeof(geo)))
+               return -XFS_ERROR(EFAULT);
+       return 0;
+}
+
+/*
+ * Linux extended inode flags interface.
+ * NOTE(review): these bit values appear to mirror the ext2-style
+ * FS_IOC_GETFLAGS/SETFLAGS flag bits -- confirm against the kernel's
+ * flag definitions before changing any value.
+ */
+#define LINUX_XFLAG_SYNC       0x00000008 /* Synchronous updates */
+#define LINUX_XFLAG_IMMUTABLE  0x00000010 /* Immutable file */
+#define LINUX_XFLAG_APPEND     0x00000020 /* writes to file may only append */
+#define LINUX_XFLAG_NODUMP     0x00000040 /* do not dump file */
+#define LINUX_XFLAG_NOATIME    0x00000080 /* do not update atime */
+
+/*
+ * Overlay the Linux-interface flag bits onto an existing XFS xflags
+ * word: first clear every bit this interface controls, then set those
+ * present in the incoming LINUX_XFLAG_* mask.  Bits outside that set
+ * are passed through from 'start' untouched.
+ */
+STATIC unsigned int
+xfs_merge_ioc_xflags(
+       unsigned int    flags,
+       unsigned int    start)
+{
+       unsigned int    xflags;
+
+       xflags = start & ~(XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND |
+                          XFS_XFLAG_SYNC | XFS_XFLAG_NOATIME |
+                          XFS_XFLAG_NODUMP);
+
+       if (flags & LINUX_XFLAG_IMMUTABLE)
+               xflags |= XFS_XFLAG_IMMUTABLE;
+       if (flags & LINUX_XFLAG_APPEND)
+               xflags |= XFS_XFLAG_APPEND;
+       if (flags & LINUX_XFLAG_SYNC)
+               xflags |= XFS_XFLAG_SYNC;
+       if (flags & LINUX_XFLAG_NOATIME)
+               xflags |= XFS_XFLAG_NOATIME;
+       if (flags & LINUX_XFLAG_NODUMP)
+               xflags |= XFS_XFLAG_NODUMP;
+
+       return xflags;
+}
+
+/*
+ * Translate on-disk XFS_DIFLAG_* inode flag bits into the
+ * LINUX_XFLAG_* bits exposed through the GETXFLAGS interface.
+ */
+STATIC unsigned int
+xfs_di2lxflags(
+       __uint16_t      di_flags)
+{
+       static const struct {
+               __uint16_t      di;     /* on-disk bit */
+               unsigned int    lx;     /* Linux interface bit */
+       } fmap[] = {
+               { XFS_DIFLAG_IMMUTABLE, LINUX_XFLAG_IMMUTABLE },
+               { XFS_DIFLAG_APPEND,    LINUX_XFLAG_APPEND },
+               { XFS_DIFLAG_SYNC,      LINUX_XFLAG_SYNC },
+               { XFS_DIFLAG_NOATIME,   LINUX_XFLAG_NOATIME },
+               { XFS_DIFLAG_NODUMP,    LINUX_XFLAG_NODUMP },
+       };
+       unsigned int    flags = 0;
+       int             i;
+
+       for (i = 0; i < (int)(sizeof(fmap) / sizeof(fmap[0])); i++) {
+               if (di_flags & fmap[i].di)
+                       flags |= fmap[i].lx;
+       }
+       return flags;
+}
+
+/*
+ * Extended-attribute / inode-flag ioctls: FSGETXATTR, FSSETXATTR,
+ * FSGETXATTRA, GETXFLAGS, SETXFLAGS and GETVERSION.  All operate
+ * through the vnode getattr/setattr path.  Returns 0 or a negated
+ * errno; -ENOTTY for a command this helper does not handle.
+ */
+STATIC int
+xfs_ioc_xattr(
+       vnode_t                 *vp,
+       xfs_inode_t             *ip,
+       struct file             *filp,
+       unsigned int            cmd,
+       unsigned long           arg)
+{
+       struct fsxattr          fa;
+       vattr_t                 va;
+       int                     error;
+       int                     attr_flags;
+       unsigned int            flags;
+
+       switch (cmd) {
+       /* Read xflags, extent size and data-fork extent count. */
+       case XFS_IOC_FSGETXATTR: {
+               va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS;
+               VOP_GETATTR(vp, &va, 0, NULL, error);
+               if (error)
+                       return -error;
+
+               fa.fsx_xflags   = va.va_xflags;
+               fa.fsx_extsize  = va.va_extsize;
+               fa.fsx_nextents = va.va_nextents;
+
+               if (copy_to_user((struct fsxattr *)arg, &fa, sizeof(fa)))
+                       return -XFS_ERROR(EFAULT);
+               return 0;
+       }
+
+       /* Set xflags and extent size from a struct fsxattr. */
+       case XFS_IOC_FSSETXATTR: {
+               if (copy_from_user(&fa, (struct fsxattr *)arg, sizeof(fa)))
+                       return -XFS_ERROR(EFAULT);
+
+               attr_flags = 0;
+               if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
+                       attr_flags |= ATTR_NONBLOCK;
+
+               va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE;
+               va.va_xflags  = fa.fsx_xflags;
+               va.va_extsize = fa.fsx_extsize;
+
+               VOP_SETATTR(vp, &va, attr_flags, NULL, error);
+               if (!error)
+                       vn_revalidate(vp);      /* update Linux inode flags */
+               return -error;
+       }
+
+       /* Like FSGETXATTR but reports the attribute-fork extent count. */
+       case XFS_IOC_FSGETXATTRA: {
+               va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_ANEXTENTS;
+               VOP_GETATTR(vp, &va, 0, NULL, error);
+               if (error)
+                       return -error;
+
+               fa.fsx_xflags   = va.va_xflags;
+               fa.fsx_extsize  = va.va_extsize;
+               fa.fsx_nextents = va.va_anextents;
+
+               if (copy_to_user((struct fsxattr *)arg, &fa, sizeof(fa)))
+                       return -XFS_ERROR(EFAULT);
+               return 0;
+       }
+
+       /* Report the Linux-style LINUX_XFLAG_* view of the inode flags. */
+       case XFS_IOC_GETXFLAGS: {
+               flags = xfs_di2lxflags(ip->i_d.di_flags);
+               if (copy_to_user((unsigned int *)arg, &flags, sizeof(flags)))
+                       return -XFS_ERROR(EFAULT);
+               return 0;
+       }
+
+       /* Set the Linux-style flags; unsupported bits are rejected. */
+       case XFS_IOC_SETXFLAGS: {
+               if (copy_from_user(&flags, (unsigned int *)arg, sizeof(flags)))
+                       return -XFS_ERROR(EFAULT);
+
+               if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \
+                             LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \
+                             LINUX_XFLAG_SYNC))
+                       return -XFS_ERROR(EOPNOTSUPP);
+
+               attr_flags = 0;
+               if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
+                       attr_flags |= ATTR_NONBLOCK;
+
+               /* Merge the new bits onto the current on-disk xflags. */
+               va.va_mask = XFS_AT_XFLAGS;
+               va.va_xflags = xfs_merge_ioc_xflags(flags,
+                               xfs_dic2xflags(&ip->i_d, ARCH_NOCONVERT));
+
+               VOP_SETATTR(vp, &va, attr_flags, NULL, error);
+               if (!error)
+                       vn_revalidate(vp);      /* update Linux inode flags */
+               return -error;
+       }
+
+       /* Report the inode generation number. */
+       case XFS_IOC_GETVERSION: {
+               flags = LINVFS_GET_IP(vp)->i_generation;
+               if (copy_to_user((unsigned int *)arg, &flags, sizeof(flags)))
+                       return -XFS_ERROR(EFAULT);
+               return 0;
+       }
+
+       default:
+               return -ENOTTY;
+       }
+}
+
+/*
+ * XFS_IOC_GETBMAP / XFS_IOC_GETBMAPA: return a file's block mapping.
+ * The user buffer at 'arg' begins with a struct getbmap header
+ * describing the request; the extent records are written immediately
+ * after it (hence the 'arg + 1' output pointer below).  GETBMAPA
+ * maps the attribute fork instead of the data fork.  Returns 0 or a
+ * negated errno.
+ */
+STATIC int
+xfs_ioc_getbmap(
+       bhv_desc_t              *bdp,
+       struct file             *filp,
+       int                     ioflags,
+       unsigned int            cmd,
+       unsigned long           arg)
+{
+       struct getbmap          bm;
+       int                     iflags;
+       int                     error;
+
+       if (copy_from_user(&bm, (struct getbmap *)arg, sizeof(bm)))
+               return -XFS_ERROR(EFAULT);
+
+       /* Need room for the header plus at least one extent record. */
+       if (bm.bmv_count < 2)
+               return -XFS_ERROR(EINVAL);
+
+       iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
+       if (ioflags & IO_INVIS)
+               iflags |= BMV_IF_NO_DMAPI_READ;
+
+       error = xfs_getbmap(bdp, &bm, (struct getbmap *)arg+1, iflags);
+       if (error)
+               return -error;
+
+       /* Write the updated header (entry count, next offset) back. */
+       if (copy_to_user((struct getbmap *)arg, &bm, sizeof(bm)))
+               return -XFS_ERROR(EFAULT);
+       return 0;
+}
+
+/*
+ * XFS_IOC_GETBMAPX: extended block-map query.  Same layout convention
+ * as getbmap -- a struct getbmapx header at 'arg' followed by the
+ * output records -- but with caller-supplied iflags and extra per-
+ * extent information.  Returns 0 or a negated errno.
+ */
+STATIC int
+xfs_ioc_getbmapx(
+       bhv_desc_t              *bdp,
+       unsigned long           arg)
+{
+       struct getbmapx         bmx;
+       struct getbmap          bm;
+       int                     iflags;
+       int                     error;
+
+       if (copy_from_user(&bmx, (struct getbmapx *)arg, sizeof(bmx)))
+               return -XFS_ERROR(EFAULT);
+
+       /* Need room for the header plus at least one extent record. */
+       if (bmx.bmv_count < 2)
+               return -XFS_ERROR(EINVAL);
+
+       /*
+        * Map input getbmapx structure to a getbmap
+        * structure for xfs_getbmap.
+        */
+       GETBMAP_CONVERT(bmx, bm);
+
+       iflags = bmx.bmv_iflags;
+
+       /* Reject unknown interface flag bits from userspace. */
+       if (iflags & (~BMV_IF_VALID))
+               return -XFS_ERROR(EINVAL);
+
+       iflags |= BMV_IF_EXTENDED;
+
+       error = xfs_getbmap(bdp, &bm, (struct getbmapx *)arg+1, iflags);
+       if (error)
+               return -error;
+
+       /* Convert the updated header back to getbmapx form for return. */
+       GETBMAP_CONVERT(bm, bmx);
+
+       if (copy_to_user((struct getbmapx *)arg, &bmx, sizeof(bmx)))
+               return -XFS_ERROR(EFAULT);
+
+       return 0;
+}
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
new file mode 100644 (file)
index 0000000..f0f5c87
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_IOPS_H__
+#define __XFS_IOPS_H__
+
+/* Inode operation vectors, one per inode type (linvfs = Linux VFS glue). */
+extern struct inode_operations linvfs_file_inode_operations;
+extern struct inode_operations linvfs_dir_inode_operations;
+extern struct inode_operations linvfs_symlink_inode_operations;
+
+/*
+ * File operation vectors.  NOTE(review): the "invis" variant is
+ * presumably for DMAPI invisible I/O (cf. IO_INVIS) -- confirm.
+ */
+extern struct file_operations linvfs_file_operations;
+extern struct file_operations linvfs_invis_file_operations;
+extern struct file_operations linvfs_dir_operations;
+
+/* Address-space (page cache) operations shared by all XFS inodes. */
+extern struct address_space_operations linvfs_aops;
+
+/* Buffer-head mapping helpers used by the aops implementation. */
+extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+extern void linvfs_unwritten_done(struct buffer_head *, int);
+
+/* XFS ioctl dispatcher (defined in xfs_ioctl.c). */
+extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *,
+                        int, unsigned int, unsigned long);
+
+#endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
new file mode 100644 (file)
index 0000000..ecaa3c0
--- /dev/null
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_LINUX__
+#define __XFS_LINUX__
+
+#include <linux/types.h>
+#include <linux/config.h>
+
+/*
+ * Some types are conditional depending on the target system.
+ * XFS_BIG_BLKNOS needs block layer disk addresses to be 64 bits.
+ * XFS_BIG_INUMS needs the VFS inode number to be 64 bits, as well
+ * as requiring XFS_BIG_BLKNOS to be set.
+ */
+#if defined(CONFIG_LBD) || (BITS_PER_LONG == 64)
+# define XFS_BIG_BLKNOS        1
+# if BITS_PER_LONG == 64
+#  define XFS_BIG_INUMS        1
+# else
+#  define XFS_BIG_INUMS        0
+# endif
+#else
+# define XFS_BIG_BLKNOS        0
+# define XFS_BIG_INUMS 0
+#endif
+
+#include <xfs_types.h>
+#include <xfs_arch.h>
+
+#include <kmem.h>
+#include <mrlock.h>
+#include <spin.h>
+#include <sv.h>
+#include <mutex.h>
+#include <sema.h>
+#include <time.h>
+
+#include <support/qsort.h>
+#include <support/ktrace.h>
+#include <support/debug.h>
+#include <support/move.h>
+#include <support/uuid.h>
+
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/swap.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/major.h>
+#include <linux/pagemap.h>
+#include <linux/vfs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/version.h>
+
+#include <asm/page.h>
+#include <asm/div64.h>
+#include <asm/param.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#include <xfs_behavior.h>
+#include <xfs_vfs.h>
+#include <xfs_cred.h>
+#include <xfs_vnode.h>
+#include <xfs_stats.h>
+#include <xfs_sysctl.h>
+#include <xfs_iops.h>
+#include <xfs_super.h>
+#include <xfs_globals.h>
+#include <xfs_fs_subr.h>
+#include <xfs_lrw.h>
+#include <xfs_buf.h>
+
+/*
+ * Feature macros (disable/enable)
+ */
+#undef  HAVE_REFCACHE  /* reference cache not needed for NFS in 2.6 */
+#define HAVE_SENDFILE  /* sendfile(2) exists in 2.6, but not in 2.4 */
+
+/*
+ * State flag for unwritten extent buffers.
+ *
+ * We need to be able to distinguish between these and delayed
+ * allocate buffers within XFS.  The generic IO path code does
+ * not need to distinguish - we use the BH_Delay flag for both
+ * delalloc and these ondisk-uninitialised buffers.
+ */
+BUFFER_FNS(PrivateStart, unwritten);
+static inline void set_buffer_unwritten_io(struct buffer_head *bh)
+{
+       bh->b_end_io = linvfs_unwritten_done;
+}
+
+#define restricted_chown       xfs_params.restrict_chown.val
+#define irix_sgid_inherit      xfs_params.sgid_inherit.val
+#define irix_symlink_mode      xfs_params.symlink_mode.val
+#define xfs_panic_mask         xfs_params.panic_mask.val
+#define xfs_error_level                xfs_params.error_level.val
+#define xfs_syncd_centisecs    xfs_params.syncd_timer.val
+#define xfs_stats_clear                xfs_params.stats_clear.val
+#define xfs_inherit_sync       xfs_params.inherit_sync.val
+#define xfs_inherit_nodump     xfs_params.inherit_nodump.val
+#define xfs_inherit_noatime    xfs_params.inherit_noatim.val
+#define xfs_buf_timer_centisecs        xfs_params.xfs_buf_timer.val
+#define xfs_buf_age_centisecs  xfs_params.xfs_buf_age.val
+
+#define current_cpu()          smp_processor_id()
+#define current_pid()          (current->pid)
+#define current_fsuid(cred)    (current->fsuid)
+#define current_fsgid(cred)    (current->fsgid)
+
+#define NBPP           PAGE_SIZE
+#define DPPSHFT                (PAGE_SHIFT - 9)
+#define NDPP           (1 << (PAGE_SHIFT - 9))
+#define dtop(DD)       (((DD) + NDPP - 1) >> DPPSHFT)
+#define dtopt(DD)      ((DD) >> DPPSHFT)
+#define dpoff(DD)      ((DD) & (NDPP-1))
+
+#define NBBY           8               /* number of bits per byte */
+#define        NBPC            PAGE_SIZE       /* Number of bytes per click */
+#define        BPCSHIFT        PAGE_SHIFT      /* LOG2(NBPC) if exact */
+
+/*
+ * Size of block device i/o is parameterized here.
+ * Currently the system supports page-sized i/o.
+ */
+#define        BLKDEV_IOSHIFT          BPCSHIFT
+#define        BLKDEV_IOSIZE           (1<<BLKDEV_IOSHIFT)
+/* number of BB's per block device block */
+#define        BLKDEV_BB               BTOBB(BLKDEV_IOSIZE)
+
+/* bytes to clicks */
+#define        btoc(x)         (((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT)
+#define        btoct(x)        ((__psunsigned_t)(x)>>BPCSHIFT)
+#define        btoc64(x)       (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT)
+#define        btoct64(x)      ((__uint64_t)(x)>>BPCSHIFT)
+#define        io_btoc(x)      (((__psunsigned_t)(x)+(IO_NBPC-1))>>IO_BPCSHIFT)
+#define        io_btoct(x)     ((__psunsigned_t)(x)>>IO_BPCSHIFT)
+
+/* off_t bytes to clicks */
+#define offtoc(x)       (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT)
+#define offtoct(x)      ((xfs_off_t)(x)>>BPCSHIFT)
+
+/* clicks to off_t bytes */
+#define        ctooff(x)       ((xfs_off_t)(x)<<BPCSHIFT)
+
+/* clicks to bytes */
+#define        ctob(x)         ((__psunsigned_t)(x)<<BPCSHIFT)
+#define btoct(x)        ((__psunsigned_t)(x)>>BPCSHIFT)
+#define        ctob64(x)       ((__uint64_t)(x)<<BPCSHIFT)
+#define        io_ctob(x)      ((__psunsigned_t)(x)<<IO_BPCSHIFT)
+
+/* bytes to clicks */
+#define btoc(x)         (((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT)
+
+#ifndef CELL_CAPABLE
+#define FSC_NOTIFY_NAME_CHANGED(vp)
+#endif
+
+#ifndef ENOATTR
+#define ENOATTR                ENODATA         /* Attribute not found */
+#endif
+
+/* Note: EWRONGFS never visible outside the kernel */
+#define        EWRONGFS        EINVAL          /* Mount with wrong filesystem type */
+
+/*
+ * XXX EFSCORRUPTED needs a real value in errno.h. asm-i386/errno.h won't
+ *     return codes out of its known range in errno.
+ * XXX Also note: needs to be < 1000 and fairly unique on Linux (mustn't
+ *     conflict with any code we use already or any code a driver may use)
+ * XXX Some options (currently we do #2):
+ *     1/ New error code ["Filesystem is corrupted", _after_ glibc updated]
+ *     2/ 990 ["Unknown error 990"]
+ *     3/ EUCLEAN ["Structure needs cleaning"]
+ *     4/ Convert EFSCORRUPTED to EIO [just prior to return into userspace]
+ */
+#define EFSCORRUPTED    990            /* Filesystem is corrupted */
+
+#define SYNCHRONIZE()  barrier()
+#define __return_address __builtin_return_address(0)
+
+/*
+ * IRIX (BSD) quotactl makes use of separate commands for user/group,
+ * whereas on Linux the syscall encodes this information into the cmd
+ * field (see the QCMD macro in quota.h).  These macros help keep the
+ * code portable - they are not visible from the syscall interface.
+ */
+#define Q_XSETGQLIM    XQM_CMD(0x8)    /* set groups disk limits */
+#define Q_XGETGQUOTA   XQM_CMD(0x9)    /* get groups disk limits */
+
+/* IRIX uses a dynamic sizing algorithm (ndquot = 200 + numprocs*2) */
+/* we may well need to fine-tune this if it ever becomes an issue.  */
+#define DQUOT_MAX_HEURISTIC    1024    /* NR_DQUOTS */
+#define ndquot                 DQUOT_MAX_HEURISTIC
+
+/* IRIX uses the current size of the name cache to guess a good value */
+/* - this isn't the same but is a good enough starting point for now. */
+#define DQUOT_HASH_HEURISTIC   files_stat.nr_files
+
+/* IRIX inodes maintain the project ID also, zero this field on Linux */
+#define DEFAULT_PROJID 0
+#define dfltprid       DEFAULT_PROJID
+
+#define MAXPATHLEN     1024
+
+#define MIN(a,b)       (min(a,b))
+#define MAX(a,b)       (max(a,b))
+#define howmany(x, y)  (((x)+((y)-1))/(y))
+#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))
+
+#define xfs_stack_trace()      dump_stack()
+
+#define xfs_itruncate_data(ip, off)    \
+       (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
+
+
+/* Move the kernel do_div definition off to one side */
+
+#if defined __i386__
+/* For ia32 we need to pull some tricks to get past various versions
+ * of the compiler which do not like us using do_div in the middle
+ * of large functions.
+ */
+static inline __u32 xfs_do_div(void *a, __u32 b, int n)
+{
+       __u32   mod;
+
+       switch (n) {
+               case 4:
+                       mod = *(__u32 *)a % b;
+                       *(__u32 *)a = *(__u32 *)a / b;
+                       return mod;
+               case 8:
+                       {
+                       unsigned long __upper, __low, __high, __mod;
+                       __u64   c = *(__u64 *)a;
+                       __upper = __high = c >> 32;
+                       __low = c;
+                       if (__high) {
+                               __upper = __high % (b);
+                               __high = __high / (b);
+                       }
+                       asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper));
+                       asm("":"=A" (c):"a" (__low),"d" (__high));
+                       *(__u64 *)a = c;
+                       return __mod;
+                       }
+       }
+
+       /* NOTREACHED */
+       return 0;
+}
+
+/* Side effect free 64 bit mod operation */
+static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
+{
+       switch (n) {
+               case 4:
+                       return *(__u32 *)a % b;
+               case 8:
+                       {
+                       unsigned long __upper, __low, __high, __mod;
+                       __u64   c = *(__u64 *)a;
+                       __upper = __high = c >> 32;
+                       __low = c;
+                       if (__high) {
+                               __upper = __high % (b);
+                               __high = __high / (b);
+                       }
+                       asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (b), "0" (__low), "1" (__upper));
+                       asm("":"=A" (c):"a" (__low),"d" (__high));
+                       return __mod;
+                       }
+       }
+
+       /* NOTREACHED */
+       return 0;
+}
+#else
+static inline __u32 xfs_do_div(void *a, __u32 b, int n)
+{
+       __u32   mod;
+
+       switch (n) {
+               case 4:
+                       mod = *(__u32 *)a % b;
+                       *(__u32 *)a = *(__u32 *)a / b;
+                       return mod;
+               case 8:
+                       mod = do_div(*(__u64 *)a, b);
+                       return mod;
+       }
+
+       /* NOTREACHED */
+       return 0;
+}
+
+/* Side effect free 64 bit mod operation */
+static inline __u32 xfs_do_mod(void *a, __u32 b, int n)
+{
+       switch (n) {
+               case 4:
+                       return *(__u32 *)a % b;
+               case 8:
+                       {
+                       __u64   c = *(__u64 *)a;
+                       return do_div(c, b);
+                       }
+       }
+
+       /* NOTREACHED */
+       return 0;
+}
+#endif
+
+#undef do_div
+#define do_div(a, b)   xfs_do_div(&(a), (b), sizeof(a))
+#define do_mod(a, b)   xfs_do_mod(&(a), (b), sizeof(a))
+
+static inline __uint64_t roundup_64(__uint64_t x, __uint32_t y)
+{
+       x += y - 1;
+       do_div(x, y);
+       return(x * y);
+}
+
+#endif /* __XFS_LINUX__ */
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
new file mode 100644 (file)
index 0000000..c45e963
--- /dev/null
@@ -0,0 +1,1028 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+/*
+ *  fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff)
+ *
+ */
+
+#include "xfs.h"
+
+#include "xfs_fs.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_inode_item.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+#include "xfs_iomap.h"
+
+#include <linux/capability.h>
+
+
+#if defined(XFS_RW_TRACE)
+void
+xfs_rw_enter_trace(
+       int                     tag,
+       xfs_iocore_t            *io,
+       const struct iovec      *iovp,
+       size_t                  segs,
+       loff_t                  offset,
+       int                     ioflags)
+{
+       xfs_inode_t     *ip = XFS_IO_INODE(io);
+
+       if (ip->i_rwtrace == NULL)
+               return;
+       ktrace_enter(ip->i_rwtrace,
+               (void *)(unsigned long)tag,
+               (void *)ip,
+               (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
+               (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
+               (void *)(__psint_t)iovp,
+               (void *)((unsigned long)segs),
+               (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
+               (void *)((unsigned long)(offset & 0xffffffff)),
+               (void *)((unsigned long)ioflags),
+               (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
+               (void *)((unsigned long)(io->io_new_size & 0xffffffff)),
+               (void *)NULL,
+               (void *)NULL,
+               (void *)NULL,
+               (void *)NULL,
+               (void *)NULL);
+}
+
+void
+xfs_inval_cached_trace(
+       xfs_iocore_t    *io,
+       xfs_off_t       offset,
+       xfs_off_t       len,
+       xfs_off_t       first,
+       xfs_off_t       last)
+{
+       xfs_inode_t     *ip = XFS_IO_INODE(io);
+
+       if (ip->i_rwtrace == NULL)
+               return;
+       ktrace_enter(ip->i_rwtrace,
+               (void *)(__psint_t)XFS_INVAL_CACHED,
+               (void *)ip,
+               (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
+               (void *)((unsigned long)(offset & 0xffffffff)),
+               (void *)((unsigned long)((len >> 32) & 0xffffffff)),
+               (void *)((unsigned long)(len & 0xffffffff)),
+               (void *)((unsigned long)((first >> 32) & 0xffffffff)),
+               (void *)((unsigned long)(first & 0xffffffff)),
+               (void *)((unsigned long)((last >> 32) & 0xffffffff)),
+               (void *)((unsigned long)(last & 0xffffffff)),
+               (void *)NULL,
+               (void *)NULL,
+               (void *)NULL,
+               (void *)NULL,
+               (void *)NULL,
+               (void *)NULL);
+}
+#endif
+
+/*
+ *     xfs_iozero
+ *
+ *     xfs_iozero clears the specified range of buffer supplied,
+ *     and marks all the affected blocks as valid and modified.  If
+ *     an affected block is not allocated, it will be allocated.  If
+ *     an affected block is not completely overwritten, and is not
+ *     valid before the operation, it will be read from disk before
+ *     being partially zeroed.
+ */
+STATIC int
+xfs_iozero(
+       struct inode            *ip,    /* inode                        */
+       loff_t                  pos,    /* offset in file               */
+       size_t                  count,  /* size of data to zero         */
+       loff_t                  end_size)       /* max file size to set */
+{
+       unsigned                bytes;
+       struct page             *page;
+       struct address_space    *mapping;
+       char                    *kaddr;
+       int                     status;
+
+       mapping = ip->i_mapping;
+       do {
+               unsigned long index, offset;
+
+               offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
+               index = pos >> PAGE_CACHE_SHIFT;
+               bytes = PAGE_CACHE_SIZE - offset;
+               if (bytes > count)
+                       bytes = count;
+
+               status = -ENOMEM;
+               page = grab_cache_page(mapping, index);
+               if (!page)
+                       break;
+
+               kaddr = kmap(page);
+               status = mapping->a_ops->prepare_write(NULL, page, offset,
+                                                       offset + bytes);
+               if (status) {
+                       goto unlock;
+               }
+
+               memset((void *) (kaddr + offset), 0, bytes);
+               flush_dcache_page(page);
+               status = mapping->a_ops->commit_write(NULL, page, offset,
+                                                       offset + bytes);
+               if (!status) {
+                       pos += bytes;
+                       count -= bytes;
+                       if (pos > i_size_read(ip))
+                               i_size_write(ip, pos < end_size ? pos : end_size);
+               }
+
+unlock:
+               kunmap(page);
+               unlock_page(page);
+               page_cache_release(page);
+               if (status)
+                       break;
+       } while (count);
+
+       return (-status);
+}
+
+/*
+ * xfs_inval_cached_pages
+ * 
+ * This routine is responsible for keeping direct I/O and buffered I/O
+ * somewhat coherent.  From here we make sure that we're at least
+ * temporarily holding the inode I/O lock exclusively and then call
+ * the page cache to flush and invalidate any cached pages.  If there
+ * are no cached pages this routine will be very quick.
+ */
+void
+xfs_inval_cached_pages(
+       vnode_t         *vp,
+       xfs_iocore_t    *io,
+       xfs_off_t       offset,
+       int             write,
+       int             relock)
+{
+       xfs_mount_t     *mp;
+
+       if (!VN_CACHED(vp)) {
+               return;
+       }
+
+       mp = io->io_mount;
+
+       /*
+        * We need to get the I/O lock exclusively in order
+        * to safely invalidate pages and mappings.
+        */
+       if (relock) {
+               XFS_IUNLOCK(mp, io, XFS_IOLOCK_SHARED);
+               XFS_ILOCK(mp, io, XFS_IOLOCK_EXCL);
+       }
+
+       /* Writing beyond EOF creates a hole that must be zeroed */
+       if (write && (offset > XFS_SIZE(mp, io))) {
+               xfs_fsize_t     isize;
+
+               XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+               isize = XFS_SIZE(mp, io);
+               if (offset > isize) {
+                       xfs_zero_eof(vp, io, offset, isize, offset);
+               }
+               XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+       }
+
+       xfs_inval_cached_trace(io, offset, -1, ctooff(offtoct(offset)), -1);
+       VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(offset)), -1, FI_REMAPF_LOCKED);
+       if (relock) {
+               XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
+       }
+}
+
+ssize_t                        /* bytes read, or (-)  error */
+xfs_read(
+       bhv_desc_t              *bdp,
+       struct kiocb            *iocb,
+       const struct iovec      *iovp,
+       unsigned int            segs,
+       loff_t                  *offset,
+       int                     ioflags,
+       cred_t                  *credp)
+{
+       struct file             *file = iocb->ki_filp;
+       size_t                  size = 0;
+       ssize_t                 ret;
+       xfs_fsize_t             n;
+       xfs_inode_t             *ip;
+       xfs_mount_t             *mp;
+       vnode_t                 *vp;
+       unsigned long           seg;
+
+       ip = XFS_BHVTOI(bdp);
+       vp = BHV_TO_VNODE(bdp);
+       mp = ip->i_mount;
+
+       XFS_STATS_INC(xs_read_calls);
+
+       /* START copy & waste from filemap.c */
+       for (seg = 0; seg < segs; seg++) {
+               const struct iovec *iv = &iovp[seg];
+
+               /*
+                * If any segment has a negative length, or the cumulative
+                * length ever wraps negative then return -EINVAL.
+                */
+               size += iv->iov_len;
+               if (unlikely((ssize_t)(size|iv->iov_len) < 0))
+                       return XFS_ERROR(-EINVAL);
+       }
+       /* END copy & waste from filemap.c */
+
+       if (ioflags & IO_ISDIRECT) {
+               xfs_buftarg_t   *target =
+                       (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
+                               mp->m_rtdev_targp : mp->m_ddev_targp;
+               if ((*offset & target->pbr_smask) ||
+                   (size & target->pbr_smask)) {
+                       if (*offset == ip->i_d.di_size) {
+                               return (0);
+                       }
+                       return -XFS_ERROR(EINVAL);
+               }
+       }
+
+       n = XFS_MAXIOFFSET(mp) - *offset;
+       if ((n <= 0) || (size == 0))
+               return 0;
+
+       if (n < size)
+               size = n;
+
+       if (XFS_FORCED_SHUTDOWN(mp)) {
+               return -EIO;
+       }
+
+       /* OK so we are holding the I/O lock for the duration
+        * of the submission, then what happens if the I/O
+        * does not really happen here, but is scheduled 
+        * later?
+        */
+       xfs_ilock(ip, XFS_IOLOCK_SHARED);
+
+       if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
+           !(ioflags & IO_INVIS)) {
+               vrwlock_t locktype = VRWLOCK_READ;
+
+               ret = XFS_SEND_DATA(mp, DM_EVENT_READ,
+                                       BHV_TO_VNODE(bdp), *offset, size,
+                                       FILP_DELAY_FLAG(file), &locktype);
+               if (ret) {
+                       xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+                       return -ret;
+               }
+       }
+
+       xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
+                               iovp, segs, *offset, ioflags);
+       ret = __generic_file_aio_read(iocb, iovp, segs, offset);
+       xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+
+       if (ret > 0)
+               XFS_STATS_ADD(xs_read_bytes, ret);
+
+       if (likely(!(ioflags & IO_INVIS)))
+               xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
+
+       return ret;
+}
+
+ssize_t
+xfs_sendfile(
+       bhv_desc_t              *bdp,
+       struct file             *filp,
+       loff_t                  *offset,
+       int                     ioflags,
+       size_t                  count,
+       read_actor_t            actor,
+       void                    *target,
+       cred_t                  *credp)
+{
+       ssize_t                 ret;
+       xfs_fsize_t             n;
+       xfs_inode_t             *ip;
+       xfs_mount_t             *mp;
+       vnode_t                 *vp;
+
+       ip = XFS_BHVTOI(bdp);
+       vp = BHV_TO_VNODE(bdp);
+       mp = ip->i_mount;
+
+       XFS_STATS_INC(xs_read_calls);
+
+       n = XFS_MAXIOFFSET(mp) - *offset;
+       if ((n <= 0) || (count == 0))
+               return 0;
+
+       if (n < count)
+               count = n;
+
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+               return -EIO;
+
+       xfs_ilock(ip, XFS_IOLOCK_SHARED);
+
+       if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
+           (!(ioflags & IO_INVIS))) {
+               vrwlock_t locktype = VRWLOCK_READ;
+               int error;
+
+               error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp), *offset, count,
+                                     FILP_DELAY_FLAG(filp), &locktype);
+               if (error) {
+                       xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+                       return -error;
+               }
+       }
+       xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
+                               target, count, *offset, ioflags);
+       ret = generic_file_sendfile(filp, offset, count, actor, target);
+       xfs_iunlock(ip, XFS_IOLOCK_SHARED);
+
+       XFS_STATS_ADD(xs_read_bytes, ret);
+       xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
+       return ret;
+}
+
+/*
+ * This routine is called to handle zeroing any space in the last
+ * block of the file that is beyond the EOF.  We do this since the
+ * size is being increased without writing anything to that block
+ * and we don't want anyone to read the garbage on the disk.
+ */
+STATIC int                             /* error (positive) */
+xfs_zero_last_block(
+       struct inode    *ip,
+       xfs_iocore_t    *io,
+       xfs_off_t       offset,
+       xfs_fsize_t     isize,
+       xfs_fsize_t     end_size)
+{
+       xfs_fileoff_t   last_fsb;
+       xfs_mount_t     *mp;
+       int             nimaps;
+       int             zero_offset;
+       int             zero_len;
+       int             isize_fsb_offset;
+       int             error = 0;
+       xfs_bmbt_irec_t imap;
+       loff_t          loff;
+       size_t          lsize;
+
+       ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
+       ASSERT(offset > isize);
+
+       mp = io->io_mount;
+
+       isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
+       if (isize_fsb_offset == 0) {
+               /*
+                * There are no extra bytes in the last block on disk to
+                * zero, so return.
+                */
+               return 0;
+       }
+
+       last_fsb = XFS_B_TO_FSBT(mp, isize);
+       nimaps = 1;
+       error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
+                         &nimaps, NULL);
+       if (error) {
+               return error;
+       }
+       ASSERT(nimaps > 0);
+       /*
+        * If the block underlying isize is just a hole, then there
+        * is nothing to zero.
+        */
+       if (imap.br_startblock == HOLESTARTBLOCK) {
+               return 0;
+       }
+       /*
+        * Zero the part of the last block beyond the EOF, and write it
+        * out sync.  We need to drop the ilock while we do this so we
+        * don't deadlock when the buffer cache calls back to us.
+        */
+       XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
+       loff = XFS_FSB_TO_B(mp, last_fsb);
+       lsize = XFS_FSB_TO_B(mp, 1);
+
+       zero_offset = isize_fsb_offset;
+       zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;
+
+       error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);
+
+       XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+       ASSERT(error >= 0);
+       return error;
+}
+
+/*
+ * Zero any on disk space between the current EOF and the new,
+ * larger EOF.  This handles the normal case of zeroing the remainder
+ * of the last block in the file and the unusual case of zeroing blocks
+ * out beyond the size of the file.  This second case only happens
+ * with fixed size extents and when the system crashes before the inode
+ * size was updated but after blocks were allocated.  If fill is set,
+ * then any holes in the range are filled and zeroed.  If not, the holes
+ * are left alone as holes.
+ */
+
+int                                    /* error (positive) */
+xfs_zero_eof(
+       vnode_t         *vp,
+       xfs_iocore_t    *io,
+       xfs_off_t       offset,         /* starting I/O offset */
+       xfs_fsize_t     isize,          /* current inode size */
+       xfs_fsize_t     end_size)       /* terminal inode size */
+{
+       struct inode    *ip = LINVFS_GET_IP(vp);
+       xfs_fileoff_t   start_zero_fsb;
+       xfs_fileoff_t   end_zero_fsb;
+       xfs_fileoff_t   prev_zero_fsb;
+       xfs_fileoff_t   zero_count_fsb;
+       xfs_fileoff_t   last_fsb;
+       xfs_extlen_t    buf_len_fsb;
+       xfs_extlen_t    prev_zero_count;
+       xfs_mount_t     *mp;
+       int             nimaps;
+       int             error = 0;
+       xfs_bmbt_irec_t imap;
+       loff_t          loff;
+       size_t          lsize;
+
+       ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
+       ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
+
+       mp = io->io_mount;
+
+       /*
+        * First handle zeroing the block on which isize resides.
+        * We only zero a part of that block so it is handled specially.
+        */
+       error = xfs_zero_last_block(ip, io, offset, isize, end_size);
+       if (error) {
+               ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
+               ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
+               return error;
+       }
+
+       /*
+        * Calculate the range between the new size and the old
+        * where blocks needing to be zeroed may exist.  To get the
+        * block where the last byte in the file currently resides,
+        * we need to subtract one from the size and truncate back
+        * to a block boundary.  We subtract 1 in case the size is
+        * exactly on a block boundary.
+        */
+       last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
+       start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
+       end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
+       ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
+       if (last_fsb == end_zero_fsb) {
+               /*
+                * The size was only incremented on its last block.
+                * We took care of that above, so just return.
+                */
+               return 0;
+       }
+
+       ASSERT(start_zero_fsb <= end_zero_fsb);
+       prev_zero_fsb = NULLFILEOFF;
+       prev_zero_count = 0;
+       while (start_zero_fsb <= end_zero_fsb) {
+               nimaps = 1;
+               zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
+               error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
+                                 0, NULL, 0, &imap, &nimaps, NULL);
+               if (error) {
+                       ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
+                       ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
+                       return error;
+               }
+               ASSERT(nimaps > 0);
+
+               if (imap.br_state == XFS_EXT_UNWRITTEN ||
+                   imap.br_startblock == HOLESTARTBLOCK) {
+                       /*
+                        * This loop handles initializing pages that were
+                        * partially initialized by the code below this
+                        * loop. It basically zeroes the part of the page
+                        * that sits on a hole and sets the page as P_HOLE
+                        * and calls remapf if it is a mapped file.
+                        */
+                       prev_zero_fsb = NULLFILEOFF;
+                       prev_zero_count = 0;
+                       start_zero_fsb = imap.br_startoff +
+                                        imap.br_blockcount;
+                       ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
+                       continue;
+               }
+
+               /*
+                * There are blocks in the range requested.
+                * Zero them a single write at a time.  We actually
+                * don't zero the entire range returned if it is
+                * too big and simply loop around to get the rest.
+                * That is not the most efficient thing to do, but it
+                * is simple and this path should not be exercised often.
+                */
+               buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
+                                             mp->m_writeio_blocks << 8);
+               /*
+                * Drop the inode lock while we're doing the I/O.
+                * We'll still have the iolock to protect us.
+                */
+               XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+
+               loff = XFS_FSB_TO_B(mp, start_zero_fsb);
+               lsize = XFS_FSB_TO_B(mp, buf_len_fsb);
+
+               error = xfs_iozero(ip, loff, lsize, end_size);
+
+               if (error) {
+                       goto out_lock;
+               }
+
+               prev_zero_fsb = start_zero_fsb;
+               prev_zero_count = buf_len_fsb;
+               start_zero_fsb = imap.br_startoff + buf_len_fsb;
+               ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
+
+               XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+       }
+
+       return 0;
+
+out_lock:
+
+       XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+       ASSERT(error >= 0);
+       return error;
+}
+
+ssize_t                                /* bytes written, or (-) error */
+xfs_write(
+       bhv_desc_t              *bdp,
+       struct kiocb            *iocb,
+       const struct iovec      *iovp,
+       unsigned int            segs,
+       loff_t                  *offset,
+       int                     ioflags,
+       cred_t                  *credp)
+{
+       struct file             *file = iocb->ki_filp;
+       size_t                  size = 0;
+       xfs_inode_t             *xip;
+       xfs_mount_t             *mp;
+       ssize_t                 ret;
+       int                     error = 0;
+       xfs_fsize_t             isize, new_size;
+       xfs_fsize_t             n, limit;
+       xfs_iocore_t            *io;
+       vnode_t                 *vp;
+       unsigned long           seg;
+       int                     iolock;
+       int                     eventsent = 0;
+       vrwlock_t               locktype;
+
+       XFS_STATS_INC(xs_write_calls);
+
+       vp = BHV_TO_VNODE(bdp);
+       xip = XFS_BHVTOI(bdp);
+
+       /* START copy & waste from filemap.c */
+       for (seg = 0; seg < segs; seg++) {
+               const struct iovec *iv = &iovp[seg];
+
+               /*
+                * If any segment has a negative length, or the cumulative
+                * length ever wraps negative then return -EINVAL.
+                */
+               size += iv->iov_len;
+               if (unlikely((ssize_t)(size|iv->iov_len) < 0))
+                       return XFS_ERROR(-EINVAL);
+       }
+       /* END copy & waste from filemap.c */
+
+       if (size == 0)
+               return 0;
+
+       io = &xip->i_iocore;
+       mp = io->io_mount;
+
+       if (XFS_FORCED_SHUTDOWN(mp)) {
+               return -EIO;
+       }
+
+       if (ioflags & IO_ISDIRECT) {
+               xfs_buftarg_t   *target =
+                       (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
+                               mp->m_rtdev_targp : mp->m_ddev_targp;
+
+               if ((*offset & target->pbr_smask) ||
+                   (size & target->pbr_smask)) {
+                       return XFS_ERROR(-EINVAL);
+               }
+               iolock = XFS_IOLOCK_SHARED;
+               locktype = VRWLOCK_WRITE_DIRECT;
+       } else {
+               iolock = XFS_IOLOCK_EXCL;
+               locktype = VRWLOCK_WRITE;
+       }
+
+       xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);
+
+       isize = xip->i_d.di_size;
+       limit = XFS_MAXIOFFSET(mp);
+
+       if (file->f_flags & O_APPEND)
+               *offset = isize;
+
+start:
+       n = limit - *offset;
+       if (n <= 0) {
+               xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
+               return -EFBIG;
+       }
+
+       if (n < size)
+               size = n;
+
+       new_size = *offset + size;
+       if (new_size > isize) {
+               io->io_new_size = new_size;
+       }
+
+       if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
+           !(ioflags & IO_INVIS) && !eventsent)) {
+               loff_t          savedsize = *offset;
+               int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);
+
+               xfs_iunlock(xip, XFS_ILOCK_EXCL);
+               error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
+                                     *offset, size,
+                                     dmflags, &locktype);
+               if (error) {
+                       xfs_iunlock(xip, iolock);
+                       return -error;
+               }
+               xfs_ilock(xip, XFS_ILOCK_EXCL);
+               eventsent = 1;
+
+               /*
+                * The iolock was dropped and reaquired in XFS_SEND_DATA
+                * so we have to recheck the size when appending.
+                * We will only "goto start;" once, since having sent the
+                * event prevents another call to XFS_SEND_DATA, which is
+                * what allows the size to change in the first place.
+                */
+               if ((file->f_flags & O_APPEND) &&
+                   savedsize != xip->i_d.di_size) {
+                       *offset = isize = xip->i_d.di_size;
+                       goto start;
+               }
+       }
+
+       /*
+        * On Linux, generic_file_write updates the times even if
+        * no data is copied in so long as the write had a size.
+        *
+        * We must update xfs' times since revalidate will overcopy xfs.
+        */
+       if (size && !(ioflags & IO_INVIS))
+               xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+
+       /*
+        * If the offset is beyond the size of the file, we have a couple
+        * of things to do. First, if there is already space allocated
+        * we need to either create holes or zero the disk or ...
+        *
+        * If there is a page where the previous size lands, we need
+        * to zero it out up to the new size.
+        */
+
+       if (!(ioflags & IO_ISDIRECT) && (*offset > isize && isize)) {
+               error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, *offset,
+                       isize, *offset + size);
+               if (error) {
+                       xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
+                       return(-error);
+               }
+       }
+       xfs_iunlock(xip, XFS_ILOCK_EXCL);
+
+       /*
+        * If we're writing the file then make sure to clear the
+        * setuid and setgid bits if the process is not being run
+        * by root.  This keeps people from modifying setuid and
+        * setgid binaries.
+        */
+
+       if (((xip->i_d.di_mode & S_ISUID) ||
+           ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
+               (S_ISGID | S_IXGRP))) &&
+            !capable(CAP_FSETID)) {
+               error = xfs_write_clear_setuid(xip);
+               if (error) {
+                       xfs_iunlock(xip, iolock);
+                       return -error;
+               }
+       }
+
+retry:
+       if (ioflags & IO_ISDIRECT) {
+               xfs_inval_cached_pages(vp, io, *offset, 1, 1);
+               xfs_rw_enter_trace(XFS_DIOWR_ENTER,
+                               io, iovp, segs, *offset, ioflags);
+       } else {
+               xfs_rw_enter_trace(XFS_WRITE_ENTER,
+                               io, iovp, segs, *offset, ioflags);
+       }
+       ret = generic_file_aio_write_nolock(iocb, iovp, segs, offset);
+
+       if ((ret == -ENOSPC) &&
+           DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
+           !(ioflags & IO_INVIS)) {
+
+               xfs_rwunlock(bdp, locktype);
+               error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
+                               DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
+                               0, 0, 0); /* Delay flag intentionally  unused */
+               if (error)
+                       return -error;
+               xfs_rwlock(bdp, locktype);
+               *offset = xip->i_d.di_size;
+               goto retry;
+       }
+
+       if (*offset > xip->i_d.di_size) {
+               xfs_ilock(xip, XFS_ILOCK_EXCL);
+               if (*offset > xip->i_d.di_size) {
+                       struct inode    *inode = LINVFS_GET_IP(vp);
+
+                       xip->i_d.di_size = *offset;
+                       i_size_write(inode, *offset);
+                       xip->i_update_core = 1;
+                       xip->i_update_size = 1;
+               }
+               xfs_iunlock(xip, XFS_ILOCK_EXCL);
+       }
+
+       if (ret <= 0) {
+               xfs_rwunlock(bdp, locktype);
+               return ret;
+       }
+
+       XFS_STATS_ADD(xs_write_bytes, ret);
+
+       /* Handle various SYNC-type writes */
+       if ((file->f_flags & O_SYNC) || IS_SYNC(file->f_dentry->d_inode)) {
+
+               /*
+                * If we're treating this as O_DSYNC and we have not updated the
+                * size, force the log.
+                */
+
+               if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC)
+                       && !(xip->i_update_size)) {
+                       /*
+                        * If an allocation transaction occurred
+                        * without extending the size, then we have to force
+                        * the log up the proper point to ensure that the
+                        * allocation is permanent.  We can't count on
+                        * the fact that buffered writes lock out direct I/O
+                        * writes - the direct I/O write could have extended
+                        * the size nontransactionally, then finished before
+                        * we started.  xfs_write_file will think that the file
+                        * didn't grow but the update isn't safe unless the
+                        * size change is logged.
+                        *
+                        * Force the log if we've committed a transaction
+                        * against the inode or if someone else has and
+                        * the commit record hasn't gone to disk (e.g.
+                        * the inode is pinned).  This guarantees that
+                        * all changes affecting the inode are permanent
+                        * when we return.
+                        */
+
+                       xfs_inode_log_item_t *iip;
+                       xfs_lsn_t lsn;
+
+                       iip = xip->i_itemp;
+                       if (iip && iip->ili_last_lsn) {
+                               lsn = iip->ili_last_lsn;
+                               xfs_log_force(mp, lsn,
+                                               XFS_LOG_FORCE | XFS_LOG_SYNC);
+                       } else if (xfs_ipincount(xip) > 0) {
+                               xfs_log_force(mp, (xfs_lsn_t)0,
+                                               XFS_LOG_FORCE | XFS_LOG_SYNC);
+                       }
+
+               } else {
+                       xfs_trans_t     *tp;
+
+                       /*
+                        * O_SYNC or O_DSYNC _with_ a size update are handled
+                        * the same way.
+                        *
+                        * If the write was synchronous then we need to make
+                        * sure that the inode modification time is permanent.
+                        * We'll have updated the timestamp above, so here
+                        * we use a synchronous transaction to log the inode.
+                        * It's not fast, but it's necessary.
+                        *
+                        * If this a dsync write and the size got changed
+                        * non-transactionally, then we need to ensure that
+                        * the size change gets logged in a synchronous
+                        * transaction.
+                        */
+
+                       tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
+                       if ((error = xfs_trans_reserve(tp, 0,
+                                                     XFS_SWRITE_LOG_RES(mp),
+                                                     0, 0, 0))) {
+                               /* Transaction reserve failed */
+                               xfs_trans_cancel(tp, 0);
+                       } else {
+                               /* Transaction reserve successful */
+                               xfs_ilock(xip, XFS_ILOCK_EXCL);
+                               xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
+                               xfs_trans_ihold(tp, xip);
+                               xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
+                               xfs_trans_set_sync(tp);
+                               error = xfs_trans_commit(tp, 0, NULL);
+                               xfs_iunlock(xip, XFS_ILOCK_EXCL);
+                       }
+               }
+       } /* (ioflags & O_SYNC) */
+
+       xfs_rwunlock(bdp, locktype);
+       return(ret);
+}
+
+/*
+ * b_bdstrat callback attached to all xfs metadata buffers except log
+ * state machine buffers, so a buffer that was prematurely unpinned to
+ * force a filesystem shutdown can be caught before it hits the disk.
+ */
+int
+xfs_bdstrat_cb(struct xfs_buf *bp)
+{
+       xfs_mount_t     *mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
+
+       if (XFS_FORCED_SHUTDOWN(mp)) {
+               xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
+               /*
+                * Metadata write that didn't get logged but was written
+                * delayed anyway: not associated with a transaction, so
+                * it can be released and ignored.
+                */
+               if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
+                   !XFS_BUF_ISREAD(bp))
+                       return xfs_bioerror_relse(bp);
+               return xfs_bioerror(bp);
+       }
+
+       pagebuf_iorequest(bp);
+       return 0;
+}
+
+
+/*
+ * Map a byte range of a regular file to filesystem blocks via the
+ * generic xfs_iomap() engine.  Sanity-checks that the inode is a
+ * regular file and that its realtime flag matches its iocore flags.
+ */
+int
+xfs_bmap(bhv_desc_t    *bdp,
+       xfs_off_t       offset,
+       ssize_t         count,
+       int             flags,
+       xfs_iomap_t     *iomapp,
+       int             *niomaps)
+{
+       xfs_inode_t     *ip = XFS_BHVTOI(bdp);
+
+       ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
+       ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
+              ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
+
+       return xfs_iomap(&ip->i_iocore, offset, count, flags,
+                        iomapp, niomaps);
+}
+
+/*
+ * Wrapper around bdstrat that stops data (typically user data; the
+ * superblock is one exception) from going to disk when the filesystem
+ * is being shut down.
+ */
+int
+xfsbdstrat(
+       struct xfs_mount        *mp,
+       struct xfs_buf          *bp)
+{
+       ASSERT(mp);
+       if (XFS_FORCED_SHUTDOWN(mp)) {
+               xfs_buftrace("XFSBDSTRAT IOERROR", bp);
+               return xfs_bioerror_relse(bp);
+       }
+
+       /* Grio redirection would go here
+        * if (XFS_BUF_IS_GRIO(bp)) {
+        */
+       pagebuf_iorequest(bp);
+       return 0;
+}
+
+/*
+ * If any of the underlying devices (data, log, or realtime) is
+ * read-only, some operations cannot proceed.  Logs a notice naming
+ * the blocked operation and returns EROFS; returns 0 otherwise.
+ */
+int
+xfs_dev_is_read_only(
+       xfs_mount_t             *mp,
+       char                    *message)
+{
+       int                     rdonly;
+
+       rdonly = xfs_readonly_buftarg(mp->m_ddev_targp) ||
+                xfs_readonly_buftarg(mp->m_logdev_targp) ||
+                (mp->m_rtdev_targp &&
+                 xfs_readonly_buftarg(mp->m_rtdev_targp));
+       if (!rdonly)
+               return 0;
+
+       cmn_err(CE_NOTE,
+               "XFS: %s required on read-only device.", message);
+       cmn_err(CE_NOTE,
+               "XFS: write access unavailable, cannot proceed.");
+       return EROFS;
+}
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
new file mode 100644 (file)
index 0000000..faf0afc
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_LRW_H__
+#define __XFS_LRW_H__
+
+struct vnode;
+struct bhv_desc;
+struct xfs_mount;
+struct xfs_iocore;
+struct xfs_inode;
+struct xfs_bmbt_irec;
+struct xfs_buf;
+struct xfs_iomap;
+
+#if defined(XFS_RW_TRACE)
+/*
+ * Defines for the trace mechanisms in xfs_lrw.c.
+ */
+#define        XFS_RW_KTRACE_SIZE      128
+
+#define        XFS_READ_ENTER          1
+#define        XFS_WRITE_ENTER         2
+#define XFS_IOMAP_READ_ENTER   3
+#define        XFS_IOMAP_WRITE_ENTER   4
+#define        XFS_IOMAP_READ_MAP      5
+#define        XFS_IOMAP_WRITE_MAP     6
+#define        XFS_IOMAP_WRITE_NOSPACE 7
+#define        XFS_ITRUNC_START        8
+#define        XFS_ITRUNC_FINISH1      9
+#define        XFS_ITRUNC_FINISH2      10
+#define        XFS_CTRUNC1             11
+#define        XFS_CTRUNC2             12
+#define        XFS_CTRUNC3             13
+#define        XFS_CTRUNC4             14
+#define        XFS_CTRUNC5             15
+#define        XFS_CTRUNC6             16
+#define        XFS_BUNMAPI             17
+#define        XFS_INVAL_CACHED        18
+#define        XFS_DIORD_ENTER         19
+#define        XFS_DIOWR_ENTER         20
+#define        XFS_SENDFILE_ENTER      21
+#define        XFS_WRITEPAGE_ENTER     22
+#define        XFS_RELEASEPAGE_ENTER   23
+#define        XFS_IOMAP_ALLOC_ENTER   24
+#define        XFS_IOMAP_ALLOC_MAP     25
+#define        XFS_IOMAP_UNWRITTEN     26
+extern void xfs_rw_enter_trace(int, struct xfs_iocore *,
+                       const struct iovec *, size_t, loff_t, int);
+extern void xfs_inval_cached_trace(struct xfs_iocore *,
+                       xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t);
+#else
+#define xfs_rw_enter_trace(tag, io, iovec, segs, offset, ioflags)
+#define xfs_inval_cached_trace(io, offset, len, first, last)
+#endif
+
+/*
+ * Maximum count of bmaps used by read and write paths.
+ */
+#define        XFS_MAX_RW_NBMAPS       4
+
+extern int xfs_bmap(struct bhv_desc *, xfs_off_t, ssize_t, int,
+                       struct xfs_iomap *, int *);
+extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
+extern int xfs_bdstrat_cb(struct xfs_buf *);
+
+extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t,
+                               xfs_fsize_t, xfs_fsize_t);
+extern void xfs_inval_cached_pages(struct vnode        *, struct xfs_iocore *,
+                               xfs_off_t, int, int);
+extern ssize_t xfs_read(struct bhv_desc *, struct kiocb *,
+                               const struct iovec *, unsigned int,
+                               loff_t *, int, struct cred *);
+extern ssize_t xfs_write(struct bhv_desc *, struct kiocb *,
+                               const struct iovec *, unsigned int,
+                               loff_t *, int, struct cred *);
+extern ssize_t xfs_sendfile(struct bhv_desc *, struct file *,
+                               loff_t *, int, size_t, read_actor_t,
+                               void *, struct cred *);
+
+extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
+
+/*
+ * Convert a filesystem block number to an on-disk address for I/O:
+ * realtime inodes (XFS_IOCORE_RT) convert to basic blocks on the rt
+ * device, everything else to a daddr on the data device.
+ */
+#define XFS_FSB_TO_DB_IO(io,fsb) \
+               (((io)->io_flags & XFS_IOCORE_RT) ? \
+                XFS_FSB_TO_BB((io)->io_mount, (fsb)) : \
+                XFS_FSB_TO_DADDR((io)->io_mount, (fsb)))
+
+#endif /* __XFS_LRW_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_stats.c b/fs/xfs/linux-2.6/xfs_stats.c
new file mode 100644 (file)
index 0000000..86b633e
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include <linux/proc_fs.h>
+
+DEFINE_PER_CPU(struct xfsstats, xfsstats);
+
+STATIC int
+xfs_read_xfsstats(
+       char            *buffer,
+       char            **start,
+       off_t           offset,
+       int             count,
+       int             *eof,
+       void            *data)
+{
+       int             c, i, j, len, val;
+       __uint64_t      xs_xstrat_bytes = 0;
+       __uint64_t      xs_write_bytes = 0;
+       __uint64_t      xs_read_bytes = 0;
+
+       static struct xstats_entry {
+               char    *desc;
+               int     endpoint;
+       } xstats[] = {
+               { "extent_alloc",       XFSSTAT_END_EXTENT_ALLOC        },
+               { "abt",                XFSSTAT_END_ALLOC_BTREE         },
+               { "blk_map",            XFSSTAT_END_BLOCK_MAPPING       },
+               { "bmbt",               XFSSTAT_END_BLOCK_MAP_BTREE     },
+               { "dir",                XFSSTAT_END_DIRECTORY_OPS       },
+               { "trans",              XFSSTAT_END_TRANSACTIONS        },
+               { "ig",                 XFSSTAT_END_INODE_OPS           },
+               { "log",                XFSSTAT_END_LOG_OPS             },
+               { "push_ail",           XFSSTAT_END_TAIL_PUSHING        },
+               { "xstrat",             XFSSTAT_END_WRITE_CONVERT       },
+               { "rw",                 XFSSTAT_END_READ_WRITE_OPS      },
+               { "attr",               XFSSTAT_END_ATTRIBUTE_OPS       },
+               { "icluster",           XFSSTAT_END_INODE_CLUSTER       },
+               { "vnodes",             XFSSTAT_END_VNODE_OPS           },
+               { "buf",                XFSSTAT_END_BUF                 },
+       };
+
+       /* Loop over all stats groups */
+       for (i=j=len = 0; i < sizeof(xstats)/sizeof(struct xstats_entry); i++) {
+               len += sprintf(buffer + len, xstats[i].desc);
+               /* inner loop does each group */
+               while (j < xstats[i].endpoint) {
+                       val = 0;
+                       /* sum over all cpus */
+                       for (c = 0; c < NR_CPUS; c++) {
+                               if (!cpu_possible(c)) continue;
+                               val += *(((__u32*)&per_cpu(xfsstats, c) + j));
+                       }
+                       len += sprintf(buffer + len, " %u", val);
+                       j++;
+               }
+               buffer[len++] = '\n';
+       }
+       /* extra precision counters */
+       for (i = 0; i < NR_CPUS; i++) {
+               if (!cpu_possible(i)) continue;
+               xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
+               xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
+               xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
+       }
+
+       len += sprintf(buffer + len, "xpc %Lu %Lu %Lu\n",
+                       xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
+       len += sprintf(buffer + len, "debug %u\n",
+#if defined(DEBUG)
+               1);
+#else
+               0);
+#endif
+
+       if (offset >= len) {
+               *start = buffer;
+               *eof = 1;
+               return 0;
+       }
+       *start = buffer + offset;
+       if ((len -= offset) > count)
+               return count;
+       *eof = 1;
+
+       return len;
+}
+
+/*
+ * Create the /proc/fs/xfs directory and its "stat" read entry.
+ * If the directory cannot be created, no entry is registered.
+ */
+void
+xfs_init_procfs(void)
+{
+       if (proc_mkdir("fs/xfs", 0))
+               create_proc_read_entry("fs/xfs/stat", 0, 0,
+                                      xfs_read_xfsstats, NULL);
+}
+
+/*
+ * Tear down the /proc/fs/xfs tree created by xfs_init_procfs().
+ * The "stat" entry is removed before its parent directory.
+ */
+void
+xfs_cleanup_procfs(void)
+{
+       remove_proc_entry("fs/xfs/stat", NULL);
+       remove_proc_entry("fs/xfs", NULL);
+}
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
new file mode 100644 (file)
index 0000000..e7825df
--- /dev/null
@@ -0,0 +1,873 @@
+/*
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_clnt.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_quota.h"
+#include "xfs_mount.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_ialloc_btree.h"
+#include "xfs_btree.h"
+#include "xfs_ialloc.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir_sf.h"
+#include "xfs_dir2_sf.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bit.h"
+#include "xfs_rtalloc.h"
+#include "xfs_error.h"
+#include "xfs_itable.h"
+#include "xfs_rw.h"
+#include "xfs_acl.h"
+#include "xfs_cap.h"
+#include "xfs_mac.h"
+#include "xfs_attr.h"
+#include "xfs_buf_item.h"
+#include "xfs_utils.h"
+#include "xfs_version.h"
+
+#include <linux/namei.h>
+#include <linux/init.h>
+#include <linux/mount.h>
+#include <linux/suspend.h>
+#include <linux/writeback.h>
+
+STATIC struct quotactl_ops linvfs_qops;
+STATIC struct super_operations linvfs_sops;
+STATIC struct export_operations linvfs_export_ops;
+STATIC kmem_cache_t * linvfs_inode_cachep;
+
+/*
+ * Allocate and seed the XFS mount-argument structure for @sb with the
+ * defaults: auto-sized log buffers, the block device name as fsname,
+ * 32 bit inodes, plus MS_* flags already parsed by the generic VFS.
+ */
+STATIC struct xfs_mount_args *
+xfs_args_allocate(
+       struct super_block      *sb)
+{
+       struct xfs_mount_args   *args;
+
+       args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
+       args->logbufs = args->logbufsize = -1;  /* -1 == choose automatically */
+       strncpy(args->fsname, sb->s_id, MAXNAMELEN);
+
+       /* Copy the already-parsed mount(2) flags we're interested in */
+       if (sb->s_flags & MS_NOATIME)
+               args->flags |= XFSMNT_NOATIME;
+
+       /* Default to 32 bit inodes on Linux all the time */
+       args->flags |= XFSMNT_32BITINODES;
+
+       return args;
+}
+
+/*
+ * Return the largest file offset (s_maxbytes) supported for the given
+ * filesystem block shift, limited by page-cache index arithmetic on
+ * 32 bit platforms (see the detailed derivation below).
+ */
+__uint64_t
+xfs_max_file_offset(
+       unsigned int            blockshift)
+{
+       unsigned int            pagefactor = 1;
+       unsigned int            bitshift = BITS_PER_LONG - 1;
+
+       /* Figure out maximum filesize, on Linux this can depend on
+        * the filesystem blocksize (on 32 bit platforms).
+        * __block_prepare_write does this in an [unsigned] long...
+        *      page->index << (PAGE_CACHE_SHIFT - bbits)
+        * So, for page sized blocks (4K on 32 bit platforms),
+        * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
+        *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+        * but for smaller blocksizes it is less (bbits = log2 bsize).
+        * Note1: get_block_t takes a long (implicit cast from above)
+        * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
+        * can optionally convert the [unsigned] long from above into
+        * an [unsigned] long long.
+        */
+
+#if BITS_PER_LONG == 32
+# if defined(CONFIG_LBD)
+       ASSERT(sizeof(sector_t) == 8);
+       pagefactor = PAGE_CACHE_SIZE;
+       bitshift = BITS_PER_LONG;
+# else
+       pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
+# endif
+#endif
+
+       return (((__uint64_t)pagefactor) << bitshift) - 1;
+}
+
+/*
+ * Install the inode/file/address-space operation vectors that match the
+ * inode's type.  A VNON vnode means the type is not yet known, so the
+ * inode is marked bad rather than given real ops.
+ */
+STATIC __inline__ void
+xfs_set_inodeops(
+       struct inode            *inode)
+{
+       vnode_t                 *vp = LINVFS_GET_VP(inode);
+
+       if (vp->v_type == VNON) {
+               make_bad_inode(inode);
+       } else if (S_ISREG(inode->i_mode)) {
+               inode->i_op = &linvfs_file_inode_operations;
+               inode->i_fop = &linvfs_file_operations;
+               inode->i_mapping->a_ops = &linvfs_aops;
+       } else if (S_ISDIR(inode->i_mode)) {
+               inode->i_op = &linvfs_dir_inode_operations;
+               inode->i_fop = &linvfs_dir_operations;
+       } else if (S_ISLNK(inode->i_mode)) {
+               inode->i_op = &linvfs_symlink_inode_operations;
+               /* only non-inline symlinks (with data blocks) need aops */
+               if (inode->i_blocks)
+                       inode->i_mapping->a_ops = &linvfs_aops;
+       } else {
+               /* device nodes, fifos, sockets */
+               inode->i_op = &linvfs_file_inode_operations;
+               init_special_inode(inode, inode->i_mode, inode->i_rdev);
+       }
+}
+
+/*
+ * Copy the on-disk inode core (ip->i_d) into the corresponding fields
+ * of the Linux inode, and clear VMODIFIED on the vnode once the two
+ * are back in sync.
+ */
+STATIC __inline__ void
+xfs_revalidate_inode(
+       xfs_mount_t             *mp,
+       vnode_t                 *vp,
+       xfs_inode_t             *ip)
+{
+       struct inode            *inode = LINVFS_GET_IP(vp);
+
+       inode->i_mode   = (ip->i_d.di_mode & MODEMASK) | VTTOIF(vp->v_type);
+       inode->i_nlink  = ip->i_d.di_nlink;
+       inode->i_uid    = ip->i_d.di_uid;
+       inode->i_gid    = ip->i_d.di_gid;
+       /* only block and char specials carry a device number */
+       if (((1 << vp->v_type) & ((1<<VBLK) | (1<<VCHR))) == 0) {
+               inode->i_rdev = 0;
+       } else {
+               /* on-disk dev_t uses the sysv encoding; major capped to 9 bits */
+               xfs_dev_t dev = ip->i_df.if_u2.if_rdev;
+               inode->i_rdev = MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev));
+       }
+       inode->i_blksize = PAGE_CACHE_SIZE;
+       inode->i_generation = ip->i_d.di_gen;
+       i_size_write(inode, ip->i_d.di_size);
+       /* i_blocks is in 512-byte basic blocks; include delalloc blocks */
+       inode->i_blocks =
+               XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
+       inode->i_atime.tv_sec   = ip->i_d.di_atime.t_sec;
+       inode->i_atime.tv_nsec  = ip->i_d.di_atime.t_nsec;
+       inode->i_mtime.tv_sec   = ip->i_d.di_mtime.t_sec;
+       inode->i_mtime.tv_nsec  = ip->i_d.di_mtime.t_nsec;
+       inode->i_ctime.tv_sec   = ip->i_d.di_ctime.t_sec;
+       inode->i_ctime.tv_nsec  = ip->i_d.di_ctime.t_nsec;
+       /* mirror the XFS DIFLAGs into the generic inode flags */
+       if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+               inode->i_flags |= S_IMMUTABLE;
+       else
+               inode->i_flags &= ~S_IMMUTABLE;
+       if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+               inode->i_flags |= S_APPEND;
+       else
+               inode->i_flags &= ~S_APPEND;
+       if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
+               inode->i_flags |= S_SYNC;
+       else
+               inode->i_flags &= ~S_SYNC;
+       if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
+               inode->i_flags |= S_NOATIME;
+       else
+               inode->i_flags &= ~S_NOATIME;
+       vp->v_flag &= ~VMODIFIED;
+}
+
+/*
+ * Attach an XFS inode behavior to a vnode and bring the Linux inode up
+ * to date.  Called both for freshly-read inodes and for inodes in the
+ * middle of creation (in which case the type is still VNON and the
+ * Linux-side setup is deferred).
+ */
+void
+xfs_initialize_vnode(
+       bhv_desc_t              *bdp,
+       vnode_t                 *vp,
+       bhv_desc_t              *inode_bhv,
+       int                     unlock)
+{
+       xfs_inode_t             *ip = XFS_BHVTOI(inode_bhv);
+       struct inode            *inode = LINVFS_GET_IP(vp);
+
+       /* first time through: link the inode behavior into the vnode chain */
+       if (!inode_bhv->bd_vobj) {
+               vp->v_vfsp = bhvtovfs(bdp);
+               bhv_desc_init(inode_bhv, ip, vp, &xfs_vnodeops);
+               bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
+       }
+
+       vp->v_type = IFTOVT(ip->i_d.di_mode);
+
+       /* Have we been called during the new inode create process,
+        * in which case we are too early to fill in the Linux inode.
+        */
+       if (vp->v_type == VNON)
+               return;
+
+       xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
+
+       /* For new inodes we need to set the ops vectors,
+        * and unlock the inode.
+        */
+       if (unlock && (inode->i_state & I_NEW)) {
+               xfs_set_inodeops(inode);
+               unlock_new_inode(inode);
+       }
+}
+
+/*
+ * Start asynchronous writeback of one inode's dirty pagecache pages.
+ */
+void
+xfs_flush_inode(
+       xfs_inode_t     *ip)
+{
+       struct inode    *inode = LINVFS_GET_IP(XFS_ITOV(ip));
+
+       filemap_flush(inode->i_mapping);
+}
+
+/*
+ * Flush the whole block device backing @ip's filesystem, then force the
+ * log to disk synchronously.
+ */
+void
+xfs_flush_device(
+       xfs_inode_t     *ip)
+{
+       sync_blockdev(XFS_ITOV(ip)->v_vfsp->vfs_super->s_bdev);
+       xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
+}
+
+/*
+ * Open the named block device exclusively (holder == @mp) and return it
+ * via @bdevp.  Returns 0 or a positive errno: PTR_ERR() yields a
+ * negative value, which the final negation turns into XFS's internal
+ * positive-errno convention.
+ */
+int
+xfs_blkdev_get(
+       xfs_mount_t             *mp,
+       const char              *name,
+       struct block_device     **bdevp)
+{
+       int                     error = 0;
+
+       *bdevp = open_bdev_excl(name, 0, mp);
+       if (IS_ERR(*bdevp)) {
+               error = PTR_ERR(*bdevp);
+               printk("XFS: Invalid device [%s], error=%d\n", name, error);
+       }
+
+       return -error;
+}
+
+/*
+ * Release a block device obtained with xfs_blkdev_get(); NULL is a
+ * harmless no-op.
+ */
+void
+xfs_blkdev_put(
+       struct block_device     *bdev)
+{
+       if (bdev)
+               close_bdev_excl(bdev);
+}
+
+
+/*
+ * super_operations.alloc_inode: allocate a vnode (which embeds the
+ * Linux inode) from our slab cache and hand the inode part back to the
+ * VFS.  Returns NULL on allocation failure.
+ */
+STATIC struct inode *
+linvfs_alloc_inode(
+       struct super_block      *sb)
+{
+       vnode_t                 *vp;
+
+       vp = (vnode_t *)kmem_cache_alloc(linvfs_inode_cachep, 
+                kmem_flags_convert(KM_SLEEP));
+       if (!vp)
+               return NULL;
+       return LINVFS_GET_IP(vp);
+}
+
+/*
+ * super_operations.destroy_inode: return the containing vnode to the
+ * slab cache (inverse of linvfs_alloc_inode).
+ */
+STATIC void
+linvfs_destroy_inode(
+       struct inode            *inode)
+{
+       kmem_cache_free(linvfs_inode_cachep, LINVFS_GET_VP(inode));
+}
+
+/*
+ * Slab constructor for the vnode cache: initialise the embedded Linux
+ * inode exactly once per slab object (only on first construction, not
+ * on verify passes).
+ */
+STATIC void
+init_once(
+       void                    *data,
+       kmem_cache_t            *cachep,
+       unsigned long           flags)
+{
+       vnode_t                 *vp = (vnode_t *)data;
+
+       if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
+           SLAB_CTOR_CONSTRUCTOR)
+               inode_init_once(LINVFS_GET_IP(vp));
+}
+
+/*
+ * Create the vnode slab cache used by linvfs_alloc_inode().
+ * Returns 0 on success or -ENOMEM.
+ */
+STATIC int
+init_inodecache( void )
+{
+       linvfs_inode_cachep = kmem_cache_create("linvfs_icache",
+                               sizeof(vnode_t), 0,
+                               SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
+                               init_once, NULL);
+
+       if (linvfs_inode_cachep == NULL)
+               return -ENOMEM;
+       return 0;
+}
+
+/*
+ * Destroy the vnode slab cache; warn if objects are still allocated
+ * (kmem_cache_destroy returns non-zero in that case).
+ */
+STATIC void
+destroy_inodecache( void )
+{
+       if (kmem_cache_destroy(linvfs_inode_cachep))
+               printk(KERN_WARNING "%s: cache still in use!\n", __FUNCTION__);
+}
+
+/*
+ * Attempt to flush the inode, this will actually fail
+ * if the inode is pinned, but we dirty the inode again
+ * at the point when it is unpinned after a log write,
+ * since this is when the inode itself becomes flushable. 
+ */
+STATIC void
+linvfs_write_inode(
+       struct inode            *inode,
+       int                     sync)
+{
+       vnode_t                 *vp = LINVFS_GET_VP(inode);
+       int                     error, flags = FLUSH_INODE;
+
+       if (vp) {
+               vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+               if (sync)
+                       flags |= FLUSH_SYNC;    /* caller wants a synchronous flush */
+               VOP_IFLUSH(vp, flags, error);   /* macro sets "error"; result ignored */
+       }
+}
+
+/*
+ * super_operations.clear_inode: drop the last vnode reference and tear
+ * the vnode down as the VFS discards the inode.
+ */
+STATIC void
+linvfs_clear_inode(
+       struct inode            *inode)
+{
+       vnode_t                 *vp = LINVFS_GET_VP(inode);
+
+       if (vp) {
+               vn_rele(vp);
+               vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+               /*
+                * Do all our cleanup, and remove this vnode.
+                */
+               vn_remove(vp);
+       }
+}
+
+
+/* Work pushed by the periodic sync daemon on each iteration. */
+#define SYNCD_FLAGS    (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR)
+
+/*
+ * Per-mount sync daemon: wakes up every xfs_syncd_centisecs and pushes
+ * dirty metadata/data for the filesystem, until linvfs_stop_syncd()
+ * sets VFS_UMOUNT.  Startup/shutdown handshakes with the mounter via
+ * vfs_sync_task and vfs_wait_sync_task.
+ */
+STATIC int
+xfssyncd(
+       void                    *arg)
+{
+       vfs_t                   *vfsp = (vfs_t *) arg;
+       int                     error;
+
+       daemonize("xfssyncd");
+
+       vfsp->vfs_sync_task = current;
+       wmb();          /* publish vfs_sync_task before waking the waiter */
+       wake_up(&vfsp->vfs_wait_sync_task);
+
+       for (;;) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout((xfs_syncd_centisecs * HZ) / 100);
+               /* swsusp */
+               if (current->flags & PF_FREEZE)
+                       refrigerator(PF_FREEZE);
+               if (vfsp->vfs_flag & VFS_UMOUNT)
+                       break;
+               if (vfsp->vfs_flag & VFS_RDONLY)
+                       continue;       /* nothing to push on read-only mounts */
+               VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);       /* macro sets "error" */
+
+               /* bump sequence so laptop-mode waiters see this pass completed */
+               vfsp->vfs_sync_seq++;
+               wmb();
+               wake_up(&vfsp->vfs_wait_single_sync_task);
+       }
+
+       vfsp->vfs_sync_task = NULL;
+       wmb();
+       wake_up(&vfsp->vfs_wait_sync_task);
+
+       return 0;
+}
+
+/*
+ * Spawn the xfssyncd kernel thread for this mount and wait until it has
+ * announced itself via vfs_sync_task.  Returns 0 or a positive errno
+ * (kernel_thread returns a negative errno on failure, hence -pid).
+ */
+STATIC int
+linvfs_start_syncd(
+       vfs_t                   *vfsp)
+{
+       int                     pid;
+
+       pid = kernel_thread(xfssyncd, (void *) vfsp,
+                       CLONE_VM | CLONE_FS | CLONE_FILES);
+       if (pid < 0)
+               return -pid;
+       wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
+       return 0;
+}
+
+/*
+ * Ask xfssyncd to exit and wait for it to clear vfs_sync_task on its
+ * way out.  The wmb() orders the VFS_UMOUNT store before the wakeup.
+ */
+STATIC void
+linvfs_stop_syncd(
+       vfs_t                   *vfsp)
+{
+       vfsp->vfs_flag |= VFS_UMOUNT;
+       wmb();
+
+       wake_up_process(vfsp->vfs_sync_task);
+       wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
+}
+
+/*
+ * super_operations.put_super: stop the sync daemon, push everything
+ * out, unmount the XFS side and free the vfs_t.  If sync or unmount
+ * fails the vfs_t is deliberately leaked (logged as "left dangling")
+ * rather than freed while possibly still in use.
+ */
+STATIC void
+linvfs_put_super(
+       struct super_block      *sb)
+{
+       vfs_t                   *vfsp = LINVFS_GET_VFS(sb);
+       int                     error;
+
+       linvfs_stop_syncd(vfsp);
+       VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error);     /* macro sets "error" */
+       if (!error)
+               VFS_UNMOUNT(vfsp, 0, NULL, error);
+       if (error) {
+               printk("XFS unmount got error %d\n", error);
+               printk("%s: vfsp/0x%p left dangling!\n", __FUNCTION__, vfsp);
+               return;
+       }
+
+       vfs_deallocate(vfsp);
+}
+
+/*
+ * super_operations.write_super: nudge the log and superblock toward
+ * disk and clear the dirty flag.  Read-only mounts only clear s_dirt.
+ */
+STATIC void
+linvfs_write_super(
+       struct super_block      *sb)
+{
+       vfs_t                   *vfsp = LINVFS_GET_VFS(sb);
+       int                     error;
+
+       if (sb->s_flags & MS_RDONLY) {
+               sb->s_dirt = 0; /* paranoia */
+               return;
+       }
+       /* Push the log and superblock a little */
+       VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error);       /* macro sets "error"; ignored */
+       sb->s_dirt = 0;
+}
+
+/*
+ * super_operations.sync_fs: sync filesystem data, optionally waiting
+ * for completion.  Returns 0 or a negative errno (XFS's positive errno
+ * is negated for the VFS).  Under laptop mode an extra syncd pass is
+ * forced while the disk is known to be spun up.
+ */
+STATIC int
+linvfs_sync_super(
+       struct super_block      *sb,
+       int                     wait)
+{
+       vfs_t           *vfsp = LINVFS_GET_VFS(sb);
+       int             error;
+       int             flags = SYNC_FSDATA;
+
+       if (wait)
+               flags |= SYNC_WAIT;
+
+       VFS_SYNC(vfsp, flags, NULL, error);     /* macro sets "error" */
+       sb->s_dirt = 0;
+
+       if (unlikely(laptop_mode)) {
+               int     prev_sync_seq = vfsp->vfs_sync_seq;
+               /*
+                * The disk must be active because we're syncing.
+                * We schedule syncd now (now that the disk is
+                * active) instead of later (when it might not be).
+                */
+               wake_up_process(vfsp->vfs_sync_task);
+               /*
+                * We have to wait for the sync iteration to complete.
+                * If we don't, the disk activity caused by the sync
+                * will come after the sync is completed, and that
+                * triggers another sync from laptop mode.
+                */
+               wait_event(vfsp->vfs_wait_single_sync_task,
+                               vfsp->vfs_sync_seq != prev_sync_seq);
+       }
+
+       return -error;
+}
+
+/*
+ * super_operations.statfs: fill @statp from the XFS statvfs path.
+ * Returns 0 or a negative errno.
+ */
+STATIC int
+linvfs_statfs(
+       struct super_block      *sb,
+       struct kstatfs          *statp)
+{
+       vfs_t                   *vfsp = LINVFS_GET_VFS(sb);
+       int                     error;
+
+       VFS_STATVFS(vfsp, statp, NULL, error);  /* macro sets "error" */
+       return -error;
+}
+
+/*
+ * super_operations.remount_fs: re-parse the option string (update==1)
+ * and apply the resulting mount args.  Returns 0 or a negative errno.
+ */
+STATIC int
+linvfs_remount(
+       struct super_block      *sb,
+       int                     *flags,
+       char                    *options)
+{
+       vfs_t                   *vfsp = LINVFS_GET_VFS(sb);
+       struct xfs_mount_args   *args = xfs_args_allocate(sb);
+       int                     error;
+
+       VFS_PARSEARGS(vfsp, options, args, 1, error);   /* macro sets "error" */
+       if (!error)
+               VFS_MNTUPDATE(vfsp, flags, args, error);
+       kmem_free(args, sizeof(*args));
+       return -error;
+}
+
+/*
+ * super_operations.write_super_lockfs: quiesce the filesystem for a
+ * snapshot/freeze of the underlying device.
+ */
+STATIC void
+linvfs_freeze_fs(
+       struct super_block      *sb)
+{
+       VFS_FREEZE(LINVFS_GET_VFS(sb));
+}
+
+/*
+ * export_operations.get_parent (NFS export): look up ".." from @child
+ * via VOP_LOOKUP using a minimal on-stack dentry, and wrap the result
+ * in an anonymous dentry.  Returns the parent dentry or an ERR_PTR.
+ */
+STATIC struct dentry *
+linvfs_get_parent(
+       struct dentry           *child)
+{
+       int                     error;
+       vnode_t                 *vp, *cvp;
+       struct dentry           *parent;
+       struct inode            *ip = NULL;
+       struct dentry           dotdot;
+
+       /* fake just enough of a dentry to name ".." for the lookup */
+       dotdot.d_name.name = "..";
+       dotdot.d_name.len = 2;
+       dotdot.d_inode = 0;
+
+       cvp = NULL;
+       vp = LINVFS_GET_VP(child->d_inode);
+       VOP_LOOKUP(vp, &dotdot, &cvp, 0, NULL, NULL, error);    /* macro sets "error" */
+
+       if (!error) {
+               ASSERT(cvp);
+               ip = LINVFS_GET_IP(cvp);
+               if (!ip) {
+                       VN_RELE(cvp);
+                       return ERR_PTR(-EACCES);
+               }
+       }
+       if (error)
+               return ERR_PTR(-error);
+       /* d_alloc_anon consumes the inode reference on success */
+       parent = d_alloc_anon(ip);
+       if (!parent) {
+               VN_RELE(cvp);
+               parent = ERR_PTR(-ENOMEM);
+       }
+       return parent;
+}
+
+/*
+ * export_operations.get_dentry (NFS export): decode a two-word file
+ * handle (data[0] = inode number, data[1] = generation) into an xfs
+ * fid, look the inode up via VFS_VGET and wrap it in an anonymous
+ * dentry.  Returns the dentry or ERR_PTR(-ESTALE / -ENOMEM).
+ */
+STATIC struct dentry *
+linvfs_get_dentry(
+       struct super_block      *sb,
+       void                    *data)
+{
+       vnode_t                 *vp;
+       struct inode            *inode;
+       struct dentry           *result;
+       xfs_fid2_t              xfid;
+       vfs_t                   *vfsp = LINVFS_GET_VFS(sb);
+       int                     error;
+
+       xfid.fid_len = sizeof(xfs_fid2_t) - sizeof(xfid.fid_len);
+       xfid.fid_pad = 0;
+       xfid.fid_gen = ((__u32 *)data)[1];
+       xfid.fid_ino = ((__u32 *)data)[0];
+
+       VFS_VGET(vfsp, &vp, (fid_t *)&xfid, error);     /* macro sets "error" */
+       if (error || vp == NULL)
+               return ERR_PTR(-ESTALE) ;
+
+       inode = LINVFS_GET_IP(vp);
+       result = d_alloc_anon(inode);
+        if (!result) {
+               iput(inode);
+               return ERR_PTR(-ENOMEM);
+       }
+       return result;
+}
+
+/*
+ * super_operations.show_options: emit mount options into /proc/mounts.
+ * NOTE(review): returns "error" un-negated, unlike the sibling handlers
+ * that return -error — confirm VFS_SHOWARGS's sign convention.
+ */
+STATIC int
+linvfs_show_options(
+       struct seq_file         *m,
+       struct vfsmount         *mnt)
+{
+       struct vfs              *vfsp = LINVFS_GET_VFS(mnt->mnt_sb);
+       int                     error;
+
+       VFS_SHOWARGS(vfsp, m, error);   /* macro sets "error" */
+       return error;
+}
+
+/*
+ * quotactl_ops handlers: thin shims that translate the generic quota
+ * calls into VFS_QUOTACTL subcommands.  Each returns 0 or a negative
+ * errno (XFS's positive errno negated); VFS_QUOTACTL is a statement
+ * macro that assigns "error".
+ */
+
+/* .get_xstate: report quota subsystem state into @fqs. */
+STATIC int
+linvfs_getxstate(
+       struct super_block      *sb,
+       struct fs_quota_stat    *fqs)
+{
+       struct vfs              *vfsp = LINVFS_GET_VFS(sb);
+       int                     error;
+
+       VFS_QUOTACTL(vfsp, Q_XGETQSTAT, 0, (caddr_t)fqs, error);
+       return -error;
+}
+
+/* .set_xstate: turn quota enforcement/accounting on or off (@op). */
+STATIC int
+linvfs_setxstate(
+       struct super_block      *sb,
+       unsigned int            flags,
+       int                     op)
+{
+       struct vfs              *vfsp = LINVFS_GET_VFS(sb);
+       int                     error;
+
+       VFS_QUOTACTL(vfsp, op, 0, (caddr_t)&flags, error);
+       return -error;
+}
+
+/* .get_xquota: fetch limits/usage for one user or group id. */
+STATIC int
+linvfs_getxquota(
+       struct super_block      *sb,
+       int                     type,
+       qid_t                   id,
+       struct fs_disk_quota    *fdq)
+{
+       struct vfs              *vfsp = LINVFS_GET_VFS(sb);
+       int                     error, getmode;
+
+       getmode = (type == GRPQUOTA) ? Q_XGETGQUOTA : Q_XGETQUOTA;
+       VFS_QUOTACTL(vfsp, getmode, id, (caddr_t)fdq, error);
+       return -error;
+}
+
+/* .set_xquota: set limits for one user or group id. */
+STATIC int
+linvfs_setxquota(
+       struct super_block      *sb,
+       int                     type,
+       qid_t                   id,
+       struct fs_disk_quota    *fdq)
+{
+       struct vfs              *vfsp = LINVFS_GET_VFS(sb);
+       int                     error, setmode;
+
+       setmode = (type == GRPQUOTA) ? Q_XSETGQLIM : Q_XSETQLIM;
+       VFS_QUOTACTL(vfsp, setmode, id, (caddr_t)fdq, error);
+       return -error;
+}
+
+/*
+ * Fill a super_block at mount time: allocate the vfs_t, parse options,
+ * mount the XFS side, size the superblock fields from statvfs, install
+ * the root dentry and start the per-mount sync daemon.  Returns 0 or a
+ * negative errno; each failure label unwinds the steps before it.
+ */
+STATIC int
+linvfs_fill_super(
+       struct super_block      *sb,
+       void                    *data,
+       int                     silent)
+{
+       vnode_t                 *rootvp;
+       struct vfs              *vfsp = vfs_allocate();
+       struct xfs_mount_args   *args = xfs_args_allocate(sb);
+       struct kstatfs          statvfs;
+       int                     error, error2;
+
+       /* cross-link the generic and XFS views of this mount */
+       vfsp->vfs_super = sb;
+       LINVFS_SET_VFS(sb, vfsp);
+       if (sb->s_flags & MS_RDONLY)
+               vfsp->vfs_flag |= VFS_RDONLY;
+       bhv_insert_all_vfsops(vfsp);
+
+       VFS_PARSEARGS(vfsp, (char *)data, args, 0, error);      /* macro sets "error" */
+       if (error) {
+               bhv_remove_all_vfsops(vfsp, 1);
+               goto fail_vfsop;
+       }
+
+       sb_min_blocksize(sb, BBSIZE);
+       sb->s_export_op = &linvfs_export_ops;
+       sb->s_qcop = &linvfs_qops;
+       sb->s_op = &linvfs_sops;
+
+       VFS_MOUNT(vfsp, args, NULL, error);
+       if (error) {
+               bhv_remove_all_vfsops(vfsp, 1);
+               goto fail_vfsop;
+       }
+
+       /* size superblock fields from what the mounted fs reports */
+       VFS_STATVFS(vfsp, &statvfs, NULL, error);
+       if (error)
+               goto fail_unmount;
+
+       sb->s_dirt = 1;
+       sb->s_magic = statvfs.f_type;
+       sb->s_blocksize = statvfs.f_bsize;
+       sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1;        /* f_bsize is a power of two */
+       sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
+       set_posix_acl_flag(sb);
+
+       VFS_ROOT(vfsp, &rootvp, error);
+       if (error)
+               goto fail_unmount;
+
+       sb->s_root = d_alloc_root(LINVFS_GET_IP(rootvp));
+       if (!sb->s_root) {
+               error = ENOMEM;
+               goto fail_vnrele;
+       }
+       if (is_bad_inode(sb->s_root->d_inode)) {
+               error = EINVAL;
+               goto fail_vnrele;
+       }
+       if ((error = linvfs_start_syncd(vfsp)))
+               goto fail_vnrele;
+       vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);
+
+       kmem_free(args, sizeof(*args));
+       return 0;
+
+fail_vnrele:
+       /* dput drops the root reference if it made it into sb->s_root */
+       if (sb->s_root) {
+               dput(sb->s_root);
+               sb->s_root = NULL;
+       } else {
+               VN_RELE(rootvp);
+       }
+
+fail_unmount:
+       /* use error2 so the original failure code is preserved */
+       VFS_UNMOUNT(vfsp, 0, NULL, error2);
+
+fail_vfsop:
+       vfs_deallocate(vfsp);
+       kmem_free(args, sizeof(*args));
+       return -error;
+}
+
+/*
+ * file_system_type.get_sb: standard block-device mount entry point,
+ * deferring the real work to linvfs_fill_super().
+ */
+STATIC struct super_block *
+linvfs_get_sb(
+       struct file_system_type *fs_type,
+       int                     flags,
+       const char              *dev_name,
+       void                    *data)
+{
+       return get_sb_bdev(fs_type, flags, dev_name, data, linvfs_fill_super);
+}
+
+
+/* NFS export hooks (file-handle decode and parent lookup). */
+STATIC struct export_operations linvfs_export_ops = {
+       .get_parent             = linvfs_get_parent,
+       .get_dentry             = linvfs_get_dentry,
+};
+
+/* Superblock operations wired into the generic VFS. */
+STATIC struct super_operations linvfs_sops = {
+       .alloc_inode            = linvfs_alloc_inode,
+       .destroy_inode          = linvfs_destroy_inode,
+       .write_inode            = linvfs_write_inode,
+       .clear_inode            = linvfs_clear_inode,
+       .put_super              = linvfs_put_super,
+       .write_super            = linvfs_write_super,
+       .sync_fs                = linvfs_sync_super,
+       .write_super_lockfs     = linvfs_freeze_fs,
+       .statfs                 = linvfs_statfs,
+       .remount_fs             = linvfs_remount,
+       .show_options           = linvfs_show_options,
+};
+
+/* XFS-style (XQM) quotactl operations. */
+STATIC struct quotactl_ops linvfs_qops = {
+       .get_xstate             = linvfs_getxstate,
+       .set_xstate             = linvfs_setxstate,
+       .get_xquota             = linvfs_getxquota,
+       .set_xquota             = linvfs_setxquota,
+};
+
+/* Registration record for the "xfs" filesystem type. */
+STATIC struct file_system_type xfs_fs_type = {
+       .owner                  = THIS_MODULE,
+       .name                   = "xfs",
+       .get_sb                 = linvfs_get_sb,
+       .kill_sb                = kill_block_super,
+       .fs_flags               = FS_REQUIRES_DEV,
+};
+
+
+/*
+ * Module/built-in initialisation: set up tracing, the inode cache and
+ * the pagebuf subsystem, then register the "xfs" filesystem type.
+ * On failure, unwind in reverse order and return a negative errno.
+ */
+STATIC int __init
+init_xfs_fs( void )
+{
+       int                     error;
+       struct sysinfo          si;
+       static char             message[] __initdata = KERN_INFO \
+               XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";
+
+       printk(message);
+
+       si_meminfo(&si);
+       xfs_physmem = si.totalram;
+
+       ktrace_init(64);
+
+       error = init_inodecache();
+       if (error < 0)
+               goto undo_inodecache;
+
+       error = pagebuf_init();
+       if (error < 0)
+               goto undo_pagebuf;
+
+       vn_init();
+       xfs_init();
+       uuid_init();
+       vfs_initdmapi();
+       vfs_initquota();
+
+       error = register_filesystem(&xfs_fs_type);
+       if (error)
+               goto undo_register;
+       return 0;
+
+undo_register:
+       pagebuf_terminate();
+
+undo_pagebuf:
+       destroy_inodecache();
+
+undo_inodecache:
+       /* undo ktrace_init(); previously leaked on the error paths
+        * (exit_xfs_fs() already pairs it with ktrace_uninit()) */
+       ktrace_uninit();
+       return error;
+}
+
+/*
+ * Module unload: tear everything down in roughly the reverse order of
+ * init_xfs_fs().
+ */
+STATIC void __exit
+exit_xfs_fs( void )
+{
+       vfs_exitquota();
+       vfs_exitdmapi();
+       unregister_filesystem(&xfs_fs_type);
+       xfs_cleanup();
+       pagebuf_terminate();
+       destroy_inodecache();
+       ktrace_uninit();
+}
+
+module_init(init_xfs_fs);
+module_exit(exit_xfs_fs);
+
+MODULE_AUTHOR("Silicon Graphics, Inc.");
+MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
+MODULE_LICENSE("GPL");
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
new file mode 100644 (file)
index 0000000..0d3703d
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_SUPER_H__
+#define __XFS_SUPER_H__
+
+/*
+ * Feature-conditional hooks: each block defines real calls when the
+ * feature is configured in, and empty statements otherwise.
+ * NOTE(review): vfs_insertdmapi/vfs_insertquota expand using "vfsp",
+ * not their "vfs" parameter — they only work where the caller's local
+ * variable is named vfsp; confirm this is intentional.
+ */
+#ifdef CONFIG_XFS_DMAPI
+# define vfs_insertdmapi(vfs)  vfs_insertops(vfsp, &xfs_dmops)
+# define vfs_initdmapi()       dmapi_init()
+# define vfs_exitdmapi()       dmapi_uninit()
+#else
+# define vfs_insertdmapi(vfs)  do { } while (0)
+# define vfs_initdmapi()       do { } while (0)
+# define vfs_exitdmapi()       do { } while (0)
+#endif
+
+#ifdef CONFIG_XFS_QUOTA
+# define vfs_insertquota(vfs)  vfs_insertops(vfsp, &xfs_qmops)
+extern void xfs_qm_init(void);
+extern void xfs_qm_exit(void);
+# define vfs_initquota()       xfs_qm_init()
+# define vfs_exitquota()       xfs_qm_exit()
+#else
+# define vfs_insertquota(vfs)  do { } while (0)
+# define vfs_initquota()       do { } while (0)
+# define vfs_exitquota()       do { } while (0)
+#endif
+
+#ifdef CONFIG_XFS_POSIX_ACL
+# define XFS_ACL_STRING                "ACLs, "
+# define set_posix_acl_flag(sb)        ((sb)->s_flags |= MS_POSIXACL)
+#else
+# define XFS_ACL_STRING
+# define set_posix_acl_flag(sb)        do { } while (0)
+#endif
+
+#ifdef CONFIG_XFS_SECURITY
+# define XFS_SECURITY_STRING   "security attributes, "
+# define ENOSECURITY           0
+#else
+# define XFS_SECURITY_STRING
+# define ENOSECURITY           EOPNOTSUPP
+#endif
+
+#ifdef CONFIG_XFS_RT
+# define XFS_REALTIME_STRING   "realtime, "
+#else
+# define XFS_REALTIME_STRING
+#endif
+
+#if XFS_BIG_BLKNOS
+# if XFS_BIG_INUMS
+#  define XFS_BIGFS_STRING     "large block/inode numbers, "
+# else
+#  define XFS_BIGFS_STRING     "large block numbers, "
+# endif
+#else
+# define XFS_BIGFS_STRING
+#endif
+
+#ifdef CONFIG_XFS_TRACE
+# define XFS_TRACE_STRING      "tracing, "
+#else
+# define XFS_TRACE_STRING
+#endif
+
+#ifdef DEBUG
+# define XFS_DBG_STRING                "debug"
+#else
+# define XFS_DBG_STRING                "no debug"
+#endif
+
+/* Human-readable feature summary used in the boot/load banner. */
+#define XFS_BUILD_OPTIONS      XFS_ACL_STRING \
+                               XFS_SECURITY_STRING \
+                               XFS_REALTIME_STRING \
+                               XFS_BIGFS_STRING \
+                               XFS_TRACE_STRING \
+                               XFS_DBG_STRING /* DBG must be last */
+
+/* Stash/retrieve the XFS vfs_t in the generic super_block. */
+#define LINVFS_GET_VFS(s) \
+       (vfs_t *)((s)->s_fs_info)
+#define LINVFS_SET_VFS(s, vfsp) \
+       ((s)->s_fs_info = vfsp)
+
+struct xfs_inode;
+struct xfs_mount;
+struct xfs_buftarg;
+struct block_device;
+
+extern __uint64_t xfs_max_file_offset(unsigned int);
+
+extern void xfs_initialize_vnode(bhv_desc_t *, vnode_t *, bhv_desc_t *, int);
+
+extern void xfs_flush_inode(struct xfs_inode *);
+extern void xfs_flush_device(struct xfs_inode *);
+
+extern int  xfs_blkdev_get(struct xfs_mount *, const char *,
+                               struct block_device **);
+extern void xfs_blkdev_put(struct block_device *);
+
+#endif /* __XFS_SUPER_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c
new file mode 100644 (file)
index 0000000..570d1a9
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2001-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_rw.h"
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+
+
+static struct ctl_table_header *xfs_table_header;
+
+
+#ifdef CONFIG_PROC_FS
+STATIC int
+xfs_stats_clear_proc_handler(
+       ctl_table       *ctl,
+       int             write,
+       struct file     *filp,
+       void            *buffer,
+       size_t          *lenp)
+{
+       int             c, ret, *valp = ctl->data;
+       __uint32_t      vn_active;
+
+       ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp);
+
+       if (!ret && write && *valp) {
+               printk("XFS Clearing xfsstats\n");
+               for (c = 0; c < NR_CPUS; c++) {
+                       if (!cpu_possible(c)) continue;
+                       preempt_disable();
+                       /* save vn_active, it's a universal truth! */
+                       vn_active = per_cpu(xfsstats, c).vn_active;
+                       memset(&per_cpu(xfsstats, c), 0,
+                              sizeof(struct xfsstats));
+                       per_cpu(xfsstats, c).vn_active = vn_active;
+                       preempt_enable();
+               }
+               xfs_stats_clear = 0;
+       }
+
+       return ret;
+}
+#endif /* CONFIG_PROC_FS */
+
+STATIC ctl_table xfs_table[] = {
+       {XFS_RESTRICT_CHOWN, "restrict_chown", &xfs_params.restrict_chown.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL, 
+       &xfs_params.restrict_chown.min, &xfs_params.restrict_chown.max},
+
+       {XFS_SGID_INHERIT, "irix_sgid_inherit", &xfs_params.sgid_inherit.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL,
+       &xfs_params.sgid_inherit.min, &xfs_params.sgid_inherit.max},
+
+       {XFS_SYMLINK_MODE, "irix_symlink_mode", &xfs_params.symlink_mode.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL, 
+       &xfs_params.symlink_mode.min, &xfs_params.symlink_mode.max},
+
+       {XFS_PANIC_MASK, "panic_mask", &xfs_params.panic_mask.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL, 
+       &xfs_params.panic_mask.min, &xfs_params.panic_mask.max},
+
+       {XFS_ERRLEVEL, "error_level", &xfs_params.error_level.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL, 
+       &xfs_params.error_level.min, &xfs_params.error_level.max},
+
+       {XFS_SYNCD_TIMER, "xfssyncd_centisecs", &xfs_params.syncd_timer.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL, 
+       &xfs_params.syncd_timer.min, &xfs_params.syncd_timer.max},
+
+       {XFS_INHERIT_SYNC, "inherit_sync", &xfs_params.inherit_sync.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL,
+       &xfs_params.inherit_sync.min, &xfs_params.inherit_sync.max},
+
+       {XFS_INHERIT_NODUMP, "inherit_nodump", &xfs_params.inherit_nodump.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL,
+       &xfs_params.inherit_nodump.min, &xfs_params.inherit_nodump.max},
+
+       {XFS_INHERIT_NOATIME, "inherit_noatime", &xfs_params.inherit_noatim.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL,
+       &xfs_params.inherit_noatim.min, &xfs_params.inherit_noatim.max},
+       
+       {XFS_BUF_TIMER, "xfsbufd_centisecs", &xfs_params.xfs_buf_timer.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL,
+       &xfs_params.xfs_buf_timer.min, &xfs_params.xfs_buf_timer.max},
+
+       {XFS_BUF_AGE, "age_buffer_centisecs", &xfs_params.xfs_buf_age.val,
+       sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+       &sysctl_intvec, NULL,
+       &xfs_params.xfs_buf_age.min, &xfs_params.xfs_buf_age.max},
+
+       /* please keep this the last entry */
+#ifdef CONFIG_PROC_FS
+       {XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear.val,
+       sizeof(int), 0644, NULL, &xfs_stats_clear_proc_handler,
+       &sysctl_intvec, NULL, 
+       &xfs_params.stats_clear.min, &xfs_params.stats_clear.max},
+#endif /* CONFIG_PROC_FS */
+
+       {0}
+};
+
+STATIC ctl_table xfs_dir_table[] = {
+       {FS_XFS, "xfs", NULL, 0, 0555, xfs_table},
+       {0}
+};
+
+STATIC ctl_table xfs_root_table[] = {
+       {CTL_FS, "fs",  NULL, 0, 0555, xfs_dir_table},
+       {0}
+};
+
+void
+xfs_sysctl_register(void)
+{
+       xfs_table_header = register_sysctl_table(xfs_root_table, 1);
+}
+
+void
+xfs_sysctl_unregister(void)
+{
+       if (xfs_table_header)
+               unregister_sysctl_table(xfs_table_header);
+}
diff --git a/fs/xfs/linux-2.6/xfs_sysctl.h b/fs/xfs/linux-2.6/xfs_sysctl.h
new file mode 100644 (file)
index 0000000..872014b
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2001-2004 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#ifndef __XFS_SYSCTL_H__
+#define __XFS_SYSCTL_H__
+
+#include <linux/sysctl.h>
+
+/*
+ * Tunable xfs parameters
+ */
+
+typedef struct xfs_sysctl_val {
+       int min;
+       int val;
+       int max;
+} xfs_sysctl_val_t;
+
+typedef struct xfs_param {
+       xfs_sysctl_val_t restrict_chown;/* Root/non-root can give away files.*/
+       xfs_sysctl_val_t sgid_inherit;  /* Inherit S_ISGID if process' GID is
+                                        * not a member of parent dir GID. */
+       xfs_sysctl_val_t symlink_mode;  /* Link creat mode affected by umask */
+       xfs_sysctl_val_t panic_mask;    /* bitmask to cause panic on errors. */
+       xfs_sysctl_val_t error_level;   /* Degree of reporting for problems  */
+       xfs_sysctl_val_t syncd_timer;   /* Interval between xfssyncd wakeups */
+       xfs_sysctl_val_t stats_clear;   /* Reset all XFS statistics to zero. */
+       xfs_sysctl_val_t inherit_sync;  /* Inherit the "sync" inode flag. */
+       xfs_sysctl_val_t inherit_nodump;/* Inherit the "nodump" inode flag. */
+       xfs_sysctl_val_t inherit_noatim;/* Inherit the "noatime" inode flag. */
+       xfs_sysctl_val_t xfs_buf_timer; /* Interval between xfsbufd wakeups. */
+       xfs_sysctl_val_t xfs_buf_age;   /* Metadata buffer age before flush. */
+} xfs_param_t;
+
+/*
+ * xfs_error_level:
+ *
+ * How much error reporting will be done when internal problems are
+ * encountered.  These problems normally return an EFSCORRUPTED to their
+ * caller, with no other information reported.
+ *
+ * 0   No error reports
+ * 1   Report EFSCORRUPTED errors that will cause a filesystem shutdown
+ * 5   Report all EFSCORRUPTED errors (all of the above errors, plus any
+ *     additional errors that are known to not cause shutdowns)
+ *
+ * xfs_panic_mask bit 0x8 turns the error reports into panics
+ */
+
+enum {
+       /* XFS_REFCACHE_SIZE = 1 */
+       /* XFS_REFCACHE_PURGE = 2 */
+       XFS_RESTRICT_CHOWN = 3,
+       XFS_SGID_INHERIT = 4,
+       XFS_SYMLINK_MODE = 5,
+       XFS_PANIC_MASK = 6,
+       XFS_ERRLEVEL = 7,
+       XFS_SYNCD_TIMER = 8,
+       /* XFS_PROBE_DMAPI = 9 */
+       /* XFS_PROBE_IOOPS = 10 */
+       /* XFS_PROBE_QUOTA = 11 */
+       XFS_STATS_CLEAR = 12,
+       XFS_INHERIT_SYNC = 13,
+       XFS_INHERIT_NODUMP = 14,
+       XFS_INHERIT_NOATIME = 15,
+       XFS_BUF_TIMER = 16,
+       XFS_BUF_AGE = 17,
+       /* XFS_IO_BYPASS = 18 */
+};
+
+extern xfs_param_t     xfs_params;
+
+#ifdef CONFIG_SYSCTL
+extern void xfs_sysctl_register(void);
+extern void xfs_sysctl_unregister(void);
+#else
+# define xfs_sysctl_register()         do { } while (0)
+# define xfs_sysctl_unregister()       do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
+#endif /* __XFS_SYSCTL_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_vfs.c b/fs/xfs/linux-2.6/xfs_vfs.c
new file mode 100644 (file)
index 0000000..897b9dc
--- /dev/null
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_macros.h"
+#include "xfs_inum.h"
+#include "xfs_log.h"
+#include "xfs_clnt.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir.h"
+#include "xfs_dir2.h"
+#include "xfs_imap.h"
+#include "xfs_alloc.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_quota.h"
+
+int
+vfs_mount(
+       struct bhv_desc         *bdp,
+       struct xfs_mount_args   *args,
+       struct cred             *cr)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_mount)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_mount)(next, args, cr));
+}
+
+int
+vfs_parseargs(
+       struct bhv_desc         *bdp,
+       char                    *s,
+       struct xfs_mount_args   *args,
+       int                     f)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_parseargs)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_parseargs)(next, s, args, f));
+}
+
+int
+vfs_showargs(
+       struct bhv_desc         *bdp,
+       struct seq_file         *m)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_showargs)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_showargs)(next, m));
+}
+
+int
+vfs_unmount(
+       struct bhv_desc         *bdp,
+       int                     fl,
+       struct cred             *cr)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_unmount)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_unmount)(next, fl, cr));
+}
+
+int
+vfs_mntupdate(
+       struct bhv_desc         *bdp,
+       int                     *fl,
+       struct xfs_mount_args   *args)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_mntupdate)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_mntupdate)(next, fl, args));
+}
+
+int
+vfs_root(
+       struct bhv_desc         *bdp,
+       struct vnode            **vpp)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_root)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_root)(next, vpp));
+}
+
+int
+vfs_statvfs(
+       struct bhv_desc         *bdp,
+       xfs_statfs_t            *sp,
+       struct vnode            *vp)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_statvfs)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_statvfs)(next, sp, vp));
+}
+
+int
+vfs_sync(
+       struct bhv_desc         *bdp,
+       int                     fl,
+       struct cred             *cr)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_sync)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_sync)(next, fl, cr));
+}
+
+int
+vfs_vget(
+       struct bhv_desc         *bdp,
+       struct vnode            **vpp,
+       struct fid              *fidp)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_vget)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_vget)(next, vpp, fidp));
+}
+
+int
+vfs_dmapiops(
+       struct bhv_desc         *bdp,
+       caddr_t                 addr)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_dmapiops)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_dmapiops)(next, addr));
+}
+
+int
+vfs_quotactl(
+       struct bhv_desc         *bdp,
+       int                     cmd,
+       int                     id,
+       caddr_t                 addr)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_quotactl)
+               next = BHV_NEXT(next);
+       return ((*bhvtovfsops(next)->vfs_quotactl)(next, cmd, id, addr));
+}
+
+void
+vfs_init_vnode(
+       struct bhv_desc         *bdp,
+       struct vnode            *vp,
+       struct bhv_desc         *bp,
+       int                     unlock)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_init_vnode)
+               next = BHV_NEXT(next);
+       ((*bhvtovfsops(next)->vfs_init_vnode)(next, vp, bp, unlock));
+}
+
+void
+vfs_force_shutdown(
+       struct bhv_desc         *bdp,
+       int                     fl,
+       char                    *file,
+       int                     line)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_force_shutdown)
+               next = BHV_NEXT(next);
+       ((*bhvtovfsops(next)->vfs_force_shutdown)(next, fl, file, line));
+}
+
+void
+vfs_freeze(
+       struct bhv_desc         *bdp)
+{
+       struct bhv_desc         *next = bdp;
+
+       ASSERT(next);
+       while (! (bhvtovfsops(next))->vfs_freeze)
+               next = BHV_NEXT(next);
+       ((*bhvtovfsops(next)->vfs_freeze)(next));
+}
+
+vfs_t *
+vfs_allocate( void )
+{
+       struct vfs              *vfsp;
+
+       vfsp = kmem_zalloc(sizeof(vfs_t), KM_SLEEP);
+       bhv_head_init(VFS_BHVHEAD(vfsp), "vfs");
+       init_waitqueue_head(&vfsp->vfs_wait_sync_task);
+       init_waitqueue_head(&vfsp->vfs_wait_single_sync_task);
+       return vfsp;
+}
+
+void
+vfs_deallocate(
+       struct vfs              *vfsp)
+{
+       bhv_head_destroy(VFS_BHVHEAD(vfsp));
+       kmem_free(vfsp, sizeof(vfs_t));
+}
+
+void
+vfs_insertops(
+       struct vfs              *vfsp,
+       struct bhv_vfsops       *vfsops)
+{
+       struct bhv_desc         *bdp;
+
+       bdp = kmem_alloc(sizeof(struct bhv_desc), KM_SLEEP);
+       bhv_desc_init(bdp, NULL, vfsp, vfsops);
+       bhv_insert(&vfsp->vfs_bh, bdp);
+}
+
+void
+vfs_insertbhv(
+       struct vfs              *vfsp,
+       struct bhv_desc         *bdp,
+       struct vfsops           *vfsops,
+       void                    *mount)
+{
+       bhv_desc_init(bdp, mount, vfsp, vfsops);
+       bhv_insert_initial(&vfsp->vfs_bh, bdp);
+}
+
+void
+bhv_remove_vfsops(
+       struct vfs              *vfsp,
+       int                     pos)
+{
+       struct bhv_desc         *bhv;
+
+       bhv = bhv_lookup_range(&vfsp->vfs_bh, pos, pos);
+       if (!bhv)
+               return;
+       bhv_remove(&vfsp->vfs_bh, bhv);
+       kmem_free(bhv, sizeof(*bhv));
+}
+
+void
+bhv_remove_all_vfsops(
+       struct vfs              *vfsp,
+       int                     freebase)
+{
+       struct xfs_mount        *mp;
+
+       bhv_remove_vfsops(vfsp, VFS_POSITION_QM);
+       bhv_remove_vfsops(vfsp, VFS_POSITION_DM);
+       if (!freebase)
+               return;
+       mp = XFS_BHVTOM(bhv_lookup(VFS_BHVHEAD(vfsp), &xfs_vfsops));
+       VFS_REMOVEBHV(vfsp, &mp->m_bhv);
+       xfs_mount_free(mp, 0);
+}
+
+void
+bhv_insert_all_vfsops(
+       struct vfs              *vfsp)
+{
+       struct xfs_mount        *mp;
+
+       mp = xfs_mount_init();
+       vfs_insertbhv(vfsp, &mp->m_bhv, &xfs_vfsops, mp);
+       vfs_insertdmapi(vfsp);
+       vfs_insertquota(vfsp);
+}
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
new file mode 100644 (file)
index 0000000..3c7ca84
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+#ifndef __XFS_VFS_H__
+#define __XFS_VFS_H__
+
+#include <linux/vfs.h>
+#include "xfs_fs.h"
+
+struct fid;
+struct cred;
+struct vnode;
+struct kstatfs;
+struct seq_file;
+struct super_block;
+struct xfs_mount_args;
+
+typedef struct kstatfs xfs_statfs_t;
+
+typedef struct vfs {
+       u_int                   vfs_flag;       /* flags */
+       xfs_fsid_t              vfs_fsid;       /* file system ID */
+       xfs_fsid_t              *vfs_altfsid;   /* An ID fixed for life of FS */
+       bhv_head_t              vfs_bh;         /* head of vfs behavior chain */
+       struct super_block      *vfs_super;     /* Linux superblock structure */
+       struct task_struct      *vfs_sync_task; /* xfssyncd process */
+       int                     vfs_sync_seq;   /* xfssyncd generation number */
+       wait_queue_head_t       vfs_wait_single_sync_task;
+       wait_queue_head_t       vfs_wait_sync_task;
+} vfs_t;
+
+#define vfs_fbhv               vfs_bh.bh_first /* 1st on vfs behavior chain */
+
+#define bhvtovfs(bdp)          ( (struct vfs *)BHV_VOBJ(bdp) )
+#define bhvtovfsops(bdp)       ( (struct vfsops *)BHV_OPS(bdp) )
+#define VFS_BHVHEAD(vfs)       ( &(vfs)->vfs_bh )
+#define VFS_REMOVEBHV(vfs, bdp)        ( bhv_remove(VFS_BHVHEAD(vfs), bdp) )
+
+#define VFS_POSITION_BASE      BHV_POSITION_BASE       /* chain bottom */
+#define VFS_POSITION_TOP       BHV_POSITION_TOP        /* chain top */
+#define VFS_POSITION_INVALID   BHV_POSITION_INVALID    /* invalid pos. num */
+
+typedef enum {
+       VFS_BHV_UNKNOWN,        /* not specified */
+       VFS_BHV_XFS,            /* xfs */
+       VFS_BHV_DM,             /* data migration */
+       VFS_BHV_QM,             /* quota manager */
+       VFS_BHV_IO,             /* IO path */
+       VFS_BHV_END             /* housekeeping end-of-range */
+} vfs_bhv_t;
+
+#define VFS_POSITION_XFS       (BHV_POSITION_BASE)
+#define VFS_POSITION_DM                (VFS_POSITION_BASE+10)
+#define VFS_POSITION_QM                (VFS_POSITION_BASE+20)
+#define VFS_POSITION_IO                (VFS_POSITION_BASE+30)
+
+#define VFS_RDONLY             0x0001  /* read-only vfs */
+#define VFS_GRPID              0x0002  /* group-ID assigned from directory */
+#define VFS_DMI                        0x0004  /* filesystem has the DMI enabled */
+#define VFS_UMOUNT             0x0008  /* unmount in progress */
+#define VFS_END                        0x0008  /* max flag */
+
+#define SYNC_ATTR              0x0001  /* sync attributes */
+#define SYNC_CLOSE             0x0002  /* close file system down */
+#define SYNC_DELWRI            0x0004  /* look at delayed writes */
+#define SYNC_WAIT              0x0008  /* wait for i/o to complete */
+#define SYNC_BDFLUSH           0x0010  /* BDFLUSH is calling -- don't block */
+#define SYNC_FSDATA            0x0020  /* flush fs data (e.g. superblocks) */
+#define SYNC_REFCACHE          0x0040  /* prune some of the nfs ref cache */
+#define SYNC_REMOUNT           0x0080  /* remount readonly, no dummy LRs */
+
+typedef int    (*vfs_mount_t)(bhv_desc_t *,
+                               struct xfs_mount_args *, struct cred *);
+typedef int    (*vfs_parseargs_t)(bhv_desc_t *, char *,
+                               struct xfs_mount_args *, int);
+typedef        int     (*vfs_showargs_t)(bhv_desc_t *, struct seq_file *);
+typedef int    (*vfs_unmount_t)(bhv_desc_t *, int, struct cred *);
+typedef int    (*vfs_mntupdate_t)(bhv_desc_t *, int *,
+                               struct xfs_mount_args *);
+typedef int    (*vfs_root_t)(bhv_desc_t *, struct vnode **);
+typedef int    (*vfs_statvfs_t)(bhv_desc_t *, xfs_statfs_t *, struct vnode *);
+typedef int    (*vfs_sync_t)(bhv_desc_t *, int, struct cred *);
+typedef int    (*vfs_vget_t)(bhv_desc_t *, struct vnode **, struct fid *);
+typedef int    (*vfs_dmapiops_t)(bhv_desc_t *, caddr_t);
+typedef int    (*vfs_quotactl_t)(bhv_desc_t *, int, int, caddr_t);
+typedef void   (*vfs_init_vnode_t)(bhv_desc_t *,
+                               struct vnode *, bhv_desc_t *, int);
+typedef void   (*vfs_force_shutdown_t)(bhv_desc_t *, int, char *, int);
+typedef void   (*vfs_freeze_t)(bhv_desc_t *);
+
+typedef struct vfsops {
+       bhv_position_t          vf_position;    /* behavior chain position */
+       vfs_mount_t             vfs_mount;      /* mount file system */
+       vfs_parseargs_t         vfs_parseargs;  /* parse mount options */
+       vfs_showargs_t          vfs_showargs;   /* unparse mount options */
+       vfs_unmount_t           vfs_unmount;    /* unmount file system */
+       vfs_mntupdate_t         vfs_mntupdate;  /* update file system options */
+       vfs_root_t              vfs_root;       /* get root vnode */
+       vfs_statvfs_t           vfs_statvfs;    /* file system statistics */
+       vfs_sync_t              vfs_sync;       /* flush files */
+       vfs_vget_t              vfs_vget;       /* get vnode from fid */
+       vfs_dmapiops_t          vfs_dmapiops;   /* data migration */
+       vfs_quotactl_t          vfs_quotactl;   /* disk quota */
+       vfs_init_vnode_t        vfs_init_vnode; /* initialize a new vnode */
+       vfs_force_shutdown_t    vfs_force_shutdown;     /* crash and burn */
+       vfs_freeze_t            vfs_freeze;     /* freeze fs for snapshot */
+} vfsops_t;
+
+/*
+ * VFS's.  Operates on vfs structure pointers (starts at bhv head).
+ */
+#define VHEAD(v)                       ((v)->vfs_fbhv)
+#define VFS_MOUNT(v, ma,cr, rv)                ((rv) = vfs_mount(VHEAD(v), ma,cr))
+#define VFS_PARSEARGS(v, o,ma,f, rv)   ((rv) = vfs_parseargs(VHEAD(v), o,ma,f))
+#define VFS_SHOWARGS(v, m, rv)         ((rv) = vfs_showargs(VHEAD(v), m))
+#define VFS_UNMOUNT(v, f, cr, rv)      ((rv) = vfs_unmount(VHEAD(v), f,cr))
+#define VFS_MNTUPDATE(v, fl, args, rv) ((rv) = vfs_mntupdate(VHEAD(v), fl, args))
+#define VFS_ROOT(v, vpp, rv)           ((rv) = vfs_root(VHEAD(v), vpp))
+#define VFS_STATVFS(v, sp,vp, rv)      ((rv) = vfs_statvfs(VHEAD(v), sp,vp))
+#define VFS_SYNC(v, flag,cr, rv)       ((rv) = vfs_sync(VHEAD(v), flag,cr))
+#define VFS_VGET(v, vpp,fidp, rv)      ((rv) = vfs_vget(VHEAD(v), vpp,fidp))
+#define VFS_DMAPIOPS(v, p, rv)         ((rv) = vfs_dmapiops(VHEAD(v), p))
+#define VFS_QUOTACTL(v, c,id,p, rv)    ((rv) = vfs_quotactl(VHEAD(v), c,id,p))
+#define VFS_INIT_VNODE(v, vp,b,ul)     ( vfs_init_vnode(VHEAD(v), vp,b,ul) )
+#define VFS_FORCE_SHUTDOWN(v, fl,f,l)  ( vfs_force_shutdown(VHEAD(v), fl,f,l) )
+#define VFS_FREEZE(v)                  ( vfs_freeze(VHEAD(v)) )
+
+/*
+ * PVFS's.  Operates on behavior descriptor pointers.
+ */
+#define PVFS_MOUNT(b, ma,cr, rv)       ((rv) = vfs_mount(b, ma,cr))
+#define PVFS_PARSEARGS(b, o,ma,f, rv)  ((rv) = vfs_parseargs(b, o,ma,f))
+#define PVFS_SHOWARGS(b, m, rv)                ((rv) = vfs_showargs(b, m))
+#define PVFS_UNMOUNT(b, f,cr, rv)      ((rv) = vfs_unmount(b, f,cr))
+#define PVFS_MNTUPDATE(b, fl, args, rv)        ((rv) = vfs_mntupdate(b, fl, args))
+#define PVFS_ROOT(b, vpp, rv)          ((rv) = vfs_root(b, vpp))
+#define PVFS_STATVFS(b, sp,vp, rv)     ((rv) = vfs_statvfs(b, sp,vp))
+#define PVFS_SYNC(b, flag,cr, rv)      ((rv) = vfs_sync(b, flag,cr))
+#define PVFS_VGET(b, vpp,fidp, rv)     ((rv) = vfs_vget(b, vpp,fidp))
+#define PVFS_DMAPIOPS(b, p, rv)                ((rv) = vfs_dmapiops(b, p))
+#define PVFS_QUOTACTL(b, c,id,p, rv)   ((rv) = vfs_quotactl(b, c,id,p))
+#define PVFS_INIT_VNODE(b, vp,b2,ul)   ( vfs_init_vnode(b, vp,b2,ul) )
+#define PVFS_FORCE_SHUTDOWN(b, fl,f,l) ( vfs_force_shutdown(b, fl,f,l) )
+#define PVFS_FREEZE(b)                 ( vfs_freeze(b) )
+
+extern int vfs_mount(bhv_desc_t *, struct xfs_mount_args *, struct cred *);
+extern int vfs_parseargs(bhv_desc_t *, char *, struct xfs_mount_args *, int);
+extern int vfs_showargs(bhv_desc_t *, struct seq_file *);
+extern int vfs_unmount(bhv_desc_t *, int, struct cred *);
+extern int vfs_mntupdate(bhv_desc_t *, int *, struct xfs_mount_args *);
+extern int vfs_root(bhv_desc_t *, struct vnode **);
+extern int vfs_statvfs(bhv_desc_t *, xfs_statfs_t *, struct vnode *);
+extern int vfs_sync(bhv_desc_t *, int, struct cred *);
+extern int vfs_vget(bhv_desc_t *, struct vnode **, struct fid *);
+extern int vfs_dmapiops(bhv_desc_t *, caddr_t);
+extern int vfs_quotactl(bhv_desc_t *, int, int, caddr_t);
+extern void vfs_init_vnode(bhv_desc_t *, struct vnode *, bhv_desc_t *, int);
+extern void vfs_force_shutdown(bhv_desc_t *, int, char *, int);
+extern void vfs_freeze(bhv_desc_t *);
+
+typedef struct bhv_vfsops {
+       struct vfsops           bhv_common;
+       void *                  bhv_custom;
+} bhv_vfsops_t;
+
+#define vfs_bhv_lookup(v, id)  ( bhv_lookup_range(&(v)->vfs_bh, (id), (id)) )
+#define vfs_bhv_custom(b)      ( ((bhv_vfsops_t *)BHV_OPS(b))->bhv_custom )
+#define vfs_bhv_set_custom(b,o)        ( (b)->bhv_custom = (void *)(o))
+#define vfs_bhv_clr_custom(b)  ( (b)->bhv_custom = NULL )
+
+extern vfs_t *vfs_allocate(void);
+extern void vfs_deallocate(vfs_t *);
+extern void vfs_insertops(vfs_t *, bhv_vfsops_t *);
+extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *);
+
+extern void bhv_insert_all_vfsops(struct vfs *);
+extern void bhv_remove_all_vfsops(struct vfs *, int);
+extern void bhv_remove_vfsops(struct vfs *, int);
+
+#endif /* __XFS_VFS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_vnode.c b/fs/xfs/linux-2.6/xfs_vnode.c
new file mode 100644 (file)
index 0000000..9240efb
--- /dev/null
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ */
+
+#include "xfs.h"
+
+
+uint64_t vn_generation;                /* vnode generation number */
+spinlock_t vnumber_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * Dedicated vnode inactive/reclaim sync semaphores.
+ * Prime number of hash buckets since address is used as the key.
+ */
+#define NVSYNC                  37
+#define vptosync(v)             (&vsync[((unsigned long)v) % NVSYNC])
+sv_t vsync[NVSYNC];
+
+/*
+ * Translate stat(2) file types to vnode types and vice versa.
+ * Aware of numeric order of S_IFMT and vnode type values.
+ */
+enum vtype iftovt_tab[] = {
+       VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
+       VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
+};
+
+u_short vttoif_tab[] = {
+       0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO, 0, S_IFSOCK
+};
+
+
+void
+vn_init(void)
+{
+       register sv_t *svp;
+       register int i;
+
+       for (svp = vsync, i = 0; i < NVSYNC; i++, svp++)
+               init_sv(svp, SV_DEFAULT, "vsy", i);
+}
+
+/*
+ * Clean a vnode of filesystem-specific data and prepare it for reuse.
+ */
+STATIC int
+vn_reclaim(
+       struct vnode    *vp)
+{
+       int             error;
+
+       XFS_STATS_INC(vn_reclaim);
+       vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address);
+
+       /*
+        * Only make the VOP_RECLAIM call if there are behaviors
+        * to call.
+        */
+       if (vp->v_fbhv) {
+               VOP_RECLAIM(vp, error);
+               if (error)
+                       return -error;
+       }
+       ASSERT(vp->v_fbhv == NULL);
+
+       VN_LOCK(vp);
+       vp->v_flag &= (VRECLM|VWAIT);
+       VN_UNLOCK(vp, 0);
+
+       vp->v_type = VNON;
+       vp->v_fbhv = NULL;
+
+#ifdef XFS_VNODE_TRACE
+       ktrace_free(vp->v_trace);
+       vp->v_trace = NULL;
+#endif
+
+       return 0;
+}
+
+STATIC void
+vn_wakeup(
+       struct vnode    *vp)
+{
+       VN_LOCK(vp);
+       if (vp->v_flag & VWAIT)
+               sv_broadcast(vptosync(vp));
+       vp->v_flag &= ~(VRECLM|VWAIT|VMODIFIED);
+       VN_UNLOCK(vp, 0);
+}
+
+int
+vn_wait(
+       struct vnode    *vp)
+{
+       VN_LOCK(vp);
+       if (vp->v_flag & (VINACT | VRECLM)) {
+               vp->v_flag |= VWAIT;
+               sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
+               return 1;
+       }
+       VN_UNLOCK(vp, 0);
+       return 0;
+}
+
+struct vnode *
+vn_initialize(
+       struct inode    *inode)
+{
+       struct vnode    *vp = LINVFS_GET_VP(inode);
+
+       XFS_STATS_INC(vn_active);
+       XFS_STATS_INC(vn_alloc);
+
+       vp->v_flag = VMODIFIED;
+       spinlock_init(&vp->v_lock, "v_lock");
+
+       spin_lock(&vnumber_lock);
+       if (!++vn_generation)   /* v_number shouldn't be zero */
+               vn_generation++;
+       vp->v_number = vn_generation;
+       spin_unlock(&vnumber_lock);
+
+       ASSERT(VN_CACHED(vp) == 0);
+
+       /* Initialize the first behavior and the behavior chain head. */
+       vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode");
+
+#ifdef XFS_VNODE_TRACE
+       vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
+       printk("Allocated VNODE_TRACE at 0x%p\n", vp->v_trace);
+#endif /* XFS_VNODE_TRACE */
+
+       vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address);
+       return vp;
+}
+
+/*
+ * Get a reference on a vnode.
+ */
+vnode_t *
+vn_get(
+       struct vnode    *vp,
+       vmap_t          *vmap)
+{
+       struct inode    *inode;
+
+       XFS_STATS_INC(vn_get);
+       inode = LINVFS_GET_IP(vp);
+       if (inode->i_state & I_FREEING)
+               return NULL;
+
+       inode = ilookup(vmap->v_vfsp->vfs_super, vmap->v_ino);
+       if (!inode)     /* Inode not present */
+               return NULL;
+
+       vn_trace_exit(vp, "vn_get", (inst_t *)__return_address);
+
+       return vp;
+}
+
+/*
+ * Revalidate the Linux inode from the vnode.
+ */
+int
+vn_revalidate(
+       struct vnode    *vp)
+{
+       struct inode    *inode;
+       vattr_t         va;
+       int             error;
+
+       vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address);
+       ASSERT(vp->v_fbhv != NULL);
+
+       va.va_mask = XFS_AT_STAT|XFS_AT_XFLAGS;
+       VOP_GETATTR(vp, &va, 0, NULL, error);
+       if (!error) {
+               inode = LINVFS_GET_IP(vp);
+               inode->i_mode       = VTTOIF(va.va_type) | va.va_mode;
+               inode->i_nlink      = va.va_nlink;
+               inode->i_uid        = va.va_uid;
+               inode->i_gid        = va.va_gid;
+               inode->i_blocks     = va.va_nblocks;
+               inode->i_mtime      = va.va_mtime;
+               inode->i_ctime      = va.va_ctime;
+               inode->i_atime      = va.va_atime;
+               if (va.va_xflags & XFS_XFLAG_IMMUTABLE)
+                       inode->i_flags |= S_IMMUTABLE;
+               else
+                       inode->i_flags &= ~S_IMMUTABLE;
+               if (va.va_xflags & XFS_XFLAG_APPEND)
+                       inode->i_flags |= S_APPEND;
+               else
+                       inode->i_flags &= ~S_APPEND;
+               if (va.va_xflags & XFS_XFLAG_SYNC)
+                       inode->i_flags |= S_SYNC;
+               else
+                       inode->i_flags &= ~S_SYNC;
+               if (va.va_xflags & XFS_XFLAG_NOATIME)
+                       inode->i_flags |= S_NOATIME;
+               else
+                       inode->i_flags &= ~S_NOATIME;
+               VUNMODIFY(vp);
+       }
+       return -error;
+}
+
+/*
+ * purge a vnode from the cache
+ * At this point the vnode is guaranteed to have no references (vn_count == 0)
+ * The caller has to make sure that there are no ways someone could
+ * get a handle (via vn_get) on the vnode (usually done via a mount/vfs lock).
+ */
+void
+vn_purge(
+       struct vnode    *vp,
+       vmap_t          *vmap)
+{
+       vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address);
+
+again:
+       /*
+        * Check whether vp has already been reclaimed since our caller
+        * sampled its version while holding a filesystem cache lock that
+        * its VOP_RECLAIM function acquires.
+        */
+       VN_LOCK(vp);
+       if (vp->v_number != vmap->v_number) {
+               VN_UNLOCK(vp, 0);
+               return;
+       }
+
+       /*
+        * If vp is being reclaimed or inactivated, wait until it is inert,
+        * then proceed.  Can't assume that vnode is actually reclaimed
+        * just because the reclaimed flag is asserted -- a vn_alloc
+        * reclaim can fail.
+        */
+       if (vp->v_flag & (VINACT | VRECLM)) {
+               ASSERT(vn_count(vp) == 0);
+               vp->v_flag |= VWAIT;
+               sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
+               goto again;
+       }
+
+       /*
+        * Another process could have raced in and gotten this vnode...
+        */
+       if (vn_count(vp) > 0) {
+               VN_UNLOCK(vp, 0);
+               return;
+       }
+
+       XFS_STATS_DEC(vn_active);
+       vp->v_flag |= VRECLM;
+       VN_UNLOCK(vp, 0);
+
+       /*
+        * Call VOP_RECLAIM and clean vp. The FSYNC_INVAL flag tells
+        * vp's filesystem to flush and invalidate all cached resources.
+        * When vn_reclaim returns, vp should have no private data,
+        * either in a system cache or attached to v_data.
+        */
+       if (vn_reclaim(vp) != 0)
+               panic("vn_purge: cannot reclaim");
+
+       /*
+        * Wakeup anyone waiting for vp to be reclaimed.
+        */
+       vn_wakeup(vp);
+}
+
+/*
+ * Add a reference to a referenced vnode.
+ */
+struct vnode *
+vn_hold(
+       struct vnode    *vp)
+{
+       struct inode    *inode;
+
+       XFS_STATS_INC(vn_hold);
+
+       VN_LOCK(vp);
+       inode = igrab(LINVFS_GET_IP(vp));
+       ASSERT(inode);
+       VN_UNLOCK(vp, 0);
+
+       return vp;
+}
+
+/*
+ *  Call VOP_INACTIVE on last reference.
+ */
+void
+vn_rele(
+       struct vnode    *vp)
+{
+       int             vcnt;
+       int             cache;
+
+       XFS_STATS_INC(vn_rele);
+
+       VN_LOCK(vp);
+
+       vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
+       vcnt = vn_count(vp);
+
+       /*
+        * Since we always get called from put_inode we know
+        * that i_count won't be decremented after we
+        * return.
+        */
+       if (!vcnt) {
+               /*
+                * As soon as we turn this on, noone can find us in vn_get
+                * until we turn off VINACT or VRECLM
+                */
+               vp->v_flag |= VINACT;
+               VN_UNLOCK(vp, 0);
+
+               /*
+                * Do not make the VOP_INACTIVE call if there
+                * are no behaviors attached to the vnode to call.
+                */
+               if (vp->v_fbhv)
+                       VOP_INACTIVE(vp, NULL, cache);
+
+               VN_LOCK(vp);
+               if (vp->v_flag & VWAIT)
+                       sv_broadcast(vptosync(vp));
+
+               vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
+       }
+
+       VN_UNLOCK(vp, 0);
+
+       vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
+}
+
+/*
+ * Finish the removal of a vnode.
+ */
+void
+vn_remove(
+       struct vnode    *vp)
+{
+       vmap_t          vmap;
+
+       /* Make sure we don't do this to the same vnode twice */
+       if (!(vp->v_fbhv))
+               return;
+
+       XFS_STATS_INC(vn_remove);
+       vn_trace_exit(vp, "vn_remove", (inst_t *)__return_address);
+
+       /*
+        * After the following purge the vnode
+        * will no longer exist.
+        */
+       VMAP(vp, vmap);
+       vn_purge(vp, &vmap);
+}
+
+
+#ifdef XFS_VNODE_TRACE
+
+#define KTRACE_ENTER(vp, vk, s, line, ra)                      \
+       ktrace_enter(   (vp)->v_trace,                          \
+/*  0 */               (void *)(__psint_t)(vk),                \
+/*  1 */               (void *)(s),                            \
+/*  2 */               (void *)(__psint_t) line,               \
+/*  3 */               (void *)(vn_count(vp)), \
+/*  4 */               (void *)(ra),                           \
+/*  5 */               (void *)(__psunsigned_t)(vp)->v_flag,   \
+/*  6 */               (void *)(__psint_t)smp_processor_id(),  \
+/*  7 */               (void *)(__psint_t)(current->pid),      \
+/*  8 */               (void *)__return_address,               \
+/*  9 */               0, 0, 0, 0, 0, 0, 0)
+
+/*
+ * Vnode tracing code.
+ */
+void
+vn_trace_entry(vnode_t *vp, char *func, inst_t *ra)
+{
+       KTRACE_ENTER(vp, VNODE_KTRACE_ENTRY, func, 0, ra);
+}
+
+void
+vn_trace_exit(vnode_t *vp, char *func, inst_t *ra)
+{
+       KTRACE_ENTER(vp, VNODE_KTRACE_EXIT, func, 0, ra);
+}
+
+void
+vn_trace_hold(vnode_t *vp, char *file, int line, inst_t *ra)
+{
+       KTRACE_ENTER(vp, VNODE_KTRACE_HOLD, file, line, ra);
+}
+
+void
+vn_trace_ref(vnode_t *vp, char *file, int line, inst_t *ra)
+{
+       KTRACE_ENTER(vp, VNODE_KTRACE_REF, file, line, ra);
+}
+
+void
+vn_trace_rele(vnode_t *vp, char *file, int line, inst_t *ra)
+{
+       KTRACE_ENTER(vp, VNODE_KTRACE_RELE, file, line, ra);
+}
+#endif /* XFS_VNODE_TRACE */
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
new file mode 100644 (file)
index 0000000..af0b65f
--- /dev/null
@@ -0,0 +1,651 @@
+/*
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * Further, this software is distributed without any warranty that it is
+ * free of the rightful claim of any third person regarding infringement
+ * or the like.  Any license provided herein, whether implied or
+ * otherwise, applies only to this software file.  Patent licenses, if
+ * any, provided herein do not apply to combinations of this program with
+ * other software, or any other product whatsoever.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
+ * Mountain View, CA  94043, or:
+ *
+ * http://www.sgi.com
+ *
+ * For further information regarding this notice, see:
+ *
+ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ *
+ * Portions Copyright (c) 1989, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef __XFS_VNODE_H__
+#define __XFS_VNODE_H__
+
+struct uio;
+struct file;
+struct vattr;
+struct xfs_iomap;
+struct attrlist_cursor_kern;
+
+/*
+ * Vnode types.  VNON means no type.
+ */
+enum vtype     { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VFIFO, VBAD, VSOCK };
+
+typedef xfs_ino_t vnumber_t;
+typedef struct dentry vname_t;
+typedef bhv_head_t vn_bhv_head_t;
+
+/*
+ * MP locking protocols:
+ *     v_flag, v_vfsp                          VN_LOCK/VN_UNLOCK
+ *     v_type                                  read-only or fs-dependent
+ */
+typedef struct vnode {
+       __u32           v_flag;                 /* vnode flags (see below) */
+       enum vtype      v_type;                 /* vnode type */
+       struct vfs      *v_vfsp;                /* ptr to containing VFS */
+       vnumber_t       v_number;               /* in-core vnode number */
+       vn_bhv_head_t   v_bh;                   /* behavior head */
+       spinlock_t      v_lock;                 /* VN_LOCK/VN_UNLOCK */
+       struct inode    v_inode;                /* Linux inode */
+#ifdef XFS_VNODE_TRACE
+       struct ktrace   *v_trace;               /* trace header structure    */
+#endif
+} vnode_t;
+
+#define v_fbhv                 v_bh.bh_first          /* first behavior */
+#define v_fops                 v_bh.bh_first->bd_ops  /* first behavior ops */
+
+#define VNODE_POSITION_BASE    BHV_POSITION_BASE       /* chain bottom */
+#define VNODE_POSITION_TOP     BHV_POSITION_TOP        /* chain top */
+#define VNODE_POSITION_INVALID BHV_POSITION_INVALID    /* invalid pos. num */
+
+typedef enum {
+       VN_BHV_UNKNOWN,         /* not specified */
+       VN_BHV_XFS,             /* xfs */
+       VN_BHV_DM,              /* data migration */
+       VN_BHV_QM,              /* quota manager */
+       VN_BHV_IO,              /* IO path */
+       VN_BHV_END              /* housekeeping end-of-range */
+} vn_bhv_t;
+
+#define VNODE_POSITION_XFS     (VNODE_POSITION_BASE)
+#define VNODE_POSITION_DM      (VNODE_POSITION_BASE+10)
+#define VNODE_POSITION_QM      (VNODE_POSITION_BASE+20)
+#define VNODE_POSITION_IO      (VNODE_POSITION_BASE+30)
+
+/*
+ * Macros for dealing with the behavior descriptor inside of the vnode.
+ */
+#define BHV_TO_VNODE(bdp)      ((vnode_t *)BHV_VOBJ(bdp))
+#define BHV_TO_VNODE_NULL(bdp) ((vnode_t *)BHV_VOBJNULL(bdp))
+
+#define VN_BHV_HEAD(vp)                        ((bhv_head_t *)(&((vp)->v_bh)))
+#define vn_bhv_head_init(bhp,name)     bhv_head_init(bhp,name)
+#define vn_bhv_remove(bhp,bdp)         bhv_remove(bhp,bdp)
+#define vn_bhv_lookup(bhp,ops)         bhv_lookup(bhp,ops)
+#define vn_bhv_lookup_unlocked(bhp,ops) bhv_lookup_unlocked(bhp,ops)
+
+/*
+ * Vnode to Linux inode mapping.
+ */
+#define LINVFS_GET_VP(inode)   ((vnode_t *)list_entry(inode, vnode_t, v_inode))
+#define LINVFS_GET_IP(vp)      (&(vp)->v_inode)
+
+/*
+ * Convert between vnode types and inode formats (since POSIX.1
+ * defines mode word of stat structure in terms of inode formats).
+ */
+extern enum vtype      iftovt_tab[];
+extern u_short         vttoif_tab[];
+#define IFTOVT(mode)   (iftovt_tab[((mode) & S_IFMT) >> 12])
+#define VTTOIF(indx)   (vttoif_tab[(int)(indx)])
+#define MAKEIMODE(indx, mode)  (int)(VTTOIF(indx) | (mode))
+
+
+/*
+ * Vnode flags.
+ */
+#define VINACT                0x1      /* vnode is being inactivated   */
+#define VRECLM                0x2      /* vnode is being reclaimed     */
+#define VWAIT                 0x4      /* waiting for VINACT/VRECLM to end */
+#define VMODIFIED             0x8      /* XFS inode state possibly differs */
+                                       /* to the Linux inode state.    */
+
+/*
+ * Values for the VOP_RWLOCK and VOP_RWUNLOCK flags parameter.
+ */
+typedef enum vrwlock {
+       VRWLOCK_NONE,
+       VRWLOCK_READ,
+       VRWLOCK_WRITE,
+       VRWLOCK_WRITE_DIRECT,
+       VRWLOCK_TRY_READ,
+       VRWLOCK_TRY_WRITE
+} vrwlock_t;
+
+/*
+ * Return values for VOP_INACTIVE.  A return value of
+ * VN_INACTIVE_NOCACHE implies that the file system behavior
+ * has disassociated its state and bhv_desc_t from the vnode.
+ */
+#define        VN_INACTIVE_CACHE       0
+#define        VN_INACTIVE_NOCACHE     1
+
+/*
+ * Values for the cmd code given to VOP_VNODE_CHANGE.
+ */
+typedef enum vchange {
+       VCHANGE_FLAGS_FRLOCKS           = 0,
+       VCHANGE_FLAGS_ENF_LOCKING       = 1,
+       VCHANGE_FLAGS_TRUNCATED         = 2,
+       VCHANGE_FLAGS_PAGE_DIRTY        = 3,
+       VCHANGE_FLAGS_IOEXCL_COUNT      = 4
+} vchange_t;
+
+
+typedef int    (*vop_open_t)(bhv_desc_t *, struct cred *);
+typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct kiocb *,
+                               const struct iovec *, unsigned int,
+                               loff_t *, int, struct cred *);
+typedef ssize_t (*vop_write_t)(bhv_desc_t *, struct kiocb *,
+                               const struct iovec *, unsigned int,
+                               loff_t *, int, struct cred *);
+typedef ssize_t (*vop_sendfile_t)(bhv_desc_t *, struct file *,
+                               loff_t *, int, size_t, read_actor_t,
+                               void *, struct cred *);
+typedef int    (*vop_ioctl_t)(bhv_desc_t *, struct inode *, struct file *,
+                               int, unsigned int, unsigned long);
+typedef int    (*vop_getattr_t)(bhv_desc_t *, struct vattr *, int,
+                               struct cred *);
+typedef int    (*vop_setattr_t)(bhv_desc_t *, struct vattr *, int,
+                               struct cred *);
+typedef int    (*vop_access_t)(bhv_desc_t *, int, struct cred *);
+typedef int    (*vop_lookup_t)(bhv_desc_t *, vname_t *, vnode_t **,
+                               int, vnode_t *, struct cred *);
+typedef int    (*vop_create_t)(bhv_desc_t *, vname_t *, struct vattr *,
+                               vnode_t **, struct cred *);
+typedef int    (*vop_remove_t)(bhv_desc_t *, vname_t *, struct cred *);
+typedef int    (*vop_link_t)(bhv_desc_t *, vnode_t *, vname_t *,
+                               struct cred *);
+typedef int    (*vop_rename_t)(bhv_desc_t *, vname_t *, vnode_t *, vname_t *,
+                               struct cred *);
+typedef int    (*vop_mkdir_t)(bhv_desc_t *, vname_t *, struct vattr *,
+                               vnode_t **, struct cred *);
+typedef int    (*vop_rmdir_t)(bhv_desc_t *, vname_t *, struct cred *);
+typedef int    (*vop_readdir_t)(bhv_desc_t *, struct uio *, struct cred *,
+                               int *);
+typedef int    (*vop_symlink_t)(bhv_desc_t *, vname_t *, struct vattr *,
+                               char *, vnode_t **, struct cred *);
+typedef int    (*vop_readlink_t)(bhv_desc_t *, struct uio *, int,
+                               struct cred *);
+typedef int    (*vop_fsync_t)(bhv_desc_t *, int, struct cred *,
+                               xfs_off_t, xfs_off_t);
+typedef int    (*vop_inactive_t)(bhv_desc_t *, struct cred *);
+typedef int    (*vop_fid2_t)(bhv_desc_t *, struct fid *);
+typedef int    (*vop_release_t)(bhv_desc_t *);
+typedef int    (*vop_rwlock_t)(bhv_desc_t *, vrwlock_t);
+typedef void   (*vop_rwunlock_t)(bhv_desc_t *, vrwlock_t);
+typedef int    (*vop_bmap_t)(bhv_desc_t *, xfs_off_t, ssize_t, int,
+                               struct xfs_iomap *, int *);
+typedef int    (*vop_reclaim_t)(bhv_desc_t *);
+typedef int    (*vop_attr_get_t)(bhv_desc_t *, char *, char *, int *, int,
+                               struct cred *);
+typedef        int     (*vop_attr_set_t)(bhv_desc_t *, char *, char *, int, int,
+                               struct cred *);
+typedef        int     (*vop_attr_remove_t)(bhv_desc_t *, char *, int, struct cred *);
+typedef        int     (*vop_attr_list_t)(bhv_desc_t *, char *, int, int,
+                               struct attrlist_cursor_kern *, struct cred *);
+typedef void   (*vop_link_removed_t)(bhv_desc_t *, vnode_t *, int);
+typedef void   (*vop_vnode_change_t)(bhv_desc_t *, vchange_t, __psint_t);
+typedef void   (*vop_ptossvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
+typedef void   (*vop_pflushinvalvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
+typedef int    (*vop_pflushvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t,
+                               uint64_t, int);
+typedef int    (*vop_iflush_t)(bhv_desc_t *, int);
+
+
+typedef struct vnodeops {
+       bhv_position_t  vn_position;    /* position within behavior chain */
+       vop_open_t              vop_open;
+       vop_read_t              vop_read;
+       vop_write_t             vop_write;
+       vop_sendfile_t          vop_sendfile;
+       vop_ioctl_t             vop_ioctl;
+       vop_getattr_t           vop_getattr;
+       vop_setattr_t           vop_setattr;
+       vop_access_t            vop_access;
+       vop_lookup_t            vop_lookup;
+       vop_create_t            vop_create;
+       vop_remove_t            vop_remove;
+       vop_link_t              vop_link;
+       vop_rename_t            vop_rename;
+       vop_mkdir_t             vop_mkdir;
+       vop_rmdir_t             vop_rmdir;
+       vop_readdir_t           vop_readdir;
+       vop_symlink_t           vop_symlink;
+       vop_readlink_t          vop_readlink;
+       vop_fsync_t             vop_fsync;
+       vop_inactive_t          vop_inactive;
+       vop_fid2_t              vop_fid2;
+       vop_rwlock_t            vop_rwlock;
+       vop_rwunlock_t          vop_rwunlock;
+       vop_bmap_t              vop_bmap;
+       vop_reclaim_t           vop_reclaim;
+       vop_attr_get_t          vop_attr_get;
+       vop_attr_set_t          vop_attr_set;
+       vop_attr_remove_t       vop_attr_remove;
+       vop_attr_list_t         vop_attr_list;
+       vop_link_removed_t      vop_link_removed;
+       vop_vnode_change_t      vop_vnode_change;
+       vop_ptossvp_t           vop_tosspages;
+       vop_pflushinvalvp_t     vop_flushinval_pages;
+       vop_pflushvp_t          vop_flush_pages;
+       vop_release_t           vop_release;
+       vop_iflush_t            vop_iflush;
+} vnodeops_t;
+
+/*
+ * VOP's.
+ */
+#define _VOP_(op, vp)  (*((vnodeops_t *)(vp)->v_fops)->op)
+
+#define VOP_READ(vp,file,iov,segs,offset,ioflags,cr,rv)                        \
+       rv = _VOP_(vop_read, vp)((vp)->v_fbhv,file,iov,segs,offset,ioflags,cr)
+#define VOP_WRITE(vp,file,iov,segs,offset,ioflags,cr,rv)               \
+       rv = _VOP_(vop_write, vp)((vp)->v_fbhv,file,iov,segs,offset,ioflags,cr)
+#define VOP_SENDFILE(vp,f,off,ioflags,cnt,act,targ,cr,rv)              \
+       rv = _VOP_(vop_sendfile, vp)((vp)->v_fbhv,f,off,ioflags,cnt,act,targ,cr)
+#define VOP_BMAP(vp,of,sz,rw,b,n,rv)                                   \
+       rv = _VOP_(vop_bmap, vp)((vp)->v_fbhv,of,sz,rw,b,n)
+#define VOP_OPEN(vp, cr, rv)                                           \
+       rv = _VOP_(vop_open, vp)((vp)->v_fbhv, cr)
+#define VOP_GETATTR(vp, vap, f, cr, rv)                                        \
+       rv = _VOP_(vop_getattr, vp)((vp)->v_fbhv, vap, f, cr)
+#define        VOP_SETATTR(vp, vap, f, cr, rv)                                 \
+       rv = _VOP_(vop_setattr, vp)((vp)->v_fbhv, vap, f, cr)
+#define        VOP_ACCESS(vp, mode, cr, rv)                                    \
+       rv = _VOP_(vop_access, vp)((vp)->v_fbhv, mode, cr)
+#define        VOP_LOOKUP(vp,d,vpp,f,rdir,cr,rv)                               \
+       rv = _VOP_(vop_lookup, vp)((vp)->v_fbhv,d,vpp,f,rdir,cr)
+#define VOP_CREATE(dvp,d,vap,vpp,cr,rv)                                        \
+       rv = _VOP_(vop_create, dvp)((dvp)->v_fbhv,d,vap,vpp,cr)
+#define VOP_REMOVE(dvp,d,cr,rv)                                                \
+       rv = _VOP_(vop_remove, dvp)((dvp)->v_fbhv,d,cr)
+#define        VOP_LINK(tdvp,fvp,d,cr,rv)                                      \
+       rv = _VOP_(vop_link, tdvp)((tdvp)->v_fbhv,fvp,d,cr)
+#define        VOP_RENAME(fvp,fnm,tdvp,tnm,cr,rv)                              \
+       rv = _VOP_(vop_rename, fvp)((fvp)->v_fbhv,fnm,tdvp,tnm,cr)
+#define        VOP_MKDIR(dp,d,vap,vpp,cr,rv)                                   \
+       rv = _VOP_(vop_mkdir, dp)((dp)->v_fbhv,d,vap,vpp,cr)
+#define        VOP_RMDIR(dp,d,cr,rv)                                           \
+       rv = _VOP_(vop_rmdir, dp)((dp)->v_fbhv,d,cr)
+#define        VOP_READDIR(vp,uiop,cr,eofp,rv)                                 \
+       rv = _VOP_(vop_readdir, vp)((vp)->v_fbhv,uiop,cr,eofp)
+#define        VOP_SYMLINK(dvp,d,vap,tnm,vpp,cr,rv)                            \
+       rv = _VOP_(vop_symlink, dvp) ((dvp)->v_fbhv,d,vap,tnm,vpp,cr)
+#define        VOP_READLINK(vp,uiop,fl,cr,rv)                                  \
+       rv = _VOP_(vop_readlink, vp)((vp)->v_fbhv,uiop,fl,cr)
+#define        VOP_FSYNC(vp,f,cr,b,e,rv)                                       \
+       rv = _VOP_(vop_fsync, vp)((vp)->v_fbhv,f,cr,b,e)
+#define VOP_INACTIVE(vp, cr, rv)                                       \
+       rv = _VOP_(vop_inactive, vp)((vp)->v_fbhv, cr)
+#define VOP_RELEASE(vp, rv)                                            \
+       rv = _VOP_(vop_release, vp)((vp)->v_fbhv)
+#define VOP_FID2(vp, fidp, rv)                                         \
+       rv = _VOP_(vop_fid2, vp)((vp)->v_fbhv, fidp)
+#define VOP_RWLOCK(vp,i)                                               \
+       (void)_VOP_(vop_rwlock, vp)((vp)->v_fbhv, i)
+#define VOP_RWLOCK_TRY(vp,i)                                           \
+       _VOP_(vop_rwlock, vp)((vp)->v_fbhv, i)
+#define VOP_RWUNLOCK(vp,i)                                             \
+       (void)_VOP_(vop_rwunlock, vp)((vp)->v_fbhv, i)
+#define VOP_FRLOCK(vp,c,fl,flags,offset,fr,rv)                         \
+       rv = _VOP_(vop_frlock, vp)((vp)->v_fbhv,c,fl,flags,offset,fr)
+#define VOP_RECLAIM(vp, rv)                                            \
+       rv = _VOP_(vop_reclaim, vp)((vp)->v_fbhv)
+#define VOP_ATTR_GET(vp, name, val, vallenp, fl, cred, rv)             \
+       rv = _VOP_(vop_attr_get, vp)((vp)->v_fbhv,name,val,vallenp,fl,cred)
+#define        VOP_ATTR_SET(vp, name, val, vallen, fl, cred, rv)               \
+       rv = _VOP_(vop_attr_set, vp)((vp)->v_fbhv,name,val,vallen,fl,cred)
+#define        VOP_ATTR_REMOVE(vp, name, flags, cred, rv)                      \
+       rv = _VOP_(vop_attr_remove, vp)((vp)->v_fbhv,name,flags,cred)
+#define        VOP_ATTR_LIST(vp, buf, buflen, fl, cursor, cred, rv)            \
+       rv = _VOP_(vop_attr_list, vp)((vp)->v_fbhv,buf,buflen,fl,cursor,cred)
+#define VOP_LINK_REMOVED(vp, dvp, linkzero)                            \
+       (void)_VOP_(vop_link_removed, vp)((vp)->v_fbhv, dvp, linkzero)
+#define VOP_VNODE_CHANGE(vp, cmd, val)                                 \
+       (void)_VOP_(vop_vnode_change, vp)((vp)->v_fbhv,cmd,val)
+/*
+ * These are page cache functions that now go thru VOPs.
+ * 'last' parameter is unused and left in for IRIX compatibility
+ */
+#define VOP_TOSS_PAGES(vp, first, last, fiopt)                         \
+       _VOP_(vop_tosspages, vp)((vp)->v_fbhv,first, last, fiopt)
+/*
+ * 'last' parameter is unused and left in for IRIX compatibility
+ */
+#define VOP_FLUSHINVAL_PAGES(vp, first, last, fiopt)                   \
+       _VOP_(vop_flushinval_pages, vp)((vp)->v_fbhv,first,last,fiopt)
+/*
+ * 'last' parameter is unused and left in for IRIX compatibility
+ */
+#define VOP_FLUSH_PAGES(vp, first, last, flags, fiopt, rv)             \
+       rv = _VOP_(vop_flush_pages, vp)((vp)->v_fbhv,first,last,flags,fiopt)
+#define VOP_IOCTL(vp, inode, filp, fl, cmd, arg, rv)                   \
+       rv = _VOP_(vop_ioctl, vp)((vp)->v_fbhv,inode,filp,fl,cmd,arg)
+#define VOP_IFLUSH(vp, flags, rv)                                      \
+       rv = _VOP_(vop_iflush, vp)((vp)->v_fbhv, flags)
+
+/*
+ * Flags for read/write calls - same values as IRIX
+ */
+#define IO_ISDIRECT    0x00004         /* bypass page cache */
+#define IO_INVIS       0x00020         /* don't update inode timestamps */
+
+/*
+ * Flags for VOP_IFLUSH call
+ */
+#define FLUSH_SYNC             1       /* wait for flush to complete   */
+#define FLUSH_INODE            2       /* flush the inode itself       */
+#define FLUSH_LOG              4       /* force the last log entry for
+                                        * this inode out to disk       */
+
+/*
+ * Flush/Invalidate options for VOP_TOSS_PAGES, VOP_FLUSHINVAL_PAGES and
+ *     VOP_FLUSH_PAGES.
+ */
+#define FI_NONE                        0       /* none */
+#define FI_REMAPF              1       /* Do a remapf prior to the operation */
+#define FI_REMAPF_LOCKED       2       /* Do a remapf prior to the operation.
+                                          Prevent VM access to the pages until
+                                          the operation completes. */
+
+/*
+ * Vnode attributes.  va_mask indicates those attributes the caller
+ * wants to set or extract.
+ */
+typedef struct vattr {
+       int             va_mask;        /* bit-mask of attributes present */
+       enum vtype      va_type;        /* vnode type (for create) */
+       mode_t          va_mode;        /* file access mode and type */
+       nlink_t         va_nlink;       /* number of references to file */
+       uid_t           va_uid;         /* owner user id */
+       gid_t           va_gid;         /* owner group id */
+       xfs_ino_t       va_nodeid;      /* file id */
+       xfs_off_t       va_size;        /* file size in bytes */
+       u_long          va_blocksize;   /* blocksize preferred for i/o */
+       struct timespec va_atime;       /* time of last access */
+       struct timespec va_mtime;       /* time of last modification */
+       struct timespec va_ctime;       /* time file changed */
+       u_int           va_gen;         /* generation number of file */
+       xfs_dev_t       va_rdev;        /* device the special file represents */
+       __int64_t       va_nblocks;     /* number of blocks allocated */
+       u_long          va_xflags;      /* random extended file flags */
+       u_long          va_extsize;     /* file extent size */
+       u_long          va_nextents;    /* number of extents in file */
+       u_long          va_anextents;   /* number of attr extents in file */
+       int             va_projid;      /* project id */
+} vattr_t;
+
+/*
+ * setattr or getattr attributes
+ */
+#define XFS_AT_TYPE            0x00000001
+#define XFS_AT_MODE            0x00000002
+#define XFS_AT_UID             0x00000004
+#define XFS_AT_GID             0x00000008
+#define XFS_AT_FSID            0x00000010
+#define XFS_AT_NODEID          0x00000020
+#define XFS_AT_NLINK           0x00000040
+#define XFS_AT_SIZE            0x00000080
+#define XFS_AT_ATIME           0x00000100
+#define XFS_AT_MTIME           0x00000200
+#define XFS_AT_CTIME           0x00000400
+#define XFS_AT_RDEV            0x00000800
+#define XFS_AT_BLKSIZE         0x00001000
+#define XFS_AT_NBLOCKS         0x00002000
+#define XFS_AT_VCODE           0x00004000
+#define XFS_AT_MAC             0x00008000
+#define XFS_AT_UPDATIME                0x00010000
+#define XFS_AT_UPDMTIME                0x00020000
+#define XFS_AT_UPDCTIME                0x00040000
+#define XFS_AT_ACL             0x00080000
+#define XFS_AT_CAP             0x00100000
+#define XFS_AT_INF             0x00200000
+#define XFS_AT_XFLAGS          0x00400000
+#define XFS_AT_EXTSIZE         0x00800000
+#define XFS_AT_NEXTENTS                0x01000000
+#define XFS_AT_ANEXTENTS       0x02000000
+#define XFS_AT_PROJID          0x04000000
+#define XFS_AT_SIZE_NOPERM     0x08000000
+#define XFS_AT_GENCOUNT                0x10000000
+
+#define XFS_AT_ALL     (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\
+               XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\
+               XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\
+               XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|XFS_AT_MAC|\
+               XFS_AT_ACL|XFS_AT_CAP|XFS_AT_INF|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|\
+               XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_PROJID|XFS_AT_GENCOUNT)
+
+#define XFS_AT_STAT    (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\
+               XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\
+               XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\
+               XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_PROJID)
+
+#define XFS_AT_TIMES   (XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME)
+
+#define XFS_AT_UPDTIMES        (XFS_AT_UPDATIME|XFS_AT_UPDMTIME|XFS_AT_UPDCTIME)
+
+#define XFS_AT_NOSET   (XFS_AT_NLINK|XFS_AT_RDEV|XFS_AT_FSID|XFS_AT_NODEID|\
+               XFS_AT_TYPE|XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|\
+               XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_GENCOUNT)
+
+/*
+ *  Modes.
+ */
+#define VSUID  S_ISUID         /* set user id on execution */
+#define VSGID  S_ISGID         /* set group id on execution */
+#define VSVTX  S_ISVTX         /* save swapped text even after use */
+#define VREAD  S_IRUSR         /* read, write, execute permissions */
+#define VWRITE S_IWUSR
+#define VEXEC  S_IXUSR
+
+#define MODEMASK S_IALLUGO     /* mode bits plus permission bits */
+
+/*
+ * Check whether mandatory file locking is enabled.
+ */
+#define MANDLOCK(vp, mode)     \
+       ((vp)->v_type == VREG && ((mode) & (VSGID|(VEXEC>>3))) == VSGID)
+
+extern void    vn_init(void);
+extern int     vn_wait(struct vnode *);
+extern vnode_t *vn_initialize(struct inode *);
+
+/*
+ * Acquiring and invalidating vnodes:
+ *
+ *     if (vn_get(vp, version, 0))
+ *             ...;
+ *     vn_purge(vp, version);
+ *
+ * vn_get and vn_purge must be called with vmap_t arguments, sampled
+ * while a lock that the vnode's VOP_RECLAIM function acquires is
+ * held, to ensure that the vnode sampled with the lock held isn't
+ * recycled (VOP_RECLAIMed) or deallocated between the release of the lock
+ * and the subsequent vn_get or vn_purge.
+ */
+
+/*
+ * vnode_map structures _must_ match vn_epoch and vnode structure sizes.
+ */
+typedef struct vnode_map {
+       vfs_t           *v_vfsp;
+       vnumber_t       v_number;               /* in-core vnode number */
+       xfs_ino_t       v_ino;                  /* inode #      */
+} vmap_t;
+
+#define VMAP(vp, vmap) {(vmap).v_vfsp   = (vp)->v_vfsp,        \
+                        (vmap).v_number = (vp)->v_number,      \
+                        (vmap).v_ino    = (vp)->v_inode.i_ino; }
+
+extern void    vn_purge(struct vnode *, vmap_t *);
+extern vnode_t *vn_get(struct vnode *, vmap_t *);
+extern int     vn_revalidate(struct vnode *);
+extern void    vn_remove(struct vnode *);
+
+static inline int vn_count(struct vnode *vp)
+{
+       return atomic_read(&LINVFS_GET_IP(vp)->i_count);
+}
+
+/*
+ * Vnode reference counting functions (and macros for compatibility).
+ */
+extern vnode_t *vn_hold(struct vnode *);
+extern void    vn_rele(struct vnode *);
+
+#if defined(XFS_VNODE_TRACE)
+#define VN_HOLD(vp)            \
+       ((void)vn_hold(vp),     \
+         vn_trace_hold(vp, __FILE__, __LINE__, (inst_t *)__return_address))
+#define VN_RELE(vp)            \
+         (vn_trace_rele(vp, __FILE__, __LINE__, (inst_t *)__return_address), \
+          iput(LINVFS_GET_IP(vp)))
+#else
+#define VN_HOLD(vp)            ((void)vn_hold(vp))
+#define VN_RELE(vp)            (iput(LINVFS_GET_IP(vp)))
+#endif
+
+/*
+ * Vname handling macros.
+ */
+#define VNAME(dentry)          ((char *) (dentry)->d_name.name)
+#define VNAMELEN(dentry)       ((dentry)->d_name.len)
+#define VNAME_TO_VNODE(dentry) (LINVFS_GET_VP((dentry)->d_inode))
+
+/*
+ * Vnode spinlock manipulation.
+ */
+#define VN_LOCK(vp)            mutex_spinlock(&(vp)->v_lock)
+#define VN_UNLOCK(vp, s)       mutex_spinunlock(&(vp)->v_lock, s)
+#define VN_FLAGSET(vp,b)       vn_flagset(vp,b)
+#define VN_FLAGCLR(vp,b)       vn_flagclr(vp,b)
+
+static __inline__ void vn_flagset(struct vnode *vp, uint flag)
+{
+       spin_lock(&vp->v_lock);
+       vp->v_flag |= flag;
+       spin_unlock(&vp->v_lock);
+}
+
+static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
+{
+       spin_lock(&vp->v_lock);
+       vp->v_flag &= ~flag;
+       spin_unlock(&vp->v_lock);
+}
+
+/*
+ * Update modify/access/change times on the vnode
+ */
+#define VN_MTIMESET(vp, tvp)   (LINVFS_GET_IP(vp)->i_mtime = *(tvp))
+#define VN_ATIMESET(vp, tvp)   (LINVFS_GET_IP(vp)->i_atime = *(tvp))
+#define VN_CTIMESET(vp, tvp)   (LINVFS_GET_IP(vp)->i_ctime = *(tvp))
+
+/*
+ * Some useful predicates.
+ */
+#define VN_MAPPED(vp)  mapping_mapped(LINVFS_GET_IP(vp)->i_mapping)
+#define VN_CACHED(vp)  (LINVFS_GET_IP(vp)->i_mapping->nrpages)
+#define VN_DIRTY(vp)   mapping_tagged(LINVFS_GET_IP(vp)->i_mapping, \
+                                       PAGECACHE_TAG_DIRTY)
+#define VMODIFY(vp)    VN_FLAGSET(vp, VMODIFIED)
+#define VUNMODIFY(vp)  VN_FLAGCLR(vp, VMODIFIED)
+
+/*
+ * Flags to VOP_SETATTR/VOP_GETATTR.
+ */
+#define        ATTR_UTIME      0x01    /* non-default utime(2) request */
+#define        ATTR_DMI        0x08    /* invocation from a DMI function */
+#define        ATTR_LAZY       0x80    /* set/get attributes lazily */
+#define        ATTR_NONBLOCK   0x100   /* return EAGAIN if operation would block */
+
+/*
+ * Flags to VOP_FSYNC and VOP_RECLAIM.
+ */
+#define FSYNC_NOWAIT   0       /* asynchronous flush */
+#define FSYNC_WAIT     0x1     /* synchronous fsync or forced reclaim */
+#define FSYNC_INVAL    0x2     /* flush and invalidate cached data */
+#define FSYNC_DATA     0x4     /* synchronous fsync of data only */
+
+/*
+ * Tracking vnode activity.
+ */
+#if defined(XFS_VNODE_TRACE)
+
+#define        VNODE_TRACE_SIZE        16              /* number of trace entries */
+#define        VNODE_KTRACE_ENTRY      1
+#define        VNODE_KTRACE_EXIT       2
+#define        VNODE_KTRACE_HOLD       3
+#define        VNODE_KTRACE_REF        4
+#define        VNODE_KTRACE_RELE       5
+
+extern void vn_trace_entry(struct vnode *, char *, inst_t *);
+extern void vn_trace_exit(struct vnode *, char *, inst_t *);
+extern void vn_trace_hold(struct vnode *, char *, int, inst_t *);
+extern void vn_trace_ref(struct vnode *, char *, int, inst_t *);
+extern void vn_trace_rele(struct vnode *, char *, int, inst_t *);
+
+#define        VN_TRACE(vp)            \
+       vn_trace_ref(vp, __FILE__, __LINE__, (inst_t *)__return_address)
+#else
+#define        vn_trace_entry(a,b,c)
+#define        vn_trace_exit(a,b,c)
+#define        vn_trace_hold(a,b,c,d)
+#define        vn_trace_ref(a,b,c,d)
+#define        vn_trace_rele(a,b,c,d)
+#define        VN_TRACE(vp)
+#endif
+
+#endif /* __XFS_VNODE_H__ */
diff --git a/include/asm-arm/arch-ixp4xx/dma.h b/include/asm-arm/arch-ixp4xx/dma.h
new file mode 100644 (file)
index 0000000..686eaca
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * include/asm-arm/arch-ixp4xx/dma.h
+ *
+ * Copyright (C) 2001-2004 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#ifndef __ASM_ARCH_DMA_H
+#define __ASM_ARCH_DMA_H
+
+#include <linux/config.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <asm/page.h>
+#include <asm/sizes.h>
+#include <asm/hardware.h>
+
+#define MAX_DMA_ADDRESS                (PAGE_OFFSET + SZ_64M)
+
+/* No DMA */
+#define MAX_DMA_CHANNELS       0
+
+/*
+ * Only first 64MB of memory can be accessed via PCI.
+ * We use GFP_DMA to allocate safe buffers to do map/unmap.
+ * This is really ugly and we need a better way of specifying
+ * DMA-capable regions of memory.
+ */
+static inline void __arch_adjust_zones(int node, unsigned long *zone_size, 
+       unsigned long *zhole_size) 
+{
+       unsigned int sz = SZ_64M >> PAGE_SHIFT;
+
+       /*
+        * Only adjust if > 64M on current system
+        */
+       if (node || (zone_size[0] <= sz))
+               return;
+
+       zone_size[1] = zone_size[0] - sz;
+       zone_size[0] = sz;
+       zhole_size[1] = zhole_size[0];
+       zhole_size[0] = 0;
+}
+
+#define arch_adjust_zones(node, size, holes) \
+       __arch_adjust_zones(node, size, holes)
+
+#endif /* _ASM_ARCH_DMA_H */
diff --git a/include/asm-arm/arch-ixp4xx/io.h b/include/asm-arm/arch-ixp4xx/io.h
new file mode 100644 (file)
index 0000000..91d25c2
--- /dev/null
@@ -0,0 +1,388 @@
+/*
+ * linux/include/asm-arm/arch-ixp4xx/io.h
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright (C) 2002-2004  MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_ARCH_IO_H
+#define __ASM_ARM_ARCH_IO_H
+
+#include <asm/hardware.h>
+
+#define IO_SPACE_LIMIT 0xffff0000
+
+#define        BIT(x)  ((1)<<(x))
+
+
+extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
+extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
+
+
+/*
+ * IXP4xx provides two methods of accessing PCI memory space:
+ *
+ * 1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB).
+ *    To access PCI via this space, we simply ioremap() the BAR
+ *    into the kernel and we can use the standard read[bwl]/write[bwl]
+ *    macros. This is the preffered method due to speed but it
+ *    limits the system to just 64MB of PCI memory. This can be 
+ *    problamatic if using video cards and other memory-heavy
+ *    targets.
+ *
+ * 2) If > 64MB of memory space is required, the IXP4xx can be configured
+ *    to use indirect registers to access PCI (as we do below for I/O
+ *    transactions). This allows for up to 128MB (0x48000000 to 0x4fffffff)
+ *    of memory on the bus. The disadvantadge of this is that every 
+ *    PCI access requires three local register accesses plus a spinlock,
+ *    but in some cases the performance hit is acceptable. In addition,
+ *    you cannot mmap() PCI devices in this case.
+ *
+ */
+#ifndef        CONFIG_IXP4XX_INDIRECT_PCI
+
+#define __mem_pci(a)           ((unsigned long)(a))
+
+#else
+
+#include <linux/mm.h>
+
+/*
+ * In the case of using indirect PCI, we simply return the actual PCI
+ * address and our read/write implementation use that to drive the 
+ * access registers. If something outside of PCI is ioremap'd, we
+ * fallback to the default.
+ */
+static inline void *
+__ixp4xx_ioremap(unsigned long addr, size_t size, unsigned long flags, unsigned long align)
+{
+       extern void * __ioremap(unsigned long, size_t, unsigned long, unsigned long);
+       if((addr < 0x48000000) || (addr > 0x4fffffff))
+               return __ioremap(addr, size, flags, align);
+
+       return (void *)addr;
+}
+
+static inline void
+__ixp4xx_iounmap(void *addr)
+{
+       extern void __iounmap(void *addr);
+
+       if ((u32)addr > VMALLOC_START)
+               __iounmap(addr);
+}
+
+#define __arch_ioremap(a, s, f, x)     __ixp4xx_ioremap(a, s, f, x)
+#define        __arch_iounmap(a)               __ixp4xx_iounmap(a)
+
+#define        writeb(p, v)                    __ixp4xx_writeb(p, v)
+#define        writew(p, v)                    __ixp4xx_writew(p, v)
+#define        writel(p, v)                    __ixp4xx_writel(p, v)
+
+#define        writesb(p, v, l)                __ixp4xx_writesb(p, v, l)
+#define        writesw(p, v, l)                __ixp4xx_writesw(p, v, l)
+#define        writesl(p, v, l)                __ixp4xx_writesl(p, v, l)
+       
+#define        readb(p)                        __ixp4xx_readb(p)
+#define        readw(p)                        __ixp4xx_readw(p)
+#define        readl(p)                        __ixp4xx_readl(p)
+       
+#define        readsb(p, v, l)                 __ixp4xx_readsb(p, v, l)
+#define        readsw(p, v, l)                 __ixp4xx_readsw(p, v, l)
+#define        readsl(p, v, l)                 __ixp4xx_readsl(p, v, l)
+
+static inline void 
+__ixp4xx_writeb(u8 value, u32 addr)
+{
+       u32 n, byte_enables, data;
+
+       if (addr > VMALLOC_START) {
+               __raw_writeb(value, addr);
+               return;
+       }
+
+       n = addr % 4;
+       byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
+       data = value << (8*n);
+       ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
+}
+
+static inline void
+__ixp4xx_writesb(u32 bus_addr, u8 *vaddr, int count)
+{
+       while (count--)
+               writeb(*vaddr++, bus_addr);
+}
+
+static inline void 
+__ixp4xx_writew(u16 value, u32 addr)
+{
+       u32 n, byte_enables, data;
+
+       if (addr > VMALLOC_START) {
+               __raw_writew(value, addr);
+               return;
+       }
+
+       n = addr % 4;
+       byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
+       data = value << (8*n);
+       ixp4xx_pci_write(addr, byte_enables | NP_CMD_MEMWRITE, data);
+}
+
+static inline void
+__ixp4xx_writesw(u32 bus_addr, u16 *vaddr, int count)
+{
+       while (count--)
+               writew(*vaddr++, bus_addr);
+}
+
+static inline void 
+__ixp4xx_writel(u32 value, u32 addr)
+{
+       if (addr > VMALLOC_START) {
+               __raw_writel(value, addr);
+               return;
+       }
+
+       ixp4xx_pci_write(addr, NP_CMD_MEMWRITE, value);
+}
+
+static inline void
+__ixp4xx_writesl(u32 bus_addr, u32 *vaddr, int count)
+{
+       while (count--)
+               writel(*vaddr++, bus_addr);
+}
+
+static inline unsigned char 
+__ixp4xx_readb(u32 addr)
+{
+       u32 n, byte_enables, data;
+
+       if (addr > VMALLOC_START)
+               return __raw_readb(addr);
+
+       n = addr % 4;
+       byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
+       if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
+               return 0xff;
+
+       return data >> (8*n);
+}
+
+static inline void
+__ixp4xx_readsb(u32 bus_addr, u8 *vaddr, u32 count)
+{
+       while (count--)
+               *vaddr++ = readb(bus_addr);
+}
+
+static inline unsigned short 
+__ixp4xx_readw(u32 addr)
+{
+       u32 n, byte_enables, data;
+
+       if (addr > VMALLOC_START)
+               return __raw_readw(addr);
+
+       n = addr % 4;
+       byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
+       if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_MEMREAD, &data))
+               return 0xffff;
+
+       return data>>(8*n);
+}
+
+static inline void 
+__ixp4xx_readsw(u32 bus_addr, u16 *vaddr, u32 count)
+{
+       while (count--)
+               *vaddr++ = readw(bus_addr);
+}
+
+static inline unsigned long 
+__ixp4xx_readl(u32 addr)
+{
+       u32 data;
+
+       if (addr > VMALLOC_START)
+               return __raw_readl(addr);
+
+       if (ixp4xx_pci_read(addr, NP_CMD_MEMREAD, &data))
+               return 0xffffffff;
+
+       return data;
+}
+
+static inline void 
+__ixp4xx_readsl(u32 bus_addr, u32 *vaddr, u32 count)
+{
+       while (count--)
+               *vaddr++ = readl(bus_addr);
+}
+
+
+/*
+ * We can use the built-in functions b/c they end up calling writeb/readb
+ */
+#define memset_io(c,v,l)               _memset_io((c),(v),(l))
+#define memcpy_fromio(a,c,l)           _memcpy_fromio((a),(c),(l))
+#define memcpy_toio(c,a,l)             _memcpy_toio((c),(a),(l))
+
+#define eth_io_copy_and_sum(s,c,l,b) \
+                               eth_copy_and_sum((s),__mem_pci(c),(l),(b))
+
+static inline int
+check_signature(unsigned long bus_addr, const unsigned char *signature,
+               int length)
+{
+       int retval = 0;
+       do {
+               if (readb(bus_addr) != *signature)
+                       goto out;
+               bus_addr++;
+               signature++;
+               length--;
+       } while (length);
+       retval = 1;
+out:
+       return retval;
+}
+
+#endif
+
+/*
+ * IXP4xx does not have a transparent cpu -> PCI I/O translation
+ * window.  Instead, it has a set of registers that must be tweaked
+ * with the proper byte lanes, command types, and address for the
+ * transaction.  This means that we need to override the default
+ * I/O functions.
+ */
+#define        outb(p, v)                      __ixp4xx_outb(p, v)
+#define        outw(p, v)                      __ixp4xx_outw(p, v)
+#define        outl(p, v)                      __ixp4xx_outl(p, v)
+       
+#define        outsb(p, v, l)                  __ixp4xx_outsb(p, v, l)
+#define        outsw(p, v, l)                  __ixp4xx_outsw(p, v, l)
+#define        outsl(p, v, l)                  __ixp4xx_outsl(p, v, l)
+
+#define        inb(p)                          __ixp4xx_inb(p)
+#define        inw(p)                          __ixp4xx_inw(p)
+#define        inl(p)                          __ixp4xx_inl(p)
+
+#define        insb(p, v, l)                   __ixp4xx_insb(p, v, l)
+#define        insw(p, v, l)                   __ixp4xx_insw(p, v, l)
+#define        insl(p, v, l)                   __ixp4xx_insl(p, v, l)
+
+
+static inline void 
+__ixp4xx_outb(u8 value, u32 addr)
+{
+       u32 n, byte_enables, data;
+       n = addr % 4;
+       byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
+       data = value << (8*n);
+       ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
+}
+
+static inline void 
+__ixp4xx_outsb(u32 io_addr, u8 *vaddr, u32 count)
+{
+       while (count--)
+               outb(*vaddr++, io_addr);
+}
+
+static inline void 
+__ixp4xx_outw(u16 value, u32 addr)
+{
+       u32 n, byte_enables, data;
+       n = addr % 4;
+       byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
+       data = value << (8*n);
+       ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
+}
+
+static inline void 
+__ixp4xx_outsw(u32 io_addr, u16 *vaddr, u32 count)
+{
+       while (count--)
+               outw(cpu_to_le16(*vaddr++), io_addr);
+}
+
+static inline void 
+__ixp4xx_outl(u32 value, u32 addr)
+{
+       ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value);
+}
+
+static inline void 
+__ixp4xx_outsl(u32 io_addr, u32 *vaddr, u32 count)
+{
+       while (count--)
+               outl(*vaddr++, io_addr);
+}
+
+static inline u8 
+__ixp4xx_inb(u32 addr)
+{
+       u32 n, byte_enables, data;
+       n = addr % 4;
+       byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
+       if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
+               return 0xff;
+
+       return data >> (8*n);
+}
+
+static inline void 
+__ixp4xx_insb(u32 io_addr, u8 *vaddr, u32 count)
+{
+       while (count--)
+               *vaddr++ = inb(io_addr);
+}
+
+static inline u16 
+__ixp4xx_inw(u32 addr)
+{
+       u32 n, byte_enables, data;
+       n = addr % 4;
+       byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
+       if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_IOREAD, &data))
+               return 0xffff;
+
+       return data>>(8*n);
+}
+
+static inline void 
+__ixp4xx_insw(u32 io_addr, u16 *vaddr, u32 count)
+{
+       while (count--)
+               *vaddr++ = le16_to_cpu(inw(io_addr));
+}
+
+static inline u32 
+__ixp4xx_inl(u32 addr)
+{
+       u32 data;
+       if (ixp4xx_pci_read(addr, NP_CMD_IOREAD, &data))
+               return 0xffffffff;
+
+       return data;
+}
+
+static inline void 
+__ixp4xx_insl(u32 io_addr, u32 *vaddr, u32 count)
+{
+       while (count--)
+               *vaddr++ = inl(io_addr);
+}
+
+
+#endif //  __ASM_ARM_ARCH_IO_H
+
diff --git a/include/asm-arm/arch-ixp4xx/irq.h b/include/asm-arm/arch-ixp4xx/irq.h
new file mode 100644 (file)
index 0000000..87da706
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+ * irq.h 
+ *
+ *  Copyright (C) 2002 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define fixup_irq(irq)  (irq)
+
diff --git a/include/asm-arm/arch-ixp4xx/memory.h b/include/asm-arm/arch-ixp4xx/memory.h
new file mode 100644 (file)
index 0000000..3f6da11
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * linux/include/asm-arm/arch-ixp4xx/memory.h
+ *
+ * Copyright (c) 2001-2004 MontaVista Software, Inc.
+ */
+
+#ifndef __ASM_ARCH_MEMORY_H
+#define __ASM_ARCH_MEMORY_H
+
+/*
+ * Physical DRAM offset.
+ */
+#define PHYS_OFFSET    (0x00000000UL)
+
+/*
+ * Virtual view <-> DMA view memory address translations
+ * virt_to_bus: Used to translate the virtual address to an
+ *             address suitable to be passed to set_dma_addr
+ * bus_to_virt: Used to convert an address for DMA operations
+ *             to an address that the kernel can use.
+ *
+ * These are dummies for now.
+ */
+#define __virt_to_bus(x)        __virt_to_phys(x)
+#define __bus_to_virt(x)        __phys_to_virt(x)
+
+#endif
diff --git a/include/asm-arm/arch-ixp4xx/param.h b/include/asm-arm/arch-ixp4xx/param.h
new file mode 100644 (file)
index 0000000..8a75712
--- /dev/null
@@ -0,0 +1,3 @@
+/*
+ * linux/include/asm-arm/arch-ixp4xx/param.h
+ */
diff --git a/include/asm-arm/arch-ixp4xx/platform.h b/include/asm-arm/arch-ixp4xx/platform.h
new file mode 100644 (file)
index 0000000..52c1c44
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * include/asm-arm/arch-ixp4xx/platform.h
+ *
+ * Constants and functions that are useful to IXP4xx platform-specific code
+ * and device drivers.
+ *
+ * Copyright (C) 2004 MontaVista Software, Inc.
+ */
+
+#ifndef __ASM_ARCH_HARDWARE_H__
+#error "Do not include this directly, instead #include <asm/hardware.h>"
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <asm/types.h>
+
+/*
+ * Expansion bus memory regions
+ */
+#define IXP4XX_EXP_BUS_BASE_PHYS       (0x50000000)
+
+#define        IXP4XX_EXP_BUS_CSX_REGION_SIZE  (0x01000000)
+
+#define IXP4XX_EXP_BUS_CS0_BASE_PHYS   (IXP4XX_EXP_BUS_BASE_PHYS + 0x00000000)
+#define IXP4XX_EXP_BUS_CS1_BASE_PHYS   (IXP4XX_EXP_BUS_BASE_PHYS + 0x01000000)
+#define IXP4XX_EXP_BUS_CS2_BASE_PHYS   (IXP4XX_EXP_BUS_BASE_PHYS + 0x02000000)
+#define IXP4XX_EXP_BUS_CS3_BASE_PHYS   (IXP4XX_EXP_BUS_BASE_PHYS + 0x03000000)
+#define IXP4XX_EXP_BUS_CS4_BASE_PHYS   (IXP4XX_EXP_BUS_BASE_PHYS + 0x04000000)
+#define IXP4XX_EXP_BUS_CS5_BASE_PHYS   (IXP4XX_EXP_BUS_BASE_PHYS + 0x05000000)
+#define IXP4XX_EXP_BUS_CS6_BASE_PHYS   (IXP4XX_EXP_BUS_BASE_PHYS + 0x06000000)
+#define IXP4XX_EXP_BUS_CS7_BASE_PHYS   (IXP4XX_EXP_BUS_BASE_PHYS + 0x07000000)
+
+#define IXP4XX_FLASH_WRITABLE  (0x2)
+#define IXP4XX_FLASH_DEFAULT   (0xbcd23c40)
+#define IXP4XX_FLASH_WRITE     (0xbcd23c42)
+
+/*
+ * Clock Speed Definitions.
+ */
+#define IXP4XX_PERIPHERAL_BUS_CLOCK    (66) /* 66Mhzi APB BUS   */ 
+#define IXP4XX_UART_XTAL               14745600
+
+/*
+ * The IXP4xx chips do not have an I2C unit, so GPIO lines are just
+ * used to 
+ * Used as platform_data to provide GPIO pin information to the ixp42x
+ * I2C driver.
+ */
+struct ixp4xx_i2c_pins {
+       unsigned long sda_pin;
+       unsigned long scl_pin;
+};
+
+
+/*
+ * Functions used by platform-level setup code
+ */
+extern void ixp4xx_map_io(void);
+extern void ixp4xx_init_irq(void);
+extern void ixp4xx_pci_preinit(void);
+struct pci_sys_data;
+extern int ixp4xx_setup(int nr, struct pci_sys_data *sys);
+extern struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys);
+
+/*
+ * GPIO-functions
+ */
+/*
+ * The following converted to the real HW bits the gpio_line_config
+ */
+/* GPIO pin types */
+#define IXP4XX_GPIO_OUT                0x1
+#define IXP4XX_GPIO_IN                 0x2
+
+#define IXP4XX_GPIO_INTSTYLE_MASK      0x7C  /* Bits [6:2] define interrupt style */
+
+/* 
+ * GPIO interrupt types.
+ */
+#define IXP4XX_GPIO_ACTIVE_HIGH                0x4 /* Default */
+#define IXP4XX_GPIO_ACTIVE_LOW         0x8
+#define IXP4XX_GPIO_RISING_EDGE                0x10
+#define IXP4XX_GPIO_FALLING_EDGE       0x20
+#define IXP4XX_GPIO_TRANSITIONAL       0x40
+
+/* GPIO signal types */
+#define IXP4XX_GPIO_LOW                        0
+#define IXP4XX_GPIO_HIGH               1
+
+/* GPIO Clocks */
+#define IXP4XX_GPIO_CLK_0              14
+#define IXP4XX_GPIO_CLK_1              15
+
+extern void gpio_line_config(u8 line, u32 style);
+
+static inline void gpio_line_get(u8 line, int *value)
+{
+       *value = (*IXP4XX_GPIO_GPINR >> line) & 0x1;
+}
+
+static inline void gpio_line_set(u8 line, int value)
+{
+       if (value == IXP4XX_GPIO_HIGH)
+           *IXP4XX_GPIO_GPOUTR |= (1 << line);
+       else if (value == IXP4XX_GPIO_LOW)
+           *IXP4XX_GPIO_GPOUTR &= ~(1 << line);
+}
+
+static inline void gpio_line_isr_clear(u8 line)
+{
+       *IXP4XX_GPIO_GPISR = (1 << line);
+}
+
+#endif // __ASSEMBLY__
+
diff --git a/include/asm-arm/arch-ixp4xx/serial.h b/include/asm-arm/arch-ixp4xx/serial.h
new file mode 100644 (file)
index 0000000..93d6c38
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * include/asm-arm/arch-ixp4xx/serial.h
+ *
+ * Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ * Copyright (C) 2002-2004 MontaVista Software, Inc.
+ * 
+ */
+
+#ifndef _ARCH_SERIAL_H_
+#define _ARCH_SERIAL_H_
+
+/*
+ * We don't hardcode our serial port information but instead
+ * fill it in dynamically based on our platform in arch->map_io.
+ * This allows for per-board serial ports w/o a bunch of
+ * #ifdefs in this file.
+ */
+#define        STD_SERIAL_PORT_DEFNS
+#define        EXTRA_SERIAL_PORT_DEFNS
+
+/*
+ * IXP4XX uses 15.6MHz clock for uart
+ */
+#define BASE_BAUD ( IXP4XX_UART_XTAL / 16 )
+
+#endif // _ARCH_SERIAL_H_
diff --git a/include/asm-arm/arch-ixp4xx/system.h b/include/asm-arm/arch-ixp4xx/system.h
new file mode 100644 (file)
index 0000000..8a78b96
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * include/asm-arm/arch-ixp4x//system.h 
+ *
+ * Copyright (C) 2002 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <asm/hardware.h>
+
+static inline void arch_idle(void)
+{
+#if 0
+       if (!hlt_counter)
+               cpu_do_idle(0);
+#endif
+}
+
+
+static inline void arch_reset(char mode)
+{
+       if ( 1 && mode == 's') {
+               /* Jump into ROM at address 0 */
+               cpu_reset(0);
+       } else {
+               /* Use on-chip reset capability */
+
+               /* set the "key" register to enable access to
+                * "timer" and "enable" registers
+                */
+               *IXP4XX_OSWK = 0x482e;      
+
+               /* write 0 to the timer register for an immidiate reset */
+               *IXP4XX_OSWT = 0;
+
+               /* disable watchdog interrupt, enable reset, enable count */
+               *IXP4XX_OSWE = 0x3;
+       }
+}
+
diff --git a/include/asm-arm/arch-ixp4xx/time.h b/include/asm-arm/arch-ixp4xx/time.h
new file mode 100644 (file)
index 0000000..e79f4ac
--- /dev/null
@@ -0,0 +1,7 @@
+/*
+ * linux/include/asm-arm/arch-ixp4xx/time.h
+ *
+ * We implement timer code in arch/arm/mach-ixp4xx/time.c
+ *
+ */
+
diff --git a/include/asm-arm/arch-ixp4xx/uncompress.h b/include/asm-arm/arch-ixp4xx/uncompress.h
new file mode 100644 (file)
index 0000000..29692ec
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * include/asm-arm/arch-ixp4xx/uncompress.h 
+ *
+ * Copyright (C) 2002 Intel Corporation.
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ARCH_UNCOMPRESS_H_
+#define _ARCH_UNCOMPRESS_H_
+
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <linux/serial_reg.h>
+
+#define TX_DONE (UART_LSR_TEMT|UART_LSR_THRE)
+
+static volatile u32* uart_base;
+
+static __inline__ void putc(char c)
+{
+       /* Check THRE and TEMT bits before we transmit the character.
+        */
+       while ((uart_base[UART_LSR] & TX_DONE) != TX_DONE); 
+       *uart_base = c;
+}
+
+/*
+ * This does not append a newline
+ */
+static void puts(const char *s)
+{
+       while (*s)
+       {
+               putc(*s);
+               if (*s == '\n')
+                       putc('\r');
+               s++;
+       }
+}
+
+static __inline__ void __arch_decomp_setup(unsigned long arch_id)
+{
+       /*
+        * Coyote only has UART2 connected
+        */
+       if (machine_is_adi_coyote())
+               uart_base = (volatile u32*) IXP4XX_UART2_BASE_PHYS;
+       else
+               uart_base = (volatile u32*) IXP4XX_UART1_BASE_PHYS;
+}
+
+/*
+ * arch_id is a variable in decompress_kernel()
+ */
+#define arch_decomp_setup()    __arch_decomp_setup(arch_id)
+
+#define arch_decomp_wdog()
+
+#endif
diff --git a/include/asm-arm/arch-pxa/pxafb.h b/include/asm-arm/arch-pxa/pxafb.h
new file mode 100644 (file)
index 0000000..27d71e9
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ *  linux/include/asm-arm/arch-pxa/pxafb.h
+ *
+ *  Support for the xscale frame buffer.
+ *
+ *  Author:     Jean-Frederic Clere
+ *  Created:    Sep 22, 2003
+ *  Copyright:  jfclere@sinix.net
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+/*
+ * This structure describes the machine which we are running on.
+ * It is set in linux/arch/arm/mach-pxa/machine_name.c and used in the probe routine
+ * of linux/drivers/video/pxafb.c
+ */
+struct pxafb_mach_info {
+       u_long          pixclock;
+
+       u_short         xres;
+       u_short         yres;
+
+       u_char          bpp;
+       u_char          hsync_len;
+       u_char          left_margin;
+       u_char          right_margin;
+
+       u_char          vsync_len;
+       u_char          upper_margin;
+       u_char          lower_margin;
+       u_char          sync;
+
+       u_int           cmap_greyscale:1,
+                       cmap_inverse:1,
+                       cmap_static:1,
+                       unused:29;
+
+       /* The following should be defined in LCCR0
+        *      LCCR0_Act or LCCR0_Pas          Active or Passive
+        *      LCCR0_Sngl or LCCR0_Dual        Single/Dual panel
+        *      LCCR0_Mono or LCCR0_Color       Mono/Color
+        *      LCCR0_4PixMono or LCCR0_8PixMono (in mono single mode)
+        *      LCCR0_DMADel(Tcpu) (optional)   DMA request delay
+        *
+        * The following should not be defined in LCCR0:
+        *      LCCR0_OUM, LCCR0_BM, LCCR0_QDM, LCCR0_DIS, LCCR0_EFM
+        *      LCCR0_IUM, LCCR0_SFM, LCCR0_LDM, LCCR0_ENB
+        */
+       u_int           lccr0;
+       /* The following should be defined in LCCR3
+        *      LCCR3_OutEnH or LCCR3_OutEnL    Output enable polarity
+        *      LCCR3_PixRsEdg or LCCR3_PixFlEdg Pixel clock edge type
+        *      LCCR3_Acb(X)                    AB Bias pin frequency
+        *      LCCR3_DPC (optional)            Double Pixel Clock mode (untested)
+        *
+        * The following should not be defined in LCCR3
+        *      LCCR3_HSP, LCCR3_VSP, LCCR0_Pcd(x), LCCR3_Bpp
+        */
+       u_int           lccr3;
+
+       void (*pxafb_backlight_power)(int);
+       void (*pxafb_lcd_power)(int);
+
+};
+void set_pxa_fb_info(struct pxafb_mach_info *hard_pxa_fb_info);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
new file mode 100644 (file)
index 0000000..2aeecaf
--- /dev/null
@@ -0,0 +1,221 @@
+#ifndef _LINUX_MEMPOLICY_H
+#define _LINUX_MEMPOLICY_H 1
+
+#include <linux/errno.h>
+
+/*
+ * NUMA memory policies for Linux.
+ * Copyright 2003,2004 Andi Kleen SuSE Labs
+ */
+
+/* Policies */
+#define MPOL_DEFAULT   0
+#define MPOL_PREFERRED 1
+#define MPOL_BIND      2
+#define MPOL_INTERLEAVE        3
+
+#define MPOL_MAX MPOL_INTERLEAVE
+
+/* Flags for get_mem_policy */
+#define MPOL_F_NODE    (1<<0)  /* return next IL mode instead of node mask */
+#define MPOL_F_ADDR    (1<<1)  /* look up vma using address */
+
+/* Flags for mbind */
+#define MPOL_MF_STRICT (1<<0)  /* Verify existing pages in the mapping */
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/mmzone.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <asm/semaphore.h>
+
+struct vm_area_struct;
+
+#ifdef CONFIG_NUMA
+
+/*
+ * Describe a memory policy.
+ *
+ * A mempolicy can be either associated with a process or with a VMA.
+ * For VMA related allocations the VMA policy is preferred, otherwise
+ * the process policy is used. Interrupts ignore the memory policy
+ * of the current process.
+ *
+ * Locking policy for interleave:
+ * In process context there is no locking because only the process accesses
+ * its own state. All vma manipulation is somewhat protected by a down_read on
+ * mmap_sem. For allocating in the interleave policy the page_table_lock
+ * must also be acquired to protect il_next.
+ *
+ * Freeing policy:
+ * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
+ * All other policies don't have any external state. mpol_free() handles this.
+ *
+ * Copying policy objects:
+ * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this.
+ */
+struct mempolicy {
+       atomic_t refcnt;
+       short policy;   /* See MPOL_* above */
+       union {
+               struct zonelist  *zonelist;     /* bind */
+               short            preferred_node; /* preferred */
+               DECLARE_BITMAP(nodes, MAX_NUMNODES); /* interleave */
+               /* undefined for default */
+       } v;
+};
+
+/* A NULL mempolicy pointer is a synonym of &default_policy. */
+extern struct mempolicy default_policy;
+
+/*
+ * Support for managing mempolicy data objects (clone, copy, destroy)
+ * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
+ */
+
+extern void __mpol_free(struct mempolicy *pol);
+static inline void mpol_free(struct mempolicy *pol)
+{
+       if (pol)
+               __mpol_free(pol);
+}
+
+extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
+static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
+{
+       if (pol)
+               pol = __mpol_copy(pol);
+       return pol;
+}
+
+#define vma_policy(vma) ((vma)->vm_policy)
+#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))
+
+static inline void mpol_get(struct mempolicy *pol)
+{
+       if (pol)
+               atomic_inc(&pol->refcnt);
+}
+
+extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
+static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
+{
+       if (a == b)
+               return 1;
+       return __mpol_equal(a, b);
+}
+#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))
+
+/* Could later add inheritance of the process policy here. */
+
+#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
+
+/*
+ * Hugetlb policy. i386 hugetlb so far works with node numbers
+ * instead of zone lists, so give it special interfaces for now.
+ */
+extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
+extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
+                       unsigned long addr);
+
+/*
+ * Tree of shared policies for a shared memory region.
+ * Maintain the policies in a pseudo mm that contains vmas. The vmas
+ * carry the policy. As a special twist the pseudo mm is indexed in pages, not
+ * bytes, so that we can work with shared memory segments bigger than
+ * unsigned long.
+ */
+
+struct sp_node {
+       struct rb_node nd;
+       unsigned long start, end;
+       struct mempolicy *policy;
+};
+
+struct shared_policy {
+       struct rb_root root;
+       struct semaphore sem;
+};
+
+static inline void mpol_shared_policy_init(struct shared_policy *info)
+{
+       info->root = RB_ROOT;
+       init_MUTEX(&info->sem);
+}
+
+int mpol_set_shared_policy(struct shared_policy *info,
+                               struct vm_area_struct *vma,
+                               struct mempolicy *new);
+void mpol_free_shared_policy(struct shared_policy *p);
+struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
+                                           unsigned long idx);
+
+#else
+
+struct mempolicy {};
+
+static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
+{
+       return 1;
+}
+#define vma_mpol_equal(a,b) 1
+
+#define mpol_set_vma_default(vma) do {} while(0)
+
+static inline void mpol_free(struct mempolicy *p)
+{
+}
+
+static inline void mpol_get(struct mempolicy *pol)
+{
+}
+
+static inline struct mempolicy *mpol_copy(struct mempolicy *old)
+{
+       return NULL;
+}
+
+static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
+{
+       return numa_node_id();
+}
+
+static inline int
+mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
+{
+       return 1;
+}
+
+struct shared_policy {};
+
+static inline int mpol_set_shared_policy(struct shared_policy *info,
+                                       struct vm_area_struct *vma,
+                                       struct mempolicy *new)
+{
+       return -EINVAL;
+}
+
+static inline void mpol_shared_policy_init(struct shared_policy *info)
+{
+}
+
+static inline void mpol_free_shared_policy(struct shared_policy *p)
+{
+}
+
+static inline struct mempolicy *
+mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+{
+       return NULL;
+}
+
+#define vma_policy(vma) NULL
+#define vma_set_policy(vma, pol) do {} while(0)
+
+#endif /* CONFIG_NUMA */
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h
new file mode 100644 (file)
index 0000000..acfde2d
--- /dev/null
@@ -0,0 +1,91 @@
+#include <linux/init.h>
+#include <linux/posix_acl.h>
+#include <linux/xattr_acl.h>
+
+#define REISERFS_ACL_VERSION   0x0001
+
+typedef struct {
+       __u16           e_tag;
+       __u16           e_perm;
+       __u32           e_id;
+} reiserfs_acl_entry;
+
+typedef struct {
+       __u16           e_tag;
+       __u16           e_perm;
+} reiserfs_acl_entry_short;
+
+typedef struct {
+       __u32           a_version;
+} reiserfs_acl_header;
+
+static inline size_t reiserfs_acl_size(int count)
+{
+       if (count <= 4) {
+               return sizeof(reiserfs_acl_header) +
+                      count * sizeof(reiserfs_acl_entry_short);
+       } else {
+               return sizeof(reiserfs_acl_header) +
+                      4 * sizeof(reiserfs_acl_entry_short) +
+                      (count - 4) * sizeof(reiserfs_acl_entry);
+       }
+}
+
+static inline int reiserfs_acl_count(size_t size)
+{
+       ssize_t s;
+       size -= sizeof(reiserfs_acl_header);
+       s = size - 4 * sizeof(reiserfs_acl_entry_short);
+       if (s < 0) {
+               if (size % sizeof(reiserfs_acl_entry_short))
+                       return -1;
+               return size / sizeof(reiserfs_acl_entry_short);
+       } else {
+               if (s % sizeof(reiserfs_acl_entry))
+                       return -1;
+               return s / sizeof(reiserfs_acl_entry) + 4;
+       }
+}
+
+
+#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+struct posix_acl * reiserfs_get_acl(struct inode *inode, int type);
+int reiserfs_set_acl(struct inode *inode, int type, struct posix_acl *acl);
+int reiserfs_acl_chmod (struct inode *inode);
+int reiserfs_inherit_default_acl (struct inode *dir, struct dentry *dentry, struct inode *inode);
+int reiserfs_cache_default_acl (struct inode *dir);
+extern int reiserfs_xattr_posix_acl_init (void) __init;
+extern int reiserfs_xattr_posix_acl_exit (void);
+extern struct reiserfs_xattr_handler posix_acl_default_handler;
+extern struct reiserfs_xattr_handler posix_acl_access_handler;
+#else
+
+#define reiserfs_set_acl NULL
+#define reiserfs_get_acl NULL
+#define reiserfs_cache_default_acl(inode) 0
+
+static inline int
+reiserfs_xattr_posix_acl_init (void)
+{
+    return 0;
+}
+
+static inline int
+reiserfs_xattr_posix_acl_exit (void)
+{
+    return 0;
+}
+
+static inline int
+reiserfs_acl_chmod (struct inode *inode)
+{
+    return 0;
+}
+
+static inline int
+reiserfs_inherit_default_acl (const struct inode *dir, struct dentry *dentry, struct inode *inode)
+{
+    return 0;
+}
+
+#endif
diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h
new file mode 100644 (file)
index 0000000..9c40c4e
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+  File: linux/reiserfs_xattr.h
+*/
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/xattr.h>
+
+/* Magic value in header */
+#define REISERFS_XATTR_MAGIC 0x52465841 /* "RFXA" */
+
+struct reiserfs_xattr_header {
+    __u32 h_magic;              /* magic number for identification */
+    __u32 h_hash;               /* hash of the value */
+};
+
+#ifdef __KERNEL__
+
+struct reiserfs_xattr_handler {
+       char *prefix;
+        int (*init)(void);
+        void (*exit)(void);
+       int (*get)(struct inode *inode, const char *name, void *buffer,
+                  size_t size);
+       int (*set)(struct inode *inode, const char *name, const void *buffer,
+                  size_t size, int flags);
+       int (*del)(struct inode *inode, const char *name);
+        int (*list)(struct inode *inode, const char *name, int namelen, char *out);
+        struct list_head handlers;
+};
+
+
+#ifdef CONFIG_REISERFS_FS_XATTR
+#define is_reiserfs_priv_object(inode) (REISERFS_I(inode)->i_flags & i_priv_object)
+#define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir)
+ssize_t reiserfs_getxattr (struct dentry *dentry, const char *name,
+                          void *buffer, size_t size);
+int reiserfs_setxattr (struct dentry *dentry, const char *name,
+                       const void *value, size_t size, int flags);
+ssize_t reiserfs_listxattr (struct dentry *dentry, char *buffer, size_t size);
+int reiserfs_removexattr (struct dentry *dentry, const char *name);
+int reiserfs_delete_xattrs (struct inode *inode);
+int reiserfs_chown_xattrs (struct inode *inode, struct iattr *attrs);
+int reiserfs_xattr_init (struct super_block *sb, int mount_flags);
+int reiserfs_permission (struct inode *inode, int mask, struct nameidata *nd);
+int reiserfs_permission_locked (struct inode *inode, int mask, struct nameidata *nd);
+
+int reiserfs_xattr_del (struct inode *, const char *);
+int reiserfs_xattr_get (const struct inode *, const char *, void *, size_t);
+int reiserfs_xattr_set (struct inode *, const char *, const void *,
+                               size_t, int);
+
+extern struct reiserfs_xattr_handler user_handler;
+extern struct reiserfs_xattr_handler trusted_handler;
+#ifdef CONFIG_REISERFS_FS_SECURITY
+extern struct reiserfs_xattr_handler security_handler;
+#endif
+
+int reiserfs_xattr_register_handlers (void) __init;
+void reiserfs_xattr_unregister_handlers (void);
+
+static inline void
+reiserfs_write_lock_xattrs(struct super_block *sb)
+{
+    down_write (&REISERFS_XATTR_DIR_SEM(sb));
+}
+static inline void
+reiserfs_write_unlock_xattrs(struct super_block *sb)
+{
+    up_write (&REISERFS_XATTR_DIR_SEM(sb));
+}
+static inline void
+reiserfs_read_lock_xattrs(struct super_block *sb)
+{
+    down_read (&REISERFS_XATTR_DIR_SEM(sb));
+}
+
+static inline void
+reiserfs_read_unlock_xattrs(struct super_block *sb)
+{
+    up_read (&REISERFS_XATTR_DIR_SEM(sb));
+}
+
+static inline void
+reiserfs_write_lock_xattr_i(struct inode *inode)
+{
+    down_write (&REISERFS_I(inode)->xattr_sem);
+}
+static inline void
+reiserfs_write_unlock_xattr_i(struct inode *inode)
+{
+    up_write (&REISERFS_I(inode)->xattr_sem);
+}
+static inline void
+reiserfs_read_lock_xattr_i(struct inode *inode)
+{
+    down_read (&REISERFS_I(inode)->xattr_sem);
+}
+
+static inline void
+reiserfs_read_unlock_xattr_i(struct inode *inode)
+{
+    up_read (&REISERFS_I(inode)->xattr_sem);
+}
+
+#else
+
+#define is_reiserfs_priv_object(inode) 0
+#define reiserfs_getxattr NULL
+#define reiserfs_setxattr NULL
+#define reiserfs_listxattr NULL
+#define reiserfs_removexattr NULL
+#define reiserfs_write_lock_xattrs(sb)
+#define reiserfs_write_unlock_xattrs(sb)
+#define reiserfs_read_lock_xattrs(sb)
+#define reiserfs_read_unlock_xattrs(sb)
+
+#define reiserfs_permission NULL
+
+#define reiserfs_xattr_register_handlers() 0
+#define reiserfs_xattr_unregister_handlers()
+
+static inline int reiserfs_delete_xattrs (struct inode *inode) { return 0; };
+static inline int reiserfs_chown_xattrs (struct inode *inode, struct iattr *attrs) { return 0; };
+static inline int reiserfs_xattr_init (struct super_block *sb, int mount_flags)
+{
+    sb->s_flags = (sb->s_flags & ~MS_POSIXACL); /* to be sure */
+    return 0;
+};
+#endif
+
+#endif  /* __KERNEL__ */
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
new file mode 100644 (file)
index 0000000..61f00fa
--- /dev/null
@@ -0,0 +1,383 @@
+/*
+ *     Sysfs attributes of bridge ports
+ *     Linux ethernet bridge
+ *
+ *     Authors:
+ *     Stephen Hemminger               <shemminger@osdl.org>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/rtnetlink.h>
+#include <linux/spinlock.h>
+#include <linux/times.h>
+
+#include "br_private.h"
+
+#define to_class_dev(obj) container_of(obj,struct class_device,kobj)
+#define to_net_dev(class) container_of(class, struct net_device, class_dev)
+#define to_bridge(cd)  ((struct net_bridge *)(to_net_dev(cd)->priv))
+
+/*
+ * Common code for storing bridge parameters.
+ */
+static ssize_t store_bridge_parm(struct class_device *cd,
+                                const char *buf, size_t len,
+                                void (*set)(struct net_bridge *, unsigned long))
+{
+       struct net_bridge *br = to_bridge(cd);
+       char *endp;
+       unsigned long val;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       val = simple_strtoul(buf, &endp, 0);
+       if (endp == buf)
+               return -EINVAL;
+
+       spin_lock_bh(&br->lock);
+       (*set)(br, val);
+       spin_unlock_bh(&br->lock);
+       return len;
+}
+
+
+static ssize_t show_forward_delay(struct class_device *cd, char *buf)
+{
+       struct net_bridge *br = to_bridge(cd);
+       return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
+}
+
+static void set_forward_delay(struct net_bridge *br, unsigned long val)
+{
+       unsigned long delay = clock_t_to_jiffies(val);
+       br->forward_delay = delay;
+       if (br_is_root_bridge(br))
+               br->bridge_forward_delay = delay;
+}
+
+static ssize_t store_forward_delay(struct class_device *cd, const char *buf,
+                                  size_t len)
+{
+       return store_bridge_parm(cd, buf, len, set_forward_delay);
+}
+static CLASS_DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
+                        show_forward_delay, store_forward_delay);
+
+static ssize_t show_hello_time(struct class_device *cd, char *buf)
+{
+       return sprintf(buf, "%lu\n",
+                      jiffies_to_clock_t(to_bridge(cd)->hello_time));
+}
+
+static void set_hello_time(struct net_bridge *br, unsigned long val)
+{
+       unsigned long t = clock_t_to_jiffies(val);
+       br->hello_time = t;
+       if (br_is_root_bridge(br))
+               br->bridge_hello_time = t;
+}
+
+static ssize_t store_hello_time(struct class_device *cd, const char *buf,
+                               size_t len)
+{
+       return store_bridge_parm(cd, buf, len, set_hello_time);
+}
+
+static CLASS_DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
+                        store_hello_time);
+
+static ssize_t show_max_age(struct class_device *cd, char *buf)
+{
+       return sprintf(buf, "%lu\n",
+                      jiffies_to_clock_t(to_bridge(cd)->max_age));
+}
+
+static void set_max_age(struct net_bridge *br, unsigned long val)
+{
+       unsigned long t = clock_t_to_jiffies(val);
+       br->max_age = t;
+       if (br_is_root_bridge(br))
+               br->bridge_max_age = t;
+}
+
+static ssize_t store_max_age(struct class_device *cd, const char *buf,
+                               size_t len)
+{
+       return store_bridge_parm(cd, buf, len, set_max_age);
+}
+
+static CLASS_DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age,
+                        store_max_age);
+
+static ssize_t show_ageing_time(struct class_device *cd, char *buf)
+{
+       struct net_bridge *br = to_bridge(cd);
+       return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time));
+}
+
+static void set_ageing_time(struct net_bridge *br, unsigned long val)
+{
+       br->ageing_time = clock_t_to_jiffies(val);
+}
+
+static ssize_t store_ageing_time(struct class_device *cd, const char *buf,
+                                size_t len)
+{
+       return store_bridge_parm(cd, buf, len, set_ageing_time);
+}
+
+static CLASS_DEVICE_ATTR(ageing_time, S_IRUGO | S_IWUSR, show_ageing_time,
+                        store_ageing_time);
+static ssize_t show_stp_state(struct class_device *cd, char *buf)
+{
+       struct net_bridge *br = to_bridge(cd);
+       return sprintf(buf, "%d\n", br->stp_enabled);
+}
+
+static void set_stp_state(struct net_bridge *br, unsigned long val)
+{
+       br->stp_enabled = val;
+}
+
+static ssize_t store_stp_state(struct class_device *cd,
+                              const char *buf, size_t len)
+{
+       return store_bridge_parm(cd, buf, len, set_stp_state);
+}
+
+static CLASS_DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
+                        store_stp_state);
+
+static ssize_t show_priority(struct class_device *cd, char *buf)
+{
+       struct net_bridge *br = to_bridge(cd);
+       return sprintf(buf, "%d\n",
+                      (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1]);
+}
+
+static void set_priority(struct net_bridge *br, unsigned long val)
+{
+       br_stp_set_bridge_priority(br, (u16) val);
+}
+
+static ssize_t store_priority(struct class_device *cd,
+                              const char *buf, size_t len)
+{
+       return store_bridge_parm(cd, buf, len, set_priority);
+}
+static CLASS_DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, show_priority,
+                        store_priority);
+
+static ssize_t show_root_id(struct class_device *cd, char *buf)
+{
+       return br_show_bridge_id(buf, &to_bridge(cd)->designated_root);
+}
+static CLASS_DEVICE_ATTR(root_id, S_IRUGO, show_root_id, NULL);
+
+static ssize_t show_bridge_id(struct class_device *cd, char *buf)
+{
+       return br_show_bridge_id(buf, &to_bridge(cd)->bridge_id);
+}
+static CLASS_DEVICE_ATTR(bridge_id, S_IRUGO, show_bridge_id, NULL);
+
+static ssize_t show_root_port(struct class_device *cd, char *buf)
+{
+       return sprintf(buf, "%d\n", to_bridge(cd)->root_port);
+}
+static CLASS_DEVICE_ATTR(root_port, S_IRUGO, show_root_port, NULL);
+
+static ssize_t show_root_path_cost(struct class_device *cd, char *buf)
+{
+       return sprintf(buf, "%d\n", to_bridge(cd)->root_path_cost);
+}
+static CLASS_DEVICE_ATTR(root_path_cost, S_IRUGO, show_root_path_cost, NULL);
+
+static ssize_t show_topology_change(struct class_device *cd, char *buf)
+{
+       return sprintf(buf, "%d\n", to_bridge(cd)->topology_change);
+}
+static CLASS_DEVICE_ATTR(topology_change, S_IRUGO, show_topology_change, NULL);
+
+static ssize_t show_topology_change_detected(struct class_device *cd, char *buf)
+{
+       struct net_bridge *br = to_bridge(cd);
+       return sprintf(buf, "%d\n", br->topology_change_detected);
+}
+static CLASS_DEVICE_ATTR(topology_change_detected, S_IRUGO, show_topology_change_detected, NULL);
+
+static ssize_t show_hello_timer(struct class_device *cd, char *buf)
+{
+       struct net_bridge *br = to_bridge(cd);
+       return sprintf(buf, "%ld\n", br_timer_value(&br->hello_timer));
+}
+static CLASS_DEVICE_ATTR(hello_timer, S_IRUGO, show_hello_timer, NULL);
+
+static ssize_t show_tcn_timer(struct class_device *cd, char *buf)
+{
+       struct net_bridge *br = to_bridge(cd);
+       return sprintf(buf, "%ld\n", br_timer_value(&br->tcn_timer));
+}
+static CLASS_DEVICE_ATTR(tcn_timer, S_IRUGO, show_tcn_timer, NULL);
+
+static ssize_t show_topology_change_timer(struct class_device *cd, char *buf)
+{
+       struct net_bridge *br = to_bridge(cd);
+       return sprintf(buf, "%ld\n", br_timer_value(&br->topology_change_timer));
+}
+static CLASS_DEVICE_ATTR(topology_change_timer, S_IRUGO, show_topology_change_timer, NULL);
+
+static ssize_t show_gc_timer(struct class_device *cd, char *buf)
+{
+       struct net_bridge *br = to_bridge(cd);
+       return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer));
+}
+static CLASS_DEVICE_ATTR(gc_timer, S_IRUGO, show_gc_timer, NULL);
+
+static struct attribute *bridge_attrs[] = {
+       &class_device_attr_forward_delay.attr,
+       &class_device_attr_hello_time.attr,
+       &class_device_attr_max_age.attr,
+       &class_device_attr_ageing_time.attr,
+       &class_device_attr_stp_state.attr,
+       &class_device_attr_priority.attr,
+       &class_device_attr_bridge_id.attr,
+       &class_device_attr_root_id.attr,
+       &class_device_attr_root_path_cost.attr,
+       &class_device_attr_root_port.attr,
+       &class_device_attr_topology_change.attr,
+       &class_device_attr_topology_change_detected.attr,
+       &class_device_attr_hello_timer.attr,
+       &class_device_attr_tcn_timer.attr,
+       &class_device_attr_topology_change_timer.attr,
+       &class_device_attr_gc_timer.attr,
+       NULL
+};
+
+static struct attribute_group bridge_group = {
+       .name = SYSFS_BRIDGE_ATTR,
+       .attrs = bridge_attrs,
+};
+
+/*
+ * Export the forwarding information table as a binary file
+ * The records are struct __fdb_entry.
+ *
+ * Returns the number of bytes read.
+ */
+static ssize_t brforward_read(struct kobject *kobj, char *buf,
+                          loff_t off, size_t count)
+{
+       struct class_device *cdev = to_class_dev(kobj);
+       struct net_bridge *br = to_bridge(cdev);
+       int n;
+
+       /* must read whole records */
+       if (off % sizeof(struct __fdb_entry) != 0)
+               return -EINVAL;
+
+       n =  br_fdb_fillbuf(br, buf, 
+                           count / sizeof(struct __fdb_entry),
+                           off / sizeof(struct __fdb_entry));
+
+       if (n > 0)
+               n *= sizeof(struct __fdb_entry);
+       
+       return n;
+}
+
+static struct bin_attribute bridge_forward = {
+       .attr = { .name = SYSFS_BRIDGE_FDB,
+                 .mode = S_IRUGO, 
+                 .owner = THIS_MODULE, },
+       .read = brforward_read,
+};
+
+
+/*
+ * This is a dummy kset so bridge objects don't cause
+ * hotplug events 
+ */
+struct subsystem bridge_subsys = { 
+       .kset = { .hotplug_ops = NULL },
+};
+
+void br_sysfs_init(void)
+{
+       subsystem_register(&bridge_subsys);
+}
+
+void br_sysfs_fini(void)
+{
+       subsystem_unregister(&bridge_subsys);
+}
+
+/*
+ * Add entries in sysfs onto the existing network class device
+ * for the bridge.
+ *   Adds a attribute group "bridge" containing tuning parameters.
+ *   Binary attribute containing the forward table
+ *   Sub directory to hold links to interfaces.
+ *
+ * Note: the ifobj exists only to be a subdirectory
+ *   to hold links.  The ifobj exists in same data structure
+ *   as its parent the bridge so reference counting works.
+ */
+int br_sysfs_addbr(struct net_device *dev)
+{
+       struct kobject *brobj = &dev->class_dev.kobj;
+       struct net_bridge *br = netdev_priv(dev);
+       int err;
+
+       err = sysfs_create_group(brobj, &bridge_group);
+       if (err) {
+               pr_info("%s: can't create group %s/%s\n",
+                       __FUNCTION__, dev->name, bridge_group.name);
+               goto out1;
+       }
+
+       err = sysfs_create_bin_file(brobj, &bridge_forward);
+       if (err) {
+               pr_info("%s: can't create attribue file %s/%s\n",
+                       __FUNCTION__, dev->name, bridge_forward.attr.name);
+               goto out2;
+       }
+
+       
+       kobject_set_name(&br->ifobj, SYSFS_BRIDGE_PORT_SUBDIR);
+       br->ifobj.ktype = NULL;
+       br->ifobj.kset = &bridge_subsys.kset;
+       br->ifobj.parent = brobj;
+
+       err = kobject_register(&br->ifobj);
+       if (err) {
+               pr_info("%s: can't add kobject (directory) %s/%s\n",
+                       __FUNCTION__, dev->name, br->ifobj.name);
+               goto out3;
+       }
+       return 0;
+ out3:
+       sysfs_remove_bin_file(&dev->class_dev.kobj, &bridge_forward);
+ out2:
+       sysfs_remove_group(&dev->class_dev.kobj, &bridge_group);
+ out1:
+       return err;
+
+}
+
+void br_sysfs_delbr(struct net_device *dev)
+{
+       struct kobject *kobj = &dev->class_dev.kobj;
+       struct net_bridge *br = netdev_priv(dev);
+
+       kobject_unregister(&br->ifobj);
+       sysfs_remove_bin_file(kobj, &bridge_forward);
+       sysfs_remove_group(kobj, &bridge_group);
+}
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
new file mode 100644 (file)
index 0000000..df85c66
--- /dev/null
@@ -0,0 +1,108 @@
+#!/usr/bin/perl
+
+#      Check the stack usage of functions
+#
+#      Copyright Joern Engel <joern@wh.fh-wedel.de>
+#      Inspired by Linus Torvalds
+#      Original idea maybe from Keith Owens
+#      s390 port and big speedup by Arnd Bergmann <arnd@bergmann-dalldorf.de>
+#      Mips port by Juan Quintela <quintela@mandrakesoft.com>
+#      IA64 port via Andreas Dilger
+#      Arm port by Holger Schurig
+#      Random bits by Matt Mackall <mpm@selenic.com>
+#
+#      Usage:
+#      objdump -d vmlinux | checkstack.pl [arch]
+#
+#      TODO :  Port to all architectures (one regex per arch)
+
+# check for arch
+#
+# $re is used for two matches:
+# $& (whole re) matches the complete objdump line with the stack growth
+# $1 (first bracket) matches the size of the stack growth
+#
+# use anything else and feel the pain ;)
+{
+       my $arch = shift;
+       if ($arch eq "") {
+               $arch = `uname -m`;
+       }
+
+       $x      = "[0-9a-f]";   # hex character
+       $xs     = "[0-9a-f ]";  # hex character or space
+       if ($arch =~ /^arm$/) {
+               #c0008ffc:      e24dd064        sub     sp, sp, #100    ; 0x64
+               $re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
+       } elsif ($arch =~ /^i[3456]86$/) {
+               #c0105234:       81 ec ac 05 00 00       sub    $0x5ac,%esp
+               $re = qr/^.*[as][du][db]    \$(0x$x{1,8}),\%esp$/o;
+       } elsif ($arch =~ /^ia64$/) {
+               #e0000000044011fc:       01 0f fc 8c     adds r12=-384,r12
+               $re = qr/.*adds.*r12=-(([0-9]{2}|[3-9])[0-9]{2}),r12/o;
+       } elsif ($arch =~ /^mips64$/) {
+               #8800402c:       67bdfff0        daddiu  sp,sp,-16
+               $re = qr/.*daddiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
+       } elsif ($arch =~ /^mips$/) {
+               #88003254:       27bdffe0        addiu   sp,sp,-32
+               $re = qr/.*addiu.*sp,sp,-(([0-9]{2}|[3-9])[0-9]{2})/o;
+       } elsif ($arch =~ /^ppc$/) {
+               #c00029f4:       94 21 ff 30     stwu    r1,-208(r1)
+               $re = qr/.*stwu.*r1,-($x{1,8})\(r1\)/o;
+       } elsif ($arch =~ /^ppc64$/) {
+               #XXX
+               $re = qr/.*stdu.*r1,-($x{1,8})\(r1\)/o;
+       } elsif ($arch =~ /^s390x?$/) {
+               #   11160:       a7 fb ff 60             aghi   %r15,-160
+               $re = qr/.*ag?hi.*\%r15,-(([0-9]{2}|[3-9])[0-9]{2})/o;
+       } else {
+               print("wrong or unknown architecture\n");
+               exit
+       }
+}
+
+sub bysize($) {
+       ($asize = $a) =~ s/.*   +(.*)$/$1/;
+       ($bsize = $b) =~ s/.*   +(.*)$/$1/;
+       $bsize <=> $asize
+}
+
+#
+# main()
+#
+$funcre = qr/^$x* \<(.*)\>:$/;
+while ($line = <STDIN>) {
+       if ($line =~ m/$funcre/) {
+               $func = $1;
+       }
+       if ($line =~ m/$re/) {
+               my $size = $1;
+               $size = hex($size) if ($size =~ /^0x/);
+
+               if ($size > 0x80000000) {
+                       $size = - $size;
+                       $size += 0x80000000;
+                       $size += 0x80000000;
+               }
+
+               $line =~ m/^($xs*).*/;
+               my $addr = $1;
+               $addr =~ s/ /0/g;
+               $addr = "0x$addr";
+
+               my $intro = "$addr $func:";
+               my $padlen = 56 - length($intro);
+               while ($padlen > 0) {
+                       $intro .= '     ';
+                       $padlen -= 8;
+               }
+               next if ($size < 100);
+               $stack[@stack] = "$intro$size\n";
+       }
+}
+
+@sortedstack = sort bysize @stack;
+
+foreach $i (@sortedstack) {
+       print("$i");
+}
diff --git a/scripts/reference_init.pl b/scripts/reference_init.pl
new file mode 100644 (file)
index 0000000..b4e37dc
--- /dev/null
@@ -0,0 +1,102 @@
+#!/usr/bin/perl -w
+#
+# reference_init.pl (C) Keith Owens 2002 <kaos@ocs.com.au>
+#
+# List references to vmlinux init sections from non-init sections.
+
+# Unfortunately I had to exclude references from read only data to .init
+# sections, almost all of these are false positives, they are created by
+# gcc.  The downside of excluding rodata is that there really are some
+# user references from rodata to init code, e.g. drivers/video/vgacon.c
+#
+# const struct consw vga_con = {
+#        con_startup:            vgacon_startup,
+#
+# where vgacon_startup is __init.  If you want to wade through the false
+# positives, take out the check for rodata.
+
+use strict;
+die($0 . " takes no arguments\n") if($#ARGV >= 0);
+
+my %object;
+my $object;
+my $line;
+my $ignore;
+
+$| = 1;
+
+printf("Finding objects, ");
+open(OBJDUMP_LIST, "find . -name '*.o' | xargs objdump -h |") || die "getting objdump list failed";
+while (defined($line = <OBJDUMP_LIST>)) {
+       chomp($line);
+       if ($line =~ /:\s+file format/) {
+               ($object = $line) =~ s/:.*//;
+               $object{$object}->{'module'} = 0;
+               $object{$object}->{'size'} = 0;
+               $object{$object}->{'off'} = 0;
+       }
+       if ($line =~ /^\s*\d+\s+\.modinfo\s+/) {
+               $object{$object}->{'module'} = 1;
+       }
+       if ($line =~ /^\s*\d+\s+\.comment\s+/) {
+               ($object{$object}->{'size'}, $object{$object}->{'off'}) = (split(' ', $line))[2,5];
+       }
+}
+close(OBJDUMP_LIST);
+printf("%d objects, ", scalar keys(%object));
+$ignore = 0;
+foreach $object (keys(%object)) {
+       if ($object{$object}->{'module'}) {
+               ++$ignore;
+               delete($object{$object});
+       }
+}
+printf("ignoring %d module(s)\n", $ignore);
+
+# Ignore conglomerate objects, they have been built from multiple objects and we
+# only care about the individual objects.  If an object has more than one GCC:
+# string in the comment section then it is a conglomerate.  This does not filter
+# out conglomerates that consist of exactly one object; that can't be helped.
+
+printf("Finding conglomerates, ");
+$ignore = 0;
+foreach $object (keys(%object)) {
+       if (exists($object{$object}->{'off'})) {
+               my ($off, $size, $comment, $l);
+               $off = hex($object{$object}->{'off'});
+               $size = hex($object{$object}->{'size'});
+               open(OBJECT, "<$object") || die "cannot read $object";
+               seek(OBJECT, $off, 0) || die "seek to $off in $object failed";
+               $l = read(OBJECT, $comment, $size);
+               die "read $size bytes from $object .comment failed" if ($l != $size);
+               close(OBJECT);
+               if ($comment =~ /GCC\:.*GCC\:/m) {
+                       ++$ignore;
+                       delete($object{$object});
+               }
+       }
+}
+printf("ignoring %d conglomerate(s)\n", $ignore);
+
+printf("Scanning objects\n");
+foreach $object (sort(keys(%object))) {
+       my $from;
+       open(OBJDUMP, "objdump -r $object|") || die "cannot objdump -r $object";
+       while (defined($line = <OBJDUMP>)) {
+               chomp($line);
+               if ($line =~ /RELOCATION RECORDS FOR /) {
+                       ($from = $line) =~ s/.*\[([^]]*).*/$1/;
+               }
+               if (($line =~ /\.init$/ || $line =~ /\.init\./) &&
+                   ($from !~ /\.init$/ &&
+                    $from !~ /\.init\./ &&
+                    $from !~ /\.stab$/ &&
+                    $from !~ /\.rodata$/ &&
+                    $from !~ /\.text\.lock$/ &&
+                    $from !~ /\.debug_/)) {
+                       printf("Error: %s %s refers to %s\n", $object, $from, $line);
+               }
+       }
+       close(OBJDUMP);
+}
+printf("Done\n");