From e812ccbe0c915857ebea6a632bfadc631f7504a9 Mon Sep 17 00:00:00 2001 From: Planet-Lab Support Date: Mon, 12 Jul 2004 21:57:25 +0000 Subject: [PATCH] This commit was manufactured by cvs2svn to create branch 'vserver'. --- Documentation/arm/IXP4xx | 155 + Documentation/cpu-freq/amd-powernow.txt | 38 + Documentation/sound/alsa/Audigy-mixer.txt | 345 ++ arch/arm/configs/ixp4xx_defconfig | 1081 ++++ arch/arm/configs/mainstone_defconfig | 743 +++ arch/arm/configs/smdk2410_defconfig | 667 +++ arch/arm/mach-ixp4xx/Makefile | 10 + arch/arm/mach-ixp4xx/common-pci.c | 543 ++ arch/arm/mach-ixp4xx/common.c | 263 + arch/arm/mach-ixp4xx/coyote-pci.c | 69 + arch/arm/mach-ixp4xx/ixdp425-pci.c | 84 + arch/arm/mach-ixp4xx/prpmc1100-pci.c | 119 + arch/arm/mach-ixp4xx/prpmc1100-setup.c | 90 + arch/arm/mach-s3c2410/mach-smdk2410.c | 109 + arch/cris/arch-v10/drivers/ide.c | 945 ++++ arch/cris/kernel/crisksyms.c | 104 + arch/i386/mach-generic/es7000.c | 28 + arch/ia64/configs/sim_defconfig | 535 ++ arch/ia64/dig/topology.c | 43 + arch/ia64/lib/bitop.c | 88 + arch/mips/au1000/common/cputable.c | 56 + arch/parisc/kernel/unwind.c | 295 + arch/ppc/kernel/dma-mapping.c | 439 ++ arch/ppc/kernel/vecemu.c | 346 ++ arch/ppc/kernel/vector.S | 217 + arch/ppc/platforms/4xx/bubinga.c | 263 + arch/ppc/platforms/4xx/bubinga.h | 69 + arch/ppc/platforms/4xx/ibm405ep.c | 134 + arch/ppc/platforms/4xx/ibm405ep.h | 148 + arch/ppc/platforms/sbc82xx.c | 113 + arch/ppc/platforms/sbc82xx.h | 24 + arch/ppc/syslib/dcr.S | 41 + arch/ppc/syslib/ibm440gx_common.c | 212 + arch/ppc/syslib/ibm440gx_common.h | 54 + arch/ppc/syslib/ibm44x_common.h | 36 + arch/ppc/syslib/ocp.c | 485 ++ arch/ppc64/lib/locks.c | 285 + arch/s390/lib/string.c | 405 ++ arch/sparc64/lib/find_bit.c | 125 + arch/sparc64/lib/splock.S | 23 + arch/x86_64/kernel/domain.c | 93 + drivers/char/drm/drm_irq.h | 371 ++ drivers/char/drm/drm_pciids.h | 203 + drivers/char/watchdog/ixp4xx_wdt.c | 233 + drivers/i2c/busses/i2c-ixp4xx.c | 181 + drivers/i2c/chips/max1619.c | 378 ++ drivers/i2c/chips/rtc8564.c | 396 ++ drivers/i2c/chips/rtc8564.h | 78 + drivers/ide/h8300/ide-h8300.c | 119 + drivers/mtd/maps/ixp4xx.c | 244 + drivers/mtd/maps/wr_sbc82xx_flash.c | 167 + drivers/net/ibm_emac/ibm_emac.h | 263 + drivers/net/ibm_emac/ibm_emac_core.h | 146 + drivers/net/ibm_emac/ibm_emac_debug.c | 224 + drivers/net/ibm_emac/ibm_emac_mal.c | 467 ++ drivers/net/ibm_emac/ibm_emac_mal.h | 130 + drivers/net/ibm_emac/ibm_emac_phy.c | 297 + drivers/net/ibm_emac/ibm_emac_rgmii.h | 65 + drivers/net/ibm_emac/ibm_emac_tah.h | 48 + drivers/net/ibm_emac/ibm_emac_zmii.h | 93 + drivers/net/ne-h8300.c | 666 +++ drivers/pcmcia/pxa2xx_base.h | 3 + drivers/scsi/ipr.c | 6021 +++++++++++++++++++++ drivers/scsi/ipr.h | 1252 +++++ drivers/scsi/pcmcia/sym53c500_cs.c | 1042 ++++ drivers/scsi/qlogicfas408.h | 120 + drivers/scsi/sata_promise.h | 154 + drivers/usb/core/sysfs.c | 229 + drivers/usb/input/touchkitusb.c | 310 ++ drivers/usb/misc/phidgetservo.c | 327 ++ drivers/video/asiliantfb.c | 620 +++ drivers/video/gbefb.c | 1200 ++++ drivers/video/pxafb.h | 129 + fs/reiserfs/xattr.c | 1441 +++++ fs/reiserfs/xattr_acl.c | 563 ++ fs/xfs/linux-2.6/kmem.h | 201 + fs/xfs/linux-2.6/mrlock.h | 106 + fs/xfs/linux-2.6/sema.h | 67 + fs/xfs/linux-2.6/sv.h | 89 + fs/xfs/linux-2.6/xfs_aops.c | 1284 +++++ fs/xfs/linux-2.6/xfs_buf.c | 1812 +++++++ fs/xfs/linux-2.6/xfs_buf.h | 594 ++ fs/xfs/linux-2.6/xfs_file.c | 546 ++ fs/xfs/linux-2.6/xfs_fs_subr.c | 124 + fs/xfs/linux-2.6/xfs_globals.c | 72 + fs/xfs/linux-2.6/xfs_ioctl.c | 1246 +++++ 
fs/xfs/linux-2.6/xfs_iops.h | 51 + fs/xfs/linux-2.6/xfs_linux.h | 365 ++ fs/xfs/linux-2.6/xfs_lrw.c | 1028 ++++ fs/xfs/linux-2.6/xfs_lrw.h | 116 + fs/xfs/linux-2.6/xfs_stats.c | 132 + fs/xfs/linux-2.6/xfs_super.c | 873 +++ fs/xfs/linux-2.6/xfs_super.h | 129 + fs/xfs/linux-2.6/xfs_sysctl.c | 163 + fs/xfs/linux-2.6/xfs_sysctl.h | 110 + fs/xfs/linux-2.6/xfs_vfs.c | 328 ++ fs/xfs/linux-2.6/xfs_vfs.h | 208 + fs/xfs/linux-2.6/xfs_vnode.c | 442 ++ fs/xfs/linux-2.6/xfs_vnode.h | 651 +++ include/asm-arm/arch-ixp4xx/dma.h | 52 + include/asm-arm/arch-ixp4xx/io.h | 388 ++ include/asm-arm/arch-ixp4xx/irq.h | 13 + include/asm-arm/arch-ixp4xx/memory.h | 27 + include/asm-arm/arch-ixp4xx/param.h | 3 + include/asm-arm/arch-ixp4xx/platform.h | 116 + include/asm-arm/arch-ixp4xx/serial.h | 27 + include/asm-arm/arch-ixp4xx/system.h | 43 + include/asm-arm/arch-ixp4xx/time.h | 7 + include/asm-arm/arch-ixp4xx/uncompress.h | 64 + include/asm-arm/arch-pxa/pxafb.h | 68 + include/linux/mempolicy.h | 221 + include/linux/reiserfs_acl.h | 91 + include/linux/reiserfs_xattr.h | 132 + net/bridge/br_sysfs_br.c | 383 ++ scripts/checkstack.pl | 108 + scripts/reference_init.pl | 102 + 116 files changed, 40684 insertions(+) create mode 100644 Documentation/arm/IXP4xx create mode 100644 Documentation/cpu-freq/amd-powernow.txt create mode 100644 Documentation/sound/alsa/Audigy-mixer.txt create mode 100644 arch/arm/configs/ixp4xx_defconfig create mode 100644 arch/arm/configs/mainstone_defconfig create mode 100644 arch/arm/configs/smdk2410_defconfig create mode 100644 arch/arm/mach-ixp4xx/Makefile create mode 100644 arch/arm/mach-ixp4xx/common-pci.c create mode 100644 arch/arm/mach-ixp4xx/common.c create mode 100644 arch/arm/mach-ixp4xx/coyote-pci.c create mode 100644 arch/arm/mach-ixp4xx/ixdp425-pci.c create mode 100644 arch/arm/mach-ixp4xx/prpmc1100-pci.c create mode 100644 arch/arm/mach-ixp4xx/prpmc1100-setup.c create mode 100644 arch/arm/mach-s3c2410/mach-smdk2410.c create mode 100644 arch/cris/arch-v10/drivers/ide.c create mode 100644 arch/cris/kernel/crisksyms.c create mode 100644 arch/i386/mach-generic/es7000.c create mode 100644 arch/ia64/configs/sim_defconfig create mode 100644 arch/ia64/dig/topology.c create mode 100644 arch/ia64/lib/bitop.c create mode 100644 arch/mips/au1000/common/cputable.c create mode 100644 arch/parisc/kernel/unwind.c create mode 100644 arch/ppc/kernel/dma-mapping.c create mode 100644 arch/ppc/kernel/vecemu.c create mode 100644 arch/ppc/kernel/vector.S create mode 100644 arch/ppc/platforms/4xx/bubinga.c create mode 100644 arch/ppc/platforms/4xx/bubinga.h create mode 100644 arch/ppc/platforms/4xx/ibm405ep.c create mode 100644 arch/ppc/platforms/4xx/ibm405ep.h create mode 100644 arch/ppc/platforms/sbc82xx.c create mode 100644 arch/ppc/platforms/sbc82xx.h create mode 100644 arch/ppc/syslib/dcr.S create mode 100644 arch/ppc/syslib/ibm440gx_common.c create mode 100644 arch/ppc/syslib/ibm440gx_common.h create mode 100644 arch/ppc/syslib/ibm44x_common.h create mode 100644 arch/ppc/syslib/ocp.c create mode 100644 arch/ppc64/lib/locks.c create mode 100644 arch/s390/lib/string.c create mode 100644 arch/sparc64/lib/find_bit.c create mode 100644 arch/sparc64/lib/splock.S create mode 100644 arch/x86_64/kernel/domain.c create mode 100644 drivers/char/drm/drm_irq.h create mode 100644 drivers/char/drm/drm_pciids.h create mode 100644 drivers/char/watchdog/ixp4xx_wdt.c create mode 100644 drivers/i2c/busses/i2c-ixp4xx.c create mode 100644 drivers/i2c/chips/max1619.c create mode 100644 drivers/i2c/chips/rtc8564.c create 
mode 100644 drivers/i2c/chips/rtc8564.h create mode 100644 drivers/ide/h8300/ide-h8300.c create mode 100644 drivers/mtd/maps/ixp4xx.c create mode 100644 drivers/mtd/maps/wr_sbc82xx_flash.c create mode 100644 drivers/net/ibm_emac/ibm_emac.h create mode 100644 drivers/net/ibm_emac/ibm_emac_core.h create mode 100644 drivers/net/ibm_emac/ibm_emac_debug.c create mode 100644 drivers/net/ibm_emac/ibm_emac_mal.c create mode 100644 drivers/net/ibm_emac/ibm_emac_mal.h create mode 100644 drivers/net/ibm_emac/ibm_emac_phy.c create mode 100644 drivers/net/ibm_emac/ibm_emac_rgmii.h create mode 100644 drivers/net/ibm_emac/ibm_emac_tah.h create mode 100644 drivers/net/ibm_emac/ibm_emac_zmii.h create mode 100644 drivers/net/ne-h8300.c create mode 100644 drivers/pcmcia/pxa2xx_base.h create mode 100644 drivers/scsi/ipr.c create mode 100644 drivers/scsi/ipr.h create mode 100644 drivers/scsi/pcmcia/sym53c500_cs.c create mode 100644 drivers/scsi/qlogicfas408.h create mode 100644 drivers/scsi/sata_promise.h create mode 100644 drivers/usb/core/sysfs.c create mode 100644 drivers/usb/input/touchkitusb.c create mode 100644 drivers/usb/misc/phidgetservo.c create mode 100644 drivers/video/asiliantfb.c create mode 100644 drivers/video/gbefb.c create mode 100644 drivers/video/pxafb.h create mode 100644 fs/reiserfs/xattr.c create mode 100644 fs/reiserfs/xattr_acl.c create mode 100644 fs/xfs/linux-2.6/kmem.h create mode 100644 fs/xfs/linux-2.6/mrlock.h create mode 100644 fs/xfs/linux-2.6/sema.h create mode 100644 fs/xfs/linux-2.6/sv.h create mode 100644 fs/xfs/linux-2.6/xfs_aops.c create mode 100644 fs/xfs/linux-2.6/xfs_buf.c create mode 100644 fs/xfs/linux-2.6/xfs_buf.h create mode 100644 fs/xfs/linux-2.6/xfs_file.c create mode 100644 fs/xfs/linux-2.6/xfs_fs_subr.c create mode 100644 fs/xfs/linux-2.6/xfs_globals.c create mode 100644 fs/xfs/linux-2.6/xfs_ioctl.c create mode 100644 fs/xfs/linux-2.6/xfs_iops.h create mode 100644 fs/xfs/linux-2.6/xfs_linux.h create mode 100644 fs/xfs/linux-2.6/xfs_lrw.c create mode 100644 fs/xfs/linux-2.6/xfs_lrw.h create mode 100644 fs/xfs/linux-2.6/xfs_stats.c create mode 100644 fs/xfs/linux-2.6/xfs_super.c create mode 100644 fs/xfs/linux-2.6/xfs_super.h create mode 100644 fs/xfs/linux-2.6/xfs_sysctl.c create mode 100644 fs/xfs/linux-2.6/xfs_sysctl.h create mode 100644 fs/xfs/linux-2.6/xfs_vfs.c create mode 100644 fs/xfs/linux-2.6/xfs_vfs.h create mode 100644 fs/xfs/linux-2.6/xfs_vnode.c create mode 100644 fs/xfs/linux-2.6/xfs_vnode.h create mode 100644 include/asm-arm/arch-ixp4xx/dma.h create mode 100644 include/asm-arm/arch-ixp4xx/io.h create mode 100644 include/asm-arm/arch-ixp4xx/irq.h create mode 100644 include/asm-arm/arch-ixp4xx/memory.h create mode 100644 include/asm-arm/arch-ixp4xx/param.h create mode 100644 include/asm-arm/arch-ixp4xx/platform.h create mode 100644 include/asm-arm/arch-ixp4xx/serial.h create mode 100644 include/asm-arm/arch-ixp4xx/system.h create mode 100644 include/asm-arm/arch-ixp4xx/time.h create mode 100644 include/asm-arm/arch-ixp4xx/uncompress.h create mode 100644 include/asm-arm/arch-pxa/pxafb.h create mode 100644 include/linux/mempolicy.h create mode 100644 include/linux/reiserfs_acl.h create mode 100644 include/linux/reiserfs_xattr.h create mode 100644 net/bridge/br_sysfs_br.c create mode 100644 scripts/checkstack.pl create mode 100644 scripts/reference_init.pl diff --git a/Documentation/arm/IXP4xx b/Documentation/arm/IXP4xx new file mode 100644 index 000000000..d86d818a4 --- /dev/null +++ b/Documentation/arm/IXP4xx @@ -0,0 +1,155 @@ + 
+------------------------------------------------------------------------- +Release Notes for Linux on Intel's IXP4xx Network Processor + +Maintained by Deepak Saxena +------------------------------------------------------------------------- + +1. Overview + +Intel's IXP4xx network processor is a highly integrated SOC that +is targeted for network applications, though it has become popular +in industrial control and other areas due to low cost and power +consumption. The IXP4xx family currently consists of several processors +that support different network offload functions such as encryption, +routing, firewalling, etc. For more information on the various +versions of the CPU, see: + + http://developer.intel.com/design/network/products/npfamily/ixp4xx.htm + +Intel also made the IXCP1100 CPU for sometime which is an IXP4xx +stripped of much of the network intelligence. + +2. Linux Support + +Linux currently supports the following features on the IXP4xx chips: + +- Dual serial ports +- PCI interface +- Flash access (MTD/JFFS) +- I2C through GPIO +- GPIO for input/output/interrupts + See include/asm-arm/arch-ixp4xx/platform.h for access functions. +- Timers (watchdog, OS) + +The following components of the chips are not supported by Linux and +require the use of Intel's propietary CSR softare: + +- USB device interface +- Network interfaces (HSS, Utopia, NPEs, etc) +- Network offload functionality + +If you need to use any of the above, you need to download Intel's +software from: + + http://developer.intel.com/design/network/products/npfamily/ixp425swr1.htm + +DO NOT POST QUESTIONS TO THE LINUX MAILING LISTS REGARDING THE PROPIETARY +SOFTWARE. + +There are several websites that provide directions/pointers on using +Intel's software: + +http://ixp4xx-osdg.sourceforge.net/ + Open Source Developer's Guide for using uClinux and the Intel libraries + +http://gatewaymaker.sourceforge.net/ + Simple one page summary of building a gateway using an IXP425 and Linux + +http://ixp425.sourceforge.net/ + ATM device driver for IXP425 that relies on Intel's libraries + +3. Known Issues/Limitations + +3a. Limited inbound PCI window + +The IXP4xx family allows for up to 256MB of memory but the PCI interface +can only expose 64MB of that memory to the PCI bus. This means that if +you are running with > 64MB, all PCI buffers outside of the accessible +range will be bounced using the routines in arch/arm/common/dmabounce.c. + +3b. Limited outbound PCI window + +IXP4xx provides two methods of accessing PCI memory space: + +1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB). + To access PCI via this space, we simply ioremap() the BAR + into the kernel and we can use the standard read[bwl]/write[bwl] + macros. This is the preffered method due to speed but it + limits the system to just 64MB of PCI memory. This can be + problamatic if using video cards and other memory-heavy devices. + +2) If > 64MB of memory space is required, the IXP4xx can be + configured to use indirect registers to access PCI This allows + for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus. + The disadvantadge of this is that every PCI access requires + three local register accesses plus a spinlock, but in some + cases the performance hit is acceptable. In addition, you cannot + mmap() PCI devices in this case due to the indirect nature + of the PCI window. + +By default, the direct method is used for performance reasons. If +you need more PCI memory, enable the IXP4XX_INDIRECT_PCI config option. + +3c. 
+3c. GPIO as Interrupts
+
+Currently the code only handles level-sensitive GPIO interrupts.
+
+4. Supported platforms
+
+ADI Engineering Coyote Gateway Reference Platform
+http://www.adiengineering.com/productsCoyote.html
+
+   The ADI Coyote platform is a reference design for those building
+   small residential/office gateways. One NPE is connected to a 10/100
+   interface, one to a 4-port 10/100 switch, and the third to an ADSL
+   interface. In addition, it also supports two POTS interfaces connected
+   via SLICs. Note that those are not supported by Linux ATM. The
+   platform has two mini-PCI slots used for 802.11[bga] cards. Finally,
+   there is an IDE port hanging off the expansion bus.
+
+Gateworks Avila Network Platform
+http://www.gateworks.com/avila_sbc.htm
+
+   The Avila platform is basically an IXDP425 with the 4 PCI slots
+   replaced with mini-PCI slots and a CF IDE interface hanging off
+   the expansion bus.
+
+Intel IXDP425 Development Platform
+http://developer.intel.com/design/network/products/npfamily/ixdp425.htm
+
+   This is Intel's standard reference platform for the IXDP425 and is
+   also known as the Richfield board. It contains 4 PCI slots, 16MB
+   of flash, two 10/100 ports and one ADSL port.
+
+Motorola PrPMC1100 Processor Mezzanine Card
+http://www.fountainsys.com/datasheet/PrPMC1100.pdf
+
+   The PrPMC1100 is based on the IXCP1100 and is meant to plug into
+   an IXP2400/2800 system to act as the system controller. It simply
+   contains a CPU and 16MB of flash on the board and needs to be
+   plugged into a carrier board to function. Currently Linux only
+   supports the Motorola PrPMC carrier board for this platform.
+   See https://mcg.motorola.com/us/ds/pdf/ds0144.pdf for info
+   on the carrier board.
+
+5. TODO LIST
+
+- Add support for Coyote IDE
+- Add support for edge-based GPIO interrupts
+- Add support for CF IDE on expansion bus
+
+6. Thanks
+
+The IXP4xx work has been funded by Intel Corp. and MontaVista Software, Inc.
+
+The following people have contributed patches/comments/etc:
+
+Lutz Jaenicke
+Justin Mayfield
+Robert E. Ranslam
+[I know I've forgotten others, please email me to be added]
+
+-------------------------------------------------------------------------
+
+Last Update: 5/13/2004
diff --git a/Documentation/cpu-freq/amd-powernow.txt b/Documentation/cpu-freq/amd-powernow.txt
new file mode 100644
index 000000000..254da155f
--- /dev/null
+++ b/Documentation/cpu-freq/amd-powernow.txt
@@ -0,0 +1,38 @@
+
+PowerNow! and Cool'n'Quiet are AMD names for frequency
+management capabilities in AMD processors. As the hardware
+implementation changes in new generations of the processors,
+there is a different cpu-freq driver for each generation.
+
+Note that the drivers will not load on the "wrong" hardware,
+so it is safe to try each driver in turn when in doubt as to
+which is the correct driver.
+
+Note that the functionality to change frequency (and voltage)
+is not available in all processors. The drivers will refuse
+to load on processors without this capability. The capability
+is detected with the cpuid instruction.
+
+The drivers use BIOS supplied tables to obtain frequency and
+voltage information appropriate for a particular platform.
+Frequency transitions will be unavailable if the BIOS does
+not supply these tables.
+
+6th Generation: powernow-k6
+
+7th Generation: powernow-k7: Athlon, Duron, Geode.
+
+8th Generation: powernow-k8: Athlon, Athlon 64, Opteron, Sempron.
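To illustrate the cpuid-based capability check mentioned above, here is a rough user-space sketch (editorial, not part of the patch, and only an approximation of what the drivers do in-kernel); it assumes AMD's "Advanced Power Management" CPUID leaf 0x80000007, where EDX bit 1 reports frequency-ID control and bit 2 voltage-ID control:

    /* Sketch: probing for frequency/voltage control with cpuid. */
    #include <stdio.h>

    static void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx)
    {
        __asm__("cpuid"
                : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                : "a" (op));
    }

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
        if (eax < 0x80000007) {
            printf("no power management feature leaf\n");
            return 1;
        }

        cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
        /* bit 1: frequency ID control, bit 2: voltage ID control */
        printf("FID control: %s, VID control: %s\n",
               (edx & (1 << 1)) ? "yes" : "no",
               (edx & (1 << 2)) ? "yes" : "no");
        return 0;
    }
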
+Documentation on this functionality in 8th generation processors
+is available in the "BIOS and Kernel Developer's Guide", publication
+26094, in chapter 9, available for download from www.amd.com.
+
+BIOS supplied data, for powernow-k7 and for powernow-k8, may be
+from either the PSB table or from ACPI objects. The ACPI support
+is only available if the kernel config sets CONFIG_ACPI_PROCESSOR.
+The powernow-k8 driver will attempt to use ACPI if so configured,
+and fall back to PST if that fails.
+The powernow-k7 driver will try to use the PSB support first, and
+fall back to ACPI if the PSB support fails. A module parameter,
+acpi_force, is provided to force ACPI support to be used instead
+of PSB support.
diff --git a/Documentation/sound/alsa/Audigy-mixer.txt b/Documentation/sound/alsa/Audigy-mixer.txt
new file mode 100644
index 000000000..5132fd95e
--- /dev/null
+++ b/Documentation/sound/alsa/Audigy-mixer.txt
@@ -0,0 +1,345 @@
+
+    Sound Blaster Audigy mixer / default DSP code
+    ===========================================
+
+This is based on SB-Live-mixer.txt.
+
+The EMU10K2 chips have a DSP part which can be programmed to support
+various ways of sample processing, which is described here.
+(This article does not deal with the overall functionality of the
+EMU10K2 chips. See the manuals section for further details.)
+
+The ALSA driver programs this portion of the chip with default code
+(which can be altered later) that offers the following functionality:
+
+
+1) Digital mixer controls
+-------------------------
+
+These controls are built using the DSP instructions. They offer extended
+functionality. Only the default built-in code in the ALSA driver is described
+here. Note that the controls work as attenuators: the maximum value is the
+neutral position leaving the signal unchanged. Note that if the same destination
+is mentioned in multiple controls, the signal is accumulated and can be wrapped
+(set to the maximal or minimal value without overflow checking).
+
+
+Explanation of used abbreviations:
+
+DAC - digital to analog converter
+ADC - analog to digital converter
+I2S - one-way three wire serial bus for digital sound by Philips Semiconductors
+      (this standard is used for connecting standalone DAC and ADC converters)
+LFE - low frequency effects (subwoofer signal)
+AC97 - a chip containing an analog mixer, DAC and ADC converters
+IEC958 - S/PDIF
+FX-bus - the EMU10K2 chip has an effect bus containing 64 accumulators.
+         Each of the synthesizer voices can feed its output to these accumulators
+         and the DSP microcontroller can operate with the resulting sum.
+
+name='PCM Front Playback Volume',index=0
+
+This control is used to attenuate samples for left and right front PCM FX-bus
+accumulators. ALSA uses accumulators 8 and 9 for left and right front PCM
+samples for 5.1 playback. The result samples are forwarded to the front DAC PCM
+slots of the Philips DAC.
+
+name='PCM Surround Playback Volume',index=0
+
+This control is used to attenuate samples for left and right surround PCM FX-bus
+accumulators. ALSA uses accumulators 2 and 3 for left and right surround PCM
+samples for 5.1 playback. The result samples are forwarded to the surround DAC PCM
+slots of the Philips DAC.
+
+name='PCM Center Playback Volume',index=0
+
+This control is used to attenuate samples for the center PCM FX-bus accumulator.
+ALSA uses accumulator 6 for the center PCM sample for 5.1 playback. The result sample
+is forwarded to the center DAC PCM slot of the Philips DAC.
+
+name='PCM LFE Playback Volume',index=0
+
+This control is used to attenuate the sample for the LFE PCM FX-bus accumulator.
+ALSA uses accumulator 7 for the LFE PCM sample for 5.1 playback. The result sample
+is forwarded to the LFE DAC PCM slot of the Philips DAC.
+
+name='PCM Playback Volume',index=0
+
+This control is used to attenuate samples for left and right PCM FX-bus
+accumulators. ALSA uses accumulators 0 and 1 for left and right PCM samples for
+stereo playback. The result samples are forwarded to the front DAC PCM slots
+of the Philips DAC.
+
+name='PCM Capture Volume',index=0
+
+This control is used to attenuate samples for left and right PCM FX-bus
+accumulators. ALSA uses accumulators 0 and 1 for left and right PCM.
+The result is forwarded to the ADC capture FIFO (thus to the standard capture
+PCM device).
+
+name='Music Playback Volume',index=0
+
+This control is used to attenuate samples for left and right MIDI FX-bus
+accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples.
+The result samples are forwarded to the front DAC PCM slots of the AC97 codec.
+
+name='Music Capture Volume',index=0
+
+This control is used to attenuate samples for left and right MIDI FX-bus
+accumulators. ALSA uses accumulators 4 and 5 for left and right PCM.
+The result is forwarded to the ADC capture FIFO (thus to the standard capture
+PCM device).
+
+name='Mic Playback Volume',index=0
+
+This control is used to attenuate samples for the left and right Mic input.
+The AC97 codec is used for the Mic input. The result samples are forwarded to
+the front DAC PCM slots of the Philips DAC. Samples are also forwarded to the Mic
+capture FIFO (device 1 - 16bit/8KHz mono) without volume control.
+
+name='Mic Capture Volume',index=0
+
+This control is used to attenuate samples for the left and right Mic input.
+The result is forwarded to the ADC capture FIFO (thus to the standard capture
+PCM device).
+
+name='Audigy CD Playback Volume',index=0
+
+This control is used to attenuate samples from left and right IEC958 TTL
+digital inputs (usually used by a CDROM drive). The result samples are
+forwarded to the front DAC PCM slots of the Philips DAC.
+
+name='Audigy CD Capture Volume',index=0
+
+This control is used to attenuate samples from left and right IEC958 TTL
+digital inputs (usually used by a CDROM drive). The result samples are
+forwarded to the ADC capture FIFO (thus to the standard capture PCM device).
+
+name='IEC958 Optical Playback Volume',index=0
+
+This control is used to attenuate samples from the left and right IEC958 optical
+digital input. The result samples are forwarded to the front DAC PCM slots
+of the Philips DAC.
+
+name='IEC958 Optical Capture Volume',index=0
+
+This control is used to attenuate samples from left and right IEC958 optical
+digital inputs. The result samples are forwarded to the ADC capture FIFO
+(thus to the standard capture PCM device).
+
+name='Line2 Playback Volume',index=0
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs (on the AudigyDrive). The result samples are forwarded to the front
+DAC PCM slots of the Philips DAC.
+
+name='Line2 Capture Volume',index=1
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs (on the AudigyDrive). The result samples are forwarded to the ADC
+capture FIFO (thus to the standard capture PCM device).
+
+name='Analog Mix Playback Volume',index=0
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs of the Philips ADC. The result samples are forwarded to the front
+DAC PCM slots of the Philips DAC. This contains the mix from analog sources
+like CD, Line In, Aux, ....
+
+name='Analog Mix Capture Volume',index=1
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs of the Philips ADC. The result samples are forwarded to the ADC
+capture FIFO (thus to the standard capture PCM device).
+
+name='Aux2 Playback Volume',index=0
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs (on the AudigyDrive). The result samples are forwarded to the front
+DAC PCM slots of the Philips DAC.
+
+name='Aux2 Capture Volume',index=1
+
+This control is used to attenuate samples from left and right I2S ADC
+inputs (on the AudigyDrive). The result samples are forwarded to the ADC
+capture FIFO (thus to the standard capture PCM device).
+
+name='Front Playback Volume',index=0
+
+All stereo signals are mixed together and mirrored to surround, center and LFE.
+This control is used to attenuate samples for the left and right front speakers of
+this mix.
+
+name='Surround Playback Volume',index=0
+
+All stereo signals are mixed together and mirrored to surround, center and LFE.
+This control is used to attenuate samples for the left and right surround speakers of
+this mix.
+
+name='Center Playback Volume',index=0
+
+All stereo signals are mixed together and mirrored to surround, center and LFE.
+This control is used to attenuate the sample for the center speaker of this mix.
+
+name='LFE Playback Volume',index=0
+
+All stereo signals are mixed together and mirrored to surround, center and LFE.
+This control is used to attenuate the sample for the LFE speaker of this mix.
+
+name='Tone Control - Switch',index=0
+
+This control turns the tone control on or off. The samples for front, rear
+and center / LFE outputs are affected.
+
+name='Tone Control - Bass',index=0
+
+This control sets the bass intensity. There is no neutral value!!
+When the tone control code is activated, the samples are always modified.
+The closest value to the pure signal is 20.
+
+name='Tone Control - Treble',index=0
+
+This control sets the treble intensity. There is no neutral value!!
+When the tone control code is activated, the samples are always modified.
+The closest value to the pure signal is 20.
+
+name='Master Playback Volume',index=0
+
+This control is used to attenuate samples for the front, surround, center and
+LFE outputs.
+
+name='IEC958 Optical Raw Playback Switch',index=0
+
+If this switch is on, then the samples for the IEC958 (S/PDIF) digital
+output are taken only from the raw FX8010 PCM, otherwise standard front
+PCM samples are taken.
+
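All of the controls above are ordinary ALSA control elements addressed by the name/index pairs listed here. As a usage illustration (an editorial sketch, not part of the patch: "hw:0" and the value 100 are arbitrary examples, and the control's actual range should be queried with snd_ctl_elem_info), one of them can be set from user space with the alsa-lib control API:

    /* Sketch: setting 'PCM Front Playback Volume' by name and index. */
    #include <alsa/asoundlib.h>

    int main(void)
    {
        snd_ctl_t *ctl;
        snd_ctl_elem_id_t *id;
        snd_ctl_elem_value_t *val;

        if (snd_ctl_open(&ctl, "hw:0", 0) < 0)
            return 1;

        snd_ctl_elem_id_alloca(&id);
        snd_ctl_elem_id_set_interface(id, SND_CTL_ELEM_IFACE_MIXER);
        snd_ctl_elem_id_set_name(id, "PCM Front Playback Volume");
        snd_ctl_elem_id_set_index(id, 0);

        snd_ctl_elem_value_alloca(&val);
        snd_ctl_elem_value_set_id(val, id);

        /* stereo control: channel 0 = left, channel 1 = right; the
         * maximum value is the neutral (no attenuation) position */
        snd_ctl_elem_value_set_integer(val, 0, 100);
        snd_ctl_elem_value_set_integer(val, 1, 100);
        snd_ctl_elem_write(ctl, val);

        snd_ctl_close(ctl);
        return 0;
    }
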
+
+2) PCM stream related controls
+------------------------------
+
+name='EMU10K1 PCM Volume',index 0-31
+
+Channel volume attenuation in the range 0-0xffff. The maximum value (no
+attenuation) is the default. The channel mapping for the three values is
+as follows:
+
+  0 - mono, default 0xffff (no attenuation)
+  1 - left, default 0xffff (no attenuation)
+  2 - right, default 0xffff (no attenuation)
+
+name='EMU10K1 PCM Send Routing',index 0-31
+
+This control specifies the destination - FX-bus accumulators. There are 24
+values with this mapping:
+
+  0 - mono, A destination (FX-bus 0-63), default 0
+  1 - mono, B destination (FX-bus 0-63), default 1
+  2 - mono, C destination (FX-bus 0-63), default 2
+  3 - mono, D destination (FX-bus 0-63), default 3
+  4 - mono, E destination (FX-bus 0-63), default 0
+  5 - mono, F destination (FX-bus 0-63), default 0
+  6 - mono, G destination (FX-bus 0-63), default 0
+  7 - mono, H destination (FX-bus 0-63), default 0
+  8 - left, A destination (FX-bus 0-63), default 0
+  9 - left, B destination (FX-bus 0-63), default 1
+ 10 - left, C destination (FX-bus 0-63), default 2
+ 11 - left, D destination (FX-bus 0-63), default 3
+ 12 - left, E destination (FX-bus 0-63), default 0
+ 13 - left, F destination (FX-bus 0-63), default 0
+ 14 - left, G destination (FX-bus 0-63), default 0
+ 15 - left, H destination (FX-bus 0-63), default 0
+ 16 - right, A destination (FX-bus 0-63), default 0
+ 17 - right, B destination (FX-bus 0-63), default 1
+ 18 - right, C destination (FX-bus 0-63), default 2
+ 19 - right, D destination (FX-bus 0-63), default 3
+ 20 - right, E destination (FX-bus 0-63), default 0
+ 21 - right, F destination (FX-bus 0-63), default 0
+ 22 - right, G destination (FX-bus 0-63), default 0
+ 23 - right, H destination (FX-bus 0-63), default 0
+
+Don't forget that it's illegal to assign a channel to the same FX-bus accumulator
+more than once (it means 0=0 && 1=0 is an invalid combination).
+
+name='EMU10K1 PCM Send Volume',index 0-31
+
+It specifies the attenuation (amount) for a given destination in the range 0-255.
+The channel mapping is as follows:
+
+  0 - mono, A destination attn, default 255 (no attenuation)
+  1 - mono, B destination attn, default 255 (no attenuation)
+  2 - mono, C destination attn, default 0 (mute)
+  3 - mono, D destination attn, default 0 (mute)
+  4 - mono, E destination attn, default 0 (mute)
+  5 - mono, F destination attn, default 0 (mute)
+  6 - mono, G destination attn, default 0 (mute)
+  7 - mono, H destination attn, default 0 (mute)
+  8 - left, A destination attn, default 255 (no attenuation)
+  9 - left, B destination attn, default 0 (mute)
+ 10 - left, C destination attn, default 0 (mute)
+ 11 - left, D destination attn, default 0 (mute)
+ 12 - left, E destination attn, default 0 (mute)
+ 13 - left, F destination attn, default 0 (mute)
+ 14 - left, G destination attn, default 0 (mute)
+ 15 - left, H destination attn, default 0 (mute)
+ 16 - right, A destination attn, default 0 (mute)
+ 17 - right, B destination attn, default 255 (no attenuation)
+ 18 - right, C destination attn, default 0 (mute)
+ 19 - right, D destination attn, default 0 (mute)
+ 20 - right, E destination attn, default 0 (mute)
+ 21 - right, F destination attn, default 0 (mute)
+ 22 - right, G destination attn, default 0 (mute)
+ 23 - right, H destination attn, default 0 (mute)
+
+
+4) MANUALS/PATENTS:
+-------------------
+
+ftp://opensource.creative.com/pub/doc
+-------------------------------------
+
+   Files:
+   LM4545.pdf   AC97 Codec
+
+   m2049.pdf    The EMU10K1 Digital Audio Processor
+
+   hog63.ps     FX8010 - A DSP Chip Architecture for Audio Effects
+
+
+WIPO Patents
+------------
+   Patent numbers:
+   WO 9901813 (A1) Audio Effects Processor with multiple asynchronous (Jan. 14, 1999)
+                   streams
+
+   WO 9901814 (A1) Processor with Instruction Set for Audio Effects (Jan. 14, 1999)
+
+   WO 9901953 (A1) Audio Effects Processor having Decoupled Instruction
+                   Execution and Audio Data Sequencing (Jan.
14, 1999) + + +US Patents (http://www.uspto.gov/) +---------------------------------- + + US 5925841 Digital Sampling Instrument employing cache memory (Jul. 20, 1999) + + US 5928342 Audio Effects Processor integrated on a single chip (Jul. 27, 1999) + with a multiport memory onto which multiple asynchronous + digital sound samples can be concurrently loaded + + US 5930158 Processor with Instruction Set for Audio Effects (Jul. 27, 1999) + + US 6032235 Memory initialization circuit (Tram) (Feb. 29, 2000) + + US 6138207 Interpolation looping of audio samples in cache connected to (Oct. 24, 2000) + system bus with prioritization and modification of bus transfers + in accordance with loop ends and minimum block sizes + + US 6151670 Method for conserving memory storage using a (Nov. 21, 2000) + pool of short term memory registers + + US 6195715 Interrupt control for multiple programs communicating with (Feb. 27, 2001) + a common interrupt by associating programs to GP registers, + defining interrupt register, polling GP registers, and invoking + callback routine associated with defined interrupt register diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig new file mode 100644 index 000000000..fd95f39bc --- /dev/null +++ b/arch/arm/configs/ixp4xx_defconfig @@ -0,0 +1,1081 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_ARM=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y + +# +# Loadable module support +# +CONFIG_MODULES=y +# CONFIG_MODULE_UNLOAD is not set +CONFIG_OBSOLETE_MODPARM=y +CONFIG_MODVERSIONS=y +CONFIG_KMOD=y + +# +# System Type +# +# CONFIG_ARCH_ADIFCC is not set +# CONFIG_ARCH_CLPS7500 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CO285 is not set +# CONFIG_ARCH_PXA is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_CAMELOT is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP3XX is not set +CONFIG_ARCH_IXP4XX=y +# CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_VERSATILE_PB is not set + +# +# CLPS711X/EP721X Implementations +# + +# +# Epxa10db +# + +# +# Footbridge Implementations +# + +# +# IOP3xx Implementation Options +# +# CONFIG_ARCH_IOP310 is not set +# CONFIG_ARCH_IOP321 is not set + +# +# IOP3xx Chipset Features +# +CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y + +# +# Intel IXP4xx Implementation Options +# + +# +# IXP4xx Platforms +# +CONFIG_ARCH_IXDP425=y +CONFIG_ARCH_IXCDP1100=y +CONFIG_ARCH_PRPMC1100=y +CONFIG_ARCH_ADI_COYOTE=y +# CONFIG_ARCH_AVILA is not set +CONFIG_ARCH_IXDP4XX=y + +# +# IXP4xx Options +# +# CONFIG_IXP4XX_INDIRECT_PCI is not set + +# +# Intel PXA250/210 Implementations +# + +# +# SA11x0 Implementations +# + +# +# TI OMAP Implementations +# + +# +# OMAP Core Type +# + +# +# OMAP 
Board Type +# + +# +# OMAP Feature Selections +# + +# +# S3C2410 Implementations +# + +# +# LH7A40X Implementations +# +CONFIG_DMABOUNCE=y + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_XSCALE=y +CONFIG_CPU_32v5=y +CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_TLB_V4WBI=y +CONFIG_CPU_MINICACHE=y + +# +# Processor Features +# +# CONFIG_ARM_THUMB is not set +CONFIG_CPU_BIG_ENDIAN=y +CONFIG_XSCALE_PMU=y + +# +# General setup +# +CONFIG_PCI=y +# CONFIG_ZBOOT_ROM is not set +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_PCI_LEGACY_PROC=y +CONFIG_PCI_NAMES=y + +# +# At least one math emulation must be selected +# +CONFIG_FPE_NWFPE=y +# CONFIG_FPE_NWFPE_XP is not set +# CONFIG_FPE_FASTFPE is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Generic Driver Options +# +# CONFIG_DEBUG_DRIVER is not set +CONFIG_PM=y +# CONFIG_PREEMPT is not set +CONFIG_APM=y +# CONFIG_ARTHUR is not set +CONFIG_CMDLINE="console=ttyS0,115200 ip=bootp root=/dev/nfs" +CONFIG_ALIGNMENT_TRAP=y + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_REDBOOT_PARTS=y +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# CONFIG_MTD_OBSOLETE_CHIPS is not set + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_ARM_INTEGRATOR is not set +CONFIG_MTD_IXP4XX=y +# CONFIG_MTD_EDB7312 is not set +# CONFIG_MTD_PCI is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLKMTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +CONFIG_MTD_NAND=m +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +CONFIG_MTD_NAND_IDS=m + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_CARMEL is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_BLK_DEV_INITRD=y + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=m +CONFIG_PACKET_MMAP=y +CONFIG_NETLINK_DEV=m +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_FWMARK=y +CONFIG_IP_ROUTE_NAT=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_TOS=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# 
CONFIG_IP_PNP_RARP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set + +# +# IP: Virtual Server Configuration +# +CONFIG_IP_VS=m +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +# CONFIG_IP_VS_PROTO_TCP is not set +# CONFIG_IP_VS_PROTO_UDP is not set +# CONFIG_IP_VS_PROTO_ESP is not set +# CONFIG_IP_VS_PROTO_AH is not set + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_SED is not set +# CONFIG_IP_VS_NQ is not set + +# +# IPVS application helper +# +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_BRIDGE_NETFILTER=y + +# +# IP: Netfilter Configuration +# +CONFIG_IP_NF_CONNTRACK=m +CONFIG_IP_NF_FTP=m +CONFIG_IP_NF_IRC=m +# CONFIG_IP_NF_TFTP is not set +# CONFIG_IP_NF_AMANDA is not set +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_LIMIT=m +# CONFIG_IP_NF_MATCH_IPRANGE is not set +CONFIG_IP_NF_MATCH_MAC=m +# CONFIG_IP_NF_MATCH_PKTTYPE is not set +CONFIG_IP_NF_MATCH_MARK=m +CONFIG_IP_NF_MATCH_MULTIPORT=m +CONFIG_IP_NF_MATCH_TOS=m +# CONFIG_IP_NF_MATCH_RECENT is not set +# CONFIG_IP_NF_MATCH_ECN is not set +# CONFIG_IP_NF_MATCH_DSCP is not set +CONFIG_IP_NF_MATCH_AH_ESP=m +CONFIG_IP_NF_MATCH_LENGTH=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_MATCH_TCPMSS=m +# CONFIG_IP_NF_MATCH_HELPER is not set +CONFIG_IP_NF_MATCH_STATE=m +# CONFIG_IP_NF_MATCH_CONNTRACK is not set +CONFIG_IP_NF_MATCH_OWNER=m +# CONFIG_IP_NF_MATCH_PHYSDEV is not set +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_REDIRECT=m +# CONFIG_IP_NF_TARGET_NETMAP is not set +# CONFIG_IP_NF_TARGET_SAME is not set +CONFIG_IP_NF_NAT_LOCAL=y +CONFIG_IP_NF_NAT_SNMP_BASIC=m +CONFIG_IP_NF_NAT_IRC=m +CONFIG_IP_NF_NAT_FTP=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_TOS=m +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_DSCP is not set +CONFIG_IP_NF_TARGET_MARK=m +# CONFIG_IP_NF_TARGET_CLASSIFY is not set +CONFIG_IP_NF_TARGET_LOG=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_TARGET_TCPMSS=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +# CONFIG_IP_NF_ARP_MANGLE is not set +CONFIG_IP_NF_COMPAT_IPCHAINS=m +CONFIG_IP_NF_COMPAT_IPFWADM=m +# CONFIG_IP_NF_RAW is not set + +# +# Bridge: Netfilter Configuration +# +# CONFIG_BRIDGE_NF_EBTABLES is not set +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +CONFIG_ATM=y +CONFIG_ATM_CLIP=y +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +CONFIG_IPX=m +# CONFIG_IPX_INTERN is not set +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=y +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_X25=m +CONFIG_LAPB=m +# CONFIG_NET_DIVERT is not set +CONFIG_ECONET=m +CONFIG_ECONET_AUNUDP=y +CONFIG_ECONET_NATIVE=y +CONFIG_WAN_ROUTER=m +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing 
+# +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +# CONFIG_NET_SCH_HFSC is not set +CONFIG_NET_SCH_CSZ=m +# CONFIG_NET_SCH_ATM is not set +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +# CONFIG_NET_SCH_DELAY is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_QOS=y +CONFIG_NET_ESTIMATOR=y +CONFIG_NET_CLS=y +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_ROUTE=y +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_POLICE=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_ETHERTAP is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set + +# +# Tulip family network device support +# +# CONFIG_NET_TULIP is not set +# CONFIG_HP100 is not set +CONFIG_NET_PCI=y +# CONFIG_PCNET32 is not set +# CONFIG_AMD8111_ETH is not set +# CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_B44 is not set +# CONFIG_FORCEDETH is not set +# CONFIG_DGRS is not set +CONFIG_EEPRO100=y +# CONFIG_EEPRO100_PIO is not set +# CONFIG_E100 is not set +# CONFIG_FEALNX is not set +# CONFIG_NATSEMI is not set +# CONFIG_NE2K_PCI is not set +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_SIS900 is not set +# CONFIG_EPIC100 is not set +# CONFIG_SUNDANCE is not set +# CONFIG_TLAN is not set +# CONFIG_VIA_RHINE is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +CONFIG_NET_RADIO=y + +# +# Obsolete Wireless cards support (pre-802.11) +# +# CONFIG_STRIP is not set + +# +# Wireless 802.11b ISA/PCI cards support +# +# CONFIG_AIRO is not set +CONFIG_HERMES=y +# CONFIG_PLX_HERMES is not set +# CONFIG_TMD_HERMES is not set +CONFIG_PCI_HERMES=y +# CONFIG_ATMEL is not set + +# +# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support +# +CONFIG_NET_WIRELESS=y + +# +# Wan interfaces +# +CONFIG_WAN=y +# CONFIG_DSCC4 is not set +# CONFIG_LANMEDIA is not set +# CONFIG_SYNCLINK_SYNCPPP is not set +CONFIG_HDLC=m +CONFIG_HDLC_RAW=y +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=y +CONFIG_HDLC_FR=y +CONFIG_HDLC_PPP=y +CONFIG_HDLC_X25=y +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300 is not set +# CONFIG_FARSYNC is not set +CONFIG_DLCI=m +CONFIG_DLCI_COUNT=24 +CONFIG_DLCI_MAX=8 +CONFIG_WAN_ROUTER_DRIVERS=y +# CONFIG_CYCLADES_SYNC is not set +# CONFIG_LAPBETHER is not set +# CONFIG_X25_ASY is not set + +# +# ATM drivers +# +CONFIG_ATM_TCP=m +# CONFIG_ATM_LANAI is not set +# CONFIG_ATM_ENI is not set +# CONFIG_ATM_FIRESTREAM is not set +# CONFIG_ATM_ZATM is not set +# CONFIG_ATM_NICSTAR is not set +# CONFIG_ATM_IDT77252 is not set +# 
CONFIG_ATM_AMBASSADOR is not set +# CONFIG_ATM_HORIZON is not set +# CONFIG_ATM_IA is not set +# CONFIG_ATM_FORE200E_MAYBE is not set +# CONFIG_ATM_HE is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_RCPCI is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ATA/ATAPI/MFM/RLL support +# +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +CONFIG_BLK_DEV_IDEDISK=y +# CONFIG_IDEDISK_MULTI_MODE is not set +# CONFIG_IDEDISK_STROKE is not set +# CONFIG_BLK_DEV_IDECD is not set +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDEFLOPPY is not set +# CONFIG_IDE_TASK_IOCTL is not set +# CONFIG_IDE_TASKFILE_IO is not set + +# +# IDE chipset support/bugfixes +# +CONFIG_IDE_GENERIC=y +CONFIG_BLK_DEV_IDEPCI=y +# CONFIG_IDEPCI_SHARE_IRQ is not set +# CONFIG_BLK_DEV_OFFBOARD is not set +# CONFIG_BLK_DEV_GENERIC is not set +# CONFIG_BLK_DEV_OPTI621 is not set +# CONFIG_BLK_DEV_SL82C105 is not set +CONFIG_BLK_DEV_IDEDMA_PCI=y +# CONFIG_BLK_DEV_IDEDMA_FORCED is not set +# CONFIG_IDEDMA_PCI_AUTO is not set +CONFIG_BLK_DEV_ADMA=y +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +# CONFIG_BLK_DEV_AMD74XX is not set +CONFIG_BLK_DEV_CMD64X=y +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_CY82C693 is not set +# CONFIG_BLK_DEV_CS5520 is not set +# CONFIG_BLK_DEV_CS5530 is not set +# CONFIG_BLK_DEV_HPT34X is not set +CONFIG_BLK_DEV_HPT366=y +# CONFIG_BLK_DEV_SC1200 is not set +# CONFIG_BLK_DEV_PIIX is not set +# CONFIG_BLK_DEV_NS87415 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +CONFIG_BLK_DEV_PDC202XX_NEW=y +# CONFIG_PDC202XX_FORCE is not set +# CONFIG_BLK_DEV_SVWKS is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_TRM290 is not set +# CONFIG_BLK_DEV_VIA82CXXX is not set +CONFIG_BLK_DEV_IDEDMA=y +# CONFIG_IDEDMA_IVB is not set +# CONFIG_IDEDMA_AUTO is not set +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +# CONFIG_SERIO is not set +# CONFIG_SERIO_I8042 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=2 +# CONFIG_SERIAL_8250_EXTENDED is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# 
Watchdog Cards +# +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +CONFIG_IXP4XX_WATCHDOG=y + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_CHARDEV=y + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set + +# +# I2C Hardware Bus support +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_I810 is not set +# CONFIG_I2C_ISA is not set +CONFIG_I2C_IXP4XX=y +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_PROSAVAGE is not set +# CONFIG_I2C_SAVAGE4 is not set +# CONFIG_SCx200_ACB is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set +# CONFIG_I2C_VOODOO3 is not set + +# +# Hardware Sensors Chip support +# +CONFIG_I2C_SENSOR=y +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_FSCHER is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83627HF is not set + +# +# Other I2C Chip support +# +CONFIG_SENSORS_EEPROM=y +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +# CONFIG_EXT2_FS_SECURITY is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +CONFIG_EXT3_FS_POSIX_ACL=y +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# 
CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +# CONFIG_JFFS2_FS_NAND is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_INTERMEZZO_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Misc devices +# + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# Kernel hacking +# +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_INFO is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SLAB is not set +CONFIG_MAGIC_SYSRQ=y +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_WAITQ is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_ERRORS=y +CONFIG_DEBUG_LL=y +# CONFIG_DEBUG_ICEDCC is not set +# CONFIG_DEBUG_BDI2000_XSCALE is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y diff --git a/arch/arm/configs/mainstone_defconfig b/arch/arm/configs/mainstone_defconfig new file mode 100644 index 000000000..925b2777f --- /dev/null +++ b/arch/arm/configs/mainstone_defconfig @@ -0,0 +1,743 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_ARM=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_HOTPLUG=y +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y + +# +# Loadable module support +# +CONFIG_MODULES=y +# CONFIG_MODULE_UNLOAD is not set +CONFIG_OBSOLETE_MODPARM=y +# 
CONFIG_MODVERSIONS is not set +# CONFIG_KMOD is not set + +# +# System Type +# +# CONFIG_ARCH_ADIFCC is not set +# CONFIG_ARCH_CLPS7500 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CO285 is not set +CONFIG_ARCH_PXA=y +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_CAMELOT is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP3XX is not set +# CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_VERSATILE_PB is not set + +# +# CLPS711X/EP721X Implementations +# + +# +# Epxa10db +# + +# +# Footbridge Implementations +# + +# +# IOP3xx Implementation Options +# +# CONFIG_ARCH_IOP310 is not set +# CONFIG_ARCH_IOP321 is not set + +# +# IOP3xx Chipset Features +# + +# +# Intel PXA2xx Implementations +# +# CONFIG_ARCH_LUBBOCK is not set +CONFIG_MACH_MAINSTONE=y +# CONFIG_ARCH_PXA_IDP is not set +CONFIG_PXA27x=y +CONFIG_IWMMXT=y + +# +# SA11x0 Implementations +# + +# +# TI OMAP Implementations +# + +# +# OMAP Core Type +# + +# +# OMAP Board Type +# + +# +# OMAP Feature Selections +# + +# +# S3C2410 Implementations +# + +# +# LH7A40X Implementations +# + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_XSCALE=y +CONFIG_CPU_32v5=y +CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_TLB_V4WBI=y +CONFIG_CPU_MINICACHE=y + +# +# Processor Features +# +# CONFIG_ARM_THUMB is not set +CONFIG_XSCALE_PMU=y + +# +# General setup +# +# CONFIG_ZBOOT_ROM is not set +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 + +# +# PCMCIA/CardBus support +# +CONFIG_PCMCIA=y +# CONFIG_PCMCIA_DEBUG is not set +# CONFIG_TCIC is not set +CONFIG_PCMCIA_PXA2XX=y + +# +# At least one math emulation must be selected +# +CONFIG_FPE_NWFPE=y +# CONFIG_FPE_NWFPE_XP is not set +# CONFIG_FPE_FASTFPE is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Generic Driver Options +# +# CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_PM is not set +# CONFIG_PREEMPT is not set +# CONFIG_ARTHUR is not set +CONFIG_CMDLINE="root=/dev/nfs ip=bootp console=ttyS0,115200 mem=64M" +CONFIG_LEDS=y +CONFIG_LEDS_TIMER=y +CONFIG_LEDS_CPU=y +CONFIG_ALIGNMENT_TRAP=y + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_REDBOOT_PARTS=y +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +CONFIG_MTD_CFI_GEOMETRY=y +# CONFIG_MTD_CFI_B1 is not set +# CONFIG_MTD_CFI_B2 is not set +CONFIG_MTD_CFI_B4=y +# CONFIG_MTD_CFI_B8 is not set +# CONFIG_MTD_CFI_I1 is not set +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# 
CONFIG_MTD_OBSOLETE_CHIPS is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_ARM_INTEGRATOR is not set +# CONFIG_MTD_EDB7312 is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLKMTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +# CONFIG_MTD_NAND is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_PACKET is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_SMC91X=y + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# PCMCIA network device support +# +# CONFIG_NET_PCMCIA is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ATA/ATAPI/MFM/RLL support +# +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +CONFIG_BLK_DEV_IDEDISK=y +# CONFIG_IDEDISK_MULTI_MODE is not set +# CONFIG_IDEDISK_STROKE is not set +CONFIG_BLK_DEV_IDECS=y +# CONFIG_BLK_DEV_IDECD is not set +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDEFLOPPY is not set +# CONFIG_IDE_TASK_IOCTL is not set +# CONFIG_IDE_TASKFILE_IO is not set + +# +# IDE chipset support/bugfixes +# +# CONFIG_IDE_GENERIC is not set +# CONFIG_BLK_DEV_IDEDMA is not set +# CONFIG_IDEDMA_AUTO is not set +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# 
+# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_CT82C710 is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_PXA=y +CONFIG_SERIAL_PXA_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set + +# +# PCMCIA character devices +# +# CONFIG_SYNCLINK_CS is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +# CONFIG_JFFS2_FS_NAND is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# 
+CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_INTERMEZZO_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set + +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Misc devices +# + +# +# USB support +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# Kernel hacking +# +CONFIG_FRAME_POINTER=y +CONFIG_DEBUG_USER=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SLAB is not set +CONFIG_MAGIC_SYSRQ=y +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_WAITQ is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_ERRORS=y +CONFIG_DEBUG_LL=y +# CONFIG_DEBUG_ICEDCC is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y diff --git a/arch/arm/configs/smdk2410_defconfig b/arch/arm/configs/smdk2410_defconfig new file mode 100644 index 000000000..a88724f26 --- /dev/null +++ b/arch/arm/configs/smdk2410_defconfig @@ -0,0 +1,667 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_ARM=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set 
+CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# System Type +# +# CONFIG_ARCH_ADIFCC is not set +# CONFIG_ARCH_CLPS7500 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CO285 is not set +# CONFIG_ARCH_PXA is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_CAMELOT is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP3XX is not set +# CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_SHARK is not set +CONFIG_ARCH_S3C2410=y +# CONFIG_ARCH_OMAP is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_VERSATILE_PB is not set + +# +# CLPS711X/EP721X Implementations +# + +# +# Epxa10db +# + +# +# Footbridge Implementations +# + +# +# IOP3xx Implementation Options +# +# CONFIG_ARCH_IOP310 is not set +# CONFIG_ARCH_IOP321 is not set + +# +# IOP3xx Chipset Features +# + +# +# Intel PXA250/210 Implementations +# + +# +# SA11x0 Implementations +# + +# +# TI OMAP Implementations +# + +# +# OMAP Core Type +# + +# +# OMAP Board Type +# + +# +# OMAP Feature Selections +# + +# +# S3C2410 Implementations +# +# CONFIG_ARCH_BAST is not set +# CONFIG_ARCH_H1940 is not set +CONFIG_ARCH_SMDK2410=y +# CONFIG_MACH_VR1000 is not set + +# +# LH7A40X Implementations +# + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_ARM920T=y +CONFIG_CPU_32v4=y +CONFIG_CPU_ABRT_EV4T=y +CONFIG_CPU_CACHE_V4WT=y +CONFIG_CPU_COPY_V4WB=y +CONFIG_CPU_TLB_V4WBI=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_WRITETHROUGH is not set + +# +# General setup +# +# CONFIG_ZBOOT_ROM is not set +CONFIG_ZBOOT_ROM_TEXT=0 +CONFIG_ZBOOT_ROM_BSS=0 + +# +# At least one math emulation must be selected +# +# CONFIG_FPE_NWFPE is not set +# CONFIG_FPE_FASTFPE is not set +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_AOUT=y +# CONFIG_BINFMT_MISC is not set + +# +# Generic Driver Options +# +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_PM is not set +# CONFIG_PREEMPT is not set +# CONFIG_ARTHUR is not set +CONFIG_CMDLINE="root=1f04 mem=32M" +CONFIG_ALIGNMENT_TRAP=y + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_PARTITIONS is not set +# CONFIG_MTD_CONCAT is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# CONFIG_MTD_OBSOLETE_CHIPS is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_ARM_INTEGRATOR is not set +# CONFIG_MTD_EDB7312 is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_MTDRAM is not set +# 
CONFIG_MTD_BLKMTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +# CONFIG_MTD_NAND is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_INITRD is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_PACKET is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# 
CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_S3C2410=y +CONFIG_SERIAL_S3C2410_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +CONFIG_ROMFS_FS=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_INTERMEZZO_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Profiling support 
+# +# CONFIG_PROFILING is not set + +# +# Graphics support +# +CONFIG_FB=y +CONFIG_FB_VIRTUAL=y + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_PCI_CONSOLE=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y + +# +# Logo configuration +# +# CONFIG_LOGO is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Misc devices +# + +# +# USB support +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# Kernel hacking +# +CONFIG_FRAME_POINTER=y +CONFIG_DEBUG_USER=y +# CONFIG_DEBUG_INFO is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SLAB is not set +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_WAITQ is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_ERRORS is not set +CONFIG_DEBUG_LL=y +# CONFIG_DEBUG_ICEDCC is not set +CONFIG_DEBUG_LL_PRINTK=y +CONFIG_DEBUG_S3C2410_PORT=y +CONFIG_DEBUG_S3C2410_UART=0 + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y +CONFIG_LIBCRC32C=y diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile new file mode 100644 index 000000000..f656397f8 --- /dev/null +++ b/arch/arm/mach-ixp4xx/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the linux kernel. +# + +obj-y += common.o common-pci.o + +obj-$(CONFIG_ARCH_IXDP4XX) += ixdp425-pci.o ixdp425-setup.o +obj-$(CONFIG_ARCH_ADI_COYOTE) += coyote-pci.o coyote-setup.o +obj-$(CONFIG_ARCH_PRPMC1100) += prpmc1100-pci.o prpmc1100-setup.o + diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c new file mode 100644 index 000000000..c20dc3226 --- /dev/null +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -0,0 +1,543 @@ +/* + * arch/arm/mach-ixp4xx/common-pci.c + * + * IXP4XX PCI routines for all platforms + * + * Maintainer: Deepak Saxena + * + * Copyright (C) 2002 Intel Corporation. + * Copyright (C) 2003 Greg Ungerer + * Copyright (C) 2003-2004 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + + +/* + * IXP4xx PCI read function is dependent on whether we are + * running A0 or B0 (AppleGate) silicon. + */ +int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data); + +/* + * Base address for PCI regsiter region + */ +unsigned long ixp4xx_pci_reg_base = 0; + +/* + * PCI cfg an I/O routines are done by programming a + * command/byte enable register, and then read/writing + * the data from a data regsiter. We need to ensure + * these transactions are atomic or we will end up + * with corrupt data on the bus or in a driver. 
+ */ +static spinlock_t ixp4xx_pci_lock = SPIN_LOCK_UNLOCKED; + +/* + * Read from PCI config space + */ +static void crp_read(u32 ad_cbe, u32 *data) +{ + unsigned long flags; + spin_lock_irqsave(&ixp4xx_pci_lock, flags); + *PCI_CRP_AD_CBE = ad_cbe; + *data = *PCI_CRP_RDATA; + spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); +} + +/* + * Write to PCI config space + */ +static void crp_write(u32 ad_cbe, u32 data) +{ + unsigned long flags; + spin_lock_irqsave(&ixp4xx_pci_lock, flags); + *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe; + *PCI_CRP_WDATA = data; + spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); +} + +static inline int check_master_abort(void) +{ + /* check Master Abort bit after access */ + unsigned long isr = *PCI_ISR; + + if (isr & PCI_ISR_PFE) { + /* make sure the Master Abort bit is reset */ + *PCI_ISR = PCI_ISR_PFE; + pr_debug("%s failed\n", __FUNCTION__); + return 1; + } + + return 0; +} + +int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data) +{ + unsigned long flags; + int retval = 0; + int i; + + spin_lock_irqsave(&ixp4xx_pci_lock, flags); + + *PCI_NP_AD = addr; + + /* + * PCI workaround - only works if NP PCI space reads have + * no side effects!!! Read 8 times. last one will be good. + */ + for (i = 0; i < 8; i++) { + *PCI_NP_CBE = cmd; + *data = *PCI_NP_RDATA; + *data = *PCI_NP_RDATA; + } + + if(check_master_abort()) + retval = 1; + + spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + return retval; +} + +int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data) +{ + unsigned long flags; + int retval = 0; + + spin_lock_irqsave(&ixp4xx_pci_lock, flags); + + *PCI_NP_AD = addr; + + /* set up and execute the read */ + *PCI_NP_CBE = cmd; + + /* the result of the read is now in NP_RDATA */ + *data = *PCI_NP_RDATA; + + if(check_master_abort()) + retval = 1; + + spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + return retval; +} + +int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data) +{ + unsigned long flags; + int retval = 0; + + spin_lock_irqsave(&ixp4xx_pci_lock, flags); + + *PCI_NP_AD = addr; + + /* set up the write */ + *PCI_NP_CBE = cmd; + + /* execute the write by writing to NP_WDATA */ + *PCI_NP_WDATA = data; + + if(check_master_abort()) + retval = 1; + + spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + return retval; +} + +static u32 ixp4xx_config_addr(u8 bus_num, u16 devfn, int where) +{ + u32 addr; + if (!bus_num) { + /* type 0 */ + addr = BIT(32-PCI_SLOT(devfn)) | ((PCI_FUNC(devfn)) << 8) | + (where & ~3); + } else { + /* type 1 */ + addr = (bus_num << 16) | ((PCI_SLOT(devfn)) << 11) | + ((PCI_FUNC(devfn)) << 8) | (where & ~3) | 1; + } + return addr; +} + +/* + * Mask table, bits to mask for quantity of size 1, 2 or 4 bytes. + * 0 and 3 are not valid indexes... 
+ */ +static u32 bytemask[] = { + /*0*/ 0, + /*1*/ 0xff, + /*2*/ 0xffff, + /*3*/ 0, + /*4*/ 0xffffffff, +}; + +static u32 local_byte_lane_enable_bits(u32 n, int size) +{ + if (size == 1) + return (0xf & ~BIT(n)) << CRP_AD_CBE_BESL; + if (size == 2) + return (0xf & ~(BIT(n) | BIT(n+1))) << CRP_AD_CBE_BESL; + if (size == 4) + return 0; + return 0xffffffff; +} + +static int local_read_config(int where, int size, u32 *value) +{ + u32 n, data; + pr_debug("local_read_config from %d size %d\n", where, size); + n = where % 4; + crp_read(where & ~3, &data); + *value = (data >> (8*n)) & bytemask[size]; + pr_debug("local_read_config read %#x\n", *value); + return PCIBIOS_SUCCESSFUL; +} + +static int local_write_config(int where, int size, u32 value) +{ + u32 n, byte_enables, data; + pr_debug("local_write_config %#x to %d size %d\n", value, where, size); + n = where % 4; + byte_enables = local_byte_lane_enable_bits(n, size); + if (byte_enables == 0xffffffff) + return PCIBIOS_BAD_REGISTER_NUMBER; + data = value << (8*n); + crp_write((where & ~3) | byte_enables, data); + return PCIBIOS_SUCCESSFUL; +} + +static u32 byte_lane_enable_bits(u32 n, int size) +{ + if (size == 1) + return (0xf & ~BIT(n)) << 4; + if (size == 2) + return (0xf & ~(BIT(n) | BIT(n+1))) << 4; + if (size == 4) + return 0; + return 0xffffffff; +} + +static int read_config(u8 bus_num, u16 devfn, int where, int size, u32 *value) +{ + u32 n, byte_enables, addr, data; + + pr_debug("read_config from %d size %d dev %d:%d:%d\n", where, size, + bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + *value = 0xffffffff; + n = where % 4; + byte_enables = byte_lane_enable_bits(n, size); + if (byte_enables == 0xffffffff) + return PCIBIOS_BAD_REGISTER_NUMBER; + + addr = ixp4xx_config_addr(bus_num, devfn, where); + if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_CONFIGREAD, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + *value = (data >> (8*n)) & bytemask[size]; + pr_debug("read_config_byte read %#x\n", *value); + return PCIBIOS_SUCCESSFUL; +} + +static int write_config(u8 bus_num, u16 devfn, int where, int size, u32 value) +{ + u32 n, byte_enables, addr, data; + + pr_debug("write_config_byte %#x to %d size %d dev %d:%d:%d\n", value, where, + size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + n = where % 4; + byte_enables = byte_lane_enable_bits(n, size); + if (byte_enables == 0xffffffff) + return PCIBIOS_BAD_REGISTER_NUMBER; + + addr = ixp4xx_config_addr(bus_num, devfn, where); + data = value << (8*n); + if (ixp4xx_pci_write(addr, byte_enables | NP_CMD_CONFIGWRITE, data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + return PCIBIOS_SUCCESSFUL; +} + +/* + * Generalized PCI config access functions. 
+ */ +static int ixp4xx_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + if (bus->number && !PCI_SLOT(devfn)) + return local_read_config(where, size, value); + return read_config(bus->number, devfn, where, size, value); +} + +static int ixp4xx_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + if (bus->number && !PCI_SLOT(devfn)) + return local_write_config(where, size, value); + return write_config(bus->number, devfn, where, size, value); +} + +struct pci_ops ixp4xx_ops = { + .read = ixp4xx_read_config, + .write = ixp4xx_write_config, +}; + + +/* + * PCI abort handler + */ +static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) +{ + u32 isr, status; + + isr = *PCI_ISR; + local_read_config(PCI_STATUS, 2, &status); + pr_debug("PCI: abort_handler addr = %#lx, isr = %#x, " + "status = %#x\n", addr, isr, status); + + /* make sure the Master Abort bit is reset */ + *PCI_ISR = PCI_ISR_PFE; + status |= PCI_STATUS_REC_MASTER_ABORT; + local_write_config(PCI_STATUS, 2, status); + + /* + * If it was an imprecise abort, then we need to correct the + * return address to be _after_ the instruction. + */ + if (fsr & (1 << 10)) + regs->ARM_pc += 4; + + return 0; +} + + +/* + * Setup DMA mask to 64MB on PCI devices. Ignore all other devices. + */ +static int ixp4xx_pci_platform_notify(struct device *dev) +{ + if(dev->bus == &pci_bus_type) { + *dev->dma_mask = SZ_64M - 1; + dev->coherent_dma_mask = SZ_64M - 1; + dmabounce_register_dev(dev, 2048, 4096); + } + return 0; +} + +static int ixp4xx_pci_platform_notify_remove(struct device *dev) +{ + if(dev->bus == &pci_bus_type) { + dmabounce_unregister_dev(dev); + } + return 0; +} + +int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) +{ + return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M); +} + +void __init ixp4xx_pci_preinit(void) +{ + unsigned long processor_id; + + asm("mrc p15, 0, %0, cr0, cr0, 0;" : "=r"(processor_id) :); + + /* + * Determine which PCI read method to use + */ + if (!(processor_id & 0xf)) { + printk("PCI: IXP4xx A0 silicon detected - " + "PCI Non-Prefetch Workaround Enabled\n"); + ixp4xx_pci_read = ixp4xx_pci_read_errata; + } else + ixp4xx_pci_read = ixp4xx_pci_read_no_errata; + + + /* hook in our fault handler for PCI errors */ + hook_fault_code(16+6, abort_handler, SIGBUS, "imprecise external abort"); + + pr_debug("setup PCI-AHB(inbound) and AHB-PCI(outbound) address mappings\n"); + + /* + * We use identity AHB->PCI address translation + * in the 0x48000000 to 0x4bffffff address space + */ + *PCI_PCIMEMBASE = 0x48494A4B; + + /* + * We also use identity PCI->AHB address translation + * in 4 16MB BARs that begin at the physical memory start + */ + *PCI_AHBMEMBASE = (PHYS_OFFSET & 0xFF000000) + + ((PHYS_OFFSET & 0xFF000000) >> 8) + + ((PHYS_OFFSET & 0xFF000000) >> 16) + + ((PHYS_OFFSET & 0xFF000000) >> 24) + + 0x00010203; + + if (*PCI_CSR & PCI_CSR_HOST) { + printk("PCI: IXP4xx is host\n"); + + pr_debug("setup BARs in controller\n"); + + /* + * We configure the PCI inbound memory windows to be + * 1:1 mapped to SDRAM + */ + local_write_config(PCI_BASE_ADDRESS_0, 4, PHYS_OFFSET + 0x00000000); + local_write_config(PCI_BASE_ADDRESS_1, 4, PHYS_OFFSET + 0x01000000); + local_write_config(PCI_BASE_ADDRESS_2, 4, PHYS_OFFSET + 0x02000000); + local_write_config(PCI_BASE_ADDRESS_3, 4, PHYS_OFFSET + 0x03000000); + + /* + * Enable CSR window at 0xff000000. 
+ */ + local_write_config(PCI_BASE_ADDRESS_4, 4, 0xff000008); + + /* + * Enable the IO window to be way up high, at 0xfffffc00 + */ + local_write_config(PCI_BASE_ADDRESS_5, 4, 0xfffffc01); + } else { + printk("PCI: IXP4xx is target - No bus scan performed\n"); + } + + printk("PCI: IXP4xx Using %s access for memory space\n", +#ifndef CONFIG_IXP4XX_INDIRECT_PCI + "direct" +#else + "indirect" +#endif + ); + + pr_debug("clear error bits in ISR\n"); + *PCI_ISR = PCI_ISR_PSE | PCI_ISR_PFE | PCI_ISR_PPE | PCI_ISR_AHBE; + + /* + * Set Initialize Complete in PCI Control Register: allow IXP4XX to + * respond to PCI configuration cycles. Specify that the AHB bus is + * operating in big endian mode. Set up byte lane swapping between + * little-endian PCI and the big-endian AHB bus + */ +#ifdef __ARMEB__ + *PCI_CSR = PCI_CSR_IC | PCI_CSR_ABE | PCI_CSR_PDS | PCI_CSR_ADS; +#else + *PCI_CSR = PCI_CSR_IC; +#endif + + pr_debug("DONE\n"); +} + +int ixp4xx_setup(int nr, struct pci_sys_data *sys) +{ + struct resource *res; + + if (nr >= 1) + return 0; + + res = kmalloc(sizeof(*res) * 2, GFP_KERNEL); + if (res == NULL) { + /* + * If we're out of memory this early, something is wrong, + * so we might as well catch it here. + */ + panic("PCI: unable to allocate resources?\n"); + } + memset(res, 0, sizeof(*res) * 2); + + local_write_config(PCI_COMMAND, 2, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY); + + res[0].name = "PCI I/O Space"; + res[0].start = 0x00001000; + res[0].end = 0xffff0000; + res[0].flags = IORESOURCE_IO; + + res[1].name = "PCI Memory Space"; + res[1].start = 0x48000000; +#ifndef CONFIG_IXP4XX_INDIRECT_PCI + res[1].end = 0x4bffffff; +#else + res[1].end = 0x4fffffff; +#endif + res[1].flags = IORESOURCE_MEM; + + request_resource(&ioport_resource, &res[0]); + request_resource(&iomem_resource, &res[1]); + + sys->resource[0] = &res[0]; + sys->resource[1] = &res[1]; + sys->resource[2] = NULL; + + platform_notify = ixp4xx_pci_platform_notify; + platform_notify_remove = ixp4xx_pci_platform_notify_remove; + + return 1; +} + +struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys) +{ + return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys); +} + +/* + * We override these so we properly do dmabounce otherwise drivers + * are able to set the dma_mask to 0xffffffff and we can no longer + * trap bounces. :( + * + * We just return true on everyhing except for < 64MB in which case + * we will fail miseralby and die since we can't handle that case. + */ +int +pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + if (mask >= SZ_64M - 1 ) + return 0; + + return -EIO; +} + +int +pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + if (mask >= SZ_64M - 1 ) + return 0; + + return -EIO; +} + +int +pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + if (mask >= SZ_64M - 1 ) + return 0; + + return -EIO; +} + +EXPORT_SYMBOL(pci_set_dma_mask); +EXPORT_SYMBOL(pci_dac_set_dma_mask); +EXPORT_SYMBOL(pci_set_consistent_dma_mask); + diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c new file mode 100644 index 000000000..f0166508d --- /dev/null +++ b/arch/arm/mach-ixp4xx/common.c @@ -0,0 +1,263 @@ +/* + * arch/arm/mach-ixp4xx/common.c + * + * Generic code shared across all IXP4XX platforms + * + * Maintainer: Deepak Saxena + * + * Copyright 2002 (c) Intel Corporation + * Copyright 2003-2004 (c) MontaVista, Software, Inc. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. 
This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + + +/************************************************************************* + * GPIO acces functions + *************************************************************************/ + +/* + * Configure GPIO line for input, interrupt, or output operation + * + * TODO: Enable/disable the irq_desc based on interrupt or output mode. + * TODO: Should these be named ixp4xx_gpio_? + */ +void gpio_line_config(u8 line, u32 style) +{ + u32 enable; + volatile u32 *int_reg; + u32 int_style; + + enable = *IXP4XX_GPIO_GPOER; + + if (style & IXP4XX_GPIO_OUT) { + enable &= ~((1) << line); + } else if (style & IXP4XX_GPIO_IN) { + enable |= ((1) << line); + + switch (style & IXP4XX_GPIO_INTSTYLE_MASK) + { + case (IXP4XX_GPIO_ACTIVE_HIGH): + int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH; + break; + case (IXP4XX_GPIO_ACTIVE_LOW): + int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW; + break; + case (IXP4XX_GPIO_RISING_EDGE): + int_style = IXP4XX_GPIO_STYLE_RISING_EDGE; + break; + case (IXP4XX_GPIO_FALLING_EDGE): + int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE; + break; + case (IXP4XX_GPIO_TRANSITIONAL): + int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL; + break; + default: + int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH; + break; + } + + if (line >= 8) { /* pins 8-15 */ + line -= 8; + int_reg = IXP4XX_GPIO_GPIT2R; + } + else { /* pins 0-7 */ + int_reg = IXP4XX_GPIO_GPIT1R; + } + + /* Clear the style for the appropriate pin */ + *int_reg &= ~(IXP4XX_GPIO_STYLE_CLEAR << + (line * IXP4XX_GPIO_STYLE_SIZE)); + + /* Set the new style */ + *int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE)); + } + + *IXP4XX_GPIO_GPOER = enable; +} + +EXPORT_SYMBOL(gpio_line_config); + +/************************************************************************* + * IXP4xx chipset I/O mapping + *************************************************************************/ +static struct map_desc ixp4xx_io_desc[] __initdata = { + { /* UART, Interrupt ctrl, GPIO, timers, NPEs, MACs, USB .... */ + .virtual = IXP4XX_PERIPHERAL_BASE_VIRT, + .physical = IXP4XX_PERIPHERAL_BASE_PHYS, + .length = IXP4XX_PERIPHERAL_REGION_SIZE, + .type = MT_DEVICE + }, { /* Expansion Bus Config Registers */ + .virtual = IXP4XX_EXP_CFG_BASE_VIRT, + .physical = IXP4XX_EXP_CFG_BASE_PHYS, + .length = IXP4XX_EXP_CFG_REGION_SIZE, + .type = MT_DEVICE + }, { /* PCI Registers */ + .virtual = IXP4XX_PCI_CFG_BASE_VIRT, + .physical = IXP4XX_PCI_CFG_BASE_PHYS, + .length = IXP4XX_PCI_CFG_REGION_SIZE, + .type = MT_DEVICE + } +}; + +void __init ixp4xx_map_io(void) +{ + iotable_init(ixp4xx_io_desc, ARRAY_SIZE(ixp4xx_io_desc)); +} + + +/************************************************************************* + * IXP4xx chipset IRQ handling + * + * TODO: GPIO IRQs should be marked invalid until the user of the IRQ + * (be it PCI or something else) configures that GPIO line + * as an IRQ. Also, we should use a different chip structure for + * level-based GPIO vs edge-based GPIO. Currently nobody needs this as + * all HW that's publically available uses level IRQs, so we'll + * worry about it if/when we have HW to test. 
+ **************************************************************************/ +static void ixp4xx_irq_mask(unsigned int irq) +{ + *IXP4XX_ICMR &= ~(1 << irq); +} + +static void ixp4xx_irq_mask_ack(unsigned int irq) +{ + ixp4xx_irq_mask(irq); +} + +static void ixp4xx_irq_unmask(unsigned int irq) +{ + static int irq2gpio[NR_IRQS] = { + -1, -1, -1, -1, -1, -1, 0, 1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, -1, -1, + }; + int line = irq2gpio[irq]; + + /* + * This only works for LEVEL gpio IRQs as per the IXP4xx developer's + * manual. If edge-triggered, need to move it to the mask_ack. + * Nobody seems to be using the edge-triggered mode on the GPIOs. + */ + if (line >= 0) + gpio_line_isr_clear(line); + + *IXP4XX_ICMR |= (1 << irq); +} + +static struct irqchip ixp4xx_irq_chip = { + .ack = ixp4xx_irq_mask_ack, + .mask = ixp4xx_irq_mask, + .unmask = ixp4xx_irq_unmask, +}; + +void __init ixp4xx_init_irq(void) +{ + int i = 0; + + /* Route all sources to IRQ instead of FIQ */ + *IXP4XX_ICLR = 0x0; + + /* Disable all interrupt */ + *IXP4XX_ICMR = 0x0; + + for(i = 0; i < NR_IRQS; i++) + { + set_irq_chip(i, &ixp4xx_irq_chip); + set_irq_handler(i, do_level_IRQ); + set_irq_flags(i, IRQF_VALID); + } +} + + +/************************************************************************* + * IXP4xx timer tick + * We use OS timer1 on the CPU for the timer tick and the timestamp + * counter as a source of real clock ticks to account for missed jiffies. + *************************************************************************/ + +static unsigned volatile last_jiffy_time; + +#define CLOCK_TICKS_PER_USEC (CLOCK_TICK_RATE / USEC_PER_SEC) + +/* IRQs are disabled before entering here from do_gettimeofday() */ +static unsigned long ixp4xx_gettimeoffset(void) +{ + u32 elapsed; + + elapsed = *IXP4XX_OSTS - last_jiffy_time; + + return elapsed / CLOCK_TICKS_PER_USEC; +} + +static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + /* Clear Pending Interrupt by writing '1' to it */ + *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND; + + /* + * Catch up with the real idea of time + */ + do { + do_timer(regs); + last_jiffy_time += LATCH; + } while((*IXP4XX_OSTS - last_jiffy_time) > LATCH); + + return IRQ_HANDLED; +} + +extern unsigned long (*gettimeoffset)(void); + +static struct irqaction timer_irq = { + .name = "IXP4xx Timer Tick", + .flags = SA_INTERRUPT +}; + +void __init time_init(void) +{ + gettimeoffset = ixp4xx_gettimeoffset; + timer_irq.handler = ixp4xx_timer_interrupt; + + /* Clear Pending Interrupt by writing '1' to it */ + *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND; + + /* Setup the Timer counter value */ + *IXP4XX_OSRT1 = (LATCH & ~IXP4XX_OST_RELOAD_MASK) | IXP4XX_OST_ENABLE; + + /* Reset time-stamp counter */ + *IXP4XX_OSTS = 0; + last_jiffy_time = 0; + + /* Connect the interrupt handler and enable the interrupt */ + setup_irq(IRQ_IXP4XX_TIMER1, &timer_irq); +} + + diff --git a/arch/arm/mach-ixp4xx/coyote-pci.c b/arch/arm/mach-ixp4xx/coyote-pci.c new file mode 100644 index 000000000..b46c74351 --- /dev/null +++ b/arch/arm/mach-ixp4xx/coyote-pci.c @@ -0,0 +1,69 @@ +/* + * arch/arch/mach-ixp4xx/coyote-pci.c + * + * PCI setup routines for ADI Engineering Coyote platform + * + * Copyright (C) 2002 Jungo Software Technologies. + * Copyright (C) 2003 MontaVista Softwrae, Inc. 
+ * + * Maintainer: Deepak Saxena + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include + +#include +#include +#include + +#include + +extern void ixp4xx_pci_preinit(void); +extern int ixp4xx_setup(int nr, struct pci_sys_data *sys); +extern struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys); + +void __init coyote_pci_preinit(void) +{ + gpio_line_config(COYOTE_PCI_SLOT0_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + + gpio_line_config(COYOTE_PCI_SLOT1_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + + gpio_line_isr_clear(COYOTE_PCI_SLOT0_PIN); + gpio_line_isr_clear(COYOTE_PCI_SLOT1_PIN); + + ixp4xx_pci_preinit(); +} + +static int __init coyote_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + if (slot == COYOTE_PCI_SLOT0_DEVID) + return IRQ_COYOTE_PCI_SLOT0; + else if (slot == COYOTE_PCI_SLOT1_DEVID) + return IRQ_COYOTE_PCI_SLOT1; + else return -1; +} + +struct hw_pci coyote_pci __initdata = { + .nr_controllers = 1, + .preinit = coyote_pci_preinit, + .swizzle = pci_std_swizzle, + .setup = ixp4xx_setup, + .scan = ixp4xx_scan_bus, + .map_irq = coyote_map_irq, +}; + +int __init coyote_pci_init(void) +{ + if (machine_is_adi_coyote()) + pci_common_init(&coyote_pci); + return 0; +} + +subsys_initcall(coyote_pci_init); diff --git a/arch/arm/mach-ixp4xx/ixdp425-pci.c b/arch/arm/mach-ixp4xx/ixdp425-pci.c new file mode 100644 index 000000000..7baa60c2d --- /dev/null +++ b/arch/arm/mach-ixp4xx/ixdp425-pci.c @@ -0,0 +1,84 @@ +/* + * arch/arm/mach-ixp4xx/ixdp425-pci.c + * + * IXDP425 board-level PCI initialization + * + * Copyright (C) 2002 Intel Corporation. + * Copyright (C) 2003-2004 MontaVista Software, Inc. + * + * Maintainer: Deepak Saxena + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +void __init ixdp425_pci_preinit(void) +{ + gpio_line_config(IXDP425_PCI_INTA_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(IXDP425_PCI_INTB_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(IXDP425_PCI_INTC_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(IXDP425_PCI_INTD_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + + gpio_line_isr_clear(IXDP425_PCI_INTA_PIN); + gpio_line_isr_clear(IXDP425_PCI_INTB_PIN); + gpio_line_isr_clear(IXDP425_PCI_INTC_PIN); + gpio_line_isr_clear(IXDP425_PCI_INTD_PIN); + + ixp4xx_pci_preinit(); +} + +static int __init ixdp425_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static int pci_irq_table[IXDP425_PCI_IRQ_LINES] = { + IRQ_IXDP425_PCI_INTA, + IRQ_IXDP425_PCI_INTB, + IRQ_IXDP425_PCI_INTC, + IRQ_IXDP425_PCI_INTD + }; + + int irq = -1; + + if (slot >= 1 && slot <= IXDP425_PCI_MAX_DEV && + pin >= 1 && pin <= IXDP425_PCI_IRQ_LINES) { + irq = pci_irq_table[(slot + pin - 2) % 4]; + } + + return irq; +} + +struct hw_pci ixdp425_pci __initdata = { + .nr_controllers = 1, + .preinit = ixdp425_pci_preinit, + .swizzle = pci_std_swizzle, + .setup = ixp4xx_setup, + .scan = ixp4xx_scan_bus, + .map_irq = ixdp425_map_irq, +}; + +int __init ixdp425_pci_init(void) +{ + if (machine_is_ixdp425() || + machine_is_ixcdp1100() || + machine_is_avila()) + pci_common_init(&ixdp425_pci); + return 0; +} + +subsys_initcall(ixdp425_pci_init); + diff --git a/arch/arm/mach-ixp4xx/prpmc1100-pci.c b/arch/arm/mach-ixp4xx/prpmc1100-pci.c new file mode 100644 index 000000000..a0aed9ca3 --- /dev/null +++ b/arch/arm/mach-ixp4xx/prpmc1100-pci.c @@ -0,0 +1,119 @@ +/* + * arch/arm/mach-ixp4xx/prpmc1100-pci.c + * + * PrPMC1100 PCI initialization + * + * Copyright (C) 2003-2004 MontaVista Sofwtare, Inc. + * Based on IXDP425 code originally (C) Intel Corporation + * + * Author: Deepak Saxena + * + * PrPMC1100 PCI init code. GPIO usage is similar to that on + * IXDP425, but the IRQ routing is completely different and + * depends on what carrier you are using. This code is written + * to work on the Motorola PrPMC800 ATX carrier board. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include + +#include +#include +#include + +#include + + +void __init prpmc1100_pci_preinit(void) +{ + gpio_line_config(PRPMC1100_PCI_INTA_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(PRPMC1100_PCI_INTB_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(PRPMC1100_PCI_INTC_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(PRPMC1100_PCI_INTD_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + + gpio_line_isr_clear(PRPMC1100_PCI_INTA_PIN); + gpio_line_isr_clear(PRPMC1100_PCI_INTB_PIN); + gpio_line_isr_clear(PRPMC1100_PCI_INTC_PIN); + gpio_line_isr_clear(PRPMC1100_PCI_INTD_PIN); + + ixp4xx_pci_preinit(); +} + + +static int __init prpmc1100_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int irq = -1; + + static int pci_irq_table[][4] = { + { /* IDSEL 16 - PMC A1 */ + IRQ_PRPMC1100_PCI_INTD, + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB, + IRQ_PRPMC1100_PCI_INTC + }, { /* IDSEL 17 - PRPMC-A-B */ + IRQ_PRPMC1100_PCI_INTD, + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB, + IRQ_PRPMC1100_PCI_INTC + }, { /* IDSEL 18 - PMC A1-B */ + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB, + IRQ_PRPMC1100_PCI_INTC, + IRQ_PRPMC1100_PCI_INTD + }, { /* IDSEL 19 - Unused */ + 0, 0, 0, 0 + }, { /* IDSEL 20 - P2P Bridge */ + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB, + IRQ_PRPMC1100_PCI_INTC, + IRQ_PRPMC1100_PCI_INTD + }, { /* IDSEL 21 - PMC A2 */ + IRQ_PRPMC1100_PCI_INTC, + IRQ_PRPMC1100_PCI_INTD, + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB + }, { /* IDSEL 22 - PMC A2-B */ + IRQ_PRPMC1100_PCI_INTD, + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB, + IRQ_PRPMC1100_PCI_INTC + }, + }; + + if (slot >= PRPMC1100_PCI_MIN_DEVID && slot <= PRPMC1100_PCI_MAX_DEVID + && pin >= 1 && pin <= PRPMC1100_PCI_IRQ_LINES) { + irq = pci_irq_table[slot - PRPMC1100_PCI_MIN_DEVID][pin - 1]; + } + + return irq; +} + + +struct hw_pci prpmc1100_pci __initdata = { + .nr_controllers = 1, + .preinit = prpmc1100_pci_preinit, + .swizzle = pci_std_swizzle, + .setup = ixp4xx_setup, + .scan = ixp4xx_scan_bus, + .map_irq = prpmc1100_map_irq, +}; + +int __init prpmc1100_pci_init(void) +{ + if (machine_is_prpmc1100()) + pci_common_init(&prpmc1100_pci); + return 0; +} + +subsys_initcall(prpmc1100_pci_init); + diff --git a/arch/arm/mach-ixp4xx/prpmc1100-setup.c b/arch/arm/mach-ixp4xx/prpmc1100-setup.c new file mode 100644 index 000000000..b0603205d --- /dev/null +++ b/arch/arm/mach-ixp4xx/prpmc1100-setup.c @@ -0,0 +1,90 @@ +/* + * arch/arm/mach-ixp4xx/prpmc1100-setup.c + * + * Motorola PrPMC1100 board setup + * + * Copyright (C) 2003-2004 MontaVista Software, Inc. 
+ * + * Author: Deepak Saxena + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __ARMEB__ +#define REG_OFFSET 3 +#else +#define REG_OFFSET 0 +#endif + +/* + * Only one serial port is connected on the PrPMC1100 + */ +static struct uart_port prpmc1100_serial_port = { + .membase = (char*)(IXP4XX_UART1_BASE_VIRT + REG_OFFSET), + .mapbase = (IXP4XX_UART1_BASE_PHYS), + .irq = IRQ_IXP4XX_UART1, + .flags = UPF_SKIP_TEST, + .iotype = UPIO_MEM, + .regshift = 2, + .uartclk = IXP4XX_UART_XTAL, + .line = 0, + .type = PORT_XSCALE, + .fifosize = 32 +}; + +void __init prpmc1100_map_io(void) +{ + early_serial_setup(&prpmc1100_serial_port); + + ixp4xx_map_io(); +} + +static struct flash_platform_data prpmc1100_flash_data = { + .map_name = "cfi_probe", + .width = 2, +}; + +static struct resource prpmc1100_flash_resource = { + .start = PRPMC1100_FLASH_BASE, + .end = PRPMC1100_FLASH_BASE + PRPMC1100_FLASH_SIZE, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device prpmc1100_flash_device = { + .name = "IXP4XX-Flash", + .id = 0, + .dev = { + .platform_data = &prpmc1100_flash_data, + }, + .num_resources = 1, + .resource = &prpmc1100_flash_resource, +}; + +static void __init prpmc1100_init(void) +{ + platform_add_device(&prpmc1100_flash_device); +} + +MACHINE_START(PRPMC1100, "Motorola PrPMC1100") + MAINTAINER("MontaVista Software, Inc.") + BOOT_MEM(PHYS_OFFSET, IXP4XX_PERIPHERAL_BASE_PHYS, + IXP4XX_PERIPHERAL_BASE_VIRT) + MAPIO(prpmc1100_map_io) + INITIRQ(ixp4xx_init_irq) + BOOT_PARAMS(0x0100) + INIT_MACHINE(prpmc1100_init) +MACHINE_END + diff --git a/arch/arm/mach-s3c2410/mach-smdk2410.c b/arch/arm/mach-s3c2410/mach-smdk2410.c new file mode 100644 index 000000000..4e0282b12 --- /dev/null +++ b/arch/arm/mach-s3c2410/mach-smdk2410.c @@ -0,0 +1,109 @@ +/*********************************************************************** + * + * linux/arch/arm/mach-s3c2410/mach-smdk2410.c + * + * Copyright (C) 2004 by FS Forth-Systeme GmbH + * All rights reserved. + * + * $Id: mach-smdk2410.c,v 1.1 2004/05/11 14:15:38 mpietrek Exp $ + * @Author: Jonas Dietsche + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + * + * @History: + * derived from linux/arch/arm/mach-s3c2410/mach-bast.c, written by + * Ben Dooks + ***********************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "s3c2410.h" + + +static struct map_desc smdk2410_iodesc[] __initdata = { + /* nothing here yet */ +}; + +#define UCON S3C2410_UCON_DEFAULT +#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB +#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE + +/* base baud rate for all our UARTs */ +static unsigned long smdk2410_serial_clock = 24*1000*1000; + +static struct s3c2410_uartcfg smdk2410_uartcfgs[] = { + [0] = { + .hwport = 0, + .flags = 0, + .clock = &smdk2410_serial_clock, + .ucon = UCON, + .ulcon = ULCON, + .ufcon = UFCON, + }, + [1] = { + .hwport = 1, + .flags = 0, + .clock = &smdk2410_serial_clock, + .ucon = UCON, + .ulcon = ULCON, + .ufcon = UFCON, + }, + [2] = { + .hwport = 2, + .flags = 0, + .clock = &smdk2410_serial_clock, + .ucon = UCON, + .ulcon = ULCON, + .ufcon = UFCON, + } +}; + + +void __init smdk2410_map_io(void) +{ + s3c2410_map_io(smdk2410_iodesc, ARRAY_SIZE(smdk2410_iodesc)); + s3c2410_uartcfgs = smdk2410_uartcfgs; +} + +void __init smdk2410_init_irq(void) +{ + s3c2410_init_irq(); +} + +MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switch + * to SMDK2410 */ + MAINTAINER("Jonas Dietsche") + BOOT_MEM(S3C2410_SDRAM_PA, S3C2410_PA_UART, S3C2410_VA_UART) + BOOT_PARAMS(S3C2410_SDRAM_PA + 0x100) + MAPIO(smdk2410_map_io) + INITIRQ(smdk2410_init_irq) +MACHINE_END diff --git a/arch/cris/arch-v10/drivers/ide.c b/arch/cris/arch-v10/drivers/ide.c new file mode 100644 index 000000000..335473c45 --- /dev/null +++ b/arch/cris/arch-v10/drivers/ide.c @@ -0,0 +1,945 @@ +/* $Id: ide.c,v 1.1 2004/01/22 08:22:58 starvik Exp $ + * + * Etrax specific IDE functions, like init and PIO-mode setting etc. + * Almost the entire ide.c is used for the rest of the Etrax ATA driver. + * Copyright (c) 2000-2004 Axis Communications AB + * + * Authors: Bjorn Wesen (initial version) + * Mikael Starvik (pio setup stuff, Linux 2.6 port) + */ + +/* Regarding DMA: + * + * There are two forms of DMA - "DMA handshaking" between the interface and the drive, + * and DMA between the memory and the interface. We can ALWAYS use the latter, since it's + * something built-in in the Etrax. However only some drives support the DMA-mode handshaking + * on the ATA-bus. The normal PC driver and Triton interface disables memory-if DMA when the + * device can't do DMA handshaking for some stupid reason. We don't need to do that. 
+ */ + +#undef REALLY_SLOW_IO /* most systems can safely undef this */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* number of Etrax DMA descriptors */ +#define MAX_DMA_DESCRS 64 + +/* number of times to retry busy-flags when reading/writing IDE-registers + * this can't be too high because a hung harddisk might cause the watchdog + * to trigger (sometimes INB and OUTB are called with irq's disabled) + */ + +#define IDE_REGISTER_TIMEOUT 300 + +#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET +/* address where the memory-mapped IDE reset bit lives, if used */ +static volatile unsigned long *reset_addr; +#endif + +static int e100_read_command = 0; + +#define LOWDB(x) +#define D(x) + +void +etrax100_ide_outw(unsigned short data, ide_ioreg_t reg) { + int timeleft; + LOWDB(printk("ow: data 0x%x, reg 0x%x\n", data, reg)); + + /* note the lack of handling any timeouts. we stop waiting, but we don't + * really notify anybody. + */ + + timeleft = IDE_REGISTER_TIMEOUT; + /* wait for busy flag */ + while(timeleft && (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy))) + timeleft--; + + /* + * Fall through at a timeout, so the ongoing command will be + * aborted by the write below, which is expected to be a dummy + * command to the command register. This happens when a faulty + * drive times out on a command. See comment on timeout in + * INB. + */ + if(!timeleft) + printk("ATA timeout reg 0x%lx := 0x%x\n", reg, data); + + *R_ATA_CTRL_DATA = reg | data; /* write data to the drive's register */ + + timeleft = IDE_REGISTER_TIMEOUT; + /* wait for transmitter ready */ + while(timeleft && !(*R_ATA_STATUS_DATA & + IO_MASK(R_ATA_STATUS_DATA, tr_rdy))) + timeleft--; +} + +void +etrax100_ide_outb(unsigned char data, ide_ioreg_t reg) +{ + etrax100_ide_outw(data, reg); +} + +void +etrax100_ide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port) +{ + etrax100_ide_outw(addr, port); +} + +unsigned short +etrax100_ide_inw(ide_ioreg_t reg) { + int status; + int timeleft; + + timeleft = IDE_REGISTER_TIMEOUT; + /* wait for busy flag */ + while(timeleft && (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy))) + timeleft--; + + if(!timeleft) { + /* + * If we're asked to read the status register, like for + * example when a command does not complete for an + * extended time, but the ATA interface is stuck in a + * busy state at the *ETRAX* ATA interface level (as has + * happened repeatedly with at least one bad disk), then + * the best thing to do is to pretend that we read + * "busy" in the status register, so the IDE driver will + * time-out, abort the ongoing command and perform a + * reset sequence. Note that the subsequent OUT_BYTE + * call will also timeout on busy, but as long as the + * write is still performed, everything will be fine. + */ + if ((reg & IO_MASK (R_ATA_CTRL_DATA, addr)) + == IO_FIELD (R_ATA_CTRL_DATA, addr, IDE_STATUS_OFFSET)) + return BUSY_STAT; + else + /* For other rare cases we assume 0 is good enough. 
*/ + return 0; + } + + *R_ATA_CTRL_DATA = reg | IO_STATE(R_ATA_CTRL_DATA, rw, read); /* read data */ + + timeleft = IDE_REGISTER_TIMEOUT; + /* wait for available */ + while(timeleft && !((status = *R_ATA_STATUS_DATA) & + IO_MASK(R_ATA_STATUS_DATA, dav))) + timeleft--; + + if(!timeleft) + return 0; + + LOWDB(printk("inb: 0x%x from reg 0x%x\n", status & 0xff, reg)); + + return (unsigned short)status; +} + +unsigned char +etrax100_ide_inb(ide_ioreg_t reg) +{ + return (unsigned char)etrax100_ide_inw(reg); +} + +/* PIO timing (in R_ATA_CONFIG) + * + * _____________________________ + * ADDRESS : ________/ + * + * _______________ + * DIOR : ____________/ \__________ + * + * _______________ + * DATA : XXXXXXXXXXXXXXXX_______________XXXXXXXX + * + * + * DIOR is unbuffered while address and data is buffered. + * This creates two problems: + * 1. The DIOR pulse is to early (because it is unbuffered) + * 2. The rise time of DIOR is long + * + * There are at least three different plausible solutions + * 1. Use a pad capable of larger currents in Etrax + * 2. Use an external buffer + * 3. Make the strobe pulse longer + * + * Some of the strobe timings below are modified to compensate + * for this. This implies a slight performance decrease. + * + * THIS SHOULD NEVER BE CHANGED! + * + * TODO: Is this true for the latest LX boards still ? + */ + +#define ATA_DMA2_STROBE 4 +#define ATA_DMA2_HOLD 0 +#define ATA_DMA1_STROBE 4 +#define ATA_DMA1_HOLD 1 +#define ATA_DMA0_STROBE 12 +#define ATA_DMA0_HOLD 9 +#define ATA_PIO4_SETUP 1 +#define ATA_PIO4_STROBE 5 +#define ATA_PIO4_HOLD 0 +#define ATA_PIO3_SETUP 1 +#define ATA_PIO3_STROBE 5 +#define ATA_PIO3_HOLD 1 +#define ATA_PIO2_SETUP 1 +#define ATA_PIO2_STROBE 6 +#define ATA_PIO2_HOLD 2 +#define ATA_PIO1_SETUP 2 +#define ATA_PIO1_STROBE 11 +#define ATA_PIO1_HOLD 4 +#define ATA_PIO0_SETUP 4 +#define ATA_PIO0_STROBE 19 +#define ATA_PIO0_HOLD 4 + +static int e100_dma_check (ide_drive_t *drive); +static int e100_dma_begin (ide_drive_t *drive); +static int e100_dma_end (ide_drive_t *drive); +static int e100_dma_read (ide_drive_t *drive); +static int e100_dma_write (ide_drive_t *drive); +static void e100_ide_input_data (ide_drive_t *drive, void *, unsigned int); +static void e100_ide_output_data (ide_drive_t *drive, void *, unsigned int); +static void e100_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int); +static void e100_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int); +static int e100_dma_off (ide_drive_t *drive); +static int e100_dma_verbose (ide_drive_t *drive); + + +/* + * good_dma_drives() lists the model names (from "hdparm -i") + * of drives which do not support mword2 DMA but which are + * known to work fine with this interface under Linux. + */ + +const char *good_dma_drives[] = {"Micropolis 2112A", + "CONNER CTMA 4000", + "CONNER CTT8000-A", + NULL}; + +static void tune_e100_ide(ide_drive_t *drive, byte pio) +{ + pio = 4; + /* pio = ide_get_best_pio_mode(drive, pio, 4, NULL); */ + + /* set pio mode! 
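The switch statement that follows packs one of these setup/strobe/hold triples into the ATA configuration register, one case per PIO mode. The same selection can be written table-driven, as sketched below; the bit positions used for packing (0, 8, 16) are invented for the example and do not correspond to the real R_ATA_CONFIG layout, which the driver builds with IO_FIELD():

#include <stdint.h>

struct pio_timing { uint8_t setup, strobe, hold; };

/* Setup/strobe/hold cycle counts per PIO mode, copied from the macros above. */
static const struct pio_timing pio_timings[5] = {
        { 4, 19, 4 },   /* PIO 0 */
        { 2, 11, 4 },   /* PIO 1 */
        { 1,  6, 2 },   /* PIO 2 */
        { 1,  5, 1 },   /* PIO 3 */
        { 1,  5, 0 },   /* PIO 4 */
};

/* Pack a timing triple into a single register image (illustrative layout). */
static uint32_t pio_config_word(unsigned int mode)
{
        const struct pio_timing *t;

        if (mode > 4)
                mode = 4;       /* clamp, as tune_e100_ide forces mode 4 anyway */
        t = &pio_timings[mode];
        return (uint32_t)t->setup | ((uint32_t)t->strobe << 8) |
               ((uint32_t)t->hold << 16);
}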
*/ + + switch(pio) { + case 0: + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO0_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO0_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO0_HOLD ) ); + break; + case 1: + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO1_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO1_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO1_HOLD ) ); + break; + case 2: + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO2_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO2_HOLD ) ); + break; + case 3: + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO3_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO3_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO3_HOLD ) ); + break; + case 4: + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO4_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO4_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO4_HOLD ) ); + break; + } +} + +void __init +init_e100_ide (void) +{ + volatile unsigned int dummy; + int h; + + printk("ide: ETRAX 100LX built-in ATA DMA controller\n"); + + /* first fill in some stuff in the ide_hwifs fields */ + + for(h = 0; h < MAX_HWIFS; h++) { + ide_hwif_t *hwif = &ide_hwifs[h]; + hwif->mmio = 2; + hwif->chipset = ide_etrax100; + hwif->tuneproc = &tune_e100_ide; + hwif->ata_input_data = &e100_ide_input_data; + hwif->ata_output_data = &e100_ide_output_data; + hwif->atapi_input_bytes = &e100_atapi_input_bytes; + hwif->atapi_output_bytes = &e100_atapi_output_bytes; + hwif->ide_dma_check = &e100_dma_check; + hwif->ide_dma_end = &e100_dma_end; + hwif->ide_dma_write = &e100_dma_write; + hwif->ide_dma_read = &e100_dma_read; + hwif->ide_dma_begin = &e100_dma_begin; + hwif->OUTB = &etrax100_ide_outb; + hwif->OUTW = &etrax100_ide_outw; + hwif->OUTBSYNC = &etrax100_ide_outbsync; + hwif->INB = &etrax100_ide_inb; + hwif->INW = &etrax100_ide_inw; + hwif->ide_dma_off_quietly = &e100_dma_off; + hwif->ide_dma_verbose = &e100_dma_verbose; + hwif->sg_table = + kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES, GFP_KERNEL); + } + + /* actually reset and configure the etrax100 ide/ata interface */ + + *R_ATA_CTRL_DATA = 0; + *R_ATA_TRANSFER_CNT = 0; + *R_ATA_CONFIG = 0; + + genconfig_shadow = (genconfig_shadow & + ~IO_MASK(R_GEN_CONFIG, dma2) & + ~IO_MASK(R_GEN_CONFIG, dma3) & + ~IO_MASK(R_GEN_CONFIG, ata)) | + ( IO_STATE( R_GEN_CONFIG, dma3, ata ) | + IO_STATE( R_GEN_CONFIG, dma2, ata ) | + IO_STATE( R_GEN_CONFIG, ata, select ) ); + + *R_GEN_CONFIG = genconfig_shadow; + + /* pull the chosen /reset-line low */ + +#ifdef CONFIG_ETRAX_IDE_G27_RESET + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, 0); +#endif +#ifdef 
CONFIG_ETRAX_IDE_CSE1_16_RESET + REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, 0); +#endif +#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET + REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, 0); +#endif +#ifdef CONFIG_ETRAX_IDE_PB7_RESET + port_pb_dir_shadow = port_pb_dir_shadow | + IO_STATE(R_PORT_PB_DIR, dir7, output); + *R_PORT_PB_DIR = port_pb_dir_shadow; + REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 7, 1); +#endif + + /* wait some */ + + udelay(25); + + /* de-assert bus-reset */ + +#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET + REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, 1); +#endif +#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET + REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, 1); +#endif +#ifdef CONFIG_ETRAX_IDE_G27_RESET + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, 1); +#endif + + /* make a dummy read to set the ata controller in a proper state */ + dummy = *R_ATA_STATUS_DATA; + + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO4_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO4_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO4_HOLD ) ); + + *R_ATA_CTRL_DATA = ( IO_STATE( R_ATA_CTRL_DATA, rw, read) | + IO_FIELD( R_ATA_CTRL_DATA, addr, 1 ) ); + + while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); /* wait for busy flag*/ + + *R_IRQ_MASK0_SET = ( IO_STATE( R_IRQ_MASK0_SET, ata_irq0, set ) | + IO_STATE( R_IRQ_MASK0_SET, ata_irq1, set ) | + IO_STATE( R_IRQ_MASK0_SET, ata_irq2, set ) | + IO_STATE( R_IRQ_MASK0_SET, ata_irq3, set ) ); + + printk("ide: waiting %d seconds for drives to regain consciousness\n", + CONFIG_ETRAX_IDE_DELAY); + + h = jiffies + (CONFIG_ETRAX_IDE_DELAY * HZ); + while(time_before(jiffies, h)) /* nothing */ ; + + /* reset the dma channels we will use */ + + RESET_DMA(ATA_TX_DMA_NBR); + RESET_DMA(ATA_RX_DMA_NBR); + WAIT_DMA(ATA_TX_DMA_NBR); + WAIT_DMA(ATA_RX_DMA_NBR); + +} + +static int e100_dma_off (ide_drive_t *drive) +{ + return 0; +} + +static int e100_dma_verbose (ide_drive_t *drive) +{ + printk(", DMA(mode 2)"); + return 0; +} + +static etrax_dma_descr mydescr; + +/* + * The following routines are mainly used by the ATAPI drivers. + * + * These routines will round up any request for an odd number of bytes, + * so if an odd bytecount is specified, be sure that there's at least one + * extra byte allocated for the buffer. 
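The REG_SHADOW_SET calls in the reset sequence above follow the usual ETRAX idiom for write-only ports: keep a software shadow of the register, flip the bit in the shadow, then write the whole shadow back, since the hardware cannot be read back to preserve the other bits. A stand-alone model of that idiom (reg_shadow_set, port and shadow are illustrative names):

#include <stdint.h>

/* Update one bit of a write-only port via its software shadow copy. */
static void reg_shadow_set(volatile uint32_t *port, uint32_t *shadow,
                           unsigned int bit, int value)
{
        if (value)
                *shadow |= (uint32_t)1 << bit;
        else
                *shadow &= ~((uint32_t)1 << bit);
        *port = *shadow;        /* the hardware always sees the merged state */
}

A reset pulse then becomes reg_shadow_set(..., 0), a short delay, and reg_shadow_set(..., 1), which is exactly the shape of the sequence in init_e100_ide.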
+ */ +static void +e100_atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount) +{ + ide_ioreg_t data_reg = IDE_DATA_REG; + + D(printk("atapi_input_bytes, dreg 0x%x, buffer 0x%x, count %d\n", + data_reg, buffer, bytecount)); + + if(bytecount & 1) { + printk("warning, odd bytecount in cdrom_in_bytes = %d.\n", bytecount); + bytecount++; /* to round off */ + } + + /* make sure the DMA channel is available */ + RESET_DMA(ATA_RX_DMA_NBR); + WAIT_DMA(ATA_RX_DMA_NBR); + + /* setup DMA descriptor */ + + mydescr.sw_len = bytecount; + mydescr.ctrl = d_eol; + mydescr.buf = virt_to_phys(buffer); + + /* start the dma channel */ + + *R_DMA_CH3_FIRST = virt_to_phys(&mydescr); + *R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, start); + + /* initiate a multi word dma read using PIO handshaking */ + + *R_ATA_TRANSFER_CNT = IO_FIELD(R_ATA_TRANSFER_CNT, count, bytecount >> 1); + + *R_ATA_CTRL_DATA = data_reg | + IO_STATE(R_ATA_CTRL_DATA, rw, read) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | + IO_STATE(R_ATA_CTRL_DATA, handsh, pio) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + /* wait for completion */ + + LED_DISK_READ(1); + WAIT_DMA(ATA_RX_DMA_NBR); + LED_DISK_READ(0); + +#if 0 + /* old polled transfer code + * this should be moved into a new function that can do polled + * transfers if DMA is not available + */ + + /* initiate a multi word read */ + + *R_ATA_TRANSFER_CNT = wcount << 1; + + *R_ATA_CTRL_DATA = data_reg | + IO_STATE(R_ATA_CTRL_DATA, rw, read) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, register) | + IO_STATE(R_ATA_CTRL_DATA, handsh, pio) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + /* svinto has a latency until the busy bit actually is set */ + + nop(); nop(); + nop(); nop(); + nop(); nop(); + nop(); nop(); + nop(); nop(); + + /* unit should be busy during multi transfer */ + while((status = *R_ATA_STATUS_DATA) & IO_MASK(R_ATA_STATUS_DATA, busy)) { + while(!(status & IO_MASK(R_ATA_STATUS_DATA, dav))) + status = *R_ATA_STATUS_DATA; + *ptr++ = (unsigned short)(status & 0xffff); + } +#endif +} + +static void +e100_atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount) +{ + ide_ioreg_t data_reg = IDE_DATA_REG; + + D(printk("atapi_output_bytes, dreg 0x%x, buffer 0x%x, count %d\n", + data_reg, buffer, bytecount)); + + if(bytecount & 1) { + printk("odd bytecount %d in atapi_out_bytes!\n", bytecount); + bytecount++; + } + + /* make sure the DMA channel is available */ + RESET_DMA(ATA_TX_DMA_NBR); + WAIT_DMA(ATA_TX_DMA_NBR); + + /* setup DMA descriptor */ + + mydescr.sw_len = bytecount; + mydescr.ctrl = d_eol; + mydescr.buf = virt_to_phys(buffer); + + /* start the dma channel */ + + *R_DMA_CH2_FIRST = virt_to_phys(&mydescr); + *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, start); + + /* initiate a multi word dma write using PIO handshaking */ + + *R_ATA_TRANSFER_CNT = IO_FIELD(R_ATA_TRANSFER_CNT, count, bytecount >> 1); + + *R_ATA_CTRL_DATA = data_reg | + IO_STATE(R_ATA_CTRL_DATA, rw, write) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | + IO_STATE(R_ATA_CTRL_DATA, handsh, pio) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + /* wait for completion */ + + LED_DISK_WRITE(1); + WAIT_DMA(ATA_TX_DMA_NBR); + LED_DISK_WRITE(0); + +#if 0 + /* old polled write code - see comment in input_bytes */ + + /* wait for busy flag */ + while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); + + /* initiate a multi word write */ + + 
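Both ATAPI helpers above drive a whole transfer with a single Etrax DMA descriptor: a byte length, an end-of-list control flag and the physical address of the buffer, after which the channel is started and the code simply waits for completion. The sketch below models that descriptor as a plain struct; the field names follow the patch, but the field widths and the D_EOL value are assumptions made for illustration:

#include <stdint.h>
#include <string.h>

#define D_EOL (1u << 0)         /* assumed "end of descriptor list" flag */

struct dma_descr {
        uint16_t sw_len;        /* transfer length in bytes (0 encodes 65536) */
        uint16_t ctrl;          /* control flags such as D_EOL */
        uint32_t buf;           /* physical address of the data buffer */
        uint32_t next;          /* physical address of the next descriptor */
};

/* Prepare a one-entry descriptor list for a physically contiguous buffer.
 * The caller must already have rounded bytecount up to an even value,
 * as the ATAPI helpers do. */
static void fill_single_descr(struct dma_descr *d, uint32_t buf_phys,
                              uint16_t bytecount)
{
        memset(d, 0, sizeof(*d));
        d->sw_len = bytecount;
        d->ctrl   = D_EOL;      /* last (and only) entry in the chain */
        d->buf    = buf_phys;
}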
*R_ATA_TRANSFER_CNT = bytecount >> 1; + + ctrl = data_reg | + IO_STATE(R_ATA_CTRL_DATA, rw, write) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, register) | + IO_STATE(R_ATA_CTRL_DATA, handsh, pio) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + LED_DISK_WRITE(1); + + /* Etrax will set busy = 1 until the multi pio transfer has finished + * and tr_rdy = 1 after each successful word transfer. + * When the last byte has been transferred Etrax will first set tr_tdy = 1 + * and then busy = 0 (not in the same cycle). If we read busy before it + * has been set to 0 we will think that we should transfer more bytes + * and then tr_rdy would be 0 forever. This is solved by checking busy + * in the inner loop. + */ + + do { + *R_ATA_CTRL_DATA = ctrl | *ptr++; + while(!(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy)) && + (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy))); + } while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); + + LED_DISK_WRITE(0); +#endif + +} + +/* + * This is used for most PIO data transfers *from* the IDE interface + */ +static void +e100_ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount) +{ + e100_atapi_input_bytes(drive, buffer, wcount << 2); +} + +/* + * This is used for most PIO data transfers *to* the IDE interface + */ +static void +e100_ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount) +{ + e100_atapi_output_bytes(drive, buffer, wcount << 2); +} + +/* we only have one DMA channel on the chip for ATA, so we can keep these statically */ +static etrax_dma_descr ata_descrs[MAX_DMA_DESCRS]; +static unsigned int ata_tot_size; + +/* + * e100_ide_build_dmatable() prepares a dma request. + * Returns 0 if all went okay, returns 1 otherwise. + */ +static int e100_ide_build_dmatable (ide_drive_t *drive) +{ + ide_hwif_t *hwif = HWIF(drive); + struct scatterlist* sg; + struct request *rq = HWGROUP(drive)->rq; + unsigned long size, addr; + unsigned int count = 0; + int i = 0; + + sg = hwif->sg_table; + + ata_tot_size = 0; + + if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE) { + u8 *virt_addr = rq->buffer; + int sector_count = rq->nr_sectors; + memset(&sg[0], 0, sizeof(*sg)); + sg[0].page = virt_to_page(virt_addr); + sg[0].offset = offset_in_page(virt_addr); + sg[0].length = sector_count * SECTOR_SIZE; + hwif->sg_nents = i = 1; + } + else + { + hwif->sg_nents = i = blk_rq_map_sg(drive->queue, rq, hwif->sg_table); + } + + + while(i) { + /* + * Determine addr and size of next buffer area. We assume that + * individual virtual buffers are always composed linearly in + * physical memory. For example, we assume that any 8kB buffer + * is always composed of two adjacent physical 4kB pages rather + * than two possibly non-adjacent physical 4kB pages. + */ + /* group sequential buffers into one large buffer */ + addr = page_to_phys(sg->page) + sg->offset; + size = sg_dma_len(sg); + while (sg++, --i) { + if ((addr + size) != page_to_phys(sg->page) + sg->offset) + break; + size += sg_dma_len(sg); + } + + /* did we run out of descriptors? 
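e100_ide_build_dmatable first merges scatter/gather entries that are physically adjacent (the loop above) and then slices each merged area into descriptors of at most 65536 bytes, since the hardware length field is 16 bits wide with 0 encoding 65536, as the code below spells out. A user-space model of that two-step logic, using plain {addr, len} pairs in place of struct scatterlist:

#include <stdint.h>

struct seg { uint32_t addr; uint32_t len; };    /* stand-in for an sg entry */

#define MAX_DESCRS 64

/* Merge physically adjacent segments, then cut each merged area into
 * chunks of at most 65536 bytes.  Returns the number of chunks produced,
 * or -1 if MAX_DESCRS would be exceeded. */
static int build_chunks(const struct seg *sg, int nents, struct seg *out)
{
        int i = 0, count = 0;

        while (i < nents) {
                uint32_t addr = sg[i].addr;
                uint32_t size = sg[i].len;

                /* group sequential buffers into one large buffer */
                while (++i < nents && addr + size == sg[i].addr)
                        size += sg[i].len;

                while (size > 0) {
                        uint32_t chunk = size > 65536 ? 65536 : size;

                        if (count >= MAX_DESCRS)
                                return -1;      /* ran out of descriptors */
                        out[count].addr = addr;
                        out[count].len  = chunk;
                        count++;
                        addr += chunk;
                        size -= chunk;
                }
        }
        return count;
}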
*/ + + if(count >= MAX_DMA_DESCRS) { + printk("%s: too few DMA descriptors\n", drive->name); + return 1; + } + + /* however, this case is more difficult - R_ATA_TRANSFER_CNT cannot be more + than 65536 words per transfer, so in that case we need to either + 1) use a DMA interrupt to re-trigger R_ATA_TRANSFER_CNT and continue with + the descriptors, or + 2) simply do the request here, and get dma_intr to only ide_end_request on + those blocks that were actually set-up for transfer. + */ + + if(ata_tot_size + size > 131072) { + printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, (int)size); + return 1; + } + + /* If size > 65536 it has to be splitted into new descriptors. Since we don't handle + size > 131072 only one split is necessary */ + + if(size > 65536) { + /* ok we want to do IO at addr, size bytes. set up a new descriptor entry */ + ata_descrs[count].sw_len = 0; /* 0 means 65536, this is a 16-bit field */ + ata_descrs[count].ctrl = 0; + ata_descrs[count].buf = addr; + ata_descrs[count].next = virt_to_phys(&ata_descrs[count + 1]); + count++; + ata_tot_size += 65536; + /* size and addr should refere to not handled data */ + size -= 65536; + addr += 65536; + } + /* ok we want to do IO at addr, size bytes. set up a new descriptor entry */ + if(size == 65536) { + ata_descrs[count].sw_len = 0; /* 0 means 65536, this is a 16-bit field */ + } else { + ata_descrs[count].sw_len = size; + } + ata_descrs[count].ctrl = 0; + ata_descrs[count].buf = addr; + ata_descrs[count].next = virt_to_phys(&ata_descrs[count + 1]); + count++; + ata_tot_size += size; + } + + if (count) { + /* set the end-of-list flag on the last descriptor */ + ata_descrs[count - 1].ctrl |= d_eol; + /* return and say all is ok */ + return 0; + } + + printk("%s: empty DMA table?\n", drive->name); + return 1; /* let the PIO routines handle this weirdness */ +} + +static int config_drive_for_dma (ide_drive_t *drive) +{ + const char **list; + struct hd_driveid *id = drive->id; + + if (id && (id->capability & 1)) { + /* Enable DMA on any drive that supports mword2 DMA */ + if ((id->field_valid & 2) && (id->dma_mword & 0x404) == 0x404) { + drive->using_dma = 1; + return 0; /* DMA enabled */ + } + + /* Consult the list of known "good" drives */ + list = good_dma_drives; + while (*list) { + if (!strcmp(*list++,id->model)) { + drive->using_dma = 1; + return 0; /* DMA enabled */ + } + } + } + return 1; /* DMA not enabled */ +} + +/* + * etrax_dma_intr() is the handler for disk read/write DMA interrupts + */ +static ide_startstop_t etrax_dma_intr (ide_drive_t *drive) +{ + int i, dma_stat; + byte stat; + + LED_DISK_READ(0); + LED_DISK_WRITE(0); + + dma_stat = HWIF(drive)->ide_dma_end(drive); + stat = HWIF(drive)->INB(IDE_STATUS_REG); /* get drive status */ + if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { + if (!dma_stat) { + struct request *rq; + rq = HWGROUP(drive)->rq; + for (i = rq->nr_sectors; i > 0;) { + i -= rq->current_nr_sectors; + DRIVER(drive)->end_request(drive, 1, rq->nr_sectors); + } + return ide_stopped; + } + printk("%s: bad DMA status\n", drive->name); + } + return DRIVER(drive)->error(drive, "dma_intr", stat); +} + +/* + * Functions below initiates/aborts DMA read/write operations on a drive. + * + * The caller is assumed to have selected the drive and programmed the drive's + * sector address using CHS or LBA. All that remains is to prepare for DMA + * and then issue the actual read/write DMA/PIO command to the drive. + * + * For ATAPI devices, we just prepare for DMA and return. 
The caller should + * then issue the packet command to the drive and call us again with + * ide_dma_begin afterwards. + * + * Returns 0 if all went well. + * Returns 1 if DMA read/write could not be started, in which case + * the caller should revert to PIO for the current request. + */ + +static int e100_dma_check(ide_drive_t *drive) +{ + return config_drive_for_dma (drive); +} + +static int e100_dma_end(ide_drive_t *drive) +{ + /* TODO: check if something went wrong with the DMA */ + return 0; +} + +static int e100_start_dma(ide_drive_t *drive, int atapi, int reading) +{ + if(reading) { + + RESET_DMA(ATA_RX_DMA_NBR); /* sometimes the DMA channel get stuck so we need to do this */ + WAIT_DMA(ATA_RX_DMA_NBR); + + /* set up the Etrax DMA descriptors */ + + if(e100_ide_build_dmatable (drive)) + return 1; + + if(!atapi) { + /* set the irq handler which will finish the request when DMA is done */ + + ide_set_handler(drive, &etrax_dma_intr, WAIT_CMD, NULL); + + /* issue cmd to drive */ + if ((HWGROUP(drive)->rq->cmd == IDE_DRIVE_TASKFILE) && + (drive->addressing == 1)) { + ide_task_t *args = HWGROUP(drive)->rq->special; + etrax100_ide_outb(args->tfRegister[IDE_COMMAND_OFFSET], IDE_COMMAND_REG); + } else if (drive->addressing) { + etrax100_ide_outb(WIN_READDMA_EXT, IDE_COMMAND_REG); + } else { + etrax100_ide_outb(WIN_READDMA, IDE_COMMAND_REG); + } + } + + /* begin DMA */ + + /* need to do this before RX DMA due to a chip bug + * it is enough to just flush the part of the cache that + * corresponds to the buffers we start, but since HD transfers + * usually are more than 8 kB, it is easier to optimize for the + * normal case and just flush the entire cache. its the only + * way to be sure! (OB movie quote) + */ + flush_etrax_cache(); + *R_DMA_CH3_FIRST = virt_to_phys(ata_descrs); + *R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, start); + + /* initiate a multi word dma read using DMA handshaking */ + + *R_ATA_TRANSFER_CNT = + IO_FIELD(R_ATA_TRANSFER_CNT, count, ata_tot_size >> 1); + + *R_ATA_CTRL_DATA = + IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) | + IO_STATE(R_ATA_CTRL_DATA, rw, read) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | + IO_STATE(R_ATA_CTRL_DATA, handsh, dma) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + LED_DISK_READ(1); + + D(printk("dma read of %d bytes.\n", ata_tot_size)); + + } else { + /* writing */ + + RESET_DMA(ATA_TX_DMA_NBR); /* sometimes the DMA channel get stuck so we need to do this */ + WAIT_DMA(ATA_TX_DMA_NBR); + + /* set up the Etrax DMA descriptors */ + + if(e100_ide_build_dmatable (drive)) + return 1; + + if(!atapi) { + /* set the irq handler which will finish the request when DMA is done */ + + ide_set_handler(drive, &etrax_dma_intr, WAIT_CMD, NULL); + + /* issue cmd to drive */ + if ((HWGROUP(drive)->rq->cmd == IDE_DRIVE_TASKFILE) && + (drive->addressing == 1)) { + ide_task_t *args = HWGROUP(drive)->rq->special; + etrax100_ide_outb(args->tfRegister[IDE_COMMAND_OFFSET], IDE_COMMAND_REG); + } else if (drive->addressing) { + etrax100_ide_outb(WIN_WRITEDMA_EXT, IDE_COMMAND_REG); + } else { + etrax100_ide_outb(WIN_WRITEDMA, IDE_COMMAND_REG); + } + } + + /* begin DMA */ + + *R_DMA_CH2_FIRST = virt_to_phys(ata_descrs); + *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, start); + + /* initiate a multi word dma write using DMA handshaking */ + + *R_ATA_TRANSFER_CNT = + IO_FIELD(R_ATA_TRANSFER_CNT, count, ata_tot_size >> 1); + + *R_ATA_CTRL_DATA = + IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) | + IO_STATE(R_ATA_CTRL_DATA, rw, 
write) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | + IO_STATE(R_ATA_CTRL_DATA, handsh, dma) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + LED_DISK_WRITE(1); + + D(printk("dma write of %d bytes.\n", ata_tot_size)); + } + return 0; +} + +static int e100_dma_write(ide_drive_t *drive) +{ + e100_read_command = 0; + /* ATAPI-devices (not disks) first call ide_dma_read/write to set the direction + * then they call ide_dma_begin after they have issued the appropriate drive command + * themselves to actually start the chipset DMA. so we just return here if we're + * not a diskdrive. + */ + if (drive->media != ide_disk) + return 0; + return e100_start_dma(drive, 0, 0); +} + +static int e100_dma_read(ide_drive_t *drive) +{ + e100_read_command = 1; + /* ATAPI-devices (not disks) first call ide_dma_read/write to set the direction + * then they call ide_dma_begin after they have issued the appropriate drive command + * themselves to actually start the chipset DMA. so we just return here if we're + * not a diskdrive. + */ + if (drive->media != ide_disk) + return 0; + return e100_start_dma(drive, 0, 1); +} + +static int e100_dma_begin(ide_drive_t *drive) +{ + /* begin DMA, used by ATAPI devices which want to issue the + * appropriate IDE command themselves. + * + * they have already called ide_dma_read/write to set the + * static reading flag, now they call ide_dma_begin to do + * the real stuff. we tell our code below not to issue + * any IDE commands itself and jump into it. + */ + return e100_start_dma(drive, 1, e100_read_command); +} diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c new file mode 100644 index 000000000..6ded633f8 --- /dev/null +++ b/arch/cris/kernel/crisksyms.c @@ -0,0 +1,104 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern void dump_thread(struct pt_regs *, struct user *); +extern unsigned long get_cmos_time(void); +extern void __Udiv(void); +extern void __Umod(void); +extern void __Div(void); +extern void __Mod(void); +extern void __ashrdi3(void); +extern void iounmap(void *addr); + +/* Platform dependent support */ +EXPORT_SYMBOL(dump_thread); +EXPORT_SYMBOL(enable_irq); +EXPORT_SYMBOL(disable_irq); +EXPORT_SYMBOL(kernel_thread); +EXPORT_SYMBOL(get_cmos_time); +EXPORT_SYMBOL(loops_per_usec); + +/* String functions */ +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(strpbrk); +EXPORT_SYMBOL(strstr); +EXPORT_SYMBOL(strcpy); +EXPORT_SYMBOL(strchr); +EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strlen); +EXPORT_SYMBOL(strcat); +EXPORT_SYMBOL(strncat); +EXPORT_SYMBOL(strncmp); +EXPORT_SYMBOL(strncpy); + +/* Math functions */ +EXPORT_SYMBOL(__Udiv); +EXPORT_SYMBOL(__Umod); +EXPORT_SYMBOL(__Div); +EXPORT_SYMBOL(__Mod); +EXPORT_SYMBOL(__ashrdi3); + +/* Memory functions */ +EXPORT_SYMBOL(__ioremap); +EXPORT_SYMBOL(iounmap); + +/* Semaphore functions */ +EXPORT_SYMBOL(__up); +EXPORT_SYMBOL(__down); +EXPORT_SYMBOL(__down_interruptible); +EXPORT_SYMBOL(__down_trylock); + +/* Export shadow registers for the CPU I/O pins */ +EXPORT_SYMBOL(genconfig_shadow); +EXPORT_SYMBOL(port_pa_data_shadow); +EXPORT_SYMBOL(port_pa_dir_shadow); +EXPORT_SYMBOL(port_pb_data_shadow); +EXPORT_SYMBOL(port_pb_dir_shadow); +EXPORT_SYMBOL(port_pb_config_shadow); +EXPORT_SYMBOL(port_g_data_shadow); + +/* Userspace access functions */ 
+EXPORT_SYMBOL(__copy_user_zeroing); +EXPORT_SYMBOL(__copy_user); + +/* Cache flush functions */ +EXPORT_SYMBOL(flush_etrax_cache); +EXPORT_SYMBOL(prepare_rx_descriptor); + +#undef memcpy +#undef memset +extern void * memset(void *, int, __kernel_size_t); +extern void * memcpy(void *, const void *, __kernel_size_t); +EXPORT_SYMBOL_NOVERS(memcpy); +EXPORT_SYMBOL_NOVERS(memset); + +#ifdef CONFIG_ETRAX_FAST_TIMER +/* Fast timer functions */ +EXPORT_SYMBOL(fast_timer_list); +EXPORT_SYMBOL(start_one_shot_timer); +EXPORT_SYMBOL(del_fast_timer); +EXPORT_SYMBOL(schedule_usleep); +#endif + diff --git a/arch/i386/mach-generic/es7000.c b/arch/i386/mach-generic/es7000.c new file mode 100644 index 000000000..48d3ec372 --- /dev/null +++ b/arch/i386/mach-generic/es7000.c @@ -0,0 +1,28 @@ +/* + * APIC driver for the Unisys ES7000 chipset. + */ +#define APIC_DEFINITION 1 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static __init int probe_es7000(void) +{ + /* probed later in mptable/ACPI hooks */ + return 0; +} + +struct genapic apic_es7000 = APIC_INIT("es7000", probe_es7000); diff --git a/arch/ia64/configs/sim_defconfig b/arch/ia64/configs/sim_defconfig new file mode 100644 index 000000000..5744b59f1 --- /dev/null +++ b/arch/ia64/configs/sim_defconfig @@ -0,0 +1,535 @@ +# +# Automatically generated make config: don't edit +# + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +# CONFIG_CLEAN_COMPILE is not set +# CONFIG_STANDALONE is not set +CONFIG_BROKEN=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=16 +# CONFIG_HOTPLUG is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_OBSOLETE_MODPARM=y +CONFIG_MODVERSIONS=y +CONFIG_KMOD=y +CONFIG_STOP_MACHINE=y + +# +# Processor type and features +# +CONFIG_IA64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_TIME_INTERPOLATION=y +CONFIG_EFI=y +# CONFIG_IA64_GENERIC is not set +# CONFIG_IA64_DIG is not set +# CONFIG_IA64_HP_ZX1 is not set +# CONFIG_IA64_SGI_SN2 is not set +CONFIG_IA64_HP_SIM=y +# CONFIG_ITANIUM is not set +CONFIG_MCKINLEY=y +# CONFIG_IA64_PAGE_SIZE_4KB is not set +# CONFIG_IA64_PAGE_SIZE_8KB is not set +# CONFIG_IA64_PAGE_SIZE_16KB is not set +CONFIG_IA64_PAGE_SIZE_64KB=y +CONFIG_IA64_L1_CACHE_SHIFT=7 +# CONFIG_MCKINLEY_ASTEP_SPECIFIC is not set +# CONFIG_VIRTUAL_MEM_MAP is not set +# CONFIG_IA64_CYCLONE is not set +CONFIG_FORCE_MAX_ZONEORDER=18 +CONFIG_SMP=y +CONFIG_NR_CPUS=64 +CONFIG_PREEMPT=y +CONFIG_HAVE_DEC_LOCK=y +CONFIG_IA32_SUPPORT=y +CONFIG_COMPAT=y +# CONFIG_PERFMON is not set +CONFIG_IA64_PALINFO=m + +# +# Firmware Drivers +# +CONFIG_EFI_VARS=y +# CONFIG_SMBIOS is not set +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_MISC=y + +# +# Power management and ACPI +# + +# +# Device Drivers +# + +# +# Generic Driver Options +# +# CONFIG_DEBUG_DRIVER is not set + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# 
CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_INITRD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set + +# +# Some SCSI devices (e.g. CD jukebox) support multiple LUNs +# +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y + +# +# SCSI Transport Attributes +# +CONFIG_SCSI_SPI_ATTRS=y +# CONFIG_SCSI_FC_ATTRS is not set + +# +# SCSI low-level drivers +# +# CONFIG_SCSI_AIC7XXX_OLD is not set +# CONFIG_SCSI_SATA is not set +# CONFIG_SCSI_EATA_PIO is not set +# CONFIG_SCSI_DEBUG is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +# CONFIG_UNIX is not set +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_NETDEVICES is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y 
+CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +CONFIG_EFI_RTC=y +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_FS_XATTR is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +# CONFIG_TMPFS is not set +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +CONFIG_NFS_DIRECTIO=y +CONFIG_NFSD=y +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V4 is not set +# CONFIG_NFSD_TCP is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +CONFIG_EFI_PARTITION=y + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Library 
routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set + +# +# HP Simulator drivers +# +CONFIG_HP_SIMETH=y +CONFIG_HP_SIMSERIAL=y +CONFIG_HP_SIMSERIAL_CONSOLE=y +CONFIG_HP_SIMSCSI=y + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_IA64_GRANULE_16MB is not set +CONFIG_IA64_GRANULE_64MB=y +CONFIG_DEBUG_KERNEL=y +# CONFIG_IA64_PRINT_HAZARDS is not set +# CONFIG_DISABLE_VHPT is not set +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_IA64_DEBUG_CMPXCHG is not set +# CONFIG_IA64_DEBUG_IRQ is not set +CONFIG_DEBUG_INFO=y +CONFIG_SYSVIPC_COMPAT=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set diff --git a/arch/ia64/dig/topology.c b/arch/ia64/dig/topology.c new file mode 100644 index 000000000..8dc31378b --- /dev/null +++ b/arch/ia64/dig/topology.c @@ -0,0 +1,43 @@ +/* + * arch/ia64/dig/topology.c + * Popuate driverfs with topology information. + * Derived entirely from i386/mach-default.c + * Intel Corporation - Ashok Raj + */ +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_PER_CPU(struct ia64_cpu, cpu_devices); + +/* + * First Pass: simply borrowed code for now. Later should hook into + * hotplug notification for node/cpu/memory as applicable + */ + +static int arch_register_cpu(int num) +{ + struct node *parent = NULL; + +#ifdef CONFIG_NUMA + //parent = &node_devices[cpu_to_node(num)].node; +#endif + + return register_cpu(&per_cpu(cpu_devices,num).cpu, num, parent); +} + +static int __init topology_init(void) +{ + int i; + + for_each_cpu(i) { + arch_register_cpu(i); + } + return 0; +} + +subsys_initcall(topology_init); diff --git a/arch/ia64/lib/bitop.c b/arch/ia64/lib/bitop.c new file mode 100644 index 000000000..1c6ee49fd --- /dev/null +++ b/arch/ia64/lib/bitop.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include + +/* + * Find next zero bit in a bitmap reasonably efficiently.. + */ + +int __find_next_zero_bit (void *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = ((unsigned long *) addr) + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (64-offset); + if (size < 64) + goto found_first; + if (~tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if (~(tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* any bits zero? */ + return result + size; /* nope */ +found_middle: + return result + ffz(tmp); +} +EXPORT_SYMBOL(__find_next_zero_bit); + +/* + * Find next bit in a bitmap reasonably efficiently.. 
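__find_next_zero_bit above works a 64-bit word at a time: it masks off the bits below the starting offset, skips over words that are entirely ones, and finishes with a find-first-zero (ffz) inside the interesting word. The simplified user-space version below keeps the word-at-a-time skip but replaces the ffz and boundary tricks with a plain per-bit loop, trading speed for readability:

#include <stdint.h>

/* Return the index of the first clear bit at or after 'offset', or 'size'
 * if every bit in [offset, size) is set. */
static unsigned long find_next_zero_bit_simple(const uint64_t *map,
                                               unsigned long size,
                                               unsigned long offset)
{
        while (offset < size) {
                uint64_t word = map[offset >> 6];

                /* Skip a fully-set, fully-covered word in one step. */
                if (word == ~(uint64_t)0 && (offset & 63) == 0 &&
                    size - offset >= 64) {
                        offset += 64;
                        continue;
                }
                if (!(word & ((uint64_t)1 << (offset & 63))))
                        return offset;
                offset++;
        }
        return size;
}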
+ */ +int __find_next_bit(const void *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = ((unsigned long *) addr) + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp &= ~0UL << offset; + if (size < 64) + goto found_first; + if (tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if ((tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; + found_first: + tmp &= ~0UL >> (64-size); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ + found_middle: + return result + __ffs(tmp); +} +EXPORT_SYMBOL(__find_next_bit); diff --git a/arch/mips/au1000/common/cputable.c b/arch/mips/au1000/common/cputable.c new file mode 100644 index 000000000..26744b317 --- /dev/null +++ b/arch/mips/au1000/common/cputable.c @@ -0,0 +1,56 @@ +/* + * arch/mips/au1000/common/cputable.c + * + * Copyright (C) 2004 Dan Malek (dan@embeddededge.com) + * Copied from PowerPC and updated for Alchemy Au1xxx processors. + * + * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +struct cpu_spec* cur_cpu_spec[NR_CPUS]; + +/* With some thought, we can probably use the mask to reduce the + * size of the table. + */ +struct cpu_spec cpu_specs[] = { + { 0xffffffff, 0x00030100, "Au1000 DA", 1, 0 }, + { 0xffffffff, 0x00030201, "Au1000 HA", 1, 0 }, + { 0xffffffff, 0x00030202, "Au1000 HB", 1, 0 }, + { 0xffffffff, 0x00030203, "Au1000 HC", 1, 1 }, + { 0xffffffff, 0x00030204, "Au1000 HD", 1, 1 }, + { 0xffffffff, 0x01030200, "Au1500 AB", 1, 1 }, + { 0xffffffff, 0x01030201, "Au1500 AC", 0, 1 }, + { 0xffffffff, 0x01030202, "Au1500 AD", 0, 1 }, + { 0xffffffff, 0x02030200, "Au1100 AB", 1, 1 }, + { 0xffffffff, 0x02030201, "Au1100 BA", 1, 1 }, + { 0xffffffff, 0x02030202, "Au1100 BC", 1, 1 }, + { 0xffffffff, 0x02030203, "Au1100 BD", 0, 1 }, + { 0xffffffff, 0x02030204, "Au1100 BE", 0, 1 }, + { 0xffffffff, 0x03030200, "Au1550 AA", 0, 1 }, + { 0x00000000, 0x00000000, "Unknown Au1xxx", 1, 0 }, +}; + +void +set_cpuspec(void) +{ + struct cpu_spec *sp; + u32 prid; + + prid = read_c0_prid(); + sp = cpu_specs; + while ((prid & sp->prid_mask) != sp->prid_value) + sp++; + cur_cpu_spec[0] = sp; +} diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c new file mode 100644 index 000000000..ccfd5fe5a --- /dev/null +++ b/arch/parisc/kernel/unwind.c @@ -0,0 +1,295 @@ +/* + * Kernel unwinding support + * + * (c) 2002-2004 Randolph Chung + * + * Derived partially from the IA64 implementation. The PA-RISC + * Runtime Architecture Document is also a useful reference to + * understand what is happening here + */ + +/* + * J. David Anglin writes: + * + * "You have to adjust the current sp to that at the begining of the function. + * There can be up to two stack additions to allocate the frame in the + * prologue. Similar things happen in the epilogue. In the presence of + * interrupts, you have to be concerned about where you are in the function + * and what stack adjustments have taken place." 
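set_cpuspec above relies on a classic mask/match table: each row is tested as (prid & mask) == value, and the final row uses a mask of zero so that it matches any value and serves as the "Unknown Au1xxx" fallback, guaranteeing the scan terminates. The lookup pattern in isolation, with a couple of rows borrowed from the table (struct and function names are illustrative):

#include <stdint.h>

struct id_entry {
        uint32_t    mask;       /* bits of the ID that must match */
        uint32_t    value;      /* expected value under that mask */
        const char *name;
};

static const struct id_entry id_table[] = {
        { 0xffffffff, 0x01030201, "Au1500 AC" },
        { 0xffffffff, 0x02030203, "Au1100 BD" },
        { 0x00000000, 0x00000000, "Unknown Au1xxx" },   /* catch-all terminator */
};

/* The last entry has mask == 0, so (id & 0) == 0 always holds and the
 * loop cannot run off the end of the table. */
static const struct id_entry *lookup_id(uint32_t id)
{
        const struct id_entry *e = id_table;

        while ((id & e->mask) != e->value)
                e++;
        return e;
}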
+ * + * For now these cases are not handled, but they should be! + */ + +#include +#include +#include +#include + +#include + +#include + +/* #define DEBUG 1 */ +#ifdef DEBUG +#define dbg(x...) printk(x) +#else +#define dbg(x...) +#endif + +extern const struct unwind_table_entry __start___unwind[]; +extern const struct unwind_table_entry __stop___unwind[]; + +static spinlock_t unwind_lock; +/* + * the kernel unwind block is not dynamically allocated so that + * we can call unwind_init as early in the bootup process as + * possible (before the slab allocator is initialized) + */ +static struct unwind_table kernel_unwind_table; +static struct unwind_table *unwind_tables, *unwind_tables_end; + + +static inline const struct unwind_table_entry * +find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr) +{ + const struct unwind_table_entry *e = 0; + unsigned long lo, hi, mid; + + addr -= table->base_addr; + + for (lo = 0, hi = table->length; lo < hi; ) + { + mid = (lo + hi) / 2; + e = &table->table[mid]; + if (addr < e->region_start) + hi = mid; + else if (addr > e->region_end) + lo = mid + 1; + else + break; + } + + return e; +} + +static inline const struct unwind_table_entry * +find_unwind_entry(unsigned long addr) +{ + struct unwind_table *table = unwind_tables; + const struct unwind_table_entry *e = NULL; + + if (addr >= kernel_unwind_table.start && + addr <= kernel_unwind_table.end) + e = find_unwind_entry_in_table(&kernel_unwind_table, addr); + else + for (; table; table = table->next) + { + if (addr >= table->start && + addr <= table->end) + e = find_unwind_entry_in_table(table, addr); + if (e) + break; + } + + return e; +} + +static void +unwind_table_init(struct unwind_table *table, const char *name, + unsigned long base_addr, unsigned long gp, + const void *table_start, const void *table_end) +{ + const struct unwind_table_entry *start = table_start; + const struct unwind_table_entry *end = table_end - 1; + + table->name = name; + table->base_addr = base_addr; + table->gp = gp; + table->start = base_addr + start->region_start; + table->end = base_addr + end->region_end; + table->table = (struct unwind_table_entry *)table_start; + table->length = end - start; + table->next = NULL; +} + +void * +unwind_table_add(const char *name, unsigned long base_addr, + unsigned long gp, + const void *start, const void *end) +{ + struct unwind_table *table; + unsigned long flags; + + table = kmalloc(sizeof(struct unwind_table), GFP_USER); + if (table == NULL) + return 0; + unwind_table_init(table, name, base_addr, gp, start, end); + spin_lock_irqsave(&unwind_lock, flags); + if (unwind_tables) + { + unwind_tables_end->next = table; + unwind_tables_end = table; + } + else + { + unwind_tables = unwind_tables_end = table; + } + spin_unlock_irqrestore(&unwind_lock, flags); + + return table; +} + +/* Called from setup_arch to import the kernel unwind info */ +static int unwind_init(void) +{ + long start, stop; + register unsigned long gp __asm__ ("r27"); + + start = (long)&__start___unwind[0]; + stop = (long)&__stop___unwind[0]; + + printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", + start, stop, + (stop - start) / sizeof(struct unwind_table_entry)); + + unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START, + gp, + &__start___unwind[0], &__stop___unwind[0]); +#if 0 + { + int i; + for (i = 0; i < 10; i++) + { + printk("region 0x%x-0x%x\n", + __start___unwind[i].region_start, + __start___unwind[i].region_end); + } + } +#endif + return 0; +} + +static void 
unwind_frame_regs(struct unwind_frame_info *info) +{ + const struct unwind_table_entry *e; + unsigned long npc; + unsigned int insn; + long frame_size = 0; + int looking_for_rp, rpoffset = 0; + + e = find_unwind_entry(info->ip); + if (!e) { + unsigned long sp; + extern char _stext[], _etext[]; + + dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip); + + /* Since we are doing the unwinding blind, we don't know if + we are adjusting the stack correctly or extracting the rp + correctly. The rp is checked to see if it belongs to the + kernel text section, if not we assume we don't have a + correct stack frame and we continue to unwind the stack. + This is not quite correct, and will fail for loadable + modules. */ + sp = info->sp & ~63; + do { + info->prev_sp = sp - 64; + + /* FIXME: what happens if we unwind too far so that + sp no longer falls in a mapped kernel page? */ +#ifndef __LP64__ + info->prev_ip = *(unsigned long *)(info->prev_sp - 20); +#else + info->prev_ip = *(unsigned long *)(info->prev_sp - 16); +#endif + + sp = info->prev_sp; + } while (info->prev_ip < (unsigned long)_stext || + info->prev_ip > (unsigned long)_etext); + } else { + + dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, Save_RP = %d size = %u\n", + e->region_start, e->region_end, e->Save_SP, e->Save_RP, e->Total_frame_size); + + looking_for_rp = e->Save_RP; + + for (npc = e->region_start; + (frame_size < (e->Total_frame_size << 3) || looking_for_rp) && + npc < info->ip; + npc += 4) { + + insn = *(unsigned int *)npc; + + if ((insn & 0xffffc000) == 0x37de0000 || + (insn & 0xffe00000) == 0x6fc00000) { + /* ldo X(sp), sp, or stwm X,D(sp) */ + frame_size += (insn & 0x1 ? -1 << 13 : 0) | + ((insn & 0x3fff) >> 1); + } else if ((insn & 0xffe00008) == 0x7ec00008) { + /* std,ma X,D(sp) */ + frame_size += (insn & 0x1 ? 
-1 << 13 : 0) | + (((insn >> 4) & 0x3ff) << 3); + } else if (insn == 0x6bc23fd9) { + /* stw rp,-20(sp) */ + rpoffset = 20; + looking_for_rp = 0; + } else if (insn == 0x0fc212c1) { + /* std rp,-16(sr0,sp) */ + rpoffset = 16; + looking_for_rp = 0; + } + } + + info->prev_sp = info->sp - frame_size; + if (rpoffset) + info->prev_ip = *(unsigned long *)(info->prev_sp - rpoffset); + } +} + +void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t, + struct pt_regs *regs) +{ + memset(info, 0, sizeof(struct unwind_frame_info)); + info->t = t; + info->sp = regs->ksp; + info->ip = regs->kpc; + + dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n", (int)t->pid, info->sp, info->ip); +} + +void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t) +{ + struct pt_regs *regs = &t->thread.regs; + unwind_frame_init(info, t, regs); +} + +int unwind_once(struct unwind_frame_info *next_frame) +{ + unwind_frame_regs(next_frame); + + if (next_frame->prev_sp == 0 || + next_frame->prev_ip == 0) + return -1; + + next_frame->sp = next_frame->prev_sp; + next_frame->ip = next_frame->prev_ip; + next_frame->prev_sp = 0; + next_frame->prev_ip = 0; + + dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n", (int)next_frame->t->pid, next_frame->sp, next_frame->ip); + + return 0; +} + +int unwind_to_user(struct unwind_frame_info *info) +{ + int ret; + + do { + ret = unwind_once(info); + } while (!ret && !(info->ip & 3)); + + return ret; +} + +module_init(unwind_init); diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c new file mode 100644 index 000000000..c859f1127 --- /dev/null +++ b/arch/ppc/kernel/dma-mapping.c @@ -0,0 +1,439 @@ +/* + * PowerPC version derived from arch/arm/mm/consistent.c + * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) + * + * Copyright (C) 2000 Russell King + * + * Consistent memory allocators. Used for DMA devices that want to + * share uncached memory with the processor core. The function return + * is the virtual address and 'dma_handle' is the physical address. + * Mostly stolen from the ARM port, with some changes for PowerPC. + * -- Dan + * + * Reorganized to get rid of the arch-specific consistent_* functions + * and provide non-coherent implementations for the DMA API. -Matt + * + * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent() + * implementation. This is pulled straight from ARM and barely + * modified. -Matt + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int map_page(unsigned long va, phys_addr_t pa, int flags); + +#include + +/* + * This address range defaults to a value that is safe for all + * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It + * can be further configured for specific applications under + * the "Advanced Setup" menu. 
-Matt + */ +#define CONSISTENT_BASE (CONFIG_CONSISTENT_START) +#define CONSISTENT_END (CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE) +#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) + +/* + * This is the page table (2MB) covering uncached, DMA consistent allocations + */ +static pte_t *consistent_pte; +static spinlock_t consistent_lock = SPIN_LOCK_UNLOCKED; + +/* + * VM region handling support. + * + * This should become something generic, handling VM region allocations for + * vmalloc and similar (ioremap, module space, etc). + * + * I envisage vmalloc()'s supporting vm_struct becoming: + * + * struct vm_struct { + * struct vm_region region; + * unsigned long flags; + * struct page **pages; + * unsigned int nr_pages; + * unsigned long phys_addr; + * }; + * + * get_vm_area() would then call vm_region_alloc with an appropriate + * struct vm_region head (eg): + * + * struct vm_region vmalloc_head = { + * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), + * .vm_start = VMALLOC_START, + * .vm_end = VMALLOC_END, + * }; + * + * However, vmalloc_head.vm_start is variable (typically, it is dependent on + * the amount of RAM found at boot time.) I would imagine that get_vm_area() + * would have to initialise this each time prior to calling vm_region_alloc(). + */ +struct vm_region { + struct list_head vm_list; + unsigned long vm_start; + unsigned long vm_end; +}; + +static struct vm_region consistent_head = { + .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), + .vm_start = CONSISTENT_BASE, + .vm_end = CONSISTENT_END, +}; + +static struct vm_region * +vm_region_alloc(struct vm_region *head, size_t size, int gfp) +{ + unsigned long addr = head->vm_start, end = head->vm_end - size; + unsigned long flags; + struct vm_region *c, *new; + + new = kmalloc(sizeof(struct vm_region), gfp); + if (!new) + goto out; + + spin_lock_irqsave(&consistent_lock, flags); + + list_for_each_entry(c, &head->vm_list, vm_list) { + if ((addr + size) < addr) + goto nospc; + if ((addr + size) <= c->vm_start) + goto found; + addr = c->vm_end; + if (addr > end) + goto nospc; + } + + found: + /* + * Insert this entry _before_ the one we found. + */ + list_add_tail(&new->vm_list, &c->vm_list); + new->vm_start = addr; + new->vm_end = addr + size; + + spin_unlock_irqrestore(&consistent_lock, flags); + return new; + + nospc: + spin_unlock_irqrestore(&consistent_lock, flags); + kfree(new); + out: + return NULL; +} + +static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr) +{ + struct vm_region *c; + + list_for_each_entry(c, &head->vm_list, vm_list) { + if (c->vm_start == addr) + goto out; + } + c = NULL; + out: + return c; +} + +/* + * Allocate DMA-coherent memory space and return both the kernel remapped + * virtual and bus address for that space. 
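vm_region_alloc above keeps the regions of the consistent mapping sorted by address and hands out the first gap large enough for the request, inserting the new region just before the neighbour it was squeezed in front of. The gap search at the heart of that first-fit policy is easy to model over a sorted array (locking and list insertion omitted; names are illustrative):

#include <stddef.h>

struct range { unsigned long start, end; };     /* an allocated [start, end) */

/* Return the lowest address in [base, limit) where 'size' bytes fit
 * between the existing, address-sorted ranges, or 0 when nothing fits. */
static unsigned long find_gap(const struct range *r, size_t n,
                              unsigned long base, unsigned long limit,
                              unsigned long size)
{
        unsigned long addr = base;
        size_t i;

        for (i = 0; i < n; i++) {
                if (addr + size < addr)         /* address wrapped: give up */
                        return 0;
                if (addr + size <= r[i].start)  /* gap before range i fits */
                        return addr;
                addr = r[i].end;                /* otherwise try after it */
                if (addr > limit - size)
                        return 0;
        }
        return (addr + size >= addr && addr + size <= limit) ? addr : 0;
}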
+ */ +void * +__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp) +{ + struct page *page; + struct vm_region *c; + unsigned long order; + u64 mask = 0x00ffffff, limit; /* ISA default */ + + if (!consistent_pte) { + printk(KERN_ERR "%s: not initialised\n", __func__); + dump_stack(); + return NULL; + } + + size = PAGE_ALIGN(size); + limit = (mask + 1) & ~mask; + if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) { + printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n", + size, mask); + return NULL; + } + + order = get_order(size); + + if (mask != 0xffffffff) + gfp |= GFP_DMA; + + page = alloc_pages(gfp, order); + if (!page) + goto no_page; + + /* + * Invalidate any data that might be lurking in the + * kernel direct-mapped region for device DMA. + */ + { + unsigned long kaddr = (unsigned long)page_address(page); + memset(page_address(page), 0, size); + flush_dcache_range(kaddr, kaddr + size); + } + + /* + * Allocate a virtual address in the consistent mapping region. + */ + c = vm_region_alloc(&consistent_head, size, + gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); + if (c) { + pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start); + struct page *end = page + (1 << order); + + /* + * Set the "dma handle" + */ + *handle = page_to_bus(page); + + do { + BUG_ON(!pte_none(*pte)); + + set_page_count(page, 1); + SetPageReserved(page); + set_pte(pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL))); + page++; + pte++; + } while (size -= PAGE_SIZE); + + /* + * Free the otherwise unused pages. + */ + while (page < end) { + set_page_count(page, 1); + __free_page(page); + page++; + } + + return (void *)c->vm_start; + } + + if (page) + __free_pages(page, order); + no_page: + return NULL; +} + +/* + * free a page as defined by the above mapping. + */ +void __dma_free_coherent(size_t size, void *vaddr) +{ + struct vm_region *c; + unsigned long flags; + pte_t *ptep; + + size = PAGE_ALIGN(size); + + spin_lock_irqsave(&consistent_lock, flags); + + c = vm_region_find(&consistent_head, (unsigned long)vaddr); + if (!c) + goto no_area; + + if ((c->vm_end - c->vm_start) != size) { + printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", + __func__, c->vm_end - c->vm_start, size); + dump_stack(); + size = c->vm_end - c->vm_start; + } + + ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); + do { + pte_t pte = ptep_get_and_clear(ptep); + unsigned long pfn; + + ptep++; + + if (!pte_none(pte) && pte_present(pte)) { + pfn = pte_pfn(pte); + + if (pfn_valid(pfn)) { + struct page *page = pfn_to_page(pfn); + ClearPageReserved(page); + + __free_page(page); + continue; + } + } + + printk(KERN_CRIT "%s: bad page in kernel page table\n", + __func__); + } while (size -= PAGE_SIZE); + + flush_tlb_kernel_range(c->vm_start, c->vm_end); + + list_del(&c->vm_list); + + spin_unlock_irqrestore(&consistent_lock, flags); + + kfree(c); + return; + + no_area: + spin_unlock_irqrestore(&consistent_lock, flags); + printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", + __func__, vaddr); + dump_stack(); +} +EXPORT_SYMBOL(dma_free_coherent); + +/* + * Initialise the consistent memory allocation. 
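Because alloc_pages() only hands out power-of-two blocks, __dma_alloc_coherent above rounds the request up to an allocation order, remaps just the pages it actually needs into the uncached window, and immediately returns the unused tail of the block to the page allocator. The arithmetic is worth seeing on its own; this sketch assumes 4 KiB pages and reimplements the order calculation with plain shifts:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order such that (1 << order) pages cover 'size' bytes. */
static unsigned int order_for(unsigned long size)
{
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        unsigned int order = 0;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        unsigned long size  = 20 * 1024;        /* a 20 KiB request */
        unsigned int  order = order_for(size);  /* -> order 3, an 8-page block */
        unsigned long used  = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;  /* 5 pages */
        unsigned long freed = (1UL << order) - used;    /* 3 tail pages freed */

        printf("order %u: %lu pages mapped, %lu pages given back\n",
               order, used, freed);
        return 0;
}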
+ */ +static int __init dma_alloc_init(void) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int ret = 0; + + spin_lock(&init_mm.page_table_lock); + + do { + pgd = pgd_offset(&init_mm, CONSISTENT_BASE); + pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE); + if (!pmd) { + printk(KERN_ERR "%s: no pmd tables\n", __func__); + ret = -ENOMEM; + break; + } + WARN_ON(!pmd_none(*pmd)); + + pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE); + if (!pte) { + printk(KERN_ERR "%s: no pte tables\n", __func__); + ret = -ENOMEM; + break; + } + + consistent_pte = pte; + } while (0); + + spin_unlock(&init_mm.page_table_lock); + + return ret; +} + +core_initcall(dma_alloc_init); + +/* + * make an area consistent. + */ +void __dma_sync(void *vaddr, size_t size, int direction) +{ + unsigned long start = (unsigned long)vaddr; + unsigned long end = start + size; + + switch (direction) { + case DMA_NONE: + BUG(); + case DMA_FROM_DEVICE: /* invalidate only */ + invalidate_dcache_range(start, end); + break; + case DMA_TO_DEVICE: /* writeback only */ + clean_dcache_range(start, end); + break; + case DMA_BIDIRECTIONAL: /* writeback and invalidate */ + flush_dcache_range(start, end); + break; + } +} + +#ifdef CONFIG_HIGHMEM +/* + * __dma_sync_page() implementation for systems using highmem. + * In this case, each page of a buffer must be kmapped/kunmapped + * in order to have a virtual address for __dma_sync(). This must + * not sleep so kmap_atmomic()/kunmap_atomic() are used. + * + * Note: yes, it is possible and correct to have a buffer extend + * beyond the first page. + */ +static inline void __dma_sync_page_highmem(struct page *page, + unsigned long offset, size_t size, int direction) +{ + size_t seg_size = min((size_t)PAGE_SIZE, size) - offset; + size_t cur_size = seg_size; + unsigned long flags, start, seg_offset = offset; + int nr_segs = PAGE_ALIGN(size + (PAGE_SIZE - offset))/PAGE_SIZE; + int seg_nr = 0; + + local_irq_save(flags); + + do { + start = (unsigned long)kmap_atomic(page + seg_nr, + KM_PPC_SYNC_PAGE) + seg_offset; + + /* Sync this buffer segment */ + __dma_sync((void *)start, seg_size, direction); + kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE); + seg_nr++; + + /* Calculate next buffer segment size */ + seg_size = min((size_t)PAGE_SIZE, size - cur_size); + + /* Add the segment size to our running total */ + cur_size += seg_size; + seg_offset = 0; + } while (seg_nr < nr_segs); + + local_irq_restore(flags); +} +#endif /* CONFIG_HIGHMEM */ + +/* + * __dma_sync_page makes memory consistent. identical to __dma_sync, but + * takes a struct page instead of a virtual address + */ +void __dma_sync_page(struct page *page, unsigned long offset, + size_t size, int direction) +{ +#ifdef CONFIG_HIGHMEM + __dma_sync_page_highmem(page, offset, size, direction); +#else + unsigned long start = (unsigned long)page_address(page) + offset; + __dma_sync((void *)start, size, direction); +#endif +} diff --git a/arch/ppc/kernel/vecemu.c b/arch/ppc/kernel/vecemu.c new file mode 100644 index 000000000..1430ef592 --- /dev/null +++ b/arch/ppc/kernel/vecemu.c @@ -0,0 +1,346 @@ +/* + * Routines to emulate some Altivec/VMX instructions, specifically + * those that can trap when given denormalized operands in Java mode. 
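+ *
+ * Decode example (for orientation): a vaddfp vD,vA,vB opcode has
+ * primary opcode 4 (instr >> 26), low minor opcode bits of 10
+ * (instr & 0x3f) and a vc field of 0, which emulate_altivec() below
+ * dispatches to the vaddfp() helper in vector.S.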
+ */ +#include +#include +#include +#include +#include +#include + +/* Functions in vector.S */ +extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b); +extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b); +extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); +extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); +extern void vrefp(vector128 *dst, vector128 *src); +extern void vrsqrtefp(vector128 *dst, vector128 *src); +extern void vexptep(vector128 *dst, vector128 *src); + +static unsigned int exp2s[8] = { + 0x800000, + 0x8b95c2, + 0x9837f0, + 0xa5fed7, + 0xb504f3, + 0xc5672a, + 0xd744fd, + 0xeac0c7 +}; + +/* + * Computes an estimate of 2^x. The `s' argument is the 32-bit + * single-precision floating-point representation of x. + */ +static unsigned int eexp2(unsigned int s) +{ + int exp, pwr; + unsigned int mant, frac; + + /* extract exponent field from input */ + exp = ((s >> 23) & 0xff) - 127; + if (exp > 7) { + /* check for NaN input */ + if (exp == 128 && (s & 0x7fffff) != 0) + return s | 0x400000; /* return QNaN */ + /* 2^-big = 0, 2^+big = +Inf */ + return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */ + } + if (exp < -23) + return 0x3f800000; /* 1.0 */ + + /* convert to fixed point integer in 9.23 representation */ + pwr = (s & 0x7fffff) | 0x800000; + if (exp > 0) + pwr <<= exp; + else + pwr >>= -exp; + if (s & 0x80000000) + pwr = -pwr; + + /* extract integer part, which becomes exponent part of result */ + exp = (pwr >> 23) + 126; + if (exp >= 254) + return 0x7f800000; + if (exp < -23) + return 0; + + /* table lookup on top 3 bits of fraction to get mantissa */ + mant = exp2s[(pwr >> 20) & 7]; + + /* linear interpolation using remaining 20 bits of fraction */ + asm("mulhwu %0,%1,%2" : "=r" (frac) + : "r" (pwr << 12), "r" (0x172b83ff)); + asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant)); + mant += frac; + + if (exp >= 0) + return mant + (exp << 23); + + /* denormalized result */ + exp = -exp; + mant += 1 << (exp - 1); + return mant >> exp; +} + +/* + * Computes an estimate of log_2(x). The `s' argument is the 32-bit + * single-precision floating-point representation of x. 
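+ *
+ * Worked example: for an input of 0x40000000 (2.0) the code below
+ * reduces to mant = 0x800000 and exp = 1 << 23 and returns 0x3f800000
+ * (1.0); a +0 or -0 input returns 0xff800000 (-Inf) explicitly.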
+ */ +static unsigned int elog2(unsigned int s) +{ + int exp, mant, lz, frac; + + exp = s & 0x7f800000; + mant = s & 0x7fffff; + if (exp == 0x7f800000) { /* Inf or NaN */ + if (mant != 0) + s |= 0x400000; /* turn NaN into QNaN */ + return s; + } + if ((exp | mant) == 0) /* +0 or -0 */ + return 0xff800000; /* return -Inf */ + + if (exp == 0) { + /* denormalized */ + asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant)); + mant <<= lz - 8; + exp = (-118 - lz) << 23; + } else { + mant |= 0x800000; + exp -= 127 << 23; + } + + if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */ + exp |= 0x400000; /* 0.5 * 2^23 */ + asm("mulhwu %0,%1,%2" : "=r" (mant) + : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */ + } + if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */ + exp |= 0x200000; /* 0.25 * 2^23 */ + asm("mulhwu %0,%1,%2" : "=r" (mant) + : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */ + } + if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */ + exp |= 0x100000; /* 0.125 * 2^23 */ + asm("mulhwu %0,%1,%2" : "=r" (mant) + : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */ + } + if (mant > 0x800000) { /* 1.0 * 2^23 */ + /* calculate (mant - 1) * 1.381097463 */ + /* 1.381097463 == 0.125 / (2^0.125 - 1) */ + asm("mulhwu %0,%1,%2" : "=r" (frac) + : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a)); + exp += frac; + } + s = exp & 0x80000000; + if (exp != 0) { + if (s) + exp = -exp; + asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp)); + lz = 8 - lz; + if (lz > 0) + exp >>= lz; + else if (lz < 0) + exp <<= -lz; + s += ((lz + 126) << 23) + exp; + } + return s; +} + +#define VSCR_SAT 1 + +static int ctsxs(unsigned int x, int scale, unsigned int *vscrp) +{ + int exp, mant; + + exp = (x >> 23) & 0xff; + mant = x & 0x7fffff; + if (exp == 255 && mant != 0) + return 0; /* NaN -> 0 */ + exp = exp - 127 + scale; + if (exp < 0) + return 0; /* round towards zero */ + if (exp >= 31) { + /* saturate, unless the result would be -2^31 */ + if (x + (scale << 23) != 0xcf000000) + *vscrp |= VSCR_SAT; + return (x & 0x80000000)? 0x80000000: 0x7fffffff; + } + mant |= 0x800000; + mant = (mant << 7) >> (30 - exp); + return (x & 0x80000000)? 
-mant: mant; +} + +static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp) +{ + int exp; + unsigned int mant; + + exp = (x >> 23) & 0xff; + mant = x & 0x7fffff; + if (exp == 255 && mant != 0) + return 0; /* NaN -> 0 */ + exp = exp - 127 + scale; + if (exp < 0) + return 0; /* round towards zero */ + if (x & 0x80000000) { + /* negative => saturate to 0 */ + *vscrp |= VSCR_SAT; + return 0; + } + if (exp >= 32) { + /* saturate */ + *vscrp |= VSCR_SAT; + return 0xffffffff; + } + mant |= 0x800000; + mant = (mant << 8) >> (31 - exp); + return mant; +} + +/* Round to floating integer, towards 0 */ +static unsigned int rfiz(unsigned int x) +{ + int exp; + + exp = ((x >> 23) & 0xff) - 127; + if (exp == 128 && (x & 0x7fffff) != 0) + return x | 0x400000; /* NaN -> make it a QNaN */ + if (exp >= 23) + return x; /* it's an integer already (or Inf) */ + if (exp < 0) + return x & 0x80000000; /* |x| < 1.0 rounds to 0 */ + return x & ~(0x7fffff >> exp); +} + +/* Round to floating integer, towards +/- Inf */ +static unsigned int rfii(unsigned int x) +{ + int exp, mask; + + exp = ((x >> 23) & 0xff) - 127; + if (exp == 128 && (x & 0x7fffff) != 0) + return x | 0x400000; /* NaN -> make it a QNaN */ + if (exp >= 23) + return x; /* it's an integer already (or Inf) */ + if ((x & 0x7fffffff) == 0) + return x; /* +/-0 -> +/-0 */ + if (exp < 0) + /* 0 < |x| < 1.0 rounds to +/- 1.0 */ + return (x & 0x80000000) | 0x3f800000; + mask = 0x7fffff >> exp; + /* mantissa overflows into exponent - that's OK, + it can't overflow into the sign bit */ + return (x + mask) & ~mask; +} + +/* Round to floating integer, to nearest */ +static unsigned int rfin(unsigned int x) +{ + int exp, half; + + exp = ((x >> 23) & 0xff) - 127; + if (exp == 128 && (x & 0x7fffff) != 0) + return x | 0x400000; /* NaN -> make it a QNaN */ + if (exp >= 23) + return x; /* it's an integer already (or Inf) */ + if (exp < -1) + return x & 0x80000000; /* |x| < 0.5 -> +/-0 */ + if (exp == -1) + /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */ + return (x & 0x80000000) | 0x3f800000; + half = 0x400000 >> exp; + /* add 0.5 to the magnitude and chop off the fraction bits */ + return (x + half) & ~(0x7fffff >> exp); +} + +int +emulate_altivec(struct pt_regs *regs) +{ + unsigned int instr, i; + unsigned int va, vb, vc, vd; + vector128 *vrs; + + if (get_user(instr, (unsigned int *) regs->nip)) + return -EFAULT; + if ((instr >> 26) != 4) + return -EINVAL; /* not an altivec instruction */ + vd = (instr >> 21) & 0x1f; + va = (instr >> 16) & 0x1f; + vb = (instr >> 11) & 0x1f; + vc = (instr >> 6) & 0x1f; + + vrs = current->thread.vr; + switch (instr & 0x3f) { + case 10: + switch (vc) { + case 0: /* vaddfp */ + vaddfp(&vrs[vd], &vrs[va], &vrs[vb]); + break; + case 1: /* vsubfp */ + vsubfp(&vrs[vd], &vrs[va], &vrs[vb]); + break; + case 4: /* vrefp */ + vrefp(&vrs[vd], &vrs[vb]); + break; + case 5: /* vrsqrtefp */ + vrsqrtefp(&vrs[vd], &vrs[vb]); + break; + case 6: /* vexptefp */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = eexp2(vrs[vb].u[i]); + break; + case 7: /* vlogefp */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = elog2(vrs[vb].u[i]); + break; + case 8: /* vrfin */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = rfin(vrs[vb].u[i]); + break; + case 9: /* vrfiz */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = rfiz(vrs[vb].u[i]); + break; + case 10: /* vrfip */ + for (i = 0; i < 4; ++i) { + u32 x = vrs[vb].u[i]; + x = (x & 0x80000000)? 
rfiz(x): rfii(x); + vrs[vd].u[i] = x; + } + break; + case 11: /* vrfim */ + for (i = 0; i < 4; ++i) { + u32 x = vrs[vb].u[i]; + x = (x & 0x80000000)? rfii(x): rfiz(x); + vrs[vd].u[i] = x; + } + break; + case 14: /* vctuxs */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va, + &current->thread.vscr.u[3]); + break; + case 15: /* vctsxs */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va, + &current->thread.vscr.u[3]); + break; + default: + return -EINVAL; + } + break; + case 46: /* vmaddfp */ + vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]); + break; + case 47: /* vnmsubfp */ + vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]); + break; + default: + return -EINVAL; + } + + return 0; +} diff --git a/arch/ppc/kernel/vector.S b/arch/ppc/kernel/vector.S new file mode 100644 index 000000000..d8fe6b5fb --- /dev/null +++ b/arch/ppc/kernel/vector.S @@ -0,0 +1,217 @@ +#include +#include + +/* + * The routines below are in assembler so we can closely control the + * usage of floating-point registers. These routines must be called + * with preempt disabled. + */ + .data +fpzero: + .long 0 +fpone: + .long 0x3f800000 /* 1.0 in single-precision FP */ +fphalf: + .long 0x3f000000 /* 0.5 in single-precision FP */ + + .text +/* + * Internal routine to enable floating point and set FPSCR to 0. + * Don't call it from C; it doesn't use the normal calling convention. + */ +fpenable: + mfmsr r10 + ori r11,r10,MSR_FP + mtmsr r11 + isync + stfd fr0,24(r1) + stfd fr1,16(r1) + stfd fr31,8(r1) + lis r11,fpzero@ha + mffs fr31 + lfs fr1,fpzero@l(r11) + mtfsf 0xff,fr1 + blr + +fpdisable: + mtfsf 0xff,fr31 + lfd fr31,8(r1) + lfd fr1,16(r1) + lfd fr0,24(r1) + mtmsr r10 + isync + blr + +/* + * Vector add, floating point. + */ + .globl vaddfp +vaddfp: + stwu r1,-32(r1) + mflr r0 + stw r0,36(r1) + bl fpenable + li r0,4 + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + lfsx fr1,r5,r6 + fadds fr0,fr0,fr1 + stfsx fr0,r3,r6 + addi r6,r6,4 + bdnz 1b + bl fpdisable + lwz r0,36(r1) + mtlr r0 + addi r1,r1,32 + blr + +/* + * Vector subtract, floating point. + */ + .globl vsubfp +vsubfp: + stwu r1,-32(r1) + mflr r0 + stw r0,36(r1) + bl fpenable + li r0,4 + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + lfsx fr1,r5,r6 + fsubs fr0,fr0,fr1 + stfsx fr0,r3,r6 + addi r6,r6,4 + bdnz 1b + bl fpdisable + lwz r0,36(r1) + mtlr r0 + addi r1,r1,32 + blr + +/* + * Vector multiply and add, floating point. + */ + .globl vmaddfp +vmaddfp: + stwu r1,-48(r1) + mflr r0 + stw r0,52(r1) + bl fpenable + stfd fr2,32(r1) + li r0,4 + mtctr r0 + li r7,0 +1: lfsx fr0,r4,r7 + lfsx fr1,r5,r7 + lfsx fr2,r6,r7 + fmadds fr0,fr0,fr1,fr2 + stfsx fr0,r3,r7 + addi r7,r7,4 + bdnz 1b + lfd fr2,32(r1) + bl fpdisable + lwz r0,52(r1) + mtlr r0 + addi r1,r1,48 + blr + +/* + * Vector negative multiply and subtract, floating point. + */ + .globl vnmsubfp +vnmsubfp: + stwu r1,-48(r1) + mflr r0 + stw r0,52(r1) + bl fpenable + stfd fr2,32(r1) + li r0,4 + mtctr r0 + li r7,0 +1: lfsx fr0,r4,r7 + lfsx fr1,r5,r7 + lfsx fr2,r6,r7 + fnmsubs fr0,fr0,fr1,fr2 + stfsx fr0,r3,r7 + addi r7,r7,4 + bdnz 1b + lfd fr2,32(r1) + bl fpdisable + lwz r0,52(r1) + mtlr r0 + addi r1,r1,48 + blr + +/* + * Vector reciprocal estimate. We just compute 1.0/x. + * r3 -> destination, r4 -> source.
+ */ + .globl vrefp +vrefp: + stwu r1,-32(r1) + mflr r0 + stw r0,36(r1) + bl fpenable + lis r9,fpone@ha + li r0,4 + lfs fr1,fpone@l(r9) + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + fdivs fr0,fr1,fr0 + stfsx fr0,r3,r6 + addi r6,r6,4 + bdnz 1b + bl fpdisable + lwz r0,36(r1) + mtlr r0 + addi r1,r1,32 + blr + +/* + * Vector reciprocal square-root estimate, floating point. + * We use the frsqrte instruction for the initial estimate followed + * by 2 iterations of Newton-Raphson to get sufficient accuracy. + * r3 -> destination, r4 -> source. + */ + .globl vrsqrtefp +vrsqrtefp: + stwu r1,-64(r1) + mflr r0 + stw r0,68(r1) + bl fpenable + stfd fr2,32(r1) + stfd fr3,40(r1) + stfd fr4,48(r1) + stfd fr5,56(r1) + lis r9,fpone@ha + lis r8,fphalf@ha + li r0,4 + lfs fr4,fpone@l(r9) + lfs fr5,fphalf@l(r8) + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + frsqrte fr1,fr0 /* r = frsqrte(s) */ + fmuls fr3,fr1,fr0 /* r * s */ + fmuls fr2,fr1,fr5 /* r * 0.5 */ + fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ + fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ + fmuls fr3,fr1,fr0 /* r * s */ + fmuls fr2,fr1,fr5 /* r * 0.5 */ + fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ + fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ + stfsx fr1,r3,r6 + addi r6,r6,4 + bdnz 1b + lfd fr5,56(r1) + lfd fr4,48(r1) + lfd fr3,40(r1) + lfd fr2,32(r1) + bl fpdisable + lwz r0,68(r1) + mtlr r0 + addi r1,r1,64 + blr diff --git a/arch/ppc/platforms/4xx/bubinga.c b/arch/ppc/platforms/4xx/bubinga.c new file mode 100644 index 000000000..3678abf86 --- /dev/null +++ b/arch/ppc/platforms/4xx/bubinga.c @@ -0,0 +1,263 @@ +/* + * Support for IBM PPC 405EP evaluation board (Bubinga). + * + * Author: SAW (IBM), derived from walnut.c. + * Maintained by MontaVista Software + * + * 2003 (c) MontaVista Software Inc. This file is licensed under the + * terms of the GNU General Public License version 2. This program is + * licensed "as is" without any warranty of any kind, whether express + * or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + +extern bd_t __res; + +void *bubinga_rtc_base; + +/* Some IRQs unique to the board + * Used by the generic 405 PCI setup functions in ppc4xx_pci.c + */ +int __init +ppc405_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) +{ + static char pci_irq_table[][4] = + /* + * PCI IDSEL/INTPIN->INTLINE + * A B C D + */ + { + {28, 28, 28, 28}, /* IDSEL 1 - PCI slot 1 */ + {29, 29, 29, 29}, /* IDSEL 2 - PCI slot 2 */ + {30, 30, 30, 30}, /* IDSEL 3 - PCI slot 3 */ + {31, 31, 31, 31}, /* IDSEL 4 - PCI slot 4 */ + }; + + const long min_idsel = 1, max_idsel = 4, irqs_per_slot = 4; + return PCI_IRQ_TABLE_LOOKUP; +}; + +/* The serial clock for the chip is an internal clock determined by + * different clock speeds/dividers. + * Calculate the proper input baud rate and setup the serial driver. + */ +static void __init +bubinga_early_serial_map(void) +{ + u32 uart_div; + int uart_clock; + struct uart_port port; + + /* Calculate the serial clock input frequency + * + * The base baud is the PLL OUTA (provided in the board info + * structure) divided by the external UART Divisor, divided + * by 16.
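+ *
+ * Illustrative numbers only: if bi_pllouta_freq were 266,666,666 Hz and
+ * the divisor field read back as 2, uart_clock would come out to
+ * 133,333,333 Hz, and the serial core would then derive the base baud
+ * as uart_clock / 16.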
+ */ + uart_div = (mfdcr(DCRN_CPC0_UCR_BASE) & DCRN_CPC0_UCR_U0DIV); + uart_clock = __res.bi_pllouta_freq / uart_div; + + /* Setup serial port access */ + memset(&port, 0, sizeof(port)); + port.membase = (void*)ACTING_UART0_IO_BASE; + port.irq = ACTING_UART0_INT; + port.uartclk = uart_clock; + port.regshift = 0; + port.iotype = SERIAL_IO_MEM; + port.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST; + port.line = 0; + + if (early_serial_setup(&port) != 0) { + printk("Early serial init of port 0 failed\n"); + } + + port.membase = (void*)ACTING_UART1_IO_BASE; + port.irq = ACTING_UART1_INT; + port.line = 1; + + if (early_serial_setup(&port) != 0) { + printk("Early serial init of port 1 failed\n"); + } +} + +void __init +bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip) +{ + + unsigned int bar_response, bar; + /* + * Expected PCI mapping: + * + * PLB addr PCI memory addr + * --------------------- --------------------- + * 0000'0000 - 7fff'ffff <--- 0000'0000 - 7fff'ffff + * 8000'0000 - Bfff'ffff ---> 8000'0000 - Bfff'ffff + * + * PLB addr PCI io addr + * --------------------- --------------------- + * e800'0000 - e800'ffff ---> 0000'0000 - 0001'0000 + * + * The following code is simplified by assuming that the bootrom + * has been well behaved in following this mapping. + */ + +#ifdef DEBUG + int i; + + printk("ioremap PCLIO_BASE = 0x%x\n", pcip); + printk("PCI bridge regs before fixup \n"); + for (i = 0; i <= 3; i++) { + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].ma))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].la))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].pcila))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].pciha))); + } + printk(" ptm1ms\t0x%x\n", in_le32(&(pcip->ptm1ms))); + printk(" ptm1la\t0x%x\n", in_le32(&(pcip->ptm1la))); + printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms))); + printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la))); + +#endif + + /* added for IBM boot rom version 1.15 bios bar changes -AK */ + + /* Disable region first */ + out_le32((void *) &(pcip->pmm[0].ma), 0x00000000); + /* PLB starting addr, PCI: 0x80000000 */ + out_le32((void *) &(pcip->pmm[0].la), 0x80000000); + /* PCI start addr, 0x80000000 */ + out_le32((void *) &(pcip->pmm[0].pcila), PPC405_PCI_MEM_BASE); + /* 512MB range of PLB to PCI */ + out_le32((void *) &(pcip->pmm[0].pciha), 0x00000000); + /* Enable no pre-fetch, enable region */ + out_le32((void *) &(pcip->pmm[0].ma), ((0xffffffff - + (PPC405_PCI_UPPER_MEM - + PPC405_PCI_MEM_BASE)) | 0x01)); + + /* Disable region one */ + out_le32((void *) &(pcip->pmm[1].ma), 0x00000000); + out_le32((void *) &(pcip->pmm[1].la), 0x00000000); + out_le32((void *) &(pcip->pmm[1].pcila), 0x00000000); + out_le32((void *) &(pcip->pmm[1].pciha), 0x00000000); + out_le32((void *) &(pcip->pmm[1].ma), 0x00000000); + out_le32((void *) &(pcip->ptm1ms), 0x00000001); + + /* Disable region two */ + out_le32((void *) &(pcip->pmm[2].ma), 0x00000000); + out_le32((void *) &(pcip->pmm[2].la), 0x00000000); + out_le32((void *) &(pcip->pmm[2].pcila), 0x00000000); + out_le32((void *) &(pcip->pmm[2].pciha), 0x00000000); + out_le32((void *) &(pcip->pmm[2].ma), 0x00000000); + out_le32((void *) &(pcip->ptm2ms), 0x00000000); + out_le32((void *) &(pcip->ptm2la), 0x00000000); + + /* Zero config bars */ + for (bar = PCI_BASE_ADDRESS_1; bar <= PCI_BASE_ADDRESS_2; bar += 4) { + early_write_config_dword(hose, hose->first_busno, + PCI_FUNC(hose->first_busno), bar, + 0x00000000); + early_read_config_dword(hose, hose->first_busno, + 
PCI_FUNC(hose->first_busno), bar, + &bar_response); + DBG("BUS %d, device %d, Function %d bar 0x%8.8x is 0x%8.8x\n", + hose->first_busno, PCI_SLOT(hose->first_busno), + PCI_FUNC(hose->first_busno), bar, bar_response); + } + /* end work arround */ + +#ifdef DEBUG + printk("PCI bridge regs after fixup \n"); + for (i = 0; i <= 3; i++) { + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].ma))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].la))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].pcila))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].pciha))); + } + printk(" ptm1ms\t0x%x\n", in_le32(&(pcip->ptm1ms))); + printk(" ptm1la\t0x%x\n", in_le32(&(pcip->ptm1la))); + printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms))); + printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la))); + +#endif +} + +void __init +bubinga_setup_arch(void) +{ + ppc4xx_setup_arch(); + + ibm_ocp_set_emac(0, 1); + + bubinga_early_serial_map(); + + /* RTC step for the evb405ep */ + bubinga_rtc_base = (void *) BUBINGA_RTC_VADDR; + TODC_INIT(TODC_TYPE_DS1743, bubinga_rtc_base, bubinga_rtc_base, + bubinga_rtc_base, 8); + /* Identify the system */ + printk("IBM Bubinga port (MontaVista Software, Inc. )\n"); +} + +void __init +bubinga_map_io(void) +{ + ppc4xx_map_io(); + io_block_mapping(BUBINGA_RTC_VADDR, + BUBINGA_RTC_PADDR, BUBINGA_RTC_SIZE, _PAGE_IO); +} + +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + ppc4xx_init(r3, r4, r5, r6, r7); + + ppc_md.setup_arch = bubinga_setup_arch; + ppc_md.setup_io_mappings = bubinga_map_io; + +#ifdef CONFIG_GEN_RTC + ppc_md.time_init = todc_time_init; + ppc_md.set_rtc_time = todc_set_rtc_time; + ppc_md.get_rtc_time = todc_get_rtc_time; + ppc_md.nvram_read_val = todc_direct_read_val; + ppc_md.nvram_write_val = todc_direct_write_val; +#endif +#ifdef CONFIG_KGDB + ppc_md.early_serial_map = bubinga_early_serial_map; +#endif +} + diff --git a/arch/ppc/platforms/4xx/bubinga.h b/arch/ppc/platforms/4xx/bubinga.h new file mode 100644 index 000000000..b1df856f8 --- /dev/null +++ b/arch/ppc/platforms/4xx/bubinga.h @@ -0,0 +1,69 @@ +/* + * Support for IBM PPC 405EP evaluation board (Bubinga). + * + * Author: SAW (IBM), derived from walnut.h. + * Maintained by MontaVista Software + * + * 2003 (c) MontaVista Softare Inc. This file is licensed under the + * terms of the GNU General Public License version 2. This program is + * licensed "as is" without any warranty of any kind, whether express + * or implied. + */ + +#ifdef __KERNEL__ +#ifndef __BUBINGA_H__ +#define __BUBINGA_H__ + +/* 405EP */ +#include + +#ifndef __ASSEMBLY__ +/* + * Data structure defining board information maintained by the boot + * ROM on IBM's evaluation board. An effort has been made to + * keep the field names consistent with the 8xx 'bd_t' board info + * structures. 
+ */ + +typedef struct board_info { + unsigned char bi_s_version[4]; /* Version of this structure */ + unsigned char bi_r_version[30]; /* Version of the IBM ROM */ + unsigned int bi_memsize; /* DRAM installed, in bytes */ + unsigned char bi_enetaddr[2][6]; /* Local Ethernet MAC address */ unsigned char bi_pci_enetaddr[6]; /* PCI Ethernet MAC address */ + unsigned int bi_intfreq; /* Processor speed, in Hz */ + unsigned int bi_busfreq; /* PLB Bus speed, in Hz */ + unsigned int bi_pci_busfreq; /* PCI Bus speed, in Hz */ + unsigned int bi_opb_busfreq; /* OPB Bus speed, in Hz */ + unsigned int bi_pllouta_freq; /* PLL OUTA speed, in Hz */ +} bd_t; + +/* Some 4xx parts use a different timebase frequency from the internal clock. +*/ +#define bi_tbfreq bi_intfreq + + +/* Memory map for the Bubinga board. + * Generic 4xx plus RTC. + */ + +extern void *bubinga_rtc_base; +#define BUBINGA_RTC_PADDR ((uint)0xf0000000) +#define BUBINGA_RTC_VADDR BUBINGA_RTC_PADDR +#define BUBINGA_RTC_SIZE ((uint)8*1024) + +/* The UART clock is based off an internal clock - + * define BASE_BAUD based on the internal clock and divider(s). + * Since BASE_BAUD must be a constant, we will initialize it + * using clock/divider values which OpenBIOS initializes + * for typical configurations at various CPU speeds. + * The base baud is calculated as (FWDA / EXT UART DIV / 16) + */ +#define BASE_BAUD 0 + +#define BUBINGA_FPGA_BASE 0xF0300000 + +#define PPC4xx_MACHINE_NAME "IBM Bubinga" + +#endif /* !__ASSEMBLY__ */ +#endif /* __BUBINGA_H__ */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/platforms/4xx/ibm405ep.c b/arch/ppc/platforms/4xx/ibm405ep.c new file mode 100644 index 000000000..fb48e8254 --- /dev/null +++ b/arch/ppc/platforms/4xx/ibm405ep.c @@ -0,0 +1,134 @@ +/* + * arch/ppc/platforms/ibm405ep.c + * + * Support for IBM PPC 405EP processors. + * + * Author: SAW (IBM), derived from ibmnp405l.c. + * Maintained by MontaVista Software + * + * 2003 (c) MontaVista Softare Inc. This file is licensed under the + * terms of the GNU General Public License version 2. This program is + * licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +static struct ocp_func_mal_data ibm405ep_mal0_def = { + .num_tx_chans = 4, /* Number of TX channels */ + .num_rx_chans = 2, /* Number of RX channels */ + .txeob_irq = 11, /* TX End Of Buffer IRQ */ + .rxeob_irq = 12, /* RX End Of Buffer IRQ */ + .txde_irq = 13, /* TX Descriptor Error IRQ */ + .rxde_irq = 14, /* RX Descriptor Error IRQ */ + .serr_irq = 10, /* MAL System Error IRQ */ +}; +OCP_SYSFS_MAL_DATA() + +static struct ocp_func_emac_data ibm405ep_emac0_def = { + .rgmii_idx = -1, /* No RGMII */ + .rgmii_mux = -1, /* No RGMII */ + .zmii_idx = -1, /* ZMII device index */ + .zmii_mux = 0, /* ZMII input of this EMAC */ + .mal_idx = 0, /* MAL device index */ + .mal_rx_chan = 0, /* MAL rx channel number */ + .mal_tx_chan = 0, /* MAL tx channel number */ + .wol_irq = 9, /* WOL interrupt number */ + .mdio_idx = 0, /* MDIO via EMAC0 */ + .tah_idx = -1, /* No TAH */ +}; + +static struct ocp_func_emac_data ibm405ep_emac1_def = { + .rgmii_idx = -1, /* No RGMII */ + .rgmii_mux = -1, /* No RGMII */ + .zmii_idx = -1, /* ZMII device index */ + .zmii_mux = 0, /* ZMII input of this EMAC */ + .mal_idx = 0, /* MAL device index */ + .mal_rx_chan = 1, /* MAL rx channel number */ + .mal_tx_chan = 2, /* MAL tx channel number */ + .wol_irq = 9, /* WOL interrupt number */ + .mdio_idx = 0, /* MDIO via EMAC0 */ + .tah_idx = -1, /* No TAH */ +}; +OCP_SYSFS_EMAC_DATA() + +static struct ocp_func_iic_data ibm405ep_iic0_def = { + .fast_mode = 0, /* Use standad mode (100Khz) */ +}; +OCP_SYSFS_IIC_DATA() + +struct ocp_def core_ocp[] = { + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_OPB, + .index = 0, + .paddr = 0xEF600000, + .irq = OCP_IRQ_NA, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_16550, + .index = 0, + .paddr = UART0_IO_BASE, + .irq = UART0_INT, + .pm = IBM_CPM_UART0 + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_16550, + .index = 1, + .paddr = UART1_IO_BASE, + .irq = UART1_INT, + .pm = IBM_CPM_UART1 + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_IIC, + .paddr = 0xEF600500, + .irq = 2, + .pm = IBM_CPM_IIC0, + .additions = &ibm405ep_iic0_def, + .show = &ocp_show_iic_data + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_GPIO, + .paddr = 0xEF600700, + .irq = OCP_IRQ_NA, + .pm = IBM_CPM_GPIO0 + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_MAL, + .paddr = OCP_PADDR_NA, + .irq = OCP_IRQ_NA, + .pm = OCP_CPM_NA, + .additions = &ibm405ep_mal0_def, + .show = &ocp_show_mal_data + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_EMAC, + .index = 0, + .paddr = EMAC0_BASE, + .irq = 15, + .pm = OCP_CPM_NA, + .additions = &ibm405ep_emac0_def, + .show = &ocp_show_emac_data + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_EMAC, + .index = 1, + .paddr = 0xEF600900, + .irq = 17, + .pm = OCP_CPM_NA, + .additions = &ibm405ep_emac1_def, + .show = &ocp_show_emac_data + }, + { .vendor = OCP_VENDOR_INVALID + } +}; diff --git a/arch/ppc/platforms/4xx/ibm405ep.h b/arch/ppc/platforms/4xx/ibm405ep.h new file mode 100644 index 000000000..e051e3fe8 --- /dev/null +++ b/arch/ppc/platforms/4xx/ibm405ep.h @@ -0,0 +1,148 @@ +/* + * arch/ppc/platforms/4xx/ibm405ep.h + * + * IBM PPC 405EP processor defines. + * + * Author: SAW (IBM), derived from ibm405gp.h. + * Maintained by MontaVista Software + * + * 2003 (c) MontaVista Softare Inc. This file is licensed under the + * terms of the GNU General Public License version 2. 
This program is + * licensed "as is" without any warranty of any kind, whether express + * or implied. + */ + +#ifdef __KERNEL__ +#ifndef __ASM_IBM405EP_H__ +#define __ASM_IBM405EP_H__ + +#include + +/* ibm405.h at bottom of this file */ + +/* PCI + * PCI Bridge config reg definitions + * see 17-19 of manual + */ + +#define PPC405_PCI_CONFIG_ADDR 0xeec00000 +#define PPC405_PCI_CONFIG_DATA 0xeec00004 + +#define PPC405_PCI_PHY_MEM_BASE 0x80000000 /* hose_a->pci_mem_offset */ + /* setbat */ +#define PPC405_PCI_MEM_BASE PPC405_PCI_PHY_MEM_BASE /* setbat */ +#define PPC405_PCI_PHY_IO_BASE 0xe8000000 /* setbat */ +#define PPC405_PCI_IO_BASE PPC405_PCI_PHY_IO_BASE /* setbat */ + +#define PPC405_PCI_LOWER_MEM 0x80000000 /* hose_a->mem_space.start */ +#define PPC405_PCI_UPPER_MEM 0xBfffffff /* hose_a->mem_space.end */ +#define PPC405_PCI_LOWER_IO 0x00000000 /* hose_a->io_space.start */ +#define PPC405_PCI_UPPER_IO 0x0000ffff /* hose_a->io_space.end */ + +#define PPC405_ISA_IO_BASE PPC405_PCI_IO_BASE + +#define PPC4xx_PCI_IO_PADDR ((uint)PPC405_PCI_PHY_IO_BASE) +#define PPC4xx_PCI_IO_VADDR PPC4xx_PCI_IO_PADDR +#define PPC4xx_PCI_IO_SIZE ((uint)64*1024) +#define PPC4xx_PCI_CFG_PADDR ((uint)PPC405_PCI_CONFIG_ADDR) +#define PPC4xx_PCI_CFG_VADDR PPC4xx_PCI_CFG_PADDR +#define PPC4xx_PCI_CFG_SIZE ((uint)4*1024) +#define PPC4xx_PCI_LCFG_PADDR ((uint)0xef400000) +#define PPC4xx_PCI_LCFG_VADDR PPC4xx_PCI_LCFG_PADDR +#define PPC4xx_PCI_LCFG_SIZE ((uint)4*1024) +#define PPC4xx_ONB_IO_PADDR ((uint)0xef600000) +#define PPC4xx_ONB_IO_VADDR PPC4xx_ONB_IO_PADDR +#define PPC4xx_ONB_IO_SIZE ((uint)4*1024) + +/* serial port defines */ +#define RS_TABLE_SIZE 2 + +#define UART0_INT 0 +#define UART1_INT 1 + +#define PCIL0_BASE 0xEF400000 +#define UART0_IO_BASE 0xEF600300 +#define UART1_IO_BASE 0xEF600400 +#define EMAC0_BASE 0xEF600800 + +#define BD_EMAC_ADDR(e,i) bi_enetaddr[e][i] + +#if defined(CONFIG_UART0_TTYS0) +#define ACTING_UART0_IO_BASE UART0_IO_BASE +#define ACTING_UART1_IO_BASE UART1_IO_BASE +#define ACTING_UART0_INT UART0_INT +#define ACTING_UART1_INT UART1_INT +#else +#define ACTING_UART0_IO_BASE UART1_IO_BASE +#define ACTING_UART1_IO_BASE UART0_IO_BASE +#define ACTING_UART0_INT UART1_INT +#define ACTING_UART1_INT UART0_INT +#endif + +#define STD_UART_OP(num) \ + { 0, BASE_BAUD, 0, ACTING_UART##num##_INT, \ + (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST), \ + iomem_base: (u8 *)ACTING_UART##num##_IO_BASE, \ + io_type: SERIAL_IO_MEM}, + +#define SERIAL_DEBUG_IO_BASE ACTING_UART0_IO_BASE +#define SERIAL_PORT_DFNS \ + STD_UART_OP(0) \ + STD_UART_OP(1) + +/* DCR defines */ +#define DCRN_CPMSR_BASE 0x0BA +#define DCRN_CPMFR_BASE 0x0B9 + +#define DCRN_CPC0_PLLMR0_BASE 0x0F0 +#define DCRN_CPC0_BOOT_BASE 0x0F1 +#define DCRN_CPC0_CR1_BASE 0x0F2 +#define DCRN_CPC0_EPRCSR_BASE 0x0F3 +#define DCRN_CPC0_PLLMR1_BASE 0x0F4 +#define DCRN_CPC0_UCR_BASE 0x0F5 +#define DCRN_CPC0_UCR_U0DIV 0x07F +#define DCRN_CPC0_SRR_BASE 0x0F6 +#define DCRN_CPC0_JTAGID_BASE 0x0F7 +#define DCRN_CPC0_SPARE_BASE 0x0F8 +#define DCRN_CPC0_PCI_BASE 0x0F9 + + +#define IBM_CPM_GPT 0x80000000 /* GPT interface */ +#define IBM_CPM_PCI 0x40000000 /* PCI bridge */ +#define IBM_CPM_UIC 0x00010000 /* Universal Int Controller */ +#define IBM_CPM_CPU 0x00008000 /* processor core */ +#define IBM_CPM_EBC 0x00002000 /* EBC controller */ +#define IBM_CPM_SDRAM0 0x00004000 /* SDRAM memory controller */ +#define IBM_CPM_GPIO0 0x00001000 /* General Purpose IO */ +#define IBM_CPM_TMRCLK 0x00000400 /* CPU timers */ +#define IBM_CPM_PLB 0x00000100 /* PLB bus arbiter */ 
+#define IBM_CPM_OPB 0x00000080 /* PLB to OPB bridge */ +#define IBM_CPM_DMA 0x00000040 /* DMA controller */ +#define IBM_CPM_IIC0 0x00000010 /* IIC interface */ +#define IBM_CPM_UART1 0x00000002 /* serial port 0 */ +#define IBM_CPM_UART0 0x00000001 /* serial port 1 */ +#define DFLT_IBM4xx_PM ~(IBM_CPM_PCI | IBM_CPM_CPU | IBM_CPM_DMA \ + | IBM_CPM_OPB | IBM_CPM_EBC \ + | IBM_CPM_SDRAM0 | IBM_CPM_PLB \ + | IBM_CPM_UIC | IBM_CPM_TMRCLK) +#define DCRN_DMA0_BASE 0x100 +#define DCRN_DMA1_BASE 0x108 +#define DCRN_DMA2_BASE 0x110 +#define DCRN_DMA3_BASE 0x118 +#define DCRNCAP_DMA_SG 1 /* have DMA scatter/gather capability */ +#define DCRN_DMASR_BASE 0x120 +#define DCRN_EBC_BASE 0x012 +#define DCRN_DCP0_BASE 0x014 +#define DCRN_MAL_BASE 0x180 +#define DCRN_OCM0_BASE 0x018 +#define DCRN_PLB0_BASE 0x084 +#define DCRN_PLLMR_BASE 0x0B0 +#define DCRN_POB0_BASE 0x0A0 +#define DCRN_SDRAM0_BASE 0x010 +#define DCRN_UIC0_BASE 0x0C0 +#define UIC0 DCRN_UIC0_BASE + +#include + +#endif /* __ASM_IBM405EP_H__ */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/platforms/sbc82xx.c b/arch/ppc/platforms/sbc82xx.c new file mode 100644 index 000000000..0da699d3b --- /dev/null +++ b/arch/ppc/platforms/sbc82xx.c @@ -0,0 +1,113 @@ +/* + * arch/ppc/platforms/sbc82xx.c + * + * SBC82XX platform support + * + * Author: Guy Streeter + * + * Derived from: est8260_setup.c by Allen Curtis, ONZ + * + * Copyright 2004 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +static void (*callback_setup_arch)(void); + +extern unsigned char __res[sizeof(bd_t)]; + +extern void m8260_init(unsigned long r3, unsigned long r4, + unsigned long r5, unsigned long r6, unsigned long r7); + +extern void (*late_time_init)(void); + +static int +sbc82xx_show_cpuinfo(struct seq_file *m) +{ + bd_t *binfo = (bd_t *)__res; + + seq_printf(m, "vendor\t\t: Wind River\n" + "machine\t\t: SBC PowerQUICC II\n" + "\n" + "mem size\t\t: 0x%08lx\n" + "console baud\t\t: %ld\n" + "\n", + binfo->bi_memsize, + binfo->bi_baudrate); + return 0; +} + +static void __init +sbc82xx_setup_arch(void) +{ + printk("SBC PowerQUICC II Port\n"); + callback_setup_arch(); +} + +TODC_ALLOC(); + +/* + * Timer init happens before mem_init but after paging init, so we cannot + * directly use ioremap() at that time. + * late_time_init() is call after paging init. + */ +#ifdef CONFIG_GEN_RTC +static void sbc82xx_time_init(void) +{ + volatile memctl8260_t *mc = &immr->im_memctl; + TODC_INIT(TODC_TYPE_MK48T59, 0, 0, SBC82xx_TODC_NVRAM_ADDR, 0); + + /* Set up CS11 for RTC chip */ + mc->memc_br11=0; + mc->memc_or11=0xffff0836; + mc->memc_br11=0x80000801; + + todc_info->nvram_data = + (unsigned int)ioremap(todc_info->nvram_data, 0x2000); + BUG_ON(!todc_info->nvram_data); + ppc_md.get_rtc_time = todc_get_rtc_time; + ppc_md.set_rtc_time = todc_set_rtc_time; + ppc_md.nvram_read_val = todc_direct_read_val; + ppc_md.nvram_write_val = todc_direct_write_val; + todc_time_init(); +} +#endif /* CONFIG_GEN_RTC */ + +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* Generic 8260 platform initialization */ + m8260_init(r3, r4, r5, r6, r7); + + /* u-boot may be using one of the FCC Ethernet devices. 
+ Use the MAC address to the SCC. */ + __res[offsetof(bd_t, bi_enetaddr[5])] &= ~3; + + /* Anything special for this platform */ + ppc_md.show_cpuinfo = sbc82xx_show_cpuinfo; + + callback_setup_arch = ppc_md.setup_arch; + ppc_md.setup_arch = sbc82xx_setup_arch; +#ifdef CONFIG_GEN_RTC + ppc_md.time_init = NULL; + ppc_md.get_rtc_time = NULL; + ppc_md.set_rtc_time = NULL; + ppc_md.nvram_read_val = NULL; + ppc_md.nvram_write_val = NULL; + late_time_init = sbc82xx_time_init; +#endif /* CONFIG_GEN_RTC */ +} diff --git a/arch/ppc/platforms/sbc82xx.h b/arch/ppc/platforms/sbc82xx.h new file mode 100644 index 000000000..b9d1c8ddb --- /dev/null +++ b/arch/ppc/platforms/sbc82xx.h @@ -0,0 +1,24 @@ +/* Board information for the SBCPowerQUICCII, which should be generic for + * all 8260 boards. The IMMR is now given to us so the hard define + * will soon be removed. All of the clock values are computed from + * the configuration SCMR and the Power-On-Reset word. + */ + +#ifndef __PPC_SBC82xx_H__ +#define __PPC_SBC82xx_H__ + +#include + +#define IMAP_ADDR 0xf0000000 +#define CPM_MAP_ADDR 0xf0000000 + +#define SBC82xx_TODC_NVRAM_ADDR 0x80000000 + +#define SBC82xx_MACADDR_NVRAM_FCC1 0x220000c9 /* JP6B */ +#define SBC82xx_MACADDR_NVRAM_SCC1 0x220000cf /* JP6A */ +#define SBC82xx_MACADDR_NVRAM_FCC2 0x220000d5 /* JP7A */ +#define SBC82xx_MACADDR_NVRAM_FCC3 0x220000db /* JP7B */ + +#define BOOTROM_RESTART_ADDR ((uint)0x40000104) + +#endif /* __PPC_SBC82xx_H__ */ diff --git a/arch/ppc/syslib/dcr.S b/arch/ppc/syslib/dcr.S new file mode 100644 index 000000000..895f10243 --- /dev/null +++ b/arch/ppc/syslib/dcr.S @@ -0,0 +1,41 @@ +/* + * arch/ppc/syslib/dcr.S + * + * "Indirect" DCR access + * + * Copyright (c) 2004 Eugene Surovegin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include + +#define DCR_ACCESS_PROLOG(table) \ + rlwinm r3,r3,4,18,27; \ + lis r5,table@h; \ + ori r5,r5,table@l; \ + add r3,r3,r5; \ + mtctr r3; \ + bctr + +_GLOBAL(__mfdcr) + DCR_ACCESS_PROLOG(__mfdcr_table) + +_GLOBAL(__mtdcr) + DCR_ACCESS_PROLOG(__mtdcr_table) + +__mfdcr_table: + mfdcr r3,0; blr +__mtdcr_table: + mtdcr 0,r4; blr + +dcr = 1 + .rept 1023 + mfdcr r3,dcr; blr + mtdcr dcr,r4; blr + dcr = dcr + 1 + .endr diff --git a/arch/ppc/syslib/ibm440gx_common.c b/arch/ppc/syslib/ibm440gx_common.c new file mode 100644 index 000000000..5da7bca6b --- /dev/null +++ b/arch/ppc/syslib/ibm440gx_common.c @@ -0,0 +1,212 @@ +/* + * arch/ppc/kernel/ibm440gx_common.c + * + * PPC440GX system library + * + * Eugene Surovegin or + * Copyright (c) 2003 Zultys Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#include +#include +#include +#include +#include +#include + +/* + * Calculate 440GX clocks + */ +static inline u32 __fix_zero(u32 v, u32 def){ + return v ? 
v : def; +} + +void __init ibm440gx_get_clocks(struct ibm44x_clocks* p, unsigned int sys_clk, + unsigned int ser_clk) +{ + u32 pllc = CPR_READ(DCRN_CPR_PLLC); + u32 plld = CPR_READ(DCRN_CPR_PLLD); + u32 uart0 = SDR_READ(DCRN_SDR_UART0); + u32 uart1 = SDR_READ(DCRN_SDR_UART1); + + /* Dividers */ + u32 fbdv = __fix_zero((plld >> 24) & 0x1f, 32); + u32 fwdva = __fix_zero((plld >> 16) & 0xf, 16); + u32 fwdvb = __fix_zero((plld >> 8) & 7, 8); + u32 lfbdv = __fix_zero(plld & 0x3f, 64); + u32 pradv0 = __fix_zero((CPR_READ(DCRN_CPR_PRIMAD) >> 24) & 7, 8); + u32 prbdv0 = __fix_zero((CPR_READ(DCRN_CPR_PRIMBD) >> 24) & 7, 8); + u32 opbdv0 = __fix_zero((CPR_READ(DCRN_CPR_OPBD) >> 24) & 3, 4); + u32 perdv0 = __fix_zero((CPR_READ(DCRN_CPR_PERD) >> 24) & 3, 4); + + /* Input clocks for primary dividers */ + u32 clk_a, clk_b; + + if (pllc & 0x40000000){ + u32 m; + + /* Feedback path */ + switch ((pllc >> 24) & 7){ + case 0: + /* PLLOUTx */ + m = ((pllc & 0x20000000) ? fwdvb : fwdva) * lfbdv; + break; + case 1: + /* CPU */ + m = fwdva * pradv0; + break; + case 5: + /* PERClk */ + m = fwdvb * prbdv0 * opbdv0 * perdv0; + break; + default: + printk(KERN_EMERG "invalid PLL feedback source\n"); + goto bypass; + } + m *= fbdv; + p->vco = sys_clk * m; + clk_a = p->vco / fwdva; + clk_b = p->vco / fwdvb; + } + else { +bypass: + /* Bypass system PLL */ + p->vco = 0; + clk_a = clk_b = sys_clk; + } + + p->cpu = clk_a / pradv0; + p->plb = clk_b / prbdv0; + p->opb = p->plb / opbdv0; + p->ebc = p->opb / perdv0; + + /* UARTs clock */ + if (uart0 & 0x00800000) + p->uart0 = ser_clk; + else + p->uart0 = p->plb / __fix_zero(uart0 & 0xff, 256); + + if (uart1 & 0x00800000) + p->uart1 = ser_clk; + else + p->uart1 = p->plb / __fix_zero(uart1 & 0xff, 256); +} + +/* Enable L2 cache (call with IRQs disabled) */ +void __init ibm440gx_l2c_enable(void){ + u32 r; + + asm volatile ("sync" ::: "memory"); + + /* Disable SRAM */ + mtdcr(DCRN_SRAM0_DPC, mfdcr(DCRN_SRAM0_DPC) & ~SRAM_DPC_ENABLE); + mtdcr(DCRN_SRAM0_SB0CR, mfdcr(DCRN_SRAM0_SB0CR) & ~SRAM_SBCR_BU_MASK); + mtdcr(DCRN_SRAM0_SB1CR, mfdcr(DCRN_SRAM0_SB1CR) & ~SRAM_SBCR_BU_MASK); + mtdcr(DCRN_SRAM0_SB2CR, mfdcr(DCRN_SRAM0_SB2CR) & ~SRAM_SBCR_BU_MASK); + mtdcr(DCRN_SRAM0_SB3CR, mfdcr(DCRN_SRAM0_SB3CR) & ~SRAM_SBCR_BU_MASK); + + /* Enable L2_MODE without ICU/DCU */ + r = mfdcr(DCRN_L2C0_CFG) & ~(L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_SS_MASK); + r |= L2C_CFG_L2M | L2C_CFG_SS_256; + mtdcr(DCRN_L2C0_CFG, r); + + mtdcr(DCRN_L2C0_ADDR, 0); + + /* Hardware Clear Command */ + mtdcr(DCRN_L2C0_CMD, L2C_CMD_HCC); + while (!(mfdcr(DCRN_L2C0_SR) & L2C_SR_CC)) ; + + /* Clear Cache Parity and Tag Errors */ + mtdcr(DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE); + + /* Enable 64G snoop region starting at 0 */ + r = mfdcr(DCRN_L2C0_SNP0) & ~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK); + r |= L2C_SNP_SSR_32G | L2C_SNP_ESR; + mtdcr(DCRN_L2C0_SNP0, r); + + r = mfdcr(DCRN_L2C0_SNP1) & ~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK); + r |= 0x80000000 | L2C_SNP_SSR_32G | L2C_SNP_ESR; + mtdcr(DCRN_L2C0_SNP1, r); + + asm volatile ("sync" ::: "memory"); + + /* Enable ICU/DCU ports */ + r = mfdcr(DCRN_L2C0_CFG); + r &= ~(L2C_CFG_DCW_MASK | L2C_CFG_CPIM | L2C_CFG_TPIM | L2C_CFG_LIM + | L2C_CFG_PMUX_MASK | L2C_CFG_PMIM | L2C_CFG_TPEI | L2C_CFG_CPEI + | L2C_CFG_NAM | L2C_CFG_NBRM); + r |= L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_TPC | L2C_CFG_CPC | L2C_CFG_FRAN + | L2C_CFG_SMCM; + mtdcr(DCRN_L2C0_CFG, r); + + asm volatile ("sync; isync" ::: "memory"); +} + +/* Disable L2 cache (call with IRQs disabled) */ +void __init 
ibm440gx_l2c_disable(void){ + u32 r; + + asm volatile ("sync" ::: "memory"); + + /* Disable L2C mode */ + r = mfdcr(DCRN_L2C0_CFG) & ~(L2C_CFG_L2M | L2C_CFG_ICU | L2C_CFG_DCU); + mtdcr(DCRN_L2C0_CFG, r); + + /* Enable SRAM */ + mtdcr(DCRN_SRAM0_DPC, mfdcr(DCRN_SRAM0_DPC) | SRAM_DPC_ENABLE); + mtdcr(DCRN_SRAM0_SB0CR, + SRAM_SBCR_BAS0 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW); + mtdcr(DCRN_SRAM0_SB1CR, + SRAM_SBCR_BAS1 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW); + mtdcr(DCRN_SRAM0_SB2CR, + SRAM_SBCR_BAS2 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW); + mtdcr(DCRN_SRAM0_SB3CR, + SRAM_SBCR_BAS3 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW); + + asm volatile ("sync; isync" ::: "memory"); +} + +int __init ibm440gx_get_eth_grp(void) +{ + return (SDR_READ(DCRN_SDR_PFC1) & DCRN_SDR_PFC1_EPS) >> DCRN_SDR_PFC1_EPS_SHIFT; +} + +void __init ibm440gx_set_eth_grp(int group) +{ + SDR_WRITE(DCRN_SDR_PFC1, (SDR_READ(DCRN_SDR_PFC1) & ~DCRN_SDR_PFC1_EPS) | (group << DCRN_SDR_PFC1_EPS_SHIFT)); +} + +void __init ibm440gx_tah_enable(void) +{ + /* Enable TAH0 and TAH1 */ + SDR_WRITE(DCRN_SDR_MFR,SDR_READ(DCRN_SDR_MFR) & + ~DCRN_SDR_MFR_TAH0); + SDR_WRITE(DCRN_SDR_MFR,SDR_READ(DCRN_SDR_MFR) & + ~DCRN_SDR_MFR_TAH1); +} + +int ibm440gx_show_cpuinfo(struct seq_file *m){ + + u32 l2c_cfg = mfdcr(DCRN_L2C0_CFG); + const char* s; + if (l2c_cfg & L2C_CFG_L2M){ + switch (l2c_cfg & (L2C_CFG_ICU | L2C_CFG_DCU)){ + case L2C_CFG_ICU: s = "I-Cache only"; break; + case L2C_CFG_DCU: s = "D-Cache only"; break; + default: s = "I-Cache/D-Cache"; break; + } + } + else + s = "disabled"; + + seq_printf(m, "L2-Cache\t: %s (0x%08x 0x%08x)\n", s, + l2c_cfg, mfdcr(DCRN_L2C0_SR)); + + return 0; +} + diff --git a/arch/ppc/syslib/ibm440gx_common.h b/arch/ppc/syslib/ibm440gx_common.h new file mode 100644 index 000000000..5dbca9896 --- /dev/null +++ b/arch/ppc/syslib/ibm440gx_common.h @@ -0,0 +1,54 @@ +/* + * arch/ppc/kernel/ibm440gx_common.h + * + * PPC440GX system library + * + * Eugene Surovegin or + * Copyright (c) 2003 Zultys Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ +#ifdef __KERNEL__ +#ifndef __PPC_SYSLIB_IBM440GX_COMMON_H +#define __PPC_SYSLIB_IBM440GX_COMMON_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +/* + * Please refer to Figure 14.1 in the 440GX user manual + * + * if internal UART clock is used, ser_clk is ignored + */ +void ibm440gx_get_clocks(struct ibm44x_clocks*, unsigned int sys_clk, + unsigned int ser_clk) __init; + +/* Enable L2 cache */ +void ibm440gx_l2c_enable(void) __init; + +/* Disable L2 cache */ +void ibm440gx_l2c_disable(void) __init; + +/* Get Ethernet Group */ +int ibm440gx_get_eth_grp(void) __init; + +/* Set Ethernet Group */ +void ibm440gx_set_eth_grp(int group) __init; + +/* Enable TAH devices */ +void ibm440gx_tah_enable(void) __init; + +/* Add L2C info to /proc/cpuinfo */ +int ibm440gx_show_cpuinfo(struct seq_file*); + +#endif /* __ASSEMBLY__ */ +#endif /* __PPC_SYSLIB_IBM440GX_COMMON_H */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/syslib/ibm44x_common.h b/arch/ppc/syslib/ibm44x_common.h new file mode 100644 index 000000000..ee1053ac2 --- /dev/null +++ b/arch/ppc/syslib/ibm44x_common.h @@ -0,0 +1,36 @@ +/* + * arch/ppc/kernel/ibm44x_common.h + * + * PPC44x system library + * + * Eugene Surovegin or + * Copyright (c) 2003 Zultys Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#ifdef __KERNEL__ +#ifndef __PPC_SYSLIB_IBM44x_COMMON_H +#define __PPC_SYSLIB_IBM44x_COMMON_H + +#ifndef __ASSEMBLY__ + +/* + * All clocks are in Hz + */ +struct ibm44x_clocks { + unsigned int vco; /* VCO, 0 if system PLL is bypassed */ + unsigned int cpu; /* CPUCoreClk */ + unsigned int plb; /* PLBClk */ + unsigned int opb; /* OPBClk */ + unsigned int ebc; /* PerClk */ + unsigned int uart0; + unsigned int uart1; +}; + +#endif /* __ASSEMBLY__ */ +#endif /* __PPC_SYSLIB_IBM44x_COMMON_H */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/syslib/ocp.c b/arch/ppc/syslib/ocp.c new file mode 100644 index 000000000..a5156c517 --- /dev/null +++ b/arch/ppc/syslib/ocp.c @@ -0,0 +1,485 @@ +/* + * ocp.c + * + * (c) Benjamin Herrenschmidt (benh@kernel.crashing.org) + * Mipsys - France + * + * Derived from work (c) Armin Kuster akuster@pacbell.net + * + * Additional support and port to 2.6 LDM/sysfs by + * Matt Porter + * Copyright 2004 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * OCP (On Chip Peripheral) is a software emulated "bus" with a + * pseudo discovery method for dumb peripherals. Usually these types + * of peripherals are found on embedded SoC (System On a Chip) + * processors or highly integrated system controllers that have + * a host bridge and many peripherals. Common examples where + * this is already used include the PPC4xx, PPC85xx, MPC52xx, + * and MV64xxx parts. + * + * This subsystem creates a standard OCP bus type within the + * device model. The devices on the OCP bus are seeded by + * an initial OCP device array created by the arch-specific code. + * Device entries can be added/removed/modified through OCP + * helper functions to accommodate system and board-specific + * parameters commonly found in embedded systems.
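+ * (Purely as an illustration: board setup code could call
+ * ocp_get_one_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC, 0) before drivers
+ * are bound and patch the returned ocp_def -- its paddr or irq, say --
+ * to match the board.)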
OCP also + * provides a standard method for devices to describe extended + * attributes about themselves to the system. A standard access + * method allows OCP drivers to obtain the information, both + * SoC-specific and system/board-specific, needed for operation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +//#define DBG(x) printk x +#define DBG(x) + +extern int mem_init_done; + +extern struct ocp_def core_ocp[]; /* Static list of devices, provided by + CPU core */ + +LIST_HEAD(ocp_devices); /* List of all OCP devices */ +DECLARE_RWSEM(ocp_devices_sem); /* Global semaphores for those lists */ + +static int ocp_inited; + +/* Sysfs support */ +#define OCP_DEF_ATTR(field, format_string) \ +static ssize_t \ +show_##field(struct device *dev, char *buf) \ +{ \ + struct ocp_device *odev = to_ocp_dev(dev); \ + \ + return sprintf(buf, format_string, odev->def->field); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); + +OCP_DEF_ATTR(vendor, "0x%04x\n"); +OCP_DEF_ATTR(function, "0x%04x\n"); +OCP_DEF_ATTR(index, "0x%04x\n"); +#ifdef CONFIG_PTE_64BIT +OCP_DEF_ATTR(paddr, "0x%016Lx\n"); +#else +OCP_DEF_ATTR(paddr, "0x%08lx\n"); +#endif +OCP_DEF_ATTR(irq, "%d\n"); +OCP_DEF_ATTR(pm, "%lu\n"); + +void ocp_create_sysfs_dev_files(struct ocp_device *odev) +{ + struct device *dev = &odev->dev; + + /* Current OCP device def attributes */ + device_create_file(dev, &dev_attr_vendor); + device_create_file(dev, &dev_attr_function); + device_create_file(dev, &dev_attr_index); + device_create_file(dev, &dev_attr_paddr); + device_create_file(dev, &dev_attr_irq); + device_create_file(dev, &dev_attr_pm); + /* Current OCP device additions attributes */ + if (odev->def->additions && odev->def->show) + odev->def->show(dev); +} + +/** + * ocp_device_match - Match one driver to one device + * @drv: driver to match + * @dev: device to match + * + * This function returns 0 if the driver and device don't match + */ +static int +ocp_device_match(struct device *dev, struct device_driver *drv) +{ + struct ocp_device *ocp_dev = to_ocp_dev(dev); + struct ocp_driver *ocp_drv = to_ocp_drv(drv); + const struct ocp_device_id *ids = ocp_drv->id_table; + + if (!ids) + return 0; + + while (ids->vendor || ids->function) { + if ((ids->vendor == OCP_ANY_ID + || ids->vendor == ocp_dev->def->vendor) + && (ids->function == OCP_ANY_ID + || ids->function == ocp_dev->def->function)) + return 1; + ids++; + } + return 0; +} + +static int +ocp_device_probe(struct device *dev) +{ + int error = 0; + struct ocp_driver *drv; + struct ocp_device *ocp_dev; + + drv = to_ocp_drv(dev->driver); + ocp_dev = to_ocp_dev(dev); + + if (drv->probe) { + error = drv->probe(ocp_dev); + if (error >= 0) { + ocp_dev->driver = drv; + error = 0; + } + } + return error; +} + +static int +ocp_device_remove(struct device *dev) +{ + struct ocp_device *ocp_dev = to_ocp_dev(dev); + + if (ocp_dev->driver) { + if (ocp_dev->driver->remove) + ocp_dev->driver->remove(ocp_dev); + ocp_dev->driver = NULL; + } + return 0; +} + +static int +ocp_device_suspend(struct device *dev, u32 state) +{ + struct ocp_device *ocp_dev = to_ocp_dev(dev); + struct ocp_driver *ocp_drv = to_ocp_drv(dev->driver); + + if (dev->driver && ocp_drv->suspend) + return ocp_drv->suspend(ocp_dev, state); + return 0; +} + +static int +ocp_device_resume(struct device *dev) +{ + struct ocp_device *ocp_dev = to_ocp_dev(dev); + struct ocp_driver *ocp_drv = to_ocp_drv(dev->driver); + + if 
(dev->driver && ocp_drv->resume) + return ocp_drv->resume(ocp_dev); + return 0; +} + +struct bus_type ocp_bus_type = { + .name = "ocp", + .match = ocp_device_match, + .suspend = ocp_device_suspend, + .resume = ocp_device_resume, +}; + +/** + * ocp_register_driver - Register an OCP driver + * @drv: pointer to statically defined ocp_driver structure + * + * The driver's probe() callback is called either recursively + * by this function or upon later call of ocp_driver_init + * + * NOTE: Detection of devices is a 2 pass step on this implementation, + * hotswap isn't supported. First, all OCP devices are put in the device + * list, _then_ all drivers are probed on each match. + */ +int +ocp_register_driver(struct ocp_driver *drv) +{ + /* initialize common driver fields */ + drv->driver.name = drv->name; + drv->driver.bus = &ocp_bus_type; + drv->driver.probe = ocp_device_probe; + drv->driver.remove = ocp_device_remove; + + /* register with core */ + return driver_register(&drv->driver); +} + +/** + * ocp_unregister_driver - Unregister an OCP driver + * @drv: pointer to statically defined ocp_driver structure + * + * The driver's remove() callback is called recursively + * by this function for any device already registered + */ +void +ocp_unregister_driver(struct ocp_driver *drv) +{ + DBG(("ocp: ocp_unregister_driver(%s)...\n", drv->name)); + + driver_unregister(&drv->driver); + + DBG(("ocp: ocp_unregister_driver(%s)... done.\n", drv->name)); +} + +/* Core of ocp_find_device(). Caller must hold ocp_devices_sem */ +static struct ocp_device * +__ocp_find_device(unsigned int vendor, unsigned int function, int index) +{ + struct list_head *entry; + struct ocp_device *dev, *found = NULL; + + DBG(("ocp: __ocp_find_device(vendor: %x, function: %x, index: %d)...\n", vendor, function, index)); + + list_for_each(entry, &ocp_devices) { + dev = list_entry(entry, struct ocp_device, link); + if (vendor != OCP_ANY_ID && vendor != dev->def->vendor) + continue; + if (function != OCP_ANY_ID && function != dev->def->function) + continue; + if (index != OCP_ANY_INDEX && index != dev->def->index) + continue; + found = dev; + break; + } + + DBG(("ocp: __ocp_find_device(vendor: %x, function: %x, index: %d)... done\n", vendor, function, index)); + + return found; +} + +/** + * ocp_find_device - Find a device by function & index + * @vendor: vendor ID of the device (or OCP_ANY_ID) + * @function: function code of the device (or OCP_ANY_ID) + * @idx: index of the device (or OCP_ANY_INDEX) + * + * This function allows a lookup of a given function by it's + * index, it's typically used to find the MAL or ZMII associated + * with an EMAC or similar horrors. + * You can pass vendor, though you usually want OCP_ANY_ID there... + */ +struct ocp_device * +ocp_find_device(unsigned int vendor, unsigned int function, int index) +{ + struct ocp_device *dev; + + down_read(&ocp_devices_sem); + dev = __ocp_find_device(vendor, function, index); + up_read(&ocp_devices_sem); + + return dev; +} + +/** + * ocp_get_one_device - Find a def by function & index + * @vendor: vendor ID of the device (or OCP_ANY_ID) + * @function: function code of the device (or OCP_ANY_ID) + * @idx: index of the device (or OCP_ANY_INDEX) + * + * This function allows a lookup of a given ocp_def by it's + * vendor, function, and index. 
The main purpose for is to + * allow modification of the def before binding to the driver + */ +struct ocp_def * +ocp_get_one_device(unsigned int vendor, unsigned int function, int index) +{ + struct ocp_device *dev; + struct ocp_def *found = NULL; + + DBG(("ocp: ocp_get_one_device(vendor: %x, function: %x, index: %d)...\n", + vendor, function, index)); + + dev = ocp_find_device(vendor, function, index); + + if (dev) + found = dev->def; + + DBG(("ocp: ocp_get_one_device(vendor: %x, function: %x, index: %d)... done.\n", + vendor, function, index)); + + return found; +} + +/** + * ocp_add_one_device - Add a device + * @def: static device definition structure + * + * This function adds a device definition to the + * device list. It may only be called before + * ocp_driver_init() and will return an error + * otherwise. + */ +int +ocp_add_one_device(struct ocp_def *def) +{ + struct ocp_device *dev; + + DBG(("ocp: ocp_add_one_device()...\n")); + + /* Can't be called after ocp driver init */ + if (ocp_inited) + return 1; + + if (mem_init_done) + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + else + dev = alloc_bootmem(sizeof(*dev)); + + if (dev == NULL) + return 1; + memset(dev, 0, sizeof(*dev)); + dev->def = def; + dev->current_state = 4; + sprintf(dev->name, "OCP device %04x:%04x:%04x", + dev->def->vendor, dev->def->function, dev->def->index); + down_write(&ocp_devices_sem); + list_add_tail(&dev->link, &ocp_devices); + up_write(&ocp_devices_sem); + + DBG(("ocp: ocp_add_one_device()...done\n")); + + return 0; +} + +/** + * ocp_remove_one_device - Remove a device by function & index + * @vendor: vendor ID of the device (or OCP_ANY_ID) + * @function: function code of the device (or OCP_ANY_ID) + * @idx: index of the device (or OCP_ANY_INDEX) + * + * This function allows removal of a given function by its + * index. It may only be called before ocp_driver_init() + * and will return an error otherwise. + */ +int +ocp_remove_one_device(unsigned int vendor, unsigned int function, int index) +{ + struct ocp_device *dev; + + DBG(("ocp: ocp_remove_one_device(vendor: %x, function: %x, index: %d)...\n", vendor, function, index)); + + /* Can't be called after ocp driver init */ + if (ocp_inited) + return 1; + + down_write(&ocp_devices_sem); + dev = __ocp_find_device(vendor, function, index); + list_del((struct list_head *)dev); + up_write(&ocp_devices_sem); + + DBG(("ocp: ocp_remove_one_device(vendor: %x, function: %x, index: %d)... done.\n", vendor, function, index)); + + return 0; +} + +/** + * ocp_for_each_device - Iterate over OCP devices + * @callback: routine to execute for each ocp device. + * @arg: user data to be passed to callback routine. + * + * This routine holds the ocp_device semaphore, so the + * callback routine cannot modify the ocp_device list. + */ +void +ocp_for_each_device(void(*callback)(struct ocp_device *, void *arg), void *arg) +{ + struct list_head *entry; + + if (callback) { + down_read(&ocp_devices_sem); + list_for_each(entry, &ocp_devices) + callback(list_entry(entry, struct ocp_device, link), + arg); + up_read(&ocp_devices_sem); + } +} + +/** + * ocp_early_init - Init OCP device management + * + * This function builds the list of devices before setup_arch. 
+ * This allows platform code to modify the device lists before + * they are bound to drivers (changes to paddr, removing devices + * etc) + */ +int __init +ocp_early_init(void) +{ + struct ocp_def *def; + + DBG(("ocp: ocp_early_init()...\n")); + + /* Fill the devices list */ + for (def = core_ocp; def->vendor != OCP_VENDOR_INVALID; def++) + ocp_add_one_device(def); + + DBG(("ocp: ocp_early_init()... done.\n")); + + return 0; +} + +/** + * ocp_driver_init - Init OCP device management + * + * This function is meant to be called via OCP bus registration. + */ +static int __init +ocp_driver_init(void) +{ + int ret = 0, index = 0; + struct device *ocp_bus; + struct list_head *entry; + struct ocp_device *dev; + + if (ocp_inited) + return ret; + ocp_inited = 1; + + DBG(("ocp: ocp_driver_init()...\n")); + + /* Allocate/register primary OCP bus */ + ocp_bus = kmalloc(sizeof(struct device), GFP_KERNEL); + if (ocp_bus == NULL) + return 1; + memset(ocp_bus, 0, sizeof(struct device)); + strcpy(ocp_bus->bus_id, "ocp"); + + bus_register(&ocp_bus_type); + + device_register(ocp_bus); + + /* Put each OCP device into global device list */ + list_for_each(entry, &ocp_devices) { + dev = list_entry(entry, struct ocp_device, link); + sprintf(dev->dev.bus_id, "%2.2x", index); + dev->dev.parent = ocp_bus; + dev->dev.bus = &ocp_bus_type; + device_register(&dev->dev); + ocp_create_sysfs_dev_files(dev); + index++; + } + + DBG(("ocp: ocp_driver_init()... done.\n")); + + return 0; +} + +postcore_initcall(ocp_driver_init); + +EXPORT_SYMBOL(ocp_bus_type); +EXPORT_SYMBOL(ocp_find_device); +EXPORT_SYMBOL(ocp_register_driver); +EXPORT_SYMBOL(ocp_unregister_driver); diff --git a/arch/ppc64/lib/locks.c b/arch/ppc64/lib/locks.c new file mode 100644 index 000000000..4610cb938 --- /dev/null +++ b/arch/ppc64/lib/locks.c @@ -0,0 +1,285 @@ +/* + * Spin and read/write lock operations. + * + * Copyright (C) 2001-2004 Paul Mackerras , IBM + * Copyright (C) 2001 Anton Blanchard , IBM + * Copyright (C) 2002 Dave Engebretsen , IBM + * Rework to support virtual processors + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +#ifndef CONFIG_SPINLINE + +/* + * On a system with shared processors (that is, where a physical + * processor is multiplexed between several virtual processors), + * there is no point spinning on a lock if the holder of the lock + * isn't currently scheduled on a physical processor. Instead + * we detect this situation and ask the hypervisor to give the + * rest of our timeslice to the lock holder. + * + * So that we can tell which virtual processor is holding a lock, + * we put 0x80000000 | smp_processor_id() in the lock when it is + * held. Conveniently, we have a word in the paca that holds this + * value. + */ + +/* waiting for a spinlock... 
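+ * __spin_yield() below pulls the holder's cpu number out of the lock word and, + * if that virtual cpu is currently preempted, confers the rest of our + * timeslice to it through the hypervisor.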
*/ +#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) +void __spin_yield(spinlock_t *lock) +{ + unsigned int lock_value, holder_cpu, yield_count; + struct paca_struct *holder_paca; + + lock_value = lock->lock; + if (lock_value == 0) + return; + holder_cpu = lock_value & 0xffff; + BUG_ON(holder_cpu >= NR_CPUS); + holder_paca = &paca[holder_cpu]; + yield_count = holder_paca->xLpPaca.xYieldCount; + if ((yield_count & 1) == 0) + return; /* virtual cpu is currently running */ + rmb(); + if (lock->lock != lock_value) + return; /* something has changed */ +#ifdef CONFIG_PPC_ISERIES + HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, + ((u64)holder_cpu << 32) | yield_count); +#else + plpar_hcall_norets(H_CONFER, holder_cpu, yield_count); +#endif +} + +#else /* SPLPAR || ISERIES */ +#define __spin_yield(x) barrier() +#endif + +/* + * This returns the old value in the lock, so we succeeded + * in getting the lock if the return value is 0. + */ +static __inline__ unsigned long __spin_trylock(spinlock_t *lock) +{ + unsigned long tmp, tmp2; + + __asm__ __volatile__( +" lwz %1,24(13) # __spin_trylock\n\ +1: lwarx %0,0,%2\n\ + cmpwi 0,%0,0\n\ + bne- 2f\n\ + stwcx. %1,0,%2\n\ + bne- 1b\n\ + isync\n\ +2:" : "=&r" (tmp), "=&r" (tmp2) + : "r" (&lock->lock) + : "cr0", "memory"); + + return tmp; +} + +int _raw_spin_trylock(spinlock_t *lock) +{ + return __spin_trylock(lock) == 0; +} + +EXPORT_SYMBOL(_raw_spin_trylock); + +void _raw_spin_lock(spinlock_t *lock) +{ + while (1) { + if (likely(__spin_trylock(lock) == 0)) + break; + do { + HMT_low(); + __spin_yield(lock); + } while (likely(lock->lock != 0)); + HMT_medium(); + } +} + +EXPORT_SYMBOL(_raw_spin_lock); + +void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) +{ + unsigned long flags_dis; + + while (1) { + if (likely(__spin_trylock(lock) == 0)) + break; + local_save_flags(flags_dis); + local_irq_restore(flags); + do { + HMT_low(); + __spin_yield(lock); + } while (likely(lock->lock != 0)); + HMT_medium(); + local_irq_restore(flags_dis); + } +} + +EXPORT_SYMBOL(_raw_spin_lock_flags); + +void spin_unlock_wait(spinlock_t *lock) +{ + while (lock->lock) + __spin_yield(lock); +} + +EXPORT_SYMBOL(spin_unlock_wait); + +/* + * Waiting for a read lock or a write lock on a rwlock... + * This turns out to be the same for read and write locks, since + * we only know the holder if it is write-locked. + */ +#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) +void __rw_yield(rwlock_t *rw) +{ + int lock_value; + unsigned int holder_cpu, yield_count; + struct paca_struct *holder_paca; + + lock_value = rw->lock; + if (lock_value >= 0) + return; /* no write lock at present */ + holder_cpu = lock_value & 0xffff; + BUG_ON(holder_cpu >= NR_CPUS); + holder_paca = &paca[holder_cpu]; + yield_count = holder_paca->xLpPaca.xYieldCount; + if ((yield_count & 1) == 0) + return; /* virtual cpu is currently running */ + rmb(); + if (rw->lock != lock_value) + return; /* something has changed */ +#ifdef CONFIG_PPC_ISERIES + HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, + ((u64)holder_cpu << 32) | yield_count); +#else + plpar_hcall_norets(H_CONFER, holder_cpu, yield_count); +#endif +} + +#else /* SPLPAR || ISERIES */ +#define __rw_yield(x) barrier() +#endif + +/* + * This returns the old value in the lock + 1, + * so we got a read lock if the return value is > 0. + */ +static __inline__ long __read_trylock(rwlock_t *rw) +{ + long tmp; + + __asm__ __volatile__( +"1: lwarx %0,0,%1 # read_trylock\n\ + extsw %0,%0\n\ + addic. 
%0,%0,1\n\ + ble- 2f\n\ + stwcx. %0,0,%1\n\ + bne- 1b\n\ + isync\n\ +2:" : "=&r" (tmp) + : "r" (&rw->lock) + : "cr0", "xer", "memory"); + + return tmp; +} + +int _raw_read_trylock(rwlock_t *rw) +{ + return __read_trylock(rw) > 0; +} + +EXPORT_SYMBOL(_raw_read_trylock); + +void _raw_read_lock(rwlock_t *rw) +{ + while (1) { + if (likely(__read_trylock(rw) > 0)) + break; + do { + HMT_low(); + __rw_yield(rw); + } while (likely(rw->lock < 0)); + HMT_medium(); + } +} + +EXPORT_SYMBOL(_raw_read_lock); + +void _raw_read_unlock(rwlock_t *rw) +{ + long tmp; + + __asm__ __volatile__( + "eieio # read_unlock\n\ +1: lwarx %0,0,%1\n\ + addic %0,%0,-1\n\ + stwcx. %0,0,%1\n\ + bne- 1b" + : "=&r"(tmp) + : "r"(&rw->lock) + : "cr0", "memory"); +} + +EXPORT_SYMBOL(_raw_read_unlock); + +/* + * This returns the old value in the lock, + * so we got the write lock if the return value is 0. + */ +static __inline__ long __write_trylock(rwlock_t *rw) +{ + long tmp, tmp2; + + __asm__ __volatile__( +" lwz %1,24(13) # write_trylock\n\ +1: lwarx %0,0,%2\n\ + cmpwi 0,%0,0\n\ + bne- 2f\n\ + stwcx. %1,0,%2\n\ + bne- 1b\n\ + isync\n\ +2:" : "=&r" (tmp), "=&r" (tmp2) + : "r" (&rw->lock) + : "cr0", "memory"); + + return tmp; +} + +int _raw_write_trylock(rwlock_t *rw) +{ + return __write_trylock(rw) == 0; +} + +EXPORT_SYMBOL(_raw_write_trylock); + +void _raw_write_lock(rwlock_t *rw) +{ + while (1) { + if (likely(__write_trylock(rw) == 0)) + break; + do { + HMT_low(); + __rw_yield(rw); + } while (likely(rw->lock != 0)); + HMT_medium(); + } +} + +EXPORT_SYMBOL(_raw_write_lock); + +#endif /* CONFIG_SPINLINE */ diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c new file mode 100644 index 000000000..dea4957d0 --- /dev/null +++ b/arch/s390/lib/string.c @@ -0,0 +1,405 @@ +/* + * arch/s390/lib/string.c + * Optimized string functions + * + * S390 version + * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#define IN_ARCH_STRING_C 1 + +#include +#include + +/* + * Helper functions to find the end of a string + */ +static inline char *__strend(const char *s) +{ + register unsigned long r0 asm("0") = 0; + + asm volatile ("0: srst %0,%1\n" + " jo 0b" + : "+d" (r0), "+a" (s) : : "cc" ); + return (char *) r0; +} + +static inline char *__strnend(const char *s, size_t n) +{ + register unsigned long r0 asm("0") = 0; + const char *p = s + n; + + asm volatile ("0: srst %0,%1\n" + " jo 0b" + : "+d" (p), "+a" (s) : "d" (r0) : "cc" ); + return (char *) p; +} + +/** + * strlen - Find the length of a string + * @s: The string to be sized + * + * returns the length of @s + */ +size_t strlen(const char *s) +{ + return __strend(s) - s; +} +EXPORT_SYMBOL_NOVERS(strlen); + +/** + * strnlen - Find the length of a length-limited string + * @s: The string to be sized + * @n: The maximum number of bytes to search + * + * returns the minimum of the length of @s and @n + */ +size_t strnlen(const char * s, size_t n) +{ + return __strnend(s, n) - s; +} +EXPORT_SYMBOL_NOVERS(strnlen); + +/** + * strcpy - Copy a %NUL terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * + * returns a pointer to @dest + */ +char *strcpy(char *dest, const char *src) +{ + register int r0 asm("0") = 0; + char *ret = dest; + + asm volatile ("0: mvst %0,%1\n" + " jo 0b" + : "+&a" (dest), "+&a" (src) : "d" (r0) + : "cc", "memory" ); + return ret; +} +EXPORT_SYMBOL_NOVERS(strcpy); + +/** + * strlcpy - Copy a %NUL terminated string into a sized 
buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @size: size of destination buffer + * + * Compatible with *BSD: the result is always a valid + * NUL-terminated string that fits in the buffer (unless, + * of course, the buffer size is zero). It does not pad + * out the result like strncpy() does. + */ +size_t strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = __strend(src) - src; + + if (size) { + size_t len = (ret >= size) ? size-1 : ret; + dest[len] = '\0'; + __builtin_memcpy(dest, src, len); + } + return ret; +} +EXPORT_SYMBOL_NOVERS(strlcpy); + +/** + * strncpy - Copy a length-limited, %NUL-terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @n: The maximum number of bytes to copy + * + * The result is not %NUL-terminated if the source exceeds + * @n bytes. + */ +char *strncpy(char *dest, const char *src, size_t n) +{ + size_t len = __strnend(src, n) - src; + __builtin_memset(dest + len, 0, n - len); + __builtin_memcpy(dest, src, len); + return dest; +} +EXPORT_SYMBOL_NOVERS(strncpy); + +/** + * strcat - Append one %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * + * returns a pointer to @dest + */ +char *strcat(char *dest, const char *src) +{ + register int r0 asm("0") = 0; + unsigned long dummy; + char *ret = dest; + + asm volatile ("0: srst %0,%1\n" + " jo 0b\n" + "1: mvst %0,%2\n" + " jo 1b" + : "=&a" (dummy), "+a" (dest), "+a" (src) + : "d" (r0), "0" (0UL) : "cc", "memory" ); + return ret; +} +EXPORT_SYMBOL_NOVERS(strcat); + +/** + * strlcat - Append a length-limited, %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * @n: The size of the destination buffer. + */ +size_t strlcat(char *dest, const char *src, size_t n) +{ + size_t dsize = __strend(dest) - dest; + size_t len = __strend(src) - src; + size_t res = dsize + len; + + if (dsize < n) { + dest += dsize; + n -= dsize; + if (len >= n) + len = n - 1; + dest[len] = '\0'; + __builtin_memcpy(dest, src, len); + } + return res; +} +EXPORT_SYMBOL_NOVERS(strlcat); + +/** + * strncat - Append a length-limited, %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * @n: The maximum numbers of bytes to copy + * + * returns a pointer to @dest + * + * Note that in contrast to strncpy, strncat ensures the result is + * terminated. 
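+ * For example, strncat(buf, "world", 3) appends the three bytes "wor" and + * then writes the terminating NUL itself.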
+ */ +char *strncat(char *dest, const char *src, size_t n) +{ + size_t len = __strnend(src, n) - src; + char *p = __strend(dest); + + p[len] = '\0'; + __builtin_memcpy(p, src, len); + return dest; +} +EXPORT_SYMBOL_NOVERS(strncat); + +/** + * strcmp - Compare two strings + * @cs: One string + * @ct: Another string + * + * returns 0 if @cs and @ct are equal, + * < 0 if @cs is less than @ct + * > 0 if @cs is greater than @ct + */ +int strcmp(const char *cs, const char *ct) +{ + register int r0 asm("0") = 0; + int ret = 0; + + asm volatile ("0: clst %2,%3\n" + " jo 0b\n" + " je 1f\n" + " ic %0,0(%2)\n" + " ic %1,0(%3)\n" + " sr %0,%1\n" + "1:" + : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct) + : : "cc" ); + return ret; +} +EXPORT_SYMBOL_NOVERS(strcmp); + +/** + * strrchr - Find the last occurrence of a character in a string + * @s: The string to be searched + * @c: The character to search for + */ +char * strrchr(const char * s, int c) +{ + size_t len = __strend(s) - s; + + if (len) + do { + if (s[len] == (char) c) + return (char *) s + len; + } while (--len > 0); + return 0; +} +EXPORT_SYMBOL_NOVERS(strrchr); + +/** + * strstr - Find the first substring in a %NUL terminated string + * @s1: The string to be searched + * @s2: The string to search for + */ +char * strstr(const char * s1,const char * s2) +{ + int l1, l2; + + l2 = __strend(s2) - s2; + if (!l2) + return (char *) s1; + l1 = __strend(s1) - s1; + while (l1-- >= l2) { + register unsigned long r2 asm("2") = (unsigned long) s1; + register unsigned long r3 asm("3") = (unsigned long) l2; + register unsigned long r4 asm("4") = (unsigned long) s2; + register unsigned long r5 asm("5") = (unsigned long) l2; + int cc; + + asm volatile ("0: clcle %1,%3,0\n" + " jo 0b\n" + " ipm %0\n" + " srl %0,28" + : "=&d" (cc), "+a" (r2), "+a" (r3), + "+a" (r4), "+a" (r5) : : "cc" ); + if (!cc) + return (char *) s1; + s1++; + } + return 0; +} +EXPORT_SYMBOL_NOVERS(strstr); + +/** + * memchr - Find a character in an area of memory. + * @s: The memory area + * @c: The byte to search for + * @n: The size of the area. + * + * returns the address of the first occurrence of @c, or %NULL + * if @c is not found + */ +void *memchr(const void *s, int c, size_t n) +{ + register int r0 asm("0") = (char) c; + const void *ret = s + n; + + asm volatile ("0: srst %0,%1\n" + " jo 0b\n" + " jl 1f\n" + " la %0,0\n" + "1:" + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); + return (void *) ret; +} +EXPORT_SYMBOL_NOVERS(memchr); + +/** + * memcmp - Compare two areas of memory + * @cs: One area of memory + * @ct: Another area of memory + * @count: The size of the area. + */ +int memcmp(const void *cs, const void *ct, size_t n) +{ + register unsigned long r2 asm("2") = (unsigned long) cs; + register unsigned long r3 asm("3") = (unsigned long) n; + register unsigned long r4 asm("4") = (unsigned long) ct; + register unsigned long r5 asm("5") = (unsigned long) n; + int ret; + + asm volatile ("0: clcle %1,%3,0\n" + " jo 0b\n" + " ipm %0\n" + " srl %0,28" + : "=&d" (ret), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5) + : : "cc" ); + if (ret) + ret = *(char *) r2 - *(char *) r4; + return ret; +} +EXPORT_SYMBOL_NOVERS(memcmp); + +/** + * memscan - Find a character in an area of memory. + * @s: The memory area + * @c: The byte to search for + * @n: The size of the area. 
+ * + * returns the address of the first occurrence of @c, or 1 byte past + * the area if @c is not found + */ +void *memscan(void *s, int c, size_t n) +{ + register int r0 asm("0") = (char) c; + const void *ret = s + n; + + asm volatile ("0: srst %0,%1\n" + " jo 0b\n" + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); + return (void *) ret; +} +EXPORT_SYMBOL_NOVERS(memscan); + +/** + * memcpy - Copy one area of memory to another + * @dest: Where to copy to + * @src: Where to copy from + * @n: The size of the area. + * + * returns a pointer to @dest + */ +void *memcpy(void *dest, const void *src, size_t n) +{ + return __builtin_memcpy(dest, src, n); +} +EXPORT_SYMBOL_NOVERS(memcpy); + +/** + * bcopy - Copy one area of memory to another + * @src: Where to copy from + * @dest: Where to copy to + * @n: The size of the area. + * + * Note that this is the same as memcpy(), with the arguments reversed. + * memcpy() is the standard, bcopy() is a legacy BSD function. + */ +void bcopy(const void *srcp, void *destp, size_t n) +{ + __builtin_memcpy(destp, srcp, n); +} +EXPORT_SYMBOL_NOVERS(bcopy); + +/** + * memset - Fill a region of memory with the given value + * @s: Pointer to the start of the area. + * @c: The byte to fill the area with + * @n: The size of the area. + * + * returns a pointer to @s + */ +void *memset(void *s, int c, size_t n) +{ + char *xs; + + if (c == 0) + return __builtin_memset(s, 0, n); + + xs = (char *) s; + if (n > 0) + do { + *xs++ = c; + } while (--n > 0); + return s; +} +EXPORT_SYMBOL_NOVERS(memset); + +/* + * missing exports for string functions defined in lib/string.c + */ +EXPORT_SYMBOL_NOVERS(memmove); +EXPORT_SYMBOL_NOVERS(strchr); +EXPORT_SYMBOL_NOVERS(strnchr); +EXPORT_SYMBOL_NOVERS(strncmp); +EXPORT_SYMBOL_NOVERS(strpbrk); diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c new file mode 100644 index 000000000..420dfbafd --- /dev/null +++ b/arch/sparc64/lib/find_bit.c @@ -0,0 +1,125 @@ +#include + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = addr + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < 64) + goto found_first; + if (tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if ((tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (64 - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + __ffs(tmp); +} + +/* find_next_zero_bit() finds the first zero bit in a bit string of length + * 'size' bits, starting the search at bit 'offset'. This is largely based + * on Linus's ALPHA routines, which are pretty portable BTW. 
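+ * For example, given a 128-bit map whose low 100 bits are all set, + * find_next_zero_bit(map, 128, 64) returns 100.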
+ */ + +unsigned long find_next_zero_bit(unsigned long *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = addr + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (64-offset); + if (size < 64) + goto found_first; + if (~tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if (~(tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found_middle: + return result + ffz(tmp); +} + +unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = addr + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if(offset) { + tmp = __swab64p(p++); + tmp |= (~0UL >> (64-offset)); + if(size < 64) + goto found_first; + if(~tmp) + goto found_middle; + size -= 64; + result += 64; + } + while(size & ~63) { + if(~(tmp = __swab64p(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if(!size) + return result; + tmp = __swab64p(p); +found_first: + tmp |= (~0UL << size); + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found_middle: + return result + ffz(tmp); +} diff --git a/arch/sparc64/lib/splock.S b/arch/sparc64/lib/splock.S new file mode 100644 index 000000000..d17a3badd --- /dev/null +++ b/arch/sparc64/lib/splock.S @@ -0,0 +1,23 @@ +/* splock.S: Spinlock primitives too large to inline. + * + * Copyright (C) 2004 David S. Miller (davem@redhat.com) + */ + + .text + .align 64 + + .globl _raw_spin_lock_flags +_raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */ +1: ldstub [%o0], %g7 + brnz,pn %g7, 2f + membar #StoreLoad | #StoreStore + retl + nop + +2: rdpr %pil, %g2 ! Save PIL + wrpr %o1, %pil ! Set previous PIL +3: ldub [%o0], %g7 ! Spin on lock set + brnz,pt %g7, 3b + membar #LoadLoad + ba,pt %xcc, 1b ! Retry lock acquire + wrpr %g2, %pil ! Restore PIL diff --git a/arch/x86_64/kernel/domain.c b/arch/x86_64/kernel/domain.c new file mode 100644 index 000000000..0694958c7 --- /dev/null +++ b/arch/x86_64/kernel/domain.c @@ -0,0 +1,93 @@ +#include +#include + +/* Don't do any NUMA setup on Opteron right now. They seem to be + better off with flat scheduling. This is just for SMT. 
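+ Below, each cpu gets a sibling domain spanning its hyperthread siblings, + with a physical domain spanning cpu_possible_map layered above it.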
*/ + +#ifdef CONFIG_SCHED_SMT + +static struct sched_group sched_group_cpus[NR_CPUS]; +static struct sched_group sched_group_phys[NR_CPUS]; +static DEFINE_PER_CPU(struct sched_domain, cpu_domains); +static DEFINE_PER_CPU(struct sched_domain, phys_domains); +__init void arch_init_sched_domains(void) +{ + int i; + struct sched_group *first = NULL, *last = NULL; + + /* Set up domains */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + struct sched_domain *phys_domain = &per_cpu(phys_domains, i); + + *cpu_domain = SD_SIBLING_INIT; + /* Disable SMT NICE for CMP */ + /* RED-PEN use a generic flag */ + if (cpu_data[i].x86_vendor == X86_VENDOR_AMD) + cpu_domain->flags &= ~SD_SHARE_CPUPOWER; + cpu_domain->span = cpu_sibling_map[i]; + cpu_domain->parent = phys_domain; + cpu_domain->groups = &sched_group_cpus[i]; + + *phys_domain = SD_CPU_INIT; + phys_domain->span = cpu_possible_map; + phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)]; + } + + /* Set up CPU (sibling) groups */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + int j; + first = last = NULL; + + if (i != first_cpu(cpu_domain->span)) + continue; + + for_each_cpu_mask(j, cpu_domain->span) { + struct sched_group *cpu = &sched_group_cpus[j]; + + cpus_clear(cpu->cpumask); + cpu_set(j, cpu->cpumask); + cpu->cpu_power = SCHED_LOAD_SCALE; + + if (!first) + first = cpu; + if (last) + last->next = cpu; + last = cpu; + } + last->next = first; + } + + first = last = NULL; + /* Set up physical groups */ + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + struct sched_group *cpu = &sched_group_phys[i]; + + if (i != first_cpu(cpu_domain->span)) + continue; + + cpu->cpumask = cpu_domain->span; + /* + * Make each extra sibling increase power by 10% of + * the basic CPU. This is very arbitrary. + */ + cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10; + + if (!first) + first = cpu; + if (last) + last->next = cpu; + last = cpu; + } + last->next = first; + + mb(); + for_each_cpu(i) { + struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i); + cpu_attach_domain(cpu_domain, i); + } +} + +#endif diff --git a/drivers/char/drm/drm_irq.h b/drivers/char/drm/drm_irq.h new file mode 100644 index 000000000..1d1d95116 --- /dev/null +++ b/drivers/char/drm/drm_irq.h @@ -0,0 +1,371 @@ +/** + * \file drm_irq.h + * IRQ support + * + * \author Rickard E. (Rik) Faith + * \author Gareth Hughes + */ + +/* + * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" + +#include /* For task queue support */ + +#ifndef __HAVE_SHARED_IRQ +#define __HAVE_SHARED_IRQ 0 +#endif + +#if __HAVE_SHARED_IRQ +#define DRM_IRQ_TYPE SA_SHIRQ +#else +#define DRM_IRQ_TYPE 0 +#endif + +/** + * Get interrupt from bus id. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_irq_busid structure. + * \return zero on success or a negative number on failure. + * + * Finds the PCI device with the specified bus id and gets its IRQ number. + * This IOCTL is deprecated, and will now return EINVAL for any busid not equal + * to that of the device that this DRM instance attached to. + */ +int DRM(irq_by_busid)(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_irq_busid_t p; + + if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p))) + return -EFAULT; + + if ((p.busnum >> 8) != dev->pci_domain || + (p.busnum & 0xff) != dev->pci_bus || + p.devnum != dev->pci_slot || + p.funcnum != dev->pci_func) + return -EINVAL; + + p.irq = dev->irq; + + DRM_DEBUG("%d:%d:%d => IRQ %d\n", + p.busnum, p.devnum, p.funcnum, p.irq); + if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p))) + return -EFAULT; + return 0; +} + +#if __HAVE_IRQ + +/** + * Install IRQ handler. + * + * \param dev DRM device. + * \param irq IRQ number. + * + * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver + * \c DRM(driver_irq_preinstall)() and \c DRM(driver_irq_postinstall)() functions + * before and after the installation. + */ +int DRM(irq_install)( drm_device_t *dev ) +{ + int ret; + + if ( dev->irq == 0 ) + return -EINVAL; + + down( &dev->struct_sem ); + + /* Driver must have been initialized */ + if ( !dev->dev_private ) { + up( &dev->struct_sem ); + return -EINVAL; + } + + if ( dev->irq_enabled ) { + up( &dev->struct_sem ); + return -EBUSY; + } + dev->irq_enabled = 1; + up( &dev->struct_sem ); + + DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq ); + +#if __HAVE_DMA + dev->dma->next_buffer = NULL; + dev->dma->next_queue = NULL; + dev->dma->this_buffer = NULL; +#endif + +#if __HAVE_IRQ_BH + INIT_WORK(&dev->work, DRM(irq_immediate_bh), dev); +#endif + +#if __HAVE_VBL_IRQ + init_waitqueue_head(&dev->vbl_queue); + + spin_lock_init( &dev->vbl_lock ); + + INIT_LIST_HEAD( &dev->vbl_sigs.head ); + + dev->vbl_pending = 0; +#endif + + /* Before installing handler */ + DRM(driver_irq_preinstall)(dev); + + /* Install handler */ + ret = request_irq( dev->irq, DRM(irq_handler), + DRM_IRQ_TYPE, dev->devname, dev ); + if ( ret < 0 ) { + down( &dev->struct_sem ); + dev->irq_enabled = 0; + up( &dev->struct_sem ); + return ret; + } + + /* After installing handler */ + DRM(driver_irq_postinstall)(dev); + + return 0; +} + +/** + * Uninstall the IRQ handler. + * + * \param dev DRM device. 
+ * + * Calls the driver's \c DRM(driver_irq_uninstall)() function, and stops the irq. + */ +int DRM(irq_uninstall)( drm_device_t *dev ) +{ + int irq_enabled; + + down( &dev->struct_sem ); + irq_enabled = dev->irq_enabled; + dev->irq_enabled = 0; + up( &dev->struct_sem ); + + if ( !irq_enabled ) + return -EINVAL; + + DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq ); + + DRM(driver_irq_uninstall)( dev ); + + free_irq( dev->irq, dev ); + + return 0; +} + +/** + * IRQ control ioctl. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_control structure. + * \return zero on success or a negative number on failure. + * + * Calls irq_install() or irq_uninstall() according to \p arg. + */ +int DRM(control)( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_control_t ctl; + + if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) ) + return -EFAULT; + + switch ( ctl.func ) { + case DRM_INST_HANDLER: + if (dev->if_version < DRM_IF_VERSION(1, 2) && + ctl.irq != dev->irq) + return -EINVAL; + return DRM(irq_install)( dev ); + case DRM_UNINST_HANDLER: + return DRM(irq_uninstall)( dev ); + default: + return -EINVAL; + } +} + +#if __HAVE_VBL_IRQ + +/** + * Wait for VBLANK. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param data user argument, pointing to a drm_wait_vblank structure. + * \return zero on success or a negative number on failure. + * + * Verifies the IRQ is installed. + * + * If a signal is requested checks if this task has already scheduled the same signal + * for the same vblank sequence number - nothing to be done in + * that case. If the number of tasks waiting for the interrupt exceeds 100 the + * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this + * task. + * + * If a signal is not requested, then calls vblank_wait(). 
+ */ +int DRM(wait_vblank)( DRM_IOCTL_ARGS ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_wait_vblank_t vblwait; + struct timeval now; + int ret = 0; + unsigned int flags; + + if (!dev->irq) + return -EINVAL; + + DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data, + sizeof(vblwait) ); + + switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) { + case _DRM_VBLANK_RELATIVE: + vblwait.request.sequence += atomic_read( &dev->vbl_received ); + vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; + case _DRM_VBLANK_ABSOLUTE: + break; + default: + return -EINVAL; + } + + flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + + if ( flags & _DRM_VBLANK_SIGNAL ) { + unsigned long irqflags; + drm_vbl_sig_t *vbl_sig; + + vblwait.reply.sequence = atomic_read( &dev->vbl_received ); + + spin_lock_irqsave( &dev->vbl_lock, irqflags ); + + /* Check if this task has already scheduled the same signal + * for the same vblank sequence number; nothing to be done in + * that case + */ + list_for_each_entry( vbl_sig, &dev->vbl_sigs.head, head ) { + if (vbl_sig->sequence == vblwait.request.sequence + && vbl_sig->info.si_signo == vblwait.request.signal + && vbl_sig->task == current) + { + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + goto done; + } + } + + if ( dev->vbl_pending >= 100 ) { + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + return -EBUSY; + } + + dev->vbl_pending++; + + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + + if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) { + return -ENOMEM; + } + + memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) ); + + vbl_sig->sequence = vblwait.request.sequence; + vbl_sig->info.si_signo = vblwait.request.signal; + vbl_sig->task = current; + + spin_lock_irqsave( &dev->vbl_lock, irqflags ); + + list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head ); + + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + } else { + ret = DRM(vblank_wait)( dev, &vblwait.request.sequence ); + + do_gettimeofday( &now ); + vblwait.reply.tval_sec = now.tv_sec; + vblwait.reply.tval_usec = now.tv_usec; + } + +done: + DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait, + sizeof(vblwait) ); + + return ret; +} + +/** + * Send the VBLANK signals. + * + * \param dev DRM device. + * + * Sends a signal for each task in drm_device::vbl_sigs and empties the list. + * + * If a signal is not requested, then calls vblank_wait(). 
+ */ +void DRM(vbl_send_signals)( drm_device_t *dev ) +{ + struct list_head *list, *tmp; + drm_vbl_sig_t *vbl_sig; + unsigned int vbl_seq = atomic_read( &dev->vbl_received ); + unsigned long flags; + + spin_lock_irqsave( &dev->vbl_lock, flags ); + + list_for_each_safe( list, tmp, &dev->vbl_sigs.head ) { + vbl_sig = list_entry( list, drm_vbl_sig_t, head ); + if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) { + vbl_sig->info.si_code = vbl_seq; + send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task ); + + list_del( list ); + + DRM_FREE( vbl_sig, sizeof(*vbl_sig) ); + + dev->vbl_pending--; + } + } + + spin_unlock_irqrestore( &dev->vbl_lock, flags ); +} + +#endif /* __HAVE_VBL_IRQ */ + +#endif /* __HAVE_IRQ */ diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h new file mode 100644 index 000000000..3b5f8d3a2 --- /dev/null +++ b/drivers/char/drm/drm_pciids.h @@ -0,0 +1,203 @@ +/* + This file is auto-generated from the drm_pciids.txt in the DRM CVS + Please contact dri-devel@lists.sf.net to add new cards to this list +*/ +#define radeon_PCI_IDS \ + {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5837, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5963, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5968, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define r128_PCI_IDS \ + {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define mga_PCI_IDS \ + {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define mach64_PCI_IDS \ + {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + 
{0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define sisdrv_PCI_IDS \ + {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define tdfx_PCI_IDS \ + {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define viadrv_PCI_IDS \ + {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define i810_PCI_IDS \ + {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define i830_PCI_IDS \ + {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define gamma_PCI_IDS \ + {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define savage_PCI_IDS \ + {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define ffb_PCI_IDS \ + {0, 0, 0} + diff --git a/drivers/char/watchdog/ixp4xx_wdt.c b/drivers/char/watchdog/ixp4xx_wdt.c new file mode 100644 index 000000000..79493650f --- /dev/null +++ 
b/drivers/char/watchdog/ixp4xx_wdt.c @@ -0,0 +1,233 @@ +/* + * drivers/watchdog/ixp4xx_wdt.c + * + * Watchdog driver for Intel IXP4xx network processors + * + * Author: Deepak Saxena + * + * Copyright 2004 (c) MontaVista, Software, Inc. + * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_WATCHDOG_NOWAYOUT +static int nowayout = 1; +#else +static int nowayout = 0; +#endif +static int heartbeat = 60; /* (secs) Default is 1 minute */ +static unsigned long wdt_status; +static unsigned long boot_status; + +#define WDT_TICK_RATE (IXP4XX_PERIPHERAL_BUS_CLOCK * 1000000UL) + +#define WDT_IN_USE 0 +#define WDT_OK_TO_CLOSE 1 + +static void +wdt_enable(void) +{ + *IXP4XX_OSWK = IXP4XX_WDT_KEY; + *IXP4XX_OSWE = 0; + *IXP4XX_OSWT = WDT_TICK_RATE * heartbeat; + *IXP4XX_OSWE = IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE; + *IXP4XX_OSWK = 0; +} + +static void +wdt_disable(void) +{ + *IXP4XX_OSWK = IXP4XX_WDT_KEY; + *IXP4XX_OSWE = 0; + *IXP4XX_OSWK = 0; +} + +static int +ixp4xx_wdt_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(WDT_IN_USE, &wdt_status)) + return -EBUSY; + + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + wdt_enable(); + + return 0; +} + +static ssize_t +ixp4xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) +{ + /* Can't seek (pwrite) on this device */ + if (ppos != &file->f_pos) + return -ESPIPE; + + if (len) { + if (!nowayout) { + size_t i; + + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + for (i = 0; i != len; i++) { + char c; + + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + set_bit(WDT_OK_TO_CLOSE, &wdt_status); + } + } + wdt_enable(); + } + + return len; +} + +static struct watchdog_info ident = { + .options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE | + WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .identity = "IXP4xx Watchdog", +}; + + +static int +ixp4xx_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -ENOIOCTLCMD; + int time; + + switch (cmd) { + case WDIOC_GETSUPPORT: + ret = copy_to_user((struct watchdog_info *)arg, &ident, + sizeof(ident)) ? 
-EFAULT : 0; + break; + + case WDIOC_GETSTATUS: + ret = put_user(0, (int *)arg); + break; + + case WDIOC_GETBOOTSTATUS: + ret = put_user(boot_status, (int *)arg); + break; + + case WDIOC_SETTIMEOUT: + ret = get_user(time, (int *)arg); + if (ret) + break; + + if (time <= 0 || time > 60) { + ret = -EINVAL; + break; + } + + heartbeat = time; + wdt_enable(); + /* Fall through */ + + case WDIOC_GETTIMEOUT: + ret = put_user(heartbeat, (int *)arg); + break; + + case WDIOC_KEEPALIVE: + wdt_enable(); + ret = 0; + break; + } + return ret; +} + +static int +ixp4xx_wdt_release(struct inode *inode, struct file *file) +{ + if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) { + wdt_disable(); + } else { + printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " + "timer will not stop\n"); + } + + clear_bit(WDT_IN_USE, &wdt_status); + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + return 0; +} + + +static struct file_operations ixp4xx_wdt_fops = +{ + .owner = THIS_MODULE, + .write = ixp4xx_wdt_write, + .ioctl = ixp4xx_wdt_ioctl, + .open = ixp4xx_wdt_open, + .release = ixp4xx_wdt_release, +}; + +static struct miscdevice ixp4xx_wdt_miscdev = +{ + .minor = WATCHDOG_MINOR, + .name = "IXP4xx Watchdog", + .fops = &ixp4xx_wdt_fops, +}; + +static int __init ixp4xx_wdt_init(void) +{ + int ret; + unsigned long processor_id; + + asm("mrc p15, 0, %0, cr0, cr0, 0;" : "=r"(processor_id) :); + if (!(processor_id & 0xf)) { + printk("IXP4xx Watchdog: Rev. A0 CPU detected - " + "watchdog disabled\n"); + + return -ENODEV; + } + + ret = misc_register(&ixp4xx_wdt_miscdev); + if (ret == 0) + printk("IXP4xx Watchdog Timer: heartbeat %d sec\n", heartbeat); + + boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ? + WDIOF_CARDRESET : 0; + + return ret; +} + +static void __exit ixp4xx_wdt_exit(void) +{ + misc_deregister(&ixp4xx_wdt_miscdev); +} + + +module_init(ixp4xx_wdt_init); +module_exit(ixp4xx_wdt_exit); + +MODULE_AUTHOR("Deepak Saxena"); +MODULE_DESCRIPTION("IXP4xx Network Processor Watchdog"); + +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 60s)"); + +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); + diff --git a/drivers/i2c/busses/i2c-ixp4xx.c b/drivers/i2c/busses/i2c-ixp4xx.c new file mode 100644 index 000000000..d8bfd59c1 --- /dev/null +++ b/drivers/i2c/busses/i2c-ixp4xx.c @@ -0,0 +1,181 @@ +/* + * drivers/i2c/i2c-adap-ixp4xx.c + * + * Intel's IXP4xx XScale NPU chipsets (IXP420, 421, 422, 425) do not have + * an on-board I2C controller but provide 16 GPIO pins that are often + * used to create an I2C bus. This driver provides an i2c_adapter + * interface that plugs in under algo_bit and drives the GPIO pins + * as instructed by the algorithm driver. + * + * Author: Deepak Saxena + * + * Copyright (c) 2003-2004 MontaVista Software Inc. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * NOTE: Since different platforms will use different GPIO pins for + * I2C, this driver uses an IXP4xx-specific platform_data + * pointer to pass the GPIO numbers to the driver. This + * allows us to support all the different IXP4xx platforms + * w/o having to put #ifdefs in this driver.
+ * + * See arch/arm/mach-ixp4xx/ixdp425.c for an example of building a + * device list and filling in the ixp4xx_i2c_pins data structure + * that is passed as the platform_data to this driver. + */ + +#include +#ifdef CONFIG_I2C_DEBUG_BUS +#define DEBUG 1 +#endif + +#include +#include +#include +#include +#include +#include + +#include /* Pick up IXP4xx-specific bits */ + +static inline int ixp4xx_scl_pin(void *data) +{ + return ((struct ixp4xx_i2c_pins*)data)->scl_pin; +} + +static inline int ixp4xx_sda_pin(void *data) +{ + return ((struct ixp4xx_i2c_pins*)data)->sda_pin; +} + +static void ixp4xx_bit_setscl(void *data, int val) +{ + gpio_line_set(ixp4xx_scl_pin(data), 0); + gpio_line_config(ixp4xx_scl_pin(data), + val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT ); +} + +static void ixp4xx_bit_setsda(void *data, int val) +{ + gpio_line_set(ixp4xx_sda_pin(data), 0); + gpio_line_config(ixp4xx_sda_pin(data), + val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT ); +} + +static int ixp4xx_bit_getscl(void *data) +{ + int scl; + + gpio_line_config(ixp4xx_scl_pin(data), IXP4XX_GPIO_IN ); + gpio_line_get(ixp4xx_scl_pin(data), &scl); + + return scl; +} + +static int ixp4xx_bit_getsda(void *data) +{ + int sda; + + gpio_line_config(ixp4xx_sda_pin(data), IXP4XX_GPIO_IN ); + gpio_line_get(ixp4xx_sda_pin(data), &sda); + + return sda; +} + +struct ixp4xx_i2c_data { + struct ixp4xx_i2c_pins *gpio_pins; + struct i2c_adapter adapter; + struct i2c_algo_bit_data algo_data; +}; + +static int ixp4xx_i2c_remove(struct device *dev) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct ixp4xx_i2c_data *drv_data = dev_get_drvdata(&plat_dev->dev); + + dev_set_drvdata(&plat_dev->dev, NULL); + + i2c_bit_del_bus(&drv_data->adapter); + + kfree(drv_data); + + return 0; +} + +static int ixp4xx_i2c_probe(struct device *dev) +{ + int err; + struct platform_device *plat_dev = to_platform_device(dev); + struct ixp4xx_i2c_pins *gpio = plat_dev->dev.platform_data; + struct ixp4xx_i2c_data *drv_data = + kmalloc(sizeof(struct ixp4xx_i2c_data), GFP_KERNEL); + + if(!drv_data) + return -ENOMEM; + + memzero(drv_data, sizeof(struct ixp4xx_i2c_data)); + drv_data->gpio_pins = gpio; + + /* + * We could make a lot of these structures static, but + * certain platforms may have multiple GPIO-based I2C + * buses for various device domains, so we need per-device + * algo_data->data. 
+ */ + drv_data->algo_data.data = gpio; + drv_data->algo_data.setsda = ixp4xx_bit_setsda; + drv_data->algo_data.setscl = ixp4xx_bit_setscl; + drv_data->algo_data.getsda = ixp4xx_bit_getsda; + drv_data->algo_data.getscl = ixp4xx_bit_getscl; + drv_data->algo_data.udelay = 10; + drv_data->algo_data.mdelay = 10; + drv_data->algo_data.timeout = 100; + + drv_data->adapter.id = I2C_HW_B_IXP4XX; + drv_data->adapter.algo_data = &drv_data->algo_data; + + drv_data->adapter.dev.parent = &plat_dev->dev; + + gpio_line_config(gpio->scl_pin, IXP4XX_GPIO_IN); + gpio_line_config(gpio->sda_pin, IXP4XX_GPIO_IN); + gpio_line_set(gpio->scl_pin, 0); + gpio_line_set(gpio->sda_pin, 0); + + if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) { + printk(KERN_ERR "ERROR: Could not install %s\n", dev->bus_id); + + kfree(drv_data); + return err; + } + + dev_set_drvdata(&plat_dev->dev, drv_data); + + return 0; +} + +static struct device_driver ixp4xx_i2c_driver = { + .name = "IXP4XX-I2C", + .bus = &platform_bus_type, + .probe = ixp4xx_i2c_probe, + .remove = ixp4xx_i2c_remove, +}; + +static int __init ixp4xx_i2c_init(void) +{ + return driver_register(&ixp4xx_i2c_driver); +} + +static void __exit ixp4xx_i2c_exit(void) +{ + driver_unregister(&ixp4xx_i2c_driver); +} + +module_init(ixp4xx_i2c_init); +module_exit(ixp4xx_i2c_exit); + +MODULE_DESCRIPTION("GPIO-based I2C adapter for IXP4xx systems"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Deepak Saxena "); + diff --git a/drivers/i2c/chips/max1619.c b/drivers/i2c/chips/max1619.c new file mode 100644 index 000000000..0f8a5ac5c --- /dev/null +++ b/drivers/i2c/chips/max1619.c @@ -0,0 +1,378 @@ +/* + * max1619.c - Part of lm_sensors, Linux kernel modules for hardware + * monitoring + * Copyright (C) 2003-2004 Alexey Fisher + * Jean Delvare + * + * Based on the lm90 driver. The MAX1619 is a sensor chip made by Maxim. + * It reports up to two temperatures (its own plus up to + * one external one). Complete datasheet can be + * obtained from Maxim's website at: + * http://pdfserv.maxim-ic.com/en/ds/MAX1619.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */ + + +#include +#include +#include +#include +#include +#include + + +static unsigned short normal_i2c[] = { I2C_CLIENT_END }; +static unsigned short normal_i2c_range[] = { 0x18, 0x1a, 0x29, 0x2b, + 0x4c, 0x4e, I2C_CLIENT_END }; +static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END }; +static unsigned int normal_isa_range[] = { I2C_CLIENT_ISA_END }; + +/* + * Insmod parameters + */ + +SENSORS_INSMOD_1(max1619); + +/* + * The MAX1619 registers + */ + +#define MAX1619_REG_R_MAN_ID 0xFE +#define MAX1619_REG_R_CHIP_ID 0xFF +#define MAX1619_REG_R_CONFIG 0x03 +#define MAX1619_REG_W_CONFIG 0x09 +#define MAX1619_REG_R_CONVRATE 0x04 +#define MAX1619_REG_W_CONVRATE 0x0A +#define MAX1619_REG_R_STATUS 0x02 +#define MAX1619_REG_R_LOCAL_TEMP 0x00 +#define MAX1619_REG_R_REMOTE_TEMP 0x01 +#define MAX1619_REG_R_REMOTE_HIGH 0x07 +#define MAX1619_REG_W_REMOTE_HIGH 0x0D +#define MAX1619_REG_R_REMOTE_LOW 0x08 +#define MAX1619_REG_W_REMOTE_LOW 0x0E +#define MAX1619_REG_R_REMOTE_CRIT 0x10 +#define MAX1619_REG_W_REMOTE_CRIT 0x12 +#define MAX1619_REG_R_TCRIT_HYST 0x11 +#define MAX1619_REG_W_TCRIT_HYST 0x13 + +/* + * Conversions and various macros + */ + +#define TEMP_FROM_REG(val) ((val & 0x80 ? val-0x100 : val) * 1000) +#define TEMP_TO_REG(val) ((val < 0 ? val+0x100*1000 : val) / 1000) + +/* + * Functions declaration + */ + +static int max1619_attach_adapter(struct i2c_adapter *adapter); +static int max1619_detect(struct i2c_adapter *adapter, int address, + int kind); +static void max1619_init_client(struct i2c_client *client); +static int max1619_detach_client(struct i2c_client *client); +static struct max1619_data *max1619_update_device(struct device *dev); + +/* + * Driver data (common to all clients) + */ + +static struct i2c_driver max1619_driver = { + .owner = THIS_MODULE, + .name = "max1619", + .flags = I2C_DF_NOTIFY, + .attach_adapter = max1619_attach_adapter, + .detach_client = max1619_detach_client, +}; + +/* + * Client data (each client gets its own) + */ + +struct max1619_data { + struct i2c_client client; + struct semaphore update_lock; + char valid; /* zero until following fields are valid */ + unsigned long last_updated; /* in jiffies */ + + /* registers values */ + u8 temp_input1; /* local */ + u8 temp_input2, temp_low2, temp_high2; /* remote */ + u8 temp_crit2; + u8 temp_hyst2; + u8 alarms; +}; + +/* + * Internal variables + */ + +static int max1619_id = 0; + +/* + * Sysfs stuff + */ + +#define show_temp(value) \ +static ssize_t show_##value(struct device *dev, char *buf) \ +{ \ + struct max1619_data *data = max1619_update_device(dev); \ + return sprintf(buf, "%d\n", TEMP_FROM_REG(data->value)); \ +} +show_temp(temp_input1); +show_temp(temp_input2); +show_temp(temp_low2); +show_temp(temp_high2); +show_temp(temp_crit2); +show_temp(temp_hyst2); + +#define set_temp2(value, reg) \ +static ssize_t set_##value(struct device *dev, const char *buf, \ + size_t count) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct max1619_data *data = i2c_get_clientdata(client); \ + data->value = TEMP_TO_REG(simple_strtol(buf, NULL, 10)); \ + i2c_smbus_write_byte_data(client, reg, data->value); \ + return count; \ +} + +set_temp2(temp_low2, MAX1619_REG_W_REMOTE_LOW); +set_temp2(temp_high2, MAX1619_REG_W_REMOTE_HIGH); +set_temp2(temp_crit2, MAX1619_REG_W_REMOTE_CRIT); +set_temp2(temp_hyst2, MAX1619_REG_W_TCRIT_HYST); + +static ssize_t show_alarms(struct device *dev, char *buf) +{ + struct max1619_data *data = max1619_update_device(dev); + return sprintf(buf, "%d\n", data->alarms); +} + +static 
DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL); +static DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input2, NULL); +static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_low2, + set_temp_low2); +static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_high2, + set_temp_high2); +static DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit2, + set_temp_crit2); +static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp_hyst2, + set_temp_hyst2); +static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); + +/* + * Real code + */ + +static int max1619_attach_adapter(struct i2c_adapter *adapter) +{ + if (!(adapter->class & I2C_CLASS_HWMON)) + return 0; + return i2c_detect(adapter, &addr_data, max1619_detect); +} + +/* + * The following function does more than just detection. If detection + * succeeds, it also registers the new chip. + */ +static int max1619_detect(struct i2c_adapter *adapter, int address, int kind) +{ + struct i2c_client *new_client; + struct max1619_data *data; + int err = 0; + const char *name = ""; + u8 reg_config=0, reg_convrate=0, reg_status=0; + u8 man_id, chip_id; + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + goto exit; + + if (!(data = kmalloc(sizeof(struct max1619_data), GFP_KERNEL))) { + err = -ENOMEM; + goto exit; + } + memset(data, 0, sizeof(struct max1619_data)); + + /* The common I2C client data is placed right before the + MAX1619-specific data. */ + new_client = &data->client; + i2c_set_clientdata(new_client, data); + new_client->addr = address; + new_client->adapter = adapter; + new_client->driver = &max1619_driver; + new_client->flags = 0; + + /* + * Now we do the remaining detection. A negative kind means that + * the driver was loaded with no force parameter (default), so we + * must both detect and identify the chip. A zero kind means that + * the driver was loaded with the force parameter, the detection + * step shall be skipped. A positive kind means that the driver + * was loaded with the force parameter and a given kind of chip is + * requested, so both the detection and the identification steps + * are skipped. 
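For reference, not part of the patch: the TEMP_FROM_REG()/TEMP_TO_REG() macros defined earlier in this file treat the 8-bit register value as two's complement and scale it to millidegrees Celsius. A minimal standalone C sketch of the same arithmetic, with made-up sample values:

#include <stdio.h>

/* 8-bit two's-complement register value -> millidegrees Celsius,
 * mirroring TEMP_FROM_REG() above (illustrative only) */
static int temp_from_reg(unsigned char val)
{
	return (val & 0x80 ? (int)val - 0x100 : (int)val) * 1000;
}

/* millidegrees Celsius -> 8-bit register value, mirroring TEMP_TO_REG() */
static unsigned char temp_to_reg(int mdeg)
{
	return (mdeg < 0 ? mdeg + 0x100 * 1000 : mdeg) / 1000;
}

int main(void)
{
	printf("%d\n", temp_from_reg(0x7f));     /* 127000, i.e. +127 C */
	printf("%d\n", temp_from_reg(0xe7));     /* -25000, i.e.  -25 C */
	printf("0x%02x\n", temp_to_reg(-25000)); /* 0xe7 */
	return 0;
}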
+ */ + if (kind < 0) { /* detection */ + reg_config = i2c_smbus_read_byte_data(new_client, + MAX1619_REG_R_CONFIG); + reg_convrate = i2c_smbus_read_byte_data(new_client, + MAX1619_REG_R_CONVRATE); + reg_status = i2c_smbus_read_byte_data(new_client, + MAX1619_REG_R_STATUS); + if ((reg_config & 0x03) != 0x00 + || reg_convrate > 0x07 || (reg_status & 0x61 ) !=0x00) { + dev_dbg(&adapter->dev, + "MAX1619 detection failed at 0x%02x.\n", + address); + goto exit_free; + } + } + + if (kind <= 0) { /* identification */ + + man_id = i2c_smbus_read_byte_data(new_client, + MAX1619_REG_R_MAN_ID); + chip_id = i2c_smbus_read_byte_data(new_client, + MAX1619_REG_R_CHIP_ID); + + if ((man_id == 0x4D) && (chip_id == 0x04)){ + kind = max1619; + } + } + + if (kind <= 0) { /* identification failed */ + dev_info(&adapter->dev, + "Unsupported chip (man_id=0x%02X, " + "chip_id=0x%02X).\n", man_id, chip_id); + goto exit_free; + } + + + if (kind == max1619){ + name = "max1619"; + } + + /* We can fill in the remaining client fields */ + strlcpy(new_client->name, name, I2C_NAME_SIZE); + new_client->id = max1619_id++; + data->valid = 0; + init_MUTEX(&data->update_lock); + + /* Tell the I2C layer a new client has arrived */ + if ((err = i2c_attach_client(new_client))) + goto exit_free; + + /* Initialize the MAX1619 chip */ + max1619_init_client(new_client); + + /* Register sysfs hooks */ + device_create_file(&new_client->dev, &dev_attr_temp1_input); + device_create_file(&new_client->dev, &dev_attr_temp2_input); + device_create_file(&new_client->dev, &dev_attr_temp2_min); + device_create_file(&new_client->dev, &dev_attr_temp2_max); + device_create_file(&new_client->dev, &dev_attr_temp2_crit); + device_create_file(&new_client->dev, &dev_attr_temp2_crit_hyst); + device_create_file(&new_client->dev, &dev_attr_alarms); + + return 0; + +exit_free: + kfree(data); +exit: + return err; +} + +static void max1619_init_client(struct i2c_client *client) +{ + u8 config; + + /* + * Start the conversions. 
+ */ + i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONVRATE, + 5); /* 2 Hz */ + config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG); + if (config & 0x40) + i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONFIG, + config & 0xBF); /* run */ +} + +static int max1619_detach_client(struct i2c_client *client) +{ + int err; + + if ((err = i2c_detach_client(client))) { + dev_err(&client->dev, "Client deregistration failed, " + "client not detached.\n"); + return err; + } + + kfree(i2c_get_clientdata(client)); + return 0; +} + +static struct max1619_data *max1619_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max1619_data *data = i2c_get_clientdata(client); + + down(&data->update_lock); + + if ((jiffies - data->last_updated > HZ * 2) || + (jiffies < data->last_updated) || + !data->valid) { + + dev_dbg(&client->dev, "Updating max1619 data.\n"); + data->temp_input1 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_LOCAL_TEMP); + data->temp_input2 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_REMOTE_TEMP); + data->temp_high2 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_REMOTE_HIGH); + data->temp_low2 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_REMOTE_LOW); + data->temp_crit2 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_REMOTE_CRIT); + data->temp_hyst2 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_TCRIT_HYST); + data->alarms = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_STATUS); + + data->last_updated = jiffies; + data->valid = 1; + } + + up(&data->update_lock); + + return data; +} + +static int __init sensors_max1619_init(void) +{ + return i2c_add_driver(&max1619_driver); +} + +static void __exit sensors_max1619_exit(void) +{ + i2c_del_driver(&max1619_driver); +} + +MODULE_AUTHOR("Alexey Fisher and" + "Jean Delvare "); +MODULE_DESCRIPTION("MAX1619 sensor driver"); +MODULE_LICENSE("GPL"); + +module_init(sensors_max1619_init); +module_exit(sensors_max1619_exit); diff --git a/drivers/i2c/chips/rtc8564.c b/drivers/i2c/chips/rtc8564.c new file mode 100644 index 000000000..0fa55d45e --- /dev/null +++ b/drivers/i2c/chips/rtc8564.c @@ -0,0 +1,396 @@ +/* + * linux/drivers/i2c/chips/rtc8564.c + * + * Copyright (C) 2002-2004 Stefan Eletzhofer + * + * based on linux/drivers/acron/char/pcf8583.c + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Driver for system3's EPSON RTC 8564 chip + */ +#include +#include +#include +#include +#include +#include /* get the user-level API */ +#include +#include + +#include "rtc8564.h" + +#ifdef DEBUG +# define _DBG(x, fmt, args...) do{ if (debug>=x) printk(KERN_DEBUG"%s: " fmt "\n", __FUNCTION__, ##args); } while(0); +#else +# define _DBG(x, fmt, args...) 
do { } while(0); +#endif + +#define _DBGRTCTM(x, rtctm) if (debug>=x) printk("%s: secs=%d, mins=%d, hours=%d, mday=%d, " \ + "mon=%d, year=%d, wday=%d VL=%d\n", __FUNCTION__, \ + (rtctm).secs, (rtctm).mins, (rtctm).hours, (rtctm).mday, \ + (rtctm).mon, (rtctm).year, (rtctm).wday, (rtctm).vl); + +struct rtc8564_data { + struct i2c_client client; + u16 ctrl; +}; + +static inline u8 _rtc8564_ctrl1(struct i2c_client *client) +{ + struct rtc8564_data *data = i2c_get_clientdata(client); + return data->ctrl & 0xff; +} +static inline u8 _rtc8564_ctrl2(struct i2c_client *client) +{ + struct rtc8564_data *data = i2c_get_clientdata(client); + return (data->ctrl & 0xff00) >> 8; +} + +#define CTRL1(c) _rtc8564_ctrl1(c) +#define CTRL2(c) _rtc8564_ctrl2(c) + +#define BCD_TO_BIN(val) (((val)&15) + ((val)>>4)*10) +#define BIN_TO_BCD(val) ((((val)/10)<<4) + (val)%10) + +static int debug = 0; +MODULE_PARM(debug, "i"); + +static struct i2c_driver rtc8564_driver; + +static unsigned short ignore[] = { I2C_CLIENT_END }; +static unsigned short normal_addr[] = { 0x51, I2C_CLIENT_END }; + +static struct i2c_client_address_data addr_data = { + .normal_i2c = normal_addr, + .normal_i2c_range = ignore, + .probe = ignore, + .probe_range = ignore, + .ignore = ignore, + .ignore_range = ignore, + .force = ignore, +}; + +static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem); +static int rtc8564_write_mem(struct i2c_client *client, struct mem *mem); + +static int rtc8564_read(struct i2c_client *client, unsigned char adr, + unsigned char *buf, unsigned char len) +{ + int ret = -EIO; + unsigned char addr[1] = { adr }; + struct i2c_msg msgs[2] = { + {client->addr, 0, 1, addr}, + {client->addr, I2C_M_RD, len, buf} + }; + + _DBG(1, "client=%p, adr=%d, buf=%p, len=%d", client, adr, buf, len); + + if (!buf || !client) { + ret = -EINVAL; + goto done; + } + + ret = i2c_transfer(client->adapter, msgs, 2); + if (ret == 2) { + ret = 0; + } + +done: + return ret; +} + +static int rtc8564_write(struct i2c_client *client, unsigned char adr, + unsigned char *data, unsigned char len) +{ + int ret = 0; + unsigned char _data[16]; + struct i2c_msg wr; + int i; + + if (!client || !data || len > 15) { + ret = -EINVAL; + goto done; + } + + _DBG(1, "client=%p, adr=%d, buf=%p, len=%d", client, adr, data, len); + + _data[0] = adr; + for (i = 0; i < len; i++) { + _data[i + 1] = data[i]; + _DBG(5, "data[%d] = 0x%02x (%d)", i, data[i], data[i]); + } + + wr.addr = client->addr; + wr.flags = 0; + wr.len = len + 1; + wr.buf = _data; + + ret = i2c_transfer(client->adapter, &wr, 1); + if (ret == 1) { + ret = 0; + } + +done: + return ret; +} + +static int rtc8564_attach(struct i2c_adapter *adap, int addr, int kind) +{ + int ret; + struct i2c_client *new_client; + struct rtc8564_data *d; + unsigned char data[10]; + unsigned char ad[1] = { 0 }; + struct i2c_msg ctrl_wr[1] = { + {addr, 0, 2, data} + }; + struct i2c_msg ctrl_rd[2] = { + {addr, 0, 1, ad}, + {addr, I2C_M_RD, 2, data} + }; + + d = kmalloc(sizeof(struct rtc8564_data), GFP_KERNEL); + if (!d) { + ret = -ENOMEM; + goto done; + } + memset(d, 0, sizeof(struct rtc8564_data)); + new_client = &d->client; + + strlcpy(new_client->name, "RTC8564", I2C_NAME_SIZE); + i2c_set_clientdata(new_client, d); + new_client->id = rtc8564_driver.id; + new_client->flags = I2C_CLIENT_ALLOW_USE | I2C_DF_NOTIFY; + new_client->addr = addr; + new_client->adapter = adap; + new_client->driver = &rtc8564_driver; + + _DBG(1, "client=%p", new_client); + _DBG(1, "client.id=%d", new_client->id); + + /* init ctrl1 reg 
*/ + data[0] = 0; + data[1] = 0; + ret = i2c_transfer(new_client->adapter, ctrl_wr, 1); + if (ret != 1) { + printk(KERN_INFO "rtc8564: cant init ctrl1\n"); + ret = -ENODEV; + goto done; + } + + /* read back ctrl1 and ctrl2 */ + ret = i2c_transfer(new_client->adapter, ctrl_rd, 2); + if (ret != 2) { + printk(KERN_INFO "rtc8564: cant read ctrl\n"); + ret = -ENODEV; + goto done; + } + + d->ctrl = data[0] | (data[1] << 8); + + _DBG(1, "RTC8564_REG_CTRL1=%02x, RTC8564_REG_CTRL2=%02x", + data[0], data[1]); + + ret = i2c_attach_client(new_client); +done: + if (ret) { + kfree(d); + } + return ret; +} + +static int rtc8564_probe(struct i2c_adapter *adap) +{ + return i2c_probe(adap, &addr_data, rtc8564_attach); +} + +static int rtc8564_detach(struct i2c_client *client) +{ + i2c_detach_client(client); + kfree(i2c_get_clientdata(client)); + return 0; +} + +static int rtc8564_get_datetime(struct i2c_client *client, struct rtc_tm *dt) +{ + int ret = -EIO; + unsigned char buf[15]; + + _DBG(1, "client=%p, dt=%p", client, dt); + + if (!dt || !client) + return -EINVAL; + + memset(buf, 0, sizeof(buf)); + + ret = rtc8564_read(client, 0, buf, 15); + if (ret) + return ret; + + /* century stored in minute alarm reg */ + dt->year = BCD_TO_BIN(buf[RTC8564_REG_YEAR]); + dt->year += 100 * BCD_TO_BIN(buf[RTC8564_REG_AL_MIN] & 0x3f); + dt->mday = BCD_TO_BIN(buf[RTC8564_REG_DAY] & 0x3f); + dt->wday = BCD_TO_BIN(buf[RTC8564_REG_WDAY] & 7); + dt->mon = BCD_TO_BIN(buf[RTC8564_REG_MON_CENT] & 0x1f); + + dt->secs = BCD_TO_BIN(buf[RTC8564_REG_SEC] & 0x7f); + dt->vl = (buf[RTC8564_REG_SEC] & 0x80) == 0x80; + dt->mins = BCD_TO_BIN(buf[RTC8564_REG_MIN] & 0x7f); + dt->hours = BCD_TO_BIN(buf[RTC8564_REG_HR] & 0x3f); + + _DBGRTCTM(2, *dt); + + return 0; +} + +static int +rtc8564_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo) +{ + int ret, len = 5; + unsigned char buf[15]; + + _DBG(1, "client=%p, dt=%p", client, dt); + + if (!dt || !client) + return -EINVAL; + + _DBGRTCTM(2, *dt); + + buf[RTC8564_REG_CTRL1] = CTRL1(client) | RTC8564_CTRL1_STOP; + buf[RTC8564_REG_CTRL2] = CTRL2(client); + buf[RTC8564_REG_SEC] = BIN_TO_BCD(dt->secs); + buf[RTC8564_REG_MIN] = BIN_TO_BCD(dt->mins); + buf[RTC8564_REG_HR] = BIN_TO_BCD(dt->hours); + + if (datetoo) { + len += 5; + buf[RTC8564_REG_DAY] = BIN_TO_BCD(dt->mday); + buf[RTC8564_REG_WDAY] = BIN_TO_BCD(dt->wday); + buf[RTC8564_REG_MON_CENT] = BIN_TO_BCD(dt->mon) & 0x1f; + /* century stored in minute alarm reg */ + buf[RTC8564_REG_YEAR] = BIN_TO_BCD(dt->year % 100); + buf[RTC8564_REG_AL_MIN] = BIN_TO_BCD(dt->year / 100); + } + + ret = rtc8564_write(client, 0, buf, len); + if (ret) { + _DBG(1, "error writing data! %d", ret); + } + + buf[RTC8564_REG_CTRL1] = CTRL1(client); + ret = rtc8564_write(client, 0, buf, 1); + if (ret) { + _DBG(1, "error writing data! 
%d", ret); + } + + return ret; +} + +static int rtc8564_get_ctrl(struct i2c_client *client, unsigned int *ctrl) +{ + struct rtc8564_data *data = i2c_get_clientdata(client); + + if (!ctrl || !client) + return -1; + + *ctrl = data->ctrl; + return 0; +} + +static int rtc8564_set_ctrl(struct i2c_client *client, unsigned int *ctrl) +{ + struct rtc8564_data *data = i2c_get_clientdata(client); + unsigned char buf[2]; + + if (!ctrl || !client) + return -1; + + buf[0] = *ctrl & 0xff; + buf[1] = (*ctrl & 0xff00) >> 8; + data->ctrl = *ctrl; + + return rtc8564_write(client, 0, buf, 2); +} + +static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem) +{ + + if (!mem || !client) + return -EINVAL; + + return rtc8564_read(client, mem->loc, mem->data, mem->nr); +} + +static int rtc8564_write_mem(struct i2c_client *client, struct mem *mem) +{ + + if (!mem || !client) + return -EINVAL; + + return rtc8564_write(client, mem->loc, mem->data, mem->nr); +} + +static int +rtc8564_command(struct i2c_client *client, unsigned int cmd, void *arg) +{ + + _DBG(1, "cmd=%d", cmd); + + switch (cmd) { + case RTC_GETDATETIME: + return rtc8564_get_datetime(client, arg); + + case RTC_SETTIME: + return rtc8564_set_datetime(client, arg, 0); + + case RTC_SETDATETIME: + return rtc8564_set_datetime(client, arg, 1); + + case RTC_GETCTRL: + return rtc8564_get_ctrl(client, arg); + + case RTC_SETCTRL: + return rtc8564_set_ctrl(client, arg); + + case MEM_READ: + return rtc8564_read_mem(client, arg); + + case MEM_WRITE: + return rtc8564_write_mem(client, arg); + + default: + return -EINVAL; + } +} + +static struct i2c_driver rtc8564_driver = { + .owner = THIS_MODULE, + .name = "RTC8564", + .id = I2C_DRIVERID_RTC8564, + .flags = I2C_DF_NOTIFY, + .attach_adapter = rtc8564_probe, + .detach_client = rtc8564_detach, + .command = rtc8564_command +}; + +static __init int rtc8564_init(void) +{ + return i2c_add_driver(&rtc8564_driver); +} + +static __exit void rtc8564_exit(void) +{ + i2c_del_driver(&rtc8564_driver); +} + +MODULE_AUTHOR("Stefan Eletzhofer "); +MODULE_DESCRIPTION("EPSON RTC8564 Driver"); +MODULE_LICENSE("GPL"); + +module_init(rtc8564_init); +module_exit(rtc8564_exit); diff --git a/drivers/i2c/chips/rtc8564.h b/drivers/i2c/chips/rtc8564.h new file mode 100644 index 000000000..e5342d10b --- /dev/null +++ b/drivers/i2c/chips/rtc8564.h @@ -0,0 +1,78 @@ +/* + * linux/drivers/i2c/chips/rtc8564.h + * + * Copyright (C) 2002-2004 Stefan Eletzhofer + * + * based on linux/drivers/acron/char/pcf8583.h + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
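An aside, not part of the patch: the RTC8564 keeps its time fields in BCD, and the driver stashes the century in the minute-alarm register (see rtc8564_get_datetime()/rtc8564_set_datetime() in rtc8564.c above). A minimal C sketch of the BCD_TO_BIN/BIN_TO_BCD conversions, using hypothetical register values:

#include <stdio.h>

/* same arithmetic as the BCD_TO_BIN/BIN_TO_BCD macros in rtc8564.c */
static unsigned int bcd_to_bin(unsigned int v) { return (v & 15) + (v >> 4) * 10; }
static unsigned int bin_to_bcd(unsigned int v) { return ((v / 10) << 4) + v % 10; }

int main(void)
{
	/* made-up raw values: year register 0x04, minute-alarm (century) 0x20 */
	unsigned int year = bcd_to_bin(0x04) + 100 * bcd_to_bin(0x20 & 0x3f);

	printf("%u\n", year);               /* 2004 */
	printf("0x%02x\n", bin_to_bcd(59)); /* 0x59 */
	return 0;
}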
+ */ +struct rtc_tm { + unsigned char secs; + unsigned char mins; + unsigned char hours; + unsigned char mday; + unsigned char mon; + unsigned short year; /* xxxx 4 digits :) */ + unsigned char wday; + unsigned char vl; +}; + +struct mem { + unsigned int loc; + unsigned int nr; + unsigned char *data; +}; + +#define RTC_GETDATETIME 0 +#define RTC_SETTIME 1 +#define RTC_SETDATETIME 2 +#define RTC_GETCTRL 3 +#define RTC_SETCTRL 4 +#define MEM_READ 5 +#define MEM_WRITE 6 + +#define RTC8564_REG_CTRL1 0x0 /* T 0 S 0 | T 0 0 0 */ +#define RTC8564_REG_CTRL2 0x1 /* 0 0 0 TI/TP | AF TF AIE TIE */ +#define RTC8564_REG_SEC 0x2 /* VL 4 2 1 | 8 4 2 1 */ +#define RTC8564_REG_MIN 0x3 /* x 4 2 1 | 8 4 2 1 */ +#define RTC8564_REG_HR 0x4 /* x x 2 1 | 8 4 2 1 */ +#define RTC8564_REG_DAY 0x5 /* x x 2 1 | 8 4 2 1 */ +#define RTC8564_REG_WDAY 0x6 /* x x x x | x 4 2 1 */ +#define RTC8564_REG_MON_CENT 0x7 /* C x x 1 | 8 4 2 1 */ +#define RTC8564_REG_YEAR 0x8 /* 8 4 2 1 | 8 4 2 1 */ +#define RTC8564_REG_AL_MIN 0x9 /* AE 4 2 1 | 8 4 2 1 */ +#define RTC8564_REG_AL_HR 0xa /* AE 4 2 1 | 8 4 2 1 */ +#define RTC8564_REG_AL_DAY 0xb /* AE x 2 1 | 8 4 2 1 */ +#define RTC8564_REG_AL_WDAY 0xc /* AE x x x | x 4 2 1 */ +#define RTC8564_REG_CLKOUT 0xd /* FE x x x | x x FD1 FD0 */ +#define RTC8564_REG_TCTL 0xe /* TE x x x | x x FD1 FD0 */ +#define RTC8564_REG_TIMER 0xf /* 8 bit binary */ + +/* Control reg */ +#define RTC8564_CTRL1_TEST1 (1<<3) +#define RTC8564_CTRL1_STOP (1<<5) +#define RTC8564_CTRL1_TEST2 (1<<7) + +#define RTC8564_CTRL2_TIE (1<<0) +#define RTC8564_CTRL2_AIE (1<<1) +#define RTC8564_CTRL2_TF (1<<2) +#define RTC8564_CTRL2_AF (1<<3) +#define RTC8564_CTRL2_TI_TP (1<<4) + +/* CLKOUT frequencies */ +#define RTC8564_FD_32768HZ (0x0) +#define RTC8564_FD_1024HZ (0x1) +#define RTC8564_FD_32 (0x2) +#define RTC8564_FD_1HZ (0x3) + +/* Timer CTRL */ +#define RTC8564_TD_4096HZ (0x0) +#define RTC8564_TD_64HZ (0x1) +#define RTC8564_TD_1HZ (0x2) +#define RTC8564_TD_1_60HZ (0x3) + +#define I2C_DRIVERID_RTC8564 0xf000 diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c new file mode 100644 index 000000000..fb91cb8bf --- /dev/null +++ b/drivers/ide/h8300/ide-h8300.c @@ -0,0 +1,119 @@ +/* + * drivers/ide/ide-h8300.c + * H8/300 generic IDE interface + */ + +#include +#include +#include + +#include +#include + +#define bswap(d) \ +({ \ + u16 r; \ + __asm__("mov.b %w1,r1h\n\t" \ + "mov.b %x1,r1l\n\t" \ + "mov.w r1,%0" \ + :"=r"(r) \ + :"r"(d) \ + :"er1"); \ + (r); \ +}) + +static void mm_outw(u16 d, unsigned long a) +{ + __asm__("mov.b %w0,r2h\n\t" + "mov.b %x0,r2l\n\t" + "mov.w r2,@%1" + : + :"r"(d),"r"(a) + :"er2"); +} + +static u16 mm_inw(unsigned long a) +{ + register u16 r __asm__("er0"); + __asm__("mov.w @%1,r2\n\t" + "mov.b r2l,%x0\n\t" + "mov.b r2h,%w0" + :"=r"(r) + :"r"(a) + :"er2"); + return r; +} + +static void mm_outsw(unsigned long addr, void *buf, u32 len) +{ + unsigned short *bp = (unsigned short *)buf; + for (; len > 0; len--, bp++) + *(volatile u16 *)addr = bswap(*bp); +} + +static void mm_insw(unsigned long addr, void *buf, u32 len) +{ + unsigned short *bp = (unsigned short *)buf; + for (; len > 0; len--, bp++) + *bp = bswap(*(volatile u16 *)addr); +} + +#define H8300_IDE_GAP (2) + +static inline void hw_setup(hw_regs_t *hw) +{ + int i; + + memset(hw, 0, sizeof(hw_regs_t)); + for (i = 0; i <= IDE_STATUS_OFFSET; i++) + hw->io_ports[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i; + hw->io_ports[IDE_CONTROL_OFFSET] = CONFIG_H8300_IDE_ALT; + hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ; + hw->dma = NO_DMA; 
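For reference, not part of the patch: the inline-assembly bswap() macro above exchanges the two bytes of a 16-bit word; mm_insw()/mm_outsw() use it to fix up byte order when moving IDE data on this platform. A portable C sketch of the same operation:

#include <stdio.h>

static unsigned short bswap16(unsigned short d)
{
	/* swap high and low byte; same effect as the asm macro, illustratively */
	return (unsigned short)((d >> 8) | (d << 8));
}

int main(void)
{
	printf("0x%04x\n", bswap16(0x1234)); /* 0x3412 */
	return 0;
}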
+ hw->chipset = ide_generic; +} + +static inline void hwif_setup(ide_hwif_t *hwif) +{ + default_hwif_iops(hwif); + + hwif->mmio = 2; + hwif->OUTW = mm_outw; + hwif->OUTSW = mm_outsw; + hwif->INW = mm_inw; + hwif->INSW = mm_insw; + hwif->OUTL = NULL; + hwif->INL = NULL; + hwif->OUTSL = NULL; + hwif->INSL = NULL; +} + +void __init h8300_ide_init(void) +{ + hw_regs_t hw; + ide_hwif_t *hwif; + int idx; + + if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300")) + goto out_busy; + if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) { + release_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8); + goto out_busy; + } + + hw_setup(&hw); + + /* register if */ + idx = ide_register_hw(&hw, &hwif); + if (idx == -1) { + printk(KERN_ERR "ide-h8300: IDE I/F register failed\n"); + return; + } + + hwif_setup(hwif); + printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", idx); + return; + +out_busy: + printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); +} diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c new file mode 100644 index 000000000..a10f92126 --- /dev/null +++ b/drivers/mtd/maps/ixp4xx.c @@ -0,0 +1,244 @@ +/* + * $Id: ixp4xx.c,v 1.1 2004/05/13 22:21:26 dsaxena Exp $ + * + * drivers/mtd/maps/ixp4xx.c + * + * MTD Map file for IXP4XX based systems. Please do not make per-board + * changes in here. If your board needs special setup, do it in your + * platform level code in arch/arm/mach-ixp4xx/board-setup.c + * + * Original Author: Intel Corporation + * Maintainer: Deepak Saxena + * + * Copyright (C) 2002 Intel Corporation + * Copyright (C) 2003-2004 MontaVista Software, Inc. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifndef __ARMEB__ +#define BYTE0(h) ((h) & 0xFF) +#define BYTE1(h) (((h) >> 8) & 0xFF) +#else +#define BYTE0(h) (((h) >> 8) & 0xFF) +#define BYTE1(h) ((h) & 0xFF) +#endif + +static __u16 +ixp4xx_read16(struct map_info *map, unsigned long ofs) +{ + return *(__u16 *) (map->map_priv_1 + ofs); +} + +/* + * The IXP4xx expansion bus only allows 16-bit wide acceses + * when attached to a 16-bit wide device (such as the 28F128J3A), + * so we can't just memcpy_fromio(). + */ +static void +ixp4xx_copy_from(struct map_info *map, void *to, + unsigned long from, ssize_t len) +{ + int i; + u8 *dest = (u8 *) to; + u16 *src = (u16 *) (map->map_priv_1 + from); + u16 data; + + for (i = 0; i < (len / 2); i++) { + data = src[i]; + dest[i * 2] = BYTE0(data); + dest[i * 2 + 1] = BYTE1(data); + } + + if (len & 1) + dest[len - 1] = BYTE0(src[i]); +} + +static void +ixp4xx_write16(struct map_info *map, __u16 d, unsigned long adr) +{ + *(__u16 *) (map->map_priv_1 + adr) = d; +} + +struct ixp4xx_flash_info { + struct mtd_info *mtd; + struct map_info map; + struct mtd_partition *partitions; + struct resource *res; +}; + +static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; + +static int +ixp4xx_flash_remove(struct device *_dev) +{ + struct platform_device *dev = to_platform_device(_dev); + struct flash_platform_data *plat = dev->dev.platform_data; + struct ixp4xx_flash_info *info = dev_get_drvdata(&dev->dev); + + dev_set_drvdata(&dev->dev, NULL); + + if(!info) + return 0; + + /* + * This is required for a soft reboot to work. 
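As an illustration only, not part of the patch: the comment above ixp4xx_copy_from() explains that the expansion bus allows only 16-bit accesses to the flash, so data is read a halfword at a time and split into bytes, with BYTE0/BYTE1 picking the low or high byte depending on endianness. A minimal host-side C sketch of that unpacking with made-up sample data:

#include <stdio.h>

#define BYTE0(h) ((h) & 0xFF)         /* little-endian variant of the macros */
#define BYTE1(h) (((h) >> 8) & 0xFF)

int main(void)
{
	unsigned short src[2] = { 0x3231, 0x3433 }; /* "1234" packed as halfwords */
	unsigned char dest[4];
	int i;

	for (i = 0; i < 2; i++) {
		dest[i * 2]     = BYTE0(src[i]);
		dest[i * 2 + 1] = BYTE1(src[i]);
	}
	printf("%.4s\n", (char *)dest); /* prints "1234" */
	return 0;
}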
+ */ + ixp4xx_write16(&info->map, 0xff, 0x55 * 0x2); + + if (info->mtd) { + del_mtd_partitions(info->mtd); + map_destroy(info->mtd); + } + if (info->map.map_priv_1) + iounmap((void *) info->map.map_priv_1); + + if (info->partitions) + kfree(info->partitions); + + if (info->res) { + release_resource(info->res); + kfree(info->res); + } + + if (plat->exit) + plat->exit(); + + /* Disable flash write */ + *IXP4XX_EXP_CS0 &= ~IXP4XX_FLASH_WRITABLE; + + return 0; +} + +static int ixp4xx_flash_probe(struct device *_dev) +{ + struct platform_device *dev = to_platform_device(_dev); + struct flash_platform_data *plat = dev->dev.platform_data; + struct ixp4xx_flash_info *info; + int err = -1; + + if (!plat) + return -ENODEV; + + if (plat->init) { + err = plat->init(); + if (err) + return err; + } + + info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL); + if(!info) { + err = -ENOMEM; + goto Error; + } + memzero(info, sizeof(struct ixp4xx_flash_info)); + + dev_set_drvdata(&dev->dev, info); + + /* + * Enable flash write + * TODO: Move this out to board specific code + */ + *IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE; + + /* + * Tell the MTD layer we're not 1:1 mapped so that it does + * not attempt to do a direct access on us. + */ + info->map.phys = NO_XIP; + info->map.size = dev->resource->end - dev->resource->start + 1; + + /* + * We only support 16-bit accesses for now. If and when + * any board use 8-bit access, we'll fixup the driver to + * handle that. + */ + info->map.buswidth = 2; + info->map.name = dev->dev.bus_id; + info->map.read16 = ixp4xx_read16, + info->map.write16 = ixp4xx_write16, + info->map.copy_from = ixp4xx_copy_from, + + info->res = request_mem_region(dev->resource->start, + dev->resource->end - dev->resource->start + 1, + "IXP4XXFlash"); + if (!info->res) { + printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n"); + err = -ENOMEM; + goto Error; + } + + info->map.map_priv_1 = + (unsigned long) ioremap(dev->resource->start, + dev->resource->end - dev->resource->start + 1); + if (!info->map.map_priv_1) { + printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n"); + err = -EIO; + goto Error; + } + + info->mtd = do_map_probe(plat->map_name, &info->map); + if (!info->mtd) { + printk(KERN_ERR "IXP4XXFlash: map_probe failed\n"); + err = -ENXIO; + goto Error; + } + info->mtd->owner = THIS_MODULE; + + err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0); + if (err > 0) { + err = add_mtd_partitions(info->mtd, info->partitions, err); + if(err) + printk(KERN_ERR "Could not parse partitions\n"); + } + + if (err) + goto Error; + + return 0; + +Error: + ixp4xx_flash_remove(_dev); + return err; +} + +static struct device_driver ixp4xx_flash_driver = { + .name = "IXP4XX-Flash", + .bus = &platform_bus_type, + .probe = ixp4xx_flash_probe, + .remove = ixp4xx_flash_remove, +}; + +static int __init ixp4xx_flash_init(void) +{ + return driver_register(&ixp4xx_flash_driver); +} + +static void __exit ixp4xx_flash_exit(void) +{ + driver_unregister(&ixp4xx_flash_driver); +} + + +module_init(ixp4xx_flash_init); +module_exit(ixp4xx_flash_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems") +MODULE_AUTHOR("Deepak Saxena"); + diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c new file mode 100644 index 000000000..1901302d4 --- /dev/null +++ b/drivers/mtd/maps/wr_sbc82xx_flash.c @@ -0,0 +1,167 @@ +/* + * $Id: wr_sbc82xx_flash.c,v 1.1 2004/06/07 10:21:32 dwmw2 Exp $ + * + * Map for flash chips on Wind 
River PowerQUICC II SBC82xx board. + * + * Copyright (C) 2004 Red Hat, Inc. + * + * Author: David Woodhouse + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static struct mtd_info *sbcmtd[3]; +static struct mtd_partition *sbcmtd_parts[3]; + +struct map_info sbc82xx_flash_map[3] = { + {.name = "Boot flash"}, + {.name = "Alternate boot flash"}, + {.name = "User flash"} +}; + +static struct mtd_partition smallflash_parts[] = { + { + .name = "space", + .size = 0x100000, + .offset = 0, + }, { + .name = "bootloader", + .size = MTDPART_SIZ_FULL, + .offset = MTDPART_OFS_APPEND, + } +}; + +static struct mtd_partition bigflash_parts[] = { + { + .name = "bootloader", + .size = 0x80000, + .offset = 0, + }, { + .name = "file system", + .size = MTDPART_SIZ_FULL, + .offset = MTDPART_OFS_APPEND, + } +}; + +static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL}; + +int __init init_sbc82xx_flash(void) +{ + volatile memctl8260_t *mc = &immr->im_memctl; + int bigflash; + int i; + + /* First, register the boot flash, whichever we're booting from */ + if ((mc->memc_br0 & 0x00001800) == 0x00001800) { + bigflash = 0; + } else if ((mc->memc_br0 & 0x00001800) == 0x00000800) { + bigflash = 1; + } else { + printk(KERN_WARNING "Bus Controller register BR0 is %08x. Cannot determine flash configuration\n", mc->memc_br0); + return 1; + } + + /* Set parameters for the big flash chip (CS6 or CS0) */ + sbc82xx_flash_map[bigflash].buswidth = 4; + sbc82xx_flash_map[bigflash].size = 0x4000000; + + /* Set parameters for the small flash chip (CS0 or CS6) */ + sbc82xx_flash_map[!bigflash].buswidth = 1; + sbc82xx_flash_map[!bigflash].size = 0x200000; + + /* Set parameters for the user flash chip (CS1) */ + sbc82xx_flash_map[2].buswidth = 4; + sbc82xx_flash_map[2].size = 0x4000000; + + sbc82xx_flash_map[0].phys = mc->memc_br0 & 0xffff8000; + sbc82xx_flash_map[1].phys = mc->memc_br6 & 0xffff8000; + sbc82xx_flash_map[2].phys = mc->memc_br1 & 0xffff8000; + + for (i=0; i<3; i++) { + int8_t flashcs[3] = { 0, 6, 1 }; + int nr_parts; + + printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d", + sbc82xx_flash_map[i].name, sbc82xx_flash_map[i].size >> 20, flashcs[i]); + if (!sbc82xx_flash_map[i].phys) { + /* We know it can't be at zero. */ + printk("): disabled by bootloader.\n"); + continue; + } + printk(" at %08lx)\n", sbc82xx_flash_map[i].phys); + + sbc82xx_flash_map[i].virt = (unsigned long)ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size); + + if (!sbc82xx_flash_map[i].virt) { + printk("Failed to ioremap\n"); + continue; + } + + simple_map_init(&sbc82xx_flash_map[i]); + + sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]); + + if (!sbcmtd[i]) + continue; + + sbcmtd[i]->owner = THIS_MODULE; + + nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes, + &sbcmtd_parts[i], 0); + if (nr_parts > 0) { + add_mtd_partitions (sbcmtd[i], sbcmtd_parts[i], nr_parts); + continue; + } + + /* No partitioning detected. 
Use default */ + if (i == 2) { + add_mtd_device(sbcmtd[i]); + } else if (i == bigflash) { + add_mtd_partitions (sbcmtd[i], bigflash_parts, ARRAY_SIZE(bigflash_parts)); + } else { + add_mtd_partitions (sbcmtd[i], smallflash_parts, ARRAY_SIZE(smallflash_parts)); + } + } + return 0; +} + +static void __exit cleanup_sbc82xx_flash(void) +{ + int i; + + for (i=0; i<3; i++) { + if (!sbcmtd[i]) + continue; + + if (i<2 || sbcmtd_parts[i]) + del_mtd_partitions(sbcmtd[i]); + else + del_mtd_device(sbcmtd[i]); + + kfree(sbcmtd_parts[i]); + map_destroy(sbcmtd[i]); + + iounmap((void *)sbc82xx_flash_map[i].virt); + sbc82xx_flash_map[i].virt = 0; + } +} + +module_init(init_sbc82xx_flash); +module_exit(cleanup_sbc82xx_flash); + + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Woodhouse "); +MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II"); diff --git a/drivers/net/ibm_emac/ibm_emac.h b/drivers/net/ibm_emac/ibm_emac.h new file mode 100644 index 000000000..5310033ad --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac.h @@ -0,0 +1,263 @@ +/* + * ibm_emac.h + * + * + * Armin Kuster akuster@mvista.com + * June, 2002 + * + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _IBM_EMAC_H_ +#define _IBM_EMAC_H_ +/* General defines needed for the driver */ + +/* Emac */ +typedef struct emac_regs { + u32 em0mr0; + u32 em0mr1; + u32 em0tmr0; + u32 em0tmr1; + u32 em0rmr; + u32 em0isr; + u32 em0iser; + u32 em0iahr; + u32 em0ialr; + u32 em0vtpid; + u32 em0vtci; + u32 em0ptr; + u32 em0iaht1; + u32 em0iaht2; + u32 em0iaht3; + u32 em0iaht4; + u32 em0gaht1; + u32 em0gaht2; + u32 em0gaht3; + u32 em0gaht4; + u32 em0lsah; + u32 em0lsal; + u32 em0ipgvr; + u32 em0stacr; + u32 em0trtr; + u32 em0rwmr; +} emac_t; + +/* MODE REG 0 */ +#define EMAC_M0_RXI 0x80000000 +#define EMAC_M0_TXI 0x40000000 +#define EMAC_M0_SRST 0x20000000 +#define EMAC_M0_TXE 0x10000000 +#define EMAC_M0_RXE 0x08000000 +#define EMAC_M0_WKE 0x04000000 + +/* MODE Reg 1 */ +#define EMAC_M1_FDE 0x80000000 +#define EMAC_M1_ILE 0x40000000 +#define EMAC_M1_VLE 0x20000000 +#define EMAC_M1_EIFC 0x10000000 +#define EMAC_M1_APP 0x08000000 +#define EMAC_M1_AEMI 0x02000000 +#define EMAC_M1_IST 0x01000000 +#define EMAC_M1_MF_1000GPCS 0x00c00000 /* Internal GPCS */ +#define EMAC_M1_MF_1000MBPS 0x00800000 /* External GPCS */ +#define EMAC_M1_MF_100MBPS 0x00400000 +#define EMAC_M1_RFS_16K 0x00280000 /* 000 for 512 byte */ +#define EMAC_M1_TR 0x00008000 +#ifdef CONFIG_IBM_EMAC4 +#define EMAC_M1_RFS_8K 0x00200000 +#define EMAC_M1_RFS_4K 0x00180000 +#define EMAC_M1_RFS_2K 0x00100000 +#define EMAC_M1_RFS_1K 0x00080000 +#define EMAC_M1_TX_FIFO_16K 0x00050000 /* 0's for 512 byte */ +#define EMAC_M1_TX_FIFO_8K 0x00040000 +#define EMAC_M1_TX_FIFO_4K 0x00030000 +#define EMAC_M1_TX_FIFO_2K 0x00020000 +#define EMAC_M1_TX_FIFO_1K 0x00010000 +#define EMAC_M1_TX_TR 0x00008000 +#define EMAC_M1_TX_MWSW 0x00001000 /* 0 wait for status */ +#define EMAC_M1_JUMBO_ENABLE 0x00000800 /* Upt to 9Kr status */ +#define EMAC_M1_OPB_CLK_66 0x00000008 /* 66Mhz */ +#define EMAC_M1_OPB_CLK_83 0x00000010 /* 83Mhz */ +#define EMAC_M1_OPB_CLK_100 0x00000018 /* 100Mhz */ +#define EMAC_M1_OPB_CLK_100P 0x00000020 /* 100Mhz+ */ +#else /* CONFIG_IBM_EMAC4 */ +#define EMAC_M1_RFS_4K 0x00300000 /* ~4k for 512 byte */ +#define EMAC_M1_RFS_2K 
0x00200000 +#define EMAC_M1_RFS_1K 0x00100000 +#define EMAC_M1_TX_FIFO_2K 0x00080000 /* 0's for 512 byte */ +#define EMAC_M1_TX_FIFO_1K 0x00040000 +#define EMAC_M1_TR0_DEPEND 0x00010000 /* 0'x for single packet */ +#define EMAC_M1_TR1_DEPEND 0x00004000 +#define EMAC_M1_TR1_MULTI 0x00002000 +#define EMAC_M1_JUMBO_ENABLE 0x00001000 +#endif /* CONFIG_IBM_EMAC4 */ +#define EMAC_M1_BASE (EMAC_M1_TX_FIFO_2K | \ + EMAC_M1_APP | \ + EMAC_M1_TR) + +/* Transmit Mode Register 0 */ +#define EMAC_TMR0_GNP0 0x80000000 +#define EMAC_TMR0_GNP1 0x40000000 +#define EMAC_TMR0_GNPD 0x20000000 +#define EMAC_TMR0_FC 0x10000000 +#define EMAC_TMR0_TFAE_2_32 0x00000001 +#define EMAC_TMR0_TFAE_4_64 0x00000002 +#define EMAC_TMR0_TFAE_8_128 0x00000003 +#define EMAC_TMR0_TFAE_16_256 0x00000004 +#define EMAC_TMR0_TFAE_32_512 0x00000005 +#define EMAC_TMR0_TFAE_64_1024 0x00000006 +#define EMAC_TMR0_TFAE_128_2048 0x00000007 + +/* Receive Mode Register */ +#define EMAC_RMR_SP 0x80000000 +#define EMAC_RMR_SFCS 0x40000000 +#define EMAC_RMR_ARRP 0x20000000 +#define EMAC_RMR_ARP 0x10000000 +#define EMAC_RMR_AROP 0x08000000 +#define EMAC_RMR_ARPI 0x04000000 +#define EMAC_RMR_PPP 0x02000000 +#define EMAC_RMR_PME 0x01000000 +#define EMAC_RMR_PMME 0x00800000 +#define EMAC_RMR_IAE 0x00400000 +#define EMAC_RMR_MIAE 0x00200000 +#define EMAC_RMR_BAE 0x00100000 +#define EMAC_RMR_MAE 0x00080000 +#define EMAC_RMR_RFAF_2_32 0x00000001 +#define EMAC_RMR_RFAF_4_64 0x00000002 +#define EMAC_RMR_RFAF_8_128 0x00000003 +#define EMAC_RMR_RFAF_16_256 0x00000004 +#define EMAC_RMR_RFAF_32_512 0x00000005 +#define EMAC_RMR_RFAF_64_1024 0x00000006 +#define EMAC_RMR_RFAF_128_2048 0x00000007 +#define EMAC_RMR_BASE (EMAC_RMR_IAE | EMAC_RMR_BAE) + +/* Interrupt Status & enable Regs */ +#define EMAC_ISR_OVR 0x02000000 +#define EMAC_ISR_PP 0x01000000 +#define EMAC_ISR_BP 0x00800000 +#define EMAC_ISR_RP 0x00400000 +#define EMAC_ISR_SE 0x00200000 +#define EMAC_ISR_ALE 0x00100000 +#define EMAC_ISR_BFCS 0x00080000 +#define EMAC_ISR_PTLE 0x00040000 +#define EMAC_ISR_ORE 0x00020000 +#define EMAC_ISR_IRE 0x00010000 +#define EMAC_ISR_DBDM 0x00000200 +#define EMAC_ISR_DB0 0x00000100 +#define EMAC_ISR_SE0 0x00000080 +#define EMAC_ISR_TE0 0x00000040 +#define EMAC_ISR_DB1 0x00000020 +#define EMAC_ISR_SE1 0x00000010 +#define EMAC_ISR_TE1 0x00000008 +#define EMAC_ISR_MOS 0x00000002 +#define EMAC_ISR_MOF 0x00000001 + +/* STA CONTROL REG */ +#define EMAC_STACR_OC 0x00008000 +#define EMAC_STACR_PHYE 0x00004000 +#define EMAC_STACR_WRITE 0x00002000 +#define EMAC_STACR_READ 0x00001000 +#define EMAC_STACR_CLK_83MHZ 0x00000800 /* 0's for 50Mhz */ +#define EMAC_STACR_CLK_66MHZ 0x00000400 +#define EMAC_STACR_CLK_100MHZ 0x00000C00 + +/* Transmit Request Threshold Register */ +#define EMAC_TRTR_1600 0x18000000 /* 0's for 64 Bytes */ +#define EMAC_TRTR_1024 0x0f000000 +#define EMAC_TRTR_512 0x07000000 +#define EMAC_TRTR_256 0x03000000 +#define EMAC_TRTR_192 0x10000000 +#define EMAC_TRTR_128 0x01000000 + +#define EMAC_TX_CTRL_GFCS 0x0200 +#define EMAC_TX_CTRL_GP 0x0100 +#define EMAC_TX_CTRL_ISA 0x0080 +#define EMAC_TX_CTRL_RSA 0x0040 +#define EMAC_TX_CTRL_IVT 0x0020 +#define EMAC_TX_CTRL_RVT 0x0010 +#define EMAC_TX_CTRL_TAH_CSUM 0x000e /* TAH only */ +#define EMAC_TX_CTRL_TAH_SEG4 0x000a /* TAH only */ +#define EMAC_TX_CTRL_TAH_SEG3 0x0008 /* TAH only */ +#define EMAC_TX_CTRL_TAH_SEG2 0x0006 /* TAH only */ +#define EMAC_TX_CTRL_TAH_SEG1 0x0004 /* TAH only */ +#define EMAC_TX_CTRL_TAH_SEG0 0x0002 /* TAH only */ +#define EMAC_TX_CTRL_TAH_DIS 0x0000 /* TAH only */ + +#define 
EMAC_TX_CTRL_DFLT ( \ + MAL_TX_CTRL_INTR | EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP ) + +/* madmal transmit status / Control bits */ +#define EMAC_TX_ST_BFCS 0x0200 +#define EMAC_TX_ST_BPP 0x0100 +#define EMAC_TX_ST_LCS 0x0080 +#define EMAC_TX_ST_ED 0x0040 +#define EMAC_TX_ST_EC 0x0020 +#define EMAC_TX_ST_LC 0x0010 +#define EMAC_TX_ST_MC 0x0008 +#define EMAC_TX_ST_SC 0x0004 +#define EMAC_TX_ST_UR 0x0002 +#define EMAC_TX_ST_SQE 0x0001 + +/* madmal receive status / Control bits */ +#define EMAC_RX_ST_OE 0x0200 +#define EMAC_RX_ST_PP 0x0100 +#define EMAC_RX_ST_BP 0x0080 +#define EMAC_RX_ST_RP 0x0040 +#define EMAC_RX_ST_SE 0x0020 +#define EMAC_RX_ST_AE 0x0010 +#define EMAC_RX_ST_BFCS 0x0008 +#define EMAC_RX_ST_PTL 0x0004 +#define EMAC_RX_ST_ORE 0x0002 +#define EMAC_RX_ST_IRE 0x0001 +#define EMAC_BAD_RX_PACKET 0x02ff +#define EMAC_CSUM_VER_ERROR 0x0003 + +/* identify a bad rx packet dependent on emac features */ +#ifdef CONFIG_IBM_EMAC4 +#define EMAC_IS_BAD_RX_PACKET(desc) \ + (((desc & (EMAC_BAD_RX_PACKET & ~EMAC_CSUM_VER_ERROR)) || \ + ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_ORE) || \ + ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_IRE))) +#else +#define EMAC_IS_BAD_RX_PACKET(desc) \ + (desc & EMAC_BAD_RX_PACKET) +#endif + +/* Revision specific EMAC register defaults */ +#ifdef CONFIG_IBM_EMAC4 +#define EMAC_M1_DEFAULT (EMAC_M1_BASE | \ + EMAC_M1_OPB_CLK_83 | \ + EMAC_M1_TX_MWSW) +#define EMAC_RMR_DEFAULT (EMAC_RMR_BASE | \ + EMAC_RMR_RFAF_128_2048) +#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP0 | \ + EMAC_TMR0_TFAE_128_2048) +#define EMAC_TRTR_DEFAULT EMAC_TRTR_1024 +#else /* !CONFIG_IBM_EMAC4 */ +#define EMAC_M1_DEFAULT EMAC_M1_BASE +#define EMAC_RMR_DEFAULT EMAC_RMR_BASE +#define EMAC_TMR0_XMIT EMAC_TMR0_GNP0 +#define EMAC_TRTR_DEFAULT EMAC_TRTR_1600 +#endif /* CONFIG_IBM_EMAC4 */ + +/* SoC implementation specific EMAC register defaults */ +#if defined(CONFIG_440GP) +#define EMAC_RWMR_DEFAULT 0x80009000 +#define EMAC_TMR0_DEFAULT 0x00000000 +#define EMAC_TMR1_DEFAULT 0xf8640000 +#elif defined(CONFIG_440GX) +#define EMAC_RWMR_DEFAULT 0x1000a200 +#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_128_2048 +#define EMAC_TMR1_DEFAULT 0x88810000 +#else +#define EMAC_RWMR_DEFAULT 0x0f002000 +#define EMAC_TMR0_DEFAULT 0x00000000 +#define EMAC_TMR1_DEFAULT 0x380f0000 +#endif /* CONFIG_440GP */ + +#endif diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h new file mode 100644 index 000000000..691ce4e5c --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_core.h @@ -0,0 +1,146 @@ +/* + * ibm_emac_core.h + * + * Ethernet driver for the built in ethernet on the IBM 405 PowerPC + * processor. + * + * Armin Kuster akuster@mvista.com + * Sept, 2001 + * + * Orignial driver + * Johnnie Peters + * jpeters@mvista.com + * + * Copyright 2000 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef _IBM_EMAC_CORE_H_ +#define _IBM_EMAC_CORE_H_ + +#include +#include +#include /* For phys_addr_t */ + +#include "ibm_emac.h" +#include "ibm_emac_phy.h" +#include "ibm_emac_rgmii.h" +#include "ibm_emac_zmii.h" +#include "ibm_emac_mal.h" +#include "ibm_emac_tah.h" + +#ifndef CONFIG_IBM_EMAC_TXB +#define NUM_TX_BUFF 64 +#define NUM_RX_BUFF 64 +#else +#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB +#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB +#endif + +/* This does 16 byte alignment, exactly what we need. + * The packet length includes FCS, but we don't want to + * include that when passing upstream as it messes up + * bridging applications. + */ +#ifndef CONFIG_IBM_EMAC_SKBRES +#define SKB_RES 2 +#else +#define SKB_RES CONFIG_IBM_EMAC_SKBRES +#endif + +/* Note about alignement. alloc_skb() returns a cache line + * aligned buffer. However, dev_alloc_skb() will add 16 more + * bytes and "reserve" them, so our buffer will actually end + * on a half cache line. What we do is to use directly + * alloc_skb, allocate 16 more bytes to match the total amount + * allocated by dev_alloc_skb(), but we don't reserve. + */ +#define MAX_NUM_BUF_DESC 255 +#define DESC_BUF_SIZE 4080 /* max 4096-16 */ +#define DESC_BUF_SIZE_REG (DESC_BUF_SIZE / 16) + +/* Transmitter timeout. */ +#define TX_TIMEOUT (2*HZ) + +/* MDIO latency delay */ +#define MDIO_DELAY 50 + +/* Power managment shift registers */ +#define IBM_CPM_EMMII 0 /* Shift value for MII */ +#define IBM_CPM_EMRX 1 /* Shift value for recv */ +#define IBM_CPM_EMTX 2 /* Shift value for MAC */ +#define IBM_CPM_EMAC(x) (((x)>>IBM_CPM_EMMII) | ((x)>>IBM_CPM_EMRX) | ((x)>>IBM_CPM_EMTX)) + +#define ENET_HEADER_SIZE 14 +#define ENET_FCS_SIZE 4 +#define ENET_DEF_MTU_SIZE 1500 +#define ENET_DEF_BUF_SIZE (ENET_DEF_MTU_SIZE + ENET_HEADER_SIZE + ENET_FCS_SIZE) +#define EMAC_MIN_FRAME 64 +#define EMAC_MAX_FRAME 9018 +#define EMAC_MIN_MTU (EMAC_MIN_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE) +#define EMAC_MAX_MTU (EMAC_MAX_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE) + +#ifdef CONFIG_IBM_EMAC_ERRMSG +void emac_serr_dump_0(struct net_device *dev); +void emac_serr_dump_1(struct net_device *dev); +void emac_err_dump(struct net_device *dev, int em0isr); +void emac_phy_dump(struct net_device *); +void emac_desc_dump(struct net_device *); +void emac_mac_dump(struct net_device *); +void emac_mal_dump(struct net_device *); +#else +#define emac_serr_dump_0(dev) do { } while (0) +#define emac_serr_dump_1(dev) do { } while (0) +#define emac_err_dump(dev,x) do { } while (0) +#define emac_phy_dump(dev) do { } while (0) +#define emac_desc_dump(dev) do { } while (0) +#define emac_mac_dump(dev) do { } while (0) +#define emac_mal_dump(dev) do { } while (0) +#endif + +struct ocp_enet_private { + struct sk_buff *tx_skb[NUM_TX_BUFF]; + struct sk_buff *rx_skb[NUM_RX_BUFF]; + struct mal_descriptor *tx_desc; + struct mal_descriptor *rx_desc; + struct mal_descriptor *rx_dirty; + struct net_device_stats stats; + int tx_cnt; + int rx_slot; + int dirty_rx; + int tx_slot; + int ack_slot; + int rx_buffer_size; + + struct mii_phy phy_mii; + int mii_phy_addr; + int want_autoneg; + int timer_ticks; + struct timer_list link_timer; + struct net_device *mdio_dev; + + struct ocp_device *rgmii_dev; + int rgmii_input; + + struct ocp_device *zmii_dev; + int zmii_input; + + struct ibm_ocp_mal *mal; + int mal_tx_chan, mal_rx_chan; + struct mal_commac commac; + + struct ocp_device *tah_dev; + + int opened; + int going_away; + int wol_irq; + emac_t *emacp; + struct ocp_device *ocpdev; + struct net_device *ndev; + 
spinlock_t lock; +}; +#endif /* _IBM_EMAC_CORE_H_ */ diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c new file mode 100644 index 000000000..c8512046c --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_debug.c @@ -0,0 +1,224 @@ +/* + * ibm_ocp_debug.c + * + * This has all the debug routines that where in *_enet.c + * + * Armin Kuster akuster@mvista.com + * April , 2002 + * + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include "ibm_ocp_mal.h" +#include "ibm_ocp_zmii.h" +#include "ibm_ocp_enet.h" + +extern int emac_phy_read(struct net_device *dev, int mii_id, int reg); + +void emac_phy_dump(struct net_device *dev) +{ + struct ocp_enet_private *fep = dev->priv; + unsigned long i; + uint data; + + printk(KERN_DEBUG " Prepare for Phy dump....\n"); + for (i = 0; i < 0x1A; i++) { + data = emac_phy_read(dev, fep->mii_phy_addr, i); + printk(KERN_DEBUG "Phy reg 0x%lx ==> %4x\n", i, data); + if (i == 0x07) + i = 0x0f; + } +} + +void emac_desc_dump(struct net_device *dev) +{ + struct ocp_enet_private *fep = dev->priv; + int curr_slot; + + printk(KERN_DEBUG + "dumping the receive descriptors: current slot is %d\n", + fep->rx_slot); + for (curr_slot = 0; curr_slot < NUM_RX_BUFF; curr_slot++) { + printk(KERN_DEBUG + "Desc %02d: status 0x%04x, length %3d, addr 0x%x\n", + curr_slot, fep->rx_desc[curr_slot].ctrl, + fep->rx_desc[curr_slot].data_len, + (unsigned int)fep->rx_desc[curr_slot].data_ptr); + } +} + +void emac_mac_dump(struct net_device *dev) +{ + struct ocp_enet_private *fep = dev->priv; + volatile emac_t *emacp = fep->emacp; + + printk(KERN_DEBUG "EMAC DEBUG ********** \n"); + printk(KERN_DEBUG "EMAC_M0 ==> 0x%x\n", in_be32(&emacp->em0mr0)); + printk(KERN_DEBUG "EMAC_M1 ==> 0x%x\n", in_be32(&emacp->em0mr1)); + printk(KERN_DEBUG "EMAC_TXM0==> 0x%x\n", in_be32(&emacp->em0tmr0)); + printk(KERN_DEBUG "EMAC_TXM1==> 0x%x\n", in_be32(&emacp->em0tmr1)); + printk(KERN_DEBUG "EMAC_RXM ==> 0x%x\n", in_be32(&emacp->em0rmr)); + printk(KERN_DEBUG "EMAC_ISR ==> 0x%x\n", in_be32(&emacp->em0isr)); + printk(KERN_DEBUG "EMAC_IER ==> 0x%x\n", in_be32(&emacp->em0iser)); + printk(KERN_DEBUG "EMAC_IAH ==> 0x%x\n", in_be32(&emacp->em0iahr)); + printk(KERN_DEBUG "EMAC_IAL ==> 0x%x\n", in_be32(&emacp->em0ialr)); + printk(KERN_DEBUG "EMAC_VLAN_TPID_REG ==> 0x%x\n", + in_be32(&emacp->em0vtpid)); +} + +void emac_mal_dump(struct net_device *dev) +{ + struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; + + printk(KERN_DEBUG " MAL DEBUG ********** \n"); + printk(KERN_DEBUG " MCR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALCR)); + printk(KERN_DEBUG " ESR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALESR)); + printk(KERN_DEBUG " IER ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALIER)); +#ifdef CONFIG_40x + printk(KERN_DEBUG " DBR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALDBR)); +#endif /* CONFIG_40x */ + printk(KERN_DEBUG " TXCASR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCASR)); + printk(KERN_DEBUG " TXCARR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCARR)); + printk(KERN_DEBUG " TXEOBISR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXEOBISR)); + printk(KERN_DEBUG " TXDEIR ==> 0x%x\n", + 
(unsigned int)get_mal_dcrn(mal, DCRN_MALTXDEIR)); + printk(KERN_DEBUG " RXCASR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCASR)); + printk(KERN_DEBUG " RXCARR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCARR)); + printk(KERN_DEBUG " RXEOBISR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXEOBISR)); + printk(KERN_DEBUG " RXDEIR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXDEIR)); + printk(KERN_DEBUG " TXCTP0R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP0R)); + printk(KERN_DEBUG " TXCTP1R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP1R)); + printk(KERN_DEBUG " TXCTP2R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP2R)); + printk(KERN_DEBUG " TXCTP3R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP3R)); + printk(KERN_DEBUG " RXCTP0R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP0R)); + printk(KERN_DEBUG " RXCTP1R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP1R)); + printk(KERN_DEBUG " RCBS0 ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS0)); + printk(KERN_DEBUG " RCBS1 ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS1)); +} + +void emac_serr_dump_0(struct net_device *dev) +{ + struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; + unsigned long int mal_error, plb_error, plb_addr; + + mal_error = get_mal_dcrn(mal, DCRN_MALESR); + printk(KERN_DEBUG "ppc405_eth_serr: %s channel %ld \n", + (mal_error & 0x40000000) ? "Receive" : + "Transmit", (mal_error & 0x3e000000) >> 25); + printk(KERN_DEBUG " ----- latched error -----\n"); + if (mal_error & MALESR_DE) + printk(KERN_DEBUG " DE: descriptor error\n"); + if (mal_error & MALESR_OEN) + printk(KERN_DEBUG " ONE: OPB non-fullword error\n"); + if (mal_error & MALESR_OTE) + printk(KERN_DEBUG " OTE: OPB timeout error\n"); + if (mal_error & MALESR_OSE) + printk(KERN_DEBUG " OSE: OPB slave error\n"); + + if (mal_error & MALESR_PEIN) { + plb_error = mfdcr(DCRN_PLB0_BESR); + printk(KERN_DEBUG + " PEIN: PLB error, PLB0_BESR is 0x%x\n", + (unsigned int)plb_error); + plb_addr = mfdcr(DCRN_PLB0_BEAR); + printk(KERN_DEBUG + " PEIN: PLB error, PLB0_BEAR is 0x%x\n", + (unsigned int)plb_addr); + } +} + +void emac_serr_dump_1(struct net_device *dev) +{ + struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; + int mal_error = get_mal_dcrn(mal, DCRN_MALESR); + + printk(KERN_DEBUG " ----- cumulative errors -----\n"); + if (mal_error & MALESR_DEI) + printk(KERN_DEBUG " DEI: descriptor error interrupt\n"); + if (mal_error & MALESR_ONEI) + printk(KERN_DEBUG " OPB non-fullword error interrupt\n"); + if (mal_error & MALESR_OTEI) + printk(KERN_DEBUG " OTEI: timeout error interrupt\n"); + if (mal_error & MALESR_OSEI) + printk(KERN_DEBUG " OSEI: slave error interrupt\n"); + if (mal_error & MALESR_PBEI) + printk(KERN_DEBUG " PBEI: PLB bus error interrupt\n"); +} + +void emac_err_dump(struct net_device *dev, int em0isr) +{ + printk(KERN_DEBUG "%s: on-chip ethernet error:\n", dev->name); + + if (em0isr & EMAC_ISR_OVR) + printk(KERN_DEBUG " OVR: overrun\n"); + if (em0isr & EMAC_ISR_PP) + printk(KERN_DEBUG " PP: control pause packet\n"); + if (em0isr & EMAC_ISR_BP) + printk(KERN_DEBUG " BP: packet error\n"); + if (em0isr & EMAC_ISR_RP) + printk(KERN_DEBUG " RP: runt packet\n"); + if (em0isr & EMAC_ISR_SE) + printk(KERN_DEBUG " SE: short event\n"); + if (em0isr & EMAC_ISR_ALE) + printk(KERN_DEBUG " ALE: odd number of nibbles in packet\n"); + if (em0isr & EMAC_ISR_BFCS) + printk(KERN_DEBUG " BFCS: 
bad FCS\n"); + if (em0isr & EMAC_ISR_PTLE) + printk(KERN_DEBUG " PTLE: oversized packet\n"); + if (em0isr & EMAC_ISR_ORE) + printk(KERN_DEBUG + " ORE: packet length field > max allowed LLC\n"); + if (em0isr & EMAC_ISR_IRE) + printk(KERN_DEBUG " IRE: In Range error\n"); + if (em0isr & EMAC_ISR_DBDM) + printk(KERN_DEBUG " DBDM: xmit error or SQE\n"); + if (em0isr & EMAC_ISR_DB0) + printk(KERN_DEBUG " DB0: xmit error or SQE on TX channel 0\n"); + if (em0isr & EMAC_ISR_SE0) + printk(KERN_DEBUG + " SE0: Signal Quality Error test failure from TX channel 0\n"); + if (em0isr & EMAC_ISR_TE0) + printk(KERN_DEBUG " TE0: xmit channel 0 aborted\n"); + if (em0isr & EMAC_ISR_DB1) + printk(KERN_DEBUG " DB1: xmit error or SQE on TX channel \n"); + if (em0isr & EMAC_ISR_SE1) + printk(KERN_DEBUG + " SE1: Signal Quality Error test failure from TX channel 1\n"); + if (em0isr & EMAC_ISR_TE1) + printk(KERN_DEBUG " TE1: xmit channel 1 aborted\n"); + if (em0isr & EMAC_ISR_MOS) + printk(KERN_DEBUG " MOS\n"); + if (em0isr & EMAC_ISR_MOF) + printk(KERN_DEBUG " MOF\n"); + + emac_mac_dump(dev); + emac_mal_dump(dev); +} diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c new file mode 100644 index 000000000..02d847cfa --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_mal.c @@ -0,0 +1,467 @@ +/* + * ibm_ocp_mal.c + * + * Armin Kuster akuster@mvista.com + * Juen, 2002 + * + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ibm_emac_mal.h" + +// Locking: Should we share a lock with the client ? The client could provide +// a lock pointer (optionally) in the commac structure... I don't think this is +// really necessary though + +/* This lock protects the commac list. 
On today UP implementations, it's + * really only used as IRQ protection in mal_{register,unregister}_commac() + */ +static rwlock_t mal_list_lock = RW_LOCK_UNLOCKED; + +int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac) +{ + unsigned long flags; + + write_lock_irqsave(&mal_list_lock, flags); + + /* Don't let multiple commacs claim the same channel */ + if ((mal->tx_chan_mask & commac->tx_chan_mask) || + (mal->rx_chan_mask & commac->rx_chan_mask)) { + write_unlock_irqrestore(&mal_list_lock, flags); + return -EBUSY; + } + + mal->tx_chan_mask |= commac->tx_chan_mask; + mal->rx_chan_mask |= commac->rx_chan_mask; + + list_add(&commac->list, &mal->commac); + + write_unlock_irqrestore(&mal_list_lock, flags); + + MOD_INC_USE_COUNT; + + return 0; +} + +int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac) +{ + unsigned long flags; + + write_lock_irqsave(&mal_list_lock, flags); + + mal->tx_chan_mask &= ~commac->tx_chan_mask; + mal->rx_chan_mask &= ~commac->rx_chan_mask; + + list_del_init(&commac->list); + + write_unlock_irqrestore(&mal_list_lock, flags); + + MOD_DEC_USE_COUNT; + + return 0; +} + +int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size) +{ + switch (channel) { + case 0: + set_mal_dcrn(mal, DCRN_MALRCBS0, size); + break; +#ifdef DCRN_MALRCBS1 + case 1: + set_mal_dcrn(mal, DCRN_MALRCBS1, size); + break; +#endif +#ifdef DCRN_MALRCBS2 + case 2: + set_mal_dcrn(mal, DCRN_MALRCBS2, size); + break; +#endif +#ifdef DCRN_MALRCBS3 + case 3: + set_mal_dcrn(mal, DCRN_MALRCBS3, size); + break; +#endif + default: + return -EINVAL; + } + + return 0; +} + +static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ibm_ocp_mal *mal = dev_instance; + unsigned long mal_error; + + /* + * This SERR applies to one of the devices on the MAL, here we charge + * it against the first EMAC registered for the MAL. 
+ */ + + mal_error = get_mal_dcrn(mal, DCRN_MALESR); + + printk(KERN_ERR "%s: System Error (MALESR=%lx)\n", + "MAL" /* FIXME: get the name right */ , mal_error); + + /* FIXME: decipher error */ + /* DIXME: distribute to commacs, if possible */ + + /* Clear the error status register */ + set_mal_dcrn(mal, DCRN_MALESR, mal_error); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ibm_ocp_mal *mal = dev_instance; + struct list_head *l; + unsigned long isr; + + isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR); + set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr); + + read_lock(&mal_list_lock); + list_for_each(l, &mal->commac) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + + if (isr & mc->tx_chan_mask) { + mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask); + } + } + read_unlock(&mal_list_lock); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ibm_ocp_mal *mal = dev_instance; + struct list_head *l; + unsigned long isr; + + isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR); + set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr); + + read_lock(&mal_list_lock); + list_for_each(l, &mal->commac) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + + if (isr & mc->rx_chan_mask) { + mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask); + } + } + read_unlock(&mal_list_lock); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ibm_ocp_mal *mal = dev_instance; + struct list_head *l; + unsigned long deir; + + deir = get_mal_dcrn(mal, DCRN_MALTXDEIR); + + /* FIXME: print which MAL correctly */ + printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n", + "MAL", deir); + + read_lock(&mal_list_lock); + list_for_each(l, &mal->commac) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + + if (deir & mc->tx_chan_mask) { + mc->ops->txde(mc->dev, deir & mc->tx_chan_mask); + } + } + read_unlock(&mal_list_lock); + + return IRQ_HANDLED; +} + +/* + * This interrupt should be very rare at best. This occurs when + * the hardware has a problem with the receive descriptors. The manual + * states that it occurs when the hardware cannot the receive descriptor + * empty bit is not set. The recovery mechanism will be to + * traverse through the descriptors, handle any that are marked to be + * handled and reinitialize each along the way. At that point the driver + * will be restarted. + */ +static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ibm_ocp_mal *mal = dev_instance; + struct list_head *l; + unsigned long deir; + + deir = get_mal_dcrn(mal, DCRN_MALRXDEIR); + + /* + * This really is needed. This case encountered in stress testing. 
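A sketch for orientation, not part of the patch: mal_probe() below makes one coherent allocation and gives every channel its own MAL_DT_ALIGN-sized slice (4 KiB, per the comment there), then programs each DCRN_MALTXCTPnR/DCRN_MALRXCTPnR descriptor-pointer register with the physical address of its slice. The address layout, using a made-up base address:

#include <stdio.h>

#define MAL_DT_ALIGN 4096UL /* one 4 KiB descriptor area per channel (assumed) */

int main(void)
{
	unsigned long tx_phys = 0x0f000000UL; /* hypothetical base address */
	int chan;

	for (chan = 0; chan < 4; chan++)
		printf("tx chan %d descriptors at 0x%08lx\n",
		       chan, tx_phys + chan * MAL_DT_ALIGN);
	return 0;
}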
+ */ + if (deir == 0) + return IRQ_HANDLED; + + /* FIXME: print which MAL correctly */ + printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n", + "MAL", deir); + + read_lock(&mal_list_lock); + list_for_each(l, &mal->commac) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + + if (deir & mc->rx_chan_mask) { + mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask); + } + } + read_unlock(&mal_list_lock); + + return IRQ_HANDLED; +} + +static int __init mal_probe(struct ocp_device *ocpdev) +{ + struct ibm_ocp_mal *mal = NULL; + struct ocp_func_mal_data *maldata; + int err = 0; + + maldata = (struct ocp_func_mal_data *)ocpdev->def->additions; + if (maldata == NULL) { + printk(KERN_ERR "mal%d: Missing additional data !\n", + ocpdev->def->index); + return -ENODEV; + } + + mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL); + if (mal == NULL) { + printk(KERN_ERR + "mal%d: Out of memory allocating MAL structure !\n", + ocpdev->def->index); + return -ENOMEM; + } + memset(mal, 0, sizeof(*mal)); + + switch (ocpdev->def->index) { + case 0: + mal->dcrbase = DCRN_MAL_BASE; + break; +#ifdef DCRN_MAL1_BASE + case 1: + mal->dcrbase = DCRN_MAL1_BASE; + break; +#endif + default: + BUG(); + } + + /**************************/ + + INIT_LIST_HEAD(&mal->commac); + + set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF); + set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF); + + set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR); /* 384 */ + /* FIXME: Add delay */ + + /* Set the MAL configuration register */ + set_mal_dcrn(mal, DCRN_MALCR, + MALCR_PLBB | MALCR_OPBBL | MALCR_LEA | + MALCR_PLBLT_DEFAULT); + + /* It would be nice to allocate buffers separately for each + * channel, but we can't because the channels share the upper + * 13 bits of address lines. Each channel's buffer must also + * be 4k aligned, so we allocate 4k for each channel.
This is + * inefficient FIXME: do better, if possible */ + mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev, + MAL_DT_ALIGN * + maldata->num_tx_chans, + &mal->tx_phys_addr, GFP_KERNEL); + if (mal->tx_virt_addr == NULL) { + printk(KERN_ERR + "mal%d: Out of memory allocating MAL descriptors !\n", + ocpdev->def->index); + err = -ENOMEM; + goto fail; + } + + /* God, oh, god, I hate DCRs */ + set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr); +#ifdef DCRN_MALTXCTP1R + if (maldata->num_tx_chans > 1) + set_mal_dcrn(mal, DCRN_MALTXCTP1R, + mal->tx_phys_addr + MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP1R */ +#ifdef DCRN_MALTXCTP2R + if (maldata->num_tx_chans > 2) + set_mal_dcrn(mal, DCRN_MALTXCTP2R, + mal->tx_phys_addr + 2 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP2R */ +#ifdef DCRN_MALTXCTP3R + if (maldata->num_tx_chans > 3) + set_mal_dcrn(mal, DCRN_MALTXCTP3R, + mal->tx_phys_addr + 3 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP3R */ +#ifdef DCRN_MALTXCTP4R + if (maldata->num_tx_chans > 4) + set_mal_dcrn(mal, DCRN_MALTXCTP4R, + mal->tx_phys_addr + 4 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP4R */ +#ifdef DCRN_MALTXCTP5R + if (maldata->num_tx_chans > 5) + set_mal_dcrn(mal, DCRN_MALTXCTP5R, + mal->tx_phys_addr + 5 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP5R */ +#ifdef DCRN_MALTXCTP6R + if (maldata->num_tx_chans > 6) + set_mal_dcrn(mal, DCRN_MALTXCTP6R, + mal->tx_phys_addr + 6 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP6R */ +#ifdef DCRN_MALTXCTP7R + if (maldata->num_tx_chans > 7) + set_mal_dcrn(mal, DCRN_MALTXCTP7R, + mal->tx_phys_addr + 7 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP7R */ + + mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev, + MAL_DT_ALIGN * + maldata->num_rx_chans, + &mal->rx_phys_addr, GFP_KERNEL); + + set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr); +#ifdef DCRN_MALRXCTP1R + if (maldata->num_rx_chans > 1) + set_mal_dcrn(mal, DCRN_MALRXCTP1R, + mal->rx_phys_addr + MAL_DT_ALIGN); +#endif /* DCRN_MALRXCTP1R */ +#ifdef DCRN_MALRXCTP2R + if (maldata->num_rx_chans > 2) + set_mal_dcrn(mal, DCRN_MALRXCTP2R, + mal->rx_phys_addr + 2 * MAL_DT_ALIGN); +#endif /* DCRN_MALRXCTP2R */ +#ifdef DCRN_MALRXCTP3R + if (maldata->num_rx_chans > 3) + set_mal_dcrn(mal, DCRN_MALRXCTP3R, + mal->rx_phys_addr + 3 * MAL_DT_ALIGN); +#endif /* DCRN_MALRXCTP3R */ + + err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal); + if (err) + goto fail; + err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE ", mal); + if (err) + goto fail; + err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal); + if (err) + goto fail; + err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal); + if (err) + goto fail; + err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal); + if (err) + goto fail; + + set_mal_dcrn(mal, DCRN_MALIER, + MALIER_DE | MALIER_NE | MALIER_TE | + MALIER_OPBE | MALIER_PLBE); + + /* Advertise me to the rest of the world */ + ocp_set_drvdata(ocpdev, mal); + + printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n", + ocpdev->def->index, maldata->num_tx_chans, + maldata->num_rx_chans); + + return 0; + + fail: + /* FIXME: dispose requested IRQs ! 
*/ + if (err && mal) + kfree(mal); + return err; +} + +static void __exit mal_remove(struct ocp_device *ocpdev) +{ + struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev); + struct ocp_func_mal_data *maldata = ocpdev->def->additions; + + BUG_ON(!maldata); + + ocp_set_drvdata(ocpdev, NULL); + + /* FIXME: shut down the MAL, deal with dependency with emac */ + free_irq(maldata->serr_irq, mal); + free_irq(maldata->txde_irq, mal); + free_irq(maldata->txeob_irq, mal); + free_irq(maldata->rxde_irq, mal); + free_irq(maldata->rxeob_irq, mal); + + if (mal->tx_virt_addr) + dma_free_coherent(&ocpdev->dev, + MAL_DT_ALIGN * maldata->num_tx_chans, + mal->tx_virt_addr, mal->tx_phys_addr); + + if (mal->rx_virt_addr) + dma_free_coherent(&ocpdev->dev, + MAL_DT_ALIGN * maldata->num_rx_chans, + mal->rx_virt_addr, mal->rx_phys_addr); + + kfree(mal); +} + +/* Structure for a device driver */ +static struct ocp_device_id mal_ids[] = { + {.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL}, + {.vendor = OCP_VENDOR_INVALID} +}; + +static struct ocp_driver mal_driver = { + .name = "mal", + .id_table = mal_ids, + + .probe = mal_probe, + .remove = mal_remove, +}; + +static int __init init_mals(void) +{ + int rc; + + rc = ocp_register_driver(&mal_driver); + if (rc < 0) { + ocp_unregister_driver(&mal_driver); + return -ENODEV; + } + + return 0; +} + +static void __exit exit_mals(void) +{ + ocp_unregister_driver(&mal_driver); +} + +module_init(init_mals); +module_exit(exit_mals); diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h new file mode 100644 index 000000000..8e456ce5a --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_mal.h @@ -0,0 +1,130 @@ +#ifndef _IBM_EMAC_MAL_H +#define _IBM_EMAC_MAL_H + +#include + +#define MAL_DT_ALIGN (4096) /* Alignment for each channel's descriptor table */ + +#define MAL_CHAN_MASK(chan) (0x80000000 >> (chan)) + +/* MAL Buffer Descriptor structure */ +struct mal_descriptor { + unsigned short ctrl; /* MAL / Commac status control bits */ + short data_len; /* Max length is 4K-1 (12 bits) */ + unsigned char *data_ptr; /* pointer to actual data buffer */ +} __attribute__ ((packed)); + +/* the following defines are for the MadMAL status and control registers. 
*/ +/* MADMAL transmit and receive status/control bits */ +#define MAL_RX_CTRL_EMPTY 0x8000 +#define MAL_RX_CTRL_WRAP 0x4000 +#define MAL_RX_CTRL_CM 0x2000 +#define MAL_RX_CTRL_LAST 0x1000 +#define MAL_RX_CTRL_FIRST 0x0800 +#define MAL_RX_CTRL_INTR 0x0400 + +#define MAL_TX_CTRL_READY 0x8000 +#define MAL_TX_CTRL_WRAP 0x4000 +#define MAL_TX_CTRL_CM 0x2000 +#define MAL_TX_CTRL_LAST 0x1000 +#define MAL_TX_CTRL_INTR 0x0400 + +struct mal_commac_ops { + void (*txeob) (void *dev, u32 chanmask); + void (*txde) (void *dev, u32 chanmask); + void (*rxeob) (void *dev, u32 chanmask); + void (*rxde) (void *dev, u32 chanmask); +}; + +struct mal_commac { + struct mal_commac_ops *ops; + void *dev; + u32 tx_chan_mask, rx_chan_mask; + struct list_head list; +}; + +struct ibm_ocp_mal { + int dcrbase; + + struct list_head commac; + u32 tx_chan_mask, rx_chan_mask; + + dma_addr_t tx_phys_addr; + struct mal_descriptor *tx_virt_addr; + + dma_addr_t rx_phys_addr; + struct mal_descriptor *rx_virt_addr; +}; + +#define GET_MAL_STANZA(base,dcrn) \ + case base: \ + x = mfdcr(dcrn(base)); \ + break; + +#define SET_MAL_STANZA(base,dcrn, val) \ + case base: \ + mtdcr(dcrn(base), (val)); \ + break; + +#define GET_MAL0_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL_BASE,dcrn) +#define SET_MAL0_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL_BASE,dcrn,val) + +#ifdef DCRN_MAL1_BASE +#define GET_MAL1_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL1_BASE,dcrn) +#define SET_MAL1_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL1_BASE,dcrn,val) +#else /* ! DCRN_MAL1_BASE */ +#define GET_MAL1_STANZA(dcrn) +#define SET_MAL1_STANZA(dcrn,val) +#endif + +#define get_mal_dcrn(mal, dcrn) ({ \ + u32 x; \ + switch ((mal)->dcrbase) { \ + GET_MAL0_STANZA(dcrn) \ + GET_MAL1_STANZA(dcrn) \ + default: \ + BUG(); \ + } \ +x; }) + +#define set_mal_dcrn(mal, dcrn, val) do { \ + switch ((mal)->dcrbase) { \ + SET_MAL0_STANZA(dcrn,val) \ + SET_MAL1_STANZA(dcrn,val) \ + default: \ + BUG(); \ + } } while (0) + +static inline void mal_enable_tx_channels(struct ibm_ocp_mal *mal, u32 chanmask) +{ + set_mal_dcrn(mal, DCRN_MALTXCASR, + get_mal_dcrn(mal, DCRN_MALTXCASR) | chanmask); +} + +static inline void mal_disable_tx_channels(struct ibm_ocp_mal *mal, + u32 chanmask) +{ + set_mal_dcrn(mal, DCRN_MALTXCARR, chanmask); +} + +static inline void mal_enable_rx_channels(struct ibm_ocp_mal *mal, u32 chanmask) +{ + set_mal_dcrn(mal, DCRN_MALRXCASR, + get_mal_dcrn(mal, DCRN_MALRXCASR) | chanmask); +} + +static inline void mal_disable_rx_channels(struct ibm_ocp_mal *mal, + u32 chanmask) +{ + set_mal_dcrn(mal, DCRN_MALRXCARR, chanmask); +} + +extern int mal_register_commac(struct ibm_ocp_mal *mal, + struct mal_commac *commac); +extern int mal_unregister_commac(struct ibm_ocp_mal *mal, + struct mal_commac *commac); + +extern int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, + unsigned long size); + +#endif /* _IBM_EMAC_MAL_H */ diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c new file mode 100644 index 000000000..b439087df --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_phy.c @@ -0,0 +1,297 @@ +/* + * ibm_ocp_phy.c + * + * PHY drivers for the ibm ocp ethernet driver. Borrowed + * from sungem_phy.c, though I only kept the generic MII + * driver for now. 
+ * + * This file should be shared with other drivers or eventually + * merged as the "low level" part of miilib + * + * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org) + * + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ibm_emac_phy.h" + +static int reset_one_mii_phy(struct mii_phy *phy, int phy_id) +{ + u16 val; + int limit = 10000; + + val = __phy_read(phy, phy_id, MII_BMCR); + val &= ~BMCR_ISOLATE; + val |= BMCR_RESET; + __phy_write(phy, phy_id, MII_BMCR, val); + + udelay(100); + + while (limit--) { + val = __phy_read(phy, phy_id, MII_BMCR); + if ((val & BMCR_RESET) == 0) + break; + udelay(10); + } + if ((val & BMCR_ISOLATE) && limit > 0) + __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); + + return (limit <= 0); +} + +static int cis8201_init(struct mii_phy *phy) +{ + u16 epcr; + + epcr = phy_read(phy, MII_CIS8201_EPCR); + epcr &= ~EPCR_MODE_MASK; + + switch (phy->mode) { + case PHY_MODE_TBI: + epcr |= EPCR_TBI_MODE; + break; + case PHY_MODE_RTBI: + epcr |= EPCR_RTBI_MODE; + break; + case PHY_MODE_GMII: + epcr |= EPCR_GMII_MODE; + break; + case PHY_MODE_RGMII: + default: + epcr |= EPCR_RGMII_MODE; + } + + phy_write(phy, MII_CIS8201_EPCR, epcr); + + return 0; +} + +static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) +{ + u16 ctl, adv; + + phy->autoneg = 1; + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = 0; + phy->advertising = advertise; + + /* Setup standard advertise */ + adv = phy_read(phy, MII_ADVERTISE); + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (advertise & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + phy_write(phy, MII_ADVERTISE, adv); + + /* Start/Restart aneg */ + ctl = phy_read(phy, MII_BMCR); + ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); + phy_write(phy, MII_BMCR, ctl); + + return 0; +} + +static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) +{ + u16 ctl; + + phy->autoneg = 0; + phy->speed = speed; + phy->duplex = fd; + phy->pause = 0; + + ctl = phy_read(phy, MII_BMCR); + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE); + + /* First reset the PHY */ + phy_write(phy, MII_BMCR, ctl | BMCR_RESET); + + /* Select speed & duplex */ + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + ctl |= BMCR_SPEED100; + break; + case SPEED_1000: + default: + return -EINVAL; + } + if (fd == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + phy_write(phy, MII_BMCR, ctl); + + return 0; +} + +static int genmii_poll_link(struct mii_phy *phy) +{ + u16 status; + + (void)phy_read(phy, MII_BMSR); + status = phy_read(phy, MII_BMSR); + if ((status & BMSR_LSTATUS) == 0) + return 0; + if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) + return 0; + return 1; +} + +#define MII_CIS8201_ACSR 0x1c +#define ACSR_DUPLEX_STATUS 0x0020 +#define ACSR_SPEED_1000BASET 0x0010 +#define ACSR_SPEED_100BASET 0x0008 + +static int cis8201_read_link(struct mii_phy *phy) +{ + u16 acsr; + + if (phy->autoneg) { + acsr = phy_read(phy, MII_CIS8201_ACSR); + + if (acsr & ACSR_DUPLEX_STATUS) + phy->duplex = DUPLEX_FULL; + else + phy->duplex = DUPLEX_HALF; + if (acsr & ACSR_SPEED_1000BASET) { + phy->speed = SPEED_1000; + } else if (acsr & ACSR_SPEED_100BASET) + phy->speed = SPEED_100; + else + phy->speed = SPEED_10; + phy->pause = 0; + } + /* On non-aneg, 
we assume what we put in BMCR is the speed, + * though magic-aneg shouldn't prevent this case from occurring + */ + + return 0; +} + +static int genmii_read_link(struct mii_phy *phy) +{ + u16 lpa; + + if (phy->autoneg) { + lpa = phy_read(phy, MII_LPA); + + if (lpa & (LPA_10FULL | LPA_100FULL)) + phy->duplex = DUPLEX_FULL; + else + phy->duplex = DUPLEX_HALF; + if (lpa & (LPA_100FULL | LPA_100HALF)) + phy->speed = SPEED_100; + else + phy->speed = SPEED_10; + phy->pause = 0; + } + /* On non-aneg, we assume what we put in BMCR is the speed, + * though magic-aneg shouldn't prevent this case from occurring + */ + + return 0; +} + +#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII) +#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \ + SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) + +/* CIS8201 phy ops */ +static struct mii_phy_ops cis8201_phy_ops = { + init:cis8201_init, + setup_aneg:genmii_setup_aneg, + setup_forced:genmii_setup_forced, + poll_link:genmii_poll_link, + read_link:cis8201_read_link +}; + +/* Generic implementation for most 10/100 PHYs */ +static struct mii_phy_ops generic_phy_ops = { + setup_aneg:genmii_setup_aneg, + setup_forced:genmii_setup_forced, + poll_link:genmii_poll_link, + read_link:genmii_read_link +}; + +static struct mii_phy_def cis8201_phy_def = { + phy_id:0x000fc410, + phy_id_mask:0x000ffff0, + name:"CIS8201 Gigabit Ethernet", + features:MII_GBIT_FEATURES, + magic_aneg:0, + ops:&cis8201_phy_ops +}; + +static struct mii_phy_def genmii_phy_def = { + phy_id:0x00000000, + phy_id_mask:0x00000000, + name:"Generic MII", + features:MII_BASIC_FEATURES, + magic_aneg:0, + ops:&generic_phy_ops +}; + +static struct mii_phy_def *mii_phy_table[] = { + &cis8201_phy_def, + &genmii_phy_def, + NULL +}; + +int mii_phy_probe(struct mii_phy *phy, int mii_id) +{ + int rc; + u32 id; + struct mii_phy_def *def; + int i; + + phy->autoneg = 0; + phy->advertising = 0; + phy->mii_id = mii_id; + phy->speed = 0; + phy->duplex = 0; + phy->pause = 0; + + /* Take PHY out of isolate mode and reset it. */ + rc = reset_one_mii_phy(phy, mii_id); + if (rc) + return -ENODEV; + + /* Read ID and find matching entry */ + id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)) + & 0xfffffff0; + for (i = 0; (def = mii_phy_table[i]) != NULL; i++) + if ((id & def->phy_id_mask) == def->phy_id) + break; + /* Should never be NULL (we have a generic entry), but... */ + if (def == NULL) + return -ENODEV; + + phy->def = def; + + /* Setup default advertising */ + phy->advertising = def->features; + + return 0; +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h new file mode 100644 index 000000000..49f188f4e --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h @@ -0,0 +1,65 @@ +/* + * Defines for the IBM RGMII bridge + * + * Based on ocp_zmii.h/ibm_emac_zmii.h + * Armin Kuster akuster@mvista.com + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version.
+ */ + +#ifndef _IBM_EMAC_RGMII_H_ +#define _IBM_EMAC_RGMII_H_ + +#include + +/* RGMII bridge */ +typedef struct rgmii_regs { + u32 fer; /* Function enable register */ + u32 ssr; /* Speed select register */ +} rgmii_t; + +#define RGMII_INPUTS 4 + +/* RGMII device */ +struct ibm_ocp_rgmii { + struct rgmii_regs *base; + int mode[RGMII_INPUTS]; + int users; /* number of EMACs using this RGMII bridge */ +}; + +/* Functional Enable Reg */ +#define RGMII_FER_MASK(x) (0x00000007 << (4*x)) +#define RGMII_RTBI 0x00000004 +#define RGMII_RGMII 0x00000005 +#define RGMII_TBI 0x00000006 +#define RGMII_GMII 0x00000007 + +/* Speed Selection reg */ + +#define RGMII_SP2_100 0x00000002 +#define RGMII_SP2_1000 0x00000004 +#define RGMII_SP3_100 0x00000200 +#define RGMII_SP3_1000 0x00000400 + +#define RGMII_MII2_SPDMASK 0x00000007 +#define RGMII_MII3_SPDMASK 0x00000700 + +#define RGMII_MII2_100MB RGMII_SP2_100 & ~RGMII_SP2_1000 +#define RGMII_MII2_1000MB RGMII_SP2_1000 & ~RGMII_SP2_100 +#define RGMII_MII2_10MB ~(RGMII_SP2_100 | RGMII_SP2_1000) +#define RGMII_MII3_100MB RGMII_SP3_100 & ~RGMII_SP3_1000 +#define RGMII_MII3_1000MB RGMII_SP3_1000 & ~RGMII_SP3_100 +#define RGMII_MII3_10MB ~(RGMII_SP3_100 | RGMII_SP3_1000) + +#define RTBI 0 +#define RGMII 1 +#define TBI 2 +#define GMII 3 + +#endif /* _IBM_EMAC_RGMII_H_ */ diff --git a/drivers/net/ibm_emac/ibm_emac_tah.h b/drivers/net/ibm_emac/ibm_emac_tah.h new file mode 100644 index 000000000..ecfc69805 --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_tah.h @@ -0,0 +1,48 @@ +/* + * Defines for the IBM TAH + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _IBM_EMAC_TAH_H +#define _IBM_EMAC_TAH_H + +/* TAH */ +typedef struct tah_regs { + u32 tah_revid; + u32 pad[3]; + u32 tah_mr; + u32 tah_ssr0; + u32 tah_ssr1; + u32 tah_ssr2; + u32 tah_ssr3; + u32 tah_ssr4; + u32 tah_ssr5; + u32 tah_tsr; +} tah_t; + +/* TAH engine */ +#define TAH_MR_CVR 0x80000000 +#define TAH_MR_SR 0x40000000 +#define TAH_MR_ST_256 0x01000000 +#define TAH_MR_ST_512 0x02000000 +#define TAH_MR_ST_768 0x03000000 +#define TAH_MR_ST_1024 0x04000000 +#define TAH_MR_ST_1280 0x05000000 +#define TAH_MR_ST_1536 0x06000000 +#define TAH_MR_TFS_16KB 0x00000000 +#define TAH_MR_TFS_2KB 0x00200000 +#define TAH_MR_TFS_4KB 0x00400000 +#define TAH_MR_TFS_6KB 0x00600000 +#define TAH_MR_TFS_8KB 0x00800000 +#define TAH_MR_TFS_10KB 0x00a00000 +#define TAH_MR_DTFP 0x00100000 +#define TAH_MR_DIG 0x00080000 + +#endif /* _IBM_EMAC_TAH_H */ diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h new file mode 100644 index 000000000..6f6cd2a39 --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_zmii.h @@ -0,0 +1,93 @@ +/* + * ocp_zmii.h + * + * Defines for the IBM ZMII bridge + * + * Armin Kuster akuster@mvista.com + * Dec, 2001 + * + * Copyright 2001 MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version.
+ */ + +#ifndef _IBM_EMAC_ZMII_H_ +#define _IBM_EMAC_ZMII_H_ + +#include + +/* ZMII bridge registers */ +struct zmii_regs { + u32 fer; /* Function enable reg */ + u32 ssr; /* Speed select reg */ + u32 smiirs; /* SMII status reg */ +}; + +#define ZMII_INPUTS 4 + +/* ZMII device */ +struct ibm_ocp_zmii { + struct zmii_regs *base; + int mode[ZMII_INPUTS]; + int users; /* number of EMACs using this ZMII bridge */ +}; + +/* Functional Enable Reg */ + +#define ZMII_FER_MASK(x) (0xf0000000 >> (4*x)) + +#define ZMII_MDI0 0x80000000 +#define ZMII_SMII0 0x40000000 +#define ZMII_RMII0 0x20000000 +#define ZMII_MII0 0x10000000 +#define ZMII_MDI1 0x08000000 +#define ZMII_SMII1 0x04000000 +#define ZMII_RMII1 0x02000000 +#define ZMII_MII1 0x01000000 +#define ZMII_MDI2 0x00800000 +#define ZMII_SMII2 0x00400000 +#define ZMII_RMII2 0x00200000 +#define ZMII_MII2 0x00100000 +#define ZMII_MDI3 0x00080000 +#define ZMII_SMII3 0x00040000 +#define ZMII_RMII3 0x00020000 +#define ZMII_MII3 0x00010000 + +/* Speed Selection reg */ + +#define ZMII_SCI0 0x40000000 +#define ZMII_FSS0 0x20000000 +#define ZMII_SP0 0x10000000 +#define ZMII_SCI1 0x04000000 +#define ZMII_FSS1 0x02000000 +#define ZMII_SP1 0x01000000 +#define ZMII_SCI2 0x00400000 +#define ZMII_FSS2 0x00200000 +#define ZMII_SP2 0x00100000 +#define ZMII_SCI3 0x00040000 +#define ZMII_FSS3 0x00020000 +#define ZMII_SP3 0x00010000 + +#define ZMII_MII0_100MB ZMII_SP0 +#define ZMII_MII0_10MB ~ZMII_SP0 +#define ZMII_MII1_100MB ZMII_SP1 +#define ZMII_MII1_10MB ~ZMII_SP1 +#define ZMII_MII2_100MB ZMII_SP2 +#define ZMII_MII2_10MB ~ZMII_SP2 +#define ZMII_MII3_100MB ZMII_SP3 +#define ZMII_MII3_10MB ~ZMII_SP3 + +/* SMII Status reg */ + +#define ZMII_STS0 0xFF000000 /* EMAC0 smii status mask */ +#define ZMII_STS1 0x00FF0000 /* EMAC1 smii status mask */ + +#define SMII 0 +#define RMII 1 +#define MII 2 +#define MDI 3 + +#endif /* _IBM_EMAC_ZMII_H_ */ diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c new file mode 100644 index 000000000..86f34b5ec --- /dev/null +++ b/drivers/net/ne-h8300.c @@ -0,0 +1,666 @@ +/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */ +/* + original ne.c + Written 1992-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + H8/300 modified + Yoshinori Sato +*/ + +static const char version1[] = +"ne-h8300.c:v1.00 2004/04/11 ysato\n"; + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "8390.h" + +/* Some defines that people can play with if so inclined. */ + +/* Do we perform extra sanity checks on stuff ? */ +/* #define NE_SANITY_CHECK */ + +/* Do we implement the read before write bugfix ? */ +/* #define NE_RW_BUGFIX */ + +/* Do we have a non std. amount of memory? (in units of 256 byte pages) */ +/* #define PACKETBUF_MEMSIZE 0x40 */ + +/* A zero-terminated list of I/O addresses to be probed at boot. */ + +/* ---- No user-serviceable parts below ---- */ + +#define NE_BASE (dev->base_addr) +#define NE_CMD 0x00 +#define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */ +#define NE_RESET (ei_status.word16?0x3f:0x1f) /* Issue a read to reset, a write to clear.
*/ +#define NE_IO_EXTENT (ei_status.word16?0x40:0x20) + +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + +static int ne_probe1(struct net_device *dev, int ioaddr); + +static int ne_open(struct net_device *dev); +static int ne_close(struct net_device *dev); + +static void ne_reset_8390(struct net_device *dev); +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void ne_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ne_block_output(struct net_device *dev, const int count, + const unsigned char *buf, const int start_page); + + +static u32 reg_offset[16]; + +static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr) +{ + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); + int i; + unsigned char bus_width; + + bus_width = *(volatile unsigned char *)ABWCR; + bus_width &= 1 << ((base_addr >> 21) & 7); + + for (i = 0; i < sizeof(reg_offset) / sizeof(u32); i++) + if (bus_width == 0) + reg_offset[i] = i * 2 + 1; + else + reg_offset[i] = i; + + ei_local->reg_offset = reg_offset; + return 0; +} + +static int __initdata h8300_ne_count = 0; +#ifdef CONFIG_H8300H_H8MAX +static unsigned long __initdata h8300_ne_base[] = { 0x800600 }; +static int h8300_ne_irq[] = {EXT_IRQ4}; +#endif +#ifdef CONFIG_H8300H_AKI3068NET +static unsigned long __initdata h8300_ne_base[] = { 0x200000 }; +static int h8300_ne_irq[] = {EXT_IRQ5}; +#endif + +static inline int init_dev(struct net_device *dev) +{ + if (h8300_ne_count < (sizeof(h8300_ne_base) / sizeof(unsigned long))) { + dev->base_addr = h8300_ne_base[h8300_ne_count]; + dev->irq = h8300_ne_irq[h8300_ne_count]; + h8300_ne_count++; + return 0; + } else + return -ENODEV; +} + +/* Probe for various non-shared-memory ethercards. + + NEx000-clone boards have a Station Address PROM (SAPROM) in the packet + buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of + the SAPROM, while other supposed NE2000 clones must be detected by their + SA prefix. + + Reading the SAPROM from a word-wide card with the 8390 set in byte-wide + mode results in doubled values, which can be detected and compensated for. + + The probe is also responsible for initializing the card and filling + in the 'dev' and 'ei_status' structures. + + We use the minimum memory size for some ethercard product lines, iff we can't + distinguish models. You can increase the packet buffer size by setting + PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are: + E1010 starts at 0x100 and ends at 0x2000. + E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory") + E2010 starts at 0x100 and ends at 0x4000. + E2010-x starts at 0x100 and ends at 0xffff. */ + +static int __init do_ne_probe(struct net_device *dev) +{ + unsigned int base_addr = dev->base_addr; + + SET_MODULE_OWNER(dev); + + /* First check any supplied i/o locations. User knows best. */ + if (base_addr > 0x1ff) /* Check a single specified location. */ + return ne_probe1(dev, base_addr); + else if (base_addr != 0) /* Don't probe at all. 
*/ + return -ENXIO; + + return -ENODEV; +} + +static void cleanup_card(struct net_device *dev) +{ + free_irq(dev->irq, dev); + release_region(dev->base_addr, NE_IO_EXTENT); +} + +struct net_device * __init ne_probe(int unit) +{ + struct net_device *dev = alloc_ei_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + if (init_dev(dev)) + return ERR_PTR(-ENODEV); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = init_reg_offset(dev, dev->base_addr); + if (err) + goto out; + + err = do_ne_probe(dev); + if (err) + goto out; + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: + cleanup_card(dev); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static int __init ne_probe1(struct net_device *dev, int ioaddr) +{ + int i; + unsigned char SA_prom[16]; + int wordlength = 2; + const char *name = NULL; + int start_page, stop_page; + int reg0, ret; + static unsigned version_printed; + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); + unsigned char bus_width; + + if (!request_region(ioaddr, NE_IO_EXTENT, dev->name)) + return -EBUSY; + + reg0 = inb_p(ioaddr); + if (reg0 == 0xFF) { + ret = -ENODEV; + goto err_out; + } + + /* Do a preliminary verification that we have a 8390. */ + { + int regd; + outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); + regd = inb_p(ioaddr + EI_SHIFT(0x0d)); + outb_p(0xff, ioaddr + EI_SHIFT(0x0d)); + outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); + inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ + if (inb_p(ioaddr + EN0_COUNTER0) != 0) { + outb_p(reg0, ioaddr + EI_SHIFT(0)); + outb_p(regd, ioaddr + EI_SHIFT(0x0d)); /* Restore the old values. */ + ret = -ENODEV; + goto err_out; + } + } + + if (ei_debug && version_printed++ == 0) + printk(KERN_INFO "%s", version1); + + printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr); + + /* Read the 16 bytes of station address PROM. + We must first initialize registers, similar to NS8390_init(eifdev, 0). + We can't reliably read the SAPROM address without this. + (I learned the hard way!). */ + { + struct {unsigned char value, offset; } program_seq[] = + { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + + for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); + + } + bus_width = *(volatile unsigned char *)ABWCR; + bus_width &= 1 << ((ioaddr >> 21) & 7); + ei_status.word16 = (bus_width == 0); /* temporary setting */ + for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) { + SA_prom[i] = inb_p(ioaddr + NE_DATAPORT); + inb_p(ioaddr + NE_DATAPORT); /* dummy read */ + } + + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + if (bus_width) + wordlength = 1; + else + outb_p(0x49, ioaddr + EN0_DCFG); + + /* Set up the rest of the parameters. */ + name = (wordlength == 2) ? "NE2000" : "NE1000"; + + if (! dev->irq) { + printk(" failed to detect IRQ line.\n"); + ret = -EAGAIN; + goto err_out; + } + + /* Snarf the interrupt now. 
There's no point in waiting since we cannot + share and the board will usually be enabled. */ + ret = request_irq(dev->irq, ei_interrupt, 0, name, dev); + if (ret) { + printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); + goto err_out; + } + + dev->base_addr = ioaddr; + + for(i = 0; i < ETHER_ADDR_LEN; i++) { + printk(" %2.2x", SA_prom[i]); + dev->dev_addr[i] = SA_prom[i]; + } + + printk("\n%s: %s found at %#x, using IRQ %d.\n", + dev->name, name, ioaddr, dev->irq); + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = (wordlength == 2); + + ei_status.rx_start_page = start_page + TX_PAGES; +#ifdef PACKETBUF_MEMSIZE + /* Allow the packet buffer size to be overridden by know-it-alls. */ + ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; +#endif + + ei_status.reset_8390 = &ne_reset_8390; + ei_status.block_input = &ne_block_input; + ei_status.block_output = &ne_block_output; + ei_status.get_8390_hdr = &ne_get_8390_hdr; + ei_status.priv = 0; + dev->open = &ne_open; + dev->stop = &ne_close; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = ei_poll; +#endif + NS8390_init(dev, 0); + return 0; + +err_out: + release_region(ioaddr, NE_IO_EXTENT); + return ret; +} + +static int ne_open(struct net_device *dev) +{ + ei_open(dev); + return 0; +} + +static int ne_close(struct net_device *dev) +{ + if (ei_debug > 1) + printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); + ei_close(dev); + return 0; +} + +/* Hard reset the card. This used to pause for the same period that a + 8390 reset command required, but that shouldn't be necessary. */ + +static void ne_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); + + if (ei_debug > 1) + printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); + + /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ + outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) + if (jiffies - reset_start_time > 2*HZ/100) { + printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); + break; + } + outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); + /* This *shouldn't* happen. 
If it does, it's the last thing you'll see */ + + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD); + outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO); + outb_p(0, NE_BASE + EN0_RCNTHI); + outb_p(0, NE_BASE + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, NE_BASE + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); + + if (ei_status.word16) { + int len; + unsigned short *p = (unsigned short *)hdr; + for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--) + *p++ = inw(NE_BASE + NE_DATAPORT); + } else + insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); + + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; + + le16_to_cpus(&hdr->count); +} + +/* Block input and output, similar to the Crynwr packet driver. If you + are porting to a new ethercard, look at the packet driver source for hints. + The NEx000 doesn't share the on-board packet memory -- you have to put + the packet out through the "remote DMA" dataport using outb. */ + +static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); +#ifdef NE_SANITY_CHECK + int xfer_count = count; +#endif + char *buf = skb->data; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_block_input " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD); + outb_p(count & 0xff, NE_BASE + EN0_RCNTLO); + outb_p(count >> 8, NE_BASE + EN0_RCNTHI); + outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO); + outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); + if (ei_status.word16) + { + int len; + unsigned short *p = (unsigned short *)buf; + for (len = count>>1; len > 0; len--) + *p++ = inw(NE_BASE + NE_DATAPORT); + if (count & 0x01) + { + buf[count-1] = inb(NE_BASE + NE_DATAPORT); +#ifdef NE_SANITY_CHECK + xfer_count++; +#endif + } + } else { + insb(NE_BASE + NE_DATAPORT, buf, count); + } + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. If you see + this message you either 1) have a slightly incompatible clone + or 2) have noise/speed problems with your bus. */ + + if (ei_debug > 1) + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here + -- it's broken for Rx on some cards! */ + int high = inb_p(NE_BASE + EN0_RSARHI); + int low = inb_p(NE_BASE + EN0_RSARLO); + addr = (high << 8) + low; + if (((ring_offset + xfer_count) & 0xff) == low) + break; + } while (--tries > 0); + if (tries <= 0) + printk(KERN_WARNING "%s: RX transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, ring_offset + xfer_count, addr); + } +#endif + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. 
*/ + ei_status.dmaing &= ~0x01; +} + +static void ne_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); + unsigned long dma_start; +#ifdef NE_SANITY_CHECK + int retries = 0; +#endif + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + + if (ei_status.word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_block_output." + "[DMAstat:%d][irqlock:%d]\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD); + +#ifdef NE_SANITY_CHECK +retry: +#endif + +#ifdef NE8390_RW_BUGFIX + /* Handle the read-before-write bug the same way as the + Crynwr packet driver -- the NatSemi method doesn't work. + Actually this doesn't always work either, but if you have + problems with your NEx000 this is better than nothing! */ + + outb_p(0x42, NE_BASE + EN0_RCNTLO); + outb_p(0x00, NE_BASE + EN0_RCNTHI); + outb_p(0x42, NE_BASE + EN0_RSARLO); + outb_p(0x00, NE_BASE + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); + /* Make certain that the dummy read has occurred. */ + udelay(6); +#endif + + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); + + /* Now the normal output. */ + outb_p(count & 0xff, NE_BASE + EN0_RCNTLO); + outb_p(count >> 8, NE_BASE + EN0_RCNTHI); + outb_p(0x00, NE_BASE + EN0_RSARLO); + outb_p(start_page, NE_BASE + EN0_RSARHI); + + outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD); + if (ei_status.word16) { + int len; + unsigned short *p = (unsigned short *)buf; + for (len = count>>1; len > 0; len--) + outw(*p++, NE_BASE + NE_DATAPORT); + } else { + outsb(NE_BASE + NE_DATAPORT, buf, count); + } + + dma_start = jiffies; + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. */ + + if (ei_debug > 1) + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + int high = inb_p(NE_BASE + EN0_RSARHI); + int low = inb_p(NE_BASE + EN0_RSARLO); + addr = (high << 8) + low; + if ((start_page << 8) + count == addr) + break; + } while (--tries > 0); + + if (tries <= 0) + { + printk(KERN_WARNING "%s: Tx packet transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, (start_page << 8) + count, addr); + if (retries++ == 0) + goto retry; + } + } +#endif + + while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0) + if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ + printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); + ne_reset_8390(dev); + NS8390_init(dev,1); + break; + } + + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. 
*/ + ei_status.dmaing &= ~0x01; + return; +} + + +#ifdef MODULE +#define MAX_NE_CARDS 1 /* Max number of NE cards per module */ +static struct net_device *dev_ne[MAX_NE_CARDS]; +static int io[MAX_NE_CARDS]; +static int irq[MAX_NE_CARDS]; +static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */ + +MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE_CARDS) "i"); +MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE_CARDS) "i"); +MODULE_PARM(bad, "1-" __MODULE_STRING(MAX_NE_CARDS) "i"); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s)"); +MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that no ISA autoprobe takes place. We can't guarantee +that the ne2k probe is the last 8390 based probe to take place (as it +is at boot) and so the probe will get confused by any other 8390 cards. +ISA device autoprobes on a running machine are not recommended anyway. */ + +int init_module(void) +{ + int this_dev, found = 0; + int err; + + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + struct net_device *dev = alloc_ei_netdev(); + if (!dev) + break; + if (io[this_dev]) { + dev->irq = irq[this_dev]; + dev->mem_end = bad[this_dev]; + dev->base_addr = io[this_dev]; + } else { + dev->base_addr = h8300_ne_base[this_dev]; + dev->irq = h8300_ne_irq[this_dev]; + } + err = init_reg_offset(dev, dev->base_addr); + if (!err) { + if (do_ne_probe(dev) == 0) { + if (register_netdev(dev) == 0) { + dev_ne[found++] = dev; + continue; + } + cleanup_card(dev); + } + } + free_netdev(dev); + if (found) + break; + if (io[this_dev] != 0) + printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr); + else + printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n"); + return -ENXIO; + } + if (found) + return 0; + return -ENODEV; +} + +void cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + struct net_device *dev = dev_ne[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h new file mode 100644 index 000000000..e46cff345 --- /dev/null +++ b/drivers/pcmcia/pxa2xx_base.h @@ -0,0 +1,3 @@ +/* temporary measure */ +extern int pxa2xx_drv_pcmcia_probe(struct device *); + diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c new file mode 100644 index 000000000..e9c5098ed --- /dev/null +++ b/drivers/scsi/ipr.c @@ -0,0 +1,6021 @@ +/* + * ipr.c -- driver for IBM Power Linux RAID adapters + * + * Written By: Brian King, IBM Corporation + * + * Copyright (C) 2003, 2004 IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +/* + * Notes: + * + * This driver is used to control the following SCSI adapters: + * + * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B + * + * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter + * PCI-X Dual Channel Ultra 320 SCSI Adapter + * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card + * Embedded SCSI adapter on p615 and p655 systems + * + * Supported Hardware Features: + * - Ultra 320 SCSI controller + * - PCI-X host interface + * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine + * - Non-Volatile Write Cache + * - Supports attachment of non-RAID disks, tape, and optical devices + * - RAID Levels 0, 5, 10 + * - Hot spare + * - Background Parity Checking + * - Background Data Scrubbing + * - Ability to increase the capacity of an existing RAID 5 disk array + * by adding disks + * + * Driver Features: + * - Tagged command queuing + * - Adapter microcode download + * - PCI hot plug + * - SCSI device hot plug + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipr.h" + +/* + * Global Data + */ +static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head); +static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL; +static unsigned int ipr_max_speed = 1; +static int ipr_testmode = 0; +static spinlock_t ipr_driver_lock = SPIN_LOCK_UNLOCKED; + +/* This table describes the differences between DMA controller chips */ +static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { + { /* Gemstone */ + .mailbox = 0x0042C, + .cache_line_size = 0x20, + { + .set_interrupt_mask_reg = 0x0022C, + .clr_interrupt_mask_reg = 0x00230, + .sense_interrupt_mask_reg = 0x0022C, + .clr_interrupt_reg = 0x00228, + .sense_interrupt_reg = 0x00224, + .ioarrin_reg = 0x00404, + .sense_uproc_interrupt_reg = 0x00214, + .set_uproc_interrupt_reg = 0x00214, + .clr_uproc_interrupt_reg = 0x00218 + } + }, + { /* Snipe */ + .mailbox = 0x0052C, + .cache_line_size = 0x20, + { + .set_interrupt_mask_reg = 0x00288, + .clr_interrupt_mask_reg = 0x0028C, + .sense_interrupt_mask_reg = 0x00288, + .clr_interrupt_reg = 0x00284, + .sense_interrupt_reg = 0x00280, + .ioarrin_reg = 0x00504, + .sense_uproc_interrupt_reg = 0x00290, + .set_uproc_interrupt_reg = 0x00290, + .clr_uproc_interrupt_reg = 0x00294 + } + }, +}; + +static int ipr_max_bus_speeds [] = { + IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE +}; + +MODULE_AUTHOR("Brian King "); +MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver"); +module_param_named(max_speed, ipr_max_speed, uint, 0); +MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320"); +module_param_named(log_level, ipr_log_level, uint, 0); +MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver"); +module_param_named(testmode, ipr_testmode, int, 0); +MODULE_PARM_DESC(testmode, "DANGEROUS!!! 
Allows unsupported configurations"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IPR_DRIVER_VERSION); + +static const char *ipr_gpdd_dev_end_states[] = { + "Command complete", + "Terminated by host", + "Terminated by device reset", + "Terminated by bus reset", + "Unknown", + "Command not started" +}; + +static const char *ipr_gpdd_dev_bus_phases[] = { + "Bus free", + "Arbitration", + "Selection", + "Message out", + "Command", + "Message in", + "Data out", + "Data in", + "Status", + "Reselection", + "Unknown" +}; + +/* A constant array of IOASCs/URCs/Error Messages */ +static const +struct ipr_error_table_t ipr_error_table[] = { + {0x00000000, 1, 1, + "8155: An unknown error was received"}, + {0x00330000, 0, 0, + "Soft underlength error"}, + {0x005A0000, 0, 0, + "Command to be cancelled not found"}, + {0x00808000, 0, 0, + "Qualified success"}, + {0x01080000, 1, 1, + "FFFE: Soft device bus error recovered by the IOA"}, + {0x01170600, 0, 1, + "FFF9: Device sector reassign successful"}, + {0x01170900, 0, 1, + "FFF7: Media error recovered by device rewrite procedures"}, + {0x01180200, 0, 1, + "7001: IOA sector reassignment successful"}, + {0x01180500, 0, 1, + "FFF9: Soft media error. Sector reassignment recommended"}, + {0x01180600, 0, 1, + "FFF7: Media error recovered by IOA rewrite procedures"}, + {0x01418000, 0, 1, + "FF3D: Soft PCI bus error recovered by the IOA"}, + {0x01440000, 1, 1, + "FFF6: Device hardware error recovered by the IOA"}, + {0x01448100, 0, 1, + "FFF6: Device hardware error recovered by the device"}, + {0x01448200, 1, 1, + "FF3D: Soft IOA error recovered by the IOA"}, + {0x01448300, 0, 1, + "FFFA: Undefined device response recovered by the IOA"}, + {0x014A0000, 1, 1, + "FFF6: Device bus error, message or command phase"}, + {0x015D0000, 0, 1, + "FFF6: Failure prediction threshold exceeded"}, + {0x015D9200, 0, 1, + "8009: Impending cache battery pack failure"}, + {0x02040400, 0, 0, + "34FF: Disk device format in progress"}, + {0x023F0000, 0, 0, + "Synchronization required"}, + {0x024E0000, 0, 0, + "No ready, IOA shutdown"}, + {0x02670100, 0, 1, + "3020: Storage subsystem configuration error"}, + {0x03110B00, 0, 0, + "FFF5: Medium error, data unreadable, recommend reassign"}, + {0x03110C00, 0, 0, + "7000: Medium error, data unreadable, do not reassign"}, + {0x03310000, 0, 1, + "FFF3: Disk media format bad"}, + {0x04050000, 0, 1, + "3002: Addressed device failed to respond to selection"}, + {0x04080000, 1, 1, + "3100: Device bus error"}, + {0x04080100, 0, 1, + "3109: IOA timed out a device command"}, + {0x04088000, 0, 0, + "3120: SCSI bus is not operational"}, + {0x04118000, 0, 1, + "9000: IOA reserved area data check"}, + {0x04118100, 0, 1, + "9001: IOA reserved area invalid data pattern"}, + {0x04118200, 0, 1, + "9002: IOA reserved area LRC error"}, + {0x04320000, 0, 1, + "102E: Out of alternate sectors for disk storage"}, + {0x04330000, 1, 1, + "FFF4: Data transfer underlength error"}, + {0x04338000, 1, 1, + "FFF4: Data transfer overlength error"}, + {0x043E0100, 0, 1, + "3400: Logical unit failure"}, + {0x04408500, 0, 1, + "FFF4: Device microcode is corrupt"}, + {0x04418000, 1, 1, + "8150: PCI bus error"}, + {0x04430000, 1, 0, + "Unsupported device bus message received"}, + {0x04440000, 1, 1, + "FFF4: Disk device problem"}, + {0x04448200, 1, 1, + "8150: Permanent IOA failure"}, + {0x04448300, 0, 1, + "3010: Disk device returned wrong response to IOA"}, + {0x04448400, 0, 1, + "8151: IOA microcode error"}, + {0x04448500, 0, 0, + "Device bus status error"}, + {0x04448600, 0, 1, + 
"8157: IOA error requiring IOA reset to recover"}, + {0x04490000, 0, 0, + "Message reject received from the device"}, + {0x04449200, 0, 1, + "8008: A permanent cache battery pack failure occurred"}, + {0x0444A000, 0, 1, + "9090: Disk unit has been modified after the last known status"}, + {0x0444A200, 0, 1, + "9081: IOA detected device error"}, + {0x0444A300, 0, 1, + "9082: IOA detected device error"}, + {0x044A0000, 1, 1, + "3110: Device bus error, message or command phase"}, + {0x04670400, 0, 1, + "9091: Incorrect hardware configuration change has been detected"}, + {0x046E0000, 0, 1, + "FFF4: Command to logical unit failed"}, + {0x05240000, 1, 0, + "Illegal request, invalid request type or request packet"}, + {0x05250000, 0, 0, + "Illegal request, invalid resource handle"}, + {0x05260000, 0, 0, + "Illegal request, invalid field in parameter list"}, + {0x05260100, 0, 0, + "Illegal request, parameter not supported"}, + {0x05260200, 0, 0, + "Illegal request, parameter value invalid"}, + {0x052C0000, 0, 0, + "Illegal request, command sequence error"}, + {0x06040500, 0, 1, + "9031: Array protection temporarily suspended, protection resuming"}, + {0x06040600, 0, 1, + "9040: Array protection temporarily suspended, protection resuming"}, + {0x06290000, 0, 1, + "FFFB: SCSI bus was reset"}, + {0x06290500, 0, 0, + "FFFE: SCSI bus transition to single ended"}, + {0x06290600, 0, 0, + "FFFE: SCSI bus transition to LVD"}, + {0x06298000, 0, 1, + "FFFB: SCSI bus was reset by another initiator"}, + {0x063F0300, 0, 1, + "3029: A device replacement has occurred"}, + {0x064C8000, 0, 1, + "9051: IOA cache data exists for a missing or failed device"}, + {0x06670100, 0, 1, + "9025: Disk unit is not supported at its physical location"}, + {0x06670600, 0, 1, + "3020: IOA detected a SCSI bus configuration error"}, + {0x06678000, 0, 1, + "3150: SCSI bus configuration error"}, + {0x06690200, 0, 1, + "9041: Array protection temporarily suspended"}, + {0x066B0200, 0, 1, + "9030: Array no longer protected due to missing or failed disk unit"}, + {0x07270000, 0, 0, + "Failure due to other device"}, + {0x07278000, 0, 1, + "9008: IOA does not support functions expected by devices"}, + {0x07278100, 0, 1, + "9010: Cache data associated with attached devices cannot be found"}, + {0x07278200, 0, 1, + "9011: Cache data belongs to devices other than those attached"}, + {0x07278400, 0, 1, + "9020: Array missing 2 or more devices with only 1 device present"}, + {0x07278500, 0, 1, + "9021: Array missing 2 or more devices with 2 or more devices present"}, + {0x07278600, 0, 1, + "9022: Exposed array is missing a required device"}, + {0x07278700, 0, 1, + "9023: Array member(s) not at required physical locations"}, + {0x07278800, 0, 1, + "9024: Array not functional due to present hardware configuration"}, + {0x07278900, 0, 1, + "9026: Array not functional due to present hardware configuration"}, + {0x07278A00, 0, 1, + "9027: Array is missing a device and parity is out of sync"}, + {0x07278B00, 0, 1, + "9028: Maximum number of arrays already exist"}, + {0x07278C00, 0, 1, + "9050: Required cache data cannot be located for a disk unit"}, + {0x07278D00, 0, 1, + "9052: Cache data exists for a device that has been modified"}, + {0x07278F00, 0, 1, + "9054: IOA resources not available due to previous problems"}, + {0x07279100, 0, 1, + "9092: Disk unit requires initialization before use"}, + {0x07279200, 0, 1, + "9029: Incorrect hardware configuration change has been detected"}, + {0x07279600, 0, 1, + "9060: One or more disk pairs are missing 
from an array"}, + {0x07279700, 0, 1, + "9061: One or more disks are missing from an array"}, + {0x07279800, 0, 1, + "9062: One or more disks are missing from an array"}, + {0x07279900, 0, 1, + "9063: Maximum number of functional arrays has been exceeded"}, + {0x0B260000, 0, 0, + "Aborted command, invalid descriptor"}, + {0x0B5A0000, 0, 0, + "Command terminated by host"} +}; + +static const struct ipr_ses_table_entry ipr_ses_table[] = { + { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 }, + { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 }, + { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */ + { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */ + { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */ + { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */ + { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 }, + { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 }, + { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, + { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, + { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 }, + { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 }, + { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 } +}; + +/* + * Function Prototypes + */ +static int ipr_reset_alert(struct ipr_cmnd *); +static void ipr_process_ccn(struct ipr_cmnd *); +static void ipr_process_error(struct ipr_cmnd *); +static void ipr_reset_ioa_job(struct ipr_cmnd *); +static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *, + enum ipr_shutdown_type); + +#ifdef CONFIG_SCSI_IPR_TRACE +/** + * ipr_trc_hook - Add a trace entry to the driver trace + * @ipr_cmd: ipr command struct + * @type: trace type + * @add_data: additional data + * + * Return value: + * none + **/ +static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, + u8 type, u32 add_data) +{ + struct ipr_trace_entry *trace_entry; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++]; + trace_entry->time = jiffies; + trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; + trace_entry->type = type; + trace_entry->cmd_index = ipr_cmd->cmd_index; + trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; + trace_entry->u.add_data = add_data; +} +#else +#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0) +#endif + +/** + * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + + memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); + ioarcb->write_data_transfer_length = 0; + ioarcb->read_data_transfer_length = 0; + ioarcb->write_ioadl_len = 0; + ioarcb->read_ioadl_len = 0; + ioasa->ioasc = 0; + ioasa->residual_data_len = 0; + + ipr_cmd->scsi_cmd = NULL; + ipr_cmd->sense_buffer[0] = 0; + ipr_cmd->dma_use_sg = 0; +} + +/** + * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd) +{ + ipr_reinit_ipr_cmnd(ipr_cmd); + ipr_cmd->u.scratch = 0; + init_timer(&ipr_cmd->timer); +} + +/** + * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block + * @ioa_cfg: ioa config struct + * + * Return value: + * pointer to ipr command struct + **/ +static +struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_cmnd *ipr_cmd; + + ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue); + list_del(&ipr_cmd->queue); + 
ipr_init_ipr_cmnd(ipr_cmd); + + return ipr_cmd; +} + +/** + * ipr_unmap_sglist - Unmap scatterlist if mapped + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * Return value: + * nothing + **/ +static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + + if (ipr_cmd->dma_use_sg) { + if (scsi_cmd->use_sg > 0) { + pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer, + scsi_cmd->use_sg, + scsi_cmd->sc_data_direction); + } else { + pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle, + scsi_cmd->request_bufflen, + scsi_cmd->sc_data_direction); + } + } +} + +/** + * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts + * @ioa_cfg: ioa config struct + * @clr_ints: interrupts to clear + * + * This function masks all interrupts on the adapter, then clears the + * interrupts specified in the mask + * + * Return value: + * none + **/ +static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, + u32 clr_ints) +{ + volatile u32 int_reg; + + /* Stop new interrupts */ + ioa_cfg->allow_interrupts = 0; + + /* Set interrupt mask to stop all new interrupts */ + writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); + + /* Clear any pending interrupts */ + writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); +} + +/** + * ipr_save_pcix_cmd_reg - Save PCI-X command register + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) +{ + int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); + + if (pcix_cmd_reg == 0) { + dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); + return -EIO; + } + + if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg, + &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { + dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); + return -EIO; + } + + ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; + return 0; +} + +/** + * ipr_set_pcix_cmd_reg - Setup PCI-X command register + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) +{ + int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); + + if (pcix_cmd_reg) { + if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg, + ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { + dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); + return -EIO; + } + } else { + dev_err(&ioa_cfg->pdev->dev, + "Failed to setup PCI-X command register\n"); + return -EIO; + } + + return 0; +} + +/** + * ipr_scsi_eh_done - mid-layer done function for aborted ops + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler for + * ops generated by the SCSI mid-layer which are being aborted. + * + * Return value: + * none + **/ +static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + + scsi_cmd->result |= (DID_ERROR << 16); + + ipr_unmap_sglist(ioa_cfg, ipr_cmd); + scsi_cmd->scsi_done(scsi_cmd); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); +} + +/** + * ipr_fail_all_ops - Fails all outstanding ops. + * @ioa_cfg: ioa config struct + * + * This function fails all outstanding ops. 
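+ * Each command on the pending queue is removed, its IOASC is set to + * IPR_IOASC_IOA_WAS_RESET, its timer is deleted, and its done routine is + * invoked (ipr_scsi_eh_done for ops that came from the SCSI mid-layer).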
+ * + * Return value: + * none + **/ +static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_cmnd *ipr_cmd, *temp; + + ENTER; + list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) { + list_del(&ipr_cmd->queue); + + ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); + ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID); + + if (ipr_cmd->scsi_cmd) + ipr_cmd->done = ipr_scsi_eh_done; + + ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET); + del_timer(&ipr_cmd->timer); + ipr_cmd->done(ipr_cmd); + } + + LEAVE; +} + +/** + * ipr_do_req - Send driver initiated requests. + * @ipr_cmd: ipr command struct + * @done: done function + * @timeout_func: timeout function + * @timeout: timeout value + * + * This function sends the specified command to the adapter with the + * timeout given. The done function is invoked on command completion. + * + * Return value: + * none + **/ +static void ipr_do_req(struct ipr_cmnd *ipr_cmd, + void (*done) (struct ipr_cmnd *), + void (*timeout_func) (struct ipr_cmnd *), u32 timeout) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); + + ipr_cmd->done = done; + + ipr_cmd->timer.data = (unsigned long) ipr_cmd; + ipr_cmd->timer.expires = jiffies + timeout; + ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func; + + add_timer(&ipr_cmd->timer); + + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); + + mb(); + writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), + ioa_cfg->regs.ioarrin_reg); +} + +/** + * ipr_internal_cmd_done - Op done function for an internally generated op. + * @ipr_cmd: ipr command struct + * + * This function is the op done function for an internally generated, + * blocking op. It simply wakes the sleeping thread. + * + * Return value: + * none + **/ +static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd) +{ + if (ipr_cmd->u.sibling) + ipr_cmd->u.sibling = NULL; + else + complete(&ipr_cmd->completion); +} + +/** + * ipr_send_blocking_cmd - Send command and sleep on its completion. + * @ipr_cmd: ipr command struct + * @timeout_func: function to invoke if command times out + * @timeout: timeout + * + * Return value: + * none + **/ +static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, + void (*timeout_func) (struct ipr_cmnd *ipr_cmd), + u32 timeout) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + init_completion(&ipr_cmd->completion); + ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout); + + spin_unlock_irq(ioa_cfg->host->host_lock); + wait_for_completion(&ipr_cmd->completion); + spin_lock_irq(ioa_cfg->host->host_lock); +} + +/** + * ipr_send_hcam - Send an HCAM to the adapter. + * @ioa_cfg: ioa config struct + * @type: HCAM type + * @hostrcb: hostrcb struct + * + * This function will send a Host Controlled Async command to the adapter. + * If HCAMs are currently not allowed to be issued to the adapter, it will + * place the hostrcb on the free queue. 
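+ * The done routine for the command is set to ipr_process_ccn for + * configuration change notifications and to ipr_process_error for error + * log HCAMs.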
+ * + * Return value: + * none + **/ +static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioarcb *ioarcb; + + if (ioa_cfg->allow_cmds) { + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); + list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); + + ipr_cmd->u.hostrcb = hostrcb; + ioarcb = &ipr_cmd->ioarcb; + + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM; + ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC; + ioarcb->cmd_pkt.cdb[1] = type; + ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; + + ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam)); + ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ipr_cmd->ioadl[0].flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam)); + ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma); + + if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE) + ipr_cmd->done = ipr_process_ccn; + else + ipr_cmd->done = ipr_process_error; + + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); + + mb(); + writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), + ioa_cfg->regs.ioarrin_reg); + } else { + list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); + } +} + +/** + * ipr_init_res_entry - Initialize a resource entry struct. + * @res: resource entry struct + * + * Return value: + * none + **/ +static void ipr_init_res_entry(struct ipr_resource_entry *res) +{ + res->needs_sync_complete = 1; + res->in_erp = 0; + res->add_to_ml = 0; + res->del_from_ml = 0; + res->resetting_device = 0; + res->tcq_active = 0; + res->qdepth = IPR_MAX_CMD_PER_LUN; + res->sdev = NULL; +} + +/** + * ipr_handle_config_change - Handle a config change from the adapter + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb + * + * Return value: + * none + **/ +static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_resource_entry *res = NULL; + struct ipr_config_table_entry *cfgte; + u32 is_ndn = 1; + + cfgte = &hostrcb->hcam.u.ccn.cfgte; + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr, + sizeof(cfgte->res_addr))) { + is_ndn = 0; + break; + } + } + + if (is_ndn) { + if (list_empty(&ioa_cfg->free_res_q)) { + ipr_send_hcam(ioa_cfg, + IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, + hostrcb); + return; + } + + res = list_entry(ioa_cfg->free_res_q.next, + struct ipr_resource_entry, queue); + + list_del(&res->queue); + ipr_init_res_entry(res); + list_add_tail(&res->queue, &ioa_cfg->used_res_q); + } + + memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); + + if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { + if (res->sdev) { + res->sdev->hostdata = NULL; + res->del_from_ml = 1; + if (ioa_cfg->allow_ml_add_del) + schedule_work(&ioa_cfg->work_q); + } else + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + } else if (!res->sdev) { + res->add_to_ml = 1; + if (ioa_cfg->allow_ml_add_del) + schedule_work(&ioa_cfg->work_q); + } + + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); +} + +/** + * ipr_process_ccn - Op done function for a CCN. 
+ * @ipr_cmd: ipr command struct + * + * This function is the op done function for a configuration + * change notification host controlled async from the adapter. + * + * Return value: + * none + **/ +static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + list_del(&hostrcb->queue); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + + if (ioasc) { + if (ioasc != IPR_IOASC_IOA_WAS_RESET) + dev_err(&ioa_cfg->pdev->dev, + "Host RCB failed with IOASC: 0x%08X\n", ioasc); + + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); + } else { + ipr_handle_config_change(ioa_cfg, hostrcb); + } +} + +/** + * ipr_log_vpd - Log the passed VPD to the error log. + * @vpids: vendor/product id struct + * @serial_num: serial number string + * + * Return value: + * none + **/ +static void ipr_log_vpd(struct ipr_std_inq_vpids *vpids, u8 *serial_num) +{ + char buffer[max_t(int, sizeof(struct ipr_std_inq_vpids), + IPR_SERIAL_NUM_LEN) + 1]; + + memcpy(buffer, vpids, sizeof(struct ipr_std_inq_vpids)); + buffer[sizeof(struct ipr_std_inq_vpids)] = '\0'; + ipr_err("Vendor/Product ID: %s\n", buffer); + + memcpy(buffer, serial_num, IPR_SERIAL_NUM_LEN); + buffer[IPR_SERIAL_NUM_LEN] = '\0'; + ipr_err(" Serial Number: %s\n", buffer); +} + +/** + * ipr_log_cache_error - Log a cache error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_02_error *error = + &hostrcb->hcam.u.error.u.type_02_error; + + ipr_err("-----Current Configuration-----\n"); + ipr_err("Cache Directory Card Information:\n"); + ipr_log_vpd(&error->ioa_vpids, error->ioa_sn); + ipr_err("Adapter Card Information:\n"); + ipr_log_vpd(&error->cfc_vpids, error->cfc_sn); + + ipr_err("-----Expected Configuration-----\n"); + ipr_err("Cache Directory Card Information:\n"); + ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpids, + error->ioa_last_attached_to_cfc_sn); + ipr_err("Adapter Card Information:\n"); + ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpids, + error->cfc_last_attached_to_ioa_sn); + + ipr_err("Additional IOA Data: %08X %08X %08X\n", + be32_to_cpu(error->ioa_data[0]), + be32_to_cpu(error->ioa_data[1]), + be32_to_cpu(error->ioa_data[2])); +} + +/** + * ipr_log_config_error - Log a configuration error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int errors_logged, i; + struct ipr_hostrcb_device_data_entry *dev_entry; + struct ipr_hostrcb_type_03_error *error; + + error = &hostrcb->hcam.u.error.u.type_03_error; + errors_logged = be32_to_cpu(error->errors_logged); + + ipr_err("Device Errors Detected/Logged: %d/%d\n", + be32_to_cpu(error->errors_detected), errors_logged); + + dev_entry = error->dev_entry; + + for (i = 0; i < errors_logged; i++, dev_entry++) { + ipr_err_separator; + + if (dev_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) { + ipr_err("Device %d: missing\n", i + 1); + } else { + ipr_err("Device %d: %d:%d:%d:%d\n", i + 1, + ioa_cfg->host->host_no, dev_entry->dev_res_addr.bus, + dev_entry->dev_res_addr.target, dev_entry->dev_res_addr.lun); + } + ipr_log_vpd(&dev_entry->dev_vpids, dev_entry->dev_sn); + + ipr_err("-----New Device Information-----\n"); + ipr_log_vpd(&dev_entry->new_dev_vpids, dev_entry->new_dev_sn); + + ipr_err("Cache Directory Card Information:\n"); + ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpids, + dev_entry->ioa_last_with_dev_sn); + + ipr_err("Adapter Card Information:\n"); + ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpids, + dev_entry->cfc_last_with_dev_sn); + + ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n", + be32_to_cpu(dev_entry->ioa_data[0]), + be32_to_cpu(dev_entry->ioa_data[1]), + be32_to_cpu(dev_entry->ioa_data[2]), + be32_to_cpu(dev_entry->ioa_data[3]), + be32_to_cpu(dev_entry->ioa_data[4])); + } +} + +/** + * ipr_log_array_error - Log an array configuration error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int i; + struct ipr_hostrcb_type_04_error *error; + struct ipr_hostrcb_array_data_entry *array_entry; + u8 zero_sn[IPR_SERIAL_NUM_LEN]; + + memset(zero_sn, '0', IPR_SERIAL_NUM_LEN); + + error = &hostrcb->hcam.u.error.u.type_04_error; + + ipr_err_separator; + + ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", + error->protection_level, + ioa_cfg->host->host_no, + error->last_func_vset_res_addr.bus, + error->last_func_vset_res_addr.target, + error->last_func_vset_res_addr.lun); + + ipr_err_separator; + + array_entry = error->array_member; + + for (i = 0; i < 18; i++) { + if (!memcmp(array_entry->serial_num, zero_sn, IPR_SERIAL_NUM_LEN)) + continue; + + if (error->exposed_mode_adn == i) { + ipr_err("Exposed Array Member %d:\n", i); + } else { + ipr_err("Array Member %d:\n", i); + } + + ipr_log_vpd(&array_entry->vpids, array_entry->serial_num); + + if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) { + ipr_err("Current Location: unknown\n"); + } else { + ipr_err("Current Location: %d:%d:%d:%d\n", + ioa_cfg->host->host_no, + array_entry->dev_res_addr.bus, + array_entry->dev_res_addr.target, + array_entry->dev_res_addr.lun); + } + + if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) { + ipr_err("Expected Location: unknown\n"); + } else { + ipr_err("Expected Location: %d:%d:%d:%d\n", + ioa_cfg->host->host_no, + array_entry->expected_dev_res_addr.bus, + array_entry->expected_dev_res_addr.target, + array_entry->expected_dev_res_addr.lun); + } + + ipr_err_separator; + + if (i == 9) + array_entry = error->array_member2; + else + array_entry++; + } +} + +/** + * ipr_log_generic_error - Log an adapter error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int i; + int ioa_data_len = be32_to_cpu(hostrcb->hcam.length); + + if (ioa_data_len == 0) + return; + + ipr_err("IOA Error Data:\n"); + ipr_err("Offset 0 1 2 3 4 5 6 7 8 9 A B C D E F\n"); + + for (i = 0; i < ioa_data_len / 4; i += 4) { + ipr_err("%08X: %08X %08X %08X %08X\n", i*4, + be32_to_cpu(hostrcb->hcam.u.raw.data[i]), + be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]), + be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]), + be32_to_cpu(hostrcb->hcam.u.raw.data[i+3])); + } +} + +/** + * ipr_get_error - Find the specified IOASC in the ipr_error_table. + * @ioasc: IOASC + * + * This function will return the index into the ipr_error_table + * for the specified IOASC. If the IOASC is not in the table, + * 0 will be returned, which points to the entry used for unknown errors. + * + * Return value: + * index into the ipr_error_table + **/ +static u32 ipr_get_error(u32 ioasc) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++) + if (ipr_error_table[i].ioasc == ioasc) + return i; + + return 0; +} + +/** + * ipr_handle_log_data - Log an adapter error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * This function logs an adapter error to the system. + * + * Return value: + * none + **/ +static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + u32 ioasc; + int error_index; + + if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) + return; + + if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) + dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); + + ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); + + if (ioasc == IPR_IOASC_BUS_WAS_RESET || + ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) { + /* Tell the midlayer we had a bus reset so it will handle the UA properly */ + scsi_report_bus_reset(ioa_cfg->host, + hostrcb->hcam.u.error.failing_dev_res_addr.bus); + } + + error_index = ipr_get_error(ioasc); + + if (!ipr_error_table[error_index].log_hcam) + return; + + if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) { + ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr, + "%s\n", ipr_error_table[error_index].error); + } else { + dev_err(&ioa_cfg->pdev->dev, "%s\n", + ipr_error_table[error_index].error); + } + + /* Set indication we have logged an error */ + ioa_cfg->errors_logged++; + + if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) + return; + + switch (hostrcb->hcam.overlay_id) { + case IPR_HOST_RCB_OVERLAY_ID_1: + ipr_log_generic_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_2: + ipr_log_cache_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_3: + ipr_log_config_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_4: + case IPR_HOST_RCB_OVERLAY_ID_6: + ipr_log_array_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: + ipr_log_generic_error(ioa_cfg, hostrcb); + break; + default: + dev_err(&ioa_cfg->pdev->dev, + "Unknown error received. Overlay ID: %d\n", + hostrcb->hcam.overlay_id); + break; + } +} + +/** + * ipr_process_error - Op done function for an adapter error log. + * @ipr_cmd: ipr command struct + * + * This function is the op done function for an error log host + * controlled async from the adapter. It will log the error and + * send the HCAM back to the adapter. 
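+ * If the HCAM itself completed with a failing IOASC other than + * IPR_IOASC_IOA_WAS_RESET, that failure is logged instead; the hostrcb is + * then resent to the adapter as a log data HCAM.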
+ * + * Return value: + * none + **/ +static void ipr_process_error(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + list_del(&hostrcb->queue); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + + if (!ioasc) { + ipr_handle_log_data(ioa_cfg, hostrcb); + } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) { + dev_err(&ioa_cfg->pdev->dev, + "Host RCB failed with IOASC: 0x%08X\n", ioasc); + } + + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); +} + +/** + * ipr_timeout - An internally generated op has timed out. + * @ipr_cmd: ipr command struct + * + * This function blocks host requests and initiates an + * adapter reset. + * + * Return value: + * none + **/ +static void ipr_timeout(struct ipr_cmnd *ipr_cmd) +{ + unsigned long lock_flags = 0; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, + "Adapter being reset due to command timeout.\n"); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +/** + * ipr_reset_reload - Reset/Reload the IOA + * @ioa_cfg: ioa config struct + * @shutdown_type: shutdown type + * + * This function resets the adapter and re-initializes it. + * This function assumes that all new host commands have been stopped. + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg, + enum ipr_shutdown_type shutdown_type) +{ + if (!ioa_cfg->in_reset_reload) + ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); + + spin_unlock_irq(ioa_cfg->host->host_lock); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irq(ioa_cfg->host->host_lock); + + /* If we got hit with a host reset while we were already resetting + the adapter for some reason, and the reset failed. */ + if (ioa_cfg->ioa_is_dead) { + ipr_trace; + return FAILED; + } + + return SUCCESS; +} + +/** + * ipr_find_ses_entry - Find matching SES in SES table + * @res: resource entry struct of SES + * + * Return value: + * pointer to SES table entry / NULL on failure + **/ +static const struct ipr_ses_table_entry * +ipr_find_ses_entry(struct ipr_resource_entry *res) +{ + int i, j, matches; + const struct ipr_ses_table_entry *ste = ipr_ses_table; + + for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { + for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { + if (ste->compare_product_id_byte[j] == 'X') { + if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j]) + matches++; + else + break; + } else + matches++; + } + + if (matches == IPR_PROD_ID_LEN) + return ste; + } + + return NULL; +} + +/** + * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus + * @ioa_cfg: ioa config struct + * @bus: SCSI bus + * @bus_width: bus width + * + * Return value: + * SCSI bus speed in units of 100KHz, 1600 is 160 MHz + * For a 2-byte wide SCSI bus, the maximum transfer speed is + * twice the maximum transfer rate (e.g. for a wide enabled bus, + * max 160MHz = max 320MB/sec). 
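+ * The rate starts at IPR_MAX_SCSI_RATE(bus_width) and is overridden by the + * max_bus_speed_limit of any matching SES device found on the bus, scaled + * for the bus width.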
+ **/ +static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) +{ + struct ipr_resource_entry *res; + const struct ipr_ses_table_entry *ste; + u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width); + + /* Loop through each config table entry in the config table buffer */ + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data))) + continue; + + if (bus != res->cfgte.res_addr.bus) + continue; + + if (!(ste = ipr_find_ses_entry(res))) + continue; + + max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); + } + + return max_xfer_rate; +} + +/** + * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA + * @ioa_cfg: ioa config struct + * @max_delay: max delay in micro-seconds to wait + * + * Waits for an IODEBUG ACK from the IOA, doing busy looping. + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) +{ + volatile u32 pcii_reg; + int delay = 1; + + /* Read interrupt reg until IOA signals IO Debug Acknowledge */ + while (delay < max_delay) { + pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + + if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE) + return 0; + + /* udelay cannot be used if delay is more than a few milliseconds */ + if ((delay / 1000) > MAX_UDELAY_MS) + mdelay(delay / 1000); + else + udelay(delay); + + delay += delay; + } + return -EIO; +} + +/** + * ipr_get_ldump_data_section - Dump IOA memory + * @ioa_cfg: ioa config struct + * @start_addr: adapter address to dump + * @dest: destination kernel buffer + * @length_in_words: length to dump in 4 byte words + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, + u32 start_addr, + u32 *dest, u32 length_in_words) +{ + volatile u32 temp_pcii_reg; + int i, delay = 0; + + /* Write IOA interrupt reg starting LDUMP state */ + writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), + ioa_cfg->regs.set_uproc_interrupt_reg); + + /* Wait for IO debug acknowledge */ + if (ipr_wait_iodbg_ack(ioa_cfg, + IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) { + dev_err(&ioa_cfg->pdev->dev, + "IOA dump long data transfer timeout\n"); + return -EIO; + } + + /* Signal LDUMP interlocked - clear IO debug ack */ + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, + ioa_cfg->regs.clr_interrupt_reg); + + /* Write Mailbox with starting address */ + writel(start_addr, ioa_cfg->ioa_mailbox); + + /* Signal address valid - clear IOA Reset alert */ + writel(IPR_UPROCI_RESET_ALERT, + ioa_cfg->regs.clr_uproc_interrupt_reg); + + for (i = 0; i < length_in_words; i++) { + /* Wait for IO debug acknowledge */ + if (ipr_wait_iodbg_ack(ioa_cfg, + IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) { + dev_err(&ioa_cfg->pdev->dev, + "IOA dump short data transfer timeout\n"); + return -EIO; + } + + /* Read data from mailbox and increment destination pointer */ + *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); + dest++; + + /* For all but the last word of data, signal data received */ + if (i < (length_in_words - 1)) { + /* Signal dump data received - Clear IO debug Ack */ + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, + ioa_cfg->regs.clr_interrupt_reg); + } + } + + /* Signal end of block transfer. 
Set reset alert then clear IO debug ack */ + writel(IPR_UPROCI_RESET_ALERT, + ioa_cfg->regs.set_uproc_interrupt_reg); + + writel(IPR_UPROCI_IO_DEBUG_ALERT, + ioa_cfg->regs.clr_uproc_interrupt_reg); + + /* Signal dump data received - Clear IO debug Ack */ + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, + ioa_cfg->regs.clr_interrupt_reg); + + /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ + while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { + temp_pcii_reg = + readl(ioa_cfg->regs.sense_uproc_interrupt_reg); + + if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) + return 0; + + udelay(10); + delay += 10; + } + + return 0; +} + +#ifdef CONFIG_SCSI_IPR_DUMP +/** + * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer + * @ioa_cfg: ioa config struct + * @pci_address: adapter address + * @length: length of data to copy + * + * Copy data from PCI adapter to kernel buffer. + * Note: length MUST be a 4 byte multiple + * Return value: + * 0 on success / other on failure + **/ +static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, + unsigned long pci_address, u32 length) +{ + int bytes_copied = 0; + int cur_len, rc, rem_len, rem_page_len; + u32 *page; + unsigned long lock_flags = 0; + struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; + + while (bytes_copied < length && + (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) { + if (ioa_dump->page_offset >= PAGE_SIZE || + ioa_dump->page_offset == 0) { + page = (u32 *)__get_free_page(GFP_ATOMIC); + + if (!page) { + ipr_trace; + return bytes_copied; + } + + ioa_dump->page_offset = 0; + ioa_dump->ioa_data[ioa_dump->next_page_index] = page; + ioa_dump->next_page_index++; + } else + page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; + + rem_len = length - bytes_copied; + rem_page_len = PAGE_SIZE - ioa_dump->page_offset; + cur_len = min(rem_len, rem_page_len); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->sdt_state == ABORT_DUMP) { + rc = -EIO; + } else { + rc = ipr_get_ldump_data_section(ioa_cfg, + pci_address + bytes_copied, + &page[ioa_dump->page_offset / 4], + (cur_len / sizeof(u32))); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + if (!rc) { + ioa_dump->page_offset += cur_len; + bytes_copied += cur_len; + } else { + ipr_trace; + break; + } + schedule(); + } + + return bytes_copied; +} + +/** + * ipr_init_dump_entry_hdr - Initialize a dump entry header. + * @hdr: dump entry header struct + * + * Return value: + * nothing + **/ +static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr) +{ + hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; + hdr->num_elems = 1; + hdr->offset = sizeof(*hdr); + hdr->status = IPR_DUMP_STATUS_SUCCESS; +} + +/** + * ipr_dump_ioa_type_data - Fill in the adapter type in the dump. 
+ * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; + + ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); + driver_dump->ioa_type_entry.hdr.len = + sizeof(struct ipr_dump_ioa_type_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; + driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; + driver_dump->ioa_type_entry.type = ioa_cfg->type; + driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | + (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | + ucode_vpd->minor_release[1]; + driver_dump->hdr.num_entries++; +} + +/** + * ipr_dump_version_data - Fill in the driver version in the dump. + * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); + driver_dump->version_entry.hdr.len = + sizeof(struct ipr_dump_version_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; + driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; + strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); + driver_dump->hdr.num_entries++; +} + +/** + * ipr_dump_trace_data - Fill in the IOA trace in the dump. + * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); + driver_dump->trace_entry.hdr.len = + sizeof(struct ipr_dump_trace_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; + driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; + memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); + driver_dump->hdr.num_entries++; +} + +/** + * ipr_dump_location_data - Fill in the IOA location in the dump. + * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); + driver_dump->location_entry.hdr.len = + sizeof(struct ipr_dump_location_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; + driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; + strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id); + driver_dump->hdr.num_entries++; +} + +/** + * ipr_get_ioa_dump - Perform a dump of the driver and adapter. 
+ * @ioa_cfg: ioa config struct + * @dump: dump struct + * + * Return value: + * nothing + **/ +static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) +{ + unsigned long start_addr, sdt_word; + unsigned long lock_flags = 0; + struct ipr_driver_dump *driver_dump = &dump->driver_dump; + struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; + u32 num_entries, start_off, end_off; + u32 bytes_to_copy, bytes_copied, rc; + struct ipr_sdt *sdt; + int i; + + ENTER; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->sdt_state != GET_DUMP) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + start_addr = readl(ioa_cfg->ioa_mailbox); + + if (!ipr_sdt_is_fmt2(start_addr)) { + dev_err(&ioa_cfg->pdev->dev, + "Invalid dump table format: %lx\n", start_addr); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); + + driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; + + /* Initialize the overall dump header */ + driver_dump->hdr.len = sizeof(struct ipr_driver_dump); + driver_dump->hdr.num_entries = 1; + driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); + driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; + driver_dump->hdr.os = IPR_DUMP_OS_LINUX; + driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; + + ipr_dump_version_data(ioa_cfg, driver_dump); + ipr_dump_location_data(ioa_cfg, driver_dump); + ipr_dump_ioa_type_data(ioa_cfg, driver_dump); + ipr_dump_trace_data(ioa_cfg, driver_dump); + + /* Update dump_header */ + driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); + + /* IOA Dump entry */ + ipr_init_dump_entry_hdr(&ioa_dump->hdr); + ioa_dump->format = IPR_SDT_FMT2; + ioa_dump->hdr.len = 0; + ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; + ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; + + /* First entries in sdt are actually a list of dump addresses and + lengths to gather the real dump data. sdt represents the pointer + to the ioa generated dump table. Dump data will be extracted based + on entries in this table */ + sdt = &ioa_dump->sdt; + + rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (u32 *)sdt, + sizeof(struct ipr_sdt) / sizeof(u32)); + + /* Smart Dump table is ready to use and the first entry is valid */ + if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) { + dev_err(&ioa_cfg->pdev->dev, + "Dump of IOA failed. 
Dump table not valid: %d, %X.\n", + rc, be32_to_cpu(sdt->hdr.state)); + driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; + ioa_cfg->sdt_state = DUMP_OBTAINED; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + num_entries = be32_to_cpu(sdt->hdr.num_entries_used); + + if (num_entries > IPR_NUM_SDT_ENTRIES) + num_entries = IPR_NUM_SDT_ENTRIES; + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + for (i = 0; i < num_entries; i++) { + if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) { + driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; + break; + } + + if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { + sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset); + start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; + end_off = be32_to_cpu(sdt->entry[i].end_offset); + + if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) { + bytes_to_copy = end_off - start_off; + if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) { + sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; + continue; + } + + /* Copy data from adapter to driver buffers */ + bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, + bytes_to_copy); + + ioa_dump->hdr.len += bytes_copied; + + if (bytes_copied != bytes_to_copy) { + driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; + break; + } + } + } + } + + dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); + + /* Update dump_header */ + driver_dump->hdr.len += ioa_dump->hdr.len; + wmb(); + ioa_cfg->sdt_state = DUMP_OBTAINED; + LEAVE; +} + +#else +#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0) +#endif + +/** + * ipr_worker_thread - Worker thread + * @data: ioa config struct + * + * Called at task level from a work thread. This function takes care + * of adding and removing devices from the mid-layer as configuration + * changes are detected by the adapter. 
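+ * If an adapter dump has been requested it is fetched first and the IOA is + * reset once the dump has been obtained; otherwise resources flagged + * del_from_ml are removed from the mid-layer and resources flagged + * add_to_ml are registered with scsi_add_device().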
+ * + * Return value: + * nothing + **/ +static void ipr_worker_thread(void *data) +{ + unsigned long lock_flags; + struct ipr_resource_entry *res; + struct scsi_device *sdev; + struct ipr_dump *dump; + struct ipr_ioa_cfg *ioa_cfg = data; + u8 bus, target, lun; + int did_work; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->sdt_state == GET_DUMP) { + dump = ioa_cfg->dump; + if (!dump || !kobject_get(&dump->kobj)) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + ipr_get_ioa_dump(ioa_cfg, dump); + kobject_put(&dump->kobj); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->sdt_state == DUMP_OBTAINED) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + +restart: + do { + did_work = 0; + if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->del_from_ml && res->sdev) { + did_work = 1; + sdev = res->sdev; + if (!scsi_device_get(sdev)) { + res->sdev = NULL; + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_remove_device(sdev); + scsi_device_put(sdev); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } + break; + } + } + } while(did_work); + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->add_to_ml) { + bus = res->cfgte.res_addr.bus; + target = res->cfgte.res_addr.target; + lun = res->cfgte.res_addr.lun; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_add_device(ioa_cfg->host, bus, target, lun); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + goto restart; + } + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +#ifdef CONFIG_SCSI_IPR_TRACE +/** + * ipr_read_trace - Dump the adapter trace + * @kobj: kobject struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_read_trace(struct kobject *kobj, char *buf, + loff_t off, size_t count) +{ + struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int size = IPR_TRACE_SIZE; + char *src = (char *)ioa_cfg->trace; + + if (off > size) + return 0; + if (off + count > size) { + size -= off; + count = size; + } + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + memcpy(buf, &src[off], count); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return count; +} + +static struct bin_attribute ipr_trace_attr = { + .attr = { + .name = "trace", + .mode = S_IRUGO, + }, + .size = 0, + .read = ipr_read_trace, +}; +#endif + +/** + * ipr_show_fw_version - Show the firmware version + * @class_dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; + unsigned long lock_flags = 0; + int len; + + 
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n", + ucode_vpd->major_release, ucode_vpd->card_type, + ucode_vpd->minor_release[0], + ucode_vpd->minor_release[1]); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct class_device_attribute ipr_fw_version_attr = { + .attr = { + .name = "fw_version", + .mode = S_IRUGO, + }, + .show = ipr_show_fw_version, +}; + +/** + * ipr_show_log_level - Show the adapter's error logging level + * @class_dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +/** + * ipr_store_log_level - Change the adapter's error logging level + * @class_dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_store_log_level(struct class_device *class_dev, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return strlen(buf); +} + +static struct class_device_attribute ipr_log_level_attr = { + .attr = { + .name = "log_level", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ipr_show_log_level, + .store = ipr_store_log_level +}; + +/** + * ipr_store_diagnostics - IOA Diagnostics interface + * @class_dev: class_device struct + * @buf: buffer + * @count: buffer size + * + * This function will reset the adapter and wait a reasonable + * amount of time for any errors that the adapter might log. 
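+ * The adapter's errors_logged count is cleared before the reset; -EIO is + * returned if the reset did not run to completion or if any errors were + * logged while it ran.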
+ * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_diagnostics(struct class_device *class_dev, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int rc = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->errors_logged = 0; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + + if (ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + /* Wait for a second for any errors to be logged */ + schedule_timeout(HZ); + } else { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return -EIO; + } + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) + rc = -EIO; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + return rc; +} + +static struct class_device_attribute ipr_diagnostics_attr = { + .attr = { + .name = "run_diagnostics", + .mode = S_IWUSR, + }, + .store = ipr_store_diagnostics +}; + +/** + * ipr_store_reset_adapter - Reset the adapter + * @class_dev: class_device struct + * @buf: buffer + * @count: buffer size + * + * This function will reset the adapter. + * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_reset_adapter(struct class_device *class_dev, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags; + int result = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (!ioa_cfg->in_reset_reload) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + return result; +} + +static struct class_device_attribute ipr_ioa_reset_attr = { + .attr = { + .name = "reset_host", + .mode = S_IWUSR, + }, + .store = ipr_store_reset_adapter +}; + +/** + * ipr_alloc_ucode_buffer - Allocates a microcode download buffer + * @buf_len: buffer length + * + * Allocates a DMA'able buffer in chunks and assembles a scatter/gather + * list to use for microcode download + * + * Return value: + * pointer to sglist / NULL on failure + **/ +static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) +{ + int sg_size, order, bsize_elem, num_elem, i, j; + struct ipr_sglist *sglist; + struct scatterlist *scatterlist; + struct page *page; + + /* Get the minimum size per scatter/gather element */ + sg_size = buf_len / (IPR_MAX_SGLIST - 1); + + /* Get the actual size per element */ + order = get_order(sg_size); + + /* Determine the actual number of bytes per element */ + bsize_elem = PAGE_SIZE * (1 << order); + + /* Determine the actual number of sg entries needed */ + if (buf_len % bsize_elem) + num_elem = (buf_len / bsize_elem) + 1; + else + num_elem = buf_len / bsize_elem; + + /* Allocate a scatter/gather list for the DMA */ + sglist = kmalloc(sizeof(struct ipr_sglist) + + (sizeof(struct scatterlist) * (num_elem - 1)), + GFP_KERNEL); + + if (sglist == NULL) { + ipr_trace; + return NULL; + } + + memset(sglist, 0, 
sizeof(struct ipr_sglist) + + (sizeof(struct scatterlist) * (num_elem - 1))); + + scatterlist = sglist->scatterlist; + + sglist->order = order; + sglist->num_sg = num_elem; + + /* Allocate a bunch of sg elements */ + for (i = 0; i < num_elem; i++) { + page = alloc_pages(GFP_KERNEL, order); + if (!page) { + ipr_trace; + + /* Free up what we already allocated */ + for (j = i - 1; j >= 0; j--) + __free_pages(scatterlist[j].page, order); + kfree(sglist); + return NULL; + } + + scatterlist[i].page = page; + } + + return sglist; +} + +/** + * ipr_free_ucode_buffer - Frees a microcode download buffer + * @sglist: scatter/gather list pointer + * + * Free a DMA'able ucode download buffer previously allocated with + * ipr_alloc_ucode_buffer + * + * Return value: + * nothing + **/ +static void ipr_free_ucode_buffer(struct ipr_sglist *sglist) +{ + int i; + + for (i = 0; i < sglist->num_sg; i++) + __free_pages(sglist->scatterlist[i].page, sglist->order); + + kfree(sglist); +} + +/** + * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer + * @sglist: scatter/gather list pointer + * @buffer: buffer pointer + * @len: buffer length + * + * Copy a microcode image from a user buffer into a buffer allocated by + * ipr_alloc_ucode_buffer + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, + u8 *buffer, u32 len) +{ + int bsize_elem, i, result = 0; + struct scatterlist *scatterlist; + void *kaddr; + + /* Determine the actual number of bytes per element */ + bsize_elem = PAGE_SIZE * (1 << sglist->order); + + scatterlist = sglist->scatterlist; + + for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) { + kaddr = kmap(scatterlist[i].page); + memcpy(kaddr, buffer, bsize_elem); + kunmap(scatterlist[i].page); + + scatterlist[i].length = bsize_elem; + + if (result != 0) { + ipr_trace; + return result; + } + } + + if (len % bsize_elem) { + kaddr = kmap(scatterlist[i].page); + memcpy(kaddr, buffer, len % bsize_elem); + kunmap(scatterlist[i].page); + + scatterlist[i].length = len % bsize_elem; + } + + sglist->buffer_len = len; + return result; +} + +/** + * ipr_map_ucode_buffer - Map a microcode download buffer + * @ipr_cmd: ipr command struct + * @sglist: scatter/gather list + * @len: total length of download buffer + * + * Maps a microcode download scatter/gather list for DMA and + * builds the IOADL. 
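+ * Each IOADL descriptor is flagged for a write to the adapter and the last + * descriptor is marked IPR_IOADL_FLAGS_LAST; if pci_map_sg() maps no + * entries the function fails with -EIO.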
+ * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd, + struct ipr_sglist *sglist, int len) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct scatterlist *scatterlist = sglist->scatterlist; + int i; + + ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist, + sglist->num_sg, DMA_TO_DEVICE); + + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->write_data_transfer_length = cpu_to_be32(len); + ioarcb->write_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + + for (i = 0; i < ipr_cmd->dma_use_sg; i++) { + ioadl[i].flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i])); + ioadl[i].address = + cpu_to_be32(sg_dma_address(&scatterlist[i])); + } + + if (likely(ipr_cmd->dma_use_sg)) { + ioadl[i-1].flags_and_data_len |= + cpu_to_be32(IPR_IOADL_FLAGS_LAST); + } + else { + dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); + return -EIO; + } + + return 0; +} + +/** + * ipr_store_update_fw - Update the firmware on the adapter + * @class_dev: class_device struct + * @buf: buffer + * @count: buffer size + * + * This function will update the firmware on the adapter. + * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_update_fw(struct class_device *class_dev, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct ipr_ucode_image_header *image_hdr; + const struct firmware *fw_entry; + struct ipr_sglist *sglist; + unsigned long lock_flags; + char fname[100]; + char *src; + int len, result, dnld_size; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + len = snprintf(fname, 99, "%s", buf); + fname[len-1] = '\0'; + + if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { + dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); + return -EIO; + } + + image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; + + if (be32_to_cpu(image_hdr->header_length) > fw_entry->size || + (ioa_cfg->vpd_cbs->page3_data.card_type && + ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) { + dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n"); + release_firmware(fw_entry); + return -EINVAL; + } + + src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); + dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); + sglist = ipr_alloc_ucode_buffer(dnld_size); + + if (!sglist) { + dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); + release_firmware(fw_entry); + return -ENOMEM; + } + + result = ipr_copy_ucode_buffer(sglist, src, dnld_size); + + if (result) { + dev_err(&ioa_cfg->pdev->dev, + "Microcode buffer copy to DMA buffer failed\n"); + ipr_free_ucode_buffer(sglist); + release_firmware(fw_entry); + return result; + } + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->ucode_sglist) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + dev_err(&ioa_cfg->pdev->dev, + "Microcode download already in progress\n"); + ipr_free_ucode_buffer(sglist); + release_firmware(fw_entry); + return -EIO; + } + + ioa_cfg->ucode_sglist = sglist; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, 
!ioa_cfg->in_reset_reload); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->ucode_sglist = NULL; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + ipr_free_ucode_buffer(sglist); + release_firmware(fw_entry); + + return count; +} + +static struct class_device_attribute ipr_update_fw_attr = { + .attr = { + .name = "update_fw", + .mode = S_IWUSR, + }, + .store = ipr_store_update_fw +}; + +static struct class_device_attribute *ipr_ioa_attrs[] = { + &ipr_fw_version_attr, + &ipr_log_level_attr, + &ipr_diagnostics_attr, + &ipr_ioa_reset_attr, + &ipr_update_fw_attr, + NULL, +}; + +#ifdef CONFIG_SCSI_IPR_DUMP +/** + * ipr_read_dump - Dump the adapter + * @kobj: kobject struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_read_dump(struct kobject *kobj, char *buf, + loff_t off, size_t count) +{ + struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct ipr_dump *dump; + unsigned long lock_flags = 0; + char *src; + int len; + size_t rc = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + dump = ioa_cfg->dump; + + if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump || !kobject_get(&dump->kobj)) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + if (off > dump->driver_dump.hdr.len) { + kobject_put(&dump->kobj); + return 0; + } + + if (off + count > dump->driver_dump.hdr.len) { + count = dump->driver_dump.hdr.len - off; + rc = count; + } + + if (count && off < sizeof(dump->driver_dump)) { + if (off + count > sizeof(dump->driver_dump)) + len = sizeof(dump->driver_dump) - off; + else + len = count; + src = (u8 *)&dump->driver_dump + off; + memcpy(buf, src, len); + buf += len; + off += len; + count -= len; + } + + off -= sizeof(dump->driver_dump); + + if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) { + if (off + count > offsetof(struct ipr_ioa_dump, ioa_data)) + len = offsetof(struct ipr_ioa_dump, ioa_data) - off; + else + len = count; + src = (u8 *)&dump->ioa_dump + off; + memcpy(buf, src, len); + buf += len; + off += len; + count -= len; + } + + off -= offsetof(struct ipr_ioa_dump, ioa_data); + + while (count) { + if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) + len = PAGE_ALIGN(off) - off; + else + len = count; + src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; + src += off & ~PAGE_MASK; + memcpy(buf, src, len); + buf += len; + off += len; + count -= len; + } + + kobject_put(&dump->kobj); + return rc; +} + +/** + * ipr_release_dump - Free adapter dump memory + * @kobj: kobject struct + * + * Return value: + * nothing + **/ +static void ipr_release_dump(struct kobject *kobj) +{ + struct ipr_dump *dump = container_of(kobj,struct ipr_dump,kobj); + struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; + unsigned long lock_flags = 0; + int i; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->dump = NULL; + ioa_cfg->sdt_state = INACTIVE; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + for (i = 0; i < dump->ioa_dump.next_page_index; i++) + free_page((unsigned long) dump->ioa_dump.ioa_data[i]); + + kfree(dump); + LEAVE; +} + +static struct kobj_type ipr_dump_kobj_type = { + .release = 
ipr_release_dump, +}; + +/** + * ipr_alloc_dump - Prepare for adapter dump + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_dump *dump; + unsigned long lock_flags = 0; + + ENTER; + dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL); + + if (!dump) { + ipr_err("Dump memory allocation failed\n"); + return -ENOMEM; + } + + memset(dump, 0, sizeof(struct ipr_dump)); + kobject_init(&dump->kobj); + dump->kobj.ktype = &ipr_dump_kobj_type; + dump->ioa_cfg = ioa_cfg; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (INACTIVE != ioa_cfg->sdt_state) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + kfree(dump); + return 0; + } + + ioa_cfg->dump = dump; + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) { + ioa_cfg->dump_taken = 1; + schedule_work(&ioa_cfg->work_q); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + LEAVE; + return 0; +} + +/** + * ipr_free_dump - Free adapter dump memory + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_dump *dump; + unsigned long lock_flags = 0; + + ENTER; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + dump = ioa_cfg->dump; + if (!dump) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; + } + + ioa_cfg->dump = NULL; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + kobject_put(&dump->kobj); + + LEAVE; + return 0; +} + +/** + * ipr_write_dump - Setup dump state of adapter + * @kobj: kobject struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_write_dump(struct kobject *kobj, char *buf, + loff_t off, size_t count) +{ + struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + int rc; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (buf[0] == '1') + rc = ipr_alloc_dump(ioa_cfg); + else if (buf[0] == '0') + rc = ipr_free_dump(ioa_cfg); + else + return -EINVAL; + + if (rc) + return rc; + else + return count; +} + +static struct bin_attribute ipr_dump_attr = { + .attr = { + .name = "dump", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = ipr_read_dump, + .write = ipr_write_dump +}; +#else +static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; +#endif + +/** + * ipr_store_queue_depth - Change the device's queue depth + * @dev: device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_store_queue_depth(struct device *dev, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + int qdepth = simple_strtoul(buf, NULL, 10); + int tagged = 0; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) { + res->qdepth = qdepth; + + if (ipr_is_gscsi(res) && res->tcq_active) + tagged = MSG_ORDERED_TAG; + + len = strlen(buf); + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + 
scsi_adjust_queue_depth(sdev, tagged, qdepth); + return len; +} + +static struct device_attribute ipr_queue_depth_attr = { + .attr = { + .name = "queue_depth", + .mode = S_IRUSR | S_IWUSR, + }, + .store = ipr_store_queue_depth +}; + +/** + * ipr_show_tcq_enable - Show if the device is enabled for tcqing + * @dev: device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_tcq_enable(struct device *dev, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) + len = snprintf(buf, PAGE_SIZE, "%d\n", res->tcq_active); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +/** + * ipr_store_tcq_enable - Change the device's TCQing state + * @dev: device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_store_tcq_enable(struct device *dev, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + int tcq_active = simple_strtoul(buf, NULL, 10); + int qdepth = IPR_MAX_CMD_PER_LUN; + int tagged = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + res = (struct ipr_resource_entry *)sdev->hostdata; + + if (res) { + res->tcq_active = 0; + qdepth = res->qdepth; + + if (ipr_is_gscsi(res) && sdev->tagged_supported) { + if (tcq_active) { + tagged = MSG_ORDERED_TAG; + res->tcq_active = 1; + } + + len = strlen(buf); + } else if (tcq_active) { + len = -EINVAL; + } + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_adjust_queue_depth(sdev, tagged, qdepth); + return len; +} + +static struct device_attribute ipr_tcqing_attr = { + .attr = { + .name = "tcq_enable", + .mode = S_IRUSR | S_IWUSR, + }, + .store = ipr_store_tcq_enable, + .show = ipr_show_tcq_enable +}; + +/** + * ipr_show_adapter_handle - Show the adapter's resource handle for this device + * @dev: device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_adapter_handle(struct device *dev, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) + len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_adapter_handle_attr = { + .attr = { + .name = "adapter_handle", + .mode = S_IRUSR, + }, + .show = ipr_show_adapter_handle +}; + +static struct device_attribute *ipr_dev_attrs[] = { + &ipr_queue_depth_attr, + &ipr_tcqing_attr, + &ipr_adapter_handle_attr, + NULL, +}; + +/** + * ipr_biosparam - Return the HSC mapping + * @sdev: scsi device struct + * @block_device: block device pointer + * @capacity: capacity of the device + * @parm: Array containing returned HSC values. 
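+ * A worked example of the fixed geometry used here, assuming 512-byte
+ * sectors: 128 heads * 32 sectors gives 4096 sectors (2 MiB) per
+ * cylinder, so a 4 GiB device of 8388608 sectors reports
+ * 8388608 / 4096 = 2048 cylinders and cylinder-aligned partitions fall
+ * on 4 KiB multiples.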
+ * + * This function generates the HSC parms that fdisk uses. + * We want to make sure we return something that places partitions + * on 4k boundaries for best performance with the IOA. + * + * Return value: + * 0 on success + **/ +static int ipr_biosparam(struct scsi_device *sdev, + struct block_device *block_device, + sector_t capacity, int *parm) +{ + int heads, sectors, cylinders; + + heads = 128; + sectors = 32; + + cylinders = capacity; + sector_div(cylinders, (128 * 32)); + + /* return result */ + parm[0] = heads; + parm[1] = sectors; + parm[2] = cylinders; + + return 0; +} + +/** + * ipr_slave_destroy - Unconfigure a SCSI device + * @sdev: scsi device struct + * + * Return value: + * nothing + **/ +static void ipr_slave_destroy(struct scsi_device *sdev) +{ + struct ipr_resource_entry *res; + struct ipr_ioa_cfg *ioa_cfg; + unsigned long lock_flags = 0; + + ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *) sdev->hostdata; + if (res) { + sdev->hostdata = NULL; + res->sdev = NULL; + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); +} + +/** + * ipr_slave_configure - Configure a SCSI device + * @sdev: scsi device struct + * + * This function configures the specified scsi device. + * + * Return value: + * 0 on success + **/ +static int ipr_slave_configure(struct scsi_device *sdev) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = sdev->hostdata; + if (res) { + if (ipr_is_af_dasd_device(res)) + sdev->type = TYPE_RAID; + if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) + sdev->scsi_level = 4; + if (ipr_is_vset_device(res)) + sdev->timeout = IPR_VSET_RW_TIMEOUT; + + sdev->allow_restart = 1; + scsi_adjust_queue_depth(sdev, 0, res->qdepth); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; +} + +/** + * ipr_slave_alloc - Prepare for commands to a device. + * @sdev: scsi device struct + * + * This function saves a pointer to the resource entry + * in the scsi device struct if the device exists. We + * can then use this pointer in ipr_queuecommand when + * handling new commands. 
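+ * The lookup walks ioa_cfg->used_res_q and matches the mid-layer's
+ * channel/id/lun against the resource's config table address; on a hit
+ * the resource is cached in sdev->hostdata, its add_to_ml flag is
+ * cleared and needs_sync_complete is set so the next op to the device
+ * carries IPR_FLAGS_HI_SYNC_COMPLETE.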
+ * + * Return value: + * 0 on success + **/ +static int ipr_slave_alloc(struct scsi_device *sdev) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags; + + sdev->hostdata = NULL; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if ((res->cfgte.res_addr.bus == sdev->channel) && + (res->cfgte.res_addr.target == sdev->id) && + (res->cfgte.res_addr.lun == sdev->lun)) { + res->sdev = sdev; + res->add_to_ml = 0; + sdev->hostdata = res; + res->needs_sync_complete = 1; + break; + } + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + return 0; +} + +/** + * ipr_eh_host_reset - Reset the host adapter + * @scsi_cmd: scsi command struct + * + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg; + int rc; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + + dev_err(&ioa_cfg->pdev->dev, + "Adapter being reset as a result of error recovery.\n"); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV); + + LEAVE; + return rc; +} + +/** + * ipr_eh_dev_reset - Reset the device + * @scsi_cmd: scsi command struct + * + * This function issues a device reset to the affected device. + * A LUN reset will be sent to the device first. If that does + * not work, a target reset will be sent. + * + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; + struct ipr_cmd_pkt *cmd_pkt; + u32 ioasc; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + res = scsi_cmd->device->hostdata; + + if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res))) + return FAILED; + + /* + * If we are currently going through reset/reload, return failed. This will force the + * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the + * reset to complete + */ + if (ioa_cfg->in_reset_reload) + return FAILED; + if (ioa_cfg->ioa_is_dead) + return FAILED; + + list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { + if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { + if (ipr_cmd->scsi_cmd) + ipr_cmd->done = ipr_scsi_eh_done; + } + } + + res->resetting_device = 1; + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + + ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; + cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_RESET_DEVICE; + + ipr_sdev_err(scsi_cmd->device, "Resetting device\n"); + ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); + + ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + res->resetting_device = 0; + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + + LEAVE; + return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS); +} + +/** + * ipr_bus_reset_done - Op done function for bus reset. 
+ * @ipr_cmd: ipr command struct + * + * This function is the op done function for a bus reset + * + * Return value: + * none + **/ +static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_resource_entry *res; + + ENTER; + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle, + sizeof(res->cfgte.res_handle))) { + scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus); + break; + } + } + + /* + * If abort has not completed, indicate the reset has, else call the + * abort's done function to wake the sleeping eh thread + */ + if (ipr_cmd->u.sibling->u.sibling) + ipr_cmd->u.sibling->u.sibling = NULL; + else + ipr_cmd->u.sibling->done(ipr_cmd->u.sibling); + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + LEAVE; +} + +/** + * ipr_abort_timeout - An abort task has timed out + * @ipr_cmd: ipr command struct + * + * This function handles when an abort task times out. If this + * happens we issue a bus reset since we have resources tied + * up that must be freed before returning to the midlayer. + * + * Return value: + * none + **/ +static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_cmnd *reset_cmd; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_cmd_pkt *cmd_pkt; + unsigned long lock_flags = 0; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n"); + reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ipr_cmd->u.sibling = reset_cmd; + reset_cmd->u.sibling = ipr_cmd; + reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; + cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_RESET_DEVICE; + cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; + + ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +/** + * ipr_cancel_op - Cancel specified op + * @scsi_cmd: scsi command struct + * + * This function cancels specified op. 
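+ * The cancel is issued as an IOA command whose CDB carries the host PCI
+ * address of the IOARCB being aborted, most significant byte first,
+ * along the lines of the code below:
+ *
+ *   cmd_pkt->cdb[0] = IPR_ABORT_TASK;
+ *   cmd_pkt->cdb[2] = (ioarcb_addr >> 24) & 0xff;
+ *   ...
+ *   cmd_pkt->cdb[5] = ioarcb_addr & 0xff;
+ *
+ * If the abort itself times out, ipr_abort_timeout escalates to a bus
+ * reset.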
+ * + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; + struct ipr_cmd_pkt *cmd_pkt; + u32 ioasc, ioarcb_addr; + int op_found = 0; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; + res = scsi_cmd->device->hostdata; + + if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res))) + return FAILED; + + list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { + if (ipr_cmd->scsi_cmd == scsi_cmd) { + ipr_cmd->done = ipr_scsi_eh_done; + op_found = 1; + break; + } + } + + if (!op_found) + return SUCCESS; + + ioarcb_addr = be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr); + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; + cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_ABORT_TASK; + cmd_pkt->cdb[2] = (ioarcb_addr >> 24) & 0xff; + cmd_pkt->cdb[3] = (ioarcb_addr >> 16) & 0xff; + cmd_pkt->cdb[4] = (ioarcb_addr >> 8) & 0xff; + cmd_pkt->cdb[5] = ioarcb_addr & 0xff; + ipr_cmd->u.sdev = scsi_cmd->device; + + ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]); + ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_ABORT_TASK_TIMEOUT); + ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + /* + * If the abort task timed out and we sent a bus reset, we will get + * one the following responses to the abort + */ + if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) { + ioasc = 0; + ipr_trace; + } + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + res->needs_sync_complete = 1; + + LEAVE; + return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS); +} + +/** + * ipr_eh_abort - Abort a single op + * @scsi_cmd: scsi command struct + * + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + + /* If we are currently going through reset/reload, return failed. This will force the + mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the + reset to complete */ + if (ioa_cfg->in_reset_reload) + return FAILED; + if (ioa_cfg->ioa_is_dead) + return FAILED; + if (!scsi_cmd->device->hostdata) + return FAILED; + + LEAVE; + return ipr_cancel_op(scsi_cmd); +} + +/** + * ipr_handle_other_interrupt - Handle "other" interrupts + * @ioa_cfg: ioa config struct + * @int_reg: interrupt register + * + * Return value: + * IRQ_NONE / IRQ_HANDLED + **/ +static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, + volatile u32 int_reg) +{ + irqreturn_t rc = IRQ_HANDLED; + + if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { + /* Mask the interrupt */ + writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); + + /* Clear the interrupt */ + writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + + list_del(&ioa_cfg->reset_cmd->queue); + del_timer(&ioa_cfg->reset_cmd->timer); + ipr_reset_ioa_job(ioa_cfg->reset_cmd); + } else { + if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) + ioa_cfg->ioa_unit_checked = 1; + else + dev_err(&ioa_cfg->pdev->dev, + "Permanent IOA failure. 
0x%08X\n", int_reg); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + ipr_mask_and_clear_interrupts(ioa_cfg, ~0); + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } + + return rc; +} + +/** + * ipr_isr - Interrupt service routine + * @irq: irq number + * @devp: pointer to ioa config struct + * @regs: pt_regs struct + * + * Return value: + * IRQ_NONE / IRQ_HANDLED + **/ +static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; + unsigned long lock_flags = 0; + volatile u32 int_reg, int_mask_reg; + u32 ioasc; + u16 cmd_index; + struct ipr_cmnd *ipr_cmd; + irqreturn_t rc = IRQ_NONE; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + /* If interrupts are disabled, ignore the interrupt */ + if (!ioa_cfg->allow_interrupts) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return IRQ_NONE; + } + + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + + /* If an interrupt on the adapter did not occur, ignore it */ + if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return IRQ_NONE; + } + + while (1) { + ipr_cmd = NULL; + + while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == + ioa_cfg->toggle_bit) { + + cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) & + IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT; + + if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) { + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n"); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return IRQ_HANDLED; + } + + ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; + + ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); + + list_del(&ipr_cmd->queue); + del_timer(&ipr_cmd->timer); + ipr_cmd->done(ipr_cmd); + + rc = IRQ_HANDLED; + + if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) { + ioa_cfg->hrrq_curr++; + } else { + ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start; + ioa_cfg->toggle_bit ^= 1u; + } + } + + if (ipr_cmd != NULL) { + /* Clear the PCI interrupt */ + writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + } else + break; + } + + if (unlikely(rc == IRQ_NONE)) + rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return rc; +} + +/** + * ipr_build_ioadl - Build a scatter/gather list and map the buffer + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * Return value: + * 0 on success / -1 on failure + **/ +static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + int i; + struct scatterlist *sglist; + u32 length; + u32 ioadl_flags = 0; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + + length = scsi_cmd->request_bufflen; + + if (length == 0) + return 0; + + if (scsi_cmd->use_sg) { + ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, + scsi_cmd->request_buffer, + scsi_cmd->use_sg, + scsi_cmd->sc_data_direction); + + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { + ioadl_flags = 
IPR_IOADL_FLAGS_WRITE; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->write_data_transfer_length = cpu_to_be32(length); + ioarcb->write_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_READ; + ioarcb->read_data_transfer_length = cpu_to_be32(length); + ioarcb->read_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + } + + sglist = scsi_cmd->request_buffer; + + for (i = 0; i < ipr_cmd->dma_use_sg; i++) { + ioadl[i].flags_and_data_len = + cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i])); + ioadl[i].address = + cpu_to_be32(sg_dma_address(&sglist[i])); + } + + if (likely(ipr_cmd->dma_use_sg)) { + ioadl[i-1].flags_and_data_len |= + cpu_to_be32(IPR_IOADL_FLAGS_LAST); + return 0; + } else + dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); + } else { + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_WRITE; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->write_data_transfer_length = cpu_to_be32(length); + ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_READ; + ioarcb->read_data_transfer_length = cpu_to_be32(length); + ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + } + + ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev, + scsi_cmd->request_buffer, length, + scsi_cmd->sc_data_direction); + + if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) { + ipr_cmd->dma_use_sg = 1; + ioadl[0].flags_and_data_len = + cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST); + ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle); + return 0; + } else + dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n"); + } + + return -1; +} + +/** + * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes + * @scsi_cmd: scsi command struct + * + * Return value: + * task attributes + **/ +static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd) +{ + u8 tag[2]; + u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK; + + if (scsi_populate_tag_msg(scsi_cmd, tag)) { + switch (tag[0]) { + case MSG_SIMPLE_TAG: + rc = IPR_FLAGS_LO_SIMPLE_TASK; + break; + case MSG_HEAD_TAG: + rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK; + break; + case MSG_ORDERED_TAG: + rc = IPR_FLAGS_LO_ORDERED_TASK; + break; + }; + } + + return rc; +} + +/** + * ipr_erp_done - Process completion of ERP for a device + * @ipr_cmd: ipr command struct + * + * This function copies the sense buffer into the scsi_cmd + * struct and pushes the scsi_done function. 
+ * + * Return value: + * nothing + **/ +static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_resource_entry *res = scsi_cmd->device->hostdata; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { + scsi_cmd->result |= (DID_ERROR << 16); + ipr_sdev_err(scsi_cmd->device, + "Request Sense failed with IOASC: 0x%08X\n", ioasc); + } else { + memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE); + } + + if (res) + res->needs_sync_complete = 1; + ipr_unmap_sglist(ioa_cfg, ipr_cmd); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + scsi_cmd->scsi_done(scsi_cmd); +} + +/** + * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioarcb *ioarcb; + struct ipr_ioasa *ioasa; + + ioarcb = &ipr_cmd->ioarcb; + ioasa = &ipr_cmd->ioasa; + + memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); + ioarcb->write_data_transfer_length = 0; + ioarcb->read_data_transfer_length = 0; + ioarcb->write_ioadl_len = 0; + ioarcb->read_ioadl_len = 0; + ioasa->ioasc = 0; + ioasa->residual_data_len = 0; +} + +/** + * ipr_erp_request_sense - Send request sense to a device + * @ipr_cmd: ipr command struct + * + * This function sends a request sense to a device as a result + * of a check condition. + * + * Return value: + * nothing + **/ +static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + + ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); + + cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; + cmd_pkt->cdb[0] = REQUEST_SENSE; + cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; + cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; + cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; + cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); + + ipr_cmd->ioadl[0].flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE); + ipr_cmd->ioadl[0].address = + cpu_to_be32(ipr_cmd->sense_buffer_dma); + + ipr_cmd->ioarcb.read_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ipr_cmd->ioarcb.read_data_transfer_length = + cpu_to_be32(SCSI_SENSE_BUFFERSIZE); + + ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, + IPR_REQUEST_SENSE_TIMEOUT * 2); +} + +/** + * ipr_erp_cancel_all - Send cancel all to a device + * @ipr_cmd: ipr command struct + * + * This function sends a cancel all to a device to clear the + * queue. If we are running TCQ on the device, QERR is set to 1, + * which means all outstanding ops have been dropped on the floor. + * Cancel all will return them to us. + * + * Return value: + * nothing + **/ +static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_resource_entry *res = scsi_cmd->device->hostdata; + struct ipr_cmd_pkt *cmd_pkt; + + res->in_erp = 1; + + ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); + + cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; + + ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout, + IPR_CANCEL_ALL_TIMEOUT); +} + +/** + * ipr_dump_ioasa - Dump contents of IOASA + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler when ops + * fail. 
It will log the IOASA if appropriate. Only called + * for GPDD ops. + * + * Return value: + * none + **/ +static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + int i; + u16 data_len; + u32 ioasc; + struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + u32 *ioasa_data = (u32 *)ioasa; + int error_index; + + ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; + + if (0 == ioasc) + return; + + if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) + return; + + error_index = ipr_get_error(ioasc); + + if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { + /* Don't log an error if the IOA already logged one */ + if (ioasa->ilid != 0) + return; + + if (ipr_error_table[error_index].log_ioasa == 0) + return; + } + + ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n", + ipr_error_table[error_index].error); + + if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) && + (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) { + ipr_sdev_err(ipr_cmd->scsi_cmd->device, + "Device End state: %s Phase: %s\n", + ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state], + ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]); + } + + if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len)) + data_len = sizeof(struct ipr_ioasa); + else + data_len = be16_to_cpu(ioasa->ret_stat_len); + + ipr_err("IOASA Dump:\n"); + + for (i = 0; i < data_len / 4; i += 4) { + ipr_err("%08X: %08X %08X %08X %08X\n", i*4, + be32_to_cpu(ioasa_data[i]), + be32_to_cpu(ioasa_data[i+1]), + be32_to_cpu(ioasa_data[i+2]), + be32_to_cpu(ioasa_data[i+3])); + } +} + +/** + * ipr_gen_sense - Generate SCSI sense data from an IOASA + * @ioasa: IOASA + * @sense_buf: sense data buffer + * + * Return value: + * none + **/ +static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) +{ + u32 failing_lba; + u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; + struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; + struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + u32 ioasc = be32_to_cpu(ioasa->ioasc); + + memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); + + if (ioasc >= IPR_FIRST_DRIVER_IOASC) + return; + + ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; + + if (ipr_is_vset_device(res) && + ioasc == IPR_IOASC_MED_DO_NOT_REALLOC && + ioasa->u.vset.failing_lba_hi != 0) { + sense_buf[0] = 0x72; + sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc); + sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc); + sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc); + + sense_buf[7] = 12; + sense_buf[8] = 0; + sense_buf[9] = 0x0A; + sense_buf[10] = 0x80; + + failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); + + sense_buf[12] = (failing_lba & 0xff000000) >> 24; + sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[15] = failing_lba & 0x000000ff; + + failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); + + sense_buf[16] = (failing_lba & 0xff000000) >> 24; + sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[19] = failing_lba & 0x000000ff; + } else { + sense_buf[0] = 0x70; + sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc); + sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc); + sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc); + + /* Illegal request */ + if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && + (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) { + sense_buf[7] = 10; /* additional length */ + + /* IOARCB was in error */ + if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24) + sense_buf[15] = 0xC0; + else /* Parameter data was invalid 
*/ + sense_buf[15] = 0x80; + + sense_buf[16] = + ((IPR_FIELD_POINTER_MASK & + be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff; + sense_buf[17] = + (IPR_FIELD_POINTER_MASK & + be32_to_cpu(ioasa->ioasc_specific)) & 0xff; + } else { + if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { + if (ipr_is_vset_device(res)) + failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); + else + failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); + + sense_buf[0] |= 0x80; /* Or in the Valid bit */ + sense_buf[3] = (failing_lba & 0xff000000) >> 24; + sense_buf[4] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[5] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[6] = failing_lba & 0x000000ff; + } + + sense_buf[7] = 6; /* additional length */ + } + } +} + +/** + * ipr_erp_start - Process an error response for a SCSI op + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * This function determines whether or not to initiate ERP + * on the affected device. + * + * Return value: + * nothing + **/ +static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_resource_entry *res = scsi_cmd->device->hostdata; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + if (!res) { + ipr_scsi_eh_done(ipr_cmd); + return; + } + + if (ipr_is_gscsi(res)) + ipr_dump_ioasa(ioa_cfg, ipr_cmd); + else + ipr_gen_sense(ipr_cmd); + + switch (ioasc & IPR_IOASC_IOASC_MASK) { + case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: + scsi_cmd->result |= (DID_ERROR << 16); + break; + case IPR_IOASC_IR_RESOURCE_HANDLE: + scsi_cmd->result |= (DID_NO_CONNECT << 16); + break; + case IPR_IOASC_HW_SEL_TIMEOUT: + scsi_cmd->result |= (DID_NO_CONNECT << 16); + res->needs_sync_complete = 1; + break; + case IPR_IOASC_SYNC_REQUIRED: + if (!res->in_erp) + res->needs_sync_complete = 1; + scsi_cmd->result |= (DID_IMM_RETRY << 16); + break; + case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ + scsi_cmd->result |= (DID_PASSTHROUGH << 16); + break; + case IPR_IOASC_BUS_WAS_RESET: + case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: + /* + * Report the bus reset and ask for a retry. The device + * will give CC/UA the next command. 
+ */ + if (!res->resetting_device) + scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); + scsi_cmd->result |= (DID_ERROR << 16); + res->needs_sync_complete = 1; + break; + case IPR_IOASC_HW_DEV_BUS_STATUS: + scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); + if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) { + ipr_erp_cancel_all(ipr_cmd); + return; + } + break; + case IPR_IOASC_NR_INIT_CMD_REQUIRED: + break; + default: + scsi_cmd->result |= (DID_ERROR << 16); + if (!ipr_is_vset_device(res)) + res->needs_sync_complete = 1; + break; + } + + ipr_unmap_sglist(ioa_cfg, ipr_cmd); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + scsi_cmd->scsi_done(scsi_cmd); +} + +/** + * ipr_scsi_done - mid-layer done function + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler for + * ops generated by the SCSI mid-layer + * + * Return value: + * none + **/ +static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len); + + if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { + ipr_unmap_sglist(ioa_cfg, ipr_cmd); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + scsi_cmd->scsi_done(scsi_cmd); + } else + ipr_erp_start(ioa_cfg, ipr_cmd); +} + +/** + * ipr_save_ioafp_mode_select - Save adapters mode select data + * @ioa_cfg: ioa config struct + * @scsi_cmd: scsi command struct + * + * This function saves mode select data for the adapter to + * use following an adapter reset. + * + * Return value: + * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure + **/ +static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg, + struct scsi_cmnd *scsi_cmd) +{ + if (!ioa_cfg->saved_mode_pages) { + ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages), + GFP_ATOMIC); + if (!ioa_cfg->saved_mode_pages) { + dev_err(&ioa_cfg->pdev->dev, + "IOA mode select buffer allocation failed\n"); + return SCSI_MLQUEUE_HOST_BUSY; + } + } + + memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]); + ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4]; + return 0; +} + +/** + * ipr_queuecommand - Queue a mid-layer request + * @scsi_cmd: scsi command struct + * @done: done function + * + * This function queues a request generated by the mid-layer. + * + * Return value: + * 0 on success + * SCSI_MLQUEUE_DEVICE_BUSY if device is busy + * SCSI_MLQUEUE_HOST_BUSY if host is busy + **/ +static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, + void (*done) (struct scsi_cmnd *)) +{ + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; + struct ipr_ioarcb *ioarcb; + struct ipr_cmnd *ipr_cmd; + int rc = 0; + + scsi_cmd->scsi_done = done; + ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; + res = scsi_cmd->device->hostdata; + scsi_cmd->result = (DID_OK << 16); + + /* + * We are currently blocking all devices due to a host reset + * We have told the host to stop giving us new requests, but + * ERP ops don't count. 
FIXME + */ + if (unlikely(!ioa_cfg->allow_cmds)) + return SCSI_MLQUEUE_HOST_BUSY; + + /* + * FIXME - Create scsi_set_host_offline interface + * and the ioa_is_dead check can be removed + */ + if (unlikely(ioa_cfg->ioa_is_dead || !res)) { + memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + scsi_cmd->result = (DID_NO_CONNECT << 16); + scsi_cmd->scsi_done(scsi_cmd); + return 0; + } + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ioarcb = &ipr_cmd->ioarcb; + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); + + memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); + ipr_cmd->scsi_cmd = scsi_cmd; + ioarcb->res_handle = res->cfgte.res_handle; + ipr_cmd->done = ipr_scsi_done; + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); + + if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { + if (scsi_cmd->underflow == 0) + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; + + if (res->needs_sync_complete) { + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; + res->needs_sync_complete = 0; + } + + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; + ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); + } + + if (!ipr_is_gscsi(res) && scsi_cmd->cmnd[0] >= 0xC0) + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + + if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT) + rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd); + + if (likely(rc == 0)) + rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); + + if (likely(rc == 0)) { + mb(); + writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), + ioa_cfg->regs.ioarrin_reg); + } else { + list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + return SCSI_MLQUEUE_HOST_BUSY; + } + + return 0; +} + +/** + * ipr_info - Get information about the card/driver + * @scsi_host: scsi host struct + * + * Return value: + * pointer to buffer with description string + **/ +static const char * ipr_ioa_info(struct Scsi_Host *host) +{ + static char buffer[512]; + struct ipr_ioa_cfg *ioa_cfg; + unsigned long lock_flags = 0; + + ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; + + spin_lock_irqsave(host->host_lock, lock_flags); + sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); + spin_unlock_irqrestore(host->host_lock, lock_flags); + + return buffer; +} + +static struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "IPR", + .info = ipr_ioa_info, + .queuecommand = ipr_queuecommand, + .eh_abort_handler = ipr_eh_abort, + .eh_device_reset_handler = ipr_eh_dev_reset, + .eh_host_reset_handler = ipr_eh_host_reset, + .slave_alloc = ipr_slave_alloc, + .slave_configure = ipr_slave_configure, + .slave_destroy = ipr_slave_destroy, + .bios_param = ipr_biosparam, + .can_queue = IPR_MAX_COMMANDS, + .this_id = -1, + .sg_tablesize = IPR_MAX_SGLIST, + .max_sectors = IPR_MAX_SECTORS, + .cmd_per_lun = IPR_MAX_CMD_PER_LUN, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = ipr_ioa_attrs, + .sdev_attrs = ipr_dev_attrs, + .proc_name = IPR_NAME +}; + +#ifdef CONFIG_PPC_PSERIES +static const u16 ipr_blocked_processors[] = { + PV_NORTHSTAR, + PV_PULSAR, + PV_POWER4, + PV_ICESTAR, + PV_SSTAR, + PV_POWER4p, + PV_630, + PV_630p +}; + +/** + * ipr_invalid_adapter - Determine if this adapter is supported on this hardware + * @ioa_cfg: ioa cfg struct + * + * Adapters that use Gemstone revision < 3.1 do not work reliably on + * certain pSeries hardware. 
This function determines if the given + * adapter is in one of these confgurations or not. + * + * Return value: + * 1 if adapter is not supported / 0 if adapter is supported + **/ +static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) +{ + u8 rev_id; + int i; + + if (ioa_cfg->type == 0x5702) { + if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID, + &rev_id) == PCIBIOS_SUCCESSFUL) { + if (rev_id < 4) { + for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){ + if (__is_processor(ipr_blocked_processors[i])) + return 1; + } + } + } + } + return 0; +} +#else +#define ipr_invalid_adapter(ioa_cfg) 0 +#endif + +/** + * ipr_ioa_bringdown_done - IOA bring down completion. + * @ipr_cmd: ipr command struct + * + * This function processes the completion of an adapter bring down. + * It wakes any reset sleepers. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ioa_cfg->in_reset_reload = 0; + ioa_cfg->reset_retries = 0; + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + wake_up_all(&ioa_cfg->reset_wait_q); + + spin_unlock_irq(ioa_cfg->host->host_lock); + scsi_unblock_requests(ioa_cfg->host); + spin_lock_irq(ioa_cfg->host->host_lock); + LEAVE; + + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioa_reset_done - IOA reset completion. + * @ipr_cmd: ipr command struct + * + * This function processes the completion of an adapter reset. + * It schedules any necessary mid-layer add/removes and + * wakes any reset sleepers. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_resource_entry *res; + struct ipr_hostrcb *hostrcb, *temp; + int i = 0; + + ENTER; + ioa_cfg->in_reset_reload = 0; + ioa_cfg->allow_cmds = 1; + ioa_cfg->reset_cmd = NULL; + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) { + ipr_trace; + schedule_work(&ioa_cfg->work_q); + break; + } + } + + list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) { + list_del(&hostrcb->queue); + if (i++ < IPR_NUM_LOG_HCAMS) + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); + else + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); + } + + dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); + + ioa_cfg->reset_retries = 0; + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + wake_up_all(&ioa_cfg->reset_wait_q); + + spin_unlock_irq(ioa_cfg->host->host_lock); + scsi_unblock_requests(ioa_cfg->host); + spin_lock_irq(ioa_cfg->host->host_lock); + + if (!ioa_cfg->allow_cmds) + scsi_block_requests(ioa_cfg->host); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer + * @supported_dev: supported device struct + * @vpids: vendor product id struct + * + * Return value: + * none + **/ +static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev, + struct ipr_std_inq_vpids *vpids) +{ + memset(supported_dev, 0, sizeof(struct ipr_supported_device)); + memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); + supported_dev->num_records = 1; + supported_dev->data_length = + cpu_to_be16(sizeof(struct ipr_supported_device)); + supported_dev->reserved = 0; +} + +/** + * ipr_set_supported_devs - Send Set Supported Devices for a device + * @ipr_cmd: ipr command struct + * + * This function send 
a Set Supported Devices to the adapter + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_resource_entry *res = ipr_cmd->u.res; + + ipr_cmd->job_step = ipr_ioa_reset_done; + + list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { + if (!ipr_is_af_dasd_device(res)) + continue; + + ipr_cmd->u.res = res; + ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids); + + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + + ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; + ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; + + ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | + sizeof(struct ipr_supported_device)); + ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma + + offsetof(struct ipr_misc_cbs, supp_dev)); + ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ioarcb->write_data_transfer_length = + cpu_to_be32(sizeof(struct ipr_supported_device)); + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, + IPR_SET_SUP_DEVICE_TIMEOUT); + + ipr_cmd->job_step = ipr_set_supported_devs; + return IPR_RC_JOB_RETURN; + } + + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_get_mode_page - Locate specified mode page + * @mode_pages: mode page buffer + * @page_code: page code to find + * @len: minimum required length for mode page + * + * Return value: + * pointer to mode page / NULL on failure + **/ +static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages, + u32 page_code, u32 len) +{ + struct ipr_mode_page_hdr *mode_hdr; + u32 page_length; + u32 length; + + if (!mode_pages || (mode_pages->hdr.length == 0)) + return NULL; + + length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; + mode_hdr = (struct ipr_mode_page_hdr *) + (mode_pages->data + mode_pages->hdr.block_desc_len); + + while (length) { + if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) { + if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) + return mode_hdr; + break; + } else { + page_length = (sizeof(struct ipr_mode_page_hdr) + + mode_hdr->page_length); + length -= page_length; + mode_hdr = (struct ipr_mode_page_hdr *) + ((unsigned long)mode_hdr + page_length); + } + } + return NULL; +} + +/** + * ipr_check_term_power - Check for term power errors + * @ioa_cfg: ioa config struct + * @mode_pages: IOAFP mode pages buffer + * + * Check the IOAFP's mode page 28 for term power errors + * + * Return value: + * nothing + **/ +static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_mode_pages *mode_pages) +{ + int i; + int entry_length; + struct ipr_dev_bus_entry *bus; + struct ipr_mode_page28 *mode_page; + + mode_page = ipr_get_mode_page(mode_pages, 0x28, + sizeof(struct ipr_mode_page28)); + + entry_length = mode_page->entry_length; + + bus = mode_page->bus; + + for (i = 0; i < mode_page->num_entries; i++) { + if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { + dev_err(&ioa_cfg->pdev->dev, + "Term power is absent on scsi bus %d\n", + bus->res_addr.bus); + } + + bus = (struct ipr_dev_bus_entry *)((char *)bus + 
entry_length); + } +} + +/** + * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table + * @ioa_cfg: ioa config struct + * + * Looks through the config table checking for SES devices. If + * the SES device is in the SES table indicating a maximum SCSI + * bus speed, the speed is limited for the bus. + * + * Return value: + * none + **/ +static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) +{ + u32 max_xfer_rate; + int i; + + for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { + max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, + ioa_cfg->bus_attr[i].bus_width); + + if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) + ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; + } +} + +/** + * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28 + * @ioa_cfg: ioa config struct + * @mode_pages: mode page 28 buffer + * + * Updates mode page 28 based on driver configuration + * + * Return value: + * none + **/ +static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_mode_pages *mode_pages) +{ + int i, entry_length; + struct ipr_dev_bus_entry *bus; + struct ipr_bus_attributes *bus_attr; + struct ipr_mode_page28 *mode_page; + + mode_page = ipr_get_mode_page(mode_pages, 0x28, + sizeof(struct ipr_mode_page28)); + + entry_length = mode_page->entry_length; + + /* Loop for each device bus entry */ + for (i = 0, bus = mode_page->bus; + i < mode_page->num_entries; + i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) { + if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { + dev_err(&ioa_cfg->pdev->dev, + "Invalid resource address reported: 0x%08X\n", + IPR_GET_PHYS_LOC(bus->res_addr)); + continue; + } + + bus_attr = &ioa_cfg->bus_attr[i]; + bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; + bus->bus_width = bus_attr->bus_width; + bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); + bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; + if (bus_attr->qas_enabled) + bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; + else + bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; + } +} + +/** + * ipr_build_mode_select - Build a mode select command + * @ipr_cmd: ipr command struct + * @res_handle: resource handle to send command to + * @parm: Byte 2 of Mode Sense command + * @dma_addr: DMA buffer address + * @xfer_len: data transfer length + * + * Return value: + * none + **/ +static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, + u32 res_handle, u8 parm, u32 dma_addr, + u8 xfer_len) +{ + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + + ioarcb->res_handle = res_handle; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; + ioarcb->cmd_pkt.cdb[1] = parm; + ioarcb->cmd_pkt.cdb[4] = xfer_len; + + ioadl->flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len); + ioadl->address = cpu_to_be32(dma_addr); + ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len); +} + +/** + * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA + * @ipr_cmd: ipr command struct + * + * This function sets up the SCSI bus attributes and sends + * a Mode Select for Page 28 to activate them. 
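+ * If the host has previously sent MODE SELECT to the IOA resource, the
+ * data saved by ipr_save_ioafp_mode_select is replayed verbatim;
+ * otherwise page 28 is rebuilt from the driver's bus attributes (SES
+ * speed limits, term power check, QAS and bus width) before being
+ * written back with MODE SELECT cdb[1] set to 0x11.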
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; + int length; + + ENTER; + if (ioa_cfg->saved_mode_pages) { + memcpy(mode_pages, ioa_cfg->saved_mode_pages, + ioa_cfg->saved_mode_page_len); + length = ioa_cfg->saved_mode_page_len; + } else { + ipr_scsi_bus_speed_limit(ioa_cfg); + ipr_check_term_power(ioa_cfg, mode_pages); + ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); + length = mode_pages->hdr.length + 1; + mode_pages->hdr.length = 0; + } + + ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), + length); + + ipr_cmd->job_step = ipr_set_supported_devs; + ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, + struct ipr_resource_entry, queue); + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_build_mode_sense - Builds a mode sense command + * @ipr_cmd: ipr command struct + * @res: resource entry struct + * @parm: Byte 2 of mode sense command + * @dma_addr: DMA address of mode sense buffer + * @xfer_len: Size of DMA buffer + * + * Return value: + * none + **/ +static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, + u32 res_handle, + u8 parm, u32 dma_addr, u8 xfer_len) +{ + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + + ioarcb->res_handle = res_handle; + ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; + ioarcb->cmd_pkt.cdb[2] = parm; + ioarcb->cmd_pkt.cdb[4] = xfer_len; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + + ioadl->flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len); + ioadl->address = cpu_to_be32(dma_addr); + ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); +} + +/** + * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA + * @ipr_cmd: ipr command struct + * + * This function send a Page 28 mode sense to the IOA to + * retrieve SCSI bus attributes. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), + 0x28, ioa_cfg->vpd_cbs_dma + + offsetof(struct ipr_misc_cbs, mode_pages), + sizeof(struct ipr_mode_pages)); + + ipr_cmd->job_step = ipr_ioafp_mode_select_page28; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_init_res_table - Initialize the resource table + * @ipr_cmd: ipr command struct + * + * This function looks through the existing resource table, comparing + * it with the config table. This function will take care of old/new + * devices and schedule adding/removing them from the mid-layer + * as appropriate. 
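+ * The reconciliation, in outline: every entry on used_res_q is parked on
+ * a local old_res list, each config table entry is matched back by
+ * resource address (new devices are pulled from free_res_q and flagged
+ * add_to_ml), and anything left over either gets del_from_ml set when
+ * the mid-layer still holds an sdev for it or is returned to free_res_q.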
+ * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_resource_entry *res, *temp; + struct ipr_config_table_entry *cfgte; + int found, i; + LIST_HEAD(old_res); + + ENTER; + if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ) + dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); + + list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) + list_move_tail(&res->queue, &old_res); + + for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) { + cfgte = &ioa_cfg->cfg_table->dev[i]; + found = 0; + + list_for_each_entry_safe(res, temp, &old_res, queue) { + if (!memcmp(&res->cfgte.res_addr, + &cfgte->res_addr, sizeof(cfgte->res_addr))) { + list_move_tail(&res->queue, &ioa_cfg->used_res_q); + found = 1; + break; + } + } + + if (!found) { + if (list_empty(&ioa_cfg->free_res_q)) { + dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); + break; + } + + found = 1; + res = list_entry(ioa_cfg->free_res_q.next, + struct ipr_resource_entry, queue); + list_move_tail(&res->queue, &ioa_cfg->used_res_q); + ipr_init_res_entry(res); + res->add_to_ml = 1; + } + + if (found) + memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); + } + + list_for_each_entry_safe(res, temp, &old_res, queue) { + if (res->sdev) { + res->del_from_ml = 1; + list_move_tail(&res->queue, &ioa_cfg->used_res_q); + } else { + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + } + } + + ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Query IOA Configuration command + * to the adapter to retrieve the IOA configuration table. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; + + ENTER; + dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", + ucode_vpd->major_release, ucode_vpd->card_type, + ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + + ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; + ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; + + ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ioarcb->read_data_transfer_length = + cpu_to_be32(sizeof(struct ipr_config_table)); + + ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma); + ioadl->flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table)); + + ipr_cmd->job_step = ipr_init_res_table; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_inquiry - Send an Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This utility function sends an inquiry to the adapter. 
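+ * The flags, page, dma_addr and xfer_len arguments map directly onto the
+ * CDB and IOADL built below:
+ *
+ *   cdb[0] = INQUIRY;
+ *   cdb[1] = flags;      EVPD bit for vital product data pages
+ *   cdb[2] = page;       page code (0 = standard inquiry data)
+ *   cdb[4] = xfer_len;   allocation length
+ *
+ * with a single read IOADL descriptor covering dma_addr/xfer_len.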
+ * + * Return value: + * none + **/ +static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, + u32 dma_addr, u8 xfer_len) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + + ENTER; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + + ioarcb->cmd_pkt.cdb[0] = INQUIRY; + ioarcb->cmd_pkt.cdb[1] = flags; + ioarcb->cmd_pkt.cdb[2] = page; + ioarcb->cmd_pkt.cdb[4] = xfer_len; + + ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); + + ioadl->address = cpu_to_be32(dma_addr); + ioadl->flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len); + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + LEAVE; +} + +/** + * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Page 3 inquiry to the adapter + * to retrieve software VPD information. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + char type[5]; + + ENTER; + + /* Grab the type out of the VPD and store it away */ + memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); + type[4] = '\0'; + ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); + + ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; + + ipr_ioafp_inquiry(ipr_cmd, 1, 3, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), + sizeof(struct ipr_inquiry_page3)); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a standard inquiry to the adapter. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ipr_cmd->job_step = ipr_ioafp_page3_inquiry; + + ipr_ioafp_inquiry(ipr_cmd, 0, 0, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), + sizeof(struct ipr_ioa_vpd)); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ. + * @ipr_cmd: ipr command struct + * + * This function send an Identify Host Request Response Queue + * command to establish the HRRQ with the adapter. 
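+ * The CDB carries the 32-bit DMA address of the host RRQ in bytes 2-5,
+ * most significant byte first, and the queue size in bytes
+ * (sizeof(u32) * IPR_NUM_CMD_BLKS) in bytes 7-8, matching the layout set
+ * up in ipr_init_ioa_mem where hrrq_start/hrrq_end bracket host_rrq.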
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + + ENTER; + dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); + + ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ioarcb->cmd_pkt.cdb[2] = + ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff; + ioarcb->cmd_pkt.cdb[3] = + ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff; + ioarcb->cmd_pkt.cdb[4] = + ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[5] = + ((u32) ioa_cfg->host_rrq_dma) & 0xff; + ioarcb->cmd_pkt.cdb[7] = + ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = + (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff; + + ipr_cmd->job_step = ipr_ioafp_std_inquiry; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_timer_done - Adapter reset timer function + * @ipr_cmd: ipr command struct + * + * Description: This function is used in adapter reset processing + * for timing events. If the reset_cmd pointer in the IOA + * config struct is not this adapter's we are doing nested + * resets and fail_all_ops will take care of freeing the + * command block. + * + * Return value: + * none + **/ +static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->reset_cmd == ipr_cmd) { + list_del(&ipr_cmd->queue); + ipr_cmd->done(ipr_cmd); + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); +} + +/** + * ipr_reset_start_timer - Start a timer for adapter reset job + * @ipr_cmd: ipr command struct + * @timeout: timeout value + * + * Description: This function is used in adapter reset processing + * for timing events. If the reset_cmd pointer in the IOA + * config struct is not this adapter's we are doing nested + * resets and fail_all_ops will take care of freeing the + * command block. + * + * Return value: + * none + **/ +static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, + unsigned long timeout) +{ + list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q); + ipr_cmd->done = ipr_reset_ioa_job; + + ipr_cmd->timer.data = (unsigned long) ipr_cmd; + ipr_cmd->timer.expires = jiffies + timeout; + ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done; + add_timer(&ipr_cmd->timer); +} + +/** + * ipr_init_ioa_mem - Initialize ioa_cfg control block + * @ioa_cfg: ioa cfg struct + * + * Return value: + * nothing + **/ +static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) +{ + memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS); + + /* Initialize Host RRQ pointers */ + ioa_cfg->hrrq_start = ioa_cfg->host_rrq; + ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1]; + ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start; + ioa_cfg->toggle_bit = 1; + + /* Zero out config table */ + memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table)); +} + +/** + * ipr_reset_enable_ioa - Enable the IOA following a reset. + * @ipr_cmd: ipr command struct + * + * This function reinitializes some control blocks and + * enables destructive diagnostics on the adapter. 
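+ * From here the reset runs as a chain of job_step callbacks driven by
+ * ipr_reset_ioa_job; as wired up in this file the bring-up sequence is
+ * roughly:
+ *
+ *   ipr_reset_enable_ioa -> ipr_ioafp_indentify_hrrq
+ *     -> ipr_ioafp_std_inquiry -> ipr_ioafp_page3_inquiry
+ *     -> ipr_ioafp_query_ioa_cfg -> ipr_init_res_table
+ *     -> ipr_ioafp_mode_sense_page28 -> ipr_ioafp_mode_select_page28
+ *     -> ipr_set_supported_devs -> ipr_ioa_reset_done
+ *
+ * with each step either continuing inline (IPR_RC_JOB_CONTINUE) or
+ * returning after queueing an asynchronous request (IPR_RC_JOB_RETURN).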
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + volatile u32 int_reg; + + ENTER; + ipr_cmd->job_step = ipr_ioafp_indentify_hrrq; + ipr_init_ioa_mem(ioa_cfg); + + ioa_cfg->allow_interrupts = 1; + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + + if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { + writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), + ioa_cfg->regs.clr_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + return IPR_RC_JOB_CONTINUE; + } + + /* Enable destructive diagnostics on IOA */ + writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg); + + writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + + dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); + + ipr_cmd->timer.data = (unsigned long) ipr_cmd; + ipr_cmd->timer.expires = jiffies + IPR_OPERATIONAL_TIMEOUT; + ipr_cmd->timer.function = (void (*)(unsigned long))ipr_timeout; + add_timer(&ipr_cmd->timer); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_wait_for_dump - Wait for a dump to timeout. + * @ipr_cmd: ipr command struct + * + * This function is invoked when an adapter dump has run out + * of processing time. + * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + if (ioa_cfg->sdt_state == GET_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + + ipr_cmd->job_step = ipr_reset_alert; + + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_unit_check_no_data - Log a unit check/no data error log + * @ioa_cfg: ioa config struct + * + * Logs an error indicating the adapter unit checked, but for some + * reason, we were unable to fetch the unit check buffer. + * + * Return value: + * nothing + **/ +static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) +{ + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); +} + +/** + * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA + * @ioa_cfg: ioa config struct + * + * Fetches the unit check buffer from the adapter by clocking the data + * through the mailbox register. 
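[Editorial note, not part of the patch.] ipr_reset_enable_ioa() above unmasks interrupts by writing a bit pattern to a clear-mask register and then reading the mask back (the read-back also flushes the posted MMIO write). A trivial stand-alone sketch of the mask arithmetic, with a plain variable standing in for the device register:

#include <stdio.h>
#include <stdint.h>

#define HRRQ_UPDATED       (0x80000000u >> 30)
#define IOA_TRANS_TO_OPER  (0x80000000u >> 0)

int main(void)
{
	/* Stand-in for the adapter's interrupt mask: a set bit means "masked". */
	uint32_t int_mask = 0xffffffffu;

	/* A write to the clear-mask register clears (unmasks) the given bits;
	 * the driver then reads the mask back to confirm the new state. */
	int_mask &= ~(HRRQ_UPDATED | IOA_TRANS_TO_OPER);

	printf("mask after unmasking: 0x%08x\n", (unsigned int)int_mask);
	return 0;
}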
+ * + * Return value: + * nothing + **/ +static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) +{ + unsigned long mailbox; + struct ipr_hostrcb *hostrcb; + struct ipr_uc_sdt sdt; + int rc, length; + + mailbox = readl(ioa_cfg->ioa_mailbox); + + if (!ipr_sdt_is_fmt2(mailbox)) { + ipr_unit_check_no_data(ioa_cfg); + return; + } + + memset(&sdt, 0, sizeof(struct ipr_uc_sdt)); + rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (u32 *) &sdt, + (sizeof(struct ipr_uc_sdt)) / sizeof(u32)); + + if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) || + !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) { + ipr_unit_check_no_data(ioa_cfg); + return; + } + + /* Find length of the first sdt entry (UC buffer) */ + length = (be32_to_cpu(sdt.entry[0].end_offset) - + be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK; + + hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, + struct ipr_hostrcb, queue); + list_del(&hostrcb->queue); + memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); + + rc = ipr_get_ldump_data_section(ioa_cfg, + be32_to_cpu(sdt.entry[0].bar_str_offset), + (u32 *)&hostrcb->hcam, + min(length, (int)sizeof(hostrcb->hcam)) / sizeof(u32)); + + if (!rc) + ipr_handle_log_data(ioa_cfg, hostrcb); + else + ipr_unit_check_no_data(ioa_cfg); + + list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); +} + +/** + * ipr_reset_restore_cfg_space - Restore PCI config space. + * @ipr_cmd: ipr command struct + * + * Description: This function restores the saved PCI config space of + * the adapter, fails all outstanding ops back to the callers, and + * fetches the dump/unit check if applicable to this reset. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc; + + ENTER; + rc = pci_restore_state(ioa_cfg->pdev, ioa_cfg->pci_cfg_buf); + + if (rc != PCIBIOS_SUCCESSFUL) { + ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + return IPR_RC_JOB_CONTINUE; + } + + if (ipr_set_pcix_cmd_reg(ioa_cfg)) { + ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + return IPR_RC_JOB_CONTINUE; + } + + ipr_fail_all_ops(ioa_cfg); + + if (ioa_cfg->ioa_unit_checked) { + ioa_cfg->ioa_unit_checked = 0; + ipr_get_unit_check_buffer(ioa_cfg); + ipr_cmd->job_step = ipr_reset_alert; + ipr_reset_start_timer(ipr_cmd, 0); + return IPR_RC_JOB_RETURN; + } + + if (ioa_cfg->in_ioa_bringdown) { + ipr_cmd->job_step = ipr_ioa_bringdown_done; + } else { + ipr_cmd->job_step = ipr_reset_enable_ioa; + + if (GET_DUMP == ioa_cfg->sdt_state) { + ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT); + ipr_cmd->job_step = ipr_reset_wait_for_dump; + schedule_work(&ioa_cfg->work_q); + return IPR_RC_JOB_RETURN; + } + } + + ENTER; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_start_bist - Run BIST on the adapter. + * @ipr_cmd: ipr command struct + * + * Description: This function runs BIST on the adapter, then delays 2 seconds. 
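[Editorial note, not part of the patch.] ipr_get_unit_check_buffer() above sizes the unit check data as the first SDT entry's end offset minus its start offset, masks the result to the format-2 address width, and clamps it to the host buffer before copying it in 32-bit words. A small sketch of that arithmetic with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

#define FMT2_MBX_ADDR_MASK 0x0fffffff	/* low 28 address bits of a format-2 SDT word */

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	uint32_t bar_str_offset = 0x00010000;	/* illustrative values only */
	uint32_t end_offset     = 0x00012400;
	int hcam_buf_len        = 4096;		/* size of the host response buffer */

	/* Length of the first SDT entry, limited to what the buffer can hold,
	 * expressed in 32-bit words as passed to the dump-fetch routine. */
	int length = (int)((end_offset - bar_str_offset) & FMT2_MBX_ADDR_MASK);
	int words  = min_int(length, hcam_buf_len) / (int)sizeof(uint32_t);

	printf("entry length %d bytes -> copy %d words\n", length, words);
	return 0;
}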
+ * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc; + + ENTER; + rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); + + if (rc != PCIBIOS_SUCCESSFUL) { + ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + rc = IPR_RC_JOB_CONTINUE; + } else { + ipr_cmd->job_step = ipr_reset_restore_cfg_space; + ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); + rc = IPR_RC_JOB_RETURN; + } + + LEAVE; + return rc; +} + +/** + * ipr_reset_allowed - Query whether or not IOA can be reset + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 if reset not allowed / non-zero if reset is allowed + **/ +static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) +{ + volatile u32 temp_reg; + + temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0); +} + +/** + * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA. + * @ipr_cmd: ipr command struct + * + * Description: This function waits for adapter permission to run BIST, + * then runs BIST. If the adapter does not give permission after a + * reasonable time, we will reset the adapter anyway. The impact of + * resetting the adapter without warning the adapter is the risk of + * losing the persistent error log on the adapter. If the adapter is + * reset while it is writing to the flash on the adapter, the flash + * segment will have bad ECC and be zeroed. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc = IPR_RC_JOB_RETURN; + + if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { + ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; + ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); + } else { + ipr_cmd->job_step = ipr_reset_start_bist; + rc = IPR_RC_JOB_CONTINUE; + } + + return rc; +} + +/** + * ipr_reset_alert_part2 - Alert the adapter of a pending reset + * @ipr_cmd: ipr command struct + * + * Description: This function alerts the adapter that it will be reset. + * If memory space is not currently enabled, proceed directly + * to running BIST on the adapter. The timer must always be started + * so we guarantee we do not run BIST from ipr_isr. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + u16 cmd_reg; + int rc; + + ENTER; + rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); + + if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { + ipr_mask_and_clear_interrupts(ioa_cfg, ~0); + writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg); + ipr_cmd->job_step = ipr_reset_wait_to_start_bist; + } else { + ipr_cmd->job_step = ipr_reset_start_bist; + } + + ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; + ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_ucode_download_done - Microcode download completion + * @ipr_cmd: ipr command struct + * + * Description: This function unmaps the microcode download buffer. 
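[Editorial note, not part of the patch.] ipr_reset_wait_to_start_bist() above re-arms a short timer and decrements a time budget until the adapter drops its critical-operation bit or the budget runs out, at which point BIST is started anyway. A user-space sketch of that countdown-poll pattern (the driver does this from a timer callback rather than a loop, and the tick values here are invented):

#include <stdio.h>

#define CHECK_INTERVAL    1	/* one polling tick */
#define RESET_WAIT_BUDGET 20	/* total ticks we are willing to wait */

/* Stand-in for reading the critical-operation bit from the adapter. */
static int reset_allowed(int tick)
{
	return tick >= 7;	/* pretend the adapter quiesces after 7 ticks */
}

int main(void)
{
	int time_left = RESET_WAIT_BUDGET;
	int tick = 0;

	/* Poll until permission is granted or the budget is exhausted. */
	while (!reset_allowed(tick) && time_left) {
		time_left -= CHECK_INTERVAL;
		tick++;
	}

	printf("starting BIST after %d ticks (budget left: %d)\n", tick, time_left);
	return 0;
}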
+ * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; + + pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist, + sglist->num_sg, DMA_TO_DEVICE); + + ipr_cmd->job_step = ipr_reset_alert; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_ucode_download - Download microcode to the adapter + * @ipr_cmd: ipr command struct + * + * Description: This function checks to see if it there is microcode + * to download to the adapter. If there is, a download is performed. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; + + ENTER; + ipr_cmd->job_step = ipr_reset_alert; + + if (!sglist) + return IPR_RC_JOB_CONTINUE; + + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; + ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; + ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; + ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; + ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; + + if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) { + dev_err(&ioa_cfg->pdev->dev, + "Failed to map microcode download buffer\n"); + return IPR_RC_JOB_CONTINUE; + } + + ipr_cmd->job_step = ipr_reset_ucode_download_done; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, + IPR_WRITE_BUFFER_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_shutdown_ioa - Shutdown the adapter + * @ipr_cmd: ipr command struct + * + * Description: This function issues an adapter shutdown of the + * specified type to the specified adapter as part of the + * adapter reset job. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; + unsigned long timeout; + int rc = IPR_RC_JOB_CONTINUE; + + ENTER; + if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) { + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; + ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; + + if (shutdown_type == IPR_SHUTDOWN_ABBREV) + timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT; + else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL) + timeout = IPR_INTERNAL_TIMEOUT; + else + timeout = IPR_SHUTDOWN_TIMEOUT; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout); + + rc = IPR_RC_JOB_RETURN; + ipr_cmd->job_step = ipr_reset_ucode_download; + } else + ipr_cmd->job_step = ipr_reset_alert; + + LEAVE; + return rc; +} + +/** + * ipr_reset_ioa_job - Adapter reset job + * @ipr_cmd: ipr command struct + * + * Description: This function is the job router for the adapter reset job. 
+ * + * Return value: + * none + **/ +static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) +{ + u32 rc, ioasc; + unsigned long scratch = ipr_cmd->u.scratch; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + do { + ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + if (ioa_cfg->reset_cmd != ipr_cmd) { + /* + * We are doing nested adapter resets and this is + * not the current reset job. + */ + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + return; + } + + if (IPR_IOASC_SENSE_KEY(ioasc)) { + dev_err(&ioa_cfg->pdev->dev, + "0x%02X failed with IOASC: 0x%08X\n", + ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); + + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + return; + } + + ipr_reinit_ipr_cmnd(ipr_cmd); + ipr_cmd->u.scratch = scratch; + rc = ipr_cmd->job_step(ipr_cmd); + } while(rc == IPR_RC_JOB_CONTINUE); +} + +/** + * _ipr_initiate_ioa_reset - Initiate an adapter reset + * @ioa_cfg: ioa config struct + * @job_step: first job step of reset job + * @shutdown_type: shutdown type + * + * Description: This function will initiate the reset of the given adapter + * starting at the selected job step. + * If the caller needs to wait on the completion of the reset, + * the caller must sleep on the reset_wait_q. + * + * Return value: + * none + **/ +static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, + int (*job_step) (struct ipr_cmnd *), + enum ipr_shutdown_type shutdown_type) +{ + struct ipr_cmnd *ipr_cmd; + + ioa_cfg->in_reset_reload = 1; + ioa_cfg->allow_cmds = 0; + scsi_block_requests(ioa_cfg->host); + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ioa_cfg->reset_cmd = ipr_cmd; + ipr_cmd->job_step = job_step; + ipr_cmd->u.shutdown_type = shutdown_type; + + ipr_reset_ioa_job(ipr_cmd); +} + +/** + * ipr_initiate_ioa_reset - Initiate an adapter reset + * @ioa_cfg: ioa config struct + * @shutdown_type: shutdown type + * + * Description: This function will initiate the reset of the given adapter. + * If the caller needs to wait on the completion of the reset, + * the caller must sleep on the reset_wait_q. + * + * Return value: + * none + **/ +static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, + enum ipr_shutdown_type shutdown_type) +{ + if (ioa_cfg->ioa_is_dead) + return; + + if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + + if (ioa_cfg->reset_retries++ > IPR_NUM_RESET_RELOAD_RETRIES) { + dev_err(&ioa_cfg->pdev->dev, + "IOA taken offline - error recovery failed\n"); + + ioa_cfg->reset_retries = 0; + ioa_cfg->ioa_is_dead = 1; + + if (ioa_cfg->in_ioa_bringdown) { + ioa_cfg->reset_cmd = NULL; + ioa_cfg->in_reset_reload = 0; + ipr_fail_all_ops(ioa_cfg); + wake_up_all(&ioa_cfg->reset_wait_q); + + spin_unlock_irq(ioa_cfg->host->host_lock); + scsi_unblock_requests(ioa_cfg->host); + spin_lock_irq(ioa_cfg->host->host_lock); + return; + } else { + ioa_cfg->in_ioa_bringdown = 1; + shutdown_type = IPR_SHUTDOWN_NONE; + } + } + + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, + shutdown_type); +} + +/** + * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..) + * @ioa_cfg: ioa cfg struct + * + * Description: This is the second phase of adapter intialization + * This function takes care of initilizing the adapter to the point + * where it can accept new commands. 
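[Editorial note, not part of the patch.] ipr_reset_ioa_job() above drives the reset as a chain of job steps: each step either returns IPR_RC_JOB_CONTINUE (run the next step immediately) or IPR_RC_JOB_RETURN (asynchronous work was queued and the router is re-entered from its completion handler). A minimal user-space sketch of that state-machine pattern, with synchronous steps only:

#include <stdio.h>

#define RC_JOB_CONTINUE 1
#define RC_JOB_RETURN   2

struct job {
	int (*step)(struct job *);
	int state;		/* stand-in for per-job scratch data */
};

/* Last step: nothing left to queue, report completion. */
static int step_done(struct job *job)
{
	printf("job finished, state=%d\n", job->state);
	return RC_JOB_RETURN;
}

/* First step: do some work, pick the next step, keep going synchronously. */
static int step_enable(struct job *job)
{
	job->state++;
	job->step = step_done;
	return RC_JOB_CONTINUE;
}

/* The router: keep calling the current step while it asks to continue. */
static void run_job(struct job *job)
{
	int rc;

	do {
		rc = job->step(job);
	} while (rc == RC_JOB_CONTINUE);
}

int main(void)
{
	struct job job = { .step = step_enable, .state = 0 };

	run_job(&job);
	return 0;
}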
+ + * Return value: + * 0 on sucess / -EIO on failure + **/ +static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) +{ + int rc = 0; + unsigned long host_lock_flags = 0; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + + if (ioa_cfg->ioa_is_dead) { + rc = -EIO; + } else if (ipr_invalid_adapter(ioa_cfg)) { + if (!ipr_testmode) + rc = -EIO; + + dev_err(&ioa_cfg->pdev->dev, + "Adapter not supported in this hardware configuration.\n"); + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + + LEAVE; + return rc; +} + +/** + * ipr_free_cmd_blks - Frees command blocks allocated for an adapter + * @ioa_cfg: ioa config struct + * + * Return value: + * none + **/ +static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { + if (ioa_cfg->ipr_cmnd_list[i]) + pci_pool_free(ioa_cfg->ipr_cmd_pool, + ioa_cfg->ipr_cmnd_list[i], + ioa_cfg->ipr_cmnd_list_dma[i]); + + ioa_cfg->ipr_cmnd_list[i] = NULL; + } + + if (ioa_cfg->ipr_cmd_pool) + pci_pool_destroy (ioa_cfg->ipr_cmd_pool); + + ioa_cfg->ipr_cmd_pool = NULL; +} + +/** + * ipr_free_mem - Frees memory allocated for an adapter + * @ioa_cfg: ioa cfg struct + * + * Return value: + * nothing + **/ +static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + kfree(ioa_cfg->res_entries); + pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs), + ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); + ipr_free_cmd_blks(ioa_cfg); + pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, + ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); + pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table), + ioa_cfg->cfg_table, + ioa_cfg->cfg_table_dma); + + for (i = 0; i < IPR_NUM_HCAMS; i++) { + pci_free_consistent(ioa_cfg->pdev, + sizeof(struct ipr_hostrcb), + ioa_cfg->hostrcb[i], + ioa_cfg->hostrcb_dma[i]); + } + + ipr_free_dump(ioa_cfg); + kfree(ioa_cfg->saved_mode_pages); + kfree(ioa_cfg->trace); +} + +/** + * ipr_free_all_resources - Free all allocated resources for an adapter. + * @ipr_cmd: ipr command struct + * + * This function frees all allocated resources for the + * specified adapter. 
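[Editorial note, not part of the patch.] ipr_free_cmd_blks() and ipr_free_mem() above are written so they tolerate a partially allocated configuration: each slot is checked before freeing and cleared afterwards, which is what lets ipr_alloc_mem() fall through to a single cleanup path no matter how far allocation got. A hedged stand-alone sketch of that idempotent-teardown idiom:

#include <stdio.h>
#include <stdlib.h>

#define NBUFS 4

struct cfg {
	char *buf[NBUFS];
};

/* Safe to call at any point: frees only what exists and clears the slot. */
static void free_cfg(struct cfg *cfg)
{
	for (int i = 0; i < NBUFS; i++) {
		free(cfg->buf[i]);	/* free(NULL) is a no-op */
		cfg->buf[i] = NULL;
	}
}

static int alloc_cfg(struct cfg *cfg)
{
	for (int i = 0; i < NBUFS; i++) {
		cfg->buf[i] = malloc(64);
		if (!cfg->buf[i])
			goto cleanup;
	}
	return 0;

cleanup:
	free_cfg(cfg);	/* one shared error path, regardless of progress */
	return -1;
}

int main(void)
{
	struct cfg cfg = { { NULL } };

	if (alloc_cfg(&cfg) == 0)
		printf("all buffers allocated\n");
	free_cfg(&cfg);
	return 0;
}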
+ * + * Return value: + * none + **/ +static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) +{ + ENTER; + free_irq(ioa_cfg->pdev->irq, ioa_cfg); + iounmap((void *) ioa_cfg->hdw_dma_regs); + release_mem_region(ioa_cfg->hdw_dma_regs_pci, + pci_resource_len(ioa_cfg->pdev, 0)); + ipr_free_mem(ioa_cfg); + scsi_host_put(ioa_cfg->host); + LEAVE; +} + +/** + * ipr_alloc_cmd_blks - Allocate command blocks for an adapter + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / -ENOMEM on allocation failure + **/ +static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioarcb *ioarcb; + u32 dma_addr; + int i; + + ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, + sizeof(struct ipr_cmnd), 8, 0); + + if (!ioa_cfg->ipr_cmd_pool) + return -ENOMEM; + + for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { + ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr); + + if (!ipr_cmd) { + ipr_free_cmd_blks(ioa_cfg); + return -ENOMEM; + } + + memset(ipr_cmd, 0, sizeof(*ipr_cmd)); + ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; + ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; + + ioarcb = &ipr_cmd->ioarcb; + ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr); + ioarcb->host_response_handle = cpu_to_be32(i << 2); + ioarcb->write_ioadl_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + ioarcb->ioasa_host_pci_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); + ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); + ipr_cmd->cmd_index = i; + ipr_cmd->ioa_cfg = ioa_cfg; + ipr_cmd->sense_buffer_dma = dma_addr + + offsetof(struct ipr_cmnd, sense_buffer); + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + } + + return 0; +} + +/** + * ipr_alloc_mem - Allocate memory for an adapter + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / non-zero for error + **/ +static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + ENTER; + ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) * + IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); + + if (!ioa_cfg->res_entries) + goto cleanup; + + memset(ioa_cfg->res_entries, 0, + sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS); + + for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) + list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); + + ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, + sizeof(struct ipr_misc_cbs), + &ioa_cfg->vpd_cbs_dma); + + if (!ioa_cfg->vpd_cbs) + goto cleanup; + + if (ipr_alloc_cmd_blks(ioa_cfg)) + goto cleanup; + + ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev, + sizeof(u32) * IPR_NUM_CMD_BLKS, + &ioa_cfg->host_rrq_dma); + + if (!ioa_cfg->host_rrq) + goto cleanup; + + ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev, + sizeof(struct ipr_config_table), + &ioa_cfg->cfg_table_dma); + + if (!ioa_cfg->cfg_table) + goto cleanup; + + for (i = 0; i < IPR_NUM_HCAMS; i++) { + ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev, + sizeof(struct ipr_hostrcb), + &ioa_cfg->hostrcb_dma[i]); + + if (!ioa_cfg->hostrcb[i]) + goto cleanup; + + memset(ioa_cfg->hostrcb[i], 0, sizeof(struct ipr_hostrcb)); + ioa_cfg->hostrcb[i]->hostrcb_dma = + ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); + list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); + } + + ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) * + IPR_NUM_TRACE_ENTRIES, GFP_KERNEL); + + if (!ioa_cfg->trace) + goto 
cleanup; + + memset(ioa_cfg->trace, 0, + sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES); + + LEAVE; + return 0; + +cleanup: + ipr_free_mem(ioa_cfg); + + LEAVE; + return -ENOMEM; +} + +/** + * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values + * @ioa_cfg: ioa config struct + * + * Return value: + * none + **/ +static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { + ioa_cfg->bus_attr[i].bus = i; + ioa_cfg->bus_attr[i].qas_enabled = 0; + ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; + if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds)) + ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; + else + ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; + } +} + +/** + * ipr_init_ioa_cfg - Initialize IOA config struct + * @ioa_cfg: ioa config struct + * @host: scsi host struct + * @pdev: PCI dev struct + * + * Return value: + * none + **/ +static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, + struct Scsi_Host *host, struct pci_dev *pdev) +{ + ioa_cfg->host = host; + ioa_cfg->pdev = pdev; + ioa_cfg->log_level = ipr_log_level; + sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); + sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); + sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL); + sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL); + sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); + sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); + sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); + sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); + + INIT_LIST_HEAD(&ioa_cfg->free_q); + INIT_LIST_HEAD(&ioa_cfg->pending_q); + INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); + INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); + INIT_LIST_HEAD(&ioa_cfg->free_res_q); + INIT_LIST_HEAD(&ioa_cfg->used_res_q); + INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); + init_waitqueue_head(&ioa_cfg->reset_wait_q); + ioa_cfg->sdt_state = INACTIVE; + + ipr_initialize_bus_attr(ioa_cfg); + + host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; + host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; + host->max_channel = IPR_MAX_BUS_TO_SCAN; + host->unique_id = host->host_no; + host->max_cmd_len = IPR_MAX_CDB_LEN; + pci_set_drvdata(pdev, ioa_cfg); + + memcpy(&ioa_cfg->regs, &ioa_cfg->chip_cfg->regs, sizeof(ioa_cfg->regs)); + + ioa_cfg->regs.set_interrupt_mask_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.clr_interrupt_mask_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.sense_interrupt_mask_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.clr_interrupt_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.sense_interrupt_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.ioarrin_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.sense_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.set_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.clr_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs; +} + +/** + * ipr_probe_ioa - Allocates memory and does first stage of initialization + * @pdev: PCI device struct + * @dev_id: PCI device id struct + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int __devinit ipr_probe_ioa(struct pci_dev *pdev, + const struct pci_device_id *dev_id) +{ + struct ipr_ioa_cfg *ioa_cfg; + struct Scsi_Host *host; + unsigned long ipr_regs, ipr_regs_pci; + u32 rc = PCIBIOS_SUCCESSFUL; + + ENTER; + + if ((rc = pci_enable_device(pdev))) { + dev_err(&pdev->dev, "Cannot enable adapter\n"); + return rc; + } + + dev_info(&pdev->dev, "Found IOA 
with IRQ: %d\n", pdev->irq); + + host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); + + if (!host) { + dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); + return -ENOMEM; + } + + ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; + memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); + + ioa_cfg->chip_cfg = (const struct ipr_chip_cfg_t *)dev_id->driver_data; + + ipr_regs_pci = pci_resource_start(pdev, 0); + + if (!request_mem_region(ipr_regs_pci, + pci_resource_len(pdev, 0), IPR_NAME)) { + dev_err(&pdev->dev, + "Couldn't register memory range of registers\n"); + scsi_host_put(host); + return -ENOMEM; + } + + ipr_regs = (unsigned long)ioremap(ipr_regs_pci, + pci_resource_len(pdev, 0)); + + if (!ipr_regs) { + dev_err(&pdev->dev, + "Couldn't map memory range of registers\n"); + release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0)); + scsi_host_put(host); + return -ENOMEM; + } + + ioa_cfg->hdw_dma_regs = ipr_regs; + ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; + ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; + + ipr_init_ioa_cfg(ioa_cfg, host, pdev); + + pci_set_master(pdev); + rc = pci_set_dma_mask(pdev, 0xffffffff); + + if (rc != PCIBIOS_SUCCESSFUL) { + dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); + rc = -EIO; + goto cleanup_nomem; + } + + rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, + ioa_cfg->chip_cfg->cache_line_size); + + if (rc != PCIBIOS_SUCCESSFUL) { + dev_err(&pdev->dev, "Write of cache line size failed\n"); + rc = -EIO; + goto cleanup_nomem; + } + + /* Save away PCI config space for use following IOA reset */ + rc = pci_save_state(pdev, ioa_cfg->pci_cfg_buf); + + if (rc != PCIBIOS_SUCCESSFUL) { + dev_err(&pdev->dev, "Failed to save PCI config space\n"); + rc = -EIO; + goto cleanup_nomem; + } + + if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) + goto cleanup_nomem; + + if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) + goto cleanup_nomem; + + if ((rc = ipr_alloc_mem(ioa_cfg))) + goto cleanup; + + ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); + rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg); + + if (rc) { + dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", + pdev->irq, rc); + goto cleanup_nolog; + } + + spin_lock(&ipr_driver_lock); + list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); + spin_unlock(&ipr_driver_lock); + + LEAVE; + return 0; + +cleanup: + dev_err(&pdev->dev, "Couldn't allocate enough memory for device driver!\n"); +cleanup_nolog: + ipr_free_mem(ioa_cfg); +cleanup_nomem: + iounmap((void *) ipr_regs); + release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0)); + scsi_host_put(host); + + return rc; +} + +/** + * ipr_scan_vsets - Scans for VSET devices + * @ioa_cfg: ioa config struct + * + * Description: Since the VSET resources do not follow SAM in that we can have + * sparse LUNs with no LUN 0, we have to scan for these ourselves. + * + * Return value: + * none + **/ +static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg) +{ + int target, lun; + + for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++) + for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ ) + scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun); +} + +/** + * ipr_initiate_ioa_bringdown - Bring down an adapter + * @ioa_cfg: ioa config struct + * @shutdown_type: shutdown type + * + * Description: This function will initiate bringing down the adapter. + * This consists of issuing an IOA shutdown to the adapter + * to flush the cache, and running BIST. 
+ * If the caller needs to wait on the completion of the reset, + * the caller must sleep on the reset_wait_q. + * + * Return value: + * none + **/ +static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg, + enum ipr_shutdown_type shutdown_type) +{ + ENTER; + if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + ioa_cfg->reset_retries = 0; + ioa_cfg->in_ioa_bringdown = 1; + ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); + LEAVE; +} + +/** + * __ipr_remove - Remove a single adapter + * @pdev: pci device struct + * + * Adapter hot plug remove entry point. + * + * Return value: + * none + **/ +static void __ipr_remove(struct pci_dev *pdev) +{ + unsigned long host_lock_flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + ENTER; + + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + + spin_lock(&ipr_driver_lock); + list_del(&ioa_cfg->queue); + spin_unlock(&ipr_driver_lock); + + if (ioa_cfg->sdt_state == ABORT_DUMP) + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + + ipr_free_all_resources(ioa_cfg); + + LEAVE; +} + +/** + * ipr_remove - IOA hot plug remove entry point + * @pdev: pci device struct + * + * Adapter hot plug remove entry point. + * + * Return value: + * none + **/ +static void ipr_remove(struct pci_dev *pdev) +{ + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + + ENTER; + + ioa_cfg->allow_cmds = 0; + flush_scheduled_work(); + ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj, + &ipr_trace_attr); + ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj, + &ipr_dump_attr); + scsi_remove_host(ioa_cfg->host); + + __ipr_remove(pdev); + + LEAVE; +} + +/** + * ipr_probe - Adapter hot plug add entry point + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int __devinit ipr_probe(struct pci_dev *pdev, + const struct pci_device_id *dev_id) +{ + struct ipr_ioa_cfg *ioa_cfg; + int rc; + + rc = ipr_probe_ioa(pdev, dev_id); + + if (rc) + return rc; + + ioa_cfg = pci_get_drvdata(pdev); + rc = ipr_probe_ioa_part2(ioa_cfg); + + if (rc) { + __ipr_remove(pdev); + return rc; + } + + rc = scsi_add_host(ioa_cfg->host, &pdev->dev); + + if (rc) { + __ipr_remove(pdev); + return rc; + } + + rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj, + &ipr_trace_attr); + + if (rc) { + scsi_remove_host(ioa_cfg->host); + __ipr_remove(pdev); + return rc; + } + + rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj, + &ipr_dump_attr); + + if (rc) { + ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj, + &ipr_trace_attr); + scsi_remove_host(ioa_cfg->host); + __ipr_remove(pdev); + return rc; + } + + scsi_scan_host(ioa_cfg->host); + ipr_scan_vsets(ioa_cfg); + scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN); + ioa_cfg->allow_ml_add_del = 1; + schedule_work(&ioa_cfg->work_q); + return 0; +} + +/** + * ipr_shutdown - Shutdown handler. + * @dev: device struct + * + * This function is invoked upon system shutdown/reboot. It will issue + * an adapter shutdown to the adapter to flush the write cache. 
+ * + * Return value: + * none + **/ +static void ipr_shutdown(struct device *dev) +{ + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(to_pci_dev(dev)); + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); +} + +static struct pci_device_id ipr_pci_table[] __devinitdata = { + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, + 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, + 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, + 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, + 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, + 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, + { } +}; +MODULE_DEVICE_TABLE(pci, ipr_pci_table); + +static struct pci_driver ipr_driver = { + .name = IPR_NAME, + .id_table = ipr_pci_table, + .probe = ipr_probe, + .remove = ipr_remove, + .driver = { + .shutdown = ipr_shutdown, + }, +}; + +/** + * ipr_init - Module entry point + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int __init ipr_init(void) +{ + ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", + IPR_DRIVER_VERSION, IPR_DRIVER_DATE); + + pci_register_driver(&ipr_driver); + + return 0; +} + +/** + * ipr_exit - Module unload + * + * Module unload entry point. + * + * Return value: + * none + **/ +static void __exit ipr_exit(void) +{ + pci_unregister_driver(&ipr_driver); +} + +module_init(ipr_init); +module_exit(ipr_exit); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h new file mode 100644 index 000000000..468c80796 --- /dev/null +++ b/drivers/scsi/ipr.h @@ -0,0 +1,1252 @@ +/* + * ipr.h -- driver for IBM Power Linux RAID adapters + * + * Written By: Brian King, IBM Corporation + * + * Copyright (C) 2003, 2004 IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _IPR_H +#define _IPR_H + +#include +#include +#include +#include +#include +#ifdef CONFIG_KDB +#include +#endif + +/* + * Literals + */ +#define IPR_DRIVER_VERSION "2.0.7" +#define IPR_DRIVER_DATE "(May 21, 2004)" + +/* + * IPR_DBG_TRACE: Setting this to 1 will turn on some general function tracing + * resulting in a bunch of extra debugging printks to the console + * + * IPR_DEBUG: Setting this to 1 will turn on some error path tracing. + * Enables the ipr_trace macro. 
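[Editorial note, not part of the patch.] The ipr.h comment above describes compile-time debug switches: IPR_DEBUG_ALL turns on IPR_DEBUG and IPR_DBG_TRACE, which in turn enable extra printks and the ipr_trace/ENTER/LEAVE macros. A tiny stand-alone sketch of the same compile-time gating idea (names here are invented for the example):

#include <stdio.h>

/* Define DEMO_DEBUG_ALL (e.g. -DDEMO_DEBUG_ALL) to turn the tracing on. */
#ifdef DEMO_DEBUG_ALL
#define DEMO_DEBUG 1
#else
#define DEMO_DEBUG 0
#endif

#if DEMO_DEBUG
#define demo_dbg(...) printf("demo: " __VA_ARGS__)
#else
#define demo_dbg(...) do { } while (0)
#endif

int main(void)
{
	demo_dbg("entering %s\n", __func__);	/* compiled out unless DEMO_DEBUG_ALL is set */
	printf("normal output\n");
	return 0;
}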
+ */ +#ifdef IPR_DEBUG_ALL +#define IPR_DEBUG 1 +#define IPR_DBG_TRACE 1 +#else +#define IPR_DEBUG 0 +#define IPR_DBG_TRACE 0 +#endif + +/* + * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding + * ops per device for devices not running tagged command queuing. + * This can be adjusted at runtime through sysfs device attributes. + */ +#define IPR_MAX_CMD_PER_LUN 6 + +/* + * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of + * ops the mid-layer can send to the adapter. + */ +#define IPR_NUM_BASE_CMD_BLKS 100 + +#define IPR_SUBS_DEV_ID_2780 0x0264 +#define IPR_SUBS_DEV_ID_5702 0x0266 +#define IPR_SUBS_DEV_ID_5703 0x0278 +#define IPR_SUBS_DEV_ID_572E 0x02D3 +#define IPR_SUBS_DEV_ID_573D 0x02D4 + +#define IPR_NAME "ipr" + +/* + * Return codes + */ +#define IPR_RC_JOB_CONTINUE 1 +#define IPR_RC_JOB_RETURN 2 + +/* + * IOASCs + */ +#define IPR_IOASC_NR_INIT_CMD_REQUIRED 0x02040200 +#define IPR_IOASC_SYNC_REQUIRED 0x023f0000 +#define IPR_IOASC_MED_DO_NOT_REALLOC 0x03110C00 +#define IPR_IOASC_HW_SEL_TIMEOUT 0x04050000 +#define IPR_IOASC_HW_DEV_BUS_STATUS 0x04448500 +#define IPR_IOASC_IOASC_MASK 0xFFFFFF00 +#define IPR_IOASC_SCSI_STATUS_MASK 0x000000FF +#define IPR_IOASC_IR_RESOURCE_HANDLE 0x05250000 +#define IPR_IOASC_BUS_WAS_RESET 0x06290000 +#define IPR_IOASC_BUS_WAS_RESET_BY_OTHER 0x06298000 +#define IPR_IOASC_ABORTED_CMD_TERM_BY_HOST 0x0B5A0000 + +#define IPR_FIRST_DRIVER_IOASC 0x10000000 +#define IPR_IOASC_IOA_WAS_RESET 0x10000001 +#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002 + +#define IPR_NUM_LOG_HCAMS 2 +#define IPR_NUM_CFG_CHG_HCAMS 2 +#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS) +#define IPR_MAX_NUM_TARGETS_PER_BUS 0x10 +#define IPR_MAX_NUM_LUNS_PER_TARGET 256 +#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8 +#define IPR_VSET_BUS 0xff +#define IPR_IOA_BUS 0xff +#define IPR_IOA_TARGET 0xff +#define IPR_IOA_LUN 0xff +#define IPR_MAX_NUM_BUSES 4 +#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES + +#define IPR_NUM_RESET_RELOAD_RETRIES 3 + +/* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */ +#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ + ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 3) + +#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS +#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ + IPR_NUM_INTERNAL_CMD_BLKS) + +#define IPR_MAX_PHYSICAL_DEVS 192 + +#define IPR_MAX_SGLIST 64 +#define IPR_MAX_SECTORS 512 +#define IPR_MAX_CDB_LEN 16 + +#define IPR_DEFAULT_BUS_WIDTH 16 +#define IPR_80MBs_SCSI_RATE ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) +#define IPR_U160_SCSI_RATE ((160 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) +#define IPR_U320_SCSI_RATE ((320 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) +#define IPR_MAX_SCSI_RATE(width) ((320 * 10) / ((width) / 8)) + +#define IPR_IOA_RES_HANDLE 0xffffffff +#define IPR_IOA_RES_ADDR 0x00ffffff + +/* + * Adapter Commands + */ +#define IPR_RESET_DEVICE 0xC3 +#define IPR_RESET_TYPE_SELECT 0x80 +#define IPR_LUN_RESET 0x40 +#define IPR_TARGET_RESET 0x20 +#define IPR_BUS_RESET 0x10 +#define IPR_ID_HOST_RR_Q 0xC4 +#define IPR_QUERY_IOA_CONFIG 0xC5 +#define IPR_ABORT_TASK 0xC7 +#define IPR_CANCEL_ALL_REQUESTS 0xCE +#define IPR_HOST_CONTROLLED_ASYNC 0xCF +#define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01 +#define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02 +#define IPR_SET_SUPPORTED_DEVICES 0xFB +#define IPR_IOA_SHUTDOWN 0xF7 +#define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05 + +/* + * Timeouts + */ +#define IPR_SHUTDOWN_TIMEOUT (10 * 60 * HZ) +#define IPR_VSET_RW_TIMEOUT (2 * 60 * HZ) +#define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ) 
+#define IPR_DEVICE_RESET_TIMEOUT (30 * HZ) +#define IPR_CANCEL_ALL_TIMEOUT (30 * HZ) +#define IPR_ABORT_TASK_TIMEOUT (30 * HZ) +#define IPR_INTERNAL_TIMEOUT (30 * HZ) +#define IPR_WRITE_BUFFER_TIMEOUT (10 * 60 * HZ) +#define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ) +#define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ) +#define IPR_OPERATIONAL_TIMEOUT (5 * 60 * HZ) +#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ) +#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) +#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) +#define IPR_DUMP_TIMEOUT (15 * HZ) + +/* + * SCSI Literals + */ +#define IPR_VENDOR_ID_LEN 8 +#define IPR_PROD_ID_LEN 16 +#define IPR_SERIAL_NUM_LEN 8 + +/* + * Hardware literals + */ +#define IPR_FMT2_MBX_ADDR_MASK 0x0fffffff +#define IPR_FMT2_MBX_BAR_SEL_MASK 0xf0000000 +#define IPR_FMT2_MKR_BAR_SEL_SHIFT 28 +#define IPR_GET_FMT2_BAR_SEL(mbx) \ +(((mbx) & IPR_FMT2_MBX_BAR_SEL_MASK) >> IPR_FMT2_MKR_BAR_SEL_SHIFT) +#define IPR_SDT_FMT2_BAR0_SEL 0x0 +#define IPR_SDT_FMT2_BAR1_SEL 0x1 +#define IPR_SDT_FMT2_BAR2_SEL 0x2 +#define IPR_SDT_FMT2_BAR3_SEL 0x3 +#define IPR_SDT_FMT2_BAR4_SEL 0x4 +#define IPR_SDT_FMT2_BAR5_SEL 0x5 +#define IPR_SDT_FMT2_EXP_ROM_SEL 0x8 +#define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2 +#define IPR_DOORBELL 0x82800000 + +#define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0) +#define IPR_PCII_IOARCB_XFER_FAILED (0x80000000 >> 3) +#define IPR_PCII_IOA_UNIT_CHECKED (0x80000000 >> 4) +#define IPR_PCII_NO_HOST_RRQ (0x80000000 >> 5) +#define IPR_PCII_CRITICAL_OPERATION (0x80000000 >> 6) +#define IPR_PCII_IO_DEBUG_ACKNOWLEDGE (0x80000000 >> 7) +#define IPR_PCII_IOARRIN_LOST (0x80000000 >> 27) +#define IPR_PCII_MMIO_ERROR (0x80000000 >> 28) +#define IPR_PCII_PROC_ERR_STATE (0x80000000 >> 29) +#define IPR_PCII_HRRQ_UPDATED (0x80000000 >> 30) +#define IPR_PCII_CORE_ISSUED_RST_REQ (0x80000000 >> 31) + +#define IPR_PCII_ERROR_INTERRUPTS \ +(IPR_PCII_IOARCB_XFER_FAILED | IPR_PCII_IOA_UNIT_CHECKED | \ +IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR) + +#define IPR_PCII_OPER_INTERRUPTS \ +(IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED | IPR_PCII_IOA_TRANS_TO_OPER) + +#define IPR_UPROCI_RESET_ALERT (0x80000000 >> 7) +#define IPR_UPROCI_IO_DEBUG_ALERT (0x80000000 >> 9) + +#define IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC 200000 /* 200 ms */ +#define IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC 200000 /* 200 ms */ + +/* + * Dump literals + */ +#define IPR_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024) +#define IPR_NUM_SDT_ENTRIES 511 +#define IPR_MAX_NUM_DUMP_PAGES ((IPR_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1) + +/* + * Misc literals + */ +#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST + +/* + * Adapter interface types + */ + +struct ipr_res_addr { + u8 reserved; + u8 bus; + u8 target; + u8 lun; +#define IPR_GET_PHYS_LOC(res_addr) \ + (((res_addr).bus << 16) | ((res_addr).target << 8) | (res_addr).lun) +}__attribute__((packed, aligned (4))); + +struct ipr_std_inq_vpids { + u8 vendor_id[IPR_VENDOR_ID_LEN]; + u8 product_id[IPR_PROD_ID_LEN]; +}__attribute__((packed)); + +struct ipr_std_inq_data { + u8 peri_qual_dev_type; +#define IPR_STD_INQ_PERI_QUAL(peri) ((peri) >> 5) +#define IPR_STD_INQ_PERI_DEV_TYPE(peri) ((peri) & 0x1F) + + u8 removeable_medium_rsvd; +#define IPR_STD_INQ_REMOVEABLE_MEDIUM 0x80 + +#define IPR_IS_DASD_DEVICE(std_inq) \ +((IPR_STD_INQ_PERI_DEV_TYPE((std_inq).peri_qual_dev_type) == TYPE_DISK) && \ +!(((std_inq).removeable_medium_rsvd) & IPR_STD_INQ_REMOVEABLE_MEDIUM)) + +#define IPR_IS_SES_DEVICE(std_inq) \ +(IPR_STD_INQ_PERI_DEV_TYPE((std_inq).peri_qual_dev_type) == TYPE_ENCLOSURE) + + u8 
version; + u8 aen_naca_fmt; + u8 additional_len; + u8 sccs_rsvd; + u8 bq_enc_multi; + u8 sync_cmdq_flags; + + struct ipr_std_inq_vpids vpids; + + u8 ros_rsvd_ram_rsvd[4]; + + u8 serial_num[IPR_SERIAL_NUM_LEN]; +}__attribute__ ((packed)); + +struct ipr_config_table_entry { + u8 service_level; + u8 array_id; + u8 flags; +#define IPR_IS_IOA_RESOURCE 0x80 +#define IPR_IS_ARRAY_MEMBER 0x20 +#define IPR_IS_HOT_SPARE 0x10 + + u8 rsvd_subtype; +#define IPR_RES_SUBTYPE(res) (((res)->cfgte.rsvd_subtype) & 0x0f) +#define IPR_SUBTYPE_AF_DASD 0 +#define IPR_SUBTYPE_GENERIC_SCSI 1 +#define IPR_SUBTYPE_VOLUME_SET 2 + + struct ipr_res_addr res_addr; + u32 res_handle; + u32 reserved4[2]; + struct ipr_std_inq_data std_inq_data; +}__attribute__ ((packed, aligned (4))); + +struct ipr_config_table_hdr { + u8 num_entries; + u8 flags; +#define IPR_UCODE_DOWNLOAD_REQ 0x10 + u16 reserved; +}__attribute__((packed, aligned (4))); + +struct ipr_config_table { + struct ipr_config_table_hdr hdr; + struct ipr_config_table_entry dev[IPR_MAX_PHYSICAL_DEVS]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_cfg_ch_not { + struct ipr_config_table_entry cfgte; + u8 reserved[936]; +}__attribute__((packed, aligned (4))); + +struct ipr_supported_device { + u16 data_length; + u8 reserved; + u8 num_records; + struct ipr_std_inq_vpids vpids; + u8 reserved2[16]; +}__attribute__((packed, aligned (4))); + +/* Command packet structure */ +struct ipr_cmd_pkt { + u16 reserved; /* Reserved by IOA */ + u8 request_type; +#define IPR_RQTYPE_SCSICDB 0x00 +#define IPR_RQTYPE_IOACMD 0x01 +#define IPR_RQTYPE_HCAM 0x02 + + u8 luntar_luntrn; + + u8 flags_hi; +#define IPR_FLAGS_HI_WRITE_NOT_READ 0x80 +#define IPR_FLAGS_HI_NO_ULEN_CHK 0x20 +#define IPR_FLAGS_HI_SYNC_OVERRIDE 0x10 +#define IPR_FLAGS_HI_SYNC_COMPLETE 0x08 +#define IPR_FLAGS_HI_NO_LINK_DESC 0x04 + + u8 flags_lo; +#define IPR_FLAGS_LO_ALIGNED_BFR 0x20 +#define IPR_FLAGS_LO_DELAY_AFTER_RST 0x10 +#define IPR_FLAGS_LO_UNTAGGED_TASK 0x00 +#define IPR_FLAGS_LO_SIMPLE_TASK 0x02 +#define IPR_FLAGS_LO_ORDERED_TASK 0x04 +#define IPR_FLAGS_LO_HEAD_OF_Q_TASK 0x06 +#define IPR_FLAGS_LO_ACA_TASK 0x08 + + u8 cdb[16]; + u16 timeout; +}__attribute__ ((packed, aligned(4))); + +/* IOA Request Control Block 128 bytes */ +struct ipr_ioarcb { + u32 ioarcb_host_pci_addr; + u32 reserved; + u32 res_handle; + u32 host_response_handle; + u32 reserved1; + u32 reserved2; + u32 reserved3; + + u32 write_data_transfer_length; + u32 read_data_transfer_length; + u32 write_ioadl_addr; + u32 write_ioadl_len; + u32 read_ioadl_addr; + u32 read_ioadl_len; + + u32 ioasa_host_pci_addr; + u16 ioasa_len; + u16 reserved4; + + struct ipr_cmd_pkt cmd_pkt; + + u32 add_cmd_parms_len; + u32 add_cmd_parms[10]; +}__attribute__((packed, aligned (4))); + +struct ipr_ioadl_desc { + u32 flags_and_data_len; +#define IPR_IOADL_FLAGS_MASK 0xff000000 +#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK) +#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff +#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK) +#define IPR_IOADL_FLAGS_READ 0x48000000 +#define IPR_IOADL_FLAGS_READ_LAST 0x49000000 +#define IPR_IOADL_FLAGS_WRITE 0x68000000 +#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000 +#define IPR_IOADL_FLAGS_LAST 0x01000000 + + u32 address; +}__attribute__((packed, aligned (8))); + +struct ipr_ioasa_vset { + u32 failing_lba_hi; + u32 failing_lba_lo; + u32 ioa_data[22]; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa_af_dasd { + u32 failing_lba; +}__attribute__((packed, aligned (4))); + 
+struct ipr_ioasa_gpdd { + u8 end_state; + u8 bus_phase; + u16 reserved; + u32 ioa_data[23]; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa_raw { + u32 ioa_data[24]; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa { + u32 ioasc; +#define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) +#define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) +#define IPR_IOASC_SENSE_QUAL(ioasc) (((ioasc) & 0x0000ff00) >> 8) +#define IPR_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff) + + u16 ret_stat_len; /* Length of the returned IOASA */ + + u16 avail_stat_len; /* Total Length of status available. */ + + u32 residual_data_len; /* number of bytes in the host data */ + /* buffers that were not used by the IOARCB command. */ + + u32 ilid; +#define IPR_NO_ILID 0 +#define IPR_DRIVER_ILID 0xffffffff + + u32 fd_ioasc; + + u32 fd_phys_locator; + + u32 fd_res_handle; + + u32 ioasc_specific; /* status code specific field */ +#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff +#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) +#define IPR_FIELD_POINTER_MASK 0x0000ffff + + union { + struct ipr_ioasa_vset vset; + struct ipr_ioasa_af_dasd dasd; + struct ipr_ioasa_gpdd gpdd; + struct ipr_ioasa_raw raw; + } u; +}__attribute__((packed, aligned (4))); + +struct ipr_mode_parm_hdr { + u8 length; + u8 medium_type; + u8 device_spec_parms; + u8 block_desc_len; +}__attribute__((packed)); + +struct ipr_mode_pages { + struct ipr_mode_parm_hdr hdr; + u8 data[255 - sizeof(struct ipr_mode_parm_hdr)]; +}__attribute__((packed)); + +struct ipr_mode_page_hdr { + u8 ps_page_code; +#define IPR_MODE_PAGE_PS 0x80 +#define IPR_GET_MODE_PAGE_CODE(hdr) ((hdr)->ps_page_code & 0x3F) + u8 page_length; +}__attribute__ ((packed)); + +struct ipr_dev_bus_entry { + struct ipr_res_addr res_addr; + u8 flags; +#define IPR_SCSI_ATTR_ENABLE_QAS 0x80 +#define IPR_SCSI_ATTR_DISABLE_QAS 0x40 +#define IPR_SCSI_ATTR_QAS_MASK 0xC0 +#define IPR_SCSI_ATTR_ENABLE_TM 0x20 +#define IPR_SCSI_ATTR_NO_TERM_PWR 0x10 +#define IPR_SCSI_ATTR_TM_SUPPORTED 0x08 +#define IPR_SCSI_ATTR_LVD_TO_SE_NOT_ALLOWED 0x04 + + u8 scsi_id; + u8 bus_width; + u8 extended_reset_delay; +#define IPR_EXTENDED_RESET_DELAY 7 + + u32 max_xfer_rate; + + u8 spinup_delay; + u8 reserved3; + u16 reserved4; +}__attribute__((packed, aligned (4))); + +struct ipr_mode_page28 { + struct ipr_mode_page_hdr hdr; + u8 num_entries; + u8 entry_length; + struct ipr_dev_bus_entry bus[0]; +}__attribute__((packed)); + +struct ipr_ioa_vpd { + struct ipr_std_inq_data std_inq_data; + u8 ascii_part_num[12]; + u8 reserved[40]; + u8 ascii_plant_code[4]; +}__attribute__((packed)); + +struct ipr_inquiry_page3 { + u8 peri_qual_dev_type; + u8 page_code; + u8 reserved1; + u8 page_length; + u8 ascii_len; + u8 reserved2[3]; + u8 load_id[4]; + u8 major_release; + u8 card_type; + u8 minor_release[2]; + u8 ptf_number[4]; + u8 patch_number[4]; +}__attribute__((packed)); + +struct ipr_hostrcb_device_data_entry { + struct ipr_std_inq_vpids dev_vpids; + u8 dev_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_res_addr dev_res_addr; + struct ipr_std_inq_vpids new_dev_vpids; + u8 new_dev_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids ioa_last_with_dev_vpids; + u8 ioa_last_with_dev_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids cfc_last_with_dev_vpids; + u8 cfc_last_with_dev_sn[IPR_SERIAL_NUM_LEN]; + u32 ioa_data[5]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_array_data_entry { + struct ipr_std_inq_vpids vpids; + u8 serial_num[IPR_SERIAL_NUM_LEN]; + struct ipr_res_addr expected_dev_res_addr; + struct 
ipr_res_addr dev_res_addr; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_ff_error { + u32 ioa_data[246]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_01_error { + u32 seek_counter; + u32 read_counter; + u8 sense_data[32]; + u32 ioa_data[236]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_02_error { + struct ipr_std_inq_vpids ioa_vpids; + u8 ioa_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids cfc_vpids; + u8 cfc_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids ioa_last_attached_to_cfc_vpids; + u8 ioa_last_attached_to_cfc_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids cfc_last_attached_to_ioa_vpids; + u8 cfc_last_attached_to_ioa_sn[IPR_SERIAL_NUM_LEN]; + u32 ioa_data[3]; + u8 reserved[844]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_03_error { + struct ipr_std_inq_vpids ioa_vpids; + u8 ioa_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids cfc_vpids; + u8 cfc_sn[IPR_SERIAL_NUM_LEN]; + u32 errors_detected; + u32 errors_logged; + u8 ioa_data[12]; + struct ipr_hostrcb_device_data_entry dev_entry[3]; + u8 reserved[444]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_04_error { + struct ipr_std_inq_vpids ioa_vpids; + u8 ioa_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids cfc_vpids; + u8 cfc_sn[IPR_SERIAL_NUM_LEN]; + u8 ioa_data[12]; + struct ipr_hostrcb_array_data_entry array_member[10]; + u32 exposed_mode_adn; + u32 array_id; + struct ipr_std_inq_vpids incomp_dev_vpids; + u8 incomp_dev_sn[IPR_SERIAL_NUM_LEN]; + u32 ioa_data2; + struct ipr_hostrcb_array_data_entry array_member2[8]; + struct ipr_res_addr last_func_vset_res_addr; + u8 vset_serial_num[IPR_SERIAL_NUM_LEN]; + u8 protection_level[8]; + u8 reserved[124]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_error { + u32 failing_dev_ioasc; + struct ipr_res_addr failing_dev_res_addr; + u32 failing_dev_res_handle; + u32 prc; + union { + struct ipr_hostrcb_type_ff_error type_ff_error; + struct ipr_hostrcb_type_01_error type_01_error; + struct ipr_hostrcb_type_02_error type_02_error; + struct ipr_hostrcb_type_03_error type_03_error; + struct ipr_hostrcb_type_04_error type_04_error; + } u; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_raw { + u32 data[sizeof(struct ipr_hostrcb_error)/sizeof(u32)]; +}__attribute__((packed, aligned (4))); + +struct ipr_hcam { + u8 op_code; +#define IPR_HOST_RCB_OP_CODE_CONFIG_CHANGE 0xE1 +#define IPR_HOST_RCB_OP_CODE_LOG_DATA 0xE2 + + u8 notify_type; +#define IPR_HOST_RCB_NOTIF_TYPE_EXISTING_CHANGED 0x00 +#define IPR_HOST_RCB_NOTIF_TYPE_NEW_ENTRY 0x01 +#define IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY 0x02 +#define IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY 0x10 +#define IPR_HOST_RCB_NOTIF_TYPE_INFORMATION_ENTRY 0x11 + + u8 notifications_lost; +#define IPR_HOST_RCB_NO_NOTIFICATIONS_LOST 0 +#define IPR_HOST_RCB_NOTIFICATIONS_LOST 0x80 + + u8 flags; +#define IPR_HOSTRCB_INTERNAL_OPER 0x80 +#define IPR_HOSTRCB_ERR_RESP_SENT 0x40 + + u8 overlay_id; +#define IPR_HOST_RCB_OVERLAY_ID_1 0x01 +#define IPR_HOST_RCB_OVERLAY_ID_2 0x02 +#define IPR_HOST_RCB_OVERLAY_ID_3 0x03 +#define IPR_HOST_RCB_OVERLAY_ID_4 0x04 +#define IPR_HOST_RCB_OVERLAY_ID_6 0x06 +#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF + + u8 reserved1[3]; + u32 ilid; + u32 time_since_last_ioa_reset; + u32 reserved2; + u32 length; + + union { + struct ipr_hostrcb_error error; + struct ipr_hostrcb_cfg_ch_not ccn; + struct ipr_hostrcb_raw raw; + } u; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb { + struct ipr_hcam 
hcam; + u32 hostrcb_dma; + struct list_head queue; +}; + +/* IPR smart dump table structures */ +struct ipr_sdt_entry { + u32 bar_str_offset; + u32 end_offset; + u8 entry_byte; + u8 reserved[3]; + + u8 flags; +#define IPR_SDT_ENDIAN 0x80 +#define IPR_SDT_VALID_ENTRY 0x20 + + u8 resv; + u16 priority; +}__attribute__((packed, aligned (4))); + +struct ipr_sdt_header { + u32 state; + u32 num_entries; + u32 num_entries_used; + u32 dump_size; +}__attribute__((packed, aligned (4))); + +struct ipr_sdt { + struct ipr_sdt_header hdr; + struct ipr_sdt_entry entry[IPR_NUM_SDT_ENTRIES]; +}__attribute__((packed, aligned (4))); + +struct ipr_uc_sdt { + struct ipr_sdt_header hdr; + struct ipr_sdt_entry entry[1]; +}__attribute__((packed, aligned (4))); + +/* + * Driver types + */ +struct ipr_bus_attributes { + u8 bus; + u8 qas_enabled; + u8 bus_width; + u8 reserved; + u32 max_xfer_rate; +}; + +struct ipr_resource_entry { + struct ipr_config_table_entry cfgte; + u8 needs_sync_complete:1; + u8 in_erp:1; + u8 add_to_ml:1; + u8 del_from_ml:1; + u8 resetting_device:1; + u8 tcq_active:1; + + int qdepth; + struct scsi_device *sdev; + struct list_head queue; +}; + +struct ipr_resource_hdr { + u16 num_entries; + u16 reserved; +}; + +struct ipr_resource_table { + struct ipr_resource_hdr hdr; + struct ipr_resource_entry dev[IPR_MAX_PHYSICAL_DEVS]; +}; + +struct ipr_misc_cbs { + struct ipr_ioa_vpd ioa_vpd; + struct ipr_inquiry_page3 page3_data; + struct ipr_mode_pages mode_pages; + struct ipr_supported_device supp_dev; +}; + +struct ipr_interrupts { + unsigned long set_interrupt_mask_reg; + unsigned long clr_interrupt_mask_reg; + unsigned long sense_interrupt_mask_reg; + unsigned long clr_interrupt_reg; + + unsigned long sense_interrupt_reg; + unsigned long ioarrin_reg; + unsigned long sense_uproc_interrupt_reg; + unsigned long set_uproc_interrupt_reg; + unsigned long clr_uproc_interrupt_reg; +}; + +struct ipr_chip_cfg_t { + u32 mailbox; + u8 cache_line_size; + struct ipr_interrupts regs; +}; + +enum ipr_shutdown_type { + IPR_SHUTDOWN_NORMAL = 0x00, + IPR_SHUTDOWN_PREPARE_FOR_NORMAL = 0x40, + IPR_SHUTDOWN_ABBREV = 0x80, + IPR_SHUTDOWN_NONE = 0x100 +}; + +struct ipr_trace_entry { + u32 time; + + u8 op_code; + u8 type; +#define IPR_TRACE_START 0x00 +#define IPR_TRACE_FINISH 0xff + u16 cmd_index; + + u32 res_handle; + union { + u32 ioasc; + u32 add_data; + u32 res_addr; + } u; +}; + +struct ipr_sglist { + u32 order; + u32 num_sg; + u32 buffer_len; + struct scatterlist scatterlist[1]; +}; + +enum ipr_sdt_state { + INACTIVE, + WAIT_FOR_DUMP, + GET_DUMP, + ABORT_DUMP, + DUMP_OBTAINED +}; + +/* Per-controller data */ +struct ipr_ioa_cfg { + char eye_catcher[8]; +#define IPR_EYECATCHER "iprcfg" + + struct list_head queue; + + u8 allow_interrupts:1; + u8 in_reset_reload:1; + u8 in_ioa_bringdown:1; + u8 ioa_unit_checked:1; + u8 ioa_is_dead:1; + u8 dump_taken:1; + u8 allow_cmds:1; + u8 allow_ml_add_del:1; + + u16 type; /* CCIN of the card */ + + u8 log_level; +#define IPR_MAX_LOG_LEVEL 4 +#define IPR_DEFAULT_LOG_LEVEL 2 + +#define IPR_NUM_TRACE_INDEX_BITS 8 +#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) +#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) + char trace_start[8]; +#define IPR_TRACE_START_LABEL "trace" + struct ipr_trace_entry *trace; + u32 trace_index:IPR_NUM_TRACE_INDEX_BITS; + + /* + * Queue for free command blocks + */ + char ipr_free_label[8]; +#define IPR_FREEQ_LABEL "free-q" + struct list_head free_q; + + /* + * Queue for command blocks outstanding to the adapter + 
*/ + char ipr_pending_label[8]; +#define IPR_PENDQ_LABEL "pend-q" + struct list_head pending_q; + + char cfg_table_start[8]; +#define IPR_CFG_TBL_START "cfg" + struct ipr_config_table *cfg_table; + u32 cfg_table_dma; + + char resource_table_label[8]; +#define IPR_RES_TABLE_LABEL "res_tbl" + struct ipr_resource_entry *res_entries; + struct list_head free_res_q; + struct list_head used_res_q; + + char ipr_hcam_label[8]; +#define IPR_HCAM_LABEL "hcams" + struct ipr_hostrcb *hostrcb[IPR_NUM_HCAMS]; + u32 hostrcb_dma[IPR_NUM_HCAMS]; + struct list_head hostrcb_free_q; + struct list_head hostrcb_pending_q; + + u32 *host_rrq; + u32 host_rrq_dma; +#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc +#define IPR_HRRQ_RESP_BIT_SET 0x00000002 +#define IPR_HRRQ_TOGGLE_BIT 0x00000001 +#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2 + volatile u32 *hrrq_start; + volatile u32 *hrrq_end; + volatile u32 *hrrq_curr; + volatile u32 toggle_bit; + + struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES]; + + const struct ipr_chip_cfg_t *chip_cfg; + + unsigned long hdw_dma_regs; /* iomapped PCI memory space */ + unsigned long hdw_dma_regs_pci; /* raw PCI memory space */ + unsigned long ioa_mailbox; + struct ipr_interrupts regs; + + u32 pci_cfg_buf[64]; + u16 saved_pcix_cmd_reg; + u16 reset_retries; + + u32 errors_logged; + + struct Scsi_Host *host; + struct pci_dev *pdev; + struct ipr_sglist *ucode_sglist; + struct ipr_mode_pages *saved_mode_pages; + u8 saved_mode_page_len; + + struct work_struct work_q; + + wait_queue_head_t reset_wait_q; + + struct ipr_dump *dump; + enum ipr_sdt_state sdt_state; + + struct ipr_misc_cbs *vpd_cbs; + u32 vpd_cbs_dma; + + struct pci_pool *ipr_cmd_pool; + + struct ipr_cmnd *reset_cmd; + + char ipr_cmd_label[8]; +#define IPR_CMD_LABEL "ipr_cmnd" + struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; + u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; +}; + +struct ipr_cmnd { + struct ipr_ioarcb ioarcb; + struct ipr_ioasa ioasa; + struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; + struct list_head queue; + struct scsi_cmnd *scsi_cmd; + struct completion completion; + struct timer_list timer; + void (*done) (struct ipr_cmnd *); + int (*job_step) (struct ipr_cmnd *); + u16 cmd_index; + u8 sense_buffer[SCSI_SENSE_BUFFERSIZE]; + dma_addr_t sense_buffer_dma; + unsigned short dma_use_sg; + dma_addr_t dma_handle; + union { + enum ipr_shutdown_type shutdown_type; + struct ipr_hostrcb *hostrcb; + unsigned long time_left; + unsigned long scratch; + struct ipr_resource_entry *res; + struct ipr_cmnd *sibling; + struct scsi_device *sdev; + } u; + + struct ipr_ioa_cfg *ioa_cfg; +}; + +struct ipr_ses_table_entry { + char product_id[17]; + char compare_product_id_byte[17]; + u32 max_bus_speed_limit; /* MB/sec limit for this backplane */ +}; + +struct ipr_dump_header { + u32 eye_catcher; +#define IPR_DUMP_EYE_CATCHER 0xC5D4E3F2 + u32 len; + u32 num_entries; + u32 first_entry_offset; + u32 status; +#define IPR_DUMP_STATUS_SUCCESS 0 +#define IPR_DUMP_STATUS_QUAL_SUCCESS 2 +#define IPR_DUMP_STATUS_FAILED 0xffffffff + u32 os; +#define IPR_DUMP_OS_LINUX 0x4C4E5558 + u32 driver_name; +#define IPR_DUMP_DRIVER_NAME 0x49505232 +}__attribute__((packed, aligned (4))); + +struct ipr_dump_entry_header { + u32 eye_catcher; +#define IPR_DUMP_EYE_CATCHER 0xC5D4E3F2 + u32 len; + u32 num_elems; + u32 offset; + u32 data_type; +#define IPR_DUMP_DATA_TYPE_ASCII 0x41534349 +#define IPR_DUMP_DATA_TYPE_BINARY 0x42494E41 + u32 id; +#define IPR_DUMP_IOA_DUMP_ID 0x494F4131 +#define IPR_DUMP_LOCATION_ID 0x4C4F4341 +#define IPR_DUMP_TRACE_ID 
0x54524143 +#define IPR_DUMP_DRIVER_VERSION_ID 0x44525652 +#define IPR_DUMP_DRIVER_TYPE_ID 0x54595045 +#define IPR_DUMP_IOA_CTRL_BLK 0x494F4342 +#define IPR_DUMP_PEND_OPS 0x414F5053 + u32 status; +}__attribute__((packed, aligned (4))); + +struct ipr_dump_location_entry { + struct ipr_dump_entry_header hdr; + u8 location[BUS_ID_SIZE]; +}__attribute__((packed)); + +struct ipr_dump_trace_entry { + struct ipr_dump_entry_header hdr; + u32 trace[IPR_TRACE_SIZE / sizeof(u32)]; +}__attribute__((packed, aligned (4))); + +struct ipr_dump_version_entry { + struct ipr_dump_entry_header hdr; + u8 version[sizeof(IPR_DRIVER_VERSION)]; +}; + +struct ipr_dump_ioa_type_entry { + struct ipr_dump_entry_header hdr; + u32 type; + u32 fw_version; +}; + +struct ipr_driver_dump { + struct ipr_dump_header hdr; + struct ipr_dump_version_entry version_entry; + struct ipr_dump_location_entry location_entry; + struct ipr_dump_ioa_type_entry ioa_type_entry; + struct ipr_dump_trace_entry trace_entry; +}__attribute__((packed)); + +struct ipr_ioa_dump { + struct ipr_dump_entry_header hdr; + struct ipr_sdt sdt; + u32 *ioa_data[IPR_MAX_NUM_DUMP_PAGES]; + u32 reserved; + u32 next_page_index; + u32 page_offset; + u32 format; +#define IPR_SDT_FMT2 2 +#define IPR_SDT_UNKNOWN 3 +}__attribute__((packed, aligned (4))); + +struct ipr_dump { + struct kobject kobj; + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_driver_dump driver_dump; + struct ipr_ioa_dump ioa_dump; +}; + +struct ipr_error_table_t { + u32 ioasc; + int log_ioasa; + int log_hcam; + char *error; +}; + +struct ipr_software_inq_lid_info { + u32 load_id; + u32 timestamp[3]; +}__attribute__((packed, aligned (4))); + +struct ipr_ucode_image_header { + u32 header_length; + u32 lid_table_offset; + u8 major_release; + u8 card_type; + u8 minor_release[2]; + u8 reserved[20]; + char eyecatcher[16]; + u32 num_lids; + struct ipr_software_inq_lid_info lid[1]; +}__attribute__((packed, aligned (4))); + +/* + * Macros + */ +#if IPR_DEBUG +#define IPR_DBG_CMD(CMD) do { CMD; } while (0) +#else +#define IPR_DBG_CMD(CMD) +#endif + +#define ipr_breakpoint_data KERN_ERR IPR_NAME\ +": %s: %s: Line: %d ioa_cfg: %p\n", __FILE__, \ +__FUNCTION__, __LINE__, ioa_cfg + +#if defined(CONFIG_KDB) && !defined(CONFIG_PPC_ISERIES) +#define ipr_breakpoint {printk(ipr_breakpoint_data); KDB_ENTER();} +#define ipr_breakpoint_or_die {printk(ipr_breakpoint_data); KDB_ENTER();} +#else +#define ipr_breakpoint +#define ipr_breakpoint_or_die panic(ipr_breakpoint_data) +#endif + +#ifdef CONFIG_SCSI_IPR_TRACE +#define ipr_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr) +#define ipr_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr) +#else +#define ipr_create_trace_file(kobj, attr) 0 +#define ipr_remove_trace_file(kobj, attr) do { } while(0) +#endif + +#ifdef CONFIG_SCSI_IPR_DUMP +#define ipr_create_dump_file(kobj, attr) sysfs_create_bin_file(kobj, attr) +#define ipr_remove_dump_file(kobj, attr) sysfs_remove_bin_file(kobj, attr) +#else +#define ipr_create_dump_file(kobj, attr) 0 +#define ipr_remove_dump_file(kobj, attr) do { } while(0) +#endif + +/* + * Error logging macros + */ +#define ipr_err(...) printk(KERN_ERR IPR_NAME ": "__VA_ARGS__) +#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__) +#define ipr_crit(...) printk(KERN_CRIT IPR_NAME ": "__VA_ARGS__) +#define ipr_warn(...) printk(KERN_WARNING IPR_NAME": "__VA_ARGS__) +#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)) + +#define ipr_sdev_printk(level, sdev, fmt, ...) 
\ + printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, sdev->host->host_no, \ + sdev->channel, sdev->id, sdev->lun, ##__VA_ARGS__) + +#define ipr_sdev_err(sdev, fmt, ...) \ + ipr_sdev_printk(KERN_ERR, sdev, fmt, ##__VA_ARGS__) + +#define ipr_sdev_info(sdev, fmt, ...) \ + ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__) + +#define ipr_sdev_dbg(sdev, fmt, ...) \ + IPR_DBG_CMD(ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__)) + +#define ipr_res_printk(level, ioa_cfg, res, fmt, ...) \ + printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, ioa_cfg->host->host_no, \ + res.bus, res.target, res.lun, ##__VA_ARGS__) + +#define ipr_res_err(ioa_cfg, res, fmt, ...) \ + ipr_res_printk(KERN_ERR, ioa_cfg, res, fmt, ##__VA_ARGS__) +#define ipr_res_dbg(ioa_cfg, res, fmt, ...) \ + IPR_DBG_CMD(ipr_res_printk(KERN_INFO, ioa_cfg, res, fmt, ##__VA_ARGS__)) + +#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ + __FILE__, __FUNCTION__, __LINE__) + +#if IPR_DBG_TRACE +#define ENTER printk(KERN_INFO IPR_NAME": Entering %s\n", __FUNCTION__) +#define LEAVE printk(KERN_INFO IPR_NAME": Leaving %s\n", __FUNCTION__) +#else +#define ENTER +#define LEAVE +#endif + +#define ipr_err_separator \ +ipr_err("----------------------------------------------------------\n") + + +/* + * Inlines + */ + +/** + * ipr_is_ioa_resource - Determine if a resource is the IOA + * @res: resource entry struct + * + * Return value: + * 1 if IOA / 0 if not IOA + **/ +static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res) +{ + return (res->cfgte.flags & IPR_IS_IOA_RESOURCE) ? 1 : 0; +} + +/** + * ipr_is_af_dasd_device - Determine if a resource is an AF DASD + * @res: resource entry struct + * + * Return value: + * 1 if AF DASD / 0 if not AF DASD + **/ +static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res) +{ + if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && + !ipr_is_ioa_resource(res) && + IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_AF_DASD) + return 1; + else + return 0; +} + +/** + * ipr_is_vset_device - Determine if a resource is a VSET + * @res: resource entry struct + * + * Return value: + * 1 if VSET / 0 if not VSET + **/ +static inline int ipr_is_vset_device(struct ipr_resource_entry *res) +{ + if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && + !ipr_is_ioa_resource(res) && + IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_VOLUME_SET) + return 1; + else + return 0; +} + +/** + * ipr_is_gscsi - Determine if a resource is a generic scsi resource + * @res: resource entry struct + * + * Return value: + * 1 if GSCSI / 0 if not GSCSI + **/ +static inline int ipr_is_gscsi(struct ipr_resource_entry *res) +{ + if (!ipr_is_ioa_resource(res) && + IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_SCSI) + return 1; + else + return 0; +} + +/** + * ipr_is_device - Determine if resource address is that of a device + * @res_addr: resource address struct + * + * Return value: + * 1 if AF / 0 if not AF + **/ +static inline int ipr_is_device(struct ipr_res_addr *res_addr) +{ + if ((res_addr->bus < IPR_MAX_NUM_BUSES) && + (res_addr->target < IPR_MAX_NUM_TARGETS_PER_BUS)) + return 1; + + return 0; +} + +/** + * ipr_sdt_is_fmt2 - Determine if a SDT address is in format 2 + * @sdt_word: SDT address + * + * Return value: + * 1 if format 2 / 0 if not + **/ +static inline int ipr_sdt_is_fmt2(u32 sdt_word) +{ + u32 bar_sel = IPR_GET_FMT2_BAR_SEL(sdt_word); + + switch (bar_sel) { + case IPR_SDT_FMT2_BAR0_SEL: + case IPR_SDT_FMT2_BAR1_SEL: + case IPR_SDT_FMT2_BAR2_SEL: + case IPR_SDT_FMT2_BAR3_SEL: + case IPR_SDT_FMT2_BAR4_SEL: + case IPR_SDT_FMT2_BAR5_SEL: + case 
IPR_SDT_FMT2_EXP_ROM_SEL: + return 1; + }; + + return 0; +} + +#endif diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c new file mode 100644 index 000000000..4277db3b6 --- /dev/null +++ b/drivers/scsi/pcmcia/sym53c500_cs.c @@ -0,0 +1,1042 @@ +/* +* sym53c500_cs.c Bob Tracy (rct@frus.com) +* +* A rewrite of the pcmcia-cs add-on driver for newer (circa 1997) +* New Media Bus Toaster PCMCIA SCSI cards using the Symbios Logic +* 53c500 controller: intended for use with 2.6 and later kernels. +* The pcmcia-cs add-on version of this driver is not supported +* beyond 2.4. It consisted of three files with history/copyright +* information as follows: +* +* SYM53C500.h +* Bob Tracy (rct@frus.com) +* Original by Tom Corner (tcorner@via.at). +* Adapted from NCR53c406a.h which is Copyrighted (C) 1994 +* Normunds Saumanis (normunds@rx.tech.swh.lv) +* +* SYM53C500.c +* Bob Tracy (rct@frus.com) +* Original driver by Tom Corner (tcorner@via.at) was adapted +* from NCR53c406a.c which is Copyrighted (C) 1994, 1995, 1996 +* Normunds Saumanis (normunds@fi.ibm.com) +* +* sym53c500.c +* Bob Tracy (rct@frus.com) +* Original by Tom Corner (tcorner@via.at) was adapted from a +* driver for the Qlogic SCSI card written by +* David Hinds (dhinds@allegro.stanford.edu). +* +* This program is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License as published by the +* Free Software Foundation; either version 2, or (at your option) any +* later version. +* +* This program is distributed in the hope that it will be useful, but +* WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* General Public License for more details. +*/ + +#define SYM53C500_DEBUG 0 +#define VERBOSE_SYM53C500_DEBUG 0 + +/* +* Set this to 0 if you encounter kernel lockups while transferring +* data in PIO mode. Note this can be changed via "sysfs". +*/ +#define USE_FAST_PIO 1 + +/* =============== End of user configurable parameters ============== */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* ================================================================== */ + +#ifdef PCMCIA_DEBUG +static int pc_debug = PCMCIA_DEBUG; +module_param(pc_debug, int, 0); +#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) +static char *version = +"sym53c500_cs.c 0.9b 2004/05/10 (Bob Tracy)"; +#else +#define DEBUG(n, args...) 
+#endif + +/* ================================================================== */ + +/* Parameters that can be set with 'insmod' */ + +/* Bit map of interrupts to choose from */ +static unsigned int irq_mask = 0xdeb8; /* 3, 6, 7, 9-12, 14, 15 */ +static int irq_list[4] = { -1 }; +static int num_irqs = 1; + +module_param(irq_mask, int, 0); +MODULE_PARM_DESC(irq_mask, "IRQ mask bits (default: 0xdeb8)"); +module_param_array(irq_list, int, num_irqs, 0); +MODULE_PARM_DESC(irq_list, "Comma-separated list of up to 4 IRQs to try (default: auto select)."); + +/* ================================================================== */ + +#define SYNC_MODE 0 /* Synchronous transfer mode */ + +/* Default configuration */ +#define C1_IMG 0x07 /* ID=7 */ +#define C2_IMG 0x48 /* FE SCSI2 */ +#define C3_IMG 0x20 /* CDB */ +#define C4_IMG 0x04 /* ANE */ +#define C5_IMG 0xa4 /* ? changed from b6= AA PI SIE POL */ +#define C7_IMG 0x80 /* added for SYM53C500 t. corner */ + +/* Hardware Registers: offsets from io_port (base) */ + +/* Control Register Set 0 */ +#define TC_LSB 0x00 /* transfer counter lsb */ +#define TC_MSB 0x01 /* transfer counter msb */ +#define SCSI_FIFO 0x02 /* scsi fifo register */ +#define CMD_REG 0x03 /* command register */ +#define STAT_REG 0x04 /* status register */ +#define DEST_ID 0x04 /* selection/reselection bus id */ +#define INT_REG 0x05 /* interrupt status register */ +#define SRTIMOUT 0x05 /* select/reselect timeout reg */ +#define SEQ_REG 0x06 /* sequence step register */ +#define SYNCPRD 0x06 /* synchronous transfer period */ +#define FIFO_FLAGS 0x07 /* indicates # of bytes in fifo */ +#define SYNCOFF 0x07 /* synchronous offset register */ +#define CONFIG1 0x08 /* configuration register */ +#define CLKCONV 0x09 /* clock conversion register */ +/* #define TESTREG 0x0A */ /* test mode register */ +#define CONFIG2 0x0B /* configuration 2 register */ +#define CONFIG3 0x0C /* configuration 3 register */ +#define CONFIG4 0x0D /* configuration 4 register */ +#define TC_HIGH 0x0E /* transfer counter high */ +/* #define FIFO_BOTTOM 0x0F */ /* reserve FIFO byte register */ + +/* Control Register Set 1 */ +/* #define JUMPER_SENSE 0x00 */ /* jumper sense port reg (r/w) */ +/* #define SRAM_PTR 0x01 */ /* SRAM address pointer reg (r/w) */ +/* #define SRAM_DATA 0x02 */ /* SRAM data register (r/w) */ +#define PIO_FIFO 0x04 /* PIO FIFO registers (r/w) */ +/* #define PIO_FIFO1 0x05 */ /* */ +/* #define PIO_FIFO2 0x06 */ /* */ +/* #define PIO_FIFO3 0x07 */ /* */ +#define PIO_STATUS 0x08 /* PIO status (r/w) */ +/* #define ATA_CMD 0x09 */ /* ATA command/status reg (r/w) */ +/* #define ATA_ERR 0x0A */ /* ATA features/error reg (r/w) */ +#define PIO_FLAG 0x0B /* PIO flag interrupt enable (r/w) */ +#define CONFIG5 0x09 /* configuration 5 register */ +/* #define SIGNATURE 0x0E */ /* signature register (r) */ +/* #define CONFIG6 0x0F */ /* configuration 6 register (r) */ +#define CONFIG7 0x0d + +/* select register set 0 */ +#define REG0(x) (outb(C4_IMG, (x) + CONFIG4)) +/* select register set 1 */ +#define REG1(x) outb(C7_IMG, (x) + CONFIG7); outb(C5_IMG, (x) + CONFIG5) + +#if SYM53C500_DEBUG +#define DEB(x) x +#else +#define DEB(x) +#endif + +#if VERBOSE_SYM53C500_DEBUG +#define VDEB(x) x +#else +#define VDEB(x) +#endif + +#define LOAD_DMA_COUNT(x, count) \ + outb(count & 0xff, (x) + TC_LSB); \ + outb((count >> 8) & 0xff, (x) + TC_MSB); \ + outb((count >> 16) & 0xff, (x) + TC_HIGH); + +/* Chip commands */ +#define DMA_OP 0x80 + +#define SCSI_NOP 0x00 +#define FLUSH_FIFO 0x01 +#define CHIP_RESET 0x02 
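/*
 * Illustrative sketch, not from the original driver: LOAD_DMA_COUNT() above
 * programs the chip's 24-bit transfer counter one byte at a time. The unused
 * helper below restates that split using the register offsets defined earlier
 * in this file; the function name is hypothetical.
 */
static inline void sym53c500_load_dma_count_sketch(int io_port, unsigned int count)
{
	outb(count & 0xff, io_port + TC_LSB);		/* bits 0-7 of the count  */
	outb((count >> 8) & 0xff, io_port + TC_MSB);	/* bits 8-15              */
	outb((count >> 16) & 0xff, io_port + TC_HIGH);	/* bits 16-23             */
}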
+#define SCSI_RESET 0x03 +#define RESELECT 0x40 +#define SELECT_NO_ATN 0x41 +#define SELECT_ATN 0x42 +#define SELECT_ATN_STOP 0x43 +#define ENABLE_SEL 0x44 +#define DISABLE_SEL 0x45 +#define SELECT_ATN3 0x46 +#define RESELECT3 0x47 +#define TRANSFER_INFO 0x10 +#define INIT_CMD_COMPLETE 0x11 +#define MSG_ACCEPT 0x12 +#define TRANSFER_PAD 0x18 +#define SET_ATN 0x1a +#define RESET_ATN 0x1b +#define SEND_MSG 0x20 +#define SEND_STATUS 0x21 +#define SEND_DATA 0x22 +#define DISCONN_SEQ 0x23 +#define TERMINATE_SEQ 0x24 +#define TARG_CMD_COMPLETE 0x25 +#define DISCONN 0x27 +#define RECV_MSG 0x28 +#define RECV_CMD 0x29 +#define RECV_DATA 0x2a +#define RECV_CMD_SEQ 0x2b +#define TARGET_ABORT_DMA 0x04 + +/* ================================================================== */ + +struct scsi_info_t { + dev_link_t link; + dev_node_t node; + struct Scsi_Host *host; + unsigned short manf_id; +}; + +/* +* Repository for per-instance host data. +*/ +struct sym53c500_data { + struct scsi_cmnd *current_SC; + int fast_pio; +}; + +enum Phase { + idle, + data_out, + data_in, + command_ph, + status_ph, + message_out, + message_in +}; + +/* ================================================================== */ + +/* +* Global (within this module) variables other than +* sym53c500_driver_template (the scsi_host_template). +*/ +static dev_link_t *dev_list; +static dev_info_t dev_info = "sym53c500_cs"; + +/* ================================================================== */ + +static void +chip_init(int io_port) +{ + REG1(io_port); + outb(0x01, io_port + PIO_STATUS); + outb(0x00, io_port + PIO_FLAG); + + outb(C4_IMG, io_port + CONFIG4); /* REG0(io_port); */ + outb(C3_IMG, io_port + CONFIG3); + outb(C2_IMG, io_port + CONFIG2); + outb(C1_IMG, io_port + CONFIG1); + + outb(0x05, io_port + CLKCONV); /* clock conversion factor */ + outb(0x9C, io_port + SRTIMOUT); /* Selection timeout */ + outb(0x05, io_port + SYNCPRD); /* Synchronous transfer period */ + outb(SYNC_MODE, io_port + SYNCOFF); /* synchronous mode */ +} + +static void +SYM53C500_int_host_reset(int io_port) +{ + outb(C4_IMG, io_port + CONFIG4); /* REG0(io_port); */ + outb(CHIP_RESET, io_port + CMD_REG); + outb(SCSI_NOP, io_port + CMD_REG); /* required after reset */ + outb(SCSI_RESET, io_port + CMD_REG); + chip_init(io_port); +} + +static __inline__ int +SYM53C500_pio_read(int fast_pio, int base, unsigned char *request, unsigned int reqlen) +{ + int i; + int len; /* current scsi fifo size */ + + REG1(base); + while (reqlen) { + i = inb(base + PIO_STATUS); + /* VDEB(printk("pio_status=%x\n", i)); */ + if (i & 0x80) + return 0; + + switch (i & 0x1e) { + default: + case 0x10: /* fifo empty */ + len = 0; + break; + case 0x0: + len = 1; + break; + case 0x8: /* fifo 1/3 full */ + len = 42; + break; + case 0xc: /* fifo 2/3 full */ + len = 84; + break; + case 0xe: /* fifo full */ + len = 128; + break; + } + + if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */ + return 0; + } + + if (len) { + if (len > reqlen) + len = reqlen; + + if (fast_pio && len > 3) { + insl(base + PIO_FIFO, request, len >> 2); + request += len & 0xfc; + reqlen -= len & 0xfc; + } else { + while (len--) { + *request++ = inb(base + PIO_FIFO); + reqlen--; + } + } + } + } + return 0; +} + +static __inline__ int +SYM53C500_pio_write(int fast_pio, int base, unsigned char *request, unsigned int reqlen) +{ + int i = 0; + int len; /* current scsi fifo size */ + + REG1(base); + while (reqlen && !(i & 0x40)) { + i = inb(base + PIO_STATUS); + /* VDEB(printk("pio_status=%x\n", i)); */ + if 
(i & 0x80) /* error */ + return 0; + + switch (i & 0x1e) { + case 0x10: + len = 128; + break; + case 0x0: + len = 84; + break; + case 0x8: + len = 42; + break; + case 0xc: + len = 1; + break; + default: + case 0xe: + len = 0; + break; + } + + if (len) { + if (len > reqlen) + len = reqlen; + + if (fast_pio && len > 3) { + outsl(base + PIO_FIFO, request, len >> 2); + request += len & 0xfc; + reqlen -= len & 0xfc; + } else { + while (len--) { + outb(*request++, base + PIO_FIFO); + reqlen--; + } + } + } + } + return 0; +} + +static irqreturn_t +SYM53C500_intr(int irq, void *dev_id, struct pt_regs *regs) +{ + unsigned long flags; + struct Scsi_Host *dev = dev_id; + DEB(unsigned char fifo_size;) + DEB(unsigned char seq_reg;) + unsigned char status, int_reg; + unsigned char pio_status; + struct scatterlist *sglist; + unsigned int sgcount; + int port_base = dev->io_port; + struct sym53c500_data *data = + (struct sym53c500_data *)dev->hostdata; + struct scsi_cmnd *curSC = data->current_SC; + int fast_pio = data->fast_pio; + + spin_lock_irqsave(dev->host_lock, flags); + + VDEB(printk("SYM53C500_intr called\n")); + + REG1(port_base); + pio_status = inb(port_base + PIO_STATUS); + REG0(port_base); + status = inb(port_base + STAT_REG); + DEB(seq_reg = inb(port_base + SEQ_REG)); + int_reg = inb(port_base + INT_REG); + DEB(fifo_size = inb(port_base + FIFO_FLAGS) & 0x1f); + +#if SYM53C500_DEBUG + printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x", + status, seq_reg, int_reg, fifo_size); + printk(", pio=%02x\n", pio_status); +#endif /* SYM53C500_DEBUG */ + + if (int_reg & 0x80) { /* SCSI reset intr */ + DEB(printk("SYM53C500: reset intr received\n")); + curSC->result = DID_RESET << 16; + goto idle_out; + } + + if (pio_status & 0x80) { + printk("SYM53C500: Warning: PIO error!\n"); + curSC->result = DID_ERROR << 16; + goto idle_out; + } + + if (status & 0x20) { /* Parity error */ + printk("SYM53C500: Warning: parity error!\n"); + curSC->result = DID_PARITY << 16; + goto idle_out; + } + + if (status & 0x40) { /* Gross error */ + printk("SYM53C500: Warning: gross error!\n"); + curSC->result = DID_ERROR << 16; + goto idle_out; + } + + if (int_reg & 0x20) { /* Disconnect */ + DEB(printk("SYM53C500: disconnect intr received\n")); + if (curSC->SCp.phase != message_in) { /* Unexpected disconnect */ + curSC->result = DID_NO_CONNECT << 16; + } else { /* Command complete, return status and message */ + curSC->result = (curSC->SCp.Status & 0xff) + | ((curSC->SCp.Message & 0xff) << 8) | (DID_OK << 16); + } + goto idle_out; + } + + switch (status & 0x07) { /* scsi phase */ + case 0x00: /* DATA-OUT */ + if (int_reg & 0x10) { /* Target requesting info transfer */ + curSC->SCp.phase = data_out; + VDEB(printk("SYM53C500: Data-Out phase\n")); + outb(FLUSH_FIFO, port_base + CMD_REG); + LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */ + outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); + if (!curSC->use_sg) /* Don't use scatter-gather */ + SYM53C500_pio_write(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen); + else { /* use scatter-gather */ + sgcount = curSC->use_sg; + sglist = curSC->request_buffer; + while (sgcount--) { + SYM53C500_pio_write(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length); + sglist++; + } + } + REG0(port_base); + } + break; + + case 0x01: /* DATA-IN */ + if (int_reg & 0x10) { /* Target requesting info transfer */ + curSC->SCp.phase = data_in; + VDEB(printk("SYM53C500: Data-In phase\n")); + outb(FLUSH_FIFO, port_base 
+ CMD_REG); + LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */ + outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); + if (!curSC->use_sg) /* Don't use scatter-gather */ + SYM53C500_pio_read(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen); + else { /* Use scatter-gather */ + sgcount = curSC->use_sg; + sglist = curSC->request_buffer; + while (sgcount--) { + SYM53C500_pio_read(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length); + sglist++; + } + } + REG0(port_base); + } + break; + + case 0x02: /* COMMAND */ + curSC->SCp.phase = command_ph; + printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n"); + break; + + case 0x03: /* STATUS */ + curSC->SCp.phase = status_ph; + VDEB(printk("SYM53C500: Status phase\n")); + outb(FLUSH_FIFO, port_base + CMD_REG); + outb(INIT_CMD_COMPLETE, port_base + CMD_REG); + break; + + case 0x04: /* Reserved */ + case 0x05: /* Reserved */ + printk("SYM53C500: WARNING: Reserved phase!!!\n"); + break; + + case 0x06: /* MESSAGE-OUT */ + DEB(printk("SYM53C500: Message-Out phase\n")); + curSC->SCp.phase = message_out; + outb(SET_ATN, port_base + CMD_REG); /* Reject the message */ + outb(MSG_ACCEPT, port_base + CMD_REG); + break; + + case 0x07: /* MESSAGE-IN */ + VDEB(printk("SYM53C500: Message-In phase\n")); + curSC->SCp.phase = message_in; + + curSC->SCp.Status = inb(port_base + SCSI_FIFO); + curSC->SCp.Message = inb(port_base + SCSI_FIFO); + + VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f)); + DEB(printk("Status = %02x Message = %02x\n", curSC->SCp.Status, curSC->SCp.Message)); + + if (curSC->SCp.Message == SAVE_POINTERS || curSC->SCp.Message == DISCONNECT) { + outb(SET_ATN, port_base + CMD_REG); /* Reject message */ + DEB(printk("Discarding SAVE_POINTERS message\n")); + } + outb(MSG_ACCEPT, port_base + CMD_REG); + break; + } +out: + spin_unlock_irqrestore(dev->host_lock, flags); + return IRQ_HANDLED; + +idle_out: + curSC->SCp.phase = idle; + curSC->scsi_done(curSC); + goto out; +} + +static void +SYM53C500_release(dev_link_t *link) +{ + struct scsi_info_t *info = link->priv; + struct Scsi_Host *shost = info->host; + + DEBUG(0, "SYM53C500_release(0x%p)\n", link); + + /* + * Do this before releasing/freeing resources. + */ + scsi_remove_host(shost); + + /* + * Interrupts getting hosed on card removal. Try + * the following code, mostly from qlogicfas.c. + */ + if (shost->irq) + free_irq(shost->irq, shost); + if (shost->dma_channel != 0xff) + free_dma(shost->dma_channel); + if (shost->io_port && shost->n_io_port) + release_region(shost->io_port, shost->n_io_port); + + link->dev = NULL; + + pcmcia_release_configuration(link->handle); + pcmcia_release_io(link->handle, &link->io); + pcmcia_release_irq(link->handle, &link->irq); + + link->state &= ~DEV_CONFIG; + + scsi_host_put(shost); +} /* SYM53C500_release */ + +static const char* +SYM53C500_info(struct Scsi_Host *SChost) +{ + static char info_msg[256]; + struct sym53c500_data *data = + (struct sym53c500_data *)SChost->hostdata; + + DEB(printk("SYM53C500_info called\n")); + (void)snprintf(info_msg, sizeof(info_msg), + "SYM53C500 at 0x%lx, IRQ %d, %s PIO mode.", + SChost->io_port, SChost->irq, data->fast_pio ? 
"fast" : "slow"); + return (info_msg); +} + +static int +SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +{ + int i; + int port_base = SCpnt->device->host->io_port; + struct sym53c500_data *data = + (struct sym53c500_data *)SCpnt->device->host->hostdata; + + VDEB(printk("SYM53C500_queue called\n")); + + DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", + SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id, + SCpnt->device->lun, SCpnt->request_bufflen)); + + VDEB(for (i = 0; i < SCpnt->cmd_len; i++) + printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i])); + VDEB(printk("\n")); + + data->current_SC = SCpnt; + data->current_SC->scsi_done = done; + data->current_SC->SCp.phase = command_ph; + data->current_SC->SCp.Status = 0; + data->current_SC->SCp.Message = 0; + + /* We are locked here already by the mid layer */ + REG0(port_base); + outb(SCpnt->device->id, port_base + DEST_ID); /* set destination */ + outb(FLUSH_FIFO, port_base + CMD_REG); /* reset the fifos */ + + for (i = 0; i < SCpnt->cmd_len; i++) { + outb(SCpnt->cmnd[i], port_base + SCSI_FIFO); + } + outb(SELECT_NO_ATN, port_base + CMD_REG); + + return 0; +} + +static int +SYM53C500_host_reset(struct scsi_cmnd *SCpnt) +{ + int port_base = SCpnt->device->host->io_port; + + DEB(printk("SYM53C500_host_reset called\n")); + SYM53C500_int_host_reset(port_base); + + return SUCCESS; +} + +static int +SYM53C500_biosparm(struct scsi_device *disk, + struct block_device *dev, + sector_t capacity, int *info_array) +{ + int size; + + DEB(printk("SYM53C500_biosparm called\n")); + + size = capacity; + info_array[0] = 64; /* heads */ + info_array[1] = 32; /* sectors */ + info_array[2] = size >> 11; /* cylinders */ + if (info_array[2] > 1024) { /* big disk */ + info_array[0] = 255; + info_array[1] = 63; + info_array[2] = size / (255 * 63); + } + return 0; +} + +static ssize_t +SYM53C500_show_pio(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *SHp = class_to_shost(cdev); + struct sym53c500_data *data = + (struct sym53c500_data *)SHp->hostdata; + + return snprintf(buf, 4, "%d\n", data->fast_pio); +} + +static ssize_t +SYM53C500_store_pio(struct class_device *cdev, const char *buf, size_t count) +{ + int pio; + struct Scsi_Host *SHp = class_to_shost(cdev); + struct sym53c500_data *data = + (struct sym53c500_data *)SHp->hostdata; + + pio = simple_strtoul(buf, NULL, 0); + if (pio == 0 || pio == 1) { + data->fast_pio = pio; + return count; + } + else + return -EINVAL; +} + +/* +* SCSI HBA device attributes we want to +* make available via sysfs. 
+*/ +static struct class_device_attribute SYM53C500_pio_attr = { + .attr = { + .name = "fast_pio", + .mode = (S_IRUGO | S_IWUSR), + }, + .show = SYM53C500_show_pio, + .store = SYM53C500_store_pio, +}; + +static struct class_device_attribute *SYM53C500_shost_attrs[] = { + &SYM53C500_pio_attr, + NULL, +}; + +/* +* scsi_host_template initializer +*/ +static struct scsi_host_template sym53c500_driver_template = { + .module = THIS_MODULE, + .name = "SYM53C500", + .info = SYM53C500_info, + .queuecommand = SYM53C500_queue, + .eh_host_reset_handler = SYM53C500_host_reset, + .bios_param = SYM53C500_biosparm, + .proc_name = "SYM53C500", + .can_queue = 1, + .this_id = 7, + .sg_tablesize = 32, + .cmd_per_lun = 1, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = SYM53C500_shost_attrs +}; + +#define CS_CHECK(fn, ret) \ +do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) + +static void +SYM53C500_config(dev_link_t *link) +{ + client_handle_t handle = link->handle; + struct scsi_info_t *info = link->priv; + tuple_t tuple; + cisparse_t parse; + int i, last_ret, last_fn; + int irq_level, port_base; + unsigned short tuple_data[32]; + struct Scsi_Host *host; + struct scsi_host_template *tpnt = &sym53c500_driver_template; + struct sym53c500_data *data; + + DEBUG(0, "SYM53C500_config(0x%p)\n", link); + + tuple.TupleData = (cisdata_t *)tuple_data; + tuple.TupleDataMax = 64; + tuple.TupleOffset = 0; + tuple.DesiredTuple = CISTPL_CONFIG; + CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); + CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); + CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); + link->conf.ConfigBase = parse.config.base; + + tuple.DesiredTuple = CISTPL_MANFID; + if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) && + (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS)) + info->manf_id = le16_to_cpu(tuple.TupleData[0]); + + /* Configure card */ + link->state |= DEV_CONFIG; + + tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; + CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); + while (1) { + if (pcmcia_get_tuple_data(handle, &tuple) != 0 || + pcmcia_parse_tuple(handle, &tuple, &parse) != 0) + goto next_entry; + link->conf.ConfigIndex = parse.cftable_entry.index; + link->io.BasePort1 = parse.cftable_entry.io.win[0].base; + link->io.NumPorts1 = parse.cftable_entry.io.win[0].len; + + if (link->io.BasePort1 != 0) { + i = pcmcia_request_io(handle, &link->io); + if (i == CS_SUCCESS) + break; + } +next_entry: + CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); + } + + CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); + CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); + + /* + * That's the trouble with copying liberally from another driver. + * Some things probably aren't relevant, and I suspect this entire + * section dealing with manufacturer IDs can be scrapped. --rct + */ + if ((info->manf_id == MANFID_MACNICA) || + (info->manf_id == MANFID_PIONEER) || + (info->manf_id == 0x0098)) { + /* set ATAcmd */ + outb(0xb4, link->io.BasePort1 + 0xd); + outb(0x24, link->io.BasePort1 + 0x9); + outb(0x04, link->io.BasePort1 + 0xd); + } + + /* + * irq_level == 0 implies tpnt->can_queue == 0, which + * is not supported in 2.6. Thus, only irq_level > 0 + * will be allowed. 
+ * + * Possible port_base values are as follows: + * + * 0x130, 0x230, 0x280, 0x290, + * 0x320, 0x330, 0x340, 0x350 + */ + port_base = link->io.BasePort1; + irq_level = link->irq.AssignedIRQ; + + DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n", + port_base, irq_level, USE_FAST_PIO);) + + chip_init(port_base); + + host = scsi_host_alloc(tpnt, sizeof(struct sym53c500_data)); + if (!host) { + printk("SYM53C500: Unable to register host, giving up.\n"); + goto err_release; + } + + data = (struct sym53c500_data *)host->hostdata; + + if (irq_level > 0) { + if (request_irq(irq_level, SYM53C500_intr, 0, "SYM53C500", host)) { + printk("SYM53C500: unable to allocate IRQ %d\n", irq_level); + goto err_free_scsi; + } + DEB(printk("SYM53C500: allocated IRQ %d\n", irq_level)); + } else if (irq_level == 0) { + DEB(printk("SYM53C500: No interrupts detected\n")); + goto err_free_scsi; + } else { + DEB(printk("SYM53C500: Shouldn't get here!\n")); + goto err_free_scsi; + } + + host->unique_id = port_base; + host->irq = irq_level; + host->io_port = port_base; + host->n_io_port = 0x10; + host->dma_channel = -1; + + /* + * Note fast_pio is set to USE_FAST_PIO by + * default, but can be changed via "sysfs". + */ + data->fast_pio = USE_FAST_PIO; + + sprintf(info->node.dev_name, "scsi%d", host->host_no); + link->dev = &info->node; + info->host = host; + + if (scsi_add_host(host, NULL)) + goto err_free_irq; + + scsi_scan_host(host); + + goto out; /* SUCCESS */ + +err_free_irq: + free_irq(irq_level, host); +err_free_scsi: + scsi_host_put(host); +err_release: + release_region(port_base, 0x10); + printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n"); + +out: + link->state &= ~DEV_CONFIG_PENDING; + return; + +cs_failed: + cs_error(link->handle, last_fn, last_ret); + SYM53C500_release(link); + return; +} /* SYM53C500_config */ + +static int +SYM53C500_event(event_t event, int priority, event_callback_args_t *args) +{ + dev_link_t *link = args->client_data; + struct scsi_info_t *info = link->priv; + + DEBUG(1, "SYM53C500_event(0x%06x)\n", event); + + switch (event) { + case CS_EVENT_CARD_REMOVAL: + link->state &= ~DEV_PRESENT; + if (link->state & DEV_CONFIG) + SYM53C500_release(link); + break; + case CS_EVENT_CARD_INSERTION: + link->state |= DEV_PRESENT | DEV_CONFIG_PENDING; + SYM53C500_config(link); + break; + case CS_EVENT_PM_SUSPEND: + link->state |= DEV_SUSPEND; + /* Fall through... */ + case CS_EVENT_RESET_PHYSICAL: + if (link->state & DEV_CONFIG) + pcmcia_release_configuration(link->handle); + break; + case CS_EVENT_PM_RESUME: + link->state &= ~DEV_SUSPEND; + /* Fall through... */ + case CS_EVENT_CARD_RESET: + if (link->state & DEV_CONFIG) { + pcmcia_request_configuration(link->handle, &link->conf); + /* See earlier comment about manufacturer IDs. */ + if ((info->manf_id == MANFID_MACNICA) || + (info->manf_id == MANFID_PIONEER) || + (info->manf_id == 0x0098)) { + outb(0x80, link->io.BasePort1 + 0xd); + outb(0x24, link->io.BasePort1 + 0x9); + outb(0x04, link->io.BasePort1 + 0xd); + } + /* + * If things don't work after a "resume", + * this is a good place to start looking. 
+ */ + SYM53C500_int_host_reset(link->io.BasePort1); + } + break; + } + return 0; +} /* SYM53C500_event */ + +static void +SYM53C500_detach(dev_link_t *link) +{ + dev_link_t **linkp; + + DEBUG(0, "SYM53C500_detach(0x%p)\n", link); + + /* Locate device structure */ + for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next) + if (*linkp == link) + break; + if (*linkp == NULL) + return; + + if (link->state & DEV_CONFIG) + SYM53C500_release(link); + + if (link->handle) + pcmcia_deregister_client(link->handle); + + /* Unlink device structure, free bits. */ + *linkp = link->next; + kfree(link->priv); + link->priv = NULL; +} /* SYM53C500_detach */ + +static dev_link_t * +SYM53C500_attach(void) +{ + struct scsi_info_t *info; + client_reg_t client_reg; + dev_link_t *link; + int i, ret; + + DEBUG(0, "SYM53C500_attach()\n"); + + /* Create new SCSI device */ + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return NULL; + memset(info, 0, sizeof(*info)); + link = &info->link; + link->priv = info; + link->io.NumPorts1 = 16; + link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; + link->io.IOAddrLines = 10; + link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; + link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID; + if (irq_list[0] == -1) + link->irq.IRQInfo2 = irq_mask; + else + for (i = 0; i < 4; i++) + link->irq.IRQInfo2 |= 1 << irq_list[i]; + link->conf.Attributes = CONF_ENABLE_IRQ; + link->conf.Vcc = 50; + link->conf.IntType = INT_MEMORY_AND_IO; + link->conf.Present = PRESENT_OPTION; + + /* Register with Card Services */ + link->next = dev_list; + dev_list = link; + client_reg.dev_info = &dev_info; + client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE; + client_reg.event_handler = &SYM53C500_event; + client_reg.EventMask = CS_EVENT_RESET_REQUEST | CS_EVENT_CARD_RESET | + CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL | + CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME; + client_reg.Version = 0x0210; + client_reg.event_callback_args.client_data = link; + ret = pcmcia_register_client(&link->handle, &client_reg); + if (ret != 0) { + cs_error(link->handle, RegisterClient, ret); + SYM53C500_detach(link); + return NULL; + } + + return link; +} /* SYM53C500_attach */ + +MODULE_AUTHOR("Bob Tracy "); +MODULE_DESCRIPTION("SYM53C500 PCMCIA SCSI driver"); +MODULE_LICENSE("GPL"); + +static struct pcmcia_driver sym53c500_cs_driver = { + .owner = THIS_MODULE, + .drv = { + .name = "sym53c500_cs", + }, + .attach = SYM53C500_attach, + .detach = SYM53C500_detach, +}; + +static int __init +init_sym53c500_cs(void) +{ + return pcmcia_register_driver(&sym53c500_cs_driver); +} + +static void __exit +exit_sym53c500_cs(void) +{ + pcmcia_unregister_driver(&sym53c500_cs_driver); +} + +module_init(init_sym53c500_cs); +module_exit(exit_sym53c500_cs); diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h new file mode 100644 index 000000000..f01cbd66c --- /dev/null +++ b/drivers/scsi/qlogicfas408.h @@ -0,0 +1,120 @@ +/* to be used by qlogicfas and qlogic_cs */ +#ifndef __QLOGICFAS408_H +#define __QLOGICFAS408_H + +/*----------------------------------------------------------------*/ +/* Configuration */ + +/* Set the following to max out the speed of the PIO PseudoDMA transfers, + again, 0 tends to be slower, but more stable. */ + +#define QL_TURBO_PDMA 1 + +/* This should be 1 to enable parity detection */ + +#define QL_ENABLE_PARITY 1 + +/* This will reset all devices when the driver is initialized (during bootup). 
+ The other linux drivers don't do this, but the DOS drivers do, and after + using DOS or some kind of crash or lockup this will bring things back + without requiring a cold boot. It does take some time to recover from a + reset, so it is slower, and I have seen timeouts so that devices weren't + recognized when this was set. */ + +#define QL_RESET_AT_START 0 + +/* crystal frequency in megahertz (for offset 5 and 9) + Please set this for your card. Most Qlogic cards are 40 Mhz. The + Control Concepts ISA (not VLB) is 24 Mhz */ + +#define XTALFREQ 40 + +/**********/ +/* DANGER! modify these at your own risk */ +/* SLOWCABLE can usually be reset to zero if you have a clean setup and + proper termination. The rest are for synchronous transfers and other + advanced features if your device can transfer faster than 5Mb/sec. + If you are really curious, email me for a quick howto until I have + something official */ +/**********/ + +/*****/ +/* config register 1 (offset 8) options */ +/* This needs to be set to 1 if your cabling is long or noisy */ +#define SLOWCABLE 1 + +/*****/ +/* offset 0xc */ +/* This will set fast (10Mhz) synchronous timing when set to 1 + For this to have an effect, FASTCLK must also be 1 */ +#define FASTSCSI 0 + +/* This when set to 1 will set a faster sync transfer rate */ +#define FASTCLK 0 /*(XTALFREQ>25?1:0)*/ + +/*****/ +/* offset 6 */ +/* This is the sync transfer divisor, XTALFREQ/X will be the maximum + achievable data rate (assuming the rest of the system is capable + and set properly) */ +#define SYNCXFRPD 5 /*(XTALFREQ/5)*/ + +/*****/ +/* offset 7 */ +/* This is the count of how many synchronous transfers can take place + i.e. how many reqs can occur before an ack is given. + The maximum value for this is 15, the upper bits can modify + REQ/ACK assertion and deassertion during synchronous transfers + If this is 0, the bus will only transfer asynchronously */ +#define SYNCOFFST 0 +/* for the curious, bits 7&6 control the deassertion delay in 1/2 cycles + of the 40Mhz clock. If FASTCLK is 1, specifying 01 (1/2) will + cause the deassertion to be early by 1/2 clock. Bits 5&4 control + the assertion delay, also in 1/2 clocks (FASTCLK is ignored here). 
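(Worked example, for illustration: with XTALFREQ = 40 and SYNCXFRPD = 5 as set above, the maximum synchronous rate is 40 / 5 = 8 MB/sec; a 24 MHz Control Concepts board with the same divisor would top out at 24 / 5 = 4.8 MB/sec.)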
*/ + +/*----------------------------------------------------------------*/ + +struct qlogicfas408_priv { + int qbase; /* Port */ + int qinitid; /* initiator ID */ + int qabort; /* Flag to cause an abort */ + int qlirq; /* IRQ being used */ + int int_type; /* type of irq, 2 for ISA board, 0 for PCMCIA */ + char qinfo[80]; /* description */ + Scsi_Cmnd *qlcmd; /* current command being processed */ + struct Scsi_Host *shost; /* pointer back to host */ + struct qlogicfas408_priv *next; /* next private struct */ +}; + +/* The qlogic card uses two register maps - These macros select which one */ +#define REG0 ( outb( inb( qbase + 0xd ) & 0x7f , qbase + 0xd ), outb( 4 , qbase + 0xd )) +#define REG1 ( outb( inb( qbase + 0xd ) | 0x80 , qbase + 0xd ), outb( 0xb4 | int_type, qbase + 0xd )) + +/* following is watchdog timeout in microseconds */ +#define WATCHDOG 5000000 + +/*----------------------------------------------------------------*/ +/* the following will set the monitor border color (useful to find + where something crashed or gets stuck at and as a simple profiler) */ + +#define rtrc(i) {} + +#define get_priv_by_cmd(x) (struct qlogicfas408_priv *)&((x)->device->host->hostdata[0]) +#define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0]) + +irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id, struct pt_regs *regs); +int qlogicfas408_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)); +int qlogicfas408_biosparam(struct scsi_device * disk, + struct block_device *dev, + sector_t capacity, int ip[]); +int qlogicfas408_abort(Scsi_Cmnd * cmd); +int qlogicfas408_bus_reset(Scsi_Cmnd * cmd); +int qlogicfas408_host_reset(Scsi_Cmnd * cmd); +int qlogicfas408_device_reset(Scsi_Cmnd * cmd); +const char *qlogicfas408_info(struct Scsi_Host *host); +int qlogicfas408_get_chip_type(int qbase, int int_type); +void qlogicfas408_setup(int qbase, int id, int int_type); +int qlogicfas408_detect(int qbase, int int_type); +void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv); +#endif /* __QLOGICFAS408_H */ + diff --git a/drivers/scsi/sata_promise.h b/drivers/scsi/sata_promise.h new file mode 100644 index 000000000..6e7e96b9e --- /dev/null +++ b/drivers/scsi/sata_promise.h @@ -0,0 +1,154 @@ +/* + * sata_promise.h - Promise SATA common definitions and inline funcs + * + * Copyright 2003-2004 Red Hat, Inc. + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. + * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. + * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. 
+ * + */ + +#ifndef __SATA_PROMISE_H__ +#define __SATA_PROMISE_H__ + +#include + +enum pdc_packet_bits { + PDC_PKT_READ = (1 << 2), + PDC_PKT_NODATA = (1 << 3), + + PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5), + PDC_PKT_CLEAR_BSY = (1 << 4), + PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4), + PDC_LAST_REG = (1 << 3), + + PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1), +}; + +static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf, + dma_addr_t sg_table, + unsigned int devno, u8 *buf) +{ + u8 dev_reg; + u32 *buf32 = (u32 *) buf; + + /* set control bits (byte 0), zero delay seq id (byte 3), + * and seq id (byte 2) + */ + switch (tf->protocol) { + case ATA_PROT_DMA: + if (!(tf->flags & ATA_TFLAG_WRITE)) + buf32[0] = cpu_to_le32(PDC_PKT_READ); + else + buf32[0] = 0; + break; + + case ATA_PROT_NODATA: + buf32[0] = cpu_to_le32(PDC_PKT_NODATA); + break; + + default: + BUG(); + break; + } + + buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */ + buf32[2] = 0; /* no next-packet */ + + if (devno == 0) + dev_reg = ATA_DEVICE_OBS; + else + dev_reg = ATA_DEVICE_OBS | ATA_DEV1; + + /* select device */ + buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE; + buf[13] = dev_reg; + + /* device control register */ + buf[14] = (1 << 5) | PDC_REG_DEVCTL; + buf[15] = tf->ctl; + + return 16; /* offset of next byte */ +} + +static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf, + unsigned int i) +{ + if (tf->flags & ATA_TFLAG_DEVICE) { + buf[i++] = (1 << 5) | ATA_REG_DEVICE; + buf[i++] = tf->device; + } + + /* and finally the command itself; also includes end-of-pkt marker */ + buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD; + buf[i++] = tf->command; + + return i; +} + +static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i) +{ + /* the "(1 << 5)" should be read "(count << 5)" */ + + /* ATA command block registers */ + buf[i++] = (1 << 5) | ATA_REG_FEATURE; + buf[i++] = tf->feature; + + buf[i++] = (1 << 5) | ATA_REG_NSECT; + buf[i++] = tf->nsect; + + buf[i++] = (1 << 5) | ATA_REG_LBAL; + buf[i++] = tf->lbal; + + buf[i++] = (1 << 5) | ATA_REG_LBAM; + buf[i++] = tf->lbam; + + buf[i++] = (1 << 5) | ATA_REG_LBAH; + buf[i++] = tf->lbah; + + return i; +} + +static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i) +{ + /* the "(2 << 5)" should be read "(count << 5)" */ + + /* ATA command block registers */ + buf[i++] = (2 << 5) | ATA_REG_FEATURE; + buf[i++] = tf->hob_feature; + buf[i++] = tf->feature; + + buf[i++] = (2 << 5) | ATA_REG_NSECT; + buf[i++] = tf->hob_nsect; + buf[i++] = tf->nsect; + + buf[i++] = (2 << 5) | ATA_REG_LBAL; + buf[i++] = tf->hob_lbal; + buf[i++] = tf->lbal; + + buf[i++] = (2 << 5) | ATA_REG_LBAM; + buf[i++] = tf->hob_lbam; + buf[i++] = tf->lbam; + + buf[i++] = (2 << 5) | ATA_REG_LBAH; + buf[i++] = tf->hob_lbah; + buf[i++] = tf->lbah; + + return i; +} + + +#endif /* __SATA_PROMISE_H__ */ diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c new file mode 100644 index 000000000..38c64f656 --- /dev/null +++ b/drivers/usb/core/sysfs.c @@ -0,0 +1,229 @@ +/* + * drivers/usb/core/sysfs.c + * + * (C) Copyright 2002 David Brownell + * (C) Copyright 2002 Greg Kroah-Hartman + * (C) Copyright 2002 IBM Corp. + * + * All of the sysfs file attributes for usb devices and interfaces. 
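(Illustrative note on the sata_promise.h helpers above: a caller is presumably expected to chain them to build one packet, e.g. i = pdc_pkt_header(tf, sg_dma, devno, buf) for the control word, S/G table address and device-select bytes, then i = pdc_prep_lba28(tf, buf, i) or pdc_prep_lba48() to append one "(count << 5) | register" pair per taskfile register, and finally i = pdc_pkt_footer(tf, buf, i) to add the optional device register and the command byte tagged with PDC_LAST_REG as the end-of-packet marker; sg_dma here is just a placeholder for the DMA address of the S/G table.)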
+ * + */ + + +#include +#include + +#ifdef CONFIG_USB_DEBUG + #define DEBUG +#else + #undef DEBUG +#endif +#include + +#include "usb.h" + +/* Active configuration fields */ +#define usb_actconfig_show(field, multiplier, format_string) \ +static ssize_t show_##field (struct device *dev, char *buf) \ +{ \ + struct usb_device *udev; \ + \ + udev = to_usb_device (dev); \ + if (udev->actconfig) \ + return sprintf (buf, format_string, \ + udev->actconfig->desc.field * multiplier); \ + else \ + return 0; \ +} \ + +#define usb_actconfig_attr(field, multiplier, format_string) \ +usb_actconfig_show(field, multiplier, format_string) \ +static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); + +usb_actconfig_attr (bNumInterfaces, 1, "%2d\n") +usb_actconfig_attr (bmAttributes, 1, "%2x\n") +usb_actconfig_attr (bMaxPower, 2, "%3dmA\n") + +/* configuration value is always present, and r/w */ +usb_actconfig_show(bConfigurationValue, 1, "%u\n"); + +static ssize_t +set_bConfigurationValue (struct device *dev, const char *buf, size_t count) +{ + struct usb_device *udev = udev = to_usb_device (dev); + int config, value; + + if (sscanf (buf, "%u", &config) != 1 || config > 255) + return -EINVAL; + down(&udev->serialize); + value = usb_set_configuration (udev, config); + up(&udev->serialize); + return (value < 0) ? value : count; +} + +static DEVICE_ATTR(bConfigurationValue, S_IRUGO | S_IWUSR, + show_bConfigurationValue, set_bConfigurationValue); + +/* String fields */ +#define usb_string_attr(name, field) \ +static ssize_t show_##name(struct device *dev, char *buf) \ +{ \ + struct usb_device *udev; \ + int len; \ + \ + udev = to_usb_device (dev); \ + len = usb_string(udev, udev->descriptor.field, buf, PAGE_SIZE); \ + if (len < 0) \ + return 0; \ + buf[len] = '\n'; \ + buf[len+1] = 0; \ + return len+1; \ +} \ +static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); + +usb_string_attr(product, iProduct); +usb_string_attr(manufacturer, iManufacturer); +usb_string_attr(serial, iSerialNumber); + +static ssize_t +show_speed (struct device *dev, char *buf) +{ + struct usb_device *udev; + char *speed; + + udev = to_usb_device (dev); + + switch (udev->speed) { + case USB_SPEED_LOW: + speed = "1.5"; + break; + case USB_SPEED_UNKNOWN: + case USB_SPEED_FULL: + speed = "12"; + break; + case USB_SPEED_HIGH: + speed = "480"; + break; + default: + speed = "unknown"; + } + return sprintf (buf, "%s\n", speed); +} +static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL); + +static ssize_t +show_devnum (struct device *dev, char *buf) +{ + struct usb_device *udev; + + udev = to_usb_device (dev); + return sprintf (buf, "%d\n", udev->devnum); +} +static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL); + +static ssize_t +show_version (struct device *dev, char *buf) +{ + struct usb_device *udev; + + udev = to_usb_device (dev); + return sprintf (buf, "%2x.%02x\n", udev->descriptor.bcdUSB >> 8, + udev->descriptor.bcdUSB & 0xff); +} +static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); + +static ssize_t +show_maxchild (struct device *dev, char *buf) +{ + struct usb_device *udev; + + udev = to_usb_device (dev); + return sprintf (buf, "%d\n", udev->maxchild); +} +static DEVICE_ATTR(maxchild, S_IRUGO, show_maxchild, NULL); + +/* Descriptor fields */ +#define usb_descriptor_attr(field, format_string) \ +static ssize_t \ +show_##field (struct device *dev, char *buf) \ +{ \ + struct usb_device *udev; \ + \ + udev = to_usb_device (dev); \ + return sprintf (buf, format_string, udev->descriptor.field); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, 
show_##field, NULL); + +usb_descriptor_attr (idVendor, "%04x\n") +usb_descriptor_attr (idProduct, "%04x\n") +usb_descriptor_attr (bcdDevice, "%04x\n") +usb_descriptor_attr (bDeviceClass, "%02x\n") +usb_descriptor_attr (bDeviceSubClass, "%02x\n") +usb_descriptor_attr (bDeviceProtocol, "%02x\n") +usb_descriptor_attr (bNumConfigurations, "%d\n") + + +void usb_create_sysfs_dev_files (struct usb_device *udev) +{ + struct device *dev = &udev->dev; + + /* current configuration's attributes */ + device_create_file (dev, &dev_attr_bNumInterfaces); + device_create_file (dev, &dev_attr_bConfigurationValue); + device_create_file (dev, &dev_attr_bmAttributes); + device_create_file (dev, &dev_attr_bMaxPower); + + /* device attributes */ + device_create_file (dev, &dev_attr_idVendor); + device_create_file (dev, &dev_attr_idProduct); + device_create_file (dev, &dev_attr_bcdDevice); + device_create_file (dev, &dev_attr_bDeviceClass); + device_create_file (dev, &dev_attr_bDeviceSubClass); + device_create_file (dev, &dev_attr_bDeviceProtocol); + device_create_file (dev, &dev_attr_bNumConfigurations); + + /* speed varies depending on how you connect the device */ + device_create_file (dev, &dev_attr_speed); + // FIXME iff there are other speed configs, show how many + + if (udev->descriptor.iManufacturer) + device_create_file (dev, &dev_attr_manufacturer); + if (udev->descriptor.iProduct) + device_create_file (dev, &dev_attr_product); + if (udev->descriptor.iSerialNumber) + device_create_file (dev, &dev_attr_serial); + + device_create_file (dev, &dev_attr_devnum); + device_create_file (dev, &dev_attr_version); + device_create_file (dev, &dev_attr_maxchild); +} + +/* Interface fields */ +#define usb_intf_attr(field, format_string) \ +static ssize_t \ +show_##field (struct device *dev, char *buf) \ +{ \ + struct usb_interface *intf = to_usb_interface (dev); \ + \ + return sprintf (buf, format_string, intf->cur_altsetting->desc.field); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); + +usb_intf_attr (bInterfaceNumber, "%02x\n") +usb_intf_attr (bAlternateSetting, "%2d\n") +usb_intf_attr (bNumEndpoints, "%02x\n") +usb_intf_attr (bInterfaceClass, "%02x\n") +usb_intf_attr (bInterfaceSubClass, "%02x\n") +usb_intf_attr (bInterfaceProtocol, "%02x\n") +usb_intf_attr (iInterface, "%02x\n") + +void usb_create_sysfs_intf_files (struct usb_interface *intf) +{ + device_create_file (&intf->dev, &dev_attr_bInterfaceNumber); + device_create_file (&intf->dev, &dev_attr_bAlternateSetting); + device_create_file (&intf->dev, &dev_attr_bNumEndpoints); + device_create_file (&intf->dev, &dev_attr_bInterfaceClass); + device_create_file (&intf->dev, &dev_attr_bInterfaceSubClass); + device_create_file (&intf->dev, &dev_attr_bInterfaceProtocol); + device_create_file (&intf->dev, &dev_attr_iInterface); +} diff --git a/drivers/usb/input/touchkitusb.c b/drivers/usb/input/touchkitusb.c new file mode 100644 index 000000000..4917b042e --- /dev/null +++ b/drivers/usb/input/touchkitusb.c @@ -0,0 +1,310 @@ +/****************************************************************************** + * touchkitusb.c -- Driver for eGalax TouchKit USB Touchscreens + * + * Copyright (C) 2004 by Daniel Ritz + * Copyright (C) by Todd E. Johnson (mtouchusb.c) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
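(Illustrative note on the usb_descriptor_attr() macro in drivers/usb/core/sysfs.c above: an invocation such as usb_descriptor_attr (idVendor, "%04x\n") expands to a show_idVendor() routine that does sprintf (buf, "%04x\n", udev->descriptor.idVendor), plus a matching DEVICE_ATTR(idVendor, S_IRUGO, show_idVendor, NULL); usb_create_sysfs_dev_files() then exposes it by calling device_create_file (dev, &dev_attr_idVendor).)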
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Based upon mtouchusb.c + * + *****************************************************************************/ + +//#define DEBUG + +#include +#include +#include +#include +#include +#include + +#if !defined(DEBUG) && defined(CONFIG_USB_DEBUG) +#define DEBUG +#endif +#include + + +#define TOUCHKIT_MIN_XC 0x0 +#define TOUCHKIT_MAX_XC 0x07ff +#define TOUCHKIT_XC_FUZZ 0x0 +#define TOUCHKIT_XC_FLAT 0x0 +#define TOUCHKIT_MIN_YC 0x0 +#define TOUCHKIT_MAX_YC 0x07ff +#define TOUCHKIT_YC_FUZZ 0x0 +#define TOUCHKIT_YC_FLAT 0x0 +#define TOUCHKIT_REPORT_DATA_SIZE 8 + +#define TOUCHKIT_DOWN 0x01 +#define TOUCHKIT_POINT_TOUCH 0x81 +#define TOUCHKIT_POINT_NOTOUCH 0x80 + +#define TOUCHKIT_GET_TOUCHED(dat) ((((dat)[0]) & TOUCHKIT_DOWN) ? 1 : 0) +#define TOUCHKIT_GET_X(dat) (((dat)[3] << 7) | (dat)[4]) +#define TOUCHKIT_GET_Y(dat) (((dat)[1] << 7) | (dat)[2]) + +#define DRIVER_VERSION "v0.1" +#define DRIVER_AUTHOR "Daniel Ritz " +#define DRIVER_DESC "eGalax TouchKit USB HID Touchscreen Driver" + +struct touchkit_usb { + unsigned char *data; + dma_addr_t data_dma; + struct urb *irq; + struct usb_device *udev; + struct input_dev input; + int open; + char name[128]; + char phys[64]; +}; + +static struct usb_device_id touchkit_devices[] = { + {USB_DEVICE(0x3823, 0x0001)}, + {USB_DEVICE(0x0eef, 0x0001)}, + {} +}; + +static void touchkit_irq(struct urb *urb, struct pt_regs *regs) +{ + struct touchkit_usb *touchkit = urb->context; + int retval; + + switch (urb->status) { + case 0: + /* success */ + break; + case -ETIMEDOUT: + /* this urb is timing out */ + dbg("%s - urb timed out - was the device unplugged?", + __FUNCTION__); + return; + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* this urb is terminated, clean up */ + dbg("%s - urb shutting down with status: %d", + __FUNCTION__, urb->status); + return; + default: + dbg("%s - nonzero urb status received: %d", + __FUNCTION__, urb->status); + goto exit; + } + + input_regs(&touchkit->input, regs); + input_report_key(&touchkit->input, BTN_TOUCH, + TOUCHKIT_GET_TOUCHED(touchkit->data)); + input_report_abs(&touchkit->input, ABS_X, + TOUCHKIT_GET_X(touchkit->data)); + input_report_abs(&touchkit->input, ABS_Y, + TOUCHKIT_GET_Y(touchkit->data)); + input_sync(&touchkit->input); + +exit: + retval = usb_submit_urb(urb, GFP_ATOMIC); + if (retval) + err("%s - usb_submit_urb failed with result: %d", + __FUNCTION__, retval); +} + +static int touchkit_open(struct input_dev *input) +{ + struct touchkit_usb *touchkit = input->private; + + if (touchkit->open++) + return 0; + + touchkit->irq->dev = touchkit->udev; + + if (usb_submit_urb(touchkit->irq, GFP_ATOMIC)) { + touchkit->open--; + return -EIO; + } + + return 0; +} + +static void touchkit_close(struct input_dev *input) +{ + struct touchkit_usb *touchkit = input->private; + + if (!--touchkit->open) + usb_unlink_urb(touchkit->irq); +} + +static int touchkit_alloc_buffers(struct usb_device *udev, + struct touchkit_usb *touchkit) +{ + touchkit->data = usb_buffer_alloc(udev, TOUCHKIT_REPORT_DATA_SIZE, + SLAB_ATOMIC, &touchkit->data_dma); + + if (!touchkit->data) + return 
-1; + + return 0; +} + +static void touchkit_free_buffers(struct usb_device *udev, + struct touchkit_usb *touchkit) +{ + if (touchkit->data) + usb_buffer_free(udev, TOUCHKIT_REPORT_DATA_SIZE, + touchkit->data, touchkit->data_dma); +} + +static int touchkit_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + int ret; + struct touchkit_usb *touchkit; + struct usb_host_interface *interface; + struct usb_endpoint_descriptor *endpoint; + struct usb_device *udev = interface_to_usbdev(intf); + char path[64]; + char *buf; + + interface = intf->cur_altsetting; + endpoint = &interface->endpoint[0].desc; + + touchkit = kmalloc(sizeof(struct touchkit_usb), GFP_KERNEL); + if (!touchkit) + return -ENOMEM; + + memset(touchkit, 0, sizeof(struct touchkit_usb)); + touchkit->udev = udev; + + if (touchkit_alloc_buffers(udev, touchkit)) { + ret = -ENOMEM; + goto out_free; + } + + touchkit->input.private = touchkit; + touchkit->input.open = touchkit_open; + touchkit->input.close = touchkit_close; + + usb_make_path(udev, path, 64); + sprintf(touchkit->phys, "%s/input0", path); + + touchkit->input.name = touchkit->name; + touchkit->input.phys = touchkit->phys; + touchkit->input.id.bustype = BUS_USB; + touchkit->input.id.vendor = udev->descriptor.idVendor; + touchkit->input.id.product = udev->descriptor.idProduct; + touchkit->input.id.version = udev->descriptor.bcdDevice; + touchkit->input.dev = &intf->dev; + + touchkit->input.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); + touchkit->input.absbit[0] = BIT(ABS_X) | BIT(ABS_Y); + touchkit->input.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH); + + /* Used to Scale Compensated Data */ + touchkit->input.absmin[ABS_X] = TOUCHKIT_MIN_XC; + touchkit->input.absmax[ABS_X] = TOUCHKIT_MAX_XC; + touchkit->input.absfuzz[ABS_X] = TOUCHKIT_XC_FUZZ; + touchkit->input.absflat[ABS_X] = TOUCHKIT_XC_FLAT; + touchkit->input.absmin[ABS_Y] = TOUCHKIT_MIN_YC; + touchkit->input.absmax[ABS_Y] = TOUCHKIT_MAX_YC; + touchkit->input.absfuzz[ABS_Y] = TOUCHKIT_YC_FUZZ; + touchkit->input.absflat[ABS_Y] = TOUCHKIT_YC_FLAT; + + buf = kmalloc(63, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out_free_buffers; + } + + if (udev->descriptor.iManufacturer && + usb_string(udev, udev->descriptor.iManufacturer, buf, 63) > 0) + strcat(touchkit->name, buf); + if (udev->descriptor.iProduct && + usb_string(udev, udev->descriptor.iProduct, buf, 63) > 0) + sprintf(touchkit->name, "%s %s", touchkit->name, buf); + + if (!strlen(touchkit->name)) + sprintf(touchkit->name, "USB Touchscreen %04x:%04x", + touchkit->input.id.vendor, touchkit->input.id.product); + + kfree(buf); + + touchkit->irq = usb_alloc_urb(0, GFP_KERNEL); + if (!touchkit->irq) { + dbg("%s - usb_alloc_urb failed: touchkit->irq", __FUNCTION__); + ret = -ENOMEM; + goto out_free_buffers; + } + + usb_fill_int_urb(touchkit->irq, touchkit->udev, + usb_rcvintpipe(touchkit->udev, 0x81), + touchkit->data, TOUCHKIT_REPORT_DATA_SIZE, + touchkit_irq, touchkit, endpoint->bInterval); + + input_register_device(&touchkit->input); + + printk(KERN_INFO "input: %s on %s\n", touchkit->name, path); + usb_set_intfdata(intf, touchkit); + + return 0; + +out_free_buffers: + touchkit_free_buffers(udev, touchkit); +out_free: + kfree(touchkit); + return ret; +} + +static void touchkit_disconnect(struct usb_interface *intf) +{ + struct touchkit_usb *touchkit = usb_get_intfdata(intf); + + dbg("%s - called", __FUNCTION__); + + if (!touchkit) + return; + + dbg("%s - touchkit is initialized, cleaning up", __FUNCTION__); + usb_set_intfdata(intf, NULL); + 
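/*
 * Worked example for the TOUCHKIT_GET_X/TOUCHKIT_GET_Y macros defined earlier
 * in this file, for illustration: each coordinate appears to arrive as two
 * 7-bit report bytes, so for data[3] = 0x05 and data[4] = 0x2a the macro
 * yields X = (5 << 7) | 42 = 682, and the largest representable value,
 * (0x0f << 7) | 0x7f = 0x07ff, matches TOUCHKIT_MAX_XC.
 */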
input_unregister_device(&touchkit->input); + usb_unlink_urb(touchkit->irq); + usb_free_urb(touchkit->irq); + touchkit_free_buffers(interface_to_usbdev(intf), touchkit); + kfree(touchkit); +} + +MODULE_DEVICE_TABLE(usb, touchkit_devices); + +static struct usb_driver touchkit_driver = { + .owner = THIS_MODULE, + .name = "touchkitusb", + .probe = touchkit_probe, + .disconnect = touchkit_disconnect, + .id_table = touchkit_devices, +}; + +static int __init touchkit_init(void) +{ + return usb_register(&touchkit_driver); +} + +static void __exit touchkit_cleanup(void) +{ + usb_deregister(&touchkit_driver); +} + +module_init(touchkit_init); +module_exit(touchkit_cleanup); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/misc/phidgetservo.c b/drivers/usb/misc/phidgetservo.c new file mode 100644 index 000000000..9018774ae --- /dev/null +++ b/drivers/usb/misc/phidgetservo.c @@ -0,0 +1,327 @@ +/* + * USB PhidgetServo driver 1.0 + * + * Copyright (C) 2004 Sean Young + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This is a driver for the USB PhidgetServo version 2.0 and 3.0 servo + * controllers available at: http://www.phidgets.com/ + * + * Note that the driver takes input as: degrees.minutes + * -23 < degrees < 203 + * 0 < minutes < 59 + * + * CAUTION: Generally you should use 0 < degrees < 180 as anything else + * is probably beyond the range of your servo and may damage it. + */ + +#include +#ifdef CONFIG_USB_DEBUG +#define DEBUG 1 +#endif +#include +#include +#include +#include +#include +#include + +#define DRIVER_AUTHOR "Sean Young " +#define DRIVER_DESC "USB PhidgetServo Driver" + +#define VENDOR_ID_GLAB 0x06c2 +#define DEVICE_ID_4MOTOR_SERVO_30 0x0038 +#define DEVICE_ID_1MOTOR_SERVO_30 0x0039 + +#define VENDOR_ID_WISEGROUP 0x0925 +#define DEVICE_ID_1MOTOR_SERVO_20 0x8101 +#define DEVICE_ID_4MOTOR_SERVO_20 0x8104 + +static struct usb_device_id id_table[] = { + {USB_DEVICE(VENDOR_ID_GLAB, DEVICE_ID_4MOTOR_SERVO_30)}, + {USB_DEVICE(VENDOR_ID_GLAB, DEVICE_ID_1MOTOR_SERVO_30)}, + {USB_DEVICE(VENDOR_ID_WISEGROUP, DEVICE_ID_4MOTOR_SERVO_20)}, + {USB_DEVICE(VENDOR_ID_WISEGROUP, DEVICE_ID_1MOTOR_SERVO_20)}, + {} +}; + +MODULE_DEVICE_TABLE(usb, id_table); + +struct phidget_servo { + struct usb_device *udev; + int version; + int quad_servo; + int pulse[4]; + int degrees[4]; + int minutes[4]; +}; + +static void +change_position_v30(struct phidget_servo *servo, int servo_no, int degrees, + int minutes) +{ + int retval; + unsigned char *buffer; + + buffer = kmalloc(6, GFP_KERNEL); + if (!buffer) { + dev_err(&servo->udev->dev, "%s - out of memory\n", + __FUNCTION__); + return; + } + + /* + * pulse = 0 - 4095 + * angle = 0 - 180 degrees + * + * pulse = angle * 10.6 + 243.8 + */ + servo->pulse[servo_no] = ((degrees*60 + minutes)*106 + 2438*60)/600; + servo->degrees[servo_no]= degrees; + servo->minutes[servo_no]= minutes; + + /* + * The PhidgetServo v3.0 is controlled by sending 6 bytes, + * 4 * 12 bits for each servo. 
+ * + * low = lower 8 bits pulse + * high = higher 4 bits pulse + * + * offset bits + * +---+-----------------+ + * | 0 | low 0 | + * +---+--------+--------+ + * | 1 | high 1 | high 0 | + * +---+--------+--------+ + * | 2 | low 1 | + * +---+-----------------+ + * | 3 | low 2 | + * +---+--------+--------+ + * | 4 | high 3 | high 2 | + * +---+--------+--------+ + * | 5 | low 3 | + * +---+-----------------+ + */ + + buffer[0] = servo->pulse[0] & 0xff; + buffer[1] = (servo->pulse[0] >> 8 & 0x0f) + | (servo->pulse[1] >> 4 & 0xf0); + buffer[2] = servo->pulse[1] & 0xff; + buffer[3] = servo->pulse[2] & 0xff; + buffer[4] = (servo->pulse[2] >> 8 & 0x0f) + | (servo->pulse[3] >> 4 & 0xf0); + buffer[5] = servo->pulse[3] & 0xff; + + dev_dbg(&servo->udev->dev, + "data: %02x %02x %02x %02x %02x %02x\n", + buffer[0], buffer[1], buffer[2], + buffer[3], buffer[4], buffer[5]); + + retval = usb_control_msg(servo->udev, + usb_sndctrlpipe(servo->udev, 0), + 0x09, 0x21, 0x0200, 0x0000, buffer, 6, 2 * HZ); + if (retval != 6) + dev_err(&servo->udev->dev, "retval = %d\n", retval); + kfree(buffer); +} + +static void +change_position_v20(struct phidget_servo *servo, int servo_no, int degrees, + int minutes) +{ + int retval; + unsigned char *buffer; + + buffer = kmalloc(2, GFP_KERNEL); + if (!buffer) { + dev_err(&servo->udev->dev, "%s - out of memory\n", + __FUNCTION__); + return; + } + + /* + * angle = 0 - 180 degrees + * pulse = angle + 23 + */ + servo->pulse[servo_no]= degrees + 23; + servo->degrees[servo_no]= degrees; + servo->minutes[servo_no]= 0; + + /* + * The PhidgetServo v2.0 is controlled by sending two bytes. The + * first byte is the servo number xor'ed with 2: + * + * servo 0 = 2 + * servo 1 = 3 + * servo 2 = 0 + * servo 3 = 1 + * + * The second byte is the position. 
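
Worked example for the v3.0 math above: 90 degrees, 0 minutes gives pulse = ((90*60 + 0)*106 + 2438*60)/600 = 1197 = 0x4ad, i.e. the intended 90 * 10.6 + 243.8 = 1197.8 rounded down. A standalone sketch (hypothetical values, not part of the patch) of how four such pulses pack into the 6-byte report, using the same expressions as change_position_v30():

#include <stdio.h>

int main(void)
{
    int pulse[4] = { 0x4ad, 0x4ad, 0x4ad, 0x4ad };   /* four servos at 90 degrees */
    unsigned char buffer[6];
    int i;

    buffer[0] = pulse[0] & 0xff;
    buffer[1] = (pulse[0] >> 8 & 0x0f) | (pulse[1] >> 4 & 0xf0);
    buffer[2] = pulse[1] & 0xff;
    buffer[3] = pulse[2] & 0xff;
    buffer[4] = (pulse[2] >> 8 & 0x0f) | (pulse[3] >> 4 & 0xf0);
    buffer[5] = pulse[3] & 0xff;

    /* prints: ad 44 ad ad 44 ad */
    for (i = 0; i < 6; i++)
        printf("%02x ", buffer[i]);
    printf("\n");
    return 0;
}
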
+ */ + + buffer[0] = servo_no ^ 2; + buffer[1] = servo->pulse[servo_no]; + + dev_dbg(&servo->udev->dev, "data: %02x %02x\n", buffer[0], buffer[1]); + + retval = usb_control_msg(servo->udev, + usb_sndctrlpipe(servo->udev, 0), + 0x09, 0x21, 0x0200, 0x0000, buffer, 2, 2 * HZ); + if (retval != 2) + dev_err(&servo->udev->dev, "retval = %d\n", retval); + kfree(buffer); +} + +#define show_set(value) \ +static ssize_t set_servo##value (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + int degrees, minutes; \ + struct usb_interface *intf = to_usb_interface (dev); \ + struct phidget_servo *servo = usb_get_intfdata (intf); \ + \ + minutes = 0; \ + /* must at least convert degrees */ \ + if (sscanf (buf, "%d.%d", °rees, &minutes) < 1) { \ + return -EINVAL; \ + } \ + \ + if (degrees < -23 || degrees > (180 + 23) || \ + minutes < 0 || minutes > 59) { \ + return -EINVAL; \ + } \ + \ + if (servo->version >= 3) \ + change_position_v30 (servo, value, degrees, minutes); \ + else \ + change_position_v20 (servo, value, degrees, minutes); \ + \ + return count; \ +} \ + \ +static ssize_t show_servo##value (struct device *dev, char *buf) \ +{ \ + struct usb_interface *intf = to_usb_interface (dev); \ + struct phidget_servo *servo = usb_get_intfdata (intf); \ + \ + return sprintf (buf, "%d.%02d\n", servo->degrees[value], \ + servo->minutes[value]); \ +} \ +static DEVICE_ATTR(servo##value, S_IWUGO | S_IRUGO, \ + show_servo##value, set_servo##value); + +show_set(0); +show_set(1); +show_set(2); +show_set(3); + +static int +servo_probe(struct usb_interface *interface, const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(interface); + struct phidget_servo *dev = NULL; + + dev = kmalloc(sizeof (struct phidget_servo), GFP_KERNEL); + if (dev == NULL) { + dev_err(&interface->dev, "%s - out of memory\n", __FUNCTION__); + return -ENOMEM; + } + memset(dev, 0x00, sizeof (*dev)); + + dev->udev = usb_get_dev(udev); + switch (udev->descriptor.idVendor) { + case VENDOR_ID_WISEGROUP: + dev->version = 2; + break; + case VENDOR_ID_GLAB: + dev->version = 3; + break; + } + switch (udev->descriptor.idProduct) { + case DEVICE_ID_4MOTOR_SERVO_20: + case DEVICE_ID_4MOTOR_SERVO_30: + dev->quad_servo = 1; + break; + case DEVICE_ID_1MOTOR_SERVO_20: + case DEVICE_ID_1MOTOR_SERVO_30: + dev->quad_servo = 0; + break; + } + + usb_set_intfdata(interface, dev); + + device_create_file(&interface->dev, &dev_attr_servo0); + if (dev->quad_servo) { + device_create_file(&interface->dev, &dev_attr_servo1); + device_create_file(&interface->dev, &dev_attr_servo2); + device_create_file(&interface->dev, &dev_attr_servo3); + } + + dev_info(&interface->dev, "USB %d-Motor PhidgetServo v%d.0 attached\n", + dev->quad_servo ? 4 : 1, dev->version); + if (dev->version == 2) + dev_info(&interface->dev, + "WARNING: v2.0 not tested! Please report if it works.\n"); + + return 0; +} + +static void +servo_disconnect(struct usb_interface *interface) +{ + struct phidget_servo *dev; + + dev = usb_get_intfdata(interface); + usb_set_intfdata(interface, NULL); + + device_remove_file(&interface->dev, &dev_attr_servo0); + if (dev->quad_servo) { + device_remove_file(&interface->dev, &dev_attr_servo1); + device_remove_file(&interface->dev, &dev_attr_servo2); + device_remove_file(&interface->dev, &dev_attr_servo3); + } + + usb_put_dev(dev->udev); + + kfree(dev); + + dev_info(&interface->dev, "USB %d-Motor PhidgetServo v%d.0 detached\n", + dev->quad_servo ? 
4 : 1, dev->version); +} + +static struct usb_driver servo_driver = { + .owner = THIS_MODULE, + .name = "phidgetservo", + .probe = servo_probe, + .disconnect = servo_disconnect, + .id_table = id_table +}; + +static int __init +phidget_servo_init(void) +{ + int retval = 0; + + retval = usb_register(&servo_driver); + if (retval) + err("usb_register failed. Error number %d", retval); + + return retval; +} + +static void __exit +phidget_servo_exit(void) +{ + usb_deregister(&servo_driver); +} + +module_init(phidget_servo_init); +module_exit(phidget_servo_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c new file mode 100644 index 000000000..034ec2996 --- /dev/null +++ b/drivers/video/asiliantfb.c @@ -0,0 +1,620 @@ +/* + * drivers/video/asiliantfb.c + * frame buffer driver for Asiliant 69000 chip + * Copyright (C) 2001-2003 Saito.K & Jeanne + * + * from driver/video/chipsfb.c and, + * + * drivers/video/asiliantfb.c -- frame buffer device for + * Asiliant 69030 chip (formerly Intel, formerly Chips & Technologies) + * Author: apc@agelectronics.co.uk + * Copyright (C) 2000 AG Electronics + * Note: the data sheets don't seem to be available from Asiliant. + * They are available by searching developer.intel.com, but are not otherwise + * linked to. + * + * This driver should be portable with minimal effort to the 69000 display + * chip, and to the twin-display mode of the 69030. + * Contains code from Thomas Hhenleitner (thanks) + * + * Derived from the CT65550 driver chipsfb.c: + * Copyright (C) 1998 Paul Mackerras + * ...which was derived from the Powermac "chips" driver: + * Copyright (C) 1997 Fabio Riccardi. + * And from the frame buffer device for Open Firmware-initialized devices: + * Copyright (C) 1997 Geert Uytterhoeven. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. 
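
The servo0..servo3 attributes created by the PhidgetServo driver above are ordinary sysfs files, so positioning a servo from userspace is just a write of "degrees.minutes". A minimal sketch, with a purely hypothetical sysfs path (the real one depends on where the interface is enumerated):

#include <stdio.h>

int main(void)
{
    /* path is illustrative only */
    FILE *f = fopen("/sys/bus/usb/devices/1-1:1.0/servo0", "w");

    if (!f)
        return 1;
    fprintf(f, "90.30\n");   /* 90 degrees 30 minutes, parsed by set_servo0() */
    return fclose(f) ? 1 : 0;
}
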
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Built in clock of the 69030 */ +const unsigned Fref = 14318180; + +#define mmio_base (p->screen_base + 0x400000) + +#define mm_write_ind(num, val, ap, dp) do { \ + writeb((num), mmio_base + (ap)); writeb((val), mmio_base + (dp)); \ +} while (0) + +static void mm_write_xr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x7ac, 0x7ad); +} +#define write_xr(num, val) mm_write_xr(p, num, val) + +static void mm_write_fr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x7a0, 0x7a1); +} +#define write_fr(num, val) mm_write_fr(p, num, val) + +static void mm_write_cr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x7a8, 0x7a9); +} +#define write_cr(num, val) mm_write_cr(p, num, val) + +static void mm_write_gr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x79c, 0x79d); +} +#define write_gr(num, val) mm_write_gr(p, num, val) + +static void mm_write_sr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x788, 0x789); +} +#define write_sr(num, val) mm_write_sr(p, num, val) + +static void mm_write_ar(struct fb_info *p, u8 reg, u8 data) +{ + readb(mmio_base + 0x7b4); + mm_write_ind(reg, data, 0x780, 0x780); +} +#define write_ar(num, val) mm_write_ar(p, num, val) + +/* + * Exported functions + */ +int asiliantfb_init(void); + +static int asiliantfb_pci_init(struct pci_dev *dp, const struct pci_device_id *); +static int asiliantfb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); +static int asiliantfb_set_par(struct fb_info *info); +static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *info); + +static struct fb_ops asiliantfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = asiliantfb_check_var, + .fb_set_par = asiliantfb_set_par, + .fb_setcolreg = asiliantfb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_cursor = soft_cursor, +}; + +/* Calculate the ratios for the dot clocks without using a single long long + * value */ +static void asiliant_calc_dclk2(u32 *ppixclock, u8 *dclk2_m, u8 *dclk2_n, u8 *dclk2_div) +{ + unsigned pixclock = *ppixclock; + unsigned Ftarget = 1000000 * (1000000 / pixclock); + unsigned n; + unsigned best_error = 0xffffffff; + unsigned best_m = 0xffffffff, + best_n = 0xffffffff; + unsigned ratio; + unsigned remainder; + unsigned char divisor = 0; + + /* Calculate the frequency required. This is hard enough. */ + ratio = 1000000 / pixclock; + remainder = 1000000 % pixclock; + Ftarget = 1000000 * ratio + (1000000 * remainder) / pixclock; + + while (Ftarget < 100000000) { + divisor += 0x10; + Ftarget <<= 1; + } + + ratio = Ftarget / Fref; + remainder = Ftarget % Fref; + + /* This expresses the constraint that 150kHz <= Fref/n <= 5Mhz, + * together with 3 <= n <= 257. */ + for (n = 3; n <= 257; n++) { + unsigned m = n * ratio + (n * remainder) / Fref; + + /* 3 <= m <= 257 */ + if (m >= 3 && m <= 257) { + unsigned new_error = ((Ftarget * n) - (Fref * m)) >= 0 ? 
+ ((Ftarget * n) - (Fref * m)) : ((Fref * m) - (Ftarget * n)); + if (new_error < best_error) { + best_n = n; + best_m = m; + best_error = new_error; + } + } + /* But if VLD = 4, then 4m <= 1028 */ + else if (m <= 1028) { + /* remember there are still only 8-bits of precision in m, so + * avoid over-optimistic error calculations */ + unsigned new_error = ((Ftarget * n) - (Fref * (m & ~3))) >= 0 ? + ((Ftarget * n) - (Fref * (m & ~3))) : ((Fref * (m & ~3)) - (Ftarget * n)); + if (new_error < best_error) { + best_n = n; + best_m = m; + best_error = new_error; + } + } + } + if (best_m > 257) + best_m >>= 2; /* divide m by 4, and leave VCO loop divide at 4 */ + else + divisor |= 4; /* or set VCO loop divide to 1 */ + *dclk2_m = best_m - 2; + *dclk2_n = best_n - 2; + *dclk2_div = divisor; + *ppixclock = pixclock; + return; +} + +static void asiliant_set_timing(struct fb_info *p) +{ + unsigned hd = p->var.xres / 8; + unsigned hs = (p->var.xres + p->var.right_margin) / 8; + unsigned he = (p->var.xres + p->var.right_margin + p->var.hsync_len) / 8; + unsigned ht = (p->var.left_margin + p->var.xres + p->var.right_margin + p->var.hsync_len) / 8; + unsigned vd = p->var.yres; + unsigned vs = p->var.yres + p->var.lower_margin; + unsigned ve = p->var.yres + p->var.lower_margin + p->var.vsync_len; + unsigned vt = p->var.upper_margin + p->var.yres + p->var.lower_margin + p->var.vsync_len; + unsigned wd = (p->var.xres_virtual * ((p->var.bits_per_pixel+7)/8)) / 8; + + if ((p->var.xres == 640) && (p->var.yres == 480) && (p->var.pixclock == 39722)) { + write_fr(0x01, 0x02); /* LCD */ + } else { + write_fr(0x01, 0x01); /* CRT */ + } + + write_cr(0x11, (ve - 1) & 0x0f); + write_cr(0x00, (ht - 5) & 0xff); + write_cr(0x01, hd - 1); + write_cr(0x02, hd); + write_cr(0x03, ((ht - 1) & 0x1f) | 0x80); + write_cr(0x04, hs); + write_cr(0x05, (((ht - 1) & 0x20) <<2) | (he & 0x1f)); + write_cr(0x3c, (ht - 1) & 0xc0); + write_cr(0x06, (vt - 2) & 0xff); + write_cr(0x30, (vt - 2) >> 8); + write_cr(0x07, 0x00); + write_cr(0x08, 0x00); + write_cr(0x09, 0x00); + write_cr(0x10, (vs - 1) & 0xff); + write_cr(0x32, ((vs - 1) >> 8) & 0xf); + write_cr(0x11, ((ve - 1) & 0x0f) | 0x80); + write_cr(0x12, (vd - 1) & 0xff); + write_cr(0x31, ((vd - 1) & 0xf00) >> 8); + write_cr(0x13, wd & 0xff); + write_cr(0x41, (wd & 0xf00) >> 8); + write_cr(0x15, (vs - 1) & 0xff); + write_cr(0x33, ((vs - 1) >> 8) & 0xf); + write_cr(0x38, ((ht - 5) & 0x100) >> 8); + write_cr(0x16, (vt - 1) & 0xff); + write_cr(0x18, 0x00); + + if (p->var.xres == 640) { + writeb(0xc7, mmio_base + 0x784); /* set misc output reg */ + } else { + writeb(0x07, mmio_base + 0x784); /* set misc output reg */ + } +} + +static int asiliantfb_check_var(struct fb_var_screeninfo *var, + struct fb_info *p) +{ + unsigned long Ftarget, ratio, remainder; + + ratio = 1000000 / var->pixclock; + remainder = 1000000 % var->pixclock; + Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock; + + /* First check the constraint that the maximum post-VCO divisor is 32, + * and the maximum Fvco is 220MHz */ + if (Ftarget > 220000000 || Ftarget < 3125000) { + printk(KERN_ERR "asiliantfb dotclock must be between 3.125 and 220MHz\n"); + return -ENXIO; + } + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + + if (var->bits_per_pixel == 24) { + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = var->blue.length = var->green.length = 8; + } else if (var->bits_per_pixel == 16) { + switch (var->red.offset) { + case 11: + var->green.length = 6; + 
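
For the 640x480 LCD mode the driver special-cases (pixclock = 39722 ps), the target dot clock works out to 10^12 / 39722, about 25.175 MHz. asiliant_calc_dclk2() then doubles it twice (two VCO post-divider steps) to roughly 100.7 MHz before searching for m and n with Fvco close to Fref * m / n. A small standalone check of the target figure, done with 64-bit arithmetic to sidestep intermediate overflow (the driver itself keeps to 32-bit integer steps):

#include <stdio.h>

int main(void)
{
    unsigned long long pixclock = 39722;   /* picoseconds per pixel */
    unsigned long long ftarget = 1000000000000ULL / pixclock;

    /* prints: 25174966  (~25.175 MHz, the standard 640x480@60 dot clock) */
    printf("%llu\n", ftarget);
    return 0;
}
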
break; + case 10: + var->green.length = 5; + break; + default: + return -EINVAL; + } + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = var->blue.length = 5; + } else if (var->bits_per_pixel == 8) { + var->red.offset = var->green.offset = var->blue.offset = 0; + var->red.length = var->green.length = var->blue.length = 8; + } + return 0; +} + +static int asiliantfb_set_par(struct fb_info *p) +{ + u8 dclk2_m; /* Holds m-2 value for register */ + u8 dclk2_n; /* Holds n-2 value for register */ + u8 dclk2_div; /* Holds divisor bitmask */ + + /* Set pixclock */ + asiliant_calc_dclk2(&p->var.pixclock, &dclk2_m, &dclk2_n, &dclk2_div); + + /* Set color depth */ + if (p->var.bits_per_pixel == 24) { + write_xr(0x81, 0x16); /* 24 bit packed color mode */ + write_xr(0x82, 0x00); /* Disable palettes */ + write_xr(0x20, 0x20); /* 24 bit blitter mode */ + } else if (p->var.bits_per_pixel == 16) { + if (p->var.red.offset == 11) + write_xr(0x81, 0x15); /* 16 bit color mode */ + else + write_xr(0x81, 0x14); /* 15 bit color mode */ + write_xr(0x82, 0x00); /* Disable palettes */ + write_xr(0x20, 0x10); /* 16 bit blitter mode */ + } else if (p->var.bits_per_pixel == 8) { + write_xr(0x0a, 0x02); /* Linear */ + write_xr(0x81, 0x12); /* 8 bit color mode */ + write_xr(0x82, 0x00); /* Graphics gamma enable */ + write_xr(0x20, 0x00); /* 8 bit blitter mode */ + } + p->fix.line_length = p->var.xres * (p->var.bits_per_pixel >> 3); + p->fix.visual = (p->var.bits_per_pixel == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; + write_xr(0xc4, dclk2_m); + write_xr(0xc5, dclk2_n); + write_xr(0xc7, dclk2_div); + /* Set up the CR registers */ + asiliant_set_timing(p); + return 0; +} + +static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *p) +{ + if (regno > 255) + return 1; + red >>= 8; + green >>= 8; + blue >>= 8; + + /* Set hardware palete */ + writeb(regno, mmio_base + 0x790); + udelay(1); + writeb(red, mmio_base + 0x791); + writeb(green, mmio_base + 0x791); + writeb(blue, mmio_base + 0x791); + + switch(p->var.bits_per_pixel) { + case 15: + if (regno < 16) { + ((u32 *)(p->pseudo_palette))[regno] = + ((red & 0xf8) << 7) | + ((green & 0xf8) << 2) | + ((blue & 0xf8) >> 3); + } + break; + case 16: + if (regno < 16) { + ((u32 *)(p->pseudo_palette))[regno] = + ((red & 0xf8) << 8) | + ((green & 0xfc) << 3) | + ((blue & 0xf8) >> 3); + } + break; + case 24: + if (regno < 24) { + ((u32 *)(p->pseudo_palette))[regno] = + (red << 16) | + (green << 8) | + (blue); + } + break; + } + return 0; +} + +struct chips_init_reg { + unsigned char addr; + unsigned char data; +}; + +#define N_ELTS(x) (sizeof(x) / sizeof(x[0])) + +static struct chips_init_reg chips_init_sr[] = +{ + {0x00, 0x03}, /* Reset register */ + {0x01, 0x01}, /* Clocking mode */ + {0x02, 0x0f}, /* Plane mask */ + {0x04, 0x0e} /* Memory mode */ +}; + +static struct chips_init_reg chips_init_gr[] = +{ + {0x03, 0x00}, /* Data rotate */ + {0x05, 0x00}, /* Graphics mode */ + {0x06, 0x01}, /* Miscellaneous */ + {0x08, 0x00} /* Bit mask */ +}; + +static struct chips_init_reg chips_init_ar[] = +{ + {0x10, 0x01}, /* Mode control */ + {0x11, 0x00}, /* Overscan */ + {0x12, 0x0f}, /* Memory plane enable */ + {0x13, 0x00} /* Horizontal pixel panning */ +}; + +static struct chips_init_reg chips_init_cr[] = +{ + {0x0c, 0x00}, /* Start address high */ + {0x0d, 0x00}, /* Start address low */ + {0x40, 0x00}, /* Extended Start Address */ + {0x41, 0x00}, /* Extended Start Address */ + {0x14, 0x00}, /* Underline location 
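
asiliantfb_setcolreg() above keeps a 16-entry pseudo-palette for the truecolor modes; in 16 bpp an 8-bit-per-channel colour is squeezed into one 5-6-5 pixel. A quick standalone check of that packing for a hypothetical colour:

#include <stdio.h>

int main(void)
{
    unsigned red = 0xff, green = 0x80, blue = 0x40;   /* already scaled to 8 bits */
    unsigned pixel = ((red & 0xf8) << 8) | ((green & 0xfc) << 3) | ((blue & 0xf8) >> 3);

    /* prints: fc08  (5 bits red, 6 bits green, 5 bits blue) */
    printf("%04x\n", pixel);
    return 0;
}
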
*/ + {0x17, 0xe3}, /* CRT mode control */ + {0x70, 0x00} /* Interlace control */ +}; + + +static struct chips_init_reg chips_init_fr[] = +{ + {0x01, 0x02}, + {0x03, 0x08}, + {0x08, 0xcc}, + {0x0a, 0x08}, + {0x18, 0x00}, + {0x1e, 0x80}, + {0x40, 0x83}, + {0x41, 0x00}, + {0x48, 0x13}, + {0x4d, 0x60}, + {0x4e, 0x0f}, + + {0x0b, 0x01}, + + {0x21, 0x51}, + {0x22, 0x1d}, + {0x23, 0x5f}, + {0x20, 0x4f}, + {0x34, 0x00}, + {0x24, 0x51}, + {0x25, 0x00}, + {0x27, 0x0b}, + {0x26, 0x00}, + {0x37, 0x80}, + {0x33, 0x0b}, + {0x35, 0x11}, + {0x36, 0x02}, + {0x31, 0xea}, + {0x32, 0x0c}, + {0x30, 0xdf}, + {0x10, 0x0c}, + {0x11, 0xe0}, + {0x12, 0x50}, + {0x13, 0x00}, + {0x16, 0x03}, + {0x17, 0xbd}, + {0x1a, 0x00}, +}; + + +static struct chips_init_reg chips_init_xr[] = +{ + {0xce, 0x00}, /* set default memory clock */ + {0xcc, 200 }, /* MCLK ratio M */ + {0xcd, 18 }, /* MCLK ratio N */ + {0xce, 0x90}, /* MCLK divisor = 2 */ + + {0xc4, 209 }, + {0xc5, 118 }, + {0xc7, 32 }, + {0xcf, 0x06}, + {0x09, 0x01}, /* IO Control - CRT controller extensions */ + {0x0a, 0x02}, /* Frame buffer mapping */ + {0x0b, 0x01}, /* PCI burst write */ + {0x40, 0x03}, /* Memory access control */ + {0x80, 0x82}, /* Pixel pipeline configuration 0 */ + {0x81, 0x12}, /* Pixel pipeline configuration 1 */ + {0x82, 0x08}, /* Pixel pipeline configuration 2 */ + + {0xd0, 0x0f}, + {0xd1, 0x01}, +}; + +static void __init chips_hw_init(struct fb_info *p) +{ + int i; + + for (i = 0; i < N_ELTS(chips_init_xr); ++i) + write_xr(chips_init_xr[i].addr, chips_init_xr[i].data); + write_xr(0x81, 0x12); + write_xr(0x82, 0x08); + write_xr(0x20, 0x00); + for (i = 0; i < N_ELTS(chips_init_sr); ++i) + write_sr(chips_init_sr[i].addr, chips_init_sr[i].data); + for (i = 0; i < N_ELTS(chips_init_gr); ++i) + write_gr(chips_init_gr[i].addr, chips_init_gr[i].data); + for (i = 0; i < N_ELTS(chips_init_ar); ++i) + write_ar(chips_init_ar[i].addr, chips_init_ar[i].data); + /* Enable video output in attribute index register */ + writeb(0x20, mmio_base + 0x780); + for (i = 0; i < N_ELTS(chips_init_cr); ++i) + write_cr(chips_init_cr[i].addr, chips_init_cr[i].data); + for (i = 0; i < N_ELTS(chips_init_fr); ++i) + write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); +} + +static struct fb_fix_screeninfo asiliantfb_fix __initdata = { + .id = "Asiliant 69000", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_PSEUDOCOLOR, + .accel = FB_ACCEL_NONE, + .line_length = 640, + .smem_len = 0x200000, /* 2MB */ +}; + +static struct fb_var_screeninfo asiliantfb_var __initdata = { + .xres = 640, + .yres = 480, + .xres_virtual = 640, + .yres_virtual = 480, + .bits_per_pixel = 8, + .red = { .length = 8 }, + .green = { .length = 8 }, + .blue = { .length = 8 }, + .height = -1, + .width = -1, + .vmode = FB_VMODE_NONINTERLACED, + .pixclock = 39722, + .left_margin = 48, + .right_margin = 16, + .upper_margin = 33, + .lower_margin = 10, + .hsync_len = 96, + .vsync_len = 2, +}; + +static void __init init_asiliant(struct fb_info *p, unsigned long addr) +{ + p->fix = asiliantfb_fix; + p->fix.smem_start = addr; + p->var = asiliantfb_var; + p->fbops = &asiliantfb_ops; + p->flags = FBINFO_FLAG_DEFAULT; + + fb_alloc_cmap(&p->cmap, 256, 0); + + if (register_framebuffer(p) < 0) { + printk(KERN_ERR "C&T 69000 framebuffer failed to register\n"); + return; + } + + printk(KERN_INFO "fb%d: Asiliant 69000 frame buffer (%dK RAM detected)\n", + p->node, p->fix.smem_len / 1024); + + writeb(0xff, mmio_base + 0x78c); + chips_hw_init(p); +} + +static int __devinit +asiliantfb_pci_init(struct pci_dev *dp, const 
struct pci_device_id *ent) +{ + unsigned long addr, size; + struct fb_info *p; + + if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) + return -ENODEV; + addr = pci_resource_start(dp, 0); + size = pci_resource_len(dp, 0); + if (addr == 0) + return -ENODEV; + if (!request_mem_region(addr, size, "asiliantfb")) + return -EBUSY; + + p = framebuffer_alloc(sizeof(u32) * 256, &dp->dev); + if (!p) { + release_mem_region(addr, size); + return -ENOMEM; + } + p->pseudo_palette = p->par; + p->par = NULL; + + p->screen_base = ioremap(addr, 0x800000); + if (p->screen_base == NULL) { + release_mem_region(addr, size); + framebuffer_release(p); + return -ENOMEM; + } + + pci_write_config_dword(dp, 4, 0x02800083); + writeb(3, addr + 0x400784); + + init_asiliant(p, addr); + + /* Clear the entire framebuffer */ + memset(p->screen_base, 0, 0x200000); + + pci_set_drvdata(dp, p); + return 0; +} + +static void __devexit asiliantfb_remove(struct pci_dev *dp) +{ + struct fb_info *p = pci_get_drvdata(dp); + + unregister_framebuffer(p); + iounmap(p->screen_base); + release_mem_region(pci_resource_start(dp, 0), pci_resource_len(dp, 0)); + pci_set_drvdata(dp, NULL); + framebuffer_release(p); +} + +static struct pci_device_id asiliantfb_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_CT, PCI_DEVICE_ID_CT_69000, PCI_ANY_ID, PCI_ANY_ID }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, asiliantfb_pci_tbl); + +static struct pci_driver asiliantfb_driver = { + .name = "asiliantfb", + .id_table = asiliantfb_pci_tbl, + .probe = asiliantfb_pci_init, + .remove = __devexit_p(asiliantfb_remove), +}; + +int __init asiliantfb_init(void) +{ + return pci_module_init(&asiliantfb_driver); +} + +static void __exit asiliantfb_exit(void) +{ + pci_unregister_driver(&asiliantfb_driver); +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c new file mode 100644 index 000000000..2afc4148b --- /dev/null +++ b/drivers/video/gbefb.c @@ -0,0 +1,1200 @@ +/* + * SGI GBE frame buffer driver + * + * Copyright (C) 1999 Silicon Graphics, Inc. - Jeffrey Newquist + * Copyright (C) 2002 Vivien Chappelier + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_X86 +#include +#endif +#ifdef CONFIG_MIPS +#include +#endif +#include +#include +#include + +#include
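
asiliantfb_pci_init() above acquires its resources in order and releases them in reverse on any failure, with asiliantfb_remove() mirroring the same ordering. A stripped-down sketch of just that skeleton (error paths only, assumed helper name, not the actual driver code):

#include <linux/pci.h>
#include <linux/fb.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <asm/io.h>

static int example_probe(struct pci_dev *dp)
{
    unsigned long addr = pci_resource_start(dp, 0);
    unsigned long size = pci_resource_len(dp, 0);
    struct fb_info *p;

    if (!request_mem_region(addr, size, "asiliantfb"))
        return -EBUSY;                  /* someone else owns the aperture */

    p = framebuffer_alloc(sizeof(u32) * 256, &dp->dev);
    if (!p)
        goto err_region;

    p->screen_base = ioremap(addr, 0x800000);
    if (!p->screen_base)
        goto err_release_fb;

    return 0;                           /* register_framebuffer() etc. would follow */

err_release_fb:
    framebuffer_release(p);
err_region:
    release_mem_region(addr, size);
    return -ENOMEM;
}
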