From 3b80810666f89a2ad2905d2983647f0d12217209 Mon Sep 17 00:00:00 2001 From: Planet-Lab Support Date: Fri, 21 Jan 2005 03:33:56 +0000 Subject: [PATCH] This commit was manufactured by cvs2svn to create branch 'jdike'. --- Documentation/arm/IXP4xx | 155 + Documentation/arm/Sharp-LH/SDRAM | 51 + Documentation/arm/VFP/release-notes.txt | 55 + Documentation/block/as-iosched.txt | 165 + Documentation/block/deadline-iosched.txt | 78 + Documentation/cpu-freq/amd-powernow.txt | 38 + Documentation/device-mapper/dm-io.txt | 75 + Documentation/device-mapper/kcopyd.txt | 47 + Documentation/device-mapper/linear.txt | 61 + Documentation/device-mapper/striped.txt | 58 + Documentation/device-mapper/zero.txt | 37 + Documentation/fb/sisfb.txt | 158 + .../filesystems/automount-support.txt | 118 + Documentation/hpet.txt | 298 + Documentation/i2c/i2c-parport | 156 + Documentation/powerpc/hvcs.txt | 534 ++ Documentation/powerpc/mpc52xx.txt | 48 + Documentation/sound/alsa/Audigy-mixer.txt | 345 + Documentation/usb/sn9c102.txt | 276 + arch/arm/boot/bootp/initrd.S | 6 + arch/arm/boot/bootp/kernel.S | 6 + arch/arm/boot/compressed/piggy.S | 6 + arch/arm/common/locomo.c | 762 +++ arch/arm/common/time-acorn.c | 67 + arch/arm/configs/ixp4xx_defconfig | 1081 +++ arch/arm/configs/mainstone_defconfig | 743 ++ arch/arm/configs/smdk2410_defconfig | 667 ++ arch/arm/mach-footbridge/time.c | 296 + arch/arm/mach-integrator/clock.c | 138 + arch/arm/mach-integrator/clock.h | 25 + arch/arm/mach-ixp4xx/Makefile | 10 + arch/arm/mach-ixp4xx/common-pci.c | 543 ++ arch/arm/mach-ixp4xx/common.c | 263 + arch/arm/mach-ixp4xx/coyote-pci.c | 69 + arch/arm/mach-ixp4xx/ixdp425-pci.c | 84 + arch/arm/mach-ixp4xx/prpmc1100-pci.c | 119 + arch/arm/mach-ixp4xx/prpmc1100-setup.c | 90 + arch/arm/mach-lh7a40x/time.c | 67 + arch/arm/mach-omap/time.c | 217 + arch/arm/mach-pxa/time.c | 124 + arch/arm/mach-s3c2410/gpio.c | 98 + arch/arm/mach-s3c2410/mach-smdk2410.c | 109 + arch/arm/mach-s3c2410/time.c | 176 + arch/arm/mach-sa1100/collie.c | 144 + arch/arm/mach-sa1100/time.c | 119 + arch/arm/mach-versatile/clock.c | 146 + arch/arm/mach-versatile/clock.h | 25 + arch/arm/vfp/Makefile | 12 + arch/arm/vfp/entry.S | 45 + arch/arm/vfp/vfp.h | 333 + arch/arm/vfp/vfpdouble.c | 1186 ++++ arch/arm/vfp/vfphw.S | 210 + arch/arm/vfp/vfpinstr.h | 88 + arch/arm/vfp/vfpmodule.c | 288 + arch/arm/vfp/vfpsingle.c | 1224 ++++ arch/cris/arch-v10/drivers/ide.c | 945 +++ arch/cris/kernel/crisksyms.c | 104 + arch/i386/crypto/Makefile | 9 + arch/i386/crypto/aes-i586-asm.S | 341 + arch/i386/crypto/aes.c | 520 ++ arch/i386/lib/bitops.c | 70 + arch/i386/mach-generic/es7000.c | 28 + arch/ia64/configs/sim_defconfig | 535 ++ arch/ia64/dig/topology.c | 43 + arch/ia64/lib/bitop.c | 88 + arch/mips/au1000/common/cputable.c | 56 + arch/mips/configs/ocelot_g_defconfig | 592 ++ arch/mips/kernel/module.c | 53 + arch/mips/mm/tlb-r8k.c | 253 + arch/mips/mm/tlb64-glue-r4k.S | 41 + arch/mips/mm/tlb64-glue-sb1.S | 66 + arch/mips/mm/tlbex32-r3k.S | 224 + arch/mips/mm/tlbex32-r4k.S | 524 ++ arch/mips/mm/tlbex64-r4k.S | 203 + arch/mips/pci/fixup-jaguar.c | 42 + arch/mips/pci/fixup-mpc30x.c | 48 + arch/mips/pci/fixup-ocelot-c.c | 39 + arch/mips/pci/fixup-ocelot-g.c | 35 + arch/mips/pci/fixup-tb0219.c | 64 + arch/mips/pci/ops-marvell.c | 93 + arch/mips/pci/ops-titan-ht.c | 125 + arch/mips/pci/ops-vr41xx.c | 126 + arch/mips/pci/pci-yosemite.c | 37 + arch/mips/pmc-sierra/yosemite/dbg_io.c | 184 + arch/mips/pmc-sierra/yosemite/i2c-yosemite.c | 188 + arch/mips/pmc-sierra/yosemite/py-console.c | 130 + 
arch/mips/vr41xx/tanbac-tb0229/tb0219.c | 44 + arch/parisc/configs/n4000_defconfig | 905 +++ arch/parisc/kernel/unwind.c | 295 + arch/parisc/lib/debuglocks.c | 227 + arch/ppc/boot/simple/mpc52xx_tty.c | 138 + arch/ppc/configs/ads8272_defconfig | 583 ++ arch/ppc/configs/lite5200_defconfig | 436 ++ arch/ppc/configs/rpx8260_defconfig | 556 ++ arch/ppc/kernel/dma-mapping.c | 439 ++ arch/ppc/kernel/head_e500.S | 1329 ++++ arch/ppc/kernel/vecemu.c | 346 + arch/ppc/kernel/vector.S | 217 + arch/ppc/lib/rheap.c | 692 ++ arch/ppc/mm/fsl_booke_mmu.c | 236 + arch/ppc/oprofile/Kconfig | 23 + arch/ppc/oprofile/Makefile | 9 + arch/ppc/oprofile/init.c | 23 + arch/ppc/platforms/4xx/bubinga.c | 263 + arch/ppc/platforms/4xx/bubinga.h | 69 + arch/ppc/platforms/4xx/ibm405ep.c | 134 + arch/ppc/platforms/4xx/ibm405ep.h | 148 + arch/ppc/platforms/85xx/Kconfig | 44 + arch/ppc/platforms/85xx/Makefile | 7 + arch/ppc/platforms/85xx/mpc8540.c | 97 + arch/ppc/platforms/85xx/mpc8540_ads.c | 238 + arch/ppc/platforms/85xx/mpc8540_ads.h | 30 + arch/ppc/platforms/85xx/mpc8555.c | 88 + arch/ppc/platforms/85xx/mpc8555_cds.h | 26 + arch/ppc/platforms/85xx/mpc8560.c | 74 + arch/ppc/platforms/85xx/mpc8560_ads.c | 241 + arch/ppc/platforms/85xx/mpc8560_ads.h | 27 + arch/ppc/platforms/85xx/mpc85xx_ads_common.c | 237 + arch/ppc/platforms/85xx/mpc85xx_ads_common.h | 50 + arch/ppc/platforms/85xx/mpc85xx_cds_common.c | 473 ++ arch/ppc/platforms/85xx/mpc85xx_cds_common.h | 78 + arch/ppc/platforms/85xx/sbc8560.c | 249 + arch/ppc/platforms/85xx/sbc8560.h | 48 + arch/ppc/platforms/85xx/sbc85xx.c | 215 + arch/ppc/platforms/85xx/sbc85xx.h | 55 + arch/ppc/platforms/lite5200.c | 152 + arch/ppc/platforms/lite5200.h | 23 + arch/ppc/platforms/mpc5200.c | 29 + arch/ppc/platforms/pq2ads.h | 90 + arch/ppc/platforms/pq2ads_setup.c | 66 + arch/ppc/platforms/rpx8260.c | 65 + arch/ppc/platforms/rpx8260.h | 74 + arch/ppc/platforms/sbc82xx.c | 113 + arch/ppc/platforms/sbc82xx.h | 24 + arch/ppc/syslib/cpm2_common.c | 205 + arch/ppc/syslib/cpm2_pic.c | 131 + arch/ppc/syslib/cpm2_pic.h | 13 + arch/ppc/syslib/dcr.S | 41 + arch/ppc/syslib/ibm440gx_common.c | 212 + arch/ppc/syslib/ibm440gx_common.h | 54 + arch/ppc/syslib/ibm44x_common.h | 36 + arch/ppc/syslib/m8260_pci.c | 194 + arch/ppc/syslib/m8260_pci.h | 75 + arch/ppc/syslib/m8260_pci_erratum9.c | 474 ++ arch/ppc/syslib/mpc52xx_pic.c | 252 + arch/ppc/syslib/mpc52xx_setup.c | 228 + arch/ppc/syslib/ocp.c | 485 ++ arch/ppc/syslib/ppc4xx_sgdma.c | 455 ++ arch/ppc/syslib/ppc85xx_common.c | 46 + arch/ppc/syslib/ppc85xx_common.h | 29 + arch/ppc/syslib/ppc85xx_setup.c | 341 + arch/ppc/syslib/ppc85xx_setup.h | 67 + arch/ppc64/kernel/hvcserver.c | 219 + arch/ppc64/kernel/vecemu.c | 346 + arch/ppc64/kernel/vector.S | 172 + arch/ppc64/lib/e2a.c | 108 + arch/ppc64/lib/locks.c | 285 + arch/ppc64/lib/usercopy.c | 41 + arch/ppc64/mm/slb.c | 136 + arch/ppc64/mm/slb_low.S | 179 + arch/s390/kernel/vtime.c | 480 ++ arch/s390/lib/string.c | 405 ++ arch/sh/boards/renesas/hs7751rvoip/Makefile | 12 + arch/sh/boards/renesas/hs7751rvoip/io.c | 310 + arch/sh/boards/renesas/hs7751rvoip/irq.c | 122 + arch/sh/boards/renesas/hs7751rvoip/led.c | 27 + arch/sh/boards/renesas/hs7751rvoip/mach.c | 55 + arch/sh/boards/renesas/hs7751rvoip/pci.c | 150 + arch/sh/boards/renesas/hs7751rvoip/setup.c | 89 + arch/sh/boards/renesas/rts7751r2d/Makefile | 10 + arch/sh/boards/renesas/rts7751r2d/io.c | 319 + arch/sh/boards/renesas/rts7751r2d/irq.c | 135 + arch/sh/boards/renesas/rts7751r2d/led.c | 67 + arch/sh/boards/renesas/rts7751r2d/mach.c | 68 + 
arch/sh/boards/renesas/rts7751r2d/setup.c | 31 + arch/sh/boards/renesas/systemh/Makefile | 13 + arch/sh/boards/renesas/systemh/io.c | 283 + arch/sh/boards/renesas/systemh/irq.c | 111 + arch/sh/boards/renesas/systemh/setup.c | 80 + arch/sh/boards/se/7300/Makefile | 7 + arch/sh/boards/se/7300/io.c | 261 + arch/sh/boards/se/7300/irq.c | 37 + arch/sh/boards/se/7300/led.c | 69 + arch/sh/boards/se/7300/setup.c | 66 + arch/sh/cchips/voyagergx/Makefile | 8 + arch/sh/cchips/voyagergx/consistent.c | 126 + arch/sh/cchips/voyagergx/irq.c | 194 + arch/sh/cchips/voyagergx/setup.c | 37 + arch/sh/configs/rts7751r2d_defconfig | 809 +++ arch/sh/configs/se7300_defconfig | 461 ++ arch/sh/drivers/dma/dma-sysfs.c | 133 + arch/sh/drivers/pci/fixups-rts7751r2d.c | 32 + arch/sh/drivers/pci/ops-rts7751r2d.c | 74 + arch/sh/kernel/cpu/adc.c | 36 + arch/sh/kernel/cpu/bus.c | 195 + arch/sh/kernel/early_printk.c | 135 + arch/sh/ramdisk/Makefile | 19 + arch/sh/ramdisk/ld.script | 9 + arch/sh64/Kconfig | 320 + arch/sh64/Makefile | 112 + arch/sh64/boot/Makefile | 20 + arch/sh64/boot/compressed/Makefile | 46 + arch/sh64/boot/compressed/cache.c | 39 + arch/sh64/boot/compressed/head.S | 164 + arch/sh64/boot/compressed/install.sh | 56 + arch/sh64/boot/compressed/misc.c | 251 + arch/sh64/boot/compressed/vmlinux.lds.S | 65 + arch/sh64/configs/cayman_defconfig | 660 ++ arch/sh64/defconfig | 668 ++ arch/sh64/kernel/Makefile | 38 + arch/sh64/kernel/alphanum.c | 45 + arch/sh64/kernel/asm-offsets.c | 33 + arch/sh64/kernel/dma.c | 297 + arch/sh64/kernel/early_printk.c | 107 + arch/sh64/kernel/entry.S | 2101 ++++++ arch/sh64/kernel/fpu.c | 170 + arch/sh64/kernel/head.S | 373 + arch/sh64/kernel/init_task.c | 46 + arch/sh64/kernel/irq.c | 720 ++ arch/sh64/kernel/irq_intc.c | 272 + arch/sh64/kernel/led.c | 41 + arch/sh64/kernel/pci-dma.c | 50 + arch/sh64/kernel/pci_sh5.c | 546 ++ arch/sh64/kernel/pci_sh5.h | 107 + arch/sh64/kernel/pcibios.c | 168 + arch/sh64/kernel/process.c | 963 +++ arch/sh64/kernel/ptrace.c | 362 + arch/sh64/kernel/semaphore.c | 140 + arch/sh64/kernel/setup.c | 389 ++ arch/sh64/kernel/sh_ksyms.c | 84 + arch/sh64/kernel/signal.c | 737 ++ arch/sh64/kernel/switchto.S | 199 + arch/sh64/kernel/sys_sh64.c | 286 + arch/sh64/kernel/syscalls.S | 340 + arch/sh64/kernel/time.c | 637 ++ arch/sh64/kernel/traps.c | 958 +++ arch/sh64/kernel/unwind.c | 326 + arch/sh64/kernel/vmlinux.lds.S | 183 + arch/sh64/lib/Makefile | 19 + arch/sh64/lib/c-checksum.c | 231 + arch/sh64/lib/copy_user_memcpy.S | 213 + arch/sh64/lib/dbg.c | 394 ++ arch/sh64/lib/io.c | 207 + arch/sh64/lib/memcpy.c | 82 + arch/sh64/lib/old-checksum.c | 17 + arch/sh64/lib/page_clear.S | 51 + arch/sh64/lib/page_copy.S | 82 + arch/sh64/lib/panic.c | 58 + arch/sh64/lib/udelay.c | 60 + arch/sh64/mach-cayman/Makefile | 11 + arch/sh64/mach-cayman/irq.c | 196 + arch/sh64/mach-cayman/led.c | 51 + arch/sh64/mach-cayman/setup.c | 209 + arch/sh64/mach-harp/Makefile | 14 + arch/sh64/mach-harp/setup.c | 139 + arch/sh64/mach-romram/Makefile | 14 + arch/sh64/mach-romram/setup.c | 142 + arch/sh64/mach-sim/Makefile | 14 + arch/sh64/mach-sim/setup.c | 164 + arch/sh64/mm/Makefile | 44 + arch/sh64/mm/cache.c | 1055 +++ arch/sh64/mm/extable.c | 80 + arch/sh64/mm/fault.c | 591 ++ arch/sh64/mm/hugetlbpage.c | 264 + arch/sh64/mm/init.c | 199 + arch/sh64/mm/ioremap.c | 469 ++ arch/sh64/mm/tlb.c | 166 + arch/sh64/mm/tlbmiss.c | 282 + arch/sh64/oprofile/Kconfig | 23 + arch/sh64/oprofile/Makefile | 12 + arch/sh64/oprofile/op_model_null.c | 23 + arch/sparc64/lib/clear_page.S | 105 + 
arch/sparc64/lib/copy_page.S | 239 + arch/sparc64/lib/find_bit.c | 125 + arch/sparc64/lib/splock.S | 23 + arch/sparc64/mm/tlb.c | 158 + arch/um/drivers/cow_kern.c | 630 ++ arch/um/drivers/cow_sys.h | 48 + arch/um/drivers/net_kern.c.orig | 870 +++ arch/um/include/aio.h | 36 + arch/um/include/filehandle.h | 51 + arch/um/include/skas_ptregs.h | 26 + arch/um/kernel/filehandle.c | 250 + arch/um/kernel/smp.c.orig | 302 + arch/um/kernel/user_syms.c | 113 - arch/um/os-Linux/aio.c | 404 ++ arch/um/os-Linux/time.c | 21 + arch/um/sys-i386/bitops.c | 70 + arch/um/sys-i386/extable.c | 30 - arch/um/sys-i386/semaphore.c | 301 + arch/um/sys-i386/time.c | 24 + arch/um/util/mk_constants | Bin 0 -> 15114 bytes arch/um/util/mk_task | Bin 0 -> 31218 bytes arch/x86_64/kernel/domain.c | 93 + crypto/khazad.c | 915 +++ crypto/tea.c | 248 + drivers/block/sx8.c | 1763 +++++ drivers/char/drm/drm_irq.h | 371 + drivers/char/drm/drm_pciids.h | 203 + drivers/char/ds1286.c | 578 ++ drivers/char/hpet.c | 1076 +++ drivers/char/hvcs.c | 1579 +++++ drivers/char/ip27-rtc.c | 327 + drivers/char/lcd.h | 184 + drivers/char/watchdog/ixp2000_wdt.c | 219 + drivers/char/watchdog/ixp4xx_wdt.c | 233 + drivers/firmware/pcdp.c | 213 + drivers/firmware/pcdp.h | 80 + drivers/i2c/busses/i2c-ixp4xx.c | 181 + drivers/i2c/chips/adm1025.c | 570 ++ drivers/i2c/chips/adm1031.c | 985 +++ drivers/i2c/chips/lm77.c | 413 ++ drivers/i2c/chips/max1619.c | 378 ++ drivers/i2c/chips/rtc8564.c | 396 ++ drivers/i2c/chips/rtc8564.h | 78 + drivers/ide/h8300/ide-h8300.c | 119 + drivers/md/dm-exception-store.c | 648 ++ drivers/md/dm-io.c | 647 ++ drivers/md/dm-io.h | 77 + drivers/md/dm-log.c | 629 ++ drivers/md/dm-log.h | 124 + drivers/md/dm-raid1.c | 1278 ++++ drivers/md/dm-snap.c | 1213 ++++ drivers/md/dm-snap.h | 161 + drivers/md/dm-zero.c | 98 + drivers/md/kcopyd.c | 699 ++ drivers/md/kcopyd.h | 42 + drivers/media/video/ovcamchip/Makefile | 4 + drivers/media/video/ovcamchip/ov6x20.c | 415 ++ drivers/media/video/ovcamchip/ov6x30.c | 374 + drivers/media/video/ovcamchip/ov76be.c | 303 + drivers/media/video/ovcamchip/ov7x10.c | 335 + drivers/media/video/ovcamchip/ov7x20.c | 455 ++ .../media/video/ovcamchip/ovcamchip_core.c | 446 ++ .../media/video/ovcamchip/ovcamchip_priv.h | 87 + drivers/mtd/chips/cfi_util.c | 92 + drivers/mtd/devices/phram.c | 362 + drivers/mtd/maps/db1550-flash.c | 188 + drivers/mtd/maps/db1x00-flash.c | 219 + drivers/mtd/maps/dmv182.c | 150 + drivers/mtd/maps/ichxrom.c | 407 ++ drivers/mtd/maps/integrator-flash-v24.c | 258 + drivers/mtd/maps/ixp4xx.c | 244 + drivers/mtd/maps/mpc1211.c | 81 + drivers/mtd/maps/omap-toto-flash.c | 137 + drivers/mtd/maps/pb1550-flash.c | 204 + drivers/mtd/maps/sbc8240.c | 247 + drivers/mtd/maps/wr_sbc82xx_flash.c | 167 + drivers/mtd/nand/au1550nd.c | 394 ++ drivers/mtd/nand/diskonchip.c | 1237 ++++ drivers/mtd/nand/nand_base.c | 2581 +++++++ drivers/mtd/nand/nand_bbt.c | 1053 +++ drivers/mtd/nand/ppchameleonevb.c | 430 ++ drivers/mtd/nand/toto.c | 221 + drivers/mtd/nand/tx4925ndfmc.c | 442 ++ drivers/mtd/nand/tx4938ndfmc.c | 422 ++ drivers/net/fec_8xx/Kconfig | 14 + drivers/net/fec_8xx/Makefile | 12 + drivers/net/fec_8xx/fec_8xx-netta.c | 153 + drivers/net/fec_8xx/fec_8xx.h | 218 + drivers/net/fec_8xx/fec_main.c | 1275 ++++ drivers/net/fec_8xx/fec_mii.c | 380 ++ drivers/net/ibm_emac/ibm_emac.h | 263 + drivers/net/ibm_emac/ibm_emac_core.h | 146 + drivers/net/ibm_emac/ibm_emac_debug.c | 224 + drivers/net/ibm_emac/ibm_emac_mal.c | 467 ++ drivers/net/ibm_emac/ibm_emac_mal.h | 130 + 
drivers/net/ibm_emac/ibm_emac_phy.c | 297 + drivers/net/ibm_emac/ibm_emac_rgmii.h | 65 + drivers/net/ibm_emac/ibm_emac_tah.h | 48 + drivers/net/ibm_emac/ibm_emac_zmii.h | 93 + drivers/net/ne-h8300.c | 666 ++ drivers/net/smc91x.c | 2172 ++++++ drivers/net/smc91x.h | 866 +++ drivers/net/via-velocity.c | 3277 +++++++++ drivers/net/via-velocity.h | 1885 ++++++ drivers/net/wireless/prism54/prismcompat.h | 46 + drivers/pcmcia/pd6729.c | 732 ++ drivers/pcmcia/pd6729.h | 28 + drivers/pcmcia/pxa2xx_base.h | 3 + drivers/pcmcia/socket_sysfs.c | 161 + drivers/s390/net/ctcdbug.c | 83 + drivers/s390/net/ctcdbug.h | 123 + drivers/scsi/3w-9xxx.c | 2153 ++++++ drivers/scsi/3w-9xxx.h | 704 ++ drivers/scsi/fdomain.h | 24 + drivers/scsi/ipr.c | 6021 +++++++++++++++++ drivers/scsi/ipr.h | 1252 ++++ drivers/scsi/pcmcia/sym53c500_cs.c | 1042 +++ drivers/scsi/qlogicfas408.h | 120 + drivers/scsi/sata_nv.c | 355 + drivers/scsi/sata_promise.h | 154 + drivers/serial/cpm_uart/Makefile | 11 + drivers/serial/cpm_uart/cpm_uart.h | 92 + drivers/serial/cpm_uart/cpm_uart_core.c | 1179 ++++ drivers/serial/cpm_uart/cpm_uart_cpm1.c | 275 + drivers/serial/cpm_uart/cpm_uart_cpm1.h | 45 + drivers/serial/cpm_uart/cpm_uart_cpm2.c | 328 + drivers/serial/cpm_uart/cpm_uart_cpm2.h | 45 + drivers/serial/mpc52xx_uart.c | 869 +++ drivers/serial/serial_lh7a40x.c | 708 ++ drivers/serial/sn_console.c | 1194 ++++ drivers/usb/class/cdc-acm.h | 115 + drivers/usb/core/sysfs.c | 229 + drivers/usb/host/ohci-lh7a404.c | 385 ++ drivers/usb/input/touchkitusb.c | 310 + drivers/usb/media/sn9c102.h | 181 + drivers/usb/media/sn9c102_core.c | 2439 +++++++ drivers/usb/media/sn9c102_pas106b.c | 209 + drivers/usb/media/sn9c102_sensor.h | 270 + drivers/usb/media/sn9c102_tas5110c1b.c | 98 + drivers/usb/media/sn9c102_tas5130d1b.c | 120 + drivers/usb/media/w9968cf_vpp.h | 43 + drivers/usb/misc/phidgetservo.c | 327 + drivers/video/asiliantfb.c | 620 ++ drivers/video/gbefb.c | 1200 ++++ drivers/video/pxafb.h | 129 + drivers/video/riva/rivafb-i2c.c | 209 + drivers/w1/Kconfig | 31 + drivers/w1/Makefile | 9 + drivers/w1/matrox_w1.c | 246 + drivers/w1/w1.c | 623 ++ drivers/w1/w1.h | 112 + drivers/w1/w1_family.c | 133 + drivers/w1/w1_family.h | 65 + drivers/w1/w1_int.c | 207 + drivers/w1/w1_int.h | 36 + drivers/w1/w1_io.c | 138 + drivers/w1/w1_io.h | 35 + drivers/w1/w1_log.h | 38 + drivers/w1/w1_netlink.c | 55 + drivers/w1/w1_netlink.h | 44 + drivers/w1/w1_therm.c | 177 + fs/Makefile.orig | 94 + fs/ext3/resize.c | 956 --- fs/hostfs/externfs.c | 1317 ++++ fs/hostfs/host_file.c | 442 ++ fs/hostfs/host_fs.c | 467 ++ fs/hostfs/humfs.c | 1026 +++ fs/hostfs/meta_fs.c | 520 ++ fs/hostfs/metadata.h | 79 + fs/isofs/export.c | 228 + fs/jffs2/compr.h | 122 + fs/nls/nls_ascii.c | 167 + fs/ntfs/collate.c | 121 + fs/ntfs/collate.h | 48 + fs/ntfs/index.c | 514 ++ fs/ntfs/index.h | 148 + fs/ntfs/quota.c | 115 + fs/ntfs/quota.h | 35 + fs/reiserfs/xattr.c | 1441 ++++ fs/reiserfs/xattr_acl.c | 563 ++ fs/xfs/linux-2.6/kmem.c | 127 + fs/xfs/linux-2.6/kmem.h | 201 + fs/xfs/linux-2.6/mrlock.h | 106 + fs/xfs/linux-2.6/sema.h | 67 + fs/xfs/linux-2.6/sv.h | 89 + fs/xfs/linux-2.6/xfs_aops.c | 1284 ++++ fs/xfs/linux-2.6/xfs_buf.c | 1812 +++++ fs/xfs/linux-2.6/xfs_buf.h | 594 ++ fs/xfs/linux-2.6/xfs_file.c | 546 ++ fs/xfs/linux-2.6/xfs_fs_subr.c | 124 + fs/xfs/linux-2.6/xfs_globals.c | 72 + fs/xfs/linux-2.6/xfs_ioctl.c | 1246 ++++ fs/xfs/linux-2.6/xfs_iops.h | 51 + fs/xfs/linux-2.6/xfs_linux.h | 365 + fs/xfs/linux-2.6/xfs_lrw.c | 1028 +++ fs/xfs/linux-2.6/xfs_lrw.h | 116 + 
fs/xfs/linux-2.6/xfs_stats.c | 132 + fs/xfs/linux-2.6/xfs_super.c | 873 +++ fs/xfs/linux-2.6/xfs_super.h | 129 + fs/xfs/linux-2.6/xfs_sysctl.c | 163 + fs/xfs/linux-2.6/xfs_sysctl.h | 110 + fs/xfs/linux-2.6/xfs_vfs.c | 328 + fs/xfs/linux-2.6/xfs_vfs.h | 208 + fs/xfs/linux-2.6/xfs_vnode.c | 442 ++ fs/xfs/linux-2.6/xfs_vnode.h | 651 ++ include/asm-alpha/setup.h | 6 + include/asm-arm/arch-ixp4xx/dma.h | 52 + include/asm-arm/arch-ixp4xx/io.h | 388 ++ include/asm-arm/arch-ixp4xx/irq.h | 13 + include/asm-arm/arch-ixp4xx/memory.h | 27 + include/asm-arm/arch-ixp4xx/param.h | 3 + include/asm-arm/arch-ixp4xx/platform.h | 116 + include/asm-arm/arch-ixp4xx/serial.h | 27 + include/asm-arm/arch-ixp4xx/system.h | 43 + include/asm-arm/arch-ixp4xx/time.h | 7 + include/asm-arm/arch-ixp4xx/uncompress.h | 64 + include/asm-arm/arch-pxa/pxafb.h | 68 + include/asm-arm/arch-sa1100/collie.h | 150 + include/asm-arm/hardware/clock.h | 121 + include/asm-arm/hardware/locomo.h | 204 + include/asm-arm/mach/time.h | 20 + include/asm-arm/vfp.h | 78 + include/asm-arm/vfpmacros.h | 25 + include/asm-i386/pgtable-2level-defs.h | 20 + include/asm-i386/pgtable-3level-defs.h | 22 + include/asm-ia64/setup.h | 6 + include/asm-mips/gt64240.h | 1235 ++++ .../mach-yosemite/cpu-feature-overrides.h | 38 + include/asm-mips/marvell.h | 57 + include/asm-mips/setup.h | 8 + include/asm-mips/vr41xx/tb0219.h | 42 + include/asm-parisc/numnodes.h | 9 + include/asm-ppc/cpm2.h | 1041 +++ include/asm-ppc/fsl_ocp.h | 54 + include/asm-ppc/immap_85xx.h | 126 + include/asm-ppc/immap_cpm2.h | 648 ++ include/asm-ppc/m8260_pci.h | 186 + include/asm-ppc/mpc52xx.h | 380 ++ include/asm-ppc/mpc52xx_psc.h | 191 + include/asm-ppc/mpc8260_pci9.h | 51 + include/asm-ppc/mpc85xx.h | 128 + include/asm-ppc/ppc4xx_dma.h | 570 ++ include/asm-ppc/rheap.h | 85 + include/asm-ppc64/hvcserver.h | 44 + include/asm-sh/adc.h | 12 + include/asm-sh/bus-sh.h | 65 + include/asm-sh/cpu-sh3/adc.h | 28 + include/asm-sh/fixmap.h | 111 + include/asm-sh/hp6xx/ide.h | 8 + include/asm-sh/hs7751rvoip/hs7751rvoip.h | 47 + include/asm-sh/hs7751rvoip/ide.h | 8 + include/asm-sh/hs7751rvoip/io.h | 39 + include/asm-sh/rts7751r2d/ide.h | 8 + include/asm-sh/rts7751r2d/io.h | 37 + include/asm-sh/rts7751r2d/rts7751r2d.h | 73 + include/asm-sh/rts7751r2d/voyagergx_reg.h | 313 + include/asm-sh/se7300/io.h | 29 + include/asm-sh/se7300/se7300.h | 61 + include/asm-sh/setup.h | 8 + include/asm-sh64/a.out.h | 37 + include/asm-sh64/atomic.h | 126 + include/asm-sh64/bitops.h | 516 ++ include/asm-sh64/bug.h | 7 + include/asm-sh64/bugs.h | 38 + include/asm-sh64/byteorder.h | 49 + include/asm-sh64/cache.h | 141 + include/asm-sh64/cacheflush.h | 44 + include/asm-sh64/cayman.h | 20 + include/asm-sh64/checksum.h | 95 + include/asm-sh64/cpumask.h | 6 + include/asm-sh64/current.h | 28 + include/asm-sh64/delay.h | 11 + include/asm-sh64/div64.h | 6 + include/asm-sh64/dma-mapping.h | 163 + include/asm-sh64/dma.h | 41 + include/asm-sh64/elf.h | 101 + include/asm-sh64/errno.h | 6 + include/asm-sh64/fcntl.h | 7 + include/asm-sh64/hardirq.h | 7 + include/asm-sh64/hardware.h | 45 + include/asm-sh64/hdreg.h | 6 + include/asm-sh64/hw_irq.h | 16 + include/asm-sh64/ide.h | 30 + include/asm-sh64/io.h | 217 + include/asm-sh64/ioctl.h | 83 + include/asm-sh64/ioctls.h | 111 + include/asm-sh64/ipc.h | 6 + include/asm-sh64/ipcbuf.h | 40 + include/asm-sh64/irq.h | 148 + include/asm-sh64/keyboard.h | 74 + include/asm-sh64/kmap_types.h | 7 + include/asm-sh64/linkage.h | 7 + include/asm-sh64/local.h | 7 + include/asm-sh64/mc146818rtc.h | 
7 + include/asm-sh64/mman.h | 6 + include/asm-sh64/mmu.h | 7 + include/asm-sh64/mmu_context.h | 209 + include/asm-sh64/module.h | 12 + include/asm-sh64/msgbuf.h | 42 + include/asm-sh64/namei.h | 24 + include/asm-sh64/page.h | 137 + include/asm-sh64/param.h | 43 + include/asm-sh64/pci.h | 110 + include/asm-sh64/percpu.h | 6 + include/asm-sh64/pgalloc.h | 202 + include/asm-sh64/pgtable.h | 498 ++ include/asm-sh64/platform.h | 69 + include/asm-sh64/poll.h | 36 + include/asm-sh64/posix_types.h | 131 + include/asm-sh64/processor.h | 292 + include/asm-sh64/ptrace.h | 36 + include/asm-sh64/registers.h | 106 + include/asm-sh64/resource.h | 6 + include/asm-sh64/scatterlist.h | 23 + include/asm-sh64/sections.h | 7 + include/asm-sh64/segment.h | 6 + include/asm-sh64/semaphore-helper.h | 101 + include/asm-sh64/semaphore.h | 146 + include/asm-sh64/sembuf.h | 36 + include/asm-sh64/serial.h | 33 + include/asm-sh64/setup.h | 16 + include/asm-sh64/shmbuf.h | 53 + include/asm-sh64/shmparam.h | 20 + include/asm-sh64/sigcontext.h | 30 + include/asm-sh64/siginfo.h | 6 + include/asm-sh64/signal.h | 185 + include/asm-sh64/smp.h | 15 + include/asm-sh64/smplock.h | 77 + include/asm-sh64/socket.h | 6 + include/asm-sh64/sockios.h | 24 + include/asm-sh64/softirq.h | 30 + include/asm-sh64/spinlock.h | 17 + include/asm-sh64/stat.h | 88 + include/asm-sh64/statfs.h | 6 + include/asm-sh64/string.h | 21 + include/asm-sh64/system.h | 194 + include/asm-sh64/termbits.h | 6 + include/asm-sh64/termios.h | 117 + include/asm-sh64/thread_info.h | 82 + include/asm-sh64/timex.h | 36 + include/asm-sh64/tlb.h | 92 + include/asm-sh64/tlbflush.h | 31 + include/asm-sh64/topology.h | 6 + include/asm-sh64/types.h | 76 + include/asm-sh64/uaccess.h | 320 + include/asm-sh64/ucontext.h | 23 + include/asm-sh64/unaligned.h | 28 + include/asm-sh64/unistd.h | 555 ++ include/asm-sh64/user.h | 71 + include/asm-sparc64/cmt.h | 59 + include/asm-um/module-i386.h | 13 + include/asm-um/pgtable.h.orig | 415 ++ include/asm-um/setup.h | 6 + include/asm-um/smplock.h | 6 - include/asm-um/spinlock.h | 10 - include/asm-um/unistd.h.orig | 144 + include/asm-v850/setup.h | 6 + include/linux/crc-ccitt.h | 15 + include/linux/dmi.h | 47 + include/linux/ds1286.h | 54 + include/linux/gfp.h.orig | 128 + include/linux/ghash.h | 236 + include/linux/hpet.h | 133 + include/linux/mempolicy.h | 221 + include/linux/mm.h.orig | 729 ++ include/linux/mtd/physmap.h | 61 + include/linux/netfilter_ipv4/ipt_addrtype.h | 11 + include/linux/netfilter_ipv4/ipt_realm.h | 10 + include/linux/proc_mm.h | 48 + include/linux/reiserfs_acl.h | 91 + include/linux/reiserfs_xattr.h | 132 + include/linux/snmp.h | 266 + include/media/ovcamchip.h | 104 + include/mtd/inftl-user.h | 91 + include/mtd/jffs2-user.h | 35 + include/mtd/mtd-abi.h | 97 + include/mtd/mtd-user.h | 20 + include/mtd/nftl-user.h | 76 + include/net/ip6_checksum.h | 94 + include/net/pkt_act.h | 286 + include/scsi/scsi_dbg.h | 18 + kernel/power/smp.c | 85 + lib/crc-ccitt.c | 69 + mm/Makefile.orig | 17 + mm/mmap.c.orig | 1805 +++++ mm/mprotect.c.orig | 282 + mm/page_alloc.c.orig | 2013 ++++++ mm/proc_mm.c | 174 + net/bluetooth/hidp/Kconfig | 12 + net/bluetooth/hidp/Makefile | 7 + net/bluetooth/hidp/core.c | 649 ++ net/bluetooth/hidp/hidp.h | 122 + net/bluetooth/hidp/sock.c | 212 + net/bridge/br_sysfs_br.c | 383 ++ net/core/stream.c | 41 + net/ipv4/datagram.c | 73 + net/ipv4/netfilter/ipt_addrtype.c | 77 + net/ipv4/netfilter/ipt_realm.c | 76 + net/ipv4/xfrm4_output.c | 120 + net/ipv6/xfrm6_output.c | 141 + net/ipv6/xfrm6_tunnel.c | 634 
++ net/sched/act_api.c | 1017 +++ net/sched/sch_netem.c | 858 +++ scripts/checkstack.pl | 108 + scripts/elfconfig.h | 5 + scripts/mk_elfconfig | Bin 0 -> 6015 bytes scripts/mkmakefile | 31 + scripts/mod/Makefile | 16 + scripts/mod/empty.c | 1 + scripts/mod/file2alias.c | 282 + scripts/mod/mk_elfconfig.c | 65 + scripts/mod/modpost.c | 739 ++ scripts/mod/modpost.h | 103 + scripts/mod/sumversion.c | 544 ++ scripts/modpost | Bin 0 -> 24656 bytes scripts/package/Makefile | 71 + scripts/package/builddeb | 79 + scripts/package/mkspec | 63 + scripts/reference_init.pl | 102 + security/selinux/nlmsgtab.c | 153 + 692 files changed, 176045 insertions(+), 1115 deletions(-) create mode 100644 Documentation/arm/IXP4xx create mode 100644 Documentation/arm/Sharp-LH/SDRAM create mode 100644 Documentation/arm/VFP/release-notes.txt create mode 100644 Documentation/block/as-iosched.txt create mode 100644 Documentation/block/deadline-iosched.txt create mode 100644 Documentation/cpu-freq/amd-powernow.txt create mode 100644 Documentation/device-mapper/dm-io.txt create mode 100644 Documentation/device-mapper/kcopyd.txt create mode 100644 Documentation/device-mapper/linear.txt create mode 100644 Documentation/device-mapper/striped.txt create mode 100644 Documentation/device-mapper/zero.txt create mode 100644 Documentation/fb/sisfb.txt create mode 100644 Documentation/filesystems/automount-support.txt create mode 100644 Documentation/hpet.txt create mode 100644 Documentation/i2c/i2c-parport create mode 100644 Documentation/powerpc/hvcs.txt create mode 100644 Documentation/powerpc/mpc52xx.txt create mode 100644 Documentation/sound/alsa/Audigy-mixer.txt create mode 100644 Documentation/usb/sn9c102.txt create mode 100644 arch/arm/boot/bootp/initrd.S create mode 100644 arch/arm/boot/bootp/kernel.S create mode 100644 arch/arm/boot/compressed/piggy.S create mode 100644 arch/arm/common/locomo.c create mode 100644 arch/arm/common/time-acorn.c create mode 100644 arch/arm/configs/ixp4xx_defconfig create mode 100644 arch/arm/configs/mainstone_defconfig create mode 100644 arch/arm/configs/smdk2410_defconfig create mode 100644 arch/arm/mach-footbridge/time.c create mode 100644 arch/arm/mach-integrator/clock.c create mode 100644 arch/arm/mach-integrator/clock.h create mode 100644 arch/arm/mach-ixp4xx/Makefile create mode 100644 arch/arm/mach-ixp4xx/common-pci.c create mode 100644 arch/arm/mach-ixp4xx/common.c create mode 100644 arch/arm/mach-ixp4xx/coyote-pci.c create mode 100644 arch/arm/mach-ixp4xx/ixdp425-pci.c create mode 100644 arch/arm/mach-ixp4xx/prpmc1100-pci.c create mode 100644 arch/arm/mach-ixp4xx/prpmc1100-setup.c create mode 100644 arch/arm/mach-lh7a40x/time.c create mode 100644 arch/arm/mach-omap/time.c create mode 100644 arch/arm/mach-pxa/time.c create mode 100644 arch/arm/mach-s3c2410/gpio.c create mode 100644 arch/arm/mach-s3c2410/mach-smdk2410.c create mode 100644 arch/arm/mach-s3c2410/time.c create mode 100644 arch/arm/mach-sa1100/collie.c create mode 100644 arch/arm/mach-sa1100/time.c create mode 100644 arch/arm/mach-versatile/clock.c create mode 100644 arch/arm/mach-versatile/clock.h create mode 100644 arch/arm/vfp/Makefile create mode 100644 arch/arm/vfp/entry.S create mode 100644 arch/arm/vfp/vfp.h create mode 100644 arch/arm/vfp/vfpdouble.c create mode 100644 arch/arm/vfp/vfphw.S create mode 100644 arch/arm/vfp/vfpinstr.h create mode 100644 arch/arm/vfp/vfpmodule.c create mode 100644 arch/arm/vfp/vfpsingle.c create mode 100644 arch/cris/arch-v10/drivers/ide.c create mode 100644 arch/cris/kernel/crisksyms.c 
create mode 100644 arch/i386/crypto/Makefile create mode 100644 arch/i386/crypto/aes-i586-asm.S create mode 100644 arch/i386/crypto/aes.c create mode 100644 arch/i386/lib/bitops.c create mode 100644 arch/i386/mach-generic/es7000.c create mode 100644 arch/ia64/configs/sim_defconfig create mode 100644 arch/ia64/dig/topology.c create mode 100644 arch/ia64/lib/bitop.c create mode 100644 arch/mips/au1000/common/cputable.c create mode 100644 arch/mips/configs/ocelot_g_defconfig create mode 100644 arch/mips/kernel/module.c create mode 100644 arch/mips/mm/tlb-r8k.c create mode 100644 arch/mips/mm/tlb64-glue-r4k.S create mode 100644 arch/mips/mm/tlb64-glue-sb1.S create mode 100644 arch/mips/mm/tlbex32-r3k.S create mode 100644 arch/mips/mm/tlbex32-r4k.S create mode 100644 arch/mips/mm/tlbex64-r4k.S create mode 100644 arch/mips/pci/fixup-jaguar.c create mode 100644 arch/mips/pci/fixup-mpc30x.c create mode 100644 arch/mips/pci/fixup-ocelot-c.c create mode 100644 arch/mips/pci/fixup-ocelot-g.c create mode 100644 arch/mips/pci/fixup-tb0219.c create mode 100644 arch/mips/pci/ops-marvell.c create mode 100644 arch/mips/pci/ops-titan-ht.c create mode 100644 arch/mips/pci/ops-vr41xx.c create mode 100644 arch/mips/pci/pci-yosemite.c create mode 100644 arch/mips/pmc-sierra/yosemite/dbg_io.c create mode 100644 arch/mips/pmc-sierra/yosemite/i2c-yosemite.c create mode 100644 arch/mips/pmc-sierra/yosemite/py-console.c create mode 100644 arch/mips/vr41xx/tanbac-tb0229/tb0219.c create mode 100644 arch/parisc/configs/n4000_defconfig create mode 100644 arch/parisc/kernel/unwind.c create mode 100644 arch/parisc/lib/debuglocks.c create mode 100644 arch/ppc/boot/simple/mpc52xx_tty.c create mode 100644 arch/ppc/configs/ads8272_defconfig create mode 100644 arch/ppc/configs/lite5200_defconfig create mode 100644 arch/ppc/configs/rpx8260_defconfig create mode 100644 arch/ppc/kernel/dma-mapping.c create mode 100644 arch/ppc/kernel/head_e500.S create mode 100644 arch/ppc/kernel/vecemu.c create mode 100644 arch/ppc/kernel/vector.S create mode 100644 arch/ppc/lib/rheap.c create mode 100644 arch/ppc/mm/fsl_booke_mmu.c create mode 100644 arch/ppc/oprofile/Kconfig create mode 100644 arch/ppc/oprofile/Makefile create mode 100644 arch/ppc/oprofile/init.c create mode 100644 arch/ppc/platforms/4xx/bubinga.c create mode 100644 arch/ppc/platforms/4xx/bubinga.h create mode 100644 arch/ppc/platforms/4xx/ibm405ep.c create mode 100644 arch/ppc/platforms/4xx/ibm405ep.h create mode 100644 arch/ppc/platforms/85xx/Kconfig create mode 100644 arch/ppc/platforms/85xx/Makefile create mode 100644 arch/ppc/platforms/85xx/mpc8540.c create mode 100644 arch/ppc/platforms/85xx/mpc8540_ads.c create mode 100644 arch/ppc/platforms/85xx/mpc8540_ads.h create mode 100644 arch/ppc/platforms/85xx/mpc8555.c create mode 100644 arch/ppc/platforms/85xx/mpc8555_cds.h create mode 100644 arch/ppc/platforms/85xx/mpc8560.c create mode 100644 arch/ppc/platforms/85xx/mpc8560_ads.c create mode 100644 arch/ppc/platforms/85xx/mpc8560_ads.h create mode 100644 arch/ppc/platforms/85xx/mpc85xx_ads_common.c create mode 100644 arch/ppc/platforms/85xx/mpc85xx_ads_common.h create mode 100644 arch/ppc/platforms/85xx/mpc85xx_cds_common.c create mode 100644 arch/ppc/platforms/85xx/mpc85xx_cds_common.h create mode 100644 arch/ppc/platforms/85xx/sbc8560.c create mode 100644 arch/ppc/platforms/85xx/sbc8560.h create mode 100644 arch/ppc/platforms/85xx/sbc85xx.c create mode 100644 arch/ppc/platforms/85xx/sbc85xx.h create mode 100644 arch/ppc/platforms/lite5200.c create mode 100644 
arch/ppc/platforms/lite5200.h create mode 100644 arch/ppc/platforms/mpc5200.c create mode 100644 arch/ppc/platforms/pq2ads.h create mode 100644 arch/ppc/platforms/pq2ads_setup.c create mode 100644 arch/ppc/platforms/rpx8260.c create mode 100644 arch/ppc/platforms/rpx8260.h create mode 100644 arch/ppc/platforms/sbc82xx.c create mode 100644 arch/ppc/platforms/sbc82xx.h create mode 100644 arch/ppc/syslib/cpm2_common.c create mode 100644 arch/ppc/syslib/cpm2_pic.c create mode 100644 arch/ppc/syslib/cpm2_pic.h create mode 100644 arch/ppc/syslib/dcr.S create mode 100644 arch/ppc/syslib/ibm440gx_common.c create mode 100644 arch/ppc/syslib/ibm440gx_common.h create mode 100644 arch/ppc/syslib/ibm44x_common.h create mode 100644 arch/ppc/syslib/m8260_pci.c create mode 100644 arch/ppc/syslib/m8260_pci.h create mode 100644 arch/ppc/syslib/m8260_pci_erratum9.c create mode 100644 arch/ppc/syslib/mpc52xx_pic.c create mode 100644 arch/ppc/syslib/mpc52xx_setup.c create mode 100644 arch/ppc/syslib/ocp.c create mode 100644 arch/ppc/syslib/ppc4xx_sgdma.c create mode 100644 arch/ppc/syslib/ppc85xx_common.c create mode 100644 arch/ppc/syslib/ppc85xx_common.h create mode 100644 arch/ppc/syslib/ppc85xx_setup.c create mode 100644 arch/ppc/syslib/ppc85xx_setup.h create mode 100644 arch/ppc64/kernel/hvcserver.c create mode 100644 arch/ppc64/kernel/vecemu.c create mode 100644 arch/ppc64/kernel/vector.S create mode 100644 arch/ppc64/lib/e2a.c create mode 100644 arch/ppc64/lib/locks.c create mode 100644 arch/ppc64/lib/usercopy.c create mode 100644 arch/ppc64/mm/slb.c create mode 100644 arch/ppc64/mm/slb_low.S create mode 100644 arch/s390/kernel/vtime.c create mode 100644 arch/s390/lib/string.c create mode 100644 arch/sh/boards/renesas/hs7751rvoip/Makefile create mode 100644 arch/sh/boards/renesas/hs7751rvoip/io.c create mode 100644 arch/sh/boards/renesas/hs7751rvoip/irq.c create mode 100644 arch/sh/boards/renesas/hs7751rvoip/led.c create mode 100644 arch/sh/boards/renesas/hs7751rvoip/mach.c create mode 100644 arch/sh/boards/renesas/hs7751rvoip/pci.c create mode 100644 arch/sh/boards/renesas/hs7751rvoip/setup.c create mode 100644 arch/sh/boards/renesas/rts7751r2d/Makefile create mode 100644 arch/sh/boards/renesas/rts7751r2d/io.c create mode 100644 arch/sh/boards/renesas/rts7751r2d/irq.c create mode 100644 arch/sh/boards/renesas/rts7751r2d/led.c create mode 100644 arch/sh/boards/renesas/rts7751r2d/mach.c create mode 100644 arch/sh/boards/renesas/rts7751r2d/setup.c create mode 100644 arch/sh/boards/renesas/systemh/Makefile create mode 100644 arch/sh/boards/renesas/systemh/io.c create mode 100644 arch/sh/boards/renesas/systemh/irq.c create mode 100644 arch/sh/boards/renesas/systemh/setup.c create mode 100644 arch/sh/boards/se/7300/Makefile create mode 100644 arch/sh/boards/se/7300/io.c create mode 100644 arch/sh/boards/se/7300/irq.c create mode 100644 arch/sh/boards/se/7300/led.c create mode 100644 arch/sh/boards/se/7300/setup.c create mode 100644 arch/sh/cchips/voyagergx/Makefile create mode 100644 arch/sh/cchips/voyagergx/consistent.c create mode 100644 arch/sh/cchips/voyagergx/irq.c create mode 100644 arch/sh/cchips/voyagergx/setup.c create mode 100644 arch/sh/configs/rts7751r2d_defconfig create mode 100644 arch/sh/configs/se7300_defconfig create mode 100644 arch/sh/drivers/dma/dma-sysfs.c create mode 100644 arch/sh/drivers/pci/fixups-rts7751r2d.c create mode 100644 arch/sh/drivers/pci/ops-rts7751r2d.c create mode 100644 arch/sh/kernel/cpu/adc.c create mode 100644 arch/sh/kernel/cpu/bus.c create mode 100644 
arch/sh/kernel/early_printk.c create mode 100644 arch/sh/ramdisk/Makefile create mode 100644 arch/sh/ramdisk/ld.script create mode 100644 arch/sh64/Kconfig create mode 100644 arch/sh64/Makefile create mode 100644 arch/sh64/boot/Makefile create mode 100644 arch/sh64/boot/compressed/Makefile create mode 100644 arch/sh64/boot/compressed/cache.c create mode 100644 arch/sh64/boot/compressed/head.S create mode 100644 arch/sh64/boot/compressed/install.sh create mode 100644 arch/sh64/boot/compressed/misc.c create mode 100644 arch/sh64/boot/compressed/vmlinux.lds.S create mode 100644 arch/sh64/configs/cayman_defconfig create mode 100644 arch/sh64/defconfig create mode 100644 arch/sh64/kernel/Makefile create mode 100644 arch/sh64/kernel/alphanum.c create mode 100644 arch/sh64/kernel/asm-offsets.c create mode 100644 arch/sh64/kernel/dma.c create mode 100644 arch/sh64/kernel/early_printk.c create mode 100644 arch/sh64/kernel/entry.S create mode 100644 arch/sh64/kernel/fpu.c create mode 100644 arch/sh64/kernel/head.S create mode 100644 arch/sh64/kernel/init_task.c create mode 100644 arch/sh64/kernel/irq.c create mode 100644 arch/sh64/kernel/irq_intc.c create mode 100644 arch/sh64/kernel/led.c create mode 100644 arch/sh64/kernel/pci-dma.c create mode 100644 arch/sh64/kernel/pci_sh5.c create mode 100644 arch/sh64/kernel/pci_sh5.h create mode 100644 arch/sh64/kernel/pcibios.c create mode 100644 arch/sh64/kernel/process.c create mode 100644 arch/sh64/kernel/ptrace.c create mode 100644 arch/sh64/kernel/semaphore.c create mode 100644 arch/sh64/kernel/setup.c create mode 100644 arch/sh64/kernel/sh_ksyms.c create mode 100644 arch/sh64/kernel/signal.c create mode 100644 arch/sh64/kernel/switchto.S create mode 100644 arch/sh64/kernel/sys_sh64.c create mode 100644 arch/sh64/kernel/syscalls.S create mode 100644 arch/sh64/kernel/time.c create mode 100644 arch/sh64/kernel/traps.c create mode 100644 arch/sh64/kernel/unwind.c create mode 100644 arch/sh64/kernel/vmlinux.lds.S create mode 100644 arch/sh64/lib/Makefile create mode 100644 arch/sh64/lib/c-checksum.c create mode 100644 arch/sh64/lib/copy_user_memcpy.S create mode 100644 arch/sh64/lib/dbg.c create mode 100644 arch/sh64/lib/io.c create mode 100644 arch/sh64/lib/memcpy.c create mode 100644 arch/sh64/lib/old-checksum.c create mode 100644 arch/sh64/lib/page_clear.S create mode 100644 arch/sh64/lib/page_copy.S create mode 100644 arch/sh64/lib/panic.c create mode 100644 arch/sh64/lib/udelay.c create mode 100644 arch/sh64/mach-cayman/Makefile create mode 100644 arch/sh64/mach-cayman/irq.c create mode 100644 arch/sh64/mach-cayman/led.c create mode 100644 arch/sh64/mach-cayman/setup.c create mode 100644 arch/sh64/mach-harp/Makefile create mode 100644 arch/sh64/mach-harp/setup.c create mode 100644 arch/sh64/mach-romram/Makefile create mode 100644 arch/sh64/mach-romram/setup.c create mode 100644 arch/sh64/mach-sim/Makefile create mode 100644 arch/sh64/mach-sim/setup.c create mode 100644 arch/sh64/mm/Makefile create mode 100644 arch/sh64/mm/cache.c create mode 100644 arch/sh64/mm/extable.c create mode 100644 arch/sh64/mm/fault.c create mode 100644 arch/sh64/mm/hugetlbpage.c create mode 100644 arch/sh64/mm/init.c create mode 100644 arch/sh64/mm/ioremap.c create mode 100644 arch/sh64/mm/tlb.c create mode 100644 arch/sh64/mm/tlbmiss.c create mode 100644 arch/sh64/oprofile/Kconfig create mode 100644 arch/sh64/oprofile/Makefile create mode 100644 arch/sh64/oprofile/op_model_null.c create mode 100644 arch/sparc64/lib/clear_page.S create mode 100644 
arch/sparc64/lib/copy_page.S create mode 100644 arch/sparc64/lib/find_bit.c create mode 100644 arch/sparc64/lib/splock.S create mode 100644 arch/sparc64/mm/tlb.c create mode 100644 arch/um/drivers/cow_kern.c create mode 100644 arch/um/drivers/cow_sys.h create mode 100644 arch/um/drivers/net_kern.c.orig create mode 100644 arch/um/include/aio.h create mode 100644 arch/um/include/filehandle.h create mode 100644 arch/um/include/skas_ptregs.h create mode 100644 arch/um/kernel/filehandle.c create mode 100644 arch/um/kernel/smp.c.orig delete mode 100644 arch/um/kernel/user_syms.c create mode 100644 arch/um/os-Linux/aio.c create mode 100644 arch/um/os-Linux/time.c create mode 100644 arch/um/sys-i386/bitops.c delete mode 100644 arch/um/sys-i386/extable.c create mode 100644 arch/um/sys-i386/semaphore.c create mode 100644 arch/um/sys-i386/time.c create mode 100644 arch/um/util/mk_constants create mode 100644 arch/um/util/mk_task create mode 100644 arch/x86_64/kernel/domain.c create mode 100644 crypto/khazad.c create mode 100644 crypto/tea.c create mode 100644 drivers/block/sx8.c create mode 100644 drivers/char/drm/drm_irq.h create mode 100644 drivers/char/drm/drm_pciids.h create mode 100644 drivers/char/ds1286.c create mode 100644 drivers/char/hpet.c create mode 100644 drivers/char/hvcs.c create mode 100644 drivers/char/ip27-rtc.c create mode 100644 drivers/char/lcd.h create mode 100644 drivers/char/watchdog/ixp2000_wdt.c create mode 100644 drivers/char/watchdog/ixp4xx_wdt.c create mode 100644 drivers/firmware/pcdp.c create mode 100644 drivers/firmware/pcdp.h create mode 100644 drivers/i2c/busses/i2c-ixp4xx.c create mode 100644 drivers/i2c/chips/adm1025.c create mode 100644 drivers/i2c/chips/adm1031.c create mode 100644 drivers/i2c/chips/lm77.c create mode 100644 drivers/i2c/chips/max1619.c create mode 100644 drivers/i2c/chips/rtc8564.c create mode 100644 drivers/i2c/chips/rtc8564.h create mode 100644 drivers/ide/h8300/ide-h8300.c create mode 100644 drivers/md/dm-exception-store.c create mode 100644 drivers/md/dm-io.c create mode 100644 drivers/md/dm-io.h create mode 100644 drivers/md/dm-log.c create mode 100644 drivers/md/dm-log.h create mode 100644 drivers/md/dm-raid1.c create mode 100644 drivers/md/dm-snap.c create mode 100644 drivers/md/dm-snap.h create mode 100644 drivers/md/dm-zero.c create mode 100644 drivers/md/kcopyd.c create mode 100644 drivers/md/kcopyd.h create mode 100644 drivers/media/video/ovcamchip/Makefile create mode 100644 drivers/media/video/ovcamchip/ov6x20.c create mode 100644 drivers/media/video/ovcamchip/ov6x30.c create mode 100644 drivers/media/video/ovcamchip/ov76be.c create mode 100644 drivers/media/video/ovcamchip/ov7x10.c create mode 100644 drivers/media/video/ovcamchip/ov7x20.c create mode 100644 drivers/media/video/ovcamchip/ovcamchip_core.c create mode 100644 drivers/media/video/ovcamchip/ovcamchip_priv.h create mode 100644 drivers/mtd/chips/cfi_util.c create mode 100644 drivers/mtd/devices/phram.c create mode 100644 drivers/mtd/maps/db1550-flash.c create mode 100644 drivers/mtd/maps/db1x00-flash.c create mode 100644 drivers/mtd/maps/dmv182.c create mode 100644 drivers/mtd/maps/ichxrom.c create mode 100644 drivers/mtd/maps/integrator-flash-v24.c create mode 100644 drivers/mtd/maps/ixp4xx.c create mode 100644 drivers/mtd/maps/mpc1211.c create mode 100644 drivers/mtd/maps/omap-toto-flash.c create mode 100644 drivers/mtd/maps/pb1550-flash.c create mode 100644 drivers/mtd/maps/sbc8240.c create mode 100644 drivers/mtd/maps/wr_sbc82xx_flash.c create mode 100644 
drivers/mtd/nand/au1550nd.c create mode 100644 drivers/mtd/nand/diskonchip.c create mode 100644 drivers/mtd/nand/nand_base.c create mode 100644 drivers/mtd/nand/nand_bbt.c create mode 100644 drivers/mtd/nand/ppchameleonevb.c create mode 100644 drivers/mtd/nand/toto.c create mode 100644 drivers/mtd/nand/tx4925ndfmc.c create mode 100644 drivers/mtd/nand/tx4938ndfmc.c create mode 100644 drivers/net/fec_8xx/Kconfig create mode 100644 drivers/net/fec_8xx/Makefile create mode 100644 drivers/net/fec_8xx/fec_8xx-netta.c create mode 100644 drivers/net/fec_8xx/fec_8xx.h create mode 100644 drivers/net/fec_8xx/fec_main.c create mode 100644 drivers/net/fec_8xx/fec_mii.c create mode 100644 drivers/net/ibm_emac/ibm_emac.h create mode 100644 drivers/net/ibm_emac/ibm_emac_core.h create mode 100644 drivers/net/ibm_emac/ibm_emac_debug.c create mode 100644 drivers/net/ibm_emac/ibm_emac_mal.c create mode 100644 drivers/net/ibm_emac/ibm_emac_mal.h create mode 100644 drivers/net/ibm_emac/ibm_emac_phy.c create mode 100644 drivers/net/ibm_emac/ibm_emac_rgmii.h create mode 100644 drivers/net/ibm_emac/ibm_emac_tah.h create mode 100644 drivers/net/ibm_emac/ibm_emac_zmii.h create mode 100644 drivers/net/ne-h8300.c create mode 100644 drivers/net/smc91x.c create mode 100644 drivers/net/smc91x.h create mode 100644 drivers/net/via-velocity.c create mode 100644 drivers/net/via-velocity.h create mode 100644 drivers/net/wireless/prism54/prismcompat.h create mode 100644 drivers/pcmcia/pd6729.c create mode 100644 drivers/pcmcia/pd6729.h create mode 100644 drivers/pcmcia/pxa2xx_base.h create mode 100644 drivers/pcmcia/socket_sysfs.c create mode 100644 drivers/s390/net/ctcdbug.c create mode 100644 drivers/s390/net/ctcdbug.h create mode 100644 drivers/scsi/3w-9xxx.c create mode 100644 drivers/scsi/3w-9xxx.h create mode 100644 drivers/scsi/fdomain.h create mode 100644 drivers/scsi/ipr.c create mode 100644 drivers/scsi/ipr.h create mode 100644 drivers/scsi/pcmcia/sym53c500_cs.c create mode 100644 drivers/scsi/qlogicfas408.h create mode 100644 drivers/scsi/sata_nv.c create mode 100644 drivers/scsi/sata_promise.h create mode 100644 drivers/serial/cpm_uart/Makefile create mode 100644 drivers/serial/cpm_uart/cpm_uart.h create mode 100644 drivers/serial/cpm_uart/cpm_uart_core.c create mode 100644 drivers/serial/cpm_uart/cpm_uart_cpm1.c create mode 100644 drivers/serial/cpm_uart/cpm_uart_cpm1.h create mode 100644 drivers/serial/cpm_uart/cpm_uart_cpm2.c create mode 100644 drivers/serial/cpm_uart/cpm_uart_cpm2.h create mode 100644 drivers/serial/mpc52xx_uart.c create mode 100644 drivers/serial/serial_lh7a40x.c create mode 100644 drivers/serial/sn_console.c create mode 100644 drivers/usb/class/cdc-acm.h create mode 100644 drivers/usb/core/sysfs.c create mode 100644 drivers/usb/host/ohci-lh7a404.c create mode 100644 drivers/usb/input/touchkitusb.c create mode 100644 drivers/usb/media/sn9c102.h create mode 100644 drivers/usb/media/sn9c102_core.c create mode 100644 drivers/usb/media/sn9c102_pas106b.c create mode 100644 drivers/usb/media/sn9c102_sensor.h create mode 100644 drivers/usb/media/sn9c102_tas5110c1b.c create mode 100644 drivers/usb/media/sn9c102_tas5130d1b.c create mode 100644 drivers/usb/media/w9968cf_vpp.h create mode 100644 drivers/usb/misc/phidgetservo.c create mode 100644 drivers/video/asiliantfb.c create mode 100644 drivers/video/gbefb.c create mode 100644 drivers/video/pxafb.h create mode 100644 drivers/video/riva/rivafb-i2c.c create mode 100644 drivers/w1/Kconfig create mode 100644 drivers/w1/Makefile create mode 100644 
drivers/w1/matrox_w1.c create mode 100644 drivers/w1/w1.c create mode 100644 drivers/w1/w1.h create mode 100644 drivers/w1/w1_family.c create mode 100644 drivers/w1/w1_family.h create mode 100644 drivers/w1/w1_int.c create mode 100644 drivers/w1/w1_int.h create mode 100644 drivers/w1/w1_io.c create mode 100644 drivers/w1/w1_io.h create mode 100644 drivers/w1/w1_log.h create mode 100644 drivers/w1/w1_netlink.c create mode 100644 drivers/w1/w1_netlink.h create mode 100644 drivers/w1/w1_therm.c create mode 100644 fs/Makefile.orig delete mode 100644 fs/ext3/resize.c create mode 100644 fs/hostfs/externfs.c create mode 100644 fs/hostfs/host_file.c create mode 100644 fs/hostfs/host_fs.c create mode 100644 fs/hostfs/humfs.c create mode 100644 fs/hostfs/meta_fs.c create mode 100644 fs/hostfs/metadata.h create mode 100644 fs/isofs/export.c create mode 100644 fs/jffs2/compr.h create mode 100644 fs/nls/nls_ascii.c create mode 100644 fs/ntfs/collate.c create mode 100644 fs/ntfs/collate.h create mode 100644 fs/ntfs/index.c create mode 100644 fs/ntfs/index.h create mode 100644 fs/ntfs/quota.c create mode 100644 fs/ntfs/quota.h create mode 100644 fs/reiserfs/xattr.c create mode 100644 fs/reiserfs/xattr_acl.c create mode 100644 fs/xfs/linux-2.6/kmem.c create mode 100644 fs/xfs/linux-2.6/kmem.h create mode 100644 fs/xfs/linux-2.6/mrlock.h create mode 100644 fs/xfs/linux-2.6/sema.h create mode 100644 fs/xfs/linux-2.6/sv.h create mode 100644 fs/xfs/linux-2.6/xfs_aops.c create mode 100644 fs/xfs/linux-2.6/xfs_buf.c create mode 100644 fs/xfs/linux-2.6/xfs_buf.h create mode 100644 fs/xfs/linux-2.6/xfs_file.c create mode 100644 fs/xfs/linux-2.6/xfs_fs_subr.c create mode 100644 fs/xfs/linux-2.6/xfs_globals.c create mode 100644 fs/xfs/linux-2.6/xfs_ioctl.c create mode 100644 fs/xfs/linux-2.6/xfs_iops.h create mode 100644 fs/xfs/linux-2.6/xfs_linux.h create mode 100644 fs/xfs/linux-2.6/xfs_lrw.c create mode 100644 fs/xfs/linux-2.6/xfs_lrw.h create mode 100644 fs/xfs/linux-2.6/xfs_stats.c create mode 100644 fs/xfs/linux-2.6/xfs_super.c create mode 100644 fs/xfs/linux-2.6/xfs_super.h create mode 100644 fs/xfs/linux-2.6/xfs_sysctl.c create mode 100644 fs/xfs/linux-2.6/xfs_sysctl.h create mode 100644 fs/xfs/linux-2.6/xfs_vfs.c create mode 100644 fs/xfs/linux-2.6/xfs_vfs.h create mode 100644 fs/xfs/linux-2.6/xfs_vnode.c create mode 100644 fs/xfs/linux-2.6/xfs_vnode.h create mode 100644 include/asm-alpha/setup.h create mode 100644 include/asm-arm/arch-ixp4xx/dma.h create mode 100644 include/asm-arm/arch-ixp4xx/io.h create mode 100644 include/asm-arm/arch-ixp4xx/irq.h create mode 100644 include/asm-arm/arch-ixp4xx/memory.h create mode 100644 include/asm-arm/arch-ixp4xx/param.h create mode 100644 include/asm-arm/arch-ixp4xx/platform.h create mode 100644 include/asm-arm/arch-ixp4xx/serial.h create mode 100644 include/asm-arm/arch-ixp4xx/system.h create mode 100644 include/asm-arm/arch-ixp4xx/time.h create mode 100644 include/asm-arm/arch-ixp4xx/uncompress.h create mode 100644 include/asm-arm/arch-pxa/pxafb.h create mode 100644 include/asm-arm/arch-sa1100/collie.h create mode 100644 include/asm-arm/hardware/clock.h create mode 100644 include/asm-arm/hardware/locomo.h create mode 100644 include/asm-arm/mach/time.h create mode 100644 include/asm-arm/vfp.h create mode 100644 include/asm-arm/vfpmacros.h create mode 100644 include/asm-i386/pgtable-2level-defs.h create mode 100644 include/asm-i386/pgtable-3level-defs.h create mode 100644 include/asm-ia64/setup.h create mode 100644 include/asm-mips/gt64240.h create mode 100644 
include/asm-mips/mach-yosemite/cpu-feature-overrides.h create mode 100644 include/asm-mips/marvell.h create mode 100644 include/asm-mips/setup.h create mode 100644 include/asm-mips/vr41xx/tb0219.h create mode 100644 include/asm-parisc/numnodes.h create mode 100644 include/asm-ppc/cpm2.h create mode 100644 include/asm-ppc/fsl_ocp.h create mode 100644 include/asm-ppc/immap_85xx.h create mode 100644 include/asm-ppc/immap_cpm2.h create mode 100644 include/asm-ppc/m8260_pci.h create mode 100644 include/asm-ppc/mpc52xx.h create mode 100644 include/asm-ppc/mpc52xx_psc.h create mode 100644 include/asm-ppc/mpc8260_pci9.h create mode 100644 include/asm-ppc/mpc85xx.h create mode 100644 include/asm-ppc/ppc4xx_dma.h create mode 100644 include/asm-ppc/rheap.h create mode 100644 include/asm-ppc64/hvcserver.h create mode 100644 include/asm-sh/adc.h create mode 100644 include/asm-sh/bus-sh.h create mode 100644 include/asm-sh/cpu-sh3/adc.h create mode 100644 include/asm-sh/fixmap.h create mode 100644 include/asm-sh/hp6xx/ide.h create mode 100644 include/asm-sh/hs7751rvoip/hs7751rvoip.h create mode 100644 include/asm-sh/hs7751rvoip/ide.h create mode 100644 include/asm-sh/hs7751rvoip/io.h create mode 100644 include/asm-sh/rts7751r2d/ide.h create mode 100644 include/asm-sh/rts7751r2d/io.h create mode 100644 include/asm-sh/rts7751r2d/rts7751r2d.h create mode 100644 include/asm-sh/rts7751r2d/voyagergx_reg.h create mode 100644 include/asm-sh/se7300/io.h create mode 100644 include/asm-sh/se7300/se7300.h create mode 100644 include/asm-sh/setup.h create mode 100644 include/asm-sh64/a.out.h create mode 100644 include/asm-sh64/atomic.h create mode 100644 include/asm-sh64/bitops.h create mode 100644 include/asm-sh64/bug.h create mode 100644 include/asm-sh64/bugs.h create mode 100644 include/asm-sh64/byteorder.h create mode 100644 include/asm-sh64/cache.h create mode 100644 include/asm-sh64/cacheflush.h create mode 100644 include/asm-sh64/cayman.h create mode 100644 include/asm-sh64/checksum.h create mode 100644 include/asm-sh64/cpumask.h create mode 100644 include/asm-sh64/current.h create mode 100644 include/asm-sh64/delay.h create mode 100644 include/asm-sh64/div64.h create mode 100644 include/asm-sh64/dma-mapping.h create mode 100644 include/asm-sh64/dma.h create mode 100644 include/asm-sh64/elf.h create mode 100644 include/asm-sh64/errno.h create mode 100644 include/asm-sh64/fcntl.h create mode 100644 include/asm-sh64/hardirq.h create mode 100644 include/asm-sh64/hardware.h create mode 100644 include/asm-sh64/hdreg.h create mode 100644 include/asm-sh64/hw_irq.h create mode 100644 include/asm-sh64/ide.h create mode 100644 include/asm-sh64/io.h create mode 100644 include/asm-sh64/ioctl.h create mode 100644 include/asm-sh64/ioctls.h create mode 100644 include/asm-sh64/ipc.h create mode 100644 include/asm-sh64/ipcbuf.h create mode 100644 include/asm-sh64/irq.h create mode 100644 include/asm-sh64/keyboard.h create mode 100644 include/asm-sh64/kmap_types.h create mode 100644 include/asm-sh64/linkage.h create mode 100644 include/asm-sh64/local.h create mode 100644 include/asm-sh64/mc146818rtc.h create mode 100644 include/asm-sh64/mman.h create mode 100644 include/asm-sh64/mmu.h create mode 100644 include/asm-sh64/mmu_context.h create mode 100644 include/asm-sh64/module.h create mode 100644 include/asm-sh64/msgbuf.h create mode 100644 include/asm-sh64/namei.h create mode 100644 include/asm-sh64/page.h create mode 100644 include/asm-sh64/param.h create mode 100644 include/asm-sh64/pci.h create mode 100644 
include/asm-sh64/percpu.h create mode 100644 include/asm-sh64/pgalloc.h create mode 100644 include/asm-sh64/pgtable.h create mode 100644 include/asm-sh64/platform.h create mode 100644 include/asm-sh64/poll.h create mode 100644 include/asm-sh64/posix_types.h create mode 100644 include/asm-sh64/processor.h create mode 100644 include/asm-sh64/ptrace.h create mode 100644 include/asm-sh64/registers.h create mode 100644 include/asm-sh64/resource.h create mode 100644 include/asm-sh64/scatterlist.h create mode 100644 include/asm-sh64/sections.h create mode 100644 include/asm-sh64/segment.h create mode 100644 include/asm-sh64/semaphore-helper.h create mode 100644 include/asm-sh64/semaphore.h create mode 100644 include/asm-sh64/sembuf.h create mode 100644 include/asm-sh64/serial.h create mode 100644 include/asm-sh64/setup.h create mode 100644 include/asm-sh64/shmbuf.h create mode 100644 include/asm-sh64/shmparam.h create mode 100644 include/asm-sh64/sigcontext.h create mode 100644 include/asm-sh64/siginfo.h create mode 100644 include/asm-sh64/signal.h create mode 100644 include/asm-sh64/smp.h create mode 100644 include/asm-sh64/smplock.h create mode 100644 include/asm-sh64/socket.h create mode 100644 include/asm-sh64/sockios.h create mode 100644 include/asm-sh64/softirq.h create mode 100644 include/asm-sh64/spinlock.h create mode 100644 include/asm-sh64/stat.h create mode 100644 include/asm-sh64/statfs.h create mode 100644 include/asm-sh64/string.h create mode 100644 include/asm-sh64/system.h create mode 100644 include/asm-sh64/termbits.h create mode 100644 include/asm-sh64/termios.h create mode 100644 include/asm-sh64/thread_info.h create mode 100644 include/asm-sh64/timex.h create mode 100644 include/asm-sh64/tlb.h create mode 100644 include/asm-sh64/tlbflush.h create mode 100644 include/asm-sh64/topology.h create mode 100644 include/asm-sh64/types.h create mode 100644 include/asm-sh64/uaccess.h create mode 100644 include/asm-sh64/ucontext.h create mode 100644 include/asm-sh64/unaligned.h create mode 100644 include/asm-sh64/unistd.h create mode 100644 include/asm-sh64/user.h create mode 100644 include/asm-sparc64/cmt.h create mode 100644 include/asm-um/module-i386.h create mode 100644 include/asm-um/pgtable.h.orig create mode 100644 include/asm-um/setup.h delete mode 100644 include/asm-um/smplock.h delete mode 100644 include/asm-um/spinlock.h create mode 100644 include/asm-um/unistd.h.orig create mode 100644 include/asm-v850/setup.h create mode 100644 include/linux/crc-ccitt.h create mode 100644 include/linux/dmi.h create mode 100644 include/linux/ds1286.h create mode 100644 include/linux/gfp.h.orig create mode 100644 include/linux/ghash.h create mode 100644 include/linux/hpet.h create mode 100644 include/linux/mempolicy.h create mode 100644 include/linux/mm.h.orig create mode 100644 include/linux/mtd/physmap.h create mode 100644 include/linux/netfilter_ipv4/ipt_addrtype.h create mode 100644 include/linux/netfilter_ipv4/ipt_realm.h create mode 100644 include/linux/proc_mm.h create mode 100644 include/linux/reiserfs_acl.h create mode 100644 include/linux/reiserfs_xattr.h create mode 100644 include/linux/snmp.h create mode 100644 include/media/ovcamchip.h create mode 100644 include/mtd/inftl-user.h create mode 100644 include/mtd/jffs2-user.h create mode 100644 include/mtd/mtd-abi.h create mode 100644 include/mtd/mtd-user.h create mode 100644 include/mtd/nftl-user.h create mode 100644 include/net/ip6_checksum.h create mode 100644 include/net/pkt_act.h create mode 100644 include/scsi/scsi_dbg.h create 
mode 100644 kernel/power/smp.c create mode 100644 lib/crc-ccitt.c create mode 100644 mm/Makefile.orig create mode 100644 mm/mmap.c.orig create mode 100644 mm/mprotect.c.orig create mode 100644 mm/page_alloc.c.orig create mode 100644 mm/proc_mm.c create mode 100644 net/bluetooth/hidp/Kconfig create mode 100644 net/bluetooth/hidp/Makefile create mode 100644 net/bluetooth/hidp/core.c create mode 100644 net/bluetooth/hidp/hidp.h create mode 100644 net/bluetooth/hidp/sock.c create mode 100644 net/bridge/br_sysfs_br.c create mode 100644 net/core/stream.c create mode 100644 net/ipv4/datagram.c create mode 100644 net/ipv4/netfilter/ipt_addrtype.c create mode 100644 net/ipv4/netfilter/ipt_realm.c create mode 100644 net/ipv4/xfrm4_output.c create mode 100644 net/ipv6/xfrm6_output.c create mode 100644 net/ipv6/xfrm6_tunnel.c create mode 100644 net/sched/act_api.c create mode 100644 net/sched/sch_netem.c create mode 100644 scripts/checkstack.pl create mode 100644 scripts/elfconfig.h create mode 100644 scripts/mk_elfconfig create mode 100644 scripts/mkmakefile create mode 100644 scripts/mod/Makefile create mode 100644 scripts/mod/empty.c create mode 100644 scripts/mod/file2alias.c create mode 100644 scripts/mod/mk_elfconfig.c create mode 100644 scripts/mod/modpost.c create mode 100644 scripts/mod/modpost.h create mode 100644 scripts/mod/sumversion.c create mode 100644 scripts/modpost create mode 100644 scripts/package/Makefile create mode 100644 scripts/package/builddeb create mode 100644 scripts/package/mkspec create mode 100644 scripts/reference_init.pl create mode 100644 security/selinux/nlmsgtab.c
diff --git a/Documentation/arm/IXP4xx b/Documentation/arm/IXP4xx
new file mode 100644
index 000000000..d86d818a4
--- /dev/null
+++ b/Documentation/arm/IXP4xx
@@ -0,0 +1,155 @@
+
+-------------------------------------------------------------------------
+Release Notes for Linux on Intel's IXP4xx Network Processor
+
+Maintained by Deepak Saxena
+-------------------------------------------------------------------------
+
+1. Overview
+
+Intel's IXP4xx network processor is a highly integrated SOC that
+is targeted for network applications, though it has become popular
+in industrial control and other areas due to low cost and power
+consumption. The IXP4xx family currently consists of several processors
+that support different network offload functions such as encryption,
+routing, firewalling, etc. For more information on the various
+versions of the CPU, see:
+
+   http://developer.intel.com/design/network/products/npfamily/ixp4xx.htm
+
+Intel also made the IXCP1100 CPU for some time, which is an IXP4xx
+stripped of much of the network intelligence.
+
+2. Linux Support
+
+Linux currently supports the following features on the IXP4xx chips:
+
+- Dual serial ports
+- PCI interface
+- Flash access (MTD/JFFS)
+- I2C through GPIO
+- GPIO for input/output/interrupts
+  See include/asm-arm/arch-ixp4xx/platform.h for access functions.
+- Timers (watchdog, OS)
+
+The following components of the chips are not supported by Linux and
+require the use of Intel's proprietary CSR software:
+
+- USB device interface
+- Network interfaces (HSS, Utopia, NPEs, etc)
+- Network offload functionality
+
+If you need to use any of the above, you need to download Intel's
+software from:
+
+   http://developer.intel.com/design/network/products/npfamily/ixp425swr1.htm
+
+DO NOT POST QUESTIONS TO THE LINUX MAILING LISTS REGARDING THE PROPRIETARY
+SOFTWARE.
+ +There are several websites that provide directions/pointers on using +Intel's software: + +http://ixp4xx-osdg.sourceforge.net/ + Open Source Developer's Guide for using uClinux and the Intel libraries + +http://gatewaymaker.sourceforge.net/ + Simple one page summary of building a gateway using an IXP425 and Linux + +http://ixp425.sourceforge.net/ + ATM device driver for IXP425 that relies on Intel's libraries + +3. Known Issues/Limitations + +3a. Limited inbound PCI window + +The IXP4xx family allows for up to 256MB of memory but the PCI interface +can only expose 64MB of that memory to the PCI bus. This means that if +you are running with > 64MB, all PCI buffers outside of the accessible +range will be bounced using the routines in arch/arm/common/dmabounce.c. + +3b. Limited outbound PCI window + +IXP4xx provides two methods of accessing PCI memory space: + +1) A direct mapped window from 0x48000000 to 0x4bffffff (64MB). + To access PCI via this space, we simply ioremap() the BAR + into the kernel and we can use the standard read[bwl]/write[bwl] + macros. This is the preferred method due to speed but it + limits the system to just 64MB of PCI memory. This can be + problematic if using video cards and other memory-heavy devices. + +2) If > 64MB of memory space is required, the IXP4xx can be + configured to use indirect registers to access PCI. This allows + for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus. + The disadvantage of this is that every PCI access requires + three local register accesses plus a spinlock, but in some + cases the performance hit is acceptable. In addition, you cannot + mmap() PCI devices in this case due to the indirect nature + of the PCI window. + +By default, the direct method is used for performance reasons. If +you need more PCI memory, enable the IXP4XX_INDIRECT_PCI config option. + +3c. GPIO as Interrupts + +Currently the code only handles level-sensitive GPIO interrupts. + +4. Supported platforms + +ADI Engineering Coyote Gateway Reference Platform +http://www.adiengineering.com/productsCoyote.html + + The ADI Coyote platform is a reference design for those building + small residential/office gateways. One NPE is connected to a 10/100 + interface, one to a 4-port 10/100 switch, and the third to an ADSL + interface. In addition, it also supports two POTS interfaces connected + via SLICs. Note that those are not supported by Linux ATM. The platform + also has two mini-PCI slots used for 802.11[bga] cards. + Finally, there is an IDE port hanging off the expansion bus. + +Gateworks Avila Network Platform +http://www.gateworks.com/avila_sbc.htm + + The Avila platform is basically an IXDP425 with the 4 PCI slots + replaced with mini-PCI slots and a CF IDE interface hanging off + the expansion bus. + +Intel IXDP425 Development Platform +http://developer.intel.com/design/network/products/npfamily/ixdp425.htm + + This is Intel's standard reference platform for the IXDP425 and is + also known as the Richfield board. It contains 4 PCI slots, 16MB + of flash, two 10/100 ports and one ADSL port. + +Motorola PrPMC1100 Processor Mezzanine Card +http://www.fountainsys.com/datasheet/PrPMC1100.pdf + + The PrPMC1100 is based on the IXCP1100 and is meant to plug into + an IXP2400/2800 system to act as the system controller. It simply + contains a CPU and 16MB of flash on the board and needs to be + plugged into a carrier board to function. Currently Linux only + supports the Motorola PrPMC carrier board for this platform.
+ See https://mcg.motorola.com/us/ds/pdf/ds0144.pdf for info + on the carrier board. + +5. TODO LIST + +- Add support for Coyote IDE +- Add support for edge-based GPIO interrupts +- Add support for CF IDE on expansion bus + +6. Thanks + +The IXP4xx work has been funded by Intel Corp. and MontaVista Software, Inc. + +The following people have contributed patches/comments/etc: + +Lutz Jaenicke +Justin Mayfield +Robert E. Ranslam +[I know I've forgotten others, please email me to be added] + +------------------------------------------------------------------------- + +Last Update: 5/13/2004 diff --git a/Documentation/arm/Sharp-LH/SDRAM b/Documentation/arm/Sharp-LH/SDRAM new file mode 100644 index 000000000..93ddc23c2 --- /dev/null +++ b/Documentation/arm/Sharp-LH/SDRAM @@ -0,0 +1,51 @@ +README on the SDRAM Controller for the LH7a40X +============================================== + +The standard configuration for the SDRAM controller generates a sparse +memory array. The precise layout is determined by the SDRAM chips. A +default kernel configuration assembles the discontiguous memory +regions into separate memory nodes via the NUMA (Non-Uniform Memory +Architecture) facilities. In this default configuration, the kernel +is forgiving about the precise layout. As long as it is given an +accurate picture of available memory by the bootloader the kernel will +execute correctly. + +The SDRC supports a mode where some of the chip select lines are +swapped in order to make SDRAM look like a synchronous ROM. Setting +this bit means that the RAM will present as a contiguous array. Some +programmers prefer this to the discontiguous layout. Be aware that +may be a penalty for this feature where some some configurations of +memory are significantly reduced; i.e. 64MiB of RAM appears as only 32 +MiB. + +There are a couple of configuration options to override the default +behavior. When the SROMLL bit is set and memory appears as a +contiguous array, there is no reason to support NUMA. +CONFIG_LH7A40X_CONTIGMEM disables NUMA support. When physical memory +is discontiguous, the memory tables are organized such that there are +two banks per nodes with a small gap between them. This layout wastes +some kernel memory for page tables representing non-existent memory. +CONFIG_LH7A40X_ONE_BANK_PER_NODE optimizes the node tables such that +there are no gaps. These options control the low level organization +of the memory management tables in ways that may prevent the kernel +from booting or may cause the kernel to allocated excessively large +page tables. Be warned. Only change these options if you know what +you are doing. The default behavior is a reasonable compromise that +will suit all users. + +-- + +A typical 32MiB system with the default configuration options will +find physical memory managed as follows. + + node 0: 0xc0000000 4MiB + 0xc1000000 4MiB + node 1: 0xc4000000 4MiB + 0xc5000000 4MiB + node 2: 0xc8000000 4MiB + 0xc9000000 4MiB + node 3: 0xcc000000 4MiB + 0xcd000000 4MiB + +Setting CONFIG_LH7A40X_ONE_BANK_PER_NODE will put each bank into a +separate node. diff --git a/Documentation/arm/VFP/release-notes.txt b/Documentation/arm/VFP/release-notes.txt new file mode 100644 index 000000000..f28e0222f --- /dev/null +++ b/Documentation/arm/VFP/release-notes.txt @@ -0,0 +1,55 @@ +Release notes for Linux Kernel VFP support code +----------------------------------------------- + +Date: 20 May 2004 +Author: Russell King + +This is the first release of the Linux Kernel VFP support code. 
It +provides support for the exceptions bounced from VFP hardware found +on ARM926EJ-S. + +This release has been validated against the SoftFloat-2b library by +John R. Hauser using the TestFloat-2a test suite. Details of this +library and test suite can be found at: + + http://www.cs.berkeley.edu/~jhauser/arithmetic/SoftFloat.html + +The operations which have been tested with this package are: + + - fdiv + - fsub + - fadd + - fmul + - fcmp + - fcmpe + - fcvtd + - fcvts + - fsito + - ftosi + - fsqrt + +All the above pass softfloat tests with the following exceptions: + +- fadd/fsub shows some differences in the handling of +0 / -0 results + when input operands differ in signs. +- the handling of underflow exceptions is slightly different. If a + result underflows before rounding, but becomes a normalised number + after rounding, we do not signal an underflow exception. + +Other operations which have been tested by basic assembly-only tests +are: + + - fcpy + - fabs + - fneg + - ftoui + - ftosiz + - ftouiz + +The combination operations have not been tested: + + - fmac + - fnmac + - fmsc + - fnmsc + - fnmul diff --git a/Documentation/block/as-iosched.txt b/Documentation/block/as-iosched.txt new file mode 100644 index 000000000..fd763cc48 --- /dev/null +++ b/Documentation/block/as-iosched.txt @@ -0,0 +1,165 @@ +Anticipatory IO scheduler +------------------------- +Nick Piggin 13 Sep 2003 + +Attention! Database servers, especially those using "TCQ" disks should +investigate performance with the 'deadline' IO scheduler. Any system with high +disk performance requirements should do so, in fact. + +If you see unusual performance characteristics of your disk systems, or you +see big performance regressions versus the deadline scheduler, please email +me. Database users don't bother unless you're willing to test a lot of patches +from me ;) its a known issue. + +Also, users with hardware RAID controllers, doing striping, may find +highly variable performance results with using the as-iosched. The +as-iosched anticipatory implementation is based on the notion that a disk +device has only one physical seeking head. A striped RAID controller +actually has a head for each physical device in the logical RAID device. + +However, setting the antic_expire (see tunable parameters below) produces +very similar behavior to the deadline IO scheduler. + + +Selecting IO schedulers +----------------------- +To choose IO schedulers at boot time, use the argument 'elevator=deadline'. +'noop' and 'as' (the default) are also available. IO schedulers are assigned +globally at boot time only presently. + + +Anticipatory IO scheduler Policies +---------------------------------- +The as-iosched implementation implements several layers of policies +to determine when an IO request is dispatched to the disk controller. +Here are the policies outlined, in order of application. + +1. one-way Elevator algorithm. + +The elevator algorithm is similar to that used in deadline scheduler, with +the addition that it allows limited backward movement of the elevator +(i.e. seeks backwards). A seek backwards can occur when choosing between +two IO requests where one is behind the elevator's current position, and +the other is in front of the elevator's position. If the seek distance to +the request in back of the elevator is less than half the seek distance to +the request in front of the elevator, then the request in back can be chosen. +Backward seeks are also limited to a maximum of MAXBACK (1024*1024) sectors. 
+This favors forward movement of the elevator, while allowing opportunistic +"short" backward seeks. + +2. FIFO expiration times for reads and for writes. + +This is again very similar to the deadline IO scheduler. The expiration +times for requests on these lists is tunable using the parameters read_expire +and write_expire discussed below. When a read or a write expires in this way, +the IO scheduler will interrupt its current elevator sweep or read anticipation +to service the expired request. + +3. Read and write request batching + +A batch is a collection of read requests or a collection of write +requests. The as scheduler alternates dispatching read and write batches +to the driver. In the case a read batch, the scheduler submits read +requests to the driver as long as there are read requests to submit, and +the read batch time limit has not been exceeded (read_batch_expire). +The read batch time limit begins counting down only when there are +competing write requests pending. + +In the case of a write batch, the scheduler submits write requests to +the driver as long as there are write requests available, and the +write batch time limit has not been exceeded (write_batch_expire). +However, the length of write batches will be gradually shortened +when read batches frequently exceed their time limit. + +When changing between batch types, the scheduler waits for all requests +from the previous batch to complete before scheduling requests for the +next batch. + +The read and write fifo expiration times described in policy 2 above +are checked only when in scheduling IO of a batch for the corresponding +(read/write) type. So for example, the read FIFO timeout values are +tested only during read batches. Likewise, the write FIFO timeout +values are tested only during write batches. For this reason, +it is generally not recommended for the read batch time +to be longer than the write expiration time, nor for the write batch +time to exceed the read expiration time (see tunable parameters below). + +When the IO scheduler changes from a read to a write batch, +it begins the elevator from the request that is on the head of the +write expiration FIFO. Likewise, when changing from a write batch to +a read batch, scheduler begins the elevator from the first entry +on the read expiration FIFO. + +4. Read anticipation. + +Read anticipation occurs only when scheduling a read batch. +This implementation of read anticipation allows only one read request +to be dispatched to the disk controller at a time. In +contrast, many write requests may be dispatched to the disk controller +at a time during a write batch. It is this characteristic that can make +the anticipatory scheduler perform anomalously with controllers supporting +TCQ, or with hardware striped RAID devices. Setting the antic_expire +queue paramter (see below) to zero disables this behavior, and the anticipatory +scheduler behaves essentially like the deadline scheduler. + +When read anticipation is enabled (antic_expire is not zero), reads +are dispatched to the disk controller one at a time. +At the end of each read request, the IO scheduler examines its next +candidate read request from its sorted read list. If that next request +is from the same process as the request that just completed, +or if the next request in the queue is "very close" to the +just completed request, it is dispatched immediately. 
Otherwise, +statistics (average think time, average seek distance) on the process +that submitted the just completed request are examined. If it seems +likely that that process will submit another request soon, and that +request is likely to be near the just completed request, then the IO +scheduler will stop dispatching more read requests for up time (antic_expire) +milliseconds, hoping that process will submit a new request near the one +that just completed. If such a request is made, then it is dispatched +immediately. If the antic_expire wait time expires, then the IO scheduler +will dispatch the next read request from the sorted read queue. + +To decide whether an anticipatory wait is worthwhile, the scheduler +maintains statistics for each process that can be used to compute +mean "think time" (the time between read requests), and mean seek +distance for that process. One observation is that these statistics +are associated with each process, but those statistics are not associated +with a specific IO device. So for example, if a process is doing IO +on several file systems on separate devices, the statistics will be +a combination of IO behavior from all those devices. + + +Tuning the anticipatory IO scheduler +------------------------------------ +When using 'as', the anticipatory IO scheduler there are 5 parameters under +/sys/block/*/iosched/. All are units of milliseconds. + +The parameters are: +* read_expire + Controls how long until a read request becomes "expired". It also controls the + interval between which expired requests are served, so set to 50, a request + might take anywhere < 100ms to be serviced _if_ it is the next on the + expired list. Obviously request expiration strategies won't make the disk + go faster. The result basically equates to the timeslice a single reader + gets in the presence of other IO. 100*((seek time / read_expire) + 1) is + very roughly the % streaming read efficiency your disk should get with + multiple readers. + +* read_batch_expire + Controls how much time a batch of reads is given before pending writes are + served. A higher value is more efficient. This might be set below read_expire + if writes are to be given higher priority than reads, but reads are to be + as efficient as possible when there are no writes. Generally though, it + should be some multiple of read_expire. + +* write_expire, and +* write_batch_expire are equivalent to the above, for writes. + +* antic_expire + Controls the maximum amount of time we can anticipate a good read (one + with a short seek distance from the most recently completed request) before + giving up. Many other factors may cause anticipation to be stopped early, + or some processes will not be "anticipated" at all. Should be a bit higher + for big seek time devices though not a linear correspondence - most + processes have only a few ms thinktime. + diff --git a/Documentation/block/deadline-iosched.txt b/Documentation/block/deadline-iosched.txt new file mode 100644 index 000000000..2b1318600 --- /dev/null +++ b/Documentation/block/deadline-iosched.txt @@ -0,0 +1,78 @@ +Deadline IO scheduler tunables +============================== + +This little file attempts to document how the deadline io scheduler works. +In particular, it will clarify the meaning of the exposed tunables that may be +of interest to power users. + +Each io queue has a set of io scheduler tunables associated with it. These +tunables control how the io scheduler works. 
You can find these entries +in: + +/sys/block/<device>/iosched + +assuming that you have sysfs mounted on /sys. If you don't have sysfs mounted, +you can do so by typing: + +# mount none /sys -t sysfs + + +******************************************************************************** + + +read_expire (in ms) +----------- + +The goal of the deadline io scheduler is to attempt to guarantee a start +service time for a request. As we focus mainly on read latencies, this is +tunable. When a read request first enters the io scheduler, it is assigned +a deadline that is the current time + the read_expire value in units of +milliseconds. + + +write_expire (in ms) +----------- + +Similar to read_expire mentioned above, but for writes. + + +fifo_batch +---------- + +When a read request expires its deadline, we must move some requests from +the sorted io scheduler list to the block device dispatch queue. fifo_batch +controls how many requests we move, based on the cost of each request. A +request is either qualified as a seek or a stream. The io scheduler knows +the last request that was serviced by the drive (or will be serviced right +before this one). See seek_cost and stream_unit. + + +writes_starved (number of dispatches) +------------- + +When we have to move requests from the io scheduler queue to the block +device dispatch queue, we always give a preference to reads. However, we +don't want to starve writes indefinitely either. So writes_starved controls +how many times we give preference to reads over writes. When that has been +done writes_starved number of times, we dispatch some writes based on the +same criteria as reads. + + +front_merges (bool) +------------ + +Sometimes it happens that a request enters the io scheduler that is contiguous +with a request that is already on the queue. Either it fits in the back of that +request, or it fits at the front. That is called either a back merge candidate +or a front merge candidate. Due to the way files are typically laid out, +back merges are much more common than front merges. For some workloads, you +may even know that it is a waste of time to spend any time attempting to +front merge requests. Setting front_merges to 0 disables this functionality. +Front merges may still occur due to the cached last_merge hint, but since +that comes at basically 0 cost we leave that on. We simply disable the +rbtree front sector lookup when the io scheduler merge function is called. + + +Nov 11 2002, Jens Axboe + + diff --git a/Documentation/cpu-freq/amd-powernow.txt b/Documentation/cpu-freq/amd-powernow.txt new file mode 100644 index 000000000..254da155f --- /dev/null +++ b/Documentation/cpu-freq/amd-powernow.txt @@ -0,0 +1,38 @@ + +PowerNow! and Cool'n'Quiet are AMD names for frequency +management capabilities in AMD processors. As the hardware +implementation changes in new generations of the processors, +there is a different cpu-freq driver for each generation. + +Note that the drivers will not load on the "wrong" hardware, +so it is safe to try each driver in turn when in doubt as to +which is the correct driver. + +Note that the functionality to change frequency (and voltage) +is not available in all processors. The drivers will refuse +to load on processors without this capability. The capability +is detected with the cpuid instruction. + +The drivers use BIOS supplied tables to obtain frequency and +voltage information appropriate for a particular platform. +Frequency transitions will be unavailable if the BIOS does +not supply these tables.
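The cpuid-based capability check mentioned above can also be done by hand from user space before deciding which driver to load. The following sketch uses GCC's <cpuid.h> helper; the FID/VID control bit positions of extended function 0x80000007 are taken from AMD's published documentation and should be treated as assumptions here, not as a description of exactly what the kernel drivers test.

    /* Hedged sketch: query AMD's Advanced Power Management feature flags. */
    #include <stdio.h>
    #include <cpuid.h>                     /* __get_cpuid(), provided by GCC */

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
                    printf("extended function 0x80000007 not supported\n");
                    return 1;
            }
            /* Bit 1 = FID control, bit 2 = VID control (assumed positions). */
            printf("FID control: %s\n", (edx & (1u << 1)) ? "yes" : "no");
            printf("VID control: %s\n", (edx & (1u << 2)) ? "yes" : "no");
            return 0;
    }

If both bits read back as "no", the frequency/voltage transition capability described above is most likely absent and the drivers will refuse to load.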
+ +6th Generation: powernow-k6 + +7th Generation: powernow-k7: Athlon, Duron, Geode. + +8th Generation: powernow-k8: Athlon, Athlon 64, Opteron, Sempron. +Documentation on this functionality in 8th generation processors +is available in the "BIOS and Kernel Developer's Guide", publication +26094, in chapter 9, available for download from www.amd.com. + +BIOS supplied data, for powernow-k7 and for powernow-k8, may be +from either the PSB table or from ACPI objects. The ACPI support +is only available if the kernel config sets CONFIG_ACPI_PROCESSOR. +The powernow-k8 driver will attempt to use ACPI if so configured, +and fall back to PST if that fails. +The powernow-k7 driver will try to use the PSB support first, and +fall back to ACPI if the PSB support fails. A module parameter, +acpi_force, is provided to force ACPI support to be used instead +of PSB support. diff --git a/Documentation/device-mapper/dm-io.txt b/Documentation/device-mapper/dm-io.txt new file mode 100644 index 000000000..3b5d9a52c --- /dev/null +++ b/Documentation/device-mapper/dm-io.txt @@ -0,0 +1,75 @@ +dm-io +===== + +Dm-io provides synchronous and asynchronous I/O services. There are three +types of I/O services available, and each type has a sync and an async +version. + +The user must set up an io_region structure to describe the desired location +of the I/O. Each io_region indicates a block-device along with the starting +sector and size of the region. + + struct io_region { + struct block_device *bdev; + sector_t sector; + sector_t count; + }; + +Dm-io can read from one io_region or write to one or more io_regions. Writes +to multiple regions are specified by an array of io_region structures. + +The first I/O service type takes a list of memory pages as the data buffer for +the I/O, along with an offset into the first page. + + struct page_list { + struct page_list *next; + struct page *page; + }; + + int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, + struct page_list *pl, unsigned int offset, + unsigned long *error_bits); + int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, + struct page_list *pl, unsigned int offset, + io_notify_fn fn, void *context); + +The second I/O service type takes an array of bio vectors as the data buffer +for the I/O. This service can be handy if the caller has a pre-assembled bio, +but wants to direct different portions of the bio to different devices. + + int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, + int rw, struct bio_vec *bvec, + unsigned long *error_bits); + int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, + int rw, struct bio_vec *bvec, + io_notify_fn fn, void *context); + +The third I/O service type takes a pointer to a vmalloc'd memory buffer as the +data buffer for the I/O. This service can be handy if the caller needs to do +I/O to a large region but doesn't want to allocate a large number of individual +memory pages. + + int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw, + void *data, unsigned long *error_bits); + int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw, + void *data, io_notify_fn fn, void *context); + +Callers of the asynchronous I/O services must include the name of a completion +callback routine and a pointer to some context data for the I/O. 
+ + typedef void (*io_notify_fn)(unsigned long error, void *context); + +The "error" parameter in this callback, as well as the "*error" parameter in +all of the synchronous versions, is a bitset (instead of a simple error value). +In the case of an write-I/O to multiple regions, this bitset allows dm-io to +indicate success or failure on each individual region. + +Before using any of the dm-io services, the user should call dm_io_get() +and specify the number of pages they expect to perform I/O on concurrently. +Dm-io will attempt to resize its mempool to make sure enough pages are +always available in order to avoid unnecessary waiting while performing I/O. + +When the user is finished using the dm-io services, they should call +dm_io_put() and specify the same number of pages that were given on the +dm_io_get() call. + diff --git a/Documentation/device-mapper/kcopyd.txt b/Documentation/device-mapper/kcopyd.txt new file mode 100644 index 000000000..820382c4c --- /dev/null +++ b/Documentation/device-mapper/kcopyd.txt @@ -0,0 +1,47 @@ +kcopyd +====== + +Kcopyd provides the ability to copy a range of sectors from one block-device +to one or more other block-devices, with an asynchronous completion +notification. It is used by dm-snapshot and dm-mirror. + +Users of kcopyd must first create a client and indicate how many memory pages +to set aside for their copy jobs. This is done with a call to +kcopyd_client_create(). + + int kcopyd_client_create(unsigned int num_pages, + struct kcopyd_client **result); + +To start a copy job, the user must set up io_region structures to describe +the source and destinations of the copy. Each io_region indicates a +block-device along with the starting sector and size of the region. The source +of the copy is given as one io_region structure, and the destinations of the +copy are given as an array of io_region structures. + + struct io_region { + struct block_device *bdev; + sector_t sector; + sector_t count; + }; + +To start the copy, the user calls kcopyd_copy(), passing in the client +pointer, pointers to the source and destination io_regions, the name of a +completion callback routine, and a pointer to some context data for the copy. + + int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from, + unsigned int num_dests, struct io_region *dests, + unsigned int flags, kcopyd_notify_fn fn, void *context); + + typedef void (*kcopyd_notify_fn)(int read_err, unsigned int write_err, + void *context); + +When the copy completes, kcopyd will call the user's completion routine, +passing back the user's context pointer. It will also indicate if a read or +write error occurred during the copy. + +When a user is done with all their copy jobs, they should call +kcopyd_client_destroy() to delete the kcopyd client, which will release the +associated memory pages. + + void kcopyd_client_destroy(struct kcopyd_client *kc); + diff --git a/Documentation/device-mapper/linear.txt b/Documentation/device-mapper/linear.txt new file mode 100644 index 000000000..d5307d380 --- /dev/null +++ b/Documentation/device-mapper/linear.txt @@ -0,0 +1,61 @@ +dm-linear +========= + +Device-Mapper's "linear" target maps a linear range of the Device-Mapper +device onto a linear range of another device. This is the basic building +block of logical volume managers. + +Parameters: + : Full pathname to the underlying block-device, or a + "major:minor" device-number. + : Starting sector within the device. 
+ + +Example scripts +=============== +[[ +#!/bin/sh +# Create an identity mapping for a device +echo "0 `blockdev --getsize $1` linear $1 0" | dmsetup create identity +]] + + +[[ +#!/bin/sh +# Join 2 devices together +size1=`blockdev --getsize $1` +size2=`blockdev --getsize $2` +echo "0 $size1 linear $1 0 +$size1 $size2 linear $2 0" | dmsetup create joined +]] + + +[[ +#!/usr/bin/perl -w +# Split a device into 4M chunks and then join them together in reverse order. + +my $name = "reverse"; +my $extent_size = 4 * 1024 * 2; +my $dev = $ARGV[0]; +my $table = ""; +my $count = 0; + +if (!defined($dev)) { + die("Please specify a device.\n"); +} + +my $dev_size = `blockdev --getsize $dev`; +my $extents = int($dev_size / $extent_size) - + (($dev_size % $extent_size) ? 1 : 0); + +while ($extents > 0) { + my $this_start = $count * $extent_size; + $extents--; + $count++; + my $this_offset = $extents * $extent_size; + + $table .= "$this_start $extent_size linear $dev $this_offset\n"; +} + +`echo \"$table\" | dmsetup create $name`; +]] diff --git a/Documentation/device-mapper/striped.txt b/Documentation/device-mapper/striped.txt new file mode 100644 index 000000000..f34d3236b --- /dev/null +++ b/Documentation/device-mapper/striped.txt @@ -0,0 +1,58 @@ +dm-stripe +========= + +Device-Mapper's "striped" target is used to create a striped (i.e. RAID-0) +device across one or more underlying devices. Data is written in "chunks", +with consecutive chunks rotating among the underlying devices. This can +potentially provide improved I/O throughput by utilizing several physical +devices in parallel. + +Parameters: [ ]+ + : Number of underlying devices. + : Size of each chunk of data. Must be a power-of-2 and at + least as large as the system's PAGE_SIZE. + : Full pathname to the underlying block-device, or a + "major:minor" device-number. + : Starting sector within the device. + +One or more underlying devices can be specified. The striped device size must +be a multiple of the chunk size and a multiple of the number of underlying +devices. + + +Example scripts +=============== + +[[ +#!/usr/bin/perl -w +# Create a striped device across any number of underlying devices. The device +# will be called "stripe_dev" and have a chunk-size of 128k. + +my $chunk_size = 128 * 2; +my $dev_name = "stripe_dev"; +my $num_devs = @ARGV; +my @devs = @ARGV; +my ($min_dev_size, $stripe_dev_size, $i); + +if (!$num_devs) { + die("Specify at least one device\n"); +} + +$min_dev_size = `blockdev --getsize $devs[0]`; +for ($i = 1; $i < $num_devs; $i++) { + my $this_size = `blockdev --getsize $devs[$i]`; + $min_dev_size = ($min_dev_size < $this_size) ? + $min_dev_size : $this_size; +} + +$stripe_dev_size = $min_dev_size * $num_devs; +$stripe_dev_size -= $stripe_dev_size % ($chunk_size * $num_devs); + +$table = "0 $stripe_dev_size striped $num_devs $chunk_size"; +for ($i = 0; $i < $num_devs; $i++) { + $table .= " $devs[$i] 0"; +} + +`echo $table | dmsetup create $dev_name`; +]] + diff --git a/Documentation/device-mapper/zero.txt b/Documentation/device-mapper/zero.txt new file mode 100644 index 000000000..20fb38e7f --- /dev/null +++ b/Documentation/device-mapper/zero.txt @@ -0,0 +1,37 @@ +dm-zero +======= + +Device-Mapper's "zero" target provides a block-device that always returns +zero'd data on reads and silently drops writes. This is similar behavior to +/dev/zero, but as a block-device instead of a character-device. + +Dm-zero has no target-specific parameters. 
+ +One very interesting use of dm-zero is for creating "sparse" devices in +conjunction with dm-snapshot. A sparse device reports a device-size larger +than the amount of actual storage space available for that device. A user can +write data anywhere within the sparse device and read it back like a normal +device. Reads to previously unwritten areas will return a zero'd buffer. When +enough data has been written to fill up the actual storage space, the sparse +device is deactivated. This can be very useful for testing device and +filesystem limitations. + +To create a sparse device, start by creating a dm-zero device that's the +desired size of the sparse device. For this example, we'll assume a 10TB +sparse device. + +TEN_TERABYTES=`expr 10 \* 1024 \* 1024 \* 1024 \* 2` # 10 TB in sectors +echo "0 $TEN_TERABYTES zero" | dmsetup create zero1 + +Then create a snapshot of the zero device, using any available block-device as +the COW device. The size of the COW device will determine the amount of real +space available to the sparse device. For this example, we'll assume /dev/sdb1 +is an available 10GB partition. + +echo "0 $TEN_TERABYTES snapshot /dev/mapper/zero1 /dev/sdb1 p 128" | \ + dmsetup create sparse1 + +This will create a 10TB sparse device called /dev/mapper/sparse1 that has +10GB of actual storage space available. If more than 10GB of data is written +to this device, it will start returning I/O errors. + diff --git a/Documentation/fb/sisfb.txt b/Documentation/fb/sisfb.txt new file mode 100644 index 000000000..3b50c517a --- /dev/null +++ b/Documentation/fb/sisfb.txt @@ -0,0 +1,158 @@ + +What is sisfb? +============== + +sisfb is a framebuffer device driver for SiS (Silicon Integrated Systems) +graphics chips. Supported are: + +- SiS 300 series: SiS 300/305, 540, 630(S), 730(S) +- SiS 315 series: SiS 315/H/PRO, 55x, (M)65x, 740, (M)661(F/M)X, (M)741(GX) +- SiS 330 series: SiS 330 ("Xabre"), (M)760 + + +Why do I need a framebuffer driver? +=================================== + +sisfb is eg. useful if you want a high-resolution text console. Besides that, +sisfb is required to run DirectFB (which comes with an additional, dedicated +driver for the 315 series). + +On the 300 series, sisfb on kernels older than 2.6.3 furthermore plays an +important role in connection with DRM/DRI: Sisfb manages the memory heap +used by DRM/DRI for 3D texture and other data. This memory management is +required for using DRI/DRM. + +Kernels >= around 2.6.3 do not need sisfb any longer for DRI/DRM memory +management. The SiS DRM driver has been updated and features a memory manager +of its own (which will be used if sisfb is not compiled). So unless you want +a graphical console, you don't need sisfb on kernels >=2.6.3. + +Sidenote: Since this seems to be a commonly made mistake: sisfb and vesafb +cannot be active at the same time! Do only select one of them in your kernel +configuration. + + +How are parameters passed to sisfb? +=================================== + +Well, it depends: If compiled statically into the kernel, use lilo's append +statement to add the parameters to the kernel command line. Please see lilo's +(or GRUB's) documentation for more information. If sisfb is a kernel module, +parameters are given with the modprobe (or insmod) command. 
+ +Example for sisfb as part of the static kernel: Add the following line to your +lilo.conf: + + append="video=sisfb:mode:1024x768x16,mem:12288,rate:75" + +Example for sisfb as a module: Start sisfb by typing + + modprobe sisfb mode=1024x768x16 rate=75 mem=12288 + +A common mistake is that folks use a wrong parameter format when using the +driver compiled into the kernel. Please note: If compiled into the kernel, +the parameter format is video=sisfb:mode:none or video=sisfb:mode:1024x768x16 +(or whatever mode you want to use, alternatively using any other format +described above or the vesa keyword instead of mode). If compiled as a module, +the parameter format reads mode=none or mode=1024x768x16 (or whatever mode you +want to use). Using a "=" for a ":" (and vice versa) is a huge difference! +Additionally: If you give more than one argument to the in-kernel sisfb, the +arguments are separated with ",". For example: + + video=sisfb:mode:1024x768x16,rate:75,mem:12288 + + +How do I use it? +================ + +Preface statement: This file only covers very little of the driver's +capabilities and features. Please refer to the author's and maintainer's +website at http://www.winischhofer.net/linuxsisvga.shtml for more +information. Additionally, "modinfo sisfb" gives an overview over all +supported options including some explanation. + +The desired display mode can be specified using the keyword "mode" with +a parameter in one of the follwing formats: + - XxYxDepth or + - XxY-Depth or + - XxY-Depth@Rate or + - XxY + - or simply use the VESA mode number in hexadecimal or decimal. + +For example: 1024x768x16, 1024x768-16@75, 1280x1024-16. If no depth is +specified, it defaults to 8. If no rate is given, it defaults to 60Hz. Depth 32 +means 24bit color depth (but 32 bit framebuffer depth, which is not relevant +to the user). + +Additionally, sisfb understands the keyword "vesa" followed by a VESA mode +number in decimal or hexadecimal. For example: vesa=791 or vesa=0x117. Please +use either "mode" or "vesa" but not both. + +Linux 2.4 only: If no mode is given, sisfb defaults to "no mode" (mode=none) if +compiled as a module; if sisfb is statically compiled into the kernel, it +defaults to 800x600x8 unless CRT2 type is LCD, in which case the LCD's native +resolution is used. If you want to switch to a different mode, use the fbset +shell command. + +Linux 2.6 only: If no mode is given, sisfb defaults to 800x600x8 unless CRT2 +type is LCD, in which case it defaults to the LCD's native resolution. If +you want to switch to another mode, use the stty shell command. + +You should compile in both vgacon (to boot if you remove you SiS card from +your system) and sisfb (for graphics mode). Under Linux 2.6, also "Framebuffer +console support" (fbcon) is needed for a graphical console. + +You should *not* compile-in vesafb. And please do not use the "vga=" keyword +in lilo's or grub's configuration file; mode selection is done using the +"mode" or "vesa" keywords as a parameter. See above and below. + + +X11 +=== + +If using XFree86 or X.org, it is recommended that you don't use the "fbdev" +driver but the dedicated "sis" X driver. The "sis" X driver and sisfb are +developed by the same person (Thomas Winischhofer) and cooperate well with +each other. + + +SVGALib +======= + +SVGALib, if directly accessing the hardware, never restores the screen +correctly, especially on laptops or if the output devices are LCD or TV. +Therefore, use the chipset "FBDEV" in SVGALib configuration. 
This will make +SVGALib use the framebuffer device for mode switches and restoration. + + +Configuration +============= + +(Some) accepted options: + +off - Disable sisfb. This option is only understood if sisfb is + in-kernel, not a module. +mem:X - size of memory for the console, rest will be used for DRI/DRM. X + is in kilobytes. On 300 series, the default is 4096, 8192 or + 16384 (each in kilobyte) depending on how much video ram the card + has. On 315/330 series, the default is the maximum available ram + (since DRI/DRM is not supported for these chipsets). +noaccel - do not use 2D acceleration engine. (Default: use acceleration) +noypan - disable y-panning and scroll by redrawing the entire screen. + This is much slower than y-panning. (Default: use y-panning) +vesa:X - selects startup videomode. X is number from 0 to 0x1FF and + represents the VESA mode number (can be given in decimal or + hexadecimal form, the latter prefixed with "0x"). +mode:X - selects startup videomode. Please see above for the format of + "X". + +Boolean options such as "noaccel" or "noypan" are to be given without a +parameter if sisfb is in-kernel (for example "video=sisfb:noypan). If +sisfb is a module, these are to be set to 1 (for example "modprobe sisfb +noypan=1"). + +-- +Thomas Winischhofer +May 27, 2004 + + diff --git a/Documentation/filesystems/automount-support.txt b/Documentation/filesystems/automount-support.txt new file mode 100644 index 000000000..58c65a171 --- /dev/null +++ b/Documentation/filesystems/automount-support.txt @@ -0,0 +1,118 @@ +Support is available for filesystems that wish to do automounting support (such +as kAFS which can be found in fs/afs/). This facility includes allowing +in-kernel mounts to be performed and mountpoint degradation to be +requested. The latter can also be requested by userspace. + + +====================== +IN-KERNEL AUTOMOUNTING +====================== + +A filesystem can now mount another filesystem on one of its directories by the +following procedure: + + (1) Give the directory a follow_link() operation. + + When the directory is accessed, the follow_link op will be called, and + it will be provided with the location of the mountpoint in the nameidata + structure (vfsmount and dentry). + + (2) Have the follow_link() op do the following steps: + + (a) Call do_kern_mount() to call the appropriate filesystem to set up a + superblock and gain a vfsmount structure representing it. + + (b) Copy the nameidata provided as an argument and substitute the dentry + argument into it the copy. + + (c) Call do_add_mount() to install the new vfsmount into the namespace's + mountpoint tree, thus making it accessible to userspace. Use the + nameidata set up in (b) as the destination. + + If the mountpoint will be automatically expired, then do_add_mount() + should also be given the location of an expiration list (see further + down). + + (d) Release the path in the nameidata argument and substitute in the new + vfsmount and its root dentry. The ref counts on these will need + incrementing. + +Then from userspace, you can just do something like: + + [root@andromeda root]# mount -t afs \#root.afs. /afs + [root@andromeda root]# ls /afs + asd cambridge cambridge.redhat.com grand.central.org + [root@andromeda root]# ls /afs/cambridge + afsdoc + [root@andromeda root]# ls /afs/cambridge/afsdoc/ + ChangeLog html LICENSE pdf RELNOTES-1.2.2 + +And then if you look in the mountpoint catalogue, you'll see something like: + + [root@andromeda root]# cat /proc/mounts + ... + #root.afs. 
/afs afs rw 0 0 + #root.cell. /afs/cambridge.redhat.com afs rw 0 0 + #afsdoc. /afs/cambridge.redhat.com/afsdoc afs rw 0 0 + + +=========================== +AUTOMATIC MOUNTPOINT EXPIRY +=========================== + +Automatic expiration of mountpoints is easy, provided you've mounted the +mountpoint to be expired in the automounting procedure outlined above. + +To do expiration, you need to follow these steps: + + (3) Create at least one list off which the vfsmounts to be expired can be + hung. Access to this list will be governed by the vfsmount_lock. + + (4) In step (2c) above, the call to do_add_mount() should be provided with a + pointer to this list. It will hang the vfsmount off of it if it succeeds. + + (5) When you want mountpoints to be expired, call mark_mounts_for_expiry() + with a pointer to this list. This will process the list, marking every + vfsmount thereon for potential expiry on the next call. + + If a vfsmount was already flagged for expiry, and if its usage count is 1 + (it's only referenced by its parent vfsmount), then it will be deleted + from the namespace and thrown away (effectively unmounted). + + It may prove simplest to simply call this at regular intervals, using + some sort of timed event to drive it. + +The expiration flag is cleared by calls to mntput. This means that expiration +will only happen on the second expiration request after the last time the +mountpoint was accessed. + +If a mountpoint is moved, it gets removed from the expiration list. If a bind +mount is made on an expirable mount, the new vfsmount will not be on the +expiration list and will not expire. + +If a namespace is copied, all mountpoints contained therein will be copied, +and the copies of those that are on an expiration list will be added to the +same expiration list. + + +======================= +USERSPACE DRIVEN EXPIRY +======================= + +As an alternative, it is possible for userspace to request expiry of any +mountpoint (though some will be rejected - the current process's idea of the +rootfs for example). It does this by passing the MNT_EXPIRE flag to +umount(). This flag is considered incompatible with MNT_FORCE and MNT_DETACH. + +If the mountpoint in question is in referenced by something other than +umount() or its parent mountpoint, an EBUSY error will be returned and the +mountpoint will not be marked for expiration or unmounted. + +If the mountpoint was not already marked for expiry at that time, an EAGAIN +error will be given and it won't be unmounted. + +Otherwise if it was already marked and it wasn't referenced, unmounting will +take place as usual. + +Again, the expiration flag is cleared every time anything other than umount() +looks at a mountpoint. diff --git a/Documentation/hpet.txt b/Documentation/hpet.txt new file mode 100644 index 000000000..584ebc277 --- /dev/null +++ b/Documentation/hpet.txt @@ -0,0 +1,298 @@ + High Precision Event Timer Driver for Linux + +The High Precision Event Timer (HPET) hardware is the future replacement for the 8254 and Real +Time Clock (RTC) periodic timer functionality. Each HPET can have up two 32 timers. It is possible +to configure the first two timers as legacy replacements for 8254 and RTC periodic. A specification +done by INTEL and Microsoft can be found at http://www.intel.com/labs/platcomp/hpet/hpetspec.htm. + +The driver supports detection of HPET driver allocation and initialization of the HPET before the +driver module_init routine is called. 
This enables platform code which uses timer 0 or 1 as the +main timer to intercept HPET initialization. An example of this initialization can be found in +arch/i386/kernel/time_hpet.c. + +The driver provides two APIs which are very similar to the API found in the rtc.c driver. +There is a user space API and a kernel space API. An example user space program is provided +below. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +extern void hpet_open_close(int, const char **); +extern void hpet_info(int, const char **); +extern void hpet_poll(int, const char **); +extern void hpet_fasync(int, const char **); +extern void hpet_read(int, const char **); + +#include +#include +#include + +struct hpet_command { + char *command; + void (*func)(int argc, const char ** argv); +} hpet_command[] = { + { + "open-close", + hpet_open_close + }, + { + "info", + hpet_info + }, + { + "poll", + hpet_poll + }, + { + "fasync", + hpet_fasync + }, +}; + +int +main(int argc, const char ** argv) +{ + int i; + + argc--; + argv++; + + if (!argc) { + fprintf(stderr, "-hpet: requires command\n"); + return -1; + } + + + for (i = 0; i < (sizeof (hpet_command) / sizeof (hpet_command[0])); i++) + if (!strcmp(argv[0], hpet_command[i].command)) { + argc--; + argv++; + fprintf(stderr, "-hpet: executing %s\n", + hpet_command[i].command); + hpet_command[i].func(argc, argv); + return 0; + } + + fprintf(stderr, "do_hpet: command %s not implemented\n", argv[0]); + + return -1; +} + +void +hpet_open_close(int argc, const char **argv) +{ + int fd; + + if (argc != 1) { + fprintf(stderr, "hpet_open_close: device-name\n"); + return; + } + + fd = open(argv[0], O_RDWR); + if (fd < 0) + fprintf(stderr, "hpet_open_close: open failed\n"); + else + close(fd); + + return; +} + +void +hpet_info(int argc, const char **argv) +{ +} + +void +hpet_poll(int argc, const char **argv) +{ + unsigned long freq; + int iterations, i, fd; + struct pollfd pfd; + struct hpet_info info; + struct timeval stv, etv; + struct timezone tz; + long usec; + + if (argc != 3) { + fprintf(stderr, "hpet_poll: device-name freq iterations\n"); + return; + } + + freq = atoi(argv[1]); + iterations = atoi(argv[2]); + + fd = open(argv[0], O_RDWR); + + if (fd < 0) { + fprintf(stderr, "hpet_poll: open of %s failed\n", argv[0]); + return; + } + + if (ioctl(fd, HPET_IRQFREQ, freq) < 0) { + fprintf(stderr, "hpet_poll: HPET_IRQFREQ failed\n"); + goto out; + } + + if (ioctl(fd, HPET_INFO, &info) < 0) { + fprintf(stderr, "hpet_poll: failed to get info\n"); + goto out; + } + + fprintf(stderr, "hpet_poll: info.hi_flags 0x%lx\n", info.hi_flags); + + if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) { + fprintf(stderr, "hpet_poll: HPET_EPI failed\n"); + goto out; + } + + if (ioctl(fd, HPET_IE_ON, 0) < 0) { + fprintf(stderr, "hpet_poll, HPET_IE_ON failed\n"); + goto out; + } + + pfd.fd = fd; + pfd.events = POLLIN; + + for (i = 0; i < iterations; i++) { + pfd.revents = 0; + gettimeofday(&stv, &tz); + if (poll(&pfd, 1, -1) < 0) + fprintf(stderr, "hpet_poll: poll failed\n"); + else { + long data; + + gettimeofday(&etv, &tz); + usec = stv.tv_sec * 1000000 + stv.tv_usec; + usec = (etv.tv_sec * 1000000 + etv.tv_usec) - usec; + + fprintf(stderr, + "hpet_poll: expired time = 0x%lx\n", usec); + + fprintf(stderr, "hpet_poll: revents = 0x%x\n", + pfd.revents); + + if (read(fd, &data, sizeof(data)) != sizeof(data)) { + fprintf(stderr, "hpet_poll: read failed\n"); + } + else + 
fprintf(stderr, "hpet_poll: data 0x%lx\n", + data); + } + } + +out: + close(fd); + return; +} + +static int hpet_sigio_count; + +static void +hpet_sigio(int val) +{ + fprintf(stderr, "hpet_sigio: called\n"); + hpet_sigio_count++; +} + +void +hpet_fasync(int argc, const char **argv) +{ + unsigned long freq; + int iterations, i, fd, value; + sig_t oldsig; + struct hpet_info info; + + hpet_sigio_count = 0; + fd = -1; + + if ((oldsig = signal(SIGIO, hpet_sigio)) == SIG_ERR) { + fprintf(stderr, "hpet_fasync: failed to set signal handler\n"); + return; + } + + if (argc != 3) { + fprintf(stderr, "hpet_fasync: device-name freq iterations\n"); + goto out; + } + + fd = open(argv[0], O_RDWR); + + if (fd < 0) { + fprintf(stderr, "hpet_fasync: failed to open %s\n", argv[0]); + return; + } + + + if ((fcntl(fd, F_SETOWN, getpid()) == 1) || + ((value = fcntl(fd, F_GETFL)) == 1) || + (fcntl(fd, F_SETFL, value | O_ASYNC) == 1)) { + fprintf(stderr, "hpet_fasync: fcntl failed\n"); + goto out; + } + + freq = atoi(argv[1]); + iterations = atoi(argv[2]); + + if (ioctl(fd, HPET_IRQFREQ, freq) < 0) { + fprintf(stderr, "hpet_fasync: HPET_IRQFREQ failed\n"); + goto out; + } + + if (ioctl(fd, HPET_INFO, &info) < 0) { + fprintf(stderr, "hpet_fasync: failed to get info\n"); + goto out; + } + + fprintf(stderr, "hpet_fasync: info.hi_flags 0x%lx\n", info.hi_flags); + + if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) { + fprintf(stderr, "hpet_fasync: HPET_EPI failed\n"); + goto out; + } + + if (ioctl(fd, HPET_IE_ON, 0) < 0) { + fprintf(stderr, "hpet_fasync, HPET_IE_ON failed\n"); + goto out; + } + + for (i = 0; i < iterations; i++) { + (void) pause(); + fprintf(stderr, "hpet_fasync: count = %d\n", hpet_sigio_count); + } + +out: + signal(SIGIO, oldsig); + + if (fd >= 0) + close(fd); + + return; +} + +The kernel API has three interfaces exported from the driver: + + hpet_register(struct hpet_task *tp, int periodic) + hpet_unregister(struct hpet_task *tp) + hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg) + +The kernel module using this interface fills in the ht_func and ht_data members of the +hpet_task structure before calling hpet_register. hpet_control simply vectors to the hpet_ioctl +routine and has the same commands and respective arguments as the user API. hpet_unregister +is used to terminate usage of the HPET timer reserved by hpet_register. + + diff --git a/Documentation/i2c/i2c-parport b/Documentation/i2c/i2c-parport new file mode 100644 index 000000000..d359461ce --- /dev/null +++ b/Documentation/i2c/i2c-parport @@ -0,0 +1,156 @@ +================== +i2c-parport driver +================== + +2004-07-06, Jean Delvare + +This is a unified driver for several i2c-over-parallel-port adapters, +such as the ones made by Philips, Velleman or ELV. This driver is +meant as a replacement for the older, individual drivers: + * i2c-philips-par + * i2c-elv + * i2c-velleman + * video/i2c-parport (NOT the same as this one, dedicated to home brew + teletext adapters) + +It currently supports the following devices: + * Philips adapter + * home brew teletext adapter + * Velleman K8000 adapter + * ELV adapter + * Analog Devices evaluation boards (ADM1025, ADM1030, ADM1031, ADM1032) + +These devices use different pinout configurations, so you have to tell +the driver what you have, using the type module parameter. There is no +way to autodetect the devices. Support for different pinout configurations +can be easily added when needed. 
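Once i2c-parport has registered the parallel-port adapter, it is an ordinary I2C bus as far as the rest of the system is concerned, so a chip behind it can be reached from user space through the generic i2c-dev interface. The sketch below is only an illustration: the bus number (/dev/i2c-0) and the 0x2d slave address are made-up values, i2c-dev must be loaded, and the real bus number depends on which other adapters are present.

    /* Hedged sketch: read one register from a chip on the adapter's bus. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/i2c-dev.h>          /* I2C_SLAVE */

    int main(void)
    {
            unsigned char reg = 0x00, value;
            int fd = open("/dev/i2c-0", O_RDWR);    /* assumed bus number */

            if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x2d) < 0) {
                    perror("i2c setup");
                    return 1;
            }
            /* Select register 0, then read a single byte back. */
            if (write(fd, &reg, 1) != 1 || read(fd, &value, 1) != 1) {
                    perror("i2c transfer");
                    close(fd);
                    return 1;
            }
            printf("register 0x%02x = 0x%02x\n", reg, value);
            close(fd);
            return 0;
    }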
+ + +Building your own adapter +------------------------- + +If you want to build you own i2c-over-parallel-port adapter, here is +a sample electronics schema (credits go to Sylvain Munaut): + +Device PC +Side ___________________Vdd (+) Side + | | | + --- --- --- + | | | | | | + |R| |R| |R| + | | | | | | + --- --- --- + | | | + | | /| | +SCL ----------x--------o |-----------x------------------- pin 2 + | \| | | + | | | + | |\ | | +SDA ----------x----x---| o---x--------------------------- pin 13 + | |/ | + | | + | /| | + ---------o |----------------x-------------- pin 3 + \| | | + | | + --- --- + | | | | + |R| |R| + | | | | + --- --- + | | + ### ### + GND GND + +Remarks: + - This is the exact pinout and electronics used on the Analog Devices + evaluation boards. + /| + - All inverters -o |- must be 74HC05, they must be open collector output. + \| + - All resitors are 10k. + - Pins 18-25 of the parallel port connected to GND. + - Pins 4-9 (D2-D7) could be used as VDD is the driver drives them high. + The ADM1032 evaluation board uses D4-D7. Beware that the amount of + current you can draw from the parallel port is limited. Also note that + all connected lines MUST BE driven at the same state, else you'll short + circuit the output buffers! So plugging the I2C adapter after loading + the i2c-parport module might be a good safety since data line state + prior to init may be unknown. + - This is 5V! + - Obviously you cannot read SCL (so it's not really standard-compliant). + Pretty easy to add, just copy the SDA part and use another input pin. + That would give (ELV compatible pinout): + + +Device PC +Side ______________________________Vdd (+) Side + | | | | + --- --- --- --- + | | | | | | | | + |R| |R| |R| |R| + | | | | | | | | + --- --- --- --- + | | | | + | | |\ | | +SCL ----------x--------x--| o---x------------------------ pin 15 + | | |/ | + | | | + | | /| | + | ---o |-------------x-------------- pin 2 + | \| | | + | | | + | | | + | |\ | | +SDA ---------------x---x--| o--------x------------------- pin 10 + | |/ | + | | + | /| | + ---o |------------------x--------- pin 3 + \| | | + | | + --- --- + | | | | + |R| |R| + | | | | + --- --- + | | + ### ### + GND GND + + +If possible, you should use the same pinout configuration as existing +adapters do, so you won't even have to change the code. + + +Similar (but different) drivers +------------------------------- + +This driver is NOT the same as the i2c-pport driver found in the i2c package. +The i2c-pport driver makes use of modern parallel port features so that +you don't need additional electronics. It has other restrictions however, and +was not ported to Linux 2.6 (yet). + +This driver is also NOT the same as the i2c-pcf-epp driver found in the +lm_sensors package. The i2c-pcf-epp driver doesn't use the parallel port +as an I2C bus directly. Instead, it uses it to control an external I2C bus +master. That driver was not ported to Linux 2.6 (yet) either. + + +Legacy documentation for Velleman adapter +----------------------------------------- + +Useful links: +Velleman http://www.velleman.be/ +Velleman K8000 Howto http://howto.htlw16.ac.at/k8000-howto.html + +The project has lead to new libs for the Velleman K8000 and K8005: + LIBK8000 v1.99.1 and LIBK8005 v0.21 +With these libs, you can control the K8000 interface card and the K8005 +stepper motor card with the simple commands which are in the original +Velleman software, like SetIOchannel, ReadADchannel, SendStepCCWFull and +many more, using /dev/velleman. 
+ http://home.wanadoo.nl/hihihi/libk8000.htm + http://home.wanadoo.nl/hihihi/libk8005.htm + http://struyve.mine.nu:8080/index.php?block=k8000 + http://sourceforge.net/projects/libk8005/ diff --git a/Documentation/powerpc/hvcs.txt b/Documentation/powerpc/hvcs.txt new file mode 100644 index 000000000..111ad15a3 --- /dev/null +++ b/Documentation/powerpc/hvcs.txt @@ -0,0 +1,534 @@ +=========================================================================== + HVCS + IBM "Hypervisor Virtual Console Server" Installation Guide + for Linux Kernel 2.6.4+ + Copyright (C) 2004 IBM Corporation + +=========================================================================== +NOTE:Eight space tabs are the optimum editor setting for reading this file. +=========================================================================== + + Author(s) : Ryan S. Arnold + Date Created: March, 02, 2004 + Last Changed: July, 07, 2004 + +--------------------------------------------------------------------------- +Table of contents: + + 1. Driver Introduction: + 2. System Requirements + 3. Build Options: + 3.1 Built-in: + 3.2 Module: + 4. Installation: + 5. Connection: + 6. Disconnection: + 7. Configuration: + 8. Questions & Answers: + 9. Reporting Bugs: + +--------------------------------------------------------------------------- +1. Driver Introduction: + +This is the device driver for the IBM Hypervisor Virtual Console Server, +"hvcs". The IBM hvcs provides a tty driver interface to allow Linux user +space applications access to the system consoles of logically partitioned +operating systems (Linux and AIX) running on the same partitioned Power5 +ppc64 system. Physical hardware consoles per partition are not practical +on this hardware so system consoles are accessed by this driver using +firmware interfaces to virtual terminal devices. + +--------------------------------------------------------------------------- +2. System Requirements: + +This device driver was written using 2.6.4 Linux kernel APIs and will only +build and run on kernels of this version or later. + +This driver was written to operate solely on IBM Power5 ppc64 hardware +though some care was taken to abstract the architecture dependent firmware +calls from the driver code. + +Sysfs must be mounted on the system so that the user can determine which +major and minor numbers are associated with each vty-server. Directions +for sysfs mounting are outside the scope of this document. + +--------------------------------------------------------------------------- +3. Build Options: + +The hvcs driver registers itself as a tty driver. The tty layer +dynamically allocates a block of major and minor numbers in a quantity +requested by the registering driver. The hvcs driver asks the tty layer +for 64 of these major/minor numbers by default to use for hvcs device node +entries. + +If the default number of device entries is adequate then this driver can be +built into the kernel. If not, the default can be over-ridden by inserting +the driver as a module with insmod parameters. + +--------------------------------------------------------------------------- +3.1 Built-in: + +The following menuconfig example demonstrates selecting to build this +driver into the kernel. + + Device Drivers ---> + Character devices ---> + <*> IBM Hypervisor Virtual Console Server Support + +Begin the kernel make process. 
+ +--------------------------------------------------------------------------- +3.2 Module: + +The following menuconfig example demonstrates selecting to build this +driver as a kernel module. + + Device Drivers ---> + Character devices ---> + IBM Hypervisor Virtual Console Server Support + +The make process will build the following kernel modules: + + hvcs.ko + hvcserver.ko + +To insert the module with the default allocation execute the following +commands in the order they appear: + + insmod hvcserver.ko + insmod hvcs.ko + +The hvcserver module contains architecture specific firmware calls and must +be inserted first, otherwise the hvcs module will not find some of the +symbols it expects. + +To override the default use an insmod parameter as follows (requesting 4 +tty devices as an example): + + insmod hvcs.ko hvcs_parm_num_devs=4 + +There is a maximum number of dev entries that can be specified on insmod. +We think that 1024 is currently a decent maximum number of server adapters +to allow. This can always be changed by modifying the constant in the +source file before building. + +NOTE: The length of time it takes to insmod the driver seems to be related +to the number of tty interfaces the registering driver requests. + +In order to remove the driver module execute the following command: + + rmmod hvcs.ko + +The recommended method for installing hvcs as a module is to use depmod to +build a current modules.dep file in /lib/modules/`uname -r` and then +execute: + +modprobe hvcs hvcs_parm_num_devs=4 + +The modules.dep file indicates that hvcserver.ko needs to be inserted +before hvcs.ko and modprobe uses this file to smartly insert the modules in +the proper order. + +The following modprobe command is used to remove hvcs and hvcserver in the +proper order: + +modprobe -r hvcs + +--------------------------------------------------------------------------- +4. Installation: + +The tty layer creates sysfs entries which contain the major and minor +numbers allocated for the hvcs driver. The following snippet of "tree" +output of the sysfs directory shows where these numbers are presented: + + sys/ + |-- *other sysfs base dirs* + | + |-- class + | |-- *other classes of devices* + | | + | `-- tty + | |-- *other tty devices* + | | + | |-- hvcs0 + | | `-- dev + | |-- hvcs1 + | | `-- dev + | |-- hvcs2 + | | `-- dev + | |-- hvcs3 + | | `-- dev + | | + | |-- *other tty devices* + | + |-- *other sysfs base dirs* + +For the above examples the following output is a result of cat'ing the +"dev" entry in the hvcs directory: + + Pow5:/sys/class/tty/hvcs0/ # cat dev + 254:0 + + Pow5:/sys/class/tty/hvcs1/ # cat dev + 254:1 + + Pow5:/sys/class/tty/hvcs2/ # cat dev + 254:2 + + Pow5:/sys/class/tty/hvcs3/ # cat dev + 254:3 + +The output from reading the "dev" attribute is the char device major and +minor numbers that the tty layer has allocated for this driver's use. Most +systems running hvcs will already have the device entries created or udev +will do it automatically. + +Given the example output above, to manually create a /dev/hvcs* node entry +mknod can be used as follows: + + mknod /dev/hvcs0 c 254 0 + mknod /dev/hvcs1 c 254 1 + mknod /dev/hvcs2 c 254 2 + mknod /dev/hvcs3 c 254 3 + +Using mknod to manually create the device entries makes these device nodes +persistent. Once created they will exist prior to the driver insmod. 
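+
+If you want to script node creation yourself (essentially what udev does
+for you), the major/minor pairs can be read straight from sysfs. The
+following is only an illustrative C sketch, not a tool shipped with this
+driver; it assumes the /sys/class/tty/hvcsN layout shown above, creates up
+to four nodes as in the example, and must be run as root:
+
+	#include <stdio.h>
+	#include <sys/types.h>
+	#include <sys/stat.h>
+	#include <sys/sysmacros.h>	/* makedev() */
+
+	int main(void)
+	{
+		unsigned int i, major, minor;
+		char path[64];
+
+		for (i = 0; i < 4; i++) {
+			FILE *f;
+
+			snprintf(path, sizeof(path),
+				 "/sys/class/tty/hvcs%u/dev", i);
+			f = fopen(path, "r");
+			if (!f)
+				continue;	/* driver absent or fewer devices */
+			if (fscanf(f, "%u:%u", &major, &minor) == 2) {
+				snprintf(path, sizeof(path), "/dev/hvcs%u", i);
+				mknod(path, S_IFCHR | 0660,
+				      makedev(major, minor));
+			}
+			fclose(f);
+		}
+		return 0;
+	}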
+ +Attempting to connect an application to /dev/hvcs* prior to insertion of +the hvcs module will result in an error message similar to the following: + + "/dev/hvcs*: No such device". + +NOTE: Just because there is a device node present doesn't mean that there +is a vty-server device configured for that node. + +--------------------------------------------------------------------------- +5. Connection + +Since this driver controls devices that provide a tty interface a user can +interact with the device node entries using any standard tty-interactive +method (e.g. "cat", "dd", "echo"). The intent of this driver however, is +to provide real time console interaction with a Linux partition's console, +which requires the use of applications that provide bi-directional, +interactive I/O with a tty device. + +Applications (e.g. "minicom" and "screen") that act as terminal emulators +or perform terminal type control sequence conversion on the data being +passed through them are NOT acceptable for providing interactive console +I/O. These programs often emulate antiquated terminal types (vt100 and +ANSI) and expect inbound data to take the form of one of these supported +terminal types but they either do not convert, or do not _adequately_ +convert, outbound data into the terminal type of the terminal which invoked +them (though screen makes an attempt and can apparently be configured with +much termcap wrestling.) + +For this reason kermit and cu are two of the recommended applications for +interacting with a Linux console via an hvcs device. These programs simply +act as a conduit for data transfer to and from the tty device. They do not +require inbound data to take the form of a particular terminal type, nor do +they cook outbound data to a particular terminal type. + +In order to ensure proper functioning of console applications one must make +sure that once connected to a /dev/hvcs console that the console's $TERM +env variable is set to the exact terminal type of the terminal emulator +used to launch the interactive I/O application. If one is using xterm and +kermit to connect to /dev/hvcs0 when the console prompt becomes available +one should "export TERM=xterm" on the console. This tells ncurses +applications that are invoked from the console that they should output +control sequences that xterm can understand. + +As a precautionary measure an hvcs user should always "exit" from their +session before disconnecting an application such as kermit from the device +node. If this is not done, the next user to connect to the console will +continue using the previous user's logged in session which includes +using the $TERM variable that the previous user supplied. + +--------------------------------------------------------------------------- +6. Disconnection + +As a security feature to prevent the delivery of stale data to an +unintended target the Power5 system firmware disables the fetching of data +and discards that data when a connection between a vty-server and a vty has +been severed. As an example, when a vty-server is immediately disconnected +from a vty following output of data to the vty the vty adapter may not have +enough time between when it received the data interrupt and when the +connection was severed to fetch the data from firmware before the fetch is +disabled by firmware. + +When hvcs is being used to serve consoles this behavior is not a huge issue +because the adapter stays connected for large amounts of time following +almost all data writes. 
When hvcs is being used as a tty conduit to tunnel +data between two partitions [see Q & A below] this is a huge problem +because the standard Linux behavior when cat'ing or dd'ing data to a device +is to open the tty, send the data, and then close the tty. If this driver +manually terminated vty-server connections on tty close this would close +the vty-server and vty connection before the target vty has had a chance to +fetch the data. + +Additionally, disconnecting a vty-server and vty only on module removal or +adapter removal is impractical because other vty-servers in other +partitions may require the usage of the target vty at any time. + +Due to this behavioral restriction disconnection of vty-servers from the +connected vty is a manual procedure using a write to a sysfs attribute +outlined below, on the other hand the initial vty-server connection to a +vty is established automatically by this driver. Manual vty-server +connection is never required. + +In order to terminate the connection between a vty-server and vty the +"vterm_state" sysfs attribute within each vty-server's sysfs entry is used. +Reading this attribute reveals the current connection state of the +vty-server adapter. A zero means that the vty-server is not connected to a +vty. A one indicates that a connection is active. + +Writing a '0' (zero) to the vterm_state attribute will disconnect the VTERM +connection between the vty-server and target vty ONLY if the vterm_state +previously read '1'. The write directive is ignored if the vterm_state +read '0' or if any value other than '0' was written to the vterm_state +attribute. The following example will show the method used for verifying +the vty-server connection status and disconnecting a vty-server connection. + + Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat vterm_state + 1 + + Pow5:/sys/bus/vio/drivers/hvcs/30000004 # echo 0 > vterm_state + + Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat vterm_state + 0 + +All vty-server connections are automatically terminated when the device is +hotplug removed and when the module is removed. + +--------------------------------------------------------------------------- +7. Configuration + +Each vty-server has a sysfs entry in the /sys/devices/vio directory, which +is symlinked in several other sysfs tree directories, notably under the +hvcs driver entry, which looks like the following example: + + Pow5:/sys/bus/vio/drivers/hvcs # ls + . .. 30000003 30000004 rescan + +By design, firmware notifies the hvcs driver of vty-server lifetimes and +partner vty removals but not the addition of partner vtys. Since an HMC +Super Admin can add partner info dynamically we have provided the hvcs +driver sysfs directory with the "rescan" update attribute which will query +firmware and update the partner info for all the vty-servers that this +driver manages. Writing a '1' to the attribute triggers the update. An +explicit example follows: + + Pow5:/sys/bus/vio/drivers/hvcs # echo 1 > rescan + +Reading the attribute will indicate a state of '1' or '0'. A one indicates +that an update is in process. A zero indicates that an update has +completed or was never executed. + +Vty-server entries in this directory are a 32 bit partition unique unit +address that is created by firmware. An example vty-server sysfs entry +looks like the following: + + Pow5:/sys/bus/vio/drivers/hvcs/30000004 # ls + . current_vty devspec partner_clcs vterm_state + .. detach_state name partner_vtys + +Each entry is provided, by default with a "name" attribute. 
Reading the +"name" attribute will reveal the device type as shown in the following +example: + + Pow5:/sys/bus/vio/drivers/hvcs/30000003 # cat name + vty-server + +Each entry is also provided, by default, with a "devspec" attribute which +reveals the full device specification when read, as shown in the following +example: + + Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat devspec + /vdevice/vty-server@30000004 + +Each vty-server sysfs dir is provided with two read-only attributes that +provide lists of easily parsed partner vty data: "partner_vtys" and +"partner_clcs". + + Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat partner_vtys + 30000000 + 30000001 + 30000002 + 30000000 + 30000000 + + Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat partner_clcs + U5112.428.103048A-V3-C0 + U5112.428.103048A-V3-C2 + U5112.428.103048A-V3-C3 + U5112.428.103048A-V4-C0 + U5112.428.103048A-V5-C0 + +Reading partner_vtys returns a list of partner vtys. Vty unit address +numbering is only per-partition-unique so entries will frequently repeat. + +Reading partner_clcs returns a list of "converged location codes" which are +composed of a system serial number followed by "-V*", where the '*' is the +target partition number, and "-C*", where the '*' is the slot of the +adapter. The first vty partner corresponds to the first clc item, the +second vty partner to the second clc item, etc. + +A vty-server can only be connected to a single vty at a time. The entry, +"current_vty" prints the clc of the currently selected partner vty when +read. + +The current_vty can be changed by writing a valid partner clc to the entry +as in the following example: + + Pow5:/sys/bus/vio/drivers/hvcs/30000004 # echo U5112.428.10304 + 8A-V4-C0 > current_vty + +Changing the current_vty when a vty-server is already connected to a vty +does not affect the current connection. The change takes effect when the +currently open connection is freed. + +Information on the "vterm_state" attribute was covered earlier on the +chapter entitled "disconnection". + +--------------------------------------------------------------------------- +8. Questions & Answers: +=========================================================================== +Q: What are the security concerns involving hvcs? + +A: There are three main security concerns: + + 1. The creator of the /dev/hvcs* nodes has the ability to restrict + the access of the device entries to certain users or groups. It + may be best to create a special hvcs group privilege for providing + access to system consoles. + + 2. To provide network security when grabbing the console it is + suggested that the user connect to the console hosting partition + using a secure method, such as SSH or sit at a hardware console. + + 3. Make sure to exit the user session when done with a console or + the next vty-server connection (which may be from another + partition) will experience the previously logged in session. + +--------------------------------------------------------------------------- +Q: How do I multiplex a console that I grab through hvcs so that other +people can see it: + +A: You can use "screen" to directly connect to the /dev/hvcs* device and +setup a session on your machine with the console group privileges. As +pointed out earlier by default screen doesn't provide the termcap settings +for most terminal emulators to provide adequate character conversion from +term type "screen" to others. This means that curses based programs may +not display properly in screen sessions. 
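+
+As an aside, the "vterm_state" handling described in the "Disconnection"
+and "Configuration" sections can also be driven from a program rather than
+from the shell. The fragment below is purely illustrative and is not part
+of the driver; the 30000004 unit address is simply the one used in the
+earlier examples:
+
+	#include <stdio.h>
+
+	#define VTERM \
+		"/sys/bus/vio/drivers/hvcs/30000004/vterm_state"
+
+	int main(void)
+	{
+		char state[4] = "";
+		FILE *f = fopen(VTERM, "r");
+
+		if (!f || !fgets(state, sizeof(state), f)) {
+			perror(VTERM);
+			return 1;
+		}
+		fclose(f);
+
+		if (state[0] == '1') {	/* connected, so sever it */
+			f = fopen(VTERM, "w");
+			if (!f || fputs("0\n", f) == EOF) {
+				perror(VTERM);
+				return 1;
+			}
+			fclose(f);
+		}
+		return 0;
+	}
+
+The same open-and-write pattern applies to the "rescan" attribute.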
+
+---------------------------------------------------------------------------
+Q: Why are the colors all messed up?
+Q: Why are the control characters acting strange or not working?
+Q: Why is the console output all strange and unintelligible?
+
+A: Please see the preceding section on "Connection" for a discussion of how
+applications can affect the display of character control sequences.
+Additionally, just because you logged into the console using an xterm
+doesn't mean someone else didn't log into the console with the HMC console
+(vt320) before you and leave the session logged in. The best thing to do
+is to export TERM to the terminal type of your terminal emulator when you
+get the console. Additionally make sure to "exit" the console before you
+disconnect from the console. This will ensure that the next user gets
+their own TERM type set when they login.
+
+---------------------------------------------------------------------------
+Q: When I try to CONNECT kermit to an hvcs device I get:
+"Sorry, can't open connection: /dev/hvcs*" What is happening?
+
+A: Some other Power5 console mechanism has a connection to the vty and
+isn't giving it up. You can try to force disconnect the consoles from the
+HMC by right clicking on the partition and then selecting "close terminal".
+Otherwise you have to hunt down the people who have console authority. It
+is possible that you already have the console open using another kermit
+session and just forgot about it. Please review the console options for
+Power5 systems to determine the many ways a system console can be held.
+
+OR
+
+A: Another user may not have a connectivity method currently attached to a
+/dev/hvcs device but the vterm_state may reveal that they still have the
+vty-server connection established. They need to free this using the method
+outlined in the section on "Disconnection" in order for others to connect
+to the target vty.
+
+OR
+
+A: The user profile you are using to execute kermit probably doesn't have
+permissions to use the /dev/hvcs* device.
+
+OR
+
+A: You probably haven't inserted the hvcs.ko module yet but the /dev/hvcs*
+entry still exists (on systems without udev).
+
+OR
+
+A: There is not a corresponding vty-server device that maps to an existing
+/dev/hvcs* entry.
+
+---------------------------------------------------------------------------
+Q: When I try to CONNECT kermit to an hvcs device I get:
+"Sorry, write access to UUCP lockfile directory denied."
+
+A: The /dev/hvcs* entry you have specified doesn't exist where you said it
+does. Maybe you haven't inserted the module (on systems with udev).
+
+---------------------------------------------------------------------------
+Q: If I already have one Linux partition installed, can I use hvcs on that
+partition to provide the console for the install of a second Linux
+partition?
+
+A: Yes, granted that you are connected to the /dev/hvcs* device using
+kermit or cu or some other program that doesn't provide terminal emulation.
+
+---------------------------------------------------------------------------
+Q: Can I connect to more than one partition's console at a time using this
+driver?
+
+A: Yes. Of course this means that there must be more than one vty-server
+configured for this partition and each must point to a disconnected vty.
+
+---------------------------------------------------------------------------
+Q: Does the hvcs driver support dynamic (hotplug) addition of devices?
+ +A: Yes, if you have dlpar and hotplug enabled for your system and it has +been built into the kernel the hvcs drivers is configured to dynamically +handle additions of new devices and removals of unused devices. + +--------------------------------------------------------------------------- +Q: Can I use /dev/hvcs* as a conduit to another partition and use a tty +device on that partition as the other end of the pipe? + +A: Yes, on Power5 platforms the hvc_console driver provides a tty interface +for extra /dev/hvc* devices (where /dev/hvc0 is most likely the console). +In order to get a tty conduit working between the two partitions the HMC +Super Admin must create an additional "serial server" for the target +partition with the HMC gui which will show up as /dev/hvc* when the target +partition is rebooted. + +The HMC Super Admin then creates an additional "serial client" for the +current partition and points this at the target partition's newly created +"serial server" adapter (remember the slot). This shows up as an +additional /dev/hvcs* device. + +Now a program on the target system can be configured to read or write to +/dev/hvc* and another program on the current partition can be configured to +read or write to /dev/hvcs*. Now you have a tty conduit between two +partitions. + +--------------------------------------------------------------------------- +9. Reporting Bugs: + +The proper channel for reporting bugs is either through the Linux OS +distribution company that provided your OS or by posting issues to the +ppc64 development mailing list at: + +linuxppc64-dev@lists.linuxppc.org + +This request is to provide a documented and searchable public exchange +of the problems and solutions surrounding this driver for the benefit of +all users. diff --git a/Documentation/powerpc/mpc52xx.txt b/Documentation/powerpc/mpc52xx.txt new file mode 100644 index 000000000..6efe0a0a5 --- /dev/null +++ b/Documentation/powerpc/mpc52xx.txt @@ -0,0 +1,48 @@ +Linux 2.6.x on MPC52xx family +----------------------------- + +For the latest info, go to http://www.246tNt.com/mpc52xx/state.txt + +To compile/use : + + - U-Boot: + # tftpboot 200000 uImage + => tftpboot 400000 pRamdisk + => bootm 200000 400000 + + - DBug: + # dn -i zImage.initrd.lite5200 + + +Some remarks : + - The port is named mpc52xxx, and config options are PPC_MPC52xx. The MGT5100 + is not supported, and I'm not sure anyone is interesting in working on it + so. I didn't took 5xxx because there's apparently a lot of 5xxx that have + nothing to do with the MPC5200. I also included the 'MPC' for the same + reason. + - Of course, I inspired myself from the 2.4 port. If you think I forgot to + mention you/your company in the copyright of some code, I'll correct it + ASAP. + - The codes wants the MBAR to be set at 0xf0000000 by the bootloader. It's + mapped 1:1 with the MMU. If for whatever reason, you want to change this, + beware that some code depends on the 0xf0000000 address and other depends + on the 1:1 mapping. + - Most of the code assumes that port multiplexing, frequency selection, ... + has already been done. IMHO this should be done as early as possible, in + the bootloader. If for whatever reason you can't do it there, do it in the + platform setup code (if U-Boot) or in the arch/ppc/boot/simple/... 
(if + DBug) diff --git a/Documentation/sound/alsa/Audigy-mixer.txt b/Documentation/sound/alsa/Audigy-mixer.txt new file mode 100644 index 000000000..5132fd95e --- /dev/null +++ b/Documentation/sound/alsa/Audigy-mixer.txt @@ -0,0 +1,345 @@ + + Sound Blaster Audigy mixer / default DSP code + =========================================== + +This is based on SB-Live-mixer.txt. + +The EMU10K2 chips have a DSP part which can be programmed to support +various ways of sample processing, which is described here. +(This acticle does not deal with the overall functionality of the +EMU10K2 chips. See the manuals section for further details.) + +The ALSA driver programs this portion of chip by default code +(can be altered later) which offers the following functionality: + + +1) Digital mixer controls +------------------------- + +These controls are built using the DSP instructions. They offer extended +functionality. Only the default build-in code in the ALSA driver is described +here. Note that the controls work as attenuators: the maximum value is the +neutral position leaving the signal unchanged. Note that if the same destination +is mentioned in multiple controls, the signal is accumulated and can be wrapped +(set to maximal or minimal value without checking of overflow). + + +Explanation of used abbreviations: + +DAC - digital to analog converter +ADC - analog to digital converter +I2S - one-way three wire serial bus for digital sound by Philips Semiconductors + (this standard is used for connecting standalone DAC and ADC converters) +LFE - low frequency effects (subwoofer signal) +AC97 - a chip containing an analog mixer, DAC and ADC converters +IEC958 - S/PDIF +FX-bus - the EMU10K2 chip has an effect bus containing 64 accumulators. + Each of the synthesizer voices can feed its output to these accumulators + and the DSP microcontroller can operate with the resulting sum. + +name='PCM Front Playback Volume',index=0 + +This control is used to attenuate samples for left and right front PCM FX-bus +accumulators. ALSA uses accumulators 8 and 9 for left and right front PCM +samples for 5.1 playback. The result samples are forwarded to the front DAC PCM +slots of the Philips DAC. + +name='PCM Surround Playback Volume',index=0 + +This control is used to attenuate samples for left and right surround PCM FX-bus +accumulators. ALSA uses accumulators 2 and 3 for left and right surround PCM +samples for 5.1 playback. The result samples are forwarded to the surround DAC PCM +slots of the Philips DAC. + +name='PCM Center Playback Volume',index=0 + +This control is used to attenuate samples for center PCM FX-bus accumulator. +ALSA uses accumulator 6 for center PCM sample for 5.1 playback. The result sample +is forwarded to the center DAC PCM slot of the Philips DAC. + +name='PCM LFE Playback Volume',index=0 + +This control is used to attenuate sample for LFE PCM FX-bus accumulator. +ALSA uses accumulator 7 for LFE PCM sample for 5.1 playback. The result sample +is forwarded to the LFE DAC PCM slot of the Philips DAC. + +name='PCM Playback Volume',index=0 + +This control is used to attenuate samples for left and right PCM FX-bus +accumulators. ALSA uses accumulators 0 and 1 for left and right PCM samples for +stereo playback. The result samples are forwarded to the front DAC PCM slots +of the Philips DAC. + +name='PCM Capture Volume',index=0 + +This control is used to attenuate samples for left and right PCM FX-bus +accumulator. ALSA uses accumulators 0 and 1 for left and right PCM. 
+The result is forwarded to the ADC capture FIFO (thus to the standard capture +PCM device). + +name='Music Playback Volume',index=0 + +This control is used to attenuate samples for left and right MIDI FX-bus +accumulators. ALSA uses accumulators 4 and 5 for left and right MIDI samples. +The result samples are forwarded to the front DAC PCM slots of the AC97 codec. + +name='Music Capture Volume',index=0 + +These controls are used to attenuate samples for left and right MIDI FX-bus +accumulator. ALSA uses accumulators 4 and 5 for left and right PCM. +The result is forwarded to the ADC capture FIFO (thus to the standard capture +PCM device). + +name='Mic Playback Volume',index=0 + +This control is used to attenuate samples for left and right Mic input. +For Mic input is used AC97 codec. The result samples are forwarded to +the front DAC PCM slots of the Philips DAC. Samples are forwarded to Mic +capture FIFO (device 1 - 16bit/8KHz mono) too without volume control. + +name='Mic Capture Volume',index=0 + +This control is used to attenuate samples for left and right Mic input. +The result is forwarded to the ADC capture FIFO (thus to the standard capture +PCM device). + +name='Audigy CD Playback Volume',index=0 + +This control is used to attenuate samples from left and right IEC958 TTL +digital inputs (usually used by a CDROM drive). The result samples are +forwarded to the front DAC PCM slots of the Philips DAC. + +name='Audigy CD Capture Volume',index=0 + +This control is used to attenuate samples from left and right IEC958 TTL +digital inputs (usually used by a CDROM drive). The result samples are +forwarded to the ADC capture FIFO (thus to the standard capture PCM device). + +name='IEC958 Optical Playback Volume',index=0 + +This control is used to attenuate samples from left and right IEC958 optical +digital input. The result samples are forwarded to the front DAC PCM slots +of the Philips DAC. + +name='IEC958 Optical Capture Volume',index=0 + +This control is used to attenuate samples from left and right IEC958 optical +digital inputs. The result samples are forwarded to the ADC capture FIFO +(thus to the standard capture PCM device). + +name='Line2 Playback Volume',index=0 + +This control is used to attenuate samples from left and right I2S ADC +inputs (on the AudigyDrive). The result samples are forwarded to the front +DAC PCM slots of the Philips DAC. + +name='Line2 Capture Volume',index=1 + +This control is used to attenuate samples from left and right I2S ADC +inputs (on the AudigyDrive). The result samples are forwarded to the ADC +capture FIFO (thus to the standard capture PCM device). + +name='Analog Mix Playback Volume',index=0 + +This control is used to attenuate samples from left and right I2S ADC +inputs from Philips ADC. The result samples are forwarded to the front +DAC PCM slots of the Philips DAC. This contains mix from analog sources +like CD, Line In, Aux, .... + +name='Analog Mix Capture Volume',index=1 + +This control is used to attenuate samples from left and right I2S ADC +inputs Philips ADC. The result samples are forwarded to the ADC +capture FIFO (thus to the standard capture PCM device). + +name='Aux2 Playback Volume',index=0 + +This control is used to attenuate samples from left and right I2S ADC +inputs (on the AudigyDrive). The result samples are forwarded to the front +DAC PCM slots of the Philips DAC. + +name='Aux2 Capture Volume',index=1 + +This control is used to attenuate samples from left and right I2S ADC +inputs (on the AudigyDrive). 
The result samples are forwarded to the ADC +capture FIFO (thus to the standard capture PCM device). + +name='Front Playback Volume',index=0 + +All stereo signals are mixed together and mirrored to surround, center and LFE. +This control is used to attenuate samples for left and right front speakers of +this mix. + +name='Surround Playback Volume',index=0 + +All stereo signals are mixed together and mirrored to surround, center and LFE. +This control is used to attenuate samples for left and right surround speakers of +this mix. + +name='Center Playback Volume',index=0 + +All stereo signals are mixed together and mirrored to surround, center and LFE. +This control is used to attenuate sample for center speaker of this mix. + +name='LFE Playback Volume',index=0 + +All stereo signals are mixed together and mirrored to surround, center and LFE. +This control is used to attenuate sample for LFE speaker of this mix. + +name='Tone Control - Switch',index=0 + +This control turns the tone control on or off. The samples for front, rear +and center / LFE outputs are affected. + +name='Tone Control - Bass',index=0 + +This control sets the bass intensity. There is no neutral value!! +When the tone control code is activated, the samples are always modified. +The closest value to pure signal is 20. + +name='Tone Control - Treble',index=0 + +This control sets the treble intensity. There is no neutral value!! +When the tone control code is activated, the samples are always modified. +The closest value to pure signal is 20. + +name='Master Playback Volume',index=0 + +This control is used to attenuate samples for front, surround, center and +LFE outputs. + +name='IEC958 Optical Raw Playback Switch',index=0 + +If this switch is on, then the samples for the IEC958 (S/PDIF) digital +output are taken only from the raw FX8010 PCM, otherwise standard front +PCM samples are taken. + + +2) PCM stream related controls +------------------------------ + +name='EMU10K1 PCM Volume',index 0-31 + +Channel volume attenuation in range 0-0xffff. The maximum value (no +attenuation) is default. The channel mapping for three values is +as follows: + + 0 - mono, default 0xffff (no attenuation) + 1 - left, default 0xffff (no attenuation) + 2 - right, default 0xffff (no attenuation) + +name='EMU10K1 PCM Send Routing',index 0-31 + +This control specifies the destination - FX-bus accumulators. 
There 24 +values with this mapping: + + 0 - mono, A destination (FX-bus 0-63), default 0 + 1 - mono, B destination (FX-bus 0-63), default 1 + 2 - mono, C destination (FX-bus 0-63), default 2 + 3 - mono, D destination (FX-bus 0-63), default 3 + 4 - mono, E destination (FX-bus 0-63), default 0 + 5 - mono, F destination (FX-bus 0-63), default 0 + 6 - mono, G destination (FX-bus 0-63), default 0 + 7 - mono, H destination (FX-bus 0-63), default 0 + 8 - left, A destination (FX-bus 0-63), default 0 + 9 - left, B destination (FX-bus 0-63), default 1 + 10 - left, C destination (FX-bus 0-63), default 2 + 11 - left, D destination (FX-bus 0-63), default 3 + 12 - left, E destination (FX-bus 0-63), default 0 + 13 - left, F destination (FX-bus 0-63), default 0 + 14 - left, G destination (FX-bus 0-63), default 0 + 15 - left, H destination (FX-bus 0-63), default 0 + 16 - right, A destination (FX-bus 0-63), default 0 + 17 - right, B destination (FX-bus 0-63), default 1 + 18 - right, C destination (FX-bus 0-63), default 2 + 19 - right, D destination (FX-bus 0-63), default 3 + 20 - right, E destination (FX-bus 0-63), default 0 + 21 - right, F destination (FX-bus 0-63), default 0 + 22 - right, G destination (FX-bus 0-63), default 0 + 23 - right, H destination (FX-bus 0-63), default 0 + +Don't forget that it's illegal to assign a channel to the same FX-bus accumulator +more than once (it means 0=0 && 1=0 is an invalid combination). + +name='EMU10K1 PCM Send Volume',index 0-31 + +It specifies the attenuation (amount) for given destination in range 0-255. +The channel mapping is following: + + 0 - mono, A destination attn, default 255 (no attenuation) + 1 - mono, B destination attn, default 255 (no attenuation) + 2 - mono, C destination attn, default 0 (mute) + 3 - mono, D destination attn, default 0 (mute) + 4 - mono, E destination attn, default 0 (mute) + 5 - mono, F destination attn, default 0 (mute) + 6 - mono, G destination attn, default 0 (mute) + 7 - mono, H destination attn, default 0 (mute) + 8 - left, A destination attn, default 255 (no attenuation) + 9 - left, B destination attn, default 0 (mute) + 10 - left, C destination attn, default 0 (mute) + 11 - left, D destination attn, default 0 (mute) + 12 - left, E destination attn, default 0 (mute) + 13 - left, F destination attn, default 0 (mute) + 14 - left, G destination attn, default 0 (mute) + 15 - left, H destination attn, default 0 (mute) + 16 - right, A destination attn, default 0 (mute) + 17 - right, B destination attn, default 255 (no attenuation) + 18 - right, C destination attn, default 0 (mute) + 19 - right, D destination attn, default 0 (mute) + 20 - right, E destination attn, default 0 (mute) + 21 - right, F destination attn, default 0 (mute) + 22 - right, G destination attn, default 0 (mute) + 23 - right, H destination attn, default 0 (mute) + + + +4) MANUALS/PATENTS: +------------------- + +ftp://opensource.creative.com/pub/doc +------------------------------------- + + Files: + LM4545.pdf AC97 Codec + + m2049.pdf The EMU10K1 Digital Audio Processor + + hog63.ps FX8010 - A DSP Chip Architecture for Audio Effects + + +WIPO Patents +------------ + Patent numbers: + WO 9901813 (A1) Audio Effects Processor with multiple asynchronous (Jan. 14, 1999) + streams + + WO 9901814 (A1) Processor with Instruction Set for Audio Effects (Jan. 14, 1999) + + WO 9901953 (A1) Audio Effects Processor having Decoupled Instruction + Execution and Audio Data Sequencing (Jan. 
14, 1999) + + +US Patents (http://www.uspto.gov/) +---------------------------------- + + US 5925841 Digital Sampling Instrument employing cache memory (Jul. 20, 1999) + + US 5928342 Audio Effects Processor integrated on a single chip (Jul. 27, 1999) + with a multiport memory onto which multiple asynchronous + digital sound samples can be concurrently loaded + + US 5930158 Processor with Instruction Set for Audio Effects (Jul. 27, 1999) + + US 6032235 Memory initialization circuit (Tram) (Feb. 29, 2000) + + US 6138207 Interpolation looping of audio samples in cache connected to (Oct. 24, 2000) + system bus with prioritization and modification of bus transfers + in accordance with loop ends and minimum block sizes + + US 6151670 Method for conserving memory storage using a (Nov. 21, 2000) + pool of short term memory registers + + US 6195715 Interrupt control for multiple programs communicating with (Feb. 27, 2001) + a common interrupt by associating programs to GP registers, + defining interrupt register, polling GP registers, and invoking + callback routine associated with defined interrupt register diff --git a/Documentation/usb/sn9c102.txt b/Documentation/usb/sn9c102.txt new file mode 100644 index 000000000..65018c8c9 --- /dev/null +++ b/Documentation/usb/sn9c102.txt @@ -0,0 +1,276 @@ + + SN9C10[12] PC Camera Controllers + Driver for Linux + ================================ + + - Documentation - + + +Index +===== +1. Copyright +2. License +3. Overview +4. Module dependencies +5. Module loading +6. Module parameters +7. Device control through "sysfs" +8. Supported devices +9. How to add support for new image sensors +10. Note for V4L2 developers +11. Contact information +12. Credits + + +1. Copyright +============ +Copyright (C) 2004 by Luca Risolia + +SONiX is a trademark of SONiX Technology Company Limited, inc. +This driver is not sponsored or developed by SONiX. + + +2. License +========== +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + +3. Overview +=========== +This driver attempts to support the video streaming capabilities of the devices +mounting the SONiX SN9C101 or SONiX SN9C102 PC Camera Controllers. + +- It's worth to note that SONiX has never collaborated with me during the +development of this project, despite of several requests for enough detailed +specifications of the register tables, compression engine and video data format +of the above chips - + +Up to 64 cameras can be handled at the same time. They can be connected and +disconnected from the host many times without turning off the computer, if +your system supports the hotplug facility. + +The driver relies on the Video4Linux2 and USB core modules. It has been +designed to run properly on SMP systems as well. + +The latest version of the SN9C10[12] driver can be found at the following URL: +http://go.lamarinapunto.com/ + + +4. 
Module dependencies +====================== +For it to work properly, the driver needs kernel support for Video4Linux and +USB. + +The following options of the kernel configuration file must be enabled and +corresponding modules must be compiled: + + # Multimedia devices + # + CONFIG_VIDEO_DEV=m + + # USB support + # + CONFIG_USB=m + +In addition, depending on the hardware being used, the modules below are +necessary: + + # USB Host Controller Drivers + # + CONFIG_USB_EHCI_HCD=m + CONFIG_USB_UHCI_HCD=m + CONFIG_USB_OHCI_HCD=m + +And finally: + + # USB Multimedia devices + # + CONFIG_USB_SN9C102=m + + +5. Module loading +================= +To use the driver, it is necessary to load the "sn9c102" module into memory +after every other module required: "videodev", "usbcore" and, depending on +the USB host controller you have, "ehci-hcd", "uhci-hcd" or "ohci-hcd". + +Loading can be done as shown below: + + [root@localhost home]# modprobe usbcore + [root@localhost home]# modprobe sn9c102 + +At this point the devices should be recognized. You can invoke "dmesg" to +analyze kernel messages and verify that the loading process has gone well: + + [user@localhost home]$ dmesg + + +6. Module parameters +==================== +Module parameters are listed below: +------------------------------------------------------------------------------- +Name: video_nr +Type: int array (min = 0, max = 32) +Syntax: <-1|n[,...]> +Description: Specify V4L2 minor mode number: + -1 = use next available + n = use minor number n + You can specify up to 32 cameras this way. + For example: + video_nr=-1,2,-1 would assign minor number 2 to the second + recognized camera and use auto for the first one and for every + other camera. +Default: -1 +------------------------------------------------------------------------------- +Name: debug +Type: int +Syntax: +Description: Debugging information level, from 0 to 3: + 0 = none (use carefully) + 1 = critical errors + 2 = significant informations + 3 = more verbose messages + Level 3 is useful for testing only, when just one device + is used. +Default: 2 +------------------------------------------------------------------------------- + + +7. Device control through "sysfs" +================================= +It is possible to read and write both the SN9C10[12] and the image sensor +registers by using the "sysfs" filesystem interface. + +Every time a supported device is recognized, read-only files named "redblue" +and "green" are created in the /sys/class/video4linux/videoX directory. You can +set the red, blue and green channel's gain by writing the desired value to +them. The value may range from 0 to 15 for each channel; this means that +"redblue" accepts 8-bit values, where the low 4 bits are reserved for red and +the others for blue. + +There are other four entries in the directory above for each registered camera: +"reg", "val", "i2c_reg" and "i2c_val". The first two files control the +SN9C10[12] bridge, while the other two control the sensor chip. "reg" and +"i2c_reg" hold the values of the current register index where the following +reading/writing operations are addressed at through "val" and "i2c_val". Their +use is not intended for end-users, unless you know what you are doing. Note +that "i2c_reg" and "i2c_val" won't be created if the sensor does not actually +support the standard I2C protocol. Also, remember that you must be logged in as +root before writing to them. 
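+
+These files can also be accessed from a program instead of an interactive
+shell. The short C fragment below is only an illustration (it is not part
+of the driver); it assumes the camera was registered as "video0" and does
+the same sensor register read that the shell example following it performs
+by hand:
+
+	#include <stdio.h>
+
+	#define SYSFS "/sys/class/video4linux/video0/"
+
+	int main(void)
+	{
+		char val[16] = "";
+		FILE *f;
+
+		/* select sensor register 1, usually the product identifier */
+		f = fopen(SYSFS "i2c_reg", "w");
+		if (!f || fputs("1\n", f) == EOF)
+			return 1;
+		fclose(f);
+
+		/* read the selected register back through i2c_val */
+		f = fopen(SYSFS "i2c_val", "r");
+		if (!f || !fgets(val, sizeof(val), f))
+			return 1;
+		printf("sensor register 1 = %s", val);
+		fclose(f);
+		return 0;
+	}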
+ +As an example, suppose we were to want to read the value contained in the +register number 1 of the sensor register table - which usually is the product +identifier - of the camera registered as "/dev/video0": + + [root@localhost #] cd /sys/class/video4linux/video0 + [root@localhost #] echo 1 > i2c_reg + [root@localhost #] cat i2c_val + +Now let's set the green gain's register of the SN9C10[12] chip to 2: + + [root@localhost #] echo 0x11 > reg + [root@localhost #] echo 2 > val + +Note that the SN9C10[12] always returns 0 when some of its registers are read. +To avoid race conditions, all the I/O accesses to the files are serialized. + + +8. Supported devices +==================== +- I won't mention any of the names of the companies as well as their products +here. They have never collaborated with me, so no advertising - + +From the point of view of a driver, what unambiguously identify a device are +its vendor and product USB identifiers. Below is a list of known identifiers of +devices mounting the SN9C10[12] PC camera controllers: + +Vendor ID Product ID +--------- ---------- +0xc45 0x6001 +0xc45 0x6005 +0xc45 0x6009 +0xc45 0x600d +0xc45 0x6024 +0xc45 0x6025 +0xc45 0x6028 +0xc45 0x6029 +0xc45 0x602a +0xc45 0x602c +0xc45 0x8001 + +The list above does NOT imply that all those devices work with this driver: up +until now only the ones that mount the following image sensors are supported. +Kernel messages will always tell you whether this is the case: + +Model Manufacturer +----- ------------ +PAS106B PixArt Imaging Inc. +TAS5110C1B Taiwan Advanced Sensor Corporation +TAS5130D1B Taiwan Advanced Sensor Corporation + +If you think your camera is based on the above hardware and is not actually +listed in the above table, you may try to add the specific USB VendorID and +ProductID identifiers to the sn9c102_id_table[] in the file "sn9c102_sensor.h"; +then compile, load the module again and look at the kernel output. +If this works, please send an email to me reporting the kernel messages, so +that I will add a new entry in the list of supported devices. + +Donations of new models for further testing and support would be much +appreciated. I won't add official support for hardware that I don't actually +have. + + +9. How to add support for new image sensors +=========================================== +It should be easy to write code for new sensors by using the small API that I +have created for this purpose, which is present in "sn9c102_sensor.h" +(documentation is included there). As an example, have a look at the code in +"sn9c102_pas106b.c", which uses the mentioned interface. + +At the moment, not yet supported image sensors are: PAS202B (VGA), +HV7131[D|E1] (VGA), MI03 (VGA), OV7620 (VGA). + + +10. Note for V4L2 developers +============================ +This driver follows the V4L2 API specifications. In particular, it enforces two +rules: + +1) Exactly one I/O method, either "mmap" or "read", is associated with each +file descriptor. Once it is selected, the application must close and reopen the +device to switch to the other I/O method. + +2) Previously mapped buffer memory must always be unmapped before calling any +of the "VIDIOC_S_CROP", "VIDIOC_TRY_FMT" and "VIDIOC_S_FMT" ioctl's. In case, +the same number of buffers as before will be allocated again to match the size +of the new video frames, so you have to map them again before any I/O attempts. + + +11. Contact information +======================= +I may be contacted by e-mail at . + +I can accept GPG/PGP encrypted e-mail. 
My GPG key ID is 'FCE635A4'. +My public 1024-bit key should be available at any keyserver; the fingerprint +is: '88E8 F32F 7244 68BA 3958 5D40 99DA 5D2A FCE6 35A4'. + + +12. Credits +=========== +I would thank the following persons: + +- Stefano Mozzi, who donated 45 EU; +- Luca Capello for the donation of a webcam; +- Mizuno Takafumi for the donation of a webcam. diff --git a/arch/arm/boot/bootp/initrd.S b/arch/arm/boot/bootp/initrd.S new file mode 100644 index 000000000..d81ea1837 --- /dev/null +++ b/arch/arm/boot/bootp/initrd.S @@ -0,0 +1,6 @@ + .type initrd_start,#object + .globl initrd_start +initrd_start: + .incbin INITRD + .globl initrd_end +initrd_end: diff --git a/arch/arm/boot/bootp/kernel.S b/arch/arm/boot/bootp/kernel.S new file mode 100644 index 000000000..b87a25c7e --- /dev/null +++ b/arch/arm/boot/bootp/kernel.S @@ -0,0 +1,6 @@ + .globl kernel_start +kernel_start: + .incbin "arch/arm/boot/zImage" + .globl kernel_end +kernel_end: + .align 2 diff --git a/arch/arm/boot/compressed/piggy.S b/arch/arm/boot/compressed/piggy.S new file mode 100644 index 000000000..54c951800 --- /dev/null +++ b/arch/arm/boot/compressed/piggy.S @@ -0,0 +1,6 @@ + .section .piggydata,#alloc + .globl input_data +input_data: + .incbin "arch/arm/boot/compressed/piggy.gz" + .globl input_data_end +input_data_end: diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c new file mode 100644 index 000000000..46550ddde --- /dev/null +++ b/arch/arm/common/locomo.c @@ -0,0 +1,762 @@ +/* + * linux/arch/arm/common/locomo.c + * + * Sharp LoCoMo support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains all generic LoCoMo support. + * + * All initialization functions provided here are intended to be called + * from machine specific code with proper arguments when required. + * + * Based on sa1111.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +/* the following is the overall data for the locomo chip */ +struct locomo { + struct device *dev; + unsigned long phys; + unsigned int irq; + void *base; +}; + +struct locomo_dev_info { + unsigned long offset; + unsigned long length; + unsigned int devid; + unsigned int irq[1]; + const char * name; +}; + +static struct locomo_dev_info locomo_devices[] = { +}; + + +/** LoCoMo interrupt handling stuff. + * NOTE: LoCoMo has a 1 to many mapping on all of its IRQs. + * that is, there is only one real hardware interrupt + * we determine which interrupt it is by reading some IO memory. 
+ * We have two levels of expansion, first in the handler for the + * hardware interrupt we generate an interrupt + * IRQ_LOCOMO_*_BASE and those handlers generate more interrupts + * + * hardware irq reads LOCOMO_ICR & 0x0f00 + * IRQ_LOCOMO_KEY_BASE + * IRQ_LOCOMO_GPIO_BASE + * IRQ_LOCOMO_LT_BASE + * IRQ_LOCOMO_SPI_BASE + * IRQ_LOCOMO_KEY_BASE reads LOCOMO_KIC & 0x0001 + * IRQ_LOCOMO_KEY + * IRQ_LOCOMO_GPIO_BASE reads LOCOMO_GIR & LOCOMO_GPD & 0xffff + * IRQ_LOCOMO_GPIO[0-15] + * IRQ_LOCOMO_LT_BASE reads LOCOMO_LTINT & 0x0001 + * IRQ_LOCOMO_LT + * IRQ_LOCOMO_SPI_BASE reads LOCOMO_SPIIR & 0x000F + * IRQ_LOCOMO_SPI_RFR + * IRQ_LOCOMO_SPI_RFW + * IRQ_LOCOMO_SPI_OVRN + * IRQ_LOCOMO_SPI_TEND + */ + +#define LOCOMO_IRQ_START (IRQ_LOCOMO_KEY_BASE) +#define LOCOMO_IRQ_KEY_START (IRQ_LOCOMO_KEY) +#define LOCOMO_IRQ_GPIO_START (IRQ_LOCOMO_GPIO0) +#define LOCOMO_IRQ_LT_START (IRQ_LOCOMO_LT) +#define LOCOMO_IRQ_SPI_START (IRQ_LOCOMO_SPI_RFR) + +static void locomo_handler(unsigned int irq, struct irqdesc *desc, + struct pt_regs *regs) +{ + int req, i; + struct irqdesc *d; + void *mapbase = get_irq_chipdata(irq); + + /* Acknowledge the parent IRQ */ + desc->chip->ack(irq); + + /* check why this interrupt was generated */ + req = locomo_readl(mapbase + LOCOMO_ICR) & 0x0f00; + + if (req) { + /* generate the next interrupt(s) */ + irq = LOCOMO_IRQ_START; + d = irq_desc + irq; + for (i = 0; i <= 3; i++, d++, irq++) { + if (req & (0x0100 << i)) { + d->handle(irq, d, regs); + } + + } + } +} + +static void locomo_ack_irq(unsigned int irq) +{ +} + +static void locomo_mask_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_ICR); + r &= ~(0x0010 << (irq - LOCOMO_IRQ_START)); + locomo_writel(r, mapbase + LOCOMO_ICR); +} + +static void locomo_unmask_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_ICR); + r |= (0x0010 << (irq - LOCOMO_IRQ_START)); + locomo_writel(r, mapbase + LOCOMO_ICR); +} + +static struct irqchip locomo_chip = { + .ack = locomo_ack_irq, + .mask = locomo_mask_irq, + .unmask = locomo_unmask_irq, +}; + +static void locomo_key_handler(unsigned int irq, struct irqdesc *desc, + struct pt_regs *regs) +{ + struct irqdesc *d; + void *mapbase = get_irq_chipdata(irq); + + if (locomo_readl(mapbase + LOCOMO_KIC) & 0x0001) { + d = irq_desc + LOCOMO_IRQ_KEY_START; + d->handle(LOCOMO_IRQ_KEY_START, d, regs); + } +} + +static void locomo_key_ack_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_KIC); + r &= ~(0x0100 << (irq - LOCOMO_IRQ_KEY_START)); + locomo_writel(r, mapbase + LOCOMO_KIC); +} + +static void locomo_key_mask_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_KIC); + r &= ~(0x0010 << (irq - LOCOMO_IRQ_KEY_START)); + locomo_writel(r, mapbase + LOCOMO_KIC); +} + +static void locomo_key_unmask_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_KIC); + r |= (0x0010 << (irq - LOCOMO_IRQ_KEY_START)); + locomo_writel(r, mapbase + LOCOMO_KIC); +} + +static struct irqchip locomo_key_chip = { + .ack = locomo_key_ack_irq, + .mask = locomo_key_mask_irq, + .unmask = locomo_key_unmask_irq, +}; + +static void locomo_gpio_handler(unsigned int irq, struct irqdesc *desc, + struct pt_regs *regs) +{ + int req, i; + struct irqdesc *d; + void *mapbase = get_irq_chipdata(irq); 
+ + req = locomo_readl(mapbase + LOCOMO_GIR) & + locomo_readl(mapbase + LOCOMO_GPD) & + 0xffff; + + if (req) { + irq = LOCOMO_IRQ_GPIO_START; + d = irq_desc + LOCOMO_IRQ_GPIO_START; + for (i = 0; i <= 15; i++, irq++, d++) { + if (req & (0x0001 << i)) { + d->handle(irq, d, regs); + } + } + } +} + +static void locomo_gpio_ack_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_GWE); + r |= (0x0001 << (irq - LOCOMO_IRQ_GPIO_START)); + locomo_writel(r, mapbase + LOCOMO_GWE); + + r = locomo_readl(mapbase + LOCOMO_GIS); + r &= ~(0x0001 << (irq - LOCOMO_IRQ_GPIO_START)); + locomo_writel(r, mapbase + LOCOMO_GIS); + + r = locomo_readl(mapbase + LOCOMO_GWE); + r &= ~(0x0001 << (irq - LOCOMO_IRQ_GPIO_START)); + locomo_writel(r, mapbase + LOCOMO_GWE); +} + +static void locomo_gpio_mask_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_GIE); + r &= ~(0x0001 << (irq - LOCOMO_IRQ_GPIO_START)); + locomo_writel(r, mapbase + LOCOMO_GIE); +} + +static void locomo_gpio_unmask_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_GIE); + r |= (0x0001 << (irq - LOCOMO_IRQ_GPIO_START)); + locomo_writel(r, mapbase + LOCOMO_GIE); +} + +static struct irqchip locomo_gpio_chip = { + .ack = locomo_gpio_ack_irq, + .mask = locomo_gpio_mask_irq, + .unmask = locomo_gpio_unmask_irq, +}; + +static void locomo_lt_handler(unsigned int irq, struct irqdesc *desc, + struct pt_regs *regs) +{ + struct irqdesc *d; + void *mapbase = get_irq_chipdata(irq); + + if (locomo_readl(mapbase + LOCOMO_LTINT) & 0x0001) { + d = irq_desc + LOCOMO_IRQ_LT_START; + d->handle(LOCOMO_IRQ_LT_START, d, regs); + } +} + +static void locomo_lt_ack_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_LTINT); + r &= ~(0x0100 << (irq - LOCOMO_IRQ_LT_START)); + locomo_writel(r, mapbase + LOCOMO_LTINT); +} + +static void locomo_lt_mask_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_LTINT); + r &= ~(0x0010 << (irq - LOCOMO_IRQ_LT_START)); + locomo_writel(r, mapbase + LOCOMO_LTINT); +} + +static void locomo_lt_unmask_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_LTINT); + r |= (0x0010 << (irq - LOCOMO_IRQ_LT_START)); + locomo_writel(r, mapbase + LOCOMO_LTINT); +} + +static struct irqchip locomo_lt_chip = { + .ack = locomo_lt_ack_irq, + .mask = locomo_lt_mask_irq, + .unmask = locomo_lt_unmask_irq, +}; + +static void locomo_spi_handler(unsigned int irq, struct irqdesc *desc, + struct pt_regs *regs) +{ + int req, i; + struct irqdesc *d; + void *mapbase = get_irq_chipdata(irq); + + req = locomo_readl(mapbase + LOCOMO_SPIIR) & 0x000F; + if (req) { + irq = LOCOMO_IRQ_SPI_START; + d = irq_desc + irq; + + for (i = 0; i <= 3; i++, irq++, d++) { + if (req & (0x0001 << i)) { + d->handle(irq, d, regs); + } + } + } +} + +static void locomo_spi_ack_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_SPIWE); + r |= (0x0001 << (irq - LOCOMO_IRQ_SPI_START)); + locomo_writel(r, mapbase + LOCOMO_SPIWE); + + r = locomo_readl(mapbase + LOCOMO_SPIIS); + r &= ~(0x0001 << (irq - LOCOMO_IRQ_SPI_START)); + locomo_writel(r, mapbase + LOCOMO_SPIIS); + + r = locomo_readl(mapbase + LOCOMO_SPIWE); + r &= ~(0x0001 
<< (irq - LOCOMO_IRQ_SPI_START)); + locomo_writel(r, mapbase + LOCOMO_SPIWE); +} + +static void locomo_spi_mask_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_SPIIE); + r &= ~(0x0001 << (irq - LOCOMO_IRQ_SPI_START)); + locomo_writel(r, mapbase + LOCOMO_SPIIE); +} + +static void locomo_spi_unmask_irq(unsigned int irq) +{ + void *mapbase = get_irq_chipdata(irq); + unsigned int r; + r = locomo_readl(mapbase + LOCOMO_SPIIE); + r |= (0x0001 << (irq - LOCOMO_IRQ_SPI_START)); + locomo_writel(r, mapbase + LOCOMO_SPIIE); +} + +static struct irqchip locomo_spi_chip = { + .ack = locomo_spi_ack_irq, + .mask = locomo_spi_mask_irq, + .unmask = locomo_spi_unmask_irq, +}; + +static void locomo_setup_irq(struct locomo *lchip) +{ + int irq; + void *irqbase = lchip->base; + + /* + * Install handler for IRQ_LOCOMO_HW. + */ + set_irq_type(lchip->irq, IRQT_FALLING); + set_irq_chipdata(lchip->irq, irqbase); + set_irq_chained_handler(lchip->irq, locomo_handler); + + /* Install handlers for IRQ_LOCOMO_*_BASE */ + set_irq_chip(IRQ_LOCOMO_KEY_BASE, &locomo_chip); + set_irq_chipdata(IRQ_LOCOMO_KEY_BASE, irqbase); + set_irq_chained_handler(IRQ_LOCOMO_KEY_BASE, locomo_key_handler); + set_irq_flags(IRQ_LOCOMO_KEY_BASE, IRQF_VALID | IRQF_PROBE); + + set_irq_chip(IRQ_LOCOMO_GPIO_BASE, &locomo_chip); + set_irq_chipdata(IRQ_LOCOMO_GPIO_BASE, irqbase); + set_irq_chained_handler(IRQ_LOCOMO_GPIO_BASE, locomo_gpio_handler); + set_irq_flags(IRQ_LOCOMO_GPIO_BASE, IRQF_VALID | IRQF_PROBE); + + set_irq_chip(IRQ_LOCOMO_LT_BASE, &locomo_chip); + set_irq_chipdata(IRQ_LOCOMO_LT_BASE, irqbase); + set_irq_chained_handler(IRQ_LOCOMO_LT_BASE, locomo_lt_handler); + set_irq_flags(IRQ_LOCOMO_LT_BASE, IRQF_VALID | IRQF_PROBE); + + set_irq_chip(IRQ_LOCOMO_SPI_BASE, &locomo_chip); + set_irq_chipdata(IRQ_LOCOMO_SPI_BASE, irqbase); + set_irq_chained_handler(IRQ_LOCOMO_SPI_BASE, locomo_spi_handler); + set_irq_flags(IRQ_LOCOMO_SPI_BASE, IRQF_VALID | IRQF_PROBE); + + /* install handlers for IRQ_LOCOMO_KEY_BASE generated interrupts */ + set_irq_chip(LOCOMO_IRQ_KEY_START, &locomo_key_chip); + set_irq_chipdata(LOCOMO_IRQ_KEY_START, irqbase); + set_irq_handler(LOCOMO_IRQ_KEY_START, do_edge_IRQ); + set_irq_flags(LOCOMO_IRQ_KEY_START, IRQF_VALID | IRQF_PROBE); + + /* install handlers for IRQ_LOCOMO_GPIO_BASE generated interrupts */ + for (irq = LOCOMO_IRQ_GPIO_START; irq < LOCOMO_IRQ_GPIO_START + 16; irq++) { + set_irq_chip(irq, &locomo_gpio_chip); + set_irq_chipdata(irq, irqbase); + set_irq_handler(irq, do_edge_IRQ); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + } + + /* install handlers for IRQ_LOCOMO_LT_BASE generated interrupts */ + set_irq_chip(LOCOMO_IRQ_LT_START, &locomo_lt_chip); + set_irq_chipdata(LOCOMO_IRQ_LT_START, irqbase); + set_irq_handler(LOCOMO_IRQ_LT_START, do_edge_IRQ); + set_irq_flags(LOCOMO_IRQ_LT_START, IRQF_VALID | IRQF_PROBE); + + /* install handlers for IRQ_LOCOMO_SPI_BASE generated interrupts */ + for (irq = LOCOMO_IRQ_SPI_START; irq < LOCOMO_IRQ_SPI_START + 3; irq++) { + set_irq_chip(irq, &locomo_spi_chip); + set_irq_chipdata(irq, irqbase); + set_irq_handler(irq, do_edge_IRQ); + set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); + } +} + + +static void locomo_dev_release(struct device *_dev) +{ + struct locomo_dev *dev = LOCOMO_DEV(_dev); + + release_resource(&dev->res); + kfree(dev); +} + +static int +locomo_init_one_child(struct locomo *lchip, struct resource *parent, + struct locomo_dev_info *info) +{ + struct locomo_dev *dev; + int ret; + + dev = 
kmalloc(sizeof(struct locomo_dev), GFP_KERNEL); + if (!dev) { + ret = -ENOMEM; + goto out; + } + memset(dev, 0, sizeof(struct locomo_dev)); + + strncpy(dev->dev.bus_id,info->name,sizeof(dev->dev.bus_id)); + /* + * If the parent device has a DMA mask associated with it, + * propagate it down to the children. + */ + if (lchip->dev->dma_mask) { + dev->dma_mask = *lchip->dev->dma_mask; + dev->dev.dma_mask = &dev->dma_mask; + } + + dev->devid = info->devid; + dev->dev.parent = lchip->dev; + dev->dev.bus = &locomo_bus_type; + dev->dev.release = locomo_dev_release; + dev->dev.coherent_dma_mask = lchip->dev->coherent_dma_mask; + dev->res.start = lchip->phys + info->offset; + dev->res.end = dev->res.start + info->length; + dev->res.name = dev->dev.bus_id; + dev->res.flags = IORESOURCE_MEM; + dev->mapbase = lchip->base + info->offset; + memmove(dev->irq, info->irq, sizeof(dev->irq)); + + if (info->length) { + ret = request_resource(parent, &dev->res); + if (ret) { + printk("LoCoMo: failed to allocate resource for %s\n", + dev->res.name); + goto out; + } + } + + ret = device_register(&dev->dev); + if (ret) { + release_resource(&dev->res); + out: + kfree(dev); + } + return ret; +} + +/** + * locomo_probe - probe for a single LoCoMo chip. + * @phys_addr: physical address of device. + * + * Probe for a LoCoMo chip. This must be called + * before any other locomo-specific code. + * + * Returns: + * %-ENODEV device not found. + * %-EBUSY physical address already marked in-use. + * %0 successful. + */ +static int +__locomo_probe(struct device *me, struct resource *mem, int irq) +{ + struct locomo *lchip; + unsigned long r; + int i, ret = -ENODEV; + + lchip = kmalloc(sizeof(struct locomo), GFP_KERNEL); + if (!lchip) + return -ENOMEM; + + memset(lchip, 0, sizeof(struct locomo)); + + lchip->dev = me; + dev_set_drvdata(lchip->dev, lchip); + + lchip->phys = mem->start; + lchip->irq = irq; + + /* + * Map the whole region. This also maps the + * registers for our children. 
+ */ + lchip->base = ioremap(mem->start, PAGE_SIZE); + if (!lchip->base) { + ret = -ENOMEM; + goto out; + } + + /* locomo initialize */ + locomo_writel(0, lchip->base + LOCOMO_ICR); + /* KEYBOARD */ + locomo_writel(0, lchip->base + LOCOMO_KIC); + + /* GPIO */ + locomo_writel(0, lchip->base + LOCOMO_GPO); + locomo_writel( (LOCOMO_GPIO(2) | LOCOMO_GPIO(3) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) + , lchip->base + LOCOMO_GPE); + locomo_writel( (LOCOMO_GPIO(2) | LOCOMO_GPIO(3) | LOCOMO_GPIO(13) | LOCOMO_GPIO(14)) + , lchip->base + LOCOMO_GPD); + locomo_writel(0, lchip->base + LOCOMO_GIE); + + /* FrontLight */ + locomo_writel(0, lchip->base + LOCOMO_ALS); + locomo_writel(0, lchip->base + LOCOMO_ALD); + /* Longtime timer */ + locomo_writel(0, lchip->base + LOCOMO_LTINT); + /* SPI */ + locomo_writel(0, lchip->base + LOCOMO_SPIIE); + + locomo_writel(6 + 8 + 320 + 30 - 10, lchip->base + LOCOMO_ASD); + r = locomo_readl(lchip->base + LOCOMO_ASD); + r |= 0x8000; + locomo_writel(r, lchip->base + LOCOMO_ASD); + + locomo_writel(6 + 8 + 320 + 30 - 10 - 128 + 4, lchip->base + LOCOMO_HSD); + r = locomo_readl(lchip->base + LOCOMO_HSD); + r |= 0x8000; + locomo_writel(r, lchip->base + LOCOMO_HSD); + + locomo_writel(128 / 8, lchip->base + LOCOMO_HSC); + + /* XON */ + locomo_writel(0x80, lchip->base + LOCOMO_TADC); + udelay(1000); + /* CLK9MEN */ + r = locomo_readl(lchip->base + LOCOMO_TADC); + r |= 0x10; + locomo_writel(r, lchip->base + LOCOMO_TADC); + udelay(100); + + /* init DAC */ + r = locomo_readl(lchip->base + LOCOMO_DAC); + r |= LOCOMO_DAC_SCLOEB | LOCOMO_DAC_SDAOEB; + locomo_writel(r, lchip->base + LOCOMO_DAC); + + r = locomo_readl(lchip->base + LOCOMO_VER); + printk(KERN_INFO "LoCoMo Chip: %lu%lu\n", (r >> 8), (r & 0xff)); + + /* + * The interrupt controller must be initialised before any + * other device to ensure that the interrupts are available. + */ + if (lchip->irq != NO_IRQ) + locomo_setup_irq(lchip); + + for (i = 0; i < ARRAY_SIZE(locomo_devices); i++) + locomo_init_one_child(lchip, mem, &locomo_devices[i]); + + return 0; + + out: + kfree(lchip); + return ret; +} + +static void __locomo_remove(struct locomo *lchip) +{ + struct list_head *l, *n; + + list_for_each_safe(l, n, &lchip->dev->children) { + struct device *d = list_to_dev(l); + + device_unregister(d); + } + + if (lchip->irq != NO_IRQ) { + set_irq_chained_handler(lchip->irq, NULL); + set_irq_data(lchip->irq, NULL); + } + + iounmap(lchip->base); + kfree(lchip); +} + +static int locomo_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct resource *mem = NULL, *irq = NULL; + int i; + + for (i = 0; i < pdev->num_resources; i++) { + if (pdev->resource[i].flags & IORESOURCE_MEM) + mem = &pdev->resource[i]; + if (pdev->resource[i].flags & IORESOURCE_IRQ) + irq = &pdev->resource[i]; + } + + return __locomo_probe(dev, mem, irq ? irq->start : NO_IRQ); +} + +static int locomo_remove(struct device *dev) +{ + struct locomo *lchip = dev_get_drvdata(dev); + + if (lchip) { + __locomo_remove(lchip); + dev_set_drvdata(dev, NULL); + + kfree(dev->saved_state); + dev->saved_state = NULL; + } + + return 0; +} + +/* + * Not sure if this should be on the system bus or not yet. + * We really want some way to register a system device at + * the per-machine level, and then have this driver pick + * up the registered devices. 
+ */ +static struct device_driver locomo_device_driver = { + .name = "locomo", + .bus = &platform_bus_type, + .probe = locomo_probe, + .remove = locomo_remove, +}; + +/* + * Get the parent device driver (us) structure + * from a child function device + */ +static inline struct locomo *locomo_chip_driver(struct locomo_dev *ldev) +{ + return (struct locomo *)dev_get_drvdata(ldev->dev.parent); +} + +/* + * LoCoMo "Register Access Bus." + * + * We model this as a regular bus type, and hang devices directly + * off this. + */ +static int locomo_match(struct device *_dev, struct device_driver *_drv) +{ + struct locomo_dev *dev = LOCOMO_DEV(_dev); + struct locomo_driver *drv = LOCOMO_DRV(_drv); + + return dev->devid == drv->devid; +} + +static int locomo_bus_suspend(struct device *dev, u32 state) +{ + struct locomo_dev *ldev = LOCOMO_DEV(dev); + struct locomo_driver *drv = LOCOMO_DRV(dev->driver); + int ret = 0; + + if (drv && drv->suspend) + ret = drv->suspend(ldev, state); + return ret; +} + +static int locomo_bus_resume(struct device *dev) +{ + struct locomo_dev *ldev = LOCOMO_DEV(dev); + struct locomo_driver *drv = LOCOMO_DRV(dev->driver); + int ret = 0; + + if (drv && drv->resume) + ret = drv->resume(ldev); + return ret; +} + +static int locomo_bus_probe(struct device *dev) +{ + struct locomo_dev *ldev = LOCOMO_DEV(dev); + struct locomo_driver *drv = LOCOMO_DRV(dev->driver); + int ret = -ENODEV; + + if (drv->probe) + ret = drv->probe(ldev); + return ret; +} + +static int locomo_bus_remove(struct device *dev) +{ + struct locomo_dev *ldev = LOCOMO_DEV(dev); + struct locomo_driver *drv = LOCOMO_DRV(dev->driver); + int ret = 0; + + if (drv->remove) + ret = drv->remove(ldev); + return ret; +} + +struct bus_type locomo_bus_type = { + .name = "locomo-bus", + .match = locomo_match, + .suspend = locomo_bus_suspend, + .resume = locomo_bus_resume, +}; + +int locomo_driver_register(struct locomo_driver *driver) +{ + driver->drv.probe = locomo_bus_probe; + driver->drv.remove = locomo_bus_remove; + driver->drv.bus = &locomo_bus_type; + return driver_register(&driver->drv); +} + +void locomo_driver_unregister(struct locomo_driver *driver) +{ + driver_unregister(&driver->drv); +} + +static int __init locomo_init(void) +{ + int ret = bus_register(&locomo_bus_type); + if (ret == 0) + driver_register(&locomo_device_driver); + return ret; +} + +static void __exit locomo_exit(void) +{ + driver_unregister(&locomo_device_driver); + bus_unregister(&locomo_bus_type); +} + +module_init(locomo_init); +module_exit(locomo_exit); + +MODULE_DESCRIPTION("Sharp LoCoMo core driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Lenz "); + +EXPORT_SYMBOL(locomo_driver_register); +EXPORT_SYMBOL(locomo_driver_unregister); diff --git a/arch/arm/common/time-acorn.c b/arch/arm/common/time-acorn.c new file mode 100644 index 000000000..eb0628296 --- /dev/null +++ b/arch/arm/common/time-acorn.c @@ -0,0 +1,67 @@ +/* + * linux/arch/arm/common/time-acorn.c + * + * Copyright (c) 1996-2000 Russell King. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Changelog: + * 24-Sep-1996 RMK Created + * 10-Oct-1996 RMK Brought up to date with arch-sa110eval + * 04-Dec-1997 RMK Updated for new arch/arm/time.c + * 13=Jun-2004 DS Moved to arch/arm/common b/c shared w/CLPS7500 + */ +#include +#include + +#include +#include +#include + +#include + +static unsigned long ioctime_gettimeoffset(void) +{ + unsigned int count1, count2, status; + long offset; + + ioc_writeb (0, IOC_T0LATCH); + barrier (); + count1 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8); + barrier (); + status = ioc_readb(IOC_IRQREQA); + barrier (); + ioc_writeb (0, IOC_T0LATCH); + barrier (); + count2 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8); + + offset = count2; + if (count2 < count1) { + /* + * We have not had an interrupt between reading count1 + * and count2. + */ + if (status & (1 << 5)) + offset -= LATCH; + } else if (count2 > count1) { + /* + * We have just had another interrupt between reading + * count1 and count2. + */ + offset -= LATCH; + } + + offset = (LATCH - offset) * (tick_nsec / 1000); + return (offset + LATCH/2) / LATCH; +} + +void __init ioctime_init(void) +{ + ioc_writeb(LATCH & 255, IOC_T0LTCHL); + ioc_writeb(LATCH >> 8, IOC_T0LTCHH); + ioc_writeb(0, IOC_T0GO); + + gettimeoffset = ioctime_gettimeoffset; +} diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig new file mode 100644 index 000000000..fd95f39bc --- /dev/null +++ b/arch/arm/configs/ixp4xx_defconfig @@ -0,0 +1,1081 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_ARM=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y + +# +# Loadable module support +# +CONFIG_MODULES=y +# CONFIG_MODULE_UNLOAD is not set +CONFIG_OBSOLETE_MODPARM=y +CONFIG_MODVERSIONS=y +CONFIG_KMOD=y + +# +# System Type +# +# CONFIG_ARCH_ADIFCC is not set +# CONFIG_ARCH_CLPS7500 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CO285 is not set +# CONFIG_ARCH_PXA is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_CAMELOT is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP3XX is not set +CONFIG_ARCH_IXP4XX=y +# CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_VERSATILE_PB is not set + +# +# CLPS711X/EP721X Implementations +# + +# +# Epxa10db +# + +# +# Footbridge Implementations +# + +# +# IOP3xx Implementation Options +# +# CONFIG_ARCH_IOP310 is not set +# CONFIG_ARCH_IOP321 is not set + +# +# IOP3xx Chipset Features +# +CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y + +# +# Intel IXP4xx Implementation Options +# + +# +# IXP4xx Platforms +# +CONFIG_ARCH_IXDP425=y +CONFIG_ARCH_IXCDP1100=y +CONFIG_ARCH_PRPMC1100=y +CONFIG_ARCH_ADI_COYOTE=y +# CONFIG_ARCH_AVILA is not set +CONFIG_ARCH_IXDP4XX=y + +# +# IXP4xx Options +# +# 
CONFIG_IXP4XX_INDIRECT_PCI is not set + +# +# Intel PXA250/210 Implementations +# + +# +# SA11x0 Implementations +# + +# +# TI OMAP Implementations +# + +# +# OMAP Core Type +# + +# +# OMAP Board Type +# + +# +# OMAP Feature Selections +# + +# +# S3C2410 Implementations +# + +# +# LH7A40X Implementations +# +CONFIG_DMABOUNCE=y + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_XSCALE=y +CONFIG_CPU_32v5=y +CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_TLB_V4WBI=y +CONFIG_CPU_MINICACHE=y + +# +# Processor Features +# +# CONFIG_ARM_THUMB is not set +CONFIG_CPU_BIG_ENDIAN=y +CONFIG_XSCALE_PMU=y + +# +# General setup +# +CONFIG_PCI=y +# CONFIG_ZBOOT_ROM is not set +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_PCI_LEGACY_PROC=y +CONFIG_PCI_NAMES=y + +# +# At least one math emulation must be selected +# +CONFIG_FPE_NWFPE=y +# CONFIG_FPE_NWFPE_XP is not set +# CONFIG_FPE_FASTFPE is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Generic Driver Options +# +# CONFIG_DEBUG_DRIVER is not set +CONFIG_PM=y +# CONFIG_PREEMPT is not set +CONFIG_APM=y +# CONFIG_ARTHUR is not set +CONFIG_CMDLINE="console=ttyS0,115200 ip=bootp root=/dev/nfs" +CONFIG_ALIGNMENT_TRAP=y + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_REDBOOT_PARTS=y +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# CONFIG_MTD_OBSOLETE_CHIPS is not set + +# +# Mapping drivers for chip access +# +CONFIG_MTD_COMPLEX_MAPPINGS=y +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_ARM_INTEGRATOR is not set +CONFIG_MTD_IXP4XX=y +# CONFIG_MTD_EDB7312 is not set +# CONFIG_MTD_PCI is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLKMTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +CONFIG_MTD_NAND=m +# CONFIG_MTD_NAND_VERIFY_WRITE is not set +CONFIG_MTD_NAND_IDS=m + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_CARMEL is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_BLK_DEV_INITRD=y + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=m +CONFIG_PACKET_MMAP=y +CONFIG_NETLINK_DEV=m +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y 
+CONFIG_IP_ROUTE_FWMARK=y +CONFIG_IP_ROUTE_NAT=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_TOS=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set + +# +# IP: Virtual Server Configuration +# +CONFIG_IP_VS=m +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +# CONFIG_IP_VS_PROTO_TCP is not set +# CONFIG_IP_VS_PROTO_UDP is not set +# CONFIG_IP_VS_PROTO_ESP is not set +# CONFIG_IP_VS_PROTO_AH is not set + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +# CONFIG_IP_VS_SED is not set +# CONFIG_IP_VS_NQ is not set + +# +# IPVS application helper +# +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set +CONFIG_BRIDGE_NETFILTER=y + +# +# IP: Netfilter Configuration +# +CONFIG_IP_NF_CONNTRACK=m +CONFIG_IP_NF_FTP=m +CONFIG_IP_NF_IRC=m +# CONFIG_IP_NF_TFTP is not set +# CONFIG_IP_NF_AMANDA is not set +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_LIMIT=m +# CONFIG_IP_NF_MATCH_IPRANGE is not set +CONFIG_IP_NF_MATCH_MAC=m +# CONFIG_IP_NF_MATCH_PKTTYPE is not set +CONFIG_IP_NF_MATCH_MARK=m +CONFIG_IP_NF_MATCH_MULTIPORT=m +CONFIG_IP_NF_MATCH_TOS=m +# CONFIG_IP_NF_MATCH_RECENT is not set +# CONFIG_IP_NF_MATCH_ECN is not set +# CONFIG_IP_NF_MATCH_DSCP is not set +CONFIG_IP_NF_MATCH_AH_ESP=m +CONFIG_IP_NF_MATCH_LENGTH=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_MATCH_TCPMSS=m +# CONFIG_IP_NF_MATCH_HELPER is not set +CONFIG_IP_NF_MATCH_STATE=m +# CONFIG_IP_NF_MATCH_CONNTRACK is not set +CONFIG_IP_NF_MATCH_OWNER=m +# CONFIG_IP_NF_MATCH_PHYSDEV is not set +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_REDIRECT=m +# CONFIG_IP_NF_TARGET_NETMAP is not set +# CONFIG_IP_NF_TARGET_SAME is not set +CONFIG_IP_NF_NAT_LOCAL=y +CONFIG_IP_NF_NAT_SNMP_BASIC=m +CONFIG_IP_NF_NAT_IRC=m +CONFIG_IP_NF_NAT_FTP=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_TOS=m +# CONFIG_IP_NF_TARGET_ECN is not set +# CONFIG_IP_NF_TARGET_DSCP is not set +CONFIG_IP_NF_TARGET_MARK=m +# CONFIG_IP_NF_TARGET_CLASSIFY is not set +CONFIG_IP_NF_TARGET_LOG=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_TARGET_TCPMSS=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +# CONFIG_IP_NF_ARP_MANGLE is not set +CONFIG_IP_NF_COMPAT_IPCHAINS=m +CONFIG_IP_NF_COMPAT_IPFWADM=m +# CONFIG_IP_NF_RAW is not set + +# +# Bridge: Netfilter Configuration +# +# CONFIG_BRIDGE_NF_EBTABLES is not set +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +CONFIG_ATM=y +CONFIG_ATM_CLIP=y +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +# CONFIG_DECNET is not set +CONFIG_LLC=m +# CONFIG_LLC2 is not set +CONFIG_IPX=m +# CONFIG_IPX_INTERN is not set +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=y +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_X25=m +CONFIG_LAPB=m +# CONFIG_NET_DIVERT is not set 
+CONFIG_ECONET=m +CONFIG_ECONET_AUNUDP=y +CONFIG_ECONET_NATIVE=y +CONFIG_WAN_ROUTER=m +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +# CONFIG_NET_SCH_HFSC is not set +CONFIG_NET_SCH_CSZ=m +# CONFIG_NET_SCH_ATM is not set +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +# CONFIG_NET_SCH_DELAY is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_QOS=y +CONFIG_NET_ESTIMATOR=y +CONFIG_NET_CLS=y +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_ROUTE=y +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_POLICE=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +CONFIG_DUMMY=y +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_ETHERTAP is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set + +# +# Tulip family network device support +# +# CONFIG_NET_TULIP is not set +# CONFIG_HP100 is not set +CONFIG_NET_PCI=y +# CONFIG_PCNET32 is not set +# CONFIG_AMD8111_ETH is not set +# CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_B44 is not set +# CONFIG_FORCEDETH is not set +# CONFIG_DGRS is not set +CONFIG_EEPRO100=y +# CONFIG_EEPRO100_PIO is not set +# CONFIG_E100 is not set +# CONFIG_FEALNX is not set +# CONFIG_NATSEMI is not set +# CONFIG_NE2K_PCI is not set +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_SIS900 is not set +# CONFIG_EPIC100 is not set +# CONFIG_SUNDANCE is not set +# CONFIG_TLAN is not set +# CONFIG_VIA_RHINE is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +CONFIG_NET_RADIO=y + +# +# Obsolete Wireless cards support (pre-802.11) +# +# CONFIG_STRIP is not set + +# +# Wireless 802.11b ISA/PCI cards support +# +# CONFIG_AIRO is not set +CONFIG_HERMES=y +# CONFIG_PLX_HERMES is not set +# CONFIG_TMD_HERMES is not set +CONFIG_PCI_HERMES=y +# CONFIG_ATMEL is not set + +# +# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support +# +CONFIG_NET_WIRELESS=y + +# +# Wan interfaces +# +CONFIG_WAN=y +# CONFIG_DSCC4 is not set +# CONFIG_LANMEDIA is not set +# CONFIG_SYNCLINK_SYNCPPP is not set +CONFIG_HDLC=m +CONFIG_HDLC_RAW=y +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=y +CONFIG_HDLC_FR=y +CONFIG_HDLC_PPP=y +CONFIG_HDLC_X25=y +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300 is not set +# CONFIG_FARSYNC is not set +CONFIG_DLCI=m +CONFIG_DLCI_COUNT=24 +CONFIG_DLCI_MAX=8 +CONFIG_WAN_ROUTER_DRIVERS=y +# CONFIG_CYCLADES_SYNC is not set +# CONFIG_LAPBETHER is not set +# CONFIG_X25_ASY is not set + +# +# ATM drivers +# +CONFIG_ATM_TCP=m +# 
CONFIG_ATM_LANAI is not set +# CONFIG_ATM_ENI is not set +# CONFIG_ATM_FIRESTREAM is not set +# CONFIG_ATM_ZATM is not set +# CONFIG_ATM_NICSTAR is not set +# CONFIG_ATM_IDT77252 is not set +# CONFIG_ATM_AMBASSADOR is not set +# CONFIG_ATM_HORIZON is not set +# CONFIG_ATM_IA is not set +# CONFIG_ATM_FORE200E_MAYBE is not set +# CONFIG_ATM_HE is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_RCPCI is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ATA/ATAPI/MFM/RLL support +# +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +CONFIG_BLK_DEV_IDEDISK=y +# CONFIG_IDEDISK_MULTI_MODE is not set +# CONFIG_IDEDISK_STROKE is not set +# CONFIG_BLK_DEV_IDECD is not set +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDEFLOPPY is not set +# CONFIG_IDE_TASK_IOCTL is not set +# CONFIG_IDE_TASKFILE_IO is not set + +# +# IDE chipset support/bugfixes +# +CONFIG_IDE_GENERIC=y +CONFIG_BLK_DEV_IDEPCI=y +# CONFIG_IDEPCI_SHARE_IRQ is not set +# CONFIG_BLK_DEV_OFFBOARD is not set +# CONFIG_BLK_DEV_GENERIC is not set +# CONFIG_BLK_DEV_OPTI621 is not set +# CONFIG_BLK_DEV_SL82C105 is not set +CONFIG_BLK_DEV_IDEDMA_PCI=y +# CONFIG_BLK_DEV_IDEDMA_FORCED is not set +# CONFIG_IDEDMA_PCI_AUTO is not set +CONFIG_BLK_DEV_ADMA=y +# CONFIG_BLK_DEV_AEC62XX is not set +# CONFIG_BLK_DEV_ALI15X3 is not set +# CONFIG_BLK_DEV_AMD74XX is not set +CONFIG_BLK_DEV_CMD64X=y +# CONFIG_BLK_DEV_TRIFLEX is not set +# CONFIG_BLK_DEV_CY82C693 is not set +# CONFIG_BLK_DEV_CS5520 is not set +# CONFIG_BLK_DEV_CS5530 is not set +# CONFIG_BLK_DEV_HPT34X is not set +CONFIG_BLK_DEV_HPT366=y +# CONFIG_BLK_DEV_SC1200 is not set +# CONFIG_BLK_DEV_PIIX is not set +# CONFIG_BLK_DEV_NS87415 is not set +# CONFIG_BLK_DEV_PDC202XX_OLD is not set +CONFIG_BLK_DEV_PDC202XX_NEW=y +# CONFIG_PDC202XX_FORCE is not set +# CONFIG_BLK_DEV_SVWKS is not set +# CONFIG_BLK_DEV_SIIMAGE is not set +# CONFIG_BLK_DEV_SLC90E66 is not set +# CONFIG_BLK_DEV_TRM290 is not set +# CONFIG_BLK_DEV_VIA82CXXX is not set +CONFIG_BLK_DEV_IDEDMA=y +# CONFIG_IDEDMA_IVB is not set +# CONFIG_IDEDMA_AUTO is not set +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Fusion MPT device support +# +# CONFIG_FUSION is not set + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +# CONFIG_SERIO is not set +# CONFIG_SERIO_I8042 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=2 +# CONFIG_SERIAL_8250_EXTENDED is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y 
+CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +CONFIG_IXP4XX_WATCHDOG=y + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_I2C_CHARDEV=y + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=y +# CONFIG_I2C_ALGOPCF is not set + +# +# I2C Hardware Bus support +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_I810 is not set +# CONFIG_I2C_ISA is not set +CONFIG_I2C_IXP4XX=y +# CONFIG_I2C_NFORCE2 is not set +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_PIIX4 is not set +# CONFIG_I2C_PROSAVAGE is not set +# CONFIG_I2C_SAVAGE4 is not set +# CONFIG_SCx200_ACB is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set +# CONFIG_I2C_VOODOO3 is not set + +# +# Hardware Sensors Chip support +# +CONFIG_I2C_SENSOR=y +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ASB100 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_FSCHER is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83627HF is not set + +# +# Other I2C Chip support +# +CONFIG_SENSORS_EEPROM=y +# CONFIG_SENSORS_PCF8574 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +# CONFIG_EXT2_FS_SECURITY is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +CONFIG_EXT3_FS_POSIX_ACL=y +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set 
+CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +# CONFIG_JFFS2_FS_NAND is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_INTERMEZZO_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Misc devices +# + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# Kernel hacking +# +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_USER is not set +# CONFIG_DEBUG_INFO is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SLAB is not set +CONFIG_MAGIC_SYSRQ=y +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_WAITQ is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_ERRORS=y +CONFIG_DEBUG_LL=y +# CONFIG_DEBUG_ICEDCC is not set +# CONFIG_DEBUG_BDI2000_XSCALE is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y diff --git a/arch/arm/configs/mainstone_defconfig b/arch/arm/configs/mainstone_defconfig new file mode 100644 index 000000000..925b2777f --- /dev/null +++ b/arch/arm/configs/mainstone_defconfig @@ -0,0 +1,743 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_ARM=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_HOTPLUG=y +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y 
+CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y + +# +# Loadable module support +# +CONFIG_MODULES=y +# CONFIG_MODULE_UNLOAD is not set +CONFIG_OBSOLETE_MODPARM=y +# CONFIG_MODVERSIONS is not set +# CONFIG_KMOD is not set + +# +# System Type +# +# CONFIG_ARCH_ADIFCC is not set +# CONFIG_ARCH_CLPS7500 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CO285 is not set +CONFIG_ARCH_PXA=y +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_CAMELOT is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP3XX is not set +# CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_OMAP is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_VERSATILE_PB is not set + +# +# CLPS711X/EP721X Implementations +# + +# +# Epxa10db +# + +# +# Footbridge Implementations +# + +# +# IOP3xx Implementation Options +# +# CONFIG_ARCH_IOP310 is not set +# CONFIG_ARCH_IOP321 is not set + +# +# IOP3xx Chipset Features +# + +# +# Intel PXA2xx Implementations +# +# CONFIG_ARCH_LUBBOCK is not set +CONFIG_MACH_MAINSTONE=y +# CONFIG_ARCH_PXA_IDP is not set +CONFIG_PXA27x=y +CONFIG_IWMMXT=y + +# +# SA11x0 Implementations +# + +# +# TI OMAP Implementations +# + +# +# OMAP Core Type +# + +# +# OMAP Board Type +# + +# +# OMAP Feature Selections +# + +# +# S3C2410 Implementations +# + +# +# LH7A40X Implementations +# + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_XSCALE=y +CONFIG_CPU_32v5=y +CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_TLB_V4WBI=y +CONFIG_CPU_MINICACHE=y + +# +# Processor Features +# +# CONFIG_ARM_THUMB is not set +CONFIG_XSCALE_PMU=y + +# +# General setup +# +# CONFIG_ZBOOT_ROM is not set +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 + +# +# PCMCIA/CardBus support +# +CONFIG_PCMCIA=y +# CONFIG_PCMCIA_DEBUG is not set +# CONFIG_TCIC is not set +CONFIG_PCMCIA_PXA2XX=y + +# +# At least one math emulation must be selected +# +CONFIG_FPE_NWFPE=y +# CONFIG_FPE_NWFPE_XP is not set +# CONFIG_FPE_FASTFPE is not set +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Generic Driver Options +# +# CONFIG_FW_LOADER is not set +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_PM is not set +# CONFIG_PREEMPT is not set +# CONFIG_ARTHUR is not set +CONFIG_CMDLINE="root=/dev/nfs ip=bootp console=ttyS0,115200 mem=64M" +CONFIG_LEDS=y +CONFIG_LEDS_TIMER=y +CONFIG_LEDS_CPU=y +CONFIG_ALIGNMENT_TRAP=y + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_CONCAT is not set +CONFIG_MTD_REDBOOT_PARTS=y +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_NOSWAP=y +# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set +# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set +CONFIG_MTD_CFI_GEOMETRY=y +# CONFIG_MTD_CFI_B1 is not set +# CONFIG_MTD_CFI_B2 is not set +CONFIG_MTD_CFI_B4=y +# CONFIG_MTD_CFI_B8 is not set +# CONFIG_MTD_CFI_I1 is not set +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set 
+CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# CONFIG_MTD_OBSOLETE_CHIPS is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_ARM_INTEGRATOR is not set +# CONFIG_MTD_EDB7312 is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLKMTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +# CONFIG_MTD_NAND is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_RAM is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_PACKET is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_SMC91X=y + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# PCMCIA network device support +# +# CONFIG_NET_PCMCIA is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ATA/ATAPI/MFM/RLL support +# +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +CONFIG_BLK_DEV_IDEDISK=y +# CONFIG_IDEDISK_MULTI_MODE is not set +# CONFIG_IDEDISK_STROKE is not set +CONFIG_BLK_DEV_IDECS=y +# CONFIG_BLK_DEV_IDECD is not set +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDEFLOPPY is not set +# CONFIG_IDE_TASK_IOCTL is not set +# CONFIG_IDE_TASKFILE_IO is not set + +# +# IDE chipset support/bugfixes +# +# 
CONFIG_IDE_GENERIC is not set +# CONFIG_BLK_DEV_IDEDMA is not set +# CONFIG_IDEDMA_AUTO is not set +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_CT82C710 is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_PXA=y +CONFIG_SERIAL_PXA_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set + +# +# PCMCIA character devices +# +# CONFIG_SYNCLINK_CS is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +# CONFIG_JFFS2_FS_NAND is not set +# CONFIG_CRAMFS is 
not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_INTERMEZZO_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set + +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ISO8859_1=y +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Misc devices +# + +# +# USB support +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# Kernel hacking +# +CONFIG_FRAME_POINTER=y +CONFIG_DEBUG_USER=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SLAB is not set +CONFIG_MAGIC_SYSRQ=y +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_WAITQ is not set +CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_ERRORS=y +CONFIG_DEBUG_LL=y +# CONFIG_DEBUG_ICEDCC is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y diff --git a/arch/arm/configs/smdk2410_defconfig b/arch/arm/configs/smdk2410_defconfig new file mode 100644 index 000000000..a88724f26 --- /dev/null +++ b/arch/arm/configs/smdk2410_defconfig @@ -0,0 +1,667 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_ARM=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y 
+CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# System Type +# +# CONFIG_ARCH_ADIFCC is not set +# CONFIG_ARCH_CLPS7500 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_CO285 is not set +# CONFIG_ARCH_PXA is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_CAMELOT is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_IOP3XX is not set +# CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_SHARK is not set +CONFIG_ARCH_S3C2410=y +# CONFIG_ARCH_OMAP is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_VERSATILE_PB is not set + +# +# CLPS711X/EP721X Implementations +# + +# +# Epxa10db +# + +# +# Footbridge Implementations +# + +# +# IOP3xx Implementation Options +# +# CONFIG_ARCH_IOP310 is not set +# CONFIG_ARCH_IOP321 is not set + +# +# IOP3xx Chipset Features +# + +# +# Intel PXA250/210 Implementations +# + +# +# SA11x0 Implementations +# + +# +# TI OMAP Implementations +# + +# +# OMAP Core Type +# + +# +# OMAP Board Type +# + +# +# OMAP Feature Selections +# + +# +# S3C2410 Implementations +# +# CONFIG_ARCH_BAST is not set +# CONFIG_ARCH_H1940 is not set +CONFIG_ARCH_SMDK2410=y +# CONFIG_MACH_VR1000 is not set + +# +# LH7A40X Implementations +# + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_ARM920T=y +CONFIG_CPU_32v4=y +CONFIG_CPU_ABRT_EV4T=y +CONFIG_CPU_CACHE_V4WT=y +CONFIG_CPU_COPY_V4WB=y +CONFIG_CPU_TLB_V4WBI=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_DCACHE_WRITETHROUGH is not set + +# +# General setup +# +# CONFIG_ZBOOT_ROM is not set +CONFIG_ZBOOT_ROM_TEXT=0 +CONFIG_ZBOOT_ROM_BSS=0 + +# +# At least one math emulation must be selected +# +# CONFIG_FPE_NWFPE is not set +# CONFIG_FPE_FASTFPE is not set +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_AOUT=y +# CONFIG_BINFMT_MISC is not set + +# +# Generic Driver Options +# +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_PM is not set +# CONFIG_PREEMPT is not set +# CONFIG_ARTHUR is not set +CONFIG_CMDLINE="root=1f04 mem=32M" +CONFIG_ALIGNMENT_TRAP=y + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Memory Technology Devices (MTD) +# +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +# CONFIG_MTD_PARTITIONS is not set +# CONFIG_MTD_CONCAT is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=y +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=y +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_CFI_INTELEXT=y +# CONFIG_MTD_CFI_AMDSTD is not set +# CONFIG_MTD_CFI_STAA is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# CONFIG_MTD_OBSOLETE_CHIPS is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set 
+# CONFIG_MTD_ARM_INTEGRATOR is not set +# CONFIG_MTD_EDB7312 is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLKMTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set + +# +# NAND Flash Device Drivers +# +# CONFIG_MTD_NAND is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_INITRD is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_PACKET is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set + +# +# Input Device Drivers 
+# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_S3C2410=y +CONFIG_SERIAL_S3C2410_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +CONFIG_ROMFS_FS=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_INTERMEZZO_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION 
is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Graphics support +# +CONFIG_FB=y +CONFIG_FB_VIRTUAL=y + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_PCI_CONSOLE=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y + +# +# Logo configuration +# +# CONFIG_LOGO is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# Misc devices +# + +# +# USB support +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# Kernel hacking +# +CONFIG_FRAME_POINTER=y +CONFIG_DEBUG_USER=y +# CONFIG_DEBUG_INFO is not set +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SLAB is not set +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_WAITQ is not set +# CONFIG_DEBUG_BUGVERBOSE is not set +# CONFIG_DEBUG_ERRORS is not set +CONFIG_DEBUG_LL=y +# CONFIG_DEBUG_ICEDCC is not set +CONFIG_DEBUG_LL_PRINTK=y +CONFIG_DEBUG_S3C2410_PORT=y +CONFIG_DEBUG_S3C2410_UART=0 + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y +CONFIG_LIBCRC32C=y diff --git a/arch/arm/mach-footbridge/time.c b/arch/arm/mach-footbridge/time.c new file mode 100644 index 000000000..e9f5708b4 --- /dev/null +++ b/arch/arm/mach-footbridge/time.c @@ -0,0 +1,296 @@ +/* + * linux/include/asm-arm/arch-ebsa285/time.h + * + * Copyright (C) 1998 Russell King. + * Copyright (C) 1998 Phil Blundell + * + * CATS has a real-time clock, though the evaluation board doesn't. + * + * Changelog: + * 21-Mar-1998 RMK Created + * 27-Aug-1998 PJB CATS support + * 28-Dec-1998 APH Made leds optional + * 20-Jan-1999 RMK Started merge of EBSA285, CATS and NetWinder + * 16-Mar-1999 RMK More support for EBSA285-like machines with RTCs in + */ + +#define RTC_PORT(x) (rtc_base+(x)) +#define RTC_ALWAYS_BCD 0 + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include + +static int rtc_base; + +#define mSEC_10_from_14 ((14318180 + 100) / 200) + +static unsigned long isa_gettimeoffset(void) +{ + int count; + + static int count_p = (mSEC_10_from_14/6); /* for the first call after boot */ + static unsigned long jiffies_p = 0; + + /* + * cache volatile jiffies temporarily; we have IRQs turned off. + */ + unsigned long jiffies_t; + + /* timer count may underflow right here */ + outb_p(0x00, 0x43); /* latch the count ASAP */ + + count = inb_p(0x40); /* read the latched count */ + + /* + * We do this guaranteed double memory access instead of a _p + * postfix in the previous port access. Wheee, hackady hack + */ + jiffies_t = jiffies; + + count |= inb_p(0x40) << 8; + + /* Detect timer underflows. If we haven't had a timer tick since + the last time we were called, and time is apparently going + backwards, the counter must have wrapped during this routine. 
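+	   As a rough sanity check on the constants used here (figures are
+	   approximate): mSEC_10_from_14 = (14318180 + 100) / 200 is about
+	   71591, so mSEC_10_from_14/6 is about 11931 - one 10ms jiffy of
+	   the ~1.193MHz PIT clock (14.31818MHz / 12).  The arithmetic that
+	   follows then scales the elapsed count into microseconds, roughly
+
+	       usec = elapsed * (tick_nsec / 1000) / (mSEC_10_from_14/6)
+
+	   which at HZ=100 amounts to ~10000us per full jiffy.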
*/ + if ((jiffies_t == jiffies_p) && (count > count_p)) + count -= (mSEC_10_from_14/6); + else + jiffies_p = jiffies_t; + + count_p = count; + + count = (((mSEC_10_from_14/6)-1) - count) * (tick_nsec / 1000); + count = (count + (mSEC_10_from_14/6)/2) / (mSEC_10_from_14/6); + + return count; +} + +static irqreturn_t +isa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + timer_tick(regs); + + return IRQ_HANDLED; +} + +static unsigned long __init get_isa_cmos_time(void) +{ + unsigned int year, mon, day, hour, min, sec; + int i; + + // check to see if the RTC makes sense..... + if ((CMOS_READ(RTC_VALID) & RTC_VRT) == 0) + return mktime(1970, 1, 1, 0, 0, 0); + + /* The Linux interpretation of the CMOS clock register contents: + * When the Update-In-Progress (UIP) flag goes from 1 to 0, the + * RTC registers show the second which has precisely just started. + * Let's hope other operating systems interpret the RTC the same way. + */ + /* read RTC exactly on falling edge of update flag */ + for (i = 0 ; i < 1000000 ; i++) /* may take up to 1 second... */ + if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) + break; + + for (i = 0 ; i < 1000000 ; i++) /* must try at least 2.228 ms */ + if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)) + break; + + do { /* Isn't this overkill ? UIP above should guarantee consistency */ + sec = CMOS_READ(RTC_SECONDS); + min = CMOS_READ(RTC_MINUTES); + hour = CMOS_READ(RTC_HOURS); + day = CMOS_READ(RTC_DAY_OF_MONTH); + mon = CMOS_READ(RTC_MONTH); + year = CMOS_READ(RTC_YEAR); + } while (sec != CMOS_READ(RTC_SECONDS)); + + if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BCD_TO_BIN(sec); + BCD_TO_BIN(min); + BCD_TO_BIN(hour); + BCD_TO_BIN(day); + BCD_TO_BIN(mon); + BCD_TO_BIN(year); + } + if ((year += 1900) < 1970) + year += 100; + return mktime(year, mon, day, hour, min, sec); +} + +static int +set_isa_cmos_time(void) +{ + int retval = 0; + int real_seconds, real_minutes, cmos_minutes; + unsigned char save_control, save_freq_select; + unsigned long nowtime = xtime.tv_sec; + + save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */ + CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); + + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */ + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + + cmos_minutes = CMOS_READ(RTC_MINUTES); + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) + BCD_TO_BIN(cmos_minutes); + + /* + * since we're only adjusting minutes and seconds, + * don't interfere with hour overflow. This avoids + * messing with unknown time zones but requires your + * RTC not to be off by more than 15 minutes + */ + real_seconds = nowtime % 60; + real_minutes = nowtime / 60; + if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) + real_minutes += 30; /* correct for half hour time zone */ + real_minutes %= 60; + + if (abs(real_minutes - cmos_minutes) < 30) { + if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) { + BIN_TO_BCD(real_seconds); + BIN_TO_BCD(real_minutes); + } + CMOS_WRITE(real_seconds,RTC_SECONDS); + CMOS_WRITE(real_minutes,RTC_MINUTES); + } else + retval = -1; + + /* The following flags have to be released exactly in this order, + * otherwise the DS12887 (popular MC146818A clone with integrated + * battery and quartz) will not reset the oscillator and will not + * update precisely 500 ms later. You won't find this mentioned in + * the Dallas Semiconductor data sheets, but who believes data + * sheets anyway ... 
-- Markus Kuhn + */ + CMOS_WRITE(save_control, RTC_CONTROL); + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + + return retval; +} + + +static unsigned long timer1_latch; + +static unsigned long timer1_gettimeoffset (void) +{ + unsigned long value = timer1_latch - *CSR_TIMER1_VALUE; + + return ((tick_nsec / 1000) * value) / timer1_latch; +} + +static irqreturn_t +timer1_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + *CSR_TIMER1_CLR = 0; + + timer_tick(regs); + + return IRQ_HANDLED; +} + +static struct irqaction footbridge_timer_irq = { + .flags = SA_INTERRUPT +}; + +/* + * Set up timer interrupt. + */ +void __init footbridge_init_time(void) +{ + if (machine_is_co285() || + machine_is_personal_server()) + /* + * Add-in 21285s shouldn't access the RTC + */ + rtc_base = 0; + else + rtc_base = 0x70; + + if (rtc_base) { + int reg_d, reg_b; + + /* + * Probe for the RTC. + */ + reg_d = CMOS_READ(RTC_REG_D); + + /* + * make sure the divider is set + */ + CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_REG_A); + + /* + * Set control reg B + * (24 hour mode, update enabled) + */ + reg_b = CMOS_READ(RTC_REG_B) & 0x7f; + reg_b |= 2; + CMOS_WRITE(reg_b, RTC_REG_B); + + if ((CMOS_READ(RTC_REG_A) & 0x7f) == RTC_REF_CLCK_32KHZ && + CMOS_READ(RTC_REG_B) == reg_b) { + struct timespec tv; + + /* + * We have a RTC. Check the battery + */ + if ((reg_d & 0x80) == 0) + printk(KERN_WARNING "RTC: *** warning: CMOS battery bad\n"); + + tv.tv_nsec = 0; + tv.tv_sec = get_isa_cmos_time(); + do_settimeofday(&tv); + set_rtc = set_isa_cmos_time; + } else + rtc_base = 0; + } + + if (machine_is_ebsa285() || + machine_is_co285() || + machine_is_personal_server()) { + gettimeoffset = timer1_gettimeoffset; + + timer1_latch = (mem_fclk_21285 + 8 * HZ) / (16 * HZ); + + *CSR_TIMER1_CLR = 0; + *CSR_TIMER1_LOAD = timer1_latch; + *CSR_TIMER1_CNTL = TIMER_CNTL_ENABLE | TIMER_CNTL_AUTORELOAD | TIMER_CNTL_DIV16; + + footbridge_timer_irq.name = "Timer1 Timer Tick"; + footbridge_timer_irq.handler = timer1_interrupt; + + setup_irq(IRQ_TIMER1, &footbridge_timer_irq); + + } else { + /* enable PIT timer */ + /* set for periodic (4) and LSB/MSB write (0x30) */ + outb(0x34, 0x43); + outb((mSEC_10_from_14/6) & 0xFF, 0x40); + outb((mSEC_10_from_14/6) >> 8, 0x40); + + gettimeoffset = isa_gettimeoffset; + + footbridge_timer_irq.name = "ISA Timer Tick"; + footbridge_timer_irq.handler = isa_timer_interrupt; + + setup_irq(IRQ_ISA_TIMER, &footbridge_timer_irq); + } +} diff --git a/arch/arm/mach-integrator/clock.c b/arch/arm/mach-integrator/clock.c new file mode 100644 index 000000000..6af3715ad --- /dev/null +++ b/arch/arm/mach-integrator/clock.c @@ -0,0 +1,138 @@ +/* + * linux/arch/arm/mach-integrator/clock.c + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
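+ *
+ * A minimal consumer sketch for the interface implemented here (the
+ * "UARTCLK" name and the NULL device are only illustrative, taken from
+ * the fixed clocks registered at the bottom of this file):
+ *
+ *	struct clk *clk = clk_get(NULL, "UARTCLK");
+ *	if (!IS_ERR(clk)) {
+ *		unsigned long rate = clk_get_rate(clk);	/* 14745600 here */
+ *		clk_put(clk);
+ *	}
+ *
+ * clk_get() matches purely by name on this platform, so the dev
+ * argument is ignored.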
+ */ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "clock.h" + +static LIST_HEAD(clocks); +static DECLARE_MUTEX(clocks_sem); + +struct clk *clk_get(struct device *dev, const char *id) +{ + struct clk *p, *clk = ERR_PTR(-ENOENT); + + down(&clocks_sem); + list_for_each_entry(p, &clocks, node) { + if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { + clk = p; + break; + } + } + up(&clocks_sem); + + return clk; +} +EXPORT_SYMBOL(clk_get); + +void clk_put(struct clk *clk) +{ + module_put(clk->owner); +} +EXPORT_SYMBOL(clk_put); + +int clk_enable(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_disable); + +int clk_use(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_use); + +void clk_unuse(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_unuse); + +unsigned long clk_get_rate(struct clk *clk) +{ + return clk->rate; +} +EXPORT_SYMBOL(clk_get_rate); + +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + return rate; +} +EXPORT_SYMBOL(clk_round_rate); + +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + int ret = -EIO; + if (clk->setvco) { + struct icst525_vco vco; + + vco = icst525_khz_to_vco(clk->params, rate); + clk->rate = icst525_khz(clk->params, vco); + + printk("Clock %s: setting VCO reg params: S=%d R=%d V=%d\n", + clk->name, vco.s, vco.r, vco.v); + + clk->setvco(clk, vco); + ret = 0; + } + return 0; +} +EXPORT_SYMBOL(clk_set_rate); + +/* + * These are fixed clocks. + */ +static struct clk kmi_clk = { + .name = "KMIREFCLK", + .rate = 24000000, +}; + +static struct clk uart_clk = { + .name = "UARTCLK", + .rate = 14745600, +}; + +int clk_register(struct clk *clk) +{ + down(&clocks_sem); + list_add(&clk->node, &clocks); + up(&clocks_sem); + return 0; +} +EXPORT_SYMBOL(clk_register); + +void clk_unregister(struct clk *clk) +{ + down(&clocks_sem); + list_del(&clk->node); + up(&clocks_sem); +} +EXPORT_SYMBOL(clk_unregister); + +static int __init clk_init(void) +{ + clk_register(&kmi_clk); + clk_register(&uart_clk); + return 0; +} +arch_initcall(clk_init); diff --git a/arch/arm/mach-integrator/clock.h b/arch/arm/mach-integrator/clock.h new file mode 100644 index 000000000..09e6328ce --- /dev/null +++ b/arch/arm/mach-integrator/clock.h @@ -0,0 +1,25 @@ +/* + * linux/arch/arm/mach-integrator/clock.h + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +struct module; +struct icst525_params; + +struct clk { + struct list_head node; + unsigned long rate; + struct module *owner; + const char *name; + const struct icst525_params *params; + void *data; + void (*setvco)(struct clk *, struct icst525_vco vco); +}; + +int clk_register(struct clk *clk); +void clk_unregister(struct clk *clk); diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile new file mode 100644 index 000000000..f656397f8 --- /dev/null +++ b/arch/arm/mach-ixp4xx/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the linux kernel. 
+# + +obj-y += common.o common-pci.o + +obj-$(CONFIG_ARCH_IXDP4XX) += ixdp425-pci.o ixdp425-setup.o +obj-$(CONFIG_ARCH_ADI_COYOTE) += coyote-pci.o coyote-setup.o +obj-$(CONFIG_ARCH_PRPMC1100) += prpmc1100-pci.o prpmc1100-setup.o + diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c new file mode 100644 index 000000000..c20dc3226 --- /dev/null +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -0,0 +1,543 @@ +/* + * arch/arm/mach-ixp4xx/common-pci.c + * + * IXP4XX PCI routines for all platforms + * + * Maintainer: Deepak Saxena + * + * Copyright (C) 2002 Intel Corporation. + * Copyright (C) 2003 Greg Ungerer + * Copyright (C) 2003-2004 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + + +/* + * IXP4xx PCI read function is dependent on whether we are + * running A0 or B0 (AppleGate) silicon. + */ +int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data); + +/* + * Base address for PCI regsiter region + */ +unsigned long ixp4xx_pci_reg_base = 0; + +/* + * PCI cfg an I/O routines are done by programming a + * command/byte enable register, and then read/writing + * the data from a data regsiter. We need to ensure + * these transactions are atomic or we will end up + * with corrupt data on the bus or in a driver. + */ +static spinlock_t ixp4xx_pci_lock = SPIN_LOCK_UNLOCKED; + +/* + * Read from PCI config space + */ +static void crp_read(u32 ad_cbe, u32 *data) +{ + unsigned long flags; + spin_lock_irqsave(&ixp4xx_pci_lock, flags); + *PCI_CRP_AD_CBE = ad_cbe; + *data = *PCI_CRP_RDATA; + spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); +} + +/* + * Write to PCI config space + */ +static void crp_write(u32 ad_cbe, u32 data) +{ + unsigned long flags; + spin_lock_irqsave(&ixp4xx_pci_lock, flags); + *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe; + *PCI_CRP_WDATA = data; + spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); +} + +static inline int check_master_abort(void) +{ + /* check Master Abort bit after access */ + unsigned long isr = *PCI_ISR; + + if (isr & PCI_ISR_PFE) { + /* make sure the Master Abort bit is reset */ + *PCI_ISR = PCI_ISR_PFE; + pr_debug("%s failed\n", __FUNCTION__); + return 1; + } + + return 0; +} + +int ixp4xx_pci_read_errata(u32 addr, u32 cmd, u32* data) +{ + unsigned long flags; + int retval = 0; + int i; + + spin_lock_irqsave(&ixp4xx_pci_lock, flags); + + *PCI_NP_AD = addr; + + /* + * PCI workaround - only works if NP PCI space reads have + * no side effects!!! Read 8 times. last one will be good. 
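+ *
+ * Concretely, each pass of the loop below re-issues the command
+ * through PCI_NP_CBE and then performs two back-to-back reads of
+ * PCI_NP_RDATA; only the value read on the final pass is kept.
+ * Parts without the A0 erratum use ixp4xx_pci_read_no_errata()
+ * instead, which issues a single command/read pair.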
+ */ + for (i = 0; i < 8; i++) { + *PCI_NP_CBE = cmd; + *data = *PCI_NP_RDATA; + *data = *PCI_NP_RDATA; + } + + if(check_master_abort()) + retval = 1; + + spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + return retval; +} + +int ixp4xx_pci_read_no_errata(u32 addr, u32 cmd, u32* data) +{ + unsigned long flags; + int retval = 0; + + spin_lock_irqsave(&ixp4xx_pci_lock, flags); + + *PCI_NP_AD = addr; + + /* set up and execute the read */ + *PCI_NP_CBE = cmd; + + /* the result of the read is now in NP_RDATA */ + *data = *PCI_NP_RDATA; + + if(check_master_abort()) + retval = 1; + + spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + return retval; +} + +int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data) +{ + unsigned long flags; + int retval = 0; + + spin_lock_irqsave(&ixp4xx_pci_lock, flags); + + *PCI_NP_AD = addr; + + /* set up the write */ + *PCI_NP_CBE = cmd; + + /* execute the write by writing to NP_WDATA */ + *PCI_NP_WDATA = data; + + if(check_master_abort()) + retval = 1; + + spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); + return retval; +} + +static u32 ixp4xx_config_addr(u8 bus_num, u16 devfn, int where) +{ + u32 addr; + if (!bus_num) { + /* type 0 */ + addr = BIT(32-PCI_SLOT(devfn)) | ((PCI_FUNC(devfn)) << 8) | + (where & ~3); + } else { + /* type 1 */ + addr = (bus_num << 16) | ((PCI_SLOT(devfn)) << 11) | + ((PCI_FUNC(devfn)) << 8) | (where & ~3) | 1; + } + return addr; +} + +/* + * Mask table, bits to mask for quantity of size 1, 2 or 4 bytes. + * 0 and 3 are not valid indexes... + */ +static u32 bytemask[] = { + /*0*/ 0, + /*1*/ 0xff, + /*2*/ 0xffff, + /*3*/ 0, + /*4*/ 0xffffffff, +}; + +static u32 local_byte_lane_enable_bits(u32 n, int size) +{ + if (size == 1) + return (0xf & ~BIT(n)) << CRP_AD_CBE_BESL; + if (size == 2) + return (0xf & ~(BIT(n) | BIT(n+1))) << CRP_AD_CBE_BESL; + if (size == 4) + return 0; + return 0xffffffff; +} + +static int local_read_config(int where, int size, u32 *value) +{ + u32 n, data; + pr_debug("local_read_config from %d size %d\n", where, size); + n = where % 4; + crp_read(where & ~3, &data); + *value = (data >> (8*n)) & bytemask[size]; + pr_debug("local_read_config read %#x\n", *value); + return PCIBIOS_SUCCESSFUL; +} + +static int local_write_config(int where, int size, u32 value) +{ + u32 n, byte_enables, data; + pr_debug("local_write_config %#x to %d size %d\n", value, where, size); + n = where % 4; + byte_enables = local_byte_lane_enable_bits(n, size); + if (byte_enables == 0xffffffff) + return PCIBIOS_BAD_REGISTER_NUMBER; + data = value << (8*n); + crp_write((where & ~3) | byte_enables, data); + return PCIBIOS_SUCCESSFUL; +} + +static u32 byte_lane_enable_bits(u32 n, int size) +{ + if (size == 1) + return (0xf & ~BIT(n)) << 4; + if (size == 2) + return (0xf & ~(BIT(n) | BIT(n+1))) << 4; + if (size == 4) + return 0; + return 0xffffffff; +} + +static int read_config(u8 bus_num, u16 devfn, int where, int size, u32 *value) +{ + u32 n, byte_enables, addr, data; + + pr_debug("read_config from %d size %d dev %d:%d:%d\n", where, size, + bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + *value = 0xffffffff; + n = where % 4; + byte_enables = byte_lane_enable_bits(n, size); + if (byte_enables == 0xffffffff) + return PCIBIOS_BAD_REGISTER_NUMBER; + + addr = ixp4xx_config_addr(bus_num, devfn, where); + if (ixp4xx_pci_read(addr, byte_enables | NP_CMD_CONFIGREAD, &data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + *value = (data >> (8*n)) & bytemask[size]; + pr_debug("read_config_byte read %#x\n", *value); + return PCIBIOS_SUCCESSFUL; +} + +static int 
write_config(u8 bus_num, u16 devfn, int where, int size, u32 value) +{ + u32 n, byte_enables, addr, data; + + pr_debug("write_config_byte %#x to %d size %d dev %d:%d:%d\n", value, where, + size, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn)); + + n = where % 4; + byte_enables = byte_lane_enable_bits(n, size); + if (byte_enables == 0xffffffff) + return PCIBIOS_BAD_REGISTER_NUMBER; + + addr = ixp4xx_config_addr(bus_num, devfn, where); + data = value << (8*n); + if (ixp4xx_pci_write(addr, byte_enables | NP_CMD_CONFIGWRITE, data)) + return PCIBIOS_DEVICE_NOT_FOUND; + + return PCIBIOS_SUCCESSFUL; +} + +/* + * Generalized PCI config access functions. + */ +static int ixp4xx_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + if (bus->number && !PCI_SLOT(devfn)) + return local_read_config(where, size, value); + return read_config(bus->number, devfn, where, size, value); +} + +static int ixp4xx_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + if (bus->number && !PCI_SLOT(devfn)) + return local_write_config(where, size, value); + return write_config(bus->number, devfn, where, size, value); +} + +struct pci_ops ixp4xx_ops = { + .read = ixp4xx_read_config, + .write = ixp4xx_write_config, +}; + + +/* + * PCI abort handler + */ +static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *regs) +{ + u32 isr, status; + + isr = *PCI_ISR; + local_read_config(PCI_STATUS, 2, &status); + pr_debug("PCI: abort_handler addr = %#lx, isr = %#x, " + "status = %#x\n", addr, isr, status); + + /* make sure the Master Abort bit is reset */ + *PCI_ISR = PCI_ISR_PFE; + status |= PCI_STATUS_REC_MASTER_ABORT; + local_write_config(PCI_STATUS, 2, status); + + /* + * If it was an imprecise abort, then we need to correct the + * return address to be _after_ the instruction. + */ + if (fsr & (1 << 10)) + regs->ARM_pc += 4; + + return 0; +} + + +/* + * Setup DMA mask to 64MB on PCI devices. Ignore all other devices. 
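+ *
+ * In practice this pins both dma_mask and coherent_dma_mask to
+ * SZ_64M - 1 (0x03ffffff) for every device on the PCI bus and
+ * registers the device with dmabounce (the 2048/4096 arguments being
+ * the small and large bounce-buffer sizes).  dma_needs_bounce() below
+ * then forces any mapping that ends at or above the 64MB boundary -
+ * for example a buffer at physical 0x05000000 - through a bounce
+ * buffer.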
+ */ +static int ixp4xx_pci_platform_notify(struct device *dev) +{ + if(dev->bus == &pci_bus_type) { + *dev->dma_mask = SZ_64M - 1; + dev->coherent_dma_mask = SZ_64M - 1; + dmabounce_register_dev(dev, 2048, 4096); + } + return 0; +} + +static int ixp4xx_pci_platform_notify_remove(struct device *dev) +{ + if(dev->bus == &pci_bus_type) { + dmabounce_unregister_dev(dev); + } + return 0; +} + +int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) +{ + return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M); +} + +void __init ixp4xx_pci_preinit(void) +{ + unsigned long processor_id; + + asm("mrc p15, 0, %0, cr0, cr0, 0;" : "=r"(processor_id) :); + + /* + * Determine which PCI read method to use + */ + if (!(processor_id & 0xf)) { + printk("PCI: IXP4xx A0 silicon detected - " + "PCI Non-Prefetch Workaround Enabled\n"); + ixp4xx_pci_read = ixp4xx_pci_read_errata; + } else + ixp4xx_pci_read = ixp4xx_pci_read_no_errata; + + + /* hook in our fault handler for PCI errors */ + hook_fault_code(16+6, abort_handler, SIGBUS, "imprecise external abort"); + + pr_debug("setup PCI-AHB(inbound) and AHB-PCI(outbound) address mappings\n"); + + /* + * We use identity AHB->PCI address translation + * in the 0x48000000 to 0x4bffffff address space + */ + *PCI_PCIMEMBASE = 0x48494A4B; + + /* + * We also use identity PCI->AHB address translation + * in 4 16MB BARs that begin at the physical memory start + */ + *PCI_AHBMEMBASE = (PHYS_OFFSET & 0xFF000000) + + ((PHYS_OFFSET & 0xFF000000) >> 8) + + ((PHYS_OFFSET & 0xFF000000) >> 16) + + ((PHYS_OFFSET & 0xFF000000) >> 24) + + 0x00010203; + + if (*PCI_CSR & PCI_CSR_HOST) { + printk("PCI: IXP4xx is host\n"); + + pr_debug("setup BARs in controller\n"); + + /* + * We configure the PCI inbound memory windows to be + * 1:1 mapped to SDRAM + */ + local_write_config(PCI_BASE_ADDRESS_0, 4, PHYS_OFFSET + 0x00000000); + local_write_config(PCI_BASE_ADDRESS_1, 4, PHYS_OFFSET + 0x01000000); + local_write_config(PCI_BASE_ADDRESS_2, 4, PHYS_OFFSET + 0x02000000); + local_write_config(PCI_BASE_ADDRESS_3, 4, PHYS_OFFSET + 0x03000000); + + /* + * Enable CSR window at 0xff000000. + */ + local_write_config(PCI_BASE_ADDRESS_4, 4, 0xff000008); + + /* + * Enable the IO window to be way up high, at 0xfffffc00 + */ + local_write_config(PCI_BASE_ADDRESS_5, 4, 0xfffffc01); + } else { + printk("PCI: IXP4xx is target - No bus scan performed\n"); + } + + printk("PCI: IXP4xx Using %s access for memory space\n", +#ifndef CONFIG_IXP4XX_INDIRECT_PCI + "direct" +#else + "indirect" +#endif + ); + + pr_debug("clear error bits in ISR\n"); + *PCI_ISR = PCI_ISR_PSE | PCI_ISR_PFE | PCI_ISR_PPE | PCI_ISR_AHBE; + + /* + * Set Initialize Complete in PCI Control Register: allow IXP4XX to + * respond to PCI configuration cycles. Specify that the AHB bus is + * operating in big endian mode. Set up byte lane swapping between + * little-endian PCI and the big-endian AHB bus + */ +#ifdef __ARMEB__ + *PCI_CSR = PCI_CSR_IC | PCI_CSR_ABE | PCI_CSR_PDS | PCI_CSR_ADS; +#else + *PCI_CSR = PCI_CSR_IC; +#endif + + pr_debug("DONE\n"); +} + +int ixp4xx_setup(int nr, struct pci_sys_data *sys) +{ + struct resource *res; + + if (nr >= 1) + return 0; + + res = kmalloc(sizeof(*res) * 2, GFP_KERNEL); + if (res == NULL) { + /* + * If we're out of memory this early, something is wrong, + * so we might as well catch it here. 
+ */ + panic("PCI: unable to allocate resources?\n"); + } + memset(res, 0, sizeof(*res) * 2); + + local_write_config(PCI_COMMAND, 2, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY); + + res[0].name = "PCI I/O Space"; + res[0].start = 0x00001000; + res[0].end = 0xffff0000; + res[0].flags = IORESOURCE_IO; + + res[1].name = "PCI Memory Space"; + res[1].start = 0x48000000; +#ifndef CONFIG_IXP4XX_INDIRECT_PCI + res[1].end = 0x4bffffff; +#else + res[1].end = 0x4fffffff; +#endif + res[1].flags = IORESOURCE_MEM; + + request_resource(&ioport_resource, &res[0]); + request_resource(&iomem_resource, &res[1]); + + sys->resource[0] = &res[0]; + sys->resource[1] = &res[1]; + sys->resource[2] = NULL; + + platform_notify = ixp4xx_pci_platform_notify; + platform_notify_remove = ixp4xx_pci_platform_notify_remove; + + return 1; +} + +struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys) +{ + return pci_scan_bus(sys->busnr, &ixp4xx_ops, sys); +} + +/* + * We override these so we properly do dmabounce otherwise drivers + * are able to set the dma_mask to 0xffffffff and we can no longer + * trap bounces. :( + * + * We just return true on everyhing except for < 64MB in which case + * we will fail miseralby and die since we can't handle that case. + */ +int +pci_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + if (mask >= SZ_64M - 1 ) + return 0; + + return -EIO; +} + +int +pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask) +{ + if (mask >= SZ_64M - 1 ) + return 0; + + return -EIO; +} + +int +pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask) +{ + if (mask >= SZ_64M - 1 ) + return 0; + + return -EIO; +} + +EXPORT_SYMBOL(pci_set_dma_mask); +EXPORT_SYMBOL(pci_dac_set_dma_mask); +EXPORT_SYMBOL(pci_set_consistent_dma_mask); + diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c new file mode 100644 index 000000000..f0166508d --- /dev/null +++ b/arch/arm/mach-ixp4xx/common.c @@ -0,0 +1,263 @@ +/* + * arch/arm/mach-ixp4xx/common.c + * + * Generic code shared across all IXP4XX platforms + * + * Maintainer: Deepak Saxena + * + * Copyright 2002 (c) Intel Corporation + * Copyright 2003-2004 (c) MontaVista, Software, Inc. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + + +/************************************************************************* + * GPIO acces functions + *************************************************************************/ + +/* + * Configure GPIO line for input, interrupt, or output operation + * + * TODO: Enable/disable the irq_desc based on interrupt or output mode. + * TODO: Should these be named ixp4xx_gpio_? 
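+ *
+ * Typical board-level usage, as in the *-pci.c files in this
+ * directory: a PCI interrupt pin is set up with
+ *
+ *	gpio_line_config(pin, IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW);
+ *	gpio_line_isr_clear(pin);
+ *
+ * i.e. input, active-low level interrupt, with any stale latched
+ * status cleared.  IXP4XX_GPIO_OUT simply turns the line into an
+ * output with no interrupt style programmed.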
+ */ +void gpio_line_config(u8 line, u32 style) +{ + u32 enable; + volatile u32 *int_reg; + u32 int_style; + + enable = *IXP4XX_GPIO_GPOER; + + if (style & IXP4XX_GPIO_OUT) { + enable &= ~((1) << line); + } else if (style & IXP4XX_GPIO_IN) { + enable |= ((1) << line); + + switch (style & IXP4XX_GPIO_INTSTYLE_MASK) + { + case (IXP4XX_GPIO_ACTIVE_HIGH): + int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH; + break; + case (IXP4XX_GPIO_ACTIVE_LOW): + int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW; + break; + case (IXP4XX_GPIO_RISING_EDGE): + int_style = IXP4XX_GPIO_STYLE_RISING_EDGE; + break; + case (IXP4XX_GPIO_FALLING_EDGE): + int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE; + break; + case (IXP4XX_GPIO_TRANSITIONAL): + int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL; + break; + default: + int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH; + break; + } + + if (line >= 8) { /* pins 8-15 */ + line -= 8; + int_reg = IXP4XX_GPIO_GPIT2R; + } + else { /* pins 0-7 */ + int_reg = IXP4XX_GPIO_GPIT1R; + } + + /* Clear the style for the appropriate pin */ + *int_reg &= ~(IXP4XX_GPIO_STYLE_CLEAR << + (line * IXP4XX_GPIO_STYLE_SIZE)); + + /* Set the new style */ + *int_reg |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE)); + } + + *IXP4XX_GPIO_GPOER = enable; +} + +EXPORT_SYMBOL(gpio_line_config); + +/************************************************************************* + * IXP4xx chipset I/O mapping + *************************************************************************/ +static struct map_desc ixp4xx_io_desc[] __initdata = { + { /* UART, Interrupt ctrl, GPIO, timers, NPEs, MACs, USB .... */ + .virtual = IXP4XX_PERIPHERAL_BASE_VIRT, + .physical = IXP4XX_PERIPHERAL_BASE_PHYS, + .length = IXP4XX_PERIPHERAL_REGION_SIZE, + .type = MT_DEVICE + }, { /* Expansion Bus Config Registers */ + .virtual = IXP4XX_EXP_CFG_BASE_VIRT, + .physical = IXP4XX_EXP_CFG_BASE_PHYS, + .length = IXP4XX_EXP_CFG_REGION_SIZE, + .type = MT_DEVICE + }, { /* PCI Registers */ + .virtual = IXP4XX_PCI_CFG_BASE_VIRT, + .physical = IXP4XX_PCI_CFG_BASE_PHYS, + .length = IXP4XX_PCI_CFG_REGION_SIZE, + .type = MT_DEVICE + } +}; + +void __init ixp4xx_map_io(void) +{ + iotable_init(ixp4xx_io_desc, ARRAY_SIZE(ixp4xx_io_desc)); +} + + +/************************************************************************* + * IXP4xx chipset IRQ handling + * + * TODO: GPIO IRQs should be marked invalid until the user of the IRQ + * (be it PCI or something else) configures that GPIO line + * as an IRQ. Also, we should use a different chip structure for + * level-based GPIO vs edge-based GPIO. Currently nobody needs this as + * all HW that's publically available uses level IRQs, so we'll + * worry about it if/when we have HW to test. + **************************************************************************/ +static void ixp4xx_irq_mask(unsigned int irq) +{ + *IXP4XX_ICMR &= ~(1 << irq); +} + +static void ixp4xx_irq_mask_ack(unsigned int irq) +{ + ixp4xx_irq_mask(irq); +} + +static void ixp4xx_irq_unmask(unsigned int irq) +{ + static int irq2gpio[NR_IRQS] = { + -1, -1, -1, -1, -1, -1, 0, 1, + -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, -1, -1, + }; + int line = irq2gpio[irq]; + + /* + * This only works for LEVEL gpio IRQs as per the IXP4xx developer's + * manual. If edge-triggered, need to move it to the mask_ack. + * Nobody seems to be using the edge-triggered mode on the GPIOs. 
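+ *
+ * The irq2gpio[] table above encodes the fixed IRQ-to-GPIO routing:
+ * IRQs 6 and 7 are GPIO 0 and 1, IRQs 19-29 are GPIO 2-12, everything
+ * else is -1.  So unmasking, say, IRQ 22 first clears the latched
+ * interrupt status of GPIO 5 and only then re-enables the source in
+ * ICMR.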
+ */ + if (line >= 0) + gpio_line_isr_clear(line); + + *IXP4XX_ICMR |= (1 << irq); +} + +static struct irqchip ixp4xx_irq_chip = { + .ack = ixp4xx_irq_mask_ack, + .mask = ixp4xx_irq_mask, + .unmask = ixp4xx_irq_unmask, +}; + +void __init ixp4xx_init_irq(void) +{ + int i = 0; + + /* Route all sources to IRQ instead of FIQ */ + *IXP4XX_ICLR = 0x0; + + /* Disable all interrupt */ + *IXP4XX_ICMR = 0x0; + + for(i = 0; i < NR_IRQS; i++) + { + set_irq_chip(i, &ixp4xx_irq_chip); + set_irq_handler(i, do_level_IRQ); + set_irq_flags(i, IRQF_VALID); + } +} + + +/************************************************************************* + * IXP4xx timer tick + * We use OS timer1 on the CPU for the timer tick and the timestamp + * counter as a source of real clock ticks to account for missed jiffies. + *************************************************************************/ + +static unsigned volatile last_jiffy_time; + +#define CLOCK_TICKS_PER_USEC (CLOCK_TICK_RATE / USEC_PER_SEC) + +/* IRQs are disabled before entering here from do_gettimeofday() */ +static unsigned long ixp4xx_gettimeoffset(void) +{ + u32 elapsed; + + elapsed = *IXP4XX_OSTS - last_jiffy_time; + + return elapsed / CLOCK_TICKS_PER_USEC; +} + +static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + /* Clear Pending Interrupt by writing '1' to it */ + *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND; + + /* + * Catch up with the real idea of time + */ + do { + do_timer(regs); + last_jiffy_time += LATCH; + } while((*IXP4XX_OSTS - last_jiffy_time) > LATCH); + + return IRQ_HANDLED; +} + +extern unsigned long (*gettimeoffset)(void); + +static struct irqaction timer_irq = { + .name = "IXP4xx Timer Tick", + .flags = SA_INTERRUPT +}; + +void __init time_init(void) +{ + gettimeoffset = ixp4xx_gettimeoffset; + timer_irq.handler = ixp4xx_timer_interrupt; + + /* Clear Pending Interrupt by writing '1' to it */ + *IXP4XX_OSST = IXP4XX_OSST_TIMER_1_PEND; + + /* Setup the Timer counter value */ + *IXP4XX_OSRT1 = (LATCH & ~IXP4XX_OST_RELOAD_MASK) | IXP4XX_OST_ENABLE; + + /* Reset time-stamp counter */ + *IXP4XX_OSTS = 0; + last_jiffy_time = 0; + + /* Connect the interrupt handler and enable the interrupt */ + setup_irq(IRQ_IXP4XX_TIMER1, &timer_irq); +} + + diff --git a/arch/arm/mach-ixp4xx/coyote-pci.c b/arch/arm/mach-ixp4xx/coyote-pci.c new file mode 100644 index 000000000..b46c74351 --- /dev/null +++ b/arch/arm/mach-ixp4xx/coyote-pci.c @@ -0,0 +1,69 @@ +/* + * arch/arch/mach-ixp4xx/coyote-pci.c + * + * PCI setup routines for ADI Engineering Coyote platform + * + * Copyright (C) 2002 Jungo Software Technologies. + * Copyright (C) 2003 MontaVista Softwrae, Inc. + * + * Maintainer: Deepak Saxena + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
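+ *
+ * In outline: coyote_pci_preinit() below configures the two slot
+ * interrupt GPIOs as active-low level inputs and clears any latched
+ * status before the generic IXP4xx PCI init runs, and
+ * coyote_map_irq() routes by device ID rather than by interrupt pin -
+ * a device in slot COYOTE_PCI_SLOT0_DEVID always gets
+ * IRQ_COYOTE_PCI_SLOT0, whichever INTx line it asserts.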
+ * + */ + +#include +#include + +#include +#include +#include + +#include + +extern void ixp4xx_pci_preinit(void); +extern int ixp4xx_setup(int nr, struct pci_sys_data *sys); +extern struct pci_bus *ixp4xx_scan_bus(int nr, struct pci_sys_data *sys); + +void __init coyote_pci_preinit(void) +{ + gpio_line_config(COYOTE_PCI_SLOT0_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + + gpio_line_config(COYOTE_PCI_SLOT1_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + + gpio_line_isr_clear(COYOTE_PCI_SLOT0_PIN); + gpio_line_isr_clear(COYOTE_PCI_SLOT1_PIN); + + ixp4xx_pci_preinit(); +} + +static int __init coyote_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + if (slot == COYOTE_PCI_SLOT0_DEVID) + return IRQ_COYOTE_PCI_SLOT0; + else if (slot == COYOTE_PCI_SLOT1_DEVID) + return IRQ_COYOTE_PCI_SLOT1; + else return -1; +} + +struct hw_pci coyote_pci __initdata = { + .nr_controllers = 1, + .preinit = coyote_pci_preinit, + .swizzle = pci_std_swizzle, + .setup = ixp4xx_setup, + .scan = ixp4xx_scan_bus, + .map_irq = coyote_map_irq, +}; + +int __init coyote_pci_init(void) +{ + if (machine_is_adi_coyote()) + pci_common_init(&coyote_pci); + return 0; +} + +subsys_initcall(coyote_pci_init); diff --git a/arch/arm/mach-ixp4xx/ixdp425-pci.c b/arch/arm/mach-ixp4xx/ixdp425-pci.c new file mode 100644 index 000000000..7baa60c2d --- /dev/null +++ b/arch/arm/mach-ixp4xx/ixdp425-pci.c @@ -0,0 +1,84 @@ +/* + * arch/arm/mach-ixp4xx/ixdp425-pci.c + * + * IXDP425 board-level PCI initialization + * + * Copyright (C) 2002 Intel Corporation. + * Copyright (C) 2003-2004 MontaVista Software, Inc. + * + * Maintainer: Deepak Saxena + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
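+ *
+ * For reference, ixdp425_map_irq() below implements the usual
+ * rotating scheme: index (slot + pin - 2) % 4 into
+ * {INTA, INTB, INTC, INTD}.  For slots within IXDP425_PCI_MAX_DEV
+ * that means, for example, slot 1/INTA -> IRQ_IXDP425_PCI_INTA,
+ * slot 2/INTA -> INTB, slot 2/INTB -> INTC, and so on around the
+ * four lines.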
+ * + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +void __init ixdp425_pci_preinit(void) +{ + gpio_line_config(IXDP425_PCI_INTA_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(IXDP425_PCI_INTB_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(IXDP425_PCI_INTC_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(IXDP425_PCI_INTD_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + + gpio_line_isr_clear(IXDP425_PCI_INTA_PIN); + gpio_line_isr_clear(IXDP425_PCI_INTB_PIN); + gpio_line_isr_clear(IXDP425_PCI_INTC_PIN); + gpio_line_isr_clear(IXDP425_PCI_INTD_PIN); + + ixp4xx_pci_preinit(); +} + +static int __init ixdp425_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + static int pci_irq_table[IXDP425_PCI_IRQ_LINES] = { + IRQ_IXDP425_PCI_INTA, + IRQ_IXDP425_PCI_INTB, + IRQ_IXDP425_PCI_INTC, + IRQ_IXDP425_PCI_INTD + }; + + int irq = -1; + + if (slot >= 1 && slot <= IXDP425_PCI_MAX_DEV && + pin >= 1 && pin <= IXDP425_PCI_IRQ_LINES) { + irq = pci_irq_table[(slot + pin - 2) % 4]; + } + + return irq; +} + +struct hw_pci ixdp425_pci __initdata = { + .nr_controllers = 1, + .preinit = ixdp425_pci_preinit, + .swizzle = pci_std_swizzle, + .setup = ixp4xx_setup, + .scan = ixp4xx_scan_bus, + .map_irq = ixdp425_map_irq, +}; + +int __init ixdp425_pci_init(void) +{ + if (machine_is_ixdp425() || + machine_is_ixcdp1100() || + machine_is_avila()) + pci_common_init(&ixdp425_pci); + return 0; +} + +subsys_initcall(ixdp425_pci_init); + diff --git a/arch/arm/mach-ixp4xx/prpmc1100-pci.c b/arch/arm/mach-ixp4xx/prpmc1100-pci.c new file mode 100644 index 000000000..a0aed9ca3 --- /dev/null +++ b/arch/arm/mach-ixp4xx/prpmc1100-pci.c @@ -0,0 +1,119 @@ +/* + * arch/arm/mach-ixp4xx/prpmc1100-pci.c + * + * PrPMC1100 PCI initialization + * + * Copyright (C) 2003-2004 MontaVista Sofwtare, Inc. + * Based on IXDP425 code originally (C) Intel Corporation + * + * Author: Deepak Saxena + * + * PrPMC1100 PCI init code. GPIO usage is similar to that on + * IXDP425, but the IRQ routing is completely different and + * depends on what carrier you are using. This code is written + * to work on the Motorola PrPMC800 ATX carrier board. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
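+ *
+ * The per-IDSEL table in prpmc1100_map_irq() below is indexed by
+ * (slot - PRPMC1100_PCI_MIN_DEVID) and then by pin.  Assuming
+ * PRPMC1100_PCI_MIN_DEVID is 16, as the row comments suggest, a
+ * device at IDSEL 16 (PMC A1) asserting INTA gets
+ * IRQ_PRPMC1100_PCI_INTD, while the P2P bridge at IDSEL 20 has the
+ * straight-through INTA..INTD mapping.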
+ * + */ + +#include +#include +#include + +#include +#include +#include + +#include + + +void __init prpmc1100_pci_preinit(void) +{ + gpio_line_config(PRPMC1100_PCI_INTA_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(PRPMC1100_PCI_INTB_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(PRPMC1100_PCI_INTC_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + gpio_line_config(PRPMC1100_PCI_INTD_PIN, + IXP4XX_GPIO_IN | IXP4XX_GPIO_ACTIVE_LOW); + + gpio_line_isr_clear(PRPMC1100_PCI_INTA_PIN); + gpio_line_isr_clear(PRPMC1100_PCI_INTB_PIN); + gpio_line_isr_clear(PRPMC1100_PCI_INTC_PIN); + gpio_line_isr_clear(PRPMC1100_PCI_INTD_PIN); + + ixp4xx_pci_preinit(); +} + + +static int __init prpmc1100_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int irq = -1; + + static int pci_irq_table[][4] = { + { /* IDSEL 16 - PMC A1 */ + IRQ_PRPMC1100_PCI_INTD, + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB, + IRQ_PRPMC1100_PCI_INTC + }, { /* IDSEL 17 - PRPMC-A-B */ + IRQ_PRPMC1100_PCI_INTD, + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB, + IRQ_PRPMC1100_PCI_INTC + }, { /* IDSEL 18 - PMC A1-B */ + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB, + IRQ_PRPMC1100_PCI_INTC, + IRQ_PRPMC1100_PCI_INTD + }, { /* IDSEL 19 - Unused */ + 0, 0, 0, 0 + }, { /* IDSEL 20 - P2P Bridge */ + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB, + IRQ_PRPMC1100_PCI_INTC, + IRQ_PRPMC1100_PCI_INTD + }, { /* IDSEL 21 - PMC A2 */ + IRQ_PRPMC1100_PCI_INTC, + IRQ_PRPMC1100_PCI_INTD, + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB + }, { /* IDSEL 22 - PMC A2-B */ + IRQ_PRPMC1100_PCI_INTD, + IRQ_PRPMC1100_PCI_INTA, + IRQ_PRPMC1100_PCI_INTB, + IRQ_PRPMC1100_PCI_INTC + }, + }; + + if (slot >= PRPMC1100_PCI_MIN_DEVID && slot <= PRPMC1100_PCI_MAX_DEVID + && pin >= 1 && pin <= PRPMC1100_PCI_IRQ_LINES) { + irq = pci_irq_table[slot - PRPMC1100_PCI_MIN_DEVID][pin - 1]; + } + + return irq; +} + + +struct hw_pci prpmc1100_pci __initdata = { + .nr_controllers = 1, + .preinit = prpmc1100_pci_preinit, + .swizzle = pci_std_swizzle, + .setup = ixp4xx_setup, + .scan = ixp4xx_scan_bus, + .map_irq = prpmc1100_map_irq, +}; + +int __init prpmc1100_pci_init(void) +{ + if (machine_is_prpmc1100()) + pci_common_init(&prpmc1100_pci); + return 0; +} + +subsys_initcall(prpmc1100_pci_init); + diff --git a/arch/arm/mach-ixp4xx/prpmc1100-setup.c b/arch/arm/mach-ixp4xx/prpmc1100-setup.c new file mode 100644 index 000000000..b0603205d --- /dev/null +++ b/arch/arm/mach-ixp4xx/prpmc1100-setup.c @@ -0,0 +1,90 @@ +/* + * arch/arm/mach-ixp4xx/prpmc1100-setup.c + * + * Motorola PrPMC1100 board setup + * + * Copyright (C) 2003-2004 MontaVista Software, Inc. 
+ * + * Author: Deepak Saxena + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __ARMEB__ +#define REG_OFFSET 3 +#else +#define REG_OFFSET 0 +#endif + +/* + * Only one serial port is connected on the PrPMC1100 + */ +static struct uart_port prpmc1100_serial_port = { + .membase = (char*)(IXP4XX_UART1_BASE_VIRT + REG_OFFSET), + .mapbase = (IXP4XX_UART1_BASE_PHYS), + .irq = IRQ_IXP4XX_UART1, + .flags = UPF_SKIP_TEST, + .iotype = UPIO_MEM, + .regshift = 2, + .uartclk = IXP4XX_UART_XTAL, + .line = 0, + .type = PORT_XSCALE, + .fifosize = 32 +}; + +void __init prpmc1100_map_io(void) +{ + early_serial_setup(&prpmc1100_serial_port); + + ixp4xx_map_io(); +} + +static struct flash_platform_data prpmc1100_flash_data = { + .map_name = "cfi_probe", + .width = 2, +}; + +static struct resource prpmc1100_flash_resource = { + .start = PRPMC1100_FLASH_BASE, + .end = PRPMC1100_FLASH_BASE + PRPMC1100_FLASH_SIZE, + .flags = IORESOURCE_MEM, +}; + +static struct platform_device prpmc1100_flash_device = { + .name = "IXP4XX-Flash", + .id = 0, + .dev = { + .platform_data = &prpmc1100_flash_data, + }, + .num_resources = 1, + .resource = &prpmc1100_flash_resource, +}; + +static void __init prpmc1100_init(void) +{ + platform_add_device(&prpmc1100_flash_device); +} + +MACHINE_START(PRPMC1100, "Motorola PrPMC1100") + MAINTAINER("MontaVista Software, Inc.") + BOOT_MEM(PHYS_OFFSET, IXP4XX_PERIPHERAL_BASE_PHYS, + IXP4XX_PERIPHERAL_BASE_VIRT) + MAPIO(prpmc1100_map_io) + INITIRQ(ixp4xx_init_irq) + BOOT_PARAMS(0x0100) + INIT_MACHINE(prpmc1100_init) +MACHINE_END + diff --git a/arch/arm/mach-lh7a40x/time.c b/arch/arm/mach-lh7a40x/time.c new file mode 100644 index 000000000..61ada24c7 --- /dev/null +++ b/arch/arm/mach-lh7a40x/time.c @@ -0,0 +1,67 @@ +/* + * arch/arm/mach-lh7a40x/time.c + * + * Copyright (C) 2004 Logic Product Development + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ * + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#if HZ < 100 +# define TIMER_CONTROL TIMER_CONTROL1 +# define TIMER_LOAD TIMER_LOAD1 +# define TIMER_CONSTANT (508469/HZ) +# define TIMER_MODE (TIMER_C_ENABLE | TIMER_C_PERIODIC | TIMER_C_508KHZ) +# define TIMER_EOI TIMER_EOI1 +# define TIMER_IRQ IRQ_T1UI +#else +# define TIMER_CONTROL TIMER_CONTROL3 +# define TIMER_LOAD TIMER_LOAD3 +# define TIMER_CONSTANT (3686400/HZ) +# define TIMER_MODE (TIMER_C_ENABLE | TIMER_C_PERIODIC) +# define TIMER_EOI TIMER_EOI3 +# define TIMER_IRQ IRQ_T3UI +#endif + +static irqreturn_t +lh7a40x_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + TIMER_EOI = 0; + timer_tick(regs); + + return IRQ_HANDLED; +} + +static struct irqaction lh7a40x_timer_irq = { + .name = "LHA740x Timer Tick", + .flags = SA_INTERRUPT, + .handler = lh7a40x_timer_interrupt +}; + +void __init lh7a40x_init_time(void) +{ + /* Stop/disable all timers */ + TIMER_CONTROL1 = 0; + TIMER_CONTROL2 = 0; + TIMER_CONTROL3 = 0; + + setup_irq (TIMER_IRQ, &lh7a40x_timer_irq); + + TIMER_LOAD = TIMER_CONSTANT; + TIMER_CONTROL = TIMER_MODE; +} + diff --git a/arch/arm/mach-omap/time.c b/arch/arm/mach-omap/time.c new file mode 100644 index 000000000..cb3c5d6f7 --- /dev/null +++ b/arch/arm/mach-omap/time.c @@ -0,0 +1,217 @@ +/* + * arch/arm/mach-omap/time.c + * + * OMAP Timer Tick + * + * Copyright (C) 2000 RidgeRun, Inc. + * Author: Greg Lonnon + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef __instrument +#define __instrument +#define __noinstrument __attribute__ ((no_instrument_function)) +#endif + +typedef struct { + u32 cntl; /* CNTL_TIMER, R/W */ + u32 load_tim; /* LOAD_TIM, W */ + u32 read_tim; /* READ_TIM, R */ +} mputimer_regs_t; + +#define mputimer_base(n) \ + ((volatile mputimer_regs_t*)IO_ADDRESS(OMAP_MPUTIMER_BASE + \ + (n)*OMAP_MPUTIMER_OFFSET)) + +static inline unsigned long timer32k_read(int reg) { + unsigned long val; + val = omap_readw(reg + OMAP_32kHz_TIMER_BASE); + return val; +} +static inline void timer32k_write(int reg,int val) { + omap_writew(val, reg + OMAP_32kHz_TIMER_BASE); +} + +/* + * How long is the timer interval? 100 HZ, right... 
+ * IRQ rate = (TVR + 1) / 32768 seconds + * TVR = 32768 * IRQ_RATE -1 + * IRQ_RATE = 1/100 + * TVR = 326 + */ +#define TIMER32k_PERIOD 326 +//#define TIMER32k_PERIOD 0x7ff + +static inline void start_timer32k(void) { + timer32k_write(TIMER32k_CR, + TIMER32k_TSS | TIMER32k_TRB | + TIMER32k_INT | TIMER32k_ARL); +} + +#ifdef CONFIG_MACH_OMAP_PERSEUS2 +/* + * After programming PTV with 0 and setting the MPUTIM_CLOCK_ENABLE + * (external clock enable) bit, the timer count rate is 6.5 MHz (13 + * MHZ input/2). !! The divider by 2 is undocumented !! + */ +#define MPUTICKS_PER_SEC (13000000/2) +#else +/* + * After programming PTV with 0, the timer count rate is 6 MHz. + * WARNING! this must be an even number, or machinecycles_to_usecs + * below will break. + */ +#define MPUTICKS_PER_SEC (12000000/2) +#endif + +static int mputimer_started[3] = {0,0,0}; + +static inline void __noinstrument start_mputimer(int n, + unsigned long load_val) +{ + volatile mputimer_regs_t* timer = mputimer_base(n); + + mputimer_started[n] = 0; + timer->cntl = MPUTIM_CLOCK_ENABLE; + udelay(1); + + timer->load_tim = load_val; + udelay(1); + timer->cntl = (MPUTIM_CLOCK_ENABLE | MPUTIM_AR | MPUTIM_ST); + mputimer_started[n] = 1; +} + +static inline unsigned long __noinstrument +read_mputimer(int n) +{ + volatile mputimer_regs_t* timer = mputimer_base(n); + return (mputimer_started[n] ? timer->read_tim : 0); +} + +void __noinstrument start_mputimer1(unsigned long load_val) +{ + start_mputimer(0, load_val); +} +void __noinstrument start_mputimer2(unsigned long load_val) +{ + start_mputimer(1, load_val); +} +void __noinstrument start_mputimer3(unsigned long load_val) +{ + start_mputimer(2, load_val); +} + +unsigned long __noinstrument read_mputimer1(void) +{ + return read_mputimer(0); +} +unsigned long __noinstrument read_mputimer2(void) +{ + return read_mputimer(1); +} +unsigned long __noinstrument read_mputimer3(void) +{ + return read_mputimer(2); +} + +unsigned long __noinstrument do_getmachinecycles(void) +{ + return 0 - read_mputimer(0); +} + +unsigned long __noinstrument machinecycles_to_usecs(unsigned long mputicks) +{ + /* Round up to nearest usec */ + return ((mputicks * 1000) / (MPUTICKS_PER_SEC / 2 / 1000) + 1) >> 1; +} + +/* + * This marks the time of the last system timer interrupt + * that was *processed by the ISR* (timer 2). + */ +static unsigned long systimer_mark; + +static unsigned long omap_gettimeoffset(void) +{ + /* Return elapsed usecs since last system timer ISR */ + return machinecycles_to_usecs(do_getmachinecycles() - systimer_mark); +} + +static irqreturn_t +omap_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + unsigned long now, ilatency; + + /* + * Mark the time at which the timer interrupt ocurred using + * timer1. We need to remove interrupt latency, which we can + * retrieve from the current system timer2 counter. Both the + * offset timer1 and the system timer2 are counting at 6MHz, + * so we're ok. 
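+ *
+ * A worked example (numbers hypothetical): timer2 is started with a
+ * load of MPUTICKS_PER_SEC/100 - 1 and counts down, so on a 6MHz part
+ * a readback of 59990 at this point gives ilatency = 60000 - 59990 =
+ * 10 ticks, i.e. this handler ran roughly 1.7us after the hardware
+ * interrupt; subtracting that from 'now' makes systimer_mark the time
+ * of the tick itself rather than of the ISR.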
+ */ + now = 0 - read_mputimer1(); + ilatency = MPUTICKS_PER_SEC / 100 - read_mputimer2(); + systimer_mark = now - ilatency; + + timer_tick(regs); + + return IRQ_HANDLED; +} + +static struct irqaction omap_timer_irq = { + .name = "OMAP Timer Tick", + .flags = SA_INTERRUPT, + .handler = omap_timer_interrupt +}; + +void __init omap_init_time(void) +{ + /* Since we don't call request_irq, we must init the structure */ + gettimeoffset = omap_gettimeoffset; + +#ifdef OMAP1510_USE_32KHZ_TIMER + timer32k_write(TIMER32k_CR, 0x0); + timer32k_write(TIMER32k_TVR,TIMER32k_PERIOD); + setup_irq(INT_OS_32kHz_TIMER, &omap_timer_irq); + start_timer32k(); +#else + setup_irq(INT_TIMER2, &omap_timer_irq); + start_mputimer2(MPUTICKS_PER_SEC / 100 - 1); +#endif +} + diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c new file mode 100644 index 000000000..7e8923563 --- /dev/null +++ b/arch/arm/mach-pxa/time.c @@ -0,0 +1,124 @@ +/* + * arch/arm/mach-pxa/time.c + * + * Author: Nicolas Pitre + * Created: Jun 15, 2001 + * Copyright: MontaVista Software Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + + +static inline unsigned long pxa_get_rtc_time(void) +{ + return RCNR; +} + +static int pxa_set_rtc(void) +{ + unsigned long current_time = xtime.tv_sec; + + if (RTSR & RTSR_ALE) { + /* make sure not to forward the clock over an alarm */ + unsigned long alarm = RTAR; + if (current_time >= alarm && alarm >= RCNR) + return -ERESTARTSYS; + } + RCNR = current_time; + return 0; +} + +/* IRQs are disabled before entering here from do_gettimeofday() */ +static unsigned long pxa_gettimeoffset (void) +{ + long ticks_to_match, elapsed, usec; + + /* Get ticks before next timer match */ + ticks_to_match = OSMR0 - OSCR; + + /* We need elapsed ticks since last match */ + elapsed = LATCH - ticks_to_match; + + /* don't get fooled by the workaround in pxa_timer_interrupt() */ + if (elapsed <= 0) + return 0; + + /* Now convert them to usec */ + usec = (unsigned long)(elapsed * (tick_nsec / 1000))/LATCH; + + return usec; +} + +static irqreturn_t +pxa_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + int next_match; + + /* Loop until we get ahead of the free running timer. + * This ensures an exact clock tick count and time accuracy. + * IRQs are disabled inside the loop to ensure coherence between + * lost_ticks (updated in do_timer()) and the match reg value, so we + * can use do_gettimeofday() from interrupt handlers. + * + * HACK ALERT: it seems that the PXA timer regs aren't updated right + * away in all cases when a write occurs. We therefore compare with + * 8 instead of 0 in the while() condition below to avoid missing a + * match if OSCR has already reached the next OSMR value. + * Experience has shown that up to 6 ticks are needed to work around + * this problem, but let's use 8 to be conservative. Note that this + * affect things only when the timer IRQ has been delayed by nearly + * exactly one tick period which should be a pretty rare event. 
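+ *
+ * In other words, with OSMR0 advanced by LATCH each pass: if OSCR has
+ * already moved, say, 3 ticks past the freshly written match value,
+ * then (signed long)(next_match - OSCR) == -3 <= 8 and the loop runs
+ * again, so the tick is accounted for now instead of a full period
+ * late; the "8" merely pads for the delayed register update described
+ * above.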
+ */ + do { + timer_tick(regs); + OSSR = OSSR_M0; /* Clear match on timer 0 */ + next_match = (OSMR0 += LATCH); + } while( (signed long)(next_match - OSCR) <= 8 ); + + return IRQ_HANDLED; +} + +static struct irqaction pxa_timer_irq = { + .name = "PXA Timer Tick", + .flags = SA_INTERRUPT, + .handler = pxa_timer_interrupt +}; + +void __init pxa_init_time(void) +{ + struct timespec tv; + + gettimeoffset = pxa_gettimeoffset; + set_rtc = pxa_set_rtc; + + tv.tv_nsec = 0; + tv.tv_sec = pxa_get_rtc_time(); + do_settimeofday(&tv); + + OSMR0 = 0; /* set initial match at 0 */ + OSSR = 0xf; /* clear status on all timers */ + setup_irq(IRQ_OST0, &pxa_timer_irq); + OIER |= OIER_E0; /* enable match on timer 0 to cause interrupts */ + OSCR = 0; /* initialize free-running timer, force first match */ +} + diff --git a/arch/arm/mach-s3c2410/gpio.c b/arch/arm/mach-s3c2410/gpio.c new file mode 100644 index 000000000..450b132af --- /dev/null +++ b/arch/arm/mach-s3c2410/gpio.c @@ -0,0 +1,98 @@ +/* linux/arch/arm/mach-s3c2410/gpio.c + * + * Copyright (c) 2004 Simtec Electronics + * Ben Dooks + * + * S3C2410 GPIO support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + + +#include +#include +#include +#include + +#include +#include +#include + +#include + +void s3c2410_gpio_cfgpin(unsigned int pin, unsigned int function) +{ + unsigned long base = S3C2410_GPIO_BASE(pin); + unsigned long shift = 1; + unsigned long mask = 3; + unsigned long con; + unsigned long flags; + + if (pin < S3C2410_GPIO_BANKB) { + shift = 0; + mask = 1; + } + + mask <<= S3C2410_GPIO_OFFSET(pin); + + local_irq_save(flags); + + con = __raw_readl(base + 0x00); + + con &= mask << shift; + con |= function; + + __raw_writel(con, base + 0x00); + + local_irq_restore(flags); +} + +void s3c2410_gpio_pullup(unsigned int pin, unsigned int to) +{ + unsigned long base = S3C2410_GPIO_BASE(pin); + unsigned long offs = S3C2410_GPIO_OFFSET(pin); + unsigned long flags; + unsigned long up; + + if (pin < S3C2410_GPIO_BANKB) + return; + + local_irq_save(flags); + + up = __raw_readl(base + 0x08); + up &= 1 << offs; + up |= to << offs; + __raw_writel(up, base + 0x08); + + local_irq_restore(flags); +} + +void s3c2410_gpio_setpin(unsigned int pin, unsigned int to) +{ + unsigned long base = S3C2410_GPIO_BASE(pin); + unsigned long offs = S3C2410_GPIO_OFFSET(pin); + unsigned long flags; + unsigned long dat; + + local_irq_save(flags); + + dat = __raw_readl(base + 0x04); + dat &= 1 << offs; + dat |= to << offs; + __raw_writel(dat, base + 0x04); + + local_irq_restore(flags); +} diff --git a/arch/arm/mach-s3c2410/mach-smdk2410.c b/arch/arm/mach-s3c2410/mach-smdk2410.c new file mode 100644 index 000000000..4e0282b12 --- /dev/null +++ b/arch/arm/mach-s3c2410/mach-smdk2410.c @@ -0,0 +1,109 @@ +/*********************************************************************** + * + * 
linux/arch/arm/mach-s3c2410/mach-smdk2410.c + * + * Copyright (C) 2004 by FS Forth-Systeme GmbH + * All rights reserved. + * + * $Id: mach-smdk2410.c,v 1.1 2004/05/11 14:15:38 mpietrek Exp $ + * @Author: Jonas Dietsche + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + * + * @History: + * derived from linux/arch/arm/mach-s3c2410/mach-bast.c, written by + * Ben Dooks + ***********************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include + +#include "s3c2410.h" + + +static struct map_desc smdk2410_iodesc[] __initdata = { + /* nothing here yet */ +}; + +#define UCON S3C2410_UCON_DEFAULT +#define ULCON S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB +#define UFCON S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE + +/* base baud rate for all our UARTs */ +static unsigned long smdk2410_serial_clock = 24*1000*1000; + +static struct s3c2410_uartcfg smdk2410_uartcfgs[] = { + [0] = { + .hwport = 0, + .flags = 0, + .clock = &smdk2410_serial_clock, + .ucon = UCON, + .ulcon = ULCON, + .ufcon = UFCON, + }, + [1] = { + .hwport = 1, + .flags = 0, + .clock = &smdk2410_serial_clock, + .ucon = UCON, + .ulcon = ULCON, + .ufcon = UFCON, + }, + [2] = { + .hwport = 2, + .flags = 0, + .clock = &smdk2410_serial_clock, + .ucon = UCON, + .ulcon = ULCON, + .ufcon = UFCON, + } +}; + + +void __init smdk2410_map_io(void) +{ + s3c2410_map_io(smdk2410_iodesc, ARRAY_SIZE(smdk2410_iodesc)); + s3c2410_uartcfgs = smdk2410_uartcfgs; +} + +void __init smdk2410_init_irq(void) +{ + s3c2410_init_irq(); +} + +MACHINE_START(SMDK2410, "SMDK2410") /* @TODO: request a new identifier and switch + * to SMDK2410 */ + MAINTAINER("Jonas Dietsche") + BOOT_MEM(S3C2410_SDRAM_PA, S3C2410_PA_UART, S3C2410_VA_UART) + BOOT_PARAMS(S3C2410_SDRAM_PA + 0x100) + MAPIO(smdk2410_map_io) + INITIRQ(smdk2410_init_irq) +MACHINE_END diff --git a/arch/arm/mach-s3c2410/time.c b/arch/arm/mach-s3c2410/time.c new file mode 100644 index 000000000..62d89d98c --- /dev/null +++ b/arch/arm/mach-s3c2410/time.c @@ -0,0 +1,176 @@ +/* linux/include/asm-arm/arch-s3c2410/time.h + * + * Copyright (C) 2003 Simtec Electronics + * Ben Dooks, + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static unsigned long timer_startval; +static unsigned long timer_ticks_usec; + +#ifdef CONFIG_S3C2410_RTC +extern void s3c2410_rtc_check(); +#endif + +/* with an 12MHz clock, we get 12 ticks per-usec + */ + + +/*** + * Returns microsecond since last clock interrupt. Note that interrupts + * will have been disabled by do_gettimeoffset() + * IRQs are disabled before entering here from do_gettimeofday() + */ +static unsigned long s3c2410_gettimeoffset (void) +{ + unsigned long tdone; + unsigned long usec; + + /* work out how many ticks have gone since last timer interrupt */ + + tdone = timer_startval - __raw_readl(S3C2410_TCNTO(4)); + + /* currently, tcnt is in 12MHz units, but this may change + * for non-bast machines... + */ + + usec = tdone / timer_ticks_usec; + + return usec; +} + + +/* + * IRQ handler for the timer + */ +static irqreturn_t +s3c2410_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + timer_tick(regs); + + return IRQ_HANDLED; +} + +static struct irqaction s3c2410_timer_irq = { + .name = "S32410 Timer Tick", + .flags = SA_INTERRUPT, + .handler = s3c2410_timer_interrupt +}; + +/* + * Set up timer interrupt, and return the current time in seconds. + * + * Currently we only use timer4, as it is the only timer which has no + * other function that can be exploited externally + */ +void __init s3c2410_init_time (void) +{ + unsigned long tcon; + unsigned long tcnt; + unsigned long tcfg1; + unsigned long tcfg0; + + gettimeoffset = s3c2410_gettimeoffset; + + tcnt = 0xffff; /* default value for tcnt */ + + /* read the current timer configuration bits */ + + tcon = __raw_readl(S3C2410_TCON); + tcfg1 = __raw_readl(S3C2410_TCFG1); + tcfg0 = __raw_readl(S3C2410_TCFG0); + + /* configure the system for whichever machine is in use */ + + if (machine_is_bast() || machine_is_vr1000()) { + timer_ticks_usec = 12; /* timer is at 12MHz */ + tcnt = (timer_ticks_usec * (1000*1000)) / HZ; + } + + /* for the h1940, we use the pclk from the core to generate + * the timer values. since 67.5MHz is not a value we can directly + * generate the timer value from, we need to pre-scale and + * divied before using it. + * + * overall divsior to get 200Hz is 337500 + * we can fit tcnt if we pre-scale by 6, producing a tick rate + * of 11.25MHz, and a tcnt of 56250. + */ + + if (machine_is_h1940() || machine_is_smdk2410() ) { + timer_ticks_usec = s3c2410_pclk / (1000*1000); + timer_ticks_usec /= 6; + + tcfg1 &= ~S3C2410_TCFG1_MUX4_MASK; + tcfg1 |= S3C2410_TCFG1_MUX4_DIV2; + + tcfg0 &= ~S3C2410_TCFG_PRESCALER1_MASK; + tcfg0 |= ((6 - 1) / 2) << S3C2410_TCFG_PRESCALER1_SHIFT; + + tcnt = (s3c2410_pclk / 6) / HZ; + } + + + printk("setup_timer tcon=%08lx, tcnt %04lx, tcfg %08lx,%08lx\n", + tcon, tcnt, tcfg0, tcfg1); + + /* check to see if timer is within 16bit range... */ + if (tcnt > 0xffff) { + panic("setup_timer: HZ is too small, cannot configure timer!"); + return; + } + + __raw_writel(tcfg1, S3C2410_TCFG1); + __raw_writel(tcfg0, S3C2410_TCFG0); + + timer_startval = tcnt; + __raw_writel(tcnt, S3C2410_TCNTB(4)); + + /* ensure timer is stopped... 
*/ + + tcon &= ~(7<<20); + tcon |= S3C2410_TCON_T4RELOAD; + tcon |= S3C2410_TCON_T4MANUALUPD; + + __raw_writel(tcon, S3C2410_TCON); + __raw_writel(tcnt, S3C2410_TCNTB(4)); + __raw_writel(tcnt, S3C2410_TCMPB(4)); + + setup_irq(IRQ_TIMER4, &s3c2410_timer_irq); + + /* start the timer running */ + tcon |= S3C2410_TCON_T4START; + tcon &= ~S3C2410_TCON_T4MANUALUPD; + __raw_writel(tcon, S3C2410_TCON); +} + + + diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c new file mode 100644 index 000000000..dcbb3c751 --- /dev/null +++ b/arch/arm/mach-sa1100/collie.c @@ -0,0 +1,144 @@ +/* + * linux/arch/arm/mach-sa1100/collie.c + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * This file contains all Collie-specific tweaks. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ChangeLog: + * 03-06-2004 John Lenz + * 06-04-2002 Chris Larson + * 04-16-2001 Lineo Japan,Inc. ... + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "generic.h" + +static void __init scoop_init(void) +{ + +#define COLLIE_SCP_INIT_DATA(adr,dat) (((adr)<<16)|(dat)) +#define COLLIE_SCP_INIT_DATA_END ((unsigned long)-1) + static const unsigned long scp_init[] = { + COLLIE_SCP_INIT_DATA(COLLIE_SCP_MCR, 0x0140), // 00 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_MCR, 0x0100), + COLLIE_SCP_INIT_DATA(COLLIE_SCP_CDR, 0x0000), // 04 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_CPR, 0x0000), // 0C + COLLIE_SCP_INIT_DATA(COLLIE_SCP_CCR, 0x0000), // 10 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_IMR, 0x0000), // 18 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_IRM, 0x00FF), // 14 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_ISR, 0x0000), // 1C + COLLIE_SCP_INIT_DATA(COLLIE_SCP_IRM, 0x0000), + COLLIE_SCP_INIT_DATA(COLLIE_SCP_GPCR, COLLIE_SCP_IO_DIR), // 20 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_GPWR, COLLIE_SCP_IO_OUT), // 24 + COLLIE_SCP_INIT_DATA_END + }; + int i; + for (i = 0; scp_init[i] != COLLIE_SCP_INIT_DATA_END; i++) { + int adr = scp_init[i] >> 16; + COLLIE_SCP_REG(adr) = scp_init[i] & 0xFFFF; + } + +} + +static struct resource locomo_resources[] = { + [0] = { + .start = 0x40000000, + .end = 0x40001fff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = IRQ_GPIO25, + .end = IRQ_GPIO25, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device locomo_device = { + .name = "locomo", + .id = 0, + .num_resources = ARRAY_SIZE(locomo_resources), + .resource = locomo_resources, +}; + +static struct platform_device *devices[] __initdata = { + &locomo_device, +}; + +static void __init collie_init(void) +{ + int ret = 0; + + /* cpu initialize */ + GAFR = ( GPIO_SSP_TXD | \ + GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SSP_CLK | GPIO_TIC_ACK | \ + GPIO_32_768kHz ); + + GPDR = ( GPIO_LDD8 | GPIO_LDD9 | GPIO_LDD10 | GPIO_LDD11 | GPIO_LDD12 | \ + GPIO_LDD13 | GPIO_LDD14 | GPIO_LDD15 | GPIO_SSP_TXD | \ + GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SDLC_SCLK | \ + GPIO_SDLC_AAF | GPIO_UART_SCLK1 | GPIO_32_768kHz ); + GPLR = GPIO_GPIO18; + + // PPC pin setting + PPDR = ( PPC_LDD0 | PPC_LDD1 | PPC_LDD2 | PPC_LDD3 | PPC_LDD4 | PPC_LDD5 | \ + PPC_LDD6 | PPC_LDD7 | PPC_L_PCLK | PPC_L_LCLK | PPC_L_FCLK | PPC_L_BIAS | \ + PPC_TXD1 | PPC_TXD2 | PPC_RXD2 | PPC_TXD3 | PPC_TXD4 | PPC_SCLK | PPC_SFRM ); + + PSDR = ( PPC_RXD1 | PPC_RXD2 | 
PPC_RXD3 | PPC_RXD4 ); + + GAFR |= GPIO_32_768kHz; + GPDR |= GPIO_32_768kHz; + TUCR = TUCR_32_768kHz; + + scoop_init(); + + ret = platform_add_devices(devices, ARRAY_SIZE(devices)); + if (ret) { + printk(KERN_WARNING "collie: Unable to register LoCoMo device\n"); + } +} + +static struct map_desc collie_io_desc[] __initdata = { + /* virtual physical length type */ + {0xe8000000, 0x00000000, 0x02000000, MT_DEVICE}, /* 32M main flash (cs0) */ + {0xea000000, 0x08000000, 0x02000000, MT_DEVICE}, /* 32M boot flash (cs1) */ + {0xf0000000, 0x40000000, 0x01000000, MT_DEVICE}, /* 16M LOCOMO & SCOOP (cs4) */ +}; + +static void __init collie_map_io(void) +{ + sa1100_map_io(); + iotable_init(collie_io_desc, ARRAY_SIZE(collie_io_desc)); +} + +MACHINE_START(COLLIE, "Sharp-Collie") + BOOT_MEM(0xc0000000, 0x80000000, 0xf8000000) + MAPIO(collie_map_io) + INITIRQ(sa1100_init_irq) + INIT_MACHINE(collie_init) +MACHINE_END diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c new file mode 100644 index 000000000..1c3d082f8 --- /dev/null +++ b/arch/arm/mach-sa1100/time.c @@ -0,0 +1,119 @@ +/* + * linux/arch/arm/mach-sa1100/time.c + * + * Copyright (C) 1998 Deborah Wallach. + * Twiddles (C) 1999 Hugo Fiennes + * + * 2000/03/29 (C) Nicolas Pitre + * Rewritten: big cleanup, much simpler, better HZ accuracy. + * + */ +#include +#include +#include +#include +#include + +#include +#include + +#define RTC_DEF_DIVIDER (32768 - 1) +#define RTC_DEF_TRIM 0 + +static unsigned long __init sa1100_get_rtc_time(void) +{ + /* + * According to the manual we should be able to let RTTR be zero + * and then a default diviser for a 32.768KHz clock is used. + * Apparently this doesn't work, at least for my SA1110 rev 5. + * If the clock divider is uninitialized then reset it to the + * default value to get the 1Hz clock. + */ + if (RTTR == 0) { + RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16); + printk(KERN_WARNING "Warning: uninitialized Real Time Clock\n"); + /* The current RTC value probably doesn't make sense either */ + RCNR = 0; + return 0; + } + return RCNR; +} + +static int sa1100_set_rtc(void) +{ + unsigned long current_time = xtime.tv_sec; + + if (RTSR & RTSR_ALE) { + /* make sure not to forward the clock over an alarm */ + unsigned long alarm = RTAR; + if (current_time >= alarm && alarm >= RCNR) + return -ERESTARTSYS; + } + RCNR = current_time; + return 0; +} + +/* IRQs are disabled before entering here from do_gettimeofday() */ +static unsigned long sa1100_gettimeoffset (void) +{ + unsigned long ticks_to_match, elapsed, usec; + + /* Get ticks before next timer match */ + ticks_to_match = OSMR0 - OSCR; + + /* We need elapsed ticks since last match */ + elapsed = LATCH - ticks_to_match; + + /* Now convert them to usec */ + usec = (unsigned long)(elapsed * (tick_nsec / 1000))/LATCH; + + return usec; +} + +/* + * We will be entered with IRQs enabled. + * + * Loop until we get ahead of the free running timer. + * This ensures an exact clock tick count and time accuracy. + * IRQs are disabled inside the loop to ensure coherence between + * lost_ticks (updated in do_timer()) and the match reg value, so we + * can use do_gettimeofday() from interrupt handlers. 
+ */ +static irqreturn_t +sa1100_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + unsigned int next_match; + + do { + timer_tick(regs); + OSSR = OSSR_M0; /* Clear match on timer 0 */ + next_match = (OSMR0 += LATCH); + } while ((signed long)(next_match - OSCR) <= 0); + + return IRQ_HANDLED; +} + +static struct irqaction sa1100_timer_irq = { + .name = "SA11xx Timer Tick", + .flags = SA_INTERRUPT, + .handler = sa1100_timer_interrupt +}; + +void __init sa1100_init_time(void) +{ + struct timespec tv; + + gettimeoffset = sa1100_gettimeoffset; + set_rtc = sa1100_set_rtc; + + tv.tv_nsec = 0; + tv.tv_sec = sa1100_get_rtc_time(); + do_settimeofday(&tv); + + OSMR0 = 0; /* set initial match at 0 */ + OSSR = 0xf; /* clear status on all timers */ + setup_irq(IRQ_OST0, &sa1100_timer_irq); + OIER |= OIER_E0; /* enable match on timer 0 to cause interrupts */ + OSCR = 0; /* initialize free-running timer, force first match */ +} + diff --git a/arch/arm/mach-versatile/clock.c b/arch/arm/mach-versatile/clock.c new file mode 100644 index 000000000..c1f91fad8 --- /dev/null +++ b/arch/arm/mach-versatile/clock.c @@ -0,0 +1,146 @@ +/* + * linux/arch/arm/mach-versatile/clock.c + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "clock.h" + +static LIST_HEAD(clocks); +static DECLARE_MUTEX(clocks_sem); + +struct clk *clk_get(struct device *dev, const char *id) +{ + struct clk *p, *clk = ERR_PTR(-ENOENT); + + down(&clocks_sem); + list_for_each_entry(p, &clocks, node) { + if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { + clk = p; + break; + } + } + up(&clocks_sem); + + return clk; +} +EXPORT_SYMBOL(clk_get); + +void clk_put(struct clk *clk) +{ + module_put(clk->owner); +} +EXPORT_SYMBOL(clk_put); + +int clk_enable(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_disable); + +int clk_use(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_use); + +void clk_unuse(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_unuse); + +unsigned long clk_get_rate(struct clk *clk) +{ + return clk->rate; +} +EXPORT_SYMBOL(clk_get_rate); + +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + return rate; +} +EXPORT_SYMBOL(clk_round_rate); + +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + int ret = -EIO; +#if 0 // Not yet + if (clk->setvco) { + struct icst525_vco vco; + + vco = icst525_khz_to_vco(clk->params, rate); + clk->rate = icst525_khz(clk->params, vco); + + printk("Clock %s: setting VCO reg params: S=%d R=%d V=%d\n", + clk->name, vco.s, vco.r, vco.v); + + clk->setvco(clk, vco); + ret = 0; + } +#endif + return 0; +} +EXPORT_SYMBOL(clk_set_rate); + +/* + * These are fixed clocks. 
+ */ +static struct clk kmi_clk = { + .name = "KMIREFCLK", + .rate = 24000000, +}; + +static struct clk uart_clk = { + .name = "UARTCLK", + .rate = 24000000, +}; + +static struct clk mmci_clk = { + .name = "MCLK", + .rate = 33000000, +}; + +int clk_register(struct clk *clk) +{ + down(&clocks_sem); + list_add(&clk->node, &clocks); + up(&clocks_sem); + return 0; +} +EXPORT_SYMBOL(clk_register); + +void clk_unregister(struct clk *clk) +{ + down(&clocks_sem); + list_del(&clk->node); + up(&clocks_sem); +} +EXPORT_SYMBOL(clk_unregister); + +static int __init clk_init(void) +{ + clk_register(&kmi_clk); + clk_register(&uart_clk); + clk_register(&mmci_clk); + return 0; +} +arch_initcall(clk_init); diff --git a/arch/arm/mach-versatile/clock.h b/arch/arm/mach-versatile/clock.h new file mode 100644 index 000000000..12e68ecde --- /dev/null +++ b/arch/arm/mach-versatile/clock.h @@ -0,0 +1,25 @@ +/* + * linux/arch/arm/mach-versatile/clock.h + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +struct module; +struct icst525_params; + +struct clk { + struct list_head node; + unsigned long rate; + struct module *owner; + const char *name; + const struct icst525_params *params; + void *data; + void (*setvco)(struct clk *, struct icst525_vco vco); +}; + +int clk_register(struct clk *clk); +void clk_unregister(struct clk *clk); diff --git a/arch/arm/vfp/Makefile b/arch/arm/vfp/Makefile new file mode 100644 index 000000000..afabac31d --- /dev/null +++ b/arch/arm/vfp/Makefile @@ -0,0 +1,12 @@ +# +# linux/arch/arm/vfp/Makefile +# +# Copyright (C) 2001 ARM Limited +# + +# EXTRA_CFLAGS := -DDEBUG +# EXTRA_AFLAGS := -DDEBUG + +obj-y += vfp.o + +vfp-$(CONFIG_VFP) += entry.o vfpmodule.o vfphw.o vfpsingle.o vfpdouble.o diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S new file mode 100644 index 000000000..e9e583b2b --- /dev/null +++ b/arch/arm/vfp/entry.S @@ -0,0 +1,45 @@ +/* + * linux/arch/arm/vfp/entry.S + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Basic entry code, called from the kernel's undefined instruction trap. + * r0 = faulted instruction + * r5 = faulted PC+4 + * r9 = successful return + * r10 = thread_info structure + * lr = failure return + */ +#include +#include +#include +#include + + .globl do_vfp +do_vfp: + ldr r4, .LCvfp + add r10, r10, #TI_VFPSTATE @ r10 = workspace + ldr pc, [r4] @ call VFP entry point + +.LCvfp: + .word vfp_vector + +@ This code is called if the VFP does not exist. It needs to flag the +@ failure to the VFP initialisation code. + + __INIT + .globl vfp_testing_entry +vfp_testing_entry: + ldr r0, VFP_arch_address + str r5, [r0] @ known non-zero value + mov pc, r9 @ we have handled the fault + +VFP_arch_address: + .word VFP_arch + + __FINIT diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h new file mode 100644 index 000000000..98e0f526e --- /dev/null +++ b/arch/arm/vfp/vfp.h @@ -0,0 +1,333 @@ +/* + * linux/arch/arm/vfp/vfp.h + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +static inline u32 vfp_shiftright32jamming(u32 val, unsigned int shift) +{ + if (shift) { + if (shift < 32) + val = val >> shift | ((val << (32 - shift)) != 0); + else + val = val != 0; + } + return val; +} + +static inline u64 vfp_shiftright64jamming(u64 val, unsigned int shift) +{ + if (shift) { + if (shift < 64) + val = val >> shift | ((val << (64 - shift)) != 0); + else + val = val != 0; + } + return val; +} + +static inline u32 vfp_hi64to32jamming(u64 val) +{ + u32 v; + + asm( + "cmp %Q1, #1 @ vfp_hi64to32jamming\n\t" + "movcc %0, %R1\n\t" + "orrcs %0, %R1, #1" + : "=r" (v) : "r" (val) : "cc"); + + return v; +} + +static inline void add128(u64 *resh, u64 *resl, u64 nh, u64 nl, u64 mh, u64 ml) +{ + asm( "adds %Q0, %Q2, %Q4\n\t" + "adcs %R0, %R2, %R4\n\t" + "adcs %Q1, %Q3, %Q5\n\t" + "adc %R1, %R3, %R5" + : "=r" (nl), "=r" (nh) + : "0" (nl), "1" (nh), "r" (ml), "r" (mh) + : "cc"); + *resh = nh; + *resl = nl; +} + +static inline void sub128(u64 *resh, u64 *resl, u64 nh, u64 nl, u64 mh, u64 ml) +{ + asm( "subs %Q0, %Q2, %Q4\n\t" + "sbcs %R0, %R2, %R4\n\t" + "sbcs %Q1, %Q3, %Q5\n\t" + "sbc %R1, %R3, %R5\n\t" + : "=r" (nl), "=r" (nh) + : "0" (nl), "1" (nh), "r" (ml), "r" (mh) + : "cc"); + *resh = nh; + *resl = nl; +} + +static inline void mul64to128(u64 *resh, u64 *resl, u64 n, u64 m) +{ + u32 nh, nl, mh, ml; + u64 rh, rma, rmb, rl; + + nl = n; + ml = m; + rl = (u64)nl * ml; + + nh = n >> 32; + rma = (u64)nh * ml; + + mh = m >> 32; + rmb = (u64)nl * mh; + rma += rmb; + + rh = (u64)nh * mh; + rh += ((u64)(rma < rmb) << 32) + (rma >> 32); + + rma <<= 32; + rl += rma; + rh += (rl < rma); + + *resl = rl; + *resh = rh; +} + +static inline void shift64left(u64 *resh, u64 *resl, u64 n) +{ + *resh = n >> 63; + *resl = n << 1; +} + +static inline u64 vfp_hi64multiply64(u64 n, u64 m) +{ + u64 rh, rl; + mul64to128(&rh, &rl, n, m); + return rh | (rl != 0); +} + +static inline u64 vfp_estimate_div128to64(u64 nh, u64 nl, u64 m) +{ + u64 mh, ml, remh, reml, termh, terml, z; + + if (nh >= m) + return ~0ULL; + mh = m >> 32; + z = (mh << 32 <= nh) ? 0xffffffff00000000ULL : (nh / mh) << 32; + mul64to128(&termh, &terml, m, z); + sub128(&remh, &reml, nh, nl, termh, terml); + ml = m << 32; + while ((s64)remh < 0) { + z -= 0x100000000ULL; + add128(&remh, &reml, remh, reml, mh, ml); + } + remh = (remh << 32) | (reml >> 32); + z |= (mh << 32 <= remh) ? 0xffffffff : remh / mh; + return z; +} + +/* + * Operations on unpacked elements + */ +#define vfp_sign_negate(sign) (sign ^ 0x8000) + +/* + * Single-precision + */ +struct vfp_single { + s16 exponent; + u16 sign; + u32 significand; +}; + +extern s32 vfp_get_float(unsigned int reg); +extern void vfp_put_float(unsigned int reg, s32 val); + +/* + * VFP_SINGLE_MANTISSA_BITS - number of bits in the mantissa + * VFP_SINGLE_EXPONENT_BITS - number of bits in the exponent + * VFP_SINGLE_LOW_BITS - number of low bits in the unpacked significand + * which are not propagated to the float upon packing. 
+ */ +#define VFP_SINGLE_MANTISSA_BITS (23) +#define VFP_SINGLE_EXPONENT_BITS (8) +#define VFP_SINGLE_LOW_BITS (32 - VFP_SINGLE_MANTISSA_BITS - 2) +#define VFP_SINGLE_LOW_BITS_MASK ((1 << VFP_SINGLE_LOW_BITS) - 1) + +/* + * The bit in an unpacked float which indicates that it is a quiet NaN + */ +#define VFP_SINGLE_SIGNIFICAND_QNAN (1 << (VFP_SINGLE_MANTISSA_BITS - 1 + VFP_SINGLE_LOW_BITS)) + +/* + * Operations on packed single-precision numbers + */ +#define vfp_single_packed_sign(v) ((v) & 0x80000000) +#define vfp_single_packed_negate(v) ((v) ^ 0x80000000) +#define vfp_single_packed_abs(v) ((v) & ~0x80000000) +#define vfp_single_packed_exponent(v) (((v) >> VFP_SINGLE_MANTISSA_BITS) & ((1 << VFP_SINGLE_EXPONENT_BITS) - 1)) +#define vfp_single_packed_mantissa(v) ((v) & ((1 << VFP_SINGLE_MANTISSA_BITS) - 1)) + +/* + * Unpack a single-precision float. Note that this returns the magnitude + * of the single-precision float mantissa with the 1. if necessary, + * aligned to bit 30. + */ +static inline void vfp_single_unpack(struct vfp_single *s, s32 val) +{ + u32 significand; + + s->sign = vfp_single_packed_sign(val) >> 16, + s->exponent = vfp_single_packed_exponent(val); + + significand = (u32) val; + significand = (significand << (32 - VFP_SINGLE_MANTISSA_BITS)) >> 2; + if (s->exponent && s->exponent != 255) + significand |= 0x40000000; + s->significand = significand; +} + +/* + * Re-pack a single-precision float. This assumes that the float is + * already normalised such that the MSB is bit 30, _not_ bit 31. + */ +static inline s32 vfp_single_pack(struct vfp_single *s) +{ + u32 val; + val = (s->sign << 16) + + (s->exponent << VFP_SINGLE_MANTISSA_BITS) + + (s->significand >> VFP_SINGLE_LOW_BITS); + return (s32)val; +} + +#define VFP_NUMBER (1<<0) +#define VFP_ZERO (1<<1) +#define VFP_DENORMAL (1<<2) +#define VFP_INFINITY (1<<3) +#define VFP_NAN (1<<4) +#define VFP_NAN_SIGNAL (1<<5) + +#define VFP_QNAN (VFP_NAN) +#define VFP_SNAN (VFP_NAN|VFP_NAN_SIGNAL) + +static inline int vfp_single_type(struct vfp_single *s) +{ + int type = VFP_NUMBER; + if (s->exponent == 255) { + if (s->significand == 0) + type = VFP_INFINITY; + else if (s->significand & VFP_SINGLE_SIGNIFICAND_QNAN) + type = VFP_QNAN; + else + type = VFP_SNAN; + } else if (s->exponent == 0) { + if (s->significand == 0) + type |= VFP_ZERO; + else + type |= VFP_DENORMAL; + } + return type; +} + +#ifndef DEBUG +#define vfp_single_normaliseround(sd,vsd,fpscr,except,func) __vfp_single_normaliseround(sd,vsd,fpscr,except) +u32 __vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions); +#else +u32 vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions, const char *func); +#endif + +/* + * Double-precision + */ +struct vfp_double { + s16 exponent; + u16 sign; + u64 significand; +}; + +extern u64 vfp_get_double(unsigned int reg); +extern void vfp_put_double(unsigned int reg, u64 val); + +#define VFP_DOUBLE_MANTISSA_BITS (52) +#define VFP_DOUBLE_EXPONENT_BITS (11) +#define VFP_DOUBLE_LOW_BITS (64 - VFP_DOUBLE_MANTISSA_BITS - 2) +#define VFP_DOUBLE_LOW_BITS_MASK ((1 << VFP_DOUBLE_LOW_BITS) - 1) + +/* + * The bit in an unpacked double which indicates that it is a quiet NaN + */ +#define VFP_DOUBLE_SIGNIFICAND_QNAN (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1 + VFP_DOUBLE_LOW_BITS)) + +/* + * Operations on packed single-precision numbers + */ +#define vfp_double_packed_sign(v) ((v) & (1ULL << 63)) +#define vfp_double_packed_negate(v) ((v) ^ (1ULL << 63)) +#define vfp_double_packed_abs(v) ((v) & ~(1ULL 
<< 63)) +#define vfp_double_packed_exponent(v) (((v) >> VFP_DOUBLE_MANTISSA_BITS) & ((1 << VFP_DOUBLE_EXPONENT_BITS) - 1)) +#define vfp_double_packed_mantissa(v) ((v) & ((1ULL << VFP_DOUBLE_MANTISSA_BITS) - 1)) + +/* + * Unpack a double-precision float. Note that this returns the magnitude + * of the double-precision float mantissa with the 1. if necessary, + * aligned to bit 62. + */ +static inline void vfp_double_unpack(struct vfp_double *s, s64 val) +{ + u64 significand; + + s->sign = vfp_double_packed_sign(val) >> 48; + s->exponent = vfp_double_packed_exponent(val); + + significand = (u64) val; + significand = (significand << (64 - VFP_DOUBLE_MANTISSA_BITS)) >> 2; + if (s->exponent && s->exponent != 2047) + significand |= (1ULL << 62); + s->significand = significand; +} + +/* + * Re-pack a double-precision float. This assumes that the float is + * already normalised such that the MSB is bit 30, _not_ bit 31. + */ +static inline s64 vfp_double_pack(struct vfp_double *s) +{ + u64 val; + val = ((u64)s->sign << 48) + + ((u64)s->exponent << VFP_DOUBLE_MANTISSA_BITS) + + (s->significand >> VFP_DOUBLE_LOW_BITS); + return (s64)val; +} + +static inline int vfp_double_type(struct vfp_double *s) +{ + int type = VFP_NUMBER; + if (s->exponent == 2047) { + if (s->significand == 0) + type = VFP_INFINITY; + else if (s->significand & VFP_DOUBLE_SIGNIFICAND_QNAN) + type = VFP_QNAN; + else + type = VFP_SNAN; + } else if (s->exponent == 0) { + if (s->significand == 0) + type |= VFP_ZERO; + else + type |= VFP_DENORMAL; + } + return type; +} + +u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func); + +/* + * System registers + */ +extern u32 vfp_get_sys(unsigned int reg); +extern void vfp_put_sys(unsigned int reg, u32 val); + +u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand); diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c new file mode 100644 index 000000000..54649c1ee --- /dev/null +++ b/arch/arm/vfp/vfpdouble.c @@ -0,0 +1,1186 @@ +/* + * linux/arch/arm/vfp/vfpdouble.c + * + * This code is derived in part from John R. Housers softfloat library, which + * carries the following notice: + * + * =========================================================================== + * This C source file is part of the SoftFloat IEC/IEEE Floating-point + * Arithmetic Package, Release 2. + * + * Written by John R. Hauser. This work was made possible in part by the + * International Computer Science Institute, located at Suite 600, 1947 Center + * Street, Berkeley, California 94704. Funding was partially provided by the + * National Science Foundation under grant MIP-9311980. The original version + * of this code was written as part of a project to build a fixed-point vector + * processor in collaboration with the University of California at Berkeley, + * overseen by Profs. Nelson Morgan and John Wawrzynek. More information + * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ + * arithmetic/softfloat.html'. + * + * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort + * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT + * TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO + * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY + * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. 
+ * + * Derivative works are acceptable, even for commercial purposes, so long as + * (1) they include prominent notice that the work is derivative, and (2) they + * include prominent notice akin to these three paragraphs for those parts of + * this code that are retained. + * =========================================================================== + */ +#include +#include +#include +#include + +#include "vfpinstr.h" +#include "vfp.h" + +static struct vfp_double vfp_double_default_qnan = { + .exponent = 2047, + .sign = 0, + .significand = VFP_DOUBLE_SIGNIFICAND_QNAN, +}; + +static void vfp_double_dump(const char *str, struct vfp_double *d) +{ + pr_debug("VFP: %s: sign=%d exponent=%d significand=%016llx\n", + str, d->sign != 0, d->exponent, d->significand); +} + +static void vfp_double_normalise_denormal(struct vfp_double *vd) +{ + int bits = 31 - fls(vd->significand >> 32); + if (bits == 31) + bits = 62 - fls(vd->significand); + + vfp_double_dump("normalise_denormal: in", vd); + + if (bits) { + vd->exponent -= bits - 1; + vd->significand <<= bits; + } + + vfp_double_dump("normalise_denormal: out", vd); +} + +u32 vfp_double_normaliseround(int dd, struct vfp_double *vd, u32 fpscr, u32 exceptions, const char *func) +{ + u64 significand, incr; + int exponent, shift, underflow; + u32 rmode; + + vfp_double_dump("pack: in", vd); + + /* + * Infinities and NaNs are a special case. + */ + if (vd->exponent == 2047 && (vd->significand == 0 || exceptions)) + goto pack; + + /* + * Special-case zero. + */ + if (vd->significand == 0) { + vd->exponent = 0; + goto pack; + } + + exponent = vd->exponent; + significand = vd->significand; + + shift = 32 - fls(significand >> 32); + if (shift == 32) + shift = 64 - fls(significand); + if (shift) { + exponent -= shift; + significand <<= shift; + } + +#ifdef DEBUG + vd->exponent = exponent; + vd->significand = significand; + vfp_double_dump("pack: normalised", vd); +#endif + + /* + * Tiny number? + */ + underflow = exponent < 0; + if (underflow) { + significand = vfp_shiftright64jamming(significand, -exponent); + exponent = 0; +#ifdef DEBUG + vd->exponent = exponent; + vd->significand = significand; + vfp_double_dump("pack: tiny number", vd); +#endif + if (!(significand & ((1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1))) + underflow = 0; + } + + /* + * Select rounding increment. + */ + incr = 0; + rmode = fpscr & FPSCR_RMODE_MASK; + + if (rmode == FPSCR_ROUND_NEAREST) { + incr = 1ULL << VFP_DOUBLE_LOW_BITS; + if ((significand & (1ULL << (VFP_DOUBLE_LOW_BITS + 1))) == 0) + incr -= 1; + } else if (rmode == FPSCR_ROUND_TOZERO) { + incr = 0; + } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vd->sign != 0)) + incr = (1ULL << (VFP_DOUBLE_LOW_BITS + 1)) - 1; + + pr_debug("VFP: rounding increment = 0x%08llx\n", incr); + + /* + * Is our rounding going to overflow? + */ + if ((significand + incr) < significand) { + exponent += 1; + significand = (significand >> 1) | (significand & 1); + incr >>= 1; +#ifdef DEBUG + vd->exponent = exponent; + vd->significand = significand; + vfp_double_dump("pack: overflow", vd); +#endif + } + + /* + * If any of the low bits (which will be shifted out of the + * number) are non-zero, the result is inexact. + */ + if (significand & ((1 << (VFP_DOUBLE_LOW_BITS + 1)) - 1)) + exceptions |= FPSCR_IXC; + + /* + * Do our rounding. + */ + significand += incr; + + /* + * Infinity? 
+ */ + if (exponent >= 2046) { + exceptions |= FPSCR_OFC | FPSCR_IXC; + if (incr == 0) { + vd->exponent = 2045; + vd->significand = 0x7fffffffffffffffULL; + } else { + vd->exponent = 2047; /* infinity */ + vd->significand = 0; + } + } else { + if (significand >> (VFP_DOUBLE_LOW_BITS + 1) == 0) + exponent = 0; + if (exponent || significand > 0x8000000000000000ULL) + underflow = 0; + if (underflow) + exceptions |= FPSCR_UFC; + vd->exponent = exponent; + vd->significand = significand >> 1; + } + + pack: + vfp_double_dump("pack: final", vd); + { + s64 d = vfp_double_pack(vd); + pr_debug("VFP: %s: d(d%d)=%016llx exceptions=%08x\n", func, + dd, d, exceptions); + vfp_put_double(dd, d); + } + return exceptions; +} + +/* + * Propagate the NaN, setting exceptions if it is signalling. + * 'n' is always a NaN. 'm' may be a number, NaN or infinity. + */ +static u32 +vfp_propagate_nan(struct vfp_double *vdd, struct vfp_double *vdn, + struct vfp_double *vdm, u32 fpscr) +{ + struct vfp_double *nan; + int tn, tm = 0; + + tn = vfp_double_type(vdn); + + if (vdm) + tm = vfp_double_type(vdm); + + if (fpscr & FPSCR_DEFAULT_NAN) + /* + * Default NaN mode - always returns a quiet NaN + */ + nan = &vfp_double_default_qnan; + else { + /* + * Contemporary mode - select the first signalling + * NAN, or if neither are signalling, the first + * quiet NAN. + */ + if (tn == VFP_SNAN || (tm != VFP_SNAN && tn == VFP_QNAN)) + nan = vdn; + else + nan = vdm; + /* + * Make the NaN quiet. + */ + nan->significand |= VFP_DOUBLE_SIGNIFICAND_QNAN; + } + + *vdd = *nan; + + /* + * If one was a signalling NAN, raise invalid operation. + */ + return tn == VFP_SNAN || tm == VFP_SNAN ? FPSCR_IOC : 0x100; +} + +/* + * Extended operations + */ +static u32 vfp_double_fabs(int dd, int unused, int dm, u32 fpscr) +{ + vfp_put_double(dd, vfp_double_packed_abs(vfp_get_double(dm))); + return 0; +} + +static u32 vfp_double_fcpy(int dd, int unused, int dm, u32 fpscr) +{ + vfp_put_double(dd, vfp_get_double(dm)); + return 0; +} + +static u32 vfp_double_fneg(int dd, int unused, int dm, u32 fpscr) +{ + vfp_put_double(dd, vfp_double_packed_negate(vfp_get_double(dm))); + return 0; +} + +static u32 vfp_double_fsqrt(int dd, int unused, int dm, u32 fpscr) +{ + struct vfp_double vdm, vdd; + int ret, tm; + + vfp_double_unpack(&vdm, vfp_get_double(dm)); + tm = vfp_double_type(&vdm); + if (tm & (VFP_NAN|VFP_INFINITY)) { + struct vfp_double *vdp = &vdd; + + if (tm & VFP_NAN) + ret = vfp_propagate_nan(vdp, &vdm, NULL, fpscr); + else if (vdm.sign == 0) { + sqrt_copy: + vdp = &vdm; + ret = 0; + } else { + sqrt_invalid: + vdp = &vfp_double_default_qnan; + ret = FPSCR_IOC; + } + vfp_put_double(dd, vfp_double_pack(vdp)); + return ret; + } + + /* + * sqrt(+/- 0) == +/- 0 + */ + if (tm & VFP_ZERO) + goto sqrt_copy; + + /* + * Normalise a denormalised number + */ + if (tm & VFP_DENORMAL) + vfp_double_normalise_denormal(&vdm); + + /* + * sqrt(<0) = invalid + */ + if (vdm.sign) + goto sqrt_invalid; + + vfp_double_dump("sqrt", &vdm); + + /* + * Estimate the square root. + */ + vdd.sign = 0; + vdd.exponent = ((vdm.exponent - 1023) >> 1) + 1023; + vdd.significand = (u64)vfp_estimate_sqrt_significand(vdm.exponent, vdm.significand >> 32) << 31; + + vfp_double_dump("sqrt estimate1", &vdd); + + vdm.significand >>= 1 + (vdm.exponent & 1); + vdd.significand += 2 + vfp_estimate_div128to64(vdm.significand, 0, vdd.significand); + + vfp_double_dump("sqrt estimate2", &vdd); + + /* + * And now adjust. 
+ */ + if ((vdd.significand & VFP_DOUBLE_LOW_BITS_MASK) <= 5) { + if (vdd.significand < 2) { + vdd.significand = ~0ULL; + } else { + u64 termh, terml, remh, reml; + vdm.significand <<= 2; + mul64to128(&termh, &terml, vdd.significand, vdd.significand); + sub128(&remh, &reml, vdm.significand, 0, termh, terml); + while ((s64)remh < 0) { + vdd.significand -= 1; + shift64left(&termh, &terml, vdd.significand); + terml |= 1; + add128(&remh, &reml, remh, reml, termh, terml); + } + vdd.significand |= (remh | reml) != 0; + } + } + vdd.significand = vfp_shiftright64jamming(vdd.significand, 1); + + return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fsqrt"); +} + +/* + * Equal := ZC + * Less than := N + * Greater than := C + * Unordered := CV + */ +static u32 vfp_compare(int dd, int signal_on_qnan, int dm, u32 fpscr) +{ + s64 d, m; + u32 ret = 0; + + m = vfp_get_double(dm); + if (vfp_double_packed_exponent(m) == 2047 && vfp_double_packed_mantissa(m)) { + ret |= FPSCR_C | FPSCR_V; + if (signal_on_qnan || !(vfp_double_packed_mantissa(m) & (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1)))) + /* + * Signalling NaN, or signalling on quiet NaN + */ + ret |= FPSCR_IOC; + } + + d = vfp_get_double(dd); + if (vfp_double_packed_exponent(d) == 2047 && vfp_double_packed_mantissa(d)) { + ret |= FPSCR_C | FPSCR_V; + if (signal_on_qnan || !(vfp_double_packed_mantissa(d) & (1ULL << (VFP_DOUBLE_MANTISSA_BITS - 1)))) + /* + * Signalling NaN, or signalling on quiet NaN + */ + ret |= FPSCR_IOC; + } + + if (ret == 0) { + if (d == m || vfp_double_packed_abs(d | m) == 0) { + /* + * equal + */ + ret |= FPSCR_Z | FPSCR_C; + } else if (vfp_double_packed_sign(d ^ m)) { + /* + * different signs + */ + if (vfp_double_packed_sign(d)) + /* + * d is negative, so d < m + */ + ret |= FPSCR_N; + else + /* + * d is positive, so d > m + */ + ret |= FPSCR_C; + } else if ((vfp_double_packed_sign(d) != 0) ^ (d < m)) { + /* + * d < m + */ + ret |= FPSCR_N; + } else if ((vfp_double_packed_sign(d) != 0) ^ (d > m)) { + /* + * d > m + */ + ret |= FPSCR_C; + } + } + + return ret; +} + +static u32 vfp_double_fcmp(int dd, int unused, int dm, u32 fpscr) +{ + return vfp_compare(dd, 0, dm, fpscr); +} + +static u32 vfp_double_fcmpe(int dd, int unused, int dm, u32 fpscr) +{ + return vfp_compare(dd, 1, dm, fpscr); +} + +static u32 vfp_double_fcmpz(int dd, int unused, int dm, u32 fpscr) +{ + return vfp_compare(dd, 0, -1, fpscr); +} + +static u32 vfp_double_fcmpez(int dd, int unused, int dm, u32 fpscr) +{ + return vfp_compare(dd, 1, -1, fpscr); +} + +static u32 vfp_double_fcvts(int sd, int unused, int dm, u32 fpscr) +{ + struct vfp_double vdm; + struct vfp_single vsd; + int tm; + u32 exceptions = 0; + + vfp_double_unpack(&vdm, vfp_get_double(dm)); + + tm = vfp_double_type(&vdm); + + /* + * If we have a signalling NaN, signal invalid operation. 
+ */ + if (tm == VFP_SNAN) + exceptions = FPSCR_IOC; + + if (tm & VFP_DENORMAL) + vfp_double_normalise_denormal(&vdm); + + vsd.sign = vdm.sign; + vsd.significand = vfp_hi64to32jamming(vdm.significand); + + /* + * If we have an infinity or a NaN, the exponent must be 255 + */ + if (tm & (VFP_INFINITY|VFP_NAN)) { + vsd.exponent = 255; + if (tm & VFP_NAN) + vsd.significand |= VFP_SINGLE_SIGNIFICAND_QNAN; + goto pack_nan; + } else if (tm & VFP_ZERO) + vsd.exponent = 0; + else + vsd.exponent = vdm.exponent - (1023 - 127); + + return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fcvts"); + + pack_nan: + vfp_put_float(sd, vfp_single_pack(&vsd)); + return exceptions; +} + +static u32 vfp_double_fuito(int dd, int unused, int dm, u32 fpscr) +{ + struct vfp_double vdm; + u32 m = vfp_get_float(dm); + + vdm.sign = 0; + vdm.exponent = 1023 + 63 - 1; + vdm.significand = (u64)m; + + return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fuito"); +} + +static u32 vfp_double_fsito(int dd, int unused, int dm, u32 fpscr) +{ + struct vfp_double vdm; + u32 m = vfp_get_float(dm); + + vdm.sign = (m & 0x80000000) >> 16; + vdm.exponent = 1023 + 63 - 1; + vdm.significand = vdm.sign ? -m : m; + + return vfp_double_normaliseround(dd, &vdm, fpscr, 0, "fsito"); +} + +static u32 vfp_double_ftoui(int sd, int unused, int dm, u32 fpscr) +{ + struct vfp_double vdm; + u32 d, exceptions = 0; + int rmode = fpscr & FPSCR_RMODE_MASK; + int tm; + + vfp_double_unpack(&vdm, vfp_get_double(dm)); + + /* + * Do we have a denormalised number? + */ + tm = vfp_double_type(&vdm); + if (tm & VFP_DENORMAL) + exceptions |= FPSCR_IDC; + + if (tm & VFP_NAN) + vdm.sign = 0; + + if (vdm.exponent >= 1023 + 32) { + d = vdm.sign ? 0 : 0xffffffff; + exceptions = FPSCR_IOC; + } else if (vdm.exponent >= 1023 - 1) { + int shift = 1023 + 63 - vdm.exponent; + u64 rem, incr = 0; + + /* + * 2^0 <= m < 2^32-2^8 + */ + d = (vdm.significand << 1) >> shift; + rem = vdm.significand << (65 - shift); + + if (rmode == FPSCR_ROUND_NEAREST) { + incr = 0x8000000000000000ULL; + if ((d & 1) == 0) + incr -= 1; + } else if (rmode == FPSCR_ROUND_TOZERO) { + incr = 0; + } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) { + incr = ~0ULL; + } + + if ((rem + incr) < rem) { + if (d < 0xffffffff) + d += 1; + else + exceptions |= FPSCR_IOC; + } + + if (d && vdm.sign) { + d = 0; + exceptions |= FPSCR_IOC; + } else if (rem) + exceptions |= FPSCR_IXC; + } else { + d = 0; + if (vdm.exponent | vdm.significand) { + exceptions |= FPSCR_IXC; + if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0) + d = 1; + else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) { + d = 0; + exceptions |= FPSCR_IOC; + } + } + } + + pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions); + + vfp_put_float(sd, d); + + return exceptions; +} + +static u32 vfp_double_ftouiz(int sd, int unused, int dm, u32 fpscr) +{ + return vfp_double_ftoui(sd, unused, dm, FPSCR_ROUND_TOZERO); +} + +static u32 vfp_double_ftosi(int sd, int unused, int dm, u32 fpscr) +{ + struct vfp_double vdm; + u32 d, exceptions = 0; + int rmode = fpscr & FPSCR_RMODE_MASK; + + vfp_double_unpack(&vdm, vfp_get_double(dm)); + vfp_double_dump("VDM", &vdm); + + /* + * Do we have denormalised number? 
+ */ + if (vfp_double_type(&vdm) & VFP_DENORMAL) + exceptions |= FPSCR_IDC; + + if (vdm.exponent >= 1023 + 32) { + d = 0x7fffffff; + if (vdm.sign) + d = ~d; + exceptions |= FPSCR_IOC; + } else if (vdm.exponent >= 1023 - 1) { + int shift = 1023 + 63 - vdm.exponent; /* 58 */ + u64 rem, incr = 0; + + d = (vdm.significand << 1) >> shift; + rem = vdm.significand << (65 - shift); + + if (rmode == FPSCR_ROUND_NEAREST) { + incr = 0x8000000000000000ULL; + if ((d & 1) == 0) + incr -= 1; + } else if (rmode == FPSCR_ROUND_TOZERO) { + incr = 0; + } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vdm.sign != 0)) { + incr = ~0ULL; + } + + if ((rem + incr) < rem && d < 0xffffffff) + d += 1; + if (d > 0x7fffffff + (vdm.sign != 0)) { + d = 0x7fffffff + (vdm.sign != 0); + exceptions |= FPSCR_IOC; + } else if (rem) + exceptions |= FPSCR_IXC; + + if (vdm.sign) + d = -d; + } else { + d = 0; + if (vdm.exponent | vdm.significand) { + exceptions |= FPSCR_IXC; + if (rmode == FPSCR_ROUND_PLUSINF && vdm.sign == 0) + d = 1; + else if (rmode == FPSCR_ROUND_MINUSINF && vdm.sign) + d = -1; + } + } + + pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions); + + vfp_put_float(sd, (s32)d); + + return exceptions; +} + +static u32 vfp_double_ftosiz(int dd, int unused, int dm, u32 fpscr) +{ + return vfp_double_ftosi(dd, unused, dm, FPSCR_ROUND_TOZERO); +} + + +static u32 (* const fop_extfns[32])(int dd, int unused, int dm, u32 fpscr) = { + [FEXT_TO_IDX(FEXT_FCPY)] = vfp_double_fcpy, + [FEXT_TO_IDX(FEXT_FABS)] = vfp_double_fabs, + [FEXT_TO_IDX(FEXT_FNEG)] = vfp_double_fneg, + [FEXT_TO_IDX(FEXT_FSQRT)] = vfp_double_fsqrt, + [FEXT_TO_IDX(FEXT_FCMP)] = vfp_double_fcmp, + [FEXT_TO_IDX(FEXT_FCMPE)] = vfp_double_fcmpe, + [FEXT_TO_IDX(FEXT_FCMPZ)] = vfp_double_fcmpz, + [FEXT_TO_IDX(FEXT_FCMPEZ)] = vfp_double_fcmpez, + [FEXT_TO_IDX(FEXT_FCVT)] = vfp_double_fcvts, + [FEXT_TO_IDX(FEXT_FUITO)] = vfp_double_fuito, + [FEXT_TO_IDX(FEXT_FSITO)] = vfp_double_fsito, + [FEXT_TO_IDX(FEXT_FTOUI)] = vfp_double_ftoui, + [FEXT_TO_IDX(FEXT_FTOUIZ)] = vfp_double_ftouiz, + [FEXT_TO_IDX(FEXT_FTOSI)] = vfp_double_ftosi, + [FEXT_TO_IDX(FEXT_FTOSIZ)] = vfp_double_ftosiz, +}; + + + + +static u32 +vfp_double_fadd_nonnumber(struct vfp_double *vdd, struct vfp_double *vdn, + struct vfp_double *vdm, u32 fpscr) +{ + struct vfp_double *vdp; + u32 exceptions = 0; + int tn, tm; + + tn = vfp_double_type(vdn); + tm = vfp_double_type(vdm); + + if (tn & tm & VFP_INFINITY) { + /* + * Two infinities. Are they different signs? + */ + if (vdn->sign ^ vdm->sign) { + /* + * different signs -> invalid + */ + exceptions = FPSCR_IOC; + vdp = &vfp_double_default_qnan; + } else { + /* + * same signs -> valid + */ + vdp = vdn; + } + } else if (tn & VFP_INFINITY && tm & VFP_NUMBER) { + /* + * One infinity and one number -> infinity + */ + vdp = vdn; + } else { + /* + * 'n' is a NaN of some type + */ + return vfp_propagate_nan(vdd, vdn, vdm, fpscr); + } + *vdd = *vdp; + return exceptions; +} + +static u32 +vfp_double_add(struct vfp_double *vdd, struct vfp_double *vdn, + struct vfp_double *vdm, u32 fpscr) +{ + u32 exp_diff; + u64 m_sig; + + if (vdn->significand & (1ULL << 63) || + vdm->significand & (1ULL << 63)) { + pr_info("VFP: bad FP values in %s\n", __func__); + vfp_double_dump("VDN", vdn); + vfp_double_dump("VDM", vdm); + } + + /* + * Ensure that 'n' is the largest magnitude number. Note that + * if 'n' and 'm' have equal exponents, we do not swap them. + * This ensures that NaN propagation works correctly. 
+ */ + if (vdn->exponent < vdm->exponent) { + struct vfp_double *t = vdn; + vdn = vdm; + vdm = t; + } + + /* + * Is 'n' an infinity or a NaN? Note that 'm' may be a number, + * infinity or a NaN here. + */ + if (vdn->exponent == 2047) + return vfp_double_fadd_nonnumber(vdd, vdn, vdm, fpscr); + + /* + * We have two proper numbers, where 'vdn' is the larger magnitude. + * + * Copy 'n' to 'd' before doing the arithmetic. + */ + *vdd = *vdn; + + /* + * Align 'm' with the result. + */ + exp_diff = vdn->exponent - vdm->exponent; + m_sig = vfp_shiftright64jamming(vdm->significand, exp_diff); + + /* + * If the signs are different, we are really subtracting. + */ + if (vdn->sign ^ vdm->sign) { + m_sig = vdn->significand - m_sig; + if ((s64)m_sig < 0) { + vdd->sign = vfp_sign_negate(vdd->sign); + m_sig = -m_sig; + } + } else { + m_sig += vdn->significand; + } + vdd->significand = m_sig; + + return 0; +} + +static u32 +vfp_double_multiply(struct vfp_double *vdd, struct vfp_double *vdn, + struct vfp_double *vdm, u32 fpscr) +{ + vfp_double_dump("VDN", vdn); + vfp_double_dump("VDM", vdm); + + /* + * Ensure that 'n' is the largest magnitude number. Note that + * if 'n' and 'm' have equal exponents, we do not swap them. + * This ensures that NaN propagation works correctly. + */ + if (vdn->exponent < vdm->exponent) { + struct vfp_double *t = vdn; + vdn = vdm; + vdm = t; + pr_debug("VFP: swapping M <-> N\n"); + } + + vdd->sign = vdn->sign ^ vdm->sign; + + /* + * If 'n' is an infinity or NaN, handle it. 'm' may be anything. + */ + if (vdn->exponent == 2047) { + if (vdn->significand || (vdm->exponent == 2047 && vdm->significand)) + return vfp_propagate_nan(vdd, vdn, vdm, fpscr); + if ((vdm->exponent | vdm->significand) == 0) { + *vdd = vfp_double_default_qnan; + return FPSCR_IOC; + } + vdd->exponent = vdn->exponent; + vdd->significand = 0; + return 0; + } + + /* + * If 'm' is zero, the result is always zero. In this case, + * 'n' may be zero or a number, but it doesn't matter which. + */ + if ((vdm->exponent | vdm->significand) == 0) { + vdd->exponent = 0; + vdd->significand = 0; + return 0; + } + + /* + * We add 2 to the destination exponent for the same reason + * as the addition case - though this time we have +1 from + * each input operand. 
+ */ + vdd->exponent = vdn->exponent + vdm->exponent - 1023 + 2; + vdd->significand = vfp_hi64multiply64(vdn->significand, vdm->significand); + + vfp_double_dump("VDD", vdd); + return 0; +} + +#define NEG_MULTIPLY (1 << 0) +#define NEG_SUBTRACT (1 << 1) + +static u32 +vfp_double_multiply_accumulate(int dd, int dn, int dm, u32 fpscr, u32 negate, char *func) +{ + struct vfp_double vdd, vdp, vdn, vdm; + u32 exceptions; + + vfp_double_unpack(&vdn, vfp_get_double(dn)); + if (vdn.exponent == 0 && vdn.significand) + vfp_double_normalise_denormal(&vdn); + + vfp_double_unpack(&vdm, vfp_get_double(dm)); + if (vdm.exponent == 0 && vdm.significand) + vfp_double_normalise_denormal(&vdm); + + exceptions = vfp_double_multiply(&vdp, &vdn, &vdm, fpscr); + if (negate & NEG_MULTIPLY) + vdp.sign = vfp_sign_negate(vdp.sign); + + vfp_double_unpack(&vdn, vfp_get_double(dd)); + if (negate & NEG_SUBTRACT) + vdn.sign = vfp_sign_negate(vdn.sign); + + exceptions |= vfp_double_add(&vdd, &vdn, &vdp, fpscr); + + return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, func); +} + +/* + * Standard operations + */ + +/* + * sd = sd + (sn * sm) + */ +static u32 vfp_double_fmac(int dd, int dn, int dm, u32 fpscr) +{ + return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, 0, "fmac"); +} + +/* + * sd = sd - (sn * sm) + */ +static u32 vfp_double_fnmac(int dd, int dn, int dm, u32 fpscr) +{ + return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_MULTIPLY, "fnmac"); +} + +/* + * sd = -sd + (sn * sm) + */ +static u32 vfp_double_fmsc(int dd, int dn, int dm, u32 fpscr) +{ + return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_SUBTRACT, "fmsc"); +} + +/* + * sd = -sd - (sn * sm) + */ +static u32 vfp_double_fnmsc(int dd, int dn, int dm, u32 fpscr) +{ + return vfp_double_multiply_accumulate(dd, dn, dm, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc"); +} + +/* + * sd = sn * sm + */ +static u32 vfp_double_fmul(int dd, int dn, int dm, u32 fpscr) +{ + struct vfp_double vdd, vdn, vdm; + u32 exceptions; + + vfp_double_unpack(&vdn, vfp_get_double(dn)); + if (vdn.exponent == 0 && vdn.significand) + vfp_double_normalise_denormal(&vdn); + + vfp_double_unpack(&vdm, vfp_get_double(dm)); + if (vdm.exponent == 0 && vdm.significand) + vfp_double_normalise_denormal(&vdm); + + exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr); + return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fmul"); +} + +/* + * sd = -(sn * sm) + */ +static u32 vfp_double_fnmul(int dd, int dn, int dm, u32 fpscr) +{ + struct vfp_double vdd, vdn, vdm; + u32 exceptions; + + vfp_double_unpack(&vdn, vfp_get_double(dn)); + if (vdn.exponent == 0 && vdn.significand) + vfp_double_normalise_denormal(&vdn); + + vfp_double_unpack(&vdm, vfp_get_double(dm)); + if (vdm.exponent == 0 && vdm.significand) + vfp_double_normalise_denormal(&vdm); + + exceptions = vfp_double_multiply(&vdd, &vdn, &vdm, fpscr); + vdd.sign = vfp_sign_negate(vdd.sign); + + return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fnmul"); +} + +/* + * sd = sn + sm + */ +static u32 vfp_double_fadd(int dd, int dn, int dm, u32 fpscr) +{ + struct vfp_double vdd, vdn, vdm; + u32 exceptions; + + vfp_double_unpack(&vdn, vfp_get_double(dn)); + if (vdn.exponent == 0 && vdn.significand) + vfp_double_normalise_denormal(&vdn); + + vfp_double_unpack(&vdm, vfp_get_double(dm)); + if (vdm.exponent == 0 && vdm.significand) + vfp_double_normalise_denormal(&vdm); + + exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr); + + return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fadd"); +} + 
+/* + * sd = sn - sm + */ +static u32 vfp_double_fsub(int dd, int dn, int dm, u32 fpscr) +{ + struct vfp_double vdd, vdn, vdm; + u32 exceptions; + + vfp_double_unpack(&vdn, vfp_get_double(dn)); + if (vdn.exponent == 0 && vdn.significand) + vfp_double_normalise_denormal(&vdn); + + vfp_double_unpack(&vdm, vfp_get_double(dm)); + if (vdm.exponent == 0 && vdm.significand) + vfp_double_normalise_denormal(&vdm); + + /* + * Subtraction is like addition, but with a negated operand. + */ + vdm.sign = vfp_sign_negate(vdm.sign); + + exceptions = vfp_double_add(&vdd, &vdn, &vdm, fpscr); + + return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fsub"); +} + +/* + * sd = sn / sm + */ +static u32 vfp_double_fdiv(int dd, int dn, int dm, u32 fpscr) +{ + struct vfp_double vdd, vdn, vdm; + u32 exceptions = 0; + int tm, tn; + + vfp_double_unpack(&vdn, vfp_get_double(dn)); + vfp_double_unpack(&vdm, vfp_get_double(dm)); + + vdd.sign = vdn.sign ^ vdm.sign; + + tn = vfp_double_type(&vdn); + tm = vfp_double_type(&vdm); + + /* + * Is n a NAN? + */ + if (tn & VFP_NAN) + goto vdn_nan; + + /* + * Is m a NAN? + */ + if (tm & VFP_NAN) + goto vdm_nan; + + /* + * If n and m are infinity, the result is invalid + * If n and m are zero, the result is invalid + */ + if (tm & tn & (VFP_INFINITY|VFP_ZERO)) + goto invalid; + + /* + * If n is infinity, the result is infinity + */ + if (tn & VFP_INFINITY) + goto infinity; + + /* + * If m is zero, raise div0 exceptions + */ + if (tm & VFP_ZERO) + goto divzero; + + /* + * If m is infinity, or n is zero, the result is zero + */ + if (tm & VFP_INFINITY || tn & VFP_ZERO) + goto zero; + + if (tn & VFP_DENORMAL) + vfp_double_normalise_denormal(&vdn); + if (tm & VFP_DENORMAL) + vfp_double_normalise_denormal(&vdm); + + /* + * Ok, we have two numbers, we can perform division. 
+ */ + vdd.exponent = vdn.exponent - vdm.exponent + 1023 - 1; + vdm.significand <<= 1; + if (vdm.significand <= (2 * vdn.significand)) { + vdn.significand >>= 1; + vdd.exponent++; + } + vdd.significand = vfp_estimate_div128to64(vdn.significand, 0, vdm.significand); + if ((vdd.significand & 0x1ff) <= 2) { + u64 termh, terml, remh, reml; + mul64to128(&termh, &terml, vdm.significand, vdd.significand); + sub128(&remh, &reml, vdn.significand, 0, termh, terml); + while ((s64)remh < 0) { + vdd.significand -= 1; + add128(&remh, &reml, remh, reml, 0, vdm.significand); + } + vdd.significand |= (reml != 0); + } + return vfp_double_normaliseround(dd, &vdd, fpscr, 0, "fdiv"); + + vdn_nan: + exceptions = vfp_propagate_nan(&vdd, &vdn, &vdm, fpscr); + pack: + vfp_put_double(dd, vfp_double_pack(&vdd)); + return exceptions; + + vdm_nan: + exceptions = vfp_propagate_nan(&vdd, &vdm, &vdn, fpscr); + goto pack; + + zero: + vdd.exponent = 0; + vdd.significand = 0; + goto pack; + + divzero: + exceptions = FPSCR_DZC; + infinity: + vdd.exponent = 2047; + vdd.significand = 0; + goto pack; + + invalid: + vfp_put_double(dd, vfp_double_pack(&vfp_double_default_qnan)); + return FPSCR_IOC; +} + +static u32 (* const fop_fns[16])(int dd, int dn, int dm, u32 fpscr) = { + [FOP_TO_IDX(FOP_FMAC)] = vfp_double_fmac, + [FOP_TO_IDX(FOP_FNMAC)] = vfp_double_fnmac, + [FOP_TO_IDX(FOP_FMSC)] = vfp_double_fmsc, + [FOP_TO_IDX(FOP_FNMSC)] = vfp_double_fnmsc, + [FOP_TO_IDX(FOP_FMUL)] = vfp_double_fmul, + [FOP_TO_IDX(FOP_FNMUL)] = vfp_double_fnmul, + [FOP_TO_IDX(FOP_FADD)] = vfp_double_fadd, + [FOP_TO_IDX(FOP_FSUB)] = vfp_double_fsub, + [FOP_TO_IDX(FOP_FDIV)] = vfp_double_fdiv, +}; + +#define FREG_BANK(x) ((x) & 0x0c) +#define FREG_IDX(x) ((x) & 3) + +u32 vfp_double_cpdo(u32 inst, u32 fpscr) +{ + u32 op = inst & FOP_MASK; + u32 exceptions = 0; + unsigned int dd = vfp_get_sd(inst); + unsigned int dn = vfp_get_sn(inst); + unsigned int dm = vfp_get_sm(inst); + unsigned int vecitr, veclen, vecstride; + u32 (*fop)(int, int, s32, u32); + + veclen = fpscr & FPSCR_LENGTH_MASK; + vecstride = (1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK)) * 2; + + /* + * If destination bank is zero, vector length is always '1'. + * ARM DDI0100F C5.1.3, C5.3.2. + */ + if (FREG_BANK(dd) == 0) + veclen = 0; + + pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride, + (veclen >> FPSCR_LENGTH_BIT) + 1); + + fop = (op == FOP_EXT) ? fop_extfns[dn] : fop_fns[FOP_TO_IDX(op)]; + if (!fop) + goto invalid; + + for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) { + u32 except; + + if (op == FOP_EXT) + pr_debug("VFP: itr%d (d%u.%u) = op[%u] (d%u.%u)\n", + vecitr >> FPSCR_LENGTH_BIT, + dd >> 1, dd & 1, dn, + dm >> 1, dm & 1); + else + pr_debug("VFP: itr%d (d%u.%u) = (d%u.%u) op[%u] (d%u.%u)\n", + vecitr >> FPSCR_LENGTH_BIT, + dd >> 1, dd & 1, + dn >> 1, dn & 1, + FOP_TO_IDX(op), + dm >> 1, dm & 1); + + except = fop(dd, dn, dm, fpscr); + pr_debug("VFP: itr%d: exceptions=%08x\n", + vecitr >> FPSCR_LENGTH_BIT, except); + + exceptions |= except; + + /* + * This ensures that comparisons only operate on scalars; + * comparisons always return with one FPSCR status bit set. + */ + if (except & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) + break; + + /* + * CHECK: It appears to be undefined whether we stop when + * we encounter an exception. We continue. 
+ */ + + dd = FREG_BANK(dd) + ((FREG_IDX(dd) + vecstride) & 6); + dn = FREG_BANK(dn) + ((FREG_IDX(dn) + vecstride) & 6); + if (FREG_BANK(dm) != 0) + dm = FREG_BANK(dm) + ((FREG_IDX(dm) + vecstride) & 6); + } + return exceptions; + + invalid: + return ~0; +} diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S new file mode 100644 index 000000000..ad3626103 --- /dev/null +++ b/arch/arm/vfp/vfphw.S @@ -0,0 +1,210 @@ +/* + * linux/arch/arm/vfp/vfphw.S + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This code is called from the kernel's undefined instruction trap. + * r9 holds the return address for successful handling. + * lr holds the return address for unrecognised instructions. + * r10 points at the start of the private FP workspace in the thread structure + * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h) + */ +#include +#include +#include "../kernel/entry-header.S" + + .macro DBGSTR, str +#ifdef DEBUG + stmfd sp!, {r0-r3, ip, lr} + add r0, pc, #4 + bl printk + b 1f + .asciz "<7>VFP: \str\n" + .balign 4 +1: ldmfd sp!, {r0-r3, ip, lr} +#endif + .endm + + .macro DBGSTR1, str, arg +#ifdef DEBUG + stmfd sp!, {r0-r3, ip, lr} + mov r1, \arg + add r0, pc, #4 + bl printk + b 1f + .asciz "<7>VFP: \str\n" + .balign 4 +1: ldmfd sp!, {r0-r3, ip, lr} +#endif + .endm + + .macro DBGSTR3, str, arg1, arg2, arg3 +#ifdef DEBUG + stmfd sp!, {r0-r3, ip, lr} + mov r3, \arg3 + mov r2, \arg2 + mov r1, \arg1 + add r0, pc, #4 + bl printk + b 1f + .asciz "<7>VFP: \str\n" + .balign 4 +1: ldmfd sp!, {r0-r3, ip, lr} +#endif + .endm + + +@ VFP hardware support entry point. +@ +@ r0 = faulted instruction +@ r5 = faulted PC+4 +@ r9 = successful return +@ r10 = vfp_state union +@ lr = failure return + + .globl vfp_support_entry +vfp_support_entry: + DBGSTR3 "instr %08x pc %08x state %p", r0, r5, r10 + + VFPFMRX r1, FPEXC @ Is the VFP enabled? + DBGSTR1 "fpexc %08x", r1 + tst r1, #FPEXC_ENABLE + bne look_for_VFP_exceptions @ VFP is already enabled + + DBGSTR1 "enable %x", r10 + ldr r3, last_VFP_context_address + orr r1, r1, #FPEXC_ENABLE @ user FPEXC has the enable bit set + ldr r4, [r3] @ last_VFP_context pointer + bic r2, r1, #FPEXC_EXCEPTION @ make sure exceptions are disabled + cmp r4, r10 + beq check_for_exception @ we are returning to the same + @ process, so the registers are + @ still there. In this case, we do + @ not want to drop a pending exception. + + VFPFMXR FPEXC, r2 @ enable VFP, disable any pending + @ exceptions, so we can get at the + @ rest of it + + @ Save out the current registers to the old thread state + + DBGSTR1 "save old state %p", r4 + cmp r4, #0 + beq no_old_VFP_process + VFPFMRX r2, FPSCR @ current status + VFPFMRX r6, FPINST @ FPINST (always there, rev0 onwards) + tst r1, #FPEXC_FPV2 @ is there an FPINST2 to read? 
+ VFPFMRX r8, FPINST2, NE @ FPINST2 if needed - avoids reading + @ nonexistant reg on rev0 + VFPFSTMIA r4 @ save the working registers + add r4, r4, #8*16+4 + stmia r4, {r1, r2, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 + @ and point r4 at the word at the + @ start of the register dump + +no_old_VFP_process: + DBGSTR1 "load state %p", r10 + str r10, [r3] @ update the last_VFP_context pointer + @ Load the saved state back into the VFP + add r4, r10, #8*16+4 + ldmia r4, {r1, r2, r6, r8} @ load FPEXC, FPSCR, FPINST, FPINST2 + VFPFLDMIA r10 @ reload the working registers while + @ FPEXC is in a safe state + tst r1, #FPEXC_FPV2 @ is there an FPINST2 to write? + VFPFMXR FPINST2, r8, NE @ FPINST2 if needed - avoids writing + @ nonexistant reg on rev0 + VFPFMXR FPINST, r6 + VFPFMXR FPSCR, r2 @ restore status + +check_for_exception: + tst r1, #FPEXC_EXCEPTION + bne process_exception @ might as well handle the pending + @ exception before retrying branch + @ out before setting an FPEXC that + @ stops us reading stuff + VFPFMXR FPEXC, r1 @ restore FPEXC last + sub r5, r5, #4 + str r5, [sp, #S_PC] @ retry the instruction + mov pc, r9 @ we think we have handled things + + +look_for_VFP_exceptions: + tst r1, #FPEXC_EXCEPTION + bne process_exception + VFPFMRX r2, FPSCR + tst r2, #FPSCR_IXE @ IXE doesn't set FPEXC_EXCEPTION ! + bne process_exception + + @ Fall into hand on to next handler - appropriate coproc instr + @ not recognised by VFP + + DBGSTR "not VFP" + mov pc, lr + +process_exception: + DBGSTR "bounce" + sub r5, r5, #4 + str r5, [sp, #S_PC] @ retry the instruction on exit from + @ the imprecise exception handling in + @ the support code + mov r2, sp @ nothing stacked - regdump is at TOS + mov lr, r9 @ setup for a return to the user code. + + @ Now call the C code to package up the bounce to the support code + @ r0 holds the trigger instruction + @ r1 holds the FPEXC value + @ r2 pointer to register dump + b VFP9_bounce @ we have handled this - the support + @ code will raise an exception if + @ required. If not, the user code will + @ retry the faulted instruction + +last_VFP_context_address: + .word last_VFP_context + + .globl vfp_get_float +vfp_get_float: + add pc, pc, r0, lsl #3 + mov r0, r0 + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 + mov pc, lr + mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 + mov pc, lr + .endr + + .globl vfp_put_float +vfp_put_float: + add pc, pc, r0, lsl #3 + mov r0, r0 + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + mcr p10, 0, r1, c\dr, c0, 0 @ fmsr r0, s0 + mov pc, lr + mcr p10, 0, r1, c\dr, c0, 4 @ fmsr r0, s1 + mov pc, lr + .endr + + .globl vfp_get_double +vfp_get_double: + mov r0, r0, lsr #1 + add pc, pc, r0, lsl #3 + mov r0, r0 + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + mrrc p10, 1, r0, r1, c\dr @ fmrrd r0, r1, d\dr + mov pc, lr + .endr + + .globl vfp_put_double +vfp_put_double: + mov r0, r0, lsr #1 + add pc, pc, r0, lsl #3 + mov r0, r0 + .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + mcrr p10, 1, r1, r2, c\dr @ fmrrd r1, r2, d\dr + mov pc, lr + .endr diff --git a/arch/arm/vfp/vfpinstr.h b/arch/arm/vfp/vfpinstr.h new file mode 100644 index 000000000..6c819aeae --- /dev/null +++ b/arch/arm/vfp/vfpinstr.h @@ -0,0 +1,88 @@ +/* + * linux/arch/arm/vfp/vfpinstr.h + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. 
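/*
 * Illustrative sketch (not part of the patch): the lazy context-switch
 * policy that vfp_support_entry implements in assembly above.  The VFP
 * register file is saved and reloaded only when the faulting thread is
 * not the one whose state is already live in the hardware.  All names
 * here are hypothetical stand-ins (vfp_ctx for union vfp_state,
 * vfp_owner for last_VFP_context).
 */
struct vfp_ctx;

static struct vfp_ctx *vfp_owner;

static void vfp_lazy_switch(struct vfp_ctx *me,
			    void (*save)(struct vfp_ctx *),
			    void (*load)(const struct vfp_ctx *))
{
	if (vfp_owner == me)
		return;			/* registers are still ours */
	if (vfp_owner)
		save(vfp_owner);	/* spill the previous owner's state */
	load(me);			/* reload our saved state */
	vfp_owner = me;
}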
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * VFP instruction masks. + */ +#define INST_CPRTDO(inst) (((inst) & 0x0f000000) == 0x0e000000) +#define INST_CPRT(inst) ((inst) & (1 << 4)) +#define INST_CPRT_L(inst) ((inst) & (1 << 20)) +#define INST_CPRT_Rd(inst) (((inst) & (15 << 12)) >> 12) +#define INST_CPRT_OP(inst) (((inst) >> 21) & 7) +#define INST_CPNUM(inst) ((inst) & 0xf00) +#define CPNUM(cp) ((cp) << 8) + +#define FOP_MASK (0x00b00040) +#define FOP_FMAC (0x00000000) +#define FOP_FNMAC (0x00000040) +#define FOP_FMSC (0x00100000) +#define FOP_FNMSC (0x00100040) +#define FOP_FMUL (0x00200000) +#define FOP_FNMUL (0x00200040) +#define FOP_FADD (0x00300000) +#define FOP_FSUB (0x00300040) +#define FOP_FDIV (0x00800000) +#define FOP_EXT (0x00b00040) + +#define FOP_TO_IDX(inst) ((inst & 0x00b00000) >> 20 | (inst & (1 << 6)) >> 4) + +#define FEXT_MASK (0x000f0080) +#define FEXT_FCPY (0x00000000) +#define FEXT_FABS (0x00000080) +#define FEXT_FNEG (0x00010000) +#define FEXT_FSQRT (0x00010080) +#define FEXT_FCMP (0x00040000) +#define FEXT_FCMPE (0x00040080) +#define FEXT_FCMPZ (0x00050000) +#define FEXT_FCMPEZ (0x00050080) +#define FEXT_FCVT (0x00070080) +#define FEXT_FUITO (0x00080000) +#define FEXT_FSITO (0x00080080) +#define FEXT_FTOUI (0x000c0000) +#define FEXT_FTOUIZ (0x000c0080) +#define FEXT_FTOSI (0x000d0000) +#define FEXT_FTOSIZ (0x000d0080) + +#define FEXT_TO_IDX(inst) ((inst & 0x000f0000) >> 15 | (inst & (1 << 7)) >> 7) + +#define vfp_get_sd(inst) ((inst & 0x0000f000) >> 11 | (inst & (1 << 22)) >> 22) +#define vfp_get_dd(inst) ((inst & 0x0000f000) >> 12) +#define vfp_get_sm(inst) ((inst & 0x0000000f) << 1 | (inst & (1 << 5)) >> 5) +#define vfp_get_dm(inst) ((inst & 0x0000000f)) +#define vfp_get_sn(inst) ((inst & 0x000f0000) >> 15 | (inst & (1 << 7)) >> 7) +#define vfp_get_dn(inst) ((inst & 0x000f0000) >> 16) + +#define vfp_single(inst) (((inst) & 0x0000f00) == 0xa00) + +#define FPSCR_N (1 << 31) +#define FPSCR_Z (1 << 30) +#define FPSCR_C (1 << 29) +#define FPSCR_V (1 << 28) + +/* + * Since we aren't building with -mfpu=vfp, we need to code + * these instructions using their MRC/MCR equivalents. + */ +#define vfpreg(_vfp_) #_vfp_ + +#define fmrx(_vfp_) ({ \ + u32 __v; \ + asm("mrc%? p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmrx %0, " #_vfp_ \ + : "=r" (__v)); \ + __v; \ + }) + +#define fmxr(_vfp_,_var_) \ + asm("mcr%? p10, 7, %0, " vfpreg(_vfp_) ", cr0, 0 @ fmxr " #_vfp_ ", %0" \ + : : "r" (_var_)) + +u32 vfp_single_cpdo(u32 inst, u32 fpscr); +u32 vfp_single_cprt(u32 inst, u32 fpscr, struct pt_regs *regs); + +u32 vfp_double_cpdo(u32 inst, u32 fpscr); diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c new file mode 100644 index 000000000..3aeedd2af --- /dev/null +++ b/arch/arm/vfp/vfpmodule.c @@ -0,0 +1,288 @@ +/* + * linux/arch/arm/vfp/vfpmodule.c + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
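/*
 * Illustrative sketch (not part of the patch): FOP_TO_IDX() above packs
 * bits 23:20 (masked by 0x00b00000) and bit 6 of the instruction word
 * into a dense index, which is what lets the 16-entry fop_fns[] tables in
 * vfpsingle.c and vfpdouble.c dispatch directly.  A host-side self-check,
 * assuming the macros above are visible:
 */
#include <assert.h>

static void fop_to_idx_selftest(void)
{
	assert(FOP_TO_IDX(FOP_FMAC)  == 0);
	assert(FOP_TO_IDX(FOP_FMSC)  == 1);
	assert(FOP_TO_IDX(FOP_FMUL)  == 2);
	assert(FOP_TO_IDX(FOP_FADD)  == 3);
	assert(FOP_TO_IDX(FOP_FNMAC) == 4);
	assert(FOP_TO_IDX(FOP_FNMSC) == 5);
	assert(FOP_TO_IDX(FOP_FNMUL) == 6);
	assert(FOP_TO_IDX(FOP_FSUB)  == 7);
	assert(FOP_TO_IDX(FOP_FDIV)  == 8);
	assert(FOP_TO_IDX(FOP_EXT)   == 15);	/* extended ops use fop_extfns[] */
}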
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vfpinstr.h" +#include "vfp.h" + +/* + * Our undef handlers (in entry.S) + */ +void vfp_testing_entry(void); +void vfp_support_entry(void); + +void (*vfp_vector)(void) = vfp_testing_entry; +union vfp_state *last_VFP_context; + +/* + * Dual-use variable. + * Used in startup: set to non-zero if VFP checks fail + * After startup, holds VFP architecture + */ +unsigned int VFP_arch; + +/* + * Per-thread VFP initialisation. + */ +void vfp_flush_thread(union vfp_state *vfp) +{ + memset(vfp, 0, sizeof(union vfp_state)); + + vfp->hard.fpexc = FPEXC_ENABLE; + vfp->hard.fpscr = FPSCR_ROUND_NEAREST; + + /* + * Disable VFP to ensure we initialise it first. + */ + fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_ENABLE); + + /* + * Ensure we don't try to overwrite our newly initialised + * state information on the first fault. + */ + if (last_VFP_context == vfp) + last_VFP_context = NULL; +} + +/* + * Per-thread VFP cleanup. + */ +void vfp_release_thread(union vfp_state *vfp) +{ + if (last_VFP_context == vfp) + last_VFP_context = NULL; +} + +/* + * Raise a SIGFPE for the current process. + * sicode describes the signal being raised. + */ +void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs) +{ + siginfo_t info; + + memset(&info, 0, sizeof(info)); + + info.si_signo = SIGFPE; + info.si_code = sicode; + info.si_addr = (void *)(instruction_pointer(regs) - 4); + + /* + * This is the same as NWFPE, because it's not clear what + * this is used for + */ + current->thread.error_code = 0; + current->thread.trap_no = 6; + + force_sig_info(SIGFPE, &info, current); +} + +static void vfp_panic(char *reason) +{ + int i; + + printk(KERN_ERR "VFP: Error: %s\n", reason); + printk(KERN_ERR "VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n", + fmrx(FPEXC), fmrx(FPSCR), fmrx(FPINST)); + for (i = 0; i < 32; i += 2) + printk(KERN_ERR "VFP: s%2u: 0x%08x s%2u: 0x%08x\n", + i, vfp_get_float(i), i+1, vfp_get_float(i+1)); +} + +/* + * Process bitmask of exception conditions. + */ +static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs) +{ + int si_code = 0; + + pr_debug("VFP: raising exceptions %08x\n", exceptions); + + if (exceptions == (u32)-1) { + vfp_panic("unhandled bounce"); + vfp_raise_sigfpe(0, regs); + return; + } + + /* + * If any of the status flags are set, update the FPSCR. + * Comparison instructions always return at least one of + * these flags set. + */ + if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) + fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V); + + fpscr |= exceptions; + + fmxr(FPSCR, fpscr); + +#define RAISE(stat,en,sig) \ + if (exceptions & stat && fpscr & en) \ + si_code = sig; + + /* + * These are arranged in priority order, least to highest. + */ + RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES); + RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND); + RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF); + RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV); + + if (si_code) + vfp_raise_sigfpe(si_code, regs); +} + +/* + * Emulate a VFP instruction. 
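/*
 * Illustrative sketch (not part of the patch): the RAISE() cascade above
 * runs from lowest to highest priority and each match overwrites si_code,
 * so the signal code that finally reaches vfp_raise_sigfpe() is that of
 * the highest-priority exception which is both flagged and trap-enabled.
 * For example, exceptions = IXC|IOC with both traps enabled ends up as
 * FPE_FLTINV.
 */
static int example_pick_si_code(u32 exceptions, u32 fpscr)
{
	int si_code = 0;

	if ((exceptions & FPSCR_IXC) && (fpscr & FPSCR_IXE)) si_code = FPE_FLTRES;
	if ((exceptions & FPSCR_UFC) && (fpscr & FPSCR_UFE)) si_code = FPE_FLTUND;
	if ((exceptions & FPSCR_OFC) && (fpscr & FPSCR_OFE)) si_code = FPE_FLTOVF;
	if ((exceptions & FPSCR_IOC) && (fpscr & FPSCR_IOE)) si_code = FPE_FLTINV;

	return si_code;
}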
+ */ +static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs) +{ + u32 exceptions = (u32)-1; + + pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr); + + if (INST_CPRTDO(inst)) { + if (!INST_CPRT(inst)) { + /* + * CPDO + */ + if (vfp_single(inst)) { + exceptions = vfp_single_cpdo(inst, fpscr); + } else { + exceptions = vfp_double_cpdo(inst, fpscr); + } + } else { + /* + * A CPRT instruction can not appear in FPINST2, nor + * can it cause an exception. Therefore, we do not + * have to emulate it. + */ + } + } else { + /* + * A CPDT instruction can not appear in FPINST2, nor can + * it cause an exception. Therefore, we do not have to + * emulate it. + */ + } + return exceptions; +} + +/* + * Package up a bounce condition. + */ +void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs) +{ + u32 fpscr, orig_fpscr, exceptions, inst; + + pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc); + + /* + * Enable access to the VFP so we can handle the bounce. + */ + fmxr(FPEXC, fpexc & ~(FPEXC_EXCEPTION|FPEXC_INV|FPEXC_UFC|FPEXC_IOC)); + + orig_fpscr = fpscr = fmrx(FPSCR); + + /* + * If we are running with inexact exceptions enabled, we need to + * emulate the trigger instruction. Note that as we're emulating + * the trigger instruction, we need to increment PC. + */ + if (fpscr & FPSCR_IXE) { + regs->ARM_pc += 4; + goto emulate; + } + + barrier(); + + /* + * Modify fpscr to indicate the number of iterations remaining + */ + if (fpexc & FPEXC_EXCEPTION) { + u32 len; + + len = fpexc + (1 << FPEXC_LENGTH_BIT); + + fpscr &= ~FPSCR_LENGTH_MASK; + fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT); + } + + /* + * Handle the first FP instruction. We used to take note of the + * FPEXC bounce reason, but this appears to be unreliable. + * Emulate the bounced instruction instead. + */ + inst = fmrx(FPINST); + exceptions = vfp_emulate_instruction(inst, fpscr, regs); + if (exceptions) + vfp_raise_exceptions(exceptions, inst, orig_fpscr, regs); + + /* + * If there isn't a second FP instruction, exit now. + */ + if (!(fpexc & FPEXC_FPV2)) + return; + + /* + * The barrier() here prevents fpinst2 being read + * before the condition above. + */ + barrier(); + trigger = fmrx(FPINST2); + fpscr = fmrx(FPSCR); + + emulate: + exceptions = vfp_emulate_instruction(trigger, fpscr, regs); + if (exceptions) + vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs); +} + +/* + * VFP support code initialisation. + */ +static int __init vfp_init(void) +{ + unsigned int vfpsid; + + /* + * First check that there is a VFP that we can use. + * The handler is already setup to just log calls, so + * we just need to read the VFPSID register. 
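/*
 * Illustrative sketch (not part of the patch): vfp_init() below probes the
 * hardware by reading FPSID via fmrx() and slices out the ID fields with
 * the FPSID_*_MASK/_BIT constants (defined in vfp.h, not shown in this
 * hunk).  The field names mirror the printk in vfp_init().
 */
struct vfp_id {
	unsigned int implementer, arch, part, variant, rev;
};

static struct vfp_id decode_fpsid(u32 vfpsid)
{
	struct vfp_id id = {
		.implementer = (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
		.arch        = (vfpsid & FPSID_ARCH_MASK)        >> FPSID_ARCH_BIT,
		.part        = (vfpsid & FPSID_PART_MASK)        >> FPSID_PART_BIT,
		.variant     = (vfpsid & FPSID_VARIANT_MASK)     >> FPSID_VARIANT_BIT,
		.rev         = (vfpsid & FPSID_REV_MASK)         >> FPSID_REV_BIT,
	};
	return id;
}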
+ */ + vfpsid = fmrx(FPSID); + + printk(KERN_INFO "VFP support v0.3: "); + if (VFP_arch) { + printk("not present\n"); + } else if (vfpsid & FPSID_NODOUBLE) { + printk("no double precision support\n"); + } else { + VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */ + printk("implementor %02x architecture %d part %02x variant %x rev %x\n", + (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT, + (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT, + (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT, + (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT, + (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT); + vfp_vector = vfp_support_entry; + } + return 0; +} + +late_initcall(vfp_init); diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c new file mode 100644 index 000000000..92aa84127 --- /dev/null +++ b/arch/arm/vfp/vfpsingle.c @@ -0,0 +1,1224 @@ +/* + * linux/arch/arm/vfp/vfpsingle.c + * + * This code is derived in part from John R. Housers softfloat library, which + * carries the following notice: + * + * =========================================================================== + * This C source file is part of the SoftFloat IEC/IEEE Floating-point + * Arithmetic Package, Release 2. + * + * Written by John R. Hauser. This work was made possible in part by the + * International Computer Science Institute, located at Suite 600, 1947 Center + * Street, Berkeley, California 94704. Funding was partially provided by the + * National Science Foundation under grant MIP-9311980. The original version + * of this code was written as part of a project to build a fixed-point vector + * processor in collaboration with the University of California at Berkeley, + * overseen by Profs. Nelson Morgan and John Wawrzynek. More information + * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ + * arithmetic/softfloat.html'. + * + * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort + * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT + * TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO + * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY + * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + * + * Derivative works are acceptable, even for commercial purposes, so long as + * (1) they include prominent notice that the work is derivative, and (2) they + * include prominent notice akin to these three paragraphs for those parts of + * this code that are retained. 
+ * =========================================================================== + */ +#include +#include +#include +#include + +#include "vfpinstr.h" +#include "vfp.h" + +static struct vfp_single vfp_single_default_qnan = { + .exponent = 255, + .sign = 0, + .significand = VFP_SINGLE_SIGNIFICAND_QNAN, +}; + +static void vfp_single_dump(const char *str, struct vfp_single *s) +{ + pr_debug("VFP: %s: sign=%d exponent=%d significand=%08x\n", + str, s->sign != 0, s->exponent, s->significand); +} + +static void vfp_single_normalise_denormal(struct vfp_single *vs) +{ + int bits = 31 - fls(vs->significand); + + vfp_single_dump("normalise_denormal: in", vs); + + if (bits) { + vs->exponent -= bits - 1; + vs->significand <<= bits; + } + + vfp_single_dump("normalise_denormal: out", vs); +} + +#ifndef DEBUG +#define vfp_single_normaliseround(sd,vsd,fpscr,except,func) __vfp_single_normaliseround(sd,vsd,fpscr,except) +u32 __vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions) +#else +u32 vfp_single_normaliseround(int sd, struct vfp_single *vs, u32 fpscr, u32 exceptions, const char *func) +#endif +{ + u32 significand, incr, rmode; + int exponent, shift, underflow; + + vfp_single_dump("pack: in", vs); + + /* + * Infinities and NaNs are a special case. + */ + if (vs->exponent == 255 && (vs->significand == 0 || exceptions)) + goto pack; + + /* + * Special-case zero. + */ + if (vs->significand == 0) { + vs->exponent = 0; + goto pack; + } + + exponent = vs->exponent; + significand = vs->significand; + + /* + * Normalise first. Note that we shift the significand up to + * bit 31, so we have VFP_SINGLE_LOW_BITS + 1 below the least + * significant bit. + */ + shift = 32 - fls(significand); + if (shift < 32 && shift) { + exponent -= shift; + significand <<= shift; + } + +#ifdef DEBUG + vs->exponent = exponent; + vs->significand = significand; + vfp_single_dump("pack: normalised", vs); +#endif + + /* + * Tiny number? + */ + underflow = exponent < 0; + if (underflow) { + significand = vfp_shiftright32jamming(significand, -exponent); + exponent = 0; +#ifdef DEBUG + vs->exponent = exponent; + vs->significand = significand; + vfp_single_dump("pack: tiny number", vs); +#endif + if (!(significand & ((1 << (VFP_SINGLE_LOW_BITS + 1)) - 1))) + underflow = 0; + } + + /* + * Select rounding increment. + */ + incr = 0; + rmode = fpscr & FPSCR_RMODE_MASK; + + if (rmode == FPSCR_ROUND_NEAREST) { + incr = 1 << VFP_SINGLE_LOW_BITS; + if ((significand & (1 << (VFP_SINGLE_LOW_BITS + 1))) == 0) + incr -= 1; + } else if (rmode == FPSCR_ROUND_TOZERO) { + incr = 0; + } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vs->sign != 0)) + incr = (1 << (VFP_SINGLE_LOW_BITS + 1)) - 1; + + pr_debug("VFP: rounding increment = 0x%08x\n", incr); + + /* + * Is our rounding going to overflow? + */ + if ((significand + incr) < significand) { + exponent += 1; + significand = (significand >> 1) | (significand & 1); + incr >>= 1; +#ifdef DEBUG + vs->exponent = exponent; + vs->significand = significand; + vfp_single_dump("pack: overflow", vs); +#endif + } + + /* + * If any of the low bits (which will be shifted out of the + * number) are non-zero, the result is inexact. + */ + if (significand & ((1 << (VFP_SINGLE_LOW_BITS + 1)) - 1)) + exceptions |= FPSCR_IXC; + + /* + * Do our rounding. + */ + significand += incr; + + /* + * Infinity? 
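/*
 * Illustrative sketch (not part of the patch): the rounding-increment
 * selection above implements IEEE round-to-nearest-even on a significand
 * that carries VFP_SINGLE_LOW_BITS + 1 extra low-order bits below the
 * result's least significant bit.  A simplified analogue for a generic
 * value with 'low' extra bits (the real code also handles overflow of
 * the addition and the other rounding modes):
 */
static u32 round_nearest_even(u32 sig, unsigned int low)
{
	u32 half = 1 << low;			/* half of the result's LSB */
	u32 incr = half;

	if ((sig & (1 << (low + 1))) == 0)	/* result LSB is currently 0, */
		incr -= 1;			/* so a tie must round down   */

	return (sig + incr) >> (low + 1);	/* drop the extra low bits */
}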
+ */ + if (exponent >= 254) { + exceptions |= FPSCR_OFC | FPSCR_IXC; + if (incr == 0) { + vs->exponent = 253; + vs->significand = 0x7fffffff; + } else { + vs->exponent = 255; /* infinity */ + vs->significand = 0; + } + } else { + if (significand >> (VFP_SINGLE_LOW_BITS + 1) == 0) + exponent = 0; + if (exponent || significand > 0x80000000) + underflow = 0; + if (underflow) + exceptions |= FPSCR_UFC; + vs->exponent = exponent; + vs->significand = significand >> 1; + } + + pack: + vfp_single_dump("pack: final", vs); + { + s32 d = vfp_single_pack(vs); + pr_debug("VFP: %s: d(s%d)=%08x exceptions=%08x\n", func, + sd, d, exceptions); + vfp_put_float(sd, d); + } + + return exceptions; +} + +/* + * Propagate the NaN, setting exceptions if it is signalling. + * 'n' is always a NaN. 'm' may be a number, NaN or infinity. + */ +static u32 +vfp_propagate_nan(struct vfp_single *vsd, struct vfp_single *vsn, + struct vfp_single *vsm, u32 fpscr) +{ + struct vfp_single *nan; + int tn, tm = 0; + + tn = vfp_single_type(vsn); + + if (vsm) + tm = vfp_single_type(vsm); + + if (fpscr & FPSCR_DEFAULT_NAN) + /* + * Default NaN mode - always returns a quiet NaN + */ + nan = &vfp_single_default_qnan; + else { + /* + * Contemporary mode - select the first signalling + * NAN, or if neither are signalling, the first + * quiet NAN. + */ + if (tn == VFP_SNAN || (tm != VFP_SNAN && tn == VFP_QNAN)) + nan = vsn; + else + nan = vsm; + /* + * Make the NaN quiet. + */ + nan->significand |= VFP_SINGLE_SIGNIFICAND_QNAN; + } + + *vsd = *nan; + + /* + * If one was a signalling NAN, raise invalid operation. + */ + return tn == VFP_SNAN || tm == VFP_SNAN ? FPSCR_IOC : 0x100; +} + + +/* + * Extended operations + */ +static u32 vfp_single_fabs(int sd, int unused, s32 m, u32 fpscr) +{ + vfp_put_float(sd, vfp_single_packed_abs(m)); + return 0; +} + +static u32 vfp_single_fcpy(int sd, int unused, s32 m, u32 fpscr) +{ + vfp_put_float(sd, m); + return 0; +} + +static u32 vfp_single_fneg(int sd, int unused, s32 m, u32 fpscr) +{ + vfp_put_float(sd, vfp_single_packed_negate(m)); + return 0; +} + +static const u16 sqrt_oddadjust[] = { + 0x0004, 0x0022, 0x005d, 0x00b1, 0x011d, 0x019f, 0x0236, 0x02e0, + 0x039c, 0x0468, 0x0545, 0x0631, 0x072b, 0x0832, 0x0946, 0x0a67 +}; + +static const u16 sqrt_evenadjust[] = { + 0x0a2d, 0x08af, 0x075a, 0x0629, 0x051a, 0x0429, 0x0356, 0x029e, + 0x0200, 0x0179, 0x0109, 0x00af, 0x0068, 0x0034, 0x0012, 0x0002 +}; + +u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand) +{ + int index; + u32 z, a; + + if ((significand & 0xc0000000) != 0x40000000) { + printk(KERN_WARNING "VFP: estimate_sqrt: invalid significand\n"); + } + + a = significand << 1; + index = (a >> 27) & 15; + if (exponent & 1) { + z = 0x4000 + (a >> 17) - sqrt_oddadjust[index]; + z = ((a / z) << 14) + (z << 15); + a >>= 1; + } else { + z = 0x8000 + (a >> 17) - sqrt_evenadjust[index]; + z = a / z + z; + z = (z >= 0x20000) ? 
0xffff8000 : (z << 15); + if (z <= a) + return (s32)a >> 1; + } + return (u32)(((u64)a << 31) / z) + (z >> 1); +} + +static u32 vfp_single_fsqrt(int sd, int unused, s32 m, u32 fpscr) +{ + struct vfp_single vsm, vsd; + int ret, tm; + + vfp_single_unpack(&vsm, m); + tm = vfp_single_type(&vsm); + if (tm & (VFP_NAN|VFP_INFINITY)) { + struct vfp_single *vsp = &vsd; + + if (tm & VFP_NAN) + ret = vfp_propagate_nan(vsp, &vsm, NULL, fpscr); + else if (vsm.sign == 0) { + sqrt_copy: + vsp = &vsm; + ret = 0; + } else { + sqrt_invalid: + vsp = &vfp_single_default_qnan; + ret = FPSCR_IOC; + } + vfp_put_float(sd, vfp_single_pack(vsp)); + return ret; + } + + /* + * sqrt(+/- 0) == +/- 0 + */ + if (tm & VFP_ZERO) + goto sqrt_copy; + + /* + * Normalise a denormalised number + */ + if (tm & VFP_DENORMAL) + vfp_single_normalise_denormal(&vsm); + + /* + * sqrt(<0) = invalid + */ + if (vsm.sign) + goto sqrt_invalid; + + vfp_single_dump("sqrt", &vsm); + + /* + * Estimate the square root. + */ + vsd.sign = 0; + vsd.exponent = ((vsm.exponent - 127) >> 1) + 127; + vsd.significand = vfp_estimate_sqrt_significand(vsm.exponent, vsm.significand) + 2; + + vfp_single_dump("sqrt estimate", &vsd); + + /* + * And now adjust. + */ + if ((vsd.significand & VFP_SINGLE_LOW_BITS_MASK) <= 5) { + if (vsd.significand < 2) { + vsd.significand = 0xffffffff; + } else { + u64 term; + s64 rem; + vsm.significand <<= !(vsm.exponent & 1); + term = (u64)vsd.significand * vsd.significand; + rem = ((u64)vsm.significand << 32) - term; + + pr_debug("VFP: term=%016llx rem=%016llx\n", term, rem); + + while (rem < 0) { + vsd.significand -= 1; + rem += ((u64)vsd.significand << 1) | 1; + } + vsd.significand |= rem != 0; + } + } + vsd.significand = vfp_shiftright32jamming(vsd.significand, 1); + + return vfp_single_normaliseround(sd, &vsd, fpscr, 0, "fsqrt"); +} + +/* + * Equal := ZC + * Less than := N + * Greater than := C + * Unordered := CV + */ +static u32 vfp_compare(int sd, int signal_on_qnan, s32 m, u32 fpscr) +{ + s32 d; + u32 ret = 0; + + d = vfp_get_float(sd); + if (vfp_single_packed_exponent(m) == 255 && vfp_single_packed_mantissa(m)) { + ret |= FPSCR_C | FPSCR_V; + if (signal_on_qnan || !(vfp_single_packed_mantissa(m) & (1 << (VFP_SINGLE_MANTISSA_BITS - 1)))) + /* + * Signalling NaN, or signalling on quiet NaN + */ + ret |= FPSCR_IOC; + } + + if (vfp_single_packed_exponent(d) == 255 && vfp_single_packed_mantissa(d)) { + ret |= FPSCR_C | FPSCR_V; + if (signal_on_qnan || !(vfp_single_packed_mantissa(d) & (1 << (VFP_SINGLE_MANTISSA_BITS - 1)))) + /* + * Signalling NaN, or signalling on quiet NaN + */ + ret |= FPSCR_IOC; + } + + if (ret == 0) { + if (d == m || vfp_single_packed_abs(d | m) == 0) { + /* + * equal + */ + ret |= FPSCR_Z | FPSCR_C; + } else if (vfp_single_packed_sign(d ^ m)) { + /* + * different signs + */ + if (vfp_single_packed_sign(d)) + /* + * d is negative, so d < m + */ + ret |= FPSCR_N; + else + /* + * d is positive, so d > m + */ + ret |= FPSCR_C; + } else if ((vfp_single_packed_sign(d) != 0) ^ (d < m)) { + /* + * d < m + */ + ret |= FPSCR_N; + } else if ((vfp_single_packed_sign(d) != 0) ^ (d > m)) { + /* + * d > m + */ + ret |= FPSCR_C; + } + } + return ret; +} + +static u32 vfp_single_fcmp(int sd, int unused, s32 m, u32 fpscr) +{ + return vfp_compare(sd, 0, m, fpscr); +} + +static u32 vfp_single_fcmpe(int sd, int unused, s32 m, u32 fpscr) +{ + return vfp_compare(sd, 1, m, fpscr); +} + +static u32 vfp_single_fcmpz(int sd, int unused, s32 m, u32 fpscr) +{ + return vfp_compare(sd, 0, 0, fpscr); +} + +static u32 
vfp_single_fcmpez(int sd, int unused, s32 m, u32 fpscr) +{ + return vfp_compare(sd, 1, 0, fpscr); +} + +static u32 vfp_single_fcvtd(int dd, int unused, s32 m, u32 fpscr) +{ + struct vfp_single vsm; + struct vfp_double vdd; + int tm; + u32 exceptions = 0; + + vfp_single_unpack(&vsm, m); + + tm = vfp_single_type(&vsm); + + /* + * If we have a signalling NaN, signal invalid operation. + */ + if (tm == VFP_SNAN) + exceptions = FPSCR_IOC; + + if (tm & VFP_DENORMAL) + vfp_single_normalise_denormal(&vsm); + + vdd.sign = vsm.sign; + vdd.significand = (u64)vsm.significand << 32; + + /* + * If we have an infinity or NaN, the exponent must be 2047. + */ + if (tm & (VFP_INFINITY|VFP_NAN)) { + vdd.exponent = 2047; + if (tm & VFP_NAN) + vdd.significand |= VFP_DOUBLE_SIGNIFICAND_QNAN; + goto pack_nan; + } else if (tm & VFP_ZERO) + vdd.exponent = 0; + else + vdd.exponent = vsm.exponent + (1023 - 127); + + /* + * Technically, if bit 0 of dd is set, this is an invalid + * instruction. However, we ignore this for efficiency. + */ + return vfp_double_normaliseround(dd, &vdd, fpscr, exceptions, "fcvtd"); + + pack_nan: + vfp_put_double(dd, vfp_double_pack(&vdd)); + return exceptions; +} + +static u32 vfp_single_fuito(int sd, int unused, s32 m, u32 fpscr) +{ + struct vfp_single vs; + + vs.sign = 0; + vs.exponent = 127 + 31 - 1; + vs.significand = (u32)m; + + return vfp_single_normaliseround(sd, &vs, fpscr, 0, "fuito"); +} + +static u32 vfp_single_fsito(int sd, int unused, s32 m, u32 fpscr) +{ + struct vfp_single vs; + + vs.sign = (m & 0x80000000) >> 16; + vs.exponent = 127 + 31 - 1; + vs.significand = vs.sign ? -m : m; + + return vfp_single_normaliseround(sd, &vs, fpscr, 0, "fsito"); +} + +static u32 vfp_single_ftoui(int sd, int unused, s32 m, u32 fpscr) +{ + struct vfp_single vsm; + u32 d, exceptions = 0; + int rmode = fpscr & FPSCR_RMODE_MASK; + int tm; + + vfp_single_unpack(&vsm, m); + vfp_single_dump("VSM", &vsm); + + /* + * Do we have a denormalised number? + */ + tm = vfp_single_type(&vsm); + if (tm & VFP_DENORMAL) + exceptions |= FPSCR_IDC; + + if (tm & VFP_NAN) + vsm.sign = 0; + + if (vsm.exponent >= 127 + 32) { + d = vsm.sign ? 
0 : 0xffffffff; + exceptions = FPSCR_IOC; + } else if (vsm.exponent >= 127 - 1) { + int shift = 127 + 31 - vsm.exponent; + u32 rem, incr = 0; + + /* + * 2^0 <= m < 2^32-2^8 + */ + d = (vsm.significand << 1) >> shift; + rem = vsm.significand << (33 - shift); + + if (rmode == FPSCR_ROUND_NEAREST) { + incr = 0x80000000; + if ((d & 1) == 0) + incr -= 1; + } else if (rmode == FPSCR_ROUND_TOZERO) { + incr = 0; + } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vsm.sign != 0)) { + incr = ~0; + } + + if ((rem + incr) < rem) { + if (d < 0xffffffff) + d += 1; + else + exceptions |= FPSCR_IOC; + } + + if (d && vsm.sign) { + d = 0; + exceptions |= FPSCR_IOC; + } else if (rem) + exceptions |= FPSCR_IXC; + } else { + d = 0; + if (vsm.exponent | vsm.significand) { + exceptions |= FPSCR_IXC; + if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0) + d = 1; + else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign) { + d = 0; + exceptions |= FPSCR_IOC; + } + } + } + + pr_debug("VFP: ftoui: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions); + + vfp_put_float(sd, d); + + return exceptions; +} + +static u32 vfp_single_ftouiz(int sd, int unused, s32 m, u32 fpscr) +{ + return vfp_single_ftoui(sd, unused, m, FPSCR_ROUND_TOZERO); +} + +static u32 vfp_single_ftosi(int sd, int unused, s32 m, u32 fpscr) +{ + struct vfp_single vsm; + u32 d, exceptions = 0; + int rmode = fpscr & FPSCR_RMODE_MASK; + + vfp_single_unpack(&vsm, m); + vfp_single_dump("VSM", &vsm); + + /* + * Do we have a denormalised number? + */ + if (vfp_single_type(&vsm) & VFP_DENORMAL) + exceptions |= FPSCR_IDC; + + if (vsm.exponent >= 127 + 32) { + /* + * m >= 2^31-2^7: invalid + */ + d = 0x7fffffff; + if (vsm.sign) + d = ~d; + exceptions |= FPSCR_IOC; + } else if (vsm.exponent >= 127 - 1) { + int shift = 127 + 31 - vsm.exponent; + u32 rem, incr = 0; + + /* 2^0 <= m <= 2^31-2^7 */ + d = (vsm.significand << 1) >> shift; + rem = vsm.significand << (33 - shift); + + if (rmode == FPSCR_ROUND_NEAREST) { + incr = 0x80000000; + if ((d & 1) == 0) + incr -= 1; + } else if (rmode == FPSCR_ROUND_TOZERO) { + incr = 0; + } else if ((rmode == FPSCR_ROUND_PLUSINF) ^ (vsm.sign != 0)) { + incr = ~0; + } + + if ((rem + incr) < rem && d < 0xffffffff) + d += 1; + if (d > 0x7fffffff + (vsm.sign != 0)) { + d = 0x7fffffff + (vsm.sign != 0); + exceptions |= FPSCR_IOC; + } else if (rem) + exceptions |= FPSCR_IXC; + + if (vsm.sign) + d = -d; + } else { + d = 0; + if (vsm.exponent | vsm.significand) { + exceptions |= FPSCR_IXC; + if (rmode == FPSCR_ROUND_PLUSINF && vsm.sign == 0) + d = 1; + else if (rmode == FPSCR_ROUND_MINUSINF && vsm.sign) + d = -1; + } + } + + pr_debug("VFP: ftosi: d(s%d)=%08x exceptions=%08x\n", sd, d, exceptions); + + vfp_put_float(sd, (s32)d); + + return exceptions; +} + +static u32 vfp_single_ftosiz(int sd, int unused, s32 m, u32 fpscr) +{ + return vfp_single_ftosi(sd, unused, m, FPSCR_ROUND_TOZERO); +} + +static u32 (* const fop_extfns[32])(int sd, int unused, s32 m, u32 fpscr) = { + [FEXT_TO_IDX(FEXT_FCPY)] = vfp_single_fcpy, + [FEXT_TO_IDX(FEXT_FABS)] = vfp_single_fabs, + [FEXT_TO_IDX(FEXT_FNEG)] = vfp_single_fneg, + [FEXT_TO_IDX(FEXT_FSQRT)] = vfp_single_fsqrt, + [FEXT_TO_IDX(FEXT_FCMP)] = vfp_single_fcmp, + [FEXT_TO_IDX(FEXT_FCMPE)] = vfp_single_fcmpe, + [FEXT_TO_IDX(FEXT_FCMPZ)] = vfp_single_fcmpz, + [FEXT_TO_IDX(FEXT_FCMPEZ)] = vfp_single_fcmpez, + [FEXT_TO_IDX(FEXT_FCVT)] = vfp_single_fcvtd, + [FEXT_TO_IDX(FEXT_FUITO)] = vfp_single_fuito, + [FEXT_TO_IDX(FEXT_FSITO)] = vfp_single_fsito, + [FEXT_TO_IDX(FEXT_FTOUI)] = vfp_single_ftoui, + 
[FEXT_TO_IDX(FEXT_FTOUIZ)] = vfp_single_ftouiz, + [FEXT_TO_IDX(FEXT_FTOSI)] = vfp_single_ftosi, + [FEXT_TO_IDX(FEXT_FTOSIZ)] = vfp_single_ftosiz, +}; + + + + + +static u32 +vfp_single_fadd_nonnumber(struct vfp_single *vsd, struct vfp_single *vsn, + struct vfp_single *vsm, u32 fpscr) +{ + struct vfp_single *vsp; + u32 exceptions = 0; + int tn, tm; + + tn = vfp_single_type(vsn); + tm = vfp_single_type(vsm); + + if (tn & tm & VFP_INFINITY) { + /* + * Two infinities. Are they different signs? + */ + if (vsn->sign ^ vsm->sign) { + /* + * different signs -> invalid + */ + exceptions = FPSCR_IOC; + vsp = &vfp_single_default_qnan; + } else { + /* + * same signs -> valid + */ + vsp = vsn; + } + } else if (tn & VFP_INFINITY && tm & VFP_NUMBER) { + /* + * One infinity and one number -> infinity + */ + vsp = vsn; + } else { + /* + * 'n' is a NaN of some type + */ + return vfp_propagate_nan(vsd, vsn, vsm, fpscr); + } + *vsd = *vsp; + return exceptions; +} + +static u32 +vfp_single_add(struct vfp_single *vsd, struct vfp_single *vsn, + struct vfp_single *vsm, u32 fpscr) +{ + u32 exp_diff, m_sig; + + if (vsn->significand & 0x80000000 || + vsm->significand & 0x80000000) { + pr_info("VFP: bad FP values in %s\n", __func__); + vfp_single_dump("VSN", vsn); + vfp_single_dump("VSM", vsm); + } + + /* + * Ensure that 'n' is the largest magnitude number. Note that + * if 'n' and 'm' have equal exponents, we do not swap them. + * This ensures that NaN propagation works correctly. + */ + if (vsn->exponent < vsm->exponent) { + struct vfp_single *t = vsn; + vsn = vsm; + vsm = t; + } + + /* + * Is 'n' an infinity or a NaN? Note that 'm' may be a number, + * infinity or a NaN here. + */ + if (vsn->exponent == 255) + return vfp_single_fadd_nonnumber(vsd, vsn, vsm, fpscr); + + /* + * We have two proper numbers, where 'vsn' is the larger magnitude. + * + * Copy 'n' to 'd' before doing the arithmetic. + */ + *vsd = *vsn; + + /* + * Align both numbers. + */ + exp_diff = vsn->exponent - vsm->exponent; + m_sig = vfp_shiftright32jamming(vsm->significand, exp_diff); + + /* + * If the signs are different, we are really subtracting. + */ + if (vsn->sign ^ vsm->sign) { + m_sig = vsn->significand - m_sig; + if ((s32)m_sig < 0) { + vsd->sign = vfp_sign_negate(vsd->sign); + m_sig = -m_sig; + } else if (m_sig == 0) { + vsd->sign = (fpscr & FPSCR_RMODE_MASK) == + FPSCR_ROUND_MINUSINF ? 0x8000 : 0; + } + } else { + m_sig = vsn->significand + m_sig; + } + vsd->significand = m_sig; + + return 0; +} + +static u32 +vfp_single_multiply(struct vfp_single *vsd, struct vfp_single *vsn, struct vfp_single *vsm, u32 fpscr) +{ + vfp_single_dump("VSN", vsn); + vfp_single_dump("VSM", vsm); + + /* + * Ensure that 'n' is the largest magnitude number. Note that + * if 'n' and 'm' have equal exponents, we do not swap them. + * This ensures that NaN propagation works correctly. + */ + if (vsn->exponent < vsm->exponent) { + struct vfp_single *t = vsn; + vsn = vsm; + vsm = t; + pr_debug("VFP: swapping M <-> N\n"); + } + + vsd->sign = vsn->sign ^ vsm->sign; + + /* + * If 'n' is an infinity or NaN, handle it. 'm' may be anything. + */ + if (vsn->exponent == 255) { + if (vsn->significand || (vsm->exponent == 255 && vsm->significand)) + return vfp_propagate_nan(vsd, vsn, vsm, fpscr); + if ((vsm->exponent | vsm->significand) == 0) { + *vsd = vfp_single_default_qnan; + return FPSCR_IOC; + } + vsd->exponent = vsn->exponent; + vsd->significand = 0; + return 0; + } + + /* + * If 'm' is zero, the result is always zero. 
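/*
 * Illustrative sketch (not part of the patch): the special-case table that
 * vfp_single_fadd_nonnumber() above implements, expressed as the IEEE-754
 * outcomes it is meant to reproduce.  Host floats are used here purely for
 * illustration (assumes the host follows IEEE-754 semantics).
 */
#include <assert.h>
#include <math.h>

static void fadd_nonnumber_expectations(void)
{
	volatile float inf = INFINITY, num = 42.0f;

	assert(isinf(inf + inf));	/* same-sign infinities: valid, infinity */
	assert(isnan(inf + -inf));	/* opposite signs: invalid, default QNaN */
	assert(isinf(inf + num));	/* infinity + number: infinity           */
	assert(isnan(NAN + num));	/* a NaN operand always propagates       */
}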
In this case, + * 'n' may be zero or a number, but it doesn't matter which. + */ + if ((vsm->exponent | vsm->significand) == 0) { + vsd->exponent = 0; + vsd->significand = 0; + return 0; + } + + /* + * We add 2 to the destination exponent for the same reason as + * the addition case - though this time we have +1 from each + * input operand. + */ + vsd->exponent = vsn->exponent + vsm->exponent - 127 + 2; + vsd->significand = vfp_hi64to32jamming((u64)vsn->significand * vsm->significand); + + vfp_single_dump("VSD", vsd); + return 0; +} + +#define NEG_MULTIPLY (1 << 0) +#define NEG_SUBTRACT (1 << 1) + +static u32 +vfp_single_multiply_accumulate(int sd, int sn, s32 m, u32 fpscr, u32 negate, char *func) +{ + struct vfp_single vsd, vsp, vsn, vsm; + u32 exceptions; + s32 v; + + v = vfp_get_float(sn); + pr_debug("VFP: s%u = %08x\n", sn, v); + vfp_single_unpack(&vsn, v); + if (vsn.exponent == 0 && vsn.significand) + vfp_single_normalise_denormal(&vsn); + + vfp_single_unpack(&vsm, m); + if (vsm.exponent == 0 && vsm.significand) + vfp_single_normalise_denormal(&vsm); + + exceptions = vfp_single_multiply(&vsp, &vsn, &vsm, fpscr); + if (negate & NEG_MULTIPLY) + vsp.sign = vfp_sign_negate(vsp.sign); + + v = vfp_get_float(sd); + pr_debug("VFP: s%u = %08x\n", sd, v); + vfp_single_unpack(&vsn, v); + if (negate & NEG_SUBTRACT) + vsn.sign = vfp_sign_negate(vsn.sign); + + exceptions |= vfp_single_add(&vsd, &vsn, &vsp, fpscr); + + return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, func); +} + +/* + * Standard operations + */ + +/* + * sd = sd + (sn * sm) + */ +static u32 vfp_single_fmac(int sd, int sn, s32 m, u32 fpscr) +{ + return vfp_single_multiply_accumulate(sd, sn, m, fpscr, 0, "fmac"); +} + +/* + * sd = sd - (sn * sm) + */ +static u32 vfp_single_fnmac(int sd, int sn, s32 m, u32 fpscr) +{ + return vfp_single_multiply_accumulate(sd, sn, m, fpscr, NEG_MULTIPLY, "fnmac"); +} + +/* + * sd = -sd + (sn * sm) + */ +static u32 vfp_single_fmsc(int sd, int sn, s32 m, u32 fpscr) +{ + return vfp_single_multiply_accumulate(sd, sn, m, fpscr, NEG_SUBTRACT, "fmsc"); +} + +/* + * sd = -sd - (sn * sm) + */ +static u32 vfp_single_fnmsc(int sd, int sn, s32 m, u32 fpscr) +{ + return vfp_single_multiply_accumulate(sd, sn, m, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc"); +} + +/* + * sd = sn * sm + */ +static u32 vfp_single_fmul(int sd, int sn, s32 m, u32 fpscr) +{ + struct vfp_single vsd, vsn, vsm; + u32 exceptions; + s32 n = vfp_get_float(sn); + + pr_debug("VFP: s%u = %08x\n", sn, n); + + vfp_single_unpack(&vsn, n); + if (vsn.exponent == 0 && vsn.significand) + vfp_single_normalise_denormal(&vsn); + + vfp_single_unpack(&vsm, m); + if (vsm.exponent == 0 && vsm.significand) + vfp_single_normalise_denormal(&vsm); + + exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr); + return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fmul"); +} + +/* + * sd = -(sn * sm) + */ +static u32 vfp_single_fnmul(int sd, int sn, s32 m, u32 fpscr) +{ + struct vfp_single vsd, vsn, vsm; + u32 exceptions; + s32 n = vfp_get_float(sn); + + pr_debug("VFP: s%u = %08x\n", sn, n); + + vfp_single_unpack(&vsn, n); + if (vsn.exponent == 0 && vsn.significand) + vfp_single_normalise_denormal(&vsn); + + vfp_single_unpack(&vsm, m); + if (vsm.exponent == 0 && vsm.significand) + vfp_single_normalise_denormal(&vsm); + + exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr); + vsd.sign = vfp_sign_negate(vsd.sign); + return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fnmul"); +} + +/* + * sd = sn + sm + */ +static u32 
vfp_single_fadd(int sd, int sn, s32 m, u32 fpscr) +{ + struct vfp_single vsd, vsn, vsm; + u32 exceptions; + s32 n = vfp_get_float(sn); + + pr_debug("VFP: s%u = %08x\n", sn, n); + + /* + * Unpack and normalise denormals. + */ + vfp_single_unpack(&vsn, n); + if (vsn.exponent == 0 && vsn.significand) + vfp_single_normalise_denormal(&vsn); + + vfp_single_unpack(&vsm, m); + if (vsm.exponent == 0 && vsm.significand) + vfp_single_normalise_denormal(&vsm); + + exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr); + + return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fadd"); +} + +/* + * sd = sn - sm + */ +static u32 vfp_single_fsub(int sd, int sn, s32 m, u32 fpscr) +{ + /* + * Subtraction is addition with one sign inverted. + */ + return vfp_single_fadd(sd, sn, vfp_single_packed_negate(m), fpscr); +} + +/* + * sd = sn / sm + */ +static u32 vfp_single_fdiv(int sd, int sn, s32 m, u32 fpscr) +{ + struct vfp_single vsd, vsn, vsm; + u32 exceptions = 0; + s32 n = vfp_get_float(sn); + int tm, tn; + + pr_debug("VFP: s%u = %08x\n", sn, n); + + vfp_single_unpack(&vsn, n); + vfp_single_unpack(&vsm, m); + + vsd.sign = vsn.sign ^ vsm.sign; + + tn = vfp_single_type(&vsn); + tm = vfp_single_type(&vsm); + + /* + * Is n a NAN? + */ + if (tn & VFP_NAN) + goto vsn_nan; + + /* + * Is m a NAN? + */ + if (tm & VFP_NAN) + goto vsm_nan; + + /* + * If n and m are infinity, the result is invalid + * If n and m are zero, the result is invalid + */ + if (tm & tn & (VFP_INFINITY|VFP_ZERO)) + goto invalid; + + /* + * If n is infinity, the result is infinity + */ + if (tn & VFP_INFINITY) + goto infinity; + + /* + * If m is zero, raise div0 exception + */ + if (tm & VFP_ZERO) + goto divzero; + + /* + * If m is infinity, or n is zero, the result is zero + */ + if (tm & VFP_INFINITY || tn & VFP_ZERO) + goto zero; + + if (tn & VFP_DENORMAL) + vfp_single_normalise_denormal(&vsn); + if (tm & VFP_DENORMAL) + vfp_single_normalise_denormal(&vsm); + + /* + * Ok, we have two numbers, we can perform division. 
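/*
 * Illustrative sketch (not part of the patch): the special-case ladder
 * that vfp_single_fdiv() walks above, expressed as the IEEE-754 outcomes
 * it is meant to reproduce (host floats, default rounding; assumes
 * IEEE-754 semantics on the host).
 */
#include <assert.h>
#include <math.h>

static void fdiv_special_cases(void)
{
	volatile float zero = 0.0f, one = 1.0f, two = 2.0f, inf = INFINITY;

	assert(isnan(inf / inf));	/* inf / inf : invalid operation          */
	assert(isnan(zero / zero));	/* 0 / 0     : invalid operation          */
	assert(isinf(inf / two));	/* inf / x   : infinity                   */
	assert(isinf(one / zero));	/* x / 0     : division by zero, infinity */
	assert(one / inf == 0.0f);	/* x / inf   : zero                       */
	assert(zero / two == 0.0f);	/* 0 / x     : zero                       */
}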
+ */ + vsd.exponent = vsn.exponent - vsm.exponent + 127 - 1; + vsm.significand <<= 1; + if (vsm.significand <= (2 * vsn.significand)) { + vsn.significand >>= 1; + vsd.exponent++; + } + vsd.significand = ((u64)vsn.significand << 32) / vsm.significand; + if ((vsd.significand & 0x3f) == 0) + vsd.significand |= ((u64)vsm.significand * vsd.significand != (u64)vsn.significand << 32); + + return vfp_single_normaliseround(sd, &vsd, fpscr, 0, "fdiv"); + + vsn_nan: + exceptions = vfp_propagate_nan(&vsd, &vsn, &vsm, fpscr); + pack: + vfp_put_float(sd, vfp_single_pack(&vsd)); + return exceptions; + + vsm_nan: + exceptions = vfp_propagate_nan(&vsd, &vsm, &vsn, fpscr); + goto pack; + + zero: + vsd.exponent = 0; + vsd.significand = 0; + goto pack; + + divzero: + exceptions = FPSCR_DZC; + infinity: + vsd.exponent = 255; + vsd.significand = 0; + goto pack; + + invalid: + vfp_put_float(sd, vfp_single_pack(&vfp_single_default_qnan)); + return FPSCR_IOC; +} + +static u32 (* const fop_fns[16])(int sd, int sn, s32 m, u32 fpscr) = { + [FOP_TO_IDX(FOP_FMAC)] = vfp_single_fmac, + [FOP_TO_IDX(FOP_FNMAC)] = vfp_single_fnmac, + [FOP_TO_IDX(FOP_FMSC)] = vfp_single_fmsc, + [FOP_TO_IDX(FOP_FNMSC)] = vfp_single_fnmsc, + [FOP_TO_IDX(FOP_FMUL)] = vfp_single_fmul, + [FOP_TO_IDX(FOP_FNMUL)] = vfp_single_fnmul, + [FOP_TO_IDX(FOP_FADD)] = vfp_single_fadd, + [FOP_TO_IDX(FOP_FSUB)] = vfp_single_fsub, + [FOP_TO_IDX(FOP_FDIV)] = vfp_single_fdiv, +}; + +#define FREG_BANK(x) ((x) & 0x18) +#define FREG_IDX(x) ((x) & 7) + +u32 vfp_single_cpdo(u32 inst, u32 fpscr) +{ + u32 op = inst & FOP_MASK; + u32 exceptions = 0; + unsigned int sd = vfp_get_sd(inst); + unsigned int sn = vfp_get_sn(inst); + unsigned int sm = vfp_get_sm(inst); + unsigned int vecitr, veclen, vecstride; + u32 (*fop)(int, int, s32, u32); + + veclen = fpscr & FPSCR_LENGTH_MASK; + vecstride = 1 + ((fpscr & FPSCR_STRIDE_MASK) == FPSCR_STRIDE_MASK); + + /* + * If destination bank is zero, vector length is always '1'. + * ARM DDI0100F C5.1.3, C5.3.2. + */ + if (FREG_BANK(sd) == 0) + veclen = 0; + + pr_debug("VFP: vecstride=%u veclen=%u\n", vecstride, + (veclen >> FPSCR_LENGTH_BIT) + 1); + + fop = (op == FOP_EXT) ? fop_extfns[sn] : fop_fns[FOP_TO_IDX(op)]; + if (!fop) + goto invalid; + + for (vecitr = 0; vecitr <= veclen; vecitr += 1 << FPSCR_LENGTH_BIT) { + s32 m = vfp_get_float(sm); + u32 except; + + if (op == FOP_EXT) + pr_debug("VFP: itr%d (s%u) = op[%u] (s%u=%08x)\n", + vecitr >> FPSCR_LENGTH_BIT, sd, sn, sm, m); + else + pr_debug("VFP: itr%d (s%u) = (s%u) op[%u] (s%u=%08x)\n", + vecitr >> FPSCR_LENGTH_BIT, sd, sn, + FOP_TO_IDX(op), sm, m); + + except = fop(sd, sn, m, fpscr); + pr_debug("VFP: itr%d: exceptions=%08x\n", + vecitr >> FPSCR_LENGTH_BIT, except); + + exceptions |= except; + + /* + * This ensures that comparisons only operate on scalars; + * comparisons always return with one FPSCR status bit set. + */ + if (except & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V)) + break; + + /* + * CHECK: It appears to be undefined whether we stop when + * we encounter an exception. We continue. 
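/*
 * Illustrative sketch (not part of the patch): how a short-vector
 * operation steps a single-precision register number between iterations.
 * The 32 s-registers form four banks of eight; the step wraps within the
 * bank, which is what FREG_BANK()/FREG_IDX() above and the register
 * update just below express.
 */
static unsigned int next_vector_sreg(unsigned int sreg, unsigned int stride)
{
	unsigned int bank = sreg & 0x18;		/* FREG_BANK() */
	unsigned int idx  = (sreg + stride) & 7;	/* wrapped FREG_IDX() */

	return bank + idx;	/* e.g. s15, stride 1 -> s8 (stays in its bank) */
}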
+ */ + + sd = FREG_BANK(sd) + ((FREG_IDX(sd) + vecstride) & 7); + sn = FREG_BANK(sn) + ((FREG_IDX(sn) + vecstride) & 7); + if (FREG_BANK(sm) != 0) + sm = FREG_BANK(sm) + ((FREG_IDX(sm) + vecstride) & 7); + } + return exceptions; + + invalid: + return (u32)-1; +} diff --git a/arch/cris/arch-v10/drivers/ide.c b/arch/cris/arch-v10/drivers/ide.c new file mode 100644 index 000000000..335473c45 --- /dev/null +++ b/arch/cris/arch-v10/drivers/ide.c @@ -0,0 +1,945 @@ +/* $Id: ide.c,v 1.1 2004/01/22 08:22:58 starvik Exp $ + * + * Etrax specific IDE functions, like init and PIO-mode setting etc. + * Almost the entire ide.c is used for the rest of the Etrax ATA driver. + * Copyright (c) 2000-2004 Axis Communications AB + * + * Authors: Bjorn Wesen (initial version) + * Mikael Starvik (pio setup stuff, Linux 2.6 port) + */ + +/* Regarding DMA: + * + * There are two forms of DMA - "DMA handshaking" between the interface and the drive, + * and DMA between the memory and the interface. We can ALWAYS use the latter, since it's + * something built-in in the Etrax. However only some drives support the DMA-mode handshaking + * on the ATA-bus. The normal PC driver and Triton interface disables memory-if DMA when the + * device can't do DMA handshaking for some stupid reason. We don't need to do that. + */ + +#undef REALLY_SLOW_IO /* most systems can safely undef this */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* number of Etrax DMA descriptors */ +#define MAX_DMA_DESCRS 64 + +/* number of times to retry busy-flags when reading/writing IDE-registers + * this can't be too high because a hung harddisk might cause the watchdog + * to trigger (sometimes INB and OUTB are called with irq's disabled) + */ + +#define IDE_REGISTER_TIMEOUT 300 + +#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET +/* address where the memory-mapped IDE reset bit lives, if used */ +static volatile unsigned long *reset_addr; +#endif + +static int e100_read_command = 0; + +#define LOWDB(x) +#define D(x) + +void +etrax100_ide_outw(unsigned short data, ide_ioreg_t reg) { + int timeleft; + LOWDB(printk("ow: data 0x%x, reg 0x%x\n", data, reg)); + + /* note the lack of handling any timeouts. we stop waiting, but we don't + * really notify anybody. + */ + + timeleft = IDE_REGISTER_TIMEOUT; + /* wait for busy flag */ + while(timeleft && (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy))) + timeleft--; + + /* + * Fall through at a timeout, so the ongoing command will be + * aborted by the write below, which is expected to be a dummy + * command to the command register. This happens when a faulty + * drive times out on a command. See comment on timeout in + * INB. 
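/*
 * Illustrative sketch (not part of the patch): the bounded busy-wait used
 * by etrax100_ide_outw()/inw() above.  The poll count is capped so that a
 * hung drive cannot stall the CPU long enough to trip the watchdog; on
 * timeout the caller deliberately falls through and lets the IDE core
 * recover.
 */
static int wait_bit_clear_bounded(volatile unsigned long *status,
				  unsigned long busy_mask, int max_polls)
{
	int timeleft = max_polls;

	while (timeleft && (*status & busy_mask))
		timeleft--;

	return timeleft != 0;	/* 0: we gave up; the caller decides what to do */
}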
+ */ + if(!timeleft) + printk("ATA timeout reg 0x%lx := 0x%x\n", reg, data); + + *R_ATA_CTRL_DATA = reg | data; /* write data to the drive's register */ + + timeleft = IDE_REGISTER_TIMEOUT; + /* wait for transmitter ready */ + while(timeleft && !(*R_ATA_STATUS_DATA & + IO_MASK(R_ATA_STATUS_DATA, tr_rdy))) + timeleft--; +} + +void +etrax100_ide_outb(unsigned char data, ide_ioreg_t reg) +{ + etrax100_ide_outw(data, reg); +} + +void +etrax100_ide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port) +{ + etrax100_ide_outw(addr, port); +} + +unsigned short +etrax100_ide_inw(ide_ioreg_t reg) { + int status; + int timeleft; + + timeleft = IDE_REGISTER_TIMEOUT; + /* wait for busy flag */ + while(timeleft && (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy))) + timeleft--; + + if(!timeleft) { + /* + * If we're asked to read the status register, like for + * example when a command does not complete for an + * extended time, but the ATA interface is stuck in a + * busy state at the *ETRAX* ATA interface level (as has + * happened repeatedly with at least one bad disk), then + * the best thing to do is to pretend that we read + * "busy" in the status register, so the IDE driver will + * time-out, abort the ongoing command and perform a + * reset sequence. Note that the subsequent OUT_BYTE + * call will also timeout on busy, but as long as the + * write is still performed, everything will be fine. + */ + if ((reg & IO_MASK (R_ATA_CTRL_DATA, addr)) + == IO_FIELD (R_ATA_CTRL_DATA, addr, IDE_STATUS_OFFSET)) + return BUSY_STAT; + else + /* For other rare cases we assume 0 is good enough. */ + return 0; + } + + *R_ATA_CTRL_DATA = reg | IO_STATE(R_ATA_CTRL_DATA, rw, read); /* read data */ + + timeleft = IDE_REGISTER_TIMEOUT; + /* wait for available */ + while(timeleft && !((status = *R_ATA_STATUS_DATA) & + IO_MASK(R_ATA_STATUS_DATA, dav))) + timeleft--; + + if(!timeleft) + return 0; + + LOWDB(printk("inb: 0x%x from reg 0x%x\n", status & 0xff, reg)); + + return (unsigned short)status; +} + +unsigned char +etrax100_ide_inb(ide_ioreg_t reg) +{ + return (unsigned char)etrax100_ide_inw(reg); +} + +/* PIO timing (in R_ATA_CONFIG) + * + * _____________________________ + * ADDRESS : ________/ + * + * _______________ + * DIOR : ____________/ \__________ + * + * _______________ + * DATA : XXXXXXXXXXXXXXXX_______________XXXXXXXX + * + * + * DIOR is unbuffered while address and data is buffered. + * This creates two problems: + * 1. The DIOR pulse is to early (because it is unbuffered) + * 2. The rise time of DIOR is long + * + * There are at least three different plausible solutions + * 1. Use a pad capable of larger currents in Etrax + * 2. Use an external buffer + * 3. Make the strobe pulse longer + * + * Some of the strobe timings below are modified to compensate + * for this. This implies a slight performance decrease. + * + * THIS SHOULD NEVER BE CHANGED! + * + * TODO: Is this true for the latest LX boards still ? 
+ */ + +#define ATA_DMA2_STROBE 4 +#define ATA_DMA2_HOLD 0 +#define ATA_DMA1_STROBE 4 +#define ATA_DMA1_HOLD 1 +#define ATA_DMA0_STROBE 12 +#define ATA_DMA0_HOLD 9 +#define ATA_PIO4_SETUP 1 +#define ATA_PIO4_STROBE 5 +#define ATA_PIO4_HOLD 0 +#define ATA_PIO3_SETUP 1 +#define ATA_PIO3_STROBE 5 +#define ATA_PIO3_HOLD 1 +#define ATA_PIO2_SETUP 1 +#define ATA_PIO2_STROBE 6 +#define ATA_PIO2_HOLD 2 +#define ATA_PIO1_SETUP 2 +#define ATA_PIO1_STROBE 11 +#define ATA_PIO1_HOLD 4 +#define ATA_PIO0_SETUP 4 +#define ATA_PIO0_STROBE 19 +#define ATA_PIO0_HOLD 4 + +static int e100_dma_check (ide_drive_t *drive); +static int e100_dma_begin (ide_drive_t *drive); +static int e100_dma_end (ide_drive_t *drive); +static int e100_dma_read (ide_drive_t *drive); +static int e100_dma_write (ide_drive_t *drive); +static void e100_ide_input_data (ide_drive_t *drive, void *, unsigned int); +static void e100_ide_output_data (ide_drive_t *drive, void *, unsigned int); +static void e100_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int); +static void e100_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int); +static int e100_dma_off (ide_drive_t *drive); +static int e100_dma_verbose (ide_drive_t *drive); + + +/* + * good_dma_drives() lists the model names (from "hdparm -i") + * of drives which do not support mword2 DMA but which are + * known to work fine with this interface under Linux. + */ + +const char *good_dma_drives[] = {"Micropolis 2112A", + "CONNER CTMA 4000", + "CONNER CTT8000-A", + NULL}; + +static void tune_e100_ide(ide_drive_t *drive, byte pio) +{ + pio = 4; + /* pio = ide_get_best_pio_mode(drive, pio, 4, NULL); */ + + /* set pio mode! */ + + switch(pio) { + case 0: + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO0_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO0_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO0_HOLD ) ); + break; + case 1: + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO1_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO1_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO1_HOLD ) ); + break; + case 2: + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO2_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO2_HOLD ) ); + break; + case 3: + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO3_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO3_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO3_HOLD ) ); + break; + case 4: + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO4_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO4_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO4_HOLD ) ); + break; + } +} + +void __init +init_e100_ide (void) +{ + volatile unsigned int dummy; 
+ int h; + + printk("ide: ETRAX 100LX built-in ATA DMA controller\n"); + + /* first fill in some stuff in the ide_hwifs fields */ + + for(h = 0; h < MAX_HWIFS; h++) { + ide_hwif_t *hwif = &ide_hwifs[h]; + hwif->mmio = 2; + hwif->chipset = ide_etrax100; + hwif->tuneproc = &tune_e100_ide; + hwif->ata_input_data = &e100_ide_input_data; + hwif->ata_output_data = &e100_ide_output_data; + hwif->atapi_input_bytes = &e100_atapi_input_bytes; + hwif->atapi_output_bytes = &e100_atapi_output_bytes; + hwif->ide_dma_check = &e100_dma_check; + hwif->ide_dma_end = &e100_dma_end; + hwif->ide_dma_write = &e100_dma_write; + hwif->ide_dma_read = &e100_dma_read; + hwif->ide_dma_begin = &e100_dma_begin; + hwif->OUTB = &etrax100_ide_outb; + hwif->OUTW = &etrax100_ide_outw; + hwif->OUTBSYNC = &etrax100_ide_outbsync; + hwif->INB = &etrax100_ide_inb; + hwif->INW = &etrax100_ide_inw; + hwif->ide_dma_off_quietly = &e100_dma_off; + hwif->ide_dma_verbose = &e100_dma_verbose; + hwif->sg_table = + kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES, GFP_KERNEL); + } + + /* actually reset and configure the etrax100 ide/ata interface */ + + *R_ATA_CTRL_DATA = 0; + *R_ATA_TRANSFER_CNT = 0; + *R_ATA_CONFIG = 0; + + genconfig_shadow = (genconfig_shadow & + ~IO_MASK(R_GEN_CONFIG, dma2) & + ~IO_MASK(R_GEN_CONFIG, dma3) & + ~IO_MASK(R_GEN_CONFIG, ata)) | + ( IO_STATE( R_GEN_CONFIG, dma3, ata ) | + IO_STATE( R_GEN_CONFIG, dma2, ata ) | + IO_STATE( R_GEN_CONFIG, ata, select ) ); + + *R_GEN_CONFIG = genconfig_shadow; + + /* pull the chosen /reset-line low */ + +#ifdef CONFIG_ETRAX_IDE_G27_RESET + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, 0); +#endif +#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET + REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, 0); +#endif +#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET + REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, 0); +#endif +#ifdef CONFIG_ETRAX_IDE_PB7_RESET + port_pb_dir_shadow = port_pb_dir_shadow | + IO_STATE(R_PORT_PB_DIR, dir7, output); + *R_PORT_PB_DIR = port_pb_dir_shadow; + REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 7, 1); +#endif + + /* wait some */ + + udelay(25); + + /* de-assert bus-reset */ + +#ifdef CONFIG_ETRAX_IDE_CSE1_16_RESET + REG_SHADOW_SET(port_cse1_addr, port_cse1_shadow, 16, 1); +#endif +#ifdef CONFIG_ETRAX_IDE_CSP0_8_RESET + REG_SHADOW_SET(port_csp0_addr, port_csp0_shadow, 8, 1); +#endif +#ifdef CONFIG_ETRAX_IDE_G27_RESET + REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, 1); +#endif + + /* make a dummy read to set the ata controller in a proper state */ + dummy = *R_ATA_STATUS_DATA; + + *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) | + IO_FIELD( R_ATA_CONFIG, dma_strobe, ATA_DMA2_STROBE ) | + IO_FIELD( R_ATA_CONFIG, dma_hold, ATA_DMA2_HOLD ) | + IO_FIELD( R_ATA_CONFIG, pio_setup, ATA_PIO4_SETUP ) | + IO_FIELD( R_ATA_CONFIG, pio_strobe, ATA_PIO4_STROBE ) | + IO_FIELD( R_ATA_CONFIG, pio_hold, ATA_PIO4_HOLD ) ); + + *R_ATA_CTRL_DATA = ( IO_STATE( R_ATA_CTRL_DATA, rw, read) | + IO_FIELD( R_ATA_CTRL_DATA, addr, 1 ) ); + + while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); /* wait for busy flag*/ + + *R_IRQ_MASK0_SET = ( IO_STATE( R_IRQ_MASK0_SET, ata_irq0, set ) | + IO_STATE( R_IRQ_MASK0_SET, ata_irq1, set ) | + IO_STATE( R_IRQ_MASK0_SET, ata_irq2, set ) | + IO_STATE( R_IRQ_MASK0_SET, ata_irq3, set ) ); + + printk("ide: waiting %d seconds for drives to regain consciousness\n", + CONFIG_ETRAX_IDE_DELAY); + + h = jiffies + (CONFIG_ETRAX_IDE_DELAY * HZ); + while(time_before(jiffies, h)) /* nothing */ ; + + /* reset the dma channels we will 
use */ + + RESET_DMA(ATA_TX_DMA_NBR); + RESET_DMA(ATA_RX_DMA_NBR); + WAIT_DMA(ATA_TX_DMA_NBR); + WAIT_DMA(ATA_RX_DMA_NBR); + +} + +static int e100_dma_off (ide_drive_t *drive) +{ + return 0; +} + +static int e100_dma_verbose (ide_drive_t *drive) +{ + printk(", DMA(mode 2)"); + return 0; +} + +static etrax_dma_descr mydescr; + +/* + * The following routines are mainly used by the ATAPI drivers. + * + * These routines will round up any request for an odd number of bytes, + * so if an odd bytecount is specified, be sure that there's at least one + * extra byte allocated for the buffer. + */ +static void +e100_atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount) +{ + ide_ioreg_t data_reg = IDE_DATA_REG; + + D(printk("atapi_input_bytes, dreg 0x%x, buffer 0x%x, count %d\n", + data_reg, buffer, bytecount)); + + if(bytecount & 1) { + printk("warning, odd bytecount in cdrom_in_bytes = %d.\n", bytecount); + bytecount++; /* to round off */ + } + + /* make sure the DMA channel is available */ + RESET_DMA(ATA_RX_DMA_NBR); + WAIT_DMA(ATA_RX_DMA_NBR); + + /* setup DMA descriptor */ + + mydescr.sw_len = bytecount; + mydescr.ctrl = d_eol; + mydescr.buf = virt_to_phys(buffer); + + /* start the dma channel */ + + *R_DMA_CH3_FIRST = virt_to_phys(&mydescr); + *R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, start); + + /* initiate a multi word dma read using PIO handshaking */ + + *R_ATA_TRANSFER_CNT = IO_FIELD(R_ATA_TRANSFER_CNT, count, bytecount >> 1); + + *R_ATA_CTRL_DATA = data_reg | + IO_STATE(R_ATA_CTRL_DATA, rw, read) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | + IO_STATE(R_ATA_CTRL_DATA, handsh, pio) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + /* wait for completion */ + + LED_DISK_READ(1); + WAIT_DMA(ATA_RX_DMA_NBR); + LED_DISK_READ(0); + +#if 0 + /* old polled transfer code + * this should be moved into a new function that can do polled + * transfers if DMA is not available + */ + + /* initiate a multi word read */ + + *R_ATA_TRANSFER_CNT = wcount << 1; + + *R_ATA_CTRL_DATA = data_reg | + IO_STATE(R_ATA_CTRL_DATA, rw, read) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, register) | + IO_STATE(R_ATA_CTRL_DATA, handsh, pio) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + /* svinto has a latency until the busy bit actually is set */ + + nop(); nop(); + nop(); nop(); + nop(); nop(); + nop(); nop(); + nop(); nop(); + + /* unit should be busy during multi transfer */ + while((status = *R_ATA_STATUS_DATA) & IO_MASK(R_ATA_STATUS_DATA, busy)) { + while(!(status & IO_MASK(R_ATA_STATUS_DATA, dav))) + status = *R_ATA_STATUS_DATA; + *ptr++ = (unsigned short)(status & 0xffff); + } +#endif +} + +static void +e100_atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount) +{ + ide_ioreg_t data_reg = IDE_DATA_REG; + + D(printk("atapi_output_bytes, dreg 0x%x, buffer 0x%x, count %d\n", + data_reg, buffer, bytecount)); + + if(bytecount & 1) { + printk("odd bytecount %d in atapi_out_bytes!\n", bytecount); + bytecount++; + } + + /* make sure the DMA channel is available */ + RESET_DMA(ATA_TX_DMA_NBR); + WAIT_DMA(ATA_TX_DMA_NBR); + + /* setup DMA descriptor */ + + mydescr.sw_len = bytecount; + mydescr.ctrl = d_eol; + mydescr.buf = virt_to_phys(buffer); + + /* start the dma channel */ + + *R_DMA_CH2_FIRST = virt_to_phys(&mydescr); + *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, start); + + /* initiate a multi word dma write using PIO handshaking */ + + *R_ATA_TRANSFER_CNT = 
IO_FIELD(R_ATA_TRANSFER_CNT, count, bytecount >> 1); + + *R_ATA_CTRL_DATA = data_reg | + IO_STATE(R_ATA_CTRL_DATA, rw, write) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | + IO_STATE(R_ATA_CTRL_DATA, handsh, pio) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + /* wait for completion */ + + LED_DISK_WRITE(1); + WAIT_DMA(ATA_TX_DMA_NBR); + LED_DISK_WRITE(0); + +#if 0 + /* old polled write code - see comment in input_bytes */ + + /* wait for busy flag */ + while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); + + /* initiate a multi word write */ + + *R_ATA_TRANSFER_CNT = bytecount >> 1; + + ctrl = data_reg | + IO_STATE(R_ATA_CTRL_DATA, rw, write) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, register) | + IO_STATE(R_ATA_CTRL_DATA, handsh, pio) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + LED_DISK_WRITE(1); + + /* Etrax will set busy = 1 until the multi pio transfer has finished + * and tr_rdy = 1 after each successful word transfer. + * When the last byte has been transferred Etrax will first set tr_tdy = 1 + * and then busy = 0 (not in the same cycle). If we read busy before it + * has been set to 0 we will think that we should transfer more bytes + * and then tr_rdy would be 0 forever. This is solved by checking busy + * in the inner loop. + */ + + do { + *R_ATA_CTRL_DATA = ctrl | *ptr++; + while(!(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy)) && + (*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy))); + } while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); + + LED_DISK_WRITE(0); +#endif + +} + +/* + * This is used for most PIO data transfers *from* the IDE interface + */ +static void +e100_ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount) +{ + e100_atapi_input_bytes(drive, buffer, wcount << 2); +} + +/* + * This is used for most PIO data transfers *to* the IDE interface + */ +static void +e100_ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount) +{ + e100_atapi_output_bytes(drive, buffer, wcount << 2); +} + +/* we only have one DMA channel on the chip for ATA, so we can keep these statically */ +static etrax_dma_descr ata_descrs[MAX_DMA_DESCRS]; +static unsigned int ata_tot_size; + +/* + * e100_ide_build_dmatable() prepares a dma request. + * Returns 0 if all went okay, returns 1 otherwise. + */ +static int e100_ide_build_dmatable (ide_drive_t *drive) +{ + ide_hwif_t *hwif = HWIF(drive); + struct scatterlist* sg; + struct request *rq = HWGROUP(drive)->rq; + unsigned long size, addr; + unsigned int count = 0; + int i = 0; + + sg = hwif->sg_table; + + ata_tot_size = 0; + + if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE) { + u8 *virt_addr = rq->buffer; + int sector_count = rq->nr_sectors; + memset(&sg[0], 0, sizeof(*sg)); + sg[0].page = virt_to_page(virt_addr); + sg[0].offset = offset_in_page(virt_addr); + sg[0].length = sector_count * SECTOR_SIZE; + hwif->sg_nents = i = 1; + } + else + { + hwif->sg_nents = i = blk_rq_map_sg(drive->queue, rq, hwif->sg_table); + } + + + while(i) { + /* + * Determine addr and size of next buffer area. We assume that + * individual virtual buffers are always composed linearly in + * physical memory. For example, we assume that any 8kB buffer + * is always composed of two adjacent physical 4kB pages rather + * than two possibly non-adjacent physical 4kB pages. 
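+	 *
+	 * A rough worked example of the splitting done further down: with
+	 * the 131072-byte cap enforced below, a contiguous 102400-byte
+	 * (100 kB) area becomes one 65536-byte descriptor (sw_len = 0,
+	 * since 0 encodes 65536 in the 16-bit field) followed by one
+	 * 36864-byte descriptor, while a request whose running total would
+	 * exceed 131072 bytes makes this routine return 1 so the caller
+	 * falls back to PIO.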
+ */ + /* group sequential buffers into one large buffer */ + addr = page_to_phys(sg->page) + sg->offset; + size = sg_dma_len(sg); + while (sg++, --i) { + if ((addr + size) != page_to_phys(sg->page) + sg->offset) + break; + size += sg_dma_len(sg); + } + + /* did we run out of descriptors? */ + + if(count >= MAX_DMA_DESCRS) { + printk("%s: too few DMA descriptors\n", drive->name); + return 1; + } + + /* however, this case is more difficult - R_ATA_TRANSFER_CNT cannot be more + than 65536 words per transfer, so in that case we need to either + 1) use a DMA interrupt to re-trigger R_ATA_TRANSFER_CNT and continue with + the descriptors, or + 2) simply do the request here, and get dma_intr to only ide_end_request on + those blocks that were actually set-up for transfer. + */ + + if(ata_tot_size + size > 131072) { + printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, (int)size); + return 1; + } + + /* If size > 65536 it has to be splitted into new descriptors. Since we don't handle + size > 131072 only one split is necessary */ + + if(size > 65536) { + /* ok we want to do IO at addr, size bytes. set up a new descriptor entry */ + ata_descrs[count].sw_len = 0; /* 0 means 65536, this is a 16-bit field */ + ata_descrs[count].ctrl = 0; + ata_descrs[count].buf = addr; + ata_descrs[count].next = virt_to_phys(&ata_descrs[count + 1]); + count++; + ata_tot_size += 65536; + /* size and addr should refere to not handled data */ + size -= 65536; + addr += 65536; + } + /* ok we want to do IO at addr, size bytes. set up a new descriptor entry */ + if(size == 65536) { + ata_descrs[count].sw_len = 0; /* 0 means 65536, this is a 16-bit field */ + } else { + ata_descrs[count].sw_len = size; + } + ata_descrs[count].ctrl = 0; + ata_descrs[count].buf = addr; + ata_descrs[count].next = virt_to_phys(&ata_descrs[count + 1]); + count++; + ata_tot_size += size; + } + + if (count) { + /* set the end-of-list flag on the last descriptor */ + ata_descrs[count - 1].ctrl |= d_eol; + /* return and say all is ok */ + return 0; + } + + printk("%s: empty DMA table?\n", drive->name); + return 1; /* let the PIO routines handle this weirdness */ +} + +static int config_drive_for_dma (ide_drive_t *drive) +{ + const char **list; + struct hd_driveid *id = drive->id; + + if (id && (id->capability & 1)) { + /* Enable DMA on any drive that supports mword2 DMA */ + if ((id->field_valid & 2) && (id->dma_mword & 0x404) == 0x404) { + drive->using_dma = 1; + return 0; /* DMA enabled */ + } + + /* Consult the list of known "good" drives */ + list = good_dma_drives; + while (*list) { + if (!strcmp(*list++,id->model)) { + drive->using_dma = 1; + return 0; /* DMA enabled */ + } + } + } + return 1; /* DMA not enabled */ +} + +/* + * etrax_dma_intr() is the handler for disk read/write DMA interrupts + */ +static ide_startstop_t etrax_dma_intr (ide_drive_t *drive) +{ + int i, dma_stat; + byte stat; + + LED_DISK_READ(0); + LED_DISK_WRITE(0); + + dma_stat = HWIF(drive)->ide_dma_end(drive); + stat = HWIF(drive)->INB(IDE_STATUS_REG); /* get drive status */ + if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) { + if (!dma_stat) { + struct request *rq; + rq = HWGROUP(drive)->rq; + for (i = rq->nr_sectors; i > 0;) { + i -= rq->current_nr_sectors; + DRIVER(drive)->end_request(drive, 1, rq->nr_sectors); + } + return ide_stopped; + } + printk("%s: bad DMA status\n", drive->name); + } + return DRIVER(drive)->error(drive, "dma_intr", stat); +} + +/* + * Functions below initiates/aborts DMA read/write operations on a drive. 
+ * + * The caller is assumed to have selected the drive and programmed the drive's + * sector address using CHS or LBA. All that remains is to prepare for DMA + * and then issue the actual read/write DMA/PIO command to the drive. + * + * For ATAPI devices, we just prepare for DMA and return. The caller should + * then issue the packet command to the drive and call us again with + * ide_dma_begin afterwards. + * + * Returns 0 if all went well. + * Returns 1 if DMA read/write could not be started, in which case + * the caller should revert to PIO for the current request. + */ + +static int e100_dma_check(ide_drive_t *drive) +{ + return config_drive_for_dma (drive); +} + +static int e100_dma_end(ide_drive_t *drive) +{ + /* TODO: check if something went wrong with the DMA */ + return 0; +} + +static int e100_start_dma(ide_drive_t *drive, int atapi, int reading) +{ + if(reading) { + + RESET_DMA(ATA_RX_DMA_NBR); /* sometimes the DMA channel get stuck so we need to do this */ + WAIT_DMA(ATA_RX_DMA_NBR); + + /* set up the Etrax DMA descriptors */ + + if(e100_ide_build_dmatable (drive)) + return 1; + + if(!atapi) { + /* set the irq handler which will finish the request when DMA is done */ + + ide_set_handler(drive, &etrax_dma_intr, WAIT_CMD, NULL); + + /* issue cmd to drive */ + if ((HWGROUP(drive)->rq->cmd == IDE_DRIVE_TASKFILE) && + (drive->addressing == 1)) { + ide_task_t *args = HWGROUP(drive)->rq->special; + etrax100_ide_outb(args->tfRegister[IDE_COMMAND_OFFSET], IDE_COMMAND_REG); + } else if (drive->addressing) { + etrax100_ide_outb(WIN_READDMA_EXT, IDE_COMMAND_REG); + } else { + etrax100_ide_outb(WIN_READDMA, IDE_COMMAND_REG); + } + } + + /* begin DMA */ + + /* need to do this before RX DMA due to a chip bug + * it is enough to just flush the part of the cache that + * corresponds to the buffers we start, but since HD transfers + * usually are more than 8 kB, it is easier to optimize for the + * normal case and just flush the entire cache. its the only + * way to be sure! 
(OB movie quote) + */ + flush_etrax_cache(); + *R_DMA_CH3_FIRST = virt_to_phys(ata_descrs); + *R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, start); + + /* initiate a multi word dma read using DMA handshaking */ + + *R_ATA_TRANSFER_CNT = + IO_FIELD(R_ATA_TRANSFER_CNT, count, ata_tot_size >> 1); + + *R_ATA_CTRL_DATA = + IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) | + IO_STATE(R_ATA_CTRL_DATA, rw, read) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | + IO_STATE(R_ATA_CTRL_DATA, handsh, dma) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + LED_DISK_READ(1); + + D(printk("dma read of %d bytes.\n", ata_tot_size)); + + } else { + /* writing */ + + RESET_DMA(ATA_TX_DMA_NBR); /* sometimes the DMA channel get stuck so we need to do this */ + WAIT_DMA(ATA_TX_DMA_NBR); + + /* set up the Etrax DMA descriptors */ + + if(e100_ide_build_dmatable (drive)) + return 1; + + if(!atapi) { + /* set the irq handler which will finish the request when DMA is done */ + + ide_set_handler(drive, &etrax_dma_intr, WAIT_CMD, NULL); + + /* issue cmd to drive */ + if ((HWGROUP(drive)->rq->cmd == IDE_DRIVE_TASKFILE) && + (drive->addressing == 1)) { + ide_task_t *args = HWGROUP(drive)->rq->special; + etrax100_ide_outb(args->tfRegister[IDE_COMMAND_OFFSET], IDE_COMMAND_REG); + } else if (drive->addressing) { + etrax100_ide_outb(WIN_WRITEDMA_EXT, IDE_COMMAND_REG); + } else { + etrax100_ide_outb(WIN_WRITEDMA, IDE_COMMAND_REG); + } + } + + /* begin DMA */ + + *R_DMA_CH2_FIRST = virt_to_phys(ata_descrs); + *R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, start); + + /* initiate a multi word dma write using DMA handshaking */ + + *R_ATA_TRANSFER_CNT = + IO_FIELD(R_ATA_TRANSFER_CNT, count, ata_tot_size >> 1); + + *R_ATA_CTRL_DATA = + IO_FIELD(R_ATA_CTRL_DATA, data, IDE_DATA_REG) | + IO_STATE(R_ATA_CTRL_DATA, rw, write) | + IO_STATE(R_ATA_CTRL_DATA, src_dst, dma) | + IO_STATE(R_ATA_CTRL_DATA, handsh, dma) | + IO_STATE(R_ATA_CTRL_DATA, multi, on) | + IO_STATE(R_ATA_CTRL_DATA, dma_size, word); + + LED_DISK_WRITE(1); + + D(printk("dma write of %d bytes.\n", ata_tot_size)); + } + return 0; +} + +static int e100_dma_write(ide_drive_t *drive) +{ + e100_read_command = 0; + /* ATAPI-devices (not disks) first call ide_dma_read/write to set the direction + * then they call ide_dma_begin after they have issued the appropriate drive command + * themselves to actually start the chipset DMA. so we just return here if we're + * not a diskdrive. + */ + if (drive->media != ide_disk) + return 0; + return e100_start_dma(drive, 0, 0); +} + +static int e100_dma_read(ide_drive_t *drive) +{ + e100_read_command = 1; + /* ATAPI-devices (not disks) first call ide_dma_read/write to set the direction + * then they call ide_dma_begin after they have issued the appropriate drive command + * themselves to actually start the chipset DMA. so we just return here if we're + * not a diskdrive. + */ + if (drive->media != ide_disk) + return 0; + return e100_start_dma(drive, 0, 1); +} + +static int e100_dma_begin(ide_drive_t *drive) +{ + /* begin DMA, used by ATAPI devices which want to issue the + * appropriate IDE command themselves. + * + * they have already called ide_dma_read/write to set the + * static reading flag, now they call ide_dma_begin to do + * the real stuff. we tell our code below not to issue + * any IDE commands itself and jump into it. 
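+	 *
+	 * Sketch of the assumed ATAPI call order, as implied by the comments
+	 * above (hypothetical flow, pieced together from this file only):
+	 *   1. e100_dma_read()/e100_dma_write() - for media != ide_disk this
+	 *      only records the direction in e100_read_command and returns 0
+	 *   2. the ATAPI driver itself issues the packet command to the device
+	 *   3. e100_dma_begin() ends up here and calls e100_start_dma(drive, 1,
+	 *      e100_read_command), whose atapi argument skips the command-issue
+	 *      path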
+ */ + return e100_start_dma(drive, 1, e100_read_command); +} diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c new file mode 100644 index 000000000..6ded633f8 --- /dev/null +++ b/arch/cris/kernel/crisksyms.c @@ -0,0 +1,104 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern void dump_thread(struct pt_regs *, struct user *); +extern unsigned long get_cmos_time(void); +extern void __Udiv(void); +extern void __Umod(void); +extern void __Div(void); +extern void __Mod(void); +extern void __ashrdi3(void); +extern void iounmap(void *addr); + +/* Platform dependent support */ +EXPORT_SYMBOL(dump_thread); +EXPORT_SYMBOL(enable_irq); +EXPORT_SYMBOL(disable_irq); +EXPORT_SYMBOL(kernel_thread); +EXPORT_SYMBOL(get_cmos_time); +EXPORT_SYMBOL(loops_per_usec); + +/* String functions */ +EXPORT_SYMBOL(memcmp); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(strpbrk); +EXPORT_SYMBOL(strstr); +EXPORT_SYMBOL(strcpy); +EXPORT_SYMBOL(strchr); +EXPORT_SYMBOL(strcmp); +EXPORT_SYMBOL(strlen); +EXPORT_SYMBOL(strcat); +EXPORT_SYMBOL(strncat); +EXPORT_SYMBOL(strncmp); +EXPORT_SYMBOL(strncpy); + +/* Math functions */ +EXPORT_SYMBOL(__Udiv); +EXPORT_SYMBOL(__Umod); +EXPORT_SYMBOL(__Div); +EXPORT_SYMBOL(__Mod); +EXPORT_SYMBOL(__ashrdi3); + +/* Memory functions */ +EXPORT_SYMBOL(__ioremap); +EXPORT_SYMBOL(iounmap); + +/* Semaphore functions */ +EXPORT_SYMBOL(__up); +EXPORT_SYMBOL(__down); +EXPORT_SYMBOL(__down_interruptible); +EXPORT_SYMBOL(__down_trylock); + +/* Export shadow registers for the CPU I/O pins */ +EXPORT_SYMBOL(genconfig_shadow); +EXPORT_SYMBOL(port_pa_data_shadow); +EXPORT_SYMBOL(port_pa_dir_shadow); +EXPORT_SYMBOL(port_pb_data_shadow); +EXPORT_SYMBOL(port_pb_dir_shadow); +EXPORT_SYMBOL(port_pb_config_shadow); +EXPORT_SYMBOL(port_g_data_shadow); + +/* Userspace access functions */ +EXPORT_SYMBOL(__copy_user_zeroing); +EXPORT_SYMBOL(__copy_user); + +/* Cache flush functions */ +EXPORT_SYMBOL(flush_etrax_cache); +EXPORT_SYMBOL(prepare_rx_descriptor); + +#undef memcpy +#undef memset +extern void * memset(void *, int, __kernel_size_t); +extern void * memcpy(void *, const void *, __kernel_size_t); +EXPORT_SYMBOL_NOVERS(memcpy); +EXPORT_SYMBOL_NOVERS(memset); + +#ifdef CONFIG_ETRAX_FAST_TIMER +/* Fast timer functions */ +EXPORT_SYMBOL(fast_timer_list); +EXPORT_SYMBOL(start_one_shot_timer); +EXPORT_SYMBOL(del_fast_timer); +EXPORT_SYMBOL(schedule_usleep); +#endif + diff --git a/arch/i386/crypto/Makefile b/arch/i386/crypto/Makefile new file mode 100644 index 000000000..103c353d0 --- /dev/null +++ b/arch/i386/crypto/Makefile @@ -0,0 +1,9 @@ +# +# i386/crypto/Makefile +# +# Arch-specific CryptoAPI modules. +# + +obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o + +aes-i586-y := aes-i586-asm.o aes.o diff --git a/arch/i386/crypto/aes-i586-asm.S b/arch/i386/crypto/aes-i586-asm.S new file mode 100644 index 000000000..e8a04713d --- /dev/null +++ b/arch/i386/crypto/aes-i586-asm.S @@ -0,0 +1,341 @@ +// ------------------------------------------------------------------------- +// Copyright (c) 2001, Dr Brian Gladman < >, Worcester, UK. +// All rights reserved. +// +// LICENSE TERMS +// +// The free distribution and use of this software in both source and binary +// form is allowed (with or without changes) provided that: +// +// 1. 
distributions of this source code include the above copyright +// notice, this list of conditions and the following disclaimer// +// +// 2. distributions in binary form include the above copyright +// notice, this list of conditions and the following disclaimer +// in the documentation and/or other associated materials// +// +// 3. the copyright holder's name is not used to endorse products +// built using this software without specific written permission. +// +// +// ALTERNATIVELY, provided that this notice is retained in full, this product +// may be distributed under the terms of the GNU General Public License (GPL), +// in which case the provisions of the GPL apply INSTEAD OF those given above. +// +// Copyright (c) 2004 Linus Torvalds +// Copyright (c) 2004 Red Hat, Inc., James Morris + +// DISCLAIMER +// +// This software is provided 'as is' with no explicit or implied warranties +// in respect of its properties including, but not limited to, correctness +// and fitness for purpose. +// ------------------------------------------------------------------------- +// Issue Date: 29/07/2002 + +.file "aes-i586-asm.S" +.text + +// aes_rval aes_enc_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])// +// aes_rval aes_dec_blk(const unsigned char in_blk[], unsigned char out_blk[], const aes_ctx cx[1])// + +#define tlen 1024 // length of each of 4 'xor' arrays (256 32-bit words) + +// offsets to parameters with one register pushed onto stack + +#define in_blk 8 // input byte array address parameter +#define out_blk 12 // output byte array address parameter +#define ctx 16 // AES context structure + +// offsets in context structure + +#define ekey 0 // encryption key schedule base address +#define nrnd 256 // number of rounds +#define dkey 260 // decryption key schedule base address + +// register mapping for encrypt and decrypt subroutines + +#define r0 eax +#define r1 ebx +#define r2 ecx +#define r3 edx +#define r4 esi +#define r5 edi +#define r6 ebp + +#define eaxl al +#define eaxh ah +#define ebxl bl +#define ebxh bh +#define ecxl cl +#define ecxh ch +#define edxl dl +#define edxh dh + +#define _h(reg) reg##h +#define h(reg) _h(reg) + +#define _l(reg) reg##l +#define l(reg) _l(reg) + +// This macro takes a 32-bit word representing a column and uses +// each of its four bytes to index into four tables of 256 32-bit +// words to obtain values that are then xored into the appropriate +// output registers r0, r1, r4 or r5. 
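+//
+// Worked illustration of the lookups this macro performs (layout assumed
+// from the ft_tab/fl_tab declarations: four consecutive 256-entry u32
+// tables, i.e. tlen = 1024 bytes apart): for an input word 0xddccbbaa,
+// byte 0xaa indexes the first table and is xored into the first output
+// register, 0xbb indexes the table at +tlen, 0xcc the one at +2*tlen and
+// 0xdd the one at +3*tlen, each feeding the next output register.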
+ +// Parameters: +// %1 out_state[0] +// %2 out_state[1] +// %3 out_state[2] +// %4 out_state[3] +// %5 table base address +// %6 input register for the round (destroyed) +// %7 scratch register for the round + +#define do_col(a1, a2, a3, a4, a5, a6, a7) \ + movzx %l(a6),%a7; \ + xor a5(,%a7,4),%a1; \ + movzx %h(a6),%a7; \ + shr $16,%a6; \ + xor a5+tlen(,%a7,4),%a2; \ + movzx %l(a6),%a7; \ + movzx %h(a6),%a6; \ + xor a5+2*tlen(,%a7,4),%a3; \ + xor a5+3*tlen(,%a6,4),%a4; + +// initialise output registers from the key schedule + +#define do_fcol(a1, a2, a3, a4, a5, a6, a7, a8) \ + mov 0 a8,%a1; \ + movzx %l(a6),%a7; \ + mov 12 a8,%a2; \ + xor a5(,%a7,4),%a1; \ + mov 4 a8,%a4; \ + movzx %h(a6),%a7; \ + shr $16,%a6; \ + xor a5+tlen(,%a7,4),%a2; \ + movzx %l(a6),%a7; \ + movzx %h(a6),%a6; \ + xor a5+3*tlen(,%a6,4),%a4; \ + mov %a3,%a6; \ + mov 8 a8,%a3; \ + xor a5+2*tlen(,%a7,4),%a3; + +// initialise output registers from the key schedule + +#define do_icol(a1, a2, a3, a4, a5, a6, a7, a8) \ + mov 0 a8,%a1; \ + movzx %l(a6),%a7; \ + mov 4 a8,%a2; \ + xor a5(,%a7,4),%a1; \ + mov 12 a8,%a4; \ + movzx %h(a6),%a7; \ + shr $16,%a6; \ + xor a5+tlen(,%a7,4),%a2; \ + movzx %l(a6),%a7; \ + movzx %h(a6),%a6; \ + xor a5+3*tlen(,%a6,4),%a4; \ + mov %a3,%a6; \ + mov 8 a8,%a3; \ + xor a5+2*tlen(,%a7,4),%a3; + + +// original Gladman had conditional saves to MMX regs. +#define save(a1, a2) \ + mov %a2,4*a1(%esp) + +#define restore(a1, a2) \ + mov 4*a2(%esp),%a1 + +// This macro performs a forward encryption cycle. It is entered with +// the first previous round column values in r0, r1, r4 and r5 and +// exits with the final values in the same registers, using the MMX +// registers mm0-mm1 or the stack for temporary storage + +// mov current column values into the MMX registers +#define fwd_rnd(arg, table) \ + /* mov current column values into the MMX registers */ \ + mov %r0,%r2; \ + save (0,r1); \ + save (1,r5); \ + \ + /* compute new column values */ \ + do_fcol(r0,r5,r4,r1,table, r2,r3, arg); \ + do_col (r4,r1,r0,r5,table, r2,r3); \ + restore(r2,0); \ + do_col (r1,r0,r5,r4,table, r2,r3); \ + restore(r2,1); \ + do_col (r5,r4,r1,r0,table, r2,r3); + +// This macro performs an inverse encryption cycle. 
It is entered with +// the first previous round column values in r0, r1, r4 and r5 and +// exits with the final values in the same registers, using the MMX +// registers mm0-mm1 or the stack for temporary storage + +#define inv_rnd(arg, table) \ + /* mov current column values into the MMX registers */ \ + mov %r0,%r2; \ + save (0,r1); \ + save (1,r5); \ + \ + /* compute new column values */ \ + do_icol(r0,r1,r4,r5, table, r2,r3, arg); \ + do_col (r4,r5,r0,r1, table, r2,r3); \ + restore(r2,0); \ + do_col (r1,r4,r5,r0, table, r2,r3); \ + restore(r2,1); \ + do_col (r5,r0,r1,r4, table, r2,r3); + +// AES (Rijndael) Encryption Subroutine + +.global aes_enc_blk + +.extern ft_tab +.extern fl_tab + +.align 4 + +aes_enc_blk: + push %ebp + mov ctx(%esp),%ebp // pointer to context + xor %eax,%eax + +// CAUTION: the order and the values used in these assigns +// rely on the register mappings + +1: push %ebx + mov in_blk+4(%esp),%r2 + push %esi + mov nrnd(%ebp),%r3 // number of rounds + push %edi + lea ekey(%ebp),%r6 // key pointer + +// input four columns and xor in first round key + + mov (%r2),%r0 + mov 4(%r2),%r1 + mov 8(%r2),%r4 + mov 12(%r2),%r5 + xor (%r6),%r0 + xor 4(%r6),%r1 + xor 8(%r6),%r4 + xor 12(%r6),%r5 + + sub $8,%esp // space for register saves on stack + add $16,%r6 // increment to next round key + sub $10,%r3 + je 4f // 10 rounds for 128-bit key + add $32,%r6 + sub $2,%r3 + je 3f // 12 rounds for 128-bit key + add $32,%r6 + +2: fwd_rnd( -64(%r6) ,ft_tab) // 14 rounds for 128-bit key + fwd_rnd( -48(%r6) ,ft_tab) +3: fwd_rnd( -32(%r6) ,ft_tab) // 12 rounds for 128-bit key + fwd_rnd( -16(%r6) ,ft_tab) +4: fwd_rnd( (%r6) ,ft_tab) // 10 rounds for 128-bit key + fwd_rnd( +16(%r6) ,ft_tab) + fwd_rnd( +32(%r6) ,ft_tab) + fwd_rnd( +48(%r6) ,ft_tab) + fwd_rnd( +64(%r6) ,ft_tab) + fwd_rnd( +80(%r6) ,ft_tab) + fwd_rnd( +96(%r6) ,ft_tab) + fwd_rnd(+112(%r6) ,ft_tab) + fwd_rnd(+128(%r6) ,ft_tab) + fwd_rnd(+144(%r6) ,fl_tab) // last round uses a different table + +// move final values to the output array. 
CAUTION: the +// order of these assigns rely on the register mappings + + add $8,%esp + mov out_blk+12(%esp),%r6 + mov %r5,12(%r6) + pop %edi + mov %r4,8(%r6) + pop %esi + mov %r1,4(%r6) + pop %ebx + mov %r0,(%r6) + pop %ebp + mov $1,%eax + ret + +// AES (Rijndael) Decryption Subroutine + +.global aes_dec_blk + +.extern it_tab +.extern il_tab + +.align 4 + +aes_dec_blk: + push %ebp + mov ctx(%esp),%ebp // pointer to context + xor %eax,%eax + +// CAUTION: the order and the values used in these assigns +// rely on the register mappings + +1: push %ebx + mov in_blk+4(%esp),%r2 + push %esi + mov nrnd(%ebp),%r3 // number of rounds + push %edi + lea dkey(%ebp),%r6 // key pointer + mov %r3,%r0 + shl $4,%r0 + add %r0,%r6 + +// input four columns and xor in first round key + + mov (%r2),%r0 + mov 4(%r2),%r1 + mov 8(%r2),%r4 + mov 12(%r2),%r5 + xor (%r6),%r0 + xor 4(%r6),%r1 + xor 8(%r6),%r4 + xor 12(%r6),%r5 + + sub $8,%esp // space for register saves on stack + sub $16,%r6 // increment to next round key + sub $10,%r3 + je 4f // 10 rounds for 128-bit key + sub $32,%r6 + sub $2,%r3 + je 3f // 12 rounds for 128-bit key + sub $32,%r6 + +2: inv_rnd( +64(%r6), it_tab) // 14 rounds for 128-bit key + inv_rnd( +48(%r6), it_tab) +3: inv_rnd( +32(%r6), it_tab) // 12 rounds for 128-bit key + inv_rnd( +16(%r6), it_tab) +4: inv_rnd( (%r6), it_tab) // 10 rounds for 128-bit key + inv_rnd( -16(%r6), it_tab) + inv_rnd( -32(%r6), it_tab) + inv_rnd( -48(%r6), it_tab) + inv_rnd( -64(%r6), it_tab) + inv_rnd( -80(%r6), it_tab) + inv_rnd( -96(%r6), it_tab) + inv_rnd(-112(%r6), it_tab) + inv_rnd(-128(%r6), it_tab) + inv_rnd(-144(%r6), il_tab) // last round uses a different table + +// move final values to the output array. CAUTION: the +// order of these assigns rely on the register mappings + + add $8,%esp + mov out_blk+12(%esp),%r6 + mov %r5,12(%r6) + pop %edi + mov %r4,8(%r6) + pop %esi + mov %r1,4(%r6) + pop %ebx + mov %r0,(%r6) + pop %ebp + mov $1,%eax + ret + diff --git a/arch/i386/crypto/aes.c b/arch/i386/crypto/aes.c new file mode 100644 index 000000000..5a34ee9e4 --- /dev/null +++ b/arch/i386/crypto/aes.c @@ -0,0 +1,520 @@ +/* + * + * Glue Code for optimized 586 assembler version of AES + * + * Copyright (c) 2002, Dr Brian Gladman <>, Worcester, UK. + * All rights reserved. + * + * LICENSE TERMS + * + * The free distribution and use of this software in both source and binary + * form is allowed (with or without changes) provided that: + * + * 1. distributions of this source code include the above copyright + * notice, this list of conditions and the following disclaimer; + * + * 2. distributions in binary form include the above copyright + * notice, this list of conditions and the following disclaimer + * in the documentation and/or other associated materials; + * + * 3. the copyright holder's name is not used to endorse products + * built using this software without specific written permission. + * + * ALTERNATIVELY, provided that this notice is retained in full, this product + * may be distributed under the terms of the GNU General Public License (GPL), + * in which case the provisions of the GPL apply INSTEAD OF those given above. + * + * DISCLAIMER + * + * This software is provided 'as is' with no explicit or implied warranties + * in respect of its properties, including, but not limited to, correctness + * and/or fitness for purpose. + * + * Copyright (c) 2003, Adam J. Richter (conversion to + * 2.5 API). 
+ * Copyright (c) 2003, 2004 Fruhwirth Clemens + * Copyright (c) 2004 Red Hat, Inc., James Morris + * + */ +#include +#include +#include +#include +#include +#include + +asmlinkage void aes_enc_blk(const u8 *src, u8 *dst, void *ctx); +asmlinkage void aes_dec_blk(const u8 *src, u8 *dst, void *ctx); + +#define AES_MIN_KEY_SIZE 16 +#define AES_MAX_KEY_SIZE 32 +#define AES_BLOCK_SIZE 16 +#define AES_KS_LENGTH 4 * AES_BLOCK_SIZE +#define RC_LENGTH 29 + +struct aes_ctx { + u32 ekey[AES_KS_LENGTH]; + u32 rounds; + u32 dkey[AES_KS_LENGTH]; +}; + +#define WPOLY 0x011b +#define u32_in(x) le32_to_cpu(*(const u32 *)(x)) +#define bytes2word(b0, b1, b2, b3) \ + (((u32)(b3) << 24) | ((u32)(b2) << 16) | ((u32)(b1) << 8) | (b0)) + +/* define the finite field multiplies required for Rijndael */ +#define f2(x) ((x) ? pow[log[x] + 0x19] : 0) +#define f3(x) ((x) ? pow[log[x] + 0x01] : 0) +#define f9(x) ((x) ? pow[log[x] + 0xc7] : 0) +#define fb(x) ((x) ? pow[log[x] + 0x68] : 0) +#define fd(x) ((x) ? pow[log[x] + 0xee] : 0) +#define fe(x) ((x) ? pow[log[x] + 0xdf] : 0) +#define fi(x) ((x) ? pow[255 - log[x]]: 0) + +static inline u32 upr(u32 x, int n) +{ + return (x << 8 * n) | (x >> (32 - 8 * n)); +} + +static inline u8 bval(u32 x, int n) +{ + return x >> 8 * n; +} + +/* The forward and inverse affine transformations used in the S-box */ +#define fwd_affine(x) \ + (w = (u32)x, w ^= (w<<1)^(w<<2)^(w<<3)^(w<<4), 0x63^(u8)(w^(w>>8))) + +#define inv_affine(x) \ + (w = (u32)x, w = (w<<1)^(w<<3)^(w<<6), 0x05^(u8)(w^(w>>8))) + +static u32 rcon_tab[RC_LENGTH]; + +u32 ft_tab[4][256]; +u32 fl_tab[4][256]; +u32 ls_tab[4][256]; +u32 im_tab[4][256]; +u32 il_tab[4][256]; +u32 it_tab[4][256]; + +void gen_tabs(void) +{ + u32 i, w; + u8 pow[512], log[256]; + + /* + * log and power tables for GF(2^8) finite field with + * WPOLY as modular polynomial - the simplest primitive + * root is 0x03, used here to generate the tables. + */ + i = 0; w = 1; + + do { + pow[i] = (u8)w; + pow[i + 255] = (u8)w; + log[w] = (u8)i++; + w ^= (w << 1) ^ (w & 0x80 ? 
WPOLY : 0); + } while (w != 1); + + for(i = 0, w = 1; i < RC_LENGTH; ++i) { + rcon_tab[i] = bytes2word(w, 0, 0, 0); + w = f2(w); + } + + for(i = 0; i < 256; ++i) { + u8 b; + + b = fwd_affine(fi((u8)i)); + w = bytes2word(f2(b), b, b, f3(b)); + + /* tables for a normal encryption round */ + ft_tab[0][i] = w; + ft_tab[1][i] = upr(w, 1); + ft_tab[2][i] = upr(w, 2); + ft_tab[3][i] = upr(w, 3); + w = bytes2word(b, 0, 0, 0); + + /* + * tables for last encryption round + * (may also be used in the key schedule) + */ + fl_tab[0][i] = w; + fl_tab[1][i] = upr(w, 1); + fl_tab[2][i] = upr(w, 2); + fl_tab[3][i] = upr(w, 3); + + /* + * table for key schedule if fl_tab above is + * not of the required form + */ + ls_tab[0][i] = w; + ls_tab[1][i] = upr(w, 1); + ls_tab[2][i] = upr(w, 2); + ls_tab[3][i] = upr(w, 3); + + b = fi(inv_affine((u8)i)); + w = bytes2word(fe(b), f9(b), fd(b), fb(b)); + + /* tables for the inverse mix column operation */ + im_tab[0][b] = w; + im_tab[1][b] = upr(w, 1); + im_tab[2][b] = upr(w, 2); + im_tab[3][b] = upr(w, 3); + + /* tables for a normal decryption round */ + it_tab[0][i] = w; + it_tab[1][i] = upr(w,1); + it_tab[2][i] = upr(w,2); + it_tab[3][i] = upr(w,3); + + w = bytes2word(b, 0, 0, 0); + + /* tables for last decryption round */ + il_tab[0][i] = w; + il_tab[1][i] = upr(w,1); + il_tab[2][i] = upr(w,2); + il_tab[3][i] = upr(w,3); + } +} + +#define four_tables(x,tab,vf,rf,c) \ +( tab[0][bval(vf(x,0,c),rf(0,c))] ^ \ + tab[1][bval(vf(x,1,c),rf(1,c))] ^ \ + tab[2][bval(vf(x,2,c),rf(2,c))] ^ \ + tab[3][bval(vf(x,3,c),rf(3,c))] \ +) + +#define vf1(x,r,c) (x) +#define rf1(r,c) (r) +#define rf2(r,c) ((r-c)&3) + +#define inv_mcol(x) four_tables(x,im_tab,vf1,rf1,0) +#define ls_box(x,c) four_tables(x,fl_tab,vf1,rf2,c) + +#define ff(x) inv_mcol(x) + +#define ke4(k,i) \ +{ \ + k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i]; \ + k[4*(i)+5] = ss[1] ^= ss[0]; \ + k[4*(i)+6] = ss[2] ^= ss[1]; \ + k[4*(i)+7] = ss[3] ^= ss[2]; \ +} + +#define kel4(k,i) \ +{ \ + k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ rcon_tab[i]; \ + k[4*(i)+5] = ss[1] ^= ss[0]; \ + k[4*(i)+6] = ss[2] ^= ss[1]; k[4*(i)+7] = ss[3] ^= ss[2]; \ +} + +#define ke6(k,i) \ +{ \ + k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \ + k[6*(i)+ 7] = ss[1] ^= ss[0]; \ + k[6*(i)+ 8] = ss[2] ^= ss[1]; \ + k[6*(i)+ 9] = ss[3] ^= ss[2]; \ + k[6*(i)+10] = ss[4] ^= ss[3]; \ + k[6*(i)+11] = ss[5] ^= ss[4]; \ +} + +#define kel6(k,i) \ +{ \ + k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \ + k[6*(i)+ 7] = ss[1] ^= ss[0]; \ + k[6*(i)+ 8] = ss[2] ^= ss[1]; \ + k[6*(i)+ 9] = ss[3] ^= ss[2]; \ +} + +#define ke8(k,i) \ +{ \ + k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \ + k[8*(i)+ 9] = ss[1] ^= ss[0]; \ + k[8*(i)+10] = ss[2] ^= ss[1]; \ + k[8*(i)+11] = ss[3] ^= ss[2]; \ + k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0); \ + k[8*(i)+13] = ss[5] ^= ss[4]; \ + k[8*(i)+14] = ss[6] ^= ss[5]; \ + k[8*(i)+15] = ss[7] ^= ss[6]; \ +} + +#define kel8(k,i) \ +{ \ + k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \ + k[8*(i)+ 9] = ss[1] ^= ss[0]; \ + k[8*(i)+10] = ss[2] ^= ss[1]; \ + k[8*(i)+11] = ss[3] ^= ss[2]; \ +} + +#define kdf4(k,i) \ +{ \ + ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3]; \ + ss[1] = ss[1] ^ ss[3]; \ + ss[2] = ss[2] ^ ss[3]; \ + ss[3] = ss[3]; \ + ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \ + ss[i % 4] ^= ss[4]; \ + ss[4] ^= k[4*(i)]; \ + k[4*(i)+4] = ff(ss[4]); \ + ss[4] ^= k[4*(i)+1]; \ + k[4*(i)+5] = ff(ss[4]); \ + ss[4] ^= k[4*(i)+2]; \ + k[4*(i)+6] = ff(ss[4]); \ + ss[4] ^= k[4*(i)+3]; \ + k[4*(i)+7] = ff(ss[4]); 
\ +} + +#define kd4(k,i) \ +{ \ + ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \ + ss[i % 4] ^= ss[4]; \ + ss[4] = ff(ss[4]); \ + k[4*(i)+4] = ss[4] ^= k[4*(i)]; \ + k[4*(i)+5] = ss[4] ^= k[4*(i)+1]; \ + k[4*(i)+6] = ss[4] ^= k[4*(i)+2]; \ + k[4*(i)+7] = ss[4] ^= k[4*(i)+3]; \ +} + +#define kdl4(k,i) \ +{ \ + ss[4] = ls_box(ss[(i+3) % 4], 3) ^ rcon_tab[i]; \ + ss[i % 4] ^= ss[4]; \ + k[4*(i)+4] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3]; \ + k[4*(i)+5] = ss[1] ^ ss[3]; \ + k[4*(i)+6] = ss[0]; \ + k[4*(i)+7] = ss[1]; \ +} + +#define kdf6(k,i) \ +{ \ + ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \ + k[6*(i)+ 6] = ff(ss[0]); \ + ss[1] ^= ss[0]; \ + k[6*(i)+ 7] = ff(ss[1]); \ + ss[2] ^= ss[1]; \ + k[6*(i)+ 8] = ff(ss[2]); \ + ss[3] ^= ss[2]; \ + k[6*(i)+ 9] = ff(ss[3]); \ + ss[4] ^= ss[3]; \ + k[6*(i)+10] = ff(ss[4]); \ + ss[5] ^= ss[4]; \ + k[6*(i)+11] = ff(ss[5]); \ +} + +#define kd6(k,i) \ +{ \ + ss[6] = ls_box(ss[5],3) ^ rcon_tab[i]; \ + ss[0] ^= ss[6]; ss[6] = ff(ss[6]); \ + k[6*(i)+ 6] = ss[6] ^= k[6*(i)]; \ + ss[1] ^= ss[0]; \ + k[6*(i)+ 7] = ss[6] ^= k[6*(i)+ 1]; \ + ss[2] ^= ss[1]; \ + k[6*(i)+ 8] = ss[6] ^= k[6*(i)+ 2]; \ + ss[3] ^= ss[2]; \ + k[6*(i)+ 9] = ss[6] ^= k[6*(i)+ 3]; \ + ss[4] ^= ss[3]; \ + k[6*(i)+10] = ss[6] ^= k[6*(i)+ 4]; \ + ss[5] ^= ss[4]; \ + k[6*(i)+11] = ss[6] ^= k[6*(i)+ 5]; \ +} + +#define kdl6(k,i) \ +{ \ + ss[0] ^= ls_box(ss[5],3) ^ rcon_tab[i]; \ + k[6*(i)+ 6] = ss[0]; \ + ss[1] ^= ss[0]; \ + k[6*(i)+ 7] = ss[1]; \ + ss[2] ^= ss[1]; \ + k[6*(i)+ 8] = ss[2]; \ + ss[3] ^= ss[2]; \ + k[6*(i)+ 9] = ss[3]; \ +} + +#define kdf8(k,i) \ +{ \ + ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \ + k[8*(i)+ 8] = ff(ss[0]); \ + ss[1] ^= ss[0]; \ + k[8*(i)+ 9] = ff(ss[1]); \ + ss[2] ^= ss[1]; \ + k[8*(i)+10] = ff(ss[2]); \ + ss[3] ^= ss[2]; \ + k[8*(i)+11] = ff(ss[3]); \ + ss[4] ^= ls_box(ss[3],0); \ + k[8*(i)+12] = ff(ss[4]); \ + ss[5] ^= ss[4]; \ + k[8*(i)+13] = ff(ss[5]); \ + ss[6] ^= ss[5]; \ + k[8*(i)+14] = ff(ss[6]); \ + ss[7] ^= ss[6]; \ + k[8*(i)+15] = ff(ss[7]); \ +} + +#define kd8(k,i) \ +{ \ + u32 __g = ls_box(ss[7],3) ^ rcon_tab[i]; \ + ss[0] ^= __g; \ + __g = ff(__g); \ + k[8*(i)+ 8] = __g ^= k[8*(i)]; \ + ss[1] ^= ss[0]; \ + k[8*(i)+ 9] = __g ^= k[8*(i)+ 1]; \ + ss[2] ^= ss[1]; \ + k[8*(i)+10] = __g ^= k[8*(i)+ 2]; \ + ss[3] ^= ss[2]; \ + k[8*(i)+11] = __g ^= k[8*(i)+ 3]; \ + __g = ls_box(ss[3],0); \ + ss[4] ^= __g; \ + __g = ff(__g); \ + k[8*(i)+12] = __g ^= k[8*(i)+ 4]; \ + ss[5] ^= ss[4]; \ + k[8*(i)+13] = __g ^= k[8*(i)+ 5]; \ + ss[6] ^= ss[5]; \ + k[8*(i)+14] = __g ^= k[8*(i)+ 6]; \ + ss[7] ^= ss[6]; \ + k[8*(i)+15] = __g ^= k[8*(i)+ 7]; \ +} + +#define kdl8(k,i) \ +{ \ + ss[0] ^= ls_box(ss[7],3) ^ rcon_tab[i]; \ + k[8*(i)+ 8] = ss[0]; \ + ss[1] ^= ss[0]; \ + k[8*(i)+ 9] = ss[1]; \ + ss[2] ^= ss[1]; \ + k[8*(i)+10] = ss[2]; \ + ss[3] ^= ss[2]; \ + k[8*(i)+11] = ss[3]; \ +} + +static int +aes_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) +{ + int i; + u32 ss[8]; + struct aes_ctx *ctx = ctx_arg; + + /* encryption schedule */ + + ctx->ekey[0] = ss[0] = u32_in(in_key); + ctx->ekey[1] = ss[1] = u32_in(in_key + 4); + ctx->ekey[2] = ss[2] = u32_in(in_key + 8); + ctx->ekey[3] = ss[3] = u32_in(in_key + 12); + + switch(key_len) { + case 16: + for (i = 0; i < 9; i++) + ke4(ctx->ekey, i); + kel4(ctx->ekey, 9); + ctx->rounds = 10; + break; + + case 24: + ctx->ekey[4] = ss[4] = u32_in(in_key + 16); + ctx->ekey[5] = ss[5] = u32_in(in_key + 20); + for (i = 0; i < 7; i++) + ke6(ctx->ekey, i); + kel6(ctx->ekey, 7); + ctx->rounds = 12; + break; + + case 32: + 
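+		/* 32-byte (256-bit) key: the remaining four key words are
+		 * loaded below (eight in total), then six ke8() steps plus the
+		 * kel8() tail yield the 14-round schedule, versus 10 rounds for
+		 * the 16-byte and 12 rounds for the 24-byte cases above. */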
ctx->ekey[4] = ss[4] = u32_in(in_key + 16); + ctx->ekey[5] = ss[5] = u32_in(in_key + 20); + ctx->ekey[6] = ss[6] = u32_in(in_key + 24); + ctx->ekey[7] = ss[7] = u32_in(in_key + 28); + for (i = 0; i < 6; i++) + ke8(ctx->ekey, i); + kel8(ctx->ekey, 6); + ctx->rounds = 14; + break; + + default: + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + /* decryption schedule */ + + ctx->dkey[0] = ss[0] = u32_in(in_key); + ctx->dkey[1] = ss[1] = u32_in(in_key + 4); + ctx->dkey[2] = ss[2] = u32_in(in_key + 8); + ctx->dkey[3] = ss[3] = u32_in(in_key + 12); + + switch (key_len) { + case 16: + kdf4(ctx->dkey, 0); + for (i = 1; i < 9; i++) + kd4(ctx->dkey, i); + kdl4(ctx->dkey, 9); + break; + + case 24: + ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16)); + ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20)); + kdf6(ctx->dkey, 0); + for (i = 1; i < 7; i++) + kd6(ctx->dkey, i); + kdl6(ctx->dkey, 7); + break; + + case 32: + ctx->dkey[4] = ff(ss[4] = u32_in(in_key + 16)); + ctx->dkey[5] = ff(ss[5] = u32_in(in_key + 20)); + ctx->dkey[6] = ff(ss[6] = u32_in(in_key + 24)); + ctx->dkey[7] = ff(ss[7] = u32_in(in_key + 28)); + kdf8(ctx->dkey, 0); + for (i = 1; i < 6; i++) + kd8(ctx->dkey, i); + kdl8(ctx->dkey, 6); + break; + } + return 0; +} + +static inline void aes_encrypt(void *ctx, u8 *dst, const u8 *src) +{ + aes_enc_blk(src, dst, ctx); +} +static inline void aes_decrypt(void *ctx, u8 *dst, const u8 *src) +{ + aes_dec_blk(src, dst, ctx); +} + + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(aes_alg.cra_list), + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = aes_encrypt, + .cia_decrypt = aes_decrypt + } + } +}; + +static int __init aes_init(void) +{ + gen_tabs(); + return crypto_register_alg(&aes_alg); +} + +static void __exit aes_fini(void) +{ + crypto_unregister_alg(&aes_alg); +} + +module_init(aes_init); +module_exit(aes_fini); + +MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, i586 asm optimized"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Fruhwirth Clemens, James Morris, Brian Gladman, Adam Richter"); +MODULE_ALIAS("aes"); diff --git a/arch/i386/lib/bitops.c b/arch/i386/lib/bitops.c new file mode 100644 index 000000000..97db3853d --- /dev/null +++ b/arch/i386/lib/bitops.c @@ -0,0 +1,70 @@ +#include +#include + +/** + * find_next_bit - find the first set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +int find_next_bit(const unsigned long *addr, int size, int offset) +{ + const unsigned long *p = addr + (offset >> 5); + int set = 0, bit = offset & 31, res; + + if (bit) { + /* + * Look for nonzero in the first 32 bits: + */ + __asm__("bsfl %1,%0\n\t" + "jne 1f\n\t" + "movl $32, %0\n" + "1:" + : "=r" (set) + : "r" (*p >> bit)); + if (set < (32 - bit)) + return set + offset; + set = 32 - bit; + p++; + } + /* + * No set bit yet, search remaining full words for a bit + */ + res = find_first_bit (p, size - 32 * (p - addr)); + return (offset + set + res); +} +EXPORT_SYMBOL(find_next_bit); + +/** + * find_next_zero_bit - find the first zero bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +int 
find_next_zero_bit(const unsigned long *addr, int size, int offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 5); + int set = 0, bit = offset & 31, res; + + if (bit) { + /* + * Look for zero in the first 32 bits. + */ + __asm__("bsfl %1,%0\n\t" + "jne 1f\n\t" + "movl $32, %0\n" + "1:" + : "=r" (set) + : "r" (~(*p >> bit))); + if (set < (32 - bit)) + return set + offset; + set = 32 - bit; + p++; + } + /* + * No zero yet, search remaining full bytes for a zero + */ + res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr)); + return (offset + set + res); +} +EXPORT_SYMBOL(find_next_zero_bit); diff --git a/arch/i386/mach-generic/es7000.c b/arch/i386/mach-generic/es7000.c new file mode 100644 index 000000000..48d3ec372 --- /dev/null +++ b/arch/i386/mach-generic/es7000.c @@ -0,0 +1,28 @@ +/* + * APIC driver for the Unisys ES7000 chipset. + */ +#define APIC_DEFINITION 1 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static __init int probe_es7000(void) +{ + /* probed later in mptable/ACPI hooks */ + return 0; +} + +struct genapic apic_es7000 = APIC_INIT("es7000", probe_es7000); diff --git a/arch/ia64/configs/sim_defconfig b/arch/ia64/configs/sim_defconfig new file mode 100644 index 000000000..5744b59f1 --- /dev/null +++ b/arch/ia64/configs/sim_defconfig @@ -0,0 +1,535 @@ +# +# Automatically generated make config: don't edit +# + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +# CONFIG_CLEAN_COMPILE is not set +# CONFIG_STANDALONE is not set +CONFIG_BROKEN=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=16 +# CONFIG_HOTPLUG is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_OBSOLETE_MODPARM=y +CONFIG_MODVERSIONS=y +CONFIG_KMOD=y +CONFIG_STOP_MACHINE=y + +# +# Processor type and features +# +CONFIG_IA64=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_TIME_INTERPOLATION=y +CONFIG_EFI=y +# CONFIG_IA64_GENERIC is not set +# CONFIG_IA64_DIG is not set +# CONFIG_IA64_HP_ZX1 is not set +# CONFIG_IA64_SGI_SN2 is not set +CONFIG_IA64_HP_SIM=y +# CONFIG_ITANIUM is not set +CONFIG_MCKINLEY=y +# CONFIG_IA64_PAGE_SIZE_4KB is not set +# CONFIG_IA64_PAGE_SIZE_8KB is not set +# CONFIG_IA64_PAGE_SIZE_16KB is not set +CONFIG_IA64_PAGE_SIZE_64KB=y +CONFIG_IA64_L1_CACHE_SHIFT=7 +# CONFIG_MCKINLEY_ASTEP_SPECIFIC is not set +# CONFIG_VIRTUAL_MEM_MAP is not set +# CONFIG_IA64_CYCLONE is not set +CONFIG_FORCE_MAX_ZONEORDER=18 +CONFIG_SMP=y +CONFIG_NR_CPUS=64 +CONFIG_PREEMPT=y +CONFIG_HAVE_DEC_LOCK=y +CONFIG_IA32_SUPPORT=y +CONFIG_COMPAT=y +# CONFIG_PERFMON is not set +CONFIG_IA64_PALINFO=m + +# +# Firmware Drivers +# +CONFIG_EFI_VARS=y +# CONFIG_SMBIOS is not set +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_MISC=y + +# +# Power management and ACPI +# + +# +# Device Drivers +# + +# +# Generic Driver Options +# +# CONFIG_DEBUG_DRIVER is not set + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set 
+ +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_INITRD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +# CONFIG_CHR_DEV_ST is not set +# CONFIG_CHR_DEV_OSST is not set +# CONFIG_BLK_DEV_SR is not set +# CONFIG_CHR_DEV_SG is not set + +# +# Some SCSI devices (e.g. CD jukebox) support multiple LUNs +# +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y + +# +# SCSI Transport Attributes +# +CONFIG_SCSI_SPI_ATTRS=y +# CONFIG_SCSI_FC_ATTRS is not set + +# +# SCSI low-level drivers +# +# CONFIG_SCSI_AIC7XXX_OLD is not set +# CONFIG_SCSI_SATA is not set +# CONFIG_SCSI_EATA_PIO is not set +# CONFIG_SCSI_DEBUG is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +# CONFIG_UNIX is not set +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_NETDEVICES is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y 
+CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +CONFIG_EFI_RTC=y +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_FS_XATTR is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +# CONFIG_TMPFS is not set +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +CONFIG_NFS_DIRECTIO=y +CONFIG_NFSD=y +CONFIG_NFSD_V3=y +# CONFIG_NFSD_V4 is not set +# CONFIG_NFSD_TCP is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +# CONFIG_BSD_DISKLABEL is not set +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +CONFIG_EFI_PARTITION=y + +# +# Native Language Support +# +# CONFIG_NLS is not set + 
+# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set + +# +# HP Simulator drivers +# +CONFIG_HP_SIMETH=y +CONFIG_HP_SIMSERIAL=y +CONFIG_HP_SIMSERIAL_CONSOLE=y +CONFIG_HP_SIMSCSI=y + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_IA64_GRANULE_16MB is not set +CONFIG_IA64_GRANULE_64MB=y +CONFIG_DEBUG_KERNEL=y +# CONFIG_IA64_PRINT_HAZARDS is not set +# CONFIG_DISABLE_VHPT is not set +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set +# CONFIG_IA64_DEBUG_CMPXCHG is not set +# CONFIG_IA64_DEBUG_IRQ is not set +CONFIG_DEBUG_INFO=y +CONFIG_SYSVIPC_COMPAT=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set diff --git a/arch/ia64/dig/topology.c b/arch/ia64/dig/topology.c new file mode 100644 index 000000000..8dc31378b --- /dev/null +++ b/arch/ia64/dig/topology.c @@ -0,0 +1,43 @@ +/* + * arch/ia64/dig/topology.c + * Popuate driverfs with topology information. + * Derived entirely from i386/mach-default.c + * Intel Corporation - Ashok Raj + */ +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_PER_CPU(struct ia64_cpu, cpu_devices); + +/* + * First Pass: simply borrowed code for now. Later should hook into + * hotplug notification for node/cpu/memory as applicable + */ + +static int arch_register_cpu(int num) +{ + struct node *parent = NULL; + +#ifdef CONFIG_NUMA + //parent = &node_devices[cpu_to_node(num)].node; +#endif + + return register_cpu(&per_cpu(cpu_devices,num).cpu, num, parent); +} + +static int __init topology_init(void) +{ + int i; + + for_each_cpu(i) { + arch_register_cpu(i); + } + return 0; +} + +subsys_initcall(topology_init); diff --git a/arch/ia64/lib/bitop.c b/arch/ia64/lib/bitop.c new file mode 100644 index 000000000..1c6ee49fd --- /dev/null +++ b/arch/ia64/lib/bitop.c @@ -0,0 +1,88 @@ +#include +#include +#include +#include +#include + +/* + * Find next zero bit in a bitmap reasonably efficiently.. + */ + +int __find_next_zero_bit (void *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = ((unsigned long *) addr) + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (64-offset); + if (size < 64) + goto found_first; + if (~tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if (~(tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* any bits zero? */ + return result + size; /* nope */ +found_middle: + return result + ffz(tmp); +} +EXPORT_SYMBOL(__find_next_zero_bit); + +/* + * Find next bit in a bitmap reasonably efficiently.. 
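+ *
+ * Worked example of the index arithmetic below (illustrative values
+ * only): for offset = 70 the scan starts at p = addr + (70 >> 6) =
+ * addr + 1, result = 70 & ~63 = 64, and offset &= 63 leaves 6, so the
+ * low six bits of that first 64-bit word are masked off before it is
+ * examined.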
+ */ +int __find_next_bit(const void *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = ((unsigned long *) addr) + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp &= ~0UL << offset; + if (size < 64) + goto found_first; + if (tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if ((tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; + found_first: + tmp &= ~0UL >> (64-size); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ + found_middle: + return result + __ffs(tmp); +} +EXPORT_SYMBOL(__find_next_bit); diff --git a/arch/mips/au1000/common/cputable.c b/arch/mips/au1000/common/cputable.c new file mode 100644 index 000000000..26744b317 --- /dev/null +++ b/arch/mips/au1000/common/cputable.c @@ -0,0 +1,56 @@ +/* + * arch/mips/au1000/common/cputable.c + * + * Copyright (C) 2004 Dan Malek (dan@embeddededge.com) + * Copied from PowerPC and updated for Alchemy Au1xxx processors. + * + * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +struct cpu_spec* cur_cpu_spec[NR_CPUS]; + +/* With some thought, we can probably use the mask to reduce the + * size of the table. + */ +struct cpu_spec cpu_specs[] = { + { 0xffffffff, 0x00030100, "Au1000 DA", 1, 0 }, + { 0xffffffff, 0x00030201, "Au1000 HA", 1, 0 }, + { 0xffffffff, 0x00030202, "Au1000 HB", 1, 0 }, + { 0xffffffff, 0x00030203, "Au1000 HC", 1, 1 }, + { 0xffffffff, 0x00030204, "Au1000 HD", 1, 1 }, + { 0xffffffff, 0x01030200, "Au1500 AB", 1, 1 }, + { 0xffffffff, 0x01030201, "Au1500 AC", 0, 1 }, + { 0xffffffff, 0x01030202, "Au1500 AD", 0, 1 }, + { 0xffffffff, 0x02030200, "Au1100 AB", 1, 1 }, + { 0xffffffff, 0x02030201, "Au1100 BA", 1, 1 }, + { 0xffffffff, 0x02030202, "Au1100 BC", 1, 1 }, + { 0xffffffff, 0x02030203, "Au1100 BD", 0, 1 }, + { 0xffffffff, 0x02030204, "Au1100 BE", 0, 1 }, + { 0xffffffff, 0x03030200, "Au1550 AA", 0, 1 }, + { 0x00000000, 0x00000000, "Unknown Au1xxx", 1, 0 }, +}; + +void +set_cpuspec(void) +{ + struct cpu_spec *sp; + u32 prid; + + prid = read_c0_prid(); + sp = cpu_specs; + while ((prid & sp->prid_mask) != sp->prid_value) + sp++; + cur_cpu_spec[0] = sp; +} diff --git a/arch/mips/configs/ocelot_g_defconfig b/arch/mips/configs/ocelot_g_defconfig new file mode 100644 index 000000000..fa2584125 --- /dev/null +++ b/arch/mips/configs/ocelot_g_defconfig @@ -0,0 +1,592 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_MIPS=y +CONFIG_MIPS64=y +CONFIG_64BIT=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y 
+CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Machine selection +# +# CONFIG_MACH_JAZZ is not set +# CONFIG_MACH_VR41XX is not set +# CONFIG_MIPS_COBALT is not set +# CONFIG_MACH_DECSTATION is not set +# CONFIG_MIPS_EV64120 is not set +# CONFIG_MIPS_EV96100 is not set +# CONFIG_MIPS_IVR is not set +# CONFIG_LASAT is not set +# CONFIG_MIPS_ITE8172 is not set +# CONFIG_MIPS_ATLAS is not set +# CONFIG_MIPS_MALTA is not set +# CONFIG_MIPS_SEAD is not set +# CONFIG_MOMENCO_OCELOT is not set +CONFIG_MOMENCO_OCELOT_G=y +# CONFIG_MOMENCO_OCELOT_C is not set +# CONFIG_MOMENCO_JAGUAR_ATX is not set +# CONFIG_PMC_YOSEMITE is not set +# CONFIG_DDB5074 is not set +# CONFIG_DDB5476 is not set +# CONFIG_DDB5477 is not set +# CONFIG_NEC_OSPREY is not set +# CONFIG_SGI_IP22 is not set +# CONFIG_SGI_IP27 is not set +# CONFIG_SGI_IP32 is not set +# CONFIG_SIBYTE_SB1xxx_SOC is not set +# CONFIG_SNI_RM200_PCI is not set +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_HAVE_DEC_LOCK=y +CONFIG_DMA_NONCOHERENT=y +# CONFIG_CPU_LITTLE_ENDIAN is not set +CONFIG_IRQ_CPU=y +CONFIG_IRQ_CPU_RM7K=y +CONFIG_PCI_MARVELL=y +CONFIG_SWAP_IO_SPACE=y +# CONFIG_SYSCLK_75 is not set +# CONFIG_SYSCLK_83 is not set +CONFIG_SYSCLK_100=y +CONFIG_MIPS_L1_CACHE_SHIFT=5 +# CONFIG_FB is not set + +# +# CPU selection +# +# CONFIG_CPU_MIPS32 is not set +# CONFIG_CPU_MIPS64 is not set +# CONFIG_CPU_R3000 is not set +# CONFIG_CPU_TX39XX is not set +# CONFIG_CPU_VR41XX is not set +# CONFIG_CPU_R4300 is not set +# CONFIG_CPU_R4X00 is not set +# CONFIG_CPU_TX49XX is not set +# CONFIG_CPU_R5000 is not set +# CONFIG_CPU_R5432 is not set +# CONFIG_CPU_R6000 is not set +# CONFIG_CPU_NEVADA is not set +# CONFIG_CPU_R8000 is not set +# CONFIG_CPU_R10000 is not set +CONFIG_CPU_RM7000=y +# CONFIG_CPU_RM9000 is not set +# CONFIG_CPU_SB1 is not set +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_BOARD_SCACHE=y +CONFIG_RM7000_CPU_SCACHE=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_CPU_HAS_LLSC=y +CONFIG_CPU_HAS_LLDSCD=y +CONFIG_CPU_HAS_SYNC=y +# CONFIG_PREEMPT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set + +# +# Bus options (PCI, PCMCIA, EISA, ISA, TC) +# +CONFIG_HW_HAS_PCI=y +CONFIG_PCI=y +CONFIG_PCI_LEGACY_PROC=y +CONFIG_PCI_NAMES=y +CONFIG_MMU=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +CONFIG_MIPS32_COMPAT=y +CONFIG_COMPAT=y +CONFIG_MIPS32_O32=y +CONFIG_MIPS32_N32=y +CONFIG_BINFMT_ELF32=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_SX8 is not set +# CONFIG_BLK_DEV_RAM is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set 
+ +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_PACKET is not set +CONFIG_NETLINK_DEV=y +CONFIG_UNIX=y +CONFIG_NET_KEY=y +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_ETHERTAP is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_GALILEO_64240_ETH=y +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set + +# +# Tulip family network device support +# +# CONFIG_NET_TULIP is not set +# CONFIG_HP100 is not set +# CONFIG_NET_PCI is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# 
CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +CONFIG_DEVPTS_FS_XATTR=y +CONFIG_DEVPTS_FS_SECURITY=y +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +CONFIG_NFSD=y +# CONFIG_NFSD_V3 is not set +# CONFIG_NFSD_TCP is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_EXPORTFS=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Kernel hacking +# +CONFIG_CROSSCOMPILE=y +CONFIG_CMDLINE="" +# CONFIG_DEBUG_KERNEL is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC16=y +# 
CONFIG_CRC32 is not set +# CONFIG_LIBCRC32C is not set diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c new file mode 100644 index 000000000..581687080 --- /dev/null +++ b/arch/mips/kernel/module.c @@ -0,0 +1,53 @@ +#include +#include + +static LIST_HEAD(dbe_list); +static spinlock_t dbe_lock = SPIN_LOCK_UNLOCKED; + +/* Given an address, look for it in the module exception tables. */ +const struct exception_table_entry *search_module_dbetables(unsigned long addr) +{ + unsigned long flags; + const struct exception_table_entry *e = NULL; + struct mod_arch_specific *dbe; + + spin_lock_irqsave(&dbe_lock, flags); + list_for_each_entry(dbe, &dbe_list, dbe_list) { + e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr); + if (e) + break; + } + spin_unlock_irqrestore(&dbe_lock, flags); + + /* Now, if we found one, we are running inside it now, hence + we cannot unload the module, hence no refcnt needed. */ + return e; +} + +/* Put in dbe list if neccessary. */ +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + const Elf_Shdr *s; + char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + INIT_LIST_HEAD(&me->arch.dbe_list); + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + if (strcmp("__dbe_table", secstrings + s->sh_name) != 0) + continue; + me->arch.dbe_start = (void *)s->sh_addr; + me->arch.dbe_end = (void *)s->sh_addr + s->sh_size; + spin_lock_irq(&dbe_lock); + list_add(&me->arch.dbe_list, &dbe_list); + spin_unlock_irq(&dbe_lock); + } + return 0; +} + +void module_arch_cleanup(struct module *mod) +{ + spin_lock_irq(&dbe_lock); + list_del(&mod->arch.dbe_list); + spin_unlock_irq(&dbe_lock); +} diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c new file mode 100644 index 000000000..daac80ea3 --- /dev/null +++ b/arch/mips/mm/tlb-r8k.c @@ -0,0 +1,253 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org + * Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved. + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +extern void except_vec0_generic(void); +extern void except_vec1_r8k(void); + +#define TFP_TLB_SIZE 384 +#define TFP_TLB_SET_SHIFT 7 + +/* CP0 hazard avoidance. 
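+ * A CP0 write needs a few pipeline stages before a dependent TLB
+ * operation or mfc0 sees the new value; the BARRIER below simply burns
+ * those cycles with nops inside a noreorder block.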
*/ +#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \ + "nop; nop; nop; nop; nop; nop;\n\t" \ + ".set reorder\n\t") + +void local_flush_tlb_all(void) +{ + unsigned long flags; + unsigned long old_ctx; + int entry; + + local_irq_save(flags); + /* Save old context and create impossible VPN2 value */ + old_ctx = read_c0_entryhi(); + write_c0_entrylo(0); + + for (entry = 0; entry < TFP_TLB_SIZE; entry++) { + write_c0_tlbset(entry >> TFP_TLB_SET_SHIFT); + write_c0_vaddr(entry << PAGE_SHIFT); + write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1))); + mtc0_tlbw_hazard(); + tlb_write(); + } + tlbw_use_hazard(); + write_c0_entryhi(old_ctx); + local_irq_restore(flags); +} + +void local_flush_tlb_mm(struct mm_struct *mm) +{ + int cpu = smp_processor_id(); + + if (cpu_context(cpu, mm) != 0) + drop_mmu_context(mm,cpu); +} + +void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + int cpu = smp_processor_id(); + unsigned long flags; + int oldpid, newpid, size; + + if (!cpu_context(cpu, mm)) + return; + + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + size = (size + 1) >> 1; + + local_irq_save(flags); + + if (size > TFP_TLB_SIZE / 2) { + drop_mmu_context(mm, cpu); + goto out_restore; + } + + oldpid = read_c0_entryhi(); + newpid = cpu_asid(cpu, mm); + + write_c0_entrylo(0); + + start &= PAGE_MASK; + end += (PAGE_SIZE - 1); + end &= PAGE_MASK; + while (start < end) { + signed long idx; + + write_c0_vaddr(start); + write_c0_entryhi(start); + start += PAGE_SIZE; + tlb_probe(); + idx = read_c0_tlbset(); + if (idx < 0) + continue; + + write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); + tlb_write(); + } + write_c0_entryhi(oldpid); + +out_restore: + local_irq_restore(flags); +} + +/* Usable for KV1 addresses only! */ +void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + unsigned long flags; + int size; + + size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + size = (size + 1) >> 1; + + if (size > TFP_TLB_SIZE / 2) { + local_flush_tlb_all(); + return; + } + + local_irq_save(flags); + + write_c0_entrylo(0); + + start &= PAGE_MASK; + end += (PAGE_SIZE - 1); + end &= PAGE_MASK; + while (start < end) { + signed long idx; + + write_c0_vaddr(start); + write_c0_entryhi(start); + start += PAGE_SIZE; + tlb_probe(); + idx = read_c0_tlbset(); + if (idx < 0) + continue; + + write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); + tlb_write(); + } + + local_irq_restore(flags); +} + +void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + int cpu = smp_processor_id(); + unsigned long flags; + int oldpid, newpid; + signed long idx; + + if (!cpu_context(cpu, vma->vm_mm)) + return; + + newpid = cpu_asid(cpu, vma->vm_mm); + page &= PAGE_MASK; + local_irq_save(flags); + oldpid = read_c0_entryhi(); + write_c0_vaddr(page); + write_c0_entryhi(newpid); + tlb_probe(); + idx = read_c0_tlbset(); + if (idx < 0) + goto finish; + + write_c0_entrylo(0); + write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1))); + tlb_write(); + +finish: + write_c0_entryhi(oldpid); + local_irq_restore(flags); +} + +/* + * We will need multiple versions of update_mmu_cache(), one that just + * updates the TLB with the new pte(s), and another which also checks + * for the R4k "end of page" hardware bug and does the needy. 
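+ * (The TFP variant below writes a single EntryLo per entry rather than
+ * the even/odd pair the R4k-style handlers load.)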
+ */ +void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) +{ + unsigned long flags; + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + int pid; + + /* + * Handle debugger faulting in for debugee. + */ + if (current->active_mm != vma->vm_mm) + return; + + pid = read_c0_entryhi() & ASID_MASK; + + local_irq_save(flags); + address &= PAGE_MASK; + write_c0_vaddr(address); + write_c0_entryhi(pid); + pgdp = pgd_offset(vma->vm_mm, address); + pmdp = pmd_offset(pgdp, address); + ptep = pte_offset_map(pmdp, address); + tlb_probe(); + + write_c0_entrylo(pte_val(*ptep++) >> 6); + tlb_write(); + + write_c0_entryhi(pid); + local_irq_restore(flags); +} + +static void __init probe_tlb(unsigned long config) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + + c->tlbsize = 3 * 128; /* 3 sets each 128 entries */ +} + +void __init tlb_init(void) +{ + unsigned int config = read_c0_config(); + unsigned long status; + + probe_tlb(config); + + status = read_c0_status(); + status &= ~(ST0_UPS | ST0_KPS); +#ifdef CONFIG_PAGE_SIZE_4KB + status |= (TFP_PAGESIZE_4K << 32) | (TFP_PAGESIZE_4K << 36); +#elif defined(CONFIG_PAGE_SIZE_8KB) + status |= (TFP_PAGESIZE_8K << 32) | (TFP_PAGESIZE_8K << 36); +#elif defined(CONFIG_PAGE_SIZE_16KB) + status |= (TFP_PAGESIZE_16K << 32) | (TFP_PAGESIZE_16K << 36); +#elif defined(CONFIG_PAGE_SIZE_64KB) + status |= (TFP_PAGESIZE_64K << 32) | (TFP_PAGESIZE_64K << 36); +#endif + write_c0_status(status); + + write_c0_wired(0); + + local_flush_tlb_all(); + + memcpy((void *)(CKSEG0 + 0x00), &except_vec0_generic, 0x80); + memcpy((void *)(CKSEG0 + 0x80), except_vec1_r8k, 0x80); + flush_icache_range(CKSEG0 + 0x80, CKSEG0 + 0x100); +} diff --git a/arch/mips/mm/tlb64-glue-r4k.S b/arch/mips/mm/tlb64-glue-r4k.S new file mode 100644 index 000000000..4e0194aa5 --- /dev/null +++ b/arch/mips/mm/tlb64-glue-r4k.S @@ -0,0 +1,41 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999 Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + */ +#include +#include +#include +#include + + .macro __BUILD_cli + CLI + .endm + + .macro __BUILD_sti + STI + .endm + + .macro __BUILD_kmode + KMODE + .endm + + .macro tlb_handler name interruptible writebit + NESTED(__\name, PT_SIZE, sp) + SAVE_ALL + dmfc0 a2, CP0_BADVADDR + __BUILD_\interruptible + li a1, \writebit + sd a2, PT_BVADDR(sp) + move a0, sp + jal do_page_fault + j ret_from_exception + END(__\name) + .endm + + tlb_handler xtlb_mod kmode 1 + tlb_handler xtlb_tlbl kmode 0 + tlb_handler xtlb_tlbs kmode 1 diff --git a/arch/mips/mm/tlb64-glue-sb1.S b/arch/mips/mm/tlb64-glue-sb1.S new file mode 100644 index 000000000..3c236539f --- /dev/null +++ b/arch/mips/mm/tlb64-glue-sb1.S @@ -0,0 +1,66 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1999 Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. 
+ */ +#include +#include +#include +#include +#include +#include + + .macro __BUILD_cli + CLI + .endm + + .macro __BUILD_sti + STI + .endm + + .macro __BUILD_kmode + KMODE + .endm + + .macro tlb_handler name interruptible writebit + NESTED(__\name, PT_SIZE, sp) + SAVE_ALL + dmfc0 a2, CP0_BADVADDR + __BUILD_\interruptible + li a1, \writebit + sd a2, PT_BVADDR(sp) + move a0, sp + jal do_page_fault + j ret_from_exception + END(__\name) + .endm + + .macro tlb_handler_m3 name interruptible writebit + NESTED(__\name, PT_SIZE, sp) + dmfc0 k0, CP0_BADVADDR + dmfc0 k1, CP0_ENTRYHI + xor k0, k1 + dsrl k0, k0, PAGE_SHIFT + 1 + bnez k0, 1f + SAVE_ALL + dmfc0 a2, CP0_BADVADDR + __BUILD_\interruptible + li a1, \writebit + sd a2, PT_BVADDR(sp) + move a0, sp + jal do_page_fault +1: + j ret_from_exception + END(__\name) + .endm + + tlb_handler xtlb_mod kmode 1 +#if BCM1250_M3_WAR + tlb_handler_m3 xtlb_tlbl kmode 0 +#else + tlb_handler xtlb_tlbl kmode 0 +#endif + tlb_handler xtlb_tlbs kmode 1 diff --git a/arch/mips/mm/tlbex32-r3k.S b/arch/mips/mm/tlbex32-r3k.S new file mode 100644 index 000000000..cc4a4642e --- /dev/null +++ b/arch/mips/mm/tlbex32-r3k.S @@ -0,0 +1,224 @@ +/* + * TLB exception handling code for R2000/R3000. + * + * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse + * + * Multi-CPU abstraction reworking: + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + * + * Further modifications to make this work: + * Copyright (c) 1998 Harald Koerfgen + * Copyright (c) 1998, 1999 Gleb Raiko & Vladimir Roganov + * Copyright (c) 2001 Ralf Baechle + * Copyright (c) 2001 MIPS Technologies, Inc. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TLB_OPTIMIZE /* If you are paranoid, disable this. */ + + .text + .set mips1 + .set noreorder + + __INIT + + /* TLB refill, R[23]00 version */ + LEAF(except_vec0_r2300) + .set noat + .set mips1 + mfc0 k0, CP0_BADVADDR + lw k1, pgd_current # get pgd pointer + srl k0, k0, 22 + sll k0, k0, 2 + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) + and k0, k0, 0xffc + addu k1, k1, k0 + lw k0, (k1) + nop + mtc0 k0, CP0_ENTRYLO0 + mfc0 k1, CP0_EPC + tlbwr + jr k1 + rfe + END(except_vec0_r2300) + + __FINIT + + /* ABUSE of CPP macros 101. */ + + /* After this macro runs, the pte faulted on is + * in register PTE, a ptr into the table in which + * the pte belongs is in PTR. + */ +#define LOAD_PTE(pte, ptr) \ + mfc0 pte, CP0_BADVADDR; \ + lw ptr, pgd_current; \ + srl pte, pte, 22; \ + sll pte, pte, 2; \ + addu ptr, ptr, pte; \ + mfc0 pte, CP0_CONTEXT; \ + lw ptr, (ptr); \ + andi pte, pte, 0xffc; \ + addu ptr, ptr, pte; \ + lw pte, (ptr); \ + nop; + + /* This places the even/odd pte pair in the page + * table at PTR into ENTRYLO0 and ENTRYLO1 using + * TMP as a scratch register. + */ +#define PTE_RELOAD(ptr) \ + lw ptr, (ptr) ; \ + nop ; \ + mtc0 ptr, CP0_ENTRYLO0; \ + nop; + +#define DO_FAULT(write) \ + .set noat; \ + .set macro; \ + SAVE_ALL; \ + mfc0 a2, CP0_BADVADDR; \ + KMODE; \ + .set at; \ + move a0, sp; \ + jal do_page_fault; \ + li a1, write; \ + j ret_from_exception; \ + nop; \ + .set noat; \ + .set nomacro; + + /* Check is PTE is present, if not then jump to LABEL. + * PTR points to the page table where this PTE is located, + * when the macro is done executing PTE will be restored + * with it's original value. 
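+ * The andi/xori pair below first masks the two flag bits and then
+ * flips exactly those bits, so the result is zero only when both
+ * _PAGE_PRESENT and _PAGE_READ are set; a single bnez then branches
+ * out on "not present or not readable".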
+ */ +#define PTE_PRESENT(pte, ptr, label) \ + andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); \ + xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); \ + bnez pte, label; \ + .set push; \ + .set reorder; \ + lw pte, (ptr); \ + .set pop; + + /* Make PTE valid, store result in PTR. */ +#define PTE_MAKEVALID(pte, ptr) \ + ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \ + sw pte, (ptr); + + /* Check if PTE can be written to, if not branch to LABEL. + * Regardless restore PTE with value from PTR when done. + */ +#define PTE_WRITABLE(pte, ptr, label) \ + andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \ + xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \ + bnez pte, label; \ + .set push; \ + .set reorder; \ + lw pte, (ptr); \ + .set pop; + + + /* Make PTE writable, update software status bits as well, + * then store at PTR. + */ +#define PTE_MAKEWRITE(pte, ptr) \ + ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \ + _PAGE_VALID | _PAGE_DIRTY); \ + sw pte, (ptr); + +/* + * The index register may have the probe fail bit set, + * because we would trap on access kseg2, i.e. without refill. + */ +#define TLB_WRITE(reg) \ + mfc0 reg, CP0_INDEX; \ + nop; \ + bltz reg, 1f; \ + nop; \ + tlbwi; \ + j 2f; \ + nop; \ +1: tlbwr; \ +2: + +#define RET(reg) \ + mfc0 reg, CP0_EPC; \ + nop; \ + jr reg; \ + rfe + + .set noreorder + + .align 5 +NESTED(handle_tlbl, PT_SIZE, sp) + .set noat + +#ifdef TLB_OPTIMIZE + /* Test present bit in entry. */ + LOAD_PTE(k0, k1) + tlbp + PTE_PRESENT(k0, k1, nopage_tlbl) + PTE_MAKEVALID(k0, k1) + PTE_RELOAD(k1) + TLB_WRITE(k0) + RET(k0) +nopage_tlbl: +#endif + + DO_FAULT(0) +END(handle_tlbl) + +NESTED(handle_tlbs, PT_SIZE, sp) + .set noat + +#ifdef TLB_OPTIMIZE + LOAD_PTE(k0, k1) + tlbp # find faulting entry + PTE_WRITABLE(k0, k1, nopage_tlbs) + PTE_MAKEWRITE(k0, k1) + PTE_RELOAD(k1) + TLB_WRITE(k0) + RET(k0) +nopage_tlbs: +#endif + + DO_FAULT(1) +END(handle_tlbs) + + .align 5 +NESTED(handle_mod, PT_SIZE, sp) + .set noat +#ifdef TLB_OPTIMIZE + LOAD_PTE(k0, k1) + tlbp # find faulting entry + andi k0, k0, _PAGE_WRITE + beqz k0, nowrite_mod + .set push + .set reorder + lw k0, (k1) + .set pop + + /* Present and writable bits set, set accessed and dirty bits. */ + PTE_MAKEWRITE(k0, k1) + + /* Now reload the entry into the tlb. */ + PTE_RELOAD(k1) + tlbwi + RET(k0) +#endif + +nowrite_mod: + DO_FAULT(1) +END(handle_mod) diff --git a/arch/mips/mm/tlbex32-r4k.S b/arch/mips/mm/tlbex32-r4k.S new file mode 100644 index 000000000..49742718d --- /dev/null +++ b/arch/mips/mm/tlbex32-r4k.S @@ -0,0 +1,524 @@ +/* + * TLB exception handling code for r4k. + * + * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse + * + * Multi-cpu abstraction and reworking: + * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com) + * + * Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. + */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TLB_OPTIMIZE /* If you are paranoid, disable this. */ + +#ifdef CONFIG_64BIT_PHYS_ADDR +#define PTE_L ld +#define PTE_S sd +#define PTE_SRL dsrl +#define P_MTC0 dmtc0 +#define PTE_SIZE 8 +#define PTEP_INDX_MSK 0xff0 +#define PTE_INDX_MSK 0xff8 +#define PTE_INDX_SHIFT 9 +#else +#define PTE_L lw +#define PTE_S sw +#define PTE_SRL srl +#define P_MTC0 mtc0 +#define PTE_SIZE 4 +#define PTEP_INDX_MSK 0xff8 +#define PTE_INDX_MSK 0xffc +#define PTE_INDX_SHIFT 10 +#endif + +/* + * ABUSE of CPP macros 101. 
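+ * (PTE_L, PTE_S, PTE_SIZE and the index masks above expand to either
+ * the 32-bit or the 64-bit accessors depending on
+ * CONFIG_64BIT_PHYS_ADDR, so one copy of each macro serves both PTE
+ * layouts.)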
+ * + * After this macro runs, the pte faulted on is + * in register PTE, a ptr into the table in which + * the pte belongs is in PTR. + */ + +#ifdef CONFIG_SMP +#define GET_PGD(scratch, ptr) \ + mfc0 ptr, CP0_CONTEXT; \ + la scratch, pgd_current;\ + srl ptr, 23; \ + sll ptr, 2; \ + addu ptr, scratch, ptr; \ + lw ptr, (ptr); +#else +#define GET_PGD(scratch, ptr) \ + lw ptr, pgd_current; +#endif + +#define LOAD_PTE(pte, ptr) \ + GET_PGD(pte, ptr) \ + mfc0 pte, CP0_BADVADDR; \ + srl pte, pte, _PGDIR_SHIFT; \ + sll pte, pte, 2; \ + addu ptr, ptr, pte; \ + mfc0 pte, CP0_BADVADDR; \ + lw ptr, (ptr); \ + srl pte, pte, PTE_INDX_SHIFT; \ + and pte, pte, PTE_INDX_MSK; \ + addu ptr, ptr, pte; \ + PTE_L pte, (ptr); + + /* This places the even/odd pte pair in the page + * table at PTR into ENTRYLO0 and ENTRYLO1 using + * TMP as a scratch register. + */ +#define PTE_RELOAD(ptr, tmp) \ + ori ptr, ptr, PTE_SIZE; \ + xori ptr, ptr, PTE_SIZE; \ + PTE_L tmp, PTE_SIZE(ptr); \ + PTE_L ptr, 0(ptr); \ + PTE_SRL tmp, tmp, 6; \ + P_MTC0 tmp, CP0_ENTRYLO1; \ + PTE_SRL ptr, ptr, 6; \ + P_MTC0 ptr, CP0_ENTRYLO0; + +#define DO_FAULT(write) \ + .set noat; \ + SAVE_ALL; \ + mfc0 a2, CP0_BADVADDR; \ + KMODE; \ + .set at; \ + move a0, sp; \ + jal do_page_fault; \ + li a1, write; \ + j ret_from_exception; \ + nop; \ + .set noat; + + /* Check is PTE is present, if not then jump to LABEL. + * PTR points to the page table where this PTE is located, + * when the macro is done executing PTE will be restored + * with it's original value. + */ +#define PTE_PRESENT(pte, ptr, label) \ + andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); \ + xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); \ + bnez pte, label; \ + PTE_L pte, (ptr); + + /* Make PTE valid, store result in PTR. */ +#define PTE_MAKEVALID(pte, ptr) \ + ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \ + PTE_S pte, (ptr); + + /* Check if PTE can be written to, if not branch to LABEL. + * Regardless restore PTE with value from PTR when done. + */ +#define PTE_WRITABLE(pte, ptr, label) \ + andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \ + xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \ + bnez pte, label; \ + PTE_L pte, (ptr); + + /* Make PTE writable, update software status bits as well, + * then store at PTR. + */ +#define PTE_MAKEWRITE(pte, ptr) \ + ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \ + _PAGE_VALID | _PAGE_DIRTY); \ + PTE_S pte, (ptr); + + __INIT + +#ifdef CONFIG_64BIT_PHYS_ADDR +#define GET_PTE_OFF(reg) +#elif CONFIG_CPU_VR41XX +#define GET_PTE_OFF(reg) srl reg, reg, 3 +#else +#define GET_PTE_OFF(reg) srl reg, reg, 1 +#endif + +/* + * These handlers much be written in a relocatable manner + * because based upon the cpu type an arbitrary one of the + * following pieces of code will be copied to the KSEG0 + * vector location. 
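+ * They are memcpy()'d down to the exception vector at the bottom of
+ * KSEG0, so each one has to keep working when executed from an address
+ * other than the one it was linked at.
+ *
+ * All the variants below do essentially the same job; in rough, hedged
+ * C-like pseudocode (names are illustrative only, not real helpers):
+ *
+ *	pgd  = pgd_current;                           (GET_PGD)
+ *	ptep = pgd[badvaddr >> _PGDIR_SHIFT]
+ *	       + pte offset taken from CP0_CONTEXT;   (GET_PTE_OFF)
+ *	EntryLo0 = ptep[0] >> 6;                      (even pte)
+ *	EntryLo1 = ptep[1] >> 6;                      (odd pte)
+ *	tlbwr; eret;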
+ */ + /* TLB refill, EXL == 0, R4xx0, non-R4600 version */ + .set noreorder + .set noat + LEAF(except_vec0_r4000) + .set mips3 + GET_PGD(k0, k1) # get pgd pointer + mfc0 k0, CP0_BADVADDR # Get faulting address + srl k0, k0, _PGDIR_SHIFT # get pgd only bits + + sll k0, k0, 2 + addu k1, k1, k0 # add in pgd offset + mfc0 k0, CP0_CONTEXT # get context reg + lw k1, (k1) + GET_PTE_OFF(k0) # get pte offset + and k0, k0, PTEP_INDX_MSK + addu k1, k1, k0 # add in offset + PTE_L k0, 0(k1) # get even pte + PTE_L k1, PTE_SIZE(k1) # get odd pte + PTE_SRL k0, k0, 6 # convert to entrylo0 + P_MTC0 k0, CP0_ENTRYLO0 # load it + PTE_SRL k1, k1, 6 # convert to entrylo1 + P_MTC0 k1, CP0_ENTRYLO1 # load it + mtc0_tlbw_hazard + tlbwr # write random tlb entry + tlbw_eret_hazard + eret # return from trap + END(except_vec0_r4000) + + /* TLB refill, EXL == 0, R4600 version */ + LEAF(except_vec0_r4600) + .set mips3 + GET_PGD(k0, k1) # get pgd pointer + mfc0 k0, CP0_BADVADDR + srl k0, k0, _PGDIR_SHIFT + sll k0, k0, 2 # log2(sizeof(pgd_t) + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) + GET_PTE_OFF(k0) # get pte offset + and k0, k0, PTEP_INDX_MSK + addu k1, k1, k0 + PTE_L k0, 0(k1) + PTE_L k1, PTE_SIZE(k1) + PTE_SRL k0, k0, 6 + P_MTC0 k0, CP0_ENTRYLO0 + PTE_SRL k1, k1, 6 + P_MTC0 k1, CP0_ENTRYLO1 + nop + tlbwr + nop + eret + END(except_vec0_r4600) + + /* TLB refill, EXL == 0, R52x0 "Nevada" version */ + /* + * This version has a bug workaround for the Nevada. It seems + * as if under certain circumstances the move from cp0_context + * might produce a bogus result when the mfc0 instruction and + * it's consumer are in a different cacheline or a load instruction, + * probably any memory reference, is between them. This is + * potencially slower than the R4000 version, so we use this + * special version. 
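+ * The workaround shows up in the instruction order below: the load of
+ * the pgd entry is issued before the mfc0 from CP0_CONTEXT, so no
+ * memory reference sits between that mfc0 and the instruction that
+ * consumes its result.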
+ */ + .set noreorder + .set noat + LEAF(except_vec0_nevada) + .set mips3 + mfc0 k0, CP0_BADVADDR # Get faulting address + srl k0, k0, _PGDIR_SHIFT # get pgd only bits + lw k1, pgd_current # get pgd pointer + sll k0, k0, 2 # log2(sizeof(pgd_t) + addu k1, k1, k0 # add in pgd offset + lw k1, (k1) + mfc0 k0, CP0_CONTEXT # get context reg + GET_PTE_OFF(k0) # get pte offset + and k0, k0, PTEP_INDX_MSK + addu k1, k1, k0 # add in offset + PTE_L k0, 0(k1) # get even pte + PTE_L k1, PTE_SIZE(k1) # get odd pte + PTE_SRL k0, k0, 6 # convert to entrylo0 + P_MTC0 k0, CP0_ENTRYLO0 # load it + PTE_SRL k1, k1, 6 # convert to entrylo1 + P_MTC0 k1, CP0_ENTRYLO1 # load it + nop # QED specified nops + nop + tlbwr # write random tlb entry + nop # traditional nop + eret # return from trap + END(except_vec0_nevada) + + /* TLB refill, EXL == 0, SB1 with M3 errata handling version */ + LEAF(except_vec0_sb1) +#if BCM1250_M3_WAR + mfc0 k0, CP0_BADVADDR + mfc0 k1, CP0_ENTRYHI + xor k0, k1 + srl k0, k0, PAGE_SHIFT+1 + bnez k0, 1f +#endif + GET_PGD(k0, k1) # get pgd pointer + mfc0 k0, CP0_BADVADDR # Get faulting address + srl k0, k0, _PGDIR_SHIFT # get pgd only bits + sll k0, k0, 2 + addu k1, k1, k0 # add in pgd offset + mfc0 k0, CP0_CONTEXT # get context reg + lw k1, (k1) + GET_PTE_OFF(k0) # get pte offset + and k0, k0, PTEP_INDX_MSK + addu k1, k1, k0 # add in offset + PTE_L k0, 0(k1) # get even pte + PTE_L k1, PTE_SIZE(k1) # get odd pte + PTE_SRL k0, k0, 6 # convert to entrylo0 + P_MTC0 k0, CP0_ENTRYLO0 # load it + PTE_SRL k1, k1, 6 # convert to entrylo1 + P_MTC0 k1, CP0_ENTRYLO1 # load it + tlbwr # write random tlb entry +1: eret # return from trap + END(except_vec0_sb1) + + /* TLB refill, EXL == 0, R4[40]00/R5000 badvaddr hwbug version */ + LEAF(except_vec0_r45k_bvahwbug) + .set mips3 + GET_PGD(k0, k1) # get pgd pointer + mfc0 k0, CP0_BADVADDR + srl k0, k0, _PGDIR_SHIFT + sll k0, k0, 2 # log2(sizeof(pgd_t) + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) +#ifndef CONFIG_64BIT_PHYS_ADDR + srl k0, k0, 1 +#endif + and k0, k0, PTEP_INDX_MSK + addu k1, k1, k0 + PTE_L k0, 0(k1) + PTE_L k1, PTE_SIZE(k1) + nop /* XXX */ + tlbp + PTE_SRL k0, k0, 6 + P_MTC0 k0, CP0_ENTRYLO0 + PTE_SRL k1, k1, 6 + mfc0 k0, CP0_INDEX + P_MTC0 k1, CP0_ENTRYLO1 + bltzl k0, 1f + tlbwr +1: + nop + eret + END(except_vec0_r45k_bvahwbug) + +#ifdef CONFIG_SMP + /* TLB refill, EXL == 0, R4000 MP badvaddr hwbug version */ + LEAF(except_vec0_r4k_mphwbug) + .set mips3 + GET_PGD(k0, k1) # get pgd pointer + mfc0 k0, CP0_BADVADDR + srl k0, k0, _PGDIR_SHIFT + sll k0, k0, 2 # log2(sizeof(pgd_t) + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) +#ifndef CONFIG_64BIT_PHYS_ADDR + srl k0, k0, 1 +#endif + and k0, k0, PTEP_INDX_MSK + addu k1, k1, k0 + PTE_L k0, 0(k1) + PTE_L k1, PTE_SIZE(k1) + nop /* XXX */ + tlbp + PTE_SRL k0, k0, 6 + P_MTC0 k0, CP0_ENTRYLO0 + PTE_SRL k1, k1, 6 + mfc0 k0, CP0_INDEX + P_MTC0 k1, CP0_ENTRYLO1 + bltzl k0, 1f + tlbwr +1: + nop + eret + END(except_vec0_r4k_mphwbug) +#endif + + /* TLB refill, EXL == 0, R4000 UP 250MHZ entrylo[01] hwbug version */ + LEAF(except_vec0_r4k_250MHZhwbug) + .set mips3 + GET_PGD(k0, k1) # get pgd pointer + mfc0 k0, CP0_BADVADDR + srl k0, k0, _PGDIR_SHIFT + sll k0, k0, 2 # log2(sizeof(pgd_t) + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) +#ifndef CONFIG_64BIT_PHYS_ADDR + srl k0, k0, 1 +#endif + and k0, k0, PTEP_INDX_MSK + addu k1, k1, k0 + PTE_L k0, 0(k1) + PTE_L k1, PTE_SIZE(k1) + PTE_SRL k0, k0, 6 + P_MTC0 zero, CP0_ENTRYLO0 + P_MTC0 k0, CP0_ENTRYLO0 + PTE_SRL k1, k1, 6 + P_MTC0 zero, 
CP0_ENTRYLO1 + P_MTC0 k1, CP0_ENTRYLO1 + b 1f + tlbwr +1: + nop + eret + END(except_vec0_r4k_250MHZhwbug) + +#ifdef CONFIG_SMP + /* TLB refill, EXL == 0, R4000 MP 250MHZ entrylo[01]+badvaddr bug version */ + LEAF(except_vec0_r4k_MP250MHZhwbug) + .set mips3 + GET_PGD(k0, k1) # get pgd pointer + mfc0 k0, CP0_BADVADDR + srl k0, k0, _PGDIR_SHIFT + sll k0, k0, 2 # log2(sizeof(pgd_t) + addu k1, k1, k0 + mfc0 k0, CP0_CONTEXT + lw k1, (k1) +#ifndef CONFIG_64BIT_PHYS_ADDR + srl k0, k0, 1 +#endif + and k0, k0, PTEP_INDX_MSK + addu k1, k1, k0 + PTE_L k0, 0(k1) + PTE_L k1, PTE_SIZE(k1) + nop /* XXX */ + tlbp + PTE_SRL k0, k0, 6 + P_MTC0 zero, CP0_ENTRYLO0 + P_MTC0 k0, CP0_ENTRYLO0 + mfc0 k0, CP0_INDEX + PTE_SRL k1, k1, 6 + P_MTC0 zero, CP0_ENTRYLO1 + P_MTC0 k1, CP0_ENTRYLO1 + bltzl k0, 1f + tlbwr +1: + nop + eret + END(except_vec0_r4k_MP250MHZhwbug) +#endif + + __FINIT + + .set noreorder + +/* + * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0: + * 2. A timing hazard exists for the TLBP instruction. + * + * stalling_instruction + * TLBP + * + * The JTLB is being read for the TLBP throughout the stall generated by the + * previous instruction. This is not really correct as the stalling instruction + * can modify the address used to access the JTLB. The failure symptom is that + * the TLBP instruction will use an address created for the stalling instruction + * and not the address held in C0_ENHI and thus report the wrong results. + * + * The software work-around is to not allow the instruction preceding the TLBP + * to stall - make it an NOP or some other instruction guaranteed not to stall. + * + * Errata 2 will not be fixed. This errata is also on the R5000. + * + * As if we MIPS hackers wouldn't know how to nop pipelines happy ... + */ +#define R5K_HAZARD nop + + /* + * Note for many R4k variants tlb probes cannot be executed out + * of the instruction cache else you get bogus results. + */ + .align 5 + NESTED(handle_tlbl, PT_SIZE, sp) + .set noat +#if BCM1250_M3_WAR + mfc0 k0, CP0_BADVADDR + mfc0 k1, CP0_ENTRYHI + xor k0, k1 + srl k0, k0, PAGE_SHIFT+1 + beqz k0, 1f + nop + .set mips3 + eret + .set mips0 +1: +#endif +invalid_tlbl: +#ifdef TLB_OPTIMIZE + .set mips3 + /* Test present bit in entry. */ + LOAD_PTE(k0, k1) + R5K_HAZARD + tlbp + PTE_PRESENT(k0, k1, nopage_tlbl) + PTE_MAKEVALID(k0, k1) + PTE_RELOAD(k1, k0) + mtc0_tlbw_hazard + tlbwi + tlbw_eret_hazard + .set mips3 + eret + .set mips0 +#endif + +nopage_tlbl: + DO_FAULT(0) + END(handle_tlbl) + + .align 5 + NESTED(handle_tlbs, PT_SIZE, sp) + .set noat +#ifdef TLB_OPTIMIZE + .set mips3 + li k0,0 + LOAD_PTE(k0, k1) + R5K_HAZARD + tlbp # find faulting entry + PTE_WRITABLE(k0, k1, nopage_tlbs) + PTE_MAKEWRITE(k0, k1) + PTE_RELOAD(k1, k0) + mtc0_tlbw_hazard + tlbwi + tlbw_eret_hazard + .set mips3 + eret + .set mips0 +#endif + +nopage_tlbs: + DO_FAULT(1) + END(handle_tlbs) + + .align 5 + NESTED(handle_mod, PT_SIZE, sp) + .set noat +#ifdef TLB_OPTIMIZE + .set mips3 + LOAD_PTE(k0, k1) + R5K_HAZARD + tlbp # find faulting entry + andi k0, k0, _PAGE_WRITE + beqz k0, nowrite_mod + PTE_L k0, (k1) + + /* Present and writable bits set, set accessed and dirty bits. */ + PTE_MAKEWRITE(k0, k1) + + /* Now reload the entry into the tlb. 
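+ * Unlike the refill path this uses tlbwi: the entry already exists in
+ * the TLB and tlbp has set the Index register, so it is rewritten in
+ * place rather than at a random slot.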
*/ + PTE_RELOAD(k1, k0) + mtc0_tlbw_hazard + tlbwi + tlbw_eret_hazard + .set mips3 + eret + .set mips0 +#endif + +nowrite_mod: + DO_FAULT(1) + END(handle_mod) diff --git a/arch/mips/mm/tlbex64-r4k.S b/arch/mips/mm/tlbex64-r4k.S new file mode 100644 index 000000000..728d18f00 --- /dev/null +++ b/arch/mips/mm/tlbex64-r4k.S @@ -0,0 +1,203 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2000 Silicon Graphics, Inc. + * Written by Ulf Carlsson (ulfc@engr.sgi.com) + * Copyright (C) 2002 Maciej W. Rozycki + */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#define _VMALLOC_START 0xc000000000000000 + + /* + * After this macro runs we have a pointer to the pte of the address + * that caused the fault in PTR. + */ + .macro LOAD_PTE2, ptr, tmp, kaddr +#ifdef CONFIG_SMP + dmfc0 \ptr, CP0_CONTEXT + dmfc0 \tmp, CP0_BADVADDR + dsra \ptr, 23 # get pgd_current[cpu] +#else + dmfc0 \tmp, CP0_BADVADDR + dla \ptr, pgd_current +#endif + bltz \tmp, \kaddr + ld \ptr, (\ptr) + dsrl \tmp, (_PGDIR_SHIFT-3) # get pgd offset in bytes + andi \tmp, ((_PTRS_PER_PGD - 1)<<3) + daddu \ptr, \tmp # add in pgd offset + dmfc0 \tmp, CP0_BADVADDR + ld \ptr, (\ptr) # get pmd pointer + dsrl \tmp, (_PMD_SHIFT-3) # get pmd offset in bytes + andi \tmp, ((_PTRS_PER_PMD - 1)<<3) + daddu \ptr, \tmp # add in pmd offset + dmfc0 \tmp, CP0_XCONTEXT + ld \ptr, (\ptr) # get pte pointer + andi \tmp, 0xff0 # get pte offset + daddu \ptr, \tmp + .endm + + + /* + * Ditto for the kernel table. + */ + .macro LOAD_KPTE2, ptr, tmp, not_vmalloc + /* + * First, determine that the address is in/above vmalloc range. + */ + dmfc0 \tmp, CP0_BADVADDR + dli \ptr, _VMALLOC_START + + /* + * Now find offset into kptbl. + */ + dsubu \tmp, \tmp, \ptr + dla \ptr, kptbl + dsrl \tmp, (_PAGE_SHIFT+1) # get vpn2 + dsll \tmp, 4 # byte offset of pte + daddu \ptr, \ptr, \tmp + + /* + * Determine that fault address is within vmalloc range. + */ + dla \tmp, ekptbl + slt \tmp, \ptr, \tmp + beqz \tmp, \not_vmalloc # not vmalloc + nop + .endm + + + /* + * This places the even/odd pte pair in the page table at the pte + * entry pointed to by PTE into ENTRYLO0 and ENTRYLO1. + */ + .macro PTE_RELOAD, pte0, pte1 + dsrl \pte0, 6 # convert to entrylo0 + dmtc0 \pte0, CP0_ENTRYLO0 # load it + dsrl \pte1, 6 # convert to entrylo1 + dmtc0 \pte1, CP0_ENTRYLO1 # load it + .endm + + + .text + .set noreorder + .set mips3 + + __INIT + + /* + * TLB refill handlers for the R4000 and SB1. + * Attention: We may only use 32 instructions / 128 bytes. + */ + .align 5 +LEAF(except_vec1_r4k) + .set noat + dla k0, handle_vec1_r4k + jr k0 + nop +END(except_vec1_r4k) + +LEAF(except_vec1_sb1) +#if BCM1250_M3_WAR + dmfc0 k0, CP0_BADVADDR + dmfc0 k1, CP0_ENTRYHI + xor k0, k1 + dsrl k0, k0, _PAGE_SHIFT+1 + bnez k0, 1f +#endif + .set noat + dla k0, handle_vec1_r4k + jr k0 + nop + +1: eret + nop +END(except_vec1_sb1) + + __FINIT + + .align 5 +LEAF(handle_vec1_r4k) + .set noat + LOAD_PTE2 k1 k0 9f + ld k0, 0(k1) # get even pte + ld k1, 8(k1) # get odd pte + PTE_RELOAD k0 k1 + mtc0_tlbw_hazard + tlbwr + tlbw_eret_hazard + eret + +9: # handle the vmalloc range + LOAD_KPTE2 k1 k0 invalid_vmalloc_address + ld k0, 0(k1) # get even pte + ld k1, 8(k1) # get odd pte + PTE_RELOAD k0 k1 + mtc0_tlbw_hazard + tlbwr + tlbw_eret_hazard + eret +END(handle_vec1_r4k) + + + __INIT + + /* + * TLB refill handler for the R10000. 
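+ * As with except_vec1_r4k above, only a short trampoline sits at the
+ * vector itself; it loads the address of the real handler and jumps
+ * there.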
+ * Attention: We may only use 32 instructions / 128 bytes. + */ + .align 5 +LEAF(except_vec1_r10k) + .set noat + dla k0, handle_vec1_r10k + jr k0 + nop +END(except_vec1_r10k) + + __FINIT + + .align 5 +LEAF(handle_vec1_r10k) + .set noat + LOAD_PTE2 k1 k0 9f + ld k0, 0(k1) # get even pte + ld k1, 8(k1) # get odd pte + PTE_RELOAD k0 k1 + nop + tlbwr + eret + +9: # handle the vmalloc range + LOAD_KPTE2 k1 k0 invalid_vmalloc_address + ld k0, 0(k1) # get even pte + ld k1, 8(k1) # get odd pte + PTE_RELOAD k0 k1 + nop + tlbwr + eret +END(handle_vec1_r10k) + + + .align 5 +LEAF(invalid_vmalloc_address) + .set noat + SAVE_ALL + CLI + dmfc0 t0, CP0_BADVADDR + sd t0, PT_BVADDR(sp) + move a0, sp + jal show_regs + PANIC("Invalid kernel address") +END(invalid_vmalloc_address) diff --git a/arch/mips/pci/fixup-jaguar.c b/arch/mips/pci/fixup-jaguar.c new file mode 100644 index 000000000..fa78b9b1f --- /dev/null +++ b/arch/mips/pci/fixup-jaguar.c @@ -0,0 +1,42 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Marvell MV64340 interrupt fixup code. + * + * Marvell wants an NDA for their docs so this was written without + * documentation. You've been warned. + * + * Copyright (C) 2004 Ralf Baechle + */ +#include +#include +#include + +#include +#include + +/* + * WARNING: Example of how _NOT_ to do it. + */ +int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int bus = dev->bus->number; + + if (bus == 0 && slot == 1) + return 3; /* PCI-X A */ + if (bus == 0 && slot == 2) + return 4; /* PCI-X B */ + if (bus == 1 && slot == 1) + return 5; /* PCI A */ + if (bus == 1 && slot == 2) + return 6; /* PCI B */ + +return 0; + panic("Whooops in pcibios_map_irq"); +} + +struct pci_fixup pcibios_fixups[] = { + {0} +}; diff --git a/arch/mips/pci/fixup-mpc30x.c b/arch/mips/pci/fixup-mpc30x.c new file mode 100644 index 000000000..1320c42af --- /dev/null +++ b/arch/mips/pci/fixup-mpc30x.c @@ -0,0 +1,48 @@ +/* + * fixup-mpc30x.c, The Victor MP-C303/304 specific PCI fixups. + * + * Copyright (C) 2002,2004 Yoichi Yuasa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include + +#include +#include + +static const int internal_func_irqs[] __initdata = { + VRC4173_CASCADE_IRQ, + VRC4173_AC97_IRQ, + VRC4173_USB_IRQ, +}; + +static char irq_tab_mpc30x[] __initdata = { + [12] = VRC4173_PCMCIA1_IRQ, + [13] = VRC4173_PCMCIA2_IRQ, + [29] = MQ200_IRQ, +}; + +int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + if (slot == 30) + return internal_func_irqs[PCI_FUNC(dev->devfn)]; + + return irq_tab_mpc30x[slot]; +} + +struct pci_fixup pcibios_fixups[] __initdata = { + { .pass = 0, }, +}; diff --git a/arch/mips/pci/fixup-ocelot-c.c b/arch/mips/pci/fixup-ocelot-c.c new file mode 100644 index 000000000..0cc86eceb --- /dev/null +++ b/arch/mips/pci/fixup-ocelot-c.c @@ -0,0 +1,39 @@ +/* + * Copyright 2002 Momentum Computer Inc. + * Author: Matthew Dharm + * + * Based on work for the Linux port to the Ocelot board, which is + * Copyright 2001 MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * + * arch/mips/momentum/ocelot_g/pci.c + * Board-specific PCI routines for mv64340 controller. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include +#include +#include +#include + +int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int bus = dev->bus->number; + + if (bus == 0 && slot == 1) + return 2; /* PCI-X A */ + if (bus == 1 && slot == 1) + return 12; /* PCI-X B */ + if (bus == 1 && slot == 2) + return 4; /* PCI B */ + +return 0; + panic("Whooops in pcibios_map_irq"); +} + +struct pci_fixup pcibios_fixups[] = { + {0} +}; diff --git a/arch/mips/pci/fixup-ocelot-g.c b/arch/mips/pci/fixup-ocelot-g.c new file mode 100644 index 000000000..9a2cc8505 --- /dev/null +++ b/arch/mips/pci/fixup-ocelot-g.c @@ -0,0 +1,35 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org) + */ +#include +#include +#include +#include + +int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int bus = dev->bus->number; + + if (bus == 0 && slot == 1) /* Intel 82543 Gigabit MAC */ + return 2; /* irq_nr is 2 for INT0 */ + + if (bus == 0 && slot == 2) /* Intel 82543 Gigabit MAC */ + return 3; /* irq_nr is 3 for INT1 */ + + if (bus == 1 && slot == 3) /* Intel 21555 bridge */ + return 5; /* irq_nr is 8 for INT6 */ + + if (bus == 1 && slot == 4) /* PMC Slot */ + return 9; /* irq_nr is 9 for INT7 */ + + return -1; +} + +struct pci_fixup pcibios_fixups[] = { + {0} +}; diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c new file mode 100644 index 000000000..ca4d99fbe --- /dev/null +++ b/arch/mips/pci/fixup-tb0219.c @@ -0,0 +1,64 @@ +/* + * fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups. + * + * Copyright (C) 2003 Megasolution Inc. 
+ * Copyright (C) 2004 Yoichi Yuasa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include + +#include + +int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int irq = -1; + + switch (slot) { + case 12: + vr41xx_set_irq_trigger(TB0219_PCI_SLOT1_PIN, + TRIGGER_LEVEL, + SIGNAL_THROUGH); + vr41xx_set_irq_level(TB0219_PCI_SLOT1_PIN, + LEVEL_LOW); + irq = TB0219_PCI_SLOT1_IRQ; + break; + case 13: + vr41xx_set_irq_trigger(TB0219_PCI_SLOT2_PIN, + TRIGGER_LEVEL, + SIGNAL_THROUGH); + vr41xx_set_irq_level(TB0219_PCI_SLOT2_PIN, + LEVEL_LOW); + irq = TB0219_PCI_SLOT2_IRQ; + break; + case 14: + vr41xx_set_irq_trigger(TB0219_PCI_SLOT3_PIN, + TRIGGER_LEVEL, + SIGNAL_THROUGH); + vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, + LEVEL_LOW); + irq = TB0219_PCI_SLOT3_IRQ; + break; + default: + break; + } + + return irq; +} + +struct pci_fixup pcibios_fixups[] __initdata = { + { .pass = 0, }, +}; diff --git a/arch/mips/pci/ops-marvell.c b/arch/mips/pci/ops-marvell.c new file mode 100644 index 000000000..1ac5c5919 --- /dev/null +++ b/arch/mips/pci/ops-marvell.c @@ -0,0 +1,93 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) + */ +#include +#include +#include + +#include + +static int mv_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 * val) +{ + struct mv_pci_controller *mvbc = bus->sysdata; + unsigned long address_reg, data_reg; + u32 address; + + address_reg = mvbc->config_addr; + data_reg = mvbc->config_vreg; + + /* Accessing device 31 crashes those Marvells. Since years. + Will they ever make sane controllers ... */ + if (PCI_SLOT(devfn) == 31) + return PCIBIOS_DEVICE_NOT_FOUND; + + address = (bus->number << 16) | (devfn << 8) | + (where & 0xfc) | 0x80000000; + + /* start the configuration cycle */ + MV_WRITE(address_reg, address); + + switch (size) { + case 1: + *val = MV_READ_8(data_reg + (where & 0x3)); + break; + + case 2: + *val = MV_READ_16(data_reg + (where & 0x3)); + break; + + case 4: + *val = MV_READ(data_reg); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int mv_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct mv_pci_controller *mvbc = bus->sysdata; + unsigned long address_reg, data_reg; + u32 address; + + address_reg = mvbc->config_addr; + data_reg = mvbc->config_vreg; + + /* Accessing device 31 crashes those Marvells. Since years. + Will they ever make sane controllers ... 
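+	   The access itself is the usual indirect pair: compose bus,
+	   devfn and register with bit 31 set, write that to the address
+	   register, then move the data through the data register.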
*/ + if (PCI_SLOT(devfn) == 31) + return PCIBIOS_DEVICE_NOT_FOUND; + + address = (bus->number << 16) | (devfn << 8) | + (where & 0xfc) | 0x80000000; + + /* start the configuration cycle */ + MV_WRITE(address_reg, address); + + switch (size) { + case 1: + MV_WRITE_8(data_reg + (where & 0x3), val); + break; + + case 2: + MV_WRITE_16(data_reg + (where & 0x3), val); + break; + + case 4: + MV_WRITE(data_reg, val); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops mv_pci_ops = { + .read = mv_read_config, + .write = mv_write_config +}; diff --git a/arch/mips/pci/ops-titan-ht.c b/arch/mips/pci/ops-titan-ht.c new file mode 100644 index 000000000..46c636c27 --- /dev/null +++ b/arch/mips/pci/ops-titan-ht.c @@ -0,0 +1,125 @@ +/* + * Copyright 2003 PMC-Sierra + * Author: Manish Lachwani (lachwani@pmc-sierra.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include + +#include + +static int titan_ht_config_read_dword(struct pci_bus *bus, unsigned int devfn, + int offset, u32 * val) +{ + volatile uint32_t address; + int busno; + + busno = bus->number; + + address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000; + if (busno != 0) + address |= 1; + + /* + * RM9000 HT Errata: Issue back to back HT config + * transcations. 
Issue a BIU sync before and + * after the HT cycle + */ + + *(volatile int32_t *) 0xfb0000f0 |= 0x2; + + udelay(30); + + *(volatile int32_t *) 0xfb0006f8 = address; + *(val) = *(volatile int32_t *) 0xfb0006fc; + + udelay(30); + + * (volatile int32_t *) 0xfb0000f0 |= 0x2; + + return PCIBIOS_SUCCESSFUL; +} + +static int titan_ht_config_read(struct pci_bus *bus, unsigned int devfn, + int offset, int size, u32 * val) +{ + uint32_t dword; + + titan_ht_config_read_dword(bus, devfn, offset, &dword); + + dword >>= ((offset & 3) << 3); + dword &= (0xffffffffU >> ((4 - size) << 8)); + + return PCIBIOS_SUCCESSFUL; +} + +static inline int titan_ht_config_write_dword(struct pci_bus *bus, + unsigned int devfn, int offset, u32 val) +{ + volatile uint32_t address; + int busno; + + busno = bus->number; + + address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000; + if (busno != 0) + address |= 1; + + *(volatile int32_t *) 0xfb0000f0 |= 0x2; + + udelay(30); + + *(volatile int32_t *) 0xfb0006f8 = address; + *(volatile int32_t *) 0xfb0006fc = val; + + udelay(30); + + *(volatile int32_t *) 0xfb0000f0 |= 0x2; + + return PCIBIOS_SUCCESSFUL; +} + +static int titan_ht_config_write(struct pci_bus *bus, unsigned int devfn, + int offset, int size, u32 val) +{ + uint32_t val1, val2, mask; + + titan_ht_config_read_dword(bus, devfn, offset, &val2); + + val1 = val << ((offset & 3) << 3); + mask = ~(0xffffffffU >> ((4 - size) << 8)); + val2 &= ~(mask << ((offset & 3) << 8)); + + titan_ht_config_write_dword(bus, devfn, offset, val1 | val2); + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops titan_ht_pci_ops = { + .read = titan_ht_config_read, + .write = titan_ht_config_write, +}; diff --git a/arch/mips/pci/ops-vr41xx.c b/arch/mips/pci/ops-vr41xx.c new file mode 100644 index 000000000..44654605e --- /dev/null +++ b/arch/mips/pci/ops-vr41xx.c @@ -0,0 +1,126 @@ +/* + * ops-vr41xx.c, PCI configuration routines for the PCIU of NEC VR4100 series. + * + * Copyright (C) 2001-2003 MontaVista Software Inc. + * Author: Yoichi Yuasa + * Copyright (C) 2004 Yoichi Yuasa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/* + * Changes: + * MontaVista Software Inc. or + * - New creation, NEC VR4122 and VR4131 are supported. 
+ */ +#include +#include + +#include + +#define PCICONFDREG KSEG1ADDR(0x0f000c14) +#define PCICONFAREG KSEG1ADDR(0x0f000c18) + +static inline int set_pci_configuration_address(unsigned char number, + unsigned int devfn, int where) +{ + if (number == 0) { + /* + * Type 0 configuration + */ + if (PCI_SLOT(devfn) < 11 || where > 0xff) + return -EINVAL; + + writel((1U << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) | + (where & 0xfc), PCICONFAREG); + } else { + /* + * Type 1 configuration + */ + if (where > 0xff) + return -EINVAL; + + writel(((uint32_t)number << 16) | ((devfn & 0xff) << 8) | + (where & 0xfc) | 1U, PCICONFAREG); + } + + return 0; +} + +static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, + int size, uint32_t *val) +{ + uint32_t data; + + *val = 0xffffffffU; + if (set_pci_configuration_address(bus->number, devfn, where) < 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + data = readl(PCICONFDREG); + + switch (size) { + case 1: + *val = (data >> ((where & 3) << 3)) & 0xffU; + break; + case 2: + *val = (data >> ((where & 2) << 3)) & 0xffffU; + break; + case 4: + *val = data; + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, + int size, uint32_t val) +{ + uint32_t data; + int shift; + + if (set_pci_configuration_address(bus->number, devfn, where) < 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + data = readl(PCICONFDREG); + + switch (size) { + case 1: + shift = (where & 3) << 3; + data &= ~(0xffU << shift); + data |= ((val & 0xffU) << shift); + break; + case 2: + shift = (where & 2) << 3; + data &= ~(0xffffU << shift); + data |= ((val & 0xffffU) << shift); + break; + case 4: + data = val; + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + + writel(data, PCICONFDREG); + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops vr41xx_pci_ops = { + .read = pci_config_read, + .write = pci_config_write, +}; diff --git a/arch/mips/pci/pci-yosemite.c b/arch/mips/pci/pci-yosemite.c new file mode 100644 index 000000000..c1151f43c --- /dev/null +++ b/arch/mips/pci/pci-yosemite.c @@ -0,0 +1,37 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2004 by Ralf Baechle + * + */ +#include +#include +#include +#include +#include +#include + +extern struct pci_ops titan_pci_ops; + +static struct resource py_mem_resource = { + "Titan PCI MEM", 0xe0000000UL, 0xe3ffffffUL, IORESOURCE_MEM +}; + +static struct resource py_io_resource = { + "Titan IO MEM", 0x00000000UL, 0x00ffffffUL, IORESOURCE_IO, +}; + +static struct pci_controller py_controller = { + .pci_ops = &titan_pci_ops, + .mem_resource = &py_mem_resource, + .mem_offset = 0x10000000UL, + .io_resource = &py_io_resource, + .io_offset = 0x00000000UL +}; + +static int __init pmc_yosemite_setup(void) +{ + register_pci_controller(&py_controller); +} diff --git a/arch/mips/pmc-sierra/yosemite/dbg_io.c b/arch/mips/pmc-sierra/yosemite/dbg_io.c new file mode 100644 index 000000000..1ff8d95d0 --- /dev/null +++ b/arch/mips/pmc-sierra/yosemite/dbg_io.c @@ -0,0 +1,184 @@ +/* + * Copyright 2003 PMC-Sierra + * Author: Manish Lachwani (lachwani@pmc-sierra.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * Support for KGDB for the Yosemite board. We make use of single serial + * port to be used for KGDB as well as console. The second serial port + * seems to be having a problem. Single IRQ is allocated for both the + * ports. Hence, the interrupt routing code needs to figure out whether + * the interrupt came from channel A or B. + */ + +#include + +#if defined(CONFIG_KGDB) +#include + +/* + * Baud rate, Parity, Data and Stop bit settings for the + * serial port on the Yosemite. Note that the Early printk + * patch has been added. 
So, we should be all set to go + */ +#define YOSEMITE_BAUD_2400 2400 +#define YOSEMITE_BAUD_4800 4800 +#define YOSEMITE_BAUD_9600 9600 +#define YOSEMITE_BAUD_19200 19200 +#define YOSEMITE_BAUD_38400 38400 +#define YOSEMITE_BAUD_57600 57600 +#define YOSEMITE_BAUD_115200 115200 + +#define YOSEMITE_PARITY_NONE 0 +#define YOSEMITE_PARITY_ODD 0x08 +#define YOSEMITE_PARITY_EVEN 0x18 +#define YOSEMITE_PARITY_MARK 0x28 +#define YOSEMITE_PARITY_SPACE 0x38 + +#define YOSEMITE_DATA_5BIT 0x0 +#define YOSEMITE_DATA_6BIT 0x1 +#define YOSEMITE_DATA_7BIT 0x2 +#define YOSEMITE_DATA_8BIT 0x3 + +#define YOSEMITE_STOP_1BIT 0x0 +#define YOSEMITE_STOP_2BIT 0x4 + +/* This is crucial */ +#define SERIAL_REG_OFS 0x1 + +#define SERIAL_RCV_BUFFER 0x0 +#define SERIAL_TRANS_HOLD 0x0 +#define SERIAL_SEND_BUFFER 0x0 +#define SERIAL_INTR_ENABLE (1 * SERIAL_REG_OFS) +#define SERIAL_INTR_ID (2 * SERIAL_REG_OFS) +#define SERIAL_DATA_FORMAT (3 * SERIAL_REG_OFS) +#define SERIAL_LINE_CONTROL (3 * SERIAL_REG_OFS) +#define SERIAL_MODEM_CONTROL (4 * SERIAL_REG_OFS) +#define SERIAL_RS232_OUTPUT (4 * SERIAL_REG_OFS) +#define SERIAL_LINE_STATUS (5 * SERIAL_REG_OFS) +#define SERIAL_MODEM_STATUS (6 * SERIAL_REG_OFS) +#define SERIAL_RS232_INPUT (6 * SERIAL_REG_OFS) +#define SERIAL_SCRATCH_PAD (7 * SERIAL_REG_OFS) + +#define SERIAL_DIVISOR_LSB (0 * SERIAL_REG_OFS) +#define SERIAL_DIVISOR_MSB (1 * SERIAL_REG_OFS) + +/* + * Functions to READ and WRITE to serial port 0 + */ +#define SERIAL_READ(ofs) (*((volatile unsigned char*) \ + (TITAN_SERIAL_BASE + ofs))) + +#define SERIAL_WRITE(ofs, val) ((*((volatile unsigned char*) \ + (TITAN_SERIAL_BASE + ofs))) = val) + +/* + * Functions to READ and WRITE to serial port 1 + */ +#define SERIAL_READ_1(ofs) (*((volatile unsigned char*) \ + (TITAN_SERIAL_BASE_1 + ofs) + +#define SERIAL_WRITE_1(ofs, val) ((*((volatile unsigned char*) \ + (TITAN_SERIAL_BASE_1 + ofs))) = val) + +/* + * Second serial port initialization + */ +void init_second_port(void) +{ + /* Disable Interrupts */ + SERIAL_WRITE_1(SERIAL_LINE_CONTROL, 0x0); + SERIAL_WRITE_1(SERIAL_INTR_ENABLE, 0x0); + + { + unsigned int divisor; + + SERIAL_WRITE_1(SERIAL_LINE_CONTROL, 0x80); + divisor = TITAN_SERIAL_BASE_BAUD / YOSEMITE_BAUD_115200; + SERIAL_WRITE_1(SERIAL_DIVISOR_LSB, divisor & 0xff); + + SERIAL_WRITE_1(SERIAL_DIVISOR_MSB, + (divisor & 0xff00) >> 8); + SERIAL_WRITE_1(SERIAL_LINE_CONTROL, 0x0); + } + + SERIAL_WRITE_1(SERIAL_DATA_FORMAT, YOSEMITE_DATA_8BIT | + YOSEMITE_PARITY_NONE | YOSEMITE_STOP_1BIT); + + /* Enable Interrupts */ + SERIAL_WRITE_1(SERIAL_INTR_ENABLE, 0xf); +} + +/* Initialize the serial port for KGDB debugging */ +void debugInit(unsigned int baud, unsigned char data, unsigned char parity, + unsigned char stop) +{ + /* Disable Interrupts */ + SERIAL_WRITE(SERIAL_LINE_CONTROL, 0x0); + SERIAL_WRITE(SERIAL_INTR_ENABLE, 0x0); + + { + unsigned int divisor; + + SERIAL_WRITE(SERIAL_LINE_CONTROL, 0x80); + + divisor = TITAN_SERIAL_BASE_BAUD / baud; + SERIAL_WRITE(SERIAL_DIVISOR_LSB, divisor & 0xff); + + SERIAL_WRITE(SERIAL_DIVISOR_MSB, (divisor & 0xff00) >> 8); + SERIAL_WRITE(SERIAL_LINE_CONTROL, 0x0); + } + + SERIAL_WRITE(SERIAL_DATA_FORMAT, data | parity | stop); +} + +static int remoteDebugInitialized = 0; + +unsigned char getDebugChar(void) +{ + if (!remoteDebugInitialized) { + remoteDebugInitialized = 1; + debugInit(YOSEMITE_BAUD_115200, + YOSEMITE_DATA_8BIT, + YOSEMITE_PARITY_NONE, YOSEMITE_STOP_1BIT); + } + + while ((SERIAL_READ(SERIAL_LINE_STATUS) & 0x1) == 0); + return SERIAL_READ(SERIAL_RCV_BUFFER); +} + +int 
putDebugChar(unsigned char byte) +{ + if (!remoteDebugInitialized) { + remoteDebugInitialized = 1; + debugInit(YOSEMITE_BAUD_115200, + YOSEMITE_DATA_8BIT, + YOSEMITE_PARITY_NONE, YOSEMITE_STOP_1BIT); + } + + while ((SERIAL_READ(SERIAL_LINE_STATUS) & 0x20) == 0); + SERIAL_WRITE(SERIAL_SEND_BUFFER, byte); + + return 1; +} +#endif diff --git a/arch/mips/pmc-sierra/yosemite/i2c-yosemite.c b/arch/mips/pmc-sierra/yosemite/i2c-yosemite.c new file mode 100644 index 000000000..416da22b3 --- /dev/null +++ b/arch/mips/pmc-sierra/yosemite/i2c-yosemite.c @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2003 PMC-Sierra Inc. + * Author: Manish Lachwani (lachwani@pmc-sierra.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * Detailed Description: + * + * This block implements the I2C interface to the slave devices like the + * Atmel 24C32 EEPROM and the MAX 1619 Sensors device. The I2C Master interface + * can be controlled by the SCMB block. And the SCMB block kicks in only when + * using the Ethernet Mode of operation and __not__ the SysAD mode + * + * The SCMB controls the two modes: MDIO and the I2C. The MDIO mode is used to + * communicate with the Quad-PHY from Marvel. The I2C is used to communicate + * with the I2C slave devices. It seems that the driver does not explicitly + * deal with the control of SDA and SCL serial lines. So, the driver will set + * the slave address, drive the command and then the data. The SCMB will then + * control the two serial lines as required. + * + * It seems the documents are very unclear abt this. Hence, I took some time + * out to write the desciption to have an idea of how the I2C can actually + * work. Currently, this Linux driver wont be integrated into the generic Linux + * I2C framework. And finally, the I2C interface is also known as the 2BI + * interface. 2BI means 2-bit interface referring to SDA and SCL serial lines + * respectively. + * + * - Manish Lachwani (12/09/2003) + */ + +#include "i2c-yosemite.h" + +/* + * Poll the I2C interface for the BUSY bit. 
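debugInit() above follows the standard 16550 sequence: set the DLAB bit (0x80) in the line control register, write the 16-bit divisor latch, then clear DLAB again so offsets 0 and 1 revert to the data and interrupt-enable registers. A self-contained sketch of the same sequence (the write8 callback and base_clk parameter are stand-ins for the SERIAL_WRITE() macro and TITAN_SERIAL_BASE_BAUD; not part of the patch):

/* Illustrative sketch only. */
static void uart_set_baud(unsigned int base_clk, unsigned int baud,
                          void (*write8)(unsigned int reg, unsigned char val))
{
	unsigned int divisor = base_clk / baud;    /* e.g. base_clk / 115200 */

	write8(3, 0x80);                           /* LCR: DLAB=1, expose divisor latch */
	write8(0, divisor & 0xff);                 /* DLL: divisor low byte  */
	write8(1, (divisor >> 8) & 0xff);          /* DLM: divisor high byte */
	write8(3, 0x00);                           /* LCR: DLAB=0, data regs again */
}

Once DLAB is clear, debugInit() writes the data/parity/stop format byte, just as init_second_port() does for the second channel.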
+ */ +static int titan_i2c_poll(void) +{ + int i = 0; + unsigned long val = 0; + + for (i = 0; i < TITAN_I2C_MAX_POLL; i++) { + val = TITAN_I2C_READ(TITAN_I2C_COMMAND); + + if (!(val & 0x8000)) + return 0; + } + + return TITAN_I2C_ERR_TIMEOUT; +} + +/* + * Execute the I2C command + */ +int titan_i2c_xfer(unsigned int slave_addr, titan_i2c_command * cmd, + int size, unsigned int *addr) +{ + int loop = 0, bytes, i; + unsigned int *write_data, data, *read_data; + unsigned long reg_val, val; + + write_data = cmd->data; + read_data = addr; + + TITAN_I2C_WRITE(TITAN_I2C_SLAVE_ADDRESS, slave_addr); + + if (cmd->type == TITAN_I2C_CMD_WRITE) + loop = cmd->write_size; + else + loop = size; + + while (loop > 0) { + if ((cmd->type == TITAN_I2C_CMD_WRITE) || + (cmd->type == TITAN_I2C_CMD_READ_WRITE)) { + + reg_val = TITAN_I2C_DATA; + for (i = 0; i < TITAN_I2C_MAX_WORDS_PER_RW; + ++i, write_data += 2, reg_val += 4) { + if (bytes < cmd->write_size) { + data = write_data[0]; + ++data; + } + + if (bytes < cmd->write_size) { + data = write_data[1]; + ++data; + } + + TITAN_I2C_WRITE(reg_val, data); + } + } + + TITAN_I2C_WRITE(TITAN_I2C_COMMAND, + (unsigned int) (cmd->type << 13)); + if (titan_i2c_poll() != TITAN_I2C_ERR_OK) + return TITAN_I2C_ERR_TIMEOUT; + + if ((cmd->type == TITAN_I2C_CMD_READ) || + (cmd->type == TITAN_I2C_CMD_READ_WRITE)) { + + reg_val = TITAN_I2C_DATA; + for (i = 0; i < TITAN_I2C_MAX_WORDS_PER_RW; + ++i, read_data += 2, reg_val += 4) { + data = TITAN_I2C_READ(reg_val); + + if (bytes < size) { + read_data[0] = data & 0xff; + ++bytes; + } + + if (bytes < size) { + read_data[1] = + ((data >> 8) & 0xff); + ++bytes; + } + } + } + + loop -= (TITAN_I2C_MAX_WORDS_PER_RW * 2); + } + + /* + * Read the Interrupt status and then return the appropriate error code + */ + + val = TITAN_I2C_READ(TITAN_I2C_INTERRUPTS); + if (val & 0x0020) + return TITAN_I2C_ERR_ARB_LOST; + + if (val & 0x0040) + return TITAN_I2C_ERR_NO_RESP; + + if (val & 0x0080) + return TITAN_I2C_ERR_DATA_COLLISION; + + return TITAN_I2C_ERR_OK; +} + +/* + * Init the I2C subsystem of the PMC-Sierra Yosemite board + */ +int titan_i2c_init(titan_i2c_config * config) +{ + unsigned int val; + + /* + * Reset the SCMB and program into the I2C mode + */ + TITAN_I2C_WRITE(TITAN_I2C_SCMB_CONTROL, 0xA000); + TITAN_I2C_WRITE(TITAN_I2C_SCMB_CONTROL, 0x2000); + + /* + * Configure the filtera and clka values + */ + val = TITAN_I2C_READ(TITAN_I2C_SCMB_CLOCK_A); + val |= ((val & ~(0xF000)) | ((config->filtera << 12) & 0xF000)); + val |= ((val & ~(0x03FF)) | (config->clka & 0x03FF)); + TITAN_I2C_WRITE(TITAN_I2C_SCMB_CLOCK_A, val); + + /* + * Configure the filterb and clkb values + */ + val = TITAN_I2C_READ(TITAN_I2C_SCMB_CLOCK_B); + val |= ((val & ~(0xF000)) | ((config->filterb << 12) & 0xF000)); + val |= ((val & ~(0x03FF)) | (config->clkb & 0x03FF)); + TITAN_I2C_WRITE(TITAN_I2C_SCMB_CLOCK_B, val); + + return TITAN_I2C_ERR_OK; +} diff --git a/arch/mips/pmc-sierra/yosemite/py-console.c b/arch/mips/pmc-sierra/yosemite/py-console.c new file mode 100644 index 000000000..22c336f9a --- /dev/null +++ b/arch/mips/pmc-sierra/yosemite/py-console.c @@ -0,0 +1,130 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
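A caller of titan_i2c_xfer() above fills in a titan_i2c_command and checks the returned TITAN_I2C_ERR_* code. A hedged sketch follows; the read_eeprom_word() wrapper is hypothetical, and only the cmd fields and error codes visible in this file are assumed:

/* Hypothetical caller, for illustration only. */
static int read_eeprom_word(unsigned int slave_addr, unsigned int *buf)
{
	titan_i2c_command cmd;
	int err;

	cmd.type = TITAN_I2C_CMD_READ;          /* plain read transaction */

	err = titan_i2c_xfer(slave_addr, &cmd, 2, buf); /* fetch two bytes */
	if (err != TITAN_I2C_ERR_OK)
		return err;     /* timeout, arbitration loss, no response
	                           or data collision, as decoded above */
	return 0;
}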
+ * + * Copyright (C) 2001, 2002, 2004 Ralf Baechle + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* SUPERIO uart register map */ +struct yo_uartregs { + union { + volatile u8 rbr; /* read only, DLAB == 0 */ + volatile u8 thr; /* write only, DLAB == 0 */ + volatile u8 dll; /* DLAB == 1 */ + } u1; + union { + volatile u8 ier; /* DLAB == 0 */ + volatile u8 dlm; /* DLAB == 1 */ + } u2; + union { + volatile u8 iir; /* read only */ + volatile u8 fcr; /* write only */ + } u3; + volatile u8 iu_lcr; + volatile u8 iu_mcr; + volatile u8 iu_lsr; + volatile u8 iu_msr; + volatile u8 iu_scr; +} yo_uregs_t; + +#define iu_rbr u1.rbr +#define iu_thr u1.thr +#define iu_dll u1.dll +#define iu_ier u2.ier +#define iu_dlm u2.dlm +#define iu_iir u3.iir +#define iu_fcr u3.fcr + +extern unsigned long uart_base; + +#define IO_BASE_64 0x9000000000000000ULL + +static unsigned char readb_outer_space(unsigned long phys) +{ + unsigned long long vaddr = IO_BASE_64 | phys; + unsigned char res; + unsigned int sr; + + sr = read_c0_status(); + write_c0_status((sr | ST0_KX) & ~ ST0_IE); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + + __asm__ __volatile__ ( + " .set mips3 \n" + " ld %0, (%0) \n" + " lbu %0, (%0) \n" + " .set mips0 \n" + : "=r" (res) + : "0" (&vaddr)); + + write_c0_status(sr); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + + return res; +} + +static void writeb_outer_space(unsigned long phys, unsigned char c) +{ + unsigned long long vaddr = IO_BASE_64 | phys; + unsigned long tmp; + unsigned int sr; + + sr = read_c0_status(); + write_c0_status((sr | ST0_KX) & ~ ST0_IE); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + + __asm__ __volatile__ ( + " .set mips3 \n" + " ld %0, (%1) \n" + " sb %2, (%0) \n" + " .set mips0 \n" + : "=r" (tmp) + : "r" (&vaddr), "r" (c)); + + write_c0_status(sr); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); +} + +static inline struct yo_uartregs *console_uart(void) +{ + return (struct yo_uartregs *) (uart_base + 8); +} + +void prom_putchar(char c) +{ + unsigned long lsr = 0xfd000008UL + offsetof(struct yo_uartregs, iu_lsr); + unsigned long thr = 0xfd000008UL + offsetof(struct yo_uartregs, iu_thr); + + while ((readb_outer_space(lsr) & 0x20) == 0); + writeb_outer_space(thr, c); +} + +char __init prom_getchar(void) +{ + return 0; +} diff --git a/arch/mips/vr41xx/tanbac-tb0229/tb0219.c b/arch/mips/vr41xx/tanbac-tb0229/tb0219.c new file mode 100644 index 000000000..a07d9fa45 --- /dev/null +++ b/arch/mips/vr41xx/tanbac-tb0229/tb0219.c @@ -0,0 +1,44 @@ +/* + * tb0219.c, Setup for the TANBAC TB0219 + * + * Copyright (C) 2003 Megasolution Inc. + * Copyright (C) 2004 Yoichi Yuasa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
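readb_outer_space()/writeb_outer_space() above reach a physical address beyond the 32-bit kernel mapping by OR-ing it into a 64-bit uncached XKPHYS window (IO_BASE_64), temporarily setting Status.KX (with interrupts masked) so 64-bit addressing is legal, and doing the access from inline assembly because a 32-bit kernel cannot form that pointer in C. A sketch of the same idea for a kernel whose pointers are already 64 bits wide (purely illustrative, not the code used by this patch):

/* Illustrative only; assumes a 64-bit kernel so the address fits in a C
 * pointer.  ST0_KX/ST0_IE and the c0 status accessors are assumed to come
 * from <asm/mipsregs.h>. */
static unsigned char read_high_phys(unsigned long long phys)
{
	volatile unsigned char *p =
		(volatile unsigned char *)(0x9000000000000000ULL | phys);
	unsigned int sr = read_c0_status();
	unsigned char val;

	write_c0_status((sr | ST0_KX) & ~ST0_IE); /* 64-bit addressing on, IRQs off */
	val = *p;                                 /* uncached byte load */
	write_c0_status(sr);                      /* restore previous state */

	return val;
}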
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include + +#include +#include + +#define TB0219_RESET_REGS KSEG1ADDR(0x0a00000e) + +#define tb0219_hard_reset() writew(0, TB0219_RESET_REGS) + +static void tanbac_tb0219_restart(char *command) +{ + local_irq_disable(); + tb0219_hard_reset(); + while (1); +} + +static int __init tanbac_tb0219_setup(void) +{ + _machine_restart = tanbac_tb0219_restart; + + return 0; +} + +early_initcall(tanbac_tb0219_setup); diff --git a/arch/parisc/configs/n4000_defconfig b/arch/parisc/configs/n4000_defconfig new file mode 100644 index 000000000..8df4bb1cc --- /dev/null +++ b/arch/parisc/configs/n4000_defconfig @@ -0,0 +1,905 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_PARISC=y +CONFIG_MMU=y +CONFIG_STACK_GROWSUP=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +# CONFIG_CLEAN_COMPILE is not set +# CONFIG_STANDALONE is not set +CONFIG_BROKEN=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=16 +CONFIG_HOTPLUG=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_OBSOLETE_MODPARM=y +# CONFIG_MODVERSIONS is not set +CONFIG_KMOD=y + +# +# Processor type and features +# +# CONFIG_PA7000 is not set +# CONFIG_PA7100LC is not set +# CONFIG_PA7200 is not set +# CONFIG_PA7300LC is not set +CONFIG_PA8X00=y +CONFIG_PA20=y +CONFIG_PREFETCH=y +CONFIG_PARISC64=y +CONFIG_64BIT=y +# CONFIG_SMP is not set +CONFIG_DISCONTIGMEM=y +# CONFIG_PREEMPT is not set +CONFIG_COMPAT=y + +# +# Bus options (PCI, PCMCIA, EISA, GSC, ISA) +# +# CONFIG_GSC is not set +CONFIG_PCI=y +CONFIG_PCI_LEGACY_PROC=y +CONFIG_PCI_NAMES=y +CONFIG_PCI_LBA=y +CONFIG_IOSAPIC=y +CONFIG_IOMMU_SBA=y +# CONFIG_SUPERIO is not set +CONFIG_CHASSIS_LCD_LED=y +# CONFIG_PDC_CHASSIS is not set + +# +# PCMCIA/CardBus support +# +CONFIG_PCMCIA=m +CONFIG_PCMCIA_DEBUG=y +CONFIG_YENTA=m +CONFIG_CARDBUS=y +# CONFIG_I82092 is not set +# CONFIG_TCIC is not set + +# +# PCI Hotplug Support +# +# CONFIG_HOTPLUG_PCI is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +# CONFIG_FW_LOADER is not set +CONFIG_DEBUG_DRIVER=y + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +CONFIG_BLK_DEV_UMEM=m +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_CARMEL is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=6144 +CONFIG_BLK_DEV_INITRD=y + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +CONFIG_SCSI=y +CONFIG_SCSI_PROC_FS=y + +# +# 
SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +# CONFIG_CHR_DEV_OSST is not set +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set +CONFIG_CHR_DEV_SG=y + +# +# Some SCSI devices (e.g. CD jukebox) support multiple LUNs +# +CONFIG_SCSI_MULTI_LUN=y +# CONFIG_SCSI_CONSTANTS is not set +# CONFIG_SCSI_LOGGING is not set + +# +# SCSI Transport Attributes +# +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m + +# +# SCSI low-level drivers +# +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC7XXX_OLD is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_MEGARAID is not set +# CONFIG_SCSI_SATA is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_CPQFCTS is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_EATA is not set +# CONFIG_SCSI_EATA_PIO is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +# CONFIG_SCSI_GDTH is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +CONFIG_SCSI_SYM53C8XX_2=y +CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 +CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 +CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 +CONFIG_SCSI_SYM53C8XX_IOMAPPED=y +# CONFIG_SCSI_IPR is not set +# CONFIG_SCSI_PCI2000 is not set +# CONFIG_SCSI_PCI2220I is not set +# CONFIG_SCSI_QLOGIC_ISP is not set +CONFIG_SCSI_QLOGIC_FC=m +# CONFIG_SCSI_QLOGIC_FC_FIRMWARE is not set +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA2XXX=y +# CONFIG_SCSI_QLA21XX is not set +# CONFIG_SCSI_QLA22XX is not set +CONFIG_SCSI_QLA2300=m +CONFIG_SCSI_QLA2322=m +CONFIG_SCSI_QLA6312=m +CONFIG_SCSI_QLA6322=m +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_DC390T is not set +CONFIG_SCSI_DEBUG=m + +# +# PCMCIA SCSI adapter support +# +# CONFIG_PCMCIA_FDOMAIN is not set +# CONFIG_PCMCIA_QLOGIC is not set +# CONFIG_PCMCIA_SYM53C500 is not set + +# +# Multi-device support (RAID and LVM) +# +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=y +CONFIG_MD_RAID0=y +CONFIG_MD_RAID1=y +# CONFIG_MD_RAID5 is not set +# CONFIG_MD_RAID6 is not set +# CONFIG_MD_MULTIPATH is not set +# CONFIG_BLK_DEV_DM is not set + +# +# Fusion MPT device support +# +CONFIG_FUSION=m +CONFIG_FUSION_MAX_SGE=40 +CONFIG_FUSION_ISENSE=m +CONFIG_FUSION_CTL=m + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_MMAP=y +CONFIG_NETLINK_DEV=y +CONFIG_UNIX=y +CONFIG_NET_KEY=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +# CONFIG_INET_IPCOMP is not set + +# +# IP: Virtual Server Configuration +# +# CONFIG_IP_VS is not set +# CONFIG_IPV6 is not set +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_DEBUG is not set + +# +# IP: Netfilter Configuration +# +CONFIG_IP_NF_CONNTRACK=m +CONFIG_IP_NF_FTP=m +CONFIG_IP_NF_IRC=m +CONFIG_IP_NF_TFTP=m +CONFIG_IP_NF_AMANDA=m +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_LIMIT=m +CONFIG_IP_NF_MATCH_IPRANGE=m +CONFIG_IP_NF_MATCH_MAC=m +CONFIG_IP_NF_MATCH_PKTTYPE=m +CONFIG_IP_NF_MATCH_MARK=m 
+CONFIG_IP_NF_MATCH_MULTIPORT=m +CONFIG_IP_NF_MATCH_TOS=m +CONFIG_IP_NF_MATCH_RECENT=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_DSCP=m +CONFIG_IP_NF_MATCH_AH_ESP=m +CONFIG_IP_NF_MATCH_LENGTH=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_MATCH_TCPMSS=m +CONFIG_IP_NF_MATCH_HELPER=m +CONFIG_IP_NF_MATCH_STATE=m +CONFIG_IP_NF_MATCH_CONNTRACK=m +CONFIG_IP_NF_MATCH_OWNER=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_NAT_NEEDED=y +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_SAME=m +# CONFIG_IP_NF_NAT_LOCAL is not set +CONFIG_IP_NF_NAT_SNMP_BASIC=m +CONFIG_IP_NF_NAT_IRC=m +CONFIG_IP_NF_NAT_FTP=m +CONFIG_IP_NF_NAT_TFTP=m +CONFIG_IP_NF_NAT_AMANDA=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_TOS=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_DSCP=m +CONFIG_IP_NF_TARGET_MARK=m +CONFIG_IP_NF_TARGET_CLASSIFY=m +CONFIG_IP_NF_TARGET_LOG=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_TARGET_TCPMSS=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# CONFIG_IP_NF_COMPAT_IPCHAINS is not set +# CONFIG_IP_NF_COMPAT_IPFWADM is not set +CONFIG_IP_NF_TARGET_NOTRACK=m +CONFIG_IP_NF_RAW=m +CONFIG_XFRM=y +CONFIG_XFRM_USER=m + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +CONFIG_LLC=m +CONFIG_LLC2=m +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +CONFIG_DUMMY=m +CONFIG_BONDING=m +# CONFIG_EQUALIZER is not set +CONFIG_TUN=m +# CONFIG_ETHERTAP is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=m +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +CONFIG_NET_VENDOR_3COM=y +CONFIG_VORTEX=m +CONFIG_TYPHOON=m + +# +# Tulip family network device support +# +CONFIG_NET_TULIP=y +CONFIG_DE2104X=y +CONFIG_TULIP=y +# CONFIG_TULIP_MWI is not set +CONFIG_TULIP_MMIO=y +# CONFIG_TULIP_NAPI is not set +# CONFIG_DE4X5 is not set +# CONFIG_WINBOND_840 is not set +# CONFIG_DM9102 is not set +CONFIG_PCMCIA_XIRCOM=m +CONFIG_PCMCIA_XIRTULIP=m +CONFIG_HP100=m +CONFIG_NET_PCI=y +CONFIG_PCNET32=m +# CONFIG_AMD8111_ETH is not set +# CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_B44 is not set +# CONFIG_FORCEDETH is not set +# CONFIG_DGRS is not set +CONFIG_EEPRO100=m +# CONFIG_EEPRO100_PIO is not set +CONFIG_E100=m +CONFIG_E100_NAPI=y +# CONFIG_FEALNX is not set +CONFIG_NATSEMI=m +# CONFIG_NE2K_PCI is not set +# CONFIG_8139CP is not set +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +# CONFIG_8139TOO_8129 is not set +# CONFIG_8139_OLD_RX_RESET is not set +# CONFIG_SIS900 is not set +CONFIG_EPIC100=m +# CONFIG_SUNDANCE is not set +CONFIG_VIA_RHINE=m +CONFIG_VIA_RHINE_MMIO=y + +# +# Ethernet (1000 Mbit) +# +CONFIG_ACENIC=m +CONFIG_ACENIC_OMIT_TIGON_I=y +CONFIG_DL2K=m +CONFIG_E1000=m +CONFIG_E1000_NAPI=y +# CONFIG_NS83820 is not 
set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +CONFIG_TIGON3=m + +# +# Ethernet (10000 Mbit) +# +CONFIG_IXGB=m +CONFIG_IXGB_NAPI=y +CONFIG_S2IO=m +CONFIG_S2IO_NAPI=y + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +CONFIG_NET_RADIO=y + +# +# Obsolete Wireless cards support (pre-802.11) +# +# CONFIG_STRIP is not set +CONFIG_PCMCIA_WAVELAN=m +CONFIG_PCMCIA_NETWAVE=m + +# +# Wireless 802.11 Frequency Hopping cards support +# +# CONFIG_PCMCIA_RAYCS is not set + +# +# Wireless 802.11b ISA/PCI cards support +# +# CONFIG_AIRO is not set +CONFIG_HERMES=m +CONFIG_PLX_HERMES=m +CONFIG_TMD_HERMES=m +CONFIG_PCI_HERMES=m +# CONFIG_ATMEL is not set + +# +# Wireless 802.11b Pcmcia/Cardbus cards support +# +CONFIG_PCMCIA_HERMES=m +CONFIG_AIRO_CS=m +# CONFIG_PCMCIA_WL3501 is not set + +# +# Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support +# +# CONFIG_PRISM54 is not set +CONFIG_NET_WIRELESS=y + +# +# PCMCIA network device support +# +CONFIG_NET_PCMCIA=y +CONFIG_PCMCIA_3C589=m +CONFIG_PCMCIA_3C574=m +# CONFIG_PCMCIA_FMVJ18X is not set +# CONFIG_PCMCIA_PCNET is not set +# CONFIG_PCMCIA_NMCLAN is not set +CONFIG_PCMCIA_SMC91C92=m +CONFIG_PCMCIA_XIRC2PS=m +# CONFIG_PCMCIA_AXNET is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +CONFIG_PPP=m +# CONFIG_PPP_MULTILINK is not set +# CONFIG_PPP_FILTER is not set +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +# CONFIG_PPPOE is not set +# CONFIG_SLIP is not set +# CONFIG_NET_FC is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +# CONFIG_SERIO is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_CS is not set +CONFIG_SERIAL_8250_NR_UARTS=8 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +# CONFIG_SERIAL_8250_MULTIPORT is not set +# CONFIG_SERIAL_8250_RSA is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MUX is not set +CONFIG_PDC_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +CONFIG_GEN_RTC=y +CONFIG_GEN_RTC_X=y +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set + +# +# PCMCIA character devices +# +# CONFIG_SYNCLINK_CS is not set 
+CONFIG_RAW_DRIVER=y +CONFIG_MAX_RAW_DEVS=256 + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Console display driver support +# +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE_COLUMNS=160 +CONFIG_DUMMY_CONSOLE_ROWS=64 +CONFIG_DUMMY_CONSOLE=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +# CONFIG_EXT3_FS_XATTR is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +# CONFIG_REISERFS_FS is not set +CONFIG_JFS_FS=m +# CONFIG_JFS_POSIX_ACL is not set +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=m +# CONFIG_XFS_RT is not set +# CONFIG_XFS_QUOTA is not set +# CONFIG_XFS_SECURITY is not set +# CONFIG_XFS_POSIX_ACL is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +# CONFIG_ZISOFS is not set +CONFIG_UDF_FS=m + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +CONFIG_UFS_FS=m +# CONFIG_UFS_FS_WRITE is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +CONFIG_NFS_V4=y +CONFIG_NFS_DIRECTIO=y +CONFIG_NFSD=m +CONFIG_NFSD_V3=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_TCP=y +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_EXPORTFS=m +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_RPCSEC_GSS_KRB5=y +CONFIG_SMB_FS=m +CONFIG_SMB_NLS_DEFAULT=y +CONFIG_SMB_NLS_REMOTE="cp437" +CONFIG_CIFS=m +# CONFIG_CIFS_STATS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +CONFIG_NLS_CODEPAGE_437=m +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +CONFIG_NLS_CODEPAGE_863=m +# CONFIG_NLS_CODEPAGE_864 is not set +CONFIG_NLS_CODEPAGE_865=m +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not 
set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +CONFIG_NLS_ISO8859_15=m +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +CONFIG_NLS_UTF8=m + +# +# Profiling support +# +CONFIG_PROFILING=y +CONFIG_OPROFILE=m + +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SLAB is not set +CONFIG_MAGIC_SYSRQ=y +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_FRAME_POINTER is not set +# CONFIG_DEBUG_INFO is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +CONFIG_CRYPTO=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_SHA1=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_AES=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +# CONFIG_CRYPTO_ARC4 is not set +CONFIG_CRYPTO_DEFLATE=m +# CONFIG_CRYPTO_MICHAEL_MIC is not set +CONFIG_CRYPTO_CRC32C=m +CONFIG_CRYPTO_TEST=m + +# +# Library routines +# +CONFIG_CRC32=y +CONFIG_LIBCRC32C=m +CONFIG_ZLIB_INFLATE=m +CONFIG_ZLIB_DEFLATE=m diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c new file mode 100644 index 000000000..ccfd5fe5a --- /dev/null +++ b/arch/parisc/kernel/unwind.c @@ -0,0 +1,295 @@ +/* + * Kernel unwinding support + * + * (c) 2002-2004 Randolph Chung + * + * Derived partially from the IA64 implementation. The PA-RISC + * Runtime Architecture Document is also a useful reference to + * understand what is happening here + */ + +/* + * J. David Anglin writes: + * + * "You have to adjust the current sp to that at the begining of the function. + * There can be up to two stack additions to allocate the frame in the + * prologue. Similar things happen in the epilogue. In the presence of + * interrupts, you have to be concerned about where you are in the function + * and what stack adjustments have taken place." + * + * For now these cases are not handled, but they should be! + */ + +#include +#include +#include +#include + +#include + +#include + +/* #define DEBUG 1 */ +#ifdef DEBUG +#define dbg(x...) printk(x) +#else +#define dbg(x...) 
+#endif + +extern const struct unwind_table_entry __start___unwind[]; +extern const struct unwind_table_entry __stop___unwind[]; + +static spinlock_t unwind_lock; +/* + * the kernel unwind block is not dynamically allocated so that + * we can call unwind_init as early in the bootup process as + * possible (before the slab allocator is initialized) + */ +static struct unwind_table kernel_unwind_table; +static struct unwind_table *unwind_tables, *unwind_tables_end; + + +static inline const struct unwind_table_entry * +find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr) +{ + const struct unwind_table_entry *e = 0; + unsigned long lo, hi, mid; + + addr -= table->base_addr; + + for (lo = 0, hi = table->length; lo < hi; ) + { + mid = (lo + hi) / 2; + e = &table->table[mid]; + if (addr < e->region_start) + hi = mid; + else if (addr > e->region_end) + lo = mid + 1; + else + break; + } + + return e; +} + +static inline const struct unwind_table_entry * +find_unwind_entry(unsigned long addr) +{ + struct unwind_table *table = unwind_tables; + const struct unwind_table_entry *e = NULL; + + if (addr >= kernel_unwind_table.start && + addr <= kernel_unwind_table.end) + e = find_unwind_entry_in_table(&kernel_unwind_table, addr); + else + for (; table; table = table->next) + { + if (addr >= table->start && + addr <= table->end) + e = find_unwind_entry_in_table(table, addr); + if (e) + break; + } + + return e; +} + +static void +unwind_table_init(struct unwind_table *table, const char *name, + unsigned long base_addr, unsigned long gp, + const void *table_start, const void *table_end) +{ + const struct unwind_table_entry *start = table_start; + const struct unwind_table_entry *end = table_end - 1; + + table->name = name; + table->base_addr = base_addr; + table->gp = gp; + table->start = base_addr + start->region_start; + table->end = base_addr + end->region_end; + table->table = (struct unwind_table_entry *)table_start; + table->length = end - start; + table->next = NULL; +} + +void * +unwind_table_add(const char *name, unsigned long base_addr, + unsigned long gp, + const void *start, const void *end) +{ + struct unwind_table *table; + unsigned long flags; + + table = kmalloc(sizeof(struct unwind_table), GFP_USER); + if (table == NULL) + return 0; + unwind_table_init(table, name, base_addr, gp, start, end); + spin_lock_irqsave(&unwind_lock, flags); + if (unwind_tables) + { + unwind_tables_end->next = table; + unwind_tables_end = table; + } + else + { + unwind_tables = unwind_tables_end = table; + } + spin_unlock_irqrestore(&unwind_lock, flags); + + return table; +} + +/* Called from setup_arch to import the kernel unwind info */ +static int unwind_init(void) +{ + long start, stop; + register unsigned long gp __asm__ ("r27"); + + start = (long)&__start___unwind[0]; + stop = (long)&__stop___unwind[0]; + + printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", + start, stop, + (stop - start) / sizeof(struct unwind_table_entry)); + + unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START, + gp, + &__start___unwind[0], &__stop___unwind[0]); +#if 0 + { + int i; + for (i = 0; i < 10; i++) + { + printk("region 0x%x-0x%x\n", + __start___unwind[i].region_start, + __start___unwind[i].region_end); + } + } +#endif + return 0; +} + +static void unwind_frame_regs(struct unwind_frame_info *info) +{ + const struct unwind_table_entry *e; + unsigned long npc; + unsigned int insn; + long frame_size = 0; + int looking_for_rp, rpoffset = 0; + + e = find_unwind_entry(info->ip); + 
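	/*
	 * Descriptive note added for clarity, not part of the original
	 * patch: two strategies follow.  If no unwind table entry covers
	 * info->ip, the stack is walked blindly in 64-byte aligned frames
	 * until a candidate return pointer falls inside kernel text.
	 * Otherwise the prologue of the function named by the entry is
	 * decoded instruction by instruction to recover the frame size
	 * and the slot (if any) where rp was spilled.
	 */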
if (!e) { + unsigned long sp; + extern char _stext[], _etext[]; + + dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip); + + /* Since we are doing the unwinding blind, we don't know if + we are adjusting the stack correctly or extracting the rp + correctly. The rp is checked to see if it belongs to the + kernel text section, if not we assume we don't have a + correct stack frame and we continue to unwind the stack. + This is not quite correct, and will fail for loadable + modules. */ + sp = info->sp & ~63; + do { + info->prev_sp = sp - 64; + + /* FIXME: what happens if we unwind too far so that + sp no longer falls in a mapped kernel page? */ +#ifndef __LP64__ + info->prev_ip = *(unsigned long *)(info->prev_sp - 20); +#else + info->prev_ip = *(unsigned long *)(info->prev_sp - 16); +#endif + + sp = info->prev_sp; + } while (info->prev_ip < (unsigned long)_stext || + info->prev_ip > (unsigned long)_etext); + } else { + + dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, Save_RP = %d size = %u\n", + e->region_start, e->region_end, e->Save_SP, e->Save_RP, e->Total_frame_size); + + looking_for_rp = e->Save_RP; + + for (npc = e->region_start; + (frame_size < (e->Total_frame_size << 3) || looking_for_rp) && + npc < info->ip; + npc += 4) { + + insn = *(unsigned int *)npc; + + if ((insn & 0xffffc000) == 0x37de0000 || + (insn & 0xffe00000) == 0x6fc00000) { + /* ldo X(sp), sp, or stwm X,D(sp) */ + frame_size += (insn & 0x1 ? -1 << 13 : 0) | + ((insn & 0x3fff) >> 1); + } else if ((insn & 0xffe00008) == 0x7ec00008) { + /* std,ma X,D(sp) */ + frame_size += (insn & 0x1 ? -1 << 13 : 0) | + (((insn >> 4) & 0x3ff) << 3); + } else if (insn == 0x6bc23fd9) { + /* stw rp,-20(sp) */ + rpoffset = 20; + looking_for_rp = 0; + } else if (insn == 0x0fc212c1) { + /* std rp,-16(sr0,sp) */ + rpoffset = 16; + looking_for_rp = 0; + } + } + + info->prev_sp = info->sp - frame_size; + if (rpoffset) + info->prev_ip = *(unsigned long *)(info->prev_sp - rpoffset); + } +} + +void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t, + struct pt_regs *regs) +{ + memset(info, 0, sizeof(struct unwind_frame_info)); + info->t = t; + info->sp = regs->ksp; + info->ip = regs->kpc; + + dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n", (int)t->pid, info->sp, info->ip); +} + +void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t) +{ + struct pt_regs *regs = &t->thread.regs; + unwind_frame_init(info, t, regs); +} + +int unwind_once(struct unwind_frame_info *next_frame) +{ + unwind_frame_regs(next_frame); + + if (next_frame->prev_sp == 0 || + next_frame->prev_ip == 0) + return -1; + + next_frame->sp = next_frame->prev_sp; + next_frame->ip = next_frame->prev_ip; + next_frame->prev_sp = 0; + next_frame->prev_ip = 0; + + dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n", (int)next_frame->t->pid, next_frame->sp, next_frame->ip); + + return 0; +} + +int unwind_to_user(struct unwind_frame_info *info) +{ + int ret; + + do { + ret = unwind_once(info); + } while (!ret && !(info->ip & 3)); + + return ret; +} + +module_init(unwind_init); diff --git a/arch/parisc/lib/debuglocks.c b/arch/parisc/lib/debuglocks.c new file mode 100644 index 000000000..7e79f1bad --- /dev/null +++ b/arch/parisc/lib/debuglocks.c @@ -0,0 +1,227 @@ +/* + * Debugging versions of SMP locking primitives. 
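The unwind interface above is typically driven as a loop: initialize a frame from a blocked task, then call unwind_once() until it reports failure. A hypothetical backtrace helper, assuming only the functions and struct fields declared in this file (not part of the patch):

/* Illustrative sketch only. */
static void show_trace_of(struct task_struct *t)
{
	struct unwind_frame_info info;

	unwind_frame_init_from_blocked_task(&info, t);
	do {
		printk(" [<%08lx>] sp=%08lx\n", info.ip, info.sp);
	} while (unwind_once(&info) == 0);    /* stops when prev_sp/prev_ip are 0 */
}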
+ * + * Copyright (C) 2004 Thibaut VARENE + * + * Some code stollen from alpha & sparc64 ;) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#include +#include +#include +#include +#include +#include /* in_interrupt() */ + +#undef INIT_STUCK +#define INIT_STUCK 1L << 30 + +#ifdef CONFIG_DEBUG_SPINLOCK + +void _dbg_spin_lock(spinlock_t * lock, const char *base_file, int line_no) +{ + volatile unsigned int *a; + long stuck = INIT_STUCK; + void *inline_pc = __builtin_return_address(0); + unsigned long started = jiffies; + int printed = 0; + int cpu = smp_processor_id(); + +try_again: + + /* Do the actual locking */ + /* ggg: we can't get stuck on the outter loop? + * T-Bone: We can hit the outer loop + * alot if multiple CPUs are constantly racing for a lock + * and the backplane is NOT fair about which CPU sees + * the update first. But it won't hang since every failed + * attempt will drop us back into the inner loop and + * decrement `stuck'. + * K-class and some of the others are NOT fair in the HW + * implementation so we could see false positives. + * But fixing the lock contention is easier than + * fixing the HW to be fair. + * __ldcw() returns 1 if we get the lock; otherwise we + * spin until the value of the lock changes, or we time out. + */ + a = __ldcw_align(lock); + while (stuck && (__ldcw(a) == 0)) + while ((*a == 0) && --stuck); + + if (unlikely(stuck <= 0)) { + printk(KERN_WARNING + "%s:%d: spin_lock(%s/%p) stuck in %s at %p(%d)" + " owned by %s:%d in %s at %p(%d)\n", + base_file, line_no, lock->module, lock, + current->comm, inline_pc, cpu, + lock->bfile, lock->bline, lock->task->comm, + lock->previous, lock->oncpu); + stuck = INIT_STUCK; + printed = 1; + goto try_again; + } + + /* Exiting. Got the lock. 
*/ + lock->oncpu = cpu; + lock->previous = inline_pc; + lock->task = current; + lock->bfile = (char *)base_file; + lock->bline = line_no; + + if (unlikely(printed)) { + printk(KERN_WARNING + "%s:%d: spin_lock grabbed in %s at %p(%d) %ld ticks\n", + base_file, line_no, current->comm, inline_pc, + cpu, jiffies - started); + } +} + +void _dbg_spin_unlock(spinlock_t * lock, const char *base_file, int line_no) +{ + CHECK_LOCK(lock); + volatile unsigned int *a = __ldcw_align(lock); + if (unlikely((*a != 0) && lock->babble)) { + lock->babble--; + printk(KERN_WARNING + "%s:%d: spin_unlock(%s:%p) not locked\n", + base_file, line_no, lock->module, lock); + } + *a = 1; +} + +int _dbg_spin_trylock(spinlock_t * lock, const char *base_file, int line_no) +{ + int ret; + volatile unsigned int *a = __ldcw_align(lock); + if ((ret = (__ldcw(a) != 0))) { + lock->oncpu = smp_processor_id(); + lock->previous = __builtin_return_address(0); + lock->task = current; + } else { + lock->bfile = (char *)base_file; + lock->bline = line_no; + } + return ret; +} + +#endif /* CONFIG_DEBUG_SPINLOCK */ + +#ifdef CONFIG_DEBUG_RWLOCK + +/* Interrupts trouble detailed explanation, thx Grant: + * + * o writer (wants to modify data) attempts to acquire the rwlock + * o He gets the write lock. + * o Interupts are still enabled, we take an interrupt with the + * write still holding the lock. + * o interrupt handler tries to acquire the rwlock for read. + * o deadlock since the writer can't release it at this point. + * + * In general, any use of spinlocks that competes between "base" + * level and interrupt level code will risk deadlock. Interrupts + * need to be disabled in the base level routines to avoid it. + * Or more precisely, only the IRQ the base level routine + * is competing with for the lock. But it's more efficient/faster + * to just disable all interrupts on that CPU to guarantee + * once it gets the lock it can release it quickly too. + */ + +void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline) +{ + void *inline_pc = __builtin_return_address(0); + unsigned long started = jiffies; + long stuck = INIT_STUCK; + int printed = 0; + int cpu = smp_processor_id(); + + if(unlikely(in_interrupt())) { /* acquiring write lock in interrupt context, bad idea */ + printk(KERN_WARNING "write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline); + BUG(); + } + + /* Note: if interrupts are disabled (which is most likely), the printk + will never show on the console. We might need a polling method to flush + the dmesg buffer anyhow. */ + +retry: + _raw_spin_lock(&rw->lock); + + if(rw->counter != 0) { + /* this basically never happens */ + _raw_spin_unlock(&rw->lock); + + stuck--; + if ((unlikely(stuck <= 0)) && (rw->counter < 0)) { + printk(KERN_WARNING + "%s:%d: write_lock stuck on writer" + " in %s at %p(%d) %ld ticks\n", + bfile, bline, current->comm, inline_pc, + cpu, jiffies - started); + stuck = INIT_STUCK; + printed = 1; + } + else if (unlikely(stuck <= 0)) { + printk(KERN_WARNING + "%s:%d: write_lock stuck on reader" + " in %s at %p(%d) %ld ticks\n", + bfile, bline, current->comm, inline_pc, + cpu, jiffies - started); + stuck = INIT_STUCK; + printed = 1; + } + + while(rw->counter != 0); + + goto retry; + } + + /* got it. 
now leave without unlocking */ + rw->counter = -1; /* remember we are locked */ + + if (unlikely(printed)) { + printk(KERN_WARNING + "%s:%d: write_lock grabbed in %s at %p(%d) %ld ticks\n", + bfile, bline, current->comm, inline_pc, + cpu, jiffies - started); + } +} + +void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline) +{ +#if 0 + void *inline_pc = __builtin_return_address(0); + unsigned long started = jiffies; + int cpu = smp_processor_id(); +#endif + unsigned long flags; + + local_irq_save(flags); + _raw_spin_lock(&rw->lock); + + rw->counter++; +#if 0 + printk(KERN_WARNING + "%s:%d: read_lock grabbed in %s at %p(%d) %ld ticks\n", + bfile, bline, current->comm, inline_pc, + cpu, jiffies - started); +#endif + _raw_spin_unlock(&rw->lock); + local_irq_restore(flags); +} + +#endif /* CONFIG_DEBUG_RWLOCK */ diff --git a/arch/ppc/boot/simple/mpc52xx_tty.c b/arch/ppc/boot/simple/mpc52xx_tty.c new file mode 100644 index 000000000..8a1c663e7 --- /dev/null +++ b/arch/ppc/boot/simple/mpc52xx_tty.c @@ -0,0 +1,138 @@ +/* + * arch/ppc/boot/simple/mpc52xx_tty.c + * + * Minimal serial functions needed to send messages out a MPC52xx + * Programmable Serial Controller (PSC). + * + * Author: Dale Farnsworth + * + * 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under the + * terms of the GNU General Public License version 2. This program is licensed + * "as is" without any warranty of any kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include +#include + +#if MPC52xx_PF_CONSOLE_PORT == 0 +#define MPC52xx_CONSOLE MPC52xx_PSC1 +#define MPC52xx_PSC_CONFIG_SHIFT 0 +#elif MPC52xx_PF_CONSOLE_PORT == 1 +#define MPC52xx_CONSOLE MPC52xx_PSC2 +#define MPC52xx_PSC_CONFIG_SHIFT 4 +#elif MPC52xx_PF_CONSOLE_PORT == 2 +#define MPC52xx_CONSOLE MPC52xx_PSC3 +#define MPC52xx_PSC_CONFIG_SHIFT 8 +#else +#error "MPC52xx_PF_CONSOLE_PORT not defined" +#endif + +static struct mpc52xx_psc *psc = (struct mpc52xx_psc *)MPC52xx_CONSOLE; + +/* The decrementer counts at the system bus clock frequency + * divided by four. The most accurate time base is connected to the + * rtc. We read the decrementer change during one rtc tick (one second) + * and multiply by 4 to get the system bus clock frequency. + */ +int +mpc52xx_ipbfreq(void) +{ + struct mpc52xx_rtc *rtc = (struct mpc52xx_rtc*)MPC52xx_RTC; + struct mpc52xx_cdm *cdm = (struct mpc52xx_cdm*)MPC52xx_CDM; + int current_time, previous_time; + int tbl_start, tbl_end; + int xlbfreq, ipbfreq; + + out_be32(&rtc->dividers, 0x8f1f0000); /* Set RTC 64x faster */ + previous_time = in_be32(&rtc->time); + while ((current_time = in_be32(&rtc->time)) == previous_time) ; + tbl_start = get_tbl(); + previous_time = current_time; + while ((current_time = in_be32(&rtc->time)) == previous_time) ; + tbl_end = get_tbl(); + out_be32(&rtc->dividers, 0xffff0000); /* Restore RTC */ + + xlbfreq = (tbl_end - tbl_start) << 8; + ipbfreq = (in_8(&cdm->ipb_clk_sel) & 1) ? 
xlbfreq / 2 : xlbfreq; + + return ipbfreq; +} + +unsigned long +serial_init(int ignored, void *ignored2) +{ + struct mpc52xx_gpio *gpio = (struct mpc52xx_gpio *)MPC52xx_GPIO; + int divisor; + int mode1; + int mode2; + u32 val32; + + static int been_here = 0; + + if (been_here) + return 0; + + been_here = 1; + + val32 = in_be32(&gpio->port_config); + val32 &= ~(0x7 << MPC52xx_PSC_CONFIG_SHIFT); + val32 |= MPC52xx_GPIO_PSC_CONFIG_UART_WITHOUT_CD + << MPC52xx_PSC_CONFIG_SHIFT; + out_be32(&gpio->port_config, val32); + + out_8(&psc->command, MPC52xx_PSC_RST_TX + | MPC52xx_PSC_RX_DISABLE | MPC52xx_PSC_TX_ENABLE); + out_8(&psc->command, MPC52xx_PSC_RST_RX); + + out_be32(&psc->sicr, 0x0); + out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); + out_be16(&psc->tfalarm, 0xf8); + + out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1 + | MPC52xx_PSC_RX_ENABLE + | MPC52xx_PSC_TX_ENABLE); + + divisor = ((mpc52xx_ipbfreq() + / (CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD * 16)) + 1) >> 1; + + mode1 = MPC52xx_PSC_MODE_8_BITS | MPC52xx_PSC_MODE_PARNONE + | MPC52xx_PSC_MODE_ERR; + mode2 = MPC52xx_PSC_MODE_ONE_STOP; + + out_8(&psc->ctur, divisor>>8); + out_8(&psc->ctlr, divisor); + out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); + out_8(&psc->mode, mode1); + out_8(&psc->mode, mode2); + + return 0; /* ignored */ +} + +void +serial_putc(void *ignored, const char c) +{ + serial_init(0, 0); + + while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP)) ; + out_8(&psc->mpc52xx_psc_buffer_8, c); + while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP)) ; +} + +char +serial_getc(void *ignored) +{ + while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY)) ; + + return in_8(&psc->mpc52xx_psc_buffer_8); +} + +int +serial_tstc(void *ignored) +{ + return (in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY) != 0; +} diff --git a/arch/ppc/configs/ads8272_defconfig b/arch/ppc/configs/ads8272_defconfig new file mode 100644 index 000000000..709a18066 --- /dev/null +++ b/arch/ppc/configs/ads8272_defconfig @@ -0,0 +1,583 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_MMU=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_HAVE_DEC_LOCK=y +CONFIG_PPC=y +CONFIG_PPC32=y +CONFIG_GENERIC_NVRAM=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +# CONFIG_KALLSYMS is not set +CONFIG_FUTEX=y +# CONFIG_EPOLL is not set +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Processor +# +CONFIG_6xx=y +# CONFIG_40x is not set +# CONFIG_44x is not set +# CONFIG_POWER3 is not set +# CONFIG_POWER4 is not set +# CONFIG_8xx is not set +# CONFIG_CPU_FREQ is not set +CONFIG_EMBEDDEDBOOT=y +CONFIG_PPC_STD_MMU=y + +# +# Platform options +# +# CONFIG_PPC_MULTIPLATFORM is not set +# CONFIG_APUS is not set +# CONFIG_WILLOW is not set +# CONFIG_PCORE is not set +# CONFIG_POWERPMC250 is not set +# CONFIG_EV64260 is not set +# CONFIG_SPRUCE is not set +# CONFIG_LOPEC is not set +# CONFIG_MCPN765 is not set +# CONFIG_MVME5100 is not set +# CONFIG_PPLUS is not set +# CONFIG_PRPMC750 is not set +# CONFIG_PRPMC800 is 
not set +# CONFIG_SANDPOINT is not set +# CONFIG_ADIR is not set +# CONFIG_K2 is not set +# CONFIG_PAL4 is not set +# CONFIG_GEMINI is not set +# CONFIG_EST8260 is not set +# CONFIG_SBC82xx is not set +# CONFIG_SBS8260 is not set +# CONFIG_RPX6 is not set +# CONFIG_TQM8260 is not set +CONFIG_ADS8272=y +CONFIG_PQ2ADS=y +CONFIG_8260=y +CONFIG_8272=y +CONFIG_CPM2=y +# CONFIG_PC_KEYBOARD is not set +CONFIG_SERIAL_CONSOLE=y +# CONFIG_SMP is not set +# CONFIG_PREEMPT is not set +# CONFIG_HIGHMEM is not set +CONFIG_KERNEL_ELF=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +# CONFIG_CMDLINE_BOOL is not set + +# +# Bus options +# +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +# CONFIG_PCI_LEGACY_PROC is not set +# CONFIG_PCI_NAMES is not set + +# +# Advanced setup +# +# CONFIG_ADVANCED_OPTIONS is not set + +# +# Default settings for advanced configuration options are used +# +CONFIG_HIGHMEM_START=0xfe000000 +CONFIG_LOWMEM_SIZE=0x30000000 +CONFIG_KERNEL_START=0xc0000000 +CONFIG_TASK_SIZE=0x80000000 +CONFIG_BOOT_LOAD=0x00400000 + +# +# Device Drivers +# + +# +# Generic Driver Options +# + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_CARMEL is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=32768 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# Macintosh device drivers +# + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# 
CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +# CONFIG_OAKNET is not set +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set + +# +# Tulip family network device support +# +# CONFIG_NET_TULIP is not set +# CONFIG_HP100 is not set +# CONFIG_NET_PCI is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +# CONFIG_SERIO is not set +# CONFIG_SERIO_I8042 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +CONFIG_GEN_RTC=y +# CONFIG_GEN_RTC_X is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD 
Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set +# CONFIG_SCC_ENET is not set +CONFIG_FEC_ENET=y +# CONFIG_USE_MDIO is not set + +# +# CPM2 Options +# +CONFIG_SCC_CONSOLE=y +CONFIG_FCC1_ENET=y +# CONFIG_FCC2_ENET is not set +# CONFIG_FCC3_ENET is not set + +# +# Library routines +# +# CONFIG_CRC32 is not set +# CONFIG_LIBCRC32C is not set + +# +# Kernel hacking +# +# CONFIG_DEBUG_KERNEL is not set +# CONFIG_KGDB_CONSOLE is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set diff --git a/arch/ppc/configs/lite5200_defconfig b/arch/ppc/configs/lite5200_defconfig new file mode 100644 index 000000000..7e7a943d8 --- /dev/null +++ b/arch/ppc/configs/lite5200_defconfig @@ -0,0 +1,436 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_MMU=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_HAVE_DEC_LOCK=y +CONFIG_PPC=y +CONFIG_PPC32=y +CONFIG_GENERIC_NVRAM=y +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_OBSOLETE_MODPARM=y +CONFIG_MODVERSIONS=y +CONFIG_KMOD=y +# +# Processor +# +CONFIG_6xx=y +# CONFIG_40x is not set +# 
CONFIG_44x is not set +# CONFIG_POWER3 is not set +# CONFIG_POWER4 is not set +# CONFIG_8xx is not set +# CONFIG_E500 is not set +# CONFIG_ALTIVEC is not set +# CONFIG_TAU is not set +# CONFIG_CPU_FREQ is not set +CONFIG_FSL_OCP=y +CONFIG_PPC_STD_MMU=y +# +# Platform options +# +# CONFIG_PPC_MULTIPLATFORM is not set +# CONFIG_APUS is not set +# CONFIG_WILLOW is not set +# CONFIG_PCORE is not set +# CONFIG_POWERPMC250 is not set +# CONFIG_EV64260 is not set +# CONFIG_SPRUCE is not set +# CONFIG_LOPEC is not set +# CONFIG_MCPN765 is not set +# CONFIG_MVME5100 is not set +# CONFIG_PPLUS is not set +# CONFIG_PRPMC750 is not set +# CONFIG_PRPMC800 is not set +# CONFIG_SANDPOINT is not set +# CONFIG_ADIR is not set +# CONFIG_K2 is not set +# CONFIG_PAL4 is not set +# CONFIG_GEMINI is not set +# CONFIG_EST8260 is not set +# CONFIG_SBC82xx is not set +# CONFIG_SBS8260 is not set +# CONFIG_RPX6 is not set +# CONFIG_TQM8260 is not set +# CONFIG_ADS8272 is not set +CONFIG_LITE5200=y +CONFIG_PPC_MPC52xx=y +# CONFIG_SMP is not set +# CONFIG_PREEMPT is not set +# CONFIG_HIGHMEM is not set +CONFIG_KERNEL_ELF=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttyS0 root=/dev/ram0 rw" +# +# Bus options +# +CONFIG_GENERIC_ISA_DMA=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +# CONFIG_PCI_LEGACY_PROC is not set +# CONFIG_PCI_NAMES is not set +# +# Advanced setup +# +CONFIG_ADVANCED_OPTIONS=y +CONFIG_HIGHMEM_START=0xfe000000 +# CONFIG_LOWMEM_SIZE_BOOL is not set +CONFIG_LOWMEM_SIZE=0x30000000 +# CONFIG_KERNEL_START_BOOL is not set +CONFIG_KERNEL_START=0xc0000000 +# CONFIG_TASK_SIZE_BOOL is not set +CONFIG_TASK_SIZE=0x80000000 +# CONFIG_BOOT_LOAD_BOOL is not set +CONFIG_BOOT_LOAD=0x00800000 +# +# Device Drivers +# +# +# Generic Driver Options +# +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_DEBUG_DRIVER is not set +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set +# +# Parallel port support +# +# CONFIG_PARPORT is not set +# +# Plug and Play support +# +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_LBD is not set +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set +# +# SCSI device support +# +# CONFIG_SCSI is not set +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set +# +# Fusion MPT device support +# +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set +# +# I2O device support +# +# CONFIG_I2O is not set +# +# Macintosh device drivers +# +# +# Networking support +# +# CONFIG_NET is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# +# ISDN subsystem +# +# +# Telephony Support +# +# CONFIG_PHONE is not set +# +# Input device support +# +CONFIG_INPUT=y +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_EVBUG=y +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set +# +# Input Device Drivers +# +# 
CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MPC52xx=y +CONFIG_SERIAL_MPC52xx_CONSOLE=y +CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600 +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set +# +# I2C support +# +# CONFIG_I2C is not set +# +# Misc devices +# +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set +# +# Digital Video Broadcasting Devices +# +# +# Graphics support +# +# CONFIG_FB is not set +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +# +# Sound +# +# CONFIG_SOUND is not set +# +# USB support +# +# CONFIG_USB is not set +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is 
not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set +# +# Library routines +# +# CONFIG_CRC16 is not set +# CONFIG_CRC32 is not set +# CONFIG_LIBCRC32C is not set +# +# Profiling support +# +# CONFIG_PROFILING is not set +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SLAB is not set +CONFIG_MAGIC_SYSRQ=y +# CONFIG_DEBUG_SPINLOCK is not set +CONFIG_DEBUG_SPINLOCK_SLEEP=y +# CONFIG_KGDB is not set +# CONFIG_XMON is not set +# CONFIG_BDI_SWITCH is not set +CONFIG_DEBUG_INFO=y +CONFIG_SERIAL_TEXT_DEBUG=y +CONFIG_PPC_OCP=y +# +# Security options +# +# CONFIG_SECURITY is not set +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set diff --git a/arch/ppc/configs/rpx8260_defconfig b/arch/ppc/configs/rpx8260_defconfig new file mode 100644 index 000000000..a69e0b485 --- /dev/null +++ b/arch/ppc/configs/rpx8260_defconfig @@ -0,0 +1,556 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_MMU=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_HAVE_DEC_LOCK=y +CONFIG_PPC=y +CONFIG_PPC32=y +CONFIG_GENERIC_NVRAM=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +# CONFIG_SWAP is not set +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +# CONFIG_KALLSYMS is not set +CONFIG_FUTEX=y +# CONFIG_EPOLL is not set +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Processor +# +CONFIG_6xx=y +# CONFIG_40x is not set +# CONFIG_44x is not set +# CONFIG_POWER3 is not set +# CONFIG_POWER4 is not set +# CONFIG_8xx is not set +# CONFIG_E500 is not set +# CONFIG_CPU_FREQ is not set +CONFIG_EMBEDDEDBOOT=y +CONFIG_PPC_STD_MMU=y + +# +# Platform options +# +# CONFIG_PPC_MULTIPLATFORM is not set +# CONFIG_APUS is not set +# CONFIG_WILLOW is not set +# CONFIG_PCORE is not set +# CONFIG_POWERPMC250 is not set +# CONFIG_EV64260 is not set +# CONFIG_SPRUCE is not set +# CONFIG_LOPEC is not set +# CONFIG_MCPN765 is not set +# CONFIG_MVME5100 is not set +# CONFIG_PPLUS is not set +# CONFIG_PRPMC750 is not set +# CONFIG_PRPMC800 is not set +# CONFIG_SANDPOINT is not set +# CONFIG_ADIR is not set +# CONFIG_K2 is not set +# CONFIG_PAL4 is not set +# CONFIG_GEMINI is not set +# CONFIG_EST8260 is not set +# CONFIG_SBC82xx is not set +# CONFIG_SBS8260 is not set +CONFIG_RPX8260=y +# CONFIG_TQM8260 is not set +# CONFIG_ADS8272 is not set +CONFIG_8260=y +CONFIG_CPM2=y +# CONFIG_PC_KEYBOARD is not set +# CONFIG_SMP is not set +# CONFIG_PREEMPT is not set +# CONFIG_HIGHMEM is not set +CONFIG_KERNEL_ELF=y +CONFIG_BINFMT_ELF=y +# 
CONFIG_BINFMT_MISC is not set +# CONFIG_CMDLINE_BOOL is not set + +# +# Bus options +# +# CONFIG_PCI is not set +# CONFIG_PCI_DOMAINS is not set + +# +# Advanced setup +# +# CONFIG_ADVANCED_OPTIONS is not set + +# +# Default settings for advanced configuration options are used +# +CONFIG_HIGHMEM_START=0xfe000000 +CONFIG_LOWMEM_SIZE=0x30000000 +CONFIG_KERNEL_START=0xc0000000 +CONFIG_TASK_SIZE=0x80000000 +CONFIG_BOOT_LOAD=0x00400000 + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# + +# +# Macintosh device drivers +# + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +# CONFIG_OAKNET is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +# CONFIG_INPUT is not set + +# +# Userland interfaces +# + +# +# 
Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +# CONFIG_SERIO is not set +# CONFIG_SERIO_I8042 is not set + +# +# Input Device Drivers +# + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_CPM=y +CONFIG_SERIAL_CPM_CONSOLE=y +# CONFIG_SERIAL_CPM_SCC1 is not set +# CONFIG_SERIAL_CPM_SCC2 is not set +# CONFIG_SERIAL_CPM_SCC3 is not set +# CONFIG_SERIAL_CPM_SCC4 is not set +CONFIG_SERIAL_CPM_SMC1=y +# CONFIG_SERIAL_CPM_SMC2 is not set +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Dallas's 1-wire bus +# +# CONFIG_W1 is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# 
CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set +# CONFIG_SCC_ENET is not set +CONFIG_FEC_ENET=y +# CONFIG_USE_MDIO is not set + +# +# CPM2 Options +# +# CONFIG_FCC1_ENET is not set +# CONFIG_FCC2_ENET is not set +CONFIG_FCC3_ENET=y + +# +# Library routines +# +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC32 is not set +# CONFIG_LIBCRC32C is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_DEBUG_KERNEL is not set +# CONFIG_KGDB_CONSOLE is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set diff --git a/arch/ppc/kernel/dma-mapping.c b/arch/ppc/kernel/dma-mapping.c new file mode 100644 index 000000000..c859f1127 --- /dev/null +++ b/arch/ppc/kernel/dma-mapping.c @@ -0,0 +1,439 @@ +/* + * PowerPC version derived from arch/arm/mm/consistent.c + * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) + * + * Copyright (C) 2000 Russell King + * + * Consistent memory allocators. Used for DMA devices that want to + * share uncached memory with the processor core. The function return + * is the virtual address and 'dma_handle' is the physical address. + * Mostly stolen from the ARM port, with some changes for PowerPC. + * -- Dan + * + * Reorganized to get rid of the arch-specific consistent_* functions + * and provide non-coherent implementations for the DMA API. -Matt + * + * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent() + * implementation. This is pulled straight from ARM and barely + * modified. -Matt + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int map_page(unsigned long va, phys_addr_t pa, int flags); + +#include + +/* + * This address range defaults to a value that is safe for all + * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It + * can be further configured for specific applications under + * the "Advanced Setup" menu. -Matt + */ +#define CONSISTENT_BASE (CONFIG_CONSISTENT_START) +#define CONSISTENT_END (CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE) +#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) + +/* + * This is the page table (2MB) covering uncached, DMA consistent allocations + */ +static pte_t *consistent_pte; +static spinlock_t consistent_lock = SPIN_LOCK_UNLOCKED; + +/* + * VM region handling support. + * + * This should become something generic, handling VM region allocations for + * vmalloc and similar (ioremap, module space, etc). 
+ * + * I envisage vmalloc()'s supporting vm_struct becoming: + * + * struct vm_struct { + * struct vm_region region; + * unsigned long flags; + * struct page **pages; + * unsigned int nr_pages; + * unsigned long phys_addr; + * }; + * + * get_vm_area() would then call vm_region_alloc with an appropriate + * struct vm_region head (eg): + * + * struct vm_region vmalloc_head = { + * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), + * .vm_start = VMALLOC_START, + * .vm_end = VMALLOC_END, + * }; + * + * However, vmalloc_head.vm_start is variable (typically, it is dependent on + * the amount of RAM found at boot time.) I would imagine that get_vm_area() + * would have to initialise this each time prior to calling vm_region_alloc(). + */ +struct vm_region { + struct list_head vm_list; + unsigned long vm_start; + unsigned long vm_end; +}; + +static struct vm_region consistent_head = { + .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), + .vm_start = CONSISTENT_BASE, + .vm_end = CONSISTENT_END, +}; + +static struct vm_region * +vm_region_alloc(struct vm_region *head, size_t size, int gfp) +{ + unsigned long addr = head->vm_start, end = head->vm_end - size; + unsigned long flags; + struct vm_region *c, *new; + + new = kmalloc(sizeof(struct vm_region), gfp); + if (!new) + goto out; + + spin_lock_irqsave(&consistent_lock, flags); + + list_for_each_entry(c, &head->vm_list, vm_list) { + if ((addr + size) < addr) + goto nospc; + if ((addr + size) <= c->vm_start) + goto found; + addr = c->vm_end; + if (addr > end) + goto nospc; + } + + found: + /* + * Insert this entry _before_ the one we found. + */ + list_add_tail(&new->vm_list, &c->vm_list); + new->vm_start = addr; + new->vm_end = addr + size; + + spin_unlock_irqrestore(&consistent_lock, flags); + return new; + + nospc: + spin_unlock_irqrestore(&consistent_lock, flags); + kfree(new); + out: + return NULL; +} + +static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr) +{ + struct vm_region *c; + + list_for_each_entry(c, &head->vm_list, vm_list) { + if (c->vm_start == addr) + goto out; + } + c = NULL; + out: + return c; +} + +/* + * Allocate DMA-coherent memory space and return both the kernel remapped + * virtual and bus address for that space. + */ +void * +__dma_alloc_coherent(size_t size, dma_addr_t *handle, int gfp) +{ + struct page *page; + struct vm_region *c; + unsigned long order; + u64 mask = 0x00ffffff, limit; /* ISA default */ + + if (!consistent_pte) { + printk(KERN_ERR "%s: not initialised\n", __func__); + dump_stack(); + return NULL; + } + + size = PAGE_ALIGN(size); + limit = (mask + 1) & ~mask; + if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) { + printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n", + size, mask); + return NULL; + } + + order = get_order(size); + + if (mask != 0xffffffff) + gfp |= GFP_DMA; + + page = alloc_pages(gfp, order); + if (!page) + goto no_page; + + /* + * Invalidate any data that might be lurking in the + * kernel direct-mapped region for device DMA. + */ + { + unsigned long kaddr = (unsigned long)page_address(page); + memset(page_address(page), 0, size); + flush_dcache_range(kaddr, kaddr + size); + } + + /* + * Allocate a virtual address in the consistent mapping region. 
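[Editor's sketch] The first-fit walk that vm_region_alloc() performs over the consistent window is easier to see outside the kernel; a minimal user-space sketch under assumed bounds, with the list kept sorted by start address (names and addresses here are illustrative, not the kernel's):

#include <stdio.h>
#include <stddef.h>

struct region {
	unsigned long start, end;	/* [start, end) */
	struct region *next;		/* kept sorted by start, like vm_list */
};

/* Find the first gap of 'size' bytes inside [lo, hi), scanning existing regions. */
static unsigned long region_place(struct region *head, unsigned long lo,
				  unsigned long hi, unsigned long size)
{
	unsigned long addr = lo;
	struct region *c;

	for (c = head; c != NULL; c = c->next) {
		if (addr + size <= c->start)
			return addr;		/* fits before this region */
		addr = c->end;			/* otherwise skip past it and retry */
	}
	return (addr + size <= hi) ? addr : 0;	/* tail gap, or no space left */
}

int main(void)
{
	struct region b = { 0x3000, 0x5000, NULL };
	struct region a = { 0x1000, 0x2000, &b };

	/* 0x1000 bytes fit in the hole between a and b, so this prints 0x2000. */
	printf("placed at 0x%lx\n", region_place(&a, 0x1000, 0x10000, 0x1000));
	return 0;
}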
+ */ + c = vm_region_alloc(&consistent_head, size, + gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); + if (c) { + pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start); + struct page *end = page + (1 << order); + + /* + * Set the "dma handle" + */ + *handle = page_to_bus(page); + + do { + BUG_ON(!pte_none(*pte)); + + set_page_count(page, 1); + SetPageReserved(page); + set_pte(pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL))); + page++; + pte++; + } while (size -= PAGE_SIZE); + + /* + * Free the otherwise unused pages. + */ + while (page < end) { + set_page_count(page, 1); + __free_page(page); + page++; + } + + return (void *)c->vm_start; + } + + if (page) + __free_pages(page, order); + no_page: + return NULL; +} + +/* + * free a page as defined by the above mapping. + */ +void __dma_free_coherent(size_t size, void *vaddr) +{ + struct vm_region *c; + unsigned long flags; + pte_t *ptep; + + size = PAGE_ALIGN(size); + + spin_lock_irqsave(&consistent_lock, flags); + + c = vm_region_find(&consistent_head, (unsigned long)vaddr); + if (!c) + goto no_area; + + if ((c->vm_end - c->vm_start) != size) { + printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", + __func__, c->vm_end - c->vm_start, size); + dump_stack(); + size = c->vm_end - c->vm_start; + } + + ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start); + do { + pte_t pte = ptep_get_and_clear(ptep); + unsigned long pfn; + + ptep++; + + if (!pte_none(pte) && pte_present(pte)) { + pfn = pte_pfn(pte); + + if (pfn_valid(pfn)) { + struct page *page = pfn_to_page(pfn); + ClearPageReserved(page); + + __free_page(page); + continue; + } + } + + printk(KERN_CRIT "%s: bad page in kernel page table\n", + __func__); + } while (size -= PAGE_SIZE); + + flush_tlb_kernel_range(c->vm_start, c->vm_end); + + list_del(&c->vm_list); + + spin_unlock_irqrestore(&consistent_lock, flags); + + kfree(c); + return; + + no_area: + spin_unlock_irqrestore(&consistent_lock, flags); + printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", + __func__, vaddr); + dump_stack(); +} +EXPORT_SYMBOL(dma_free_coherent); + +/* + * Initialise the consistent memory allocation. + */ +static int __init dma_alloc_init(void) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int ret = 0; + + spin_lock(&init_mm.page_table_lock); + + do { + pgd = pgd_offset(&init_mm, CONSISTENT_BASE); + pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE); + if (!pmd) { + printk(KERN_ERR "%s: no pmd tables\n", __func__); + ret = -ENOMEM; + break; + } + WARN_ON(!pmd_none(*pmd)); + + pte = pte_alloc_kernel(&init_mm, pmd, CONSISTENT_BASE); + if (!pte) { + printk(KERN_ERR "%s: no pte tables\n", __func__); + ret = -ENOMEM; + break; + } + + consistent_pte = pte; + } while (0); + + spin_unlock(&init_mm.page_table_lock); + + return ret; +} + +core_initcall(dma_alloc_init); + +/* + * make an area consistent. + */ +void __dma_sync(void *vaddr, size_t size, int direction) +{ + unsigned long start = (unsigned long)vaddr; + unsigned long end = start + size; + + switch (direction) { + case DMA_NONE: + BUG(); + case DMA_FROM_DEVICE: /* invalidate only */ + invalidate_dcache_range(start, end); + break; + case DMA_TO_DEVICE: /* writeback only */ + clean_dcache_range(start, end); + break; + case DMA_BIDIRECTIONAL: /* writeback and invalidate */ + flush_dcache_range(start, end); + break; + } +} + +#ifdef CONFIG_HIGHMEM +/* + * __dma_sync_page() implementation for systems using highmem. + * In this case, each page of a buffer must be kmapped/kunmapped + * in order to have a virtual address for __dma_sync(). 
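[Editor's sketch] Both the allocation and free paths above index a single page table through CONSISTENT_OFFSET(); the arithmetic is just (va - base) >> PAGE_SHIFT. A stand-alone sketch with made-up base/size values (the real ones come from CONFIG_CONSISTENT_START/CONFIG_CONSISTENT_SIZE):

#include <stdio.h>

#define PAGE_SHIFT		12
#define CONSISTENT_BASE		0xff100000UL	/* illustrative, not a real config value */
#define CONSISTENT_SIZE		0x00200000UL	/* 2 MB window -> 512 pte slots */

static unsigned long consistent_offset(unsigned long va)
{
	return (va - CONSISTENT_BASE) >> PAGE_SHIFT;	/* index into consistent_pte */
}

int main(void)
{
	unsigned long va = CONSISTENT_BASE + (5UL << PAGE_SHIFT) + 0x123;

	printf("pte index    = %lu\n", consistent_offset(va));		/* 5 */
	printf("window slots = %lu\n", CONSISTENT_SIZE >> PAGE_SHIFT);	/* 512 */
	return 0;
}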
This must + * not sleep so kmap_atmomic()/kunmap_atomic() are used. + * + * Note: yes, it is possible and correct to have a buffer extend + * beyond the first page. + */ +static inline void __dma_sync_page_highmem(struct page *page, + unsigned long offset, size_t size, int direction) +{ + size_t seg_size = min((size_t)PAGE_SIZE, size) - offset; + size_t cur_size = seg_size; + unsigned long flags, start, seg_offset = offset; + int nr_segs = PAGE_ALIGN(size + (PAGE_SIZE - offset))/PAGE_SIZE; + int seg_nr = 0; + + local_irq_save(flags); + + do { + start = (unsigned long)kmap_atomic(page + seg_nr, + KM_PPC_SYNC_PAGE) + seg_offset; + + /* Sync this buffer segment */ + __dma_sync((void *)start, seg_size, direction); + kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE); + seg_nr++; + + /* Calculate next buffer segment size */ + seg_size = min((size_t)PAGE_SIZE, size - cur_size); + + /* Add the segment size to our running total */ + cur_size += seg_size; + seg_offset = 0; + } while (seg_nr < nr_segs); + + local_irq_restore(flags); +} +#endif /* CONFIG_HIGHMEM */ + +/* + * __dma_sync_page makes memory consistent. identical to __dma_sync, but + * takes a struct page instead of a virtual address + */ +void __dma_sync_page(struct page *page, unsigned long offset, + size_t size, int direction) +{ +#ifdef CONFIG_HIGHMEM + __dma_sync_page_highmem(page, offset, size, direction); +#else + unsigned long start = (unsigned long)page_address(page) + offset; + __dma_sync((void *)start, size, direction); +#endif +} diff --git a/arch/ppc/kernel/head_e500.S b/arch/ppc/kernel/head_e500.S new file mode 100644 index 000000000..8f0c590f1 --- /dev/null +++ b/arch/ppc/kernel/head_e500.S @@ -0,0 +1,1329 @@ +/* + * arch/ppc/kernel/head_e500.S + * + * Kernel execution entry point code. + * + * Copyright (c) 1995-1996 Gary Thomas + * Initial PowerPC version. + * Copyright (c) 1996 Cort Dougan + * Rewritten for PReP + * Copyright (c) 1996 Paul Mackerras + * Low-level exception handers, MMU support, and rewrite. + * Copyright (c) 1997 Dan Malek + * PowerPC 8xx modifications. + * Copyright (c) 1998-1999 TiVo, Inc. + * PowerPC 403GCX modifications. + * Copyright (c) 1999 Grant Erickson + * PowerPC 403GCX/405GP modifications. + * Copyright 2000 MontaVista Software Inc. + * PPC405 modifications + * PowerPC 403GCX/405GP modifications. + * Author: MontaVista Software, Inc. + * frank_rowand@mvista.com or source@mvista.com + * debbie_chu@mvista.com + * Copyright 2002-2004 MontaVista Software, Inc. + * PowerPC 44x support, Matt Porter + * Copyright 2004 Freescale Semiconductor, Inc + * PowerPC e500 modifications, Kumar Gala + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Macros + */ + +#define SET_IVOR(vector_number, vector_label) \ + li r26,vector_label@l; \ + mtspr SPRN_IVOR##vector_number,r26; \ + sync + +/* As with the other PowerPC ports, it is expected that when code + * execution begins here, the following registers contain valid, yet + * optional, information: + * + * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) + * r4 - Starting address of the init RAM disk + * r5 - Ending address of the init RAM disk + * r6 - Start of kernel command line string (e.g. 
"mem=128") + * r7 - End of kernel command line string + * + */ + .text +_GLOBAL(_stext) +_GLOBAL(_start) + /* + * Reserve a word at a fixed location to store the address + * of abatron_pteptrs + */ + nop +/* + * Save parameters we are passed + */ + mr r31,r3 + mr r30,r4 + mr r29,r5 + mr r28,r6 + mr r27,r7 + li r24,0 /* CPU number */ + +/* We try to not make any assumptions about how the boot loader + * setup or used the TLBs. We invalidate all mappings from the + * boot loader and load a single entry in TLB1[0] to map the + * first 16M of kernel memory. Any boot info passed from the + * bootloader needs to live in this first 16M. + * + * Requirement on bootloader: + * - The page we're executing in needs to reside in TLB1 and + * have IPROT=1. If not an invalidate broadcast could + * evict the entry we're currently executing in. + * + * r3 = Index of TLB1 were executing in + * r4 = Current MSR[IS] + * r5 = Index of TLB1 temp mapping + * + * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0] + * if needed + */ + +/* 1. Find the index of the entry we're executing in */ + bl invstr /* Find our address */ +invstr: mflr r6 /* Make it accessible */ + mfmsr r7 + rlwinm r4,r7,27,31,31 /* extract MSR[IS] */ + mfspr r7, SPRN_PID0 + slwi r7,r7,16 + or r7,r7,r4 + mtspr SPRN_MAS6,r7 + tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */ + mfspr r7,SPRN_MAS1 + andis. r7,r7,MAS1_VALID@h + bne match_TLB + mfspr r7,SPRN_PID1 + slwi r7,r7,16 + or r7,r7,r4 + mtspr SPRN_MAS6,r7 + tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */ + mfspr r7,SPRN_MAS1 + andis. r7,r7,MAS1_VALID@h + bne match_TLB + mfspr r7, SPRN_PID2 + slwi r7,r7,16 + or r7,r7,r4 + mtspr SPRN_MAS6,r7 + tlbsx 0,r6 /* Fall through, we had to match */ +match_TLB: + mfspr r7,SPRN_MAS0 + rlwinm r3,r7,16,28,31 /* Extract MAS0(Entry) */ + + mfspr r7,SPRN_MAS1 /* Insure IPROT set */ + oris r7,r7,MAS1_IPROT@h + mtspr SPRN_MAS1,r7 + tlbwe + +/* 2. Invalidate all entries except the entry we're executing in */ + mfspr r9,SPRN_TLB1CFG + andi. r9,r9,0xfff + li r6,0 /* Set Entry counter to 0 */ +1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r6,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ + mtspr SPRN_MAS0,r7 + tlbre + mfspr r7,SPRN_MAS1 + rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ + cmpw r3,r6 + beq skpinv /* Dont update the current execution TLB */ + mtspr SPRN_MAS1,r7 + tlbwe + isync +skpinv: addi r6,r6,1 /* Increment */ + cmpw r6,r9 /* Are we done? */ + bne 1b /* If not, repeat */ + + /* Invalidate TLB0 */ + li r6,0x04 + tlbivax 0,r6 +#ifdef CONFIG_SMP + tlbsync +#endif + /* Invalidate TLB1 */ + li r6,0x0c + tlbivax 0,r6 +#ifdef CONFIG_SMP + tlbsync +#endif + msync + +/* 3. Setup a temp mapping and jump to it */ + andi. 
r5, r3, 0x1 /* Find an entry not used and is non-zero */ + addi r5, r5, 0x1 + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r3,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r7 + tlbre + + /* Just modify the entry ID and EPN for the temp mapping */ + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r5,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ + mtspr SPRN_MAS0,r7 + xori r6,r4,1 /* Setup TMP mapping in the other Address space */ + slwi r6,r6,12 + oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h + ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l + mtspr SPRN_MAS1,r6 + mfspr r6,SPRN_MAS2 + li r7,0 /* temp EPN = 0 */ + rlwimi r7,r6,0,20,31 + mtspr SPRN_MAS2,r7 + tlbwe + + xori r6,r4,1 + slwi r6,r6,5 /* setup new context with other address space */ + bl 1f /* Find our address */ +1: mflr r9 + rlwimi r7,r9,0,20,31 + addi r7,r7,24 + mtspr SRR0,r7 + mtspr SRR1,r6 + rfi + +/* 4. Clear out PIDs & Search info */ + li r6,0 + mtspr SPRN_PID0,r6 + mtspr SPRN_PID1,r6 + mtspr SPRN_PID2,r6 + mtspr SPRN_MAS6,r6 + +/* 5. Invalidate mapping we started in */ + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r3,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r7 + tlbre + li r6,0 + mtspr SPRN_MAS1,r6 + tlbwe + /* Invalidate TLB1 */ + li r9,0x0c + tlbivax 0,r9 +#ifdef CONFIG_SMP + tlbsync +#endif + msync + +/* 6. Setup KERNELBASE mapping in TLB1[0] */ + lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ + mtspr SPRN_MAS0,r6 + lis r6,(MAS1_VALID|MAS1_IPROT)@h + ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l + mtspr SPRN_MAS1,r6 + li r7,0 + lis r6,KERNELBASE@h + ori r6,r6,KERNELBASE@l + rlwimi r6,r7,0,20,31 + mtspr SPRN_MAS2,r6 + li r7,(MAS3_SX|MAS3_SW|MAS3_SR) + mtspr SPRN_MAS3,r7 + tlbwe + +/* 7. Jump to KERNELBASE mapping */ + li r7,0 + bl 1f /* Find our address */ +1: mflr r9 + rlwimi r6,r9,0,20,31 + addi r6,r6,24 + mtspr SRR0,r6 + mtspr SRR1,r7 + rfi /* start execution out of TLB1[0] entry */ + +/* 8. Clear out the temp mapping */ + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r5,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ + mtspr SPRN_MAS0,r7 + tlbre + mtspr SPRN_MAS1,r8 + tlbwe + /* Invalidate TLB1 */ + li r9,0x0c + tlbivax 0,r9 +#ifdef CONFIG_SMP + tlbsync +#endif + msync + + /* Establish the interrupt vector offsets */ + SET_IVOR(0, CriticalInput); + SET_IVOR(1, MachineCheck); + SET_IVOR(2, DataStorage); + SET_IVOR(3, InstructionStorage); + SET_IVOR(4, ExternalInput); + SET_IVOR(5, Alignment); + SET_IVOR(6, Program); + SET_IVOR(7, FloatingPointUnavailable); + SET_IVOR(8, SystemCall); + SET_IVOR(9, AuxillaryProcessorUnavailable); + SET_IVOR(10, Decrementer); + SET_IVOR(11, FixedIntervalTimer); + SET_IVOR(12, WatchdogTimer); + SET_IVOR(13, DataTLBError); + SET_IVOR(14, InstructionTLBError); + SET_IVOR(15, Debug); + SET_IVOR(32, SPEUnavailable); + SET_IVOR(33, SPEFloatingPointData); + SET_IVOR(34, SPEFloatingPointRound); + SET_IVOR(35, PerformanceMonitor); + + /* Establish the interrupt vector base */ + lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ + mtspr SPRN_IVPR,r4 + + /* Setup the defaults for TLB entries */ + li r2,MAS4_TSIZED(BOOKE_PAGESZ_4K) + mtspr SPRN_MAS4, r2 + +#if 0 + /* Enable DOZE */ + mfspr r2,SPRN_HID0 + oris r2,r2,HID0_DOZE@h + mtspr SPRN_HID0, r2 +#endif + + /* + * This is where the main kernel code starts. 
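[Editor's sketch] The "lis r7,0x1000 / rlwimi r7,rN,16,12,15" pairs used throughout the steps above are just packing MAS0 out of a TLB selector and an entry selector; the same packing in C (field positions follow the e500 MAS0 layout the code relies on, the helper name is made up):

#include <stdio.h>
#include <stdint.h>

/* MAS0: TLBSEL at bit 28, ESEL at bits 16..19, as the lis/rlwimi pair builds it. */
static uint32_t mas0(unsigned int tlbsel, unsigned int esel)
{
	return ((uint32_t)tlbsel << 28) | (((uint32_t)esel & 0xf) << 16);
}

int main(void)
{
	/* TLB1, entry 3 -- what "lis r7,0x1000; rlwimi r7,r6,16,12,15" yields for r6 = 3. */
	printf("MAS0 = 0x%08x\n", mas0(1, 3));	/* 0x10030000 */
	return 0;
}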
+ */ + + /* ptr to current */ + lis r2,init_task@h + ori r2,r2,init_task@l + + /* ptr to current thread */ + addi r4,r2,THREAD /* init task's THREAD */ + mtspr SPRG3,r4 + + /* stack */ + lis r1,init_thread_union@h + ori r1,r1,init_thread_union@l + li r0,0 + stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + + bl early_init + + mfspr r3,SPRN_TLB1CFG + andi. r3,r3,0xfff + lis r4,num_tlbcam_entries@ha + stw r3,num_tlbcam_entries@l(r4) +/* + * Decide what sort of machine this is and initialize the MMU. + */ + mr r3,r31 + mr r4,r30 + mr r5,r29 + mr r6,r28 + mr r7,r27 + bl machine_init + bl MMU_init + + /* Setup PTE pointers for the Abatron bdiGDB */ + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + lis r4, KERNELBASE@h + ori r4, r4, KERNELBASE@l + stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ + stw r6, 0(r5) + + /* Let's move on */ + lis r4,start_kernel@h + ori r4,r4,start_kernel@l + lis r3,MSR_KERNEL@h + ori r3,r3,MSR_KERNEL@l + mtspr SRR0,r4 + mtspr SRR1,r3 + rfi /* change context and jump to start_kernel */ + +/* + * Interrupt vector entry code + * + * The Book E MMUs are always on so we don't need to handle + * interrupts in real mode as with previous PPC processors. In + * this case we handle interrupts in the kernel virtual address + * space. + * + * Interrupt vectors are dynamically placed relative to the + * interrupt prefix as determined by the address of interrupt_base. + * The interrupt vectors offsets are programmed using the labels + * for each interrupt vector entry. + * + * Interrupt vectors must be aligned on a 16 byte boundary. + * We align on a 32 byte cache line boundary for good measure. + */ + +#define NORMAL_EXCEPTION_PROLOG \ + mtspr SPRN_SPRG0,r10; /* save two registers to work with */\ + mtspr SPRN_SPRG1,r11; \ + mtspr SPRN_SPRG4W,r1; \ + mfcr r10; /* save CR in r10 for now */\ + mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ + andi. r11,r11,MSR_PR; \ + beq 1f; \ + mfspr r1,SPRG3; /* if from user, start at top of */\ + lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ + addi r1,r1,THREAD_SIZE; \ +1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ + tophys(r11,r1); \ + stw r10,_CCR(r11); /* save various registers */\ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mfspr r10,SPRG0; \ + stw r10,GPR10(r11); \ + mfspr r12,SPRG1; \ + stw r12,GPR11(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r10,SPRG4R; \ + mfspr r12,SRR0; \ + stw r10,GPR1(r11); \ + mfspr r9,SRR1; \ + stw r10,0(r11); \ + rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + +/* + * Exception prolog for critical exceptions. This is a little different + * from the normal exception prolog above since a critical exception + * can potentially occur at any point during normal exception processing. + * Thus we cannot use the same SPRG registers as the normal prolog above. + * Instead we use a couple of words of memory at low physical addresses. + * This is OK since we don't support SMP on these processors. For Book E + * processors, we also have a reserved register (SPRG2) that is only used + * in critical exceptions so we can free up a GPR to use as the base for + * indirect access to the critical exception save area. This is necessary + * since the MMU is always on and the save area is offset from KERNELBASE. 
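[Editor's sketch] Before any registers are saved, NORMAL_EXCEPTION_PROLOG above boils down to one stack decision: exceptions taken from user mode (MSR_PR set) restart at the top of the task's kernel stack, exceptions taken from kernel mode keep building on the stack already in r1. A sketch of that choice with illustrative sizes:

#include <stdio.h>

#define THREAD_SIZE	8192UL	/* illustrative */
#define INT_FRAME_SIZE	192UL	/* illustrative */

static unsigned long exception_sp(int from_user, unsigned long cur_sp,
				  unsigned long thread_info)
{
	/* MSR_PR set: top of this thread's kernel stack; clear: current r1. */
	unsigned long sp = from_user ? thread_info + THREAD_SIZE : cur_sp;

	return sp - INT_FRAME_SIZE;	/* allocate the exception frame */
}

int main(void)
{
	printf("from user:   0x%lx\n", exception_sp(1, 0, 0xc1000000UL));
	printf("from kernel: 0x%lx\n", exception_sp(0, 0xc1001f00UL, 0xc1000000UL));
	return 0;
}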
+ */ +#define CRITICAL_EXCEPTION_PROLOG \ + mtspr SPRG2,r8; /* SPRG2 only used in criticals */ \ + lis r8,crit_save@ha; \ + stw r10,crit_r10@l(r8); \ + stw r11,crit_r11@l(r8); \ + mfspr r10,SPRG0; \ + stw r10,crit_sprg0@l(r8); \ + mfspr r10,SPRG1; \ + stw r10,crit_sprg1@l(r8); \ + mfspr r10,SPRG4R; \ + stw r10,crit_sprg4@l(r8); \ + mfspr r10,SPRG5R; \ + stw r10,crit_sprg5@l(r8); \ + mfspr r10,SPRG7R; \ + stw r10,crit_sprg7@l(r8); \ + mfspr r10,SPRN_PID; \ + stw r10,crit_pid@l(r8); \ + mfspr r10,SRR0; \ + stw r10,crit_srr0@l(r8); \ + mfspr r10,SRR1; \ + stw r10,crit_srr1@l(r8); \ + mfspr r8,SPRG2; /* SPRG2 only used in criticals */ \ + mfcr r10; /* save CR in r10 for now */\ + mfspr r11,SPRN_CSRR1; /* check whether user or kernel */\ + andi. r11,r11,MSR_PR; \ + lis r11,critical_stack_top@h; \ + ori r11,r11,critical_stack_top@l; \ + beq 1f; \ + /* COMING FROM USER MODE */ \ + mfspr r11,SPRG3; /* if from user, start at top of */\ + lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ + addi r11,r11,THREAD_SIZE; \ +1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ + stw r10,_CCR(r11); /* save various registers */\ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ + stw r12,_DEAR(r11); /* since they may have had stuff */\ + mfspr r9,SPRN_ESR; /* in them at the point where the */\ + stw r9,_ESR(r11); /* exception was taken */\ + mfspr r12,CSRR0; \ + stw r1,GPR1(r11); \ + mfspr r9,CSRR1; \ + stw r1,0(r11); \ + tovirt(r1,r11); \ + rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + +/* + * Exception prolog for machine check exceptions. This is similar to + * the critical exception prolog, except that machine check exceptions + * have their own save area. For Book E processors, we also have a + * reserved register (SPRG6) that is only used in machine check exceptions + * so we can free up a GPR to use as the base for indirect access to the + * machine check exception save area. This is necessary since the MMU + * is always on and the save area is offset from KERNELBASE. + */ +#define MCHECK_EXCEPTION_PROLOG \ + mtspr SPRG6W,r8; /* SPRG6 used in machine checks */ \ + lis r8,mcheck_save@ha; \ + stw r10,mcheck_r10@l(r8); \ + stw r11,mcheck_r11@l(r8); \ + mfspr r10,SPRG0; \ + stw r10,mcheck_sprg0@l(r8); \ + mfspr r10,SPRG1; \ + stw r10,mcheck_sprg1@l(r8); \ + mfspr r10,SPRG4R; \ + stw r10,mcheck_sprg4@l(r8); \ + mfspr r10,SPRG5R; \ + stw r10,mcheck_sprg5@l(r8); \ + mfspr r10,SPRG7R; \ + stw r10,mcheck_sprg7@l(r8); \ + mfspr r10,SPRN_PID; \ + stw r10,mcheck_pid@l(r8); \ + mfspr r10,SRR0; \ + stw r10,mcheck_srr0@l(r8); \ + mfspr r10,SRR1; \ + stw r10,mcheck_srr1@l(r8); \ + mfspr r10,CSRR0; \ + stw r10,mcheck_csrr0@l(r8); \ + mfspr r10,CSRR1; \ + stw r10,mcheck_csrr1@l(r8); \ + mfspr r8,SPRG6R; /* SPRG6 used in machine checks */ \ + mfcr r10; /* save CR in r10 for now */\ + mfspr r11,SPRN_MCSRR1; /* check whether user or kernel */\ + andi. 
r11,r11,MSR_PR; \ + lis r11,mcheck_stack_top@h; \ + ori r11,r11,mcheck_stack_top@l; \ + beq 1f; \ + /* COMING FROM USER MODE */ \ + mfspr r11,SPRG3; /* if from user, start at top of */\ + lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ + addi r11,r11,THREAD_SIZE; \ +1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ + stw r10,_CCR(r11); /* save various registers */\ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ + stw r12,_DEAR(r11); /* since they may have had stuff */\ + mfspr r9,SPRN_ESR; /* in them at the point where the */\ + stw r9,_ESR(r11); /* exception was taken */\ + mfspr r12,MCSRR0; \ + stw r1,GPR1(r11); \ + mfspr r9,MCSRR1; \ + stw r1,0(r11); \ + tovirt(r1,r11); \ + rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + +/* + * Exception vectors. + */ +#define START_EXCEPTION(label) \ + .align 5; \ +label: + +#define FINISH_EXCEPTION(func) \ + bl transfer_to_handler_full; \ + .long func; \ + .long ret_from_except_full + +#define EXCEPTION(n, label, hdlr, xfer) \ + START_EXCEPTION(label); \ + NORMAL_EXCEPTION_PROLOG; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + xfer(n, hdlr) + +#define CRITICAL_EXCEPTION(n, label, hdlr) \ + START_EXCEPTION(label); \ + CRITICAL_EXCEPTION_PROLOG; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ + NOCOPY, transfer_to_handler_full, \ + ret_from_except_full) + +#define MCHECK_EXCEPTION(n, label, hdlr) \ + START_EXCEPTION(label); \ + MCHECK_EXCEPTION_PROLOG; \ + mfspr r5,SPRN_ESR; \ + stw r5,_ESR(r11); \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ + NOCOPY, mcheck_transfer_to_handler, \ + ret_from_mcheck_exc) + +#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \ + li r10,trap; \ + stw r10,TRAP(r11); \ + lis r10,msr@h; \ + ori r10,r10,msr@l; \ + copyee(r10, r9); \ + bl tfer; \ + .long hdlr; \ + .long ret + +#define COPY_EE(d, s) rlwimi d,s,0,16,16 +#define NOCOPY(d, s) + +#define EXC_XFER_STD(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \ + ret_from_except) + +#define EXC_XFER_EE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_EE_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \ + ret_from_except) + +interrupt_base: + /* Critical Input Interrupt */ + CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) + + /* Machine Check Interrupt */ + MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) + + /* Data Storage Interrupt */ + START_EXCEPTION(DataStorage) + mtspr SPRG0, r10 /* Save some working registers */ + mtspr SPRG1, r11 + mtspr SPRG4W, r12 + mtspr SPRG5W, r13 + mfcr r11 + mtspr SPRG7W, r11 + + /* + * Check if it was a store fault, if not then bail + * because a user tried to access a kernel or + * read-protected page. Otherwise, get the + * offending address and handle it. + */ + mfspr r10, SPRN_ESR + andis. r10, r10, ESR_ST@h + beq 2f + + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. 
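[Editor's sketch] The check this comment refers to is a straight compare of the faulting address against TASK_SIZE; with the CONFIG_TASK_SIZE=0x80000000 used by the defconfigs above, it is simply (the helper name is made up):

#include <stdio.h>
#include <stdint.h>

#define TASK_SIZE	0x80000000UL	/* CONFIG_TASK_SIZE in the defconfigs above */

/* Faulting addresses at or above TASK_SIZE are kernel addresses and are handled
 * against the kernel page tables or bounced to the slow path; anything below is
 * looked up in the current task's page directory. */
static const char *fault_space(uint32_t ea)
{
	return (ea >= TASK_SIZE) ? "kernel" : "user";
}

int main(void)
{
	printf("0x10002000 -> %s\n", fault_space(0x10002000U));
	printf("0xc0000000 -> %s\n", fault_space(0xc0000000U));
	return 0;
}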
+ */ + lis r11, TASK_SIZE@h + ori r11, r11, TASK_SIZE@l + cmplw 0, r10, r11 + bge 2f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRG3 + lwz r11,PGDIR(r11) +4: + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r11, 0(r11) /* Get L1 entry */ + rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + + /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */ + andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE + cmpwi 0, r13, _PAGE_RW|_PAGE_USER + bne 2f /* Bail if not */ + + /* Update 'changed'. */ + ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE + stw r11, 0(r12) /* Update Linux page table */ + + /* MAS2 not updated as the entry does exist in the tlb, this + fault taken to detect state transition (eg: COW -> DIRTY) + */ + lis r12, MAS3_RPN@h + ori r12, r12, _PAGE_HWEXEC | MAS3_RPN@l + and r11, r11, r12 + rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ + ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ + + /* update search PID in MAS6, AS = 0 */ + mfspr r12, SPRN_PID0 + slwi r12, r12, 16 + mtspr SPRN_MAS6, r12 + + /* find the TLB index that caused the fault. It has to be here. */ + tlbsx 0, r10 + + mtspr SPRN_MAS3,r11 + tlbwe + + /* Done...restore registers and get out of here. */ + mfspr r11, SPRG7R + mtcr r11 + mfspr r13, SPRG5R + mfspr r12, SPRG4R + mfspr r11, SPRG1 + mfspr r10, SPRG0 + rfi /* Force context change */ + +2: + /* + * The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r11, SPRG7R + mtcr r11 + mfspr r13, SPRG5R + mfspr r12, SPRG4R + mfspr r11, SPRG1 + mfspr r10, SPRG0 + b data_access + + /* Instruction Storage Interrupt */ + START_EXCEPTION(InstructionStorage) + NORMAL_EXCEPTION_PROLOG + mfspr r5,SPRN_ESR /* Grab the ESR and save it */ + stw r5,_ESR(r11) + mr r4,r12 /* Pass SRR0 as arg2 */ + li r5,0 /* Pass zero as arg3 */ + EXC_XFER_EE_LITE(0x0400, handle_page_fault) + + /* External Input Interrupt */ + EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) + + /* Alignment Interrupt */ + START_EXCEPTION(Alignment) + NORMAL_EXCEPTION_PROLOG + mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ + stw r4,_DEAR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE(0x0600, AlignmentException) + + /* Program Interrupt */ + START_EXCEPTION(Program) + NORMAL_EXCEPTION_PROLOG + mfspr r4,SPRN_ESR /* Grab the ESR and save it */ + stw r4,_ESR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_STD(0x0700, ProgramCheckException) + + /* Floating Point Unavailable Interrupt */ + EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) + + /* System Call Interrupt */ + START_EXCEPTION(SystemCall) + NORMAL_EXCEPTION_PROLOG + EXC_XFER_EE_LITE(0x0c00, DoSyscall) + + /* Auxillary Processor Unavailable Interrupt */ + EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) + + /* Decrementer Interrupt */ + START_EXCEPTION(Decrementer) + NORMAL_EXCEPTION_PROLOG + lis r0,TSR_DIS@h /* Setup the DEC interrupt mask */ + mtspr SPRN_TSR,r0 /* Clear the DEC interrupt */ + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_LITE(0x0900, timer_interrupt) + + /* Fixed Internal Timer Interrupt */ + /* TODO: Add FIT support */ + EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE) + + /* Watchdog Timer Interrupt */ + /* TODO: Add watchdog support */ + CRITICAL_EXCEPTION(0x3200, 
WatchdogTimer, UnknownException) + + /* Data TLB Error Interrupt */ + START_EXCEPTION(DataTLBError) + mtspr SPRG0, r10 /* Save some working registers */ + mtspr SPRG1, r11 + mtspr SPRG4W, r12 + mtspr SPRG5W, r13 + mfcr r11 + mtspr SPRG7W, r11 + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + ori r11, r11, TASK_SIZE@l + cmplw 5, r10, r11 + blt 5, 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MAS1 /* Set TID to 0 */ + li r13,MAS1_TID@l + andc r12,r12,r13 + mtspr SPRN_MAS1,r12 + + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRG3 + lwz r11,PGDIR(r11) + +4: + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r11, 0(r11) /* Get L1 entry */ + rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + andi. r13, r11, _PAGE_PRESENT + beq 2f + + ori r11, r11, _PAGE_ACCESSED + stw r11, 0(r12) + + /* Jump to common tlb load */ + b finish_tlb_load +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r11, SPRG7R + mtcr r11 + mfspr r13, SPRG5R + mfspr r12, SPRG4R + mfspr r11, SPRG1 + mfspr r10, SPRG0 + b data_access + + /* Instruction TLB Error Interrupt */ + /* + * Nearly the same as above, except we get our + * information from different registers and bailout + * to a different point. + */ + START_EXCEPTION(InstructionTLBError) + mtspr SPRG0, r10 /* Save some working registers */ + mtspr SPRG1, r11 + mtspr SPRG4W, r12 + mtspr SPRG5W, r13 + mfcr r11 + mtspr SPRG7W, r11 + mfspr r10, SRR0 /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + ori r11, r11, TASK_SIZE@l + cmplw 5, r10, r11 + blt 5, 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MAS1 /* Set TID to 0 */ + li r13,MAS1_TID@l + andc r12,r12,r13 + mtspr SPRN_MAS1,r12 + + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRG3 + lwz r11,PGDIR(r11) + +4: + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r11, 0(r11) /* Get L1 entry */ + rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + andi. r13, r11, _PAGE_PRESENT + beq 2f + + ori r11, r11, _PAGE_ACCESSED + stw r11, 0(r12) + + /* Jump to common TLB load point */ + b finish_tlb_load + +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. 
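[Editor's sketch] The rlwimi sequences in both TLB-miss handlers above implement an ordinary two-level software walk: the top 10 bits of the faulting address index the page directory, the next 10 bits index the PTE page. A user-space sketch of the same walk (table layout and flag handling simplified; the real code also strips status bits from the L1 entry before using it as a pointer):

#include <stdio.h>
#include <stdint.h>

#define PGDIR_SHIFT	22	/* top 10 bits -> pgd index */
#define PAGE_SHIFT	12	/* next 10 bits -> pte index */
#define PTRS_PER_TABLE	1024

static uintptr_t pgd[PTRS_PER_TABLE];		/* L1: each entry points at a pte page */
static uint32_t  pte_page[PTRS_PER_TABLE];	/* L2: Linux PTEs */

static uint32_t walk(const uintptr_t *dir, uint32_t ea)
{
	uintptr_t l1 = dir[ea >> PGDIR_SHIFT];	/* L1 (pgdir/pmd) entry */
	const uint32_t *ptep;

	if (!l1)
		return 0;			/* bail if no table */
	ptep = (const uint32_t *)l1;		/* kernel masks off low status bits here */
	return ptep[(ea >> PAGE_SHIFT) & (PTRS_PER_TABLE - 1)];
}

int main(void)
{
	uint32_t ea = (3U << PGDIR_SHIFT) | (7U << PAGE_SHIFT) | 0x123;

	pte_page[7] = 0x00001234;		/* pretend Linux PTE */
	pgd[3] = (uintptr_t)pte_page;

	printf("pte for 0x%08x = 0x%08x\n", ea, walk(pgd, ea));
	return 0;
}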
+ */ + mfspr r11, SPRG7R + mtcr r11 + mfspr r13, SPRG5R + mfspr r12, SPRG4R + mfspr r11, SPRG1 + mfspr r10, SPRG0 + b InstructionStorage + +#ifdef CONFIG_SPE + /* SPE Unavailable */ + START_EXCEPTION(SPEUnavailable) + NORMAL_EXCEPTION_PROLOG + bne load_up_spe + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE_LITE(0x2010, KernelSPE) +#else + EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE) +#endif /* CONFIG_SPE */ + + /* SPE Floating Point Data */ +#ifdef CONFIG_SPE + EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); +#else + EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE) +#endif /* CONFIG_SPE */ + + /* SPE Floating Point Round */ + EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE) + + /* Performance Monitor */ + EXCEPTION(0x2060, PerformanceMonitor, UnknownException, EXC_XFER_EE) + +/* Check for a single step debug exception while in an exception + * handler before state has been saved. This is to catch the case + * where an instruction that we are trying to single step causes + * an exception (eg ITLB/DTLB miss) and thus the first instruction of + * the exception handler generates a single step debug exception. + * + * If we get a debug trap on the first instruction of an exception handler, + * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is + * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). + * The exception handler was handling a non-critical interrupt, so it will + * save (and later restore) the MSR via SPRN_SRR1, which will still have + * the MSR_DE bit set. + */ + /* Debug Interrupt */ + START_EXCEPTION(Debug) + CRITICAL_EXCEPTION_PROLOG + + /* + * If this is a single step or branch-taken exception in an + * exception entry sequence, it was probably meant to apply to + * the code where the exception occurred (since exception entry + * doesn't turn off DE automatically). We simulate the effect + * of turning off DE on entry to an exception handler by turning + * off DE in the CSRR1 value and clearing the debug status. + */ + mfspr r10,SPRN_DBSR /* check single-step/branch taken */ + andis. r10,r10,(DBSR_IC|DBSR_BT)@h + beq+ 1f + andi. r0,r9,MSR_PR /* check supervisor */ + beq 2f /* branch if we need to fix it up... */ + + /* continue normal handling for a critical exception... */ +1: mfspr r4,SPRN_DBSR + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_TEMPLATE(DebugException, 0x2002, \ + (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ + NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) + + /* here it looks like we got an inappropriate debug exception. */ +2: rlwinm r9,r9,0,~MSR_DE /* clear DE in the CSRR1 value */ + mtspr SPRN_DBSR,r10 /* clear the IC/BT debug intr status */ + /* restore state and get out */ + lwz r10,_CCR(r11) + lwz r0,GPR0(r11) + lwz r1,GPR1(r11) + mtcrf 0x80,r10 + mtspr CSRR0,r12 + mtspr CSRR1,r9 + lwz r9,GPR9(r11) + + mtspr SPRG2,r8; /* SPRG2 only used in criticals */ + lis r8,crit_save@ha; + lwz r10,crit_r10@l(r8) + lwz r11,crit_r11@l(r8) + mfspr r8,SPRG2 + + rfci + b . + +/* + * Local functions + */ + /* + * Data TLB exceptions will bail out to this point + * if they can't resolve the lightweight TLB fault. + */ +data_access: + NORMAL_EXCEPTION_PROLOG + mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + stw r5,_ESR(r11) + mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + andis. 
r10,r5,(ESR_ILK|ESR_DLK)@h + bne 1f + EXC_XFER_EE_LITE(0x0300, handle_page_fault) +1: + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE_LITE(0x0300, CacheLockingException) + +/* + + * Both the instruction and data TLB miss get to this + * point to load the TLB. + * r10 - EA of fault + * r11 - TLB (info from Linux PTE) + * r12, r13 - available to use + * CR5 - results of addr < TASK_SIZE + * MAS0, MAS1 - loaded with proper value when we get here + * MAS2, MAS3 - will need additional info from Linux PTE + * Upon exit, we reload everything and RFI. + */ +finish_tlb_load: + /* + * We set execute, because we don't have the granularity to + * properly set this at the page level (Linux problem). + * Many of these bits are software only. Bits we don't set + * here we (properly should) assume have the appropriate value. + */ + + mfspr r12, SPRN_MAS2 + rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ + mtspr SPRN_MAS2, r12 + + bge 5, 1f + + /* addr > TASK_SIZE */ + li r10, (MAS3_UX | MAS3_UW | MAS3_UR) + andi. r13, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) + andi. r12, r11, _PAGE_USER /* Test for _PAGE_USER */ + iseleq r12, 0, r10 + and r10, r12, r13 + srwi r12, r10, 1 + or r12, r12, r10 /* Copy user perms into supervisor */ + b 2f + + /* addr <= TASK_SIZE */ +1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */ + ori r12, r12, (MAS3_SX | MAS3_SR) + +2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ + mtspr SPRN_MAS3, r11 + tlbwe + + /* Done...restore registers and get out of here. */ + mfspr r11, SPRG7R + mtcr r11 + mfspr r13, SPRG5R + mfspr r12, SPRG4R + mfspr r11, SPRG1 + mfspr r10, SPRG0 + rfi /* Force context change */ + +#ifdef CONFIG_SPE +/* Note that the SPE support is closely modeled after the AltiVec + * support. Changes to one are likely to be applicable to the + * other! */ +load_up_spe: +/* + * Disable SPE for the task which had SPE previously, + * and save its SPE registers in its thread_struct. + * Enables SPE for use in the kernel on return. + * On SMP we know the SPE units are free, since we give it up every + * switch. -- Kumar + */ + mfmsr r5 + oris r5,r5,MSR_SPE@h + mtmsr r5 /* enable use of SPE now */ + isync +/* + * For SMP, we don't do lazy SPE switching because it just gets too + * horrendously complex, especially when a task switches from one CPU + * to another. Instead we call giveup_spe in switch_to. 
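+ * On UP kernels, last_task_used_spe (used below) records whose SPE state
+ * is currently live in the registers so that it can be saved lazily.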
+ */ +#ifndef CONFIG_SMP + lis r3,last_task_used_spe@ha + lwz r4,last_task_used_spe@l(r3) + cmpi 0,r4,0 + beq 1f + addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ + SAVE_32EVR(0,r10,r4) + evxor evr10, evr10, evr10 /* clear out evr10 */ + evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ + li r5,THREAD_ACC + evstddx evr10, r4, r5 /* save off accumulator */ + lwz r5,PT_REGS(r4) + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + lis r10,MSR_SPE@h + andc r4,r4,r10 /* disable SPE for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#endif /* CONFIG_SMP */ + /* enable use of SPE after return */ + oris r9,r9,MSR_SPE@h + mfspr r5,SPRG3 /* current task's THREAD (phys) */ + li r4,1 + li r10,THREAD_ACC + stw r4,THREAD_USED_SPE(r5) + evlddx evr4,r10,r5 + evmra evr4,evr4 + REST_32EVR(0,r10,r5) +#ifndef CONFIG_SMP + subi r4,r5,THREAD + stw r4,last_task_used_spe@l(r3) +#endif /* CONFIG_SMP */ + /* restore registers and return */ +2: REST_4GPRS(3, r11) + lwz r10,_CCR(r11) + REST_GPR(1, r11) + mtcr r10 + lwz r10,_LINK(r11) + mtlr r10 + REST_GPR(10, r11) + mtspr SRR1,r9 + mtspr SRR0,r12 + REST_GPR(9, r11) + REST_GPR(12, r11) + lwz r11,GPR11(r11) + SYNC + rfi + + + +/* + * SPE unavailable trap from kernel - print a message, but let + * the task use SPE in the kernel until it returns to user mode. + */ +KernelSPE: + lwz r3,_MSR(r1) + oris r3,r3,MSR_SPE@h + stw r3,_MSR(r1) /* enable use of SPE after return */ + lis r3,87f@h + ori r3,r3,87f@l + mr r4,r2 /* current */ + lwz r5,_NIP(r1) + bl printk + b ret_from_except +87: .string "SPE used in kernel (task=%p, pc=%x) \n" + .align 4,0 + +#endif /* CONFIG_SPE */ + +/* + * Global functions + */ + +/* + * extern void loadcam_entry(unsigned int index) + * + * Load TLBCAM[index] entry in to the L2 CAM MMU + */ +_GLOBAL(loadcam_entry) + lis r4,TLBCAM@ha + addi r4,r4,TLBCAM@l + mulli r5,r3,20 + add r3,r5,r4 + lwz r4,0(r3) + mtspr SPRN_MAS0,r4 + lwz r4,4(r3) + mtspr SPRN_MAS1,r4 + lwz r4,8(r3) + mtspr SPRN_MAS2,r4 + lwz r4,12(r3) + mtspr SPRN_MAS3,r4 + tlbwe + isync + blr + +/* + * extern void giveup_altivec(struct task_struct *prev) + * + * The e500 core does not have an AltiVec unit. + */ +_GLOBAL(giveup_altivec) + blr + +#ifdef CONFIG_SPE +/* + * extern void giveup_spe(struct task_struct *prev) + * + */ +_GLOBAL(giveup_spe) + mfmsr r5 + oris r5,r5,MSR_SPE@h + SYNC + mtmsr r5 /* enable use of SPE now */ + isync + cmpi 0,r3,0 + beqlr- /* if no previous owner, done */ + addi r3,r3,THREAD /* want THREAD of task */ + lwz r5,PT_REGS(r3) + cmpi 0,r5,0 + SAVE_32EVR(0, r4, r3) + evxor evr6, evr6, evr6 /* clear out evr6 */ + evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ + li r4,THREAD_ACC + evstddx evr6, r4, r3 /* save off accumulator */ + beq 1f + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + lis r3,MSR_SPE@h + andc r4,r4,r3 /* disable SPE for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#ifndef CONFIG_SMP + li r5,0 + lis r4,last_task_used_spe@ha + stw r5,last_task_used_spe@l(r4) +#endif /* CONFIG_SMP */ + blr +#endif /* CONFIG_SPE */ + +/* + * extern void giveup_fpu(struct task_struct *prev) + * + * The e500 core does not have an FPU. + */ +_GLOBAL(giveup_fpu) + blr + +/* + * extern void abort(void) + * + * At present, this routine just applies a system reset. 
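+ * It does so by enabling debug events in the MSR and then writing
+ * DBCR0_IDM | DBCR0_RST_CHIP ('reset chip') into DBCR0, so it never returns.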
+ */ +_GLOBAL(abort) + li r13,0 + mtspr SPRN_DBCR0,r13 /* disable all debug events */ + mfmsr r13 + ori r13,r13,MSR_DE@l /* Enable Debug Events */ + mtmsr r13 + mfspr r13,SPRN_DBCR0 + lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h + mtspr SPRN_DBCR0,r13 + +_GLOBAL(set_context) + +#ifdef CONFIG_BDI_SWITCH + /* Context switch the PTE pointer for the Abatron BDI2000. + * The PGDIR is the second parameter. + */ + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + stw r4, 0x4(r5) +#endif + mtspr SPRN_PID,r3 + isync /* Force context change */ + blr + +/* + * We put a few things here that have to be page-aligned. This stuff + * goes at the beginning of the data segment, which is page-aligned. + */ + .data +_GLOBAL(sdata) +_GLOBAL(empty_zero_page) + .space 4096 +_GLOBAL(swapper_pg_dir) + .space 4096 + + .section .bss +/* Stack for handling critical exceptions from kernel mode */ +critical_stack_bottom: + .space 4096 +critical_stack_top: + .previous + +/* Stack for handling machine check exceptions from kernel mode */ +mcheck_stack_bottom: + .space 4096 +mcheck_stack_top: + .previous + +/* + * This area is used for temporarily saving registers during the + * critical and machine check exception prologs. It must always + * follow the page aligned allocations, so it starts on a page + * boundary, ensuring that all crit_save areas are in a single + * page. + */ + +/* crit_save */ +_GLOBAL(crit_save) + .space 4 +_GLOBAL(crit_r10) + .space 4 +_GLOBAL(crit_r11) + .space 4 +_GLOBAL(crit_sprg0) + .space 4 +_GLOBAL(crit_sprg1) + .space 4 +_GLOBAL(crit_sprg4) + .space 4 +_GLOBAL(crit_sprg5) + .space 4 +_GLOBAL(crit_sprg7) + .space 4 +_GLOBAL(crit_pid) + .space 4 +_GLOBAL(crit_srr0) + .space 4 +_GLOBAL(crit_srr1) + .space 4 + +/* mcheck_save */ +_GLOBAL(mcheck_save) + .space 4 +_GLOBAL(mcheck_r10) + .space 4 +_GLOBAL(mcheck_r11) + .space 4 +_GLOBAL(mcheck_sprg0) + .space 4 +_GLOBAL(mcheck_sprg1) + .space 4 +_GLOBAL(mcheck_sprg4) + .space 4 +_GLOBAL(mcheck_sprg5) + .space 4 +_GLOBAL(mcheck_sprg7) + .space 4 +_GLOBAL(mcheck_pid) + .space 4 +_GLOBAL(mcheck_srr0) + .space 4 +_GLOBAL(mcheck_srr1) + .space 4 +_GLOBAL(mcheck_csrr0) + .space 4 +_GLOBAL(mcheck_csrr1) + .space 4 + +/* + * This space gets a copy of optional info passed to us by the bootstrap + * which is used to pass parameters into the kernel like root=/dev/sda1, etc. + */ +_GLOBAL(cmd_line) + .space 512 + +/* + * Room for two PTE pointers, usually the kernel and current user pointers + * to their respective root page table. + */ +abatron_pteptrs: + .space 8 + + diff --git a/arch/ppc/kernel/vecemu.c b/arch/ppc/kernel/vecemu.c new file mode 100644 index 000000000..1430ef592 --- /dev/null +++ b/arch/ppc/kernel/vecemu.c @@ -0,0 +1,346 @@ +/* + * Routines to emulate some Altivec/VMX instructions, specifically + * those that can trap when given denormalized operands in Java mode. 
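+ * Each vector element is handled through its raw 32-bit single-precision
+ * image; emulate_altivec() at the bottom of this file decodes the
+ * instruction at regs->nip and dispatches to the helpers here or to the
+ * FP-based routines in vector.S.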
+ */ +#include +#include +#include +#include +#include +#include + +/* Functions in vector.S */ +extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b); +extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b); +extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); +extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); +extern void vrefp(vector128 *dst, vector128 *src); +extern void vrsqrtefp(vector128 *dst, vector128 *src); +extern void vexptep(vector128 *dst, vector128 *src); + +static unsigned int exp2s[8] = { + 0x800000, + 0x8b95c2, + 0x9837f0, + 0xa5fed7, + 0xb504f3, + 0xc5672a, + 0xd744fd, + 0xeac0c7 +}; + +/* + * Computes an estimate of 2^x. The `s' argument is the 32-bit + * single-precision floating-point representation of x. + */ +static unsigned int eexp2(unsigned int s) +{ + int exp, pwr; + unsigned int mant, frac; + + /* extract exponent field from input */ + exp = ((s >> 23) & 0xff) - 127; + if (exp > 7) { + /* check for NaN input */ + if (exp == 128 && (s & 0x7fffff) != 0) + return s | 0x400000; /* return QNaN */ + /* 2^-big = 0, 2^+big = +Inf */ + return (s & 0x80000000)? 0: 0x7f800000; /* 0 or +Inf */ + } + if (exp < -23) + return 0x3f800000; /* 1.0 */ + + /* convert to fixed point integer in 9.23 representation */ + pwr = (s & 0x7fffff) | 0x800000; + if (exp > 0) + pwr <<= exp; + else + pwr >>= -exp; + if (s & 0x80000000) + pwr = -pwr; + + /* extract integer part, which becomes exponent part of result */ + exp = (pwr >> 23) + 126; + if (exp >= 254) + return 0x7f800000; + if (exp < -23) + return 0; + + /* table lookup on top 3 bits of fraction to get mantissa */ + mant = exp2s[(pwr >> 20) & 7]; + + /* linear interpolation using remaining 20 bits of fraction */ + asm("mulhwu %0,%1,%2" : "=r" (frac) + : "r" (pwr << 12), "r" (0x172b83ff)); + asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant)); + mant += frac; + + if (exp >= 0) + return mant + (exp << 23); + + /* denormalized result */ + exp = -exp; + mant += 1 << (exp - 1); + return mant >> exp; +} + +/* + * Computes an estimate of log_2(x). The `s' argument is the 32-bit + * single-precision floating-point representation of x. 
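+ * For example, elog2(0x40000000) (2.0f) returns 0x3f800000 (1.0f), and an
+ * input of +0 or -0 returns 0xff800000 (-Inf), as a vlogefp estimate requires.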
+ */ +static unsigned int elog2(unsigned int s) +{ + int exp, mant, lz, frac; + + exp = s & 0x7f800000; + mant = s & 0x7fffff; + if (exp == 0x7f800000) { /* Inf or NaN */ + if (mant != 0) + s |= 0x400000; /* turn NaN into QNaN */ + return s; + } + if ((exp | mant) == 0) /* +0 or -0 */ + return 0xff800000; /* return -Inf */ + + if (exp == 0) { + /* denormalized */ + asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant)); + mant <<= lz - 8; + exp = (-118 - lz) << 23; + } else { + mant |= 0x800000; + exp -= 127 << 23; + } + + if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */ + exp |= 0x400000; /* 0.5 * 2^23 */ + asm("mulhwu %0,%1,%2" : "=r" (mant) + : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */ + } + if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */ + exp |= 0x200000; /* 0.25 * 2^23 */ + asm("mulhwu %0,%1,%2" : "=r" (mant) + : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */ + } + if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */ + exp |= 0x100000; /* 0.125 * 2^23 */ + asm("mulhwu %0,%1,%2" : "=r" (mant) + : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */ + } + if (mant > 0x800000) { /* 1.0 * 2^23 */ + /* calculate (mant - 1) * 1.381097463 */ + /* 1.381097463 == 0.125 / (2^0.125 - 1) */ + asm("mulhwu %0,%1,%2" : "=r" (frac) + : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a)); + exp += frac; + } + s = exp & 0x80000000; + if (exp != 0) { + if (s) + exp = -exp; + asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp)); + lz = 8 - lz; + if (lz > 0) + exp >>= lz; + else if (lz < 0) + exp <<= -lz; + s += ((lz + 126) << 23) + exp; + } + return s; +} + +#define VSCR_SAT 1 + +static int ctsxs(unsigned int x, int scale, unsigned int *vscrp) +{ + int exp, mant; + + exp = (x >> 23) & 0xff; + mant = x & 0x7fffff; + if (exp == 255 && mant != 0) + return 0; /* NaN -> 0 */ + exp = exp - 127 + scale; + if (exp < 0) + return 0; /* round towards zero */ + if (exp >= 31) { + /* saturate, unless the result would be -2^31 */ + if (x + (scale << 23) != 0xcf000000) + *vscrp |= VSCR_SAT; + return (x & 0x80000000)? 0x80000000: 0x7fffffff; + } + mant |= 0x800000; + mant = (mant << 7) >> (30 - exp); + return (x & 0x80000000)? 
-mant: mant; +} + +static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp) +{ + int exp; + unsigned int mant; + + exp = (x >> 23) & 0xff; + mant = x & 0x7fffff; + if (exp == 255 && mant != 0) + return 0; /* NaN -> 0 */ + exp = exp - 127 + scale; + if (exp < 0) + return 0; /* round towards zero */ + if (x & 0x80000000) { + /* negative => saturate to 0 */ + *vscrp |= VSCR_SAT; + return 0; + } + if (exp >= 32) { + /* saturate */ + *vscrp |= VSCR_SAT; + return 0xffffffff; + } + mant |= 0x800000; + mant = (mant << 8) >> (31 - exp); + return mant; +} + +/* Round to floating integer, towards 0 */ +static unsigned int rfiz(unsigned int x) +{ + int exp; + + exp = ((x >> 23) & 0xff) - 127; + if (exp == 128 && (x & 0x7fffff) != 0) + return x | 0x400000; /* NaN -> make it a QNaN */ + if (exp >= 23) + return x; /* it's an integer already (or Inf) */ + if (exp < 0) + return x & 0x80000000; /* |x| < 1.0 rounds to 0 */ + return x & ~(0x7fffff >> exp); +} + +/* Round to floating integer, towards +/- Inf */ +static unsigned int rfii(unsigned int x) +{ + int exp, mask; + + exp = ((x >> 23) & 0xff) - 127; + if (exp == 128 && (x & 0x7fffff) != 0) + return x | 0x400000; /* NaN -> make it a QNaN */ + if (exp >= 23) + return x; /* it's an integer already (or Inf) */ + if ((x & 0x7fffffff) == 0) + return x; /* +/-0 -> +/-0 */ + if (exp < 0) + /* 0 < |x| < 1.0 rounds to +/- 1.0 */ + return (x & 0x80000000) | 0x3f800000; + mask = 0x7fffff >> exp; + /* mantissa overflows into exponent - that's OK, + it can't overflow into the sign bit */ + return (x + mask) & ~mask; +} + +/* Round to floating integer, to nearest */ +static unsigned int rfin(unsigned int x) +{ + int exp, half; + + exp = ((x >> 23) & 0xff) - 127; + if (exp == 128 && (x & 0x7fffff) != 0) + return x | 0x400000; /* NaN -> make it a QNaN */ + if (exp >= 23) + return x; /* it's an integer already (or Inf) */ + if (exp < -1) + return x & 0x80000000; /* |x| < 0.5 -> +/-0 */ + if (exp == -1) + /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */ + return (x & 0x80000000) | 0x3f800000; + half = 0x400000 >> exp; + /* add 0.5 to the magnitude and chop off the fraction bits */ + return (x + half) & ~(0x7fffff >> exp); +} + +int +emulate_altivec(struct pt_regs *regs) +{ + unsigned int instr, i; + unsigned int va, vb, vc, vd; + vector128 *vrs; + + if (get_user(instr, (unsigned int *) regs->nip)) + return -EFAULT; + if ((instr >> 26) != 4) + return -EINVAL; /* not an altivec instruction */ + vd = (instr >> 21) & 0x1f; + va = (instr >> 16) & 0x1f; + vb = (instr >> 11) & 0x1f; + vc = (instr >> 6) & 0x1f; + + vrs = current->thread.vr; + switch (instr & 0x3f) { + case 10: + switch (vc) { + case 0: /* vaddfp */ + vaddfp(&vrs[vd], &vrs[va], &vrs[vb]); + break; + case 1: /* vsubfp */ + vsubfp(&vrs[vd], &vrs[va], &vrs[vb]); + break; + case 4: /* vrefp */ + vrefp(&vrs[vd], &vrs[vb]); + break; + case 5: /* vrsqrtefp */ + vrsqrtefp(&vrs[vd], &vrs[vb]); + break; + case 6: /* vexptefp */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = eexp2(vrs[vb].u[i]); + break; + case 7: /* vlogefp */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = elog2(vrs[vb].u[i]); + break; + case 8: /* vrfin */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = rfin(vrs[vb].u[i]); + break; + case 9: /* vrfiz */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = rfiz(vrs[vb].u[i]); + break; + case 10: /* vrfip */ + for (i = 0; i < 4; ++i) { + u32 x = vrs[vb].u[i]; + x = (x & 0x80000000)? 
rfiz(x): rfii(x); + vrs[vd].u[i] = x; + } + break; + case 11: /* vrfim */ + for (i = 0; i < 4; ++i) { + u32 x = vrs[vb].u[i]; + x = (x & 0x80000000)? rfii(x): rfiz(x); + vrs[vd].u[i] = x; + } + break; + case 14: /* vctuxs */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va, + ¤t->thread.vscr.u[3]); + break; + case 15: /* vctsxs */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va, + ¤t->thread.vscr.u[3]); + break; + default: + return -EINVAL; + } + break; + case 46: /* vmaddfp */ + vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]); + break; + case 47: /* vnmsubfp */ + vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]); + break; + default: + return -EINVAL; + } + + return 0; +} diff --git a/arch/ppc/kernel/vector.S b/arch/ppc/kernel/vector.S new file mode 100644 index 000000000..d8fe6b5fb --- /dev/null +++ b/arch/ppc/kernel/vector.S @@ -0,0 +1,217 @@ +#include +#include + +/* + * The routines below are in assembler so we can closely control the + * usage of floating-point registers. These routines must be called + * with preempt disabled. + */ + .data +fpzero: + .long 0 +fpone: + .long 0x3f800000 /* 1.0 in single-precision FP */ +fphalf: + .long 0x3f000000 /* 0.5 in single-precision FP */ + + .text +/* + * Internal routine to enable floating point and set FPSCR to 0. + * Don't call it from C; it doesn't use the normal calling convention. + */ +fpenable: + mfmsr r10 + ori r11,r10,MSR_FP + mtmsr r11 + isync + stfd fr0,24(r1) + stfd fr1,16(r1) + stfd fr31,8(r1) + lis r11,fpzero@ha + mffs fr31 + lfs fr1,fpzero@l(r11) + mtfsf 0xff,fr1 + blr + +fpdisable: + mtfsf 0xff,fr31 + lfd fr31,8(r1) + lfd fr1,16(r1) + lfd fr0,24(r1) + mtmsr r10 + isync + blr + +/* + * Vector add, floating point. + */ + .globl vaddfp +vaddfp: + stwu r1,-32(r1) + mflr r0 + stw r0,36(r1) + bl fpenable + li r0,4 + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + lfsx fr1,r5,r6 + fadds fr0,fr0,fr1 + stfsx fr0,r3,r6 + addi r6,r6,4 + bdnz 1b + bl fpdisable + lwz r0,36(r1) + mtlr r0 + addi r1,r1,32 + blr + +/* + * Vector subtract, floating point. + */ + .globl vsubfp +vsubfp: + stwu r1,-32(r1) + mflr r0 + stw r0,36(r1) + bl fpenable + li r0,4 + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + lfsx fr1,r5,r6 + fsubs fr0,fr0,fr1 + stfsx fr0,r3,r6 + addi r6,r6,4 + bdnz 1b + bl fpdisable + lwz r0,36(r1) + mtlr r0 + addi r1,r1,32 + blr + +/* + * Vector multiply and add, floating point. + */ + .globl vmaddfp +vmaddfp: + stwu r1,-48(r1) + mflr r0 + stw r0,52(r1) + bl fpenable + stfd fr2,32(r1) + li r0,4 + mtctr r0 + li r7,0 +1: lfsx fr0,r4,r7 + lfsx fr1,r5,r7 + lfsx fr2,r6,r7 + fmadds fr0,fr0,fr1,fr2 + stfsx fr0,r3,r7 + addi r7,r7,4 + bdnz 1b + lfd fr2,32(r1) + bl fpdisable + lwz r0,52(r1) + mtlr r0 + addi r1,r1,48 + blr + +/* + * Vector negative multiply and subtract, floating point. + */ + .globl vnmsubfp +vnmsubfp: + stwu r1,-48(r1) + mflr r0 + stw r0,52(r1) + bl fpenable + stfd fr2,32(r1) + li r0,4 + mtctr r0 + li r7,0 +1: lfsx fr0,r4,r7 + lfsx fr1,r5,r7 + lfsx fr2,r6,r7 + fnmsubs fr0,fr0,fr1,fr2 + stfsx fr0,r3,r7 + addi r7,r7,4 + bdnz 1b + lfd fr2,32(r1) + bl fpdisable + lwz r0,52(r1) + mtlr r0 + addi r1,r1,48 + blr + +/* + * Vector reciprocal estimate. We just compute 1.0/x. + * r3 -> destination, r4 -> source. 
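+ * fdivs is applied to each of the four elements in turn, so the value
+ * produced is a correctly rounded single-precision quotient rather than a
+ * low-precision hardware estimate.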
+ */ + .globl vrefp +vrefp: + stwu r1,-32(r1) + mflr r0 + stw r0,36(r1) + bl fpenable + lis r9,fpone@ha + li r0,4 + lfs fr1,fpone@l(r9) + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + fdivs fr0,fr1,fr0 + stfsx fr0,r3,r6 + addi r6,r6,4 + bdnz 1b + bl fpdisable + lwz r0,36(r1) + mtlr r0 + addi r1,r1,32 + blr + +/* + * Vector reciprocal square-root estimate, floating point. + * We use the frsqrte instruction for the initial estimate followed + * by 2 iterations of Newton-Raphson to get sufficient accuracy. + * r3 -> destination, r4 -> source. + */ + .globl vrsqrtefp +vrsqrtefp: + stwu r1,-48(r1) + mflr r0 + stw r0,52(r1) + bl fpenable + stfd fr2,32(r1) + stfd fr3,40(r1) + stfd fr4,48(r1) + stfd fr5,56(r1) + lis r9,fpone@ha + lis r8,fphalf@ha + li r0,4 + lfs fr4,fpone@l(r9) + lfs fr5,fphalf@l(r8) + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + frsqrte fr1,fr0 /* r = frsqrte(s) */ + fmuls fr3,fr1,fr0 /* r * s */ + fmuls fr2,fr1,fr5 /* r * 0.5 */ + fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ + fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ + fmuls fr3,fr1,fr0 /* r * s */ + fmuls fr2,fr1,fr5 /* r * 0.5 */ + fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ + fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ + stfsx fr1,r3,r6 + addi r6,r6,4 + bdnz 1b + lfd fr5,56(r1) + lfd fr4,48(r1) + lfd fr3,40(r1) + lfd fr2,32(r1) + bl fpdisable + lwz r0,36(r1) + mtlr r0 + addi r1,r1,32 + blr diff --git a/arch/ppc/lib/rheap.c b/arch/ppc/lib/rheap.c new file mode 100644 index 000000000..4d938a062 --- /dev/null +++ b/arch/ppc/lib/rheap.c @@ -0,0 +1,692 @@ +/* + * arch/ppc/syslib/rheap.c + * + * A Remote Heap. Remote means that we don't touch the memory that the + * heap points to. Normal heap implementations use the memory they manage + * to place their list. We cannot do that because the memory we manage may + * have special properties, for example it is uncachable or of different + * endianess. + * + * Author: Pantelis Antoniou + * + * 2004 (c) INTRACOM S.A. Greece. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + */ +#include +#include +#include +#include + +#include + +/* + * Fixup a list_head, needed when copying lists. If the pointers fall + * between s and e, apply the delta. This assumes that + * sizeof(struct list_head *) == sizeof(unsigned long *). 
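+ * Example: grow() copies the rh_block_t array to a new allocation d bytes
+ * away; every ->next/->prev value that still points into the old array
+ * range [s, e) is shifted by d, while anything outside that range is left
+ * untouched.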
+ */ +static inline void fixup(unsigned long s, unsigned long e, int d, + struct list_head *l) +{ + unsigned long *pp; + + pp = (unsigned long *)&l->next; + if (*pp >= s && *pp < e) + *pp += d; + + pp = (unsigned long *)&l->prev; + if (*pp >= s && *pp < e) + *pp += d; +} + +/* Grow the allocated blocks */ +static int grow(rh_info_t * info, int max_blocks) +{ + rh_block_t *block, *blk; + int i, new_blocks; + int delta; + unsigned long blks, blke; + + if (max_blocks <= info->max_blocks) + return -EINVAL; + + new_blocks = max_blocks - info->max_blocks; + + block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL); + if (block == NULL) + return -ENOMEM; + + if (info->max_blocks > 0) { + + /* copy old block area */ + memcpy(block, info->block, + sizeof(rh_block_t) * info->max_blocks); + + delta = (char *)block - (char *)info->block; + + /* and fixup list pointers */ + blks = (unsigned long)info->block; + blke = (unsigned long)(info->block + info->max_blocks); + + for (i = 0, blk = block; i < info->max_blocks; i++, blk++) + fixup(blks, blke, delta, &blk->list); + + fixup(blks, blke, delta, &info->empty_list); + fixup(blks, blke, delta, &info->free_list); + fixup(blks, blke, delta, &info->taken_list); + + /* free the old allocated memory */ + if ((info->flags & RHIF_STATIC_BLOCK) == 0) + kfree(info->block); + } + + info->block = block; + info->empty_slots += new_blocks; + info->max_blocks = max_blocks; + info->flags &= ~RHIF_STATIC_BLOCK; + + /* add all new blocks to the free list */ + for (i = 0, blk = block + info->max_blocks; i < new_blocks; i++, blk++) + list_add(&blk->list, &info->empty_list); + + return 0; +} + +/* + * Assure at least the required amount of empty slots. If this function + * causes a grow in the block area then all pointers kept to the block + * area are invalid! + */ +static int assure_empty(rh_info_t * info, int slots) +{ + int max_blocks; + + /* This function is not meant to be used to grow uncontrollably */ + if (slots >= 4) + return -EINVAL; + + /* Enough space */ + if (info->empty_slots >= slots) + return 0; + + /* Next 16 sized block */ + max_blocks = ((info->max_blocks + slots) + 15) & ~15; + + return grow(info, max_blocks); +} + +static rh_block_t *get_slot(rh_info_t * info) +{ + rh_block_t *blk; + + /* If no more free slots, and failure to extend. 
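+ * was not caught by an earlier assure_empty() call, all we can do here is
+ * warn and return NULL.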
*/ + /* XXX: You should have called assure_empty before */ + if (info->empty_slots == 0) { + printk(KERN_ERR "rh: out of slots; crash is imminent.\n"); + return NULL; + } + + /* Get empty slot to use */ + blk = list_entry(info->empty_list.next, rh_block_t, list); + list_del_init(&blk->list); + info->empty_slots--; + + /* Initialize */ + blk->start = NULL; + blk->size = 0; + blk->owner = NULL; + + return blk; +} + +static inline void release_slot(rh_info_t * info, rh_block_t * blk) +{ + list_add(&blk->list, &info->empty_list); + info->empty_slots++; +} + +static void attach_free_block(rh_info_t * info, rh_block_t * blkn) +{ + rh_block_t *blk; + rh_block_t *before; + rh_block_t *after; + rh_block_t *next; + int size; + unsigned long s, e, bs, be; + struct list_head *l; + + /* We assume that they are aligned properly */ + size = blkn->size; + s = (unsigned long)blkn->start; + e = s + size; + + /* Find the blocks immediately before and after the given one + * (if any) */ + before = NULL; + after = NULL; + next = NULL; + + list_for_each(l, &info->free_list) { + blk = list_entry(l, rh_block_t, list); + + bs = (unsigned long)blk->start; + be = bs + blk->size; + + if (next == NULL && s >= bs) + next = blk; + + if (be == s) + before = blk; + + if (e == bs) + after = blk; + + /* If both are not null, break now */ + if (before != NULL && after != NULL) + break; + } + + /* Now check if they are really adjacent */ + if (before != NULL && s != (unsigned long)before->start + before->size) + before = NULL; + + if (after != NULL && e != (unsigned long)after->start) + after = NULL; + + /* No coalescing; list insert and return */ + if (before == NULL && after == NULL) { + + if (next != NULL) + list_add(&blkn->list, &next->list); + else + list_add(&blkn->list, &info->free_list); + + return; + } + + /* We don't need it anymore */ + release_slot(info, blkn); + + /* Grow the before block */ + if (before != NULL && after == NULL) { + before->size += size; + return; + } + + /* Grow the after block backwards */ + if (before == NULL && after != NULL) { + (int8_t *) after->start -= size; + after->size += size; + return; + } + + /* Grow the before block, and release the after block */ + before->size += size + after->size; + list_del(&after->list); + release_slot(info, after); +} + +static void attach_taken_block(rh_info_t * info, rh_block_t * blkn) +{ + rh_block_t *blk; + struct list_head *l; + + /* Find the block immediately before the given one (if any) */ + list_for_each(l, &info->taken_list) { + blk = list_entry(l, rh_block_t, list); + if (blk->start > blkn->start) { + list_add_tail(&blkn->list, &blk->list); + return; + } + } + + list_add_tail(&blkn->list, &info->taken_list); +} + +/* + * Create a remote heap dynamically. Note that no memory for the blocks + * are allocated. It will upon the first allocation + */ +rh_info_t *rh_create(unsigned int alignment) +{ + rh_info_t *info; + + /* Alignment must be a power of two */ + if ((alignment & (alignment - 1)) != 0) + return NULL; + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (info == NULL) + return NULL; + + info->alignment = alignment; + + /* Initially everything as empty */ + info->block = NULL; + info->max_blocks = 0; + info->empty_slots = 0; + info->flags = 0; + + INIT_LIST_HEAD(&info->empty_list); + INIT_LIST_HEAD(&info->free_list); + INIT_LIST_HEAD(&info->taken_list); + + return info; +} + +/* + * Destroy a dynamically created remote heap. 
Deallocate only if the areas + * are not static + */ +void rh_destroy(rh_info_t * info) +{ + if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL) + kfree(info->block); + + if ((info->flags & RHIF_STATIC_INFO) == 0) + kfree(info); +} + +/* + * Initialize in place a remote heap info block. This is needed to support + * operation very early in the startup of the kernel, when it is not yet safe + * to call kmalloc. + */ +void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks, + rh_block_t * block) +{ + int i; + rh_block_t *blk; + + /* Alignment must be a power of two */ + if ((alignment & (alignment - 1)) != 0) + return; + + info->alignment = alignment; + + /* Initially everything as empty */ + info->block = block; + info->max_blocks = max_blocks; + info->empty_slots = max_blocks; + info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK; + + INIT_LIST_HEAD(&info->empty_list); + INIT_LIST_HEAD(&info->free_list); + INIT_LIST_HEAD(&info->taken_list); + + /* Add all new blocks to the free list */ + for (i = 0, blk = block; i < max_blocks; i++, blk++) + list_add(&blk->list, &info->empty_list); +} + +/* Attach a free memory region, coalesces regions if adjuscent */ +int rh_attach_region(rh_info_t * info, void *start, int size) +{ + rh_block_t *blk; + unsigned long s, e, m; + int r; + + /* The region must be aligned */ + s = (unsigned long)start; + e = s + size; + m = info->alignment - 1; + + /* Round start up */ + s = (s + m) & ~m; + + /* Round end down */ + e = e & ~m; + + /* Take final values */ + start = (void *)s; + size = (int)(e - s); + + /* Grow the blocks, if needed */ + r = assure_empty(info, 1); + if (r < 0) + return r; + + blk = get_slot(info); + blk->start = start; + blk->size = size; + blk->owner = NULL; + + attach_free_block(info, blk); + + return 0; +} + +/* Detatch given address range, splits free block if needed. 
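+ * Returns the (aligned) start address on success, or NULL if the range
+ * does not fall entirely within a single free block.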
*/ +void *rh_detach_region(rh_info_t * info, void *start, int size) +{ + struct list_head *l; + rh_block_t *blk, *newblk; + unsigned long s, e, m, bs, be; + + /* Validate size */ + if (size <= 0) + return NULL; + + /* The region must be aligned */ + s = (unsigned long)start; + e = s + size; + m = info->alignment - 1; + + /* Round start up */ + s = (s + m) & ~m; + + /* Round end down */ + e = e & ~m; + + if (assure_empty(info, 1) < 0) + return NULL; + + blk = NULL; + list_for_each(l, &info->free_list) { + blk = list_entry(l, rh_block_t, list); + /* The range must lie entirely inside one free block */ + bs = (unsigned long)blk->start; + be = (unsigned long)blk->start + blk->size; + if (s >= bs && e <= be) + break; + blk = NULL; + } + + if (blk == NULL) + return NULL; + + /* Perfect fit */ + if (bs == s && be == e) { + /* Delete from free list, release slot */ + list_del(&blk->list); + release_slot(info, blk); + return (void *)s; + } + + /* blk still in free list, with updated start and/or size */ + if (bs == s || be == e) { + if (bs == s) + (int8_t *) blk->start += size; + blk->size -= size; + + } else { + /* The front free fragment */ + blk->size = s - bs; + + /* the back free fragment */ + newblk = get_slot(info); + newblk->start = (void *)e; + newblk->size = be - e; + + list_add(&newblk->list, &blk->list); + } + + return (void *)s; +} + +void *rh_alloc(rh_info_t * info, int size, const char *owner) +{ + struct list_head *l; + rh_block_t *blk; + rh_block_t *newblk; + void *start; + + /* Validate size */ + if (size <= 0) + return NULL; + + /* Align to configured alignment */ + size = (size + (info->alignment - 1)) & ~(info->alignment - 1); + + if (assure_empty(info, 1) < 0) + return NULL; + + blk = NULL; + list_for_each(l, &info->free_list) { + blk = list_entry(l, rh_block_t, list); + if (size <= blk->size) + break; + blk = NULL; + } + + if (blk == NULL) + return NULL; + + /* Just fits */ + if (blk->size == size) { + /* Move from free list to taken list */ + list_del(&blk->list); + blk->owner = owner; + start = blk->start; + + attach_taken_block(info, blk); + + return start; + } + + newblk = get_slot(info); + newblk->start = blk->start; + newblk->size = size; + newblk->owner = owner; + + /* blk still in free list, with updated start, size */ + (int8_t *) blk->start += size; + blk->size -= size; + + start = newblk->start; + + attach_taken_block(info, newblk); + + return start; +} + +/* allocate at precisely the given address */ +void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner) +{ + struct list_head *l; + rh_block_t *blk, *newblk1, *newblk2; + unsigned long s, e, m, bs, be; + + /* Validate size */ + if (size <= 0) + return NULL; + + /* The region must be aligned */ + s = (unsigned long)start; + e = s + size; + m = info->alignment - 1; + + /* Round start up */ + s = (s + m) & ~m; + + /* Round end down */ + e = e & ~m; + + if (assure_empty(info, 2) < 0) + return NULL; + + blk = NULL; + list_for_each(l, &info->free_list) { + blk = list_entry(l, rh_block_t, list); + /* The range must lie entirely inside one free block */ + bs = (unsigned long)blk->start; + be = (unsigned long)blk->start + blk->size; + if (s >= bs && e <= be) + break; + } + + if (blk == NULL) + return NULL; + + /* Perfect fit */ + if (bs == s && be == e) { + /* Move from free list to taken list */ + list_del(&blk->list); + blk->owner = owner; + + start = blk->start; + attach_taken_block(info, blk); + + return start; + + } + + /* blk still in free list, with updated start and/or size */ + if (bs == s 
|| be == e) { + if (bs == s) + (int8_t *) blk->start += size; + blk->size -= size; + + } else { + /* The front free fragment */ + blk->size = s - bs; + + /* The back free fragment */ + newblk2 = get_slot(info); + newblk2->start = (void *)e; + newblk2->size = be - e; + + list_add(&newblk2->list, &blk->list); + } + + newblk1 = get_slot(info); + newblk1->start = (void *)s; + newblk1->size = e - s; + newblk1->owner = owner; + + start = newblk1->start; + attach_taken_block(info, newblk1); + + return start; +} + +int rh_free(rh_info_t * info, void *start) +{ + rh_block_t *blk, *blk2; + struct list_head *l; + int size; + + /* Linear search for block */ + blk = NULL; + list_for_each(l, &info->taken_list) { + blk2 = list_entry(l, rh_block_t, list); + if (start < blk2->start) + break; + blk = blk2; + } + + if (blk == NULL || start > (blk->start + blk->size)) + return -EINVAL; + + /* Remove from taken list */ + list_del(&blk->list); + + /* Get size of freed block */ + size = blk->size; + attach_free_block(info, blk); + + return size; +} + +int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats) +{ + rh_block_t *blk; + struct list_head *l; + struct list_head *h; + int nr; + + switch (what) { + + case RHGS_FREE: + h = &info->free_list; + break; + + case RHGS_TAKEN: + h = &info->taken_list; + break; + + default: + return -EINVAL; + } + + /* Linear search for block */ + nr = 0; + list_for_each(l, h) { + blk = list_entry(l, rh_block_t, list); + if (stats != NULL && nr < max_stats) { + stats->start = blk->start; + stats->size = blk->size; + stats->owner = blk->owner; + stats++; + } + nr++; + } + + return nr; +} + +int rh_set_owner(rh_info_t * info, void *start, const char *owner) +{ + rh_block_t *blk, *blk2; + struct list_head *l; + int size; + + /* Linear search for block */ + blk = NULL; + list_for_each(l, &info->taken_list) { + blk2 = list_entry(l, rh_block_t, list); + if (start < blk2->start) + break; + blk = blk2; + } + + if (blk == NULL || start > (blk->start + blk->size)) + return -EINVAL; + + blk->owner = owner; + + return size; +} + +void rh_dump(rh_info_t * info) +{ + static rh_stats_t st[32]; /* XXX maximum 32 blocks */ + int maxnr; + int i, nr; + + maxnr = sizeof(st) / sizeof(st[0]); + + printk(KERN_INFO + "info @0x%p (%d slots empty / %d max)\n", + info, info->empty_slots, info->max_blocks); + + printk(KERN_INFO " Free:\n"); + nr = rh_get_stats(info, RHGS_FREE, maxnr, st); + if (nr > maxnr) + nr = maxnr; + for (i = 0; i < nr; i++) + printk(KERN_INFO + " 0x%p-0x%p (%u)\n", + st[i].start, (int8_t *) st[i].start + st[i].size, + st[i].size); + printk(KERN_INFO "\n"); + + printk(KERN_INFO " Taken:\n"); + nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st); + if (nr > maxnr) + nr = maxnr; + for (i = 0; i < nr; i++) + printk(KERN_INFO + " 0x%p-0x%p (%u) %s\n", + st[i].start, (int8_t *) st[i].start + st[i].size, + st[i].size, st[i].owner != NULL ? st[i].owner : ""); + printk(KERN_INFO "\n"); +} + +void rh_dump_blk(rh_info_t * info, rh_block_t * blk) +{ + printk(KERN_INFO + "blk @0x%p: 0x%p-0x%p (%u)\n", + blk, blk->start, (int8_t *) blk->start + blk->size, blk->size); +} diff --git a/arch/ppc/mm/fsl_booke_mmu.c b/arch/ppc/mm/fsl_booke_mmu.c new file mode 100644 index 000000000..baed25b9f --- /dev/null +++ b/arch/ppc/mm/fsl_booke_mmu.c @@ -0,0 +1,236 @@ +/* + * Modifications by Kumar Gala (kumar.gala@freescale.com) to support + * E500 Book E processors. 
+ * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This file contains the routines for initializing the MMU + * on the 4xx series of chips. + * -- paulus + * + * Derived from arch/ppc/mm/init.c: + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * + * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) + * and Cort Dougan (PReP) (cort@cs.nmt.edu) + * Copyright (C) 1996 Paul Mackerras + * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk). + * + * Derived from "arch/i386/mm/init.c" + * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern void loadcam_entry(unsigned int index); +unsigned int tlbcam_index; +unsigned int num_tlbcam_entries; +static unsigned long __cam0, __cam1, __cam2; +extern unsigned long total_lowmem; +extern unsigned long __max_low_memory; +#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE + +struct tlbcam { + u32 MAS0; + u32 MAS1; + u32 MAS2; + u32 MAS3; + u32 MAS7; +} TLBCAM[NUM_TLBCAMS]; + +struct tlbcamrange { + unsigned long start; + unsigned long limit; + phys_addr_t phys; +} tlbcam_addrs[NUM_TLBCAMS]; + +extern unsigned int tlbcam_index; + +/* + * Return PA for this VA if it is mapped by a CAM, or 0 + */ +unsigned long v_mapped_by_tlbcam(unsigned long va) +{ + int b; + for (b = 0; b < tlbcam_index; ++b) + if (va >= tlbcam_addrs[b].start && va < tlbcam_addrs[b].limit) + return tlbcam_addrs[b].phys + (va - tlbcam_addrs[b].start); + return 0; +} + +/* + * Return VA for a given PA or 0 if not mapped + */ +unsigned long p_mapped_by_tlbcam(unsigned long pa) +{ + int b; + for (b = 0; b < tlbcam_index; ++b) + if (pa >= tlbcam_addrs[b].phys + && pa < (tlbcam_addrs[b].limit-tlbcam_addrs[b].start) + +tlbcam_addrs[b].phys) + return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); + return 0; +} + +/* + * Set up one of the I/D BAT (block address translation) register pairs. + * The parameters are not checked; in particular size must be a power + * of 4 between 4k and 256M. + */ +void settlbcam(int index, unsigned long virt, phys_addr_t phys, + unsigned int size, int flags, unsigned int pid) +{ + unsigned int tsize, lz; + + asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size)); + tsize = (21 - lz) / 2; + +#ifdef CONFIG_SMP + if ((flags & _PAGE_NO_CACHE) == 0) + flags |= _PAGE_COHERENT; +#endif + + TLBCAM[index].MAS0 = MAS0_TLBSEL | (index << 16); + TLBCAM[index].MAS1 = MAS1_VALID | MAS1_IPROT | MAS1_TSIZE(tsize) | ((pid << 16) & MAS1_TID); + TLBCAM[index].MAS2 = virt & PAGE_MASK; + + TLBCAM[index].MAS2 |= (flags & _PAGE_WRITETHRU) ? MAS2_W : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_NO_CACHE) ? MAS2_I : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_COHERENT) ? MAS2_M : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0; + TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0; + + TLBCAM[index].MAS3 = (phys & PAGE_MASK) | MAS3_SX | MAS3_SR; + TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? 
MAS3_SW : 0); + +#ifndef CONFIG_KGDB /* want user access for breakpoints */ + if (flags & _PAGE_USER) { + TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR; + TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0); + } +#else + TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR; + TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0); +#endif + + tlbcam_addrs[index].start = virt; + tlbcam_addrs[index].limit = virt + size - 1; + tlbcam_addrs[index].phys = phys; + + loadcam_entry(index); +} + +void invalidate_tlbcam_entry(int index) +{ + TLBCAM[index].MAS0 = MAS0_TLBSEL | (index << 16); + TLBCAM[index].MAS1 = ~MAS1_VALID; + + loadcam_entry(index); +} + +void __init cam_mapin_ram(unsigned long cam0, unsigned long cam1, + unsigned long cam2) +{ + settlbcam(0, KERNELBASE, PPC_MEMSTART, cam0, _PAGE_KERNEL, 0); + tlbcam_index++; + if (cam1) { + tlbcam_index++; + settlbcam(1, KERNELBASE+cam0, PPC_MEMSTART+cam0, cam1, _PAGE_KERNEL, 0); + } + if (cam2) { + tlbcam_index++; + settlbcam(2, KERNELBASE+cam0+cam1, PPC_MEMSTART+cam0+cam1, cam2, _PAGE_KERNEL, 0); + } +} + +/* + * MMU_init_hw does the chip-specific initialization of the MMU hardware. + */ +void __init MMU_init_hw(void) +{ + flush_instruction_cache(); +} + +unsigned long __init mmu_mapin_ram(void) +{ + cam_mapin_ram(__cam0, __cam1, __cam2); + + return __cam0 + __cam1 + __cam2; +} + + +void __init +adjust_total_lowmem(void) +{ + unsigned long max_low_mem = MAX_LOW_MEM; + unsigned long cam_max = 0x10000000; + unsigned long ram; + + /* adjust CAM size to max_low_mem */ + if (max_low_mem < cam_max) + cam_max = max_low_mem; + + /* adjust lowmem size to max_low_mem */ + if (max_low_mem < total_lowmem) + ram = max_low_mem; + else + ram = total_lowmem; + + /* Calculate CAM values */ + __cam0 = 1UL << 2 * (__ilog2(ram) / 2); + if (__cam0 > cam_max) + __cam0 = cam_max; + ram -= __cam0; + if (ram) { + __cam1 = 1UL << 2 * (__ilog2(ram) / 2); + if (__cam1 > cam_max) + __cam1 = cam_max; + ram -= __cam1; + } + if (ram) { + __cam2 = 1UL << 2 * (__ilog2(ram) / 2); + if (__cam2 > cam_max) + __cam2 = cam_max; + ram -= __cam2; + } + + printk(KERN_INFO "Memory CAM mapping: CAM0=%ldMb, CAM1=%ldMb," + " CAM2=%ldMb residual: %ldMb\n", + __cam0 >> 20, __cam1 >> 20, __cam2 >> 20, + (total_lowmem - __cam0 - __cam1 - __cam2) >> 20); + __max_low_memory = max_low_mem = __cam0 + __cam1 + __cam2; +} diff --git a/arch/ppc/oprofile/Kconfig b/arch/ppc/oprofile/Kconfig new file mode 100644 index 000000000..19d37730b --- /dev/null +++ b/arch/ppc/oprofile/Kconfig @@ -0,0 +1,23 @@ + +menu "Profiling support" + depends on EXPERIMENTAL + +config PROFILING + bool "Profiling support (EXPERIMENTAL)" + help + Say Y here to enable the extended profiling support mechanisms used + by profilers such as OProfile. + + +config OPROFILE + tristate "OProfile system profiling (EXPERIMENTAL)" + depends on PROFILING + help + OProfile is a profiling system capable of profiling the + whole system, include the kernel, kernel modules, libraries, + and applications. + + If unsure, say N. 
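+# Note: the ppc oprofile_arch_init() (see init.c in this directory) returns
+# -ENODEV, so the core is expected to fall back to OProfile's generic
+# timer-interrupt sampling; timer_int.o is linked into the module for that case.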
+ +endmenu + diff --git a/arch/ppc/oprofile/Makefile b/arch/ppc/oprofile/Makefile new file mode 100644 index 000000000..06e7c81ea --- /dev/null +++ b/arch/ppc/oprofile/Makefile @@ -0,0 +1,9 @@ +obj-$(CONFIG_OPROFILE) += oprofile.o + +DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \ + oprof.o cpu_buffer.o buffer_sync.o \ + event_buffer.o oprofile_files.o \ + oprofilefs.o oprofile_stats.o \ + timer_int.o ) + +oprofile-y := $(DRIVER_OBJS) init.o diff --git a/arch/ppc/oprofile/init.c b/arch/ppc/oprofile/init.c new file mode 100644 index 000000000..e4217d661 --- /dev/null +++ b/arch/ppc/oprofile/init.c @@ -0,0 +1,23 @@ +/** + * @file init.c + * + * @remark Copyright 2002 OProfile authors + * @remark Read the file COPYING + * + * @author John Levon + */ + +#include +#include +#include +#include + +int __init oprofile_arch_init(struct oprofile_operations ** ops) +{ + return -ENODEV; +} + + +void oprofile_arch_exit(void) +{ +} diff --git a/arch/ppc/platforms/4xx/bubinga.c b/arch/ppc/platforms/4xx/bubinga.c new file mode 100644 index 000000000..3678abf86 --- /dev/null +++ b/arch/ppc/platforms/4xx/bubinga.c @@ -0,0 +1,263 @@ +/* + * Support for IBM PPC 405EP evaluation board (Bubinga). + * + * Author: SAW (IBM), derived from walnut.c. + * Maintained by MontaVista Software + * + * 2003 (c) MontaVista Softare Inc. This file is licensed under the + * terms of the GNU General Public License version 2. This program is + * licensed "as is" without any warranty of any kind, whether express + * or implied. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#undef DEBUG + +#ifdef DEBUG +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + +extern bd_t __res; + +void *bubinga_rtc_base; + +/* Some IRQs unique to the board + * Used by the generic 405 PCI setup functions in ppc4xx_pci.c + */ +int __init +ppc405_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) +{ + static char pci_irq_table[][4] = + /* + * PCI IDSEL/INTPIN->INTLINE + * A B C D + */ + { + {28, 28, 28, 28}, /* IDSEL 1 - PCI slot 1 */ + {29, 29, 29, 29}, /* IDSEL 2 - PCI slot 2 */ + {30, 30, 30, 30}, /* IDSEL 3 - PCI slot 3 */ + {31, 31, 31, 31}, /* IDSEL 4 - PCI slot 4 */ + }; + + const long min_idsel = 1, max_idsel = 4, irqs_per_slot = 4; + return PCI_IRQ_TABLE_LOOKUP; +}; + +/* The serial clock for the chip is an internal clock determined by + * different clock speeds/dividers. + * Calculate the proper input baud rate and setup the serial driver. + */ +static void __init +bubinga_early_serial_map(void) +{ + u32 uart_div; + int uart_clock; + struct uart_port port; + + /* Calculate the serial clock input frequency + * + * The base baud is the PLL OUTA (provided in the board info + * structure) divided by the external UART Divisor, divided + * by 16. 
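+ * (The divide by 16 is not applied here: port.uartclk is given the
+ * undivided clock, and the serial core divides by 16 when it programs the
+ * baud divisor.)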
+ */ + uart_div = (mfdcr(DCRN_CPC0_UCR_BASE) & DCRN_CPC0_UCR_U0DIV); + uart_clock = __res.bi_pllouta_freq / uart_div; + + /* Setup serial port access */ + memset(&port, 0, sizeof(port)); + port.membase = (void*)ACTING_UART0_IO_BASE; + port.irq = ACTING_UART0_INT; + port.uartclk = uart_clock; + port.regshift = 0; + port.iotype = SERIAL_IO_MEM; + port.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST; + port.line = 0; + + if (early_serial_setup(&port) != 0) { + printk("Early serial init of port 0 failed\n"); + } + + port.membase = (void*)ACTING_UART1_IO_BASE; + port.irq = ACTING_UART1_INT; + port.line = 1; + + if (early_serial_setup(&port) != 0) { + printk("Early serial init of port 1 failed\n"); + } +} + +void __init +bios_fixup(struct pci_controller *hose, struct pcil0_regs *pcip) +{ + + unsigned int bar_response, bar; + /* + * Expected PCI mapping: + * + * PLB addr PCI memory addr + * --------------------- --------------------- + * 0000'0000 - 7fff'ffff <--- 0000'0000 - 7fff'ffff + * 8000'0000 - Bfff'ffff ---> 8000'0000 - Bfff'ffff + * + * PLB addr PCI io addr + * --------------------- --------------------- + * e800'0000 - e800'ffff ---> 0000'0000 - 0001'0000 + * + * The following code is simplified by assuming that the bootrom + * has been well behaved in following this mapping. + */ + +#ifdef DEBUG + int i; + + printk("ioremap PCLIO_BASE = 0x%x\n", pcip); + printk("PCI bridge regs before fixup \n"); + for (i = 0; i <= 3; i++) { + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].ma))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].la))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].pcila))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].pciha))); + } + printk(" ptm1ms\t0x%x\n", in_le32(&(pcip->ptm1ms))); + printk(" ptm1la\t0x%x\n", in_le32(&(pcip->ptm1la))); + printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms))); + printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la))); + +#endif + + /* added for IBM boot rom version 1.15 bios bar changes -AK */ + + /* Disable region first */ + out_le32((void *) &(pcip->pmm[0].ma), 0x00000000); + /* PLB starting addr, PCI: 0x80000000 */ + out_le32((void *) &(pcip->pmm[0].la), 0x80000000); + /* PCI start addr, 0x80000000 */ + out_le32((void *) &(pcip->pmm[0].pcila), PPC405_PCI_MEM_BASE); + /* 512MB range of PLB to PCI */ + out_le32((void *) &(pcip->pmm[0].pciha), 0x00000000); + /* Enable no pre-fetch, enable region */ + out_le32((void *) &(pcip->pmm[0].ma), ((0xffffffff - + (PPC405_PCI_UPPER_MEM - + PPC405_PCI_MEM_BASE)) | 0x01)); + + /* Disable region one */ + out_le32((void *) &(pcip->pmm[1].ma), 0x00000000); + out_le32((void *) &(pcip->pmm[1].la), 0x00000000); + out_le32((void *) &(pcip->pmm[1].pcila), 0x00000000); + out_le32((void *) &(pcip->pmm[1].pciha), 0x00000000); + out_le32((void *) &(pcip->pmm[1].ma), 0x00000000); + out_le32((void *) &(pcip->ptm1ms), 0x00000001); + + /* Disable region two */ + out_le32((void *) &(pcip->pmm[2].ma), 0x00000000); + out_le32((void *) &(pcip->pmm[2].la), 0x00000000); + out_le32((void *) &(pcip->pmm[2].pcila), 0x00000000); + out_le32((void *) &(pcip->pmm[2].pciha), 0x00000000); + out_le32((void *) &(pcip->pmm[2].ma), 0x00000000); + out_le32((void *) &(pcip->ptm2ms), 0x00000000); + out_le32((void *) &(pcip->ptm2la), 0x00000000); + + /* Zero config bars */ + for (bar = PCI_BASE_ADDRESS_1; bar <= PCI_BASE_ADDRESS_2; bar += 4) { + early_write_config_dword(hose, hose->first_busno, + PCI_FUNC(hose->first_busno), bar, + 0x00000000); + early_read_config_dword(hose, hose->first_busno, + 
PCI_FUNC(hose->first_busno), bar, + &bar_response); + DBG("BUS %d, device %d, Function %d bar 0x%8.8x is 0x%8.8x\n", + hose->first_busno, PCI_SLOT(hose->first_busno), + PCI_FUNC(hose->first_busno), bar, bar_response); + } + /* end work arround */ + +#ifdef DEBUG + printk("PCI bridge regs after fixup \n"); + for (i = 0; i <= 3; i++) { + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].ma))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].la))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].pcila))); + printk(" pmm%dma\t0x%x\n", i, in_le32(&(pcip->pmm[i].pciha))); + } + printk(" ptm1ms\t0x%x\n", in_le32(&(pcip->ptm1ms))); + printk(" ptm1la\t0x%x\n", in_le32(&(pcip->ptm1la))); + printk(" ptm2ms\t0x%x\n", in_le32(&(pcip->ptm2ms))); + printk(" ptm2la\t0x%x\n", in_le32(&(pcip->ptm2la))); + +#endif +} + +void __init +bubinga_setup_arch(void) +{ + ppc4xx_setup_arch(); + + ibm_ocp_set_emac(0, 1); + + bubinga_early_serial_map(); + + /* RTC step for the evb405ep */ + bubinga_rtc_base = (void *) BUBINGA_RTC_VADDR; + TODC_INIT(TODC_TYPE_DS1743, bubinga_rtc_base, bubinga_rtc_base, + bubinga_rtc_base, 8); + /* Identify the system */ + printk("IBM Bubinga port (MontaVista Software, Inc. )\n"); +} + +void __init +bubinga_map_io(void) +{ + ppc4xx_map_io(); + io_block_mapping(BUBINGA_RTC_VADDR, + BUBINGA_RTC_PADDR, BUBINGA_RTC_SIZE, _PAGE_IO); +} + +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + ppc4xx_init(r3, r4, r5, r6, r7); + + ppc_md.setup_arch = bubinga_setup_arch; + ppc_md.setup_io_mappings = bubinga_map_io; + +#ifdef CONFIG_GEN_RTC + ppc_md.time_init = todc_time_init; + ppc_md.set_rtc_time = todc_set_rtc_time; + ppc_md.get_rtc_time = todc_get_rtc_time; + ppc_md.nvram_read_val = todc_direct_read_val; + ppc_md.nvram_write_val = todc_direct_write_val; +#endif +#ifdef CONFIG_KGDB + ppc_md.early_serial_map = bubinga_early_serial_map; +#endif +} + diff --git a/arch/ppc/platforms/4xx/bubinga.h b/arch/ppc/platforms/4xx/bubinga.h new file mode 100644 index 000000000..b1df856f8 --- /dev/null +++ b/arch/ppc/platforms/4xx/bubinga.h @@ -0,0 +1,69 @@ +/* + * Support for IBM PPC 405EP evaluation board (Bubinga). + * + * Author: SAW (IBM), derived from walnut.h. + * Maintained by MontaVista Software + * + * 2003 (c) MontaVista Softare Inc. This file is licensed under the + * terms of the GNU General Public License version 2. This program is + * licensed "as is" without any warranty of any kind, whether express + * or implied. + */ + +#ifdef __KERNEL__ +#ifndef __BUBINGA_H__ +#define __BUBINGA_H__ + +/* 405EP */ +#include + +#ifndef __ASSEMBLY__ +/* + * Data structure defining board information maintained by the boot + * ROM on IBM's evaluation board. An effort has been made to + * keep the field names consistent with the 8xx 'bd_t' board info + * structures. 
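+ * The field layout below therefore has to match what the boot ROM actually
+ * writes out; only the names were chosen to mirror the 8xx conventions.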
+ */ + +typedef struct board_info { + unsigned char bi_s_version[4]; /* Version of this structure */ + unsigned char bi_r_version[30]; /* Version of the IBM ROM */ + unsigned int bi_memsize; /* DRAM installed, in bytes */ + unsigned char bi_enetaddr[2][6]; /* Local Ethernet MAC address */ unsigned char bi_pci_enetaddr[6]; /* PCI Ethernet MAC address */ + unsigned int bi_intfreq; /* Processor speed, in Hz */ + unsigned int bi_busfreq; /* PLB Bus speed, in Hz */ + unsigned int bi_pci_busfreq; /* PCI Bus speed, in Hz */ + unsigned int bi_opb_busfreq; /* OPB Bus speed, in Hz */ + unsigned int bi_pllouta_freq; /* PLL OUTA speed, in Hz */ +} bd_t; + +/* Some 4xx parts use a different timebase frequency from the internal clock. +*/ +#define bi_tbfreq bi_intfreq + + +/* Memory map for the Bubinga board. + * Generic 4xx plus RTC. + */ + +extern void *bubinga_rtc_base; +#define BUBINGA_RTC_PADDR ((uint)0xf0000000) +#define BUBINGA_RTC_VADDR BUBINGA_RTC_PADDR +#define BUBINGA_RTC_SIZE ((uint)8*1024) + +/* The UART clock is based off an internal clock - + * define BASE_BAUD based on the internal clock and divider(s). + * Since BASE_BAUD must be a constant, we will initialize it + * using clock/divider values which OpenBIOS initializes + * for typical configurations at various CPU speeds. + * The base baud is calculated as (FWDA / EXT UART DIV / 16) + */ +#define BASE_BAUD 0 + +#define BUBINGA_FPGA_BASE 0xF0300000 + +#define PPC4xx_MACHINE_NAME "IBM Bubinga" + +#endif /* !__ASSEMBLY__ */ +#endif /* __BUBINGA_H__ */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/platforms/4xx/ibm405ep.c b/arch/ppc/platforms/4xx/ibm405ep.c new file mode 100644 index 000000000..fb48e8254 --- /dev/null +++ b/arch/ppc/platforms/4xx/ibm405ep.c @@ -0,0 +1,134 @@ +/* + * arch/ppc/platforms/ibm405ep.c + * + * Support for IBM PPC 405EP processors. + * + * Author: SAW (IBM), derived from ibmnp405l.c. + * Maintained by MontaVista Software + * + * 2003 (c) MontaVista Softare Inc. This file is licensed under the + * terms of the GNU General Public License version 2. This program is + * licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +static struct ocp_func_mal_data ibm405ep_mal0_def = { + .num_tx_chans = 4, /* Number of TX channels */ + .num_rx_chans = 2, /* Number of RX channels */ + .txeob_irq = 11, /* TX End Of Buffer IRQ */ + .rxeob_irq = 12, /* RX End Of Buffer IRQ */ + .txde_irq = 13, /* TX Descriptor Error IRQ */ + .rxde_irq = 14, /* RX Descriptor Error IRQ */ + .serr_irq = 10, /* MAL System Error IRQ */ +}; +OCP_SYSFS_MAL_DATA() + +static struct ocp_func_emac_data ibm405ep_emac0_def = { + .rgmii_idx = -1, /* No RGMII */ + .rgmii_mux = -1, /* No RGMII */ + .zmii_idx = -1, /* ZMII device index */ + .zmii_mux = 0, /* ZMII input of this EMAC */ + .mal_idx = 0, /* MAL device index */ + .mal_rx_chan = 0, /* MAL rx channel number */ + .mal_tx_chan = 0, /* MAL tx channel number */ + .wol_irq = 9, /* WOL interrupt number */ + .mdio_idx = 0, /* MDIO via EMAC0 */ + .tah_idx = -1, /* No TAH */ +}; + +static struct ocp_func_emac_data ibm405ep_emac1_def = { + .rgmii_idx = -1, /* No RGMII */ + .rgmii_mux = -1, /* No RGMII */ + .zmii_idx = -1, /* ZMII device index */ + .zmii_mux = 0, /* ZMII input of this EMAC */ + .mal_idx = 0, /* MAL device index */ + .mal_rx_chan = 1, /* MAL rx channel number */ + .mal_tx_chan = 2, /* MAL tx channel number */ + .wol_irq = 9, /* WOL interrupt number */ + .mdio_idx = 0, /* MDIO via EMAC0 */ + .tah_idx = -1, /* No TAH */ +}; +OCP_SYSFS_EMAC_DATA() + +static struct ocp_func_iic_data ibm405ep_iic0_def = { + .fast_mode = 0, /* Use standad mode (100Khz) */ +}; +OCP_SYSFS_IIC_DATA() + +struct ocp_def core_ocp[] = { + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_OPB, + .index = 0, + .paddr = 0xEF600000, + .irq = OCP_IRQ_NA, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_16550, + .index = 0, + .paddr = UART0_IO_BASE, + .irq = UART0_INT, + .pm = IBM_CPM_UART0 + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_16550, + .index = 1, + .paddr = UART1_IO_BASE, + .irq = UART1_INT, + .pm = IBM_CPM_UART1 + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_IIC, + .paddr = 0xEF600500, + .irq = 2, + .pm = IBM_CPM_IIC0, + .additions = &ibm405ep_iic0_def, + .show = &ocp_show_iic_data + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_GPIO, + .paddr = 0xEF600700, + .irq = OCP_IRQ_NA, + .pm = IBM_CPM_GPIO0 + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_MAL, + .paddr = OCP_PADDR_NA, + .irq = OCP_IRQ_NA, + .pm = OCP_CPM_NA, + .additions = &ibm405ep_mal0_def, + .show = &ocp_show_mal_data + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_EMAC, + .index = 0, + .paddr = EMAC0_BASE, + .irq = 15, + .pm = OCP_CPM_NA, + .additions = &ibm405ep_emac0_def, + .show = &ocp_show_emac_data + }, + { .vendor = OCP_VENDOR_IBM, + .function = OCP_FUNC_EMAC, + .index = 1, + .paddr = 0xEF600900, + .irq = 17, + .pm = OCP_CPM_NA, + .additions = &ibm405ep_emac1_def, + .show = &ocp_show_emac_data + }, + { .vendor = OCP_VENDOR_INVALID + } +}; diff --git a/arch/ppc/platforms/4xx/ibm405ep.h b/arch/ppc/platforms/4xx/ibm405ep.h new file mode 100644 index 000000000..e051e3fe8 --- /dev/null +++ b/arch/ppc/platforms/4xx/ibm405ep.h @@ -0,0 +1,148 @@ +/* + * arch/ppc/platforms/4xx/ibm405ep.h + * + * IBM PPC 405EP processor defines. + * + * Author: SAW (IBM), derived from ibm405gp.h. + * Maintained by MontaVista Software + * + * 2003 (c) MontaVista Softare Inc. This file is licensed under the + * terms of the GNU General Public License version 2. 
This program is + * licensed "as is" without any warranty of any kind, whether express + * or implied. + */ + +#ifdef __KERNEL__ +#ifndef __ASM_IBM405EP_H__ +#define __ASM_IBM405EP_H__ + +#include + +/* ibm405.h at bottom of this file */ + +/* PCI + * PCI Bridge config reg definitions + * see 17-19 of manual + */ + +#define PPC405_PCI_CONFIG_ADDR 0xeec00000 +#define PPC405_PCI_CONFIG_DATA 0xeec00004 + +#define PPC405_PCI_PHY_MEM_BASE 0x80000000 /* hose_a->pci_mem_offset */ + /* setbat */ +#define PPC405_PCI_MEM_BASE PPC405_PCI_PHY_MEM_BASE /* setbat */ +#define PPC405_PCI_PHY_IO_BASE 0xe8000000 /* setbat */ +#define PPC405_PCI_IO_BASE PPC405_PCI_PHY_IO_BASE /* setbat */ + +#define PPC405_PCI_LOWER_MEM 0x80000000 /* hose_a->mem_space.start */ +#define PPC405_PCI_UPPER_MEM 0xBfffffff /* hose_a->mem_space.end */ +#define PPC405_PCI_LOWER_IO 0x00000000 /* hose_a->io_space.start */ +#define PPC405_PCI_UPPER_IO 0x0000ffff /* hose_a->io_space.end */ + +#define PPC405_ISA_IO_BASE PPC405_PCI_IO_BASE + +#define PPC4xx_PCI_IO_PADDR ((uint)PPC405_PCI_PHY_IO_BASE) +#define PPC4xx_PCI_IO_VADDR PPC4xx_PCI_IO_PADDR +#define PPC4xx_PCI_IO_SIZE ((uint)64*1024) +#define PPC4xx_PCI_CFG_PADDR ((uint)PPC405_PCI_CONFIG_ADDR) +#define PPC4xx_PCI_CFG_VADDR PPC4xx_PCI_CFG_PADDR +#define PPC4xx_PCI_CFG_SIZE ((uint)4*1024) +#define PPC4xx_PCI_LCFG_PADDR ((uint)0xef400000) +#define PPC4xx_PCI_LCFG_VADDR PPC4xx_PCI_LCFG_PADDR +#define PPC4xx_PCI_LCFG_SIZE ((uint)4*1024) +#define PPC4xx_ONB_IO_PADDR ((uint)0xef600000) +#define PPC4xx_ONB_IO_VADDR PPC4xx_ONB_IO_PADDR +#define PPC4xx_ONB_IO_SIZE ((uint)4*1024) + +/* serial port defines */ +#define RS_TABLE_SIZE 2 + +#define UART0_INT 0 +#define UART1_INT 1 + +#define PCIL0_BASE 0xEF400000 +#define UART0_IO_BASE 0xEF600300 +#define UART1_IO_BASE 0xEF600400 +#define EMAC0_BASE 0xEF600800 + +#define BD_EMAC_ADDR(e,i) bi_enetaddr[e][i] + +#if defined(CONFIG_UART0_TTYS0) +#define ACTING_UART0_IO_BASE UART0_IO_BASE +#define ACTING_UART1_IO_BASE UART1_IO_BASE +#define ACTING_UART0_INT UART0_INT +#define ACTING_UART1_INT UART1_INT +#else +#define ACTING_UART0_IO_BASE UART1_IO_BASE +#define ACTING_UART1_IO_BASE UART0_IO_BASE +#define ACTING_UART0_INT UART1_INT +#define ACTING_UART1_INT UART0_INT +#endif + +#define STD_UART_OP(num) \ + { 0, BASE_BAUD, 0, ACTING_UART##num##_INT, \ + (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST), \ + iomem_base: (u8 *)ACTING_UART##num##_IO_BASE, \ + io_type: SERIAL_IO_MEM}, + +#define SERIAL_DEBUG_IO_BASE ACTING_UART0_IO_BASE +#define SERIAL_PORT_DFNS \ + STD_UART_OP(0) \ + STD_UART_OP(1) + +/* DCR defines */ +#define DCRN_CPMSR_BASE 0x0BA +#define DCRN_CPMFR_BASE 0x0B9 + +#define DCRN_CPC0_PLLMR0_BASE 0x0F0 +#define DCRN_CPC0_BOOT_BASE 0x0F1 +#define DCRN_CPC0_CR1_BASE 0x0F2 +#define DCRN_CPC0_EPRCSR_BASE 0x0F3 +#define DCRN_CPC0_PLLMR1_BASE 0x0F4 +#define DCRN_CPC0_UCR_BASE 0x0F5 +#define DCRN_CPC0_UCR_U0DIV 0x07F +#define DCRN_CPC0_SRR_BASE 0x0F6 +#define DCRN_CPC0_JTAGID_BASE 0x0F7 +#define DCRN_CPC0_SPARE_BASE 0x0F8 +#define DCRN_CPC0_PCI_BASE 0x0F9 + + +#define IBM_CPM_GPT 0x80000000 /* GPT interface */ +#define IBM_CPM_PCI 0x40000000 /* PCI bridge */ +#define IBM_CPM_UIC 0x00010000 /* Universal Int Controller */ +#define IBM_CPM_CPU 0x00008000 /* processor core */ +#define IBM_CPM_EBC 0x00002000 /* EBC controller */ +#define IBM_CPM_SDRAM0 0x00004000 /* SDRAM memory controller */ +#define IBM_CPM_GPIO0 0x00001000 /* General Purpose IO */ +#define IBM_CPM_TMRCLK 0x00000400 /* CPU timers */ +#define IBM_CPM_PLB 0x00000100 /* PLB bus arbiter */ 
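+/*
+ * The IBM_CPM_* values in this block are bit masks for the 405EP clock
+ * and power management (CPM0) registers; DFLT_IBM4xx_PM, defined just
+ * below, leaves the essential units (CPU, PLB, OPB, EBC, SDRAM, UIC,
+ * PCI, DMA and the timer clocks) out of the mask so their clocks are
+ * never gated.  A minimal sketch of how setup code might apply the
+ * default mask, assuming the usual mtdcr() DCR accessor and that
+ * DCRN_CPMFR_BASE addresses the CPM force register directly:
+ *
+ *	mtdcr(DCRN_CPMFR_BASE, DFLT_IBM4xx_PM);
+ */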
+#define IBM_CPM_OPB 0x00000080 /* PLB to OPB bridge */ +#define IBM_CPM_DMA 0x00000040 /* DMA controller */ +#define IBM_CPM_IIC0 0x00000010 /* IIC interface */ +#define IBM_CPM_UART1 0x00000002 /* serial port 0 */ +#define IBM_CPM_UART0 0x00000001 /* serial port 1 */ +#define DFLT_IBM4xx_PM ~(IBM_CPM_PCI | IBM_CPM_CPU | IBM_CPM_DMA \ + | IBM_CPM_OPB | IBM_CPM_EBC \ + | IBM_CPM_SDRAM0 | IBM_CPM_PLB \ + | IBM_CPM_UIC | IBM_CPM_TMRCLK) +#define DCRN_DMA0_BASE 0x100 +#define DCRN_DMA1_BASE 0x108 +#define DCRN_DMA2_BASE 0x110 +#define DCRN_DMA3_BASE 0x118 +#define DCRNCAP_DMA_SG 1 /* have DMA scatter/gather capability */ +#define DCRN_DMASR_BASE 0x120 +#define DCRN_EBC_BASE 0x012 +#define DCRN_DCP0_BASE 0x014 +#define DCRN_MAL_BASE 0x180 +#define DCRN_OCM0_BASE 0x018 +#define DCRN_PLB0_BASE 0x084 +#define DCRN_PLLMR_BASE 0x0B0 +#define DCRN_POB0_BASE 0x0A0 +#define DCRN_SDRAM0_BASE 0x010 +#define DCRN_UIC0_BASE 0x0C0 +#define UIC0 DCRN_UIC0_BASE + +#include + +#endif /* __ASM_IBM405EP_H__ */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/platforms/85xx/Kconfig b/arch/ppc/platforms/85xx/Kconfig new file mode 100644 index 000000000..cfe29eb04 --- /dev/null +++ b/arch/ppc/platforms/85xx/Kconfig @@ -0,0 +1,44 @@ +config 85xx + bool + depends on E500 + default y + +config PPC_INDIRECT_PCI_BE + bool + depends on 85xx + default y + +menu "Freescale 85xx options" + depends on E500 + +choice + prompt "Machine Type" + depends on 85xx + default MPC8540_ADS + +config MPC8540_ADS + bool "MPC8540ADS" + help + This option enables support for the MPC 8540 ADS evaluation board. + +endchoice + +# It's often necessary to know the specific 85xx processor type. +# Fortunately, it is implied (so far) from the board type, so we +# don't need to ask more redundant questions. +config MPC8540 + bool + depends on MPC8540_ADS + default y + +config FSL_OCP + bool + depends on 85xx + default y + +config PPC_GEN550 + bool + depends on MPC8540 + default y + +endmenu diff --git a/arch/ppc/platforms/85xx/Makefile b/arch/ppc/platforms/85xx/Makefile new file mode 100644 index 000000000..92a88233f --- /dev/null +++ b/arch/ppc/platforms/85xx/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the PowerPC 85xx linux kernel. +# + +obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads_common.o mpc8540_ads.o + +obj-$(CONFIG_MPC8540) += mpc8540.o diff --git a/arch/ppc/platforms/85xx/mpc8540.c b/arch/ppc/platforms/85xx/mpc8540.c new file mode 100644 index 000000000..f05ef12d2 --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc8540.c @@ -0,0 +1,97 @@ +/* + * arch/ppc/platforms/85xx/mpc8540.c + * + * MPC8540 I/O descriptions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include + +/* These should be defined in platform code */ +extern struct ocp_gfar_data mpc85xx_tsec1_def; +extern struct ocp_gfar_data mpc85xx_tsec2_def; +extern struct ocp_gfar_data mpc85xx_fec_def; +extern struct ocp_mpc_i2c_data mpc85xx_i2c1_def; + +/* We use offsets for paddr since we do not know at compile time + * what CCSRBAR is, platform code should fix this up in + * setup_arch + * + * Only the first IRQ is given even if a device has + * multiple lines associated with ita + */ +struct ocp_def core_ocp[] = { + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_IIC, + .index = 0, + .paddr = MPC85xx_IIC1_OFFSET, + .irq = MPC85xx_IRQ_IIC1, + .pm = OCP_CPM_NA, + .additions = &mpc85xx_i2c1_def, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_16550, + .index = 0, + .paddr = MPC85xx_UART0_OFFSET, + .irq = MPC85xx_IRQ_DUART, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_16550, + .index = 1, + .paddr = MPC85xx_UART1_OFFSET, + .irq = MPC85xx_IRQ_DUART, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_GFAR, + .index = 0, + .paddr = MPC85xx_ENET1_OFFSET, + .irq = MPC85xx_IRQ_TSEC1_TX, + .pm = OCP_CPM_NA, + .additions = &mpc85xx_tsec1_def, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_GFAR, + .index = 1, + .paddr = MPC85xx_ENET2_OFFSET, + .irq = MPC85xx_IRQ_TSEC2_TX, + .pm = OCP_CPM_NA, + .additions = &mpc85xx_tsec2_def, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_GFAR, + .index = 2, + .paddr = MPC85xx_ENET3_OFFSET, + .irq = MPC85xx_IRQ_FEC, + .pm = OCP_CPM_NA, + .additions = &mpc85xx_fec_def, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_DMA, + .index = 0, + .paddr = MPC85xx_DMA_OFFSET, + .irq = MPC85xx_IRQ_DMA0, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_PERFMON, + .index = 0, + .paddr = MPC85xx_PERFMON_OFFSET, + .irq = MPC85xx_IRQ_PERFMON, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_INVALID + } +}; diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c new file mode 100644 index 000000000..aada593a6 --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc8540_ads.c @@ -0,0 +1,238 @@ +/* + * arch/ppc/platforms/85xx/mpc8540_ads.c + * + * MPC8540ADS board specific routines + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for linux/serial_core.h */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct ocp_gfar_data mpc85xx_tsec1_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC1_TX, + .interruptError = MPC85xx_IRQ_TSEC1_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC1_RX, + .interruptPHY = MPC85xx_IRQ_EXT5, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR + | GFAR_HAS_RMON + | GFAR_HAS_PHY_INTR | GFAR_HAS_COALESCE), + .phyid = 0, + .phyregidx = 0, +}; + +struct ocp_gfar_data mpc85xx_tsec2_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC2_TX, + .interruptError = MPC85xx_IRQ_TSEC2_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC2_RX, + .interruptPHY = MPC85xx_IRQ_EXT5, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR + | GFAR_HAS_RMON + | GFAR_HAS_PHY_INTR | GFAR_HAS_COALESCE), + .phyid = 1, + .phyregidx = 0, +}; + +struct ocp_gfar_data mpc85xx_fec_def = { + .interruptTransmit = MPC85xx_IRQ_FEC, + .interruptError = MPC85xx_IRQ_FEC, + .interruptReceive = MPC85xx_IRQ_FEC, + .interruptPHY = MPC85xx_IRQ_EXT5, + .flags = 0, + .phyid = 3, + .phyregidx = 0, +}; + +struct ocp_fs_i2c_data mpc85xx_i2c1_def = { + .flags = FS_I2C_SEPARATE_DFSRR, +}; + +/* ************************************************************************ + * + * Setup the architecture + * + */ +static void __init +mpc8540ads_setup_arch(void) +{ + struct ocp_def *def; + struct ocp_gfar_data *einfo; + bd_t *binfo = (bd_t *) __res; + unsigned int freq; + + /* get the core frequency */ + freq = binfo->bi_intfreq; + + if (ppc_md.progress) + ppc_md.progress("mpc8540ads_setup_arch()", 0); + + /* Set loops_per_jiffy to a half-way reasonable value, + for use until calibrate_delay gets called. 
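+   (freq / HZ is the number of core clock cycles in one jiffy; treating
+   it as loops_per_jiffy assumes roughly one delay-loop iteration per
+   cycle, which is close enough for early udelay() callers until
+   calibrate_delay() measures the real value.)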
*/ + loops_per_jiffy = freq / HZ; + +#ifdef CONFIG_PCI + /* setup PCI host bridges */ + mpc85xx_setup_hose(); +#endif + +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#endif + +#ifdef CONFIG_SERIAL_8250 + mpc85xx_early_serial_map(); +#endif + +#ifdef CONFIG_SERIAL_TEXT_DEBUG + /* Invalidate the entry we stole earlier the serial ports + * should be properly mapped */ + invalidate_tlbcam_entry(NUM_TLBCAMS - 1); +#endif + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6); + } + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6); + } + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 2); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enet2addr, 6); + } + +#ifdef CONFIG_BLK_DEV_INITRD + if (initrd_start) + ROOT_DEV = Root_RAM0; + else +#endif +#ifdef CONFIG_ROOT_NFS + ROOT_DEV = Root_NFS; +#else + ROOT_DEV = Root_HDA1; +#endif + + ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base)); +} + +/* ************************************************************************ */ +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* parse_bootinfo must always be called first */ + parse_bootinfo(find_bootinfo()); + + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *) __res, (void *) (r3 + KERNELBASE), + sizeof (bd_t)); + } +#ifdef CONFIG_SERIAL_TEXT_DEBUG + { + bd_t *binfo = (bd_t *) __res; + + /* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */ + settlbcam(NUM_TLBCAMS - 1, binfo->bi_immr_base, + binfo->bi_immr_base, MPC85xx_CCSRBAR_SIZE, _PAGE_IO, 0); + } +#endif + +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. 
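+   (r6 and r7 hold the physical start and end of the command line passed
+   in by the bootloader; the write of 0 below terminates the string at r7
+   before it is copied into cmd_line.)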
*/ + + if (r6) { + *(char *) (r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *) (r6 + KERNELBASE)); + } + + /* setup the PowerPC module struct */ + ppc_md.setup_arch = mpc8540ads_setup_arch; + ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo; + + ppc_md.init_IRQ = mpc85xx_ads_init_IRQ; + ppc_md.get_irq = openpic_get_irq; + + ppc_md.restart = mpc85xx_restart; + ppc_md.power_off = mpc85xx_power_off; + ppc_md.halt = mpc85xx_halt; + + ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory; + + ppc_md.time_init = NULL; + ppc_md.set_rtc_time = NULL; + ppc_md.get_rtc_time = NULL; + ppc_md.calibrate_decr = mpc85xx_calibrate_decr; + +#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG) + ppc_md.progress = gen550_progress; +#endif /* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */ + + if (ppc_md.progress) + ppc_md.progress("mpc8540ads_init(): exit", 0); + + return; +} diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.h b/arch/ppc/platforms/85xx/mpc8540_ads.h new file mode 100644 index 000000000..9056361f7 --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc8540_ads.h @@ -0,0 +1,30 @@ +/* + * arch/ppc/platforms/85xx/mpc8540_ads.h + * + * MPC8540ADS board definitions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MACH_MPC8540ADS_H__ +#define __MACH_MPC8540ADS_H__ + +#include +#include +#include +#include +#include + +#define SERIAL_PORT_DFNS \ + STD_UART_OP(0) \ + STD_UART_OP(1) + +#endif /* __MACH_MPC8540ADS_H__ */ diff --git a/arch/ppc/platforms/85xx/mpc8555.c b/arch/ppc/platforms/85xx/mpc8555.c new file mode 100644 index 000000000..942758480 --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc8555.c @@ -0,0 +1,88 @@ +/* + * arch/ppc/platform/85xx/mpc8555.c + * + * MPC8555 I/O descriptions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include + +/* These should be defined in platform code */ +extern struct ocp_gfar_data mpc85xx_tsec1_def; +extern struct ocp_gfar_data mpc85xx_tsec2_def; +extern struct ocp_mpc_i2c_data mpc85xx_i2c1_def; + +/* We use offsets for paddr since we do not know at compile time + * what CCSRBAR is, platform code should fix this up in + * setup_arch + * + * Only the first IRQ is given even if a device has + * multiple lines associated with ita + */ +struct ocp_def core_ocp[] = { + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_IIC, + .index = 0, + .paddr = MPC85xx_IIC1_OFFSET, + .irq = MPC85xx_IRQ_IIC1, + .pm = OCP_CPM_NA, + .additions = &mpc85xx_i2c1_def, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_16550, + .index = 0, + .paddr = MPC85xx_UART0_OFFSET, + .irq = MPC85xx_IRQ_DUART, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_16550, + .index = 1, + .paddr = MPC85xx_UART1_OFFSET, + .irq = MPC85xx_IRQ_DUART, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_GFAR, + .index = 0, + .paddr = MPC85xx_ENET1_OFFSET, + .irq = MPC85xx_IRQ_TSEC1_TX, + .pm = OCP_CPM_NA, + .additions = &mpc85xx_tsec1_def, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_GFAR, + .index = 1, + .paddr = MPC85xx_ENET2_OFFSET, + .irq = MPC85xx_IRQ_TSEC2_TX, + .pm = OCP_CPM_NA, + .additions = &mpc85xx_tsec2_def, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_DMA, + .index = 0, + .paddr = MPC85xx_DMA_OFFSET, + .irq = MPC85xx_IRQ_DMA0, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_PERFMON, + .index = 0, + .paddr = MPC85xx_PERFMON_OFFSET, + .irq = MPC85xx_IRQ_PERFMON, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_INVALID + } +}; diff --git a/arch/ppc/platforms/85xx/mpc8555_cds.h b/arch/ppc/platforms/85xx/mpc8555_cds.h new file mode 100644 index 000000000..566e0e1aa --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc8555_cds.h @@ -0,0 +1,26 @@ +/* + * arch/ppc/platforms/mpc8555_cds.h + * + * MPC8555CDS board definitions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MACH_MPC8555CDS_H__ +#define __MACH_MPC8555CDS_H__ + +#include +#include +#include + +#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET) + +#endif /* __MACH_MPC8555CDS_H__ */ diff --git a/arch/ppc/platforms/85xx/mpc8560.c b/arch/ppc/platforms/85xx/mpc8560.c new file mode 100644 index 000000000..c254299bd --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc8560.c @@ -0,0 +1,74 @@ +/* + * arch/ppc/platforms/85xx/mpc8560.c + * + * MPC8560 I/O descriptions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include + +/* These should be defined in platform code */ +extern struct ocp_gfar_data mpc85xx_tsec1_def; +extern struct ocp_gfar_data mpc85xx_tsec2_def; +extern struct ocp_mpc_i2c_data mpc85xx_i2c1_def; + +/* We use offsets for paddr since we do not know at compile time + * what CCSRBAR is, platform code should fix this up in + * setup_arch + * + * Only the first IRQ is given even if a device has + * multiple lines associated with ita + */ +struct ocp_def core_ocp[] = { + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_IIC, + .index = 0, + .paddr = MPC85xx_IIC1_OFFSET, + .irq = MPC85xx_IRQ_IIC1, + .pm = OCP_CPM_NA, + .additions = &mpc85xx_i2c1_def, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_GFAR, + .index = 0, + .paddr = MPC85xx_ENET1_OFFSET, + .irq = MPC85xx_IRQ_TSEC1_TX, + .pm = OCP_CPM_NA, + .additions = &mpc85xx_tsec1_def, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_GFAR, + .index = 1, + .paddr = MPC85xx_ENET2_OFFSET, + .irq = MPC85xx_IRQ_TSEC2_TX, + .pm = OCP_CPM_NA, + .additions = &mpc85xx_tsec2_def, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_DMA, + .index = 0, + .paddr = MPC85xx_DMA_OFFSET, + .irq = MPC85xx_IRQ_DMA0, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_PERFMON, + .index = 0, + .paddr = MPC85xx_PERFMON_OFFSET, + .irq = MPC85xx_IRQ_PERFMON, + .pm = OCP_CPM_NA, + }, + { .vendor = OCP_VENDOR_INVALID + } +}; diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.c b/arch/ppc/platforms/85xx/mpc8560_ads.c new file mode 100644 index 000000000..0cb2c3458 --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc8560_ads.c @@ -0,0 +1,241 @@ +/* + * arch/ppc/platforms/85xx/mpc8560_ads.c + * + * MPC8560ADS board specific routines + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for linux/serial_core.h */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +extern void cpm2_reset(void); + +struct ocp_gfar_data mpc85xx_tsec1_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC1_TX, + .interruptError = MPC85xx_IRQ_TSEC1_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC1_RX, + .interruptPHY = MPC85xx_IRQ_EXT5, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR + | GFAR_HAS_RMON | GFAR_HAS_COALESCE + | GFAR_HAS_PHY_INTR), + .phyid = 0, + .phyregidx = 0, +}; + +struct ocp_gfar_data mpc85xx_tsec2_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC2_TX, + .interruptError = MPC85xx_IRQ_TSEC2_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC2_RX, + .interruptPHY = MPC85xx_IRQ_EXT5, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR + | GFAR_HAS_RMON | GFAR_HAS_COALESCE + | GFAR_HAS_PHY_INTR), + .phyid = 1, + .phyregidx = 0, +}; + +struct ocp_fs_i2c_data mpc85xx_i2c1_def = { + .flags = FS_I2C_SEPARATE_DFSRR, +}; + +/* ************************************************************************ + * + * Setup the architecture + * + */ + +static void __init +mpc8560ads_setup_arch(void) +{ + struct ocp_def *def; + struct ocp_gfar_data *einfo; + bd_t *binfo = (bd_t *) __res; + unsigned int freq; + + cpm2_reset(); + + /* get the core frequency */ + freq = binfo->bi_intfreq; + + if (ppc_md.progress) + ppc_md.progress("mpc8560ads_setup_arch()", 0); + + /* Set loops_per_jiffy to a half-way reasonable value, + for use until calibrate_delay gets called. */ + loops_per_jiffy = freq / HZ; + +#ifdef CONFIG_PCI + /* setup PCI host bridges */ + mpc85xx_setup_hose(); +#endif + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6); + } + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6); + } + +#ifdef CONFIG_BLK_DEV_INITRD + if (initrd_start) + ROOT_DEV = Root_RAM0; + else +#endif +#ifdef CONFIG_ROOT_NFS + ROOT_DEV = Root_NFS; +#else + ROOT_DEV = Root_HDA1; +#endif + + ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base)); +} + +static irqreturn_t cpm2_cascade(int irq, void *dev_id, struct pt_regs *regs) +{ + while ((irq = cpm2_get_irq(regs)) >= 0) { + ppc_irq_dispatch_handler(regs, irq); + } + return IRQ_HANDLED; +} + +static void __init +mpc8560_ads_init_IRQ(void) +{ + int i; + volatile cpm2_map_t *immap = cpm2_immr; + + /* Setup OpenPIC */ + mpc85xx_ads_init_IRQ(); + + /* disable all CPM interupts */ + immap->im_intctl.ic_simrh = 0x0; + immap->im_intctl.ic_simrl = 0x0; + + for (i = CPM_IRQ_OFFSET; i < (NR_CPM_INTS + CPM_IRQ_OFFSET); i++) + irq_desc[i].handler = &cpm2_pic; + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. 
+ */ + immap->im_intctl.ic_sicr = 0; + immap->im_intctl.ic_scprrh = 0x05309770; + immap->im_intctl.ic_scprrl = 0x05309770; + + request_irq(MPC85xx_IRQ_CPM, cpm2_cascade, SA_INTERRUPT, "cpm2_cascade", NULL); + + return; +} + + + +/* ************************************************************************ */ +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* parse_bootinfo must always be called first */ + parse_bootinfo(find_bootinfo()); + + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *) __res, (void *) (r3 + KERNELBASE), + sizeof (bd_t)); + + } +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. */ + + if (r6) { + *(char *) (r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *) (r6 + KERNELBASE)); + } + + /* setup the PowerPC module struct */ + ppc_md.setup_arch = mpc8560ads_setup_arch; + ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo; + + ppc_md.init_IRQ = mpc8560_ads_init_IRQ; + ppc_md.get_irq = openpic_get_irq; + + ppc_md.restart = mpc85xx_restart; + ppc_md.power_off = mpc85xx_power_off; + ppc_md.halt = mpc85xx_halt; + + ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory; + + ppc_md.time_init = NULL; + ppc_md.set_rtc_time = NULL; + ppc_md.get_rtc_time = NULL; + ppc_md.calibrate_decr = mpc85xx_calibrate_decr; + + if (ppc_md.progress) + ppc_md.progress("mpc8560ads_init(): exit", 0); + + return; +} diff --git a/arch/ppc/platforms/85xx/mpc8560_ads.h b/arch/ppc/platforms/85xx/mpc8560_ads.h new file mode 100644 index 000000000..7df885d73 --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc8560_ads.h @@ -0,0 +1,27 @@ +/* + * arch/ppc/platforms/mpc8560_ads.h + * + * MPC8540ADS board definitions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MACH_MPC8560ADS_H +#define __MACH_MPC8560ADS_H + +#include +#include +#include + +#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET) +#define PHY_INTERRUPT MPC85xx_IRQ_EXT7 + +#endif /* __MACH_MPC8560ADS_H */ diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.c b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c new file mode 100644 index 000000000..7f0fabc5d --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.c @@ -0,0 +1,237 @@ +/* + * arch/ppc/platforms/85xx/mpc85xx_ads_common.c + * + * MPC85xx ADS board common routines + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#ifndef CONFIG_PCI +unsigned long isa_io_base = 0; +unsigned long isa_mem_base = 0; +#endif + +extern unsigned long total_memory; /* in mm/init */ + +unsigned char __res[sizeof (bd_t)]; + +/* Internal interrupts are all Level Sensitive, and Positive Polarity */ + +static u_char mpc85xx_ads_openpic_initsenses[] __initdata = { + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0: L2 Cache */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1: ECM */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8: PCI/PCI-X */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9: RIO Inbound Port Write Error */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10: RIO Doorbell Inbound */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11: RIO Outbound Message */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12: RIO Inbound Message */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 0 Transmit */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 0 Receive */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 15: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 0 Receive/Transmit Error */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 1 Transmit */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 1 Receive */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 1 Receive/Transmit Error */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 25: Fast Ethernet */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26: DUART */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30: CPM */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */ + 0x0, /* External 0: */ +#if defined(CONFIG_PCI) + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 1: PCI slot 0 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 2: PCI slot 1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 3: PCI slot 2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 4: PCI slot 3 */ +#else + 0x0, /* External 1: */ + 0x0, /* External 2: */ + 0x0, /* External 3: */ + 0x0, /* External 4: */ +#endif + 
(IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */ + 0x0, /* External 6: */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 7: PHY */ + 0x0, /* External 8: */ + 0x0, /* External 9: */ + 0x0, /* External 10: */ + 0x0, /* External 11: */ +}; + +/* ************************************************************************ */ +int +mpc85xx_ads_show_cpuinfo(struct seq_file *m) +{ + uint pvid, svid, phid1; + uint memsize = total_memory; + bd_t *binfo = (bd_t *) __res; + unsigned int freq; + + /* get the core frequency */ + freq = binfo->bi_intfreq; + + pvid = mfspr(PVR); + svid = mfspr(SVR); + + seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n"); + + switch (svid & 0xffff0000) { + case SVR_8540: + seq_printf(m, "Machine\t\t: mpc8540ads\n"); + break; + case SVR_8560: + seq_printf(m, "Machine\t\t: mpc8560ads\n"); + break; + default: + seq_printf(m, "Machine\t\t: unknown\n"); + break; + } + seq_printf(m, "bus freq\t: %u.%.6u MHz\n", freq / 1000000, + freq % 1000000); + seq_printf(m, "PVR\t\t: 0x%x\n", pvid); + seq_printf(m, "SVR\t\t: 0x%x\n", svid); + + /* Display cpu Pll setting */ + phid1 = mfspr(HID1); + seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); + + /* Display the amount of memory */ + seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); + + return 0; +} + +void __init +mpc85xx_ads_init_IRQ(void) +{ + bd_t *binfo = (bd_t *) __res; + /* Determine the Physical Address of the OpenPIC regs */ + phys_addr_t OpenPIC_PAddr = + binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET; + OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE); + OpenPIC_InitSenses = mpc85xx_ads_openpic_initsenses; + OpenPIC_NumInitSenses = sizeof (mpc85xx_ads_openpic_initsenses); + + /* Skip reserved space and internal sources */ + openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200); + /* Map PIC IRQs 0-11 */ + openpic_set_sources(32, 12, OpenPIC_Addr + 0x10000); + + /* we let openpic interrupts starting from an offset, to + * leave space for cascading interrupts underneath. 
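+ * (The MPC8560 ADS setup elsewhere in this patch, for example, calls
+ * this routine and then cascades the CPM2 interrupt controller off
+ * MPC85xx_IRQ_CPM.)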
+ */ + openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET); + + return; +} + +#ifdef CONFIG_PCI +/* + * interrupt routing + */ + +int +mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) +{ + static char pci_irq_table[][4] = + /* + * This is little evil, but works around the fact + * that revA boards have IDSEL starting at 18 + * and others boards (older) start at 12 + * + * PCI IDSEL/INTPIN->INTLINE + * A B C D + */ + { + {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 2 */ + {PIRQD, PIRQA, PIRQB, PIRQC}, + {PIRQC, PIRQD, PIRQA, PIRQB}, + {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 5 */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 12 */ + {PIRQD, PIRQA, PIRQB, PIRQC}, + {PIRQC, PIRQD, PIRQA, PIRQB}, + {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 15 */ + {0, 0, 0, 0}, /* -- */ + {0, 0, 0, 0}, /* -- */ + {PIRQA, PIRQB, PIRQC, PIRQD}, /* IDSEL 18 */ + {PIRQD, PIRQA, PIRQB, PIRQC}, + {PIRQC, PIRQD, PIRQA, PIRQB}, + {PIRQB, PIRQC, PIRQD, PIRQA}, /* IDSEL 21 */ + }; + + const long min_idsel = 2, max_idsel = 21, irqs_per_slot = 4; + return PCI_IRQ_TABLE_LOOKUP; +} + +int +mpc85xx_exclude_device(u_char bus, u_char devfn) +{ + if (bus == 0 && PCI_SLOT(devfn) == 0) + return PCIBIOS_DEVICE_NOT_FOUND; + else + return PCIBIOS_SUCCESSFUL; +} + +#endif /* CONFIG_PCI */ diff --git a/arch/ppc/platforms/85xx/mpc85xx_ads_common.h b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h new file mode 100644 index 000000000..3875e839c --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc85xx_ads_common.h @@ -0,0 +1,50 @@ +/* + * arch/ppc/platforms/85xx/mpc85xx_ads_common.h + * + * MPC85XX ADS common board definitions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MACH_MPC85XX_ADS_H__ +#define __MACH_MPC85XX_ADS_H__ + +#include +#include +#include +#include + +#define BOARD_CCSRBAR ((uint)0xe0000000) +#define BCSR_ADDR ((uint)0xf8000000) +#define BCSR_SIZE ((uint)(32 * 1024)) + +extern int mpc85xx_ads_show_cpuinfo(struct seq_file *m); +extern void mpc85xx_ads_init_IRQ(void) __init; +extern void mpc85xx_ads_map_io(void) __init; + +/* PCI interrupt controller */ +#define PIRQA MPC85xx_IRQ_EXT1 +#define PIRQB MPC85xx_IRQ_EXT2 +#define PIRQC MPC85xx_IRQ_EXT3 +#define PIRQD MPC85xx_IRQ_EXT4 + +#define MPC85XX_PCI1_LOWER_IO 0x00000000 +#define MPC85XX_PCI1_UPPER_IO 0x00ffffff + +#define MPC85XX_PCI1_LOWER_MEM 0x80000000 +#define MPC85XX_PCI1_UPPER_MEM 0x9fffffff + +#define MPC85XX_PCI1_IO_BASE 0xe2000000 +#define MPC85XX_PCI1_MEM_OFFSET 0x00000000 + +#define MPC85XX_PCI1_IO_SIZE 0x01000000 + +#endif /* __MACH_MPC85XX_ADS_H__ */ diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.c b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c new file mode 100644 index 000000000..c7e53e39c --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.c @@ -0,0 +1,473 @@ +/* + * arch/ppc/platform/85xx/mpc85xx_cds_common.c + * + * MPC85xx CDS board specific routines + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +#ifndef CONFIG_PCI +unsigned long isa_io_base = 0; +unsigned long isa_mem_base = 0; +#endif + +extern unsigned long total_memory; /* in mm/init */ + +unsigned char __res[sizeof (bd_t)]; + +static int cds_pci_slot = 2; +static volatile u8 * cadmus; + +/* Internal interrupts are all Level Sensitive, and Positive Polarity */ + +static u_char mpc85xx_cds_openpic_initsenses[] __initdata = { + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 0: L2 Cache */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 1: ECM */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 8: PCI/PCI-X */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 9: RIO Inbound Port Write Error */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 10: RIO Doorbell Inbound */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 11: RIO Outbound Message */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 12: RIO Inbound Message */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 0 Transmit */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 0 Receive */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), 
/* Internal 15: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 16: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 17: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 0 Receive/Transmit Error */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 1 Transmit */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 1 Receive */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 21: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 22: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 23: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 1 Receive/Transmit Error */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 25: Fast Ethernet */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 26: DUART */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 30: CPM */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */ +#if defined(CONFIG_PCI) + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 0: PCI1 slot */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 1: PCI1 slot */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 2: PCI1 slot */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 3: PCI1 slot */ +#else + 0x0, /* External 0: */ + 0x0, /* External 1: */ + 0x0, /* External 2: */ + 0x0, /* External 3: */ +#endif + 0x0, /* External 4: */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PHY */ + 0x0, /* External 6: */ + 0x0, /* External 7: */ + 0x0, /* External 8: */ + 0x0, /* External 9: */ + 0x0, /* External 10: */ +#if defined(CONFIG_85xx_PCI2) && defined(CONFIG_PCI) + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 11: PCI2 slot 0 */ +#else + 0x0, /* External 11: */ +#endif +}; + +struct ocp_gfar_data mpc85xx_tsec1_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC1_TX, + .interruptError = MPC85xx_IRQ_TSEC1_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC1_RX, + .interruptPHY = MPC85xx_IRQ_EXT5, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR | + GFAR_HAS_PHY_INTR), + .phyid = 0, + .phyregidx = 0, +}; + +struct ocp_gfar_data mpc85xx_tsec2_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC2_TX, + .interruptError = MPC85xx_IRQ_TSEC2_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC2_RX, + .interruptPHY = MPC85xx_IRQ_EXT5, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR | + GFAR_HAS_PHY_INTR), + .phyid = 1, + .phyregidx = 0, +}; + +struct ocp_fs_i2c_data mpc85xx_i2c1_def = { + .flags = FS_I2C_SEPARATE_DFSRR, +}; + +/* ************************************************************************ */ +int +mpc85xx_cds_show_cpuinfo(struct seq_file *m) +{ + uint pvid, svid, phid1; + uint memsize = total_memory; + bd_t *binfo = (bd_t *) __res; + unsigned int freq; + + /* get the core frequency */ + freq = binfo->bi_intfreq; + + pvid = mfspr(PVR); + svid = mfspr(SVR); + + seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n"); + seq_printf(m, "Machine\t\t: CDS (%x)\n", cadmus[CM_VER]); + seq_printf(m, "bus freq\t: %u.%.6u MHz\n", freq / 1000000, + freq % 1000000); + seq_printf(m, "PVR\t\t: 0x%x\n", pvid); + seq_printf(m, "SVR\t\t: 0x%x\n", svid); + + /* Display cpu Pll setting */ + phid1 = mfspr(HID1); + seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 
0x3f)); + + /* Display the amount of memory */ + seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); + + return 0; +} + +#ifdef CONFIG_CPM2 +static void cpm2_cascade(int irq, void *dev_id, struct pt_regs *regs) +{ + while((irq = cpm2_get_irq(regs)) >= 0) + { + ppc_irq_dispatch_handler(regs,irq); + } +} +#endif /* CONFIG_CPM2 */ + +void __init +mpc85xx_cds_init_IRQ(void) +{ + bd_t *binfo = (bd_t *) __res; +#ifdef CONFIG_CPM2 + volatile cpm2_map_t *immap = cpm2_immr; + int i; +#endif + + /* Determine the Physical Address of the OpenPIC regs */ + phys_addr_t OpenPIC_PAddr = binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET; + OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE); + OpenPIC_InitSenses = mpc85xx_cds_openpic_initsenses; + OpenPIC_NumInitSenses = sizeof (mpc85xx_cds_openpic_initsenses); + + /* Skip reserved space and internal sources */ + openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200); + /* Map PIC IRQs 0-11 */ + openpic_set_sources(32, 12, OpenPIC_Addr + 0x10000); + + /* we let openpic interrupts starting from an offset, to + * leave space for cascading interrupts underneath. + */ + openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET); + +#ifdef CONFIG_CPM2 + /* disable all CPM interupts */ + immap->im_intctl.ic_simrh = 0x0; + immap->im_intctl.ic_simrl = 0x0; + + for (i = CPM_IRQ_OFFSET; i < (NR_CPM_INTS + CPM_IRQ_OFFSET); i++) + irq_desc[i].handler = &cpm2_pic; + + /* Initialize the default interrupt mapping priorities, + * in case the boot rom changed something on us. + */ + immap->im_intctl.ic_sicr = 0; + immap->im_intctl.ic_scprrh = 0x05309770; + immap->im_intctl.ic_scprrl = 0x05309770; + + request_irq(MPC85xx_IRQ_CPM, cpm2_cascade, SA_INTERRUPT, "cpm2_cascade", NULL); +#endif + + return; +} + +#ifdef CONFIG_PCI +/* + * interrupt routing + */ +int +mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin) +{ + struct pci_controller *hose = pci_bus_to_hose(dev->bus->number); + + if (!hose->index) + { + /* Handle PCI1 interrupts */ + char pci_irq_table[][4] = + /* + * PCI IDSEL/INTPIN->INTLINE + * A B C D + */ + + /* Note IRQ assignment for slots is based on which slot the elysium is + * in -- in this setup elysium is in slot #2 (this PIRQA as first + * interrupt on slot */ + { + { 0, 1, 2, 3 }, /* 16 - PMC */ + { 3, 0, 0, 0 }, /* 17 P2P (Tsi320) */ + { 0, 1, 2, 3 }, /* 18 - Slot 1 */ + { 1, 2, 3, 0 }, /* 19 - Slot 2 */ + { 2, 3, 0, 1 }, /* 20 - Slot 3 */ + { 3, 0, 1, 2 }, /* 21 - Slot 4 */ + }; + + const long min_idsel = 16, max_idsel = 21, irqs_per_slot = 4; + int i, j; + + for (i = 0; i < 6; i++) + for (j = 0; j < 4; j++) + pci_irq_table[i][j] = + ((pci_irq_table[i][j] + 5 - + cds_pci_slot) & 0x3) + PIRQ0A; + + return PCI_IRQ_TABLE_LOOKUP; + } else { + /* Handle PCI2 interrupts (if we have one) */ + char pci_irq_table[][4] = + { + /* + * We only have one slot and one interrupt + * going to PIRQA - PIRQD */ + { PIRQ1A, PIRQ1A, PIRQ1A, PIRQ1A }, /* 21 - slot 0 */ + }; + + const long min_idsel = 21, max_idsel = 21, irqs_per_slot = 4; + + return PCI_IRQ_TABLE_LOOKUP; + } +} + +#define ARCADIA_HOST_BRIDGE_IDSEL 17 +#define ARCADIA_2ND_BRIDGE_IDSEL 3 + +int +mpc85xx_exclude_device(u_char bus, u_char devfn) +{ + if (bus == 0 && PCI_SLOT(devfn) == 0) + return PCIBIOS_DEVICE_NOT_FOUND; +#if CONFIG_85xx_PCI2 + /* With the current code we know PCI2 will be bus 2, however this may + * not be guarnteed */ + if (bus == 2 && PCI_SLOT(devfn) == 0) + return PCIBIOS_DEVICE_NOT_FOUND; +#endif + /* We explicitly do not go past the Tundra 320 Bridge */ + if (bus == 1) + 
return PCIBIOS_DEVICE_NOT_FOUND; + if ((bus == 0) && (PCI_SLOT(devfn) == ARCADIA_2ND_BRIDGE_IDSEL)) + return PCIBIOS_DEVICE_NOT_FOUND; + else + return PCIBIOS_SUCCESSFUL; +} +#endif /* CONFIG_PCI */ + +/* ************************************************************************ + * + * Setup the architecture + * + */ +static void __init +mpc85xx_cds_setup_arch(void) +{ + struct ocp_def *def; + struct ocp_gfar_data *einfo; + bd_t *binfo = (bd_t *) __res; + unsigned int freq; + + /* get the core frequency */ + freq = binfo->bi_intfreq; + + printk("mpc85xx_cds_setup_arch\n"); + +#ifdef CONFIG_CPM2 + cpm2_reset(); +#endif + + cadmus = ioremap(CADMUS_BASE, CADMUS_SIZE); + cds_pci_slot = ((cadmus[CM_CSR] >> 6) & 0x3) + 1; + printk("CDS Version = %x in PCI slot %d\n", cadmus[CM_VER], cds_pci_slot); + + /* Set loops_per_jiffy to a half-way reasonable value, + for use until calibrate_delay gets called. */ + loops_per_jiffy = freq / HZ; + +#ifdef CONFIG_PCI + /* setup PCI host bridges */ + mpc85xx_setup_hose(); +#endif + +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#endif + +#ifdef CONFIG_SERIAL_8250 + mpc85xx_early_serial_map(); +#endif + +#ifdef CONFIG_SERIAL_TEXT_DEBUG + /* Invalidate the entry we stole earlier the serial ports + * should be properly mapped */ + invalidate_tlbcam_entry(NUM_TLBCAMS - 1); +#endif + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6); + } + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6); + } + +#ifdef CONFIG_BLK_DEV_INITRD + if (initrd_start) + ROOT_DEV = Root_RAM0; + else +#endif +#ifdef CONFIG_ROOT_NFS + ROOT_DEV = Root_NFS; +#else + ROOT_DEV = Root_HDA1; +#endif + + ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base)); +} + +/* ************************************************************************ */ +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* parse_bootinfo must always be called first */ + parse_bootinfo(find_bootinfo()); + + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *) __res, (void *) (r3 + KERNELBASE), + sizeof (bd_t)); + + } +#ifdef CONFIG_SERIAL_TEXT_DEBUG + { + bd_t *binfo = (bd_t *) __res; + + /* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */ + settlbcam(NUM_TLBCAMS - 1, binfo->bi_immr_base, + binfo->bi_immr_base, MPC85xx_CCSRBAR_SIZE, _PAGE_IO, 0); + + } +#endif + +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. 
*/ + + if (r6) { + *(char *) (r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *) (r6 + KERNELBASE)); + } + + /* setup the PowerPC module struct */ + ppc_md.setup_arch = mpc85xx_cds_setup_arch; + ppc_md.show_cpuinfo = mpc85xx_cds_show_cpuinfo; + + ppc_md.init_IRQ = mpc85xx_cds_init_IRQ; + ppc_md.get_irq = openpic_get_irq; + + ppc_md.restart = mpc85xx_restart; + ppc_md.power_off = mpc85xx_power_off; + ppc_md.halt = mpc85xx_halt; + + ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory; + + ppc_md.time_init = NULL; + ppc_md.set_rtc_time = NULL; + ppc_md.get_rtc_time = NULL; + ppc_md.calibrate_decr = mpc85xx_calibrate_decr; + +#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG) + ppc_md.progress = gen550_progress; +#endif /* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */ + + if (ppc_md.progress) + ppc_md.progress("mpc85xx_cds_init(): exit", 0); + + return; +} diff --git a/arch/ppc/platforms/85xx/mpc85xx_cds_common.h b/arch/ppc/platforms/85xx/mpc85xx_cds_common.h new file mode 100644 index 000000000..a7290ed83 --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc85xx_cds_common.h @@ -0,0 +1,78 @@ +/* + * arch/ppc/platforms/85xx/mpc85xx_cds_common.h + * + * MPC85xx CDS board definitions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MACH_MPC85XX_CDS_H__ +#define __MACH_MPC85XX_CDS_H__ + +#include +#include +#include +#include +#include + +#define BOARD_CCSRBAR ((uint)0xe0000000) +#define CCSRBAR_SIZE ((uint)1024*1024) + +/* CADMUS info */ +#define CADMUS_BASE (0xf8004000) +#define CADMUS_SIZE (256) +#define CM_VER (0) +#define CM_CSR (1) +#define CM_RST (2) + +/* PCI config */ +#define PCI1_CFG_ADDR_OFFSET (0x8000) +#define PCI1_CFG_DATA_OFFSET (0x8004) + +#define PCI2_CFG_ADDR_OFFSET (0x9000) +#define PCI2_CFG_DATA_OFFSET (0x9004) + +/* PCI interrupt controller */ +#define PIRQ0A MPC85xx_IRQ_EXT0 +#define PIRQ0B MPC85xx_IRQ_EXT1 +#define PIRQ0C MPC85xx_IRQ_EXT2 +#define PIRQ0D MPC85xx_IRQ_EXT3 +#define PIRQ1A MPC85xx_IRQ_EXT11 + +/* PCI 1 memory map */ +#define MPC85XX_PCI1_LOWER_IO 0x00000000 +#define MPC85XX_PCI1_UPPER_IO 0x00ffffff + +#define MPC85XX_PCI1_LOWER_MEM 0x80000000 +#define MPC85XX_PCI1_UPPER_MEM 0x9fffffff + +#define MPC85XX_PCI1_IO_BASE 0xe2000000 +#define MPC85XX_PCI1_MEM_OFFSET 0x00000000 + +#define MPC85XX_PCI1_IO_SIZE 0x01000000 + +/* PCI 2 memory map */ +#define MPC85XX_PCI2_LOWER_IO 0x01000000 +#define MPC85XX_PCI2_UPPER_IO 0x01ffffff + +#define MPC85XX_PCI2_LOWER_MEM 0xa0000000 +#define MPC85XX_PCI2_UPPER_MEM 0xbfffffff + +#define MPC85XX_PCI2_IO_BASE 0xe3000000 +#define MPC85XX_PCI2_MEM_OFFSET 0x00000000 + +#define MPC85XX_PCI2_IO_SIZE 0x01000000 + +#define SERIAL_PORT_DFNS \ + STD_UART_OP(0) \ + STD_UART_OP(1) + +#endif /* __MACH_MPC85XX_CDS_H__ */ diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c new file mode 100644 index 000000000..c32d662fe --- /dev/null +++ b/arch/ppc/platforms/85xx/sbc8560.c @@ -0,0 +1,249 @@ +/* + * arch/ppc/platforms/85xx/sbc8560.c + * + * Wind River SBC8560 board specific routines + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for linux/serial_core.h */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct ocp_gfar_data mpc85xx_tsec1_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC1_TX, + .interruptError = MPC85xx_IRQ_TSEC1_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC1_RX, + .interruptPHY = MPC85xx_IRQ_EXT6, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR | GFAR_HAS_PHY_INTR), + .phyid = 25, + .phyregidx = 0, +}; + +struct ocp_gfar_data mpc85xx_tsec2_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC2_TX, + .interruptError = MPC85xx_IRQ_TSEC2_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC2_RX, + .interruptPHY = MPC85xx_IRQ_EXT7, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR | GFAR_HAS_PHY_INTR), + .phyid = 26, + .phyregidx = 0, +}; + +struct ocp_fs_i2c_data mpc85xx_i2c1_def = { + .flags = FS_I2C_SEPARATE_DFSRR, +}; + + +#ifdef CONFIG_SERIAL_8250 +static void __init +sbc8560_early_serial_map(void) +{ + struct uart_port uart_req; + + /* Setup serial port access */ + memset(&uart_req, 0, sizeof (uart_req)); + uart_req.irq = MPC85xx_IRQ_EXT9; + uart_req.flags = STD_COM_FLAGS; + uart_req.uartclk = BASE_BAUD * 16; + uart_req.iotype = SERIAL_IO_MEM; + uart_req.mapbase = UARTA_ADDR; + uart_req.membase = ioremap(uart_req.mapbase, MPC85xx_UART0_SIZE); + +#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB) + gen550_init(0, &uart_req); +#endif + + if (early_serial_setup(&uart_req) != 0) + printk("Early serial init of port 0 failed\n"); + + /* Assume early_serial_setup() doesn't modify uart_req */ + uart_req.line = 1; + uart_req.mapbase = UARTB_ADDR; + uart_req.membase = ioremap(uart_req.mapbase, MPC85xx_UART1_SIZE); + uart_req.irq = MPC85xx_IRQ_EXT10; + +#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB) + gen550_init(1, &uart_req); +#endif + + if (early_serial_setup(&uart_req) != 0) + printk("Early serial init of port 1 failed\n"); +} +#endif + +/* ************************************************************************ + * + * Setup the architecture + * + */ +static void __init +sbc8560_setup_arch(void) +{ + struct ocp_def *def; + struct ocp_gfar_data *einfo; + bd_t *binfo = (bd_t *) __res; + unsigned int freq; + + /* get the core frequency */ + freq = binfo->bi_intfreq; + + if (ppc_md.progress) + ppc_md.progress("sbc8560_setup_arch()", 0); + + /* Set loops_per_jiffy to a half-way reasonable value, + for use until calibrate_delay gets called. 
*/ + loops_per_jiffy = freq / HZ; + +#ifdef CONFIG_PCI + /* setup PCI host bridges */ + mpc85xx_setup_hose(); +#endif +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#endif +#ifdef CONFIG_SERIAL_8250 + sbc8560_early_serial_map(); +#endif +#ifdef CONFIG_SERIAL_TEXT_DEBUG + /* Invalidate the entry we stole earlier the serial ports + * should be properly mapped */ + invalidate_tlbcam_entry(NUM_TLBCAMS - 1); +#endif + + /* Set up MAC addresses for the Ethernet devices */ + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6); + } + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6); + } + +#ifdef CONFIG_BLK_DEV_INITRD + if (initrd_start) + ROOT_DEV = Root_RAM0; + else +#endif +#ifdef CONFIG_ROOT_NFS + ROOT_DEV = Root_NFS; +#else + ROOT_DEV = Root_HDA1; +#endif + + ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base)); +} + +/* ************************************************************************ */ +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* parse_bootinfo must always be called first */ + parse_bootinfo(find_bootinfo()); + + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *) __res, (void *) (r3 + KERNELBASE), + sizeof (bd_t)); + } + +#ifdef CONFIG_SERIAL_TEXT_DEBUG + /* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */ + settlbcam(NUM_TLBCAMS - 1, UARTA_ADDR, + UARTA_ADDR, 0x1000, _PAGE_IO, 0); +#endif + +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. */ + + if (r6) { + *(char *) (r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *) (r6 + KERNELBASE)); + } + + /* setup the PowerPC module struct */ + ppc_md.setup_arch = sbc8560_setup_arch; + ppc_md.show_cpuinfo = sbc8560_show_cpuinfo; + + ppc_md.init_IRQ = sbc8560_init_IRQ; + ppc_md.get_irq = openpic_get_irq; + + ppc_md.restart = mpc85xx_restart; + ppc_md.power_off = mpc85xx_power_off; + ppc_md.halt = mpc85xx_halt; + + ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory; + + ppc_md.time_init = NULL; + ppc_md.set_rtc_time = NULL; + ppc_md.get_rtc_time = NULL; + ppc_md.calibrate_decr = mpc85xx_calibrate_decr; + +#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG) + ppc_md.progress = gen550_progress; +#endif /* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */ + + if (ppc_md.progress) + ppc_md.progress("sbc8560_init(): exit", 0); +} diff --git a/arch/ppc/platforms/85xx/sbc8560.h b/arch/ppc/platforms/85xx/sbc8560.h new file mode 100644 index 000000000..5c86187f8 --- /dev/null +++ b/arch/ppc/platforms/85xx/sbc8560.h @@ -0,0 +1,48 @@ +/* + * arch/ppc/platforms/85xx/sbc8560.h + * + * Wind River SBC8560 board definitions + * + * Copyright 2003 Motorola Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MACH_SBC8560_H__ +#define __MACH_SBC8560_H__ + +#include +#include +#include + +#ifdef CONFIG_SERIAL_MANY_PORTS +#define RS_TABLE_SIZE 64 +#else +#define RS_TABLE_SIZE 2 +#endif + +/* Rate for the 1.8432 Mhz clock for the onboard serial chip */ +#define BASE_BAUD ( 1843200 / 16 ) + +#ifdef CONFIG_SERIAL_DETECT_IRQ +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF|ASYNC_SKIP_TEST|ASYNC_AUTO_IRQ) +#else +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF|ASYNC_SKIP_TEST) +#endif + +#define STD_SERIAL_PORT_DFNS \ + { 0, BASE_BAUD, UARTA_ADDR, MPC85xx_IRQ_EXT9, STD_COM_FLAGS, /* ttyS0 */ \ + iomem_base: (u8 *)UARTA_ADDR, \ + io_type: SERIAL_IO_MEM }, \ + { 0, BASE_BAUD, UARTB_ADDR, MPC85xx_IRQ_EXT10, STD_COM_FLAGS, /* ttyS1 */ \ + iomem_base: (u8 *)UARTB_ADDR, \ + io_type: SERIAL_IO_MEM }, + +#define SERIAL_PORT_DFNS \ + STD_SERIAL_PORT_DFNS + +#endif /* __MACH_SBC8560_H__ */ diff --git a/arch/ppc/platforms/85xx/sbc85xx.c b/arch/ppc/platforms/85xx/sbc85xx.c new file mode 100644 index 000000000..a7a33fdff --- /dev/null +++ b/arch/ppc/platforms/85xx/sbc85xx.c @@ -0,0 +1,215 @@ +/* + * arch/ppc/platform/85xx/sbc85xx.c + * + * WindRiver PowerQUICC III SBC85xx board common routines + * + * Copyright 2002, 2003 Motorola Inc. + * Copyright 2004 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
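For reference, the serial clock arithmetic behind these definitions: BASE_BAUD above is the 1.8432 MHz UART input clock divided by the 16x oversampling factor, and sbc8560_early_serial_map() hands the 8250 driver uartclk = BASE_BAUD * 16. A minimal illustration; the 9600 baud figure is only an example, not something this patch configures:

/* BASE_BAUD = 1843200 / 16 = 115200 */
unsigned int uartclk = BASE_BAUD * 16;      /* 1843200 Hz, as passed to the 8250 driver */
unsigned int divisor = BASE_BAUD / 9600;    /* 12, for a hypothetical 9600 baud console */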
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +unsigned char __res[sizeof (bd_t)]; + +#ifndef CONFIG_PCI +unsigned long isa_io_base = 0; +unsigned long isa_mem_base = 0; +unsigned long pci_dram_offset = 0; +#endif + +extern unsigned long total_memory; /* in mm/init */ + +/* Internal interrupts are all Level Sensitive, and Positive Polarity */ + +static u_char sbc8560_openpic_initsenses[] __initdata = { + (IRQ_POLARITY_POSITIVE), /* Internal 0: L2 Cache */ + (IRQ_POLARITY_POSITIVE), /* Internal 1: ECM */ + (IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */ + (IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */ + (IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */ + (IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */ + (IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */ + (IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */ + (IRQ_POLARITY_POSITIVE), /* Internal 8: PCI/PCI-X */ + (IRQ_POLARITY_POSITIVE), /* Internal 9: RIO Inbound Port Write Error */ + (IRQ_POLARITY_POSITIVE), /* Internal 10: RIO Doorbell Inbound */ + (IRQ_POLARITY_POSITIVE), /* Internal 11: RIO Outbound Message */ + (IRQ_POLARITY_POSITIVE), /* Internal 12: RIO Inbound Message */ + (IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 0 Transmit */ + (IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 0 Receive */ + (IRQ_POLARITY_POSITIVE), /* Internal 15: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 16: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 17: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 0 Receive/Transmit Error */ + (IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 1 Transmit */ + (IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 1 Receive */ + (IRQ_POLARITY_POSITIVE), /* Internal 21: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 22: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 23: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 1 Receive/Transmit Error */ + (IRQ_POLARITY_POSITIVE), /* Internal 25: Fast Ethernet */ + (IRQ_POLARITY_POSITIVE), /* Internal 26: DUART */ + (IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */ + (IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */ + (IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 30: CPM */ + (IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */ + 0x0, /* External 0: */ + 0x0, /* External 1: */ +#if defined(CONFIG_PCI) + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 2: PCI slot 0 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 3: PCI slot 1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 4: PCI slot 2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PCI slot 3 */ +#else + 0x0, /* External 2: */ + 0x0, /* External 3: */ + 0x0, /* External 4: */ + 0x0, /* External 5: */ +#endif + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 6: PHY */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 7: PHY */ + 0x0, /* External 8: */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* External 9: PHY */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* External 10: PHY */ + 0x0, /* External 11: */ +}; + +/* ************************************************************************ */ +int +sbc8560_show_cpuinfo(struct seq_file *m) +{ + uint pvid, svid, phid1; + uint memsize = total_memory; + bd_t *binfo = 
(bd_t *) __res; + unsigned int freq; + + /* get the core frequency */ + freq = binfo->bi_intfreq; + + pvid = mfspr(PVR); + svid = mfspr(SVR); + + seq_printf(m, "Vendor\t\t: Wind River\n"); + + switch (svid & 0xffff0000) { + case SVR_8540: + seq_printf(m, "Machine\t\t: hhmmm, this board isn't made yet!\n"); + break; + case SVR_8560: + seq_printf(m, "Machine\t\t: SBC8560\n"); + break; + default: + seq_printf(m, "Machine\t\t: unknown\n"); + break; + } + seq_printf(m, "bus freq\t: %u.%.6u MHz\n", freq / 1000000, + freq % 1000000); + seq_printf(m, "PVR\t\t: 0x%x\n", pvid); + seq_printf(m, "SVR\t\t: 0x%x\n", svid); + + /* Display cpu Pll setting */ + phid1 = mfspr(HID1); + seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); + + /* Display the amount of memory */ + seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); + + return 0; +} + +void __init +sbc8560_init_IRQ(void) +{ + bd_t *binfo = (bd_t *) __res; + /* Determine the Physical Address of the OpenPIC regs */ + phys_addr_t OpenPIC_PAddr = + binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET; + OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE); + OpenPIC_InitSenses = sbc8560_openpic_initsenses; + OpenPIC_NumInitSenses = sizeof (sbc8560_openpic_initsenses); + + /* Skip reserved space and internal sources */ + openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200); + /* Map PIC IRQs 0-11 */ + openpic_set_sources(32, 12, OpenPIC_Addr + 0x10000); + + /* we let openpic interrupts starting from an offset, to + * leave space for cascading interrupts underneath. + */ + openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET); + + return; +} + +/* + * interrupt routing + */ + +#ifdef CONFIG_PCI +int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, + unsigned char pin) +{ + static char pci_irq_table[][4] = + /* + * PCI IDSEL/INTPIN->INTLINE + * A B C D + */ + { + {PIRQA, PIRQB, PIRQC, PIRQD}, + {PIRQD, PIRQA, PIRQB, PIRQC}, + {PIRQC, PIRQD, PIRQA, PIRQB}, + {PIRQB, PIRQC, PIRQD, PIRQA}, + }; + + const long min_idsel = 12, max_idsel = 15, irqs_per_slot = 4; + return PCI_IRQ_TABLE_LOOKUP; +} + +int mpc85xx_exclude_device(u_char bus, u_char devfn) +{ + if (bus == 0 && PCI_SLOT(devfn) == 0) + return PCIBIOS_DEVICE_NOT_FOUND; + else + return PCIBIOS_SUCCESSFUL; +} +#endif /* CONFIG_PCI */ diff --git a/arch/ppc/platforms/85xx/sbc85xx.h b/arch/ppc/platforms/85xx/sbc85xx.h new file mode 100644 index 000000000..7af93c691 --- /dev/null +++ b/arch/ppc/platforms/85xx/sbc85xx.h @@ -0,0 +1,55 @@ +/* + * arch/ppc/platforms/85xx/sbc85xx.h + * + * WindRiver PowerQUICC III SBC85xx common board definitions + * + * Copyright 2003 Motorola Inc. + * Copyright 2004 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
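The routing table in mpc85xx_map_irq() above uses the usual rotated IDSEL/INTPIN layout. Assuming PCI_IRQ_TABLE_LOOKUP simply indexes pci_irq_table[idsel - min_idsel][pin - 1] (the conventional behaviour of that macro, stated here as an assumption rather than taken from this patch), a concrete lookup goes like this:

/* Illustrative only: a device at IDSEL 13 asserting INTA (pin 1) */
unsigned char idsel = 13, pin = 1;
int irq = pci_irq_table[idsel - 12][pin - 1];   /* row 1, column A: resolves to PIRQD */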
+ * + */ + +#ifndef __PLATFORMS_85XX_SBC85XX_H__ +#define __PLATFORMS_85XX_SBC85XX_H__ + +#include +#include +#include +#include + +#define BOARD_CCSRBAR ((uint)0xff700000) +#define CCSRBAR_SIZE ((uint)1024*1024) + +#define BCSR_ADDR ((uint)0xfc000000) +#define BCSR_SIZE ((uint)(16 * 1024 * 1024)) + +#define UARTA_ADDR (BCSR_ADDR + 0x00700000) +#define UARTB_ADDR (BCSR_ADDR + 0x00800000) +#define RTC_DEVICE_ADDR (BCSR_ADDR + 0x00900000) +#define EEPROM_ADDR (BCSR_ADDR + 0x00b00000) + +extern int sbc8560_show_cpuinfo(struct seq_file *m); +extern void sbc8560_init_IRQ(void) __init; + +/* PCI interrupt controller */ +#define PIRQA MPC85xx_IRQ_EXT1 +#define PIRQB MPC85xx_IRQ_EXT2 +#define PIRQC MPC85xx_IRQ_EXT3 +#define PIRQD MPC85xx_IRQ_EXT4 + +#define MPC85XX_PCI1_LOWER_IO 0x00000000 +#define MPC85XX_PCI1_UPPER_IO 0x00ffffff + +#define MPC85XX_PCI1_LOWER_MEM 0x80000000 +#define MPC85XX_PCI1_UPPER_MEM 0x9fffffff + +#define MPC85XX_PCI1_IO_BASE 0xe2000000 +#define MPC85XX_PCI1_MEM_OFFSET 0x00000000 + +#define MPC85XX_PCI1_IO_SIZE 0x01000000 + +#endif /* __PLATFORMS_85XX_SBC85XX_H__ */ diff --git a/arch/ppc/platforms/lite5200.c b/arch/ppc/platforms/lite5200.c new file mode 100644 index 000000000..043040dc9 --- /dev/null +++ b/arch/ppc/platforms/lite5200.c @@ -0,0 +1,152 @@ +/* + * arch/ppc/platforms/lite5200.c + * + * Platform support file for the Freescale LITE5200 based on MPC52xx. + * A maximum of this file should be moved to syslib/mpc52xx_????? + * so that new platform based on MPC52xx need a minimal platform file + * ( avoid code duplication ) + * + * + * Maintainer : Sylvain Munaut + * + * Based on the 2.4 code written by Kent Borg, + * Dale Farnsworth and + * Wolfgang Denk + * + * Copyright 2004 Sylvain Munaut + * Copyright 2003 Motorola Inc. + * Copyright 2003 MontaVista Software Inc. + * Copyright 2003 DENX Software Engineering (wd@denx.de) + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + + +/* Board data given by U-Boot */ +bd_t __res; +EXPORT_SYMBOL(__res); /* For modules */ + + +/* ======================================================================== */ +/* OCP device definition */ +/* For board/shared resources like PSCs */ +/* ======================================================================== */ +/* Be sure not to load conficting devices : e.g. loading the UART drivers for + * PSC1 and then also loading a AC97 for this same PSC. 
+ * For details about how to create an entry, look in the doc of the concerned + * driver ( eg drivers/serial/mpc52xx_uart.c for the PSC in uart mode ) + */ + +struct ocp_def board_ocp[] = { + { + .vendor = OCP_VENDOR_FREESCALE, + .function = OCP_FUNC_PSC_UART, + .index = 0, + .paddr = MPC52xx_PSC1, + .irq = MPC52xx_PSC1_IRQ, + .pm = OCP_CPM_NA, + }, + { /* Terminating entry */ + .vendor = OCP_VENDOR_INVALID + } +}; + + +/* ======================================================================== */ +/* Platform specific code */ +/* ======================================================================== */ + +static int +icecube_show_cpuinfo(struct seq_file *m) +{ + seq_printf(m, "machine\t\t: Freescale LITE5200\n"); + return 0; +} + +static void __init +icecube_setup_arch(void) +{ + + /* Add board OCP definitions */ + mpc52xx_add_board_devices(board_ocp); +} + +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* Generic MPC52xx platform initialization */ + /* TODO Create one and move a max of stuff in it. + Put this init in the syslib */ + + struct bi_record *bootinfo = find_bootinfo(); + + if (bootinfo) + parse_bootinfo(bootinfo); + else { + /* Load the bd_t board info structure */ + if (r3) + memcpy((void*)&__res,(void*)(r3+KERNELBASE), + sizeof(bd_t)); + +#ifdef CONFIG_BLK_DEV_INITRD + /* Load the initrd */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif + + /* Load the command line */ + if (r6) { + *(char *)(r7+KERNELBASE) = 0; + strcpy(cmd_line, (char *)(r6+KERNELBASE)); + } + } + + /* BAT setup */ + mpc52xx_set_bat(); + + /* No ISA bus AFAIK */ + isa_io_base = 0; + isa_mem_base = 0; + + /* Setup the ppc_md struct */ + ppc_md.setup_arch = icecube_setup_arch; + ppc_md.show_cpuinfo = icecube_show_cpuinfo; + ppc_md.show_percpuinfo = NULL; + ppc_md.init_IRQ = mpc52xx_init_irq; + ppc_md.get_irq = mpc52xx_get_irq; + + ppc_md.find_end_of_memory = mpc52xx_find_end_of_memory; + ppc_md.setup_io_mappings = mpc52xx_map_io; + + ppc_md.restart = mpc52xx_restart; + ppc_md.power_off = mpc52xx_power_off; + ppc_md.halt = mpc52xx_halt; + + /* No time keeper on the IceCube */ + ppc_md.time_init = NULL; + ppc_md.get_rtc_time = NULL; + ppc_md.set_rtc_time = NULL; + + ppc_md.calibrate_decr = mpc52xx_calibrate_decr; +#ifdef CONFIG_SERIAL_TEXT_DEBUG + ppc_md.progress = mpc52xx_progress; +#endif +} + diff --git a/arch/ppc/platforms/lite5200.h b/arch/ppc/platforms/lite5200.h new file mode 100644 index 000000000..833134b19 --- /dev/null +++ b/arch/ppc/platforms/lite5200.h @@ -0,0 +1,23 @@ +/* + * arch/ppc/platforms/lite5200.h + * + * Definitions for Freescale LITE5200 : MPC52xx Standard Development + * Platform board support + * + * Maintainer : Sylvain Munaut + * + * Copyright (C) 2004 Sylvain Munaut + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __PLATFORMS_LITE5200_H__ +#define __PLATFORMS_LITE5200_H__ + +/* Serial port used for low-level debug */ +#define MPC52xx_PF_CONSOLE_PORT 0 /* PSC1 */ + + +#endif /* __PLATFORMS_LITE5200_H__ */ diff --git a/arch/ppc/platforms/mpc5200.c b/arch/ppc/platforms/mpc5200.c new file mode 100644 index 000000000..30b6936c3 --- /dev/null +++ b/arch/ppc/platforms/mpc5200.c @@ -0,0 +1,29 @@ +/* + * arch/ppc/platforms/mpc5200.c + * + * OCP Definitions for the boards based on MPC5200 processor. 
Contains + * definitions for every common peripherals. (Mostly all but PSCs) + * + * Maintainer : Sylvain Munaut + * + * Copyright 2004 Sylvain Munaut + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include +#include + +/* Here is the core_ocp struct. + * With all the devices common to all board. Even if port multiplexing is + * not setup for them (if the user don't want them, just don't select the + * config option). The potentially conflicting devices (like PSCs) goes in + * board specific file. + */ +struct ocp_def core_ocp[] = { + { /* Terminating entry */ + .vendor = OCP_VENDOR_INVALID + } +}; diff --git a/arch/ppc/platforms/pq2ads.h b/arch/ppc/platforms/pq2ads.h new file mode 100644 index 000000000..1bee6ca56 --- /dev/null +++ b/arch/ppc/platforms/pq2ads.h @@ -0,0 +1,90 @@ +/* + * A collection of structures, addresses, and values associated with + * the Motorola MPC8260ADS/MPC8266ADS-PCI boards. + * Copied from the RPX-Classic and SBS8260 stuff. + * + * Copyright (c) 2001 Dan Malek (dan@mvista.com) + */ +#ifdef __KERNEL__ +#ifndef __MACH_ADS8260_DEFS +#define __MACH_ADS8260_DEFS + +#include + +#include + +/* Memory map is configured by the PROM startup. + * We just map a few things we need. The CSR is actually 4 byte-wide + * registers that can be accessed as 8-, 16-, or 32-bit values. + */ +#define CPM_MAP_ADDR ((uint)0xf0000000) +#define BCSR_ADDR ((uint)0xf4500000) +#define BCSR_SIZE ((uint)(32 * 1024)) + +#define BOOTROM_RESTART_ADDR ((uint)0xff000104) + +/* The ADS8260 has 16, 32-bit wide control/status registers, accessed + * only on word boundaries. + * Not all are used (yet), or are interesting to us (yet). + */ + +/* Things of interest in the CSR. +*/ +#define BCSR0_LED0 ((uint)0x02000000) /* 0 == on */ +#define BCSR0_LED1 ((uint)0x01000000) /* 0 == on */ +#define BCSR1_FETHIEN ((uint)0x08000000) /* 0 == enable */ +#define BCSR1_FETH_RST ((uint)0x04000000) /* 0 == reset */ +#define BCSR1_RS232_EN1 ((uint)0x02000000) /* 0 == enable */ +#define BCSR1_RS232_EN2 ((uint)0x01000000) /* 0 == enable */ + +#define PHY_INTERRUPT SIU_INT_IRQ7 + +#ifdef CONFIG_PCI +/* PCI interrupt controller */ +#define PCI_INT_STAT_REG 0xF8200000 +#define PCI_INT_MASK_REG 0xF8200004 +#define PIRQA (NR_SIU_INTS + 0) +#define PIRQB (NR_SIU_INTS + 1) +#define PIRQC (NR_SIU_INTS + 2) +#define PIRQD (NR_SIU_INTS + 3) + +/* + * PCI memory map definitions for MPC8266ADS-PCI. 
+ * + * processor view + * local address PCI address target + * 0x80000000-0x9FFFFFFF 0x80000000-0x9FFFFFFF PCI mem with prefetch + * 0xA0000000-0xBFFFFFFF 0xA0000000-0xBFFFFFFF PCI mem w/o prefetch + * 0xF4000000-0xF7FFFFFF 0x00000000-0x03FFFFFF PCI IO + * + * PCI master view + * local address PCI address target + * 0x00000000-0x1FFFFFFF 0x00000000-0x1FFFFFFF MPC8266 local memory + */ + +/* window for a PCI master to access MPC8266 memory */ +#define PCI_SLV_MEM_LOCAL 0x00000000 /* Local base */ +#define PCI_SLV_MEM_BUS 0x00000000 /* PCI base */ + +/* window for the processor to access PCI memory with prefetching */ +#define PCI_MSTR_MEM_LOCAL 0x80000000 /* Local base */ +#define PCI_MSTR_MEM_BUS 0x80000000 /* PCI base */ +#define PCI_MSTR_MEM_SIZE 0x20000000 /* 512MB */ + +/* window for the processor to access PCI memory without prefetching */ +#define PCI_MSTR_MEMIO_LOCAL 0xA0000000 /* Local base */ +#define PCI_MSTR_MEMIO_BUS 0xA0000000 /* PCI base */ +#define PCI_MSTR_MEMIO_SIZE 0x20000000 /* 512MB */ + +/* window for the processor to access PCI I/O */ +#define PCI_MSTR_IO_LOCAL 0xF4000000 /* Local base */ +#define PCI_MSTR_IO_BUS 0x00000000 /* PCI base */ +#define PCI_MSTR_IO_SIZE 0x04000000 /* 64MB */ + +#define _IO_BASE PCI_MSTR_IO_LOCAL +#define _ISA_MEM_BASE PCI_MSTR_MEMIO_LOCAL +#define PCI_DRAM_OFFSET PCI_SLV_MEM_BUS +#endif /* CONFIG_PCI */ + +#endif /* __MACH_ADS8260_DEFS */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/platforms/pq2ads_setup.c b/arch/ppc/platforms/pq2ads_setup.c new file mode 100644 index 000000000..eaeb2d964 --- /dev/null +++ b/arch/ppc/platforms/pq2ads_setup.c @@ -0,0 +1,66 @@ +/* + * arch/ppc/platforms/pq2ads_setup.c + * + * PQ2ADS platform support + * + * Author: Kumar Gala + * Derived from: est8260_setup.c by Allen Curtis + * + * Copyright 2004 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include + +#include +#include + +static void (*callback_setup_arch)(void); + +extern unsigned char __res[sizeof(bd_t)]; + +extern void m8260_init(unsigned long r3, unsigned long r4, + unsigned long r5, unsigned long r6, unsigned long r7); + +static int +pq2ads_show_cpuinfo(struct seq_file *m) +{ + bd_t *binfo = (bd_t *)__res; + + seq_printf(m, "vendor\t\t: Motorola\n" + "machine\t\t: PQ2 ADS PowerPC\n" + "\n" + "mem size\t\t: 0x%08lx\n" + "console baud\t\t: %ld\n" + "\n", + binfo->bi_memsize, + binfo->bi_baudrate); + return 0; +} + +static void __init +pq2ads_setup_arch(void) +{ + printk("PQ2 ADS Port\n"); + callback_setup_arch(); + *(volatile uint *)(BCSR_ADDR + 4) &= ~BCSR1_RS232_EN2; +} + +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* Generic 8260 platform initialization */ + m8260_init(r3, r4, r5, r6, r7); + + /* Anything special for this platform */ + ppc_md.show_cpuinfo = pq2ads_show_cpuinfo; + + callback_setup_arch = ppc_md.setup_arch; + ppc_md.setup_arch = pq2ads_setup_arch; +} diff --git a/arch/ppc/platforms/rpx8260.c b/arch/ppc/platforms/rpx8260.c new file mode 100644 index 000000000..07d78d496 --- /dev/null +++ b/arch/ppc/platforms/rpx8260.c @@ -0,0 +1,65 @@ +/* + * arch/ppc/platforms/rpx8260.c + * + * RPC EP8260 platform support + * + * Author: Dan Malek + * Derived from: pq2ads_setup.c by Kumar + * + * Copyright 2004 Embedded Edge, LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include + +#include +#include + +static void (*callback_setup_arch)(void); + +extern unsigned char __res[sizeof(bd_t)]; + +extern void m8260_init(unsigned long r3, unsigned long r4, + unsigned long r5, unsigned long r6, unsigned long r7); + +static int +ep8260_show_cpuinfo(struct seq_file *m) +{ + bd_t *binfo = (bd_t *)__res; + + seq_printf(m, "vendor\t\t: RPC\n" + "machine\t\t: EP8260 PPC\n" + "\n" + "mem size\t\t: 0x%08x\n" + "console baud\t\t: %d\n" + "\n", + binfo->bi_memsize, + binfo->bi_baudrate); + return 0; +} + +static void __init +ep8260_setup_arch(void) +{ + printk("RPC EP8260 Port\n"); + callback_setup_arch(); +} + +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* Generic 8260 platform initialization */ + m8260_init(r3, r4, r5, r6, r7); + + /* Anything special for this platform */ + ppc_md.show_cpuinfo = ep8260_show_cpuinfo; + + callback_setup_arch = ppc_md.setup_arch; + ppc_md.setup_arch = ep8260_setup_arch; +} diff --git a/arch/ppc/platforms/rpx8260.h b/arch/ppc/platforms/rpx8260.h new file mode 100644 index 000000000..7d5cd8893 --- /dev/null +++ b/arch/ppc/platforms/rpx8260.h @@ -0,0 +1,74 @@ +/* + * A collection of structures, addresses, and values associated with + * the Embedded Planet RPX6 (or RPX Super) MPC8260 board. + * Copied from the RPX-Classic and SBS8260 stuff. + * + * Copyright (c) 2001 Dan Malek + */ +#ifdef __KERNEL__ +#ifndef __ASM_PLATFORMS_RPX8260_H__ +#define __ASM_PLATFORMS_RPX8260_H__ + +/* A Board Information structure that is given to a program when + * prom starts it up. 
+ */ +typedef struct bd_info { + unsigned int bi_memstart; /* Memory start address */ + unsigned int bi_memsize; /* Memory (end) size in bytes */ + unsigned int bi_nvsize; /* NVRAM size in bytes (can be 0) */ + unsigned int bi_intfreq; /* Internal Freq, in Hz */ + unsigned int bi_busfreq; /* Bus Freq, in MHz */ + unsigned int bi_cpmfreq; /* CPM Freq, in MHz */ + unsigned int bi_brgfreq; /* BRG Freq, in MHz */ + unsigned int bi_vco; /* VCO Out from PLL */ + unsigned int bi_baudrate; /* Default console baud rate */ + unsigned int bi_immr; /* IMMR when called from boot rom */ + unsigned char bi_enetaddr[6]; +} bd_t; + +extern bd_t m8xx_board_info; + +/* Memory map is configured by the PROM startup. + * We just map a few things we need. The CSR is actually 4 byte-wide + * registers that can be accessed as 8-, 16-, or 32-bit values. + */ +#define CPM_MAP_ADDR ((uint)0xf0000000) +#define RPX_CSR_ADDR ((uint)0xfa000000) +#define RPX_CSR_SIZE ((uint)(512 * 1024)) +#define RPX_NVRTC_ADDR ((uint)0xfa080000) +#define RPX_NVRTC_SIZE ((uint)(512 * 1024)) + +/* The RPX6 has 16, byte wide control/status registers. + * Not all are used (yet). + */ +extern volatile u_char *rpx6_csr_addr; + +/* Things of interest in the CSR. +*/ +#define BCSR0_ID_MASK ((u_char)0xf0) /* Read only */ +#define BCSR0_SWITCH_MASK ((u_char)0x0f) /* Read only */ +#define BCSR1_XCVR_SMC1 ((u_char)0x80) +#define BCSR1_XCVR_SMC2 ((u_char)0x40) +#define BCSR2_FLASH_WENABLE ((u_char)0x20) +#define BCSR2_NVRAM_ENABLE ((u_char)0x10) +#define BCSR2_ALT_IRQ2 ((u_char)0x08) +#define BCSR2_ALT_IRQ3 ((u_char)0x04) +#define BCSR2_PRST ((u_char)0x02) /* Force reset */ +#define BCSR2_ENPRST ((u_char)0x01) /* Enable POR */ +#define BCSR3_MODCLK_MASK ((u_char)0xe0) +#define BCSR3_ENCLKHDR ((u_char)0x10) +#define BCSR3_LED5 ((u_char)0x04) /* 0 == on */ +#define BCSR3_LED6 ((u_char)0x02) /* 0 == on */ +#define BCSR3_LED7 ((u_char)0x01) /* 0 == on */ +#define BCSR4_EN_PHY ((u_char)0x80) /* Enable PHY */ +#define BCSR4_EN_MII ((u_char)0x40) /* Enable PHY */ +#define BCSR4_MII_READ ((u_char)0x04) +#define BCSR4_MII_MDC ((u_char)0x02) +#define BCSR4_MII_MDIO ((u_char)0x01) +#define BCSR13_FETH_IRQMASK ((u_char)0xf0) +#define BCSR15_FETH_IRQ ((u_char)0x20) + +#define PHY_INTERRUPT SIU_INT_IRQ7 + +#endif /* __ASM_PLATFORMS_RPX8260_H__ */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/platforms/sbc82xx.c b/arch/ppc/platforms/sbc82xx.c new file mode 100644 index 000000000..0da699d3b --- /dev/null +++ b/arch/ppc/platforms/sbc82xx.c @@ -0,0 +1,113 @@ +/* + * arch/ppc/platforms/sbc82xx.c + * + * SBC82XX platform support + * + * Author: Guy Streeter + * + * Derived from: est8260_setup.c by Allen Curtis, ONZ + * + * Copyright 2004 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +static void (*callback_setup_arch)(void); + +extern unsigned char __res[sizeof(bd_t)]; + +extern void m8260_init(unsigned long r3, unsigned long r4, + unsigned long r5, unsigned long r6, unsigned long r7); + +extern void (*late_time_init)(void); + +static int +sbc82xx_show_cpuinfo(struct seq_file *m) +{ + bd_t *binfo = (bd_t *)__res; + + seq_printf(m, "vendor\t\t: Wind River\n" + "machine\t\t: SBC PowerQUICC II\n" + "\n" + "mem size\t\t: 0x%08lx\n" + "console baud\t\t: %ld\n" + "\n", + binfo->bi_memsize, + binfo->bi_baudrate); + return 0; +} + +static void __init +sbc82xx_setup_arch(void) +{ + printk("SBC PowerQUICC II Port\n"); + callback_setup_arch(); +} + +TODC_ALLOC(); + +/* + * Timer init happens before mem_init but after paging init, so we cannot + * directly use ioremap() at that time. + * late_time_init() is call after paging init. + */ +#ifdef CONFIG_GEN_RTC +static void sbc82xx_time_init(void) +{ + volatile memctl8260_t *mc = &immr->im_memctl; + TODC_INIT(TODC_TYPE_MK48T59, 0, 0, SBC82xx_TODC_NVRAM_ADDR, 0); + + /* Set up CS11 for RTC chip */ + mc->memc_br11=0; + mc->memc_or11=0xffff0836; + mc->memc_br11=0x80000801; + + todc_info->nvram_data = + (unsigned int)ioremap(todc_info->nvram_data, 0x2000); + BUG_ON(!todc_info->nvram_data); + ppc_md.get_rtc_time = todc_get_rtc_time; + ppc_md.set_rtc_time = todc_set_rtc_time; + ppc_md.nvram_read_val = todc_direct_read_val; + ppc_md.nvram_write_val = todc_direct_write_val; + todc_time_init(); +} +#endif /* CONFIG_GEN_RTC */ + +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* Generic 8260 platform initialization */ + m8260_init(r3, r4, r5, r6, r7); + + /* u-boot may be using one of the FCC Ethernet devices. + Use the MAC address to the SCC. */ + __res[offsetof(bd_t, bi_enetaddr[5])] &= ~3; + + /* Anything special for this platform */ + ppc_md.show_cpuinfo = sbc82xx_show_cpuinfo; + + callback_setup_arch = ppc_md.setup_arch; + ppc_md.setup_arch = sbc82xx_setup_arch; +#ifdef CONFIG_GEN_RTC + ppc_md.time_init = NULL; + ppc_md.get_rtc_time = NULL; + ppc_md.set_rtc_time = NULL; + ppc_md.nvram_read_val = NULL; + ppc_md.nvram_write_val = NULL; + late_time_init = sbc82xx_time_init; +#endif /* CONFIG_GEN_RTC */ +} diff --git a/arch/ppc/platforms/sbc82xx.h b/arch/ppc/platforms/sbc82xx.h new file mode 100644 index 000000000..b9d1c8ddb --- /dev/null +++ b/arch/ppc/platforms/sbc82xx.h @@ -0,0 +1,24 @@ +/* Board information for the SBCPowerQUICCII, which should be generic for + * all 8260 boards. The IMMR is now given to us so the hard define + * will soon be removed. All of the clock values are computed from + * the configuration SCMR and the Power-On-Reset word. 
+ */ + +#ifndef __PPC_SBC82xx_H__ +#define __PPC_SBC82xx_H__ + +#include + +#define IMAP_ADDR 0xf0000000 +#define CPM_MAP_ADDR 0xf0000000 + +#define SBC82xx_TODC_NVRAM_ADDR 0x80000000 + +#define SBC82xx_MACADDR_NVRAM_FCC1 0x220000c9 /* JP6B */ +#define SBC82xx_MACADDR_NVRAM_SCC1 0x220000cf /* JP6A */ +#define SBC82xx_MACADDR_NVRAM_FCC2 0x220000d5 /* JP7A */ +#define SBC82xx_MACADDR_NVRAM_FCC3 0x220000db /* JP7B */ + +#define BOOTROM_RESTART_ADDR ((uint)0x40000104) + +#endif /* __PPC_SBC82xx_H__ */ diff --git a/arch/ppc/syslib/cpm2_common.c b/arch/ppc/syslib/cpm2_common.c new file mode 100644 index 000000000..5ca6d4cf3 --- /dev/null +++ b/arch/ppc/syslib/cpm2_common.c @@ -0,0 +1,205 @@ +/* + * General Purpose functions for the global management of the + * 8260 Communication Processor Module. + * Copyright (c) 1999 Dan Malek (dmalek@jlc.net) + * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com) + * 2.3.99 Updates + * + * In addition to the individual control of the communication + * channels, there are a few functions that globally affect the + * communication processor. + * + * Buffer descriptors must be allocated from the dual ported memory + * space. The allocator for that is here. When the communication + * process is reset, we reclaim the memory available. There is + * currently no deallocator for this memory. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void cpm2_dpinit(void); +cpm_cpm2_t *cpmp; /* Pointer to comm processor space */ + +/* We allocate this here because it is used almost exclusively for + * the communication processor devices. + */ +cpm2_map_t *cpm2_immr; + +void +cpm2_reset(void) +{ + cpm2_immr = (cpm2_map_t *)CPM_MAP_ADDR; + + /* Reclaim the DP memory for our use. + */ + cpm2_dpinit(); + + /* Tell everyone where the comm processor resides. + */ + cpmp = &cpm2_immr->im_cpm; +} + +/* Set a baud rate generator. This needs lots of work. There are + * eight BRGs, which can be connected to the CPM channels or output + * as clocks. The BRGs are in two different block of internal + * memory mapped space. + * The baud rate clock is the system clock divided by something. + * It was set up long ago during the initial boot phase and is + * is given to us. + * Baud rate clocks are zero-based in the driver code (as that maps + * to port numbers). Documentation uses 1-based numbering. + */ +#define BRG_INT_CLK (((bd_t *)__res)->bi_brgfreq) +#define BRG_UART_CLK (BRG_INT_CLK/16) + +/* This function is used by UARTS, or anything else that uses a 16x + * oversampled clock. + */ +void +cpm2_setbrg(uint brg, uint rate) +{ + volatile uint *bp; + + /* This is good enough to get SMCs running..... + */ + if (brg < 4) { + bp = (uint *)&cpm2_immr->im_brgc1; + } + else { + bp = (uint *)&cpm2_immr->im_brgc5; + brg -= 4; + } + bp += brg; + *bp = ((BRG_UART_CLK / rate) << 1) | CPM_BRG_EN; +} + +/* This function is used to set high speed synchronous baud rate + * clocks. + */ +void +cpm2_fastbrg(uint brg, uint rate, int div16) +{ + volatile uint *bp; + + if (brg < 4) { + bp = (uint *)&cpm2_immr->im_brgc1; + } + else { + bp = (uint *)&cpm2_immr->im_brgc5; + brg -= 4; + } + bp += brg; + *bp = ((BRG_INT_CLK / rate) << 1) | CPM_BRG_EN; + if (div16) + *bp |= CPM_BRG_DIV16; +} + +/* + * dpalloc / dpfree bits. + */ +static spinlock_t cpm_dpmem_lock; +/* 16 blocks should be enough to satisfy all requests + * until the memory subsystem goes up... 
*/ +static rh_block_t cpm_boot_dpmem_rh_block[16]; +static rh_info_t cpm_dpmem_info; + +static void cpm2_dpinit(void) +{ + void *dprambase = &((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase; + + spin_lock_init(&cpm_dpmem_lock); + + /* initialize the info header */ + rh_init(&cpm_dpmem_info, 1, + sizeof(cpm_boot_dpmem_rh_block) / + sizeof(cpm_boot_dpmem_rh_block[0]), + cpm_boot_dpmem_rh_block); + + /* Attach the usable dpmem area */ + /* XXX: This is actually crap. CPM_DATAONLY_BASE and + * CPM_DATAONLY_SIZE is only a subset of the available dpram. It + * varies with the processor and the microcode patches activated. + * But the following should be at least safe. + */ + rh_attach_region(&cpm_dpmem_info, dprambase + CPM_DATAONLY_BASE, + CPM_DATAONLY_SIZE); +} + +/* This function used to return an index into the DPRAM area. + * Now it returns the actuall physical address of that area. + * use cpm2_dpram_offset() to get the index + */ +void *cpm2_dpalloc(uint size, uint align) +{ + void *start; + unsigned long flags; + + spin_lock_irqsave(&cpm_dpmem_lock, flags); + cpm_dpmem_info.alignment = align; + start = rh_alloc(&cpm_dpmem_info, size, "commproc"); + spin_unlock_irqrestore(&cpm_dpmem_lock, flags); + + return start; +} +EXPORT_SYMBOL(cpm2_dpalloc); + +int cpm2_dpfree(void *addr) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&cpm_dpmem_lock, flags); + ret = rh_free(&cpm_dpmem_info, addr); + spin_unlock_irqrestore(&cpm_dpmem_lock, flags); + + return ret; +} +EXPORT_SYMBOL(cpm2_dpfree); + +/* not sure if this is ever needed */ +void *cpm2_dpalloc_fixed(void *addr, uint size, uint align) +{ + void *start; + unsigned long flags; + + spin_lock_irqsave(&cpm_dpmem_lock, flags); + cpm_dpmem_info.alignment = align; + start = rh_alloc_fixed(&cpm_dpmem_info, addr, size, "commproc"); + spin_unlock_irqrestore(&cpm_dpmem_lock, flags); + + return start; +} +EXPORT_SYMBOL(cpm2_dpalloc_fixed); + +void cpm2_dpdump(void) +{ + rh_dump(&cpm_dpmem_info); +} +EXPORT_SYMBOL(cpm2_dpdump); + +uint cpm2_dpram_offset(void *addr) +{ + return (uint)((u_char *)addr - + ((uint)((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase)); +} +EXPORT_SYMBOL(cpm2_dpram_offset); + +void *cpm2_dpram_addr(int offset) +{ + return (void *)&((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase[offset]; +} +EXPORT_SYMBOL(cpm2_dpram_addr); diff --git a/arch/ppc/syslib/cpm2_pic.c b/arch/ppc/syslib/cpm2_pic.c new file mode 100644 index 000000000..43eac4135 --- /dev/null +++ b/arch/ppc/syslib/cpm2_pic.c @@ -0,0 +1,131 @@ +#include +#include +#include +#include +#include +#include +#include +#include "cpm2_pic.h" + +/* The CPM2 internal interrupt controller. It is usually + * the only interrupt controller. + * There are two 32-bit registers (high/low) for up to 64 + * possible interrupts. + * + * Now, the fun starts.....Interrupt Numbers DO NOT MAP + * in a simple arithmetic fashion to mask or pending registers. + * That is, interrupt 4 does not map to bit position 4. + * We create two tables, indexed by vector number, to indicate + * which register to use and which bit in the register to use. 
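To make the table-driven mapping just described concrete, here is the arithmetic the mask/unmask helpers below perform for one vector; the vector number 4 is only an example:

int bit  = irq_to_siubit[4];            /* 19 */
int word = irq_to_siureg[4];            /* 1, i.e. the second (low-word) SIMR/SIPNR register */
unsigned int mask = 1 << (31 - bit);    /* bit to set or clear in that register */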
+ */ +static u_char irq_to_siureg[] = { + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 +}; + +static u_char irq_to_siubit[] = { + 31, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, + 29, 30, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 31, + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 15, 14, 13, 12, 11, 10, 9, 8, + 7, 6, 5, 4, 3, 2, 1, 0 +}; + +static void cpm2_mask_irq(unsigned int irq_nr) +{ + int bit, word; + volatile uint *simr; + + bit = irq_to_siubit[irq_nr]; + word = irq_to_siureg[irq_nr]; + + simr = &(cpm2_immr->im_intctl.ic_simrh); + ppc_cached_irq_mask[word] &= ~(1 << (31 - bit)); + simr[word] = ppc_cached_irq_mask[word]; +} + +static void cpm2_unmask_irq(unsigned int irq_nr) +{ + int bit, word; + volatile uint *simr; + + bit = irq_to_siubit[irq_nr]; + word = irq_to_siureg[irq_nr]; + + simr = &(cpm2_immr->im_intctl.ic_simrh); + ppc_cached_irq_mask[word] |= (1 << (31 - bit)); + simr[word] = ppc_cached_irq_mask[word]; +} + +static void cpm2_mask_and_ack(unsigned int irq_nr) +{ + int bit, word; + volatile uint *simr, *sipnr; + + bit = irq_to_siubit[irq_nr]; + word = irq_to_siureg[irq_nr]; + + simr = &(cpm2_immr->im_intctl.ic_simrh); + sipnr = &(cpm2_immr->im_intctl.ic_sipnrh); + ppc_cached_irq_mask[word] &= ~(1 << (31 - bit)); + simr[word] = ppc_cached_irq_mask[word]; + sipnr[word] = 1 << (31 - bit); +} + +static void cpm2_end_irq(unsigned int irq_nr) +{ + int bit, word; + volatile uint *simr; + + if (!(irq_desc[irq_nr].status & (IRQ_DISABLED|IRQ_INPROGRESS)) + && irq_desc[irq_nr].action) { + + bit = irq_to_siubit[irq_nr]; + word = irq_to_siureg[irq_nr]; + + simr = &(cpm2_immr->im_intctl.ic_simrh); + ppc_cached_irq_mask[word] |= (1 << (31 - bit)); + simr[word] = ppc_cached_irq_mask[word]; + } +} + +struct hw_interrupt_type cpm2_pic = { + " CPM2 SIU ", + NULL, + NULL, + cpm2_unmask_irq, + cpm2_mask_irq, + cpm2_mask_and_ack, + cpm2_end_irq, + 0 +}; + + +int +cpm2_get_irq(struct pt_regs *regs) +{ + int irq; + unsigned long bits; + + /* For CPM2, read the SIVEC register and shift the bits down + * to get the irq number. */ + bits = cpm2_immr->im_intctl.ic_sivec; + irq = bits >> 26; + + if (irq == 0) + return(-1); +#if 0 + irq += ppc8260_pic.irq_offset; +#endif + return irq; +} + diff --git a/arch/ppc/syslib/cpm2_pic.h b/arch/ppc/syslib/cpm2_pic.h new file mode 100644 index 000000000..a9da44168 --- /dev/null +++ b/arch/ppc/syslib/cpm2_pic.h @@ -0,0 +1,13 @@ +#ifndef _PPC_KERNEL_CPM2_H +#define _PPC_KERNEL_CPM2_H + +#include + +extern struct hw_interrupt_type cpm2_pic; + +void cpm2_pic_init(void); +void cpm2_do_IRQ(struct pt_regs *regs, + int cpu); +int cpm2_get_irq(struct pt_regs *regs); + +#endif /* _PPC_KERNEL_CPM2_H */ diff --git a/arch/ppc/syslib/dcr.S b/arch/ppc/syslib/dcr.S new file mode 100644 index 000000000..895f10243 --- /dev/null +++ b/arch/ppc/syslib/dcr.S @@ -0,0 +1,41 @@ +/* + * arch/ppc/syslib/dcr.S + * + * "Indirect" DCR access + * + * Copyright (c) 2004 Eugene Surovegin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include + +#define DCR_ACCESS_PROLOG(table) \ + rlwinm r3,r3,4,18,27; \ + lis r5,table@h; \ + ori r5,r5,table@l; \ + add r3,r3,r5; \ + mtctr r3; \ + bctr + +_GLOBAL(__mfdcr) + DCR_ACCESS_PROLOG(__mfdcr_table) + +_GLOBAL(__mtdcr) + DCR_ACCESS_PROLOG(__mtdcr_table) + +__mfdcr_table: + mfdcr r3,0; blr +__mtdcr_table: + mtdcr 0,r4; blr + +dcr = 1 + .rept 1023 + mfdcr r3,dcr; blr + mtdcr dcr,r4; blr + dcr = dcr + 1 + .endr diff --git a/arch/ppc/syslib/ibm440gx_common.c b/arch/ppc/syslib/ibm440gx_common.c new file mode 100644 index 000000000..5da7bca6b --- /dev/null +++ b/arch/ppc/syslib/ibm440gx_common.c @@ -0,0 +1,212 @@ +/* + * arch/ppc/kernel/ibm440gx_common.c + * + * PPC440GX system library + * + * Eugene Surovegin or + * Copyright (c) 2003 Zultys Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#include +#include +#include +#include +#include +#include + +/* + * Calculate 440GX clocks + */ +static inline u32 __fix_zero(u32 v, u32 def){ + return v ? v : def; +} + +void __init ibm440gx_get_clocks(struct ibm44x_clocks* p, unsigned int sys_clk, + unsigned int ser_clk) +{ + u32 pllc = CPR_READ(DCRN_CPR_PLLC); + u32 plld = CPR_READ(DCRN_CPR_PLLD); + u32 uart0 = SDR_READ(DCRN_SDR_UART0); + u32 uart1 = SDR_READ(DCRN_SDR_UART1); + + /* Dividers */ + u32 fbdv = __fix_zero((plld >> 24) & 0x1f, 32); + u32 fwdva = __fix_zero((plld >> 16) & 0xf, 16); + u32 fwdvb = __fix_zero((plld >> 8) & 7, 8); + u32 lfbdv = __fix_zero(plld & 0x3f, 64); + u32 pradv0 = __fix_zero((CPR_READ(DCRN_CPR_PRIMAD) >> 24) & 7, 8); + u32 prbdv0 = __fix_zero((CPR_READ(DCRN_CPR_PRIMBD) >> 24) & 7, 8); + u32 opbdv0 = __fix_zero((CPR_READ(DCRN_CPR_OPBD) >> 24) & 3, 4); + u32 perdv0 = __fix_zero((CPR_READ(DCRN_CPR_PERD) >> 24) & 3, 4); + + /* Input clocks for primary dividers */ + u32 clk_a, clk_b; + + if (pllc & 0x40000000){ + u32 m; + + /* Feedback path */ + switch ((pllc >> 24) & 7){ + case 0: + /* PLLOUTx */ + m = ((pllc & 0x20000000) ? 
fwdvb : fwdva) * lfbdv; + break; + case 1: + /* CPU */ + m = fwdva * pradv0; + break; + case 5: + /* PERClk */ + m = fwdvb * prbdv0 * opbdv0 * perdv0; + break; + default: + printk(KERN_EMERG "invalid PLL feedback source\n"); + goto bypass; + } + m *= fbdv; + p->vco = sys_clk * m; + clk_a = p->vco / fwdva; + clk_b = p->vco / fwdvb; + } + else { +bypass: + /* Bypass system PLL */ + p->vco = 0; + clk_a = clk_b = sys_clk; + } + + p->cpu = clk_a / pradv0; + p->plb = clk_b / prbdv0; + p->opb = p->plb / opbdv0; + p->ebc = p->opb / perdv0; + + /* UARTs clock */ + if (uart0 & 0x00800000) + p->uart0 = ser_clk; + else + p->uart0 = p->plb / __fix_zero(uart0 & 0xff, 256); + + if (uart1 & 0x00800000) + p->uart1 = ser_clk; + else + p->uart1 = p->plb / __fix_zero(uart1 & 0xff, 256); +} + +/* Enable L2 cache (call with IRQs disabled) */ +void __init ibm440gx_l2c_enable(void){ + u32 r; + + asm volatile ("sync" ::: "memory"); + + /* Disable SRAM */ + mtdcr(DCRN_SRAM0_DPC, mfdcr(DCRN_SRAM0_DPC) & ~SRAM_DPC_ENABLE); + mtdcr(DCRN_SRAM0_SB0CR, mfdcr(DCRN_SRAM0_SB0CR) & ~SRAM_SBCR_BU_MASK); + mtdcr(DCRN_SRAM0_SB1CR, mfdcr(DCRN_SRAM0_SB1CR) & ~SRAM_SBCR_BU_MASK); + mtdcr(DCRN_SRAM0_SB2CR, mfdcr(DCRN_SRAM0_SB2CR) & ~SRAM_SBCR_BU_MASK); + mtdcr(DCRN_SRAM0_SB3CR, mfdcr(DCRN_SRAM0_SB3CR) & ~SRAM_SBCR_BU_MASK); + + /* Enable L2_MODE without ICU/DCU */ + r = mfdcr(DCRN_L2C0_CFG) & ~(L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_SS_MASK); + r |= L2C_CFG_L2M | L2C_CFG_SS_256; + mtdcr(DCRN_L2C0_CFG, r); + + mtdcr(DCRN_L2C0_ADDR, 0); + + /* Hardware Clear Command */ + mtdcr(DCRN_L2C0_CMD, L2C_CMD_HCC); + while (!(mfdcr(DCRN_L2C0_SR) & L2C_SR_CC)) ; + + /* Clear Cache Parity and Tag Errors */ + mtdcr(DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE); + + /* Enable 64G snoop region starting at 0 */ + r = mfdcr(DCRN_L2C0_SNP0) & ~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK); + r |= L2C_SNP_SSR_32G | L2C_SNP_ESR; + mtdcr(DCRN_L2C0_SNP0, r); + + r = mfdcr(DCRN_L2C0_SNP1) & ~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK); + r |= 0x80000000 | L2C_SNP_SSR_32G | L2C_SNP_ESR; + mtdcr(DCRN_L2C0_SNP1, r); + + asm volatile ("sync" ::: "memory"); + + /* Enable ICU/DCU ports */ + r = mfdcr(DCRN_L2C0_CFG); + r &= ~(L2C_CFG_DCW_MASK | L2C_CFG_CPIM | L2C_CFG_TPIM | L2C_CFG_LIM + | L2C_CFG_PMUX_MASK | L2C_CFG_PMIM | L2C_CFG_TPEI | L2C_CFG_CPEI + | L2C_CFG_NAM | L2C_CFG_NBRM); + r |= L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_TPC | L2C_CFG_CPC | L2C_CFG_FRAN + | L2C_CFG_SMCM; + mtdcr(DCRN_L2C0_CFG, r); + + asm volatile ("sync; isync" ::: "memory"); +} + +/* Disable L2 cache (call with IRQs disabled) */ +void __init ibm440gx_l2c_disable(void){ + u32 r; + + asm volatile ("sync" ::: "memory"); + + /* Disable L2C mode */ + r = mfdcr(DCRN_L2C0_CFG) & ~(L2C_CFG_L2M | L2C_CFG_ICU | L2C_CFG_DCU); + mtdcr(DCRN_L2C0_CFG, r); + + /* Enable SRAM */ + mtdcr(DCRN_SRAM0_DPC, mfdcr(DCRN_SRAM0_DPC) | SRAM_DPC_ENABLE); + mtdcr(DCRN_SRAM0_SB0CR, + SRAM_SBCR_BAS0 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW); + mtdcr(DCRN_SRAM0_SB1CR, + SRAM_SBCR_BAS1 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW); + mtdcr(DCRN_SRAM0_SB2CR, + SRAM_SBCR_BAS2 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW); + mtdcr(DCRN_SRAM0_SB3CR, + SRAM_SBCR_BAS3 | SRAM_SBCR_BS_64KB | SRAM_SBCR_BU_RW); + + asm volatile ("sync; isync" ::: "memory"); +} + +int __init ibm440gx_get_eth_grp(void) +{ + return (SDR_READ(DCRN_SDR_PFC1) & DCRN_SDR_PFC1_EPS) >> DCRN_SDR_PFC1_EPS_SHIFT; +} + +void __init ibm440gx_set_eth_grp(int group) +{ + SDR_WRITE(DCRN_SDR_PFC1, (SDR_READ(DCRN_SDR_PFC1) & ~DCRN_SDR_PFC1_EPS) | (group << DCRN_SDR_PFC1_EPS_SHIFT)); +} + +void __init 
ibm440gx_tah_enable(void) +{ + /* Enable TAH0 and TAH1 */ + SDR_WRITE(DCRN_SDR_MFR,SDR_READ(DCRN_SDR_MFR) & + ~DCRN_SDR_MFR_TAH0); + SDR_WRITE(DCRN_SDR_MFR,SDR_READ(DCRN_SDR_MFR) & + ~DCRN_SDR_MFR_TAH1); +} + +int ibm440gx_show_cpuinfo(struct seq_file *m){ + + u32 l2c_cfg = mfdcr(DCRN_L2C0_CFG); + const char* s; + if (l2c_cfg & L2C_CFG_L2M){ + switch (l2c_cfg & (L2C_CFG_ICU | L2C_CFG_DCU)){ + case L2C_CFG_ICU: s = "I-Cache only"; break; + case L2C_CFG_DCU: s = "D-Cache only"; break; + default: s = "I-Cache/D-Cache"; break; + } + } + else + s = "disabled"; + + seq_printf(m, "L2-Cache\t: %s (0x%08x 0x%08x)\n", s, + l2c_cfg, mfdcr(DCRN_L2C0_SR)); + + return 0; +} + diff --git a/arch/ppc/syslib/ibm440gx_common.h b/arch/ppc/syslib/ibm440gx_common.h new file mode 100644 index 000000000..5dbca9896 --- /dev/null +++ b/arch/ppc/syslib/ibm440gx_common.h @@ -0,0 +1,54 @@ +/* + * arch/ppc/kernel/ibm440gx_common.h + * + * PPC440GX system library + * + * Eugene Surovegin or + * Copyright (c) 2003 Zultys Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ +#ifdef __KERNEL__ +#ifndef __PPC_SYSLIB_IBM440GX_COMMON_H +#define __PPC_SYSLIB_IBM440GX_COMMON_H + +#ifndef __ASSEMBLY__ + +#include +#include +#include +#include + +/* + * Please, refer to the Figure 14.1 in 440GX user manual + * + * if internal UART clock is used, ser_clk is ignored + */ +void ibm440gx_get_clocks(struct ibm44x_clocks*, unsigned int sys_clk, + unsigned int ser_clk) __init; + +/* Enable L2 cache */ +void ibm440gx_l2c_enable(void) __init; + +/* Disable L2 cache */ +void ibm440gx_l2c_disable(void) __init; + +/* Get Ethernet Group */ +int ibm440gx_get_eth_grp(void) __init; + +/* Set Ethernet Group */ +void ibm440gx_set_eth_grp(int group) __init; + +/* Enable TAH devices */ +void ibm440gx_tah_enable(void) __init; + +/* Add L2C info to /proc/cpuinfo */ +int ibm440gx_show_cpuinfo(struct seq_file*); + +#endif /* __ASSEMBLY__ */ +#endif /* __PPC_SYSLIB_IBM440GX_COMMON_H */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/syslib/ibm44x_common.h b/arch/ppc/syslib/ibm44x_common.h new file mode 100644 index 000000000..ee1053ac2 --- /dev/null +++ b/arch/ppc/syslib/ibm44x_common.h @@ -0,0 +1,36 @@ +/* + * arch/ppc/kernel/ibm44x_common.h + * + * PPC44x system library + * + * Eugene Surovegin or + * Copyright (c) 2003 Zultys Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
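A typical caller of the clock routine declared above looks roughly like this; the 33.333 MHz system clock and 11.0592 MHz serial clock are board-specific example values, not figures taken from this patch:

struct ibm44x_clocks clocks;

ibm440gx_get_clocks(&clocks, 33333333, 11059200);   /* sys_clk, ser_clk: examples only */
/* clocks.cpu, clocks.plb, clocks.opb, clocks.uart0, ... are now filled in */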
+ * + */ +#ifdef __KERNEL__ +#ifndef __PPC_SYSLIB_IBM44x_COMMON_H +#define __PPC_SYSLIB_IBM44x_COMMON_H + +#ifndef __ASSEMBLY__ + +/* + * All clocks are in Hz + */ +struct ibm44x_clocks { + unsigned int vco; /* VCO, 0 if system PLL is bypassed */ + unsigned int cpu; /* CPUCoreClk */ + unsigned int plb; /* PLBClk */ + unsigned int opb; /* OPBClk */ + unsigned int ebc; /* PerClk */ + unsigned int uart0; + unsigned int uart1; +}; + +#endif /* __ASSEMBLY__ */ +#endif /* __PPC_SYSLIB_IBM44x_COMMON_H */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/syslib/m8260_pci.c b/arch/ppc/syslib/m8260_pci.c new file mode 100644 index 000000000..bd564fb35 --- /dev/null +++ b/arch/ppc/syslib/m8260_pci.c @@ -0,0 +1,194 @@ +/* + * (C) Copyright 2003 + * Wolfgang Denk, DENX Software Engineering, wd@denx.de. + * + * (C) Copyright 2004 Red Hat, Inc. + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "m8260_pci.h" + + +/* PCI bus configuration registers. + */ + +static void __init m8260_setup_pci(struct pci_controller *hose) +{ + volatile cpm2_map_t *immap = cpm2_immr; + unsigned long pocmr; + u16 tempShort; + +#ifndef CONFIG_ATC /* already done in U-Boot */ + /* + * Setting required to enable IRQ1-IRQ7 (SIUMCR [DPPC]), + * and local bus for PCI (SIUMCR [LBPC]). + */ + immap->im_siu_conf.siu_82xx.sc_siumcr = 0x00640000; +#endif + + /* Make PCI lowest priority */ + /* Each 4 bits is a device bus request and the MS 4bits + is highest priority */ + /* Bus 4bit value + --- ---------- + CPM high 0b0000 + CPM middle 0b0001 + CPM low 0b0010 + PCI reguest 0b0011 + Reserved 0b0100 + Reserved 0b0101 + Internal Core 0b0110 + External Master 1 0b0111 + External Master 2 0b1000 + External Master 3 0b1001 + The rest are reserved */ + immap->im_siu_conf.siu_82xx.sc_ppc_alrh = 0x61207893; + + /* Park bus on core while modifying PCI Bus accesses */ + immap->im_siu_conf.siu_82xx.sc_ppc_acr = 0x6; + + /* + * Set up master window that allows the CPU to access PCI space. This + * window is set up using the first SIU PCIBR registers. + */ + immap->im_memctl.memc_pcimsk0 = MPC826x_PCI_MASK; + immap->im_memctl.memc_pcibr0 = MPC826x_PCI_BASE | PCIBR_ENABLE; + + /* Disable machine check on no response or target abort */ + immap->im_pci.pci_emr = cpu_to_le32(0x1fe7); + /* Release PCI RST (by default the PCI RST signal is held low) */ + immap->im_pci.pci_gcr = cpu_to_le32(PCIGCR_PCI_BUS_EN); + + /* give it some time */ + mdelay(1); + + /* + * Set up master window that allows the CPU to access PCI Memory (prefetch) + * space. This window is set up using the first set of Outbound ATU registers. 
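The outbound window programming that follows encodes each window size in the POCMR registers as an inverted 4 KB page mask. For the prefetchable memory window, the arithmetic works out as in this sketch of the same computation the code performs:

/* 0x9fffffff - 0x80000000 = 0x1fffffff   (512 MB window)
 * >> 12                   = 0x0001ffff   (span in 4 KB pages)
 * ^ 0xfffff               = 0x000e0000   (inverted mask written to POCMR0)
 */
unsigned long pocmr0 = ((0x9fffffffUL - 0x80000000UL) >> 12) ^ 0xfffff;   /* 0xe0000 */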
+ */ + immap->im_pci.pci_potar0 = cpu_to_le32(MPC826x_PCI_LOWER_MEM >> 12); + immap->im_pci.pci_pobar0 = cpu_to_le32((MPC826x_PCI_LOWER_MEM - MPC826x_PCI_MEM_OFFSET) >> 12); + pocmr = ((MPC826x_PCI_UPPER_MEM - MPC826x_PCI_LOWER_MEM) >> 12) ^ 0xfffff; + immap->im_pci.pci_pocmr0 = cpu_to_le32(pocmr | POCMR_ENABLE | POCMR_PREFETCH_EN); + + /* + * Set up master window that allows the CPU to access PCI Memory (non-prefetch) + * space. This window is set up using the second set of Outbound ATU registers. + */ + immap->im_pci.pci_potar1 = cpu_to_le32(MPC826x_PCI_LOWER_MMIO >> 12); + immap->im_pci.pci_pobar1 = cpu_to_le32((MPC826x_PCI_LOWER_MMIO - MPC826x_PCI_MMIO_OFFSET) >> 12); + pocmr = ((MPC826x_PCI_UPPER_MMIO - MPC826x_PCI_LOWER_MMIO) >> 12) ^ 0xfffff; + immap->im_pci.pci_pocmr1 = cpu_to_le32(pocmr | POCMR_ENABLE); + + /* + * Set up master window that allows the CPU to access PCI IO space. This window + * is set up using the third set of Outbound ATU registers. + */ + immap->im_pci.pci_potar2 = cpu_to_le32(MPC826x_PCI_IO_BASE >> 12); + immap->im_pci.pci_pobar2 = cpu_to_le32(MPC826x_PCI_LOWER_IO >> 12); + pocmr = ((MPC826x_PCI_UPPER_IO - MPC826x_PCI_LOWER_IO) >> 12) ^ 0xfffff; + immap->im_pci.pci_pocmr2 = cpu_to_le32(pocmr | POCMR_ENABLE | POCMR_PCI_IO); + + /* + * Set up slave window that allows PCI masters to access MPC826x local memory. + * This window is set up using the first set of Inbound ATU registers + */ + + immap->im_pci.pci_pitar0 = cpu_to_le32(MPC826x_PCI_SLAVE_MEM_LOCAL >> 12); + immap->im_pci.pci_pibar0 = cpu_to_le32(MPC826x_PCI_SLAVE_MEM_BUS >> 12); + pocmr = ((MPC826x_PCI_SLAVE_MEM_SIZE-1) >> 12) ^ 0xfffff; + immap->im_pci.pci_picmr0 = cpu_to_le32(pocmr | PICMR_ENABLE | PICMR_PREFETCH_EN); + + /* See above for description - puts PCI request as highest priority */ + immap->im_siu_conf.siu_82xx.sc_ppc_alrh = 0x03124567; + + /* Park the bus on the PCI */ + immap->im_siu_conf.siu_82xx.sc_ppc_acr = PPC_ACR_BUS_PARK_PCI; + + /* Host mode - specify the bridge as a host-PCI bridge */ + early_write_config_word(hose, 0, 0, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_HOST); + + /* Enable the host bridge to be a master on the PCI bus, and to act as a PCI memory target */ + early_read_config_word(hose, 0, 0, PCI_COMMAND, &tempShort); + early_write_config_word(hose, 0, 0, PCI_COMMAND, + tempShort | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY); +} + +void __init m8260_find_bridges(void) +{ + extern int pci_assign_all_busses; + struct pci_controller * hose; + + pci_assign_all_busses = 1; + + hose = pcibios_alloc_controller(); + + if (!hose) + return; + + ppc_md.pci_swizzle = common_swizzle; + + hose->first_busno = 0; + hose->bus_offset = 0; + hose->last_busno = 0xff; + + setup_m8260_indirect_pci(hose, + (unsigned long)&cpm2_immr->im_pci.pci_cfg_addr, + (unsigned long)&cpm2_immr->im_pci.pci_cfg_data); + + m8260_setup_pci(hose); + hose->pci_mem_offset = MPC826x_PCI_MEM_OFFSET; + + isa_io_base = + (unsigned long) ioremap(MPC826x_PCI_IO_BASE, + MPC826x_PCI_IO_SIZE); + hose->io_base_virt = (void *) isa_io_base; + + /* setup resources */ + pci_init_resource(&hose->mem_resources[0], + MPC826x_PCI_LOWER_MEM, + MPC826x_PCI_UPPER_MEM, + IORESOURCE_MEM|IORESOURCE_PREFETCH, "PCI prefetchable memory"); + + pci_init_resource(&hose->mem_resources[1], + MPC826x_PCI_LOWER_MMIO, + MPC826x_PCI_UPPER_MMIO, + IORESOURCE_MEM, "PCI memory"); + + pci_init_resource(&hose->io_resource, + MPC826x_PCI_LOWER_IO, + MPC826x_PCI_UPPER_IO, + IORESOURCE_IO, "PCI I/O"); +} diff --git a/arch/ppc/syslib/m8260_pci.h 
b/arch/ppc/syslib/m8260_pci.h new file mode 100644 index 000000000..4e2ce7f7c --- /dev/null +++ b/arch/ppc/syslib/m8260_pci.h @@ -0,0 +1,75 @@ + +#ifndef _PPC_KERNEL_M8260_PCI_H +#define _PPC_KERNEL_M8260_PCI_H + +#include + +/* + * Local->PCI map (from CPU) controlled by + * MPC826x master window + * + * 0x80000000 - 0xBFFFFFFF Total CPU2PCI space PCIBR0 + * + * 0x80000000 - 0x9FFFFFFF PCI Mem with prefetch (Outbound ATU #1) + * 0xA0000000 - 0xAFFFFFFF PCI Mem w/o prefetch (Outbound ATU #2) + * 0xB0000000 - 0xB0FFFFFF 32-bit PCI IO (Outbound ATU #3) + * + * PCI->Local map (from PCI) + * MPC826x slave window controlled by + * + * 0x00000000 - 0x07FFFFFF MPC826x local memory (Inbound ATU #1) + */ + +/* + * Slave window that allows PCI masters to access MPC826x local memory. + * This window is set up using the first set of Inbound ATU registers + */ + +#ifndef MPC826x_PCI_SLAVE_MEM_LOCAL +#define MPC826x_PCI_SLAVE_MEM_LOCAL (((struct bd_info *)__res)->bi_memstart) +#define MPC826x_PCI_SLAVE_MEM_BUS (((struct bd_info *)__res)->bi_memstart) +#define MPC826x_PCI_SLAVE_MEM_SIZE (((struct bd_info *)__res)->bi_memsize) +#endif + +/* + * This is the window that allows the CPU to access PCI address space. + * It will be setup with the SIU PCIBR0 register. All three PCI master + * windows, which allow the CPU to access PCI prefetch, non prefetch, + * and IO space (see below), must all fit within this window. + */ +#ifndef MPC826x_PCI_BASE +#define MPC826x_PCI_BASE 0x80000000 +#define MPC826x_PCI_MASK 0xc0000000 +#endif + +#ifndef MPC826x_PCI_LOWER_MEM +#define MPC826x_PCI_LOWER_MEM 0x80000000 +#define MPC826x_PCI_UPPER_MEM 0x9fffffff +#define MPC826x_PCI_MEM_OFFSET 0x00000000 +#endif + +#ifndef MPC826x_PCI_LOWER_MMIO +#define MPC826x_PCI_LOWER_MMIO 0xa0000000 +#define MPC826x_PCI_UPPER_MMIO 0xafffffff +#define MPC826x_PCI_MMIO_OFFSET 0x00000000 +#endif + +#ifndef MPC826x_PCI_LOWER_IO +#define MPC826x_PCI_LOWER_IO 0x00000000 +#define MPC826x_PCI_UPPER_IO 0x00ffffff +#define MPC826x_PCI_IO_BASE 0xb0000000 +#define MPC826x_PCI_IO_SIZE 0x01000000 +#endif + +#ifndef _IO_BASE +#define _IO_BASE isa_io_base +#endif + +#ifdef CONFIG_8260_PCI9 +extern void setup_m8260_indirect_pci(struct pci_controller* hose, + u32 cfg_addr, u32 cfg_data); +#else +#define setup_m8260_indirect_pci setup_indirect_pci +#endif + +#endif /* _PPC_KERNEL_M8260_PCI_H */ diff --git a/arch/ppc/syslib/m8260_pci_erratum9.c b/arch/ppc/syslib/m8260_pci_erratum9.c new file mode 100644 index 000000000..ae76a1b52 --- /dev/null +++ b/arch/ppc/syslib/m8260_pci_erratum9.c @@ -0,0 +1,474 @@ +/* + * arch/ppc/platforms/mpc8260_pci9.c + * + * Workaround for device erratum PCI 9. + * See Motorola's "XPC826xA Family Device Errata Reference." + * The erratum applies to all 8260 family Hip4 processors. It is scheduled + * to be fixed in HiP4 Rev C. Erratum PCI 9 states that a simultaneous PCI + * inbound write transaction and PCI outbound read transaction can result in a + * bus deadlock. The suggested workaround is to use the IDMA controller to + * perform all reads from PCI configuration, memory, and I/O space. + * + * Author: andy_lowe@mvista.com + * + * 2003 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "m8260_pci.h" + +#ifdef CONFIG_8260_PCI9 +/*#include */ /* included in asm/io.h */ + +#define IDMA_XFER_BUF_SIZE 64 /* size of the IDMA transfer buffer */ + +/* define a structure for the IDMA dpram usage */ +typedef struct idma_dpram_s { + idma_t pram; /* IDMA parameter RAM */ + u_char xfer_buf[IDMA_XFER_BUF_SIZE]; /* IDMA transfer buffer */ + idma_bd_t bd; /* buffer descriptor */ +} idma_dpram_t; + +/* define offsets relative to start of IDMA dpram */ +#define IDMA_XFER_BUF_OFFSET (sizeof(idma_t)) +#define IDMA_BD_OFFSET (sizeof(idma_t) + IDMA_XFER_BUF_SIZE) + +/* define globals */ +static volatile idma_dpram_t *idma_dpram; + +/* Exactly one of CONFIG_8260_PCI9_IDMAn must be defined, + * where n is 1, 2, 3, or 4. This selects the IDMA channel used for + * the PCI9 workaround. + */ +#ifdef CONFIG_8260_PCI9_IDMA1 +#define IDMA_CHAN 0 +#define PROFF_IDMA PROFF_IDMA1_BASE +#define IDMA_PAGE CPM_CR_IDMA1_PAGE +#define IDMA_SBLOCK CPM_CR_IDMA1_SBLOCK +#endif +#ifdef CONFIG_8260_PCI9_IDMA2 +#define IDMA_CHAN 1 +#define PROFF_IDMA PROFF_IDMA2_BASE +#define IDMA_PAGE CPM_CR_IDMA2_PAGE +#define IDMA_SBLOCK CPM_CR_IDMA2_SBLOCK +#endif +#ifdef CONFIG_8260_PCI9_IDMA3 +#define IDMA_CHAN 2 +#define PROFF_IDMA PROFF_IDMA3_BASE +#define IDMA_PAGE CPM_CR_IDMA3_PAGE +#define IDMA_SBLOCK CPM_CR_IDMA3_SBLOCK +#endif +#ifdef CONFIG_8260_PCI9_IDMA4 +#define IDMA_CHAN 3 +#define PROFF_IDMA PROFF_IDMA4_BASE +#define IDMA_PAGE CPM_CR_IDMA4_PAGE +#define IDMA_SBLOCK CPM_CR_IDMA4_SBLOCK +#endif + +void idma_pci9_init(void) +{ + uint dpram_offset; + volatile idma_t *pram; + volatile im_idma_t *idma_reg; + volatile cpm2_map_t *immap = cpm2_immr; + + /* allocate IDMA dpram */ + dpram_offset = cpm2_dpalloc(sizeof(idma_dpram_t), 64); + idma_dpram = + (volatile idma_dpram_t *)&immap->im_dprambase[dpram_offset]; + + /* initialize the IDMA parameter RAM */ + memset((void *)idma_dpram, 0, sizeof(idma_dpram_t)); + pram = &idma_dpram->pram; + pram->ibase = dpram_offset + IDMA_BD_OFFSET; + pram->dpr_buf = dpram_offset + IDMA_XFER_BUF_OFFSET; + pram->ss_max = 32; + pram->dts = 32; + + /* initialize the IDMA_BASE pointer to the IDMA parameter RAM */ + *((ushort *) &immap->im_dprambase[PROFF_IDMA]) = dpram_offset; + + /* initialize the IDMA registers */ + idma_reg = (volatile im_idma_t *) &immap->im_sdma.sdma_idsr1; + idma_reg[IDMA_CHAN].idmr = 0; /* mask all IDMA interrupts */ + idma_reg[IDMA_CHAN].idsr = 0xff; /* clear all event flags */ + + printk("<4>Using IDMA%d for MPC8260 device erratum PCI 9 workaround\n", + IDMA_CHAN + 1); + + return; +} + +/* Use the IDMA controller to transfer data from I/O memory to local RAM. + * The src address must be a physical address suitable for use by the DMA + * controller with no translation. The dst address must be a kernel virtual + * address. The dst address is translated to a physical address via + * virt_to_phys(). + * The sinc argument specifies whether or not the source address is incremented + * by the DMA controller. The source address is incremented if and only if sinc + * is non-zero. The destination address is always incremented since the + * destination is always host RAM. 
+ */ +static void +idma_pci9_read(u8 *dst, u8 *src, int bytes, int unit_size, int sinc) +{ + unsigned long flags; + volatile idma_t *pram = &idma_dpram->pram; + volatile idma_bd_t *bd = &idma_dpram->bd; + volatile cpm2_map_t *immap = cpm2_immr; + + local_irq_save(flags); + + /* initialize IDMA parameter RAM for this transfer */ + if (sinc) + pram->dcm = IDMA_DCM_DMA_WRAP_64 | IDMA_DCM_SINC + | IDMA_DCM_DINC | IDMA_DCM_SD_MEM2MEM; + else + pram->dcm = IDMA_DCM_DMA_WRAP_64 | IDMA_DCM_DINC + | IDMA_DCM_SD_MEM2MEM; + pram->ibdptr = pram->ibase; + pram->sts = unit_size; + pram->istate = 0; + + /* initialize the buffer descriptor */ + bd->dst = virt_to_phys(dst); + bd->src = (uint) src; + bd->len = bytes; + bd->flags = IDMA_BD_V | IDMA_BD_W | IDMA_BD_I | IDMA_BD_L | IDMA_BD_DGBL + | IDMA_BD_DBO_BE | IDMA_BD_SBO_BE | IDMA_BD_SDTB; + + /* issue the START_IDMA command to the CP */ + while (immap->im_cpm.cp_cpcr & CPM_CR_FLG); + immap->im_cpm.cp_cpcr = mk_cr_cmd(IDMA_PAGE, IDMA_SBLOCK, 0, + CPM_CR_START_IDMA) | CPM_CR_FLG; + while (immap->im_cpm.cp_cpcr & CPM_CR_FLG); + + /* wait for transfer to complete */ + while(bd->flags & IDMA_BD_V); + + local_irq_restore(flags); + + return; +} + +/* Use the IDMA controller to transfer data from local RAM to I/O memory. + * The dst address must be a physical address suitable for use by the DMA + * controller with no translation. The src address must be a kernel virtual + * address. The src address is translated to a physical address via + * virt_to_phys(). + * The dinc argument specifies whether or not the dest address is incremented + * by the DMA controller. The destination address is incremented if and only if dinc + * is non-zero. The source address is always incremented since the + * source is always host RAM. + */ +static void +idma_pci9_write(u8 *dst, u8 *src, int bytes, int unit_size, int dinc) +{ + unsigned long flags; + volatile idma_t *pram = &idma_dpram->pram; + volatile idma_bd_t *bd = &idma_dpram->bd; + volatile cpm2_map_t *immap = cpm2_immr; + + local_irq_save(flags); + + /* initialize IDMA parameter RAM for this transfer */ + if (dinc) + pram->dcm = IDMA_DCM_DMA_WRAP_64 | IDMA_DCM_SINC + | IDMA_DCM_DINC | IDMA_DCM_SD_MEM2MEM; + else + pram->dcm = IDMA_DCM_DMA_WRAP_64 | IDMA_DCM_SINC + | IDMA_DCM_SD_MEM2MEM; + pram->ibdptr = pram->ibase; + pram->sts = unit_size; + pram->istate = 0; + + /* initialize the buffer descriptor */ + bd->dst = (uint) dst; + bd->src = virt_to_phys(src); + bd->len = bytes; + bd->flags = IDMA_BD_V | IDMA_BD_W | IDMA_BD_I | IDMA_BD_L | IDMA_BD_DGBL + | IDMA_BD_DBO_BE | IDMA_BD_SBO_BE | IDMA_BD_SDTB; + + /* issue the START_IDMA command to the CP */ + while (immap->im_cpm.cp_cpcr & CPM_CR_FLG); + immap->im_cpm.cp_cpcr = mk_cr_cmd(IDMA_PAGE, IDMA_SBLOCK, 0, + CPM_CR_START_IDMA) | CPM_CR_FLG; + while (immap->im_cpm.cp_cpcr & CPM_CR_FLG); + + /* wait for transfer to complete */ + while(bd->flags & IDMA_BD_V); + + local_irq_restore(flags); + + return; +} + +/* Same as idma_pci9_read, but 16-bit little-endian byte swapping is performed + * if the unit_size is 2, and 32-bit little-endian byte swapping is performed if + * the unit_size is 4.
+ */ +static void +idma_pci9_read_le(u8 *dst, u8 *src, int bytes, int unit_size, int sinc) +{ + int i; + u8 *p; + + idma_pci9_read(dst, src, bytes, unit_size, sinc); + switch(unit_size) { + case 2: + for (i = 0, p = dst; i < bytes; i += 2, p += 2) + swab16s((u16 *) p); + break; + case 4: + for (i = 0, p = dst; i < bytes; i += 4, p += 4) + swab32s((u32 *) p); + break; + default: + break; + } +} +EXPORT_SYMBOL(idma_pci9_init); +EXPORT_SYMBOL(idma_pci9_read); +EXPORT_SYMBOL(idma_pci9_read_le); + +static inline int is_pci_mem(unsigned long addr) +{ + if (addr >= MPC826x_PCI_LOWER_MMIO && + addr <= MPC826x_PCI_UPPER_MMIO) + return 1; + if (addr >= MPC826x_PCI_LOWER_MEM && + addr <= MPC826x_PCI_UPPER_MEM) + return 1; + return 0; +} + +#define is_pci_mem(pa) ( (pa > 0x80000000) && (pa < 0xc0000000)) +int readb(volatile unsigned char *addr) +{ + u8 val; + unsigned long pa = iopa((unsigned long) addr); + + if (!is_pci_mem(pa)) + return in_8(addr); + + idma_pci9_read((u8 *)&val, (u8 *)pa, sizeof(val), sizeof(val), 0); + return val; +} + +int readw(volatile unsigned short *addr) +{ + u16 val; + unsigned long pa = iopa((unsigned long) addr); + + if (!is_pci_mem(pa)) + return in_le16(addr); + + idma_pci9_read((u8 *)&val, (u8 *)pa, sizeof(val), sizeof(val), 0); + return swab16(val); +} + +unsigned readl(volatile unsigned *addr) +{ + u32 val; + unsigned long pa = iopa((unsigned long) addr); + + if (!is_pci_mem(pa)) + return in_le32(addr); + + idma_pci9_read((u8 *)&val, (u8 *)pa, sizeof(val), sizeof(val), 0); + return swab32(val); +} + +int inb(unsigned port) +{ + u8 val; + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)&val, (u8 *)addr, sizeof(val), sizeof(val), 0); + return val; +} + +int inw(unsigned port) +{ + u16 val; + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)&val, (u8 *)addr, sizeof(val), sizeof(val), 0); + return swab16(val); +} + +unsigned inl(unsigned port) +{ + u32 val; + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)&val, (u8 *)addr, sizeof(val), sizeof(val), 0); + return swab32(val); +} + +void insb(unsigned port, void *buf, int ns) +{ + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)buf, (u8 *)addr, ns*sizeof(u8), sizeof(u8), 0); +} + +void insw(unsigned port, void *buf, int ns) +{ + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)buf, (u8 *)addr, ns*sizeof(u16), sizeof(u16), 0); +} + +void insl(unsigned port, void *buf, int nl) +{ + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)buf, (u8 *)addr, nl*sizeof(u32), sizeof(u32), 0); +} + +void insw_ns(unsigned port, void *buf, int ns) +{ + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)buf, (u8 *)addr, ns*sizeof(u16), sizeof(u16), 0); +} + +void insl_ns(unsigned port, void *buf, int nl) +{ + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)buf, (u8 *)addr, nl*sizeof(u32), sizeof(u32), 0); +} + +void *memcpy_fromio(void *dest, unsigned long src, size_t count) +{ + unsigned long pa = iopa((unsigned long) src); + + if (is_pci_mem(pa)) + idma_pci9_read((u8 *)dest, (u8 *)pa, count, 32, 1); + else + memcpy(dest, (void *)src, count); + return dest; +} + +EXPORT_SYMBOL(readb); +EXPORT_SYMBOL(readw); +EXPORT_SYMBOL(readl); +EXPORT_SYMBOL(inb); +EXPORT_SYMBOL(inw); +EXPORT_SYMBOL(inl); +EXPORT_SYMBOL(insb); +EXPORT_SYMBOL(insw); +EXPORT_SYMBOL(insl); +EXPORT_SYMBOL(insw_ns); +EXPORT_SYMBOL(insl_ns); +EXPORT_SYMBOL(memcpy_fromio); + +#endif /* ifdef CONFIG_8260_PCI9 */ + +/* Indirect PCI routines adapted from arch/ppc/kernel/indirect_pci.c. 
+ * Copyright (C) 1998 Gabriel Paubert. + */ +#ifndef CONFIG_8260_PCI9 +#define cfg_read(val, addr, type, op) *val = op((type)(addr)) +#else +#define cfg_read(val, addr, type, op) \ + idma_pci9_read_le((u8*)(val),(u8*)(addr),sizeof(*(val)),sizeof(*(val)),0) +#endif + +#define cfg_write(val, addr, type, op) op((type *)(addr), (val)) + +static int indirect_write_config(struct pci_bus *pbus, unsigned int devfn, int where, + int size, u32 value) +{ + struct pci_controller *hose = pbus->sysdata; + u8 cfg_type = 0; + if (ppc_md.pci_exclude_device) + if (ppc_md.pci_exclude_device(pbus->number, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (hose->set_cfg_type) + if (pbus->number != hose->first_busno) + cfg_type = 1; + + out_be32(hose->cfg_addr, + (((where & 0xfc) | cfg_type) << 24) | (devfn << 16) + | ((pbus->number - hose->bus_offset) << 8) | 0x80); + + switch (size) + { + case 1: + cfg_write(value, hose->cfg_data + (where & 3), u8, out_8); + break; + case 2: + cfg_write(value, hose->cfg_data + (where & 2), u16, out_le16); + break; + case 4: + cfg_write(value, hose->cfg_data + (where & 0), u32, out_le32); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int indirect_read_config(struct pci_bus *pbus, unsigned int devfn, int where, + int size, u32 *value) +{ + struct pci_controller *hose = pbus->sysdata; + u8 cfg_type = 0; + if (ppc_md.pci_exclude_device) + if (ppc_md.pci_exclude_device(pbus->number, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (hose->set_cfg_type) + if (pbus->number != hose->first_busno) + cfg_type = 1; + + out_be32(hose->cfg_addr, + (((where & 0xfc) | cfg_type) << 24) | (devfn << 16) + | ((pbus->number - hose->bus_offset) << 8) | 0x80); + + switch (size) + { + case 1: + cfg_read(value, hose->cfg_data + (where & 3), u8 *, in_8); + break; + case 2: + cfg_read(value, hose->cfg_data + (where & 2), u16 *, in_le16); + break; + case 4: + cfg_read(value, hose->cfg_data + (where & 0), u32 *, in_le32); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops indirect_pci_ops = +{ + .read = indirect_read_config, + .write = indirect_write_config, +}; + +void +setup_m8260_indirect_pci(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data) +{ + hose->ops = &indirect_pci_ops; + hose->cfg_addr = (unsigned int *) ioremap(cfg_addr, 4); + hose->cfg_data = (unsigned char *) ioremap(cfg_data, 4); +} diff --git a/arch/ppc/syslib/mpc52xx_pic.c b/arch/ppc/syslib/mpc52xx_pic.c new file mode 100644 index 000000000..0f88e63b0 --- /dev/null +++ b/arch/ppc/syslib/mpc52xx_pic.c @@ -0,0 +1,252 @@ +/* + * arch/ppc/syslib/mpc52xx_pic.c + * + * Programmable Interrupt Controller functions for the Freescale MPC52xx + * embedded CPU. + * + * + * Maintainer : Sylvain Munaut + * + * Based on (well, mostly copied from) the code from the 2.4 kernel by + * Dale Farnsworth and Kent Borg. + * + * Copyright (C) 2004 Sylvain Munaut + * Copyright (C) 2003 Montavista Software, Inc + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + + +static struct mpc52xx_intr *intr; +static struct mpc52xx_sdma *sdma; + +static void +mpc52xx_ic_disable(unsigned int irq) +{ + u32 val; + + if (irq == MPC52xx_IRQ0) { + val = in_be32(&intr->ctrl); + val &= ~(1 << 11); + out_be32(&intr->ctrl, val); + } + else if (irq < MPC52xx_IRQ1) { + BUG(); + } + else if (irq <= MPC52xx_IRQ3) { + val = in_be32(&intr->ctrl); + val &= ~(1 << (10 - (irq - MPC52xx_IRQ1))); + out_be32(&intr->ctrl, val); + } + else if (irq < MPC52xx_SDMA_IRQ_BASE) { + val = in_be32(&intr->main_mask); + val |= 1 << (16 - (irq - MPC52xx_MAIN_IRQ_BASE)); + out_be32(&intr->main_mask, val); + } + else if (irq < MPC52xx_PERP_IRQ_BASE) { + val = in_be32(&sdma->IntMask); + val |= 1 << (irq - MPC52xx_SDMA_IRQ_BASE); + out_be32(&sdma->IntMask, val); + } + else { + val = in_be32(&intr->per_mask); + val |= 1 << (31 - (irq - MPC52xx_PERP_IRQ_BASE)); + out_be32(&intr->per_mask, val); + } +} + +static void +mpc52xx_ic_enable(unsigned int irq) +{ + u32 val; + + if (irq == MPC52xx_IRQ0) { + val = in_be32(&intr->ctrl); + val |= 1 << 11; + out_be32(&intr->ctrl, val); + } + else if (irq < MPC52xx_IRQ1) { + BUG(); + } + else if (irq <= MPC52xx_IRQ3) { + val = in_be32(&intr->ctrl); + val |= 1 << (10 - (irq - MPC52xx_IRQ1)); + out_be32(&intr->ctrl, val); + } + else if (irq < MPC52xx_SDMA_IRQ_BASE) { + val = in_be32(&intr->main_mask); + val &= ~(1 << (16 - (irq - MPC52xx_MAIN_IRQ_BASE))); + out_be32(&intr->main_mask, val); + } + else if (irq < MPC52xx_PERP_IRQ_BASE) { + val = in_be32(&sdma->IntMask); + val &= ~(1 << (irq - MPC52xx_SDMA_IRQ_BASE)); + out_be32(&sdma->IntMask, val); + } + else { + val = in_be32(&intr->per_mask); + val &= ~(1 << (31 - (irq - MPC52xx_PERP_IRQ_BASE))); + out_be32(&intr->per_mask, val); + } +} + +static void +mpc52xx_ic_ack(unsigned int irq) +{ + u32 val; + + /* + * Only some irqs are reset here, others in interrupting hardware. + */ + + switch (irq) { + case MPC52xx_IRQ0: + val = in_be32(&intr->ctrl); + val |= 0x08000000; + out_be32(&intr->ctrl, val); + break; + case MPC52xx_CCS_IRQ: + val = in_be32(&intr->enc_status); + val |= 0x00000400; + out_be32(&intr->enc_status, val); + break; + case MPC52xx_IRQ1: + val = in_be32(&intr->ctrl); + val |= 0x04000000; + out_be32(&intr->ctrl, val); + break; + case MPC52xx_IRQ2: + val = in_be32(&intr->ctrl); + val |= 0x02000000; + out_be32(&intr->ctrl, val); + break; + case MPC52xx_IRQ3: + val = in_be32(&intr->ctrl); + val |= 0x01000000; + out_be32(&intr->ctrl, val); + break; + default: + if (irq >= MPC52xx_SDMA_IRQ_BASE + && irq < (MPC52xx_SDMA_IRQ_BASE + MPC52xx_SDMA_IRQ_NUM)) { + out_be32(&sdma->IntPend, + 1 << (irq - MPC52xx_SDMA_IRQ_BASE)); + } + break; + } +} + +static void +mpc52xx_ic_disable_and_ack(unsigned int irq) +{ + mpc52xx_ic_disable(irq); + mpc52xx_ic_ack(irq); +} + +static void +mpc52xx_ic_end(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) + mpc52xx_ic_enable(irq); +} + +static struct hw_interrupt_type mpc52xx_ic = { + "MPC52xx", + NULL, /* startup(irq) */ + NULL, /* shutdown(irq) */ + mpc52xx_ic_enable, /* enable(irq) */ + mpc52xx_ic_disable, /* disable(irq) */ + mpc52xx_ic_disable_and_ack, /* disable_and_ack(irq) */ + mpc52xx_ic_end, /* end(irq) */ + 0 /* set_affinity(irq, cpumask) SMP. 
*/ +}; + +void __init +mpc52xx_init_irq(void) +{ + int i; + + /* Remap the necessary zones */ + intr = (struct mpc52xx_intr *) + ioremap(MPC52xx_INTR, sizeof(struct mpc52xx_intr)); + sdma = (struct mpc52xx_sdma *) + ioremap(MPC52xx_SDMA, sizeof(struct mpc52xx_sdma)); + + if ((intr==NULL) || (sdma==NULL)) + panic("Can't ioremap PIC/SDMA register for init_irq !"); + + /* Disable all interrupt sources. */ + out_be32(&sdma->IntPend, 0xffffffff); /* 1 means clear pending */ + out_be32(&sdma->IntMask, 0xffffffff); /* 1 means disabled */ + out_be32(&intr->per_mask, 0x7ffffc00); /* 1 means disabled */ + out_be32(&intr->main_mask, 0x00010fff); /* 1 means disabled */ + out_be32(&intr->ctrl, + 0x0f000000 | /* clear IRQ 0-3 */ + 0x00c00000 | /* IRQ0: level-sensitive, active low */ + 0x00001000 | /* MEE master external enable */ + 0x00000000 | /* 0 means disable IRQ 0-3 */ + 0x00000001); /* CEb route critical normally */ + + /* Zero a bunch of the priority settings. */ + out_be32(&intr->per_pri1, 0); + out_be32(&intr->per_pri2, 0); + out_be32(&intr->per_pri3, 0); + out_be32(&intr->main_pri1, 0); + out_be32(&intr->main_pri2, 0); + + /* Initialize irq_desc[i].handler's with mpc52xx_ic. */ + for (i = 0; i < NR_IRQS; i++) { + irq_desc[i].handler = &mpc52xx_ic; + irq_desc[i].status = IRQ_LEVEL; + } +} + +int +mpc52xx_get_irq(struct pt_regs *regs) +{ + u32 status; + int irq = -1; + + status = in_be32(&intr->enc_status); + + if (status & 0x00000400) { /* critical */ + irq = (status >> 8) & 0x3; + if (irq == 2) /* high priority peripheral */ + goto peripheral; + irq += MPC52xx_CRIT_IRQ_BASE; + } + else if (status & 0x00200000) { /* main */ + irq = (status >> 16) & 0x1f; + if (irq == 4) /* low priority peripheral */ + goto peripheral; + irq += MPC52xx_MAIN_IRQ_BASE; + } + else if (status & 0x20000000) { /* peripheral */ +peripheral: + irq = (status >> 24) & 0x1f; + if (irq == 0) { /* bestcomm */ + status = in_be32(&sdma->IntPend); + irq = ffs(status) + MPC52xx_SDMA_IRQ_BASE-1; + } + else + irq += MPC52xx_PERP_IRQ_BASE; + } + + return irq; +} + diff --git a/arch/ppc/syslib/mpc52xx_setup.c b/arch/ppc/syslib/mpc52xx_setup.c new file mode 100644 index 000000000..631cea34b --- /dev/null +++ b/arch/ppc/syslib/mpc52xx_setup.c @@ -0,0 +1,228 @@ +/* + * arch/ppc/syslib/mpc52xx_common.c + * + * Common code for the boards based on Freescale MPC52xx embedded CPU. + * + * + * Maintainer : Sylvain Munaut + * + * Support for other bootloaders than UBoot by Dale Farnsworth + * + * + * Copyright (C) 2004 Sylvain Munaut + * Copyright (C) 2003 Montavista Software, Inc + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include + +#include +#include +#include +#include +#include + +extern bd_t __res; + +static int core_mult[] = { /* CPU Frequency multiplier, taken */ + 0, 0, 0, 10, 20, 20, 25, 45, /* from the datasheet used to compute */ + 30, 55, 40, 50, 0, 60, 35, 0, /* CPU frequency from XLB freq and */ + 30, 25, 65, 10, 70, 20, 75, 45, /* external jumper config */ + 0, 55, 40, 50, 80, 60, 35, 0 +}; + +void +mpc52xx_restart(char *cmd) +{ + struct mpc52xx_gpt* gpt0 = (struct mpc52xx_gpt*) MPC52xx_GPTx(0); + + local_irq_disable(); + + /* Turn on the watchdog and wait for it to expire. 
It effectively + does a reset */ + if (gpt0 != NULL) { + out_be32(&gpt0->count, 0x000000ff); + out_be32(&gpt0->mode, 0x00009004); + } else + printk(KERN_ERR "mpc52xx_restart: Unable to ioremap GPT0 registers, -> looping ..."); + + while (1); +} + +void +mpc52xx_halt(void) +{ + local_irq_disable(); + + while (1); +} + +void +mpc52xx_power_off(void) +{ + /* By default we don't have any way of shut down. + If a specific board wants to, it can set the power down + code to any hardware implementation dependent code */ + mpc52xx_halt(); +} + + +void __init +mpc52xx_set_bat(void) +{ + /* Set BAT 2 to map the 0xf0000000 area */ + /* This mapping is used during mpc52xx_progress, + * mpc52xx_find_end_of_memory, and UARTs/GPIO access for debug + */ + mb(); + mtspr(DBAT2U, 0xf0001ffe); + mtspr(DBAT2L, 0xf000002a); + mb(); +} + +void __init +mpc52xx_map_io(void) +{ + /* Here we only map the MBAR */ + io_block_mapping( + MPC52xx_MBAR_VIRT, MPC52xx_MBAR, MPC52xx_MBAR_SIZE, _PAGE_IO); +} + + +#ifdef CONFIG_SERIAL_TEXT_DEBUG +#ifdef MPC52xx_PF_CONSOLE_PORT +#define MPC52xx_CONSOLE MPC52xx_PSCx(MPC52xx_PF_CONSOLE_PORT) +#else +#error "mpc52xx PSC for console not selected" +#endif + +void +mpc52xx_progress(char *s, unsigned short hex) +{ + struct mpc52xx_psc *psc = (struct mpc52xx_psc *)MPC52xx_CONSOLE; + char c; + + /* Don't we need to disable serial interrupts ? */ + + while ((c = *s++) != 0) { + if (c == '\n') { + while (!(in_be16(&psc->mpc52xx_psc_status) & + MPC52xx_PSC_SR_TXRDY)) ; + out_8(&psc->mpc52xx_psc_buffer_8, '\r'); + } + while (!(in_be16(&psc->mpc52xx_psc_status) & + MPC52xx_PSC_SR_TXRDY)) ; + out_8(&psc->mpc52xx_psc_buffer_8, c); + } +} + +#endif /* CONFIG_SERIAL_TEXT_DEBUG */ + + +unsigned long __init +mpc52xx_find_end_of_memory(void) +{ + u32 ramsize = __res.bi_memsize; + + /* + * if bootloader passed a memsize, just use it + * else get size from sdram config registers + */ + if (ramsize == 0) { + struct mpc52xx_mmap_ctl *mmap_ctl; + u32 sdram_config_0, sdram_config_1; + + /* Temp BAT2 mapping active when this is called ! 
*/ + mmap_ctl = (struct mpc52xx_mmap_ctl*) MPC52xx_MMAP_CTL; + + sdram_config_0 = in_be32(&mmap_ctl->sdram0); + sdram_config_1 = in_be32(&mmap_ctl->sdram1); + + if ((sdram_config_0 & 0x1f) >= 0x13) + ramsize = 1 << ((sdram_config_0 & 0xf) + 17); + + if (((sdram_config_1 & 0x1f) >= 0x13) && + ((sdram_config_1 & 0xfff00000) == ramsize)) + ramsize += 1 << ((sdram_config_1 & 0xf) + 17); + + iounmap(mmap_ctl); + } + + return ramsize; +} + +void __init +mpc52xx_calibrate_decr(void) +{ + int current_time, previous_time; + int tbl_start, tbl_end; + unsigned int xlbfreq, cpufreq, ipbfreq, pcifreq, divisor; + + xlbfreq = __res.bi_busfreq; + /* if bootloader didn't pass bus frequencies, calculate them */ + if (xlbfreq == 0) { + /* Get RTC & Clock manager modules */ + struct mpc52xx_rtc *rtc; + struct mpc52xx_cdm *cdm; + + rtc = (struct mpc52xx_rtc*) + ioremap(MPC52xx_RTC, sizeof(struct mpc52xx_rtc)); + cdm = (struct mpc52xx_cdm*) + ioremap(MPC52xx_CDM, sizeof(struct mpc52xx_cdm)); + + if ((rtc==NULL) || (cdm==NULL)) + panic("Can't ioremap RTC/CDM while computing bus freq"); + + /* Count bus clock during 1/64 sec */ + out_be32(&rtc->dividers, 0x8f1f0000); /* Set RTC 64x faster */ + previous_time = in_be32(&rtc->time); + while ((current_time = in_be32(&rtc->time)) == previous_time) ; + tbl_start = get_tbl(); + previous_time = current_time; + while ((current_time = in_be32(&rtc->time)) == previous_time) ; + tbl_end = get_tbl(); + out_be32(&rtc->dividers, 0xffff0000); /* Restore RTC */ + + /* Compute all frequency from that & CDM settings */ + xlbfreq = (tbl_end - tbl_start) << 8; + cpufreq = (xlbfreq * core_mult[in_be32(&cdm->rstcfg)&0x1f])/10; + ipbfreq = (in_8(&cdm->ipb_clk_sel) & 1) ? + xlbfreq / 2 : xlbfreq; + switch (in_8(&cdm->pci_clk_sel) & 3) { + case 0: + pcifreq = ipbfreq; + break; + case 1: + pcifreq = ipbfreq / 2; + break; + default: + pcifreq = xlbfreq / 4; + break; + } + __res.bi_busfreq = xlbfreq; + __res.bi_intfreq = cpufreq; + __res.bi_ipbfreq = ipbfreq; + __res.bi_pcifreq = pcifreq; + + /* Release mapping */ + iounmap((void*)rtc); + iounmap((void*)cdm); + } + + divisor = 4; + + tb_ticks_per_jiffy = xlbfreq / HZ / divisor; + tb_to_us = mulhwu_scale_factor(xlbfreq / divisor, 1000000); +} + + +void __init +mpc52xx_add_board_devices(struct ocp_def board_ocp[]) { + while (board_ocp->vendor != OCP_VENDOR_INVALID) + if(ocp_add_one_device(board_ocp++)) + printk("mpc5200-ocp: Failed to add board device !\n"); +} + diff --git a/arch/ppc/syslib/ocp.c b/arch/ppc/syslib/ocp.c new file mode 100644 index 000000000..a5156c517 --- /dev/null +++ b/arch/ppc/syslib/ocp.c @@ -0,0 +1,485 @@ +/* + * ocp.c + * + * (c) Benjamin Herrenschmidt (benh@kernel.crashing.org) + * Mipsys - France + * + * Derived from work (c) Armin Kuster akuster@pacbell.net + * + * Additional support and port to 2.6 LDM/sysfs by + * Matt Porter + * Copyright 2004 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * OCP (On Chip Peripheral) is a software emulated "bus" with a + * pseudo discovery method for dumb peripherals. Usually these type + * of peripherals are found on embedded SoC (System On a Chip) + * processors or highly integrated system controllers that have + * a host bridge and many peripherals. 
Common examples where + * this is already used include the PPC4xx, PPC85xx, MPC52xx, + * and MV64xxx parts. + * + * This subsystem creates a standard OCP bus type within the + * device model. The devices on the OCP bus are seeded by an + * initial OCP device array created by the arch-specific code. + * Device entries can be added/removed/modified through OCP + * helper functions to accommodate system and board-specific + * parameters commonly found in embedded systems. OCP also + * provides a standard method for devices to describe extended + * attributes about themselves to the system. A standard access + * method allows OCP drivers to obtain the information, both + * SoC-specific and system/board-specific, needed for operation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +//#define DBG(x) printk x +#define DBG(x) + +extern int mem_init_done; + +extern struct ocp_def core_ocp[]; /* Static list of devices, provided by + CPU core */ + +LIST_HEAD(ocp_devices); /* List of all OCP devices */ +DECLARE_RWSEM(ocp_devices_sem); /* Global semaphores for those lists */ + +static int ocp_inited; + +/* Sysfs support */ +#define OCP_DEF_ATTR(field, format_string) \ +static ssize_t \ +show_##field(struct device *dev, char *buf) \ +{ \ + struct ocp_device *odev = to_ocp_dev(dev); \ + \ + return sprintf(buf, format_string, odev->def->field); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); + +OCP_DEF_ATTR(vendor, "0x%04x\n"); +OCP_DEF_ATTR(function, "0x%04x\n"); +OCP_DEF_ATTR(index, "0x%04x\n"); +#ifdef CONFIG_PTE_64BIT +OCP_DEF_ATTR(paddr, "0x%016Lx\n"); +#else +OCP_DEF_ATTR(paddr, "0x%08lx\n"); +#endif +OCP_DEF_ATTR(irq, "%d\n"); +OCP_DEF_ATTR(pm, "%lu\n"); + +void ocp_create_sysfs_dev_files(struct ocp_device *odev) +{ + struct device *dev = &odev->dev; + + /* Current OCP device def attributes */ + device_create_file(dev, &dev_attr_vendor); + device_create_file(dev, &dev_attr_function); + device_create_file(dev, &dev_attr_index); + device_create_file(dev, &dev_attr_paddr); + device_create_file(dev, &dev_attr_irq); + device_create_file(dev, &dev_attr_pm); + /* Current OCP device additions attributes */ + if (odev->def->additions && odev->def->show) + odev->def->show(dev); +} + +/** + * ocp_device_match - Match one driver to one device + * @drv: driver to match + * @dev: device to match + * + * This function returns 0 if the driver and device don't match + */ +static int +ocp_device_match(struct device *dev, struct device_driver *drv) +{ + struct ocp_device *ocp_dev = to_ocp_dev(dev); + struct ocp_driver *ocp_drv = to_ocp_drv(drv); + const struct ocp_device_id *ids = ocp_drv->id_table; + + if (!ids) + return 0; + + while (ids->vendor || ids->function) { + if ((ids->vendor == OCP_ANY_ID + || ids->vendor == ocp_dev->def->vendor) + && (ids->function == OCP_ANY_ID + || ids->function == ocp_dev->def->function)) + return 1; + ids++; + } + return 0; +} + +static int +ocp_device_probe(struct device *dev) +{ + int error = 0; + struct ocp_driver *drv; + struct ocp_device *ocp_dev; + + drv = to_ocp_drv(dev->driver); + ocp_dev = to_ocp_dev(dev); + + if (drv->probe) { + error = drv->probe(ocp_dev); + if (error >= 0) { + ocp_dev->driver = drv; + error = 0; + } + } + return error; +} + +static int +ocp_device_remove(struct device *dev) +{ + struct ocp_device *ocp_dev = to_ocp_dev(dev); + + if (ocp_dev->driver) { + if (ocp_dev->driver->remove) + ocp_dev->driver->remove(ocp_dev); +
ocp_dev->driver = NULL; + } + return 0; +} + +static int +ocp_device_suspend(struct device *dev, u32 state) +{ + struct ocp_device *ocp_dev = to_ocp_dev(dev); + struct ocp_driver *ocp_drv = to_ocp_drv(dev->driver); + + if (dev->driver && ocp_drv->suspend) + return ocp_drv->suspend(ocp_dev, state); + return 0; +} + +static int +ocp_device_resume(struct device *dev) +{ + struct ocp_device *ocp_dev = to_ocp_dev(dev); + struct ocp_driver *ocp_drv = to_ocp_drv(dev->driver); + + if (dev->driver && ocp_drv->resume) + return ocp_drv->resume(ocp_dev); + return 0; +} + +struct bus_type ocp_bus_type = { + .name = "ocp", + .match = ocp_device_match, + .suspend = ocp_device_suspend, + .resume = ocp_device_resume, +}; + +/** + * ocp_register_driver - Register an OCP driver + * @drv: pointer to statically defined ocp_driver structure + * + * The driver's probe() callback is called either recursively + * by this function or upon later call of ocp_driver_init + * + * NOTE: Detection of devices is a 2 pass step on this implementation, + * hotswap isn't supported. First, all OCP devices are put in the device + * list, _then_ all drivers are probed on each match. + */ +int +ocp_register_driver(struct ocp_driver *drv) +{ + /* initialize common driver fields */ + drv->driver.name = drv->name; + drv->driver.bus = &ocp_bus_type; + drv->driver.probe = ocp_device_probe; + drv->driver.remove = ocp_device_remove; + + /* register with core */ + return driver_register(&drv->driver); +} + +/** + * ocp_unregister_driver - Unregister an OCP driver + * @drv: pointer to statically defined ocp_driver structure + * + * The driver's remove() callback is called recursively + * by this function for any device already registered + */ +void +ocp_unregister_driver(struct ocp_driver *drv) +{ + DBG(("ocp: ocp_unregister_driver(%s)...\n", drv->name)); + + driver_unregister(&drv->driver); + + DBG(("ocp: ocp_unregister_driver(%s)... done.\n", drv->name)); +} + +/* Core of ocp_find_device(). Caller must hold ocp_devices_sem */ +static struct ocp_device * +__ocp_find_device(unsigned int vendor, unsigned int function, int index) +{ + struct list_head *entry; + struct ocp_device *dev, *found = NULL; + + DBG(("ocp: __ocp_find_device(vendor: %x, function: %x, index: %d)...\n", vendor, function, index)); + + list_for_each(entry, &ocp_devices) { + dev = list_entry(entry, struct ocp_device, link); + if (vendor != OCP_ANY_ID && vendor != dev->def->vendor) + continue; + if (function != OCP_ANY_ID && function != dev->def->function) + continue; + if (index != OCP_ANY_INDEX && index != dev->def->index) + continue; + found = dev; + break; + } + + DBG(("ocp: __ocp_find_device(vendor: %x, function: %x, index: %d)... done\n", vendor, function, index)); + + return found; +} + +/** + * ocp_find_device - Find a device by function & index + * @vendor: vendor ID of the device (or OCP_ANY_ID) + * @function: function code of the device (or OCP_ANY_ID) + * @idx: index of the device (or OCP_ANY_INDEX) + * + * This function allows a lookup of a given function by it's + * index, it's typically used to find the MAL or ZMII associated + * with an EMAC or similar horrors. + * You can pass vendor, though you usually want OCP_ANY_ID there... 
+ */ +struct ocp_device * +ocp_find_device(unsigned int vendor, unsigned int function, int index) +{ + struct ocp_device *dev; + + down_read(&ocp_devices_sem); + dev = __ocp_find_device(vendor, function, index); + up_read(&ocp_devices_sem); + + return dev; +} + +/** + * ocp_get_one_device - Find a def by function & index + * @vendor: vendor ID of the device (or OCP_ANY_ID) + * @function: function code of the device (or OCP_ANY_ID) + * @idx: index of the device (or OCP_ANY_INDEX) + * + * This function allows a lookup of a given ocp_def by it's + * vendor, function, and index. The main purpose for is to + * allow modification of the def before binding to the driver + */ +struct ocp_def * +ocp_get_one_device(unsigned int vendor, unsigned int function, int index) +{ + struct ocp_device *dev; + struct ocp_def *found = NULL; + + DBG(("ocp: ocp_get_one_device(vendor: %x, function: %x, index: %d)...\n", + vendor, function, index)); + + dev = ocp_find_device(vendor, function, index); + + if (dev) + found = dev->def; + + DBG(("ocp: ocp_get_one_device(vendor: %x, function: %x, index: %d)... done.\n", + vendor, function, index)); + + return found; +} + +/** + * ocp_add_one_device - Add a device + * @def: static device definition structure + * + * This function adds a device definition to the + * device list. It may only be called before + * ocp_driver_init() and will return an error + * otherwise. + */ +int +ocp_add_one_device(struct ocp_def *def) +{ + struct ocp_device *dev; + + DBG(("ocp: ocp_add_one_device()...\n")); + + /* Can't be called after ocp driver init */ + if (ocp_inited) + return 1; + + if (mem_init_done) + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + else + dev = alloc_bootmem(sizeof(*dev)); + + if (dev == NULL) + return 1; + memset(dev, 0, sizeof(*dev)); + dev->def = def; + dev->current_state = 4; + sprintf(dev->name, "OCP device %04x:%04x:%04x", + dev->def->vendor, dev->def->function, dev->def->index); + down_write(&ocp_devices_sem); + list_add_tail(&dev->link, &ocp_devices); + up_write(&ocp_devices_sem); + + DBG(("ocp: ocp_add_one_device()...done\n")); + + return 0; +} + +/** + * ocp_remove_one_device - Remove a device by function & index + * @vendor: vendor ID of the device (or OCP_ANY_ID) + * @function: function code of the device (or OCP_ANY_ID) + * @idx: index of the device (or OCP_ANY_INDEX) + * + * This function allows removal of a given function by its + * index. It may only be called before ocp_driver_init() + * and will return an error otherwise. + */ +int +ocp_remove_one_device(unsigned int vendor, unsigned int function, int index) +{ + struct ocp_device *dev; + + DBG(("ocp: ocp_remove_one_device(vendor: %x, function: %x, index: %d)...\n", vendor, function, index)); + + /* Can't be called after ocp driver init */ + if (ocp_inited) + return 1; + + down_write(&ocp_devices_sem); + dev = __ocp_find_device(vendor, function, index); + list_del((struct list_head *)dev); + up_write(&ocp_devices_sem); + + DBG(("ocp: ocp_remove_one_device(vendor: %x, function: %x, index: %d)... done.\n", vendor, function, index)); + + return 0; +} + +/** + * ocp_for_each_device - Iterate over OCP devices + * @callback: routine to execute for each ocp device. + * @arg: user data to be passed to callback routine. + * + * This routine holds the ocp_device semaphore, so the + * callback routine cannot modify the ocp_device list. 
+ */ +void +ocp_for_each_device(void(*callback)(struct ocp_device *, void *arg), void *arg) +{ + struct list_head *entry; + + if (callback) { + down_read(&ocp_devices_sem); + list_for_each(entry, &ocp_devices) + callback(list_entry(entry, struct ocp_device, link), + arg); + up_read(&ocp_devices_sem); + } +} + +/** + * ocp_early_init - Init OCP device management + * + * This function builds the list of devices before setup_arch. + * This allows platform code to modify the device lists before + * they are bound to drivers (changes to paddr, removing devices + * etc) + */ +int __init +ocp_early_init(void) +{ + struct ocp_def *def; + + DBG(("ocp: ocp_early_init()...\n")); + + /* Fill the devices list */ + for (def = core_ocp; def->vendor != OCP_VENDOR_INVALID; def++) + ocp_add_one_device(def); + + DBG(("ocp: ocp_early_init()... done.\n")); + + return 0; +} + +/** + * ocp_driver_init - Init OCP device management + * + * This function is meant to be called via OCP bus registration. + */ +static int __init +ocp_driver_init(void) +{ + int ret = 0, index = 0; + struct device *ocp_bus; + struct list_head *entry; + struct ocp_device *dev; + + if (ocp_inited) + return ret; + ocp_inited = 1; + + DBG(("ocp: ocp_driver_init()...\n")); + + /* Allocate/register primary OCP bus */ + ocp_bus = kmalloc(sizeof(struct device), GFP_KERNEL); + if (ocp_bus == NULL) + return 1; + memset(ocp_bus, 0, sizeof(struct device)); + strcpy(ocp_bus->bus_id, "ocp"); + + bus_register(&ocp_bus_type); + + device_register(ocp_bus); + + /* Put each OCP device into global device list */ + list_for_each(entry, &ocp_devices) { + dev = list_entry(entry, struct ocp_device, link); + sprintf(dev->dev.bus_id, "%2.2x", index); + dev->dev.parent = ocp_bus; + dev->dev.bus = &ocp_bus_type; + device_register(&dev->dev); + ocp_create_sysfs_dev_files(dev); + index++; + } + + DBG(("ocp: ocp_driver_init()... done.\n")); + + return 0; +} + +postcore_initcall(ocp_driver_init); + +EXPORT_SYMBOL(ocp_bus_type); +EXPORT_SYMBOL(ocp_find_device); +EXPORT_SYMBOL(ocp_register_driver); +EXPORT_SYMBOL(ocp_unregister_driver); diff --git a/arch/ppc/syslib/ppc4xx_sgdma.c b/arch/ppc/syslib/ppc4xx_sgdma.c new file mode 100644 index 000000000..49c6e9c61 --- /dev/null +++ b/arch/ppc/syslib/ppc4xx_sgdma.c @@ -0,0 +1,455 @@ +/* + * arch/ppc/kernel/ppc4xx_sgdma.c + * + * IBM PPC4xx DMA engine scatter/gather library + * + * Copyright 2002-2003 MontaVista Software Inc. + * + * Cleaned up and converted to new DCR access + * Matt Porter + * + * Original code by Armin Kuster + * and Pete Popov + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +void +ppc4xx_set_sg_addr(int dmanr, phys_addr_t sg_addr) +{ + if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) { + printk("ppc4xx_set_sg_addr: bad channel: %d\n", dmanr); + return; + } + +#ifdef PPC4xx_DMA_64BIT + mtdcr(DCRN_ASGH0 + (dmanr * 0x8), (u32)(sg_addr >> 32)); +#endif + mtdcr(DCRN_ASG0 + (dmanr * 0x8), (u32)sg_addr); +} + +/* + * Add a new sgl descriptor to the end of a scatter/gather list + * which was created by alloc_dma_handle(). 
+ * + * For a memory to memory transfer, both dma addresses must be + * valid. For a peripheral to memory transfer, one of the addresses + * must be set to NULL, depending on the direction of the transfer: + * memory to peripheral: set dst_addr to NULL, + * peripheral to memory: set src_addr to NULL. + */ +int +ppc4xx_add_dma_sgl(sgl_handle_t handle, phys_addr_t src_addr, phys_addr_t dst_addr, + unsigned int count) +{ + sgl_list_info_t *psgl = (sgl_list_info_t *) handle; + ppc_dma_ch_t *p_dma_ch; + + if (!handle) { + printk("ppc4xx_add_dma_sgl: null handle\n"); + return DMA_STATUS_BAD_HANDLE; + } + + if (psgl->dmanr >= MAX_PPC4xx_DMA_CHANNELS) { + printk("ppc4xx_add_dma_sgl: bad channel: %d\n", psgl->dmanr); + return DMA_STATUS_BAD_CHANNEL; + } + + p_dma_ch = &dma_channels[psgl->dmanr]; + +#ifdef DEBUG_4xxDMA + { + int error = 0; + unsigned int aligned = + (unsigned) src_addr | (unsigned) dst_addr | count; + switch (p_dma_ch->pwidth) { + case PW_8: + break; + case PW_16: + if (aligned & 0x1) + error = 1; + break; + case PW_32: + if (aligned & 0x3) + error = 1; + break; + case PW_64: + if (aligned & 0x7) + error = 1; + break; + default: + printk("ppc4xx_add_dma_sgl: invalid bus width: 0x%x\n", + p_dma_ch->pwidth); + return DMA_STATUS_GENERAL_ERROR; + } + if (error) + printk + ("Alignment warning: ppc4xx_add_dma_sgl src 0x%x dst 0x%x count 0x%x bus width var %d\n", + src_addr, dst_addr, count, p_dma_ch->pwidth); + + } +#endif + + if ((unsigned) (psgl->ptail + 1) >= ((unsigned) psgl + SGL_LIST_SIZE)) { + printk("sgl handle out of memory \n"); + return DMA_STATUS_OUT_OF_MEMORY; + } + + if (!psgl->ptail) { + psgl->phead = (ppc_sgl_t *) + ((unsigned) psgl + sizeof (sgl_list_info_t)); + psgl->phead_dma = psgl->dma_addr + sizeof(sgl_list_info_t); + psgl->ptail = psgl->phead; + psgl->ptail_dma = psgl->phead_dma; + } else { + psgl->ptail->next = psgl->ptail_dma + sizeof(ppc_sgl_t); + psgl->ptail++; + psgl->ptail_dma += sizeof(ppc_sgl_t); + } + + psgl->ptail->control = psgl->control; + psgl->ptail->src_addr = src_addr; + psgl->ptail->dst_addr = dst_addr; + psgl->ptail->control_count = (count >> p_dma_ch->shift) | + psgl->sgl_control; + psgl->ptail->next = (uint32_t) NULL; + + return DMA_STATUS_GOOD; +} + +/* + * Enable (start) the DMA described by the sgl handle. + */ +void +ppc4xx_enable_dma_sgl(sgl_handle_t handle) +{ + sgl_list_info_t *psgl = (sgl_list_info_t *) handle; + ppc_dma_ch_t *p_dma_ch; + uint32_t sg_command; + + if (!handle) { + printk("ppc4xx_enable_dma_sgl: null handle\n"); + return; + } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) { + printk("ppc4xx_enable_dma_sgl: bad channel in handle %d\n", + psgl->dmanr); + return; + } else if (!psgl->phead) { + printk("ppc4xx_enable_dma_sgl: sg list empty\n"); + return; + } + + p_dma_ch = &dma_channels[psgl->dmanr]; + psgl->ptail->control_count &= ~SG_LINK; /* make this the last dscrptr */ + sg_command = mfdcr(DCRN_ASGC); + + ppc4xx_set_sg_addr(psgl->dmanr, psgl->phead_dma); + + sg_command |= SSG_ENABLE(psgl->dmanr); + + mtdcr(DCRN_ASGC, sg_command); /* start transfer */ +} + +/* + * Halt an active scatter/gather DMA operation. 
+ */ +void +ppc4xx_disable_dma_sgl(sgl_handle_t handle) +{ + sgl_list_info_t *psgl = (sgl_list_info_t *) handle; + uint32_t sg_command; + + if (!handle) { + printk("ppc4xx_disable_dma_sgl: null handle\n"); + return; + } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) { + printk("ppc4xx_disable_dma_sgl: bad channel in handle %d\n", + psgl->dmanr); + return; + } + + sg_command = mfdcr(DCRN_ASGC); + sg_command &= ~SSG_ENABLE(psgl->dmanr); + mtdcr(DCRN_ASGC, sg_command); /* stop transfer */ +} + +/* + * Returns number of bytes left to be transferred from the entire sgl list. + * *src_addr and *dst_addr get set to the source/destination address of + * the sgl descriptor where the DMA stopped. + * + * An sgl transfer must NOT be active when this function is called. + */ +int +ppc4xx_get_dma_sgl_residue(sgl_handle_t handle, phys_addr_t * src_addr, + phys_addr_t * dst_addr) +{ + sgl_list_info_t *psgl = (sgl_list_info_t *) handle; + ppc_dma_ch_t *p_dma_ch; + ppc_sgl_t *pnext, *sgl_addr; + uint32_t count_left; + + if (!handle) { + printk("ppc4xx_get_dma_sgl_residue: null handle\n"); + return DMA_STATUS_BAD_HANDLE; + } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) { + printk("ppc4xx_get_dma_sgl_residue: bad channel in handle %d\n", + psgl->dmanr); + return DMA_STATUS_BAD_CHANNEL; + } + + sgl_addr = (ppc_sgl_t *) __va(mfdcr(DCRN_ASG0 + (psgl->dmanr * 0x8))); + count_left = mfdcr(DCRN_DMACT0 + (psgl->dmanr * 0x8)); + + if (!sgl_addr) { + printk("ppc4xx_get_dma_sgl_residue: sgl addr register is null\n"); + goto error; + } + + pnext = psgl->phead; + while (pnext && + ((unsigned) pnext < ((unsigned) psgl + SGL_LIST_SIZE) && + (pnext != sgl_addr)) + ) { + pnext++; + } + + if (pnext == sgl_addr) { /* found the sgl descriptor */ + + *src_addr = pnext->src_addr; + *dst_addr = pnext->dst_addr; + + /* + * Now search the remaining descriptors and add their count. + * We already have the remaining count from this descriptor in + * count_left. + */ + pnext++; + + while ((pnext != psgl->ptail) && + ((unsigned) pnext < ((unsigned) psgl + SGL_LIST_SIZE)) + ) { + count_left += pnext->control_count & SG_COUNT_MASK; + pnext++; /* advance to the next descriptor so the loop terminates */ + } + + if (pnext != psgl->ptail) { /* should never happen */ + printk + ("ppc4xx_get_dma_sgl_residue error (1) psgl->ptail 0x%x handle 0x%x\n", + (unsigned int) psgl->ptail, (unsigned int) handle); + goto error; + } + + /* success */ + p_dma_ch = &dma_channels[psgl->dmanr]; + return (count_left << p_dma_ch->shift); /* count in bytes */ + + } else { + /* this shouldn't happen */ + printk + ("get_dma_sgl_residue, unable to match current address 0x%x, handle 0x%x\n", + (unsigned int) sgl_addr, (unsigned int) handle); + + } + + error: + *src_addr = (phys_addr_t) NULL; + *dst_addr = (phys_addr_t) NULL; + return 0; +} + +/* + * Returns the address(es) of the buffer(s) contained in the head element of + * the scatter/gather list. The element is removed from the scatter/gather + * list and the next element becomes the head. + * + * This function should only be called when the DMA is not active.
+ */ +int +ppc4xx_delete_dma_sgl_element(sgl_handle_t handle, phys_addr_t * src_dma_addr, + phys_addr_t * dst_dma_addr) +{ + sgl_list_info_t *psgl = (sgl_list_info_t *) handle; + + if (!handle) { + printk("ppc4xx_delete_sgl_element: null handle\n"); + return DMA_STATUS_BAD_HANDLE; + } else if (psgl->dmanr > (MAX_PPC4xx_DMA_CHANNELS - 1)) { + printk("ppc4xx_delete_sgl_element: bad channel in handle %d\n", + psgl->dmanr); + return DMA_STATUS_BAD_CHANNEL; + } + + if (!psgl->phead) { + printk("ppc4xx_delete_sgl_element: sgl list empty\n"); + *src_dma_addr = (phys_addr_t) NULL; + *dst_dma_addr = (phys_addr_t) NULL; + return DMA_STATUS_SGL_LIST_EMPTY; + } + + *src_dma_addr = (phys_addr_t) psgl->phead->src_addr; + *dst_dma_addr = (phys_addr_t) psgl->phead->dst_addr; + + if (psgl->phead == psgl->ptail) { + /* last descriptor on the list */ + psgl->phead = NULL; + psgl->ptail = NULL; + } else { + psgl->phead++; + psgl->phead_dma += sizeof(ppc_sgl_t); + } + + return DMA_STATUS_GOOD; +} + + +/* + * Create a scatter/gather list handle. This is simply a structure which + * describes a scatter/gather list. + * + * A handle is returned in "handle" which the driver should save in order to + * be able to access this list later. A chunk of memory will be allocated + * to be used by the API for internal management purposes, including managing + * the sg list and allocating memory for the sgl descriptors. One page should + * be more than enough for that purpose. Perhaps it's a bit wasteful to use + * a whole page for a single sg list, but most likely there will be only one + * sg list per channel. + * + * Interrupt notes: + * Each sgl descriptor has a copy of the DMA control word which the DMA engine + * loads in the control register. The control word has a "global" interrupt + * enable bit for that channel. Interrupts are further qualified by a few bits + * in the sgl descriptor count register. In order to setup an sgl, we have to + * know ahead of time whether or not interrupts will be enabled at the completion + * of the transfers. Thus, enable_dma_interrupt()/disable_dma_interrupt() MUST + * be called before calling alloc_dma_handle(). If the interrupt mode will never + * change after powerup, then enable_dma_interrupt()/disable_dma_interrupt() + * do not have to be called -- interrupts will be enabled or disabled based + * on how the channel was configured after powerup by the hw_init_dma_channel() + * function. Each sgl descriptor will be setup to interrupt if an error occurs; + * however, only the last descriptor will be setup to interrupt. Thus, an + * interrupt will occur (if interrupts are enabled) only after the complete + * sgl transfer is done. 
+ */ +int +ppc4xx_alloc_dma_handle(sgl_handle_t * phandle, unsigned int mode, unsigned int dmanr) +{ + sgl_list_info_t *psgl; + dma_addr_t dma_addr; + ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr]; + uint32_t sg_command; + void *ret; + + if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) { + printk("ppc4xx_alloc_dma_handle: invalid channel 0x%x\n", dmanr); + return DMA_STATUS_BAD_CHANNEL; + } + + if (!phandle) { + printk("ppc4xx_alloc_dma_handle: null handle pointer\n"); + return DMA_STATUS_NULL_POINTER; + } + + /* Get a page of memory, which is zeroed out by consistent_alloc() */ + ret = dma_alloc_coherent(NULL, DMA_PPC4xx_SIZE, &dma_addr, GFP_KERNEL); + if (ret != NULL) { + memset(ret, 0, DMA_PPC4xx_SIZE); + psgl = (sgl_list_info_t *) ret; + } + + if (psgl == NULL) { + *phandle = (sgl_handle_t) NULL; + return DMA_STATUS_OUT_OF_MEMORY; + } + + psgl->dma_addr = dma_addr; + psgl->dmanr = dmanr; + + /* + * Modify and save the control word. These words will be + * written to each sgl descriptor. The DMA engine then + * loads this control word into the control register + * every time it reads a new descriptor. + */ + psgl->control = p_dma_ch->control; + /* Clear all mode bits */ + psgl->control &= ~(DMA_TM_MASK | DMA_TD); + /* Save control word and mode */ + psgl->control |= (mode | DMA_CE_ENABLE); + + /* In MM mode, we must set ETD/TCE */ + if (mode == DMA_MODE_MM) + psgl->control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE; + + if (p_dma_ch->int_enable) { + /* Enable channel interrupt */ + psgl->control |= DMA_CIE_ENABLE; + } else { + psgl->control &= ~DMA_CIE_ENABLE; + } + + sg_command = mfdcr(DCRN_ASGC); + sg_command |= SSG_MASK_ENABLE(dmanr); + + /* Enable SGL control access */ + mtdcr(DCRN_ASGC, sg_command); + psgl->sgl_control = SG_ERI_ENABLE | SG_LINK; + + if (p_dma_ch->int_enable) { + if (p_dma_ch->tce_enable) + psgl->sgl_control |= SG_TCI_ENABLE; + else + psgl->sgl_control |= SG_ETI_ENABLE; + } + + *phandle = (sgl_handle_t) psgl; + return DMA_STATUS_GOOD; +} + +/* + * Destroy a scatter/gather list handle that was created by alloc_dma_handle(). + * The list must be empty (contain no elements). + */ +void +ppc4xx_free_dma_handle(sgl_handle_t handle) +{ + sgl_list_info_t *psgl = (sgl_list_info_t *) handle; + + if (!handle) { + printk("ppc4xx_free_dma_handle: got NULL\n"); + return; + } else if (psgl->phead) { + printk("ppc4xx_free_dma_handle: list not empty\n"); + return; + } else if (!psgl->dma_addr) { /* should never happen */ + printk("ppc4xx_free_dma_handle: no dma address\n"); + return; + } + + dma_free_coherent(NULL, DMA_PPC4xx_SIZE, (void *) psgl, 0); +} + +EXPORT_SYMBOL(ppc4xx_alloc_dma_handle); +EXPORT_SYMBOL(ppc4xx_free_dma_handle); +EXPORT_SYMBOL(ppc4xx_add_dma_sgl); +EXPORT_SYMBOL(ppc4xx_delete_dma_sgl_element); +EXPORT_SYMBOL(ppc4xx_enable_dma_sgl); +EXPORT_SYMBOL(ppc4xx_disable_dma_sgl); +EXPORT_SYMBOL(ppc4xx_get_dma_sgl_residue); diff --git a/arch/ppc/syslib/ppc85xx_common.c b/arch/ppc/syslib/ppc85xx_common.c new file mode 100644 index 000000000..7de3e4560 --- /dev/null +++ b/arch/ppc/syslib/ppc85xx_common.c @@ -0,0 +1,46 @@ +/* + * arch/ppc/syslib/ppc85xx_common.c + * + * MPC85xx support routines + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include + +/* ************************************************************************ */ +/* Return the value of CCSRBAR for the current board */ + +phys_addr_t +get_ccsrbar(void) +{ + return BOARD_CCSRBAR; +} + +/* ************************************************************************ */ +/* Update the 85xx OCP tables paddr field */ +void +mpc85xx_update_paddr_ocp(struct ocp_device *dev, void *arg) +{ + phys_addr_t ccsrbar; + if (arg) { + ccsrbar = *(phys_addr_t *)arg; + dev->def->paddr += ccsrbar; + } +} + +EXPORT_SYMBOL(get_ccsrbar); diff --git a/arch/ppc/syslib/ppc85xx_common.h b/arch/ppc/syslib/ppc85xx_common.h new file mode 100644 index 000000000..741e2a955 --- /dev/null +++ b/arch/ppc/syslib/ppc85xx_common.h @@ -0,0 +1,29 @@ +/* + * arch/ppc/syslib/ppc85xx_common.h + * + * MPC85xx support routines + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __PPC_SYSLIB_PPC85XX_COMMON_H +#define __PPC_SYSLIB_PPC85XX_COMMON_H + +#include +#include +#include + +/* Provide access to ccsrbar for any modules, etc */ +phys_addr_t get_ccsrbar(void); + +/* Update the 85xx OCP tables paddr field */ +void mpc85xx_update_paddr_ocp(struct ocp_device *dev, void *ccsrbar); + +#endif /* __PPC_SYSLIB_PPC85XX_COMMON_H */ diff --git a/arch/ppc/syslib/ppc85xx_setup.c b/arch/ppc/syslib/ppc85xx_setup.c new file mode 100644 index 000000000..33aa1dc93 --- /dev/null +++ b/arch/ppc/syslib/ppc85xx_setup.c @@ -0,0 +1,341 @@ +/* + * arch/ppc/syslib/ppc85xx_setup.c + * + * MPC85XX common board code + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include /* for linux/serial_core.h */ +#include + +#include +#include +#include +#include +#include +#include +#include + +/* Return the amount of memory */ +unsigned long __init +mpc85xx_find_end_of_memory(void) +{ + bd_t *binfo; + + binfo = (bd_t *) __res; + + return binfo->bi_memsize; +} + +/* The decrementer counts at the system (internal) clock freq divided by 8 */ +void __init +mpc85xx_calibrate_decr(void) +{ + bd_t *binfo = (bd_t *) __res; + unsigned int freq, divisor; + + /* get the core frequency */ + freq = binfo->bi_busfreq; + + /* The timebase is updated every 8 bus clocks, HID0[SEL_TBCLK] = 0 */ + divisor = 8; + tb_ticks_per_jiffy = freq / divisor / HZ; + tb_to_us = mulhwu_scale_factor(freq / divisor, 1000000); + + /* Set the time base to zero */ + mtspr(SPRN_TBWL, 0); + mtspr(SPRN_TBWU, 0); + + /* Clear any pending timer interrupts */ + mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); + + /* Enable decrementer interrupt */ + mtspr(SPRN_TCR, TCR_DIE); +} + +#ifdef CONFIG_SERIAL_8250 +void __init +mpc85xx_early_serial_map(void) +{ + struct uart_port serial_req; + bd_t *binfo = (bd_t *) __res; + phys_addr_t duart_paddr = binfo->bi_immr_base + MPC85xx_UART0_OFFSET; + + /* Setup serial port access */ + memset(&serial_req, 0, sizeof (serial_req)); + serial_req.uartclk = binfo->bi_busfreq; + serial_req.line = 0; + serial_req.irq = MPC85xx_IRQ_DUART; + serial_req.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST; + serial_req.iotype = SERIAL_IO_MEM; + serial_req.membase = ioremap(duart_paddr, MPC85xx_UART0_SIZE); + serial_req.mapbase = duart_paddr; + serial_req.regshift = 0; + +#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB) + gen550_init(0, &serial_req); +#endif + + if (early_serial_setup(&serial_req) != 0) + printk("Early serial init of port 0 failed\n"); + + /* Assume early_serial_setup() doesn't modify serial_req */ + duart_paddr = binfo->bi_immr_base + MPC85xx_UART1_OFFSET; + serial_req.line = 1; + serial_req.mapbase = duart_paddr; + serial_req.membase = ioremap(duart_paddr, MPC85xx_UART1_SIZE); + +#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB) + gen550_init(1, &serial_req); +#endif + + if (early_serial_setup(&serial_req) != 0) + printk("Early serial init of port 1 failed\n"); +} +#endif + +void +mpc85xx_restart(char *cmd) +{ + local_irq_disable(); + abort(); +} + +void +mpc85xx_power_off(void) +{ + local_irq_disable(); + for(;;); +} + +void +mpc85xx_halt(void) +{ + local_irq_disable(); + for(;;); +} + +#ifdef CONFIG_PCI +static void __init +mpc85xx_setup_pci1(struct pci_controller *hose) +{ + volatile struct ccsr_pci *pci; + volatile struct ccsr_guts *guts; + unsigned short temps; + bd_t *binfo = (bd_t *) __res; + + pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI1_OFFSET, + MPC85xx_PCI1_SIZE); + + guts = ioremap(binfo->bi_immr_base + MPC85xx_GUTS_OFFSET, + MPC85xx_GUTS_SIZE); + + early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps); + temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; + early_write_config_word(hose, 0, 0, PCI_COMMAND, temps); + +#define PORDEVSR_PCI (0x00800000) /* PCI Mode */ + if (guts->pordevsr & PORDEVSR_PCI) { + early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80); + } else { + /* PCI-X init */ + temps = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ + | PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E; + early_write_config_word(hose, 0, 0, PCIX_COMMAND, temps); + } + + /* Disable all windows (except powar0 since its ignored) */ + pci->powar1 = 0; + 
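An illustrative aside, not part of the patch: the window setup that follows programs POWARn/PIWARn with constants such as 0x8004401c, 0x80088017 and 0xa0f5501e. The low bits encode log2(window size) - 1, which matches the in-line "512M", "16M" and "2G" comments; the enable bit and the read/write transaction-type fields used below are assumptions taken from the MPC85xx documentation, not something the patch states. A standalone sketch of how such a value can be composed:

/* Illustrative only; assumed layout: bit 31 = enable, bits 19:12 = read/write
 * transaction type, bits 5:0 = log2(size) - 1. */
#include <stdint.h>

static uint32_t powar_value(uint32_t rtt_wtt, uint64_t size)
{
        uint32_t sz_bits = 0;

        while ((1ULL << (sz_bits + 1)) < size)   /* computes log2(size) - 1 */
                sz_bits++;
        return 0x80000000u | rtt_wtt | sz_bits;
}

/* powar_value(0x00044000, 512ULL << 20) == 0x8004401c  (memory window, 512M)
 * powar_value(0x00088000,  16ULL << 20) == 0x80088017  (I/O window, 16M)   */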
pci->powar2 = 0; + pci->powar3 = 0; + pci->powar4 = 0; + pci->piwar1 = 0; + pci->piwar2 = 0; + pci->piwar3 = 0; + + /* Setup 512M Phys:PCI 1:1 outbound mem window @ 0x80000000 */ + pci->potar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff; + pci->potear1 = 0x00000000; + pci->powbar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff; + pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */ + + /* Setup 16M outboud IO windows @ 0xe2000000 */ + pci->potar2 = 0x00000000; + pci->potear2 = 0x00000000; + pci->powbar2 = (MPC85XX_PCI1_IO_BASE >> 12) & 0x000fffff; + pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */ + + /* Setup 2G inbound Memory Window @ 0 */ + pci->pitar1 = 0x00000000; + pci->piwbar1 = 0x00000000; + pci->piwar1 = 0xa0f5501e; /* Enable, Prefetch, Local + Mem, Snoop R/W, 2G */ +} + + +extern int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin); +extern int mpc85xx_exclude_device(u_char bus, u_char devfn); + +#if CONFIG_85xx_PCI2 +static void __init +mpc85xx_setup_pci2(struct pci_controller *hose) +{ + volatile struct ccsr_pci *pci; + unsigned short temps; + bd_t *binfo = (bd_t *) __res; + + pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI2_OFFSET, + MPC85xx_PCI2_SIZE); + + early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps); + temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; + early_write_config_word(hose, 0, 0, PCI_COMMAND, temps); + early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80); + + /* Disable all windows (except powar0 since its ignored) */ + pci->powar1 = 0; + pci->powar2 = 0; + pci->powar3 = 0; + pci->powar4 = 0; + pci->piwar1 = 0; + pci->piwar2 = 0; + pci->piwar3 = 0; + + /* Setup 512M Phys:PCI 1:1 outbound mem window @ 0xa0000000 */ + pci->potar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff; + pci->potear1 = 0x00000000; + pci->powbar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff; + pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */ + + /* Setup 16M outboud IO windows @ 0xe3000000 */ + pci->potar2 = 0x00000000; + pci->potear2 = 0x00000000; + pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff; + pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */ + + /* Setup 2G inbound Memory Window @ 0 */ + pci->pitar1 = 0x00000000; + pci->piwbar1 = 0x00000000; + pci->piwar1 = 0xa0f5501e; /* Enable, Prefetch, Local + Mem, Snoop R/W, 2G */ +} +#endif /* CONFIG_85xx_PCI2 */ + +void __init +mpc85xx_setup_hose(void) +{ + struct pci_controller *hose_a; +#ifdef CONFIG_85xx_PCI2 + struct pci_controller *hose_b; +#endif + bd_t *binfo = (bd_t *) __res; + + hose_a = pcibios_alloc_controller(); + + if (!hose_a) + return; + + ppc_md.pci_swizzle = common_swizzle; + ppc_md.pci_map_irq = mpc85xx_map_irq; + + hose_a->first_busno = 0; + hose_a->bus_offset = 0; + hose_a->last_busno = 0xff; + + setup_indirect_pci(hose_a, binfo->bi_immr_base + PCI1_CFG_ADDR_OFFSET, + binfo->bi_immr_base + PCI1_CFG_DATA_OFFSET); + hose_a->set_cfg_type = 1; + + mpc85xx_setup_pci1(hose_a); + + hose_a->pci_mem_offset = MPC85XX_PCI1_MEM_OFFSET; + hose_a->mem_space.start = MPC85XX_PCI1_LOWER_MEM; + hose_a->mem_space.end = MPC85XX_PCI1_UPPER_MEM; + + hose_a->io_space.start = MPC85XX_PCI1_LOWER_IO; + hose_a->io_space.end = MPC85XX_PCI1_UPPER_IO; + hose_a->io_base_phys = MPC85XX_PCI1_IO_BASE; +#if CONFIG_85xx_PCI2 + isa_io_base = + (unsigned long) ioremap(MPC85XX_PCI1_IO_BASE, + MPC85XX_PCI1_IO_SIZE + + MPC85XX_PCI2_IO_SIZE); +#else + isa_io_base = + (unsigned long) ioremap(MPC85XX_PCI1_IO_BASE, + MPC85XX_PCI1_IO_SIZE); +#endif + hose_a->io_base_virt = (void *) 
isa_io_base; + + /* setup resources */ + pci_init_resource(&hose_a->mem_resources[0], + MPC85XX_PCI1_LOWER_MEM, + MPC85XX_PCI1_UPPER_MEM, + IORESOURCE_MEM, "PCI1 host bridge"); + + pci_init_resource(&hose_a->io_resource, + MPC85XX_PCI1_LOWER_IO, + MPC85XX_PCI1_UPPER_IO, + IORESOURCE_IO, "PCI1 host bridge"); + + ppc_md.pci_exclude_device = mpc85xx_exclude_device; + + hose_a->last_busno = pciauto_bus_scan(hose_a, hose_a->first_busno); + +#if CONFIG_85xx_PCI2 + hose_b = pcibios_alloc_controller(); + + if (!hose_b) + return; + + hose_b->bus_offset = hose_a->last_busno + 1; + hose_b->first_busno = hose_a->last_busno + 1; + hose_b->last_busno = 0xff; + + setup_indirect_pci(hose_b, binfo->bi_immr_base + PCI2_CFG_ADDR_OFFSET, + binfo->bi_immr_base + PCI2_CFG_DATA_OFFSET); + hose_b->set_cfg_type = 1; + + mpc85xx_setup_pci2(hose_b); + + hose_b->pci_mem_offset = MPC85XX_PCI2_MEM_OFFSET; + hose_b->mem_space.start = MPC85XX_PCI2_LOWER_MEM; + hose_b->mem_space.end = MPC85XX_PCI2_UPPER_MEM; + + hose_b->io_space.start = MPC85XX_PCI2_LOWER_IO; + hose_b->io_space.end = MPC85XX_PCI2_UPPER_IO; + hose_b->io_base_phys = MPC85XX_PCI2_IO_BASE; + hose_b->io_base_virt = (void *) isa_io_base + MPC85XX_PCI1_IO_SIZE; + + /* setup resources */ + pci_init_resource(&hose_b->mem_resources[0], + MPC85XX_PCI2_LOWER_MEM, + MPC85XX_PCI2_UPPER_MEM, + IORESOURCE_MEM, "PCI2 host bridge"); + + pci_init_resource(&hose_b->io_resource, + MPC85XX_PCI2_LOWER_IO, + MPC85XX_PCI2_UPPER_IO, + IORESOURCE_IO, "PCI2 host bridge"); + + hose_b->last_busno = pciauto_bus_scan(hose_b, hose_b->first_busno); +#endif + return; +} +#endif /* CONFIG_PCI */ + + diff --git a/arch/ppc/syslib/ppc85xx_setup.h b/arch/ppc/syslib/ppc85xx_setup.h new file mode 100644 index 000000000..311b8a418 --- /dev/null +++ b/arch/ppc/syslib/ppc85xx_setup.h @@ -0,0 +1,67 @@ +/* + * arch/ppc/syslib/ppc85xx_setup.h + * + * MPC85XX common board definitions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __PPC_SYSLIB_PPC85XX_SETUP_H +#define __PPC_SYSLIB_PPC85XX_SETUP_H + +#include +#include +#include +#include + +extern unsigned long mpc85xx_find_end_of_memory(void) __init; +extern void mpc85xx_calibrate_decr(void) __init; +extern void mpc85xx_early_serial_map(void) __init; +extern void mpc85xx_restart(char *cmd); +extern void mpc85xx_power_off(void); +extern void mpc85xx_halt(void); +extern void mpc85xx_setup_hose(void) __init; + +/* PCI config */ +#define PCI1_CFG_ADDR_OFFSET (0x8000) +#define PCI1_CFG_DATA_OFFSET (0x8004) + +#define PCI2_CFG_ADDR_OFFSET (0x9000) +#define PCI2_CFG_DATA_OFFSET (0x9004) + +/* Additional register for PCI-X configuration */ +#define PCIX_NEXT_CAP 0x60 +#define PCIX_CAP_ID 0x61 +#define PCIX_COMMAND 0x62 +#define PCIX_STATUS 0x64 + +/* Serial Config */ +#define MPC85XX_0_SERIAL (CCSRBAR + 0x4500) +#define MPC85XX_1_SERIAL (CCSRBAR + 0x4600) + +#ifdef CONFIG_SERIAL_MANY_PORTS +#define RS_TABLE_SIZE 64 +#else +#define RS_TABLE_SIZE 2 +#endif + +#define BASE_BAUD 0 + +#define STD_UART_OP(num) \ + { 0, BASE_BAUD, num, MPC85xx_IRQ_DUART, \ + (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST), \ + iomem_base: (u8 *)MPC85XX_##num##_SERIAL, \ + io_type: SERIAL_IO_MEM}, + +/* Offset of CPM register space */ +#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET) + +#endif /* __PPC_SYSLIB_PPC85XX_SETUP_H */ diff --git a/arch/ppc64/kernel/hvcserver.c b/arch/ppc64/kernel/hvcserver.c new file mode 100644 index 000000000..fbe445ec0 --- /dev/null +++ b/arch/ppc64/kernel/hvcserver.c @@ -0,0 +1,219 @@ +/* + * hvcserver.c + * Copyright (C) 2004 Ryan S Arnold, IBM Corporation + * + * PPC64 virtual I/O console server support. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include + +#define HVCS_ARCH_VERSION "1.0.0" + +MODULE_AUTHOR("Ryan S. Arnold "); +MODULE_DESCRIPTION("IBM hvcs ppc64 API"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(HVCS_ARCH_VERSION); + +/* + * Convert arch specific return codes into relevant errnos. The hvcs + * functions aren't performance sensitive, so this conversion isn't an + * issue. 
+ */ +int hvcs_convert(long to_convert) +{ + switch (to_convert) { + case H_Success: + return 0; + case H_Parameter: + return -EINVAL; + case H_Hardware: + return -EIO; + case H_Busy: + case H_LongBusyOrder1msec: + case H_LongBusyOrder10msec: + case H_LongBusyOrder100msec: + case H_LongBusyOrder1sec: + case H_LongBusyOrder10sec: + case H_LongBusyOrder100sec: + return -EBUSY; + case H_Function: /* fall through */ + default: + return -EPERM; + } +} + +int hvcs_free_partner_info(struct list_head *head) +{ + struct hvcs_partner_info *pi; + struct list_head *element; + + if (!head) { + return -EINVAL; + } + + while (!list_empty(head)) { + element = head->next; + pi = list_entry(element, struct hvcs_partner_info, node); + list_del(element); + kfree(pi); + } + + return 0; +} +EXPORT_SYMBOL(hvcs_free_partner_info); + +/* Helper function for hvcs_get_partner_info */ +int hvcs_next_partner(unsigned int unit_address, + unsigned long last_p_partition_ID, + unsigned long last_p_unit_address, unsigned long *pi_buff) + +{ + long retval; + retval = plpar_hcall_norets(H_VTERM_PARTNER_INFO, unit_address, + last_p_partition_ID, + last_p_unit_address, virt_to_phys(pi_buff)); + return hvcs_convert(retval); +} + +/* + * The unit_address parameter is the unit address of the vty-server vdevice + * in whose partner information the caller is interested. This function + * uses a pointer to a list_head instance in which to store the partner info. + * This function returns non-zero on success, or if there is no partner info. + * + * Invocation of this function should always be followed by an invocation of + * hvcs_free_partner_info() using a pointer to the SAME list head instance + * that was used to store the partner_info list. + */ +int hvcs_get_partner_info(unsigned int unit_address, struct list_head *head, + unsigned long *pi_buff) +{ + /* + * This is a page sized buffer to be passed to hvcall per invocation. + * NOTE: the first long returned is unit_address. The second long + * returned is the partition ID and starting with pi_buff[2] are + * HVCS_CLC_LENGTH characters, which are diff size than the unsigned + * long, hence the casting mumbojumbo you see later. + */ + unsigned long last_p_partition_ID; + unsigned long last_p_unit_address; + struct hvcs_partner_info *next_partner_info = NULL; + int more = 1; + int retval; + + memset(pi_buff, 0x00, PAGE_SIZE); + /* invalid parameters */ + if (!head) + return -EINVAL; + + last_p_partition_ID = last_p_unit_address = ~0UL; + INIT_LIST_HEAD(head); + + if (!pi_buff) + return -ENOMEM; + + do { + retval = hvcs_next_partner(unit_address, last_p_partition_ID, + last_p_unit_address, pi_buff); + if (retval) { + /* + * Don't indicate that we've failed if we have + * any list elements. + */ + if (!list_empty(head)) + return 0; + return retval; + } + + last_p_partition_ID = pi_buff[0]; + last_p_unit_address = pi_buff[1]; + + /* This indicates that there are no further partners */ + if (last_p_partition_ID == ~0UL + && last_p_unit_address == ~0UL) + break; + + /* This is a very small struct and will be freed soon in + * hvcs_free_partner_info(). 
*/ + next_partner_info = kmalloc(sizeof(struct hvcs_partner_info), + GFP_ATOMIC); + + if (!next_partner_info) { + printk(KERN_WARNING "HVCONSOLE: kmalloc() failed to" + " allocate partner info struct.\n"); + hvcs_free_partner_info(head); + return -ENOMEM; + } + + next_partner_info->unit_address + = (unsigned int)last_p_unit_address; + next_partner_info->partition_ID + = (unsigned int)last_p_partition_ID; + + /* copy the Null-term char too */ + strncpy(&next_partner_info->location_code[0], + (char *)&pi_buff[2], + strlen((char *)&pi_buff[2]) + 1); + + list_add_tail(&(next_partner_info->node), head); + next_partner_info = NULL; + + } while (more); + + return 0; +} +EXPORT_SYMBOL(hvcs_get_partner_info); + +/* + * If this function is called once and -EINVAL is returned it may + * indicate that the partner info needs to be refreshed for the + * target unit address at which point the caller must invoke + * hvcs_get_partner_info() and then call this function again. If, + * for a second time, -EINVAL is returned then it indicates that + * there is probably already a partner connection registered to a + * different vty-server@ vdevice. It is also possible that a second + * -EINVAL may indicate that one of the parms is not valid, for + * instance if the link was removed between the vty-server@ vdevice + * and the vty@ vdevice that you are trying to open. Don't shoot the + * messenger. Firmware implemented it this way. + */ +int hvcs_register_connection( unsigned int unit_address, + unsigned int p_partition_ID, unsigned int p_unit_address) +{ + long retval; + retval = plpar_hcall_norets(H_REGISTER_VTERM, unit_address, + p_partition_ID, p_unit_address); + return hvcs_convert(retval); +} +EXPORT_SYMBOL(hvcs_register_connection); + +/* + * If -EBUSY is returned continue to call this function + * until 0 is returned. + */ +int hvcs_free_connection(unsigned int unit_address) +{ + long retval; + retval = plpar_hcall_norets(H_FREE_VTERM, unit_address); + return hvcs_convert(retval); +} +EXPORT_SYMBOL(hvcs_free_connection); diff --git a/arch/ppc64/kernel/vecemu.c b/arch/ppc64/kernel/vecemu.c new file mode 100644 index 000000000..1430ef592 --- /dev/null +++ b/arch/ppc64/kernel/vecemu.c @@ -0,0 +1,346 @@ +/* + * Routines to emulate some Altivec/VMX instructions, specifically + * those that can trap when given denormalized operands in Java mode. + */ +#include +#include +#include +#include +#include +#include + +/* Functions in vector.S */ +extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b); +extern void vsubfp(vector128 *dst, vector128 *a, vector128 *b); +extern void vmaddfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); +extern void vnmsubfp(vector128 *dst, vector128 *a, vector128 *b, vector128 *c); +extern void vrefp(vector128 *dst, vector128 *src); +extern void vrsqrtefp(vector128 *dst, vector128 *src); +extern void vexptep(vector128 *dst, vector128 *src); + +static unsigned int exp2s[8] = { + 0x800000, + 0x8b95c2, + 0x9837f0, + 0xa5fed7, + 0xb504f3, + 0xc5672a, + 0xd744fd, + 0xeac0c7 +}; + +/* + * Computes an estimate of 2^x. The `s' argument is the 32-bit + * single-precision floating-point representation of x. + */ +static unsigned int eexp2(unsigned int s) +{ + int exp, pwr; + unsigned int mant, frac; + + /* extract exponent field from input */ + exp = ((s >> 23) & 0xff) - 127; + if (exp > 7) { + /* check for NaN input */ + if (exp == 128 && (s & 0x7fffff) != 0) + return s | 0x400000; /* return QNaN */ + /* 2^-big = 0, 2^+big = +Inf */ + return (s & 0x80000000)? 
0: 0x7f800000; /* 0 or +Inf */ + } + if (exp < -23) + return 0x3f800000; /* 1.0 */ + + /* convert to fixed point integer in 9.23 representation */ + pwr = (s & 0x7fffff) | 0x800000; + if (exp > 0) + pwr <<= exp; + else + pwr >>= -exp; + if (s & 0x80000000) + pwr = -pwr; + + /* extract integer part, which becomes exponent part of result */ + exp = (pwr >> 23) + 126; + if (exp >= 254) + return 0x7f800000; + if (exp < -23) + return 0; + + /* table lookup on top 3 bits of fraction to get mantissa */ + mant = exp2s[(pwr >> 20) & 7]; + + /* linear interpolation using remaining 20 bits of fraction */ + asm("mulhwu %0,%1,%2" : "=r" (frac) + : "r" (pwr << 12), "r" (0x172b83ff)); + asm("mulhwu %0,%1,%2" : "=r" (frac) : "r" (frac), "r" (mant)); + mant += frac; + + if (exp >= 0) + return mant + (exp << 23); + + /* denormalized result */ + exp = -exp; + mant += 1 << (exp - 1); + return mant >> exp; +} + +/* + * Computes an estimate of log_2(x). The `s' argument is the 32-bit + * single-precision floating-point representation of x. + */ +static unsigned int elog2(unsigned int s) +{ + int exp, mant, lz, frac; + + exp = s & 0x7f800000; + mant = s & 0x7fffff; + if (exp == 0x7f800000) { /* Inf or NaN */ + if (mant != 0) + s |= 0x400000; /* turn NaN into QNaN */ + return s; + } + if ((exp | mant) == 0) /* +0 or -0 */ + return 0xff800000; /* return -Inf */ + + if (exp == 0) { + /* denormalized */ + asm("cntlzw %0,%1" : "=r" (lz) : "r" (mant)); + mant <<= lz - 8; + exp = (-118 - lz) << 23; + } else { + mant |= 0x800000; + exp -= 127 << 23; + } + + if (mant >= 0xb504f3) { /* 2^0.5 * 2^23 */ + exp |= 0x400000; /* 0.5 * 2^23 */ + asm("mulhwu %0,%1,%2" : "=r" (mant) + : "r" (mant), "r" (0xb504f334)); /* 2^-0.5 * 2^32 */ + } + if (mant >= 0x9837f0) { /* 2^0.25 * 2^23 */ + exp |= 0x200000; /* 0.25 * 2^23 */ + asm("mulhwu %0,%1,%2" : "=r" (mant) + : "r" (mant), "r" (0xd744fccb)); /* 2^-0.25 * 2^32 */ + } + if (mant >= 0x8b95c2) { /* 2^0.125 * 2^23 */ + exp |= 0x100000; /* 0.125 * 2^23 */ + asm("mulhwu %0,%1,%2" : "=r" (mant) + : "r" (mant), "r" (0xeac0c6e8)); /* 2^-0.125 * 2^32 */ + } + if (mant > 0x800000) { /* 1.0 * 2^23 */ + /* calculate (mant - 1) * 1.381097463 */ + /* 1.381097463 == 0.125 / (2^0.125 - 1) */ + asm("mulhwu %0,%1,%2" : "=r" (frac) + : "r" ((mant - 0x800000) << 1), "r" (0xb0c7cd3a)); + exp += frac; + } + s = exp & 0x80000000; + if (exp != 0) { + if (s) + exp = -exp; + asm("cntlzw %0,%1" : "=r" (lz) : "r" (exp)); + lz = 8 - lz; + if (lz > 0) + exp >>= lz; + else if (lz < 0) + exp <<= -lz; + s += ((lz + 126) << 23) + exp; + } + return s; +} + +#define VSCR_SAT 1 + +static int ctsxs(unsigned int x, int scale, unsigned int *vscrp) +{ + int exp, mant; + + exp = (x >> 23) & 0xff; + mant = x & 0x7fffff; + if (exp == 255 && mant != 0) + return 0; /* NaN -> 0 */ + exp = exp - 127 + scale; + if (exp < 0) + return 0; /* round towards zero */ + if (exp >= 31) { + /* saturate, unless the result would be -2^31 */ + if (x + (scale << 23) != 0xcf000000) + *vscrp |= VSCR_SAT; + return (x & 0x80000000)? 0x80000000: 0x7fffffff; + } + mant |= 0x800000; + mant = (mant << 7) >> (30 - exp); + return (x & 0x80000000)? 
-mant: mant; +} + +static unsigned int ctuxs(unsigned int x, int scale, unsigned int *vscrp) +{ + int exp; + unsigned int mant; + + exp = (x >> 23) & 0xff; + mant = x & 0x7fffff; + if (exp == 255 && mant != 0) + return 0; /* NaN -> 0 */ + exp = exp - 127 + scale; + if (exp < 0) + return 0; /* round towards zero */ + if (x & 0x80000000) { + /* negative => saturate to 0 */ + *vscrp |= VSCR_SAT; + return 0; + } + if (exp >= 32) { + /* saturate */ + *vscrp |= VSCR_SAT; + return 0xffffffff; + } + mant |= 0x800000; + mant = (mant << 8) >> (31 - exp); + return mant; +} + +/* Round to floating integer, towards 0 */ +static unsigned int rfiz(unsigned int x) +{ + int exp; + + exp = ((x >> 23) & 0xff) - 127; + if (exp == 128 && (x & 0x7fffff) != 0) + return x | 0x400000; /* NaN -> make it a QNaN */ + if (exp >= 23) + return x; /* it's an integer already (or Inf) */ + if (exp < 0) + return x & 0x80000000; /* |x| < 1.0 rounds to 0 */ + return x & ~(0x7fffff >> exp); +} + +/* Round to floating integer, towards +/- Inf */ +static unsigned int rfii(unsigned int x) +{ + int exp, mask; + + exp = ((x >> 23) & 0xff) - 127; + if (exp == 128 && (x & 0x7fffff) != 0) + return x | 0x400000; /* NaN -> make it a QNaN */ + if (exp >= 23) + return x; /* it's an integer already (or Inf) */ + if ((x & 0x7fffffff) == 0) + return x; /* +/-0 -> +/-0 */ + if (exp < 0) + /* 0 < |x| < 1.0 rounds to +/- 1.0 */ + return (x & 0x80000000) | 0x3f800000; + mask = 0x7fffff >> exp; + /* mantissa overflows into exponent - that's OK, + it can't overflow into the sign bit */ + return (x + mask) & ~mask; +} + +/* Round to floating integer, to nearest */ +static unsigned int rfin(unsigned int x) +{ + int exp, half; + + exp = ((x >> 23) & 0xff) - 127; + if (exp == 128 && (x & 0x7fffff) != 0) + return x | 0x400000; /* NaN -> make it a QNaN */ + if (exp >= 23) + return x; /* it's an integer already (or Inf) */ + if (exp < -1) + return x & 0x80000000; /* |x| < 0.5 -> +/-0 */ + if (exp == -1) + /* 0.5 <= |x| < 1.0 rounds to +/- 1.0 */ + return (x & 0x80000000) | 0x3f800000; + half = 0x400000 >> exp; + /* add 0.5 to the magnitude and chop off the fraction bits */ + return (x + half) & ~(0x7fffff >> exp); +} + +int +emulate_altivec(struct pt_regs *regs) +{ + unsigned int instr, i; + unsigned int va, vb, vc, vd; + vector128 *vrs; + + if (get_user(instr, (unsigned int *) regs->nip)) + return -EFAULT; + if ((instr >> 26) != 4) + return -EINVAL; /* not an altivec instruction */ + vd = (instr >> 21) & 0x1f; + va = (instr >> 16) & 0x1f; + vb = (instr >> 11) & 0x1f; + vc = (instr >> 6) & 0x1f; + + vrs = current->thread.vr; + switch (instr & 0x3f) { + case 10: + switch (vc) { + case 0: /* vaddfp */ + vaddfp(&vrs[vd], &vrs[va], &vrs[vb]); + break; + case 1: /* vsubfp */ + vsubfp(&vrs[vd], &vrs[va], &vrs[vb]); + break; + case 4: /* vrefp */ + vrefp(&vrs[vd], &vrs[vb]); + break; + case 5: /* vrsqrtefp */ + vrsqrtefp(&vrs[vd], &vrs[vb]); + break; + case 6: /* vexptefp */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = eexp2(vrs[vb].u[i]); + break; + case 7: /* vlogefp */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = elog2(vrs[vb].u[i]); + break; + case 8: /* vrfin */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = rfin(vrs[vb].u[i]); + break; + case 9: /* vrfiz */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = rfiz(vrs[vb].u[i]); + break; + case 10: /* vrfip */ + for (i = 0; i < 4; ++i) { + u32 x = vrs[vb].u[i]; + x = (x & 0x80000000)? 
rfiz(x): rfii(x); + vrs[vd].u[i] = x; + } + break; + case 11: /* vrfim */ + for (i = 0; i < 4; ++i) { + u32 x = vrs[vb].u[i]; + x = (x & 0x80000000)? rfii(x): rfiz(x); + vrs[vd].u[i] = x; + } + break; + case 14: /* vctuxs */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va, + ¤t->thread.vscr.u[3]); + break; + case 15: /* vctsxs */ + for (i = 0; i < 4; ++i) + vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va, + ¤t->thread.vscr.u[3]); + break; + default: + return -EINVAL; + } + break; + case 46: /* vmaddfp */ + vmaddfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]); + break; + case 47: /* vnmsubfp */ + vnmsubfp(&vrs[vd], &vrs[va], &vrs[vb], &vrs[vc]); + break; + default: + return -EINVAL; + } + + return 0; +} diff --git a/arch/ppc64/kernel/vector.S b/arch/ppc64/kernel/vector.S new file mode 100644 index 000000000..940cd7287 --- /dev/null +++ b/arch/ppc64/kernel/vector.S @@ -0,0 +1,172 @@ +#include +#include + +/* + * The routines below are in assembler so we can closely control the + * usage of floating-point registers. These routines must be called + * with preempt disabled. + */ + .section ".toc","aw" +fpzero: + .tc FD_0_0[TC],0 +fpone: + .tc FD_3ff00000_0[TC],0x3ff0000000000000 /* 1.0 */ +fphalf: + .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */ + + .text +/* + * Internal routine to enable floating point and set FPSCR to 0. + * Don't call it from C; it doesn't use the normal calling convention. + */ +fpenable: + mfmsr r10 + ori r11,r10,MSR_FP + mtmsr r11 + isync + stfd fr31,-8(r1) + stfd fr0,-16(r1) + stfd fr1,-24(r1) + mffs fr31 + lfd fr1,fpzero@toc(r2) + mtfsf 0xff,fr1 + blr + +fpdisable: + mtlr r12 + mtfsf 0xff,fr31 + lfd fr1,-24(r1) + lfd fr0,-16(r1) + lfd fr31,-8(r1) + mtmsr r10 + isync + blr + +/* + * Vector add, floating point. + */ +_GLOBAL(vaddfp) + mflr r12 + bl fpenable + li r0,4 + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + lfsx fr1,r5,r6 + fadds fr0,fr0,fr1 + stfsx fr0,r3,r6 + addi r6,r6,4 + bdnz 1b + b fpdisable + +/* + * Vector subtract, floating point. + */ +_GLOBAL(vsubfp) + mflr r12 + bl fpenable + li r0,4 + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + lfsx fr1,r5,r6 + fsubs fr0,fr0,fr1 + stfsx fr0,r3,r6 + addi r6,r6,4 + bdnz 1b + b fpdisable + +/* + * Vector multiply and add, floating point. + */ +_GLOBAL(vmaddfp) + mflr r12 + bl fpenable + stfd fr2,-32(r1) + li r0,4 + mtctr r0 + li r7,0 +1: lfsx fr0,r4,r7 + lfsx fr1,r5,r7 + lfsx fr2,r6,r7 + fmadds fr0,fr0,fr1,fr2 + stfsx fr0,r3,r7 + addi r7,r7,4 + bdnz 1b + lfd fr2,-32(r1) + b fpdisable + +/* + * Vector negative multiply and subtract, floating point. + */ +_GLOBAL(vnmsubfp) + mflr r12 + bl fpenable + stfd fr2,-32(r1) + li r0,4 + mtctr r0 + li r7,0 +1: lfsx fr0,r4,r7 + lfsx fr1,r5,r7 + lfsx fr2,r6,r7 + fnmsubs fr0,fr0,fr1,fr2 + stfsx fr0,r3,r7 + addi r7,r7,4 + bdnz 1b + lfd fr2,-32(r1) + b fpdisable + +/* + * Vector reciprocal estimate. We just compute 1.0/x. + * r3 -> destination, r4 -> source. + */ +_GLOBAL(vrefp) + mflr r12 + bl fpenable + li r0,4 + lfd fr1,fpone@toc(r2) + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + fdivs fr0,fr1,fr0 + stfsx fr0,r3,r6 + addi r6,r6,4 + bdnz 1b + b fpdisable + +/* + * Vector reciprocal square-root estimate, floating point. + * We use the frsqrte instruction for the initial estimate followed + * by 2 iterations of Newton-Raphson to get sufficient accuracy. + * r3 -> destination, r4 -> source. 
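As a worked illustration (not from the patch): the refinement step the assembly below implements is r_new = r + 0.5*r*(1 - x*r*r), the standard Newton-Raphson iteration for 1/sqrt(x), applied twice. The C model uses a crude hand-picked starting estimate in place of frsqrte, which is an assumption made purely so the snippet runs on its own.

#include <math.h>
#include <stdio.h>

/* Two Newton-Raphson steps on an initial reciprocal-square-root estimate r,
 * for input x > 0 - the same arithmetic as the fnmsubs/fmadds pairs below. */
static float refine_rsqrt(float x, float r)
{
        r = r + 0.5f * r * (1.0f - x * r * r);   /* first iteration  */
        r = r + 0.5f * r * (1.0f - x * r * r);   /* second iteration */
        return r;
}

int main(void)
{
        float x  = 2.0f;
        float r0 = 0.7f;                         /* crude stand-in for frsqrte(x) */

        printf("%f vs %f\n", refine_rsqrt(x, r0), 1.0 / sqrt(2.0));
        return 0;
}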
+ */ +_GLOBAL(vrsqrtefp) + mflr r12 + bl fpenable + stfd fr2,-32(r1) + stfd fr3,-40(r1) + stfd fr4,-48(r1) + stfd fr5,-56(r1) + li r0,4 + lfd fr4,fpone@toc(r2) + lfd fr5,fphalf@toc(r2) + mtctr r0 + li r6,0 +1: lfsx fr0,r4,r6 + frsqrte fr1,fr0 /* r = frsqrte(s) */ + fmuls fr3,fr1,fr0 /* r * s */ + fmuls fr2,fr1,fr5 /* r * 0.5 */ + fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ + fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ + fmuls fr3,fr1,fr0 /* r * s */ + fmuls fr2,fr1,fr5 /* r * 0.5 */ + fnmsubs fr3,fr1,fr3,fr4 /* 1 - s * r * r */ + fmadds fr1,fr2,fr3,fr1 /* r = r + 0.5 * r * (1 - s * r * r) */ + stfsx fr1,r3,r6 + addi r6,r6,4 + bdnz 1b + lfd fr5,-56(r1) + lfd fr4,-48(r1) + lfd fr3,-40(r1) + lfd fr2,-32(r1) + b fpdisable diff --git a/arch/ppc64/lib/e2a.c b/arch/ppc64/lib/e2a.c new file mode 100644 index 000000000..d2b834887 --- /dev/null +++ b/arch/ppc64/lib/e2a.c @@ -0,0 +1,108 @@ +/* + * arch/ppc64/lib/e2a.c + * + * EBCDIC to ASCII conversion + * + * This function moved here from arch/ppc64/kernel/viopath.c + * + * (C) Copyright 2000-2004 IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) anyu later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include + +unsigned char e2a(unsigned char x) +{ + switch (x) { + case 0xF0: + return '0'; + case 0xF1: + return '1'; + case 0xF2: + return '2'; + case 0xF3: + return '3'; + case 0xF4: + return '4'; + case 0xF5: + return '5'; + case 0xF6: + return '6'; + case 0xF7: + return '7'; + case 0xF8: + return '8'; + case 0xF9: + return '9'; + case 0xC1: + return 'A'; + case 0xC2: + return 'B'; + case 0xC3: + return 'C'; + case 0xC4: + return 'D'; + case 0xC5: + return 'E'; + case 0xC6: + return 'F'; + case 0xC7: + return 'G'; + case 0xC8: + return 'H'; + case 0xC9: + return 'I'; + case 0xD1: + return 'J'; + case 0xD2: + return 'K'; + case 0xD3: + return 'L'; + case 0xD4: + return 'M'; + case 0xD5: + return 'N'; + case 0xD6: + return 'O'; + case 0xD7: + return 'P'; + case 0xD8: + return 'Q'; + case 0xD9: + return 'R'; + case 0xE2: + return 'S'; + case 0xE3: + return 'T'; + case 0xE4: + return 'U'; + case 0xE5: + return 'V'; + case 0xE6: + return 'W'; + case 0xE7: + return 'X'; + case 0xE8: + return 'Y'; + case 0xE9: + return 'Z'; + } + return ' '; +} +EXPORT_SYMBOL(e2a); + + diff --git a/arch/ppc64/lib/locks.c b/arch/ppc64/lib/locks.c new file mode 100644 index 000000000..4610cb938 --- /dev/null +++ b/arch/ppc64/lib/locks.c @@ -0,0 +1,285 @@ +/* + * Spin and read/write lock operations. 
+ * + * Copyright (C) 2001-2004 Paul Mackerras , IBM + * Copyright (C) 2001 Anton Blanchard , IBM + * Copyright (C) 2002 Dave Engebretsen , IBM + * Rework to support virtual processors + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include + +#ifndef CONFIG_SPINLINE + +/* + * On a system with shared processors (that is, where a physical + * processor is multiplexed between several virtual processors), + * there is no point spinning on a lock if the holder of the lock + * isn't currently scheduled on a physical processor. Instead + * we detect this situation and ask the hypervisor to give the + * rest of our timeslice to the lock holder. + * + * So that we can tell which virtual processor is holding a lock, + * we put 0x80000000 | smp_processor_id() in the lock when it is + * held. Conveniently, we have a word in the paca that holds this + * value. + */ + +/* waiting for a spinlock... */ +#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) +void __spin_yield(spinlock_t *lock) +{ + unsigned int lock_value, holder_cpu, yield_count; + struct paca_struct *holder_paca; + + lock_value = lock->lock; + if (lock_value == 0) + return; + holder_cpu = lock_value & 0xffff; + BUG_ON(holder_cpu >= NR_CPUS); + holder_paca = &paca[holder_cpu]; + yield_count = holder_paca->xLpPaca.xYieldCount; + if ((yield_count & 1) == 0) + return; /* virtual cpu is currently running */ + rmb(); + if (lock->lock != lock_value) + return; /* something has changed */ +#ifdef CONFIG_PPC_ISERIES + HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, + ((u64)holder_cpu << 32) | yield_count); +#else + plpar_hcall_norets(H_CONFER, holder_cpu, yield_count); +#endif +} + +#else /* SPLPAR || ISERIES */ +#define __spin_yield(x) barrier() +#endif + +/* + * This returns the old value in the lock, so we succeeded + * in getting the lock if the return value is 0. + */ +static __inline__ unsigned long __spin_trylock(spinlock_t *lock) +{ + unsigned long tmp, tmp2; + + __asm__ __volatile__( +" lwz %1,24(13) # __spin_trylock\n\ +1: lwarx %0,0,%2\n\ + cmpwi 0,%0,0\n\ + bne- 2f\n\ + stwcx. %1,0,%2\n\ + bne- 1b\n\ + isync\n\ +2:" : "=&r" (tmp), "=&r" (tmp2) + : "r" (&lock->lock) + : "cr0", "memory"); + + return tmp; +} + +int _raw_spin_trylock(spinlock_t *lock) +{ + return __spin_trylock(lock) == 0; +} + +EXPORT_SYMBOL(_raw_spin_trylock); + +void _raw_spin_lock(spinlock_t *lock) +{ + while (1) { + if (likely(__spin_trylock(lock) == 0)) + break; + do { + HMT_low(); + __spin_yield(lock); + } while (likely(lock->lock != 0)); + HMT_medium(); + } +} + +EXPORT_SYMBOL(_raw_spin_lock); + +void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags) +{ + unsigned long flags_dis; + + while (1) { + if (likely(__spin_trylock(lock) == 0)) + break; + local_save_flags(flags_dis); + local_irq_restore(flags); + do { + HMT_low(); + __spin_yield(lock); + } while (likely(lock->lock != 0)); + HMT_medium(); + local_irq_restore(flags_dis); + } +} + +EXPORT_SYMBOL(_raw_spin_lock_flags); + +void spin_unlock_wait(spinlock_t *lock) +{ + while (lock->lock) + __spin_yield(lock); +} + +EXPORT_SYMBOL(spin_unlock_wait); + +/* + * Waiting for a read lock or a write lock on a rwlock... 
+ * This turns out to be the same for read and write locks, since + * we only know the holder if it is write-locked. + */ +#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) +void __rw_yield(rwlock_t *rw) +{ + int lock_value; + unsigned int holder_cpu, yield_count; + struct paca_struct *holder_paca; + + lock_value = rw->lock; + if (lock_value >= 0) + return; /* no write lock at present */ + holder_cpu = lock_value & 0xffff; + BUG_ON(holder_cpu >= NR_CPUS); + holder_paca = &paca[holder_cpu]; + yield_count = holder_paca->xLpPaca.xYieldCount; + if ((yield_count & 1) == 0) + return; /* virtual cpu is currently running */ + rmb(); + if (rw->lock != lock_value) + return; /* something has changed */ +#ifdef CONFIG_PPC_ISERIES + HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, + ((u64)holder_cpu << 32) | yield_count); +#else + plpar_hcall_norets(H_CONFER, holder_cpu, yield_count); +#endif +} + +#else /* SPLPAR || ISERIES */ +#define __rw_yield(x) barrier() +#endif + +/* + * This returns the old value in the lock + 1, + * so we got a read lock if the return value is > 0. + */ +static __inline__ long __read_trylock(rwlock_t *rw) +{ + long tmp; + + __asm__ __volatile__( +"1: lwarx %0,0,%1 # read_trylock\n\ + extsw %0,%0\n\ + addic. %0,%0,1\n\ + ble- 2f\n\ + stwcx. %0,0,%1\n\ + bne- 1b\n\ + isync\n\ +2:" : "=&r" (tmp) + : "r" (&rw->lock) + : "cr0", "xer", "memory"); + + return tmp; +} + +int _raw_read_trylock(rwlock_t *rw) +{ + return __read_trylock(rw) > 0; +} + +EXPORT_SYMBOL(_raw_read_trylock); + +void _raw_read_lock(rwlock_t *rw) +{ + while (1) { + if (likely(__read_trylock(rw) > 0)) + break; + do { + HMT_low(); + __rw_yield(rw); + } while (likely(rw->lock < 0)); + HMT_medium(); + } +} + +EXPORT_SYMBOL(_raw_read_lock); + +void _raw_read_unlock(rwlock_t *rw) +{ + long tmp; + + __asm__ __volatile__( + "eieio # read_unlock\n\ +1: lwarx %0,0,%1\n\ + addic %0,%0,-1\n\ + stwcx. %0,0,%1\n\ + bne- 1b" + : "=&r"(tmp) + : "r"(&rw->lock) + : "cr0", "memory"); +} + +EXPORT_SYMBOL(_raw_read_unlock); + +/* + * This returns the old value in the lock, + * so we got the write lock if the return value is 0. + */ +static __inline__ long __write_trylock(rwlock_t *rw) +{ + long tmp, tmp2; + + __asm__ __volatile__( +" lwz %1,24(13) # write_trylock\n\ +1: lwarx %0,0,%2\n\ + cmpwi 0,%0,0\n\ + bne- 2f\n\ + stwcx. %1,0,%2\n\ + bne- 1b\n\ + isync\n\ +2:" : "=&r" (tmp), "=&r" (tmp2) + : "r" (&rw->lock) + : "cr0", "memory"); + + return tmp; +} + +int _raw_write_trylock(rwlock_t *rw) +{ + return __write_trylock(rw) == 0; +} + +EXPORT_SYMBOL(_raw_write_trylock); + +void _raw_write_lock(rwlock_t *rw) +{ + while (1) { + if (likely(__write_trylock(rw) == 0)) + break; + do { + HMT_low(); + __rw_yield(rw); + } while (likely(rw->lock != 0)); + HMT_medium(); + } +} + +EXPORT_SYMBOL(_raw_write_lock); + +#endif /* CONFIG_SPINLINE */ diff --git a/arch/ppc64/lib/usercopy.c b/arch/ppc64/lib/usercopy.c new file mode 100644 index 000000000..5eea6f3c1 --- /dev/null +++ b/arch/ppc64/lib/usercopy.c @@ -0,0 +1,41 @@ +/* + * Functions which are too large to be inlined. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include +#include + +unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if (likely(access_ok(VERIFY_READ, from, n))) + n = __copy_from_user(to, from, n); + else + memset(to, 0, n); + return n; +} + +unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) +{ + if (likely(access_ok(VERIFY_WRITE, to, n))) + n = __copy_to_user(to, from, n); + return n; +} + +unsigned long copy_in_user(void __user *to, const void __user *from, + unsigned long n) +{ + might_sleep(); + if (likely(access_ok(VERIFY_READ, from, n) && + access_ok(VERIFY_WRITE, to, n))) + n =__copy_tofrom_user(to, from, n); + return n; +} + +EXPORT_SYMBOL(copy_from_user); +EXPORT_SYMBOL(copy_to_user); +EXPORT_SYMBOL(copy_in_user); + diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c new file mode 100644 index 000000000..bc6125804 --- /dev/null +++ b/arch/ppc64/mm/slb.c @@ -0,0 +1,136 @@ +/* + * PowerPC64 SLB support. + * + * Copyright (C) 2004 David Gibson , IBM + * Based on earlier code writteh by: + * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com + * Copyright (c) 2001 Dave Engebretsen + * Copyright (C) 2002 Anton Blanchard , IBM + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + +extern void slb_allocate(unsigned long ea); + +static inline void create_slbe(unsigned long ea, unsigned long vsid, + unsigned long flags, unsigned long entry) +{ + ea = (ea & ESID_MASK) | SLB_ESID_V | entry; + vsid = (vsid << SLB_VSID_SHIFT) | flags; + asm volatile("slbmte %0,%1" : + : "r" (vsid), "r" (ea) + : "memory" ); +} + +static void slb_add_bolted(void) +{ +#ifndef CONFIG_PPC_ISERIES + WARN_ON(!irqs_disabled()); + + /* If you change this make sure you change SLB_NUM_BOLTED + * appropriately too */ + + /* Slot 1 - first VMALLOC segment + * Since modules end up there it gets hit very heavily. + */ + create_slbe(VMALLOCBASE, get_kernel_vsid(VMALLOCBASE), + SLB_VSID_KERNEL, 1); + + asm volatile("isync":::"memory"); +#endif +} + +/* Flush all user entries from the segment table of the current processor. */ +void switch_slb(struct task_struct *tsk, struct mm_struct *mm) +{ + unsigned long offset = get_paca()->slb_cache_ptr; + unsigned long esid_data; + unsigned long pc = KSTK_EIP(tsk); + unsigned long stack = KSTK_ESP(tsk); + unsigned long unmapped_base; + + if (offset <= SLB_CACHE_ENTRIES) { + int i; + asm volatile("isync" : : : "memory"); + for (i = 0; i < offset; i++) { + esid_data = (unsigned long)get_paca()->slb_cache[i] + << SID_SHIFT; + asm volatile("slbie %0" : : "r" (esid_data)); + } + asm volatile("isync" : : : "memory"); + } else { + asm volatile("isync; slbia; isync" : : : "memory"); + slb_add_bolted(); + } + + /* Workaround POWER5 < DD2.1 issue */ + if (offset == 1 || offset > SLB_CACHE_ENTRIES) { + /* flush segment in EEH region, we shouldn't ever + * access addresses in this region. */ + asm volatile("slbie %0" : : "r"(EEHREGIONBASE)); + } + + get_paca()->slb_cache_ptr = 0; + get_paca()->context = mm->context; + + /* + * preload some userspace segments into the SLB. 
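An illustrative aside, not part of the patch: callers of the copy_from_user()/copy_to_user() wrappers above treat the return value as the number of bytes left uncopied, so any nonzero result is normally turned into -EFAULT. A hypothetical kernel-side helper sketched under that reading; struct my_args and fetch_args are made-up names.

#include <linux/errno.h>
#include <asm/uaccess.h>

struct my_args {                        /* made-up example structure */
        unsigned long addr;
        unsigned long len;
};

static int fetch_args(struct my_args *dst, const void __user *uptr)
{
        /* copy_from_user() returns the number of bytes it could not copy;
         * nonzero means the user buffer was not fully readable. */
        if (copy_from_user(dst, uptr, sizeof(*dst)))
                return -EFAULT;
        return 0;
}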
+ */ + if (test_tsk_thread_flag(tsk, TIF_32BIT)) + unmapped_base = TASK_UNMAPPED_BASE_USER32; + else + unmapped_base = TASK_UNMAPPED_BASE_USER64; + + if (pc >= KERNELBASE) + return; + slb_allocate(pc); + + if (GET_ESID(pc) == GET_ESID(stack)) + return; + + if (stack >= KERNELBASE) + return; + slb_allocate(stack); + + if ((GET_ESID(pc) == GET_ESID(unmapped_base)) + || (GET_ESID(stack) == GET_ESID(unmapped_base))) + return; + + if (unmapped_base >= KERNELBASE) + return; + slb_allocate(unmapped_base); +} + +void slb_initialize(void) +{ +#ifdef CONFIG_PPC_ISERIES + asm volatile("isync; slbia; isync":::"memory"); +#else + unsigned long flags = SLB_VSID_KERNEL; + + /* Invalidate the entire SLB (even slot 0) & all the ERATS */ + if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) + flags |= SLB_VSID_L; + + asm volatile("isync":::"memory"); + asm volatile("slbmte %0,%0"::"r" (0) : "memory"); + asm volatile("isync; slbia; isync":::"memory"); + create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE), + flags, 0); + +#endif + slb_add_bolted(); + get_paca()->stab_rr = SLB_NUM_BOLTED; +} diff --git a/arch/ppc64/mm/slb_low.S b/arch/ppc64/mm/slb_low.S new file mode 100644 index 000000000..4b3dfe091 --- /dev/null +++ b/arch/ppc64/mm/slb_low.S @@ -0,0 +1,179 @@ +/* + * arch/ppc64/mm/slb_low.S + * + * Low-level SLB routines + * + * Copyright (C) 2004 David Gibson , IBM + * + * Based on earlier C version: + * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com + * Copyright (c) 2001 Dave Engebretsen + * Copyright (C) 2002 Anton Blanchard , IBM + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* void slb_allocate(unsigned long ea); + * + * Create an SLB entry for the given EA (user or kernel). + * r3 = faulting address, r13 = PACA + * r9, r10, r11 are clobbered by this function + * No other registers are examined or changed. + */ +_GLOBAL(slb_allocate) + /* + * First find a slot, round robin. Previously we tried to find + * a free slot first but that took too long. Unfortunately we + * dont have any LRU information to help us choose a slot. + */ + ld r10,PACASTABRR(r13) +3: + addi r10,r10,1 + /* use a cpu feature mask if we ever change our slb size */ + cmpldi r10,SLB_NUM_ENTRIES + + blt+ 4f + li r10,SLB_NUM_BOLTED + + /* + * Never cast out the segment for our kernel stack. Since we + * dont invalidate the ERAT we could have a valid translation + * for the kernel stack during the first part of exception exit + * which gets invalidated due to a tlbie from another cpu at a + * non recoverable point (after setting srr0/1) - Anton + */ +4: slbmfee r11,r10 + srdi r11,r11,27 + /* + * Use paca->ksave as the value of the kernel stack pointer, + * because this is valid at all times. + * The >> 27 (rather than >> 28) is so that the LSB is the + * valid bit - this way we check valid and ESID in one compare. + * In order to completely close the tiny race in the context + * switch (between updating r1 and updating paca->ksave), + * we check against both r1 and paca->ksave. 
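An illustrative aside, not part of the patch: the ">> 27" trick described above can be modelled in C. Shifting an effective address right by 27 leaves the ESID in the upper bits with bit 0 available for the valid bit, so one 64-bit compare checks "valid AND same segment" at once; the bit positions here only restate the comment above and are otherwise an assumption about the SLBE layout.

#include <stdint.h>

/* Tag an effective address the way the stack pointer is "mangled" above:
 * ESID left in the high bits, the valid bit forced on in bit 0. */
static inline uint64_t esid_tag(uint64_t ea)
{
        return (ea >> 27) | 1;
}

/* An SLB entry read back with slbmfee and shifted right by 27 compares
 * equal to esid_tag(sp) only if it is valid and maps the same segment:
 *
 *     if ((slbe >> 27) == esid_tag(sp))
 *             ;   // never victimize this slot - it maps the kernel stack
 */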
+ */ + srdi r9,r1,27 + ori r9,r9,1 /* mangle SP for later compare */ + cmpd r11,r9 + beq- 3b + ld r9,PACAKSAVE(r13) + srdi r9,r9,27 + ori r9,r9,1 + cmpd r11,r9 + beq- 3b + + std r10,PACASTABRR(r13) + + /* r3 = faulting address, r10 = entry */ + + srdi r9,r3,60 /* get region */ + srdi r3,r3,28 /* get esid */ + cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */ + + /* r9 = region, r3 = esid, cr7 = <>KERNELBASE */ + + rldicr. r11,r3,32,16 + bne- 8f /* invalid ea bits set */ + addi r11,r9,-1 + cmpldi r11,0xb + blt- 8f /* invalid region */ + + /* r9 = region, r3 = esid, r10 = entry, cr7 = <>KERNELBASE */ + + blt cr7,0f /* user or kernel? */ + + /* kernel address */ + li r11,SLB_VSID_KERNEL +BEGIN_FTR_SECTION + bne cr7,9f + li r11,(SLB_VSID_KERNEL|SLB_VSID_L) +END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) + b 9f + +0: /* user address */ + li r11,SLB_VSID_USER +#ifdef CONFIG_HUGETLB_PAGE +BEGIN_FTR_SECTION + /* check against the hugepage ranges */ + cmpldi r3,(TASK_HPAGE_END>>SID_SHIFT) + bge 6f /* >= TASK_HPAGE_END */ + cmpldi r3,(TASK_HPAGE_BASE>>SID_SHIFT) + bge 5f /* TASK_HPAGE_BASE..TASK_HPAGE_END */ + cmpldi r3,16 + bge 6f /* 4GB..TASK_HPAGE_BASE */ + + lhz r9,PACAHTLBSEGS(r13) + srd r9,r9,r3 + andi. r9,r9,1 + beq 6f + +5: /* this is a hugepage user address */ + li r11,(SLB_VSID_USER|SLB_VSID_L) +END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE) +#endif /* CONFIG_HUGETLB_PAGE */ + +6: ld r9,PACACONTEXTID(r13) + +9: /* r9 = "context", r3 = esid, r11 = flags, r10 = entry */ + + rldimi r9,r3,15,0 /* r9= VSID ordinal */ + +7: rldimi r10,r3,28,0 /* r10= ESID<<28 | entry */ + oris r10,r10,SLB_ESID_V@h /* r10 |= SLB_ESID_V */ + + /* r9 = ordinal, r3 = esid, r11 = flags, r10 = esid_data */ + + li r3,VSID_RANDOMIZER@higher + sldi r3,r3,32 + oris r3,r3,VSID_RANDOMIZER@h + ori r3,r3,VSID_RANDOMIZER@l + + mulld r9,r3,r9 /* r9 = ordinal * VSID_RANDOMIZER */ + clrldi r9,r9,28 /* r9 &= VSID_MASK */ + sldi r9,r9,SLB_VSID_SHIFT /* r9 <<= SLB_VSID_SHIFT */ + or r9,r9,r11 /* r9 |= flags */ + + /* r9 = vsid_data, r10 = esid_data, cr7 = <>KERNELBASE */ + + /* + * No need for an isync before or after this slbmte. The exception + * we enter with and the rfid we exit with are context synchronizing. + */ + slbmte r9,r10 + + bgelr cr7 /* we're done for kernel addresses */ + + /* Update the slb cache */ + lhz r3,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */ + cmpldi r3,SLB_CACHE_ENTRIES + bge 1f + + /* still room in the slb cache */ + sldi r11,r3,1 /* r11 = offset * sizeof(u16) */ + rldicl r10,r10,36,28 /* get low 16 bits of the ESID */ + add r11,r11,r13 /* r11 = (u16 *)paca + offset */ + sth r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */ + addi r3,r3,1 /* offset++ */ + b 2f +1: /* offset >= SLB_CACHE_ENTRIES */ + li r3,SLB_CACHE_ENTRIES+1 +2: + sth r3,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */ + blr + +8: /* invalid EA */ + li r9,0 /* 0 VSID ordinal -> BAD_VSID */ + li r11,SLB_VSID_USER /* flags don't much matter */ + b 7b diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c new file mode 100644 index 000000000..d9ccdbdc2 --- /dev/null +++ b/arch/s390/kernel/vtime.c @@ -0,0 +1,480 @@ +/* + * arch/s390/kernel/vtime.c + * Virtual cpu timer based timer functions. 
+ * + * S390 version + * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Jan Glauber + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define VTIMER_MAGIC (0x4b87ad6e + 1) +static ext_int_info_t ext_int_info_timer; +DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); + +void start_cpu_timer(void) +{ + struct vtimer_queue *vt_list; + + vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + set_vtimer(vt_list->idle); +} + +void stop_cpu_timer(void) +{ + __u64 done; + struct vtimer_queue *vt_list; + + vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + + /* nothing to do */ + if (list_empty(&vt_list->list)) { + vt_list->idle = VTIMER_MAX_SLICE; + goto fire; + } + + /* store progress */ + asm volatile ("STPT %0" : "=m" (done)); + + /* + * If done is negative we do not stop the CPU timer + * because we will get instantly an interrupt that + * will start the CPU timer again. + */ + if (done & 1LL<<63) + return; + else + vt_list->offset += vt_list->to_expire - done; + + /* save the actual expire value */ + vt_list->idle = done; + + /* + * We cannot halt the CPU timer, we just write a value that + * nearly never expires (only after 71 years) and re-write + * the stored expire value if we continue the timer + */ + fire: + set_vtimer(VTIMER_MAX_SLICE); +} + +void set_vtimer(__u64 expires) +{ + asm volatile ("SPT %0" : : "m" (expires)); + + /* store expire time for this CPU timer */ + per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires; +} + +/* + * Sorted add to a list. List is linear searched until first bigger + * element is found. + */ +void list_add_sorted(struct vtimer_list *timer, struct list_head *head) +{ + struct vtimer_list *event; + + list_for_each_entry(event, head, entry) { + if (event->expires > timer->expires) { + list_add_tail(&timer->entry, &event->entry); + return; + } + } + list_add_tail(&timer->entry, head); +} + +/* + * Do the callback functions of expired vtimer events. + * Called from within the interrupt handler. + */ +static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs) +{ + struct vtimer_queue *vt_list; + struct vtimer_list *event, *tmp; + void (*fn)(unsigned long, struct pt_regs*); + unsigned long data; + + if (list_empty(cb_list)) + return; + + vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + + list_for_each_entry_safe(event, tmp, cb_list, entry) { + fn = event->function; + data = event->data; + fn(data, regs); + + if (!event->interval) + /* delete one shot timer */ + list_del_init(&event->entry); + else { + /* move interval timer back to list */ + spin_lock(&vt_list->lock); + list_del_init(&event->entry); + list_add_sorted(event, &vt_list->list); + spin_unlock(&vt_list->lock); + } + } +} + +/* + * Handler for the virtual CPU timer. 
+ */ +static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code) +{ + int cpu; + __u64 next, delta; + struct vtimer_queue *vt_list; + struct vtimer_list *event, *tmp; + struct list_head *ptr; + /* the callback queue */ + struct list_head cb_list; + + INIT_LIST_HEAD(&cb_list); + cpu = smp_processor_id(); + vt_list = &per_cpu(virt_cpu_timer, cpu); + + /* walk timer list, fire all expired events */ + spin_lock(&vt_list->lock); + + if (vt_list->to_expire < VTIMER_MAX_SLICE) + vt_list->offset += vt_list->to_expire; + + list_for_each_entry_safe(event, tmp, &vt_list->list, entry) { + if (event->expires > vt_list->offset) + /* found first unexpired event, leave */ + break; + + /* re-charge interval timer, we have to add the offset */ + if (event->interval) + event->expires = event->interval + vt_list->offset; + + /* move expired timer to the callback queue */ + list_move_tail(&event->entry, &cb_list); + } + spin_unlock(&vt_list->lock); + do_callbacks(&cb_list, regs); + + /* next event is first in list */ + spin_lock(&vt_list->lock); + if (!list_empty(&vt_list->list)) { + ptr = vt_list->list.next; + event = list_entry(ptr, struct vtimer_list, entry); + next = event->expires - vt_list->offset; + + /* add the expired time from this interrupt handler + * and the callback functions + */ + asm volatile ("STPT %0" : "=m" (delta)); + delta = 0xffffffffffffffffLL - delta + 1; + vt_list->offset += delta; + next -= delta; + } else { + vt_list->offset = 0; + next = VTIMER_MAX_SLICE; + } + spin_unlock(&vt_list->lock); + set_vtimer(next); +} + +void init_virt_timer(struct vtimer_list *timer) +{ + timer->magic = VTIMER_MAGIC; + timer->function = NULL; + INIT_LIST_HEAD(&timer->entry); + spin_lock_init(&timer->lock); +} +EXPORT_SYMBOL(init_virt_timer); + +static inline int check_vtimer(struct vtimer_list *timer) +{ + if (timer->magic != VTIMER_MAGIC) + return -EINVAL; + return 0; +} + +static inline int vtimer_pending(struct vtimer_list *timer) +{ + return (!list_empty(&timer->entry)); +} + +/* + * this function should only run on the specified CPU + */ +static void internal_add_vtimer(struct vtimer_list *timer) +{ + unsigned long flags; + __u64 done; + struct vtimer_list *event; + struct vtimer_queue *vt_list; + + vt_list = &per_cpu(virt_cpu_timer, timer->cpu); + spin_lock_irqsave(&vt_list->lock, flags); + + if (timer->cpu != smp_processor_id()) + printk("internal_add_vtimer: BUG, running on wrong CPU"); + + /* if list is empty we only have to set the timer */ + if (list_empty(&vt_list->list)) { + /* reset the offset, this may happen if the last timer was + * just deleted by mod_virt_timer and the interrupt + * didn't happen until here + */ + vt_list->offset = 0; + goto fire; + } + + /* save progress */ + asm volatile ("STPT %0" : "=m" (done)); + + /* calculate completed work */ + done = vt_list->to_expire - done + vt_list->offset; + vt_list->offset = 0; + + list_for_each_entry(event, &vt_list->list, entry) + event->expires -= done; + + fire: + list_add_sorted(timer, &vt_list->list); + + /* get first element, which is the next vtimer slice */ + event = list_entry(vt_list->list.next, struct vtimer_list, entry); + + set_vtimer(event->expires); + spin_unlock_irqrestore(&vt_list->lock, flags); + /* release CPU aquired in prepare_vtimer or mod_virt_timer() */ + put_cpu(); +} + +static inline int prepare_vtimer(struct vtimer_list *timer) +{ + if (check_vtimer(timer) || !timer->function) { + printk("add_virt_timer: uninitialized timer\n"); + return -EINVAL; + } + + if (!timer->expires || timer->expires > 
VTIMER_MAX_SLICE) { + printk("add_virt_timer: invalid timer expire value!\n"); + return -EINVAL; + } + + if (vtimer_pending(timer)) { + printk("add_virt_timer: timer pending\n"); + return -EBUSY; + } + + timer->cpu = get_cpu(); + return 0; +} + +/* + * add_virt_timer - add an oneshot virtual CPU timer + */ +void add_virt_timer(void *new) +{ + struct vtimer_list *timer; + + timer = (struct vtimer_list *)new; + + if (prepare_vtimer(timer) < 0) + return; + + timer->interval = 0; + internal_add_vtimer(timer); +} +EXPORT_SYMBOL(add_virt_timer); + +/* + * add_virt_timer_int - add an interval virtual CPU timer + */ +void add_virt_timer_periodic(void *new) +{ + struct vtimer_list *timer; + + timer = (struct vtimer_list *)new; + + if (prepare_vtimer(timer) < 0) + return; + + timer->interval = timer->expires; + internal_add_vtimer(timer); +} +EXPORT_SYMBOL(add_virt_timer_periodic); + +/* + * If we change a pending timer the function must be called on the CPU + * where the timer is running on, e.g. by smp_call_function_on() + * + * The original mod_timer adds the timer if it is not pending. For compatibility + * we do the same. The timer will be added on the current CPU as a oneshot timer. + * + * returns whether it has modified a pending timer (1) or not (0) + */ +int mod_virt_timer(struct vtimer_list *timer, __u64 expires) +{ + struct vtimer_queue *vt_list; + unsigned long flags; + int cpu; + + if (check_vtimer(timer) || !timer->function) { + printk("mod_virt_timer: uninitialized timer\n"); + return -EINVAL; + } + + if (!expires || expires > VTIMER_MAX_SLICE) { + printk("mod_virt_timer: invalid expire range\n"); + return -EINVAL; + } + + /* + * This is a common optimization triggered by the + * networking code - if the timer is re-modified + * to be the same thing then just return: + */ + if (timer->expires == expires && vtimer_pending(timer)) + return 1; + + cpu = get_cpu(); + vt_list = &per_cpu(virt_cpu_timer, cpu); + + /* disable interrupts before test if timer is pending */ + spin_lock_irqsave(&vt_list->lock, flags); + + /* if timer isn't pending add it on the current CPU */ + if (!vtimer_pending(timer)) { + spin_unlock_irqrestore(&vt_list->lock, flags); + /* we do not activate an interval timer with mod_virt_timer */ + timer->interval = 0; + timer->expires = expires; + timer->cpu = cpu; + internal_add_vtimer(timer); + return 0; + } + + /* check if we run on the right CPU */ + if (timer->cpu != cpu) { + printk("mod_virt_timer: running on wrong CPU, check your code\n"); + spin_unlock_irqrestore(&vt_list->lock, flags); + put_cpu(); + return -EINVAL; + } + + list_del_init(&timer->entry); + timer->expires = expires; + + /* also change the interval if we have an interval timer */ + if (timer->interval) + timer->interval = expires; + + /* the timer can't expire anymore so we can release the lock */ + spin_unlock_irqrestore(&vt_list->lock, flags); + internal_add_vtimer(timer); + return 1; +} +EXPORT_SYMBOL(mod_virt_timer); + +/* + * delete a virtual timer + * + * returns whether the deleted timer was pending (1) or not (0) + */ +int del_virt_timer(struct vtimer_list *timer) +{ + unsigned long flags; + struct vtimer_queue *vt_list; + + if (check_vtimer(timer)) { + printk("del_virt_timer: timer not initialized\n"); + return -EINVAL; + } + + /* check if timer is pending */ + if (!vtimer_pending(timer)) + return 0; + + vt_list = &per_cpu(virt_cpu_timer, timer->cpu); + spin_lock_irqsave(&vt_list->lock, flags); + + /* we don't interrupt a running timer, just let it expire! 
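Pulling the pieces above together, purely as an illustration (not from the patch): a caller initializes a struct vtimer_list, fills in function, data and expires, and hands it to add_virt_timer() for a one-shot timer or add_virt_timer_periodic() for an interval timer. The callback and setup names below are made up, and the expiry value is an arbitrary CPU-timer tick count chosen only to stay below VTIMER_MAX_SLICE.

/* Hypothetical user of the vtimer interface above. */
static void my_vtimer_fn(unsigned long data, struct pt_regs *regs)   /* made up */
{
        /* runs from the CPU-timer interrupt once the slice has expired */
}

static struct vtimer_list my_timer;                                  /* made up */

static void my_vtimer_setup(void)                                    /* made up */
{
        init_virt_timer(&my_timer);
        my_timer.function = my_vtimer_fn;
        my_timer.data     = 0;
        my_timer.expires  = 0x100000000ULL;  /* some slice <= VTIMER_MAX_SLICE,
                                                in CPU-timer ticks */
        add_virt_timer(&my_timer);           /* one-shot */
        /* add_virt_timer_periodic(&my_timer) would instead re-arm the timer
         * with the same interval each time it fires */
}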
*/ + list_del_init(&timer->entry); + + /* last timer removed */ + if (list_empty(&vt_list->list)) { + vt_list->to_expire = 0; + vt_list->offset = 0; + } + + spin_unlock_irqrestore(&vt_list->lock, flags); + return 1; +} +EXPORT_SYMBOL(del_virt_timer); + +/* + * Start the virtual CPU timer on the current CPU. + */ +void init_cpu_vtimer(void) +{ + struct vtimer_queue *vt_list; + unsigned long cr0; + __u64 timer; + + /* kick the virtual timer */ + timer = VTIMER_MAX_SLICE; + asm volatile ("SPT %0" : : "m" (timer)); + __ctl_store(cr0, 0, 0); + cr0 |= 0x400; + __ctl_load(cr0, 0, 0); + + vt_list = &per_cpu(virt_cpu_timer, smp_processor_id()); + INIT_LIST_HEAD(&vt_list->list); + spin_lock_init(&vt_list->lock); + vt_list->to_expire = 0; + vt_list->offset = 0; + vt_list->idle = 0; + +} + +static int vtimer_idle_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + switch (action) { + case CPU_IDLE: + stop_cpu_timer(); + break; + case CPU_NOT_IDLE: + start_cpu_timer(); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block vtimer_idle_nb = { + .notifier_call = vtimer_idle_notify, +}; + +void __init vtime_init(void) +{ + /* request the cpu timer external interrupt */ + if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt, + &ext_int_info_timer) != 0) + panic("Couldn't request external interrupt 0x1005"); + + if (register_idle_notifier(&vtimer_idle_nb)) + panic("Couldn't register idle notifier"); + + init_cpu_vtimer(); +} + diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c new file mode 100644 index 000000000..dea4957d0 --- /dev/null +++ b/arch/s390/lib/string.c @@ -0,0 +1,405 @@ +/* + * arch/s390/lib/string.c + * Optimized string functions + * + * S390 version + * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#define IN_ARCH_STRING_C 1 + +#include +#include + +/* + * Helper functions to find the end of a string + */ +static inline char *__strend(const char *s) +{ + register unsigned long r0 asm("0") = 0; + + asm volatile ("0: srst %0,%1\n" + " jo 0b" + : "+d" (r0), "+a" (s) : : "cc" ); + return (char *) r0; +} + +static inline char *__strnend(const char *s, size_t n) +{ + register unsigned long r0 asm("0") = 0; + const char *p = s + n; + + asm volatile ("0: srst %0,%1\n" + " jo 0b" + : "+d" (p), "+a" (s) : "d" (r0) : "cc" ); + return (char *) p; +} + +/** + * strlen - Find the length of a string + * @s: The string to be sized + * + * returns the length of @s + */ +size_t strlen(const char *s) +{ + return __strend(s) - s; +} +EXPORT_SYMBOL_NOVERS(strlen); + +/** + * strnlen - Find the length of a length-limited string + * @s: The string to be sized + * @n: The maximum number of bytes to search + * + * returns the minimum of the length of @s and @n + */ +size_t strnlen(const char * s, size_t n) +{ + return __strnend(s, n) - s; +} +EXPORT_SYMBOL_NOVERS(strnlen); + +/** + * strcpy - Copy a %NUL terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * + * returns a pointer to @dest + */ +char *strcpy(char *dest, const char *src) +{ + register int r0 asm("0") = 0; + char *ret = dest; + + asm volatile ("0: mvst %0,%1\n" + " jo 0b" + : "+&a" (dest), "+&a" (src) : "d" (r0) + : "cc", "memory" ); + return ret; +} +EXPORT_SYMBOL_NOVERS(strcpy); + +/** + * strlcpy - Copy a %NUL terminated string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @size: size of 
destination buffer + * + * Compatible with *BSD: the result is always a valid + * NUL-terminated string that fits in the buffer (unless, + * of course, the buffer size is zero). It does not pad + * out the result like strncpy() does. + */ +size_t strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = __strend(src) - src; + + if (size) { + size_t len = (ret >= size) ? size-1 : ret; + dest[len] = '\0'; + __builtin_memcpy(dest, src, len); + } + return ret; +} +EXPORT_SYMBOL_NOVERS(strlcpy); + +/** + * strncpy - Copy a length-limited, %NUL-terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @n: The maximum number of bytes to copy + * + * The result is not %NUL-terminated if the source exceeds + * @n bytes. + */ +char *strncpy(char *dest, const char *src, size_t n) +{ + size_t len = __strnend(src, n) - src; + __builtin_memset(dest + len, 0, n - len); + __builtin_memcpy(dest, src, len); + return dest; +} +EXPORT_SYMBOL_NOVERS(strncpy); + +/** + * strcat - Append one %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * + * returns a pointer to @dest + */ +char *strcat(char *dest, const char *src) +{ + register int r0 asm("0") = 0; + unsigned long dummy; + char *ret = dest; + + asm volatile ("0: srst %0,%1\n" + " jo 0b\n" + "1: mvst %0,%2\n" + " jo 1b" + : "=&a" (dummy), "+a" (dest), "+a" (src) + : "d" (r0), "0" (0UL) : "cc", "memory" ); + return ret; +} +EXPORT_SYMBOL_NOVERS(strcat); + +/** + * strlcat - Append a length-limited, %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * @n: The size of the destination buffer. + */ +size_t strlcat(char *dest, const char *src, size_t n) +{ + size_t dsize = __strend(dest) - dest; + size_t len = __strend(src) - src; + size_t res = dsize + len; + + if (dsize < n) { + dest += dsize; + n -= dsize; + if (len >= n) + len = n - 1; + dest[len] = '\0'; + __builtin_memcpy(dest, src, len); + } + return res; +} +EXPORT_SYMBOL_NOVERS(strlcat); + +/** + * strncat - Append a length-limited, %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * @n: The maximum numbers of bytes to copy + * + * returns a pointer to @dest + * + * Note that in contrast to strncpy, strncat ensures the result is + * terminated. 
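+ * + * For example (illustrative note, not in the original patch), with @dest + * containing "ab" (and room for the result) and @n == 2, + * strncat(dest, "xyz", 2) appends "xy" plus the trailing NUL, giving "abxy".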
+ */ +char *strncat(char *dest, const char *src, size_t n) +{ + size_t len = __strnend(src, n) - src; + char *p = __strend(dest); + + p[len] = '\0'; + __builtin_memcpy(p, src, len); + return dest; +} +EXPORT_SYMBOL_NOVERS(strncat); + +/** + * strcmp - Compare two strings + * @cs: One string + * @ct: Another string + * + * returns 0 if @cs and @ct are equal, + * < 0 if @cs is less than @ct + * > 0 if @cs is greater than @ct + */ +int strcmp(const char *cs, const char *ct) +{ + register int r0 asm("0") = 0; + int ret = 0; + + asm volatile ("0: clst %2,%3\n" + " jo 0b\n" + " je 1f\n" + " ic %0,0(%2)\n" + " ic %1,0(%3)\n" + " sr %0,%1\n" + "1:" + : "+d" (ret), "+d" (r0), "+a" (cs), "+a" (ct) + : : "cc" ); + return ret; +} +EXPORT_SYMBOL_NOVERS(strcmp); + +/** + * strrchr - Find the last occurrence of a character in a string + * @s: The string to be searched + * @c: The character to search for + */ +char * strrchr(const char * s, int c) +{ + size_t len = __strend(s) - s; + + if (len) + do { + if (s[len] == (char) c) + return (char *) s + len; + } while (--len > 0); + return 0; +} +EXPORT_SYMBOL_NOVERS(strrchr); + +/** + * strstr - Find the first substring in a %NUL terminated string + * @s1: The string to be searched + * @s2: The string to search for + */ +char * strstr(const char * s1,const char * s2) +{ + int l1, l2; + + l2 = __strend(s2) - s2; + if (!l2) + return (char *) s1; + l1 = __strend(s1) - s1; + while (l1-- >= l2) { + register unsigned long r2 asm("2") = (unsigned long) s1; + register unsigned long r3 asm("3") = (unsigned long) l2; + register unsigned long r4 asm("4") = (unsigned long) s2; + register unsigned long r5 asm("5") = (unsigned long) l2; + int cc; + + asm volatile ("0: clcle %1,%3,0\n" + " jo 0b\n" + " ipm %0\n" + " srl %0,28" + : "=&d" (cc), "+a" (r2), "+a" (r3), + "+a" (r4), "+a" (r5) : : "cc" ); + if (!cc) + return (char *) s1; + s1++; + } + return 0; +} +EXPORT_SYMBOL_NOVERS(strstr); + +/** + * memchr - Find a character in an area of memory. + * @s: The memory area + * @c: The byte to search for + * @n: The size of the area. + * + * returns the address of the first occurrence of @c, or %NULL + * if @c is not found + */ +void *memchr(const void *s, int c, size_t n) +{ + register int r0 asm("0") = (char) c; + const void *ret = s + n; + + asm volatile ("0: srst %0,%1\n" + " jo 0b\n" + " jl 1f\n" + " la %0,0\n" + "1:" + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); + return (void *) ret; +} +EXPORT_SYMBOL_NOVERS(memchr); + +/** + * memcmp - Compare two areas of memory + * @cs: One area of memory + * @ct: Another area of memory + * @count: The size of the area. + */ +int memcmp(const void *cs, const void *ct, size_t n) +{ + register unsigned long r2 asm("2") = (unsigned long) cs; + register unsigned long r3 asm("3") = (unsigned long) n; + register unsigned long r4 asm("4") = (unsigned long) ct; + register unsigned long r5 asm("5") = (unsigned long) n; + int ret; + + asm volatile ("0: clcle %1,%3,0\n" + " jo 0b\n" + " ipm %0\n" + " srl %0,28" + : "=&d" (ret), "+a" (r2), "+a" (r3), "+a" (r4), "+a" (r5) + : : "cc" ); + if (ret) + ret = *(char *) r2 - *(char *) r4; + return ret; +} +EXPORT_SYMBOL_NOVERS(memcmp); + +/** + * memscan - Find a character in an area of memory. + * @s: The memory area + * @c: The byte to search for + * @n: The size of the area. 
+ * + * returns the address of the first occurrence of @c, or 1 byte past + * the area if @c is not found + */ +void *memscan(void *s, int c, size_t n) +{ + register int r0 asm("0") = (char) c; + const void *ret = s + n; + + asm volatile ("0: srst %0,%1\n" + " jo 0b\n" + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc" ); + return (void *) ret; +} +EXPORT_SYMBOL_NOVERS(memscan); + +/** + * memcpy - Copy one area of memory to another + * @dest: Where to copy to + * @src: Where to copy from + * @n: The size of the area. + * + * returns a pointer to @dest + */ +void *memcpy(void *dest, const void *src, size_t n) +{ + return __builtin_memcpy(dest, src, n); +} +EXPORT_SYMBOL_NOVERS(memcpy); + +/** + * bcopy - Copy one area of memory to another + * @src: Where to copy from + * @dest: Where to copy to + * @n: The size of the area. + * + * Note that this is the same as memcpy(), with the arguments reversed. + * memcpy() is the standard, bcopy() is a legacy BSD function. + */ +void bcopy(const void *srcp, void *destp, size_t n) +{ + __builtin_memcpy(destp, srcp, n); +} +EXPORT_SYMBOL_NOVERS(bcopy); + +/** + * memset - Fill a region of memory with the given value + * @s: Pointer to the start of the area. + * @c: The byte to fill the area with + * @n: The size of the area. + * + * returns a pointer to @s + */ +void *memset(void *s, int c, size_t n) +{ + char *xs; + + if (c == 0) + return __builtin_memset(s, 0, n); + + xs = (char *) s; + if (n > 0) + do { + *xs++ = c; + } while (--n > 0); + return s; +} +EXPORT_SYMBOL_NOVERS(memset); + +/* + * missing exports for string functions defined in lib/string.c + */ +EXPORT_SYMBOL_NOVERS(memmove); +EXPORT_SYMBOL_NOVERS(strchr); +EXPORT_SYMBOL_NOVERS(strnchr); +EXPORT_SYMBOL_NOVERS(strncmp); +EXPORT_SYMBOL_NOVERS(strpbrk); diff --git a/arch/sh/boards/renesas/hs7751rvoip/Makefile b/arch/sh/boards/renesas/hs7751rvoip/Makefile new file mode 100644 index 000000000..e8b4109ac --- /dev/null +++ b/arch/sh/boards/renesas/hs7751rvoip/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for the HS7751RVoIP specific parts of the kernel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +obj-y := mach.o setup.o io.o irq.o led.o + +obj-$(CONFIG_PCI) += pci.o + diff --git a/arch/sh/boards/renesas/hs7751rvoip/io.c b/arch/sh/boards/renesas/hs7751rvoip/io.c new file mode 100644 index 000000000..a2ecd2d6a --- /dev/null +++ b/arch/sh/boards/renesas/hs7751rvoip/io.c @@ -0,0 +1,310 @@ +/* + * linux/arch/sh/kernel/io_hs7751rvoip.c + * + * Copyright (C) 2001 Ian da Silva, Jeremy Siegel + * Based largely on io_se.c. + * + * I/O routine for Renesas Technology sales HS7751RVoIP + * + * Initial version only to support LAN access; some + * placeholder code from io_hs7751rvoip.c left in with the + * expectation of later SuperIO and PCMCIA access. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include "../../../drivers/pci/pci-sh7751.h" + +extern void *area5_io8_base; /* Area 5 8bit I/O Base address */ +extern void *area6_io8_base; /* Area 6 8bit I/O Base address */ +extern void *area5_io16_base; /* Area 5 16bit I/O Base address */ +extern void *area6_io16_base; /* Area 6 16bit I/O Base address */ + +/* + * The 7751R HS7751RVoIP uses the built-in PCI controller (PCIC) + * of the 7751R processor, and has a SuperIO accessible via the PCI. 
+ * The board also includes a PCMCIA controller on its memory bus, + * like the other Solution Engine boards. + */ + +#define PCIIOBR (volatile long *)PCI_REG(SH7751_PCIIOBR) +#define PCIMBR (volatile long *)PCI_REG(SH7751_PCIMBR) +#define PCI_IO_AREA SH7751_PCI_IO_BASE +#define PCI_MEM_AREA SH7751_PCI_CONFIG_BASE + +#define PCI_IOMAP(adr) (PCI_IO_AREA + (adr & ~SH7751_PCIIOBR_MASK)) + +#if defined(CONFIG_HS7751RVOIP_CODEC) +#define CODEC_IO_BASE 0x1000 +#endif + +#define maybebadio(name,port) \ + printk("bad PC-like io %s for port 0x%lx at 0x%08x\n", \ + #name, (port), (__u32) __builtin_return_address(0)) + +static inline void delay(void) +{ + ctrl_inw(0xa0000000); +} + +static inline unsigned long port2adr(unsigned int port) +{ + if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6) + if (port == 0x3f6) + return ((unsigned long)area5_io16_base + 0x0c); + else + return ((unsigned long)area5_io16_base + 0x800 + ((port-0x1f0) << 1)); + else + maybebadio(port2adr, (unsigned long)port); + return port; +} + +/* The 7751R HS7751RVoIP seems to have everything hooked */ +/* up pretty normally (nothing on high-bytes only...) so this */ +/* shouldn't be needed */ +static inline int shifted_port(unsigned long port) +{ + /* For IDE registers, value is not shifted */ + if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6) + return 0; + else + return 1; +} + +#if defined(CONFIG_HS7751RVOIP_CODEC) +static inline int +codec_port(unsigned long port) +{ + if (CODEC_IO_BASE <= port && port < (CODEC_IO_BASE+0x20)) + return 1; + else + return 0; +} +#endif + +/* In case someone configures the kernel w/o PCI support: in that */ +/* scenario, don't ever bother to check for PCI-window addresses */ + +/* NOTE: WINDOW CHECK MAY BE A BIT OFF, HIGH PCIBIOS_MIN_IO WRAPS? */ +#if defined(CONFIG_PCI) +#define CHECK_SH7751_PCIIO(port) \ + ((port >= PCIBIOS_MIN_IO) && (port < (PCIBIOS_MIN_IO + SH7751_PCI_IO_SIZE))) +#else +#define CHECK_SH7751_PCIIO(port) (0) +#endif + +/* + * General outline: remap really low stuff [eventually] to SuperIO, + * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO) + * is mapped through the PCI IO window. Stuff with high bits (PXSEG) + * should be way beyond the window, and is used w/o translation for + * compatibility. 
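+ * + * As a summary of the routines below: legacy IDE ports (0x1f0-0x1f7 and + * 0x3f6) are translated by port2adr() into the Area 5 window, PXSEG + * addresses are dereferenced directly, and other ports go through + * PCI_IOMAP() into the PCI I/O window.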
+ */ +unsigned char hs7751rvoip_inb(unsigned long port) +{ + if (PXSEG(port)) + return *(volatile unsigned char *)port; +#if defined(CONFIG_HS7751RVOIP_CODEC) + else if (codec_port(port)) + return *(volatile unsigned char *)((unsigned long)area6_io8_base+(port-CODEC_IO_BASE)); +#endif + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + return *(volatile unsigned char *)PCI_IOMAP(port); + else + return (*(volatile unsigned short *)port2adr(port) & 0xff); +} + +unsigned char hs7751rvoip_inb_p(unsigned long port) +{ + unsigned char v; + + if (PXSEG(port)) + v = *(volatile unsigned char *)port; +#if defined(CONFIG_HS7751RVOIP_CODEC) + else if (codec_port(port)) + v = *(volatile unsigned char *)((unsigned long)area6_io8_base+(port-CODEC_IO_BASE)); +#endif + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + v = *(volatile unsigned char *)PCI_IOMAP(port); + else + v = (*(volatile unsigned short *)port2adr(port) & 0xff); + delay(); + return v; +} + +unsigned short hs7751rvoip_inw(unsigned long port) +{ + if (PXSEG(port)) + return *(volatile unsigned short *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + return *(volatile unsigned short *)PCI_IOMAP(port); + else + maybebadio(inw, port); + return 0; +} + +unsigned int hs7751rvoip_inl(unsigned long port) +{ + if (PXSEG(port)) + return *(volatile unsigned long *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + return *(volatile unsigned long *)PCI_IOMAP(port); + else + maybebadio(inl, port); + return 0; +} + +void hs7751rvoip_outb(unsigned char value, unsigned long port) +{ + if (PXSEG(port)) + *(volatile unsigned char *)port = value; +#if defined(CONFIG_HS7751RVOIP_CODEC) + else if (codec_port(port)) + *(volatile unsigned char *)((unsigned long)area6_io8_base+(port-CODEC_IO_BASE)) = value; +#endif + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + *(unsigned char *)PCI_IOMAP(port) = value; + else + *(volatile unsigned short *)port2adr(port) = value; +} + +void hs7751rvoip_outb_p(unsigned char value, unsigned long port) +{ + if (PXSEG(port)) + *(volatile unsigned char *)port = value; +#if defined(CONFIG_HS7751RVOIP_CODEC) + else if (codec_port(port)) + *(volatile unsigned char *)((unsigned long)area6_io8_base+(port-CODEC_IO_BASE)) = value; +#endif + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + *(unsigned char *)PCI_IOMAP(port) = value; + else + *(volatile unsigned short *)port2adr(port) = value; + delay(); +} + +void hs7751rvoip_outw(unsigned short value, unsigned long port) +{ + if (PXSEG(port)) + *(volatile unsigned short *)port = value; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + *(unsigned short *)PCI_IOMAP(port) = value; + else + maybebadio(outw, port); +} + +void hs7751rvoip_outl(unsigned int value, unsigned long port) +{ + if (PXSEG(port)) + *(volatile unsigned long *)port = value; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + *((unsigned long *)PCI_IOMAP(port)) = value; + else + maybebadio(outl, port); +} + +void hs7751rvoip_insb(unsigned long port, void *addr, unsigned long count) +{ + if (PXSEG(port)) + while (count--) *((unsigned char *) addr)++ = *(volatile unsigned char *)port; +#if defined(CONFIG_HS7751RVOIP_CODEC) + else if (codec_port(port)) + while (count--) *((unsigned char *) addr)++ = *(volatile unsigned char *)((unsigned long)area6_io8_base+(port-CODEC_IO_BASE)); +#endif + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) { + volatile __u8 *bp = (__u8 *)PCI_IOMAP(port); + + while (count--) *((volatile unsigned
char *) addr)++ = *bp; + } else { + volatile __u16 *p = (volatile unsigned short *)port2adr(port); + + while (count--) *((unsigned char *) addr)++ = *p & 0xff; + } +} + +void hs7751rvoip_insw(unsigned long port, void *addr, unsigned long count) +{ + volatile __u16 *p; + + if (PXSEG(port)) + p = (volatile unsigned short *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + p = (volatile unsigned short *)PCI_IOMAP(port); + else + p = (volatile unsigned short *)port2adr(port); + while (count--) *((__u16 *) addr)++ = *p; +} + +void hs7751rvoip_insl(unsigned long port, void *addr, unsigned long count) +{ + if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) { + volatile __u32 *p = (__u32 *)PCI_IOMAP(port); + + while (count--) *((__u32 *) addr)++ = *p; + } else + maybebadio(insl, port); +} + +void hs7751rvoip_outsb(unsigned long port, const void *addr, unsigned long count) +{ + if (PXSEG(port)) + while (count--) *(volatile unsigned char *)port = *((unsigned char *) addr)++; +#if defined(CONFIG_HS7751RVOIP_CODEC) + else if (codec_port(port)) + while (count--) *(volatile unsigned char *)((unsigned long)area6_io8_base+(port-CODEC_IO_BASE)) = *((unsigned char *) addr)++; +#endif + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) { + volatile __u8 *bp = (__u8 *)PCI_IOMAP(port); + + while (count--) *bp = *((volatile unsigned char *) addr)++; + } else { + volatile __u16 *p = (volatile unsigned short *)port2adr(port); + + while (count--) *p = *((unsigned char *) addr)++; + } +} + +void hs7751rvoip_outsw(unsigned long port, const void *addr, unsigned long count) +{ + volatile __u16 *p; + + if (PXSEG(port)) + p = (volatile unsigned short *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + p = (volatile unsigned short *)PCI_IOMAP(port); + else + p = (volatile unsigned short *)port2adr(port); + while (count--) *p = *((__u16 *) addr)++; +} + +void hs7751rvoip_outsl(unsigned long port, const void *addr, unsigned long count) +{ + if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) { + volatile __u32 *p = (__u32 *)PCI_IOMAP(port); + + while (count--) *p = *((__u32 *) addr)++; + } else + maybebadio(outsl, port); +} + +void *hs7751rvoip_ioremap(unsigned long offset, unsigned long size) +{ + if (offset >= 0xfd000000) + return (void *)offset; + else + return (void *)P2SEGADDR(offset); +} +EXPORT_SYMBOL(hs7751rvoip_ioremap); + +unsigned long hs7751rvoip_isa_port2addr(unsigned long offset) +{ + return port2adr(offset); +} diff --git a/arch/sh/boards/renesas/hs7751rvoip/irq.c b/arch/sh/boards/renesas/hs7751rvoip/irq.c new file mode 100644 index 000000000..a7921f67a --- /dev/null +++ b/arch/sh/boards/renesas/hs7751rvoip/irq.c @@ -0,0 +1,122 @@ +/* + * linux/arch/sh/boards/renesas/hs7751rvoip/irq.c + * + * Copyright (C) 2000 Kazumoto Kojima + * + * Renesas Technology Sales HS7751RVoIP Support. + * + * Modified for HS7751RVoIP by + * Atom Create Engineering Co., Ltd. 2002. + * Lineo uSolutions, Inc. 2003. 
+ */ + +#include +#include +#include +#include +#include +#include + +static int mask_pos[] = {8, 9, 10, 11, 12, 13, 0, 1, 2, 3, 4, 5, 6, 7}; + +static void enable_hs7751rvoip_irq(unsigned int irq); +static void disable_hs7751rvoip_irq(unsigned int irq); + +/* shutdown is same as "disable" */ +#define shutdown_hs7751rvoip_irq disable_hs7751rvoip_irq + +static void ack_hs7751rvoip_irq(unsigned int irq); +static void end_hs7751rvoip_irq(unsigned int irq); + +static unsigned int startup_hs7751rvoip_irq(unsigned int irq) +{ + enable_hs7751rvoip_irq(irq); + return 0; /* never anything pending */ +} + +static void disable_hs7751rvoip_irq(unsigned int irq) +{ + unsigned long flags; + unsigned short val; + unsigned short mask = 0xffff ^ (0x0001 << mask_pos[irq]); + + /* Set the priority in IPR to 0 */ + local_irq_save(flags); + val = ctrl_inw(IRLCNTR3); + val &= mask; + ctrl_outw(val, IRLCNTR3); + local_irq_restore(flags); +} + +static void enable_hs7751rvoip_irq(unsigned int irq) +{ + unsigned long flags; + unsigned short val; + unsigned short value = (0x0001 << mask_pos[irq]); + + /* Set priority in IPR back to original value */ + local_irq_save(flags); + val = ctrl_inw(IRLCNTR3); + val |= value; + ctrl_outw(val, IRLCNTR3); + local_irq_restore(flags); +} + +static void ack_hs7751rvoip_irq(unsigned int irq) +{ + disable_hs7751rvoip_irq(irq); +} + +static void end_hs7751rvoip_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_hs7751rvoip_irq(irq); +} + +static struct hw_interrupt_type hs7751rvoip_irq_type = { + "HS7751RVoIP IRQ", + startup_hs7751rvoip_irq, + shutdown_hs7751rvoip_irq, + enable_hs7751rvoip_irq, + disable_hs7751rvoip_irq, + ack_hs7751rvoip_irq, + end_hs7751rvoip_irq, +}; + +static void make_hs7751rvoip_irq(unsigned int irq) +{ + disable_irq_nosync(irq); + irq_desc[irq].handler = &hs7751rvoip_irq_type; + disable_hs7751rvoip_irq(irq); +} + +/* + * Initialize IRQ setting + */ +void __init init_hs7751rvoip_IRQ(void) +{ + int i; + + /* IRL0=ON HOOK1 + * IRL1=OFF HOOK1 + * IRL2=ON HOOK2 + * IRL3=OFF HOOK2 + * IRL4=Ringing Detection + * IRL5=CODEC + * IRL6=Ethernet + * IRL7=Ethernet Hub + * IRL8=USB Communication + * IRL9=USB Connection + * IRL10=USB DMA + * IRL11=CF Card + * IRL12=PCMCIA + * IRL13=PCI Slot + */ + ctrl_outw(0x9876, IRLCNTR1); + ctrl_outw(0xdcba, IRLCNTR2); + ctrl_outw(0x0050, IRLCNTR4); + ctrl_outw(0x4321, IRLCNTR5); + + for (i=0; i<14; i++) + make_hs7751rvoip_irq(i); +} diff --git a/arch/sh/boards/renesas/hs7751rvoip/led.c b/arch/sh/boards/renesas/hs7751rvoip/led.c new file mode 100644 index 000000000..18a13c8da --- /dev/null +++ b/arch/sh/boards/renesas/hs7751rvoip/led.c @@ -0,0 +1,27 @@ +/* + * linux/arch/sh/kernel/setup_hs7751rvoip.c + * + * Copyright (C) 2000 Kazumoto Kojima + * + * Renesas Technology Sales HS7751RVoIP Support. + * + * Modified for HS7751RVoIP by + * Atom Create Engineering Co., Ltd. 2002. + * Lineo uSolutions, Inc. 2003. 
+ */ + +#include +#include +#include + +extern unsigned int debug_counter; + +void debug_led_disp(void) +{ + unsigned short value; + + value = (unsigned char)debug_counter++; + ctrl_outb((0xf0|value), PA_OUTPORTR); + if (value == 0x0f) + debug_counter = 0; +} diff --git a/arch/sh/boards/renesas/hs7751rvoip/mach.c b/arch/sh/boards/renesas/hs7751rvoip/mach.c new file mode 100644 index 000000000..8bbed6022 --- /dev/null +++ b/arch/sh/boards/renesas/hs7751rvoip/mach.c @@ -0,0 +1,55 @@ +/* + * linux/arch/sh/kernel/mach_hs7751rvoip.c + * + * Minor tweak of mach_se.c file to reference hs7751rvoip-specific items. + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Machine vector for the Renesas Technology sales HS7751RVoIP + */ + +#include +#include + +#include +#include +#include +#include + +extern void init_hs7751rvoip_IRQ(void); +extern void *hs7751rvoip_ioremap(unsigned long, unsigned long); + +/* + * The Machine Vector + */ + +struct sh_machine_vector mv_hs7751rvoip __initmv = { + .mv_nr_irqs = 72, + + .mv_inb = hs7751rvoip_inb, + .mv_inw = hs7751rvoip_inw, + .mv_inl = hs7751rvoip_inl, + .mv_outb = hs7751rvoip_outb, + .mv_outw = hs7751rvoip_outw, + .mv_outl = hs7751rvoip_outl, + + .mv_inb_p = hs7751rvoip_inb_p, + .mv_inw_p = hs7751rvoip_inw, + .mv_inl_p = hs7751rvoip_inl, + .mv_outb_p = hs7751rvoip_outb_p, + .mv_outw_p = hs7751rvoip_outw, + .mv_outl_p = hs7751rvoip_outl, + + .mv_insb = hs7751rvoip_insb, + .mv_insw = hs7751rvoip_insw, + .mv_insl = hs7751rvoip_insl, + .mv_outsb = hs7751rvoip_outsb, + .mv_outsw = hs7751rvoip_outsw, + .mv_outsl = hs7751rvoip_outsl, + + .mv_ioremap = hs7751rvoip_ioremap, + .mv_isa_port2addr = hs7751rvoip_isa_port2addr, + .mv_init_irq = init_hs7751rvoip_IRQ, +}; +ALIAS_MV(hs7751rvoip) diff --git a/arch/sh/boards/renesas/hs7751rvoip/pci.c b/arch/sh/boards/renesas/hs7751rvoip/pci.c new file mode 100644 index 000000000..7a442d1ec --- /dev/null +++ b/arch/sh/boards/renesas/hs7751rvoip/pci.c @@ -0,0 +1,150 @@ +/* + * linux/arch/sh/kernel/pci-hs7751rvoip.c + * + * Author: Ian DaSilva (idasilva@mvista.com) + * + * Highly leveraged from pci-bigsur.c, written by Dustin McIntire. + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * PCI initialization for the Renesas SH7751R HS7751RVoIP board + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include "../../../drivers/pci/pci-sh7751.h" +#include + +#define PCIMCR_MRSET_OFF 0xBFFFFFFF +#define PCIMCR_RFSH_OFF 0xFFFFFFFB + +/* + * Only long word accesses of the PCIC's internal local registers and the + * configuration registers from the CPU is supported. + */ +#define PCIC_WRITE(x,v) writel((v), PCI_REG(x)) +#define PCIC_READ(x) readl(PCI_REG(x)) + +/* + * Description: This function sets up and initializes the pcic, sets + * up the BARS, maps the DRAM into the address space etc, etc. + */ +int __init pcibios_init_platform(void) +{ + unsigned long bcr1, wcr1, wcr2, wcr3, mcr; + unsigned short bcr2, bcr3; + + /* + * Initialize the slave bus controller on the pcic. The values used + * here should not be hardcoded, but they should be taken from the bsc + * on the processor, to make this function as generic as possible. + * (i.e. Another sbc may usr different SDRAM timing settings -- in order + * for the pcic to work, its settings need to be exactly the same.) 
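+ * To that end, the code below reads BCR1-BCR3, WCR1-WCR3 and MCR from the + * BSC and copies them (with minor adjustments) into the corresponding PCIC + * registers.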
+ */ + bcr1 = (*(volatile unsigned long *)(SH7751_BCR1)); + bcr2 = (*(volatile unsigned short *)(SH7751_BCR2)); + bcr3 = (*(volatile unsigned short *)(SH7751_BCR3)); + wcr1 = (*(volatile unsigned long *)(SH7751_WCR1)); + wcr2 = (*(volatile unsigned long *)(SH7751_WCR2)); + wcr3 = (*(volatile unsigned long *)(SH7751_WCR3)); + mcr = (*(volatile unsigned long *)(SH7751_MCR)); + + bcr1 = bcr1 | 0x00080000; /* Enable Bit 19, BREQEN */ + (*(volatile unsigned long *)(SH7751_BCR1)) = bcr1; + + bcr1 = bcr1 | 0x40080000; /* Enable Bit 19 BREQEN, set PCIC to slave */ + PCIC_WRITE(SH7751_PCIBCR1, bcr1); /* PCIC BCR1 */ + PCIC_WRITE(SH7751_PCIBCR2, bcr2); /* PCIC BCR2 */ + PCIC_WRITE(SH7751_PCIBCR3, bcr3); /* PCIC BCR3 */ + PCIC_WRITE(SH7751_PCIWCR1, wcr1); /* PCIC WCR1 */ + PCIC_WRITE(SH7751_PCIWCR2, wcr2); /* PCIC WCR2 */ + PCIC_WRITE(SH7751_PCIWCR3, wcr3); /* PCIC WCR3 */ + mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF; + PCIC_WRITE(SH7751_PCIMCR, mcr); /* PCIC MCR */ + + /* Enable all interrupts, so we know what to fix */ + PCIC_WRITE(SH7751_PCIINTM, 0x0000c3ff); + PCIC_WRITE(SH7751_PCIAINTM, 0x0000380f); + + /* Set up standard PCI config registers */ + PCIC_WRITE(SH7751_PCICONF1, 0xFB900047); /* Bus Master, Mem & I/O access */ + PCIC_WRITE(SH7751_PCICONF2, 0x00000000); /* PCI Class code & Revision ID */ + PCIC_WRITE(SH7751_PCICONF4, 0xab000001); /* PCI I/O address (local regs) */ + PCIC_WRITE(SH7751_PCICONF5, 0x0c000000); /* PCI MEM address (local RAM) */ + PCIC_WRITE(SH7751_PCICONF6, 0xd0000000); /* PCI MEM address (unused) */ + PCIC_WRITE(SH7751_PCICONF11, 0x35051054); /* PCI Subsystem ID & Vendor ID */ + PCIC_WRITE(SH7751_PCILSR0, 0x03f00000); /* MEM (full 64M exposed) */ + PCIC_WRITE(SH7751_PCILSR1, 0x00000000); /* MEM (unused) */ + PCIC_WRITE(SH7751_PCILAR0, 0x0c000000); /* MEM (direct map from PCI) */ + PCIC_WRITE(SH7751_PCILAR1, 0x00000000); /* MEM (unused) */ + + /* Now turn it on... */ + PCIC_WRITE(SH7751_PCICR, 0xa5000001); + + /* + * Set PCIMBR and PCIIOBR here, assuming a single window + * (16M MEM, 256K IO) is enough. If a larger space is + * needed, the readx/writex and inx/outx functions will + * have to do more (e.g. setting registers for each call). + */ + + /* + * Set the MBR so PCI address is one-to-one with window, + * meaning all calls go straight through... use ifdef to + * catch erroneous assumption. + */ + BUG_ON(PCIBIOS_MIN_MEM != SH7751_PCI_MEMORY_BASE); + + PCIC_WRITE(SH7751_PCIMBR, PCIBIOS_MIN_MEM); + + /* Set IOBR for window containing area specified in pci.h */ + PCIC_WRITE(SH7751_PCIIOBR, (PCIBIOS_MIN_IO & SH7751_PCIIOBR_MASK)); + + /* All done, may as well say so... 
*/ + printk("SH7751R PCI: Finished initialization of the PCI controller\n"); + + return 1; +} + +int __init pcibios_map_platform_irq(u8 slot, u8 pin) +{ + switch (slot) { + case 0: return IRQ_PCISLOT; /* PCI Extend slot */ + case 1: return IRQ_PCMCIA; /* PCI Cardbus Bridge */ + case 2: return IRQ_PCIETH; /* Realtek Ethernet controller */ + case 3: return IRQ_PCIHUB; /* Realtek Ethernet Hub controller */ + default: + printk("PCI: Bad IRQ mapping request for slot %d\n", slot); + return -1; + } +} + +static struct resource sh7751_io_resource = { + .name = "SH7751_IO", + .start = 0x4000, + .end = 0x4000 + SH7751_PCI_IO_SIZE - 1, + .flags = IORESOURCE_IO +}; + +static struct resource sh7751_mem_resource = { + .name = "SH7751_mem", + .start = SH7751_PCI_MEMORY_BASE, + .end = SH7751_PCI_MEMORY_BASE + SH7751_PCI_MEM_SIZE - 1, + .flags = IORESOURCE_MEM +}; + +extern struct pci_ops sh7751_pci_ops; + +struct pci_channel board_pci_channels[] = { + { &sh7751_pci_ops, &sh7751_io_resource, &sh7751_mem_resource, 0, 0xff }, + { NULL, NULL, NULL, 0, 0 }, +}; +EXPORT_SYMBOL(board_pci_channels); diff --git a/arch/sh/boards/renesas/hs7751rvoip/setup.c b/arch/sh/boards/renesas/hs7751rvoip/setup.c new file mode 100644 index 000000000..f1a78b6c7 --- /dev/null +++ b/arch/sh/boards/renesas/hs7751rvoip/setup.c @@ -0,0 +1,89 @@ +/* + * linux/arch/sh/kernel/setup_hs7751rvoip.c + * + * Copyright (C) 2000 Kazumoto Kojima + * + * Renesas Technology Sales HS7751RVoIP Support. + * + * Modified for HS7751RVoIP by + * Atom Create Engineering Co., Ltd. 2002. + * Lineo uSolutions, Inc. 2003. + */ + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +/* defined in mm/ioremap.c */ +extern void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags); + +unsigned int debug_counter; + +const char *get_system_type(void) +{ + return "HS7751RVoIP"; +} + +/* + * Initialize the board + */ +void __init platform_setup(void) +{ + printk(KERN_INFO "Renesas Technology Sales HS7751RVoIP-2 support.\n"); + ctrl_outb(0xf0, PA_OUTPORTR); + debug_counter = 0; +} + +void *area5_io8_base; +void *area6_io8_base; +void *area5_io16_base; +void *area6_io16_base; + +int __init cf_init(void) +{ + pgprot_t prot; + unsigned long paddrbase, psize; + + /* open I/O area window */ + paddrbase = virt_to_phys((void *)(PA_AREA5_IO+0x00000800)); + psize = PAGE_SIZE; + prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_COM16); + area5_io16_base = p3_ioremap(paddrbase, psize, prot.pgprot); + if (!area5_io16_base) { + printk("allocate_cf_area : can't open CF I/O window!\n"); + return -ENOMEM; + } + + /* XXX : do we need attribute and common-memory area also? 
*/ + + paddrbase = virt_to_phys((void *)PA_AREA6_IO); + psize = PAGE_SIZE; +#if defined(CONFIG_HS7751RVOIP_CODEC) + prot = PAGE_KERNEL_PCC(0, _PAGE_PCC_COM8); +#else + prot = PAGE_KERNEL_PCC(0, _PAGE_PCC_IO8); +#endif + area6_io8_base = p3_ioremap(paddrbase, psize, prot.pgprot); + if (!area6_io8_base) { + printk("allocate_cf_area : can't open CODEC I/O 8bit window!\n"); + return -ENOMEM; + } + prot = PAGE_KERNEL_PCC(0, _PAGE_PCC_IO16); + area6_io16_base = p3_ioremap(paddrbase, psize, prot.pgprot); + if (!area6_io16_base) { + printk("allocate_cf_area : can't open CODEC I/O 16bit window!\n"); + return -ENOMEM; + } + + return 0; +} + +__initcall (cf_init); diff --git a/arch/sh/boards/renesas/rts7751r2d/Makefile b/arch/sh/boards/renesas/rts7751r2d/Makefile new file mode 100644 index 000000000..daa53334b --- /dev/null +++ b/arch/sh/boards/renesas/rts7751r2d/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the RTS7751R2D specific parts of the kernel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +obj-y := mach.o setup.o io.o irq.o led.o + diff --git a/arch/sh/boards/renesas/rts7751r2d/io.c b/arch/sh/boards/renesas/rts7751r2d/io.c new file mode 100644 index 000000000..c46f9154c --- /dev/null +++ b/arch/sh/boards/renesas/rts7751r2d/io.c @@ -0,0 +1,319 @@ +/* + * linux/arch/sh/kernel/io_rts7751r2d.c + * + * Copyright (C) 2001 Ian da Silva, Jeremy Siegel + * Based largely on io_se.c. + * + * I/O routine for Renesas Technology sales RTS7751R2D. + * + * Initial version only to support LAN access; some + * placeholder code from io_rts7751r2d.c left in with the + * expectation of later SuperIO and PCMCIA access. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include "../../../drivers/pci/pci-sh7751.h" + +/* + * The 7751R RTS7751R2D uses the built-in PCI controller (PCIC) + * of the 7751R processor, and has a SuperIO accessible via the PCI. + * The board also includes a PCMCIA controller on its memory bus, + * like the other Solution Engine boards. + */ + +#define PCIIOBR (volatile long *)PCI_REG(SH7751_PCIIOBR) +#define PCIMBR (volatile long *)PCI_REG(SH7751_PCIMBR) +#define PCI_IO_AREA SH7751_PCI_IO_BASE +#define PCI_MEM_AREA SH7751_PCI_CONFIG_BASE + +#define PCI_IOMAP(adr) (PCI_IO_AREA + (adr & ~SH7751_PCIIOBR_MASK)) + +#define maybebadio(name,port) \ + printk("bad PC-like io %s for port 0x%lx at 0x%08x\n", \ + #name, (port), (__u32) __builtin_return_address(0)) + +static inline void delay(void) +{ + ctrl_inw(0xa0000000); +} + +static inline unsigned long port2adr(unsigned int port) +{ + if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6) + if (port == 0x3f6) + return (PA_AREA5_IO + 0x80c); + else + return (PA_AREA5_IO + 0x1000 + ((port-0x1f0) << 1)); + else + maybebadio(port2adr, (unsigned long)port); + + return port; +} + +static inline unsigned long port88796l(unsigned int port, int flag) +{ + unsigned long addr; + + if (flag) + addr = PA_AX88796L + ((port - AX88796L_IO_BASE) << 1); + else + addr = PA_AX88796L + ((port - AX88796L_IO_BASE) << 1) + 0x1000; + + return addr; +} + +/* The 7751R RTS7751R2D seems to have everything hooked */ +/* up pretty normally (nothing on high-bytes only...) 
so this */ +/* shouldn't be needed */ +static inline int shifted_port(unsigned long port) +{ + /* For IDE registers, value is not shifted */ + if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6) + return 0; + else + return 1; +} + +/* In case someone configures the kernel w/o PCI support: in that */ +/* scenario, don't ever bother to check for PCI-window addresses */ + +/* NOTE: WINDOW CHECK MAY BE A BIT OFF, HIGH PCIBIOS_MIN_IO WRAPS? */ +#if defined(CONFIG_PCI) +#define CHECK_SH7751_PCIIO(port) \ + ((port >= PCIBIOS_MIN_IO) && (port < (PCIBIOS_MIN_IO + SH7751_PCI_IO_SIZE))) +#else +#define CHECK_SH7751_PCIIO(port) (0) +#endif + +#if defined(CONFIG_NE2000) || defined(CONFIG_NE2000_MODULE) +#define CHECK_AX88796L_PORT(port) \ + ((port >= AX88796L_IO_BASE) && (port < (AX88796L_IO_BASE+0x20))) +#else +#define CHECK_AX88796L_PORT(port) (0) +#endif + +/* + * General outline: remap really low stuff [eventually] to SuperIO, + * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO) + * is mapped through the PCI IO window. Stuff with high bits (PXSEG) + * should be way beyond the window, and is used w/o translation for + * compatibility. + */ +unsigned char rts7751r2d_inb(unsigned long port) +{ + if (CHECK_AX88796L_PORT(port)) + return (*(volatile unsigned short *)port88796l(port, 0)) & 0xff; + else if (PXSEG(port)) + return *(volatile unsigned char *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + return *(volatile unsigned char *)PCI_IOMAP(port); + else + return (*(volatile unsigned short *)port2adr(port) & 0xff); +} + +unsigned char rts7751r2d_inb_p(unsigned long port) +{ + unsigned char v; + + if (CHECK_AX88796L_PORT(port)) + v = (*(volatile unsigned short *)port88796l(port, 0)) & 0xff; + else if (PXSEG(port)) + v = *(volatile unsigned char *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + v = *(volatile unsigned char *)PCI_IOMAP(port); + else + v = (*(volatile unsigned short *)port2adr(port) & 0xff); + delay(); + + return v; +} + +unsigned short rts7751r2d_inw(unsigned long port) +{ + if (CHECK_AX88796L_PORT(port)) + maybebadio(inw, port); + else if (PXSEG(port)) + return *(volatile unsigned short *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + return *(volatile unsigned short *)PCI_IOMAP(port); + else + maybebadio(inw, port); + + return 0; +} + +unsigned int rts7751r2d_inl(unsigned long port) +{ + if (CHECK_AX88796L_PORT(port)) + maybebadio(inl, port); + else if (PXSEG(port)) + return *(volatile unsigned long *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + return *(volatile unsigned long *)PCI_IOMAP(port); + else + maybebadio(inl, port); + + return 0; +} + +void rts7751r2d_outb(unsigned char value, unsigned long port) +{ + if (CHECK_AX88796L_PORT(port)) + *((volatile unsigned short *)port88796l(port, 0)) = value; + else if (PXSEG(port)) + *(volatile unsigned char *)port = value; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + *(volatile unsigned char *)PCI_IOMAP(port) = value; + else + *(volatile unsigned short *)port2adr(port) = value; +} + +void rts7751r2d_outb_p(unsigned char value, unsigned long port) +{ + if (CHECK_AX88796L_PORT(port)) + *((volatile unsigned short *)port88796l(port, 0)) = value; + else if (PXSEG(port)) + *(volatile unsigned char *)port = value; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + *(volatile unsigned char *)PCI_IOMAP(port) = value; + else + *(volatile unsigned short *)port2adr(port) = value; + delay(); +} + +void rts7751r2d_outw(unsigned 
short value, unsigned long port) +{ + if (CHECK_AX88796L_PORT(port)) + maybebadio(outw, port); + else if (PXSEG(port)) + *(volatile unsigned short *)port = value; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + *(volatile unsigned short *)PCI_IOMAP(port) = value; + else + maybebadio(outw, port); +} + +void rts7751r2d_outl(unsigned int value, unsigned long port) +{ + if (CHECK_AX88796L_PORT(port)) + maybebadio(outl, port); + else if (PXSEG(port)) + *(volatile unsigned long *)port = value; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + *(volatile unsigned long *)PCI_IOMAP(port) = value; + else + maybebadio(outl, port); +} + +void rts7751r2d_insb(unsigned long port, void *addr, unsigned long count) +{ + volatile __u8 *bp; + volatile __u16 *p; + + if (CHECK_AX88796L_PORT(port)) { + p = (volatile unsigned short *)port88796l(port, 0); + while (count--) *((unsigned char *) addr)++ = *p & 0xff; + } else if (PXSEG(port)) + while (count--) *((unsigned char *) addr)++ = *(volatile unsigned char *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) { + bp = (__u8 *)PCI_IOMAP(port); + while (count--) *((volatile unsigned char *) addr)++ = *bp; + } else { + p = (volatile unsigned short *)port2adr(port); + while (count--) *((unsigned char *) addr)++ = *p & 0xff; + } +} + +void rts7751r2d_insw(unsigned long port, void *addr, unsigned long count) +{ + volatile __u16 *p; + + if (CHECK_AX88796L_PORT(port)) + p = (volatile unsigned short *)port88796l(port, 1); + else if (PXSEG(port)) + p = (volatile unsigned short *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + p = (volatile unsigned short *)PCI_IOMAP(port); + else + p = (volatile unsigned short *)port2adr(port); + while (count--) *((__u16 *) addr)++ = *p; +} + +void rts7751r2d_insl(unsigned long port, void *addr, unsigned long count) +{ + if (CHECK_AX88796L_PORT(port)) + maybebadio(insl, port); + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) { + volatile __u32 *p = (__u32 *)PCI_IOMAP(port); + + while (count--) *((__u32 *) addr)++ = *p; + } else + maybebadio(insl, port); +} + +void rts7751r2d_outsb(unsigned long port, const void *addr, unsigned long count) +{ + volatile __u8 *bp; + volatile __u16 *p; + + if (CHECK_AX88796L_PORT(port)) { + p = (volatile unsigned short *)port88796l(port, 0); + while (count--) *p = *((unsigned char *) addr)++; + } else if (PXSEG(port)) + while (count--) *(volatile unsigned char *)port = *((unsigned char *) addr)++; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) { + bp = (__u8 *)PCI_IOMAP(port); + while (count--) *bp = *((volatile unsigned char *) addr)++; + } else { + p = (volatile unsigned short *)port2adr(port); + while (count--) *p = *((unsigned char *) addr)++; + } +} + +void rts7751r2d_outsw(unsigned long port, const void *addr, unsigned long count) +{ + volatile __u16 *p; + + if (CHECK_AX88796L_PORT(port)) + p = (volatile unsigned short *)port88796l(port, 1); + else if (PXSEG(port)) + p = (volatile unsigned short *)port; + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) + p = (volatile unsigned short *)PCI_IOMAP(port); + else + p = (volatile unsigned short *)port2adr(port); + while (count--) *p = *((__u16 *) addr)++; +} + +void rts7751r2d_outsl(unsigned long port, const void *addr, unsigned long count) +{ + if (CHECK_AX88796L_PORT(port)) + maybebadio(outsl, port); + else if (CHECK_SH7751_PCIIO(port) || shifted_port(port)) { + volatile __u32 *p = (__u32 *)PCI_IOMAP(port); + + while (count--) *p = *((__u32 *) addr)++; + } else + 
maybebadio(outsl, port); +} + +void *rts7751r2d_ioremap(unsigned long offset, unsigned long size) +{ + if (offset >= 0xfd000000) + return (void *)offset; + else + return (void *)P2SEGADDR(offset); +} +EXPORT_SYMBOL(rts7751r2d_ioremap); + +unsigned long rts7751r2d_isa_port2addr(unsigned long offset) +{ + return port2adr(offset); +} diff --git a/arch/sh/boards/renesas/rts7751r2d/irq.c b/arch/sh/boards/renesas/rts7751r2d/irq.c new file mode 100644 index 000000000..95717f4f1 --- /dev/null +++ b/arch/sh/boards/renesas/rts7751r2d/irq.c @@ -0,0 +1,135 @@ +/* + * linux/arch/sh/boards/renesas/rts7751r2d/irq.c + * + * Copyright (C) 2000 Kazumoto Kojima + * + * Renesas Technology Sales RTS7751R2D Support. + * + * Modified for RTS7751R2D by + * Atom Create Engineering Co., Ltd. 2002. + */ + +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_RTS7751R2D_REV11) +static int mask_pos[] = {11, 9, 8, 12, 10, 6, 5, 4, 7, 14, 13, 0, 0, 0, 0}; +#else +static int mask_pos[] = {6, 11, 9, 8, 12, 10, 5, 4, 7, 14, 13, 0, 0, 0, 0}; +#endif + +extern int voyagergx_irq_demux(int irq); +extern void setup_voyagergx_irq(void); + +static void enable_rts7751r2d_irq(unsigned int irq); +static void disable_rts7751r2d_irq(unsigned int irq); + +/* shutdown is same as "disable" */ +#define shutdown_rts7751r2d_irq disable_rts7751r2d_irq + +static void ack_rts7751r2d_irq(unsigned int irq); +static void end_rts7751r2d_irq(unsigned int irq); + +static unsigned int startup_rts7751r2d_irq(unsigned int irq) +{ + enable_rts7751r2d_irq(irq); + return 0; /* never anything pending */ +} + +static void disable_rts7751r2d_irq(unsigned int irq) +{ + unsigned long flags; + unsigned short val; + unsigned short mask = 0xffff ^ (0x0001 << mask_pos[irq]); + + /* Set the priority in IPR to 0 */ + local_irq_save(flags); + val = ctrl_inw(IRLCNTR1); + val &= mask; + ctrl_outw(val, IRLCNTR1); + local_irq_restore(flags); +} + +static void enable_rts7751r2d_irq(unsigned int irq) +{ + unsigned long flags; + unsigned short val; + unsigned short value = (0x0001 << mask_pos[irq]); + + /* Set priority in IPR back to original value */ + local_irq_save(flags); + val = ctrl_inw(IRLCNTR1); + val |= value; + ctrl_outw(val, IRLCNTR1); + local_irq_restore(flags); +} + +int rts7751r2d_irq_demux(int irq) +{ + int demux_irq; + + demux_irq = voyagergx_irq_demux(irq); + return demux_irq; +} + +static void ack_rts7751r2d_irq(unsigned int irq) +{ + disable_rts7751r2d_irq(irq); +} + +static void end_rts7751r2d_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_rts7751r2d_irq(irq); +} + +static struct hw_interrupt_type rts7751r2d_irq_type = { + "RTS7751R2D IRQ", + startup_rts7751r2d_irq, + shutdown_rts7751r2d_irq, + enable_rts7751r2d_irq, + disable_rts7751r2d_irq, + ack_rts7751r2d_irq, + end_rts7751r2d_irq, +}; + +static void make_rts7751r2d_irq(unsigned int irq) +{ + disable_irq_nosync(irq); + irq_desc[irq].handler = &rts7751r2d_irq_type; + disable_rts7751r2d_irq(irq); +} + +/* + * Initialize IRQ setting + */ +void __init init_rts7751r2d_IRQ(void) +{ + int i; + + /* IRL0=KEY Input + * IRL1=Ethernet + * IRL2=CF Card + * IRL3=CF Card Insert + * IRL4=PCMCIA + * IRL5=VOYAGER + * IRL6=RTC Alarm + * IRL7=RTC Timer + * IRL8=SD Card + * IRL9=PCI Slot #1 + * IRL10=PCI Slot #2 + * IRL11=Extention #0 + * IRL12=Extention #1 + * IRL13=Extention #2 + * IRL14=Extention #3 + */ + + for (i=0; i<15; i++) + make_rts7751r2d_irq(i); + + setup_voyagergx_irq(); +} diff --git a/arch/sh/boards/renesas/rts7751r2d/led.c 
b/arch/sh/boards/renesas/rts7751r2d/led.c new file mode 100644 index 000000000..9993259a8 --- /dev/null +++ b/arch/sh/boards/renesas/rts7751r2d/led.c @@ -0,0 +1,67 @@ +/* + * linux/arch/sh/kernel/led_rts7751r2d.c + * + * Copyright (C) Atom Create Engineering Co., Ltd. + * + * May be copied or modified under the terms of GNU General Public + * License. See linux/COPYING for more information. + * + * This file contains Renesas Technology Sales RTS7751R2D specific LED code. + */ + +#include +#include +#include + +extern unsigned int debug_counter; + +#ifdef CONFIG_HEARTBEAT + +#include + +/* Cycle the LED's in the clasic Knightriger/Sun pattern */ +void heartbeat_rts7751r2d(void) +{ + static unsigned int cnt = 0, period = 0; + volatile unsigned short *p = (volatile unsigned short *)PA_OUTPORT; + static unsigned bit = 0, up = 1; + + cnt += 1; + if (cnt < period) + return; + + cnt = 0; + + /* Go through the points (roughly!): + * f(0)=10, f(1)=16, f(2)=20, f(5)=35, f(int)->110 + */ + period = 110 - ((300 << FSHIFT)/((avenrun[0]/5) + (3< +#include +#include + +#include +#include +#include +#include + +extern void heartbeat_rts7751r2d(void); +extern void init_rts7751r2d_IRQ(void); +extern void *rts7751r2d_ioremap(unsigned long, unsigned long); +extern int rts7751r2d_irq_demux(int irq); + +extern void *voyagergx_consistent_alloc(struct device *, size_t, dma_addr_t *, int); +extern void voyagergx_consistent_free(struct device *, size_t, void *, dma_addr_t); + +/* + * The Machine Vector + */ + +struct sh_machine_vector mv_rts7751r2d __initmv = { + .mv_nr_irqs = 72, + + .mv_inb = rts7751r2d_inb, + .mv_inw = rts7751r2d_inw, + .mv_inl = rts7751r2d_inl, + .mv_outb = rts7751r2d_outb, + .mv_outw = rts7751r2d_outw, + .mv_outl = rts7751r2d_outl, + + .mv_inb_p = rts7751r2d_inb_p, + .mv_inw_p = rts7751r2d_inw, + .mv_inl_p = rts7751r2d_inl, + .mv_outb_p = rts7751r2d_outb_p, + .mv_outw_p = rts7751r2d_outw, + .mv_outl_p = rts7751r2d_outl, + + .mv_insb = rts7751r2d_insb, + .mv_insw = rts7751r2d_insw, + .mv_insl = rts7751r2d_insl, + .mv_outsb = rts7751r2d_outsb, + .mv_outsw = rts7751r2d_outsw, + .mv_outsl = rts7751r2d_outsl, + + .mv_ioremap = rts7751r2d_ioremap, + .mv_isa_port2addr = rts7751r2d_isa_port2addr, + .mv_init_irq = init_rts7751r2d_IRQ, +#ifdef CONFIG_HEARTBEAT + .mv_heartbeat = heartbeat_rts7751r2d, +#endif + .mv_irq_demux = rts7751r2d_irq_demux, + + .mv_consistent_alloc = voyagergx_consistent_alloc, + .mv_consistent_free = voyagergx_consistent_free, +}; +ALIAS_MV(rts7751r2d) diff --git a/arch/sh/boards/renesas/rts7751r2d/setup.c b/arch/sh/boards/renesas/rts7751r2d/setup.c new file mode 100644 index 000000000..2587fd1a0 --- /dev/null +++ b/arch/sh/boards/renesas/rts7751r2d/setup.c @@ -0,0 +1,31 @@ +/* + * linux/arch/sh/kernel/setup_rts7751r2d.c + * + * Copyright (C) 2000 Kazumoto Kojima + * + * Renesas Technology Sales RTS7751R2D Support. + * + * Modified for RTS7751R2D by + * Atom Create Engineering Co., Ltd. 2002. 
+ */ + +#include +#include +#include + +unsigned int debug_counter; + +const char *get_system_type(void) +{ + return "RTS7751R2D"; +} + +/* + * Initialize the board + */ +void __init platform_setup(void) +{ + printk(KERN_INFO "Renesas Technology Sales RTS7751R2D support.\n"); + ctrl_outw(0x0000, PA_OUTPORT); + debug_counter = 0; +} diff --git a/arch/sh/boards/renesas/systemh/Makefile b/arch/sh/boards/renesas/systemh/Makefile new file mode 100644 index 000000000..2cc6a23d9 --- /dev/null +++ b/arch/sh/boards/renesas/systemh/Makefile @@ -0,0 +1,13 @@ +# +# Makefile for the SystemH specific parts of the kernel +# + +obj-y := setup.o irq.o io.o + +# XXX: This wants to be consolidated in arch/sh/drivers/pci, and more +# importantly, with the generic sh7751_pcic_init() code. For now, we'll +# just abuse the hell out of kbuild, because we can.. + +obj-$(CONFIG_PCI) += pci.o +pci-y := ../../se/7751/pci.o + diff --git a/arch/sh/boards/renesas/systemh/io.c b/arch/sh/boards/renesas/systemh/io.c new file mode 100644 index 000000000..cf979011a --- /dev/null +++ b/arch/sh/boards/renesas/systemh/io.c @@ -0,0 +1,283 @@ +/* + * linux/arch/sh/boards/systemh/io.c + * + * Copyright (C) 2001 Ian da Silva, Jeremy Siegel + * Based largely on io_se.c. + * + * I/O routine for Hitachi 7751 Systemh. + * + */ + +#include +#include +#include +#include +#include + +#include +#include "../../drivers/pci/pci-sh7751.h" + +/* + * The 7751 SystemH Engine uses the built-in PCI controller (PCIC) + * of the 7751 processor, and has a SuperIO accessible on its memory + * bus. + */ + +#define PCIIOBR (volatile long *)PCI_REG(SH7751_PCIIOBR) +#define PCIMBR (volatile long *)PCI_REG(SH7751_PCIMBR) +#define PCI_IO_AREA SH7751_PCI_IO_BASE +#define PCI_MEM_AREA SH7751_PCI_CONFIG_BASE + +#define PCI_IOMAP(adr) (PCI_IO_AREA + (adr & ~SH7751_PCIIOBR_MASK)) +#define ETHER_IOMAP(adr) (0xB3000000 + (adr)) /*map to 16bits access area + of smc lan chip*/ + +#define maybebadio(name,port) \ + printk("bad PC-like io %s for port 0x%lx at 0x%08x\n", \ + #name, (port), (__u32) __builtin_return_address(0)) + +static inline void delay(void) +{ + ctrl_inw(0xa0000000); +} + +static inline volatile __u16 * +port2adr(unsigned int port) +{ + if (port >= 0x2000) + return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000)); +#if 0 + else + return (volatile __u16 *) (PA_SUPERIO + (port << 1)); +#endif + maybebadio(name,(unsigned long)port); + return (volatile __u16*)port; +} + +/* In case someone configures the kernel w/o PCI support: in that */ +/* scenario, don't ever bother to check for PCI-window addresses */ + +/* NOTE: WINDOW CHECK MAY BE A BIT OFF, HIGH PCIBIOS_MIN_IO WRAPS? */ +#if defined(CONFIG_PCI) +#define CHECK_SH7751_PCIIO(port) \ + ((port >= PCIBIOS_MIN_IO) && (port < (PCIBIOS_MIN_IO + SH7751_PCI_IO_SIZE))) +#else +#define CHECK_SH7751_PCIIO(port) (0) +#endif + +/* + * General outline: remap really low stuff [eventually] to SuperIO, + * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO) + * is mapped through the PCI IO window. Stuff with high bits (PXSEG) + * should be way beyond the window, and is used w/o translation for + * compatibility. 
+ */ +unsigned char sh7751systemh_inb(unsigned long port) +{ + if (PXSEG(port)) + return *(volatile unsigned char *)port; + else if (CHECK_SH7751_PCIIO(port)) + return *(volatile unsigned char *)PCI_IOMAP(port); + else if (port <= 0x3F1) + return *(volatile unsigned char *)ETHER_IOMAP(port); + else + return (*port2adr(port))&0xff; +} + +unsigned char sh7751systemh_inb_p(unsigned long port) +{ + unsigned char v; + + if (PXSEG(port)) + v = *(volatile unsigned char *)port; + else if (CHECK_SH7751_PCIIO(port)) + v = *(volatile unsigned char *)PCI_IOMAP(port); + else if (port <= 0x3F1) + v = *(volatile unsigned char *)ETHER_IOMAP(port); + else + v = (*port2adr(port))&0xff; + delay(); + return v; +} + +unsigned short sh7751systemh_inw(unsigned long port) +{ + if (PXSEG(port)) + return *(volatile unsigned short *)port; + else if (CHECK_SH7751_PCIIO(port)) + return *(volatile unsigned short *)PCI_IOMAP(port); + else if (port >= 0x2000) + return *port2adr(port); + else if (port <= 0x3F1) + return *(volatile unsigned int *)ETHER_IOMAP(port); + else + maybebadio(inw, port); + return 0; +} + +unsigned int sh7751systemh_inl(unsigned long port) +{ + if (PXSEG(port)) + return *(volatile unsigned long *)port; + else if (CHECK_SH7751_PCIIO(port)) + return *(volatile unsigned int *)PCI_IOMAP(port); + else if (port >= 0x2000) + return *port2adr(port); + else if (port <= 0x3F1) + return *(volatile unsigned int *)ETHER_IOMAP(port); + else + maybebadio(inl, port); + return 0; +} + +void sh7751systemh_outb(unsigned char value, unsigned long port) +{ + + if (PXSEG(port)) + *(volatile unsigned char *)port = value; + else if (CHECK_SH7751_PCIIO(port)) + *((unsigned char*)PCI_IOMAP(port)) = value; + else if (port <= 0x3F1) + *(volatile unsigned char *)ETHER_IOMAP(port) = value; + else + *(port2adr(port)) = value; +} + +void sh7751systemh_outb_p(unsigned char value, unsigned long port) +{ + if (PXSEG(port)) + *(volatile unsigned char *)port = value; + else if (CHECK_SH7751_PCIIO(port)) + *((unsigned char*)PCI_IOMAP(port)) = value; + else if (port <= 0x3F1) + *(volatile unsigned char *)ETHER_IOMAP(port) = value; + else + *(port2adr(port)) = value; + delay(); +} + +void sh7751systemh_outw(unsigned short value, unsigned long port) +{ + if (PXSEG(port)) + *(volatile unsigned short *)port = value; + else if (CHECK_SH7751_PCIIO(port)) + *((unsigned short *)PCI_IOMAP(port)) = value; + else if (port >= 0x2000) + *port2adr(port) = value; + else if (port <= 0x3F1) + *(volatile unsigned short *)ETHER_IOMAP(port) = value; + else + maybebadio(outw, port); +} + +void sh7751systemh_outl(unsigned int value, unsigned long port) +{ + if (PXSEG(port)) + *(volatile unsigned long *)port = value; + else if (CHECK_SH7751_PCIIO(port)) + *((unsigned long*)PCI_IOMAP(port)) = value; + else + maybebadio(outl, port); +} + +void sh7751systemh_insb(unsigned long port, void *addr, unsigned long count) +{ + unsigned char *p = addr; + while (count--) *p++ = sh7751systemh_inb(port); +} + +void sh7751systemh_insw(unsigned long port, void *addr, unsigned long count) +{ + unsigned short *p = addr; + while (count--) *p++ = sh7751systemh_inw(port); +} + +void sh7751systemh_insl(unsigned long port, void *addr, unsigned long count) +{ + maybebadio(insl, port); +} + +void sh7751systemh_outsb(unsigned long port, const void *addr, unsigned long count) +{ + unsigned char *p = (unsigned char*)addr; + while (count--) sh7751systemh_outb(*p++, port); +} + +void sh7751systemh_outsw(unsigned long port, const void *addr, unsigned long count) +{ + unsigned short *p = 
(unsigned short*)addr; + while (count--) sh7751systemh_outw(*p++, port); +} + +void sh7751systemh_outsl(unsigned long port, const void *addr, unsigned long count) +{ + maybebadio(outsw, port); +} + +/* For read/write calls, just copy generic (pass-thru); PCIMBR is */ +/* already set up. For a larger memory space, these would need to */ +/* reset PCIMBR as needed on a per-call basis... */ + +unsigned char sh7751systemh_readb(unsigned long addr) +{ + return *(volatile unsigned char*)addr; +} + +unsigned short sh7751systemh_readw(unsigned long addr) +{ + return *(volatile unsigned short*)addr; +} + +unsigned int sh7751systemh_readl(unsigned long addr) +{ + return *(volatile unsigned long*)addr; +} + +void sh7751systemh_writeb(unsigned char b, unsigned long addr) +{ + *(volatile unsigned char*)addr = b; +} + +void sh7751systemh_writew(unsigned short b, unsigned long addr) +{ + *(volatile unsigned short*)addr = b; +} + +void sh7751systemh_writel(unsigned int b, unsigned long addr) +{ + *(volatile unsigned long*)addr = b; +} + + + +/* Map ISA bus address to the real address. Only for PCMCIA. */ + +/* ISA page descriptor. */ +static __u32 sh_isa_memmap[256]; + +#if 0 +static int +sh_isa_mmap(__u32 start, __u32 length, __u32 offset) +{ + int idx; + + if (start >= 0x100000 || (start & 0xfff) || (length != 0x1000)) + return -1; + + idx = start >> 12; + sh_isa_memmap[idx] = 0xb8000000 + (offset &~ 0xfff); + printk("sh_isa_mmap: start %x len %x offset %x (idx %x paddr %x)\n", + start, length, offset, idx, sh_isa_memmap[idx]); + return 0; +} +#endif + +unsigned long +sh7751systemh_isa_port2addr(unsigned long offset) +{ + int idx; + + idx = (offset >> 12) & 0xff; + offset &= 0xfff; + return sh_isa_memmap[idx] + offset; +} diff --git a/arch/sh/boards/renesas/systemh/irq.c b/arch/sh/boards/renesas/systemh/irq.c new file mode 100644 index 000000000..5675a4134 --- /dev/null +++ b/arch/sh/boards/renesas/systemh/irq.c @@ -0,0 +1,111 @@ +/* + * linux/arch/sh/boards/systemh/irq.c + * + * Copyright (C) 2000 Kazumoto Kojima + * + * Hitachi SystemH Support. + * + * Modified for 7751 SystemH by + * Jonathan Short. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +/* address of external interrupt mask register + * address must be set prior to use these (maybe in init_XXX_irq()) + * XXX : is it better to use .config than specifying it in code? 
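+ *
+ * In the handlers below, the mask register at 0xB3F10004 is what
+ * enable/disable actually touch, while the request register at
+ * 0xB3F10000 is written on disable, presumably to clear any latched
+ * request.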
*/ +static unsigned long *systemh_irq_mask_register = (unsigned long *)0xB3F10004; +static unsigned long *systemh_irq_request_register = (unsigned long *)0xB3F10000; + +/* forward declaration */ +static unsigned int startup_systemh_irq(unsigned int irq); +static void shutdown_systemh_irq(unsigned int irq); +static void enable_systemh_irq(unsigned int irq); +static void disable_systemh_irq(unsigned int irq); +static void mask_and_ack_systemh(unsigned int); +static void end_systemh_irq(unsigned int irq); + +/* hw_interrupt_type */ +static struct hw_interrupt_type systemh_irq_type = { + " SystemH Register", + startup_systemh_irq, + shutdown_systemh_irq, + enable_systemh_irq, + disable_systemh_irq, + mask_and_ack_systemh, + end_systemh_irq +}; + +static unsigned int startup_systemh_irq(unsigned int irq) +{ + enable_systemh_irq(irq); + return 0; /* never anything pending */ +} + +static void shutdown_systemh_irq(unsigned int irq) +{ + disable_systemh_irq(irq); +} + +static void disable_systemh_irq(unsigned int irq) +{ + if (systemh_irq_mask_register) { + unsigned long flags; + unsigned long val, mask = 0x01 << 1; + + /* Clear the "irq"th bit in the mask and set it in the request */ + local_irq_save(flags); + + val = ctrl_inl((unsigned long)systemh_irq_mask_register); + val &= ~mask; + ctrl_outl(val, (unsigned long)systemh_irq_mask_register); + + val = ctrl_inl((unsigned long)systemh_irq_request_register); + val |= mask; + ctrl_outl(val, (unsigned long)systemh_irq_request_register); + + local_irq_restore(flags); + } +} + +static void enable_systemh_irq(unsigned int irq) +{ + if (systemh_irq_mask_register) { + unsigned long flags; + unsigned long val, mask = 0x01 << 1; + + /* Set "irq"th bit in the mask register */ + local_irq_save(flags); + val = ctrl_inl((unsigned long)systemh_irq_mask_register); + val |= mask; + ctrl_outl(val, (unsigned long)systemh_irq_mask_register); + local_irq_restore(flags); + } +} + +static void mask_and_ack_systemh(unsigned int irq) +{ + disable_systemh_irq(irq); +} + +static void end_systemh_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_systemh_irq(irq); +} + +void make_systemh_irq(unsigned int irq) +{ + disable_irq_nosync(irq); + irq_desc[irq].handler = &systemh_irq_type; + disable_systemh_irq(irq); +} + diff --git a/arch/sh/boards/renesas/systemh/setup.c b/arch/sh/boards/renesas/systemh/setup.c new file mode 100644 index 000000000..826fa3d76 --- /dev/null +++ b/arch/sh/boards/renesas/systemh/setup.c @@ -0,0 +1,80 @@ +/* + * linux/arch/sh/boards/systemh/setup.c + * + * Copyright (C) 2000 Kazumoto Kojima + * Copyright (C) 2003 Paul Mundt + * + * Hitachi SystemH Support. + * + * Modified for 7751 SystemH by Jonathan Short. + * + * Rewritten for 2.6 by Paul Mundt. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
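+ *
+ * The mv_7751systemh machine vector below hooks the sh7751systemh_*
+ * port I/O routines from io.c and registers IRQ 0xb for the on-board
+ * Ethernet via init_7751systemh_IRQ().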
+ */ +#include +#include +#include +#include + +extern void make_systemh_irq(unsigned int irq); + +const char *get_system_type(void) +{ + return "7751 SystemH"; +} + +/* + * Initialize IRQ setting + */ +void __init init_7751systemh_IRQ(void) +{ +/* make_ipr_irq(10, BCR_ILCRD, 1, 0x0f-10); LAN */ +/* make_ipr_irq(14, BCR_ILCRA, 2, 0x0f-4); */ + make_systemh_irq(0xb); /* Ethernet interrupt */ +} + +struct sh_machine_vector mv_7751systemh __initmv = { + .mv_nr_irqs = 72, + + .mv_inb = sh7751systemh_inb, + .mv_inw = sh7751systemh_inw, + .mv_inl = sh7751systemh_inl, + .mv_outb = sh7751systemh_outb, + .mv_outw = sh7751systemh_outw, + .mv_outl = sh7751systemh_outl, + + .mv_inb_p = sh7751systemh_inb_p, + .mv_inw_p = sh7751systemh_inw, + .mv_inl_p = sh7751systemh_inl, + .mv_outb_p = sh7751systemh_outb_p, + .mv_outw_p = sh7751systemh_outw, + .mv_outl_p = sh7751systemh_outl, + + .mv_insb = sh7751systemh_insb, + .mv_insw = sh7751systemh_insw, + .mv_insl = sh7751systemh_insl, + .mv_outsb = sh7751systemh_outsb, + .mv_outsw = sh7751systemh_outsw, + .mv_outsl = sh7751systemh_outsl, + + .mv_readb = sh7751systemh_readb, + .mv_readw = sh7751systemh_readw, + .mv_readl = sh7751systemh_readl, + .mv_writeb = sh7751systemh_writeb, + .mv_writew = sh7751systemh_writew, + .mv_writel = sh7751systemh_writel, + + .mv_isa_port2addr = sh7751systemh_isa_port2addr, + + .mv_init_irq = init_7751systemh_IRQ, +}; +ALIAS_MV(7751systemh) + +int __init platform_setup(void) +{ + return 0; +} + diff --git a/arch/sh/boards/se/7300/Makefile b/arch/sh/boards/se/7300/Makefile new file mode 100644 index 000000000..0fbd4f478 --- /dev/null +++ b/arch/sh/boards/se/7300/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the 7300 SolutionEngine specific parts of the kernel +# + +obj-y := setup.o io.o irq.o + +obj-$(CONFIG_HEARTBEAT) += led.o diff --git a/arch/sh/boards/se/7300/io.c b/arch/sh/boards/se/7300/io.c new file mode 100644 index 000000000..4e8939197 --- /dev/null +++ b/arch/sh/boards/se/7300/io.c @@ -0,0 +1,261 @@ +/* + * linux/arch/sh/boards/se/7300/io.c + * + * Copyright (C) 2003 YOSHII Takashi + */ + +#include +#include +#include +#include + +#define badio(fn, a) panic("bad i/o operation %s for %08lx.", #fn, a) + +struct iop { + unsigned long start, end; + unsigned long base; + struct iop *(*check) (struct iop * p, unsigned long port); + unsigned char (*inb) (struct iop * p, unsigned long port); + unsigned short (*inw) (struct iop * p, unsigned long port); + void (*outb) (struct iop * p, unsigned char value, unsigned long port); + void (*outw) (struct iop * p, unsigned short value, unsigned long port); +}; + +struct iop * +simple_check(struct iop *p, unsigned long port) +{ + if ((p->start <= port) && (port <= p->end)) + return p; + else + badio(check, port); +} + +struct iop * +ide_check(struct iop *p, unsigned long port) +{ + if (((0x1f0 <= port) && (port <= 0x1f7)) || (port == 0x3f7)) + return p; + return NULL; +} + +unsigned char +simple_inb(struct iop *p, unsigned long port) +{ + return *(unsigned char *) (p->base + port); +} + +unsigned short +simple_inw(struct iop *p, unsigned long port) +{ + return *(unsigned short *) (p->base + port); +} + +void +simple_outb(struct iop *p, unsigned char value, unsigned long port) +{ + *(unsigned char *) (p->base + port) = value; +} + +void +simple_outw(struct iop *p, unsigned short value, unsigned long port) +{ + *(unsigned short *) (p->base + port) = value; +} + +unsigned char +pcc_inb(struct iop *p, unsigned long port) +{ + unsigned long addr = p->base + port + 0x40000; + unsigned long v; + + 
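+	/*
+	 * Odd port numbers are redirected 0x00400000 above the even bank,
+	 * apparently to select the other byte lane of the card window;
+	 * pcc_outb() below applies the same adjustment.  For the CF slot
+	 * (base 0xb0600000) port 0x1f1 would land at 0xb0a401f1.
+	 */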
if (port & 1) + addr += 0x00400000; + v = *(volatile unsigned char *) addr; + return v; +} + +void +pcc_outb(struct iop *p, unsigned char value, unsigned long port) +{ + unsigned long addr = p->base + port + 0x40000; + + if (port & 1) + addr += 0x00400000; + *(volatile unsigned char *) addr = value; +} + +unsigned char +bad_inb(struct iop *p, unsigned long port) +{ + badio(inb, port); +} + +void +bad_outb(struct iop *p, unsigned char value, unsigned long port) +{ + badio(inw, port); +} + +/* MSTLANEX01 LAN at 0xb400:0000 */ +static struct iop laniop = { + .start = 0x300, + .end = 0x30f, + .base = 0xb4000000, + .check = simple_check, + .inb = simple_inb, + .inw = simple_inw, + .outb = simple_outb, + .outw = simple_outw, +}; + +/* NE2000 pc card NIC */ +static struct iop neiop = { + .start = 0x280, + .end = 0x29f, + .base = 0xb0600000 + 0x80, /* soft 0x280 -> hard 0x300 */ + .check = simple_check, + .inb = pcc_inb, + .inw = simple_inw, + .outb = pcc_outb, + .outw = simple_outw, +}; + +/* CF in CF slot */ +static struct iop cfiop = { + .base = 0xb0600000, + .check = ide_check, + .inb = pcc_inb, + .inw = simple_inw, + .outb = pcc_outb, + .outw = simple_outw, +}; + +static __inline__ struct iop * +port2iop(unsigned long port) +{ + if (0) ; +#if defined(CONFIG_SMC91111) + else if (laniop.check(&laniop, port)) + return &laniop; +#endif +#if defined(CONFIG_NE2000) + else if (neiop.check(&neiop, port)) + return &neiop; +#endif +#if defined(CONFIG_IDE) + else if (cfiop.check(&cfiop, port)) + return &cfiop; +#endif + else + return &neiop; /* fallback */ +} + +static inline void +delay(void) +{ + ctrl_inw(0xac000000); + ctrl_inw(0xac000000); +} + +unsigned char +sh7300se_inb(unsigned long port) +{ + struct iop *p = port2iop(port); + return (p->inb) (p, port); +} + +unsigned char +sh7300se_inb_p(unsigned long port) +{ + unsigned char v = sh7300se_inb(port); + delay(); + return v; +} + +unsigned short +sh7300se_inw(unsigned long port) +{ + struct iop *p = port2iop(port); + return (p->inw) (p, port); +} + +unsigned int +sh7300se_inl(unsigned long port) +{ + badio(inl, port); +} + +void +sh7300se_outb(unsigned char value, unsigned long port) +{ + struct iop *p = port2iop(port); + (p->outb) (p, value, port); +} + +void +sh7300se_outb_p(unsigned char value, unsigned long port) +{ + sh7300se_outb(value, port); + delay(); +} + +void +sh7300se_outw(unsigned short value, unsigned long port) +{ + struct iop *p = port2iop(port); + (p->outw) (p, value, port); +} + +void +sh7300se_outl(unsigned int value, unsigned long port) +{ + badio(outl, port); +} + +void +sh7300se_insb(unsigned long port, void *addr, unsigned long count) +{ + unsigned char *a = addr; + struct iop *p = port2iop(port); + while (count--) + *a++ = (p->inb) (p, port); +} + +void +sh7300se_insw(unsigned long port, void *addr, unsigned long count) +{ + unsigned short *a = addr; + struct iop *p = port2iop(port); + while (count--) + *a++ = (p->inw) (p, port); +} + +void +sh7300se_insl(unsigned long port, void *addr, unsigned long count) +{ + badio(insl, port); +} + +void +sh7300se_outsb(unsigned long port, const void *addr, unsigned long count) +{ + unsigned char *a = (unsigned char *) addr; + struct iop *p = port2iop(port); + while (count--) + (p->outb) (p, *a++, port); +} + +void +sh7300se_outsw(unsigned long port, const void *addr, unsigned long count) +{ + unsigned short *a = (unsigned short *) addr; + struct iop *p = port2iop(port); + while (count--) + (p->outw) (p, *a++, port); +} + +void +sh7300se_outsl(unsigned long port, const void *addr, 
unsigned long count) +{ + badio(outsw, port); +} diff --git a/arch/sh/boards/se/7300/irq.c b/arch/sh/boards/se/7300/irq.c new file mode 100644 index 000000000..96c8c23d6 --- /dev/null +++ b/arch/sh/boards/se/7300/irq.c @@ -0,0 +1,37 @@ +/* + * linux/arch/sh/boards/se/7300/irq.c + * + * Copyright (C) 2003 Takashi Kusuda + * + * SH-Mobile SolutionEngine 7300 Support. + * + */ + +#include +#include +#include +#include +#include +#include + +/* + * Initialize IRQ setting + */ +void __init +init_7300se_IRQ(void) +{ + ctrl_outw(0x0028, PA_EPLD_MODESET); /* mode set IRQ0,1 active low. */ + ctrl_outw(0xa000, INTC_ICR1); /* IRQ mode; IRQ0,1 enable. */ + ctrl_outw(0x0000, PORT_PFCR); /* use F for IRQ[3:0] and SIU. */ + + /* PC_IRQ[0-3] -> IRQ0 (32) */ + make_ipr_irq(IRQ0_IRQ, IRQ0_IPR_ADDR, IRQ0_IPR_POS, 0x0f - IRQ0_IRQ); + /* A_IRQ[0-3] -> IRQ1 (33) */ + make_ipr_irq(IRQ1_IRQ, IRQ1_IPR_ADDR, IRQ1_IPR_POS, 0x0f - IRQ1_IRQ); + make_ipr_irq(SIOF0_IRQ, SIOF0_IPR_ADDR, SIOF0_IPR_POS, SIOF0_PRIORITY); + make_ipr_irq(DMTE2_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY); + make_ipr_irq(DMTE3_IRQ, DMA1_IPR_ADDR, DMA1_IPR_POS, DMA1_PRIORITY); + make_ipr_irq(VIO_IRQ, VIO_IPR_ADDR, VIO_IPR_POS, VIO_PRIORITY); + + ctrl_outw(0x2000, PA_MRSHPC + 0x0c); /* mrshpc irq enable */ +} diff --git a/arch/sh/boards/se/7300/led.c b/arch/sh/boards/se/7300/led.c new file mode 100644 index 000000000..02c7f846c --- /dev/null +++ b/arch/sh/boards/se/7300/led.c @@ -0,0 +1,69 @@ +/* + * linux/arch/sh/boards/se/7300/led.c + * + * Derived from linux/arch/sh/boards/se/770x/led.c + * + * Copyright (C) 2000 Stuart Menefy + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * This file contains Solution Engine specific LED code. + */ + +#include +#include +#include + +static void +mach_led(int position, int value) +{ + volatile unsigned short *p = (volatile unsigned short *) PA_LED; + + if (value) { + *p |= (1 << 8); + } else { + *p &= ~(1 << 8); + } +} + + +/* Cycle the LED's in the clasic Knightrider/Sun pattern */ +void +heartbeat_7300se(void) +{ + static unsigned int cnt = 0, period = 0; + volatile unsigned short *p = (volatile unsigned short *) PA_LED; + static unsigned bit = 0, up = 1; + + cnt += 1; + if (cnt < period) { + return; + } + + cnt = 0; + + /* Go through the points (roughly!): + * f(0)=10, f(1)=16, f(2)=20, f(5)=35,f(inf)->110 + */ + period = 110 - ((300 << FSHIFT) / ((avenrun[0] / 5) + (3 << FSHIFT))); + + if (up) { + if (bit == 7) { + bit--; + up = 0; + } else { + bit++; + } + } else { + if (bit == 0) { + bit++; + up = 1; + } else { + bit--; + } + } + *p = 1 << (bit + 8); + +} + diff --git a/arch/sh/boards/se/7300/setup.c b/arch/sh/boards/se/7300/setup.c new file mode 100644 index 000000000..08536bc22 --- /dev/null +++ b/arch/sh/boards/se/7300/setup.c @@ -0,0 +1,66 @@ +/* + * linux/arch/sh/boards/se/7300/setup.c + * + * Copyright (C) 2003 Takashi Kusuda + * + * SH-Mobile SolutionEngine 7300 Support. 
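+ *
+ * The mv_7300se machine vector below wires up the sh7300se_* port I/O
+ * routines from io.c, the IRQ setup in init_7300se_IRQ(), and, when
+ * CONFIG_HEARTBEAT is set, the LED heartbeat from led.c.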
+ * + */ + +#include +#include +#include +#include +#include + +void heartbeat_7300se(void); +void init_7300se_IRQ(void); + +const char * +get_system_type(void) +{ + return "SolutionEngine 7300"; +} + +/* + * The Machine Vector + */ + +struct sh_machine_vector mv_7300se __initmv = { + .mv_nr_irqs = 109, + .mv_inb = sh7300se_inb, + .mv_inw = sh7300se_inw, + .mv_inl = sh7300se_inl, + .mv_outb = sh7300se_outb, + .mv_outw = sh7300se_outw, + .mv_outl = sh7300se_outl, + + .mv_inb_p = sh7300se_inb_p, + .mv_inw_p = sh7300se_inw, + .mv_inl_p = sh7300se_inl, + .mv_outb_p = sh7300se_outb_p, + .mv_outw_p = sh7300se_outw, + .mv_outl_p = sh7300se_outl, + + .mv_insb = sh7300se_insb, + .mv_insw = sh7300se_insw, + .mv_insl = sh7300se_insl, + .mv_outsb = sh7300se_outsb, + .mv_outsw = sh7300se_outsw, + .mv_outsl = sh7300se_outsl, + + .mv_init_irq = init_7300se_IRQ, +#ifdef CONFIG_HEARTBEAT + .mv_heartbeat = heartbeat_7300se, +#endif +}; + +ALIAS_MV(7300se) +/* + * Initialize the board + */ +void __init +platform_setup(void) +{ + +} diff --git a/arch/sh/cchips/voyagergx/Makefile b/arch/sh/cchips/voyagergx/Makefile new file mode 100644 index 000000000..085de72fd --- /dev/null +++ b/arch/sh/cchips/voyagergx/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for VoyagerGX +# + +obj-y := irq.o setup.o + +obj-$(CONFIG_USB_OHCI_HCD) += consistent.o + diff --git a/arch/sh/cchips/voyagergx/consistent.c b/arch/sh/cchips/voyagergx/consistent.c new file mode 100644 index 000000000..95a309d14 --- /dev/null +++ b/arch/sh/cchips/voyagergx/consistent.c @@ -0,0 +1,126 @@ +/* + * arch/sh/cchips/voyagergx/consistent.c + * + * Copyright (C) 2004 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct voya_alloc_entry { + struct list_head list; + unsigned long ofs; + unsigned long len; +}; + +static spinlock_t voya_list_lock = SPIN_LOCK_UNLOCKED; +static LIST_HEAD(voya_alloc_list); + +#define OHCI_SRAM_START 0xb0000000 +#define OHCI_HCCA_SIZE 0x100 +#define OHCI_SRAM_SIZE 0x10000 + +void *voyagergx_consistent_alloc(struct device *dev, size_t size, + dma_addr_t *handle, int flag) +{ + struct list_head *list = &voya_alloc_list; + struct voya_alloc_entry *entry; + struct sh_dev *shdev = to_sh_dev(dev); + unsigned long start, end; + unsigned long flags; + + /* + * The SM501 contains an integrated 8051 with its own SRAM. + * Devices within the cchip can all hook into the 8051 SRAM. + * We presently use this for the OHCI. + * + * Everything else goes through consistent_alloc(). + */ + if (!dev || dev->bus != &sh_bus_types[SH_BUS_VIRT] || + (dev->bus == &sh_bus_types[SH_BUS_VIRT] && + shdev->dev_id != SH_DEV_ID_USB_OHCI)) + return consistent_alloc(flag, size, handle); + + start = OHCI_SRAM_START + OHCI_HCCA_SIZE; + + entry = kmalloc(sizeof(struct voya_alloc_entry), GFP_ATOMIC); + if (!entry) + return NULL; + + entry->len = (size + 15) & ~15; + + /* + * The basis for this allocator is dwmw2's malloc.. 
the + * Matrox allocator :-) + */ + spin_lock_irqsave(&voya_list_lock, flags); + list_for_each(list, &voya_alloc_list) { + struct voya_alloc_entry *p; + + p = list_entry(list, struct voya_alloc_entry, list); + + if (p->ofs - start >= size) + goto out; + + start = p->ofs + p->len; + } + + end = start + (OHCI_SRAM_SIZE - OHCI_HCCA_SIZE); + list = &voya_alloc_list; + + if (end - start >= size) { +out: + entry->ofs = start; + list_add_tail(&entry->list, list); + spin_unlock_irqrestore(&voya_list_lock, flags); + + *handle = start; + return (void *)start; + } + + kfree(entry); + spin_unlock_irqrestore(&voya_list_lock, flags); + + return NULL; +} + +void voyagergx_consistent_free(struct device *dev, size_t size, + void *vaddr, dma_addr_t handle) +{ + struct voya_alloc_entry *entry; + struct sh_dev *shdev = to_sh_dev(dev); + unsigned long flags; + + if (!dev || dev->bus != &sh_bus_types[SH_BUS_VIRT] || + (dev->bus == &sh_bus_types[SH_BUS_VIRT] && + shdev->dev_id != SH_DEV_ID_USB_OHCI)) { + consistent_free(vaddr, size); + return; + } + + spin_lock_irqsave(&voya_list_lock, flags); + list_for_each_entry(entry, &voya_alloc_list, list) { + if (entry->ofs != handle) + continue; + + list_del(&entry->list); + kfree(entry); + + break; + } + spin_unlock_irqrestore(&voya_list_lock, flags); +} + +EXPORT_SYMBOL(voyagergx_consistent_alloc); +EXPORT_SYMBOL(voyagergx_consistent_free); + diff --git a/arch/sh/cchips/voyagergx/irq.c b/arch/sh/cchips/voyagergx/irq.c new file mode 100644 index 000000000..3079234cb --- /dev/null +++ b/arch/sh/cchips/voyagergx/irq.c @@ -0,0 +1,194 @@ +/* -------------------------------------------------------------------- */ +/* setup_voyagergx.c: */ +/* -------------------------------------------------------------------- */ +/* This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + + Copyright 2003 (c) Lineo uSolutions,Inc. 
+*/ +/* -------------------------------------------------------------------- */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static void disable_voyagergx_irq(unsigned int irq) +{ + unsigned long flags, val; + unsigned long mask = 1 << (irq - VOYAGER_IRQ_BASE); + + pr_debug("disable_voyagergx_irq(%d): mask=%x\n", irq, mask); + local_irq_save(flags); + val = inl(VOYAGER_INT_MASK); + val &= ~mask; + outl(val, VOYAGER_INT_MASK); + local_irq_restore(flags); +} + + +static void enable_voyagergx_irq(unsigned int irq) +{ + unsigned long flags, val; + unsigned long mask = 1 << (irq - VOYAGER_IRQ_BASE); + + pr_debug("disable_voyagergx_irq(%d): mask=%x\n", irq, mask); + local_irq_save(flags); + val = inl(VOYAGER_INT_MASK); + val |= mask; + outl(val, VOYAGER_INT_MASK); + local_irq_restore(flags); +} + + +static void mask_and_ack_voyagergx(unsigned int irq) +{ + disable_voyagergx_irq(irq); +} + +static void end_voyagergx_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_voyagergx_irq(irq); +} + +static unsigned int startup_voyagergx_irq(unsigned int irq) +{ + enable_voyagergx_irq(irq); + return 0; +} + +static void shutdown_voyagergx_irq(unsigned int irq) +{ + disable_voyagergx_irq(irq); +} + +static struct hw_interrupt_type voyagergx_irq_type = { + "VOYAGERGX-IRQ", + startup_voyagergx_irq, + shutdown_voyagergx_irq, + enable_voyagergx_irq, + disable_voyagergx_irq, + mask_and_ack_voyagergx, + end_voyagergx_irq, +}; + +static irqreturn_t voyagergx_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + printk(KERN_INFO + "VoyagerGX: spurious interrupt, status: 0x%x\n", + inl(INT_STATUS)); + return IRQ_HANDLED; +} + + +/*====================================================*/ + +static struct { + int (*func)(int, void *); + void *dev; +} voyagergx_demux[VOYAGER_IRQ_NUM]; + +void voyagergx_register_irq_demux(int irq, + int (*demux)(int irq, void *dev), void *dev) +{ + voyagergx_demux[irq - VOYAGER_IRQ_BASE].func = demux; + voyagergx_demux[irq - VOYAGER_IRQ_BASE].dev = dev; +} + +void voyagergx_unregister_irq_demux(int irq) +{ + voyagergx_demux[irq - VOYAGER_IRQ_BASE].func = 0; +} + +int voyagergx_irq_demux(int irq) +{ + + if (irq == IRQ_VOYAGER ) { + unsigned long i = 0, bit __attribute__ ((unused)); + unsigned long val = inl(INT_STATUS); +#if 1 + if ( val & ( 1 << 1 )){ + i = 1; + } else if ( val & ( 1 << 2 )){ + i = 2; + } else if ( val & ( 1 << 6 )){ + i = 6; + } else if( val & ( 1 << 10 )){ + i = 10; + } else if( val & ( 1 << 11 )){ + i = 11; + } else if( val & ( 1 << 12 )){ + i = 12; + } else if( val & ( 1 << 17 )){ + i = 17; + } else { + printk("Unexpected IRQ irq = %d status = 0x%08lx\n", irq, val); + } + pr_debug("voyagergx_irq_demux %d \n", i); +#else + for (bit = 1, i = 0 ; i < VOYAGER_IRQ_NUM ; bit <<= 1, i++) + if (val & bit) + break; +#endif + if (i < VOYAGER_IRQ_NUM) { + irq = VOYAGER_IRQ_BASE + i; + if (voyagergx_demux[i].func != 0) + irq = voyagergx_demux[i].func(irq, voyagergx_demux[i].dev); + } + } + return irq; +} + +static struct irqaction irq0 = { voyagergx_interrupt, SA_INTERRUPT, 0, "VOYAGERGX", NULL, NULL}; + +void __init setup_voyagergx_irq(void) +{ + int i, flag; + + printk(KERN_INFO "VoyagerGX configured at 0x%x on irq %d(mapped into %d to %d)\n", + VOYAGER_BASE, + IRQ_VOYAGER, + VOYAGER_IRQ_BASE, + VOYAGER_IRQ_BASE + VOYAGER_IRQ_NUM - 1); + + for (i=0; i +#include +#include +#include + +static int __init 
setup_voyagergx(void) +{ + unsigned long val; + + val = inl(DRAM_CTRL); + val |= (DRAM_CTRL_CPU_COLUMN_SIZE_256 | + DRAM_CTRL_CPU_ACTIVE_PRECHARGE | + DRAM_CTRL_CPU_RESET | + DRAM_CTRL_REFRESH_COMMAND | + DRAM_CTRL_BLOCK_WRITE_TIME | + DRAM_CTRL_BLOCK_WRITE_PRECHARGE | + DRAM_CTRL_ACTIVE_PRECHARGE | + DRAM_CTRL_RESET | + DRAM_CTRL_REMAIN_ACTIVE); + outl(val, DRAM_CTRL); + + return 0; +} + +module_init(setup_voyagergx); diff --git a/arch/sh/configs/rts7751r2d_defconfig b/arch/sh/configs/rts7751r2d_defconfig new file mode 100644 index 000000000..f9e1f7c5a --- /dev/null +++ b/arch/sh/configs/rts7751r2d_defconfig @@ -0,0 +1,809 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_SUPERH=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y + +# +# Loadable module support +# +CONFIG_MODULES=y +# CONFIG_MODULE_UNLOAD is not set +CONFIG_OBSOLETE_MODPARM=y +# CONFIG_MODVERSIONS is not set +# CONFIG_KMOD is not set + +# +# System type +# +# CONFIG_SH_SOLUTION_ENGINE is not set +# CONFIG_SH_7751_SOLUTION_ENGINE is not set +# CONFIG_SH_7751_SYSTEMH is not set +# CONFIG_SH_STB1_HARP is not set +# CONFIG_SH_STB1_OVERDRIVE is not set +# CONFIG_SH_HP620 is not set +# CONFIG_SH_HP680 is not set +# CONFIG_SH_HP690 is not set +# CONFIG_SH_CQREEK is not set +# CONFIG_SH_DMIDA is not set +# CONFIG_SH_EC3104 is not set +# CONFIG_SH_SATURN is not set +# CONFIG_SH_DREAMCAST is not set +# CONFIG_SH_CAT68701 is not set +# CONFIG_SH_BIGSUR is not set +# CONFIG_SH_SH2000 is not set +# CONFIG_SH_ADX is not set +# CONFIG_SH_MPC1211 is not set +# CONFIG_SH_SECUREEDGE5410 is not set +CONFIG_SH_RTS7751R2D=y +# CONFIG_SH_UNKNOWN is not set +# CONFIG_CPU_SH2 is not set +# CONFIG_CPU_SH3 is not set +CONFIG_CPU_SH4=y +# CONFIG_CPU_SUBTYPE_SH7604 is not set +# CONFIG_CPU_SUBTYPE_SH7300 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +CONFIG_CPU_SUBTYPE_SH7751=y +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_ST40STB1 is not set +CONFIG_MMU=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="mem=64M console=ttySC0,115200 root=/dev/hda1" +CONFIG_MEMORY_START=0x0c000000 +CONFIG_MEMORY_SIZE=0x04000000 +CONFIG_MEMORY_SET=y +# CONFIG_MEMORY_OVERRIDE is not set +CONFIG_SH_RTC=y +CONFIG_ZERO_PAGE_OFFSET=0x00010000 +CONFIG_BOOT_LINK_OFFSET=0x00800000 +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_PREEMPT is not set +# CONFIG_UBC_WAKEUP is not set +# CONFIG_SH_WRITETHROUGH is not set +# CONFIG_SH_OCRAM is not set +# CONFIG_SH_STORE_QUEUES is not set +# CONFIG_SMP is not set +CONFIG_VOYAGERGX=y +CONFIG_RTS7751R2D_REV11=y +CONFIG_SH_PCLK_FREQ=60000000 +# CONFIG_CPU_FREQ is not set +CONFIG_SH_DMA=y +CONFIG_NR_ONCHIP_DMA_CHANNELS=8 +# CONFIG_NR_DMA_CHANNELS_BOOL is not set +# CONFIG_DMA_PAGE_OPS is not set + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +CONFIG_ISA=y +CONFIG_PCI=y +CONFIG_SH_PCIDMA_NONCOHERENT=y +CONFIG_PCI_AUTO=y +CONFIG_PCI_AUTO_UPDATE_RESOURCES=y +CONFIG_PCI_DMA=y +# CONFIG_PCI_LEGACY_PROC is not set +CONFIG_PCI_NAMES=y +CONFIG_HOTPLUG=y + +# +# 
PCMCIA/CardBus support +# +CONFIG_PCMCIA=m +CONFIG_YENTA=m +CONFIG_CARDBUS=y +# CONFIG_I82092 is not set +# CONFIG_I82365 is not set +# CONFIG_TCIC is not set +CONFIG_PCMCIA_PROBE=y + +# +# PCI Hotplug Support +# +CONFIG_HOTPLUG_PCI=y +# CONFIG_HOTPLUG_PCI_FAKE is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_FLAT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Generic Driver Options +# +# CONFIG_FW_LOADER is not set + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_XD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_INITRD is not set +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +CONFIG_BLK_DEV_IDEDISK=y +# CONFIG_IDEDISK_MULTI_MODE is not set +# CONFIG_IDEDISK_STROKE is not set +CONFIG_BLK_DEV_IDECS=m +# CONFIG_BLK_DEV_IDECD is not set +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDEFLOPPY is not set +# CONFIG_IDE_TASK_IOCTL is not set +# CONFIG_IDE_TASKFILE_IO is not set + +# +# IDE chipset support/bugfixes +# +# CONFIG_BLK_DEV_IDEPCI is not set +# CONFIG_IDE_CHIPSETS is not set +# CONFIG_BLK_DEV_IDEDMA is not set +# CONFIG_IDEDMA_AUTO is not set +# CONFIG_DMA_NONPCI is not set +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Old CD-ROM drivers (not SCSI, not IDE) +# +# CONFIG_CD_NO_IDESCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# IEEE 1394 (FireWire) support (EXPERIMENTAL) +# +# CONFIG_IEEE1394 is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_INET_ECN is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_DECNET is not set +# CONFIG_BRIDGE is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +CONFIG_IPV6_SCTP__=y +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +CONFIG_NETDEVICES=y + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# 
CONFIG_STNIC is not set +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_LANCE is not set +# CONFIG_NET_VENDOR_SMC is not set +# CONFIG_NET_VENDOR_RACAL is not set + +# +# Tulip family network device support +# +# CONFIG_NET_TULIP is not set +# CONFIG_AT1700 is not set +# CONFIG_DEPCA is not set +# CONFIG_HP100 is not set +CONFIG_NET_ISA=y +# CONFIG_E2100 is not set +# CONFIG_EWRK3 is not set +# CONFIG_EEXPRESS is not set +# CONFIG_EEXPRESS_PRO is not set +# CONFIG_HPLAN_PLUS is not set +# CONFIG_HPLAN is not set +# CONFIG_LP486E is not set +# CONFIG_ETH16I is not set +CONFIG_NE2000=m +# CONFIG_ZNET is not set +# CONFIG_SEEQ8005 is not set +CONFIG_NET_PCI=y +# CONFIG_PCNET32 is not set +# CONFIG_AMD8111_ETH is not set +# CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_AC3200 is not set +# CONFIG_APRICOT is not set +# CONFIG_B44 is not set +# CONFIG_CS89x0 is not set +# CONFIG_DGRS is not set +# CONFIG_EEPRO100 is not set +# CONFIG_E100 is not set +# CONFIG_FEALNX is not set +# CONFIG_NATSEMI is not set +# CONFIG_NE2K_PCI is not set +# CONFIG_8139CP is not set +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +# CONFIG_8139TOO_8129 is not set +# CONFIG_8139_OLD_RX_RESET is not set +# CONFIG_SIS900 is not set +# CONFIG_EPIC100 is not set +# CONFIG_SUNDANCE is not set +# CONFIG_TLAN is not set +# CONFIG_VIA_RHINE is not set +# CONFIG_NET_POCKET is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SIS190 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# Wireless LAN (non-hamradio) +# +CONFIG_NET_RADIO=y + +# +# Obsolete Wireless cards support (pre-802.11) +# +# CONFIG_STRIP is not set +# CONFIG_ARLAN is not set +# CONFIG_WAVELAN is not set +# CONFIG_PCMCIA_WAVELAN is not set +# CONFIG_PCMCIA_NETWAVE is not set + +# +# Wireless 802.11 Frequency Hopping cards support +# +# CONFIG_PCMCIA_RAYCS is not set + +# +# Wireless 802.11b ISA/PCI cards support +# +# CONFIG_AIRO is not set +CONFIG_HERMES=m +# CONFIG_PLX_HERMES is not set +# CONFIG_TMD_HERMES is not set +# CONFIG_PCI_HERMES is not set + +# +# Wireless 802.11b Pcmcia/Cardbus cards support +# +CONFIG_PCMCIA_HERMES=m +# CONFIG_AIRO_CS is not set +# CONFIG_PCMCIA_ATMEL is not set +# CONFIG_PCMCIA_WL3501 is not set +CONFIG_NET_WIRELESS=y + +# +# Token Ring devices +# +# CONFIG_TR is not set +# CONFIG_RCPCI is not set +# CONFIG_SHAPER is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set + +# +# PCMCIA network device support +# +# CONFIG_NET_PCMCIA is not set + +# +# Amateur Radio support +# +# CONFIG_HAMRADIO is not set + +# +# IrDA (infrared) support +# +# CONFIG_IRDA is not set + +# +# Bluetooth support +# +# CONFIG_BT is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN_BOOL is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +# CONFIG_INPUT is not set + +# +# Userland interfaces +# + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +# CONFIG_SERIO is not set +# CONFIG_SERIO_I8042 is not set + +# +# Input Device Drivers +# + +# +# Character devices +# +# CONFIG_VT is not set +# 
CONFIG_SERIAL is not set +CONFIG_SH_SCI=y +CONFIG_SERIAL_CONSOLE=y +CONFIG_RTC_9701JE=y + +# +# Unix 98 PTY support +# +# CONFIG_UNIX98_PTYS is not set +CONFIG_HEARTBEAT=y +# CONFIG_PSMOUSE is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_RTC is not set + +# +# PCMCIA character devices +# +# CONFIG_SYNCLINK_CS is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_SH_SCI is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# I2C Algorithms +# + +# +# I2C Hardware Bus support +# + +# +# I2C Hardware Sensors Chip support +# +# CONFIG_I2C_SENSOR is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +CONFIG_MINIX_FS=y +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +# CONFIG_DEVFS_FS is not set +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_EXPORTFS is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_INTERMEZZO_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y + +# +# Native Language Support +# +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +CONFIG_NLS_CODEPAGE_932=y +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# 
CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Sound +# +CONFIG_SOUND=y + +# +# Advanced Linux Sound Architecture +# +CONFIG_SND=m +# CONFIG_SND_SEQUENCER is not set +# CONFIG_SND_OSSEMUL is not set +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set + +# +# Generic devices +# +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set + +# +# ISA devices +# +# CONFIG_SND_AD1848 is not set +# CONFIG_SND_CS4231 is not set +# CONFIG_SND_CS4232 is not set +# CONFIG_SND_CS4236 is not set +# CONFIG_SND_ES1688 is not set +# CONFIG_SND_ES18XX is not set +# CONFIG_SND_GUSCLASSIC is not set +# CONFIG_SND_GUSEXTREME is not set +# CONFIG_SND_GUSMAX is not set +# CONFIG_SND_INTERWAVE is not set +# CONFIG_SND_INTERWAVE_STB is not set +# CONFIG_SND_OPTI92X_AD1848 is not set +# CONFIG_SND_OPTI92X_CS4231 is not set +# CONFIG_SND_OPTI93X is not set +# CONFIG_SND_SB8 is not set +# CONFIG_SND_SB16 is not set +# CONFIG_SND_SBAWE is not set +# CONFIG_SND_WAVEFRONT is not set +# CONFIG_SND_CMI8330 is not set +# CONFIG_SND_OPL3SA2 is not set +# CONFIG_SND_SGALAXY is not set +# CONFIG_SND_SSCAPE is not set + +# +# PCI devices +# +# CONFIG_SND_ALI5451 is not set +# CONFIG_SND_AZT3328 is not set +# CONFIG_SND_CS46XX is not set +# CONFIG_SND_CS4281 is not set +# CONFIG_SND_EMU10K1 is not set +# CONFIG_SND_KORG1212 is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_TRIDENT is not set +CONFIG_SND_YMFPCI=m +# CONFIG_SND_ALS4000 is not set +# CONFIG_SND_CMIPCI is not set +# CONFIG_SND_ENS1370 is not set +# CONFIG_SND_ENS1371 is not set +# CONFIG_SND_ES1938 is not set +# CONFIG_SND_ES1968 is not set +# CONFIG_SND_MAESTRO3 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_ICE1712 is not set +# CONFIG_SND_ICE1724 is not set +# CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_SONICVIBES is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VX222 is not set + +# +# PCMCIA devices +# +# CONFIG_SND_VXPOCKET is not set +# CONFIG_SND_VXP440 is not set + +# +# Open Sound System +# +CONFIG_SOUND_PRIME=m +# CONFIG_SOUND_BT878 is not set +CONFIG_SOUND_CMPCI=m +# CONFIG_SOUND_CMPCI_FM is not set +# CONFIG_SOUND_CMPCI_MIDI is not set +# CONFIG_SOUND_CMPCI_JOYSTICK is not set +# CONFIG_SOUND_CMPCI_CM8738 is not set +# CONFIG_SOUND_EMU10K1 is not set +# CONFIG_SOUND_FUSION is not set +# CONFIG_SOUND_CS4281 is not set +# CONFIG_SOUND_ES1370 is not set +# CONFIG_SOUND_ES1371 is not set +# CONFIG_SOUND_ESSSOLO1 is not set +# CONFIG_SOUND_MAESTRO is not set +# CONFIG_SOUND_MAESTRO3 is not set +# CONFIG_SOUND_ICH is not set +# CONFIG_SOUND_SONICVIBES is not set +# CONFIG_SOUND_TRIDENT is not set +# CONFIG_SOUND_MSNDCLAS is not set +# CONFIG_SOUND_MSNDPIN is not set +# CONFIG_SOUND_VIA82CXXX is not set +# CONFIG_SOUND_OSS is not set +# CONFIG_SOUND_ALI5455 is not set +# CONFIG_SOUND_FORTE is not set +# CONFIG_SOUND_RME96XX is not set +# CONFIG_SOUND_AD1980 is not set +CONFIG_SOUND_VOYAGERGX=m + +# +# USB support +# +# CONFIG_USB is not set +# CONFIG_USB_GADGET is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_DEBUG_SPINLOCK is not 
set +# CONFIG_SH_STANDARD_BIOS is not set +# CONFIG_KGDB is not set +# CONFIG_FRAME_POINTER is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y diff --git a/arch/sh/configs/se7300_defconfig b/arch/sh/configs/se7300_defconfig new file mode 100644 index 000000000..842ca47a6 --- /dev/null +++ b/arch/sh/configs/se7300_defconfig @@ -0,0 +1,461 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_SUPERH=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +# CONFIG_SWAP is not set +# CONFIG_SYSVIPC is not set +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +# CONFIG_KALLSYMS is not set +# CONFIG_FUTEX is not set +# CONFIG_EPOLL is not set +CONFIG_IOSCHED_NOOP=y +# CONFIG_IOSCHED_AS is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# System type +# +# CONFIG_SH_SOLUTION_ENGINE is not set +# CONFIG_SH_7751_SOLUTION_ENGINE is not set +CONFIG_SH_7300_SOLUTION_ENGINE=y +# CONFIG_SH_7751_SYSTEMH is not set +# CONFIG_SH_STB1_HARP is not set +# CONFIG_SH_STB1_OVERDRIVE is not set +# CONFIG_SH_HP620 is not set +# CONFIG_SH_HP680 is not set +# CONFIG_SH_HP690 is not set +# CONFIG_SH_CQREEK is not set +# CONFIG_SH_DMIDA is not set +# CONFIG_SH_EC3104 is not set +# CONFIG_SH_SATURN is not set +# CONFIG_SH_DREAMCAST is not set +# CONFIG_SH_CAT68701 is not set +# CONFIG_SH_BIGSUR is not set +# CONFIG_SH_SH2000 is not set +# CONFIG_SH_ADX is not set +# CONFIG_SH_MPC1211 is not set +# CONFIG_SH_SECUREEDGE5410 is not set +# CONFIG_SH_HS7751RVOIP is not set +# CONFIG_SH_RTS7751R2D is not set +# CONFIG_SH_UNKNOWN is not set +# CONFIG_CPU_SH2 is not set +CONFIG_CPU_SH3=y +# CONFIG_CPU_SH4 is not set +# CONFIG_CPU_SUBTYPE_SH7604 is not set +CONFIG_CPU_SUBTYPE_SH7300=y +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +# CONFIG_CPU_SUBTYPE_SH7751 is not set +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_ST40STB1 is not set +# CONFIG_CPU_SUBTYPE_ST40GX1 is not set +CONFIG_MMU=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttySC0,38400 root=/dev/ram0" +CONFIG_MEMORY_START=0x0c000000 +CONFIG_MEMORY_SIZE=0x04000000 +# CONFIG_MEMORY_OVERRIDE is not set +CONFIG_SH_DSP=y +# CONFIG_SH_ADC is not set +CONFIG_ZERO_PAGE_OFFSET=0x00001000 +CONFIG_BOOT_LINK_OFFSET=0x00210000 +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_PREEMPT is not set +# CONFIG_UBC_WAKEUP is not set +# CONFIG_SH_WRITETHROUGH is not set +# CONFIG_SH_OCRAM is not set +# CONFIG_SMP is not set +# CONFIG_SH_PCLK_CALC is not set +CONFIG_SH_PCLK_FREQ=33333333 + +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + +# +# DMA support +# +# CONFIG_SH_DMA is not set + +# +# Companion Chips +# +# CONFIG_HD6446X_SERIES is not set +CONFIG_HEARTBEAT=y + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +# CONFIG_PCI is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_FLAT is not set +# CONFIG_BINFMT_MISC is not set + +# 
+# SH initrd options +# +CONFIG_EMBEDDED_RAMDISK=y +CONFIG_EMBEDDED_RAMDISK_IMAGE="ramdisk.gz" + +# +# Device Drivers +# + +# +# Generic Driver Options +# + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_LOOP is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# + +# +# Networking support +# +# CONFIG_NET is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set + +# +# ISDN subsystem +# + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +# CONFIG_SERIO_SERPORT is not set +# CONFIG_SERIO_CT82C710 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +# CONFIG_UNIX98_PTYS is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +CONFIG_IPMI_HANDLER=y +# CONFIG_IPMI_PANIC_EVENT is not set +CONFIG_IPMI_DEVICE_INTERFACE=y +# CONFIG_IPMI_SI is not set +CONFIG_IPMI_WATCHDOG=y + +# +# Watchdog Cards +# +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=y +# CONFIG_SH_WDT is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + 
+# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +CONFIG_DEVFS_FS=y +CONFIG_DEVFS_MOUNT=y +# CONFIG_DEVFS_DEBUG is not set +# CONFIG_TMPFS is not set +# CONFIG_HUGETLBFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_INFO is not set +CONFIG_SH_STANDARD_BIOS=y +CONFIG_EARLY_PRINTK=y +CONFIG_KGDB=y + +# +# KGDB configuration options +# +# CONFIG_MORE_COMPILE_OPTIONS is not set +# CONFIG_KGDB_NMI is not set +# CONFIG_KGDB_THREAD is not set +# CONFIG_SH_KGDB_CONSOLE is not set +# CONFIG_KGDB_SYSRQ is not set +# CONFIG_KGDB_KERNEL_ASSERTS is not set + +# +# Serial port setup +# +CONFIG_KGDB_DEFPORT=1 +CONFIG_KGDB_DEFBAUD=115200 +CONFIG_KGDB_DEFPARITY_N=y +# CONFIG_KGDB_DEFPARITY_E is not set +# CONFIG_KGDB_DEFPARITY_O is not set +CONFIG_KGDB_DEFBITS_8=y +# CONFIG_KGDB_DEFBITS_7 is not set +# CONFIG_FRAME_POINTER is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c new file mode 100644 index 000000000..71a6d4e78 --- /dev/null +++ b/arch/sh/drivers/dma/dma-sysfs.c @@ -0,0 +1,133 @@ +/* + * arch/sh/drivers/dma/dma-sysfs.c + * + * sysfs interface for SH DMA API + * + * Copyright (C) 2004 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
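+ *
+ * Registers a "dma" sysdev class with a class-wide "devices" listing
+ * plus, per channel, read/write "dev_id" and "mode" attributes, a
+ * write-only "config" attribute and read-only "count" and "flags".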
+ */ +#include +#include +#include +#include +#include + +static struct sysdev_class dma_sysclass = { + set_kset_name("dma"), +}; + +EXPORT_SYMBOL(dma_sysclass); + +static ssize_t dma_show_devices(struct sys_device *dev, char *buf) +{ + ssize_t len = 0; + int i; + + for (i = 0; i < MAX_DMA_CHANNELS; i++) { + struct dma_info *info = get_dma_info(i); + struct dma_channel *channel = &info->channels[i]; + + len += sprintf(buf + len, "%2d: %14s %s\n", + channel->chan, info->name, + channel->dev_id); + } + + return len; +} + +static SYSDEV_ATTR(devices, S_IRUGO, dma_show_devices, NULL); + +static int __init dma_sysclass_init(void) +{ + int ret; + + ret = sysdev_class_register(&dma_sysclass); + if (ret == 0) + sysfs_create_file(&dma_sysclass.kset.kobj, &attr_devices.attr); + + return ret; +} + +postcore_initcall(dma_sysclass_init); + +static ssize_t dma_show_dev_id(struct sys_device *dev, char *buf) +{ + struct dma_channel *channel = to_dma_channel(dev); + return sprintf(buf, "%s\n", channel->dev_id); +} + +static ssize_t dma_store_dev_id(struct sys_device *dev, + const char *buf, size_t count) +{ + struct dma_channel *channel = to_dma_channel(dev); + strcpy(channel->dev_id, buf); + return count; +} + +static SYSDEV_ATTR(dev_id, S_IRUGO | S_IWUSR, dma_show_dev_id, dma_store_dev_id); + +static ssize_t dma_store_config(struct sys_device *dev, + const char *buf, size_t count) +{ + struct dma_channel *channel = to_dma_channel(dev); + unsigned long config; + + config = simple_strtoul(buf, NULL, 0); + dma_configure_channel(channel->chan, config); + + return count; +} + +static SYSDEV_ATTR(config, S_IWUSR, NULL, dma_store_config); + +static ssize_t dma_show_mode(struct sys_device *dev, char *buf) +{ + struct dma_channel *channel = to_dma_channel(dev); + return sprintf(buf, "0x%08x\n", channel->mode); +} + +static ssize_t dma_store_mode(struct sys_device *dev, + const char *buf, size_t count) +{ + struct dma_channel *channel = to_dma_channel(dev); + channel->mode = simple_strtoul(buf, NULL, 0); + return count; +} + +static SYSDEV_ATTR(mode, S_IRUGO | S_IWUSR, dma_show_mode, dma_store_mode); + +#define dma_ro_attr(field, fmt) \ +static ssize_t dma_show_##field(struct sys_device *dev, char *buf) \ +{ \ + struct dma_channel *channel = to_dma_channel(dev); \ + return sprintf(buf, fmt, channel->field); \ +} \ +static SYSDEV_ATTR(field, S_IRUGO, dma_show_##field, NULL); + +dma_ro_attr(count, "0x%08x\n"); +dma_ro_attr(flags, "0x%08lx\n"); + +int __init dma_create_sysfs_files(struct dma_channel *chan) +{ + struct sys_device *dev = &chan->dev; + int ret; + + dev->id = chan->chan; + dev->cls = &dma_sysclass; + + ret = sysdev_register(dev); + if (ret) + return ret; + + sysdev_create_file(dev, &attr_dev_id); + sysdev_create_file(dev, &attr_count); + sysdev_create_file(dev, &attr_mode); + sysdev_create_file(dev, &attr_flags); + sysdev_create_file(dev, &attr_config); + + return 0; +} + diff --git a/arch/sh/drivers/pci/fixups-rts7751r2d.c b/arch/sh/drivers/pci/fixups-rts7751r2d.c new file mode 100644 index 000000000..7b5dbe157 --- /dev/null +++ b/arch/sh/drivers/pci/fixups-rts7751r2d.c @@ -0,0 +1,32 @@ +/* + * arch/sh/drivers/pci/fixups-rts7751r2d.c + * + * RTS7751R2D PCI fixups + * + * Copyright (C) 2003 Lineo uSolutions, Inc. + * Copyright (C) 2004 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
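+ *
+ * pci_fixup_pcic() below programs PCICONF1/PCICONF4 and mirrors the
+ * memory controller's MCR into PCIMCR with the MRSET and refresh
+ * bits masked off.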
+ */ +#include "pci-sh7751.h" +#include + +#define PCIMCR_MRSET_OFF 0xBFFFFFFF +#define PCIMCR_RFSH_OFF 0xFFFFFFFB + +int pci_fixup_pcic(void) +{ + unsigned long mcr; + + outl(0xfb900047, SH7751_PCICONF1); + outl(0xab000001, SH7751_PCICONF4); + + mcr = inl(SH7751_MCR); + mcr = (mcr & PCIMCR_MRSET_OFF) & PCIMCR_RFSH_OFF; + outl(mcr, SH7751_PCIMCR); + + return 0; +} + diff --git a/arch/sh/drivers/pci/ops-rts7751r2d.c b/arch/sh/drivers/pci/ops-rts7751r2d.c new file mode 100644 index 000000000..2bceb43c9 --- /dev/null +++ b/arch/sh/drivers/pci/ops-rts7751r2d.c @@ -0,0 +1,74 @@ +/* + * linux/arch/sh/kernel/pci-rts7751r2d.c + * + * Author: Ian DaSilva (idasilva@mvista.com) + * + * Highly leveraged from pci-bigsur.c, written by Dustin McIntire. + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * PCI initialization for the Renesas SH7751R RTS7751R2D board + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include "pci-sh7751.h" +#include + +int __init pcibios_map_platform_irq(u8 slot, u8 pin) +{ + switch (slot) { + case 0: return IRQ_PCISLOT1; /* PCI Extend slot #1 */ + case 1: return IRQ_PCISLOT2; /* PCI Extend slot #2 */ + case 2: return IRQ_PCMCIA; /* PCI Cardbus Bridge */ + case 3: return IRQ_PCIETH; /* Realtek Ethernet controller */ + default: + printk("PCI: Bad IRQ mapping request for slot %d\n", slot); + return -1; + } +} + +static struct resource sh7751_io_resource = { + .name = "SH7751_IO", + .start = 0x4000, + .end = 0x4000 + SH7751_PCI_IO_SIZE - 1, + .flags = IORESOURCE_IO +}; + +static struct resource sh7751_mem_resource = { + .name = "SH7751_mem", + .start = SH7751_PCI_MEMORY_BASE, + .end = SH7751_PCI_MEMORY_BASE + SH7751_PCI_MEM_SIZE - 1, + .flags = IORESOURCE_MEM +}; + +extern struct pci_ops sh7751_pci_ops; + +struct pci_channel board_pci_channels[] = { + { &sh7751_pci_ops, &sh7751_io_resource, &sh7751_mem_resource, 0, 0xff }, + { NULL, NULL, NULL, 0, 0 }, +}; +EXPORT_SYMBOL(board_pci_channels); + +static struct sh7751_pci_address_map sh7751_pci_map = { + .window0 = { + .base = SH7751_CS3_BASE_ADDR, + .size = 0x03f00000, + }, + + .flags = SH7751_PCIC_NO_RESET, +}; + +int __init pcibios_init_platform(void) +{ + return sh7751_pcic_init(&sh7751_pci_map); +} + diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c new file mode 100644 index 000000000..da3d6877f --- /dev/null +++ b/arch/sh/kernel/cpu/adc.c @@ -0,0 +1,36 @@ +/* + * linux/arch/sh/kernel/adc.c -- SH3 on-chip ADC support + * + * Copyright (C) 2004 Andriy Skulysh + */ + +#include +#include +#include + + +int adc_single(unsigned int channel) +{ + int off; + unsigned char csr; + + if (channel >= 8) return -1; + + off = (channel & 0x03) << 2; + + csr = ctrl_inb(ADCSR); + csr = channel | ADCSR_ADST | ADCSR_CKS; + ctrl_outb(csr, ADCSR); + + do { + csr = ctrl_inb(ADCSR); + } while ((csr & ADCSR_ADF) == 0); + + csr &= ~(ADCSR_ADF | ADCSR_ADST); + ctrl_outb(csr, ADCSR); + + return (((ctrl_inb(ADDRAH + off) << 8) | + ctrl_inb(ADDRAL + off)) >> 6); +} + +EXPORT_SYMBOL(adc_single); diff --git a/arch/sh/kernel/cpu/bus.c b/arch/sh/kernel/cpu/bus.c new file mode 100644 index 000000000..ace82f4b4 --- /dev/null +++ b/arch/sh/kernel/cpu/bus.c @@ -0,0 +1,195 @@ +/* + * arch/sh/kernel/cpu/bus.c + * + * Virtual bus for SuperH. 
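+ * Devices and drivers on it are matched purely by dev_id, and the
+ * probe/remove and suspend/resume callbacks are forwarded to the
+ * corresponding sh_driver hooks.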
+ * + * Copyright (C) 2004 Paul Mundt + * + * Shamelessly cloned from arch/arm/mach-omap/bus.c, which was written + * by: + * + * Copyright (C) 2003 - 2004 Nokia Corporation + * Written by Tony Lindgren + * Portions of code based on sa1111.c. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include +#include +#include +#include +#include + +static int sh_bus_match(struct device *dev, struct device_driver *drv) +{ + struct sh_driver *shdrv = to_sh_driver(drv); + struct sh_dev *shdev = to_sh_dev(dev); + + return shdev->dev_id == shdrv->dev_id; +} + +static int sh_bus_suspend(struct device *dev, u32 state) +{ + struct sh_dev *shdev = to_sh_dev(dev); + struct sh_driver *shdrv = to_sh_driver(dev->driver); + + if (shdrv && shdrv->suspend) + return shdrv->suspend(shdev, state); + + return 0; +} + +static int sh_bus_resume(struct device *dev) +{ + struct sh_dev *shdev = to_sh_dev(dev); + struct sh_driver *shdrv = to_sh_driver(dev->driver); + + if (shdrv && shdrv->resume) + return shdrv->resume(shdev); + + return 0; +} + +static struct device sh_bus_devices[SH_NR_BUSES] = { + { + .bus_id = SH_BUS_NAME_VIRT, + }, +}; + +struct bus_type sh_bus_types[SH_NR_BUSES] = { + { + .name = SH_BUS_NAME_VIRT, + .match = sh_bus_match, + .suspend = sh_bus_suspend, + .resume = sh_bus_resume, + }, +}; + +static int sh_device_probe(struct device *dev) +{ + struct sh_dev *shdev = to_sh_dev(dev); + struct sh_driver *shdrv = to_sh_driver(dev->driver); + + if (shdrv && shdrv->probe) + return shdrv->probe(shdev); + + return -ENODEV; +} + +static int sh_device_remove(struct device *dev) +{ + struct sh_dev *shdev = to_sh_dev(dev); + struct sh_driver *shdrv = to_sh_driver(dev->driver); + + if (shdrv && shdrv->remove) + return shdrv->remove(shdev); + + return 0; +} + +int sh_device_register(struct sh_dev *dev) +{ + if (!dev) + return -EINVAL; + + if (dev->bus_id < 0 || dev->bus_id >= SH_NR_BUSES) { + printk(KERN_ERR "%s: bus_id invalid: %s bus: %d\n", + __FUNCTION__, dev->name, dev->bus_id); + return -EINVAL; + } + + dev->dev.parent = &sh_bus_devices[dev->bus_id]; + dev->dev.bus = &sh_bus_types[dev->bus_id]; + + /* This is needed for USB OHCI to work */ + if (dev->dma_mask) + dev->dev.dma_mask = dev->dma_mask; + + snprintf(dev->dev.bus_id, BUS_ID_SIZE, "%s%u", + dev->name, dev->dev_id); + + printk(KERN_INFO "Registering SH device '%s'. 
Parent at %s\n", + dev->dev.bus_id, dev->dev.parent->bus_id); + + return device_register(&dev->dev); +} + +void sh_device_unregister(struct sh_dev *dev) +{ + device_unregister(&dev->dev); +} + +int sh_driver_register(struct sh_driver *drv) +{ + if (!drv) + return -EINVAL; + + if (drv->bus_id < 0 || drv->bus_id >= SH_NR_BUSES) { + printk(KERN_ERR "%s: bus_id invalid: bus: %d device %d\n", + __FUNCTION__, drv->bus_id, drv->dev_id); + return -EINVAL; + } + + drv->drv.probe = sh_device_probe; + drv->drv.remove = sh_device_remove; + drv->drv.bus = &sh_bus_types[drv->bus_id]; + + return driver_register(&drv->drv); +} + +void sh_driver_unregister(struct sh_driver *drv) +{ + driver_unregister(&drv->drv); +} + +static int __init sh_bus_init(void) +{ + int i, ret = 0; + + for (i = 0; i < SH_NR_BUSES; i++) { + ret = device_register(&sh_bus_devices[i]); + if (ret != 0) { + printk(KERN_ERR "Unable to register bus device %s\n", + sh_bus_devices[i].bus_id); + continue; + } + + ret = bus_register(&sh_bus_types[i]); + if (ret != 0) { + printk(KERN_ERR "Unable to register bus %s\n", + sh_bus_types[i].name); + device_unregister(&sh_bus_devices[i]); + } + } + + printk(KERN_INFO "SH Virtual Bus initialized\n"); + + return ret; +} + +static void __exit sh_bus_exit(void) +{ + int i; + + for (i = 0; i < SH_NR_BUSES; i++) { + bus_unregister(&sh_bus_types[i]); + device_unregister(&sh_bus_devices[i]); + } +} + +module_init(sh_bus_init); +module_exit(sh_bus_exit); + +MODULE_AUTHOR("Paul Mundt "); +MODULE_DESCRIPTION("SH Virtual Bus"); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(sh_bus_types); +EXPORT_SYMBOL(sh_device_register); +EXPORT_SYMBOL(sh_device_unregister); +EXPORT_SYMBOL(sh_driver_register); +EXPORT_SYMBOL(sh_driver_unregister); + diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c new file mode 100644 index 000000000..8c2769c0d --- /dev/null +++ b/arch/sh/kernel/early_printk.c @@ -0,0 +1,135 @@ +/* + * arch/sh/kernel/early_printk.c + * + * Copyright (C) 1999, 2000 Niibe Yutaka + * Copyright (C) 2002 M. R. Brown + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include + +#ifdef CONFIG_SH_STANDARD_BIOS +#include + +/* + * Print a string through the BIOS + */ +static void sh_console_write(struct console *co, const char *s, + unsigned count) +{ + sh_bios_console_write(s, count); +} + +/* + * Setup initial baud/bits/parity. We do two things here: + * - construct a cflag setting for the first rs_open() + * - initialize the serial port + * Return non-zero if we didn't find a serial port. + */ +static int __init sh_console_setup(struct console *co, char *options) +{ + int cflag = CREAD | HUPCL | CLOCAL; + + /* + * Now construct a cflag setting. + * TODO: this is a totally bogus cflag, as we have + * no idea what serial settings the BIOS is using, or + * even if its using the serial port at all. 
+ */ + cflag |= B115200 | CS8 | /*no parity*/0; + + co->cflag = cflag; + + return 0; +} + +static struct console early_console = { + .name = "bios", + .write = sh_console_write, + .setup = sh_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, +}; +#endif + +#ifdef CONFIG_EARLY_SCIF_CONSOLE +#define SCIF_REG 0xffe80000 + +static void scif_sercon_putc(int c) +{ + while (!(ctrl_inw(SCIF_REG + 0x10) & 0x20)) ; + + ctrl_outb(c, SCIF_REG + 12); + ctrl_outw((ctrl_inw(SCIF_REG + 0x10) & 0x9f), SCIF_REG + 0x10); + + if (c == '\n') + scif_sercon_putc('\r'); +} + +static void scif_sercon_flush(void) +{ + ctrl_outw((ctrl_inw(SCIF_REG + 0x10) & 0xbf), SCIF_REG + 0x10); + + while (!(ctrl_inw(SCIF_REG + 0x10) & 0x40)) ; + + ctrl_outw((ctrl_inw(SCIF_REG + 0x10) & 0xbf), SCIF_REG + 0x10); +} + +static void scif_sercon_write(struct console *con, const char *s, unsigned count) +{ + while (count-- > 0) + scif_sercon_putc(*s++); + + scif_sercon_flush(); +} + +static int __init scif_sercon_setup(struct console *con, char *options) +{ + con->cflag = CREAD | HUPCL | CLOCAL | B115200 | CS8; + + return 0; +} + +static struct console early_console = { + .name = "sercon", + .write = scif_sercon_write, + .setup = scif_sercon_setup, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +void scif_sercon_init(int baud) +{ + ctrl_outw(0, SCIF_REG + 8); + ctrl_outw(0, SCIF_REG); + + /* Set baud rate */ + ctrl_outb((50000000 / (32 * baud)) - 1, SCIF_REG + 4); + + ctrl_outw(12, SCIF_REG + 24); + ctrl_outw(8, SCIF_REG + 24); + ctrl_outw(0, SCIF_REG + 32); + ctrl_outw(0x60, SCIF_REG + 16); + ctrl_outw(0, SCIF_REG + 36); + ctrl_outw(0x30, SCIF_REG + 8); +} +#endif + +void __init enable_early_printk(void) +{ +#ifdef CONFIG_EARLY_SCIF_CONSOLE + scif_sercon_init(115200); +#endif + register_console(&early_console); +} + +void disable_early_printk(void) +{ + unregister_console(&early_console); +} + diff --git a/arch/sh/ramdisk/Makefile b/arch/sh/ramdisk/Makefile new file mode 100644 index 000000000..a22d86bf0 --- /dev/null +++ b/arch/sh/ramdisk/Makefile @@ -0,0 +1,19 @@ +# +# Makefile for a ramdisk image +# + +obj-y += ramdisk.o + + +O_FORMAT = $(shell $(OBJDUMP) -i | head -n 2 | grep elf32) +img := $(subst ",,$(CONFIG_EMBEDDED_RAMDISK_IMAGE)) +# add $(src) when $(img) is relative +img := $(subst $(src)//,/,$(src)/$(img)) + +quiet_cmd_ramdisk = LD $@ +define cmd_ramdisk + $(LD) -T $(src)/ld.script -b binary --oformat $(O_FORMAT) -o $@ $(img) +endef + +$(obj)/ramdisk.o: $(img) $(src)/ld.script + $(call cmd,ramdisk) diff --git a/arch/sh/ramdisk/ld.script b/arch/sh/ramdisk/ld.script new file mode 100644 index 000000000..94beee248 --- /dev/null +++ b/arch/sh/ramdisk/ld.script @@ -0,0 +1,9 @@ +OUTPUT_ARCH(sh) +SECTIONS +{ + .initrd : + { + *(.data) + } +} + diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig new file mode 100644 index 000000000..0ac5d418e --- /dev/null +++ b/arch/sh64/Kconfig @@ -0,0 +1,320 @@ +# +# For a description of the syntax of this configuration file, +# see Documentation/kbuild/config-language.txt. 
+# + +mainmenu "Linux/SH64 Kernel Configuration" + +config SUPERH + bool + default y + +config SUPERH64 + bool + default y + +config MMU + bool + default y + +config UID16 + bool + default y + +config RWSEM_GENERIC_SPINLOCK + bool + default y + +config LOG_BUF_SHIFT + int + default 14 + +config RWSEM_XCHGADD_ALGORITHM + bool + +config GENERIC_ISA_DMA + bool + +source init/Kconfig + +menu "System type" + +choice + prompt "SuperH system type" + default SH_SIMULATOR + +config SH_GENERIC + bool "Generic" + +config SH_SIMULATOR + bool "Simulator" + +config SH_CAYMAN + bool "Cayman" + +config SH_ROMRAM + bool "ROM/RAM" + +config SH_HARP + bool "ST50-Harp" + +endchoice + +choice + prompt "Processor family" + default CPU_SH5 + +config CPU_SH5 + bool "SH-5" + +endchoice + +choice + prompt "Processor type" + +config CPU_SUBTYPE_SH5_101 + bool "SH5-101" + depends on CPU_SH5 + +config CPU_SUBTYPE_SH5_103 + bool "SH5-103" + depends on CPU_SH5 + +endchoice + +choice + prompt "Endianness" + default LITTLE_ENDIAN + +config LITTLE_ENDIAN + bool "Little-Endian" + +config BIG_ENDIAN + bool "Big-Endian" + +endchoice + +config SH64_FPU_DENORM_FLUSH + bool "Flush floating point denorms to zero" + +choice + prompt "Page table levels" + default SH64_PGTABLE_2_LEVEL + +config SH64_PGTABLE_2_LEVEL + bool "2" + +config SH64_PGTABLE_3_LEVEL + bool "3" + +endchoice + +choice + prompt "HugeTLB page size" + depends on HUGETLB_PAGE && MMU + default HUGETLB_PAGE_SIZE_64K + +config HUGETLB_PAGE_SIZE_64K + bool "64K" + +config HUGETLB_PAGE_SIZE_1MB + bool "1MB" + +config HUGETLB_PAGE_SIZE_512MB + bool "512MB" + +endchoice + +config SH64_USER_MISALIGNED_FIXUP + bool "Fixup misaligned loads/stores occurring in user mode" + +comment "Memory options" + +config CACHED_MEMORY_OFFSET + hex "Cached Area Offset" + depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR + default "20000000" + +config MEMORY_START + hex "Physical memory start address" + depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR + default "80000000" + +config MEMORY_SIZE_IN_MB + int "Memory size (in MB)" if SH_HARP || SH_CAYMAN || SH_SIMULATOR + default "64" if SH_HARP || SH_CAYMAN + default "8" if SH_SIMULATOR + +comment "Cache options" + +config DCACHE_DISABLED + bool "DCache Disabling" + depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR + +choice + prompt "DCache mode" + depends on !DCACHE_DISABLED && !SH_SIMULATOR + default DCACHE_WRITE_BACK + +config DCACHE_WRITE_BACK + bool "Write-back" + +config DCACHE_WRITE_THROUGH + bool "Write-through" + +endchoice + +config ICACHE_DISABLED + bool "ICache Disabling" + depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR + +config PCIDEVICE_MEMORY_START + hex + depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR + default "C0000000" + +config DEVICE_MEMORY_START + hex + depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR + default "E0000000" + +config FLASH_MEMORY_START + hex "Flash memory/on-chip devices start address" + depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR + default "00000000" + +config PCI_BLOCK_START + hex "PCI block start address" + depends on SH_HARP || SH_CAYMAN || SH_SIMULATOR + default "40000000" + +comment "CPU Subtype specific options" + +config SH64_ID2815_WORKAROUND + bool "Include workaround for SH5-101 cut2 silicon defect ID2815" + +comment "Misc options" +config HEARTBEAT + bool "Heartbeat LED" + +config HDSP253_LED + bool "Support for HDSP-253 LED" + depends on SH_CAYMAN + +config SH_DMA + tristate "DMA controller (DMAC) support" + +config PREEMPT + bool "Preemptible Kernel (EXPERIMENTAL)" + depends on EXPERIMENTAL 
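+
+# The symbols above (CONFIG_CACHED_MEMORY_OFFSET, CONFIG_MEMORY_START and
+# CONFIG_MEMORY_SIZE_IN_MB) become ordinary preprocessor constants, so C
+# code can derive the cached mapping of RAM from them; the arch Makefile
+# relies on the same relation when it defines phys_stext as
+# _stext - CONFIG_CACHED_MEMORY_OFFSET. A small illustrative sketch (the
+# helper name phys_to_cached is made up, not something this patch defines):
+#
+#	#include <linux/config.h>
+#
+#	static inline unsigned long phys_to_cached(unsigned long paddr)
+#	{
+#		return paddr + CONFIG_CACHED_MEMORY_OFFSET;
+#	}
+#
+#	/* one byte past the end of RAM, in the cached mapping */
+#	#define SH64_MEM_TOP \
+#		phys_to_cached(CONFIG_MEMORY_START + \
+#			       ((unsigned long)CONFIG_MEMORY_SIZE_IN_MB << 20))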
+ +endmenu + +menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)" + +config ISA + bool + +config SBUS + bool + +config PCI + bool "PCI support" + help + Find out whether you have a PCI motherboard. PCI is the name of a + bus system, i.e. the way the CPU talks to the other stuff inside + your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or + VESA. If you have PCI, say Y, otherwise N. + + The PCI-HOWTO, available from + , contains valuable + information about which PCI hardware does work under Linux and which + doesn't. + +config SH_PCIDMA_NONCOHERENT + bool "Cache and PCI noncoherent" + depends on PCI + default y + help + Enable this option if your platform does not have a CPU cache which + remains coherent with PCI DMA. It is safest to say 'Y', although you + will see better performance if you can say 'N', because the PCI DMA + code will not have to flush the CPU's caches. If you have a PCI host + bridge integrated with your SH CPU, refer carefully to the chip specs + to see if you can say 'N' here. Otherwise, leave it as 'Y'. + +source "drivers/pci/Kconfig" + +source "drivers/pcmcia/Kconfig" + +source "drivers/pci/hotplug/Kconfig" + +endmenu + +menu "Executable file formats" + +source "fs/Kconfig.binfmt" + +endmenu + +source "drivers/Kconfig" + +source "fs/Kconfig" + +source "arch/sh64/oprofile/Kconfig" + +menu "Kernel hacking" + +config MAGIC_SYSRQ + bool "Magic SysRq key" + help + If you say Y here, you will have some control over the system even + if the system crashes for example during kernel debugging (e.g., you + will be able to flush the buffer cache to disk, reboot the system + immediately or dump some status information). This is accomplished + by pressing various keys while holding SysRq (Alt+PrintScreen). It + also works on a serial console (on PC hardware at least), if you + send a BREAK and then within 5 seconds a command keypress. The + keys are documented in Documentation/sysrq.txt. Don't say Y unless + you really know what this hack does. + +config EARLY_PRINTK + bool "Early SCIF console support" + +config DEBUG_KERNEL_WITH_GDB_STUB + bool "GDB Stub kernel debug" + +config SH64_PROC_TLB + bool "Debug: report TLB fill/purge activity through /proc/tlb" + depends on PROC_FS + +config SH64_PROC_ASIDS + bool "Debug: report ASIDs through /proc/asids" + depends on PROC_FS + +config SH64_SR_WATCH + bool "Debug: set SR.WATCH to enable hardware watchpoints and trace" + +config SH_ALPHANUMERIC + bool "Enable debug outputs to on-board alphanumeric display" + +config SH_NO_BSS_INIT + bool "Avoid zeroing BSS (to speed-up startup on suitable platforms)" + +config FRAME_POINTER + bool "Compile the kernel with frame pointers" + default y if KGDB + help + If you say Y here the resulting kernel image will be slightly larger + and slower, but it will give very useful debugging information. + If you don't debug the kernel, you can say N, but we may not be able + to solve problems without frame pointers. + +endmenu + +source "security/Kconfig" + +source "crypto/Kconfig" + +source "lib/Kconfig" + diff --git a/arch/sh64/Makefile b/arch/sh64/Makefile new file mode 100644 index 000000000..62586a088 --- /dev/null +++ b/arch/sh64/Makefile @@ -0,0 +1,112 @@ +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
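+#
+# Note on the LDFLAGS below: the 32-bit jiffies symbol is aliased onto the
+# 64-bit jiffies_64 counter with --defsym, so it must point at the
+# low-order word: offset 0 on a little-endian link and offset +4 on a
+# big-endian one.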
+# +# Copyright (C) 2000, 2001 Paolo Alberelli +# Copyright (C) 2003, 2004 Paul Mundt +# +# This file is included by the global makefile so that you can add your own +# architecture-specific flags and dependencies. Remember to do have actions +# for "archclean" and "archdep" for cleaning up and making dependencies for +# this architecture +# +# Note that top level Makefile automagically builds dependencies for SUBDIRS +# but does not automagically clean SUBDIRS. Therefore "archclean" should clean +# up all, "archdep" does nothing on added SUBDIRS. +# +ifndef include_config +-include .config +endif + +cpu-y := -mb +cpu-$(CONFIG_LITTLE_ENDIAN) := -ml + +cpu-$(CONFIG_CPU_SH5) += -m5-32media-nofpu + +ifdef CONFIG_LITTLE_ENDIAN +LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' +LDFLAGS += -EL -mshlelf32_linux +else +LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' +LDFLAGS += -EB -mshelf32_linux +endif + +# No requirements for endianess support from AFLAGS, 'as' always run through gcc +AFLAGS += -m5 -isa=sh64 -traditional +CFLAGS += $(cpu-y) + +LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_CACHED_MEMORY_OFFSET) \ + -e phys_stext + +OBJCOPYFLAGS := -O binary -R .note -R .comment -R .stab -R .stabstr -S + +ifdef LOADADDR +LINKFLAGS += -Ttext $(word 1,$(LOADADDR)) +endif + +machine-$(CONFIG_SH_CAYMAN) := cayman +machine-$(CONFIG_SH_SIMULATOR) := sim +machine-$(CONFIG_SH_HARP) := harp +machine-$(CONFIG_SH_ROMRAM) := romram + +head-y := arch/$(ARCH)/kernel/head.o arch/$(ARCH)/kernel/init_task.o + +core-y += $(addprefix arch/$(ARCH)/, kernel/ mm/ mach-$(machine-y)/) + +LIBGCC := $(shell $(CC) $(CFLAGS) -print-libgcc-file-name) +libs-y += arch/$(ARCH)/lib/ $(LIBGCC) + +drivers-$(CONFIG_OPROFILE) += arch/sh64/oprofile/ + +boot := arch/$(ARCH)/boot + +zImage: vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ + +compressed: zImage + +archclean: + $(Q)$(MAKE) $(clean)=$(boot) + +prepare: include/asm-$(ARCH)/asm-offsets.h arch/$(ARCH)/lib/syscalltab.h + +include/asm-$(ARCH)/asm-offsets.h: arch/$(ARCH)/kernel/asm-offsets.s \ + include/asm include/linux/version.h + $(call filechk,gen-asm-offsets) + +define filechk_gen-syscalltab + (set -e; \ + echo "/*"; \ + echo " * DO NOT MODIFY."; \ + echo " *"; \ + echo " * This file was generated by arch/$(ARCH)/Makefile"; \ + echo " * Any changes will be reverted at build time."; \ + echo " */"; \ + echo ""; \ + echo "#ifndef __SYSCALLTAB_H"; \ + echo "#define __SYSCALLTAB_H"; \ + echo ""; \ + echo "#include "; \ + echo ""; \ + echo "struct syscall_info {"; \ + echo " const char *name;"; \ + echo "} syscall_info_table[] = {"; \ + sed -e '/^.*\.long /!d;s//\t{ "/;s/\(\([^/]*\)\/\)\{1\}.*/\2/; \ + s/[ \t]*$$//g;s/$$/" },/;s/\("\)sys_/\1/g'; \ + echo "};"; \ + echo ""; \ + echo "#define NUM_SYSCALL_INFO_ENTRIES ARRAY_SIZE(syscall_info_table)"; \ + echo ""; \ + echo "#endif /* __SYSCALLTAB_H */" ) +endef + +arch/$(ARCH)/lib/syscalltab.h: arch/sh64/kernel/syscalls.S + $(call filechk,gen-syscalltab) + +CLEAN_FILES += include/asm-$(ARCH)/asm-offsets.h arch/$(ARCH)/lib/syscalltab.h + +define archhelp + @echo ' zImage - Compressed kernel image (arch/sh64/boot/zImage)' +endef + diff --git a/arch/sh64/boot/Makefile b/arch/sh64/boot/Makefile new file mode 100644 index 000000000..fb71087b7 --- /dev/null +++ b/arch/sh64/boot/Makefile @@ -0,0 +1,20 @@ +# +# arch/sh64/boot/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
+# +# Copyright (C) 2002 Stuart Menefy +# + +targets := zImage +subdir- := compressed + +$(obj)/zImage: $(obj)/compressed/vmlinux FORCE + $(call if_changed,objcopy) + @echo 'Kernel: $@ is ready' + +$(obj)/compressed/vmlinux: FORCE + $(Q)$(MAKE) $(build)=$(obj)/compressed $@ + diff --git a/arch/sh64/boot/compressed/Makefile b/arch/sh64/boot/compressed/Makefile new file mode 100644 index 000000000..3f6cbf017 --- /dev/null +++ b/arch/sh64/boot/compressed/Makefile @@ -0,0 +1,46 @@ +# +# linux/arch/sh64/boot/compressed/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 2002 Stuart Menefy +# Copyright (C) 2004 Paul Mundt +# +# create a compressed vmlinux image from the original vmlinux +# + +targets := vmlinux vmlinux.bin vmlinux.bin.gz \ + head.o misc.o cache.o piggy.o vmlinux.lds.o + +EXTRA_AFLAGS := -traditional + +OBJECTS := $(obj)/head.o $(obj)/misc.o $(obj)/cache.o + +# +# ZIMAGE_OFFSET is the load offset of the compression loader +# (4M for the kernel plus 64K for this loader) +# +ZIMAGE_OFFSET = $(shell printf "0x%8x" $$[$(CONFIG_MEMORY_START)+0x400000+0x10000]) + +LDFLAGS_vmlinux := -Ttext $(ZIMAGE_OFFSET) -e startup \ + -T $(obj)/../../kernel/vmlinux.lds.s \ + --no-warn-mismatch + +$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE + $(call if_changed,ld) + @: + +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) + +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) + +LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh64-linux -T +OBJCOPYFLAGS += -R .empty_zero_page + +$(obj)/piggy.o: $(obj)/vmlinux.lds.s $(obj)/vmlinux.bin.gz FORCE + $(call if_changed,ld) + diff --git a/arch/sh64/boot/compressed/cache.c b/arch/sh64/boot/compressed/cache.c new file mode 100644 index 000000000..708707355 --- /dev/null +++ b/arch/sh64/boot/compressed/cache.c @@ -0,0 +1,39 @@ +/* + * arch/shmedia/boot/compressed/cache.c -- simple cache management functions + * + * Code extracted from sh-ipl+g, sh-stub.c, which has the copyright: + * + * This is originally based on an m68k software stub written by Glenn + * Engel at HP, but has changed quite a bit. + * + * Modifications for the SH by Ben Lee and Steve Chamberlain + * +**************************************************************************** + + THIS SOFTWARE IS NOT COPYRIGHTED + + HP offers the following for use in the public domain. HP makes no + warranty with regard to the software or it's performance and the + user accepts the software "AS IS" with all faults. + + HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD + TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + +****************************************************************************/ + +#define CACHE_ENABLE 0 +#define CACHE_DISABLE 1 + +int cache_control(unsigned int command) +{ + volatile unsigned int *p = (volatile unsigned int *) 0x80000000; + int i; + + for (i = 0; i < (32 * 1024); i += 32) { + (void *) *p; + p += (32 / sizeof (int)); + } + + return 0; +} diff --git a/arch/sh64/boot/compressed/head.S b/arch/sh64/boot/compressed/head.S new file mode 100644 index 000000000..82040b1a2 --- /dev/null +++ b/arch/sh64/boot/compressed/head.S @@ -0,0 +1,164 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/shmedia/boot/compressed/head.S + * + * Copied from + * arch/shmedia/kernel/head.S + * which carried the copyright: + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * Modification for compressed loader: + * Copyright (C) 2002 Stuart Menefy (stuart.menefy@st.com) + */ + +#include +#include +#include +#include + +/* + * Fixed TLB entries to identity map the beginning of RAM + */ +#define MMUIR_TEXT_H 0x0000000000000003 | CONFIG_MEMORY_START + /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */ +#define MMUIR_TEXT_L 0x000000000000009a | CONFIG_MEMORY_START + /* 512 Mb, Cacheable (Write-back), execute, Not User, Ph. Add. */ + +#define MMUDR_CACHED_H 0x0000000000000003 | CONFIG_MEMORY_START + /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */ +#define MMUDR_CACHED_L 0x000000000000015a | CONFIG_MEMORY_START + /* 512 Mb, Cacheable (Write-back), read/write, Not User, Ph. Add. */ + +#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */ +#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */ + +#if 1 +#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* OCE + OCI + WB */ +#else +#define OCCR0_INIT_VAL OCCR0_OFF +#endif +#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */ + + .text + + .global startup +startup: + /* + * Prevent speculative fetch on device memory due to + * uninitialized target registers. + * This must be executed before the first branch. + */ + ptabs/u ZERO, tr0 + ptabs/u ZERO, tr1 + ptabs/u ZERO, tr2 + ptabs/u ZERO, tr3 + ptabs/u ZERO, tr4 + ptabs/u ZERO, tr5 + ptabs/u ZERO, tr6 + ptabs/u ZERO, tr7 + synci + + /* + * Set initial TLB entries for cached and uncached regions. + * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't ! + */ + /* Clear ITLBs */ + pta 1f, tr1 + movi ITLB_FIXED, r21 + movi ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22 +1: putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */ + addi r21, TLB_STEP, r21 + bne r21, r22, tr1 + + /* Clear DTLBs */ + pta 1f, tr1 + movi DTLB_FIXED, r21 + movi DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22 +1: putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */ + addi r21, TLB_STEP, r21 + bne r21, r22, tr1 + + /* Map one big (512Mb) page for ITLB */ + movi ITLB_FIXED, r21 + movi MMUIR_TEXT_L, r22 /* PTEL first */ + putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */ + movi MMUIR_TEXT_H, r22 /* PTEH last */ + putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */ + + /* Map one big CACHED (512Mb) page for DTLB */ + movi DTLB_FIXED, r21 + movi MMUDR_CACHED_L, r22 /* PTEL first */ + putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */ + movi MMUDR_CACHED_H, r22 /* PTEH last */ + putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */ + + /* ICache */ + movi ICCR_BASE, r21 + movi ICCR0_INIT_VAL, r22 + movi ICCR1_INIT_VAL, r23 + putcfg r21, ICCR_REG0, r22 + putcfg r21, ICCR_REG1, r23 + synci + + /* OCache */ + movi OCCR_BASE, r21 + movi OCCR0_INIT_VAL, r22 + movi OCCR1_INIT_VAL, r23 + putcfg r21, OCCR_REG0, r22 + putcfg r21, OCCR_REG1, r23 + synco + + /* + * Enable the MMU. + * From here-on code can be non-PIC. + */ + movi SR_HARMLESS | SR_ENABLE_MMU, r22 + putcon r22, SSR + movi 1f, r22 + putcon r22, SPC + synco + rte /* And now go into the hyperspace ... */ +1: /* ... that's the next instruction ! */ + + /* Set initial stack pointer */ + movi datalabel stack_start, r0 + ld.l r0, 0, r15 + + /* + * Clear bss + */ + pt 1f, tr1 + movi datalabel __bss_start, r22 + movi datalabel _end, r23 +1: st.l r22, 0, ZERO + addi r22, 4, r22 + bne r22, r23, tr1 + + /* + * Decompress the kernel. 
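+	 *
+	 * pt loads the address of decompress_kernel() into target register
+	 * tr0, and blink branches there while saving the return address in
+	 * r18 (the SH-5 link register), so control returns here once the
+	 * image has been inflated.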
+ */ + pt decompress_kernel, tr0 + blink tr0, r18 + + /* + * Disable the MMU. + */ + movi SR_HARMLESS, r22 + putcon r22, SSR + movi 1f, r22 + putcon r22, SPC + synco + rte /* And now go into the hyperspace ... */ +1: /* ... that's the next instruction ! */ + + /* Jump into the decompressed kernel */ + movi datalabel (CONFIG_MEMORY_START + 0x2000)+1, r19 + ptabs r19, tr0 + blink tr0, r18 + + /* Shouldn't return here, but just in case, loop forever */ + pt 1f, tr0 +1: blink tr0, ZERO diff --git a/arch/sh64/boot/compressed/install.sh b/arch/sh64/boot/compressed/install.sh new file mode 100644 index 000000000..90589f0fe --- /dev/null +++ b/arch/sh64/boot/compressed/install.sh @@ -0,0 +1,56 @@ +#!/bin/sh +# +# arch/sh/boot/install.sh +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1995 by Linus Torvalds +# +# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin +# Adapted from code in arch/i386/boot/install.sh by Russell King +# Adapted from code in arch/arm/boot/install.sh by Stuart Menefy +# +# "make install" script for sh architecture +# +# Arguments: +# $1 - kernel version +# $2 - kernel image file +# $3 - kernel map file +# $4 - default install path (blank if root directory) +# + +# User may have a custom install script + +if [ -x /sbin/installkernel ]; then + exec /sbin/installkernel "$@" +fi + +if [ "$2" = "zImage" ]; then +# Compressed install + echo "Installing compressed kernel" + if [ -f $4/vmlinuz-$1 ]; then + mv $4/vmlinuz-$1 $4/vmlinuz.old + fi + + if [ -f $4/System.map-$1 ]; then + mv $4/System.map-$1 $4/System.old + fi + + cat $2 > $4/vmlinuz-$1 + cp $3 $4/System.map-$1 +else +# Normal install + echo "Installing normal kernel" + if [ -f $4/vmlinux-$1 ]; then + mv $4/vmlinux-$1 $4/vmlinux.old + fi + + if [ -f $4/System.map ]; then + mv $4/System.map $4/System.old + fi + + cat $2 > $4/vmlinux-$1 + cp $3 $4/System.map +fi diff --git a/arch/sh64/boot/compressed/misc.c b/arch/sh64/boot/compressed/misc.c new file mode 100644 index 000000000..89dbf45df --- /dev/null +++ b/arch/sh64/boot/compressed/misc.c @@ -0,0 +1,251 @@ +/* + * arch/shmedia/boot/compressed/misc.c + * + * This is a collection of several routines from gzip-1.0.3 + * adapted for Linux. 
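+ *
+ * Flow: decompress_kernel() points output_data at the start of RAM plus
+ * 0x2000 (CONFIG_MEMORY_START + 0x2000), then gunzip() from lib/inflate.c
+ * pulls compressed bytes in through get_byte()/fill_inbuf() and emits each
+ * 32 KB window via flush_window(), which copies it to output_data and
+ * updates the running CRC and byte counters.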
+ * + * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 + * + * Adapted for SHmedia from sh by Stuart Menefy, May 2002 + */ + +#include +#include + +/* cache.c */ +#define CACHE_ENABLE 0 +#define CACHE_DISABLE 1 +int cache_control(unsigned int command); + +/* + * gzip declarations + */ + +#define OF(args) args +#define STATIC static + +#undef memset +#undef memcpy +#define memzero(s, n) memset ((s), 0, (n)) + +typedef unsigned char uch; +typedef unsigned short ush; +typedef unsigned long ulg; + +#define WSIZE 0x8000 /* Window size must be at least 32k, */ + /* and a power of two */ + +static uch *inbuf; /* input buffer */ +static uch window[WSIZE]; /* Sliding window buffer */ + +static unsigned insize = 0; /* valid bytes in inbuf */ +static unsigned inptr = 0; /* index of next byte to be processed in inbuf */ +static unsigned outcnt = 0; /* bytes in output buffer */ + +/* gzip flag byte */ +#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */ +#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */ +#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */ +#define ORIG_NAME 0x08 /* bit 3 set: original file name present */ +#define COMMENT 0x10 /* bit 4 set: file comment present */ +#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */ +#define RESERVED 0xC0 /* bit 6,7: reserved */ + +#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf()) + +/* Diagnostic functions */ +#ifdef DEBUG +# define Assert(cond,msg) {if(!(cond)) error(msg);} +# define Trace(x) fprintf x +# define Tracev(x) {if (verbose) fprintf x ;} +# define Tracevv(x) {if (verbose>1) fprintf x ;} +# define Tracec(c,x) {if (verbose && (c)) fprintf x ;} +# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;} +#else +# define Assert(cond,msg) +# define Trace(x) +# define Tracev(x) +# define Tracevv(x) +# define Tracec(c,x) +# define Tracecv(c,x) +#endif + +static int fill_inbuf(void); +static void flush_window(void); +static void error(char *m); +static void gzip_mark(void **); +static void gzip_release(void **); + +extern char input_data[]; +extern int input_len; + +static long bytes_out = 0; +static uch *output_data; +static unsigned long output_ptr = 0; + +static void *malloc(int size); +static void free(void *where); +static void error(char *m); +static void gzip_mark(void **); +static void gzip_release(void **); + +static void puts(const char *); + +extern int _text; /* Defined in vmlinux.lds.S */ +extern int _end; +static unsigned long free_mem_ptr; +static unsigned long free_mem_end_ptr; + +#define HEAP_SIZE 0x10000 + +#include "../../../../lib/inflate.c" + +static void *malloc(int size) +{ + void *p; + + if (size < 0) + error("Malloc error\n"); + if (free_mem_ptr == 0) + error("Memory error\n"); + + free_mem_ptr = (free_mem_ptr + 3) & ~3; /* Align */ + + p = (void *) free_mem_ptr; + free_mem_ptr += size; + + if (free_mem_ptr >= free_mem_end_ptr) + error("\nOut of memory\n"); + + return p; +} + +static void free(void *where) +{ /* Don't care */ +} + +static void gzip_mark(void **ptr) +{ + *ptr = (void *) free_mem_ptr; +} + +static void gzip_release(void **ptr) +{ + free_mem_ptr = (long) *ptr; +} + +void puts(const char *s) +{ +} + +void *memset(void *s, int c, size_t n) +{ + int i; + char *ss = (char *) s; + + for (i = 0; i < n; i++) + ss[i] = c; + return s; +} + +void *memcpy(void *__dest, __const void *__src, size_t __n) +{ + int i; + char *d = (char *) __dest, *s = (char *) __src; + + for (i = 0; i < __n; i++) + d[i] = s[i]; + return __dest; +} + +/* 
=========================================================================== + * Fill the input buffer. This is called only when the buffer is empty + * and at least one byte is really needed. + */ +static int fill_inbuf(void) +{ + if (insize != 0) { + error("ran out of input data\n"); + } + + inbuf = input_data; + insize = input_len; + inptr = 1; + return inbuf[0]; +} + +/* =========================================================================== + * Write the output window window[0..outcnt-1] and update crc and bytes_out. + * (Used for the decompressed data only.) + */ +static void flush_window(void) +{ + ulg c = crc; /* temporary variable */ + unsigned n; + uch *in, *out, ch; + + in = window; + out = &output_data[output_ptr]; + for (n = 0; n < outcnt; n++) { + ch = *out++ = *in++; + c = crc_32_tab[((int) c ^ ch) & 0xff] ^ (c >> 8); + } + crc = c; + bytes_out += (ulg) outcnt; + output_ptr += (ulg) outcnt; + outcnt = 0; + puts("."); +} + +static void error(char *x) +{ + puts("\n\n"); + puts(x); + puts("\n\n -- System halted"); + + while (1) ; /* Halt */ +} + +#define STACK_SIZE (4096) +long __attribute__ ((aligned(8))) user_stack[STACK_SIZE]; +long *stack_start = &user_stack[STACK_SIZE]; + +void decompress_kernel(void) +{ + output_data = (uch *) (CONFIG_MEMORY_START + 0x2000); + free_mem_ptr = (unsigned long) &_end; + free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; + + makecrc(); + puts("Uncompressing Linux... "); + cache_control(CACHE_ENABLE); + gunzip(); + puts("\n"); + +#if 0 + /* When booting from ROM may want to do something like this if the + * boot loader doesn't. + */ + + /* Set up the parameters and command line */ + { + volatile unsigned int *parambase = + (int *) (CONFIG_MEMORY_START + 0x1000); + + parambase[0] = 0x1; /* MOUNT_ROOT_RDONLY */ + parambase[1] = 0x0; /* RAMDISK_FLAGS */ + parambase[2] = 0x0200; /* ORIG_ROOT_DEV */ + parambase[3] = 0x0; /* LOADER_TYPE */ + parambase[4] = 0x0; /* INITRD_START */ + parambase[5] = 0x0; /* INITRD_SIZE */ + parambase[6] = 0; + + strcpy((char *) ((int) parambase + 0x100), + "console=ttySC0,38400"); + } +#endif + + puts("Ok, booting the kernel.\n"); + + cache_control(CACHE_DISABLE); +} diff --git a/arch/sh64/boot/compressed/vmlinux.lds.S b/arch/sh64/boot/compressed/vmlinux.lds.S new file mode 100644 index 000000000..15a737d9b --- /dev/null +++ b/arch/sh64/boot/compressed/vmlinux.lds.S @@ -0,0 +1,65 @@ +/* + * ld script to make compressed SuperH/shmedia Linux kernel+decompression + * bootstrap + * Modified by Stuart Menefy from arch/sh/vmlinux.lds.S written by Niibe Yutaka + */ + +#include + +#ifdef CONFIG_LITTLE_ENDIAN +/* OUTPUT_FORMAT("elf32-sh64l-linux", "elf32-sh64l-linux", "elf32-sh64l-linux") */ +#define NOP 0x6ff0fff0 +#else +/* OUTPUT_FORMAT("elf32-sh64", "elf32-sh64", "elf32-sh64") */ +#define NOP 0xf0fff06f +#endif + +OUTPUT_FORMAT("elf32-sh64-linux") +OUTPUT_ARCH(sh) +ENTRY(_start) + +#define ALIGNED_GAP(section, align) (((ADDR(section)+SIZEOF(section)+(align)-1) & ~((align)-1))-ADDR(section)) +#define FOLLOWING(section, align) AT (LOADADDR(section) + ALIGNED_GAP(section,align)) + +SECTIONS +{ + _text = .; /* Text and read-only data */ + + .text : { + *(.text) + *(.text64) + *(.text..SHmedia32) + *(.fixup) + *(.gnu.warning) + } = NOP + . = ALIGN(4); + .rodata : { *(.rodata) } + + /* There is no 'real' reason for eight byte alignment, four would work + * as well, but gdb downloads much (*4) faster with this. + */ + . = ALIGN(8); + .image : { *(.image) } + . 
= ALIGN(4); + _etext = .; /* End of text section */ + + .data : /* Data */ + FOLLOWING(.image, 4) + { + _data = .; + *(.data) + } + _data_image = LOADADDR(.data);/* Address of data section in ROM */ + + _edata = .; /* End of data section */ + + .stack : { stack = .; _stack = .; } + + . = ALIGN(4); + __bss_start = .; /* BSS */ + .bss : { + *(.bss) + } + . = ALIGN(4); + _end = . ; +} diff --git a/arch/sh64/configs/cayman_defconfig b/arch/sh64/configs/cayman_defconfig new file mode 100644 index 000000000..6dd7cea5d --- /dev/null +++ b/arch/sh64/configs/cayman_defconfig @@ -0,0 +1,660 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_SUPERH=y +CONFIG_SUPERH64=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_LOG_BUF_SHIFT=14 + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +CONFIG_POSIX_MQUEUE=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# System type +# +# CONFIG_SH_GENERIC is not set +# CONFIG_SH_SIMULATOR is not set +CONFIG_SH_CAYMAN=y +# CONFIG_SH_ROMRAM is not set +# CONFIG_SH_HARP is not set + +# +# Processor type and features +# +CONFIG_CPU_SH5=y +CONFIG_CPU_SUBTYPE_SH5_101=y +# CONFIG_CPU_SUBTYPE_SH5_103 is not set +CONFIG_LITTLE_ENDIAN=y +# CONFIG_BIG_ENDIAN is not set +# CONFIG_SH64_FPU_DENORM_FLUSH is not set +CONFIG_SH64_PGTABLE_2_LEVEL=y +# CONFIG_SH64_PGTABLE_3_LEVEL is not set +CONFIG_HUGETLB_PAGE_SIZE_64K=y +# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set +# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set +CONFIG_SH64_USER_MISALIGNED_FIXUP=y + +# +# Memory options +# +CONFIG_CACHED_MEMORY_OFFSET=0x20000000 +CONFIG_MEMORY_START=0x80000000 +CONFIG_MEMORY_SIZE_IN_MB=128 + +# +# Cache options +# +# CONFIG_DCACHE_DISABLED is not set +CONFIG_DCACHE_WRITE_BACK=y +# CONFIG_DCACHE_WRITE_THROUGH is not set +# CONFIG_ICACHE_DISABLED is not set +CONFIG_PCIDEVICE_MEMORY_START=C0000000 +CONFIG_DEVICE_MEMORY_START=E0000000 +CONFIG_FLASH_MEMORY_START=0x00000000 +CONFIG_PCI_BLOCK_START=0x40000000 + +# +# CPU Subtype specific options +# +CONFIG_SH64_ID2815_WORKAROUND=y + +# +# Misc options +# +CONFIG_HEARTBEAT=y +CONFIG_HDSP253_LED=y +CONFIG_SH_DMA=y +CONFIG_PREEMPT=y + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +CONFIG_PCI=y +CONFIG_SH_PCIDMA_NONCOHERENT=y +CONFIG_PCI_LEGACY_PROC=y +CONFIG_PCI_NAMES=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_FLAT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_CARMEL is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# 
CONFIG_BLK_DEV_INITRD is not set +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +# CONFIG_STNIC is not set +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set + +# +# Tulip family network device support +# +CONFIG_NET_TULIP=y +# CONFIG_DE2104X is not set +CONFIG_TULIP=y +# CONFIG_TULIP_MWI is not set +# CONFIG_TULIP_MMIO is not set +# CONFIG_TULIP_NAPI is not set +# CONFIG_DE4X5 is not set +# CONFIG_WINBOND_840 is not set +# CONFIG_DM9102 is not set +# CONFIG_HP100 is not set +CONFIG_NET_PCI=y +# CONFIG_PCNET32 is not set +# CONFIG_AMD8111_ETH is not set +# CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_B44 is not set +# CONFIG_FORCEDETH is not set +# CONFIG_DGRS is not set +# CONFIG_EEPRO100 is not set +# CONFIG_E100 is not set +# CONFIG_FEALNX is not set +# CONFIG_NATSEMI is not set +# CONFIG_NE2K_PCI is not set +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_SIS900 is not set +# CONFIG_EPIC100 is not set +# CONFIG_SUNDANCE is not set +# CONFIG_TLAN is not set +# CONFIG_VIA_RHINE is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN 
(non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# CONFIG_SH_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +CONFIG_FB=y +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_E1355 is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON_OLD is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +CONFIG_FB_KYRO=y +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_VIRTUAL is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_PCI_CONSOLE=y +CONFIG_FONTS=y +# CONFIG_FONT_8x8 is not set +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# 
CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set + +# +# Logo configuration +# +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +# CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_LOGO_SUPERH_MONO is not set +# CONFIG_LOGO_SUPERH_VGA16 is not set +CONFIG_LOGO_SUPERH_CLUT224=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +CONFIG_MINIX_FS=y +CONFIG_ROMFS_FS=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Kernel hacking +# +CONFIG_MAGIC_SYSRQ=y +# CONFIG_EARLY_PRINTK is not set +# CONFIG_DEBUG_KERNEL_WITH_GDB_STUB is not set +# CONFIG_SH64_PROC_TLB is not set +# CONFIG_SH64_PROC_ASIDS is not set +CONFIG_SH64_SR_WATCH=y +# CONFIG_SH_ALPHANUMERIC is not set +# CONFIG_SH_NO_BSS_INIT is not set +CONFIG_FRAME_POINTER=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set diff --git a/arch/sh64/defconfig b/arch/sh64/defconfig new file mode 100644 index 000000000..e6233e8fb --- /dev/null +++ b/arch/sh64/defconfig @@ -0,0 +1,668 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_SUPERH=y +CONFIG_SUPERH64=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_LOG_BUF_SHIFT=14 + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +CONFIG_POSIX_MQUEUE=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +# 
CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# System type +# +# CONFIG_SH_GENERIC is not set +# CONFIG_SH_SIMULATOR is not set +CONFIG_SH_CAYMAN=y +# CONFIG_SH_ROMRAM is not set +# CONFIG_SH_HARP is not set +CONFIG_CPU_SH5=y +CONFIG_CPU_SUBTYPE_SH5_101=y +# CONFIG_CPU_SUBTYPE_SH5_103 is not set +CONFIG_LITTLE_ENDIAN=y +# CONFIG_BIG_ENDIAN is not set +# CONFIG_SH64_FPU_DENORM_FLUSH is not set +CONFIG_SH64_PGTABLE_2_LEVEL=y +# CONFIG_SH64_PGTABLE_3_LEVEL is not set +CONFIG_HUGETLB_PAGE_SIZE_64K=y +# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set +# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set +CONFIG_SH64_USER_MISALIGNED_FIXUP=y + +# +# Memory options +# +CONFIG_CACHED_MEMORY_OFFSET=0x20000000 +CONFIG_MEMORY_START=0x80000000 +CONFIG_MEMORY_SIZE_IN_MB=128 + +# +# Cache options +# +# CONFIG_DCACHE_DISABLED is not set +CONFIG_DCACHE_WRITE_BACK=y +# CONFIG_DCACHE_WRITE_THROUGH is not set +# CONFIG_ICACHE_DISABLED is not set +CONFIG_PCIDEVICE_MEMORY_START=C0000000 +CONFIG_DEVICE_MEMORY_START=E0000000 +CONFIG_FLASH_MEMORY_START=0x00000000 +CONFIG_PCI_BLOCK_START=0x40000000 + +# +# CPU Subtype specific options +# +CONFIG_SH64_ID2815_WORKAROUND=y + +# +# Misc options +# +CONFIG_HEARTBEAT=y +CONFIG_HDSP253_LED=y +CONFIG_SH_DMA=y +CONFIG_PREEMPT=y + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +CONFIG_PCI=y +CONFIG_SH_PCIDMA_NONCOHERENT=y +CONFIG_PCI_LEGACY_PROC=y +CONFIG_PCI_NAMES=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_FLAT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_INITRD is not set +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration 
(EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +# CONFIG_STNIC is not set +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set + +# +# Tulip family network device support +# +CONFIG_NET_TULIP=y +# CONFIG_DE2104X is not set +CONFIG_TULIP=y +# CONFIG_TULIP_MWI is not set +# CONFIG_TULIP_MMIO is not set +# CONFIG_TULIP_NAPI is not set +# CONFIG_DE4X5 is not set +# CONFIG_WINBOND_840 is not set +# CONFIG_DM9102 is not set +# CONFIG_HP100 is not set +CONFIG_NET_PCI=y +# CONFIG_PCNET32 is not set +# CONFIG_AMD8111_ETH is not set +# CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_B44 is not set +# CONFIG_FORCEDETH is not set +# CONFIG_DGRS is not set +# CONFIG_EEPRO100 is not set +# CONFIG_E100 is not set +# CONFIG_FEALNX is not set +# CONFIG_NATSEMI is not set +# CONFIG_NE2K_PCI is not set +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_SIS900 is not set +# CONFIG_EPIC100 is not set +# CONFIG_SUNDANCE is not set +# CONFIG_TLAN is not set +# CONFIG_VIA_RHINE is not set +# CONFIG_VIA_VELOCITY is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_SUNKBD is not set 
+# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# CONFIG_SH_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_E1355 is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON_OLD is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +CONFIG_FB_KYRO=y +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_VIRTUAL is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_PCI_CONSOLE=y +CONFIG_FONTS=y +# CONFIG_FONT_8x8 is not set +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set + +# +# Logo configuration +# +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +# CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_LOGO_SUPERH_MONO is not set +# CONFIG_LOGO_SUPERH_VGA16 is not set +CONFIG_LOGO_SUPERH_CLUT224=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +CONFIG_MINIX_FS=y +CONFIG_ROMFS_FS=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT 
Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Profiling support +# +CONFIG_PROFILING=y +# CONFIG_OPROFILE is not set + +# +# Kernel hacking +# +CONFIG_MAGIC_SYSRQ=y +# CONFIG_EARLY_PRINTK is not set +# CONFIG_DEBUG_KERNEL_WITH_GDB_STUB is not set +# CONFIG_SH64_PROC_TLB is not set +# CONFIG_SH64_PROC_ASIDS is not set +CONFIG_SH64_SR_WATCH=y +# CONFIG_SH_ALPHANUMERIC is not set +# CONFIG_SH_NO_BSS_INIT is not set +CONFIG_FRAME_POINTER=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +# CONFIG_CRC16 is not set +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set diff --git a/arch/sh64/kernel/Makefile b/arch/sh64/kernel/Makefile new file mode 100644 index 000000000..2f8d07746 --- /dev/null +++ b/arch/sh64/kernel/Makefile @@ -0,0 +1,38 @@ +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 2000, 2001 Paolo Alberelli +# Copyright (C) 2003 Paul Mundt +# +# Makefile for the Linux sh64 kernel. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +extra-y := head.o init_task.o vmlinux.lds.s + +obj-y := process.o signal.o entry.o traps.o irq.o irq_intc.o \ + ptrace.o setup.o time.o sys_sh64.o semaphore.o sh_ksyms.o \ + switchto.o syscalls.o + +obj-$(CONFIG_HEARTBEAT) += led.o +obj-$(CONFIG_SH_ALPHANUMERIC) += alphanum.o +obj-$(CONFIG_SH_DMA) += dma.o +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_KALLSYMS) += unwind.o +obj-$(CONFIG_PCI) += pci-dma.o pcibios.o + +ifeq ($(CONFIG_PCI),y) +obj-$(CONFIG_CPU_SH5) += pci_sh5.o +endif + +ifndef CONFIG_NOFPU_SUPPORT +obj-y += fpu.o +endif + +USE_STANDARD_AS_RULE := true + diff --git a/arch/sh64/kernel/alphanum.c b/arch/sh64/kernel/alphanum.c new file mode 100644 index 000000000..56d6f9f71 --- /dev/null +++ b/arch/sh64/kernel/alphanum.c @@ -0,0 +1,45 @@ +/* + * arch/sh64/kernel/alpanum.c + * + * Copyright (C) 2002 Stuart Menefy + * + * May be copied or modified under the terms of the GNU General Public + * License. 
See linux/COPYING for more information. + * + * Machine-independent functions for handling 8-digit alphanumeric display + * (e.g. Agilent HDSP-253x) + */ +#include +#include +#include + +void mach_alphanum(int pos, unsigned char val); +void mach_led(int pos, int val); + +void print_seg(char *file, int line) +{ + int i; + unsigned int nibble; + + for (i = 0; i < 5; i++) { + mach_alphanum(i, file[i]); + } + + for (i = 0; i < 3; i++) { + nibble = ((line >> (i * 4)) & 0xf); + mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48)); + } +} + +void print_seg_num(unsigned num) +{ + int i; + unsigned int nibble; + + for (i = 0; i < 8; i++) { + nibble = ((num >> (i * 4)) & 0xf); + + mach_alphanum(7 - i, nibble + ((nibble > 9) ? 55 : 48)); + } +} + diff --git a/arch/sh64/kernel/asm-offsets.c b/arch/sh64/kernel/asm-offsets.c new file mode 100644 index 000000000..ca76537c1 --- /dev/null +++ b/arch/sh64/kernel/asm-offsets.c @@ -0,0 +1,33 @@ +/* + * This program is used to generate definitions needed by + * assembly language modules. + * + * We use the technique used in the OSF Mach kernel code: + * generate asm statements containing #defines, + * compile this file to assembler, and then extract the + * #defines from the assembly-language output. + */ + +#include +#include +#include +#include + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +int main(void) +{ + /* offsets into the thread_info struct */ + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); + DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block)); + + return 0; +} diff --git a/arch/sh64/kernel/dma.c b/arch/sh64/kernel/dma.c new file mode 100644 index 000000000..f5183b20a --- /dev/null +++ b/arch/sh64/kernel/dma.c @@ -0,0 +1,297 @@ +/* + * arch/sh64/kernel/dma.c + * + * DMA routines for the SH-5 DMAC. + * + * Copyright (C) 2003 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
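(Aside, before the body of dma.c: a usage sketch for the channel API this file goes on to define. This is illustrative only and not part of the patch; DMA_MODE_READ, buf_phys and the channel number are hypothetical placeholders.)

    /* hypothetical caller of the SH-5 DMAC helpers defined below */
    dma_info_t info = {
            .dev_addr = 0xb0000000,          /* hypothetical device FIFO address */
            .mode     = DMA_MODE_READ,       /* device -> memory                 */
            .count    = 4096,
    };
    unsigned long flags = claim_dma_lock();  /* serialise against other users    */
    setup_dma(0, &info);                     /* channel 0: hook its DMTE irq     */
    set_dma_mode(0, DMA_MODE_READ);
    set_dma_addr(0, buf_phys);               /* buf_phys: hypothetical buffer    */
    set_dma_count(0, 4096);
    enable_dma(0);                           /* set CTRL.TE, transfer starts     */
    release_dma_lock(flags);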
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct { + unsigned long dev_addr; + unsigned long mem_addr; + + unsigned int mode; + unsigned int count; +} dma_info_t; + +static dma_info_t dma_info[MAX_DMA_CHANNELS]; +extern spinlock_t dma_spin_lock; + +/* arch/sh64/kernel/irq_intc.c */ +extern void make_intc_irq(unsigned int irq); + +/* DMAC Interrupts */ +#define DMA_IRQ_DMTE0 18 +#define DMA_IRQ_DERR 22 + +#define DMAC_COMMON_BASE (dmac_base + 0x08) +#define DMAC_SAR_BASE (dmac_base + 0x10) +#define DMAC_DAR_BASE (dmac_base + 0x18) +#define DMAC_COUNT_BASE (dmac_base + 0x20) +#define DMAC_CTRL_BASE (dmac_base + 0x28) +#define DMAC_STATUS_BASE (dmac_base + 0x30) + +#define DMAC_SAR(n) (DMAC_SAR_BASE + ((n) * 0x28)) +#define DMAC_DAR(n) (DMAC_DAR_BASE + ((n) * 0x28)) +#define DMAC_COUNT(n) (DMAC_COUNT_BASE + ((n) * 0x28)) +#define DMAC_CTRL(n) (DMAC_CTRL_BASE + ((n) * 0x28)) +#define DMAC_STATUS(n) (DMAC_STATUS_BASE + ((n) * 0x28)) + +/* DMAC.COMMON Bit Definitions */ +#define DMAC_COMMON_PR 0x00000001 /* Priority */ + /* Bits 1-2 Reserved */ +#define DMAC_COMMON_ME 0x00000008 /* Master Enable */ +#define DMAC_COMMON_NMI 0x00000010 /* NMI Flag */ + /* Bits 5-6 Reserved */ +#define DMAC_COMMON_ER 0x00000780 /* Error Response */ +#define DMAC_COMMON_AAE 0x00007800 /* Address Alignment Error */ + /* Bits 15-63 Reserved */ + +/* DMAC.SAR Bit Definitions */ +#define DMAC_SAR_ADDR 0xffffffff /* Source Address */ + +/* DMAC.DAR Bit Definitions */ +#define DMAC_DAR_ADDR 0xffffffff /* Destination Address */ + +/* DMAC.COUNT Bit Definitions */ +#define DMAC_COUNT_CNT 0xffffffff /* Transfer Count */ + +/* DMAC.CTRL Bit Definitions */ +#define DMAC_CTRL_TS 0x00000007 /* Transfer Size */ +#define DMAC_CTRL_SI 0x00000018 /* Source Increment */ +#define DMAC_CTRL_DI 0x00000060 /* Destination Increment */ +#define DMAC_CTRL_RS 0x00000780 /* Resource Select */ +#define DMAC_CTRL_IE 0x00000800 /* Interrupt Enable */ +#define DMAC_CTRL_TE 0x00001000 /* Transfer Enable */ + /* Bits 15-63 Reserved */ + +/* DMAC.STATUS Bit Definitions */ +#define DMAC_STATUS_TE 0x00000001 /* Transfer End */ +#define DMAC_STATUS_AAE 0x00000002 /* Address Alignment Error */ + /* Bits 2-63 Reserved */ + +static unsigned long dmac_base; + +void set_dma_count(unsigned int chan, unsigned int count); +void set_dma_addr(unsigned int chan, unsigned int addr); + +static irqreturn_t dma_mte(int irq, void *dev_id, struct pt_regs *regs) +{ + unsigned int chan = irq - DMA_IRQ_DMTE0; + dma_info_t *info = dma_info + chan; + u64 status; + + if (info->mode & DMA_MODE_WRITE) { + sh64_out64(info->mem_addr & DMAC_SAR_ADDR, DMAC_SAR(chan)); + } else { + sh64_out64(info->mem_addr & DMAC_DAR_ADDR, DMAC_DAR(chan)); + } + + set_dma_count(chan, info->count); + + /* Clear the TE bit */ + status = sh64_in64(DMAC_STATUS(chan)); + status &= ~DMAC_STATUS_TE; + sh64_out64(status, DMAC_STATUS(chan)); + + return IRQ_HANDLED; +} + +static struct irqaction irq_dmte = { + .handler = dma_mte, + .flags = SA_INTERRUPT, + .name = "DMA MTE", +}; + +static irqreturn_t dma_err(int irq, void *dev_id, struct pt_regs *regs) +{ + u64 tmp; + u8 chan; + + printk(KERN_NOTICE "DMAC: Got a DMA Error!\n"); + + tmp = sh64_in64(DMAC_COMMON_BASE); + + /* Check for the type of error */ + if ((chan = tmp & DMAC_COMMON_AAE)) { + /* It's an address alignment error.. 
*/ + printk(KERN_NOTICE "DMAC: Alignment error on channel %d, ", chan); + + printk(KERN_NOTICE "SAR: 0x%08llx, DAR: 0x%08llx, COUNT: %lld\n", + (sh64_in64(DMAC_SAR(chan)) & DMAC_SAR_ADDR), + (sh64_in64(DMAC_DAR(chan)) & DMAC_DAR_ADDR), + (sh64_in64(DMAC_COUNT(chan)) & DMAC_COUNT_CNT)); + + } else if ((chan = tmp & DMAC_COMMON_ER)) { + /* Something else went wrong.. */ + printk(KERN_NOTICE "DMAC: Error on channel %d\n", chan); + } + + /* Reset the ME bit to clear the interrupt */ + tmp |= DMAC_COMMON_ME; + sh64_out64(tmp, DMAC_COMMON_BASE); + + return IRQ_HANDLED; +} + +static struct irqaction irq_derr = { + .handler = dma_err, + .flags = SA_INTERRUPT, + .name = "DMA Error", +}; + +static inline unsigned long calc_xmit_shift(unsigned int chan) +{ + return sh64_in64(DMAC_CTRL(chan)) & 0x03; +} + +void setup_dma(unsigned int chan, dma_info_t *info) +{ + unsigned int irq = DMA_IRQ_DMTE0 + chan; + dma_info_t *dma = dma_info + chan; + + make_intc_irq(irq); + setup_irq(irq, &irq_dmte); + dma = info; +} + +void enable_dma(unsigned int chan) +{ + u64 ctrl; + + ctrl = sh64_in64(DMAC_CTRL(chan)); + ctrl |= DMAC_CTRL_TE; + sh64_out64(ctrl, DMAC_CTRL(chan)); +} + +void disable_dma(unsigned int chan) +{ + u64 ctrl; + + ctrl = sh64_in64(DMAC_CTRL(chan)); + ctrl &= ~DMAC_CTRL_TE; + sh64_out64(ctrl, DMAC_CTRL(chan)); +} + +void set_dma_mode(unsigned int chan, char mode) +{ + dma_info_t *info = dma_info + chan; + + info->mode = mode; + + set_dma_addr(chan, info->mem_addr); + set_dma_count(chan, info->count); +} + +void set_dma_addr(unsigned int chan, unsigned int addr) +{ + dma_info_t *info = dma_info + chan; + unsigned long sar, dar; + + info->mem_addr = addr; + sar = (info->mode & DMA_MODE_WRITE) ? info->mem_addr : info->dev_addr; + dar = (info->mode & DMA_MODE_WRITE) ? 
info->dev_addr : info->mem_addr; + + sh64_out64(sar & DMAC_SAR_ADDR, DMAC_SAR(chan)); + sh64_out64(dar & DMAC_SAR_ADDR, DMAC_DAR(chan)); +} + +void set_dma_count(unsigned int chan, unsigned int count) +{ + dma_info_t *info = dma_info + chan; + u64 tmp; + + info->count = count; + + tmp = (info->count >> calc_xmit_shift(chan)) & DMAC_COUNT_CNT; + + sh64_out64(tmp, DMAC_COUNT(chan)); +} + +unsigned long claim_dma_lock(void) +{ + unsigned long flags; + + spin_lock_irqsave(&dma_spin_lock, flags); + + return flags; +} + +void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +int get_dma_residue(unsigned int chan) +{ + return sh64_in64(DMAC_COUNT(chan) << calc_xmit_shift(chan)); +} + +int __init init_dma(void) +{ + struct vcr_info vcr; + u64 tmp; + + /* Remap the DMAC */ + dmac_base = onchip_remap(PHYS_DMAC_BLOCK, 1024, "DMAC"); + if (!dmac_base) { + printk(KERN_ERR "Unable to remap DMAC\n"); + return -ENOMEM; + } + + /* Report DMAC.VCR Info */ + vcr = sh64_get_vcr_info(dmac_base); + printk("DMAC: Module ID: 0x%04x, Module version: 0x%04x\n", + vcr.mod_id, vcr.mod_vers); + + /* Set the ME bit */ + tmp = sh64_in64(DMAC_COMMON_BASE); + tmp |= DMAC_COMMON_ME; + sh64_out64(tmp, DMAC_COMMON_BASE); + + /* Enable the DMAC Error Interrupt */ + make_intc_irq(DMA_IRQ_DERR); + setup_irq(DMA_IRQ_DERR, &irq_derr); + + return 0; +} + +static void __exit exit_dma(void) +{ + onchip_unmap(dmac_base); + free_irq(DMA_IRQ_DERR, 0); +} + +module_init(init_dma); +module_exit(exit_dma); + +MODULE_AUTHOR("Paul Mundt"); +MODULE_DESCRIPTION("DMA API for SH-5 DMAC"); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(setup_dma); +EXPORT_SYMBOL(claim_dma_lock); +EXPORT_SYMBOL(release_dma_lock); +EXPORT_SYMBOL(enable_dma); +EXPORT_SYMBOL(disable_dma); +EXPORT_SYMBOL(set_dma_mode); +EXPORT_SYMBOL(set_dma_addr); +EXPORT_SYMBOL(set_dma_count); +EXPORT_SYMBOL(get_dma_residue); + diff --git a/arch/sh64/kernel/early_printk.c b/arch/sh64/kernel/early_printk.c new file mode 100644 index 000000000..3d03ca737 --- /dev/null +++ b/arch/sh64/kernel/early_printk.c @@ -0,0 +1,107 @@ +/* + * arch/sh64/kernel/early_printk.c + * + * SH-5 Early SCIF console (cloned and hacked from sh implementation) + * + * Copyright (C) 2003, 2004 Paul Mundt + * Copyright (C) 2002 M. R. Brown + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include + +extern void cpu_relax(void); + +#define SCIF_BASE_ADDR 0x01030000 +#define SCIF_ADDR_SH5 PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR + +/* + * Fixed virtual address where SCIF is mapped (should already be done + * in arch/sh64/kernel/head.S!). 
+ */ +#define SCIF_REG 0xfa030000 + +enum { + SCIF_SCSMR2 = SCIF_REG + 0x00, + SCIF_SCBRR2 = SCIF_REG + 0x04, + SCIF_SCSCR2 = SCIF_REG + 0x08, + SCIF_SCFTDR2 = SCIF_REG + 0x0c, + SCIF_SCFSR2 = SCIF_REG + 0x10, + SCIF_SCFRDR2 = SCIF_REG + 0x14, + SCIF_SCFCR2 = SCIF_REG + 0x18, + SCIF_SCFDR2 = SCIF_REG + 0x1c, + SCIF_SCSPTR2 = SCIF_REG + 0x20, + SCIF_SCLSR2 = SCIF_REG + 0x24, +}; + +static void sh_console_putc(int c) +{ + while (!(ctrl_inw(SCIF_SCFSR2) & 0x20)) + cpu_relax(); + + ctrl_outb(c, SCIF_SCFTDR2); + ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0x9f), SCIF_SCFSR2); + + if (c == '\n') + sh_console_putc('\r'); +} + +static void sh_console_flush(void) +{ + ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2); + + while (!(ctrl_inw(SCIF_SCFSR2) & 0x40)) + cpu_relax(); + + ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2); +} + +static void sh_console_write(struct console *con, const char *s, unsigned count) +{ + while (count-- > 0) + sh_console_putc(*s++); + + sh_console_flush(); +} + +static int __init sh_console_setup(struct console *con, char *options) +{ + con->cflag = CREAD | HUPCL | CLOCAL | B19200 | CS8; + + return 0; +} + +static struct console sh_console = { + .name = "scifcon", + .write = sh_console_write, + .setup = sh_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +void __init enable_early_printk(void) +{ + ctrl_outb(0x2a, SCIF_SCBRR2); /* 19200bps */ + + ctrl_outw(0x04, SCIF_SCFCR2); /* Reset TFRST */ + ctrl_outw(0x10, SCIF_SCFCR2); /* TTRG0=1 */ + + ctrl_outw(0, SCIF_SCSPTR2); + ctrl_outw(0x60, SCIF_SCFSR2); + ctrl_outw(0, SCIF_SCLSR2); + ctrl_outw(0x30, SCIF_SCSCR2); + + register_console(&sh_console); +} + +void disable_early_printk(void) +{ + unregister_console(&sh_console); +} + diff --git a/arch/sh64/kernel/entry.S b/arch/sh64/kernel/entry.S new file mode 100644 index 000000000..52dda3814 --- /dev/null +++ b/arch/sh64/kernel/entry.S @@ -0,0 +1,2101 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/entry.S + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2004 Paul Mundt + * Copyright (C) 2003, 2004 Richard Curnow + * + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +/* + * SR fields. + */ +#define SR_ASID_MASK 0x00ff0000 +#define SR_FD_MASK 0x00008000 +#define SR_SS 0x08000000 +#define SR_BL 0x10000000 +#define SR_MD 0x40000000 + +/* + * Event code. + */ +#define EVENT_INTERRUPT 0 +#define EVENT_FAULT_TLB 1 +#define EVENT_FAULT_NOT_TLB 2 +#define EVENT_DEBUG 3 + +/* EXPEVT values */ +#define RESET_CAUSE 0x20 +#define DEBUGSS_CAUSE 0x980 + +/* + * Frame layout. Quad index. 
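(A quick arithmetic check, for the reader, of the frame offsets defined just below: FRAME_SBASE = 0; FRAME_RBASE = 0 + 3*8 = 24; FRAME_TBASE = 24 + 63*8 = 528; FRAME_PBASE = 528 + 8*8 = 592; FRAME_SIZE = 592 + 2*8 = 608 = 19*32, so the requirement stated alongside them, that the save frame be a multiple of 32 bytes long, does hold.)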
+ */ +#define FRAME_T(x) FRAME_TBASE+(x*8) +#define FRAME_R(x) FRAME_RBASE+(x*8) +#define FRAME_S(x) FRAME_SBASE+(x*8) +#define FSPC 0 +#define FSSR 1 +#define FSYSCALL_ID 2 + +/* Arrange the save frame to be a multiple of 32 bytes long */ +#define FRAME_SBASE 0 +#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */ +#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */ +#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */ +#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */ + +#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */ +#define FP_FRAME_BASE 0 + +#define SAVED_R2 0*8 +#define SAVED_R3 1*8 +#define SAVED_R4 2*8 +#define SAVED_R5 3*8 +#define SAVED_R18 4*8 +#define SAVED_R6 5*8 +#define SAVED_TR0 6*8 + +/* These are the registers saved in the TLB path that aren't saved in the first + level of the normal one. */ +#define TLB_SAVED_R25 7*8 +#define TLB_SAVED_TR1 8*8 +#define TLB_SAVED_TR2 9*8 +#define TLB_SAVED_TR3 10*8 +#define TLB_SAVED_TR4 11*8 +/* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing + breakage otherwise. */ +#define TLB_SAVED_R0 12*8 +#define TLB_SAVED_R1 13*8 + +#define CLI() \ + getcon SR, r6; \ + ori r6, 0xf0, r6; \ + putcon r6, SR; + +#define STI() \ + getcon SR, r6; \ + andi r6, ~0xf0, r6; \ + putcon r6, SR; + +#ifdef CONFIG_PREEMPT +# define preempt_stop() CLI() +#else +# define preempt_stop() +# define resume_kernel restore_all +#endif + + .section .data, "aw" + +#define FAST_TLBMISS_STACK_CACHELINES 4 +#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES) + +/* Register back-up area for all exceptions */ + .balign 32 + /* Allow for 16 quadwords to be pushed by fast tlbmiss handling + * register saves etc. */ + .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0 +/* This is 32 byte aligned by construction */ +/* Register back-up area for all exceptions */ +reg_save_area: + .quad 0 + .quad 0 + .quad 0 + .quad 0 + + .quad 0 + .quad 0 + .quad 0 + .quad 0 + + .quad 0 + .quad 0 + .quad 0 + .quad 0 + + .quad 0 + .quad 0 + +/* Save area for RESVEC exceptions. We cannot use reg_save_area because of + * reentrancy. Note this area may be accessed via physical address. + * Align so this fits a whole single cache line, for ease of purging. + */ + .balign 32,0,32 +resvec_save_area: + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .balign 32,0,32 + +/* Jump table of 3rd level handlers */ +trap_jtable: + .long do_exception_error /* 0x000 */ + .long do_exception_error /* 0x020 */ + .long tlb_miss_load /* 0x040 */ + .long tlb_miss_store /* 0x060 */ + ! 
ARTIFICIAL pseudo-EXPEVT setting + .long do_debug_interrupt /* 0x080 */ + .long tlb_miss_load /* 0x0A0 */ + .long tlb_miss_store /* 0x0C0 */ + .long do_address_error_load /* 0x0E0 */ + .long do_address_error_store /* 0x100 */ +#ifndef CONFIG_NOFPU_SUPPORT + .long do_fpu_error /* 0x120 */ +#else + .long do_exception_error /* 0x120 */ +#endif + .long do_exception_error /* 0x140 */ + .long system_call /* 0x160 */ + .long do_reserved_inst /* 0x180 */ + .long do_illegal_slot_inst /* 0x1A0 */ + .long do_NMI /* 0x1C0 */ + .long do_exception_error /* 0x1E0 */ + .rept 15 + .long do_IRQ /* 0x200 - 0x3C0 */ + .endr + .long do_exception_error /* 0x3E0 */ + .rept 32 + .long do_IRQ /* 0x400 - 0x7E0 */ + .endr + .long fpu_error_or_IRQA /* 0x800 */ + .long fpu_error_or_IRQB /* 0x820 */ + .long do_IRQ /* 0x840 */ + .long do_IRQ /* 0x860 */ + .rept 6 + .long do_exception_error /* 0x880 - 0x920 */ + .endr + .long do_software_break_point /* 0x940 */ + .long do_exception_error /* 0x960 */ + .long do_single_step /* 0x980 */ + + .rept 3 + .long do_exception_error /* 0x9A0 - 0x9E0 */ + .endr + .long do_IRQ /* 0xA00 */ + .long do_IRQ /* 0xA20 */ + .long itlb_miss_or_IRQ /* 0xA40 */ + .long do_IRQ /* 0xA60 */ + .long do_IRQ /* 0xA80 */ + .long itlb_miss_or_IRQ /* 0xAA0 */ + .long do_exception_error /* 0xAC0 */ + .long do_address_error_exec /* 0xAE0 */ + .rept 8 + .long do_exception_error /* 0xB00 - 0xBE0 */ + .endr + .rept 18 + .long do_IRQ /* 0xC00 - 0xE20 */ + .endr + + .section .text64, "ax" + +/* + * --- Exception/Interrupt/Event Handling Section + */ + +/* + * VBR and RESVEC blocks. + * + * First level handler for VBR-based exceptions. + * + * To avoid waste of space, align to the maximum text block size. + * This is assumed to be at most 128 bytes or 32 instructions. + * DO NOT EXCEED 32 instructions on the first level handlers ! + * + * Also note that RESVEC is contained within the VBR block + * where the room left (1KB - TEXT_SIZE) allows placing + * the RESVEC block (at most 512B + TEXT_SIZE). + * + * So first (and only) level handler for RESVEC-based exceptions. + * + * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss + * and interrupt) we are a lot tight with register space until + * saving onto the stack frame, which is done in handle_exception(). + * + */ + +#define TEXT_SIZE 128 +#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */ + + .balign TEXT_SIZE +LVBR_block: + .space 256, 0 /* Power-on class handler, */ + /* not required here */ +not_a_tlb_miss: + /* Save original stack pointer into KCR1 */ + putcon SP, KCR1 + + /* Save other original registers into reg_save_area */ + movi reg_save_area, SP + st.q SP, SAVED_R2, r2 + st.q SP, SAVED_R3, r3 + st.q SP, SAVED_R4, r4 + st.q SP, SAVED_R5, r5 + st.q SP, SAVED_R6, r6 + st.q SP, SAVED_R18, r18 + gettr tr0, r3 + st.q SP, SAVED_TR0, r3 + + /* Set args for Non-debug, Not a TLB miss class handler */ + getcon EXPEVT, r2 + movi ret_from_exception, r3 + ori r3, 1, r3 + movi EVENT_FAULT_NOT_TLB, r4 + or SP, ZERO, r5 + getcon KCR1, SP + pta handle_exception, tr0 + blink tr0, ZERO + + .balign 256 + ! VBR+0x200 + nop + .balign 256 + ! VBR+0x300 + nop + .balign 256 + /* + * Instead of the natural .balign 1024 place RESVEC here + * respecting the final 1KB alignment. + */ + .balign TEXT_SIZE + /* + * Instead of '.space 1024-TEXT_SIZE' place the RESVEC + * block making sure the final alignment is correct. + */ +tlb_miss: + putcon SP, KCR1 + movi reg_save_area, SP + /* SP is guaranteed 32-byte aligned. 
*/ + st.q SP, TLB_SAVED_R0 , r0 + st.q SP, TLB_SAVED_R1 , r1 + st.q SP, SAVED_R2 , r2 + st.q SP, SAVED_R3 , r3 + st.q SP, SAVED_R4 , r4 + st.q SP, SAVED_R5 , r5 + st.q SP, SAVED_R6 , r6 + st.q SP, SAVED_R18, r18 + + /* Save R25 for safety; as/ld may want to use it to achieve the call to + * the code in mm/tlbmiss.c */ + st.q SP, TLB_SAVED_R25, r25 + gettr tr0, r2 + gettr tr1, r3 + gettr tr2, r4 + gettr tr3, r5 + gettr tr4, r18 + st.q SP, SAVED_TR0 , r2 + st.q SP, TLB_SAVED_TR1 , r3 + st.q SP, TLB_SAVED_TR2 , r4 + st.q SP, TLB_SAVED_TR3 , r5 + st.q SP, TLB_SAVED_TR4 , r18 + + pt do_fast_page_fault, tr0 + getcon SSR, r2 + getcon EXPEVT, r3 + getcon TEA, r4 + shlri r2, 30, r2 + andi r2, 1, r2 /* r2 = SSR.MD */ + blink tr0, LINK + + pt fixup_to_invoke_general_handler, tr1 + + /* If the fast path handler fixed the fault, just drop through quickly + to the restore code right away to return to the excepting context. + */ + beqi/u r2, 0, tr1 + +fast_tlb_miss_restore: + ld.q SP, SAVED_TR0, r2 + ld.q SP, TLB_SAVED_TR1, r3 + ld.q SP, TLB_SAVED_TR2, r4 + + ld.q SP, TLB_SAVED_TR3, r5 + ld.q SP, TLB_SAVED_TR4, r18 + + ptabs r2, tr0 + ptabs r3, tr1 + ptabs r4, tr2 + ptabs r5, tr3 + ptabs r18, tr4 + + ld.q SP, TLB_SAVED_R0, r0 + ld.q SP, TLB_SAVED_R1, r1 + ld.q SP, SAVED_R2, r2 + ld.q SP, SAVED_R3, r3 + ld.q SP, SAVED_R4, r4 + ld.q SP, SAVED_R5, r5 + ld.q SP, SAVED_R6, r6 + ld.q SP, SAVED_R18, r18 + ld.q SP, TLB_SAVED_R25, r25 + + getcon KCR1, SP + rte + nop /* for safety, in case the code is run on sh5-101 cut1.x */ + +fixup_to_invoke_general_handler: + + /* OK, new method. Restore stuff that's not expected to get saved into + the 'first-level' reg save area, then just fall through to setting + up the registers and calling the second-level handler. */ + + /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore + r25,tr1-4 and save r6 to get into the right state. */ + + ld.q SP, TLB_SAVED_TR1, r3 + ld.q SP, TLB_SAVED_TR2, r4 + ld.q SP, TLB_SAVED_TR3, r5 + ld.q SP, TLB_SAVED_TR4, r18 + ld.q SP, TLB_SAVED_R25, r25 + + ld.q SP, TLB_SAVED_R0, r0 + ld.q SP, TLB_SAVED_R1, r1 + + ptabs/u r3, tr1 + ptabs/u r4, tr2 + ptabs/u r5, tr3 + ptabs/u r18, tr4 + + /* Set args for Non-debug, TLB miss class handler */ + getcon EXPEVT, r2 + movi ret_from_exception, r3 + ori r3, 1, r3 + movi EVENT_FAULT_TLB, r4 + or SP, ZERO, r5 + getcon KCR1, SP + pta handle_exception, tr0 + blink tr0, ZERO + +/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE + DOES END UP AT VBR+0x600 */ + nop + nop + nop + nop + nop + nop + + .balign 256 + /* VBR + 0x600 */ + +interrupt: + /* Save original stack pointer into KCR1 */ + putcon SP, KCR1 + + /* Save other original registers into reg_save_area */ + movi reg_save_area, SP + st.q SP, SAVED_R2, r2 + st.q SP, SAVED_R3, r3 + st.q SP, SAVED_R4, r4 + st.q SP, SAVED_R5, r5 + st.q SP, SAVED_R6, r6 + st.q SP, SAVED_R18, r18 + gettr tr0, r3 + st.q SP, SAVED_TR0, r3 + + /* Set args for interrupt class handler */ + getcon INTEVT, r2 + movi ret_from_irq, r3 + ori r3, 1, r3 + movi EVENT_INTERRUPT, r4 + or SP, ZERO, r5 + getcon KCR1, SP + pta handle_exception, tr0 + blink tr0, ZERO + .balign TEXT_SIZE /* let's waste the bare minimum */ + +LVBR_block_end: /* Marker. Used for total checking */ + + .balign 256 +LRESVEC_block: + /* Panic handler. Called with MMU off. Possible causes/actions: + * - Reset: Jump to program start. + * - Single Step: Turn off Single Step & return. + * - Others: Call panic handler, passing PC as arg. + * (this may need to be extended...) 
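(Illustrative C-style rendering of the dispatch performed by reset_or_panic below; a sketch only, not part of the patch, and the helper names are hypothetical stand-ins for the assembly paths they describe.)

    /* sketch: how the RESVEC panic entry decides what to do, keyed on EXPEVT */
    if (expevt == RESET_CAUSE)               /* 0x20: reset                        */
            jump_to_stext();                 /* restart at _stext (real address)   */
    else if (expevt == DEBUGSS_CAUSE)        /* 0x980: single step                 */
            resume_with_mmu_on_ss_off();     /* single_step_panic: fix SSR, rte    */
    else
            panic_stash_regs();              /* dump registers, call panic_handler */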
+ */ +reset_or_panic: + putcon SP, DCR + /* First save r0-1 and tr0, as we need to use these */ + movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP + st.q SP, 0, r0 + st.q SP, 8, r1 + gettr tr0, r0 + st.q SP, 32, r0 + + /* Check cause */ + getcon EXPEVT, r0 + movi RESET_CAUSE, r1 + sub r1, r0, r1 /* r1=0 if reset */ + movi _stext-CONFIG_CACHED_MEMORY_OFFSET, r0 + ori r0, 1, r0 + ptabs r0, tr0 + beqi r1, 0, tr0 /* Jump to start address if reset */ + + getcon EXPEVT, r0 + movi DEBUGSS_CAUSE, r1 + sub r1, r0, r1 /* r1=0 if single step */ + pta single_step_panic, tr0 + beqi r1, 0, tr0 /* jump if single step */ + + /* Now jump to where we save the registers. */ + movi panic_stash_regs-CONFIG_CACHED_MEMORY_OFFSET, r1 + ptabs r1, tr0 + blink tr0, r63 + +single_step_panic: + /* We are in a handler with Single Step set. We need to resume the + * handler, by turning on MMU & turning off Single Step. */ + getcon SSR, r0 + movi SR_MMU, r1 + or r0, r1, r0 + movi ~SR_SS, r1 + and r0, r1, r0 + putcon r0, SSR + /* Restore EXPEVT, as the rte won't do this */ + getcon PEXPEVT, r0 + putcon r0, EXPEVT + /* Restore regs */ + ld.q SP, 32, r0 + ptabs r0, tr0 + ld.q SP, 0, r0 + ld.q SP, 8, r1 + getcon DCR, SP + synco + rte + + + .balign 256 +debug_exception: + /* + * Single step/software_break_point first level handler. + * Called with MMU off, so the first thing we do is enable it + * by doing an rte with appropriate SSR. + */ + putcon SP, DCR + /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */ + movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP + + /* With the MMU off, we are bypassing the cache, so purge any + * data that will be made stale by the following stores. + */ + ocbp SP, 0 + synco + + st.q SP, 0, r0 + st.q SP, 8, r1 + getcon SPC, r0 + st.q SP, 16, r0 + getcon SSR, r0 + st.q SP, 24, r0 + + /* Enable MMU, block exceptions, set priv mode, disable single step */ + movi SR_MMU | SR_BL | SR_MD, r1 + or r0, r1, r0 + movi ~SR_SS, r1 + and r0, r1, r0 + putcon r0, SSR + /* Force control to debug_exception_2 when rte is executed */ + movi debug_exeception_2, r0 + ori r0, 1, r0 /* force SHmedia, just in case */ + putcon r0, SPC + getcon DCR, SP + synco + rte +debug_exeception_2: + /* Restore saved regs */ + putcon SP, KCR1 + movi resvec_save_area, SP + ld.q SP, 24, r0 + putcon r0, SSR + ld.q SP, 16, r0 + putcon r0, SPC + ld.q SP, 0, r0 + ld.q SP, 8, r1 + + /* Save other original registers into reg_save_area */ + movi reg_save_area, SP + st.q SP, SAVED_R2, r2 + st.q SP, SAVED_R3, r3 + st.q SP, SAVED_R4, r4 + st.q SP, SAVED_R5, r5 + st.q SP, SAVED_R6, r6 + st.q SP, SAVED_R18, r18 + gettr tr0, r3 + st.q SP, SAVED_TR0, r3 + + /* Set args for debug class handler */ + getcon EXPEVT, r2 + movi ret_from_exception, r3 + ori r3, 1, r3 + movi EVENT_DEBUG, r4 + or SP, ZERO, r5 + getcon KCR1, SP + pta handle_exception, tr0 + blink tr0, ZERO + + .balign 256 +debug_interrupt: + /* !!! WE COME HERE IN REAL MODE !!! */ + /* Hook-up debug interrupt to allow various debugging options to be + * hooked into its handler. */ + /* Save original stack pointer into KCR1 */ + synco + putcon SP, KCR1 + movi resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP + ocbp SP, 0 + ocbp SP, 32 + synco + + /* Save other original registers into reg_save_area thru real addresses */ + st.q SP, SAVED_R2, r2 + st.q SP, SAVED_R3, r3 + st.q SP, SAVED_R4, r4 + st.q SP, SAVED_R5, r5 + st.q SP, SAVED_R6, r6 + st.q SP, SAVED_R18, r18 + gettr tr0, r3 + st.q SP, SAVED_TR0, r3 + + /* move (spc,ssr)->(pspc,pssr). 
The rte will shift + them back again, so that they look like the originals + as far as the real handler code is concerned. */ + getcon spc, r6 + putcon r6, pspc + getcon ssr, r6 + putcon r6, pssr + + ! construct useful SR for handle_exception + movi 3, r6 + shlli r6, 30, r6 + getcon sr, r18 + or r18, r6, r6 + putcon r6, ssr + + ! SSR is now the current SR with the MD and MMU bits set + ! i.e. the rte will switch back to priv mode and put + ! the mmu back on + + ! construct spc + movi handle_exception, r18 + ori r18, 1, r18 ! for safety (do we need this?) + putcon r18, spc + + /* Set args for Non-debug, Not a TLB miss class handler */ + + ! EXPEVT==0x80 is unused, so 'steal' this value to put the + ! debug interrupt handler in the vectoring table + movi 0x80, r2 + movi ret_from_exception, r3 + ori r3, 1, r3 + movi EVENT_FAULT_NOT_TLB, r4 + + or SP, ZERO, r5 + movi CONFIG_CACHED_MEMORY_OFFSET, r6 + add r6, r5, r5 + getcon KCR1, SP + + synco ! for safety + rte ! -> handle_exception, switch back to priv mode again + +LRESVEC_block_end: /* Marker. Unused. */ + + .balign TEXT_SIZE + +/* + * Second level handler for VBR-based exceptions. Pre-handler. + * In common to all stack-frame sensitive handlers. + * + * Inputs: + * (KCR0) Current [current task union] + * (KCR1) Original SP + * (r2) INTEVT/EXPEVT + * (r3) appropriate return address + * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug) + * (r5) Pointer to reg_save_area + * (SP) Original SP + * + * Available registers: + * (r6) + * (r18) + * (tr0) + * + */ +handle_exception: + /* Common 2nd level handler. */ + + /* First thing we need an appropriate stack pointer */ + getcon SSR, r6 + shlri r6, 30, r6 + andi r6, 1, r6 + pta stack_ok, tr0 + bne r6, ZERO, tr0 /* Original stack pointer is fine */ + + /* Set stack pointer for user fault */ + getcon KCR0, SP + movi THREAD_SIZE, r6 /* Point to the end */ + add SP, r6, SP + +stack_ok: + +/* DEBUG : check for underflow/overflow of the kernel stack */ + pta no_underflow, tr0 + getcon KCR0, r6 + movi 1024, r18 + add r6, r18, r6 + bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone + +/* Just panic to cause a crash. */ +bad_sp: + ld.b r63, 0, r6 + nop + +no_underflow: + pta bad_sp, tr0 + getcon kcr0, r6 + movi THREAD_SIZE, r18 + add r18, r6, r6 + bgt SP, r6, tr0 ! sp above the stack + + /* Make some room for the BASIC frame. */ + movi -(FRAME_SIZE), r6 + add SP, r6, SP + +/* Could do this with no stalling if we had another spare register, but the + code below will be OK. */ + ld.q r5, SAVED_R2, r6 + ld.q r5, SAVED_R3, r18 + st.q SP, FRAME_R(2), r6 + ld.q r5, SAVED_R4, r6 + st.q SP, FRAME_R(3), r18 + ld.q r5, SAVED_R5, r18 + st.q SP, FRAME_R(4), r6 + ld.q r5, SAVED_R6, r6 + st.q SP, FRAME_R(5), r18 + ld.q r5, SAVED_R18, r18 + st.q SP, FRAME_R(6), r6 + ld.q r5, SAVED_TR0, r6 + st.q SP, FRAME_R(18), r18 + st.q SP, FRAME_T(0), r6 + + /* Keep old SP around */ + getcon KCR1, r6 + + /* Save the rest of the general purpose registers */ + st.q SP, FRAME_R(0), r0 + st.q SP, FRAME_R(1), r1 + st.q SP, FRAME_R(7), r7 + st.q SP, FRAME_R(8), r8 + st.q SP, FRAME_R(9), r9 + st.q SP, FRAME_R(10), r10 + st.q SP, FRAME_R(11), r11 + st.q SP, FRAME_R(12), r12 + st.q SP, FRAME_R(13), r13 + st.q SP, FRAME_R(14), r14 + + /* SP is somewhere else */ + st.q SP, FRAME_R(15), r6 + + st.q SP, FRAME_R(16), r16 + st.q SP, FRAME_R(17), r17 + /* r18 is saved earlier. 
*/ + st.q SP, FRAME_R(19), r19 + st.q SP, FRAME_R(20), r20 + st.q SP, FRAME_R(21), r21 + st.q SP, FRAME_R(22), r22 + st.q SP, FRAME_R(23), r23 + st.q SP, FRAME_R(24), r24 + st.q SP, FRAME_R(25), r25 + st.q SP, FRAME_R(26), r26 + st.q SP, FRAME_R(27), r27 + st.q SP, FRAME_R(28), r28 + st.q SP, FRAME_R(29), r29 + st.q SP, FRAME_R(30), r30 + st.q SP, FRAME_R(31), r31 + st.q SP, FRAME_R(32), r32 + st.q SP, FRAME_R(33), r33 + st.q SP, FRAME_R(34), r34 + st.q SP, FRAME_R(35), r35 + st.q SP, FRAME_R(36), r36 + st.q SP, FRAME_R(37), r37 + st.q SP, FRAME_R(38), r38 + st.q SP, FRAME_R(39), r39 + st.q SP, FRAME_R(40), r40 + st.q SP, FRAME_R(41), r41 + st.q SP, FRAME_R(42), r42 + st.q SP, FRAME_R(43), r43 + st.q SP, FRAME_R(44), r44 + st.q SP, FRAME_R(45), r45 + st.q SP, FRAME_R(46), r46 + st.q SP, FRAME_R(47), r47 + st.q SP, FRAME_R(48), r48 + st.q SP, FRAME_R(49), r49 + st.q SP, FRAME_R(50), r50 + st.q SP, FRAME_R(51), r51 + st.q SP, FRAME_R(52), r52 + st.q SP, FRAME_R(53), r53 + st.q SP, FRAME_R(54), r54 + st.q SP, FRAME_R(55), r55 + st.q SP, FRAME_R(56), r56 + st.q SP, FRAME_R(57), r57 + st.q SP, FRAME_R(58), r58 + st.q SP, FRAME_R(59), r59 + st.q SP, FRAME_R(60), r60 + st.q SP, FRAME_R(61), r61 + st.q SP, FRAME_R(62), r62 + + /* + * Save the S* registers. + */ + getcon SSR, r61 + st.q SP, FRAME_S(FSSR), r61 + getcon SPC, r62 + st.q SP, FRAME_S(FSPC), r62 + movi -1, r62 /* Reset syscall_nr */ + st.q SP, FRAME_S(FSYSCALL_ID), r62 + + /* Save the rest of the target registers */ + gettr tr1, r6 + st.q SP, FRAME_T(1), r6 + gettr tr2, r6 + st.q SP, FRAME_T(2), r6 + gettr tr3, r6 + st.q SP, FRAME_T(3), r6 + gettr tr4, r6 + st.q SP, FRAME_T(4), r6 + gettr tr5, r6 + st.q SP, FRAME_T(5), r6 + gettr tr6, r6 + st.q SP, FRAME_T(6), r6 + gettr tr7, r6 + st.q SP, FRAME_T(7), r6 + + ! setup FP so that unwinder can wind back through nested kernel mode + ! exceptions + add SP, ZERO, r14 + +#define POOR_MANS_STRACE 0 + +#if POOR_MANS_STRACE + /* We've pushed all the registers now, so only r2-r4 hold anything + * useful. Move them into callee save registers */ + or r2, ZERO, r28 + or r3, ZERO, r29 + or r4, ZERO, r30 + + /* Preserve r2 as the event code */ + movi evt_debug, r3 + ori r3, 1, r3 + ptabs r3, tr0 + + or SP, ZERO, r6 + getcon TRA, r5 + blink tr0, LINK + + or r28, ZERO, r2 + or r29, ZERO, r3 + or r30, ZERO, r4 +#endif + + + /* For syscall and debug race condition, get TRA now */ + getcon TRA, r5 + + /* We are in a safe position to turn SR.BL off, but set IMASK=0xf + * Also set FD, to catch FPU usage in the kernel. + * + * benedict.gaster@superh.com 29/07/2002 + * + * On all SH5-101 revisions it is unsafe to raise the IMASK and at the + * same time change BL from 1->0, as any pending interrupt of a level + * higher than he previous value of IMASK will leak through and be + * taken unexpectedly. + * + * To avoid this we raise the IMASK and then issue another PUTCON to + * enable interrupts. + */ + getcon SR, r6 + movi SR_IMASK | SR_FD, r7 + or r6, r7, r6 + putcon r6, SR + movi SR_UNBLOCK_EXC, r7 + and r6, r7, r6 + putcon r6, SR + + + /* Now call the appropriate 3rd level handler */ + or r3, ZERO, LINK + movi trap_jtable, r3 + shlri r2, 3, r2 + ldx.l r2, r3, r3 + shlri r2, 2, r2 + ptabs r3, tr0 + or SP, ZERO, r3 + blink tr0, ZERO + +/* + * Second level handler for VBR-based exceptions. Post-handlers. 
+ * + * Post-handlers for interrupts (ret_from_irq), exceptions + * (ret_from_exception) and common reentrance doors (restore_all + * to get back to the original context, ret_from_syscall loop to + * check kernel exiting). + * + * ret_with_reschedule and work_notifysig are an inner lables of + * the ret_from_syscall loop. + * + * In common to all stack-frame sensitive handlers. + * + * Inputs: + * (SP) struct pt_regs *, original register's frame pointer (basic) + * + */ + .global ret_from_irq +ret_from_irq: +#if POOR_MANS_STRACE + pta evt_debug_ret_from_irq, tr0 + ori SP, 0, r2 + blink tr0, LINK +#endif + ld.q SP, FRAME_S(FSSR), r6 + shlri r6, 30, r6 + andi r6, 1, r6 + pta resume_kernel, tr0 + bne r6, ZERO, tr0 /* no further checks */ + STI() + pta ret_with_reschedule, tr0 + blink tr0, ZERO /* Do not check softirqs */ + + .global ret_from_exception +ret_from_exception: + preempt_stop() + +#if POOR_MANS_STRACE + pta evt_debug_ret_from_exc, tr0 + ori SP, 0, r2 + blink tr0, LINK +#endif + + ld.q SP, FRAME_S(FSSR), r6 + shlri r6, 30, r6 + andi r6, 1, r6 + pta resume_kernel, tr0 + bne r6, ZERO, tr0 /* no further checks */ + + /* Check softirqs */ + +#ifdef CONFIG_PREEMPT + pta ret_from_syscall, tr0 + blink tr0, ZERO + +resume_kernel: + pta restore_all, tr0 + + getcon KCR0, r6 + ld.l r6, TI_PRE_COUNT, r7 + beq/u r7, ZERO, tr0 + +need_resched: + ld.l r6, TI_FLAGS, r7 + movi (1 << TIF_NEED_RESCHED), r8 + and r8, r7, r8 + bne r8, ZERO, tr0 + + getcon SR, r7 + andi r7, 0xf0, r7 + bne r7, ZERO, tr0 + + movi ((PREEMPT_ACTIVE >> 16) & 65535), r8 + shori (PREEMPT_ACTIVE & 65535), r8 + st.l r6, TI_PRE_COUNT, r8 + + STI() + movi schedule, r7 + ori r7, 1, r7 + ptabs r7, tr1 + blink tr1, LINK + + st.l r6, TI_PRE_COUNT, ZERO + CLI() + + pta need_resched, tr1 + blink tr1, ZERO +#endif + + .global ret_from_syscall +ret_from_syscall: + +ret_with_reschedule: + getcon KCR0, r6 ! r6 contains current_thread_info + ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags + + ! FIXME:!!! + ! no handling of TIF_SYSCALL_TRACE yet!! 
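(For orientation, the C shape of the ret_with_reschedule / work_resched / work_notifysig sequence that follows; an illustrative sketch, not part of the patch.)

    /* sketch of the return-to-user work checks implemented below */
    unsigned long flags = current_thread_info()->flags;   /* KCR0 -> TI_FLAGS    */
    if (flags & (1 << TIF_NEED_RESCHED))
            schedule();                      /* work_resched, then re-check flags */
    else if (flags & (1 << TIF_SIGPENDING))
            do_signal(regs, 0);              /* work_notifysig; regs = frame (SP) */
    /* then fall through to restore_all */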
+ + movi (1 << TIF_NEED_RESCHED), r8 + and r8, r7, r8 + pta work_resched, tr0 + bne r8, ZERO, tr0 + + pta restore_all, tr1 + + movi (1 << TIF_SIGPENDING), r8 + and r8, r7, r8 + pta work_notifysig, tr0 + bne r8, ZERO, tr0 + + blink tr1, ZERO + +work_resched: + pta ret_from_syscall, tr0 + gettr tr0, LINK + movi schedule, r6 + ptabs r6, tr0 + blink tr0, ZERO /* Call schedule(), return on top */ + +work_notifysig: + gettr tr1, LINK + + movi do_signal, r6 + ptabs r6, tr0 + or SP, ZERO, r2 + or ZERO, ZERO, r3 + blink tr0, LINK /* Call do_signal(regs, 0), return here */ + +restore_all: + /* Do prefetches */ + + ld.q SP, FRAME_T(0), r6 + ld.q SP, FRAME_T(1), r7 + ld.q SP, FRAME_T(2), r8 + ld.q SP, FRAME_T(3), r9 + ptabs r6, tr0 + ptabs r7, tr1 + ptabs r8, tr2 + ptabs r9, tr3 + ld.q SP, FRAME_T(4), r6 + ld.q SP, FRAME_T(5), r7 + ld.q SP, FRAME_T(6), r8 + ld.q SP, FRAME_T(7), r9 + ptabs r6, tr4 + ptabs r7, tr5 + ptabs r8, tr6 + ptabs r9, tr7 + + ld.q SP, FRAME_R(0), r0 + ld.q SP, FRAME_R(1), r1 + ld.q SP, FRAME_R(2), r2 + ld.q SP, FRAME_R(3), r3 + ld.q SP, FRAME_R(4), r4 + ld.q SP, FRAME_R(5), r5 + ld.q SP, FRAME_R(6), r6 + ld.q SP, FRAME_R(7), r7 + ld.q SP, FRAME_R(8), r8 + ld.q SP, FRAME_R(9), r9 + ld.q SP, FRAME_R(10), r10 + ld.q SP, FRAME_R(11), r11 + ld.q SP, FRAME_R(12), r12 + ld.q SP, FRAME_R(13), r13 + ld.q SP, FRAME_R(14), r14 + + ld.q SP, FRAME_R(16), r16 + ld.q SP, FRAME_R(17), r17 + ld.q SP, FRAME_R(18), r18 + ld.q SP, FRAME_R(19), r19 + ld.q SP, FRAME_R(20), r20 + ld.q SP, FRAME_R(21), r21 + ld.q SP, FRAME_R(22), r22 + ld.q SP, FRAME_R(23), r23 + ld.q SP, FRAME_R(24), r24 + ld.q SP, FRAME_R(25), r25 + ld.q SP, FRAME_R(26), r26 + ld.q SP, FRAME_R(27), r27 + ld.q SP, FRAME_R(28), r28 + ld.q SP, FRAME_R(29), r29 + ld.q SP, FRAME_R(30), r30 + ld.q SP, FRAME_R(31), r31 + ld.q SP, FRAME_R(32), r32 + ld.q SP, FRAME_R(33), r33 + ld.q SP, FRAME_R(34), r34 + ld.q SP, FRAME_R(35), r35 + ld.q SP, FRAME_R(36), r36 + ld.q SP, FRAME_R(37), r37 + ld.q SP, FRAME_R(38), r38 + ld.q SP, FRAME_R(39), r39 + ld.q SP, FRAME_R(40), r40 + ld.q SP, FRAME_R(41), r41 + ld.q SP, FRAME_R(42), r42 + ld.q SP, FRAME_R(43), r43 + ld.q SP, FRAME_R(44), r44 + ld.q SP, FRAME_R(45), r45 + ld.q SP, FRAME_R(46), r46 + ld.q SP, FRAME_R(47), r47 + ld.q SP, FRAME_R(48), r48 + ld.q SP, FRAME_R(49), r49 + ld.q SP, FRAME_R(50), r50 + ld.q SP, FRAME_R(51), r51 + ld.q SP, FRAME_R(52), r52 + ld.q SP, FRAME_R(53), r53 + ld.q SP, FRAME_R(54), r54 + ld.q SP, FRAME_R(55), r55 + ld.q SP, FRAME_R(56), r56 + ld.q SP, FRAME_R(57), r57 + ld.q SP, FRAME_R(58), r58 + + getcon SR, r59 + movi SR_BLOCK_EXC, r60 + or r59, r60, r59 + putcon r59, SR /* SR.BL = 1, keep nesting out */ + ld.q SP, FRAME_S(FSSR), r61 + ld.q SP, FRAME_S(FSPC), r62 + movi SR_ASID_MASK, r60 + and r59, r60, r59 + andc r61, r60, r61 /* Clear out older ASID */ + or r59, r61, r61 /* Retain current ASID */ + putcon r61, SSR + putcon r62, SPC + + /* Ignore FSYSCALL_ID */ + + ld.q SP, FRAME_R(59), r59 + ld.q SP, FRAME_R(60), r60 + ld.q SP, FRAME_R(61), r61 + ld.q SP, FRAME_R(62), r62 + + /* Last touch */ + ld.q SP, FRAME_R(15), SP + rte + nop + +/* + * Third level handlers for VBR-based exceptions. Adapting args to + * and/or deflecting to fourth level handlers. + * + * Fourth level handlers interface. + * Most are C-coded handlers directly pointed by the trap_jtable. + * (Third = Fourth level) + * Inputs: + * (r2) fault/interrupt code, entry number (e.g. NMI = 14, + * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...) 
+ * (r3) struct pt_regs *, original register's frame pointer + * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault) + * (r5) TRA control register (for syscall/debug benefit only) + * (LINK) return address + * (SP) = r3 + * + * Kernel TLB fault handlers will get a slightly different interface. + * (r2) struct pt_regs *, original register's frame pointer + * (r3) writeaccess, whether it's a store fault as opposed to load fault + * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault + * (r5) Effective Address of fault + * (LINK) return address + * (SP) = r2 + * + * fpu_error_or_IRQ? is a helper to deflect to the right cause. + * + */ +tlb_miss_load: + or SP, ZERO, r2 + or ZERO, ZERO, r3 /* Read */ + or ZERO, ZERO, r4 /* Data */ + getcon TEA, r5 + pta call_do_page_fault, tr0 + beq ZERO, ZERO, tr0 + +tlb_miss_store: + or SP, ZERO, r2 + movi 1, r3 /* Write */ + or ZERO, ZERO, r4 /* Data */ + getcon TEA, r5 + pta call_do_page_fault, tr0 + beq ZERO, ZERO, tr0 + +itlb_miss_or_IRQ: + pta its_IRQ, tr0 + beqi/u r4, EVENT_INTERRUPT, tr0 + or SP, ZERO, r2 + or ZERO, ZERO, r3 /* Read */ + movi 1, r4 /* Text */ + getcon TEA, r5 + /* Fall through */ + +call_do_page_fault: + movi do_page_fault, r6 + ptabs r6, tr0 + blink tr0, ZERO + +fpu_error_or_IRQA: + pta its_IRQ, tr0 + beqi/l r4, EVENT_INTERRUPT, tr0 +#ifndef CONFIG_NOFPU_SUPPORT + movi do_fpu_state_restore, r6 +#else + movi do_exception_error, r6 +#endif + ptabs r6, tr0 + blink tr0, ZERO + +fpu_error_or_IRQB: + pta its_IRQ, tr0 + beqi/l r4, EVENT_INTERRUPT, tr0 +#ifndef CONFIG_NOFPU_SUPPORT + movi do_fpu_state_restore, r6 +#else + movi do_exception_error, r6 +#endif + ptabs r6, tr0 + blink tr0, ZERO + +its_IRQ: + movi do_IRQ, r6 + ptabs r6, tr0 + blink tr0, ZERO + +/* + * system_call/unknown_trap third level handler: + * + * Inputs: + * (r2) fault/interrupt code, entry number (TRAP = 11) + * (r3) struct pt_regs *, original register's frame pointer + * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault) + * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr) + * (SP) = r3 + * (LINK) return address: ret_from_exception + * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7) + * + * Outputs: + * (*r3) Syscall reply (Saved r2) + * (LINK) In case of syscall only it can be scrapped. + * Common second level post handler will be ret_from_syscall. + * Common (non-trace) exit point to that is syscall_ret (saving + * result to r2). Common bad exit point is syscall_bad (returning + * ENOSYS then saved to r2). + * + */ + +unknown_trap: + /* Unknown Trap or User Trace */ + movi do_unknown_trapa, r6 + ptabs r6, tr0 + ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */ + andi r2, 0x1ff, r2 /* r2 = syscall # */ + blink tr0, LINK + + pta syscall_ret, tr0 + blink tr0, ZERO + + /* New syscall implementation*/ +system_call: + pta unknown_trap, tr0 + or r5, ZERO, r4 /* TRA (=r5) -> r4 */ + shlri r4, 20, r4 + bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */ + + /* It's a system call */ + st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */ + andi r5, 0x1ff, r5 /* syscall # -> r5 */ + + STI() + + pta syscall_allowed, tr0 + movi NR_syscalls - 1, r4 /* Last valid */ + bgeu/l r4, r5, tr0 + +syscall_bad: + /* Return ENOSYS ! 
*/ + movi -(ENOSYS), r2 /* Fall-through */ + + .global syscall_ret +syscall_ret: + st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */ + +#if POOR_MANS_STRACE + /* nothing useful in registers at this point */ + + movi evt_debug2, r5 + ori r5, 1, r5 + ptabs r5, tr0 + ld.q SP, FRAME_R(9), r2 + or SP, ZERO, r3 + blink tr0, LINK +#endif + + ld.q SP, FRAME_S(FSPC), r2 + addi r2, 4, r2 /* Move PC, being pre-execution event */ + st.q SP, FRAME_S(FSPC), r2 + pta ret_from_syscall, tr0 + blink tr0, ZERO + + +/* A different return path for ret_from_fork, because we now need + * to call schedule_tail with the later kernels. Because prev is + * loaded into r2 by switch_to() means we can just call it straight away + */ + +.global ret_from_fork +ret_from_fork: + + movi schedule_tail,r5 + ori r5, 1, r5 + ptabs r5, tr0 + blink tr0, LINK + +#if POOR_MANS_STRACE + /* nothing useful in registers at this point */ + + movi evt_debug2, r5 + ori r5, 1, r5 + ptabs r5, tr0 + ld.q SP, FRAME_R(9), r2 + or SP, ZERO, r3 + blink tr0, LINK +#endif + + ld.q SP, FRAME_S(FSPC), r2 + addi r2, 4, r2 /* Move PC, being pre-execution event */ + st.q SP, FRAME_S(FSPC), r2 + pta ret_from_syscall, tr0 + blink tr0, ZERO + + + +syscall_allowed: + /* Use LINK to deflect the exit point, default is syscall_ret */ + pta syscall_ret, tr0 + gettr tr0, LINK + pta syscall_notrace, tr0 + + getcon KCR0, r2 + ld.l r2, TI_FLAGS, r4 + movi (1 << TIF_SYSCALL_TRACE), r6 + and r6, r4, r6 + beq/l r6, ZERO, tr0 + + /* Trace it by calling syscall_trace before and after */ + movi syscall_trace, r4 + ptabs r4, tr0 + blink tr0, LINK + /* Reload syscall number as r5 is trashed by syscall_trace */ + ld.q SP, FRAME_S(FSYSCALL_ID), r5 + andi r5, 0x1ff, r5 + + pta syscall_ret_trace, tr0 + gettr tr0, LINK + +syscall_notrace: + /* Now point to the appropriate 4th level syscall handler */ + movi sys_call_table, r4 + shlli r5, 2, r5 + ldx.l r4, r5, r5 + ptabs r5, tr0 + + /* Prepare original args */ + ld.q SP, FRAME_R(2), r2 + ld.q SP, FRAME_R(3), r3 + ld.q SP, FRAME_R(4), r4 + ld.q SP, FRAME_R(5), r5 + ld.q SP, FRAME_R(6), r6 + ld.q SP, FRAME_R(7), r7 + + /* And now the trick for those syscalls requiring regs * ! */ + or SP, ZERO, r8 + + /* Call it */ + blink tr0, ZERO /* LINK is already properly set */ + +syscall_ret_trace: + /* We get back here only if under trace */ + st.q SP, FRAME_R(9), r2 /* Save return value */ + + movi syscall_trace, LINK + ptabs LINK, tr0 + blink tr0, LINK + + /* This needs to be done after any syscall tracing */ + ld.q SP, FRAME_S(FSPC), r2 + addi r2, 4, r2 /* Move PC, being pre-execution event */ + st.q SP, FRAME_S(FSPC), r2 + + pta ret_from_syscall, tr0 + blink tr0, ZERO /* Resume normal return sequence */ + +/* + * --- Switch to running under a particular ASID and return the previous ASID value + * --- The caller is assumed to have done a cli before calling this. 
+ * + * Input r2 : new ASID + * Output r2 : old ASID + */ + + .global switch_and_save_asid +switch_and_save_asid: + getcon sr, r0 + movi 255, r4 + shlli r4, 16, r4 /* r4 = mask to select ASID */ + and r0, r4, r3 /* r3 = shifted old ASID */ + andi r2, 255, r2 /* mask down new ASID */ + shlli r2, 16, r2 /* align new ASID against SR.ASID */ + andc r0, r4, r0 /* efface old ASID from SR */ + or r0, r2, r0 /* insert the new ASID */ + putcon r0, ssr + movi 1f, r0 + putcon r0, spc + rte + nop +1: + ptabs LINK, tr0 + shlri r3, 16, r2 /* r2 = old ASID */ + blink tr0, r63 + + .global route_to_panic_handler +route_to_panic_handler: + /* Switch to real mode, goto panic_handler, don't return. Useful for + last-chance debugging, e.g. if no output wants to go to the console. + */ + + movi panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1 + ptabs r1, tr0 + pta 1f, tr1 + gettr tr1, r0 + putcon r0, spc + getcon sr, r0 + movi 1, r1 + shlli r1, 31, r1 + andc r0, r1, r0 + putcon r0, ssr + rte + nop +1: /* Now in real mode */ + blink tr0, r63 + nop + + .global peek_real_address_q +peek_real_address_q: + /* Two args: + r2 : real mode address to peek + r2(out) : result quadword + + This is provided as a cheapskate way of manipulating device + registers for debugging (to avoid the need to onchip_remap the debug + module, and to avoid the need to onchip_remap the watchpoint + controller in a way that identity maps sufficient bits to avoid the + SH5-101 cut2 silicon defect). + + This code is not performance critical + */ + + add.l r2, r63, r2 /* sign extend address */ + getcon sr, r0 /* r0 = saved original SR */ + movi 1, r1 + shlli r1, 28, r1 + or r0, r1, r1 /* r0 with block bit set */ + putcon r1, sr /* now in critical section */ + movi 1, r36 + shlli r36, 31, r36 + andc r1, r36, r1 /* turn sr.mmu off in real mode section */ + + putcon r1, ssr + movi .peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */ + movi 1f, r37 /* virtual mode return addr */ + putcon r36, spc + + synco + rte + nop + +.peek0: /* come here in real mode, don't touch caches!! + still in critical section (sr.bl==1) */ + putcon r0, ssr + putcon r37, spc + /* Here's the actual peek. If the address is bad, all bets are now off + * what will happen (handlers invoked in real-mode = bad news) */ + ld.q r2, 0, r2 + synco + rte /* Back to virtual mode */ + nop + +1: + ptabs LINK, tr0 + blink tr0, r63 + + .global poke_real_address_q +poke_real_address_q: + /* Two args: + r2 : real mode address to poke + r3 : quadword value to write. + + This is provided as a cheapskate way of manipulating device + registers for debugging (to avoid the need to onchip_remap the debug + module, and to avoid the need to onchip_remap the watchpoint + controller in a way that identity maps sufficient bits to avoid the + SH5-101 cut2 silicon defect). + + This code is not performance critical + */ + + add.l r2, r63, r2 /* sign extend address */ + getcon sr, r0 /* r0 = saved original SR */ + movi 1, r1 + shlli r1, 28, r1 + or r0, r1, r1 /* r0 with block bit set */ + putcon r1, sr /* now in critical section */ + movi 1, r36 + shlli r36, 31, r36 + andc r1, r36, r1 /* turn sr.mmu off in real mode section */ + + putcon r1, ssr + movi .poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */ + movi 1f, r37 /* virtual mode return addr */ + putcon r36, spc + + synco + rte + nop + +.poke0: /* come here in real mode, don't touch caches!! + still in critical section (sr.bl==1) */ + putcon r0, ssr + putcon r37, spc + /* Here's the actual poke. 
If the address is bad, all bets are now off + * what will happen (handlers invoked in real-mode = bad news) */ + st.q r2, 0, r3 + synco + rte /* Back to virtual mode */ + nop + +1: + ptabs LINK, tr0 + blink tr0, r63 + +/* + * --- User Access Handling Section + */ + +/* + * User Access support. It all moved to non inlined Assembler + * functions in here. + * + * __kernel_size_t __copy_user(void *__to, const void *__from, + * __kernel_size_t __n) + * + * Inputs: + * (r2) target address + * (r3) source address + * (r4) size in bytes + * + * Ouputs: + * (*r2) target data + * (r2) non-copied bytes + * + * If a fault occurs on the user pointer, bail out early and return the + * number of bytes not copied in r2. + * Strategy : for large blocks, call a real memcpy function which can + * move >1 byte at a time using unaligned ld/st instructions, and can + * manipulate the cache using prefetch + alloco to improve the speed + * further. If a fault occurs in that function, just revert to the + * byte-by-byte approach used for small blocks; this is rare so the + * performance hit for that case does not matter. + * + * For small blocks it's not worth the overhead of setting up and calling + * the memcpy routine; do the copy a byte at a time. + * + */ + .global __copy_user +__copy_user: + pta __copy_user_byte_by_byte, tr1 + movi 16, r0 ! this value is a best guess, should tune it by benchmarking + bge/u r0, r4, tr1 + pta copy_user_memcpy, tr0 + addi SP, -32, SP + /* Save arguments in case we have to fix-up unhandled page fault */ + st.q SP, 0, r2 + st.q SP, 8, r3 + st.q SP, 16, r4 + st.q SP, 24, r35 ! r35 is callee-save + /* Save LINK in a register to reduce RTS time later (otherwise + ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */ + ori LINK, 0, r35 + blink tr0, LINK + + /* Copy completed normally if we get back here */ + ptabs r35, tr0 + ld.q SP, 24, r35 + /* don't restore r2-r4, pointless */ + /* set result=r2 to zero as the copy must have succeeded. */ + or r63, r63, r2 + addi SP, 32, SP + blink tr0, r63 ! RTS + + .global __copy_user_fixup +__copy_user_fixup: + /* Restore stack frame */ + ori r35, 0, LINK + ld.q SP, 24, r35 + ld.q SP, 16, r4 + ld.q SP, 8, r3 + ld.q SP, 0, r2 + addi SP, 32, SP + /* Fall through to original code, in the 'same' state we entered with */ + +/* The slow byte-by-byte method is used if the fast copy traps due to a bad + user address. In that rare case, the speed drop can be tolerated. 
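(Illustrative C shape of the __copy_user strategy described above; a sketch, not part of the patch. fast_copy_ok() stands in for the copy_user_memcpy plus __copy_user_fixup path, and copy_byte_checked() for the faulting ld.b/stx.b pair in __copy_user_byte_by_byte below.)

    __kernel_size_t copy_user_sketch(char *to, const char *from, __kernel_size_t n)
    {
            if (n > 16 && fast_copy_ok(to, from, n))
                    return 0;                        /* everything copied        */
            while (n && copy_byte_checked(to++, from++))
                    n--;                             /* stop at the first fault  */
            return n;                                /* bytes NOT copied         */
    }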
*/ +__copy_user_byte_by_byte: + pta ___copy_user_exit, tr1 + pta ___copy_user1, tr0 + beq/u r4, r63, tr1 /* early exit for zero length copy */ + sub r2, r3, r0 + addi r0, -1, r0 + +___copy_user1: + ld.b r3, 0, r5 /* Fault address 1 */ + + /* Could rewrite this to use just 1 add, but the second comes 'free' + due to load latency */ + addi r3, 1, r3 + addi r4, -1, r4 /* No real fixup required */ +___copy_user2: + stx.b r3, r0, r5 /* Fault address 2 */ + bne r4, ZERO, tr0 + +___copy_user_exit: + or r4, ZERO, r2 + ptabs LINK, tr0 + blink tr0, ZERO + +/* + * __kernel_size_t __clear_user(void *addr, __kernel_size_t size) + * + * Inputs: + * (r2) target address + * (r3) size in bytes + * + * Ouputs: + * (*r2) zero-ed target data + * (r2) non-zero-ed bytes + */ + .global __clear_user +__clear_user: + pta ___clear_user_exit, tr1 + pta ___clear_user1, tr0 + beq/u r3, r63, tr1 + +___clear_user1: + st.b r2, 0, ZERO /* Fault address */ + addi r2, 1, r2 + addi r3, -1, r3 /* No real fixup required */ + bne r3, ZERO, tr0 + +___clear_user_exit: + or r3, ZERO, r2 + ptabs LINK, tr0 + blink tr0, ZERO + + +/* + * int __strncpy_from_user(unsigned long __dest, unsigned long __src, + * int __count) + * + * Inputs: + * (r2) target address + * (r3) source address + * (r4) maximum size in bytes + * + * Ouputs: + * (*r2) copied data + * (r2) -EFAULT (in case of faulting) + * copied data (otherwise) + */ + .global __strncpy_from_user +__strncpy_from_user: + pta ___strncpy_from_user1, tr0 + pta ___strncpy_from_user_done, tr1 + or r4, ZERO, r5 /* r5 = original count */ + beq/u r4, r63, tr1 /* early exit if r4==0 */ + movi -(EFAULT), r6 /* r6 = reply, no real fixup */ + or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ + +___strncpy_from_user1: + ld.b r3, 0, r7 /* Fault address: only in reading */ + st.b r2, 0, r7 + addi r2, 1, r2 + addi r3, 1, r3 + beq/u ZERO, r7, tr1 + addi r4, -1, r4 /* return real number of copied bytes */ + bne/l ZERO, r4, tr0 + +___strncpy_from_user_done: + sub r5, r4, r6 /* If done, return copied */ + +___strncpy_from_user_exit: + or r6, ZERO, r2 + ptabs LINK, tr0 + blink tr0, ZERO + +/* + * extern long __strnlen_user(const char *__s, long __n) + * + * Inputs: + * (r2) source address + * (r3) source size in bytes + * + * Ouputs: + * (r2) -EFAULT (in case of faulting) + * string length (otherwise) + */ + .global __strnlen_user +__strnlen_user: + pta ___strnlen_user_set_reply, tr0 + pta ___strnlen_user1, tr1 + or ZERO, ZERO, r5 /* r5 = counter */ + movi -(EFAULT), r6 /* r6 = reply, no real fixup */ + or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */ + beq r3, ZERO, tr0 + +___strnlen_user1: + ldx.b r2, r5, r7 /* Fault address: only in reading */ + addi r3, -1, r3 /* No real fixup */ + addi r5, 1, r5 + beq r3, ZERO, tr0 + bne r7, ZERO, tr1 +! The line below used to be active. This meant led to a junk byte lying between each pair +! of entries in the argv & envp structures in memory. Whilst the program saw the right data +! via the argv and envp arguments to main, it meant the 'flat' representation visible through +! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example. +! 
+ +/* + * extern long __get_user_asm_?(void *val, long addr) + * + * Inputs: + * (r2) dest address + * (r3) source address (in User Space) + * + * Outputs: + * (r2) -EFAULT (faulting) + * 0 (not faulting) + */ + .global __get_user_asm_b +__get_user_asm_b: + or r2, ZERO, r4 + movi -(EFAULT), r2 /* r2 = reply, no real fixup */ + +___get_user_asm_b1: + ld.b r3, 0, r5 /* r5 = data */ + st.b r4, 0, r5 + or ZERO, ZERO, r2 + +___get_user_asm_b_exit: + ptabs LINK, tr0 + blink tr0, ZERO + + + .global __get_user_asm_w +__get_user_asm_w: + or r2, ZERO, r4 + movi -(EFAULT), r2 /* r2 = reply, no real fixup */ + +___get_user_asm_w1: + ld.w r3, 0, r5 /* r5 = data */ + st.w r4, 0, r5 + or ZERO, ZERO, r2 + +___get_user_asm_w_exit: + ptabs LINK, tr0 + blink tr0, ZERO + + + .global __get_user_asm_l +__get_user_asm_l: + or r2, ZERO, r4 + movi -(EFAULT), r2 /* r2 = reply, no real fixup */ + +___get_user_asm_l1: + ld.l r3, 0, r5 /* r5 = data */ + st.l r4, 0, r5 + or ZERO, ZERO, r2 + +___get_user_asm_l_exit: + ptabs LINK, tr0 + blink tr0, ZERO + + + .global __get_user_asm_q +__get_user_asm_q: + or r2, ZERO, r4 + movi -(EFAULT), r2 /* r2 = reply, no real fixup */ + +___get_user_asm_q1: + ld.q r3, 0, r5 /* r5 = data */ + st.q r4, 0, r5 + or ZERO, ZERO, r2 + +___get_user_asm_q_exit: + ptabs LINK, tr0 + blink tr0, ZERO + +/* + * extern long __put_user_asm_?(void *pval, long addr) + * + * Inputs: + * (r2) kernel pointer to value + * (r3) dest address (in User Space) + * + * Outputs: + * (r2) -EFAULT (faulting) + * 0 (not faulting) + */ + .global __put_user_asm_b +__put_user_asm_b: + ld.b r2, 0, r4 /* r4 = data */ + movi -(EFAULT), r2 /* r2 = reply, no real fixup */ + +___put_user_asm_b1: + st.b r3, 0, r4 + or ZERO, ZERO, r2 + +___put_user_asm_b_exit: + ptabs LINK, tr0 + blink tr0, ZERO + + + .global __put_user_asm_w +__put_user_asm_w: + ld.w r2, 0, r4 /* r4 = data */ + movi -(EFAULT), r2 /* r2 = reply, no real fixup */ + +___put_user_asm_w1: + st.w r3, 0, r4 + or ZERO, ZERO, r2 + +___put_user_asm_w_exit: + ptabs LINK, tr0 + blink tr0, ZERO + + + .global __put_user_asm_l +__put_user_asm_l: + ld.l r2, 0, r4 /* r4 = data */ + movi -(EFAULT), r2 /* r2 = reply, no real fixup */ + +___put_user_asm_l1: + st.l r3, 0, r4 + or ZERO, ZERO, r2 + +___put_user_asm_l_exit: + ptabs LINK, tr0 + blink tr0, ZERO + + + .global __put_user_asm_q +__put_user_asm_q: + ld.q r2, 0, r4 /* r4 = data */ + movi -(EFAULT), r2 /* r2 = reply, no real fixup */ + +___put_user_asm_q1: + st.q r3, 0, r4 + or ZERO, ZERO, r2 + +___put_user_asm_q_exit: + ptabs LINK, tr0 + blink tr0, ZERO + +panic_stash_regs: + /* The idea is : when we get an unhandled panic, we dump the registers + to a known memory location, then just sit in a tight loop. + This allows the human to look at the memory region through the GDB + session (assuming the debug module's SHwy initiator isn't locked up + or anything), to hopefully analyze the cause of the panic. */ + + /* On entry, former r15 (SP) is in DCR + former r0 is at resvec_saved_area + 0 + former r1 is at resvec_saved_area + 8 + former tr0 is at resvec_saved_area + 32 + DCR is the only register whose value is lost altogether. + */ + + movi 0xffffffff80000000, r0 ! phy of dump area + ld.q SP, 0x000, r1 ! former r0 + st.q r0, 0x000, r1 + ld.q SP, 0x008, r1 !
former r1 + st.q r0, 0x008, r1 + st.q r0, 0x010, r2 + st.q r0, 0x018, r3 + st.q r0, 0x020, r4 + st.q r0, 0x028, r5 + st.q r0, 0x030, r6 + st.q r0, 0x038, r7 + st.q r0, 0x040, r8 + st.q r0, 0x048, r9 + st.q r0, 0x050, r10 + st.q r0, 0x058, r11 + st.q r0, 0x060, r12 + st.q r0, 0x068, r13 + st.q r0, 0x070, r14 + getcon dcr, r14 + st.q r0, 0x078, r14 + st.q r0, 0x080, r16 + st.q r0, 0x088, r17 + st.q r0, 0x090, r18 + st.q r0, 0x098, r19 + st.q r0, 0x0a0, r20 + st.q r0, 0x0a8, r21 + st.q r0, 0x0b0, r22 + st.q r0, 0x0b8, r23 + st.q r0, 0x0c0, r24 + st.q r0, 0x0c8, r25 + st.q r0, 0x0d0, r26 + st.q r0, 0x0d8, r27 + st.q r0, 0x0e0, r28 + st.q r0, 0x0e8, r29 + st.q r0, 0x0f0, r30 + st.q r0, 0x0f8, r31 + st.q r0, 0x100, r32 + st.q r0, 0x108, r33 + st.q r0, 0x110, r34 + st.q r0, 0x118, r35 + st.q r0, 0x120, r36 + st.q r0, 0x128, r37 + st.q r0, 0x130, r38 + st.q r0, 0x138, r39 + st.q r0, 0x140, r40 + st.q r0, 0x148, r41 + st.q r0, 0x150, r42 + st.q r0, 0x158, r43 + st.q r0, 0x160, r44 + st.q r0, 0x168, r45 + st.q r0, 0x170, r46 + st.q r0, 0x178, r47 + st.q r0, 0x180, r48 + st.q r0, 0x188, r49 + st.q r0, 0x190, r50 + st.q r0, 0x198, r51 + st.q r0, 0x1a0, r52 + st.q r0, 0x1a8, r53 + st.q r0, 0x1b0, r54 + st.q r0, 0x1b8, r55 + st.q r0, 0x1c0, r56 + st.q r0, 0x1c8, r57 + st.q r0, 0x1d0, r58 + st.q r0, 0x1d8, r59 + st.q r0, 0x1e0, r60 + st.q r0, 0x1e8, r61 + st.q r0, 0x1f0, r62 + st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake... + + ld.q SP, 0x020, r1 ! former tr0 + st.q r0, 0x200, r1 + gettr tr1, r1 + st.q r0, 0x208, r1 + gettr tr2, r1 + st.q r0, 0x210, r1 + gettr tr3, r1 + st.q r0, 0x218, r1 + gettr tr4, r1 + st.q r0, 0x220, r1 + gettr tr5, r1 + st.q r0, 0x228, r1 + gettr tr6, r1 + st.q r0, 0x230, r1 + gettr tr7, r1 + st.q r0, 0x238, r1 + + getcon sr, r1 + getcon ssr, r2 + getcon pssr, r3 + getcon spc, r4 + getcon pspc, r5 + getcon intevt, r6 + getcon expevt, r7 + getcon pexpevt, r8 + getcon tra, r9 + getcon tea, r10 + getcon kcr0, r11 + getcon kcr1, r12 + getcon vbr, r13 + getcon resvec, r14 + + st.q r0, 0x240, r1 + st.q r0, 0x248, r2 + st.q r0, 0x250, r3 + st.q r0, 0x258, r4 + st.q r0, 0x260, r5 + st.q r0, 0x268, r6 + st.q r0, 0x270, r7 + st.q r0, 0x278, r8 + st.q r0, 0x280, r9 + st.q r0, 0x288, r10 + st.q r0, 0x290, r11 + st.q r0, 0x298, r12 + st.q r0, 0x2a0, r13 + st.q r0, 0x2a8, r14 + + getcon SPC,r2 + getcon SSR,r3 + getcon EXPEVT,r4 + /* Prepare to jump to C - physical address */ + movi panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1 + ori r1, 1, r1 + ptabs r1, tr0 + getcon DCR, SP + blink tr0, ZERO + nop + nop + nop + nop + + + + +/* + * --- Signal Handling Section + */ + +/* + * extern long long _sa_default_rt_restorer + * extern long long _sa_default_restorer + * + * or, better, + * + * extern void _sa_default_rt_restorer(void) + * extern void _sa_default_restorer(void) + * + * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn() + * from user space. Copied into user space by signal management. + * Both must be quad aligned and 2 quad long (4 instructions). + * + */ + .balign 8 + .global sa_default_rt_restorer +sa_default_rt_restorer: + movi 0x10, r9 + shori __NR_rt_sigreturn, r9 + trapa r9 + nop + + .balign 8 + .global sa_default_restorer +sa_default_restorer: + movi 0x10, r9 + shori __NR_sigreturn, r9 + trapa r9 + nop + +/* + * --- __ex_table Section + */ + +/* + * User Access Exception Table. 
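The table that follows pairs the address of each white-listed user-access instruction with the address of its recovery label. Conceptually, the fault path searches those pairs and, on a hit, resumes execution at the fixup instead of treating the fault as fatal; a rough, illustrative C model of that lookup:

    /* One entry: faulting instruction address and its fixup address. */
    struct exception_table_entry {
            unsigned long insn;
            unsigned long fixup;
    };

    /* Return the fixup address for a faulting PC, or 0 if the fault did not
     * happen in one of the registered user-access instructions. */
    static unsigned long search_table(const struct exception_table_entry *first,
                                      const struct exception_table_entry *last,
                                      unsigned long pc)
    {
            const struct exception_table_entry *e;

            for (e = first; e <= last; e++)
                    if (e->insn == pc)
                            return e->fixup;
            return 0;
    }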
+ */ + .section __ex_table, "a" + + .global asm_uaccess_start /* Just a marker */ +asm_uaccess_start: + + .long ___copy_user1, ___copy_user_exit + .long ___copy_user2, ___copy_user_exit + .long ___clear_user1, ___clear_user_exit + .long ___strncpy_from_user1, ___strncpy_from_user_exit + .long ___strnlen_user1, ___strnlen_user_exit + .long ___get_user_asm_b1, ___get_user_asm_b_exit + .long ___get_user_asm_w1, ___get_user_asm_w_exit + .long ___get_user_asm_l1, ___get_user_asm_l_exit + .long ___get_user_asm_q1, ___get_user_asm_q_exit + .long ___put_user_asm_b1, ___put_user_asm_b_exit + .long ___put_user_asm_w1, ___put_user_asm_w_exit + .long ___put_user_asm_l1, ___put_user_asm_l_exit + .long ___put_user_asm_q1, ___put_user_asm_q_exit + + .global asm_uaccess_end /* Just a marker */ +asm_uaccess_end: + + + + +/* + * --- .text.init Section + */ + + .section .text.init, "ax" + +/* + * void trap_init (void) + * + */ + .global trap_init +trap_init: + addi SP, -24, SP /* Room to save r28/r29/r30 */ + st.q SP, 0, r28 + st.q SP, 8, r29 + st.q SP, 16, r30 + + /* Set VBR and RESVEC */ + movi LVBR_block, r19 + andi r19, -4, r19 /* reset MMUOFF + reserved */ + /* For RESVEC exceptions we force the MMU off, which means we need the + physical address. */ + movi LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20 + andi r20, -4, r20 /* reset reserved */ + ori r20, 1, r20 /* set MMUOFF */ + putcon r19, VBR + putcon r20, RESVEC + + /* Sanity check */ + movi LVBR_block_end, r21 + andi r21, -4, r21 + movi BLOCK_SIZE, r29 /* r29 = expected size */ + or r19, ZERO, r30 + add r19, r29, r19 + + /* + * Ugly, but better loop forever now than crash afterwards. + * We should print a message, but if we touch LVBR or + * LRESVEC blocks we should not be surprised if we get stuck + * in trap_init(). + */ + pta trap_init_loop, tr1 + gettr tr1, r28 /* r28 = trap_init_loop */ + sub r21, r30, r30 /* r30 = actual size */ + + /* + * VBR/RESVEC handlers overlap by being bigger than + * allowed. Very bad. Just loop forever. + * (r28) panic/loop address + * (r29) expected size + * (r30) actual size + */ +trap_init_loop: + bne r19, r21, tr1 + + /* Now that exception vectors are set up reset SR.BL */ + getcon SR, r22 + movi SR_UNBLOCK_EXC, r23 + and r22, r23, r22 + putcon r22, SR + + addi SP, 24, SP + ptabs LINK, tr0 + blink tr0, ZERO + diff --git a/arch/sh64/kernel/fpu.c b/arch/sh64/kernel/fpu.c new file mode 100644 index 000000000..175c88a5d --- /dev/null +++ b/arch/sh64/kernel/fpu.c @@ -0,0 +1,170 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/fpu.c + * + * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli + * Copyright (C) 2002 STMicroelectronics Limited + * Author : Stuart Menefy + * + * Started from SH4 version: + * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka + * + */ + +#include +#include +#include +#include +#include + +/* + * Initially load the FPU with signalling NANS. This bit pattern + * has the property that no matter whether considered as single or as + * double precision, it still represents a signalling NAN. + */ +#define sNAN64 0xFFFFFFFFFFFFFFFFULL +#define sNAN32 0xFFFFFFFFUL + +static union sh_fpu_union init_fpuregs = { + .hard = { + .fp_regs = { [0 ... 
63] = sNAN32 }, + .fpscr = FPSCR_INIT + } +}; + +inline void fpsave(struct sh_fpu_hard_struct *fpregs) +{ + asm volatile("fst.p %0, (0*8), fp0\n\t" + "fst.p %0, (1*8), fp2\n\t" + "fst.p %0, (2*8), fp4\n\t" + "fst.p %0, (3*8), fp6\n\t" + "fst.p %0, (4*8), fp8\n\t" + "fst.p %0, (5*8), fp10\n\t" + "fst.p %0, (6*8), fp12\n\t" + "fst.p %0, (7*8), fp14\n\t" + "fst.p %0, (8*8), fp16\n\t" + "fst.p %0, (9*8), fp18\n\t" + "fst.p %0, (10*8), fp20\n\t" + "fst.p %0, (11*8), fp22\n\t" + "fst.p %0, (12*8), fp24\n\t" + "fst.p %0, (13*8), fp26\n\t" + "fst.p %0, (14*8), fp28\n\t" + "fst.p %0, (15*8), fp30\n\t" + "fst.p %0, (16*8), fp32\n\t" + "fst.p %0, (17*8), fp34\n\t" + "fst.p %0, (18*8), fp36\n\t" + "fst.p %0, (19*8), fp38\n\t" + "fst.p %0, (20*8), fp40\n\t" + "fst.p %0, (21*8), fp42\n\t" + "fst.p %0, (22*8), fp44\n\t" + "fst.p %0, (23*8), fp46\n\t" + "fst.p %0, (24*8), fp48\n\t" + "fst.p %0, (25*8), fp50\n\t" + "fst.p %0, (26*8), fp52\n\t" + "fst.p %0, (27*8), fp54\n\t" + "fst.p %0, (28*8), fp56\n\t" + "fst.p %0, (29*8), fp58\n\t" + "fst.p %0, (30*8), fp60\n\t" + "fst.p %0, (31*8), fp62\n\t" + + "fgetscr fr63\n\t" + "fst.s %0, (32*8), fr63\n\t" + : /* no output */ + : "r" (fpregs) + : "memory"); +} + + +static inline void +fpload(struct sh_fpu_hard_struct *fpregs) +{ + asm volatile("fld.p %0, (0*8), fp0\n\t" + "fld.p %0, (1*8), fp2\n\t" + "fld.p %0, (2*8), fp4\n\t" + "fld.p %0, (3*8), fp6\n\t" + "fld.p %0, (4*8), fp8\n\t" + "fld.p %0, (5*8), fp10\n\t" + "fld.p %0, (6*8), fp12\n\t" + "fld.p %0, (7*8), fp14\n\t" + "fld.p %0, (8*8), fp16\n\t" + "fld.p %0, (9*8), fp18\n\t" + "fld.p %0, (10*8), fp20\n\t" + "fld.p %0, (11*8), fp22\n\t" + "fld.p %0, (12*8), fp24\n\t" + "fld.p %0, (13*8), fp26\n\t" + "fld.p %0, (14*8), fp28\n\t" + "fld.p %0, (15*8), fp30\n\t" + "fld.p %0, (16*8), fp32\n\t" + "fld.p %0, (17*8), fp34\n\t" + "fld.p %0, (18*8), fp36\n\t" + "fld.p %0, (19*8), fp38\n\t" + "fld.p %0, (20*8), fp40\n\t" + "fld.p %0, (21*8), fp42\n\t" + "fld.p %0, (22*8), fp44\n\t" + "fld.p %0, (23*8), fp46\n\t" + "fld.p %0, (24*8), fp48\n\t" + "fld.p %0, (25*8), fp50\n\t" + "fld.p %0, (26*8), fp52\n\t" + "fld.p %0, (27*8), fp54\n\t" + "fld.p %0, (28*8), fp56\n\t" + "fld.p %0, (29*8), fp58\n\t" + "fld.p %0, (30*8), fp60\n\t" + + "fld.s %0, (32*8), fr63\n\t" + "fputscr fr63\n\t" + + "fld.p %0, (31*8), fp62\n\t" + : /* no output */ + : "r" (fpregs) ); +} + +void fpinit(struct sh_fpu_hard_struct *fpregs) +{ + *fpregs = init_fpuregs.hard; +} + +asmlinkage void +do_fpu_error(unsigned long ex, struct pt_regs *regs) +{ + struct task_struct *tsk = current; + + regs->pc += 4; + + tsk->thread.trap_no = 11; + tsk->thread.error_code = 0; + force_sig(SIGFPE, tsk); +} + + +asmlinkage void +do_fpu_state_restore(unsigned long ex, struct pt_regs *regs) +{ + void die(const char *str, struct pt_regs *regs, long err); + + if (! user_mode(regs)) + die("FPU used in kernel", regs, ex); + + regs->sr &= ~SR_FD; + + if (last_task_used_math == current) + return; + + grab_fpu(); + if (last_task_used_math != NULL) { + /* Other processes fpu state, save away */ + fpsave(&last_task_used_math->thread.fpu.hard); + } + last_task_used_math = current; + if (current->used_math) { + fpload(¤t->thread.fpu.hard); + } else { + /* First time FPU user. 
*/ + fpload(&init_fpuregs.hard); + current->used_math = 1; + } + release_fpu(); +} + diff --git a/arch/sh64/kernel/head.S b/arch/sh64/kernel/head.S new file mode 100644 index 000000000..667aef479 --- /dev/null +++ b/arch/sh64/kernel/head.S @@ -0,0 +1,373 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/head.S + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003, 2004 Paul Mundt + * + * + * benedict.gaster@superh.com: 2nd May 2002 + * Moved definition of empty_zero_page to its own section allowing + * it to be placed at an absolute address known at load time. + * + * lethal@linux-sh.org: 9th May 2003 + * Kill off GLOBAL_NAME() usage. + * + * lethal@linux-sh.org: 8th May 2004 + * Add early SCIF console DTLB mapping. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +/* + * MMU defines: TLB boundaries. + */ + +#define MMUIR_FIRST ITLB_FIXED +#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP +#define MMUIR_STEP TLB_STEP + +#define MMUDR_FIRST DTLB_FIXED +#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP +#define MMUDR_STEP TLB_STEP + +/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */ +#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1)) +#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb" +#endif + +/* + * MMU defines: Fixed TLBs. + */ +/* Deal safely with the case where the base of RAM is not 512Mb aligned */ + +#define ALIGN_512M_MASK (0xffffffffe0000000) +#define ALIGNED_EFFECTIVE ((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK) +#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK) + +#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE) + /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */ + +#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL) + /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */ + +#define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE + /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */ +#define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL + /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */ + +#ifdef CONFIG_ICACHE_DISABLED +#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */ +#else +#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */ +#endif +#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */ + +#if defined (CONFIG_DCACHE_DISABLED) +#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */ +#elif defined (CONFIG_DCACHE_WRITE_THROUGH) +#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */ + /* WT, invalidate */ +#elif defined (CONFIG_DCACHE_WRITE_BACK) +#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */ + /* WB, invalidate */ +#else +#error preprocessor flag CONFIG_DCACHE_... not recognized! 
+#endif + +#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */ + + .section .empty_zero_page, "aw" + .global empty_zero_page + +empty_zero_page: + .long 1 /* MOUNT_ROOT_RDONLY */ + .long 0 /* RAMDISK_FLAGS */ + .long 0x0200 /* ORIG_ROOT_DEV */ + .long 1 /* LOADER_TYPE */ + .long 0x00360000 /* INITRD_START */ + .long 0x000a0000 /* INITRD_SIZE */ + .long 0 + + .text + .balign 4096,0,4096 + + .section .data, "aw" + .balign PAGE_SIZE + + .section .data, "aw" + .balign PAGE_SIZE + + .global swapper_pg_dir +swapper_pg_dir: + .space PAGE_SIZE, 0 + + .global empty_bad_page +empty_bad_page: + .space PAGE_SIZE, 0 + + .global empty_bad_pte_table +empty_bad_pte_table: + .space PAGE_SIZE, 0 + + .global fpu_in_use +fpu_in_use: .quad 0 + + + .section .text, "ax" + .balign L1_CACHE_BYTES +/* + * Condition at the entry of __stext: + * . Reset state: + * . SR.FD = 1 (FPU disabled) + * . SR.BL = 1 (Exceptions disabled) + * . SR.MD = 1 (Privileged Mode) + * . SR.MMU = 0 (MMU Disabled) + * . SR.CD = 0 (CTC User Visible) + * . SR.IMASK = Undefined (Interrupt Mask) + * + * Operations supposed to be performed by __stext: + * . prevent speculative fetch onto device memory while MMU is off + * . reflect as much as possible SH5 ABI (r15, r26, r27, r18) + * . first, save CPU state and set it to something harmless + * . any CPU detection and/or endianness settings (?) + * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD + * . set initial TLB entries for cached and uncached regions + * (no fine granularity paging) + * . set initial cache state + * . enable MMU and caches + * . set CPU to a consistent state + * . registers (including stack pointer and current/KCR0) + * . NOT expecting to set Exception handling nor VBR/RESVEC/DCR + * at this stage. This is all to later Linux initialization steps. + * . initialize FPU + * . clear BSS + * . jump into start_kernel() + * . be prepared to hopeless start_kernel() returns. + * + */ + .global _stext +_stext: + /* + * Prevent speculative fetch on device memory due to + * uninitialized target registers. + */ + ptabs/u ZERO, tr0 + ptabs/u ZERO, tr1 + ptabs/u ZERO, tr2 + ptabs/u ZERO, tr3 + ptabs/u ZERO, tr4 + ptabs/u ZERO, tr5 + ptabs/u ZERO, tr6 + ptabs/u ZERO, tr7 + synci + + /* + * Read/Set CPU state. After this block: + * r29 = Initial SR + */ + getcon SR, r29 + movi SR_HARMLESS, r20 + putcon r20, SR + + /* + * Initialize EMI/LMI. To Be Done. + */ + + /* + * CPU detection and/or endianness settings (?). To Be Done. + * Pure PIC code here, please ! Just save state into r30. + * After this block: + * r30 = CPU type/Platform Endianness + */ + + /* + * Set initial TLB entries for cached and uncached regions. + * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't ! 
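The ALIGNED_EFFECTIVE and ALIGNED_PHYSICAL values used for these fixed entries are simply the configured bases rounded down to a 512Mb boundary. A stand-alone C check of that arithmetic, with invented values for the two CONFIG_ constants:

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN_512M_MASK 0xffffffffe0000000ULL

    int main(void)
    {
            /* Example values only; the real ones come from the kernel config. */
            uint64_t cached_offset = 0x80000000ULL;   /* CONFIG_CACHED_MEMORY_OFFSET */
            uint64_t memory_start  = 0x0c000000ULL;   /* CONFIG_MEMORY_START */

            if (cached_offset & ((1ULL << 29) - 1))   /* the build-time sanity check */
                    printf("offset is not a multiple of 512Mb\n");

            printf("effective base %#llx\n",
                   (unsigned long long)((cached_offset + memory_start) & ALIGN_512M_MASK));
            printf("physical base  %#llx\n",
                   (unsigned long long)(memory_start & ALIGN_512M_MASK));
            return 0;
    }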
+ */ + /* Clear ITLBs */ + pta clear_ITLB, tr1 + movi MMUIR_FIRST, r21 + movi MMUIR_END, r22 +clear_ITLB: + putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */ + addi r21, MMUIR_STEP, r21 + bne r21, r22, tr1 + + /* Clear DTLBs */ + pta clear_DTLB, tr1 + movi MMUDR_FIRST, r21 + movi MMUDR_END, r22 +clear_DTLB: + putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */ + addi r21, MMUDR_STEP, r21 + bne r21, r22, tr1 + + /* Map one big (512Mb) page for ITLB */ + movi MMUIR_FIRST, r21 + movi MMUIR_TEXT_L, r22 /* PTEL first */ + add.l r22, r63, r22 /* Sign extend */ + putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */ + movi MMUIR_TEXT_H, r22 /* PTEH last */ + add.l r22, r63, r22 /* Sign extend */ + putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */ + + /* Map one big CACHED (512Mb) page for DTLB */ + movi MMUDR_FIRST, r21 + movi MMUDR_CACHED_L, r22 /* PTEL first */ + add.l r22, r63, r22 /* Sign extend */ + putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */ + movi MMUDR_CACHED_H, r22 /* PTEH last */ + add.l r22, r63, r22 /* Sign extend */ + putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */ + +#ifdef CONFIG_EARLY_PRINTK + /* + * Setup a DTLB translation for SCIF phys. + */ + addi r21, MMUDR_STEP, r21 + movi 0x0a03, r22 /* SCIF phys */ + shori 0x0148, r22 + putcfg r21, 1, r22 /* PTEL first */ + movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */ + shori 0x0003, r22 + putcfg r21, 0, r22 /* PTEH last */ +#endif + + /* + * Set cache behaviours. + */ + /* ICache */ + movi ICCR_BASE, r21 + movi ICCR0_INIT_VAL, r22 + movi ICCR1_INIT_VAL, r23 + putcfg r21, ICCR_REG0, r22 + putcfg r21, ICCR_REG1, r23 + + /* OCache */ + movi OCCR_BASE, r21 + movi OCCR0_INIT_VAL, r22 + movi OCCR1_INIT_VAL, r23 + putcfg r21, OCCR_REG0, r22 + putcfg r21, OCCR_REG1, r23 + + + /* + * Enable Caches and MMU. Do the first non-PIC jump. + * Now head.S global variables, constants and externs + * can be used. + */ + getcon SR, r21 + movi SR_ENABLE_MMU, r22 + or r21, r22, r21 + putcon r21, SSR + movi hyperspace, r22 + ori r22, 1, r22 /* Make it SHmedia, not required but..*/ + putcon r22, SPC + synco + rte /* And now go into the hyperspace ... */ +hyperspace: /* ... that's the next instruction ! */ + + /* + * Set CPU to a consistent state. + * r31 = FPU support flag + * tr0/tr7 in use. Others give a chance to loop somewhere safe + */ + movi start_kernel, r32 + ori r32, 1, r32 + + ptabs r32, tr0 /* r32 = _start_kernel address */ + pta/u hopeless, tr1 + pta/u hopeless, tr2 + pta/u hopeless, tr3 + pta/u hopeless, tr4 + pta/u hopeless, tr5 + pta/u hopeless, tr6 + pta/u hopeless, tr7 + gettr tr1, r28 /* r28 = hopeless address */ + + /* Set initial stack pointer */ + movi init_thread_union, SP + putcon SP, KCR0 /* Set current to init_task */ + movi THREAD_SIZE, r22 /* Point to the end */ + add SP, r22, SP + + /* + * Initialize FPU. + * Keep FPU flag in r31. After this block: + * r31 = FPU flag + */ + movi fpu_in_use, r31 /* Temporary */ + +#ifndef CONFIG_NOFPU_SUPPORT + getcon SR, r21 + movi SR_ENABLE_FPU, r22 + and r21, r22, r22 + putcon r22, SR /* Try to enable */ + getcon SR, r22 + xor r21, r22, r21 + shlri r21, 15, r21 /* Supposedly 0/1 */ + st.q r31, 0 , r21 /* Set fpu_in_use */ +#else + movi 0, r21 + st.q r31, 0 , r21 /* Set fpu_in_use */ +#endif + or r21, ZERO, r31 /* Set FPU flag at last */ + +#ifndef CONFIG_SH_NO_BSS_INIT +/* Don't clear BSS if running on slow platforms such as an RTL simulation, + remote memory via SHdebug link, etc. For these the memory can be guaranteed + to be all zero on boot anyway. 
*/ + /* + * Clear bss + */ + pta clear_quad, tr1 + movi __bss_start, r22 + movi _end, r23 +clear_quad: + st.q r22, 0, ZERO + addi r22, 8, r22 + bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */ +#endif + pta/u hopeless, tr1 + + /* Say bye to head.S but be prepared to wrongly get back ... */ + blink tr0, LINK + + /* If we ever get back here through LINK/tr1-tr7 */ + pta/u hopeless, tr7 + +hopeless: + /* + * Something's badly wrong here. Loop endlessly, + * there's nothing more we can do about it. + * + * Note on hopeless: it can be jumped into invariably + * before or after jumping into hyperspace. The only + * requirement is to be PIC called (PTA) before and + * any way (PTA/PTABS) after. According to Virtual + * to Physical mapping a simulator/emulator can easily + * tell where we came here from just looking at hopeless + * (PC) address. + * + * For debugging purposes: + * (r28) hopeless/loop address + * (r29) Original SR + * (r30) CPU type/Platform endianness + * (r31) FPU Support + * (r32) _start_kernel address + */ + blink tr7, ZERO + + diff --git a/arch/sh64/kernel/init_task.c b/arch/sh64/kernel/init_task.c new file mode 100644 index 000000000..de2d07db1 --- /dev/null +++ b/arch/sh64/kernel/init_task.c @@ -0,0 +1,46 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/init_task.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * + */ +#include +#include +#include +#include +#include + +#include +#include + +static struct fs_struct init_fs = INIT_FS; +static struct files_struct init_files = INIT_FILES; +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); +struct mm_struct init_mm = INIT_MM(init_mm); + +struct pt_regs fake_swapper_regs; + +/* + * Initial thread structure. + * + * We need to make sure that this is THREAD_SIZE-byte aligned due + * to the way process stacks are handled. This is done by having a + * special "init_task" linker map entry.. + */ +union thread_union init_thread_union + __attribute__((__section__(".data.init_task"))) = + { INIT_THREAD_INFO(init_task) }; + +/* + * Initial task structure. + * + * All other task structs will be allocated on slabs in fork.c + */ +struct task_struct init_task = INIT_TASK(init_task); + diff --git a/arch/sh64/kernel/irq.c b/arch/sh64/kernel/irq.c new file mode 100644 index 000000000..bcf6385db --- /dev/null +++ b/arch/sh64/kernel/irq.c @@ -0,0 +1,720 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/irq.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * + */ + +/* + * IRQs are in fact implemented a bit like signal handlers for the kernel. + * Naturally it's not a 1:1 relation, but there are similarities. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Controller mappings for all interrupt sources: + */ +irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { + [0 ... 
NR_IRQS-1] = { + .handler = &no_irq_type, + .lock = SPIN_LOCK_UNLOCKED + } +}; + + +/* + * Special irq handlers. + */ + +irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs) +{ + return IRQ_NONE; +} + +/* + * Generic no controller code + */ + +static void enable_none(unsigned int irq) { } +static unsigned int startup_none(unsigned int irq) { return 0; } +static void disable_none(unsigned int irq) { } +static void ack_none(unsigned int irq) +{ +/* + * 'what should we do if we get a hw irq event on an illegal vector'. + * each architecture has to answer this themselves, it doesnt deserve + * a generic callback i think. + */ + printk("unexpected IRQ trap at irq %02x\n", irq); +} + +/* startup is the same as "enable", shutdown is same as "disable" */ +#define shutdown_none disable_none +#define end_none enable_none + +struct hw_interrupt_type no_irq_type = { + "none", + startup_none, + shutdown_none, + enable_none, + disable_none, + ack_none, + end_none +}; + +#if defined(CONFIG_PROC_FS) +int show_interrupts(struct seq_file *p, void *v) +{ + int i = *(loff_t *) v, j; + struct irqaction * action; + unsigned long flags; + + if (i == 0) { + seq_puts(p, " "); + for (j=0; jtypename); + seq_printf(p, " %s", action->name); + + for (action=action->next; action; action = action->next) + seq_printf(p, ", %s", action->name); + seq_putc(p, '\n'); +unlock: + spin_unlock_irqrestore(&irq_desc[i].lock, flags); + } + return 0; +} +#endif + +/* + * do_NMI handles all Non-Maskable Interrupts. + */ +asmlinkage void do_NMI(unsigned long vector_num, struct pt_regs * regs) +{ + if (regs->sr & 0x40000000) + printk("unexpected NMI trap in system mode\n"); + else + printk("unexpected NMI trap in user mode\n"); + + /* No statistics */ +} + +/* + * This should really return information about whether + * we should do bottom half handling etc. Right now we + * end up _always_ checking the bottom half, which is a + * waste of time and is not what some drivers would + * prefer. + */ +int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action) +{ + int status; + + status = 1; /* Force the "do bottom halves" bit */ + + if (!(action->flags & SA_INTERRUPT)) + local_irq_enable(); + + do { + status |= action->flags; + action->handler(irq, action->dev_id, regs); + action = action->next; + } while (action); + if (status & SA_SAMPLE_RANDOM) + add_interrupt_randomness(irq); + + local_irq_disable(); + + return status; +} + +/* + * Generic enable/disable code: this just calls + * down into the PIC-specific version for the actual + * hardware disable after having gotten the irq + * controller lock. + */ + +/** + * disable_irq_nosync - disable an irq without waiting + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. Disables of an interrupt + * stack. Unlike disable_irq(), this function does not ensure existing + * instances of the IRQ handler have completed before returning. + * + * This function may be called from IRQ context. + */ +void disable_irq_nosync(unsigned int irq) +{ + irq_desc_t *desc = irq_desc + irq; + unsigned long flags; + + spin_lock_irqsave(&desc->lock, flags); + if (!desc->depth++) { + desc->status |= IRQ_DISABLED; + desc->handler->disable(irq); + } + spin_unlock_irqrestore(&desc->lock, flags); +} + +/** + * disable_irq - disable an irq and wait for completion + * @irq: Interrupt to disable + * + * Disable the selected interrupt line. Disables of an interrupt + * stack. That is for two disables you need two enables. 
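The nesting rule stated here (two disables need two enables) comes from the per-descriptor depth counter: only the 0-to-1 transition masks the line and only the 1-to-0 transition unmasks it. A tiny C model of just that counter:

    /* Minimal model of nested IRQ disables; 'masked' stands in for the
     * hardware mask that desc->handler->disable()/enable() would touch. */
    struct irq_nest {
            unsigned int depth;
            int masked;
    };

    static void model_disable(struct irq_nest *s)
    {
            if (!s->depth++)
                    s->masked = 1;          /* first disable masks the line */
    }

    static void model_enable(struct irq_nest *s)
    {
            if (s->depth == 0)
                    return;                 /* unbalanced enable: ignore */
            if (!--s->depth)
                    s->masked = 0;          /* last enable unmasks the line */
    }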
This + * function waits for any pending IRQ handlers for this interrupt + * to complete before returning. If you use this function while + * holding a resource the IRQ handler may need you will deadlock. + * + * This function may be called - with care - from IRQ context. + */ +void disable_irq(unsigned int irq) +{ + disable_irq_nosync(irq); + synchronize_irq(irq); +} + +/** + * enable_irq - enable interrupt handling on an irq + * @irq: Interrupt to enable + * + * Re-enables the processing of interrupts on this IRQ line + * providing no disable_irq calls are now in effect. + * + * This function may be called from IRQ context. + */ +void enable_irq(unsigned int irq) +{ + irq_desc_t *desc = irq_desc + irq; + unsigned long flags; + + spin_lock_irqsave(&desc->lock, flags); + switch (desc->depth) { + case 1: { + unsigned int status = desc->status & ~IRQ_DISABLED; + desc->status = status; + if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { + desc->status = status | IRQ_REPLAY; + hw_resend_irq(desc->handler,irq); + } + desc->handler->enable(irq); + /* fall-through */ + } + default: + desc->depth--; + break; + case 0: + printk("enable_irq() unbalanced from %p\n", + __builtin_return_address(0)); + } + spin_unlock_irqrestore(&desc->lock, flags); +} + +/* + * do_IRQ handles all normal device IRQ's. + */ +asmlinkage int do_IRQ(unsigned long vector_num, struct pt_regs * regs) +{ + /* + * We ack quickly, we don't want the irq controller + * thinking we're snobs just because some other CPU has + * disabled global interrupts (we have already done the + * INT_ACK cycles, it's too late to try to pretend to the + * controller that we aren't taking the interrupt). + * + * 0 return value means that this irq is already being + * handled by some other CPU. (or is disabled) + */ + int irq; + int cpu = smp_processor_id(); + irq_desc_t *desc = NULL; + struct irqaction * action; + unsigned int status; + + irq_enter(); + +#ifdef CONFIG_PREEMPT + /* + * At this point we're now about to actually call handlers, + * and interrupts might get reenabled during them... bump + * preempt_count to prevent any preemption while the handler + * called here is pending... + */ + preempt_disable(); +#endif + + irq = irq_demux(vector_num); + + /* + * Should never happen, if it does check + * vectorN_to_IRQ[] against trap_jtable[]. + */ + if (irq == -1) { + printk("unexpected IRQ trap at vector %03lx\n", vector_num); + goto out; + } + + desc = irq_desc + irq; + + kstat_cpu(cpu).irqs[irq]++; + spin_lock(&desc->lock); + desc->handler->ack(irq); + /* + REPLAY is when Linux resends an IRQ that was dropped earlier + WAITING is used by probe to mark irqs that are being tested + */ + status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING | IRQ_INPROGRESS); + status |= IRQ_PENDING; /* we _want_ to handle it */ + + /* + * If the IRQ is disabled for whatever reason, we cannot + * use the action we have. + */ + action = NULL; + if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) { + action = desc->action; + status &= ~IRQ_PENDING; /* we commit to handling */ + status |= IRQ_INPROGRESS; /* we are handling it */ + } + desc->status = status; + + /* + * If there is no IRQ handler or it was disabled, exit early. + Since we set PENDING, if another processor is handling + a different instance of this same irq, the other processor + will take care of it. + */ + if (!action) + goto out; + + /* + * Edge triggered interrupts need to remember + * pending events. 
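The replay handling described here reduces to: run the handler, and if another instance of the same IRQ was flagged while it ran, run it once more. A compact model of the loop (flag names shortened, not the kernel's):

    enum { PENDING_FLAG = 1, INPROGRESS_FLAG = 2 };

    /* 'status' may get PENDING_FLAG set again by a concurrent do_IRQ()
     * while handler() runs; keep going until no replay is recorded. */
    static void replay_model(unsigned int *status, void (*handler)(void))
    {
            *status |= INPROGRESS_FLAG;
            for (;;) {
                    handler();
                    if (!(*status & PENDING_FLAG))
                            break;
                    *status &= ~PENDING_FLAG;       /* consume the replay */
            }
            *status &= ~INPROGRESS_FLAG;
    }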
+ * This applies to any hw interrupts that allow a second + * instance of the same irq to arrive while we are in do_IRQ + * or in the handler. But the code here only handles the _second_ + * instance of the irq, not the third or fourth. So it is mostly + * useful for irq hardware that does not mask cleanly in an + * SMP environment. + */ + for (;;) { + spin_unlock(&desc->lock); + handle_IRQ_event(irq, regs, action); + spin_lock(&desc->lock); + + if (!(desc->status & IRQ_PENDING)) + break; + desc->status &= ~IRQ_PENDING; + } + desc->status &= ~IRQ_INPROGRESS; +out: + /* + * The ->end() handler has to deal with interrupts which got + * disabled while the handler was running. + */ + if (desc) { + desc->handler->end(irq); + spin_unlock(&desc->lock); + } + + irq_exit(); + +#ifdef CONFIG_PREEMPT + /* + * We're done with the handlers, interrupts should be + * currently disabled; decrement preempt_count now so + * as we return preemption may be allowed... + */ + preempt_enable_no_resched(); +#endif + + return 1; +} + +/** + * request_irq - allocate an interrupt line + * @irq: Interrupt line to allocate + * @handler: Function to be called when the IRQ occurs + * @irqflags: Interrupt type flags + * @devname: An ascii name for the claiming device + * @dev_id: A cookie passed back to the handler function + * + * This call allocates interrupt resources and enables the + * interrupt line and IRQ handling. From the point this + * call is made your handler function may be invoked. Since + * your handler function must clear any interrupt the board + * raises, you must take care both to initialise your hardware + * and to set up the interrupt handler in the right order. + * + * Dev_id must be globally unique. Normally the address of the + * device data structure is used as the cookie. Since the handler + * receives this value it makes sense to use it. + * + * If your interrupt is shared you must pass a non NULL dev_id + * as this is required when freeing the interrupt. + * + * Flags: + * + * SA_SHIRQ Interrupt is shared + * + * SA_INTERRUPT Disable local interrupts while processing + * + * SA_SAMPLE_RANDOM The interrupt can be used for entropy + * + */ +int request_irq(unsigned int irq, + irqreturn_t (*handler)(int, void *, struct pt_regs *), + unsigned long irqflags, + const char * devname, + void *dev_id) +{ + int retval; + struct irqaction * action; + +#if 1 + /* + * Sanity-check: shared interrupts should REALLY pass in + * a real dev-ID, otherwise we'll have trouble later trying + * to figure out which interrupt is which (messes up the + * interrupt freeing logic etc). + */ + if (irqflags & SA_SHIRQ) { + if (!dev_id) + printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]); + } +#endif + + if (irq >= NR_IRQS) + return -EINVAL; + if (!handler) + return -EINVAL; + + action = (struct irqaction *) + kmalloc(sizeof(struct irqaction), GFP_KERNEL); + if (!action) + return -ENOMEM; + + action->handler = handler; + action->flags = irqflags; + cpus_clear(action->mask); + action->name = devname; + action->next = NULL; + action->dev_id = dev_id; + + retval = setup_irq(irq, action); + if (retval) + kfree(action); + return retval; +} + +/** + * free_irq - free an interrupt + * @irq: Interrupt line to free + * @dev_id: Device identity to free + * + * Remove an interrupt handler. The handler is removed and if the + * interrupt line is no longer in use by any driver it is disabled. 
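A sketch of how a driver would use the request_irq()/free_irq() pair documented here, with the three-argument handler signature used throughout this file; MYDEV_IRQ and struct mydev are invented for the example and the usual kernel headers are assumed:

    #define MYDEV_IRQ 7                     /* invented line number */

    struct mydev { int pending; };
    static struct mydev mydev;

    static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    {
            struct mydev *dev = dev_id;

            dev->pending = 0;               /* acknowledge/handle the device here */
            return IRQ_HANDLED;
    }

    static int mydev_attach(void)
    {
            /* Shared line, so a unique dev_id is mandatory for free_irq(). */
            return request_irq(MYDEV_IRQ, mydev_interrupt, SA_SHIRQ,
                               "mydev", &mydev);
    }

    static void mydev_detach(void)
    {
            free_irq(MYDEV_IRQ, &mydev);
    }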
+ * On a shared IRQ the caller must ensure the interrupt is disabled + * on the card it drives before calling this function. The function + * does not return until any executing interrupts for this IRQ + * have completed. + * + * This function may be called from interrupt context. + * + * Bugs: Attempting to free an irq in a handler for the same irq hangs + * the machine. + */ +void free_irq(unsigned int irq, void *dev_id) +{ + irq_desc_t *desc; + struct irqaction **p; + unsigned long flags; + + if (irq >= NR_IRQS) + return; + + desc = irq_desc + irq; + spin_lock_irqsave(&desc->lock,flags); + p = &desc->action; + for (;;) { + struct irqaction * action = *p; + if (action) { + struct irqaction **pp = p; + p = &action->next; + if (action->dev_id != dev_id) + continue; + + /* Found it - now remove it from the list of entries */ + *pp = action->next; + if (!desc->action) { + desc->status |= IRQ_DISABLED; + desc->handler->shutdown(irq); + } + spin_unlock_irqrestore(&desc->lock,flags); + kfree(action); + return; + } + printk("Trying to free free IRQ%d\n",irq); + spin_unlock_irqrestore(&desc->lock,flags); + return; + } +} + +/* + * IRQ autodetection code.. + * + * This depends on the fact that any interrupt that + * comes in on to an unassigned handler will get stuck + * with "IRQ_WAITING" cleared and the interrupt + * disabled. + */ + +/** + * probe_irq_on - begin an interrupt autodetect + * + * Commence probing for an interrupt. The interrupts are scanned + * and a mask of potential interrupt lines is returned. + * + */ +unsigned long probe_irq_on(void) +{ + unsigned int i; + irq_desc_t *desc; + unsigned long val; + unsigned long delay; + + /* + * something may have generated an irq long ago and we want to + * flush such a longstanding irq before considering it as spurious. + */ + for (i = NR_IRQS-1; i >= 0; i--) { + desc = irq_desc + i; + + spin_lock_irq(&desc->lock); + if (!irq_desc[i].action) { + irq_desc[i].handler->startup(i); + } + spin_unlock_irq(&desc->lock); + } + + /* Wait for longstanding interrupts to trigger. */ + for (delay = jiffies + HZ/50; time_after(delay, jiffies); ) + /* about 20ms delay */ synchronize_irq(); + + /* + * enable any unassigned irqs + * (we must startup again here because if a longstanding irq + * happened in the previous stage, it may have masked itself) + */ + for (i = NR_IRQS-1; i >= 0; i--) { + desc = irq_desc + 1; + + spin_lock_irq(&desc->lock); + if (!desc->action) { + desc->status |= IRQ_AUTODETECT | IRQ_WAITING; + if (desc->handler->startup(i)) + desc->status |= IRQ_PENDING; + } + spin_unlock_irq(&desc->lock); + } + + /* + * Wait for spurious interrupts to trigger + */ + for (delay = jiffies + HZ/10; time_after(delay, jiffies); ) + /* about 100ms delay */ synchronize_irq(); + + /* + * Now filter out any obviously spurious interrupts + */ + val = 0; + for (i = 0; i < NR_IRQS; i++) { + irq_desc_t *desc = irq_desc + i; + unsigned int status; + + spin_lock_irq(&desc->lock); + status = desc->status; + + if (status & IRQ_AUTODETECT) { + /* It triggered already - consider it spurious. */ + if (!(status & IRQ_WAITING)) { + desc->status = status & ~IRQ_AUTODETECT; + desc->handler->shutdown(i); + } else + if (i < 32) + val |= 1 << i; + } + spin_unlock_irq(&desc->lock); + } + + return val; +} + +/* + * Return the one interrupt that triggered (this can + * handle any interrupt source). 
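Legacy drivers use the probe pair as a bracket: arm the probe, poke the device so it raises its interrupt, then ask which line fired. A hypothetical sketch; trigger_test_irq() stands in for whatever register write makes the hardware interrupt:

    extern void trigger_test_irq(void);     /* invented device poke */

    static int mydev_find_irq(void)
    {
            unsigned long mask;
            int irq;

            mask = probe_irq_on();          /* start listening on unused lines */
            trigger_test_irq();             /* make the device raise its IRQ   */
            irq = probe_irq_off(mask);      /* >0: the line, 0: none, <0: doubt */

            return irq > 0 ? irq : -1;
    }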
+ */ + +/** + * probe_irq_off - end an interrupt autodetect + * @val: mask of potential interrupts (unused) + * + * Scans the unused interrupt lines and returns the line which + * appears to have triggered the interrupt. If no interrupt was + * found then zero is returned. If more than one interrupt is + * found then minus the first candidate is returned to indicate + * there is doubt. + * + * The interrupt probe logic state is returned to its previous + * value. + * + * BUGS: When used in a module (which arguably shouldn't happen) + * nothing prevents two IRQ probe callers from overlapping. The + * results of this are non-optimal. + */ +int probe_irq_off(unsigned long val) +{ + int i, irq_found, nr_irqs; + + nr_irqs = 0; + irq_found = 0; + for (i=0; i<NR_IRQS; i++) { + irq_desc_t *desc = irq_desc + i; + unsigned int status; + + spin_lock_irq(&desc->lock); + status = desc->status; + if (!(status & IRQ_AUTODETECT)) + continue; + + if (status & IRQ_AUTODETECT) { + if (!(status & IRQ_WAITING)) { + if (!nr_irqs) + irq_found = i; + nr_irqs++; + } + + desc->status = status & ~IRQ_AUTODETECT; + desc->handler->shutdown(i); + } + spin_unlock_irq(&desc->lock); + } + + if (nr_irqs > 1) + irq_found = -irq_found; + return irq_found; +} + +int setup_irq(unsigned int irq, struct irqaction * new) +{ + int shared = 0; + unsigned long flags; + struct irqaction *old, **p; + irq_desc_t *desc = irq_desc + irq; + + /* + * Some drivers like serial.c use request_irq() heavily, + * so we have to be careful not to interfere with a + * running system. + */ + if (new->flags & SA_SAMPLE_RANDOM) { + /* + * This function might sleep, we want to call it first, + * outside of the atomic block. + * Yes, this might clear the entropy pool if the wrong + * driver is attempted to be loaded, without actually + * installing a new handler, but is this really a problem, + * only the sysadmin is able to do this. + */ + rand_initialize_irq(irq); + } + + /* + * The following block of code has to be executed atomically + */ + spin_lock_irqsave(&desc->lock,flags); + p = &desc->action; + if ((old = *p) != NULL) { + /* Can't share interrupts unless both agree to */ + if (!(old->flags & new->flags & SA_SHIRQ)) { + spin_unlock_irqrestore(&desc->lock,flags); + return -EBUSY; + } + + /* add new interrupt at end of irq queue */ + do { + p = &old->next; + old = *p; + } while (old); + shared = 1; + } + + *p = new; + + if (!shared) { + desc->depth = 0; + desc->status &= ~IRQ_DISABLED; + desc->handler->startup(irq); + } + spin_unlock_irqrestore(&desc->lock,flags); + + /* + * No PROC FS support for interrupts. + * For improvements in this area please check + * the i386 branch. + */ + return 0; +} + +#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL) + +void init_irq_proc(void) +{ + /* + * No PROC FS support for interrupts. + * For improvements in this area please check + * the i386 branch. + */ +} +#endif diff --git a/arch/sh64/kernel/irq_intc.c b/arch/sh64/kernel/irq_intc.c new file mode 100644 index 000000000..4062ae55e --- /dev/null +++ b/arch/sh64/kernel/irq_intc.c @@ -0,0 +1,272 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/irq_intc.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * + * Interrupt Controller support for SH5 INTC. + * Per-interrupt selective. IRLM=0 (Fixed priority) is not + * supported, being useless without a cascaded interrupt + * controller.
+ * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include /* this includes also +#include +#include + +/* + * Maybe the generic Peripheral block could move to a more + * generic include file. INTC Block will be defined here + * and only here to make INTC self-contained in a single + * file. + */ +#define INTC_BLOCK_OFFSET 0x01000000 + +/* Base */ +#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \ + INTC_BLOCK_OFFSET + +/* Address */ +#define INTC_ICR_SET (intc_virt + 0x0) +#define INTC_ICR_CLEAR (intc_virt + 0x8) +#define INTC_INTPRI_0 (intc_virt + 0x10) +#define INTC_INTSRC_0 (intc_virt + 0x50) +#define INTC_INTSRC_1 (intc_virt + 0x58) +#define INTC_INTREQ_0 (intc_virt + 0x60) +#define INTC_INTREQ_1 (intc_virt + 0x68) +#define INTC_INTENB_0 (intc_virt + 0x70) +#define INTC_INTENB_1 (intc_virt + 0x78) +#define INTC_INTDSB_0 (intc_virt + 0x80) +#define INTC_INTDSB_1 (intc_virt + 0x88) + +#define INTC_ICR_IRLM 0x1 +#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */ +#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */ + + +/* + * Mapper between the vector ordinal and the IRQ number + * passed to kernel/device drivers. + */ +int intc_evt_to_irq[(0xE20/0x20)+1] = { + -1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */ + 0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */ + 2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */ + 32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */ + -1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */ + -1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */ + 39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */ + 4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */ + 12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */ + -1, -1 /* 0xE00 - 0xE20 */ +}; + +/* + * Opposite mapper. 
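Each entry of intc_evt_to_irq covers one 0x20-wide event code slot, so the demux from an INTEVT-style code to a Linux IRQ number is conceptually a bounds-checked table index, roughly:

    /* Conceptual lookup only; -1 entries mark vectors with no IRQ assigned. */
    static int evt_to_irq_model(const int *evt_to_irq, int nr_entries,
                                unsigned long evt)
    {
            unsigned long idx = evt / 0x20;

            if (idx >= (unsigned long)nr_entries)
                    return -1;
            return evt_to_irq[idx];
    }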
+ */ +static int IRQ_to_vectorN[NR_INTC_IRQS] = { + 0x12, 0x15, 0x18, 0x1B, 0x40, 0x41, 0x42, 0x43, /* 0- 7 */ + -1, -1, -1, -1, 0x50, 0x51, 0x52, 0x53, /* 8-15 */ + 0x54, 0x55, 0x32, 0x33, 0x34, 0x35, 0x36, -1, /* 16-23 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* 24-31 */ + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x38, /* 32-39 */ + 0x39, 0x3A, 0x3B, -1, -1, -1, -1, -1, /* 40-47 */ + -1, -1, -1, -1, -1, -1, -1, -1, /* 48-55 */ + -1, -1, -1, -1, -1, -1, -1, 0x2B, /* 56-63 */ + +}; + +static unsigned long intc_virt; + +static unsigned int startup_intc_irq(unsigned int irq); +static void shutdown_intc_irq(unsigned int irq); +static void enable_intc_irq(unsigned int irq); +static void disable_intc_irq(unsigned int irq); +static void mask_and_ack_intc(unsigned int); +static void end_intc_irq(unsigned int irq); + +static struct hw_interrupt_type intc_irq_type = { + "INTC", + startup_intc_irq, + shutdown_intc_irq, + enable_intc_irq, + disable_intc_irq, + mask_and_ack_intc, + end_intc_irq +}; + +static int irlm; /* IRL mode */ + +static unsigned int startup_intc_irq(unsigned int irq) +{ + enable_intc_irq(irq); + return 0; /* never anything pending */ +} + +static void shutdown_intc_irq(unsigned int irq) +{ + disable_intc_irq(irq); +} + +static void enable_intc_irq(unsigned int irq) +{ + unsigned long reg; + unsigned long bitmask; + + if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY)) + printk("Trying to use straight IRL0-3 with an encoding platform.\n"); + + if (irq < 32) { + reg = INTC_INTENB_0; + bitmask = 1 << irq; + } else { + reg = INTC_INTENB_1; + bitmask = 1 << (irq - 32); + } + + ctrl_outl(bitmask, reg); +} + +static void disable_intc_irq(unsigned int irq) +{ + unsigned long reg; + unsigned long bitmask; + + if (irq < 32) { + reg = INTC_INTDSB_0; + bitmask = 1 << irq; + } else { + reg = INTC_INTDSB_1; + bitmask = 1 << (irq - 32); + } + + ctrl_outl(bitmask, reg); +} + +static void mask_and_ack_intc(unsigned int irq) +{ + disable_intc_irq(irq); +} + +static void end_intc_irq(unsigned int irq) +{ + enable_intc_irq(irq); +} + +/* For future use, if we ever support IRLM=0) */ +void make_intc_irq(unsigned int irq) +{ + disable_irq_nosync(irq); + irq_desc[irq].handler = &intc_irq_type; + disable_intc_irq(irq); +} + +#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL) +int intc_irq_describe(char* p, int irq) +{ + if (irq < NR_INTC_IRQS) + return sprintf(p, "(0x%3x)", IRQ_to_vectorN[irq]*0x20); + else + return 0; +} +#endif + +void __init init_IRQ(void) +{ + unsigned long long __dummy0, __dummy1=~0x00000000100000f0; + unsigned long reg; + unsigned long data; + int i; + + intc_virt = onchip_remap(INTC_BASE, 1024, "INTC"); + if (!intc_virt) { + panic("Unable to remap INTC\n"); + } + + + /* Set default: per-line enable/disable, priority driven ack/eoi */ + for (i = 0; i < NR_INTC_IRQS; i++) { + if (platform_int_priority[i] != NO_PRIORITY) { + irq_desc[i].handler = &intc_irq_type; + } + } + + + /* Disable all interrupts and set all priorities to 0 to avoid trouble */ + ctrl_outl(-1, INTC_INTDSB_0); + ctrl_outl(-1, INTC_INTDSB_1); + + for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8) + ctrl_outl( NO_PRIORITY, reg); + + + /* Set IRLM */ + /* If all the priorities are set to 'no priority', then + * assume we are using encoded mode. 
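The loop further down in init_IRQ() packs eight 4-bit priorities into each 32-bit INTPRI register, lowest-numbered source in the least significant nibble. A stand-alone version of just the packing step:

    #include <stdint.h>

    #define PRIORITIES_PER_REG 8            /* INTC_INTPRI_PPREG */

    /* Build one INTPRI register image from eight priority values. */
    static uint32_t pack_intpri(const int prio[PRIORITIES_PER_REG])
    {
            uint32_t data = 0;
            int i;

            for (i = 0; i < PRIORITIES_PER_REG; i++)
                    data |= (uint32_t)(prio[i] & 0xf) << (i * 4);
            return data;
    }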
+ */ + irlm = platform_int_priority[IRQ_IRL0] + platform_int_priority[IRQ_IRL1] + \ + platform_int_priority[IRQ_IRL2] + platform_int_priority[IRQ_IRL3]; + + if (irlm == NO_PRIORITY) { + /* IRLM = 0 */ + reg = INTC_ICR_CLEAR; + i = IRQ_INTA; + printk("Trying to use encoded IRL0-3. IRLs unsupported.\n"); + } else { + /* IRLM = 1 */ + reg = INTC_ICR_SET; + i = IRQ_IRL0; + } + ctrl_outl(INTC_ICR_IRLM, reg); + + /* Set interrupt priorities according to platform description */ + for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) { + data |= platform_int_priority[i] << ((i % INTC_INTPRI_PPREG) * 4); + if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) { + /* Upon the 7th, set Priority Register */ + ctrl_outl(data, reg); + data = 0; + reg += 8; + } + } + +#ifdef CONFIG_SH_CAYMAN + { + extern void init_cayman_irq(void); + + init_cayman_irq(); + } +#endif + + /* + * And now let interrupts come in. + * sti() is not enough, we need to + * lower priority, too. + */ + __asm__ __volatile__("getcon " __SR ", %0\n\t" + "and %0, %1, %0\n\t" + "putcon %0, " __SR "\n\t" + : "=&r" (__dummy0) + : "r" (__dummy1)); +} diff --git a/arch/sh64/kernel/led.c b/arch/sh64/kernel/led.c new file mode 100644 index 000000000..cf993c4a9 --- /dev/null +++ b/arch/sh64/kernel/led.c @@ -0,0 +1,41 @@ +/* + * arch/sh64/kernel/led.c + * + * Copyright (C) 2002 Stuart Menefy + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Flash the LEDs + */ +#include +#include +#include + +void mach_led(int pos, int val); + +/* acts like an actual heart beat -- ie thump-thump-pause... */ +void heartbeat(void) +{ + static unsigned int cnt = 0, period = 0, dist = 0; + + if (cnt == 0 || cnt == dist) { + mach_led(-1, 1); + } else if (cnt == 7 || cnt == dist + 7) { + mach_led(-1, 0); + } + + if (++cnt > period) { + cnt = 0; + + /* + * The hyperbolic function below modifies the heartbeat period + * length in dependency of the current (5min) load. It goes + * through the points f(0)=126, f(1)=86, f(5)=51, f(inf)->30. + */ + period = ((672 << FSHIFT) / (5 * avenrun[0] + + (7 << FSHIFT))) + 30; + dist = period / 4; + } +} + diff --git a/arch/sh64/kernel/pci-dma.c b/arch/sh64/kernel/pci-dma.c new file mode 100644 index 000000000..a36c3d71a --- /dev/null +++ b/arch/sh64/kernel/pci-dma.c @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) + * Copyright (C) 2003 Paul Mundt (lethal@linux-sh.org) + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Dynamic DMA mapping support. 
+ */ +#include +#include +#include +#include +#include + +void *consistent_alloc(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + void *ret; + int gfp = GFP_ATOMIC; + void *vp; + + if (hwdev == NULL || hwdev->dma_mask != 0xffffffff) + gfp |= GFP_DMA; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + + /* now call our friend ioremap_nocache to give us an uncached area */ + vp = ioremap_nocache(virt_to_phys(ret), size); + + if (vp != NULL) { + memset(vp, 0, size); + *dma_handle = virt_to_bus(ret); + dma_cache_wback_inv((unsigned long)ret, size); + } + + return vp; +} + +void consistent_free(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + void *alloc; + + alloc = bus_to_virt((unsigned long)dma_handle); + free_pages((unsigned long)alloc, get_order(size)); + + iounmap(vaddr); +} + diff --git a/arch/sh64/kernel/pci_sh5.c b/arch/sh64/kernel/pci_sh5.c new file mode 100644 index 000000000..c2b4f00e2 --- /dev/null +++ b/arch/sh64/kernel/pci_sh5.c @@ -0,0 +1,546 @@ +/* + * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) + * Copyright (C) 2003, 2004 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Support functions for the SH5 PCI hardware. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "pci_sh5.h" + +static unsigned long pcicr_virt; +unsigned long pciio_virt; + +static void __init pci_fixup_ide_bases(struct pci_dev *d) +{ + int i; + + /* + * PCI IDE controllers use non-standard I/O port decoding, respect it. + */ + if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE) + return; + printk("PCI: IDE base address fixup for %s\n", d->slot_name); + for(i=0; i<4; i++) { + struct resource *r = &d->resource[i]; + if ((r->start & ~0x80) == 0x374) { + r->start |= 2; + r->end = r->start; + } + } +} + +/* Add future fixups here... */ +struct pci_fixup pcibios_fixups[] = { + { PCI_FIXUP_HEADER, PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases }, + { 0 } +}; + +char * __init pcibios_setup(char *str) +{ + return str; +} + +/* Rounds a number UP to the nearest power of two. Used for + * sizing the PCI window. 
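For cross-checking r2p2() below, an equivalent round-up-to-power-of-two written as a plain loop (valid for sizes up to 2Gb):

    #include <stdint.h>

    /* 0 maps to 0 and exact powers of two map to themselves, as in r2p2(). */
    static uint32_t round_up_pow2(uint32_t num)
    {
            uint32_t p = 1;

            if (num == 0)
                    return 0;
            while (p < num && p != 0x80000000u)
                    p <<= 1;
            return p;
    }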
+ */ +static u32 __init r2p2(u32 num) +{ + int i = 31; + u32 tmp = num; + + if (num == 0) + return 0; + + do { + if (tmp & (1 << 31)) + break; + i--; + tmp <<= 1; + } while (i >= 0); + + tmp = 1 << i; + /* If the original number isn't a power of 2, round it up */ + if (tmp != num) + tmp <<= 1; + + return tmp; +} + +extern unsigned long long memory_start, memory_end; + +int __init sh5pci_init(unsigned memStart, unsigned memSize) +{ + u32 lsr0; + u32 uval; + + pcicr_virt = onchip_remap(SH5PCI_ICR_BASE, 1024, "PCICR"); + if (!pcicr_virt) { + panic("Unable to remap PCICR\n"); + } + + pciio_virt = onchip_remap(SH5PCI_IO_BASE, 0x10000, "PCIIO"); + if (!pciio_virt) { + panic("Unable to remap PCIIO\n"); + } + + pr_debug("Register base addres is 0x%08lx\n", pcicr_virt); + + /* Clear snoop registers */ + SH5PCI_WRITE(CSCR0, 0); + SH5PCI_WRITE(CSCR1, 0); + + pr_debug("Wrote to reg\n"); + + /* Switch off interrupts */ + SH5PCI_WRITE(INTM, 0); + SH5PCI_WRITE(AINTM, 0); + SH5PCI_WRITE(PINTM, 0); + + /* Set bus active, take it out of reset */ + uval = SH5PCI_READ(CR); + + /* Set command Register */ + SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE | CR_PFCS | CR_BMAM); + + uval=SH5PCI_READ(CR); + pr_debug("CR is actually 0x%08x\n",uval); + + /* Allow it to be a master */ + /* NB - WE DISABLE I/O ACCESS to stop overlap */ + /* set WAIT bit to enable stepping, an attempt to improve stability */ + SH5PCI_WRITE_SHORT(CSR_CMD, + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_WAIT); + + /* + ** Set translation mapping memory in order to convert the address + ** used for the main bus, to the PCI internal address. + */ + SH5PCI_WRITE(MBR,0x40000000); + + /* Always set the max size 512M */ + SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024)); + + /* + ** I/O addresses are mapped at internal PCI specific address + ** as is described into the configuration bridge table. + ** These are changed to 0, to allow cards that have legacy + ** io such as vga to function correctly. We set the SH5 IOBAR to + ** 256K, which is a bit big as we can only have 64K of address space + */ + + SH5PCI_WRITE(IOBR,0x0); + + pr_debug("PCI:Writing 0x%08x to IOBR\n",0); + + /* Set up a 256K window. Totally pointless waste of address space */ + SH5PCI_WRITE(IOBMR,0); + pr_debug("PCI:Writing 0x%08x to IOBMR\n",0); + + /* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec. Ideally, + * we would want to map the I/O region somewhere, but it is so big this is not + * that easy! + */ + SH5PCI_WRITE(CSR_IBAR0,~0); + /* Set memory size value */ + memSize = memory_end - memory_start; + + /* Now we set up the mbars so the PCI bus can see the memory of the machine */ + if (memSize < (1024 * 1024)) { + printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%x?\n", memSize); + return -EINVAL; + } + + /* Set LSR 0 */ + lsr0 = (memSize > (512 * 1024 * 1024)) ? 
0x1ff00001 : ((r2p2(memSize) - 0x100000) | 0x1); + SH5PCI_WRITE(LSR0, lsr0); + + pr_debug("PCI:Writing 0x%08x to LSR0\n",lsr0); + + /* Set MBAR 0 */ + SH5PCI_WRITE(CSR_MBAR0, memory_start); + SH5PCI_WRITE(LAR0, memory_start); + + SH5PCI_WRITE(CSR_MBAR1,0); + SH5PCI_WRITE(LAR1,0); + SH5PCI_WRITE(LSR1,0); + + pr_debug("PCI:Writing 0x%08llx to CSR_MBAR0\n",memory_start); + pr_debug("PCI:Writing 0x%08llx to LAR0\n",memory_start); + + /* Enable the PCI interrupts on the device */ + SH5PCI_WRITE(INTM, ~0); + SH5PCI_WRITE(AINTM, ~0); + SH5PCI_WRITE(PINTM, ~0); + + pr_debug("Switching on all error interrupts\n"); + + return(0); +} + +static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 *val) +{ + SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where)); + + switch (size) { + case 1: + *val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3)); + break; + case 2: + *val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2)); + break; + case 4: + *val = SH5PCI_READ(PDR); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where, + int size, u32 val) +{ + SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where)); + + switch (size) { + case 1: + SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val); + break; + case 2: + SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val); + break; + case 4: + SH5PCI_WRITE(PDR, val); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops pci_config_ops = { + .read = sh5pci_read, + .write = sh5pci_write, +}; + +/* Everything hangs off this */ +static struct pci_bus *pci_root_bus; + + +static u8 __init no_swizzle(struct pci_dev *dev, u8 * pin) +{ + pr_debug("swizzle for dev %d on bus %d slot %d pin is %d\n", + dev->devfn,dev->bus->number, PCI_SLOT(dev->devfn),*pin); + return PCI_SLOT(dev->devfn); +} + +static inline u8 bridge_swizzle(u8 pin, u8 slot) +{ + return (((pin-1) + slot) % 4) + 1; +} + +u8 __init common_swizzle(struct pci_dev *dev, u8 *pinp) +{ + if (dev->bus->number != 0) { + u8 pin = *pinp; + do { + pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn)); + /* Move up the chain of bridges. */ + dev = dev->bus->self; + } while (dev->bus->self); + *pinp = pin; + + /* The slot is the slot of the last bridge. */ + } + + return PCI_SLOT(dev->devfn); +} + +/* This needs to be shunted out of here into the board specific bit */ + +static int __init map_cayman_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int result = -1; + + /* The complication here is that the PCI IRQ lines from the Cayman's 2 + 5V slots get into the CPU via a different path from the IRQ lines + from the 3 3.3V slots. Thus, we have to detect whether the card's + interrupts go via the 5V or 3.3V path, i.e. the 'bridge swizzling' + at the point where we cross from 5V to 3.3V is not the normal case. + + The added complication is that we don't know that the 5V slots are + always bus 2, because a card containing a PCI-PCI bridge may be + plugged into a 3.3V slot, and this changes the bus numbering. + + Also, the Cayman has an intermediate PCI bus that goes a custom + expansion board header (and to the secondary bridge). This bus has + never been used in practice. + + The 1ary onboard PCI-PCI bridge is device 3 on bus 0 + The 2ary onboard PCI-PCI bridge is device 0 on the 2ary bus of the 1ary bridge. 
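The per-bridge rotation implemented by bridge_swizzle() above decides which upstream pin a downstream device's INTA..INTD lands on; the slot-by-slot path walk that follows applies it once per bridge. A small stand-alone program that prints the rotation for the first few slots:

    #include <stdio.h>

    /* Same rotation as bridge_swizzle(): pins are numbered 1..4 (INTA..INTD). */
    static int swizzle(int pin, int slot)
    {
            return (((pin - 1) + slot) % 4) + 1;
    }

    int main(void)
    {
            int slot, pin;

            for (slot = 0; slot < 4; slot++)
                    for (pin = 1; pin <= 4; pin++)
                            printf("slot %d INT%c -> upstream INT%c\n", slot,
                                   'A' + pin - 1, 'A' + swizzle(pin, slot) - 1);
            return 0;
    }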
+ */ + + struct slot_pin { + int slot; + int pin; + } path[4]; + int i=0; + + while (dev->bus->number > 0) { + + slot = path[i].slot = PCI_SLOT(dev->devfn); + pin = path[i].pin = bridge_swizzle(pin, slot); + dev = dev->bus->self; + i++; + if (i > 3) panic("PCI path to root bus too long!\n"); + } + + slot = PCI_SLOT(dev->devfn); + /* This is the slot on bus 0 through which the device is eventually + reachable. */ + + /* Now work back up. */ + if ((slot < 3) || (i == 0)) { + /* Bus 0 (incl. PCI-PCI bridge itself) : perform the final + swizzle now. */ + result = IRQ_INTA + bridge_swizzle(pin, slot) - 1; + } else { + i--; + slot = path[i].slot; + pin = path[i].pin; + if (slot > 0) { + panic("PCI expansion bus device found - not handled!\n"); + } else { + if (i > 0) { + /* 5V slots */ + i--; + slot = path[i].slot; + pin = path[i].pin; + /* 'pin' was swizzled earlier wrt slot, don't do it again. */ + result = IRQ_P2INTA + (pin - 1); + } else { + /* IRQ for 2ary PCI-PCI bridge : unused */ + result = -1; + } + } + } + + return result; +} + +irqreturn_t pcish5_err_irq(int irq, void *dev_id, struct pt_regs *regs) +{ + unsigned pci_int, pci_air, pci_cir, pci_aint; + + pci_int = SH5PCI_READ(INT); + pci_cir = SH5PCI_READ(CIR); + pci_air = SH5PCI_READ(AIR); + + if (pci_int) { + printk("PCI INTERRUPT (at %08llx)!\n", regs->pc); + printk("PCI INT -> 0x%x\n", pci_int & 0xffff); + printk("PCI AIR -> 0x%x\n", pci_air); + printk("PCI CIR -> 0x%x\n", pci_cir); + SH5PCI_WRITE(INT, ~0); + } + + pci_aint = SH5PCI_READ(AINT); + if (pci_aint) { + printk("PCI ARB INTERRUPT!\n"); + printk("PCI AINT -> 0x%x\n", pci_aint); + printk("PCI AIR -> 0x%x\n", pci_air); + printk("PCI CIR -> 0x%x\n", pci_cir); + SH5PCI_WRITE(AINT, ~0); + } + + return IRQ_HANDLED; +} + +irqreturn_t pcish5_serr_irq(int irq, void *dev_id, struct pt_regs *regs) +{ + printk("SERR IRQ\n"); + + return IRQ_NONE; +} + +#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1)) + +static void __init +pcibios_size_bridge(struct pci_bus *bus, struct resource *ior, + struct resource *memr) +{ + struct resource io_res, mem_res; + struct pci_dev *dev; + struct pci_dev *bridge = bus->self; + struct list_head *ln; + + if (!bridge) + return; /* host bridge, nothing to do */ + + /* set reasonable default locations for pcibios_align_resource */ + io_res.start = PCIBIOS_MIN_IO; + mem_res.start = PCIBIOS_MIN_MEM; + + io_res.end = io_res.start; + mem_res.end = mem_res.start; + + /* Collect information about how our direct children are laid out. */ + for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) { + int i; + dev = pci_dev_b(ln); + + /* Skip bridges for now */ + if (dev->class >> 8 == PCI_CLASS_BRIDGE_PCI) + continue; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource res; + unsigned long size; + + memcpy(&res, &dev->resource[i], sizeof(res)); + size = res.end - res.start + 1; + + if (res.flags & IORESOURCE_IO) { + res.start = io_res.end; + pcibios_align_resource(dev, &res, size, 0); + io_res.end = res.start + size; + } else if (res.flags & IORESOURCE_MEM) { + res.start = mem_res.end; + pcibios_align_resource(dev, &res, size, 0); + mem_res.end = res.start + size; + } + } + } + + /* And for all of the subordinate busses.
*/ + for (ln=bus->children.next; ln != &bus->children; ln=ln->next) + pcibios_size_bridge(pci_bus_b(ln), &io_res, &mem_res); + + /* turn the ending locations into sizes (subtract start) */ + io_res.end -= io_res.start; + mem_res.end -= mem_res.start; + + /* Align the sizes up by bridge rules */ + io_res.end = ROUND_UP(io_res.end, 4*1024) - 1; + mem_res.end = ROUND_UP(mem_res.end, 1*1024*1024) - 1; + + /* Adjust the bridge's allocation requirements */ + bridge->resource[0].end = bridge->resource[0].start + io_res.end; + bridge->resource[1].end = bridge->resource[1].start + mem_res.end; + + bridge->resource[PCI_BRIDGE_RESOURCES].end = + bridge->resource[PCI_BRIDGE_RESOURCES].start + io_res.end; + bridge->resource[PCI_BRIDGE_RESOURCES+1].end = + bridge->resource[PCI_BRIDGE_RESOURCES+1].start + mem_res.end; + + /* adjust parent's resource requirements */ + if (ior) { + ior->end = ROUND_UP(ior->end, 4*1024); + ior->end += io_res.end; + } + + if (memr) { + memr->end = ROUND_UP(memr->end, 1*1024*1024); + memr->end += mem_res.end; + } +} + +#undef ROUND_UP + +static void __init pcibios_size_bridges(void) +{ + struct resource io_res, mem_res; + + memset(&io_res, 0, sizeof(io_res)); + memset(&mem_res, 0, sizeof(mem_res)); + + pcibios_size_bridge(pci_root_bus, &io_res, &mem_res); +} + +static int __init pcibios_init(void) +{ + if (request_irq(IRQ_ERR, pcish5_err_irq, + SA_INTERRUPT, "PCI Error",NULL) < 0) { + printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n"); + return -EINVAL; + } + + if (request_irq(IRQ_SERR, pcish5_serr_irq, + SA_INTERRUPT, "PCI SERR interrupt", NULL) < 0) { + printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n"); + return -EINVAL; + } + + /* The pci subsystem needs to know where memory is and how much + * of it there is. I've simply made these globals. A better mechanism + * is probably needed. + */ + sh5pci_init(__pa(memory_start), + __pa(memory_end) - __pa(memory_start)); + + pci_root_bus = pci_scan_bus(0, &pci_config_ops, NULL); + pcibios_size_bridges(); + pci_assign_unassigned_resources(); + pci_fixup_irqs(no_swizzle, map_cayman_irq); + + return 0; +} + +subsys_initcall(pcibios_init); + +void __init pcibios_fixup_bus(struct pci_bus *bus) +{ + struct pci_dev *dev = bus->self; + int i; + +#if 1 + if(dev) { + for(i=0; i<3; i++) { + bus->resource[i] = + &dev->resource[PCI_BRIDGE_RESOURCES+i]; + bus->resource[i]->name = bus->name; + } + bus->resource[0]->flags |= IORESOURCE_IO; + bus->resource[1]->flags |= IORESOURCE_MEM; + + /* For now, propagate host limits to the bus; + * we'll adjust them later. */ + +#if 1 + bus->resource[0]->end = 64*1024 - 1 ; + bus->resource[1]->end = PCIBIOS_MIN_MEM+(256*1024*1024)-1; + bus->resource[0]->start = PCIBIOS_MIN_IO; + bus->resource[1]->start = PCIBIOS_MIN_MEM; +#else + bus->resource[0]->end = 0; + bus->resource[1]->end = 0; + bus->resource[0]->start = 0; + bus->resource[1]->start = 0; +#endif + /* Turn off downstream PF memory address range by default */ + bus->resource[2]->start = 1024*1024; + bus->resource[2]->end = bus->resource[2]->start - 1; + } +#endif + +} + diff --git a/arch/sh64/kernel/pci_sh5.h b/arch/sh64/kernel/pci_sh5.h new file mode 100644 index 000000000..8f21f5d2a --- /dev/null +++ b/arch/sh64/kernel/pci_sh5.h @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Definitions for the SH5 PCI hardware.
+ */ + +/* Product ID */ +#define PCISH5_PID 0x350d + +/* Vendor ID */ +#define PCISH5_VID 0x1054 + +/* Configuration types */ +#define ST_TYPE0 0x00 /* Configuration cycle type 0 */ +#define ST_TYPE1 0x01 /* Configuration cycle type 1 */ + +/* VCR data */ +#define PCISH5_VCR_STATUS 0x00 +#define PCISH5_VCR_VERSION 0x08 + +/* +** ICR register offsets and bits +*/ +#define PCISH5_ICR_CR 0x100 /* PCI control register values */ +#define CR_PBAM (1<<12) +#define CR_PFCS (1<<11) +#define CR_FTO (1<<10) +#define CR_PFE (1<<9) +#define CR_TBS (1<<8) +#define CR_SPUE (1<<7) +#define CR_BMAM (1<<6) +#define CR_HOST (1<<5) +#define CR_CLKEN (1<<4) +#define CR_SOCS (1<<3) +#define CR_IOCS (1<<2) +#define CR_RSTCTL (1<<1) +#define CR_CFINT (1<<0) +#define CR_LOCK_MASK 0xa5000000 + +#define PCISH5_ICR_INT 0x114 /* Interrupt register values */ +#define INT_MADIM (1<<2) + +#define PCISH5_ICR_LSR0 0x104 /* Local space register values */ +#define PCISH5_ICR_LSR1 0x108 /* Local space register values */ +#define PCISH5_ICR_LAR0 0x10c /* Local address register values */ +#define PCISH5_ICR_LAR1 0x110 /* Local address register values */ +#define PCISH5_ICR_INTM 0x118 /* Interrupt mask register values */ +#define PCISH5_ICR_AIR 0x11c /* Interrupt error address information register values */ +#define PCISH5_ICR_CIR 0x120 /* Interrupt error command information register values */ +#define PCISH5_ICR_AINT 0x130 /* Interrupt error arbiter interrupt register values */ +#define PCISH5_ICR_AINTM 0x134 /* Interrupt error arbiter interrupt mask register values */ +#define PCISH5_ICR_BMIR 0x138 /* Interrupt error info register of bus master values */ +#define PCISH5_ICR_PAR 0x1c0 /* Pio address register values */ +#define PCISH5_ICR_MBR 0x1c4 /* Memory space bank register values */ +#define PCISH5_ICR_IOBR 0x1c8 /* I/O space bank register values */ +#define PCISH5_ICR_PINT 0x1cc /* power management interrupt register values */ +#define PCISH5_ICR_PINTM 0x1d0 /* power management interrupt mask register values */ +#define PCISH5_ICR_MBMR 0x1d8 /* memory space bank mask register values */ +#define PCISH5_ICR_IOBMR 0x1dc /* I/O space bank mask register values */ +#define PCISH5_ICR_CSCR0 0x210 /* PCI cache snoop control register 0 */ +#define PCISH5_ICR_CSCR1 0x214 /* PCI cache snoop control register 1 */ +#define PCISH5_ICR_PDR 0x220 /* Pio data register values */ + +/* These are config space registers */ +#define PCISH5_ICR_CSR_VID 0x000 /* Vendor id */ +#define PCISH5_ICR_CSR_DID 0x002 /* Device id */ +#define PCISH5_ICR_CSR_CMD 0x004 /* Command register */ +#define PCISH5_ICR_CSR_STATUS 0x006 /* Status */ +#define PCISH5_ICR_CSR_IBAR0 0x010 /* I/O base address register */ +#define PCISH5_ICR_CSR_MBAR0 0x014 /* First Memory base address register */ +#define PCISH5_ICR_CSR_MBAR1 0x018 /* Second Memory base address register */ + + + +/* Base address of registers */ +#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000) +#define SH5PCI_IO_BASE (PHYS_PCI_BLOCK + 0x00800000) +/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG) */ + +/* Register selection macro */ +#define PCISH5_ICR_REG(x) ( pcicr_virt + (PCISH5_ICR_##x)) +/* #define PCISH5_VCR_REG(x) ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */ + +/* Write I/O functions */ +#define SH5PCI_WRITE(reg,val) ctrl_outl((u32)(val),PCISH5_ICR_REG(reg)) +#define SH5PCI_WRITE_SHORT(reg,val) ctrl_outw((u16)(val),PCISH5_ICR_REG(reg)) +#define SH5PCI_WRITE_BYTE(reg,val) ctrl_outb((u8)(val),PCISH5_ICR_REG(reg)) + +/* Read I/O functions */ +#define SH5PCI_READ(reg)
ctrl_inl(PCISH5_ICR_REG(reg)) +#define SH5PCI_READ_SHORT(reg) ctrl_inw(PCISH5_ICR_REG(reg)) +#define SH5PCI_READ_BYTE(reg) ctrl_inb(PCISH5_ICR_REG(reg)) + +/* Set PCI config bits */ +#define SET_CONFIG_BITS(bus,devfn,where) ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000) + +/* Set PCI command register */ +#define CONFIG_CMD(bus, devfn, where) SET_CONFIG_BITS(bus->number,devfn,where) + +/* Size converters */ +#define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18) +#define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18) + + diff --git a/arch/sh64/kernel/pcibios.c b/arch/sh64/kernel/pcibios.c new file mode 100644 index 000000000..bc4ef3987 --- /dev/null +++ b/arch/sh64/kernel/pcibios.c @@ -0,0 +1,168 @@ +/* + * $Id: pcibios.c,v 1.1 2001/08/24 12:38:19 dwmw2 Exp $ + * + * arch/sh/kernel/pcibios.c + * + * Copyright (C) 2002 STMicroelectronics Limited + * Author : David J. McKay + * + * Copyright (C) 2004 Richard Curnow, SuperH UK Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * This is GPL'd. + * + * Provided here are generic versions of: + * pcibios_update_resource() + * pcibios_align_resource() + * pcibios_enable_device() + * pcibios_set_master() + * pcibios_update_irq() + * + * These functions are collected here to reduce duplication of common + * code amongst the many platform-specific PCI support code files. + * + * Platform-specific files are expected to provide: + * pcibios_fixup_bus() + * pcibios_init() + * pcibios_setup() + * pcibios_fixup_pbus_ranges() + */ + +#include +#include +#include + +void +pcibios_update_resource(struct pci_dev *dev, struct resource *root, + struct resource *res, int resource) +{ + u32 new, check; + int reg; + + new = res->start | (res->flags & PCI_REGION_FLAG_MASK); + if (resource < 6) { + reg = PCI_BASE_ADDRESS_0 + 4*resource; + } else if (resource == PCI_ROM_RESOURCE) { + res->flags |= PCI_ROM_ADDRESS_ENABLE; + new |= PCI_ROM_ADDRESS_ENABLE; + reg = dev->rom_base_reg; + } else { + /* Somebody might have asked allocation of a non-standard resource */ + return; + } + + pci_write_config_dword(dev, reg, new); + pci_read_config_dword(dev, reg, &check); + if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) { + printk(KERN_ERR "PCI: Error while updating region " + "%s/%d (%08x != %08x)\n", dev->slot_name, resource, + new, check); + } +} + +/* + * We need to avoid collisions with `mirrored' VGA ports + * and other strange ISA hardware, so we always want the + * addresses to be allocated in the 0x000-0x0ff region + * modulo 0x400. 
+ */ +void pcibios_align_resource(void *data, struct resource *res, + unsigned long size, unsigned long align) +{ + if (res->flags & IORESOURCE_IO) { + unsigned long start = res->start; + + if (start & 0x300) { + start = (start + 0x3ff) & ~0x3ff; + res->start = start; + } + } +} + +static void pcibios_enable_bridge(struct pci_dev *dev) +{ + struct pci_bus *bus = dev->subordinate; + u16 cmd, old_cmd; + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + old_cmd = cmd; + + if (bus->resource[0]->flags & IORESOURCE_IO) { + cmd |= PCI_COMMAND_IO; + } + if ((bus->resource[1]->flags & IORESOURCE_MEM) || + (bus->resource[2]->flags & IORESOURCE_PREFETCH)) { + cmd |= PCI_COMMAND_MEMORY; + } + + if (cmd != old_cmd) { + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + + printk("PCI bridge %s, command register -> %04x\n", + pci_name(dev), cmd); + +} + + + +int pcibios_enable_device(struct pci_dev *dev, int mask) +{ + u16 cmd, old_cmd; + int idx; + struct resource *r; + + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { + pcibios_enable_bridge(dev); + } + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + old_cmd = cmd; + for(idx=0; idx<6; idx++) { + if (!(mask & (1 << idx))) + continue; + r = &dev->resource[idx]; + if (!r->start && r->end) { + printk(KERN_ERR "PCI: Device %s not available because of resource collisions\n", dev->slot_name); + return -EINVAL; + } + if (r->flags & IORESOURCE_IO) + cmd |= PCI_COMMAND_IO; + if (r->flags & IORESOURCE_MEM) + cmd |= PCI_COMMAND_MEMORY; + } + if (dev->resource[PCI_ROM_RESOURCE].start) + cmd |= PCI_COMMAND_MEMORY; + if (cmd != old_cmd) { + printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd); + pci_write_config_word(dev, PCI_COMMAND, cmd); + } + return 0; +} + +/* + * If we set up a device for bus mastering, we need to check and set + * the latency timer as it may not be properly set. + */ +unsigned int pcibios_max_latency = 255; + +void pcibios_set_master(struct pci_dev *dev) +{ + u8 lat; + pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); + if (lat < 16) + lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; + else if (lat > pcibios_max_latency) + lat = pcibios_max_latency; + else + return; + printk(KERN_INFO "PCI: Setting latency timer of device %s to %d\n", pci_name(dev), lat); + pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); +} + +void __init pcibios_update_irq(struct pci_dev *dev, int irq) +{ + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); +} diff --git a/arch/sh64/kernel/process.c b/arch/sh64/kernel/process.c new file mode 100644 index 000000000..f9e82274e --- /dev/null +++ b/arch/sh64/kernel/process.c @@ -0,0 +1,963 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/process.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2003, 2004 Richard Curnow + * + * Started from SH3/4 version: + * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima + * + * In turn started from i386 version: + * Copyright (C) 1995 Linus Torvalds + * + */ + +/* + * This file handles the architecture-dependent parts of process handling.. + */ + +/* Temporary flags/tests. All to be removed/undefined. BEGIN */ +#define IDLE_TRACE +#define VM_SHOW_TABLES +#define VM_TEST_FAULT +#define VM_TEST_RTLBMISS +#define VM_TEST_WTLBMISS + +#undef VM_SHOW_TABLES +#undef IDLE_TRACE +/* Temporary flags/tests. 
All to be removed/undefined. END */ + +#define __KERNEL_SYSCALLS__ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include /* includes also */ +#include +#include +#include + +#include + +struct task_struct *last_task_used_math = NULL; + +#ifdef IDLE_TRACE +#ifdef VM_SHOW_TABLES +/* For testing */ +static void print_PTE(long base) +{ + int i, skip=0; + long long x, y, *p = (long long *) base; + + for (i=0; i< 512; i++, p++){ + if (*p == 0) { + if (!skip) { + skip++; + printk("(0s) "); + } + } else { + skip=0; + x = (*p) >> 32; + y = (*p) & 0xffffffff; + printk("%08Lx%08Lx ", x, y); + if (!((i+1)&0x3)) printk("\n"); + } + } +} + +/* For testing */ +static void print_DIR(long base) +{ + int i, skip=0; + long *p = (long *) base; + + for (i=0; i< 512; i++, p++){ + if (*p == 0) { + if (!skip) { + skip++; + printk("(0s) "); + } + } else { + skip=0; + printk("%08lx ", *p); + if (!((i+1)&0x7)) printk("\n"); + } + } +} + +/* For testing */ +static void print_vmalloc_first_tables(void) +{ + +#define PRESENT 0x800 /* Bit 11 */ + + /* + * Do it really dirty by looking at raw addresses, + * raw offsets, no types. If we used pgtable/pgalloc + * macros/definitions we could hide potential bugs. + * + * Note that pointers are 32-bit for CDC. + */ + long pgdt, pmdt, ptet; + + pgdt = (long) &swapper_pg_dir; + printk("-->PGD (0x%08lx):\n", pgdt); + print_DIR(pgdt); + printk("\n"); + + /* VMALLOC pool is mapped at 0xc0000000, second (pointer) entry in PGD */ + pgdt += 4; + pmdt = (long) (* (long *) pgdt); + if (!(pmdt & PRESENT)) { + printk("No PMD\n"); + return; + } else pmdt &= 0xfffff000; + + printk("-->PMD (0x%08lx):\n", pmdt); + print_DIR(pmdt); + printk("\n"); + + /* Get the pmdt displacement for 0xc0000000 */ + pmdt += 2048; + + /* just look at first two address ranges ... */ + /* ... 0xc0000000 ... */ + ptet = (long) (* (long *) pmdt); + if (!(ptet & PRESENT)) { + printk("No PTE0\n"); + return; + } else ptet &= 0xfffff000; + + printk("-->PTE0 (0x%08lx):\n", ptet); + print_PTE(ptet); + printk("\n"); + + /* ... 0xc0001000 ... */ + ptet += 4; + if (!(ptet & PRESENT)) { + printk("No PTE1\n"); + return; + } else ptet &= 0xfffff000; + printk("-->PTE1 (0x%08lx):\n", ptet); + print_PTE(ptet); + printk("\n"); +} +#else +#define print_vmalloc_first_tables() +#endif /* VM_SHOW_TABLES */ + +static void test_VM(void) +{ + void *a, *b, *c; + +#ifdef VM_SHOW_TABLES + printk("Initial PGD/PMD/PTE\n"); +#endif + print_vmalloc_first_tables(); + + printk("Allocating 2 bytes\n"); + a = vmalloc(2); + print_vmalloc_first_tables(); + + printk("Allocating 4100 bytes\n"); + b = vmalloc(4100); + print_vmalloc_first_tables(); + + printk("Allocating 20234 bytes\n"); + c = vmalloc(20234); + print_vmalloc_first_tables(); + +#ifdef VM_TEST_FAULT + /* Here you may want to fault ! 
*/ + +#ifdef VM_TEST_RTLBMISS + printk("Ready to fault upon read.\n"); + if (* (char *) a) { + printk("RTLBMISSed on area a !\n"); + } + printk("RTLBMISSed on area a !\n"); +#endif + +#ifdef VM_TEST_WTLBMISS + printk("Ready to fault upon write.\n"); + *((char *) b) = 'L'; + printk("WTLBMISSed on area b !\n"); +#endif + +#endif /* VM_TEST_FAULT */ + + printk("Deallocating the 4100 byte chunk\n"); + vfree(b); + print_vmalloc_first_tables(); + + printk("Deallocating the 2 byte chunk\n"); + vfree(a); + print_vmalloc_first_tables(); + + printk("Deallocating the last chunk\n"); + vfree(c); + print_vmalloc_first_tables(); +} + +extern unsigned long volatile jiffies; +int once = 0; +unsigned long old_jiffies; +int pid = -1, pgid = -1; + +void idle_trace(void) +{ + + _syscall0(int, getpid) + _syscall1(int, getpgid, int, pid) + + if (!once) { + /* VM allocation/deallocation simple test */ + test_VM(); + pid = getpid(); + + printk("Got all through to Idle !!\n"); + printk("I'm now going to loop forever ...\n"); + printk("Any ! below is a timer tick.\n"); + printk("Any . below is a getpgid system call from pid = %d.\n", pid); + + + old_jiffies = jiffies; + once++; + } + + if (old_jiffies != jiffies) { + old_jiffies = jiffies - old_jiffies; + switch (old_jiffies) { + case 1: + printk("!"); + break; + case 2: + printk("!!"); + break; + case 3: + printk("!!!"); + break; + case 4: + printk("!!!!"); + break; + default: + printk("(%d!)", (int) old_jiffies); + } + old_jiffies = jiffies; + } + pgid = getpgid(pid); + printk("."); +} +#else +#define idle_trace() do { } while (0) +#endif /* IDLE_TRACE */ + +static int hlt_counter = 1; + +#define HARD_IDLE_TIMEOUT (HZ / 3) + +void disable_hlt(void) +{ + hlt_counter++; +} + +void enable_hlt(void) +{ + hlt_counter--; +} + +static int __init nohlt_setup(char *__unused) +{ + hlt_counter = 1; + return 1; +} + +static int __init hlt_setup(char *__unused) +{ + hlt_counter = 0; + return 1; +} + +__setup("nohlt", nohlt_setup); +__setup("hlt", hlt_setup); + +static inline void hlt(void) +{ + if (hlt_counter) + return; + + __asm__ __volatile__ ("sleep" : : : "memory"); +} + +/* + * The idle loop on a uniprocessor SH.. 
+ */ +void default_idle(void) +{ + /* endless idle loop with no priority at all */ + while (1) { + if (hlt_counter) { + while (1) + if (need_resched()) + break; + } else { + local_irq_disable(); + while (!need_resched()) { + local_irq_enable(); + idle_trace(); + hlt(); + local_irq_disable(); + } + local_irq_enable(); + } + schedule(); + } +} + +void cpu_idle(void *unused) +{ + default_idle(); +} + +void machine_restart(char * __unused) +{ + extern void phys_stext(void); + + phys_stext(); +} + +void machine_halt(void) +{ + for (;;); +} + +void machine_power_off(void) +{ + extern void enter_deep_standby(void); + + enter_deep_standby(); +} + +void show_regs(struct pt_regs * regs) +{ + unsigned long long ah, al, bh, bl, ch, cl; + + printk("\n"); + + ah = (regs->pc) >> 32; + al = (regs->pc) & 0xffffffff; + bh = (regs->regs[18]) >> 32; + bl = (regs->regs[18]) & 0xffffffff; + ch = (regs->regs[15]) >> 32; + cl = (regs->regs[15]) & 0xffffffff; + printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->sr) >> 32; + al = (regs->sr) & 0xffffffff; + asm volatile ("getcon " __TEA ", %0" : "=r" (bh)); + asm volatile ("getcon " __TEA ", %0" : "=r" (bl)); + bh = (bh) >> 32; + bl = (bl) & 0xffffffff; + asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch)); + asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl)); + ch = (ch) >> 32; + cl = (cl) & 0xffffffff; + printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[0]) >> 32; + al = (regs->regs[0]) & 0xffffffff; + bh = (regs->regs[1]) >> 32; + bl = (regs->regs[1]) & 0xffffffff; + ch = (regs->regs[2]) >> 32; + cl = (regs->regs[2]) & 0xffffffff; + printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[3]) >> 32; + al = (regs->regs[3]) & 0xffffffff; + bh = (regs->regs[4]) >> 32; + bl = (regs->regs[4]) & 0xffffffff; + ch = (regs->regs[5]) >> 32; + cl = (regs->regs[5]) & 0xffffffff; + printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[6]) >> 32; + al = (regs->regs[6]) & 0xffffffff; + bh = (regs->regs[7]) >> 32; + bl = (regs->regs[7]) & 0xffffffff; + ch = (regs->regs[8]) >> 32; + cl = (regs->regs[8]) & 0xffffffff; + printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[9]) >> 32; + al = (regs->regs[9]) & 0xffffffff; + bh = (regs->regs[10]) >> 32; + bl = (regs->regs[10]) & 0xffffffff; + ch = (regs->regs[11]) >> 32; + cl = (regs->regs[11]) & 0xffffffff; + printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[12]) >> 32; + al = (regs->regs[12]) & 0xffffffff; + bh = (regs->regs[13]) >> 32; + bl = (regs->regs[13]) & 0xffffffff; + ch = (regs->regs[14]) >> 32; + cl = (regs->regs[14]) & 0xffffffff; + printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[16]) >> 32; + al = (regs->regs[16]) & 0xffffffff; + bh = (regs->regs[17]) >> 32; + bl = (regs->regs[17]) & 0xffffffff; + ch = (regs->regs[19]) >> 32; + cl = (regs->regs[19]) & 0xffffffff; + printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[20]) >> 32; + al = (regs->regs[20]) & 0xffffffff; + bh = (regs->regs[21]) >> 32; + bl = (regs->regs[21]) & 0xffffffff; + ch = (regs->regs[22]) >> 32; + cl = (regs->regs[22]) & 0xffffffff; + printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n", + ah, al, 
bh, bl, ch, cl); + + ah = (regs->regs[23]) >> 32; + al = (regs->regs[23]) & 0xffffffff; + bh = (regs->regs[24]) >> 32; + bl = (regs->regs[24]) & 0xffffffff; + ch = (regs->regs[25]) >> 32; + cl = (regs->regs[25]) & 0xffffffff; + printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[26]) >> 32; + al = (regs->regs[26]) & 0xffffffff; + bh = (regs->regs[27]) >> 32; + bl = (regs->regs[27]) & 0xffffffff; + ch = (regs->regs[28]) >> 32; + cl = (regs->regs[28]) & 0xffffffff; + printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[29]) >> 32; + al = (regs->regs[29]) & 0xffffffff; + bh = (regs->regs[30]) >> 32; + bl = (regs->regs[30]) & 0xffffffff; + ch = (regs->regs[31]) >> 32; + cl = (regs->regs[31]) & 0xffffffff; + printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[32]) >> 32; + al = (regs->regs[32]) & 0xffffffff; + bh = (regs->regs[33]) >> 32; + bl = (regs->regs[33]) & 0xffffffff; + ch = (regs->regs[34]) >> 32; + cl = (regs->regs[34]) & 0xffffffff; + printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[35]) >> 32; + al = (regs->regs[35]) & 0xffffffff; + bh = (regs->regs[36]) >> 32; + bl = (regs->regs[36]) & 0xffffffff; + ch = (regs->regs[37]) >> 32; + cl = (regs->regs[37]) & 0xffffffff; + printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[38]) >> 32; + al = (regs->regs[38]) & 0xffffffff; + bh = (regs->regs[39]) >> 32; + bl = (regs->regs[39]) & 0xffffffff; + ch = (regs->regs[40]) >> 32; + cl = (regs->regs[40]) & 0xffffffff; + printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[41]) >> 32; + al = (regs->regs[41]) & 0xffffffff; + bh = (regs->regs[42]) >> 32; + bl = (regs->regs[42]) & 0xffffffff; + ch = (regs->regs[43]) >> 32; + cl = (regs->regs[43]) & 0xffffffff; + printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[44]) >> 32; + al = (regs->regs[44]) & 0xffffffff; + bh = (regs->regs[45]) >> 32; + bl = (regs->regs[45]) & 0xffffffff; + ch = (regs->regs[46]) >> 32; + cl = (regs->regs[46]) & 0xffffffff; + printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[47]) >> 32; + al = (regs->regs[47]) & 0xffffffff; + bh = (regs->regs[48]) >> 32; + bl = (regs->regs[48]) & 0xffffffff; + ch = (regs->regs[49]) >> 32; + cl = (regs->regs[49]) & 0xffffffff; + printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[50]) >> 32; + al = (regs->regs[50]) & 0xffffffff; + bh = (regs->regs[51]) >> 32; + bl = (regs->regs[51]) & 0xffffffff; + ch = (regs->regs[52]) >> 32; + cl = (regs->regs[52]) & 0xffffffff; + printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[53]) >> 32; + al = (regs->regs[53]) & 0xffffffff; + bh = (regs->regs[54]) >> 32; + bl = (regs->regs[54]) & 0xffffffff; + ch = (regs->regs[55]) >> 32; + cl = (regs->regs[55]) & 0xffffffff; + printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[56]) >> 32; + al = (regs->regs[56]) & 0xffffffff; + bh = (regs->regs[57]) >> 32; + bl = (regs->regs[57]) & 0xffffffff; + ch = (regs->regs[58]) >> 32; + cl = (regs->regs[58]) & 0xffffffff; + printk("R56 : 
%08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[59]) >> 32; + al = (regs->regs[59]) & 0xffffffff; + bh = (regs->regs[60]) >> 32; + bl = (regs->regs[60]) & 0xffffffff; + ch = (regs->regs[61]) >> 32; + cl = (regs->regs[61]) & 0xffffffff; + printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[62]) >> 32; + al = (regs->regs[62]) & 0xffffffff; + bh = (regs->tregs[0]) >> 32; + bl = (regs->tregs[0]) & 0xffffffff; + ch = (regs->tregs[1]) >> 32; + cl = (regs->tregs[1]) & 0xffffffff; + printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->tregs[2]) >> 32; + al = (regs->tregs[2]) & 0xffffffff; + bh = (regs->tregs[3]) >> 32; + bl = (regs->tregs[3]) & 0xffffffff; + ch = (regs->tregs[4]) >> 32; + cl = (regs->tregs[4]) & 0xffffffff; + printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->tregs[5]) >> 32; + al = (regs->tregs[5]) & 0xffffffff; + bh = (regs->tregs[6]) >> 32; + bl = (regs->tregs[6]) & 0xffffffff; + ch = (regs->tregs[7]) >> 32; + cl = (regs->tregs[7]) & 0xffffffff; + printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + /* + * If we're in kernel mode, dump the stack too.. + */ + if (!user_mode(regs)) { + void show_stack(struct task_struct *tsk, unsigned long *sp); + unsigned long sp = regs->regs[15] & 0xffffffff; + struct task_struct *tsk = get_current(); + + tsk->thread.kregs = regs; + + show_stack(tsk, (unsigned long *)sp); + } +} + +struct task_struct * alloc_task_struct(void) +{ + /* Get task descriptor pages */ + return (struct task_struct *) + __get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)); +} + +void free_task_struct(struct task_struct *p) +{ + free_pages((unsigned long) p, get_order(THREAD_SIZE)); +} + +/* + * Create a kernel thread + */ + +/* + * This is the mechanism for creating a new kernel thread. + * + * NOTE! Only a kernel-only process(ie the swapper or direct descendants + * who haven't done an "execve()") should use this: it will work within + * a system call from a "real" process, but the process memory space will + * not be free'd until both the parent and the child have exited. + */ +int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) +{ + /* A bit less processor dependent than older sh ... */ + + unsigned int reply; + +static __inline__ _syscall2(int,clone,unsigned long,flags,unsigned long,newsp) +static __inline__ _syscall1(int,exit,int,ret) + + reply = clone(flags | CLONE_VM, 0); + if (!reply) { + /* Child */ + reply = exit(fn(arg)); + } + + return reply; +} + +/* + * Free current thread data structures etc.. + */ +void exit_thread(void) +{ + /* See arch/sparc/kernel/process.c for the precedent for doing this -- RPC. + + The SH-5 FPU save/restore approach relies on last_task_used_math + pointing to a live task_struct. When another task tries to use the + FPU for the 1st time, the FPUDIS trap handling (see + arch/sh64/kernel/fpu.c) will save the existing FPU state to the + FP regs field within last_task_used_math before re-loading the new + task's FPU state (or initialising it if the FPU has been used + before). So if last_task_used_math is stale, and its page has already been + re-allocated for another use, the consequences are rather grim. Unless we + null it here, there is no other path through which it would get safely + nulled. 
*/ + +#ifndef CONFIG_NOFPU_SUPPORT + if (last_task_used_math == current) { + last_task_used_math = NULL; + } +#endif +} + +void flush_thread(void) +{ + + /* Called by fs/exec.c (flush_old_exec) to remove traces of a + * previously running executable. */ +#ifndef CONFIG_NOFPU_SUPPORT + if (last_task_used_math == current) { + last_task_used_math = NULL; + } + /* Force FPU state to be reinitialised after exec */ + current->used_math = 0; +#endif + + /* if we are a kernel thread, about to change to user thread, + * update kreg + */ + if(current->thread.kregs==&fake_swapper_regs) { + current->thread.kregs = + ((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1); + current->thread.uregs = current->thread.kregs; + } +} + +void release_thread(struct task_struct *dead_task) +{ + /* do nothing */ +} + +/* Fill in the fpu structure for a core dump.. */ +int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) +{ +#ifndef CONFIG_NOFPU_SUPPORT + int fpvalid; + struct task_struct *tsk = current; + + fpvalid = tsk->used_math; + if (fpvalid) { + if (current == last_task_used_math) { + grab_fpu(); + fpsave(&tsk->thread.fpu.hard); + release_fpu(); + last_task_used_math = 0; + regs->sr |= SR_FD; + } + + memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu)); + } + + return fpvalid; +#else + return 0; /* Task didn't use the fpu at all. */ +#endif +} + +asmlinkage void ret_from_fork(void); + +int copy_thread(int nr, unsigned long clone_flags, unsigned long usp, + unsigned long unused, + struct task_struct *p, struct pt_regs *regs) +{ + struct pt_regs *childregs; + unsigned long long se; /* Sign extension */ + +#ifndef CONFIG_NOFPU_SUPPORT + if(last_task_used_math == current) { + grab_fpu(); + fpsave(&current->thread.fpu.hard); + release_fpu(); + last_task_used_math = NULL; + regs->sr |= SR_FD; + } +#endif + /* Copy from sh version */ + childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p->thread_info )) - 1; + + *childregs = *regs; + + if (user_mode(regs)) { + childregs->regs[15] = usp; + p->thread.uregs = childregs; + } else { + childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE; + } + + childregs->regs[9] = 0; /* Set return value for child */ + childregs->sr |= SR_FD; /* Invalidate FPU flag */ + + /* From sh */ + p->set_child_tid = p->clear_child_tid = NULL; + + p->thread.sp = (unsigned long) childregs; + p->thread.pc = (unsigned long) ret_from_fork; + + /* + * Sign extend the edited stack. + * Note that thread.sp and thread.pc will stay + * 32-bit wide and context switch must take care + * of NEFF sign extension. + */ + + se = childregs->regs[15]; + se = (se & NEFF_SIGN) ? (se | NEFF_MASK) : se; + childregs->regs[15] = se; + + return 0; +} + +/* + * fill in the user structure for a core dump.. + */ +void dump_thread(struct pt_regs * regs, struct user * dump) +{ + dump->magic = CMAGIC; + dump->start_code = current->mm->start_code; + dump->start_data = current->mm->start_data; + dump->start_stack = regs->regs[15] & ~(PAGE_SIZE - 1); + dump->u_tsize = (current->mm->end_code - dump->start_code) >> PAGE_SHIFT; + dump->u_dsize = (current->mm->brk + (PAGE_SIZE-1) - dump->start_data) >> PAGE_SHIFT; + dump->u_ssize = (current->mm->start_stack - dump->start_stack + + PAGE_SIZE - 1) >> PAGE_SHIFT; + /* Debug registers will come here.
*/ + + dump->regs = *regs; + + dump->u_fpvalid = dump_fpu(regs, &dump->fpu); +} + +asmlinkage int sys_fork(unsigned long r2, unsigned long r3, + unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs *pregs) +{ + return do_fork(SIGCHLD, pregs->regs[15], pregs, 0, 0, 0); +} + +asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp, + unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs *pregs) +{ + if (!newsp) + newsp = pregs->regs[15]; + return do_fork(clone_flags & ~CLONE_IDLETASK, newsp, pregs, 0, 0, 0); +} + +/* + * This is trivial, and on the face of it looks like it + * could equally well be done in user mode. + * + * Not so, for quite unobvious reasons - register pressure. + * In user mode vfork() cannot have a stack frame, and if + * done by calling the "clone()" system call directly, you + * do not have enough call-clobbered registers to hold all + * the information you need. + */ +asmlinkage int sys_vfork(unsigned long r2, unsigned long r3, + unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs *pregs) +{ + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, pregs->regs[15], pregs, 0, 0, 0); +} + +/* + * sys_execve() executes a new program. + */ +asmlinkage int sys_execve(char *ufilename, char **uargv, + char **uenvp, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs *pregs) +{ + int error; + char *filename; + + lock_kernel(); + filename = getname((char __user *)ufilename); + error = PTR_ERR(filename); + if (IS_ERR(filename)) + goto out; + + error = do_execve(filename, + (char __user * __user *)uargv, + (char __user * __user *)uenvp, + pregs); + if (error == 0) + current->ptrace &= ~PT_DTRACE; + putname(filename); +out: + unlock_kernel(); + return error; +} + +/* + * These bracket the sleeping functions.. + */ +extern void interruptible_sleep_on(wait_queue_head_t *q); + +#define mid_sched ((unsigned long) interruptible_sleep_on) + +static int in_sh64_switch_to(unsigned long pc) +{ + extern char __sh64_switch_to_end; + /* For a sleeping task, the PC is somewhere in the middle of the function, + so we don't have to worry about masking the LSB off */ + return (pc >= (unsigned long) sh64_switch_to) && + (pc < (unsigned long) &__sh64_switch_to_end); +} + +unsigned long get_wchan(struct task_struct *p) +{ + unsigned long schedule_fp; + unsigned long sh64_switch_to_fp; + unsigned long schedule_caller_pc; + unsigned long pc; + + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + + /* + * The same comment as on the Alpha applies here, too ... + */ + pc = thread_saved_pc(p); + +#if CONFIG_FRAME_POINTER + if (in_sh64_switch_to(pc)) { + sh64_switch_to_fp = (long) p->thread.sp; + /* r14 is saved at offset 4 in the sh64_switch_to frame */ + schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4); + + /* and the caller of 'schedule' is (currently!) saved at offset 24 + in the frame of schedule (from disasm) */ + schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24); + return schedule_caller_pc; + } +#endif + return pc; +} + +/* Provide a /proc/asids file that lists out the + ASIDs currently associated with the processes. (If the DM.PC register is + examined through the debug link, this shows ASID + PC. To make use of this, + the PID->ASID relationship needs to be known. This is primarily for + debugging.) 
+ */ + +#if defined(CONFIG_SH64_PROC_ASIDS) +#include +#include + +static int +asids_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data) +{ + int len=0; + struct task_struct *p; + read_lock(&tasklist_lock); + for_each_task(p) { + int pid = p->pid; + struct mm_struct *mm; + if (!pid) continue; + mm = p->mm; + if (mm) { + unsigned long asid, context; + context = mm->context; + asid = (context & 0xff); + len += sprintf(buf+len, "%5d : %02x\n", pid, asid); + } else { + len += sprintf(buf+len, "%5d : (none)\n", pid); + } + } + read_unlock(&tasklist_lock); + *eof = 1; + return len; +} + +static int __init register_proc_asids(void) +{ + create_proc_read_entry("asids", 0, NULL, asids_proc_info, NULL); + return 0; +} + +__initcall(register_proc_asids); +#endif + diff --git a/arch/sh64/kernel/ptrace.c b/arch/sh64/kernel/ptrace.c new file mode 100644 index 000000000..887e89a45 --- /dev/null +++ b/arch/sh64/kernel/ptrace.c @@ -0,0 +1,362 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/ptrace.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * + * Started from SH3/4 version: + * SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka + * + * Original x86 implementation: + * By Ross Biro 1/23/92 + * edited by Linus Torvalds + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* This mask defines the bits of the SR which the user is not allowed to + change, which are everything except S, Q, M, PR, SZ, FR. */ +#define SR_MASK (0xffff8cfd) + +/* + * does not yet catch signals sent when the child dies. + * in exit.c or in signal.c. + */ + +/* + * This routine will get a word from the user area in the process kernel stack. + */ +static inline int get_stack_long(struct task_struct *task, int offset) +{ + unsigned char *stack; + + stack = (unsigned char *)(task->thread.uregs); + stack += offset; + return (*((int *)stack)); +} + +static inline unsigned long +get_fpu_long(struct task_struct *task, unsigned long addr) +{ + unsigned long tmp; + struct pt_regs *regs; + regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1; + + if (!task->used_math) { + if (addr == offsetof(struct user_fpu_struct, fpscr)) { + tmp = FPSCR_INIT; + } else { + tmp = 0xffffffffUL; /* matches initial value in fpu.c */ + } + return tmp; + } + + if (last_task_used_math == task) { + grab_fpu(); + fpsave(&task->thread.fpu.hard); + release_fpu(); + last_task_used_math = 0; + regs->sr |= SR_FD; + } + + tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)]; + return tmp; +} + +/* + * This routine will put a word into the user area in the process kernel stack. 
+ */ +static inline int put_stack_long(struct task_struct *task, int offset, + unsigned long data) +{ + unsigned char *stack; + + stack = (unsigned char *)(task->thread.uregs); + stack += offset; + *(unsigned long *) stack = data; + return 0; +} + +static inline int +put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data) +{ + struct pt_regs *regs; + + regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1; + + if (!task->used_math) { + fpinit(&task->thread.fpu.hard); + task->used_math = 1; + } else if (last_task_used_math == task) { + grab_fpu(); + fpsave(&task->thread.fpu.hard); + release_fpu(); + last_task_used_math = 0; + regs->sr |= SR_FD; + } + + ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data; + return 0; +} + +asmlinkage int sys_ptrace(long request, long pid, long addr, long data) +{ + struct task_struct *child; + int ret; + + lock_kernel(); + ret = -EPERM; + if (request == PTRACE_TRACEME) { + /* are we already being traced? */ + if (current->ptrace & PT_PTRACED) + goto out; + /* set the ptrace bit in the process flags. */ + current->ptrace |= PT_PTRACED; + ret = 0; + goto out; + } + ret = -ESRCH; + read_lock(&tasklist_lock); + child = find_task_by_pid(pid); + if (child) + get_task_struct(child); + read_unlock(&tasklist_lock); + if (!child) + goto out; + + ret = -EPERM; + if (pid == 1) /* you may not mess with init */ + goto out_tsk; + + if (request == PTRACE_ATTACH) { + ret = ptrace_attach(child); + goto out_tsk; + } + + ret = ptrace_check_attach(child, request == PTRACE_KILL); + if (ret < 0) + goto out_tsk; + + switch (request) { + /* when I and D space are separate, these will need to be fixed. */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: { + unsigned long tmp; + int copied; + + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); + ret = -EIO; + if (copied != sizeof(tmp)) + break; + ret = put_user(tmp,(unsigned long *) data); + break; + } + + /* read the word at location addr in the USER area. */ + case PTRACE_PEEKUSR: { + unsigned long tmp; + + ret = -EIO; + if ((addr & 3) || addr < 0) + break; + + if (addr < sizeof(struct pt_regs)) + tmp = get_stack_long(child, addr); + else if ((addr >= offsetof(struct user, fpu)) && + (addr < offsetof(struct user, u_fpvalid))) { + tmp = get_fpu_long(child, addr - offsetof(struct user, fpu)); + } else if (addr == offsetof(struct user, u_fpvalid)) { + tmp = child->used_math; + } else { + break; + } + ret = put_user(tmp, (unsigned long *)data); + break; + } + + /* when I and D space are separate, this will have to be fixed. */ + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + ret = 0; + if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) + break; + ret = -EIO; + break; + + case PTRACE_POKEUSR: + /* write the word at location addr in the USER area. We must + disallow any changes to certain SR bits or u_fpvalid, since + this could crash the kernel or result in a security + loophole. 
*/ + ret = -EIO; + if ((addr & 3) || addr < 0) + break; + + if (addr < sizeof(struct pt_regs)) { + /* Ignore change of top 32 bits of SR */ + if (addr == offsetof (struct pt_regs, sr)+4) + { + ret = 0; + break; + } + /* If lower 32 bits of SR, ignore non-user bits */ + if (addr == offsetof (struct pt_regs, sr)) + { + long cursr = get_stack_long(child, addr); + data &= ~(SR_MASK); + data |= (cursr & SR_MASK); + } + ret = put_stack_long(child, addr, data); + } + else if ((addr >= offsetof(struct user, fpu)) && + (addr < offsetof(struct user, u_fpvalid))) { + ret = put_fpu_long(child, addr - offsetof(struct user, fpu), data); + } + break; + + case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ + case PTRACE_CONT: { /* restart after signal. */ + ret = -EIO; + if ((unsigned long) data > _NSIG) + break; + if (request == PTRACE_SYSCALL) + set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + else + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + child->exit_code = data; + wake_up_process(child); + ret = 0; + break; + } + +/* + * make the child exit. Best I can do is send it a sigkill. + * perhaps it should be put in the status that it wants to + * exit. + */ + case PTRACE_KILL: { + ret = 0; + if (child->state == TASK_ZOMBIE) /* already dead */ + break; + child->exit_code = SIGKILL; + wake_up_process(child); + break; + } + + case PTRACE_SINGLESTEP: { /* set the trap flag. */ + struct pt_regs *regs; + + ret = -EIO; + if ((unsigned long) data > _NSIG) + break; + clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); + if ((child->ptrace & PT_DTRACE) == 0) { + /* Spurious delayed TF traps may occur */ + child->ptrace |= PT_DTRACE; + } + + regs = child->thread.uregs; + + regs->sr |= SR_SSTEP; /* auto-resetting upon exception */ + + child->exit_code = data; + /* give it a chance to run. */ + wake_up_process(child); + ret = 0; + break; + } + + case PTRACE_DETACH: /* detach a process that was attached. */ + ret = ptrace_detach(child, data); + break; + + default: + ret = ptrace_request(child, request, addr, data); + break; + } +out_tsk: + put_task_struct(child); +out: + unlock_kernel(); + return ret; +} + +asmlinkage void syscall_trace(void) +{ + struct task_struct *tsk = current; + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + if (!(tsk->ptrace & PT_PTRACED)) + return; + + tsk->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) + ? 0x80 : 0); + tsk->state = TASK_STOPPED; + notify_parent(tsk, SIGCHLD); + schedule(); + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. -brl + */ + if (tsk->exit_code) { + send_sig(tsk->exit_code, tsk, 1); + tsk->exit_code = 0; + } +} + +/* Called with interrupts disabled */ +asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs) +{ + /* This is called after a single step exception (DEBUGSS). + There is no need to change the PC, as it is a post-execution + exception, as entry.S does not do anything to the PC for DEBUGSS. + We need to clear the Single Step setting in SR to avoid + continually stepping. */ + local_irq_enable(); + regs->sr &= ~SR_SSTEP; + force_sig(SIGTRAP, current); +} + +/* Called with interrupts disabled */ +asmlinkage void do_software_break_point(unsigned long long vec, + struct pt_regs *regs) +{ + /* We need to forward step the PC, to counteract the backstep done + in signal.c. 
*/ + local_irq_enable(); + force_sig(SIGTRAP, current); + regs->pc += 4; +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure single step bits etc are not set. + */ +void ptrace_disable(struct task_struct *child) +{ + /* nothing to do.. */ +} diff --git a/arch/sh64/kernel/semaphore.c b/arch/sh64/kernel/semaphore.c new file mode 100644 index 000000000..72c165334 --- /dev/null +++ b/arch/sh64/kernel/semaphore.c @@ -0,0 +1,140 @@ +/* + * Just taken from alpha implementation. + * This can't work well, perhaps. + */ +/* + * Generic semaphore code. Buyer beware. Do your own + * specific changes in + */ + +#include +#include +#include +#include +#include +#include +#include + +spinlock_t semaphore_wake_lock; + +/* + * Semaphores are implemented using a two-way counter: + * The "count" variable is decremented for each process + * that tries to sleep, while the "waking" variable is + * incremented when the "up()" code goes to wake up waiting + * processes. + * + * Notably, the inline "up()" and "down()" functions can + * efficiently test if they need to do any extra work (up + * needs to do something only if count was negative before + * the increment operation. + * + * waking_non_zero() (from asm/semaphore.h) must execute + * atomically. + * + * When __up() is called, the count was negative before + * incrementing it, and we need to wake up somebody. + * + * This routine adds one to the count of processes that need to + * wake up and exit. ALL waiting processes actually wake up but + * only the one that gets to the "waking" field first will gate + * through and acquire the semaphore. The others will go back + * to sleep. + * + * Note that these functions are only called when there is + * contention on the lock, and as such all this is the + * "non-critical" part of the whole semaphore business. The + * critical part is the inline stuff in + * where we want to avoid any extra jumps and calls. + */ +void __up(struct semaphore *sem) +{ + wake_one_more(sem); + wake_up(&sem->wait); +} + +/* + * Perform the "down" function. Return zero for semaphore acquired, + * return negative for signalled out of the function. + * + * If called from __down, the return is ignored and the wait loop is + * not interruptible. This means that a task waiting on a semaphore + * using "down()" cannot be killed until someone does an "up()" on + * the semaphore. + * + * If called from __down_interruptible, the return value gets checked + * upon return. If the return value is negative then the task continues + * with the negative value in the return register (it can be tested by + * the caller). + * + * Either form may be used in conjunction with "up()". + * + */ + +#define DOWN_VAR \ + struct task_struct *tsk = current; \ + wait_queue_t wait; \ + init_waitqueue_entry(&wait, tsk); + +#define DOWN_HEAD(task_state) \ + \ + \ + tsk->state = (task_state); \ + add_wait_queue(&sem->wait, &wait); \ + \ + /* \ + * Ok, we're set up. sem->count is known to be less than zero \ + * so we must wait. \ + * \ + * We can let go the lock for purposes of waiting. \ + * We re-acquire it after awaking so as to protect \ + * all semaphore operations. \ + * \ + * If "up()" is called before we call waking_non_zero() then \ + * we will catch it right away. If it is called later then \ + * we will have to go through a wakeup cycle to catch it. \ + * \ + * Multiple waiters contend for the semaphore lock to see \ + * who gets to gate through and who has to wait some more. 
\ + */ \ + for (;;) { + +#define DOWN_TAIL(task_state) \ + tsk->state = (task_state); \ + } \ + tsk->state = TASK_RUNNING; \ + remove_wait_queue(&sem->wait, &wait); + +void __sched __down(struct semaphore * sem) +{ + DOWN_VAR + DOWN_HEAD(TASK_UNINTERRUPTIBLE) + if (waking_non_zero(sem)) + break; + schedule(); + DOWN_TAIL(TASK_UNINTERRUPTIBLE) +} + +int __sched __down_interruptible(struct semaphore * sem) +{ + int ret = 0; + DOWN_VAR + DOWN_HEAD(TASK_INTERRUPTIBLE) + + ret = waking_non_zero_interruptible(sem, tsk); + if (ret) + { + if (ret == 1) + /* ret != 0 only if we get interrupted -arca */ + ret = 0; + break; + } + schedule(); + DOWN_TAIL(TASK_INTERRUPTIBLE) + return ret; +} + +int __down_trylock(struct semaphore * sem) +{ + return waking_non_zero_trylock(sem); +} diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c new file mode 100644 index 000000000..ce76634d6 --- /dev/null +++ b/arch/sh64/kernel/setup.c @@ -0,0 +1,389 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/setup.c + * + * sh64 Arch Support + * + * This file handles the architecture-dependent parts of initialization + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003, 2004 Paul Mundt + * + * benedict.gaster@superh.com: 2nd May 2002 + * Modified to use the empty_zero_page to pass command line arguments. + * + * benedict.gaster@superh.com: 3rd May 2002 + * Added support for ramdisk, removing statically linked romfs at the same time. + * + * lethal@linux-sh.org: 15th May 2003 + * Added generic procfs cpuinfo reporting. Make boards just export their name. + * + * lethal@linux-sh.org: 25th May 2003 + * Added generic get_cpu_subtype() for subtype reporting from cpu_data->type. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_VT +#include +#endif + +struct screen_info screen_info; + +/* On a PC this would be initialised as a result of the BIOS detecting the + * mouse. 
*/ +unsigned char aux_device_present = 0xaa; + +#ifdef CONFIG_BLK_DEV_RAM +extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */ +extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */ +extern int rd_image_start; /* starting block # of image */ +#endif + +extern int root_mountflags; +extern char *get_system_type(void); +extern void platform_setup(void); +extern void platform_monitor(void); +extern void platform_reserve(void); +extern int sh64_cache_init(void); +extern int sh64_tlb_init(void); + +#define RAMDISK_IMAGE_START_MASK 0x07FF +#define RAMDISK_PROMPT_FLAG 0x8000 +#define RAMDISK_LOAD_FLAG 0x4000 + +static char command_line[COMMAND_LINE_SIZE] = { 0, }; +unsigned long long memory_start = CONFIG_MEMORY_START; +unsigned long long memory_end = CONFIG_MEMORY_START + (CONFIG_MEMORY_SIZE_IN_MB * 1024 * 1024); + +struct sh_cpuinfo boot_cpu_data; + +static inline void parse_mem_cmdline (char ** cmdline_p) +{ + char c = ' ', *to = command_line, *from = COMMAND_LINE; + int len = 0; + + /* Save unparsed command line copy for /proc/cmdline */ + memcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE); + saved_command_line[COMMAND_LINE_SIZE-1] = '\0'; + + for (;;) { + /* + * "mem=XXX[kKmM]" defines a size of memory. + */ + if (c == ' ' && !memcmp(from, "mem=", 4)) { + if (to != command_line) + to--; + { + unsigned long mem_size; + + mem_size = memparse(from+4, &from); + memory_end = memory_start + mem_size; + } + } + c = *(from++); + if (!c) + break; + if (COMMAND_LINE_SIZE <= ++len) + break; + *(to++) = c; + } + *to = '\0'; + + *cmdline_p = command_line; +} + +static void __init sh64_cpu_type_detect(void) +{ + extern unsigned long long peek_real_address_q(unsigned long long addr); + unsigned long long cir; + /* Do peeks in real mode to avoid having to set up a mapping for the + WPC registers. On SH5-101 cut2, such a mapping would be exposed to + an address translation erratum which would make it hard to set up + correctly. */ + cir = peek_real_address_q(0x0d000008); + + if ((cir & 0xffff) == 0x5103) { + boot_cpu_data.type = CPU_SH5_103; + } else if (((cir >> 32) & 0xffff) == 0x51e2) { + /* CPU.VCR aliased at CIR address on SH5-101 */ + boot_cpu_data.type = CPU_SH5_101; + } else { + boot_cpu_data.type = CPU_SH_NONE; + } +} + +void __init setup_arch(char **cmdline_p) +{ + unsigned long bootmap_size, i; + unsigned long first_pfn, start_pfn, last_pfn, pages; + +#ifdef CONFIG_EARLY_PRINTK + extern void enable_early_printk(void); + + /* + * Setup Early SCIF console + */ + enable_early_printk(); +#endif + + /* + * Setup TLB mappings + */ + sh64_tlb_init(); + + /* + * Caches are already initialized by the time we get here, so we just + * fill in cpu_data info for the caches. 
+ */ + sh64_cache_init(); + + platform_setup(); + platform_monitor(); + + sh64_cpu_type_detect(); + + ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); + +#ifdef CONFIG_BLK_DEV_RAM + rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK; + rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0); + rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0); +#endif + + if (!MOUNT_ROOT_RDONLY) + root_mountflags &= ~MS_RDONLY; + init_mm.start_code = (unsigned long) _text; + init_mm.end_code = (unsigned long) _etext; + init_mm.end_data = (unsigned long) _edata; + init_mm.brk = (unsigned long) _end; + + code_resource.start = __pa(_text); + code_resource.end = __pa(_etext)-1; + data_resource.start = __pa(_etext); + data_resource.end = __pa(_edata)-1; + + parse_mem_cmdline(cmdline_p); + + /* + * Find the lowest and highest page frame numbers we have available + */ + first_pfn = PFN_DOWN(memory_start); + last_pfn = PFN_DOWN(memory_end); + pages = last_pfn - first_pfn; + + /* + * Partially used pages are not usable - thus + * we are rounding upwards: + */ + start_pfn = PFN_UP(__pa(_end)); + + /* + * Find a proper area for the bootmem bitmap. After this + * bootstrap step all allocations (until the page allocator + * is intact) must be done via bootmem_alloc(). + */ + bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn, + first_pfn, + last_pfn); + /* + * Round it up. + */ + bootmap_size = PFN_PHYS(PFN_UP(bootmap_size)); + + /* + * Register fully available RAM pages with the bootmem allocator. + */ + free_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn), PFN_PHYS(pages)); + + /* + * Reserve all kernel sections + bootmem bitmap + a guard page. + */ + reserve_bootmem_node(NODE_DATA(0), PFN_PHYS(first_pfn), + (PFN_PHYS(start_pfn) + bootmap_size + PAGE_SIZE) - PFN_PHYS(first_pfn)); + + /* + * Reserve platform dependent sections + */ + platform_reserve(); + +#ifdef CONFIG_BLK_DEV_INITRD + if (LOADER_TYPE && INITRD_START) { + if (INITRD_START + INITRD_SIZE <= (PFN_PHYS(last_pfn))) { + reserve_bootmem_node(NODE_DATA(0), INITRD_START + __MEMORY_START, INITRD_SIZE); + + initrd_start = + (long) INITRD_START ? INITRD_START + PAGE_OFFSET + __MEMORY_START : 0; + + initrd_end = initrd_start + INITRD_SIZE; + } else { + printk("initrd extends beyond end of memory " + "(0x%08lx > 0x%08lx)\ndisabling initrd\n", + (long) INITRD_START + INITRD_SIZE, + PFN_PHYS(last_pfn)); + initrd_start = 0; + } + } +#endif + + /* + * Claim all RAM, ROM, and I/O resources. + */ + + /* Kernel RAM */ + request_resource(&iomem_resource, &code_resource); + request_resource(&iomem_resource, &data_resource); + + /* Other KRAM space */ + for (i = 0; i < STANDARD_KRAM_RESOURCES - 2; i++) + request_resource(&iomem_resource, + &platform_parms.kram_res_p[i]); + + /* XRAM space */ + for (i = 0; i < STANDARD_XRAM_RESOURCES; i++) + request_resource(&iomem_resource, + &platform_parms.xram_res_p[i]); + + /* ROM space */ + for (i = 0; i < STANDARD_ROM_RESOURCES; i++) + request_resource(&iomem_resource, + &platform_parms.rom_res_p[i]); + + /* I/O space */ + for (i = 0; i < STANDARD_IO_RESOURCES; i++) + request_resource(&ioport_resource, + &platform_parms.io_res_p[i]); + + +#ifdef CONFIG_VT +#if defined(CONFIG_VGA_CONSOLE) + conswitchp = &vga_con; +#elif defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +#endif + + printk("Hardware FPU: %s\n", fpu_in_use ? 
"enabled" : "disabled"); + + paging_init(); +} + +void __xchg_called_with_bad_pointer(void) +{ + printk(KERN_EMERG "xchg() called with bad pointer !\n"); +} + +static struct cpu cpu[1]; + +static int __init topology_init(void) +{ + return register_cpu(cpu, 0, NULL); +} + +subsys_initcall(topology_init); + +/* + * Get CPU information + */ +static const char *cpu_name[] = { + [CPU_SH5_101] = "SH5-101", + [CPU_SH5_103] = "SH5-103", + [CPU_SH_NONE] = "Unknown", +}; + +const char *get_cpu_subtype(void) +{ + return cpu_name[boot_cpu_data.type]; +} + +#ifdef CONFIG_PROC_FS +static int show_cpuinfo(struct seq_file *m,void *v) +{ + unsigned int cpu = smp_processor_id(); + + if (!cpu) + seq_printf(m, "machine\t\t: %s\n", get_system_type()); + + seq_printf(m, "processor\t: %d\n", cpu); + seq_printf(m, "cpu family\t: SH-5\n"); + seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype()); + + seq_printf(m, "icache size\t: %dK-bytes\n", + (boot_cpu_data.icache.ways * + boot_cpu_data.icache.sets * + boot_cpu_data.icache.linesz) >> 10); + seq_printf(m, "dcache size\t: %dK-bytes\n", + (boot_cpu_data.dcache.ways * + boot_cpu_data.dcache.sets * + boot_cpu_data.dcache.linesz) >> 10); + seq_printf(m, "itlb entries\t: %d\n", boot_cpu_data.itlb.entries); + seq_printf(m, "dtlb entries\t: %d\n", boot_cpu_data.dtlb.entries); + +#define PRINT_CLOCK(name, value) \ + seq_printf(m, name " clock\t: %d.%02dMHz\n", \ + ((value) / 1000000), ((value) % 1000000)/10000) + + PRINT_CLOCK("cpu", boot_cpu_data.cpu_clock); + PRINT_CLOCK("bus", boot_cpu_data.bus_clock); + PRINT_CLOCK("module", boot_cpu_data.module_clock); + + seq_printf(m, "bogomips\t: %lu.%02lu\n\n", + (loops_per_jiffy*HZ+2500)/500000, + ((loops_per_jiffy*HZ+2500)/5000) % 100); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return (void*)(*pos == 0); +} +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + return NULL; +} +static void c_stop(struct seq_file *m, void *v) +{ +} +struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; +#endif /* CONFIG_PROC_FS */ diff --git a/arch/sh64/kernel/sh_ksyms.c b/arch/sh64/kernel/sh_ksyms.c new file mode 100644 index 000000000..80712f1ef --- /dev/null +++ b/arch/sh64/kernel/sh_ksyms.c @@ -0,0 +1,84 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/sh_ksyms.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +extern void dump_thread(struct pt_regs *, struct user *); +extern int dump_fpu(struct pt_regs *, elf_fpregset_t *); + +#if 0 +/* Not yet - there's no declaration of drive_info anywhere. */ +#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE) +extern struct drive_info_struct drive_info; +EXPORT_SYMBOL(drive_info); +#endif +#endif + +/* platform dependent support */ +EXPORT_SYMBOL(dump_thread); +EXPORT_SYMBOL(dump_fpu); +EXPORT_SYMBOL(iounmap); +EXPORT_SYMBOL(enable_irq); +EXPORT_SYMBOL(disable_irq); +EXPORT_SYMBOL(kernel_thread); + +/* Networking helper routines. 
*/ +EXPORT_SYMBOL(csum_partial_copy); + +EXPORT_SYMBOL(strtok); +EXPORT_SYMBOL(strpbrk); +EXPORT_SYMBOL(strstr); + +#ifdef CONFIG_VT +EXPORT_SYMBOL(screen_info); +#endif + +EXPORT_SYMBOL_NOVERS(__down); +EXPORT_SYMBOL_NOVERS(__down_trylock); +EXPORT_SYMBOL_NOVERS(__up); +EXPORT_SYMBOL_NOVERS(__put_user_asm_l); +EXPORT_SYMBOL_NOVERS(__get_user_asm_l); +EXPORT_SYMBOL_NOVERS(memcmp); +EXPORT_SYMBOL_NOVERS(memcpy); +EXPORT_SYMBOL_NOVERS(memset); +EXPORT_SYMBOL_NOVERS(memscan); +EXPORT_SYMBOL_NOVERS(strchr); +EXPORT_SYMBOL_NOVERS(strlen); + +EXPORT_SYMBOL(flush_dcache_page); + +/* Ugh. These come in from libgcc.a at link time. */ + +extern void __sdivsi3(void); +extern void __muldi3(void); +extern void __udivsi3(void); +EXPORT_SYMBOL_NOVERS(__sdivsi3); +EXPORT_SYMBOL_NOVERS(__muldi3); +EXPORT_SYMBOL_NOVERS(__udivsi3); + diff --git a/arch/sh64/kernel/signal.c b/arch/sh64/kernel/signal.c new file mode 100644 index 000000000..376b8998b --- /dev/null +++ b/arch/sh64/kernel/signal.c @@ -0,0 +1,737 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/signal.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * Started from sh version. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define REG_RET 9 +#define REG_ARG1 2 +#define REG_ARG2 3 +#define REG_ARG3 4 +#define REG_SP 15 +#define REG_PR 18 +#define REF_REG_RET regs->regs[REG_RET] +#define REF_REG_SP regs->regs[REG_SP] +#define DEREF_REG_PR regs->regs[REG_PR] + +#define DEBUG_SIG 0 + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset); + +/* + * Atomically swap in the new signal mask, and wait for a signal. + */ + +asmlinkage int +sys_sigsuspend(old_sigset_t mask, + unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs * regs) +{ + sigset_t saveset; + + mask &= _BLOCKABLE; + spin_lock_irq(¤t->sighand->siglock); + saveset = current->blocked; + siginitset(¤t->blocked, mask); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + REF_REG_RET = -EINTR; + while (1) { + current->state = TASK_INTERRUPTIBLE; + schedule(); + regs->pc += 4; /* because sys_sigreturn decrements the pc */ + if (do_signal(regs, &saveset)) { + /* pc now points at signal handler. Need to decrement + it because entry.S will increment it. */ + regs->pc -= 4; + return -EINTR; + } + } +} + +asmlinkage int +sys_rt_sigsuspend(sigset_t *unewset, size_t sigsetsize, + unsigned long r4, unsigned long r5, unsigned long r6, + unsigned long r7, + struct pt_regs * regs) +{ + sigset_t saveset, newset; + + /* XXX: Don't preclude handling different sized sigset_t's. 
*/ + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + if (copy_from_user(&newset, unewset, sizeof(newset))) + return -EFAULT; + sigdelsetmask(&newset, ~_BLOCKABLE); + spin_lock_irq(¤t->sighand->siglock); + saveset = current->blocked; + current->blocked = newset; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + REF_REG_RET = -EINTR; + while (1) { + current->state = TASK_INTERRUPTIBLE; + schedule(); + regs->pc += 4; /* because sys_sigreturn decrements the pc */ + if (do_signal(regs, &saveset)) { + /* pc now points at signal handler. Need to decrement + it because entry.S will increment it. */ + regs->pc -= 4; + return -EINTR; + } + } +} + +asmlinkage int +sys_sigaction(int sig, const struct old_sigaction __user *act, + struct old_sigaction __user *oact) +{ + struct k_sigaction new_ka, old_ka; + int ret; + + if (act) { + old_sigset_t mask; + if (verify_area(VERIFY_READ, act, sizeof(*act)) || + __get_user(new_ka.sa.sa_handler, &act->sa_handler) || + __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) + return -EFAULT; + __get_user(new_ka.sa.sa_flags, &act->sa_flags); + __get_user(mask, &act->sa_mask); + siginitset(&new_ka.sa.sa_mask, mask); + } + + ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); + + if (!ret && oact) { + if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || + __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || + __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) + return -EFAULT; + __put_user(old_ka.sa.sa_flags, &oact->sa_flags); + __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); + } + + return ret; +} + +asmlinkage int +sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, + unsigned long r4, unsigned long r5, unsigned long r6, + unsigned long r7, + struct pt_regs * regs) +{ + return do_sigaltstack(uss, uoss, REF_REG_SP); +} + + +/* + * Do a signal return; undo the signal stack. + */ + +struct sigframe +{ + struct sigcontext sc; + unsigned long extramask[_NSIG_WORDS-1]; + long long retcode[2]; +}; + +struct rt_sigframe +{ + struct siginfo __user *pinfo; + void *puc; + struct siginfo info; + struct ucontext uc; + long long retcode[2]; +}; + +#ifndef CONFIG_NOFPU_SUPPORT +static inline int +restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc) +{ + int err = 0; + int fpvalid; + + err |= __get_user (fpvalid, &sc->sc_fpvalid); + current->used_math = fpvalid; + if (! fpvalid) + return err; + + if (current == last_task_used_math) { + last_task_used_math = NULL; + regs->sr |= SR_FD; + } + + err |= __copy_from_user(¤t->thread.fpu.hard, &sc->sc_fpregs[0], + (sizeof(long long) * 32) + (sizeof(int) * 1)); + + return err; +} + +static inline int +setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc) +{ + int err = 0; + int fpvalid; + + fpvalid = current->used_math; + err |= __put_user(fpvalid, &sc->sc_fpvalid); + if (! 
fpvalid) + return err; + + if (current == last_task_used_math) { + grab_fpu(); + fpsave(¤t->thread.fpu.hard); + release_fpu(); + last_task_used_math = NULL; + regs->sr |= SR_FD; + } + + err |= __copy_to_user(&sc->sc_fpregs[0], ¤t->thread.fpu.hard, + (sizeof(long long) * 32) + (sizeof(int) * 1)); + current->used_math = 0; + + return err; +} +#else +static inline int +restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc) +{} +static inline int +setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc) +{} +#endif + +static int +restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p) +{ + unsigned int err = 0; + unsigned long long current_sr, new_sr; +#define SR_MASK 0xffff8cfd + +#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) + + COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]); + COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]); + COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]); + COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]); + COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]); + COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]); + COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]); + COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]); + COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]); + COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]); + COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]); + COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]); + COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]); + COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]); + COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]); + COPY(regs[60]); COPY(regs[61]); COPY(regs[62]); + COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]); + COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]); + + /* Prevent the signal handler manipulating SR in a way that can + crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be + modified */ + current_sr = regs->sr; + err |= __get_user(new_sr, &sc->sc_sr); + regs->sr &= SR_MASK; + regs->sr |= (new_sr & ~SR_MASK); + + COPY(pc); + +#undef COPY + + /* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr + * has been restored above.) 
*/ + err |= restore_sigcontext_fpu(regs, sc); + + regs->syscall_nr = -1; /* disable syscall checks */ + err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]); + return err; +} + +asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3, + unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs * regs) +{ + struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP; + sigset_t set; + long long ret; + + if (verify_area(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + + if (__get_user(set.sig[0], &frame->sc.oldmask) + || (_NSIG_WORDS > 1 + && __copy_from_user(&set.sig[1], &frame->extramask, + sizeof(frame->extramask)))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + + spin_lock_irq(¤t->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + if (restore_sigcontext(regs, &frame->sc, &ret)) + goto badframe; + regs->pc -= 4; + + return (int) ret; + +badframe: + force_sig(SIGSEGV, current); + return 0; +} + +asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3, + unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7, + struct pt_regs * regs) +{ + struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP; + sigset_t set; + stack_t __user st; + long long ret; + + if (verify_area(VERIFY_READ, frame, sizeof(*frame))) + goto badframe; + + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + sigdelsetmask(&set, ~_BLOCKABLE); + spin_lock_irq(¤t->sighand->siglock); + current->blocked = set; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret)) + goto badframe; + regs->pc -= 4; + + if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st))) + goto badframe; + /* It is more difficult to avoid calling this function than to + call it and ignore errors. */ + do_sigaltstack(&st, NULL, REF_REG_SP); + + return (int) ret; + +badframe: + force_sig(SIGSEGV, current); + return 0; +} + +/* + * Set up a signal frame. + */ + +static int +setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, + unsigned long mask) +{ + int err = 0; + + /* Do this first, otherwise is this sets sr->fd, that value isn't preserved. 
*/ + err |= setup_sigcontext_fpu(regs, sc); + +#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x) + + COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]); + COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]); + COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]); + COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]); + COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]); + COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]); + COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]); + COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]); + COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]); + COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]); + COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]); + COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]); + COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]); + COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]); + COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]); + COPY(regs[60]); COPY(regs[61]); COPY(regs[62]); + COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]); + COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]); + COPY(sr); COPY(pc); + +#undef COPY + + err |= __put_user(mask, &sc->oldmask); + + return err; +} + +/* + * Determine which stack to use.. + */ +static inline void __user * +get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) +{ + if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) + sp = current->sas_ss_sp + current->sas_ss_size; + + return (void __user *)((sp - frame_size) & -8ul); +} + +void sa_default_restorer(void); /* See comments below */ +void sa_default_rt_restorer(void); /* See comments below */ + +static void setup_frame(int sig, struct k_sigaction *ka, + sigset_t *set, struct pt_regs *regs) +{ + struct sigframe __user *frame; + int err = 0; + int signal; + + frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame)); + + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + goto give_sigsegv; + + signal = current_thread_info()->exec_domain + && current_thread_info()->exec_domain->signal_invmap + && sig < 32 + ? current_thread_info()->exec_domain->signal_invmap[sig] + : sig; + + err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); + + /* Give up earlier as i386, in case */ + if (err) + goto give_sigsegv; + + if (_NSIG_WORDS > 1) { + err |= __copy_to_user(frame->extramask, &set->sig[1], + sizeof(frame->extramask)); } + + /* Give up earlier as i386, in case */ + if (err) + goto give_sigsegv; + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + if (ka->sa.sa_flags & SA_RESTORER) { + DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1; + + /* + * On SH5 all edited pointers are subject to NEFF + */ + DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? + (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + } else { + /* + * Different approach on SH5. + * . Endianness independent asm code gets placed in entry.S . + * This is limited to four ASM instructions corresponding + * to two long longs in size. + * . err checking is done on the else branch only + * . flush_icache_range() is called upon __put_user() only + * . all edited pointers are subject to NEFF + * . being code, linker turns ShMedia bit on, always + * dereference index -1. + */ + DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; + DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? 
+ (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + + if (__copy_to_user(frame->retcode, + (unsigned long long)sa_default_restorer & (~1), 16) != 0) + goto give_sigsegv; + + /* Cohere the trampoline with the I-cache. */ + flush_cache_sigtramp(DEREF_REG_PR-1, DEREF_REG_PR-1+16); + } + + /* + * Set up registers for signal handler. + * All edited pointers are subject to NEFF. + */ + regs->regs[REG_SP] = (unsigned long) frame; + regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? + (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; + regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ + + /* FIXME: + The glibc profiling support for SH-5 needs to be passed a sigcontext + so it can retrieve the PC. At some point during 2003 the glibc + support was changed to receive the sigcontext through the 2nd + argument, but there are still versions of libc.so in use that use + the 3rd argument. Until libc.so is stabilised, pass the sigcontext + through both 2nd and 3rd arguments. + */ + + regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc; + regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc; + + regs->pc = (unsigned long) ka->sa.sa_handler; + regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc; + + set_fs(USER_DS); + +#if DEBUG_SIG + /* Broken %016Lx */ + printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", + signal, + current->comm, current->pid, frame, + regs->pc >> 32, regs->pc & 0xffffffff, + DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); +#endif + + return; + +give_sigsegv: + if (sig == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); +} + +static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + int err = 0; + int signal; + + frame = get_sigframe(ka, regs->regs[REG_SP], sizeof(*frame)); + + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + goto give_sigsegv; + + signal = current_thread_info()->exec_domain + && current_thread_info()->exec_domain->signal_invmap + && sig < 32 + ? current_thread_info()->exec_domain->signal_invmap[sig] + : sig; + + err |= __put_user(&frame->info, &frame->pinfo); + err |= __put_user(&frame->uc, &frame->puc); + err |= copy_siginfo_to_user(&frame->info, info); + + /* Give up earlier as i386, in case */ + if (err) + goto give_sigsegv; + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user((void *)current->sas_ss_sp, + &frame->uc.uc_stack.ss_sp); + err |= __put_user(sas_ss_flags(regs->regs[REG_SP]), + &frame->uc.uc_stack.ss_flags); + err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + err |= setup_sigcontext(&frame->uc.uc_mcontext, + regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + /* Give up earlier as i386, in case */ + if (err) + goto give_sigsegv; + + /* Set up to return from userspace. If provided, use a stub + already in userspace. */ + if (ka->sa.sa_flags & SA_RESTORER) { + DEREF_REG_PR = (unsigned long) ka->sa.sa_restorer | 0x1; + + /* + * On SH5 all edited pointers are subject to NEFF + */ + DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? + (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + } else { + /* + * Different approach on SH5. + * . Endianness independent asm code gets placed in entry.S . + * This is limited to four ASM instructions corresponding + * to two long longs in size. + * . 
err checking is done on the else branch only + * . flush_icache_range() is called upon __put_user() only + * . all edited pointers are subject to NEFF + * . being code, linker turns ShMedia bit on, always + * dereference index -1. + */ + + DEREF_REG_PR = (unsigned long) frame->retcode | 0x01; + DEREF_REG_PR = (DEREF_REG_PR & NEFF_SIGN) ? + (DEREF_REG_PR | NEFF_MASK) : DEREF_REG_PR; + + if (__copy_to_user(frame->retcode, + (unsigned long long)sa_default_rt_restorer & (~1), 16) != 0) + goto give_sigsegv; + + flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15); + } + + /* + * Set up registers for signal handler. + * All edited pointers are subject to NEFF. + */ + regs->regs[REG_SP] = (unsigned long) frame; + regs->regs[REG_SP] = (regs->regs[REG_SP] & NEFF_SIGN) ? + (regs->regs[REG_SP] | NEFF_MASK) : regs->regs[REG_SP]; + regs->regs[REG_ARG1] = signal; /* Arg for signal handler */ + regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info; + regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext; + regs->pc = (unsigned long) ka->sa.sa_handler; + regs->pc = (regs->pc & NEFF_SIGN) ? (regs->pc | NEFF_MASK) : regs->pc; + + set_fs(USER_DS); + +#if DEBUG_SIG + /* Broken %016Lx */ + printk("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n", + signal, + current->comm, current->pid, frame, + regs->pc >> 32, regs->pc & 0xffffffff, + DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff); +#endif + + return; + +give_sigsegv: + if (sig == SIGSEGV) + ka->sa.sa_handler = SIG_DFL; + force_sig(SIGSEGV, current); +} + +/* + * OK, we're invoking a handler + */ + +static void +handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset, + struct pt_regs * regs) +{ + struct k_sigaction *ka = ¤t->sighand->action[sig-1]; + + /* Are we from a system call? */ + if (regs->syscall_nr >= 0) { + /* If so, check system call restarting.. */ + switch (regs->regs[REG_RET]) { + case -ERESTARTNOHAND: + regs->regs[REG_RET] = -EINTR; + break; + + case -ERESTARTSYS: + if (!(ka->sa.sa_flags & SA_RESTART)) { + regs->regs[REG_RET] = -EINTR; + break; + } + /* fallthrough */ + case -ERESTARTNOINTR: + /* Decode syscall # */ + regs->regs[REG_RET] = regs->syscall_nr; + regs->pc -= 4; + } + } + + /* Set up the stack frame */ + if (ka->sa.sa_flags & SA_SIGINFO) + setup_rt_frame(sig, ka, info, oldset, regs); + else + setup_frame(sig, ka, oldset, regs); + + if (ka->sa.sa_flags & SA_ONESHOT) + ka->sa.sa_handler = SIG_DFL; + + if (!(ka->sa.sa_flags & SA_NODEFER)) { + spin_lock_irq(¤t->sighand->siglock); + sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); + sigaddset(¤t->blocked,sig); + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + } +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. + */ +int do_signal(struct pt_regs *regs, sigset_t *oldset) +{ + siginfo_t info; + int signr; + + /* + * We want the common case to go fast, which + * is why we may in certain cases get here from + * kernel mode. Just return without doing anything + * if so. 
+ */ + if (!user_mode(regs)) + return 1; + + if (current->flags & PF_FREEZE) { + refrigerator(0); + goto no_signal; + } + + if (!oldset) + oldset = ¤t->blocked; + + signr = get_signal_to_deliver(&info, regs, 0); + + if (signr > 0) { + /* Whee! Actually deliver the signal. */ + handle_signal(signr, &info, oldset, regs); + return 1; + } + +no_signal: + /* Did we come from a system call? */ + if (regs->syscall_nr >= 0) { + /* Restart the system call - no handlers present */ + if (regs->regs[REG_RET] == -ERESTARTNOHAND || + regs->regs[REG_RET] == -ERESTARTSYS || + regs->regs[REG_RET] == -ERESTARTNOINTR) { + /* Decode Syscall # */ + regs->regs[REG_RET] = regs->syscall_nr; + regs->pc -= 4; + } + } + return 0; +} diff --git a/arch/sh64/kernel/switchto.S b/arch/sh64/kernel/switchto.S new file mode 100644 index 000000000..24ef14c83 --- /dev/null +++ b/arch/sh64/kernel/switchto.S @@ -0,0 +1,199 @@ +/* + * arch/sh64/kernel/switchto.S + * + * sh64 context switch + * + * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. +*/ + + .section .text..SHmedia32,"ax" + .little + + .balign 32 + + .type sh64_switch_to,@function + .global sh64_switch_to + .global __sh64_switch_to_end +sh64_switch_to: + +/* Incoming args + r2 - prev + r3 - &prev->thread + r4 - next + r5 - &next->thread + + Outgoing results + r2 - last (=prev) + + Want to create a full (struct pt_regs) on the stack to allow backtracing + functions to work. However, we only need to populate the callee-save + register slots in this structure; since we're a function our ancestors must + have themselves preserved all caller saved state in the stack. This saves + some wasted effort since we won't need to look at the values. + + In particular, all caller-save registers are immediately available for + scratch use. + +*/ + +#define FRAME_SIZE (76*8 + 8) + + movi FRAME_SIZE, r0 + sub.l r15, r0, r15 + ! Do normal-style register save to support backtrace + + st.l r15, 0, r18 ! save link reg + st.l r15, 4, r14 ! save fp + add.l r15, r63, r14 ! setup frame pointer + + ! hopefully this looks normal to the backtrace now. + + addi.l r15, 8, r1 ! base of pt_regs + addi.l r1, 24, r0 ! base of pt_regs.regs + addi.l r0, (63*8), r8 ! base of pt_regs.trregs + + /* Note : to be fixed? + struct pt_regs is really designed for holding the state on entry + to an exception, i.e. pc,sr,regs etc. However, for the context + switch state, some of this is not required. But the unwinder takes + struct pt_regs * as an arg so we have to build this structure + to allow unwinding switched tasks in show_state() */ + + st.q r0, ( 9*8), r9 + st.q r0, (10*8), r10 + st.q r0, (11*8), r11 + st.q r0, (12*8), r12 + st.q r0, (13*8), r13 + st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at + ! the point where the process is left in suspended animation, i.e. current + ! fp here, not the saved one. 
+ st.q r0, (16*8), r16 + + st.q r0, (24*8), r24 + st.q r0, (25*8), r25 + st.q r0, (26*8), r26 + st.q r0, (27*8), r27 + st.q r0, (28*8), r28 + st.q r0, (29*8), r29 + st.q r0, (30*8), r30 + st.q r0, (31*8), r31 + st.q r0, (32*8), r32 + st.q r0, (33*8), r33 + st.q r0, (34*8), r34 + st.q r0, (35*8), r35 + + st.q r0, (44*8), r44 + st.q r0, (45*8), r45 + st.q r0, (46*8), r46 + st.q r0, (47*8), r47 + st.q r0, (48*8), r48 + st.q r0, (49*8), r49 + st.q r0, (50*8), r50 + st.q r0, (51*8), r51 + st.q r0, (52*8), r52 + st.q r0, (53*8), r53 + st.q r0, (54*8), r54 + st.q r0, (55*8), r55 + st.q r0, (56*8), r56 + st.q r0, (57*8), r57 + st.q r0, (58*8), r58 + st.q r0, (59*8), r59 + + ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency) + ! Use a local label to avoid creating a symbol that will confuse the ! + ! backtrace + pta .Lsave_pc, tr0 + + gettr tr5, r45 + gettr tr6, r46 + gettr tr7, r47 + st.q r8, (5*8), r45 + st.q r8, (6*8), r46 + st.q r8, (7*8), r47 + + ! Now switch context + gettr tr0, r9 + st.l r3, 0, r15 ! prev->thread.sp + st.l r3, 8, r1 ! prev->thread.kregs + st.l r3, 4, r9 ! prev->thread.pc + st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc + + ! Load PC for next task (init value or save_pc later) + ld.l r5, 4, r18 ! next->thread.pc + ! Switch stacks + ld.l r5, 0, r15 ! next->thread.sp + ptabs r18, tr0 + + ! Update current + ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct) + putcon r9, kcr0 ! current = next->thread_info + + ! go to save_pc for a reschedule, or the initial thread.pc for a new process + blink tr0, r63 + + ! Restore (when we come back to a previously saved task) +.Lsave_pc: + addi.l r15, 32, r0 ! r0 = next's regs + addi.l r0, (63*8), r8 ! r8 = next's tr_regs + + ld.q r8, (5*8), r45 + ld.q r8, (6*8), r46 + ld.q r8, (7*8), r47 + ptabs r45, tr5 + ptabs r46, tr6 + ptabs r47, tr7 + + ld.q r0, ( 9*8), r9 + ld.q r0, (10*8), r10 + ld.q r0, (11*8), r11 + ld.q r0, (12*8), r12 + ld.q r0, (13*8), r13 + ld.q r0, (14*8), r14 + ld.q r0, (16*8), r16 + + ld.q r0, (24*8), r24 + ld.q r0, (25*8), r25 + ld.q r0, (26*8), r26 + ld.q r0, (27*8), r27 + ld.q r0, (28*8), r28 + ld.q r0, (29*8), r29 + ld.q r0, (30*8), r30 + ld.q r0, (31*8), r31 + ld.q r0, (32*8), r32 + ld.q r0, (33*8), r33 + ld.q r0, (34*8), r34 + ld.q r0, (35*8), r35 + + ld.q r0, (44*8), r44 + ld.q r0, (45*8), r45 + ld.q r0, (46*8), r46 + ld.q r0, (47*8), r47 + ld.q r0, (48*8), r48 + ld.q r0, (49*8), r49 + ld.q r0, (50*8), r50 + ld.q r0, (51*8), r51 + ld.q r0, (52*8), r52 + ld.q r0, (53*8), r53 + ld.q r0, (54*8), r54 + ld.q r0, (55*8), r55 + ld.q r0, (56*8), r56 + ld.q r0, (57*8), r57 + ld.q r0, (58*8), r58 + ld.q r0, (59*8), r59 + + ! epilogue + ld.l r15, 0, r18 + ld.l r15, 4, r14 + ori r4, 0, r2 ! last = prev + ptabs r18, tr0 + movi FRAME_SIZE, r0 + add r15, r0, r15 + blink tr0, r63 +__sh64_switch_to_end: +.LFE1: + .size sh64_switch_to,.LFE1-sh64_switch_to + diff --git a/arch/sh64/kernel/sys_sh64.c b/arch/sh64/kernel/sys_sh64.c new file mode 100644 index 000000000..6d99a9701 --- /dev/null +++ b/arch/sh64/kernel/sys_sh64.c @@ -0,0 +1,286 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/sys_sh64.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * This file contains various random system calls that + * have a non-standard calling sequence on the Linux/SH5 + * platform. + * + * Mostly taken from i386 version. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define REG_3 3 + +/* + * sys_pipe() is the normal C calling standard for creating + * a pipe. It's not the way Unix traditionally does this, though. + */ +#ifdef NEW_PIPE_IMPLEMENTATION +asmlinkage int sys_pipe(unsigned long * fildes, + unsigned long dummy_r3, + unsigned long dummy_r4, + unsigned long dummy_r5, + unsigned long dummy_r6, + unsigned long dummy_r7, + struct pt_regs * regs) /* r8 = pt_regs forced by entry.S */ +{ + int fd[2]; + int ret; + + ret = do_pipe(fd); + if (ret == 0) + /* + *********************************************************************** + * To avoid the copy_to_user we prefer to break the ABIs convention, * + * packing the valid pair of file IDs into a single register (r3); * + * while r2 is the return code as defined by the sh5-ABIs. * + * BE CAREFUL: pipe stub, into glibc, must be aware of this solution * + *********************************************************************** + +#ifdef __LITTLE_ENDIAN__ + regs->regs[REG_3] = (((unsigned long long) fd[1]) << 32) | ((unsigned long long) fd[0]); +#else + regs->regs[REG_3] = (((unsigned long long) fd[0]) << 32) | ((unsigned long long) fd[1]); +#endif + + */ + /* although not very clever this is endianess independent */ + regs->regs[REG_3] = (unsigned long long) *((unsigned long long *) fd); + + return ret; +} + +#else +asmlinkage int sys_pipe(unsigned long * fildes) +{ + int fd[2]; + int error; + + error = do_pipe(fd); + if (!error) { + if (copy_to_user(fildes, fd, 2*sizeof(int))) + error = -EFAULT; + } + return error; +} + +#endif + +/* + * To avoid cache alias, we map the shard page with same color. + */ +#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1)) + +unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct vm_area_struct *vma; + + if (flags & MAP_FIXED) { + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ + if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1))) + return -EINVAL; + return addr; + } + + if (len > TASK_SIZE) + return -ENOMEM; + if (!addr) + addr = TASK_UNMAPPED_BASE; + + if (flags & MAP_PRIVATE) + addr = PAGE_ALIGN(addr); + else + addr = COLOUR_ALIGN(addr); + + for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). 
*/ + if (TASK_SIZE - len < addr) + return -ENOMEM; + if (!vma || addr + len <= vma->vm_start) + return addr; + addr = vma->vm_end; + if (!(flags & MAP_PRIVATE)) + addr = COLOUR_ALIGN(addr); + } +} + +/* common code for old and new mmaps */ +static inline long do_mmap2( + unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) +{ + int error = -EBADF; + struct file * file = NULL; + + flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); + if (!(flags & MAP_ANONYMOUS)) { + file = fget(fd); + if (!file) + goto out; + } + + down_write(¤t->mm->mmap_sem); + error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); + up_write(¤t->mm->mmap_sem); + + if (file) + fput(file); +out: + return error; +} + +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) +{ + return do_mmap2(addr, len, prot, flags, fd, pgoff); +} + +asmlinkage int old_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + int fd, unsigned long off) +{ + if (off & ~PAGE_MASK) + return -EINVAL; + return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT); +} + +/* + * sys_ipc() is the de-multiplexer for the SysV IPC calls.. + * + * This is really horribly ugly. + */ +asmlinkage int sys_ipc(uint call, int first, int second, + int third, void __user *ptr, long fifth) +{ + int version, ret; + + version = call >> 16; /* hack for backward compatibility */ + call &= 0xffff; + + if (call <= SEMCTL) + switch (call) { + case SEMOP: + return sys_semtimedop(first, (struct sembuf __user *)ptr, + second, NULL); + case SEMTIMEDOP: + return sys_semtimedop(first, (struct sembuf __user *)ptr, + second, + (const struct timespec __user *)fifth); + case SEMGET: + return sys_semget (first, second, third); + case SEMCTL: { + union semun fourth; + if (!ptr) + return -EINVAL; + if (get_user(fourth.__pad, (void * __user *) ptr)) + return -EFAULT; + return sys_semctl (first, second, third, fourth); + } + default: + return -EINVAL; + } + + if (call <= MSGCTL) + switch (call) { + case MSGSND: + return sys_msgsnd (first, (struct msgbuf __user *) ptr, + second, third); + case MSGRCV: + switch (version) { + case 0: { + struct ipc_kludge tmp; + if (!ptr) + return -EINVAL; + + if (copy_from_user(&tmp, + (struct ipc_kludge __user *) ptr, + sizeof (tmp))) + return -EFAULT; + return sys_msgrcv (first, tmp.msgp, second, + tmp.msgtyp, third); + } + default: + return sys_msgrcv (first, + (struct msgbuf __user *) ptr, + second, fifth, third); + } + case MSGGET: + return sys_msgget ((key_t) first, second); + case MSGCTL: + return sys_msgctl (first, second, + (struct msqid_ds __user *) ptr); + default: + return -EINVAL; + } + if (call <= SHMCTL) + switch (call) { + case SHMAT: + switch (version) { + default: { + ulong raddr; + ret = do_shmat (first, (char __user *) ptr, + second, &raddr); + if (ret) + return ret; + return put_user (raddr, (ulong __user *) third); + } + case 1: /* iBCS2 emulator entry point */ + if (!segment_eq(get_fs(), get_ds())) + return -EINVAL; + return do_shmat (first, (char __user *) ptr, + second, (ulong *) third); + } + case SHMDT: + return sys_shmdt ((char __user *)ptr); + case SHMGET: + return sys_shmget (first, second, third); + case SHMCTL: + return sys_shmctl (first, second, + (struct shmid_ds __user *) ptr); + default: + return -EINVAL; + } + + return -EINVAL; +} + +asmlinkage int sys_uname(struct old_utsname * name) +{ + int err; + if (!name) + return 
-EFAULT; + down_read(&uts_sem); + err=copy_to_user(name, &system_utsname, sizeof (*name)); + up_read(&uts_sem); + return err?-EFAULT:0; +} + diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S new file mode 100644 index 000000000..819e335af --- /dev/null +++ b/arch/sh64/kernel/syscalls.S @@ -0,0 +1,340 @@ +/* + * arch/sh64/kernel/syscalls.S + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2004 Paul Mundt + * Copyright (C) 2003, 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include + + .section .data, "aw" + .balign 32 + +/* + * System calls jump table + */ + .globl sys_call_table +sys_call_table: + .long sys_ni_syscall /* 0 - old "setup()" system call */ + .long sys_exit + .long sys_fork + .long sys_read + .long sys_write + .long sys_open /* 5 */ + .long sys_close + .long sys_waitpid + .long sys_creat + .long sys_link + .long sys_unlink /* 10 */ + .long sys_execve + .long sys_chdir + .long sys_time + .long sys_mknod + .long sys_chmod /* 15 */ + .long sys_lchown16 + .long sys_ni_syscall /* old break syscall holder */ + .long sys_stat + .long sys_lseek + .long sys_getpid /* 20 */ + .long sys_mount + .long sys_oldumount + .long sys_setuid16 + .long sys_getuid16 + .long sys_stime /* 25 */ + .long sys_ptrace + .long sys_alarm + .long sys_fstat + .long sys_pause + .long sys_utime /* 30 */ + .long sys_ni_syscall /* old stty syscall holder */ + .long sys_ni_syscall /* old gtty syscall holder */ + .long sys_access + .long sys_nice + .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ + .long sys_sync + .long sys_kill + .long sys_rename + .long sys_mkdir + .long sys_rmdir /* 40 */ + .long sys_dup + .long sys_pipe + .long sys_times + .long sys_ni_syscall /* old prof syscall holder */ + .long sys_brk /* 45 */ + .long sys_setgid16 + .long sys_getgid16 + .long sys_signal + .long sys_geteuid16 + .long sys_getegid16 /* 50 */ + .long sys_acct + .long sys_umount /* recycled never used phys( */ + .long sys_ni_syscall /* old lock syscall holder */ + .long sys_ioctl + .long sys_fcntl /* 55 */ + .long sys_ni_syscall /* old mpx syscall holder */ + .long sys_setpgid + .long sys_ni_syscall /* old ulimit syscall holder */ + .long sys_ni_syscall /* sys_olduname */ + .long sys_umask /* 60 */ + .long sys_chroot + .long sys_ustat + .long sys_dup2 + .long sys_getppid + .long sys_getpgrp /* 65 */ + .long sys_setsid + .long sys_sigaction + .long sys_sgetmask + .long sys_ssetmask + .long sys_setreuid16 /* 70 */ + .long sys_setregid16 + .long sys_sigsuspend + .long sys_sigpending + .long sys_sethostname + .long sys_setrlimit /* 75 */ + .long sys_old_getrlimit + .long sys_getrusage + .long sys_gettimeofday + .long sys_settimeofday + .long sys_getgroups16 /* 80 */ + .long sys_setgroups16 + .long sys_ni_syscall /* sys_oldselect */ + .long sys_symlink + .long sys_lstat + .long sys_readlink /* 85 */ + .long sys_uselib + .long sys_swapon + .long sys_reboot + .long old_readdir + .long old_mmap /* 90 */ + .long sys_munmap + .long sys_truncate + .long sys_ftruncate + .long sys_fchmod + .long sys_fchown16 /* 95 */ + .long sys_getpriority + .long sys_setpriority + .long sys_ni_syscall /* old profil syscall holder */ + .long sys_statfs + .long sys_fstatfs /* 100 */ + .long sys_ni_syscall /* ioperm */ + .long sys_socketcall /* Obsolete implementation of socket syscall */ + .long sys_syslog + .long sys_setitimer + .long sys_getitimer 
/* 105 */ + .long sys_newstat + .long sys_newlstat + .long sys_newfstat + .long sys_uname + .long sys_ni_syscall /* 110 */ /* iopl */ + .long sys_vhangup + .long sys_ni_syscall /* idle */ + .long sys_ni_syscall /* vm86old */ + .long sys_wait4 + .long sys_swapoff /* 115 */ + .long sys_sysinfo + .long sys_ipc /* Obsolete ipc syscall implementation */ + .long sys_fsync + .long sys_sigreturn + .long sys_clone /* 120 */ + .long sys_setdomainname + .long sys_newuname + .long sys_ni_syscall /* sys_modify_ldt */ + .long sys_adjtimex + .long sys_mprotect /* 125 */ + .long sys_sigprocmask + .long sys_ni_syscall /* old "create_module" */ + .long sys_init_module + .long sys_delete_module + .long sys_ni_syscall /* 130: old "get_kernel_syms" */ + .long sys_quotactl + .long sys_getpgid + .long sys_fchdir + .long sys_bdflush + .long sys_sysfs /* 135 */ + .long sys_personality + .long sys_ni_syscall /* for afs_syscall */ + .long sys_setfsuid16 + .long sys_setfsgid16 + .long sys_llseek /* 140 */ + .long sys_getdents + .long sys_select + .long sys_flock + .long sys_msync + .long sys_readv /* 145 */ + .long sys_writev + .long sys_getsid + .long sys_fdatasync + .long sys_sysctl + .long sys_mlock /* 150 */ + .long sys_munlock + .long sys_mlockall + .long sys_munlockall + .long sys_sched_setparam + .long sys_sched_getparam /* 155 */ + .long sys_sched_setscheduler + .long sys_sched_getscheduler + .long sys_sched_yield + .long sys_sched_get_priority_max + .long sys_sched_get_priority_min /* 160 */ + .long sys_sched_rr_get_interval + .long sys_nanosleep + .long sys_mremap + .long sys_setresuid16 + .long sys_getresuid16 /* 165 */ + .long sys_ni_syscall /* vm86 */ + .long sys_ni_syscall /* old "query_module" */ + .long sys_poll + .long sys_nfsservctl + .long sys_setresgid16 /* 170 */ + .long sys_getresgid16 + .long sys_prctl + .long sys_rt_sigreturn + .long sys_rt_sigaction + .long sys_rt_sigprocmask /* 175 */ + .long sys_rt_sigpending + .long sys_rt_sigtimedwait + .long sys_rt_sigqueueinfo + .long sys_rt_sigsuspend + .long sys_pread64 /* 180 */ + .long sys_pwrite64 + .long sys_chown16 + .long sys_getcwd + .long sys_capget + .long sys_capset /* 185 */ + .long sys_sigaltstack + .long sys_sendfile + .long sys_ni_syscall /* streams1 */ + .long sys_ni_syscall /* streams2 */ + .long sys_vfork /* 190 */ + .long sys_getrlimit + .long sys_mmap2 + .long sys_truncate64 + .long sys_ftruncate64 + .long sys_stat64 /* 195 */ + .long sys_lstat64 + .long sys_fstat64 + .long sys_lchown + .long sys_getuid + .long sys_getgid /* 200 */ + .long sys_geteuid + .long sys_getegid + .long sys_setreuid + .long sys_setregid + .long sys_getgroups /* 205 */ + .long sys_setgroups + .long sys_fchown + .long sys_setresuid + .long sys_getresuid + .long sys_setresgid /* 210 */ + .long sys_getresgid + .long sys_chown + .long sys_setuid + .long sys_setgid + .long sys_setfsuid /* 215 */ + .long sys_setfsgid + .long sys_pivot_root + .long sys_mincore + .long sys_madvise + /* Broken-out socket family (maintain backwards compatibility in syscall + numbering with 2.4) */ + .long sys_socket /* 220 */ + .long sys_bind + .long sys_connect + .long sys_listen + .long sys_accept + .long sys_getsockname /* 225 */ + .long sys_getpeername + .long sys_socketpair + .long sys_send + .long sys_sendto + .long sys_recv /* 230*/ + .long sys_recvfrom + .long sys_shutdown + .long sys_setsockopt + .long sys_getsockopt + .long sys_sendmsg /* 235 */ + .long sys_recvmsg + /* Broken-out IPC family (maintain backwards compatibility in syscall + numbering with 2.4) */ + .long 
sys_semop + .long sys_semget + .long sys_semctl + .long sys_msgsnd /* 240 */ + .long sys_msgrcv + .long sys_msgget + .long sys_msgctl + .long sys_ni_syscall /* sys_shmatcall */ + .long sys_shmdt /* 245 */ + .long sys_shmget + .long sys_shmctl + /* Rest of syscalls listed in 2.4 i386 unistd.h */ + .long sys_getdents64 + .long sys_fcntl64 + .long sys_ni_syscall /* 250 reserved for TUX */ + .long sys_ni_syscall /* Reserved for Security */ + .long sys_gettid + .long sys_readahead + .long sys_setxattr + .long sys_lsetxattr /* 255 */ + .long sys_fsetxattr + .long sys_getxattr + .long sys_lgetxattr + .long sys_fgetxattr + .long sys_listxattr /* 260 */ + .long sys_llistxattr + .long sys_flistxattr + .long sys_removexattr + .long sys_lremovexattr + .long sys_fremovexattr /* 265 */ + .long sys_tkill + .long sys_sendfile64 + .long sys_futex + .long sys_sched_setaffinity + .long sys_sched_getaffinity /* 270 */ + .long sys_ni_syscall + .long sys_ni_syscall + .long sys_io_setup + .long sys_io_destroy + .long sys_io_getevents /* 275 */ + .long sys_io_submit + .long sys_io_cancel + .long sys_fadvise64 + .long sys_ni_syscall + .long sys_exit_group /* 280 */ + /* Rest of new 2.6 syscalls */ + .long sys_lookup_dcookie + .long sys_epoll_create + .long sys_epoll_ctl + .long sys_epoll_wait + .long sys_remap_file_pages /* 285 */ + .long sys_set_tid_address + .long sys_timer_create + .long sys_timer_settime + .long sys_timer_gettime + .long sys_timer_getoverrun /* 290 */ + .long sys_timer_delete + .long sys_clock_settime + .long sys_clock_gettime + .long sys_clock_getres + .long sys_clock_nanosleep /* 295 */ + .long sys_statfs64 + .long sys_fstatfs64 + .long sys_tgkill + .long sys_utimes + .long sys_fadvise64_64 /* 300 */ + .long sys_ni_syscall /* Reserved for vserver */ + .long sys_ni_syscall /* Reserved for mbind */ + .long sys_ni_syscall /* get_mempolicy */ + .long sys_ni_syscall /* set_mempolicy */ + .long sys_mq_open /* 305 */ + .long sys_mq_unlink + .long sys_mq_timedsend + .long sys_mq_timedreceive + .long sys_mq_notify + .long sys_mq_getsetattr /* 310 */ + diff --git a/arch/sh64/kernel/time.c b/arch/sh64/kernel/time.c new file mode 100644 index 000000000..128bd22af --- /dev/null +++ b/arch/sh64/kernel/time.c @@ -0,0 +1,637 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/time.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003, 2004 Paul Mundt + * Copyright (C) 2003 Richard Curnow + * + * Original TMU/RTC code taken from sh version. + * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka + * Some code taken from i386 version. + * Copyright (C) 1991, 1992, 1995 Linus Torvalds + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include /* required by inline __asm__ stmt. 
*/ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#define TMU_TOCR_INIT 0x00 +#define TMU0_TCR_INIT 0x0020 +#define TMU_TSTR_INIT 1 + +/* RCR1 Bits */ +#define RCR1_CF 0x80 /* Carry Flag */ +#define RCR1_CIE 0x10 /* Carry Interrupt Enable */ +#define RCR1_AIE 0x08 /* Alarm Interrupt Enable */ +#define RCR1_AF 0x01 /* Alarm Flag */ + +/* RCR2 Bits */ +#define RCR2_PEF 0x80 /* PEriodic interrupt Flag */ +#define RCR2_PESMASK 0x70 /* Periodic interrupt Set */ +#define RCR2_RTCEN 0x08 /* ENable RTC */ +#define RCR2_ADJ 0x04 /* ADJustment (30-second) */ +#define RCR2_RESET 0x02 /* Reset bit */ +#define RCR2_START 0x01 /* Start bit */ + +/* Clock, Power and Reset Controller */ +#define CPRC_BLOCK_OFF 0x01010000 +#define CPRC_BASE PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF + +#define FRQCR (cprc_base+0x0) +#define WTCSR (cprc_base+0x0018) +#define STBCR (cprc_base+0x0030) + +/* Time Management Unit */ +#define TMU_BLOCK_OFF 0x01020000 +#define TMU_BASE PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF +#define TMU0_BASE tmu_base + 0x8 + (0xc * 0x0) +#define TMU1_BASE tmu_base + 0x8 + (0xc * 0x1) +#define TMU2_BASE tmu_base + 0x8 + (0xc * 0x2) + +#define TMU_TOCR tmu_base+0x0 /* Byte access */ +#define TMU_TSTR tmu_base+0x4 /* Byte access */ + +#define TMU0_TCOR TMU0_BASE+0x0 /* Long access */ +#define TMU0_TCNT TMU0_BASE+0x4 /* Long access */ +#define TMU0_TCR TMU0_BASE+0x8 /* Word access */ + +/* Real Time Clock */ +#define RTC_BLOCK_OFF 0x01040000 +#define RTC_BASE PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF + +#define R64CNT rtc_base+0x00 +#define RSECCNT rtc_base+0x04 +#define RMINCNT rtc_base+0x08 +#define RHRCNT rtc_base+0x0c +#define RWKCNT rtc_base+0x10 +#define RDAYCNT rtc_base+0x14 +#define RMONCNT rtc_base+0x18 +#define RYRCNT rtc_base+0x1c /* 16bit */ +#define RSECAR rtc_base+0x20 +#define RMINAR rtc_base+0x24 +#define RHRAR rtc_base+0x28 +#define RWKAR rtc_base+0x2c +#define RDAYAR rtc_base+0x30 +#define RMONAR rtc_base+0x34 +#define RCR1 rtc_base+0x38 +#define RCR2 rtc_base+0x3c + +#ifndef BCD_TO_BIN +#define BCD_TO_BIN(val) ((val)=((val)&15) + ((val)>>4)*10) +#endif + +#ifndef BIN_TO_BCD +#define BIN_TO_BCD(val) ((val)=(((val)/10)<<4) + (val)%10) +#endif + +#define TICK_SIZE (tick_nsec / 1000) + +extern unsigned long wall_jiffies; + +u64 jiffies_64 = INITIAL_JIFFIES; + +static unsigned long tmu_base, rtc_base; +unsigned long cprc_base; + +/* Variables to allow interpolation of time of day to resolution better than a + * jiffy. */ + +/* This is effectively protected by xtime_lock */ +static unsigned long ctc_last_interrupt; +static unsigned long long usecs_per_jiffy = 1000000/HZ; /* Approximation */ + +#define CTC_JIFFY_SCALE_SHIFT 40 + +/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */ +static unsigned long long scaled_recip_ctc_ticks_per_jiffy; + +/* Estimate number of microseconds that have elapsed since the last timer tick, + by scaling the delta that has occured in the CTC register. + + WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at + the CPU clock rate. If the CPU sleeps, the CTC stops counting. Bear this + in mind if enabling SLEEP_WORKS in process.c. In that case, this algorithm + probably needs to use TMU.TCNT0 instead. This will work even if the CPU is + sleeping, though will be coarser. + + FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime + is running or if the freq or tick arguments of adjtimex are modified after + we have calibrated the scaling factor? 
This will result in either a jump at + the end of a tick period, or a wrap backwards at the start of the next one, + if the application is reading the time of day often enough. I think we + ought to do better than this. For this reason, usecs_per_jiffy is left + separated out in the calculation below. This allows some future hook into + the adjtime-related stuff in kernel/timer.c to remove this hazard. + +*/ + +static unsigned long usecs_since_tick(void) +{ + unsigned long long current_ctc; + long ctc_ticks_since_interrupt; + unsigned long long ull_ctc_ticks_since_interrupt; + unsigned long result; + + unsigned long long mul1_out; + unsigned long long mul1_out_high; + unsigned long long mul2_out_low, mul2_out_high; + + /* Read CTC register */ + asm ("getcon cr62, %0" : "=r" (current_ctc)); + /* Note, the CTC counts down on each CPU clock, not up. + Note(2), use long type to get correct wraparound arithmetic when + the counter crosses zero. */ + ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc; + ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt; + + /* Inline assembly to do 32x32x32->64 multiplier */ + asm volatile ("mulu.l %1, %2, %0" : + "=r" (mul1_out) : + "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy)); + + mul1_out_high = mul1_out >> 32; + + asm volatile ("mulu.l %1, %2, %0" : + "=r" (mul2_out_low) : + "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy)); + +#if 1 + asm volatile ("mulu.l %1, %2, %0" : + "=r" (mul2_out_high) : + "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy)); +#endif + + result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT); + + return result; +} + +void do_gettimeofday(struct timeval *tv) +{ + unsigned long flags; + unsigned long seq; + unsigned long usec, sec; + + do { + seq = read_seqbegin_irqsave(&xtime_lock, flags); + usec = usecs_since_tick(); + { + unsigned long lost = jiffies - wall_jiffies; + + if (lost) + usec += lost * (1000000 / HZ); + } + + sec = xtime.tv_sec; + usec += xtime.tv_nsec / 1000; + } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); + + while (usec >= 1000000) { + usec -= 1000000; + sec++; + } + + tv->tv_sec = sec; + tv->tv_usec = usec; +} + +int do_settimeofday(struct timespec *tv) +{ + time_t wtm_sec, sec = tv->tv_sec; + long wtm_nsec, nsec = tv->tv_nsec; + + if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + + write_seqlock_irq(&xtime_lock); + /* + * This is revolting. We need to set "xtime" correctly. However, the + * value in this location is the value at the most recent update of + * wall time. Discover what correction gettimeofday() would have + * made, and then undo it! 
+ */ + nsec -= 1000 * (usecs_since_tick() + + (jiffies - wall_jiffies) * (1000000 / HZ)); + + wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); + wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); + + set_normalized_timespec(&xtime, sec, nsec); + set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); + + time_adjust = 0; /* stop active adjtime() */ + time_status |= STA_UNSYNC; + time_maxerror = NTP_PHASE_LIMIT; + time_esterror = NTP_PHASE_LIMIT; + write_sequnlock_irq(&xtime_lock); + clock_was_set(); + + return 0; +} + +static int set_rtc_time(unsigned long nowtime) +{ + int retval = 0; + int real_seconds, real_minutes, cmos_minutes; + + ctrl_outb(RCR2_RESET, RCR2); /* Reset pre-scaler & stop RTC */ + + cmos_minutes = ctrl_inb(RMINCNT); + BCD_TO_BIN(cmos_minutes); + + /* + * since we're only adjusting minutes and seconds, + * don't interfere with hour overflow. This avoids + * messing with unknown time zones but requires your + * RTC not to be off by more than 15 minutes + */ + real_seconds = nowtime % 60; + real_minutes = nowtime / 60; + if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1) + real_minutes += 30; /* correct for half hour time zone */ + real_minutes %= 60; + + if (abs(real_minutes - cmos_minutes) < 30) { + BIN_TO_BCD(real_seconds); + BIN_TO_BCD(real_minutes); + ctrl_outb(real_seconds, RSECCNT); + ctrl_outb(real_minutes, RMINCNT); + } else { + printk(KERN_WARNING + "set_rtc_time: can't update from %d to %d\n", + cmos_minutes, real_minutes); + retval = -1; + } + + ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start RTC */ + + return retval; +} + +/* last time the RTC clock got updated */ +static long last_rtc_update = 0; + +static inline void sh64_do_profile(struct pt_regs *regs) +{ + extern int _stext; + unsigned long pc; + + profile_hook(regs); + + if (user_mode(regs)) + return; + + /* Don't profile cpu_idle.. */ + if (!prof_buffer || !current->pid) + return; + + pc = instruction_pointer(regs); + pc -= (unsigned long) &_stext; + pc >>= prof_shift; + + /* + * Don't ignore out-of-bounds PC values silently, put them into the + * last histogram slot, so if present, they will show up as a sharp + * peak. + */ + if (pc > prof_len - 1) + pc = prof_len - 1; + + /* We could just be sloppy and not lock against a re-entry on this + increment, but the profiling code won't always be linked in anyway. */ + atomic_inc((atomic_t *)&prof_buffer[pc]); +} + +/* + * timer_interrupt() needs to keep up the real-time clock, + * as well as call the "do_timer()" routine every clocktick + */ +static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + unsigned long long current_ctc; + asm ("getcon cr62, %0" : "=r" (current_ctc)); + ctc_last_interrupt = (unsigned long) current_ctc; + + do_timer(regs); + + sh64_do_profile(regs); + +#ifdef CONFIG_HEARTBEAT + { + extern void heartbeat(void); + + heartbeat(); + } +#endif + + /* + * If we have an externally synchronized Linux clock, then update + * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be + * called as close as possible to 500 ms before the new second starts. 
+ */ + if ((time_status & STA_UNSYNC) == 0 && + xtime.tv_sec > last_rtc_update + 660 && + (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 && + (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) { + if (set_rtc_time(xtime.tv_sec) == 0) + last_rtc_update = xtime.tv_sec; + else + last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */ + } +} + +/* + * This is the same as the above, except we _also_ save the current + * Time Stamp Counter value at the time of the timer interrupt, so that + * we later on can estimate the time of day more exactly. + */ +static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + unsigned long timer_status; + + /* Clear UNF bit */ + timer_status = ctrl_inw(TMU0_TCR); + timer_status &= ~0x100; + ctrl_outw(timer_status, TMU0_TCR); + + /* + * Here we are in the timer irq handler. We just have irqs locally + * disabled but we don't know if the timer_bh is running on the other + * CPU. We need to avoid to SMP race with it. NOTE: we don' t need + * the irq version of write_lock because as just said we have irq + * locally disabled. -arca + */ + write_lock(&xtime_lock); + do_timer_interrupt(irq, NULL, regs); + write_unlock(&xtime_lock); + + return IRQ_HANDLED; +} + +static unsigned long get_rtc_time(void) +{ + unsigned int sec, min, hr, wk, day, mon, yr, yr100; + + again: + do { + ctrl_outb(0, RCR1); /* Clear CF-bit */ + sec = ctrl_inb(RSECCNT); + min = ctrl_inb(RMINCNT); + hr = ctrl_inb(RHRCNT); + wk = ctrl_inb(RWKCNT); + day = ctrl_inb(RDAYCNT); + mon = ctrl_inb(RMONCNT); + yr = ctrl_inw(RYRCNT); + yr100 = (yr >> 8); + yr &= 0xff; + } while ((ctrl_inb(RCR1) & RCR1_CF) != 0); + + BCD_TO_BIN(yr100); + BCD_TO_BIN(yr); + BCD_TO_BIN(mon); + BCD_TO_BIN(day); + BCD_TO_BIN(hr); + BCD_TO_BIN(min); + BCD_TO_BIN(sec); + + if (yr > 99 || mon < 1 || mon > 12 || day > 31 || day < 1 || + hr > 23 || min > 59 || sec > 59) { + printk(KERN_ERR + "SH RTC: invalid value, resetting to 1 Jan 2000\n"); + ctrl_outb(RCR2_RESET, RCR2); /* Reset & Stop */ + ctrl_outb(0, RSECCNT); + ctrl_outb(0, RMINCNT); + ctrl_outb(0, RHRCNT); + ctrl_outb(6, RWKCNT); + ctrl_outb(1, RDAYCNT); + ctrl_outb(1, RMONCNT); + ctrl_outw(0x2000, RYRCNT); + ctrl_outb(RCR2_RTCEN|RCR2_START, RCR2); /* Start */ + goto again; + } + + return mktime(yr100 * 100 + yr, mon, day, hr, min, sec); +} + +static __init unsigned int get_cpu_hz(void) +{ + unsigned int count; + unsigned long __dummy; + unsigned long ctc_val_init, ctc_val; + + /* + ** Regardless the toolchain, force the compiler to use the + ** arbitrary register r3 as a clock tick counter. + ** NOTE: r3 must be in accordance with rtc_interrupt() + */ + register unsigned long long __rtc_irq_flag __asm__ ("r3"); + + sti(); + do {} while (ctrl_inb(R64CNT) != 0); + ctrl_outb(RCR1_CIE, RCR1); /* Enable carry interrupt */ + + /* + * r3 is arbitrary. CDC does not support "=z". + */ + ctc_val_init = 0xffffffff; + ctc_val = ctc_val_init; + + asm volatile("gettr tr0, %1\n\t" + "putcon %0, " __CTC "\n\t" + "and %2, r63, %2\n\t" + "pta $+4, tr0\n\t" + "beq/l %2, r63, tr0\n\t" + "ptabs %1, tr0\n\t" + "getcon " __CTC ", %0\n\t" + : "=r"(ctc_val), "=r" (__dummy), "=r" (__rtc_irq_flag) + : "0" (0)); + cli(); + /* + * SH-3: + * CPU clock = 4 stages * loop + * tst rm,rm if id ex + * bt/s 1b if id ex + * add #1,rd if id ex + * (if) pipe line stole + * tst rm,rm if id ex + * .... + * + * + * SH-4: + * CPU clock = 6 stages * loop + * I don't know why. + * .... + * + * SH-5: + * Use CTC register to count. 
This approach returns the right value + * even if the I-cache is disabled (e.g. whilst debugging.) + * + */ + + count = ctc_val_init - ctc_val; /* CTC counts down */ + +#if defined (CONFIG_SH_SIMULATOR) + /* + * Let's pretend we are a 5MHz SH-5 to avoid a too + * little timer interval. Also to keep delay + * calibration within a reasonable time. + */ + return 5000000; +#else + /* + * This really is count by the number of clock cycles + * by the ratio between a complete R64CNT + * wrap-around (128) and CUI interrupt being raised (64). + */ + return count*2; +#endif +} + +static irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + ctrl_outb(0, RCR1); /* Disable Carry Interrupts */ + regs->regs[3] = 1; /* Using r3 */ + + return IRQ_HANDLED; +} + +static struct irqaction irq0 = { timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL}; +static struct irqaction irq1 = { rtc_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "rtc", NULL, NULL}; + +void __init time_init(void) +{ + unsigned int cpu_clock, master_clock, bus_clock, module_clock; + unsigned long interval; + unsigned long frqcr, ifc, pfc; + static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 }; +#define bfc_table ifc_table /* Same */ +#define pfc_table ifc_table /* Same */ + + tmu_base = onchip_remap(TMU_BASE, 1024, "TMU"); + if (!tmu_base) { + panic("Unable to remap TMU\n"); + } + + rtc_base = onchip_remap(RTC_BASE, 1024, "RTC"); + if (!rtc_base) { + panic("Unable to remap RTC\n"); + } + + cprc_base = onchip_remap(CPRC_BASE, 1024, "CPRC"); + if (!cprc_base) { + panic("Unable to remap CPRC\n"); + } + + xtime.tv_sec = get_rtc_time(); + xtime.tv_nsec = 0; + + setup_irq(TIMER_IRQ, &irq0); + setup_irq(RTC_IRQ, &irq1); + + /* Check how fast it is.. */ + cpu_clock = get_cpu_hz(); + + /* Note careful order of operations to maintain reasonable precision and avoid overflow. */ + scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) / (unsigned long long)(cpu_clock / HZ)); + + disable_irq(RTC_IRQ); + + printk("CPU clock: %d.%02dMHz\n", + (cpu_clock / 1000000), (cpu_clock % 1000000)/10000); + { + unsigned short bfc; + frqcr = ctrl_inl(FRQCR); + ifc = ifc_table[(frqcr>> 6) & 0x0007]; + bfc = bfc_table[(frqcr>> 3) & 0x0007]; + pfc = pfc_table[(frqcr>> 12) & 0x0007]; + master_clock = cpu_clock * ifc; + bus_clock = master_clock/bfc; + } + + printk("Bus clock: %d.%02dMHz\n", + (bus_clock/1000000), (bus_clock % 1000000)/10000); + module_clock = master_clock/pfc; + printk("Module clock: %d.%02dMHz\n", + (module_clock/1000000), (module_clock % 1000000)/10000); + interval = (module_clock/(HZ*4)); + + printk("Interval = %ld\n", interval); + + current_cpu_data.cpu_clock = cpu_clock; + current_cpu_data.master_clock = master_clock; + current_cpu_data.bus_clock = bus_clock; + current_cpu_data.module_clock = module_clock; + + /* Start TMU0 */ + ctrl_outb(TMU_TOCR_INIT, TMU_TOCR); + ctrl_outw(TMU0_TCR_INIT, TMU0_TCR); + ctrl_outl(interval, TMU0_TCOR); + ctrl_outl(interval, TMU0_TCNT); + ctrl_outb(TMU_TSTR_INIT, TMU_TSTR); +} + +void enter_deep_standby(void) +{ + /* Disable watchdog timer */ + ctrl_outl(0xa5000000, WTCSR); + /* Configure deep standby on sleep */ + ctrl_outl(0x03, STBCR); + +#ifdef CONFIG_SH_ALPHANUMERIC + { + extern void mach_alphanum(int position, unsigned char value); + extern void mach_alphanum_brightness(int setting); + char halted[] = "Halted. 
"; + int i; + mach_alphanum_brightness(6); /* dimmest setting above off */ + for (i=0; i<8; i++) { + mach_alphanum(i, halted[i]); + } + asm __volatile__ ("synco"); + } +#endif + + asm __volatile__ ("sleep"); + asm __volatile__ ("synci"); + asm __volatile__ ("nop"); + asm __volatile__ ("nop"); + asm __volatile__ ("nop"); + asm __volatile__ ("nop"); + panic("Unexpected wakeup!\n"); +} + +/* + * Scheduler clock - returns current time in nanosec units. + */ +unsigned long long sched_clock(void) +{ + return (unsigned long long)jiffies * (1000000000 / HZ); +} + diff --git a/arch/sh64/kernel/traps.c b/arch/sh64/kernel/traps.c new file mode 100644 index 000000000..b2b2bde30 --- /dev/null +++ b/arch/sh64/kernel/traps.c @@ -0,0 +1,958 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/traps.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003, 2004 Paul Mundt + * Copyright (C) 2003, 2004 Richard Curnow + * + */ + +/* + * 'Traps.c' handles hardware traps and faults after we have saved some + * state in 'entry.S'. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#undef DEBUG_EXCEPTION +#ifdef DEBUG_EXCEPTION +/* implemented in ../lib/dbg.c */ +extern void show_excp_regs(char *fname, int trapnr, int signr, + struct pt_regs *regs); +#else +#define show_excp_regs(a, b, c, d) +#endif + +static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name, + unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk); + +#define DO_ERROR(trapnr, signr, str, name, tsk) \ +asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \ +{ \ + do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \ +} + +spinlock_t die_lock; + +void die(const char * str, struct pt_regs * regs, long err) +{ + console_verbose(); + spin_lock_irq(&die_lock); + printk("%s: %lx\n", str, (err & 0xffffff)); + show_regs(regs); + spin_unlock_irq(&die_lock); + do_exit(SIGSEGV); +} + +static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err) +{ + if (!user_mode(regs)) + die(str, regs, err); +} + +static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err) +{ + if (!user_mode(regs)) { + const struct exception_table_entry *fixup; + fixup = search_exception_tables(regs->pc); + if (fixup) { + regs->pc = fixup->fixup; + return; + } + die(str, regs, err); + } +} + +DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current) +DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current) + + +/* Implement misaligned load/store handling for kernel (and optionally for user + mode too). Limitation : only SHmedia mode code is handled - there is no + handling at all for misaligned accesses occurring in SHcompact code yet. 
*/ + +static int misaligned_fixup(struct pt_regs *regs); + +asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs) +{ + if (misaligned_fixup(regs) < 0) { + do_unhandled_exception(7, SIGSEGV, "address error(load)", + "do_address_error_load", + error_code, regs, current); + } + return; +} + +asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs) +{ + if (misaligned_fixup(regs) < 0) { + do_unhandled_exception(8, SIGSEGV, "address error(store)", + "do_address_error_store", + error_code, regs, current); + } + return; +} + +#if defined(CONFIG_SH64_ID2815_WORKAROUND) + +#define OPCODE_INVALID 0 +#define OPCODE_USER_VALID 1 +#define OPCODE_PRIV_VALID 2 + +/* getcon/putcon - requires checking which control register is referenced. */ +#define OPCODE_CTRL_REG 3 + +/* Table of valid opcodes for SHmedia mode. + Form a 10-bit value by concatenating the major/minor opcodes i.e. + opcode[31:26,20:16]. The 6 MSBs of this value index into the following + array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to + LSBs==4'b0000 etc). */ +static unsigned long shmedia_opcode_table[64] = { + 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015, + 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000, + 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000, + 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000, + 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, + 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, + 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, + 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000 +}; + +void do_reserved_inst(unsigned long error_code, struct pt_regs *regs) +{ + /* Workaround SH5-101 cut2 silicon defect #2815 : + in some situations, inter-mode branches from SHcompact -> SHmedia + which should take ITLBMISS or EXECPROT exceptions at the target + falsely take RESINST at the target instead. */ + + unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */ + unsigned long pc, aligned_pc; + int get_user_error; + int trapnr = 12; + int signr = SIGILL; + char *exception_name = "reserved_instruction"; + + pc = regs->pc; + if ((pc & 3) == 1) { + /* SHmedia : check for defect. This requires executable vmas + to be readable too. */ + aligned_pc = pc & ~3; + if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) { + get_user_error = -EFAULT; + } else { + get_user_error = __get_user(opcode, (unsigned long *)aligned_pc); + } + if (get_user_error >= 0) { + unsigned long index, shift; + unsigned long major, minor, combined; + unsigned long reserved_field; + reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */ + major = (opcode >> 26) & 0x3f; + minor = (opcode >> 16) & 0xf; + combined = (major << 4) | minor; + index = major; + shift = minor << 1; + if (reserved_field == 0) { + int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3; + switch (opcode_state) { + case OPCODE_INVALID: + /* Trap. */ + break; + case OPCODE_USER_VALID: + /* Restart the instruction : the branch to the instruction will now be from an RTE + not from SHcompact so the silicon defect won't be triggered. 
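+				   Note that regs->pc is deliberately left untouched here:
+				   returning from the exception re-issues the same instruction,
+				   this time via RTE, which does not provoke the defect.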
*/ + return; + case OPCODE_PRIV_VALID: + if (!user_mode(regs)) { + /* Should only ever get here if a module has + SHcompact code inside it. If so, the same fix up is needed. */ + return; /* same reason */ + } + /* Otherwise, user mode trying to execute a privileged instruction - + fall through to trap. */ + break; + case OPCODE_CTRL_REG: + /* If in privileged mode, return as above. */ + if (!user_mode(regs)) return; + /* In user mode ... */ + if (combined == 0x9f) { /* GETCON */ + unsigned long regno = (opcode >> 20) & 0x3f; + if (regno >= 62) { + return; + } + /* Otherwise, reserved or privileged control register, => trap */ + } else if (combined == 0x1bf) { /* PUTCON */ + unsigned long regno = (opcode >> 4) & 0x3f; + if (regno >= 62) { + return; + } + /* Otherwise, reserved or privileged control register, => trap */ + } else { + /* Trap */ + } + break; + default: + /* Fall through to trap. */ + break; + } + } + /* fall through to normal resinst processing */ + } else { + /* Error trying to read opcode. This typically means a + real fault, not a RESINST any more. So change the + codes. */ + trapnr = 87; + exception_name = "address error (exec)"; + signr = SIGSEGV; + } + } + + do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current); +} + +#else /* CONFIG_SH64_ID2815_WORKAROUND */ + +/* If the workaround isn't needed, this is just a straightforward reserved + instruction */ +DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current) + +#endif /* CONFIG_SH64_ID2815_WORKAROUND */ + + +#include + +/* Called with interrupts disabled */ +asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs) +{ + PLS(); + show_excp_regs(__FUNCTION__, -1, -1, regs); + die_if_kernel("exception", regs, ex); +} + +int do_unknown_trapa(unsigned long scId, struct pt_regs *regs) +{ + /* Syscall debug */ + printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId); + + die_if_kernel("unknown trapa", regs, scId); + + return -ENOSYS; +} + +void show_stack(struct task_struct *tsk, unsigned long *sp) +{ +#ifdef CONFIG_KALLSYMS + extern void sh64_unwind(struct pt_regs *regs); + struct pt_regs *regs; + + regs = tsk ? tsk->thread.kregs : NULL; + + sh64_unwind(regs); +#else + printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n"); +#endif +} + +void show_task(unsigned long *sp) +{ + show_stack(NULL, sp); +} + +void dump_stack(void) +{ + show_task(NULL); +} + +static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name, + unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk) +{ + show_excp_regs(fn_name, trapnr, signr, regs); + tsk->thread.error_code = error_code; + tsk->thread.trap_no = trapnr; + + if (user_mode(regs)) + force_sig(signr, tsk); + + die_if_no_fixup(str, regs, error_code); +} + +static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode) +{ + int get_user_error; + unsigned long aligned_pc; + unsigned long opcode; + + if ((pc & 3) == 1) { + /* SHmedia */ + aligned_pc = pc & ~3; + if (from_user_mode) { + if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) { + get_user_error = -EFAULT; + } else { + get_user_error = __get_user(opcode, (unsigned long *)aligned_pc); + *result_opcode = opcode; + } + return get_user_error; + } else { + /* If the fault was in the kernel, we can either read + * this directly, or if not, we fault. 
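+			 * A plain dereference is used below; if a kernel-mode PC is
+			 * bad, that is a genuine bug and the resulting fault is left
+			 * to the normal fault handling.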
+ */ + *result_opcode = *(unsigned long *) aligned_pc; + return 0; + } + } else if ((pc & 1) == 0) { + /* SHcompact */ + /* TODO : provide handling for this. We don't really support + user-mode SHcompact yet, and for a kernel fault, this would + have to come from a module built for SHcompact. */ + return -EFAULT; + } else { + /* misaligned */ + return -EFAULT; + } +} + +static int address_is_sign_extended(__u64 a) +{ + __u64 b; +#if (NEFF == 32) + b = (__u64)(__s64)(__s32)(a & 0xffffffffUL); + return (b == a) ? 1 : 0; +#else +#error "Sign extend check only works for NEFF==32" +#endif +} + +static int generate_and_check_address(struct pt_regs *regs, + __u32 opcode, + int displacement_not_indexed, + int width_shift, + __u64 *address) +{ + /* return -1 for fault, 0 for OK */ + + __u64 base_address, addr; + int basereg; + + basereg = (opcode >> 20) & 0x3f; + base_address = regs->regs[basereg]; + if (displacement_not_indexed) { + __s64 displacement; + displacement = (opcode >> 10) & 0x3ff; + displacement = ((displacement << 54) >> 54); /* sign extend */ + addr = (__u64)((__s64)base_address + (displacement << width_shift)); + } else { + __u64 offset; + int offsetreg; + offsetreg = (opcode >> 10) & 0x3f; + offset = regs->regs[offsetreg]; + addr = base_address + offset; + } + + /* Check sign extended */ + if (!address_is_sign_extended(addr)) { + return -1; + } + +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + /* Check accessible. For misaligned access in the kernel, assume the + address is always accessible (and if not, just fault when the + load/store gets done.) */ + if (user_mode(regs)) { + if (addr >= TASK_SIZE) { + return -1; + } + /* Do access_ok check later - it depends on whether it's a load or a store. */ + } +#endif + + *address = addr; + return 0; +} + +/* Default value as for sh */ +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) +static int user_mode_unaligned_fixup_count = 10; +static int user_mode_unaligned_fixup_enable = 1; +#endif + +static int kernel_mode_unaligned_fixup_count = 32; + +static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result) +{ + unsigned short x; + unsigned char *p, *q; + p = (unsigned char *) (int) address; + q = (unsigned char *) &x; + q[0] = p[0]; + q[1] = p[1]; + + if (do_sign_extend) { + *result = (__u64)(__s64) *(short *) &x; + } else { + *result = (__u64) x; + } +} + +static void misaligned_kernel_word_store(__u64 address, __u64 value) +{ + unsigned short x; + unsigned char *p, *q; + p = (unsigned char *) (int) address; + q = (unsigned char *) &x; + + x = (__u16) value; + p[0] = q[0]; + p[1] = q[1]; +} + +static int misaligned_load(struct pt_regs *regs, + __u32 opcode, + int displacement_not_indexed, + int width_shift, + int do_sign_extend) +{ + /* Return -1 for a fault, 0 for OK */ + int error; + int destreg; + __u64 address; + + error = generate_and_check_address(regs, opcode, + displacement_not_indexed, width_shift, &address); + if (error < 0) { + return error; + } + + destreg = (opcode >> 4) & 0x3f; +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + if (user_mode(regs)) { + __u64 buffer; + + if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL< 0) { + return -1; /* fault */ + } + switch (width_shift) { + case 1: + if (do_sign_extend) { + regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer; + } else { + regs->regs[destreg] = (__u64) *(__u16 *) &buffer; + } + break; + case 2: + regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer; + break; + case 3: + regs->regs[destreg] = buffer; + break; + default: + 
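+			/* width_shift is derived from the opcode decode in
+			   misaligned_fixup(), so this case should be unreachable;
+			   report it and leave the destination register unchanged. */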
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + } else +#endif + { + /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */ + __u64 lo, hi; + + switch (width_shift) { + case 1: + misaligned_kernel_word_load(address, do_sign_extend, ®s->regs[destreg]); + break; + case 2: + asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address)); + asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address)); + regs->regs[destreg] = lo | hi; + break; + case 3: + asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address)); + asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address)); + regs->regs[destreg] = lo | hi; + break; + + default: + printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + } + + return 0; + +} + +static int misaligned_store(struct pt_regs *regs, + __u32 opcode, + int displacement_not_indexed, + int width_shift) +{ + /* Return -1 for a fault, 0 for OK */ + int error; + int srcreg; + __u64 address; + + error = generate_and_check_address(regs, opcode, + displacement_not_indexed, width_shift, &address); + if (error < 0) { + return error; + } + + srcreg = (opcode >> 4) & 0x3f; +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + if (user_mode(regs)) { + __u64 buffer; + + if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<regs[srcreg]; + break; + case 2: + *(__u32 *) &buffer = (__u32) regs->regs[srcreg]; + break; + case 3: + buffer = regs->regs[srcreg]; + break; + default: + printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + + if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { + return -1; /* fault */ + } + } else +#endif + { + /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */ + __u64 val = regs->regs[srcreg]; + + switch (width_shift) { + case 1: + misaligned_kernel_word_store(address, val); + break; + case 2: + asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address)); + asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address)); + break; + case 3: + asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address)); + asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address)); + break; + + default: + printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + } + + return 0; + +} + +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) +/* Never need to fix up misaligned FPU accesses within the kernel since that's a real + error. */ +static int misaligned_fpu_load(struct pt_regs *regs, + __u32 opcode, + int displacement_not_indexed, + int width_shift, + int do_paired_load) +{ + /* Return -1 for a fault, 0 for OK */ + int error; + int destreg; + __u64 address; + + error = generate_and_check_address(regs, opcode, + displacement_not_indexed, width_shift, &address); + if (error < 0) { + return error; + } + + destreg = (opcode >> 4) & 0x3f; + if (user_mode(regs)) { + __u64 buffer; + __u32 buflo, bufhi; + + if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL< 0) { + return -1; /* fault */ + } + /* 'current' may be the current owner of the FPU state, so + context switch the registers into memory so they can be + indexed by register number. 
*/ + if (last_task_used_math == current) { + grab_fpu(); + fpsave(¤t->thread.fpu.hard); + release_fpu(); + last_task_used_math = NULL; + regs->sr |= SR_FD; + } + + buflo = *(__u32*) &buffer; + bufhi = *(1 + (__u32*) &buffer); + + switch (width_shift) { + case 2: + current->thread.fpu.hard.fp_regs[destreg] = buflo; + break; + case 3: + if (do_paired_load) { + current->thread.fpu.hard.fp_regs[destreg] = buflo; + current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; + } else { +#if defined(CONFIG_LITTLE_ENDIAN) + current->thread.fpu.hard.fp_regs[destreg] = bufhi; + current->thread.fpu.hard.fp_regs[destreg+1] = buflo; +#else + current->thread.fpu.hard.fp_regs[destreg] = buflo; + current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; +#endif + } + break; + default: + printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + return 0; + } else { + die ("Misaligned FPU load inside kernel", regs, 0); + return -1; + } + + +} + +static int misaligned_fpu_store(struct pt_regs *regs, + __u32 opcode, + int displacement_not_indexed, + int width_shift, + int do_paired_load) +{ + /* Return -1 for a fault, 0 for OK */ + int error; + int srcreg; + __u64 address; + + error = generate_and_check_address(regs, opcode, + displacement_not_indexed, width_shift, &address); + if (error < 0) { + return error; + } + + srcreg = (opcode >> 4) & 0x3f; + if (user_mode(regs)) { + __u64 buffer; + /* Initialise these to NaNs. */ + __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL; + + if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<thread.fpu.hard); + release_fpu(); + last_task_used_math = NULL; + regs->sr |= SR_FD; + } + + switch (width_shift) { + case 2: + buflo = current->thread.fpu.hard.fp_regs[srcreg]; + break; + case 3: + if (do_paired_load) { + buflo = current->thread.fpu.hard.fp_regs[srcreg]; + bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; + } else { +#if defined(CONFIG_LITTLE_ENDIAN) + bufhi = current->thread.fpu.hard.fp_regs[srcreg]; + buflo = current->thread.fpu.hard.fp_regs[srcreg+1]; +#else + buflo = current->thread.fpu.hard.fp_regs[srcreg]; + bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; +#endif + } + break; + default: + printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + + *(__u32*) &buffer = buflo; + *(1 + (__u32*) &buffer) = bufhi; + if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { + return -1; /* fault */ + } + return 0; + } else { + die ("Misaligned FPU load inside kernel", regs, 0); + return -1; + } +} +#endif + +static int misaligned_fixup(struct pt_regs *regs) +{ + unsigned long opcode; + int error; + int major, minor; + +#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + /* Never fixup user mode misaligned accesses without this option enabled. 
*/ + return -1; +#else + if (!user_mode_unaligned_fixup_enable) return -1; +#endif + + error = read_opcode(regs->pc, &opcode, user_mode(regs)); + if (error < 0) { + return error; + } + major = (opcode >> 26) & 0x3f; + minor = (opcode >> 16) & 0xf; + +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) { + --user_mode_unaligned_fixup_count; + /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */ + printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", + current->comm, current->pid, (__u32)regs->pc, opcode); + } else +#endif + if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) { + --kernel_mode_unaligned_fixup_count; + if (in_interrupt()) { + printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n", + (__u32)regs->pc, opcode); + } else { + printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", + current->comm, current->pid, (__u32)regs->pc, opcode); + } + } + + + switch (major) { + case (0x84>>2): /* LD.W */ + error = misaligned_load(regs, opcode, 1, 1, 1); + break; + case (0xb0>>2): /* LD.UW */ + error = misaligned_load(regs, opcode, 1, 1, 0); + break; + case (0x88>>2): /* LD.L */ + error = misaligned_load(regs, opcode, 1, 2, 1); + break; + case (0x8c>>2): /* LD.Q */ + error = misaligned_load(regs, opcode, 1, 3, 0); + break; + + case (0xa4>>2): /* ST.W */ + error = misaligned_store(regs, opcode, 1, 1); + break; + case (0xa8>>2): /* ST.L */ + error = misaligned_store(regs, opcode, 1, 2); + break; + case (0xac>>2): /* ST.Q */ + error = misaligned_store(regs, opcode, 1, 3); + break; + + case (0x40>>2): /* indexed loads */ + switch (minor) { + case 0x1: /* LDX.W */ + error = misaligned_load(regs, opcode, 0, 1, 1); + break; + case 0x5: /* LDX.UW */ + error = misaligned_load(regs, opcode, 0, 1, 0); + break; + case 0x2: /* LDX.L */ + error = misaligned_load(regs, opcode, 0, 2, 1); + break; + case 0x3: /* LDX.Q */ + error = misaligned_load(regs, opcode, 0, 3, 0); + break; + default: + error = -1; + break; + } + break; + + case (0x60>>2): /* indexed stores */ + switch (minor) { + case 0x1: /* STX.W */ + error = misaligned_store(regs, opcode, 0, 1); + break; + case 0x2: /* STX.L */ + error = misaligned_store(regs, opcode, 0, 2); + break; + case 0x3: /* STX.Q */ + error = misaligned_store(regs, opcode, 0, 3); + break; + default: + error = -1; + break; + } + break; + +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + case (0x94>>2): /* FLD.S */ + error = misaligned_fpu_load(regs, opcode, 1, 2, 0); + break; + case (0x98>>2): /* FLD.P */ + error = misaligned_fpu_load(regs, opcode, 1, 3, 1); + break; + case (0x9c>>2): /* FLD.D */ + error = misaligned_fpu_load(regs, opcode, 1, 3, 0); + break; + case (0x1c>>2): /* floating indexed loads */ + switch (minor) { + case 0x8: /* FLDX.S */ + error = misaligned_fpu_load(regs, opcode, 0, 2, 0); + break; + case 0xd: /* FLDX.P */ + error = misaligned_fpu_load(regs, opcode, 0, 3, 1); + break; + case 0x9: /* FLDX.D */ + error = misaligned_fpu_load(regs, opcode, 0, 3, 0); + break; + default: + error = -1; + break; + } + break; + case (0xb4>>2): /* FLD.S */ + error = misaligned_fpu_store(regs, opcode, 1, 2, 0); + break; + case (0xb8>>2): /* FLD.P */ + error = misaligned_fpu_store(regs, opcode, 1, 3, 1); + break; + case (0xbc>>2): /* FLD.D */ + error = misaligned_fpu_store(regs, opcode, 1, 3, 0); + break; + case (0x3c>>2): /* floating indexed stores */ + switch (minor) { + 
case 0x8: /* FSTX.S */ + error = misaligned_fpu_store(regs, opcode, 0, 2, 0); + break; + case 0xd: /* FSTX.P */ + error = misaligned_fpu_store(regs, opcode, 0, 3, 1); + break; + case 0x9: /* FSTX.D */ + error = misaligned_fpu_store(regs, opcode, 0, 3, 0); + break; + default: + error = -1; + break; + } + break; +#endif + + default: + /* Fault */ + error = -1; + break; + } + + if (error < 0) { + return error; + } else { + regs->pc += 4; /* Skip the instruction that's just been emulated */ + return 0; + } + +} + +static ctl_table unaligned_table[] = { + {1, "kernel_reports", &kernel_mode_unaligned_fixup_count, + sizeof(int), 0644, NULL, &proc_dointvec}, +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + {2, "user_reports", &user_mode_unaligned_fixup_count, + sizeof(int), 0644, NULL, &proc_dointvec}, + {3, "user_enable", &user_mode_unaligned_fixup_enable, + sizeof(int), 0644, NULL, &proc_dointvec}, +#endif + {0} +}; + +static ctl_table unaligned_root[] = { + {1, "unaligned_fixup", NULL, 0, 0555, unaligned_table}, + {0} +}; + +static ctl_table sh64_root[] = { + {1, "sh64", NULL, 0, 0555, unaligned_root}, + {0} +}; +static struct ctl_table_header *sysctl_header; +static int __init init_sysctl(void) +{ + sysctl_header = register_sysctl_table(sh64_root, 0); + return 0; +} + +__initcall(init_sysctl); + + +asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs) +{ + u64 peek_real_address_q(u64 addr); + u64 poke_real_address_q(u64 addr, u64 val); + unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010; + unsigned long long exp_cause; + /* It's not worth ioremapping the debug module registers for the amount + of access we make to them - just go direct to their physical + addresses. */ + exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY); + if (exp_cause & ~4) { + printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n", + (unsigned long)(exp_cause & 0xffffffff)); + } + show_state(); + /* Clear all DEBUGINT causes */ + poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0); +} + diff --git a/arch/sh64/kernel/unwind.c b/arch/sh64/kernel/unwind.c new file mode 100644 index 000000000..f934f97f9 --- /dev/null +++ b/arch/sh64/kernel/unwind.c @@ -0,0 +1,326 @@ +/* + * arch/sh64/kernel/unwind.c + * + * Copyright (C) 2004 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +static u8 regcache[63]; + +/* + * Finding the previous stack frame isn't horribly straightforward as it is + * on some other platforms. In the sh64 case, we don't have "linked" stack + * frames, so we need to do a bit of work to determine the previous frame, + * and in turn, the previous r14/r18 pair. + * + * There are generally a few cases which determine where we can find out + * the r14/r18 values. In the general case, this can be determined by poking + * around the prologue of the symbol PC is in (note that we absolutely must + * have frame pointer support as well as the kernel symbol table mapped, + * otherwise we can't even get this far). + * + * In other cases, such as the interrupt/exception path, we can poke around + * the sp/fp. + * + * Notably, this entire approach is somewhat error prone, and in the event + * that the previous frame cannot be determined, that's all we can do. 
+ * Either way, this still leaves us with a more correct backtrace then what + * we would be able to come up with by walking the stack (which is garbage + * for anything beyond the first frame). + * -- PFM. + */ +static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc, + unsigned long *pprev_fp, unsigned long *pprev_pc, + struct pt_regs *regs) +{ + const char *sym; + char *modname, namebuf[128]; + unsigned long offset, size; + unsigned long prologue = 0; + unsigned long fp_displacement = 0; + unsigned long fp_prev = 0; + unsigned long offset_r14 = 0, offset_r18 = 0; + int i, found_prologue_end = 0; + + sym = kallsyms_lookup(pc, &size, &offset, &modname, namebuf); + if (!sym) + return -EINVAL; + + prologue = pc - offset; + if (!prologue) + return -EINVAL; + + /* Validate fp, to avoid risk of dereferencing a bad pointer later. + Assume 128Mb since that's the amount of RAM on a Cayman. Modify + when there is an SH-5 board with more. */ + if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) || + (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) || + ((fp & 7) != 0)) { + return -EINVAL; + } + + /* + * Depth to walk, depth is completely arbitrary. + */ + for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) { + unsigned long op; + u8 major, minor; + u8 src, dest, disp; + + op = *(unsigned long *)prologue; + + major = (op >> 26) & 0x3f; + src = (op >> 20) & 0x3f; + minor = (op >> 16) & 0xf; + disp = (op >> 10) & 0x3f; + dest = (op >> 4) & 0x3f; + + /* + * Stack frame creation happens in a number of ways.. in the + * general case when the stack frame is less than 511 bytes, + * it's generally created by an addi or addi.l: + * + * addi/addi.l r15, -FRAME_SIZE, r15 + * + * in the event that the frame size is bigger than this, it's + * typically created using a movi/sub pair as follows: + * + * movi FRAME_SIZE, rX + * sub r15, rX, r15 + */ + + switch (major) { + case (0x00 >> 2): + switch (minor) { + case 0x8: /* add.l */ + case 0x9: /* add */ + /* Look for r15, r63, r14 */ + if (src == 15 && disp == 63 && dest == 14) + found_prologue_end = 1; + + break; + case 0xa: /* sub.l */ + case 0xb: /* sub */ + if (src != 15 || dest != 15) + continue; + + fp_displacement -= regcache[disp]; + fp_prev = fp - fp_displacement; + break; + } + break; + case (0xa8 >> 2): /* st.l */ + if (src != 15) + continue; + + switch (dest) { + case 14: + if (offset_r14 || fp_displacement == 0) + continue; + + offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); + offset_r14 *= sizeof(unsigned long); + offset_r14 += fp_displacement; + break; + case 18: + if (offset_r18 || fp_displacement == 0) + continue; + + offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); + offset_r18 *= sizeof(unsigned long); + offset_r18 += fp_displacement; + break; + } + + break; + case (0xcc >> 2): /* movi */ + if (dest >= 63) { + printk(KERN_NOTICE "%s: Invalid dest reg %d " + "specified in movi handler. Failed " + "opcode was 0x%lx: ", __FUNCTION__, + dest, op); + + continue; + } + + /* Sign extend */ + regcache[dest] = + ((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54; + break; + case (0xd0 >> 2): /* addi */ + case (0xd4 >> 2): /* addi.l */ + /* Look for r15, -FRAME_SIZE, r15 */ + if (src != 15 || dest != 15) + continue; + + /* Sign extended frame size.. 
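+			   (bits [19:10] of the opcode, sign-extended by shifting the
+			   10-bit field up to the top of a 64-bit value with << 54 and
+			   arithmetically shifting it back down with >> 54)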
*/ + fp_displacement += + (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); + fp_prev = fp - fp_displacement; + break; + } + + if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev) + break; + } + + if (offset_r14 == 0 || fp_prev == 0) { + if (!offset_r14) + pr_debug("Unable to find r14 offset\n"); + if (!fp_prev) + pr_debug("Unable to find previous fp\n"); + + return -EINVAL; + } + + /* For innermost leaf function, there might not be a offset_r18 */ + if (!*pprev_pc && (offset_r18 == 0)) + return -EINVAL; + + *pprev_fp = *(unsigned long *)(fp_prev + offset_r14); + + if (offset_r18) + *pprev_pc = *(unsigned long *)(fp_prev + offset_r18); + + *pprev_pc &= ~1; + + return 0; +} + +/* Don't put this on the stack since we'll want to call sh64_unwind + * when we're close to underflowing the stack anyway. */ +static struct pt_regs here_regs; + +extern const char syscall_ret; +extern const char ret_from_syscall; +extern const char ret_from_exception; +extern const char ret_from_irq; + +static void sh64_unwind_inner(struct pt_regs *regs); + +static void unwind_nested (unsigned long pc, unsigned long fp) +{ + if ((fp >= __MEMORY_START) && + ((fp & 7) == 0)) { + sh64_unwind_inner((struct pt_regs *) fp); + } +} + +static void sh64_unwind_inner(struct pt_regs *regs) +{ + unsigned long pc, fp; + int ofs = 0; + int first_pass; + + pc = regs->pc & ~1; + fp = regs->regs[14]; + + first_pass = 1; + for (;;) { + int cond; + unsigned long next_fp, next_pc; + + if (pc == ((unsigned long) &syscall_ret & ~1)) { + printk("SYSCALL\n"); + unwind_nested(pc,fp); + return; + } + + if (pc == ((unsigned long) &ret_from_syscall & ~1)) { + printk("SYSCALL (PREEMPTED)\n"); + unwind_nested(pc,fp); + return; + } + + /* In this case, the PC is discovered by lookup_prev_stack_frame but + it has 4 taken off it to look like the 'caller' */ + if (pc == ((unsigned long) &ret_from_exception & ~1)) { + printk("EXCEPTION\n"); + unwind_nested(pc,fp); + return; + } + + if (pc == ((unsigned long) &ret_from_irq & ~1)) { + printk("IRQ\n"); + unwind_nested(pc,fp); + return; + } + + cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) && + ((pc & 3) == 0) && ((fp & 7) == 0)); + + pc -= ofs; + + printk("[<%08lx>] ", pc); + print_symbol("%s\n", pc); + + if (first_pass) { + /* If the innermost frame is a leaf function, it's + * possible that r18 is never saved out to the stack. + */ + next_pc = regs->regs[18]; + } else { + next_pc = 0; + } + + if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) { + ofs = sizeof(unsigned long); + pc = next_pc & ~1; + fp = next_fp; + } else { + printk("Unable to lookup previous stack frame\n"); + break; + } + first_pass = 0; + } + + printk("\n"); + +} + +void sh64_unwind(struct pt_regs *regs) +{ + if (!regs) { + /* + * Fetch current regs if we have no other saved state to back + * trace from. 
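+		 * Only r14, r15, r18, the target registers and the PC are captured
+		 * below; for the first frame, sh64_unwind_inner() only ever looks
+		 * at the PC, r14 and r18.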
+ */ + regs = &here_regs; + + __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14])); + __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15])); + __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18])); + + __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0])); + __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1])); + __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2])); + __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3])); + __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4])); + __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5])); + __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6])); + __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7])); + + __asm__ __volatile__ ( + "pta 0f, tr0\n\t" + "blink tr0, %0\n\t" + "0: nop" + : "=r" (regs->pc) + ); + } + + printk("\nCall Trace:\n"); + sh64_unwind_inner(regs); +} + diff --git a/arch/sh64/kernel/vmlinux.lds.S b/arch/sh64/kernel/vmlinux.lds.S new file mode 100644 index 000000000..a3fba816b --- /dev/null +++ b/arch/sh64/kernel/vmlinux.lds.S @@ -0,0 +1,183 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh5/vmlinux.lds.S + * + * ld script to make ST50 Linux kernel + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * benedict.gaster@superh.com: 2nd May 2002 + * Add definition of empty_zero_page to be the first page of kernel image. + * + * benedict.gaster@superh.com: 3rd May 2002 + * Added support for ramdisk, removing statically linked romfs at the same time. + * + * lethal@linux-sh.org: 9th May 2003 + * Kill off GLOBAL_NAME() usage and other CDC-isms. + * + * lethal@linux-sh.org: 19th May 2003 + * Remove support for ancient toolchains. + */ + +#include +#include +#include +#include +#include + +#define LOAD_OFFSET CONFIG_CACHED_MEMORY_OFFSET +#include + +#ifdef NOTDEF +#ifdef CONFIG_LITTLE_ENDIAN +OUTPUT_FORMAT("elf32-sh64l-linux", "elf32-sh64l-linux", "elf32-sh64l-linux") +#else +OUTPUT_FORMAT("elf32-sh64", "elf32-sh64", "elf32-sh64") +#endif +#endif + +OUTPUT_ARCH(sh:sh5) + +#define C_PHYS(x) AT (ADDR(x) - LOAD_OFFSET) + +ENTRY(__start) +SECTIONS +{ + . = CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START + PAGE_SIZE; + _text = .; /* Text and read-only data */ + text = .; /* Text and read-only data */ + + .empty_zero_page : C_PHYS(.empty_zero_page) { + *(.empty_zero_page) + } = 0 + + .text : C_PHYS(.text) { + *(.text) + *(.text64) + *(.text..SHmedia32) + SCHED_TEXT + *(.fixup) + *(.gnu.warning) +#ifdef CONFIG_LITTLE_ENDIAN + } = 0x6ff0fff0 +#else + } = 0xf0fff06f +#endif + + /* We likely want __ex_table to be Cache Line aligned */ + . = ALIGN(L1_CACHE_BYTES); /* Exception table */ + __start___ex_table = .; + __ex_table : C_PHYS(__ex_table) { *(__ex_table) } + __stop___ex_table = .; + + RODATA + + _etext = .; /* End of text section */ + + .data : C_PHYS(.data) { /* Data */ + *(.data) + CONSTRUCTORS + } + + . = ALIGN(PAGE_SIZE); + .data.page_aligned : C_PHYS(.data.page_aligned) { *(.data.page_aligned) } + + . = ALIGN(L1_CACHE_BYTES); + __per_cpu_start = .; + .data.percpu : C_PHYS(.data.percpu) { *(.data.percpu) } + __per_cpu_end = . ; + .data.cacheline_aligned : C_PHYS(.data.cacheline_aligned) { *(.data.cacheline_aligned) } + + _edata = .; /* End of data section */ + + . 
= ALIGN(THREAD_SIZE); /* init_task: structure size aligned */ + .data.init_task : C_PHYS(.data.init_task) { *(.data.init_task) } + + . = ALIGN(PAGE_SIZE); /* Init code and data */ + __init_begin = .; + _sinittext = .; + .init.text : C_PHYS(.init.text) { *(.init.text) } + _einittext = .; + .init.data : C_PHYS(.init.data) { *(.init.data) } + . = ALIGN(L1_CACHE_BYTES); /* Better if Cache Line aligned */ + __setup_start = .; + .init.setup : C_PHYS(.init.setup) { *(.init.setup) } + __setup_end = .; + __start___param = .; + __param : C_PHYS(__param) { *(__param) } + __stop___param = .; + __initcall_start = .; + .initcall.init : C_PHYS(.initcall.init) { + *(.initcall1.init) + *(.initcall2.init) + *(.initcall3.init) + *(.initcall4.init) + *(.initcall5.init) + *(.initcall6.init) + *(.initcall7.init) + } + __initcall_end = .; + __con_initcall_start = .; + .con_initcall.init : C_PHYS(.con_initcall.init) { *(.con_initcall.init) } + __con_initcall_end = .; + SECURITY_INIT + __initramfs_start = .; + .init.ramfs : C_PHYS(.init.ramfs) { *(.init.ramfs) } + __initramfs_end = .; + . = ALIGN(PAGE_SIZE); + __init_end = .; + + /* Align to the biggest single data representation, head and tail */ + . = ALIGN(8); + __bss_start = .; /* BSS */ + .bss : C_PHYS(.bss) { + *(.bss) + } + . = ALIGN(8); + _end = . ; + + /* Sections to be discarded */ + /DISCARD/ : { + *(.exit.text) + *(.exit.data) + *(.exitcall.exit) + } + + /* Stabs debugging sections. */ + .stab 0 : C_PHYS(.stab) { *(.stab) } + .stabstr 0 : C_PHYS(.stabstr) { *(.stabstr) } + .stab.excl 0 : C_PHYS(.stab.excl) { *(.stab.excl) } + .stab.exclstr 0 : C_PHYS(.stab.exclstr) { *(.stab.exclstr) } + .stab.index 0 : C_PHYS(.stab.index) { *(.stab.index) } + .stab.indexstr 0 : C_PHYS(.stab.indexstr) { *(.stab.indexstr) } + .comment 0 : C_PHYS(.comment) { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging section are relative to the beginning + of the section so we begin .debug at 0. */ + /* DWARF 1 */ + .debug 0 : C_PHYS(.debug) { *(.debug) } + .line 0 : C_PHYS(.line) { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : C_PHYS(.debug_srcinfo) { *(.debug_srcinfo) } + .debug_sfnames 0 : C_PHYS(.debug_sfnames) { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : C_PHYS(.debug_aranges) { *(.debug_aranges) } + .debug_pubnames 0 : C_PHYS(.debug_pubnames) { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : C_PHYS(.debug_info) { *(.debug_info) } + .debug_abbrev 0 : C_PHYS(.debug_abbrev) { *(.debug_abbrev) } + .debug_line 0 : C_PHYS(.debug_line) { *(.debug_line) } + .debug_frame 0 : C_PHYS(.debug_frame) { *(.debug_frame) } + .debug_str 0 : C_PHYS(.debug_str) { *(.debug_str) } + .debug_loc 0 : C_PHYS(.debug_loc) { *(.debug_loc) } + .debug_macinfo 0 : C_PHYS(.debug_macinfo) { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : C_PHYS(.debug_weaknames) { *(.debug_weaknames) } + .debug_funcnames 0 : C_PHYS(.debug_funcnames) { *(.debug_funcnames) } + .debug_typenames 0 : C_PHYS(.debug_typenames) { *(.debug_typenames) } + .debug_varnames 0 : C_PHYS(.debug_varnames) { *(.debug_varnames) } + /* These must appear regardless of . */ +} diff --git a/arch/sh64/lib/Makefile b/arch/sh64/lib/Makefile new file mode 100644 index 000000000..0a2dc69be --- /dev/null +++ b/arch/sh64/lib/Makefile @@ -0,0 +1,19 @@ +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
+# +# Copyright (C) 2000, 2001 Paolo Alberelli +# Coprygith (C) 2003 Paul Mundt +# +# Makefile for the SH-5 specific library files.. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +# Panic should really be compiled as PIC +lib-y := udelay.o c-checksum.o dbg.o io.o panic.o memcpy.o copy_user_memcpy.o \ + page_copy.o page_clear.o + diff --git a/arch/sh64/lib/c-checksum.c b/arch/sh64/lib/c-checksum.c new file mode 100644 index 000000000..327595472 --- /dev/null +++ b/arch/sh64/lib/c-checksum.c @@ -0,0 +1,231 @@ +/* + * arch/sh/lib/csum_parial.c + * + * This file contains network checksum routines that are better done + * in an architecture-specific manner due to speed.. + */ + +#undef DEBUG + +#include +#include +#include +#include +#include +#include + +static inline unsigned short from64to16(unsigned long long x) +{ + /* add up 32-bit words for 33 bits */ + x = (x & 0xffffffff) + (x >> 32); + /* add up 16-bit and 17-bit words for 17+c bits */ + x = (x & 0xffff) + (x >> 16); + /* add up 16-bit and 2-bit for 16+c bit */ + x = (x & 0xffff) + (x >> 16); + /* add up carry.. */ + x = (x & 0xffff) + (x >> 16); + return x; +} + +static inline unsigned short foldto16(unsigned long x) +{ + /* add up 16-bit for 17 bits */ + x = (x & 0xffff) + (x >> 16); + /* add up carry.. */ + x = (x & 0xffff) + (x >> 16); + return x; +} + +static inline unsigned short myfoldto16(unsigned long long x) +{ + /* Fold down to 32-bits so we don't loose in the typedef-less + network stack. */ + /* 64 to 33 */ + x = (x & 0xffffffff) + (x >> 32); + /* 33 to 32 */ + x = (x & 0xffffffff) + (x >> 32); + + /* add up 16-bit for 17 bits */ + x = (x & 0xffff) + (x >> 16); + /* add up carry.. */ + x = (x & 0xffff) + (x >> 16); + return x; +} + +#define odd(x) ((x)&1) +#define U16(x) ntohs(x) + +static unsigned long do_csum(const unsigned char *buff, int len) +{ + int odd, count; + unsigned long result = 0; + + pr_debug("do_csum buff %p, len %d (0x%x)\n", buff, len, len); +#ifdef DEBUG + for (i = 0; i < len; i++) { + if ((i % 26) == 0) + printk("\n"); + printk("%02X ", buff[i]); + } +#endif + + if (len <= 0) + goto out; + + odd = 1 & (unsigned long) buff; + if (odd) { + result = *buff << 8; + len--; + buff++; + } + count = len >> 1; /* nr of 16-bit words.. */ + if (count) { + if (2 & (unsigned long) buff) { + result += *(unsigned short *) buff; + count--; + len -= 2; + buff += 2; + } + count >>= 1; /* nr of 32-bit words.. */ + if (count) { + unsigned long carry = 0; + do { + unsigned long w = *(unsigned long *) buff; + buff += 4; + count--; + result += carry; + result += w; + carry = (w > result); + } while (count); + result += carry; + result = (result & 0xffff) + (result >> 16); + } + if (len & 2) { + result += *(unsigned short *) buff; + buff += 2; + } + } + if (len & 1) + result += *buff; + result = foldto16(result); + if (odd) + result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); + + pr_debug("\nCHECKSUM is 0x%x\n", result); + + out: + return result; +} + +/* computes the checksum of a memory block at buff, length len, + and adds in "sum" (32-bit) */ +unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum) +{ + unsigned long long result = do_csum(buff, len); + + /* add in old sum, and carry.. 
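+	   do_csum() returns a value that already fits in 16 bits, so adding a
+	   32-bit sum gives at most a 33-bit intermediate, e.g.
+	   0xffff + 0xffffffff = 0x10000fffe, which the fold below reduces back
+	   to 0xffff.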
*/ + result += sum; + /* 32+c bits -> 32 bits */ + result = (result & 0xffffffff) + (result >> 32); + + pr_debug("csum_partial, buff %p len %d sum 0x%x result=0x%016Lx\n", + buff, len, sum, result); + + return result; +} + +/* Copy while checksumming, otherwise like csum_partial. */ +unsigned int +csum_partial_copy(const char *src, char *dst, int len, unsigned int sum) +{ + sum = csum_partial(src, len, sum); + memcpy(dst, src, len); + + return sum; +} + +/* Copy from userspace and compute checksum. If we catch an exception + then zero the rest of the buffer. */ +unsigned int +csum_partial_copy_from_user(const char *src, char *dst, int len, + unsigned int sum, int *err_ptr) +{ + int missing; + + pr_debug + ("csum_partial_copy_from_user src %p, dest %p, len %d, sum %08x, err_ptr %p\n", + src, dst, len, sum, err_ptr); + missing = copy_from_user(dst, src, len); + pr_debug(" access_ok %d\n", __access_ok((unsigned long) src, len)); + pr_debug(" missing %d\n", missing); + if (missing) { + memset(dst + len - missing, 0, missing); + *err_ptr = -EFAULT; + } + + return csum_partial(dst, len, sum); +} + +/* Copy to userspace and compute checksum. */ +unsigned int +csum_partial_copy_to_user(const char *src, char *dst, int len, + unsigned int sum, int *err_ptr) +{ + sum = csum_partial(src, len, sum); + + if (copy_to_user(dst, src, len)) + *err_ptr = -EFAULT; + + return sum; +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl) +{ + pr_debug("ip_fast_csum %p,%d\n", iph, ihl); + + return ~do_csum(iph, ihl * 4); +} + +unsigned int csum_tcpudp_nofold(unsigned long saddr, + unsigned long daddr, + unsigned short len, + unsigned short proto, unsigned int sum) +{ + unsigned long long result; + + pr_debug("ntohs(0x%x)=0x%x\n", 0xdead, ntohs(0xdead)); + pr_debug("htons(0x%x)=0x%x\n", 0xdead, htons(0xdead)); + + result = ((unsigned long long) saddr + + (unsigned long long) daddr + + (unsigned long long) sum + + ((unsigned long long) ntohs(len) << 16) + + ((unsigned long long) proto << 8)); + + /* Fold down to 32-bits so we don't loose in the typedef-less + network stack. */ + /* 64 to 33 */ + result = (result & 0xffffffff) + (result >> 32); + /* 33 to 32 */ + result = (result & 0xffffffff) + (result >> 32); + + pr_debug("%s saddr %x daddr %x len %x proto %x sum %x result %08Lx\n", + __FUNCTION__, saddr, daddr, len, proto, sum, result); + + return result; +} + +// Post SIM: +unsigned int +csum_partial_copy_nocheck(const char *src, char *dst, int len, unsigned int sum) +{ + // unsigned dummy; + pr_debug("csum_partial_copy_nocheck src %p dst %p len %d\n", src, dst, + len); + + return csum_partial_copy(src, dst, len, sum); +} diff --git a/arch/sh64/lib/copy_user_memcpy.S b/arch/sh64/lib/copy_user_memcpy.S new file mode 100644 index 000000000..5b6ca3923 --- /dev/null +++ b/arch/sh64/lib/copy_user_memcpy.S @@ -0,0 +1,213 @@ +! +! Fast SH memcpy +! +! by Toshiyasu Morita (tm@netcom.com) +! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut) +! SH5 code Copyright 2002 SuperH Ltd. +! +! Entry: ARG0: destination pointer +! ARG1: source pointer +! ARG2: byte count +! +! Exit: RESULT: destination pointer +! any other registers in the range r0-r7: trashed +! +! Notes: Usually one wants to do small reads and write a longword, but +! unfortunately it is difficult in some cases to concatanate bytes +! 
into a longword on the SH, so this does a longword read and small +! writes. +! +! This implementation makes two assumptions about how it is called: +! +! 1.: If the byte count is nonzero, the address of the last byte to be +! copied is unsigned greater than the address of the first byte to +! be copied. This could be easily swapped for a signed comparison, +! but the algorithm used needs some comparison. +! +! 2.: When there are two or three bytes in the last word of an 11-or-more +! bytes memory chunk to b copied, the rest of the word can be read +! without side effects. +! This could be easily changed by increasing the minumum size of +! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2, +! however, this would cost a few extra cyles on average. +! For SHmedia, the assumption is that any quadword can be read in its +! enirety if at least one byte is included in the copy. + +/* Imported into Linux kernel by Richard Curnow. This is used to implement the + __copy_user function in the general case, so it has to be a distinct + function from intra-kernel memcpy to allow for exception fix-ups in the + event that the user pointer is bad somewhere in the copy (e.g. due to + running off the end of the vma). + + Note, this algorithm will be slightly wasteful in the case where the source + and destination pointers are equally aligned, because the stlo/sthi pairs + could then be merged back into single stores. If there are a lot of cache + misses, this is probably offset by the stall lengths on the preloads. + +*/ + + .section .text..SHmedia32,"ax" + .little + .balign 32 + .global copy_user_memcpy + .global copy_user_memcpy_end +copy_user_memcpy: + +#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1 +#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1 +#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1 +#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1 + + ld.b r3,0,r63 + pta/l Large,tr0 + movi 25,r0 + bgeu/u r4,r0,tr0 + nsb r4,r0 + shlli r0,5,r0 + movi (L1-L0+63*32 + 1) & 0xffff,r1 + sub r1, r0, r0 +L0: ptrel r0,tr0 + add r2,r4,r5 + ptabs r18,tr1 + add r3,r4,r6 + blink tr0,r63 + +/* Rearranged to make cut2 safe */ + .balign 8 +L4_7: /* 4..7 byte memcpy cntd. */ + stlo.l r2, 0, r0 + or r6, r7, r6 + sthi.l r5, -1, r6 + stlo.l r5, -4, r6 + blink tr1,r63 + + .balign 8 +L1: /* 0 byte memcpy */ + nop + blink tr1,r63 + nop + nop + nop + nop + +L2_3: /* 2 or 3 byte memcpy cntd. */ + st.b r5,-1,r6 + blink tr1,r63 + + /* 1 byte memcpy */ + ld.b r3,0,r0 + st.b r2,0,r0 + blink tr1,r63 + +L8_15: /* 8..15 byte memcpy cntd. */ + stlo.q r2, 0, r0 + or r6, r7, r6 + sthi.q r5, -1, r6 + stlo.q r5, -8, r6 + blink tr1,r63 + + /* 2 or 3 byte memcpy */ + ld.b r3,0,r0 + ld.b r2,0,r63 + ld.b r3,1,r1 + st.b r2,0,r0 + pta/l L2_3,tr0 + ld.b r6,-1,r6 + st.b r2,1,r1 + blink tr0, r63 + + /* 4 .. 7 byte memcpy */ + LDUAL (r3, 0, r0, r1) + pta L4_7, tr0 + ldlo.l r6, -4, r7 + or r0, r1, r0 + sthi.l r2, 3, r0 + ldhi.l r6, -1, r6 + blink tr0, r63 + + /* 8 .. 15 byte memcpy */ + LDUAQ (r3, 0, r0, r1) + pta L8_15, tr0 + ldlo.q r6, -8, r7 + or r0, r1, r0 + sthi.q r2, 7, r0 + ldhi.q r6, -1, r6 + blink tr0, r63 + + /* 16 .. 
24 byte memcpy */ + LDUAQ (r3, 0, r0, r1) + LDUAQ (r3, 8, r8, r9) + or r0, r1, r0 + sthi.q r2, 7, r0 + or r8, r9, r8 + sthi.q r2, 15, r8 + ldlo.q r6, -8, r7 + ldhi.q r6, -1, r6 + stlo.q r2, 8, r8 + stlo.q r2, 0, r0 + or r6, r7, r6 + sthi.q r5, -1, r6 + stlo.q r5, -8, r6 + blink tr1,r63 + +Large: + ld.b r2, 0, r63 + pta/l Loop_ua, tr1 + ori r3, -8, r7 + sub r2, r7, r22 + sub r3, r2, r6 + add r2, r4, r5 + ldlo.q r3, 0, r0 + addi r5, -16, r5 + movi 64+8, r27 ! could subtract r7 from that. + stlo.q r2, 0, r0 + sthi.q r2, 7, r0 + ldx.q r22, r6, r0 + bgtu/l r27, r4, tr1 + + addi r5, -48, r27 + pta/l Loop_line, tr0 + addi r6, 64, r36 + addi r6, -24, r19 + addi r6, -16, r20 + addi r6, -8, r21 + +Loop_line: + ldx.q r22, r36, r63 + synco + alloco r22, 32 + synco + addi r22, 32, r22 + ldx.q r22, r19, r23 + sthi.q r22, -25, r0 + ldx.q r22, r20, r24 + ldx.q r22, r21, r25 + stlo.q r22, -32, r0 + ldx.q r22, r6, r0 + sthi.q r22, -17, r23 + sthi.q r22, -9, r24 + sthi.q r22, -1, r25 + stlo.q r22, -24, r23 + stlo.q r22, -16, r24 + stlo.q r22, -8, r25 + bgeu r27, r22, tr0 + +Loop_ua: + addi r22, 8, r22 + sthi.q r22, -1, r0 + stlo.q r22, -8, r0 + ldx.q r22, r6, r0 + bgtu/l r5, r22, tr1 + + add r3, r4, r7 + ldlo.q r7, -8, r1 + sthi.q r22, 7, r0 + ldhi.q r7, -1, r7 + ptabs r18,tr1 + stlo.q r22, 0, r0 + or r1, r7, r1 + sthi.q r5, 15, r1 + stlo.q r5, 8, r1 + blink tr1, r63 +copy_user_memcpy_end: + nop diff --git a/arch/sh64/lib/dbg.c b/arch/sh64/lib/dbg.c new file mode 100644 index 000000000..d74913fe3 --- /dev/null +++ b/arch/sh64/lib/dbg.c @@ -0,0 +1,394 @@ +/*-------------------------------------------------------------------------- +-- +-- Identity : Linux50 Debug Funcions +-- +-- File : arch/sh64/lib/dbg.C +-- +-- Copyright 2000, 2001 STMicroelectronics Limited. +-- Copyright 2004 Richard Curnow (evt_debug etc) +-- +--------------------------------------------------------------------------*/ +#include +#include +#include +#include +#include + +typedef u64 regType_t; + +static regType_t getConfigReg(u64 id) +{ + register u64 reg __asm__("r2"); + asm volatile ("getcfg %1, 0, %0":"=r" (reg):"r"(id)); + return (reg); +} + +/* ======================================================================= */ + +static char *szTab[] = { "4k", "64k", "1M", "512M" }; +static char *protTab[] = { "----", + "---R", + "--X-", + "--XR", + "-W--", + "-W-R", + "-WX-", + "-WXR", + "U---", + "U--R", + "U-X-", + "U-XR", + "UW--", + "UW-R", + "UWX-", + "UWXR" +}; +#define ITLB_BASE 0x00000000 +#define DTLB_BASE 0x00800000 +#define MAX_TLBs 64 +/* PTE High */ +#define GET_VALID(pte) ((pte) & 0x1) +#define GET_SHARED(pte) ((pte) & 0x2) +#define GET_ASID(pte) ((pte >> 2) & 0x0ff) +#define GET_EPN(pte) ((pte) & 0xfffff000) + +/* PTE Low */ +#define GET_CBEHAVIOR(pte) ((pte) & 0x3) +#define GET_PAGE_SIZE(pte) szTab[((pte >> 3) & 0x3)] +#define GET_PROTECTION(pte) protTab[((pte >> 6) & 0xf)] +#define GET_PPN(pte) ((pte) & 0xfffff000) + +#define PAGE_1K_MASK 0x00000000 +#define PAGE_4K_MASK 0x00000010 +#define PAGE_64K_MASK 0x00000080 +#define MMU_PAGESIZE_MASK (PAGE_64K_MASK | PAGE_4K_MASK) +#define PAGE_1MB_MASK MMU_PAGESIZE_MASK +#define PAGE_1K (1024) +#define PAGE_4K (1024 * 4) +#define PAGE_64K (1024 * 64) +#define PAGE_1MB (1024 * 1024) + +#define HOW_TO_READ_TLB_CONTENT \ + "[ ID] PPN EPN ASID Share CB P.Size PROT.\n" + +void print_single_tlb(unsigned long tlb, int single_print) +{ + regType_t pteH; + regType_t pteL; + unsigned int valid, shared, asid, epn, cb, ppn; + char *pSize; + char *pProt; + + /* + ** in case of single print is 
true, this implies: + ** 1) print the TLB in any case also if NOT VALID + ** 2) print out the header + */ + + pteH = getConfigReg(tlb); + valid = GET_VALID(pteH); + if (single_print) + printk(HOW_TO_READ_TLB_CONTENT); + else if (!valid) + return; + + pteL = getConfigReg(tlb + 1); + + shared = GET_SHARED(pteH); + asid = GET_ASID(pteH); + epn = GET_EPN(pteH); + cb = GET_CBEHAVIOR(pteL); + pSize = GET_PAGE_SIZE(pteL); + pProt = GET_PROTECTION(pteL); + ppn = GET_PPN(pteL); + printk("[%c%2ld] 0x%08x 0x%08x %03d %02x %02x %4s %s\n", + ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP), + ppn, epn, asid, shared, cb, pSize, pProt); +} + +void print_dtlb(void) +{ + int count; + unsigned long tlb; + + printk(" ================= SH-5 D-TLBs Status ===================\n"); + printk(HOW_TO_READ_TLB_CONTENT); + tlb = DTLB_BASE; + for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP) + print_single_tlb(tlb, 0); + printk + (" =============================================================\n"); +} + +void print_itlb(void) +{ + int count; + unsigned long tlb; + + printk(" ================= SH-5 I-TLBs Status ===================\n"); + printk(HOW_TO_READ_TLB_CONTENT); + tlb = ITLB_BASE; + for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP) + print_single_tlb(tlb, 0); + printk + (" =============================================================\n"); +} + +/* ======================================================================= */ + +#include "syscalltab.h" + +struct ring_node { + int evt; + int ret_addr; + int event; + int tra; + int pid; + unsigned long sp; + unsigned long pc; +}; + +static struct ring_node event_ring[16]; +static int event_ptr = 0; + +void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs) +{ + int syscallno = tra & 0xff; + unsigned long sp; + unsigned long stack_bottom; + int pid; + struct ring_node *rr; + + pid = current->pid; + stack_bottom = (unsigned long) current->thread_info; + asm volatile("ori r15, 0, %0" : "=r" (sp)); + rr = event_ring + event_ptr; + rr->evt = evt; + rr->ret_addr = ret_addr; + rr->event = event; + rr->tra = tra; + rr->pid = pid; + rr->sp = sp; + rr->pc = regs->pc; + + if (sp < stack_bottom + 3092) { + printk("evt_debug : stack underflow report\n"); + int i, j; + for (j=0, i = event_ptr; j<16; j++) { + rr = event_ring + i; + printk("evt=%08x event=%08x tra=%08x pid=%5d sp=%08lx pc=%08lx\n", + rr->evt, rr->event, rr->tra, rr->pid, rr->sp, rr->pc); + i--; + i &= 15; + } + panic("STACK UNDERFLOW\n"); + } + + event_ptr = (event_ptr + 1) & 15; + + if ((event == 2) && (evt == 0x160)) { + if (syscallno < NUM_SYSCALL_INFO_ENTRIES) + printk("Task %d: %s()\n", + current->pid, + syscall_info_table[syscallno].name); + } +} + +void evt_debug2(unsigned int ret) +{ + printk("Task %d: syscall returns %08x\n", current->pid, ret); +} + +void evt_debug_ret_from_irq(struct pt_regs *regs) +{ + int pid; + struct ring_node *rr; + + pid = current->pid; + rr = event_ring + event_ptr; + rr->evt = 0xffff; + rr->ret_addr = 0; + rr->event = 0; + rr->tra = 0; + rr->pid = pid; + rr->pc = regs->pc; + event_ptr = (event_ptr + 1) & 15; +} + +void evt_debug_ret_from_exc(struct pt_regs *regs) +{ + int pid; + struct ring_node *rr; + + pid = current->pid; + rr = event_ring + event_ptr; + rr->evt = 0xfffe; + rr->ret_addr = 0; + rr->event = 0; + rr->tra = 0; + rr->pid = pid; + rr->pc = regs->pc; + event_ptr = (event_ptr + 1) & 15; +} + +/* ======================================================================= */ + +void show_excp_regs(char *from, int trapnr, int 
signr, struct pt_regs *regs) +{ + + unsigned long long ah, al, bh, bl, ch, cl; + + printk("\n"); + printk("EXCEPTION - %s: task %d; Linux trap # %d; signal = %d\n", + ((from) ? from : "???"), current->pid, trapnr, signr); + + asm volatile ("getcon " __EXPEVT ", %0":"=r"(ah)); + asm volatile ("getcon " __EXPEVT ", %0":"=r"(al)); + ah = (ah) >> 32; + al = (al) & 0xffffffff; + asm volatile ("getcon " __KCR1 ", %0":"=r"(bh)); + asm volatile ("getcon " __KCR1 ", %0":"=r"(bl)); + bh = (bh) >> 32; + bl = (bl) & 0xffffffff; + asm volatile ("getcon " __INTEVT ", %0":"=r"(ch)); + asm volatile ("getcon " __INTEVT ", %0":"=r"(cl)); + ch = (ch) >> 32; + cl = (cl) & 0xffffffff; + printk("EXPE: %08Lx%08Lx KCR1: %08Lx%08Lx INTE: %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + asm volatile ("getcon " __PEXPEVT ", %0":"=r"(ah)); + asm volatile ("getcon " __PEXPEVT ", %0":"=r"(al)); + ah = (ah) >> 32; + al = (al) & 0xffffffff; + asm volatile ("getcon " __PSPC ", %0":"=r"(bh)); + asm volatile ("getcon " __PSPC ", %0":"=r"(bl)); + bh = (bh) >> 32; + bl = (bl) & 0xffffffff; + asm volatile ("getcon " __PSSR ", %0":"=r"(ch)); + asm volatile ("getcon " __PSSR ", %0":"=r"(cl)); + ch = (ch) >> 32; + cl = (cl) & 0xffffffff; + printk("PEXP: %08Lx%08Lx PSPC: %08Lx%08Lx PSSR: %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->pc) >> 32; + al = (regs->pc) & 0xffffffff; + bh = (regs->regs[18]) >> 32; + bl = (regs->regs[18]) & 0xffffffff; + ch = (regs->regs[15]) >> 32; + cl = (regs->regs[15]) & 0xffffffff; + printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->sr) >> 32; + al = (regs->sr) & 0xffffffff; + asm volatile ("getcon " __TEA ", %0":"=r"(bh)); + asm volatile ("getcon " __TEA ", %0":"=r"(bl)); + bh = (bh) >> 32; + bl = (bl) & 0xffffffff; + asm volatile ("getcon " __KCR0 ", %0":"=r"(ch)); + asm volatile ("getcon " __KCR0 ", %0":"=r"(cl)); + ch = (ch) >> 32; + cl = (cl) & 0xffffffff; + printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[0]) >> 32; + al = (regs->regs[0]) & 0xffffffff; + bh = (regs->regs[1]) >> 32; + bl = (regs->regs[1]) & 0xffffffff; + ch = (regs->regs[2]) >> 32; + cl = (regs->regs[2]) & 0xffffffff; + printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[3]) >> 32; + al = (regs->regs[3]) & 0xffffffff; + bh = (regs->regs[4]) >> 32; + bl = (regs->regs[4]) & 0xffffffff; + ch = (regs->regs[5]) >> 32; + cl = (regs->regs[5]) & 0xffffffff; + printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[6]) >> 32; + al = (regs->regs[6]) & 0xffffffff; + bh = (regs->regs[7]) >> 32; + bl = (regs->regs[7]) & 0xffffffff; + ch = (regs->regs[8]) >> 32; + cl = (regs->regs[8]) & 0xffffffff; + printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + + ah = (regs->regs[9]) >> 32; + al = (regs->regs[9]) & 0xffffffff; + bh = (regs->regs[10]) >> 32; + bl = (regs->regs[10]) & 0xffffffff; + ch = (regs->regs[11]) >> 32; + cl = (regs->regs[11]) & 0xffffffff; + printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + printk("....\n"); + + ah = (regs->tregs[0]) >> 32; + al = (regs->tregs[0]) & 0xffffffff; + bh = (regs->tregs[1]) >> 32; + bl = (regs->tregs[1]) & 0xffffffff; + ch = (regs->tregs[2]) >> 32; + cl = (regs->tregs[2]) & 0xffffffff; + printk("T0 : %08Lx%08Lx T1 : %08Lx%08Lx T2 : %08Lx%08Lx\n", + ah, al, bh, bl, ch, cl); + printk("....\n"); + + 
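+	/* Finally dump both TLBs, so the fault context printed above can be
+	   checked against the current I- and D-side mappings. */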
print_dtlb(); + print_itlb(); +} + +/* ======================================================================= */ + +/* +** Depending on scan the MMU, Data or Instrction side +** looking for a valid mapping matching Eaddr & asid. +** Return -1 if not found or the TLB id entry otherwise. +** Note: it works only for 4k pages! +*/ +static unsigned long +lookup_mmu_side(unsigned long base, unsigned long Eaddr, unsigned long asid) +{ + regType_t pteH; + unsigned long epn; + int count; + + epn = Eaddr & 0xfffff000; + + for (count = 0; count < MAX_TLBs; count++, base += TLB_STEP) { + pteH = getConfigReg(base); + if (GET_VALID(pteH)) + if ((unsigned long) GET_EPN(pteH) == epn) + if ((unsigned long) GET_ASID(pteH) == asid) + break; + } + return ((unsigned long) ((count < MAX_TLBs) ? base : -1)); +} + +unsigned long lookup_dtlb(unsigned long Eaddr) +{ + unsigned long asid = get_asid(); + return (lookup_mmu_side((u64) DTLB_BASE, Eaddr, asid)); +} + +unsigned long lookup_itlb(unsigned long Eaddr) +{ + unsigned long asid = get_asid(); + return (lookup_mmu_side((u64) ITLB_BASE, Eaddr, asid)); +} + +void print_page(struct page *page) +{ + printk(" page[%p] -> index 0x%lx, count 0x%x, flags 0x%lx\n", + page, page->index, page_count(page), page->flags); + printk(" address_space = %p, pages =%ld\n", page->mapping, + page->mapping->nrpages); + +} diff --git a/arch/sh64/lib/io.c b/arch/sh64/lib/io.c new file mode 100644 index 000000000..7e8af3a9e --- /dev/null +++ b/arch/sh64/lib/io.c @@ -0,0 +1,207 @@ +/* + * Copyright (C) 2000 David J. Mckay (david.mckay@st.com) + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * This file contains the I/O routines for use on the overdrive board + * + */ + +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_SH_CAYMAN +#include +#endif + +/* + * readX/writeX() are used to access memory mapped devices. On some + * architectures the memory mapped IO stuff needs to be accessed + * differently. On the SuperH architecture, we just read/write the + * memory location directly. + */ + +#define dprintk(x...) 
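+
+/* dprintk() is intentionally compiled out via the empty definition above.
+ * A possible debugging variant (an illustrative sketch only) would simply
+ * forward it to printk, e.g.
+ *
+ *	#define dprintk(x...)	printk(x)
+ *
+ * so that each inb/outb/... below logs the port, the value and the
+ * translated address. */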
+ +static int io_addr(int x) { + if (x < 0x400) { +#ifdef CONFIG_SH_CAYMAN + return (x << 2) | smsc_superio_virt; +#else + panic ("Illegal access to I/O port 0x%04x\n", x); + return 0; +#endif + } else { +#ifdef CONFIG_PCI + return (x + pciio_virt); +#else + panic ("Illegal access to I/O port 0x%04x\n", x); + return 0; +#endif + } +} + +unsigned long inb(unsigned long port) +{ + unsigned long r; + + r = ctrl_inb(io_addr(port)); + dprintk("inb(0x%x)=0x%x (0x%x)\n", port, r, io_addr(port)); + return r; +} + +unsigned long inw(unsigned long port) +{ + unsigned long r; + + r = ctrl_inw(io_addr(port)); + dprintk("inw(0x%x)=0x%x (0x%x)\n", port, r, io_addr(port)); + return r; +} + +unsigned long inl(unsigned long port) +{ + unsigned long r; + + r = ctrl_inl(io_addr(port)); + dprintk("inl(0x%x)=0x%x (0x%x)\n", port, r, io_addr(port)); + return r; +} + +void outb(unsigned long value, unsigned long port) +{ + dprintk("outb(0x%x,0x%x) (0x%x)\n", value, port, io_addr(port)); + ctrl_outb(value, io_addr(port)); +} + +void outw(unsigned long value, unsigned long port) +{ + dprintk("outw(0x%x,0x%x) (0x%x)\n", value, port, io_addr(port)); + ctrl_outw(value, io_addr(port)); +} + +void outl(unsigned long value, unsigned long port) +{ + dprintk("outw(0x%x,0x%x) (0x%x)\n", value, port, io_addr(port)); + ctrl_outl(value, io_addr(port)); +} + +/* This is horrible at the moment - needs more work to do something sensible */ +#define IO_DELAY() + +#define OUT_DELAY(x,type) \ +void out##x##_p(unsigned type value,unsigned long port){out##x(value,port);IO_DELAY();} + +#define IN_DELAY(x,type) \ +unsigned type in##x##_p(unsigned long port) {unsigned type tmp=in##x(port);IO_DELAY();return tmp;} + +#if 1 +OUT_DELAY(b, long) OUT_DELAY(w, long) OUT_DELAY(l, long) + IN_DELAY(b, long) IN_DELAY(w, long) IN_DELAY(l, long) +#endif +/* Now for the string version of these functions */ +void outsb(unsigned long port, const void *addr, unsigned long count) +{ + int i; + unsigned char *p = (unsigned char *) addr; + + for (i = 0; i < count; i++, p++) { + outb(*p, port); + } +} + +void insb(unsigned long port, void *addr, unsigned long count) +{ + int i; + unsigned char *p = (unsigned char *) addr; + + for (i = 0; i < count; i++, p++) { + *p = inb(port); + } +} + +/* For the 16 and 32 bit string functions, we have to worry about alignment. + * The SH does not do unaligned accesses, so we have to read as bytes and + * then write as a word or dword. 
+ * This can be optimised a lot more, especially in the case where the data + * is aligned + */ + +void outsw(unsigned long port, const void *addr, unsigned long count) +{ + int i; + unsigned short tmp; + unsigned char *p = (unsigned char *) addr; + + for (i = 0; i < count; i++, p += 2) { + tmp = (*p) | ((*(p + 1)) << 8); + outw(tmp, port); + } +} + +void insw(unsigned long port, void *addr, unsigned long count) +{ + int i; + unsigned short tmp; + unsigned char *p = (unsigned char *) addr; + + for (i = 0; i < count; i++, p += 2) { + tmp = inw(port); + p[0] = tmp & 0xff; + p[1] = (tmp >> 8) & 0xff; + } +} + +void outsl(unsigned long port, const void *addr, unsigned long count) +{ + int i; + unsigned tmp; + unsigned char *p = (unsigned char *) addr; + + for (i = 0; i < count; i++, p += 4) { + tmp = (*p) | ((*(p + 1)) << 8) | ((*(p + 2)) << 16) | + ((*(p + 3)) << 24); + outl(tmp, port); + } +} + +void insl(unsigned long port, void *addr, unsigned long count) +{ + int i; + unsigned tmp; + unsigned char *p = (unsigned char *) addr; + + for (i = 0; i < count; i++, p += 4) { + tmp = inl(port); + p[0] = tmp & 0xff; + p[1] = (tmp >> 8) & 0xff; + p[2] = (tmp >> 16) & 0xff; + p[3] = (tmp >> 24) & 0xff; + + } +} + +void memcpy_toio(unsigned long to, const void *from, long count) +{ + unsigned char *p = (unsigned char *) from; + + while (count) { + count--; + writeb(*p++, to++); + } +} + +void memcpy_fromio(void *to, unsigned long from, long count) +{ + int i; + unsigned char *p = (unsigned char *) to; + + for (i = 0; i < count; i++) { + p[i] = readb(from); + from++; + } +} diff --git a/arch/sh64/lib/memcpy.c b/arch/sh64/lib/memcpy.c new file mode 100644 index 000000000..c785d0aa1 --- /dev/null +++ b/arch/sh64/lib/memcpy.c @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2002 Mark Debbage (Mark.Debbage@superh.com) + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + */ + +#include +#include +#include + +// This is a simplistic optimization of memcpy to increase the +// granularity of access beyond one byte using aligned +// loads and stores. This is not an optimal implementation +// for SH-5 (especially with regard to prefetching and the cache), +// and a better version should be provided later ... 
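+//
+// Rough worked example of the strategy below (illustrative numbers only):
+// with dest ending in ...0x5 and count = 100, the first loop copies 3 bytes
+// to bring dest up to an 8-byte boundary (count becomes 97); if src is then
+// also 8-byte aligned, three 32-byte block copies move 96 bytes (count
+// becomes 1), the 8-, 4- and 2-byte loops have nothing left to do, and the
+// trailing byte loop copies the final byte.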
+ +void *memcpy(void *dest, const void *src, size_t count) +{ + char *d = (char *) dest, *s = (char *) src; + + if (count >= 32) { + int i = 8 - (((unsigned long) d) & 0x7); + + if (i != 8) + while (i-- && count--) { + *d++ = *s++; + } + + if (((((unsigned long) d) & 0x7) == 0) && + ((((unsigned long) s) & 0x7) == 0)) { + while (count >= 32) { + unsigned long long t1, t2, t3, t4; + t1 = *(unsigned long long *) (s); + t2 = *(unsigned long long *) (s + 8); + t3 = *(unsigned long long *) (s + 16); + t4 = *(unsigned long long *) (s + 24); + *(unsigned long long *) (d) = t1; + *(unsigned long long *) (d + 8) = t2; + *(unsigned long long *) (d + 16) = t3; + *(unsigned long long *) (d + 24) = t4; + d += 32; + s += 32; + count -= 32; + } + while (count >= 8) { + *(unsigned long long *) d = + *(unsigned long long *) s; + d += 8; + s += 8; + count -= 8; + } + } + + if (((((unsigned long) d) & 0x3) == 0) && + ((((unsigned long) s) & 0x3) == 0)) { + while (count >= 4) { + *(unsigned long *) d = *(unsigned long *) s; + d += 4; + s += 4; + count -= 4; + } + } + + if (((((unsigned long) d) & 0x1) == 0) && + ((((unsigned long) s) & 0x1) == 0)) { + while (count >= 2) { + *(unsigned short *) d = *(unsigned short *) s; + d += 2; + s += 2; + count -= 2; + } + } + } + + while (count--) { + *d++ = *s++; + } + + return d; +} diff --git a/arch/sh64/lib/old-checksum.c b/arch/sh64/lib/old-checksum.c new file mode 100644 index 000000000..df741335d --- /dev/null +++ b/arch/sh64/lib/old-checksum.c @@ -0,0 +1,17 @@ +/* + * FIXME: old compatibility stuff, will be removed soon. + */ + +#include + +unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum) +{ + int src_err=0, dst_err=0; + + sum = csum_partial_copy_generic ( src, dst, len, sum, &src_err, &dst_err); + + if (src_err || dst_err) + printk("old csum_partial_copy_fromuser(), tell mingo to convert me.\n"); + + return sum; +} diff --git a/arch/sh64/lib/page_clear.S b/arch/sh64/lib/page_clear.S new file mode 100644 index 000000000..2aadd2c0f --- /dev/null +++ b/arch/sh64/lib/page_clear.S @@ -0,0 +1,51 @@ +/* + Copyright 2003 Richard Curnow, SuperH (UK) Ltd. + + This file is subject to the terms and conditions of the GNU General Public + License. See the file "COPYING" in the main directory of this archive + for more details. + + Tight version of memset for the case of just clearing a page. It turns out + that having the alloco's spaced out slightly due to the increment/branch + pair causes them to contend less for access to the cache. Similarly, + keeping the stores apart from the allocos causes less contention. => Do two + separate loops. Do multiple stores per loop to amortise the + increment/branch cost a little. + + Parameters: + r2 : source effective address (start of page) + + Always clears 4096 bytes. + +*/ + + .section .text..SHmedia32,"ax" + .little + + .balign 8 + .global sh64_page_clear +sh64_page_clear: + pta/l 1f, tr1 + pta/l 2f, tr2 + ptabs/l r18, tr0 + + movi 4096, r7 + add r2, r7, r7 + add r2, r63, r6 +1: + alloco r6, 0 + addi r6, 32, r6 + bgt/l r7, r6, tr1 + + add r2, r63, r6 +2: + st.q r6, 0, r63 + st.q r6, 8, r63 + st.q r6, 16, r63 + st.q r6, 24, r63 + addi r6, 32, r6 + bgt/l r7, r6, tr2 + + blink tr0, r63 + + diff --git a/arch/sh64/lib/page_copy.S b/arch/sh64/lib/page_copy.S new file mode 100644 index 000000000..804d2a00d --- /dev/null +++ b/arch/sh64/lib/page_copy.S @@ -0,0 +1,82 @@ +/* + Copyright 2003 Richard Curnow, SuperH (UK) Ltd. + + This file is subject to the terms and conditions of the GNU General Public + License. 
See the file "COPYING" in the main directory of this archive + for more details. + + Tight version of mempy for the case of just copying a page. + Prefetch strategy empirically optimised against RTL simulations + of SH5-101 cut2 eval chip with Cayman board DDR memory. + + Parameters: + r2 : source effective address (start of page) + r3 : destination effective address (start of page) + + Always copies 4096 bytes. + + Points to review. + * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead. + It seems like the prefetch needs to be at at least 4 lines ahead to get + the data into the cache in time, and the allocos contend with outstanding + prefetches for the same cache set, so it's better to have the numbers + different. + */ + + .section .text..SHmedia32,"ax" + .little + + .balign 8 + .global sh64_page_copy +sh64_page_copy: + + /* Copy 4096 bytes worth of data from r2 to r3. + Do prefetches 4 lines ahead. + Do alloco 2 lines ahead */ + + pta 1f, tr1 + pta 2f, tr2 + pta 3f, tr3 + ptabs r18, tr0 + + ld.q r2, 0x00, r63 + ld.q r2, 0x20, r63 + ld.q r2, 0x40, r63 + ld.q r2, 0x60, r63 + alloco r3, 0x00 + alloco r3, 0x20 + + movi 3968, r6 + add r3, r6, r6 + addi r6, 64, r7 + addi r7, 64, r8 + sub r2, r3, r60 + addi r60, 8, r61 + addi r61, 8, r62 + addi r62, 8, r23 + addi r60, 0x80, r22 + +/* Minimal code size. The extra branches inside the loop don't cost much + because they overlap with the time spent waiting for prefetches to + complete. */ +1: + bge/u r3, r6, tr2 ! skip prefetch for last 4 lines + ldx.q r3, r22, r63 ! prefetch 4 lines hence +2: + bge/u r3, r7, tr3 ! skip alloco for last 2 lines + alloco r3, 0x40 ! alloc destination line 2 lines ahead +3: + ldx.q r3, r60, r36 + ldx.q r3, r61, r37 + ldx.q r3, r62, r38 + ldx.q r3, r23, r39 + st.q r3, 0, r36 + st.q r3, 8, r37 + st.q r3, 16, r38 + st.q r3, 24, r39 + addi r3, 32, r3 + bgt/l r8, r3, tr1 + + blink tr0, r63 ! return + + diff --git a/arch/sh64/lib/panic.c b/arch/sh64/lib/panic.c new file mode 100644 index 000000000..c9eb1cb50 --- /dev/null +++ b/arch/sh64/lib/panic.c @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2003 Richard Curnow, SuperH UK Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include + +/* THIS IS A PHYSICAL ADDRESS */ +#define HDSP2534_ADDR (0x04002100) + +#ifdef CONFIG_SH_CAYMAN + +static void poor_mans_delay(void) +{ + int i; + for (i = 0; i < 2500000; i++) { + } /* poor man's delay */ +} + +static void show_value(unsigned long x) +{ + int i; + unsigned nibble; + for (i = 0; i < 8; i++) { + nibble = ((x >> (i * 4)) & 0xf); + + ctrl_outb(nibble + ((nibble > 9) ? 55 : 48), + HDSP2534_ADDR + 0xe0 + ((7 - i) << 2)); + } +} + +#endif + +void +panic_handler(unsigned long panicPC, unsigned long panicSSR, + unsigned long panicEXPEVT) +{ +#ifdef CONFIG_SH_CAYMAN + while (1) { + /* This piece of code displays the PC on the LED display */ + show_value(panicPC); + poor_mans_delay(); + show_value(panicSSR); + poor_mans_delay(); + show_value(panicEXPEVT); + poor_mans_delay(); + } +#endif + + /* Never return from the panic handler */ + for (;;) ; + +} diff --git a/arch/sh64/lib/udelay.c b/arch/sh64/lib/udelay.c new file mode 100644 index 000000000..dad2f254e --- /dev/null +++ b/arch/sh64/lib/udelay.c @@ -0,0 +1,60 @@ +/* + * arch/sh64/lib/udelay.c + * + * Delay routines, using a pre-computed "loops_per_jiffy" value. 
+ * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003, 2004 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include + +extern unsigned long loops_per_jiffy; + +/* + * Use only for very small delays (< 1 msec). + * + * The active part of our cycle counter is only 32-bits wide, and + * we're treating the difference between two marks as signed. On + * a 1GHz box, that's about 2 seconds. + */ + +void __delay(int loops) +{ + long long dummy; + __asm__ __volatile__("gettr tr0, %1\n\t" + "pta $+4, tr0\n\t" + "addi %0, -1, %0\n\t" + "bne %0, r63, tr0\n\t" + "ptabs %1, tr0\n\t":"=r"(loops), + "=r"(dummy) + :"0"(loops)); +} + +void __udelay(unsigned long long usecs, unsigned long lpj) +{ + usecs *= (((unsigned long long) HZ << 32) / 1000000) * lpj; + __delay((long long) usecs >> 32); +} + +void __ndelay(unsigned long long nsecs, unsigned long lpj) +{ + nsecs *= (((unsigned long long) HZ << 32) / 1000000000) * lpj; + __delay((long long) nsecs >> 32); +} + +void udelay(unsigned long usecs) +{ + __udelay(usecs, loops_per_jiffy); +} + +void ndelay(unsigned long nsecs) +{ + __ndelay(nsecs, loops_per_jiffy); +} + diff --git a/arch/sh64/mach-cayman/Makefile b/arch/sh64/mach-cayman/Makefile new file mode 100644 index 000000000..4a48b53fc --- /dev/null +++ b/arch/sh64/mach-cayman/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the Hitachi Cayman specific parts of the kernel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +obj-y := setup.o irq.o +obj-$(CONFIG_HEARTBEAT) += led.o + diff --git a/arch/sh64/mach-cayman/irq.c b/arch/sh64/mach-cayman/irq.c new file mode 100644 index 000000000..4de91077d --- /dev/null +++ b/arch/sh64/mach-cayman/irq.c @@ -0,0 +1,196 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * arch/sh64/kernel/irq_cayman.c + * + * SH-5 Cayman Interrupt Support + * + * This file handles the board specific parts of the Cayman interrupt system + * + * Copyright (C) 2002 Stuart Menefy + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +unsigned long epld_virt; + +#define EPLD_BASE 0x04002000 +#define EPLD_STATUS_BASE (epld_virt + 0x10) +#define EPLD_MASK_BASE (epld_virt + 0x20) + +/* Note the SMSC SuperIO chip and SMSC LAN chip interrupts are all muxed onto + the same SH-5 interrupt */ + +static irqreturn_t cayman_interrupt_smsc(int irq, void *dev_id, struct pt_regs *regs) +{ + printk(KERN_INFO "CAYMAN: spurious SMSC interrupt\n"); + return IRQ_NONE; +} + +static irqreturn_t cayman_interrupt_pci2(int irq, void *dev_id, struct pt_regs *regs) +{ + printk(KERN_INFO "CAYMAN: spurious PCI interrupt, IRQ %d\n", irq); + return IRQ_NONE; +} + +static struct irqaction cayman_action_smsc = { + .name = "Cayman SMSC Mux", + .handler = cayman_interrupt_smsc, + .flags = SA_INTERRUPT, +}; + +static struct irqaction cayman_action_pci2 = { + .name = "Cayman PCI2 Mux", + .handler = cayman_interrupt_pci2, + .flags = SA_INTERRUPT, +}; + +static void enable_cayman_irq(unsigned int irq) +{ + unsigned long flags; + unsigned long mask; + unsigned int reg; + unsigned char bit; + + irq -= START_EXT_IRQS; + reg = EPLD_MASK_BASE + ((irq / 8) << 2); + bit = 1<<(irq % 8); + save_and_cli(flags); + mask = ctrl_inl(reg); + mask |= bit; + ctrl_outl(mask, reg); + restore_flags(flags); +} + +void disable_cayman_irq(unsigned int irq) +{ + unsigned long flags; + unsigned long mask; + unsigned int reg; + unsigned char bit; + + irq -= START_EXT_IRQS; + reg = EPLD_MASK_BASE + ((irq / 8) << 2); + bit = 1<<(irq % 8); + save_and_cli(flags); + mask = ctrl_inl(reg); + mask &= ~bit; + ctrl_outl(mask, reg); + restore_flags(flags); +} + +static void ack_cayman_irq(unsigned int irq) +{ + disable_cayman_irq(irq); +} + +static void end_cayman_irq(unsigned int irq) +{ + if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) + enable_cayman_irq(irq); +} + +static unsigned int startup_cayman_irq(unsigned int irq) +{ + enable_cayman_irq(irq); + return 0; /* never anything pending */ +} + +static void shutdown_cayman_irq(unsigned int irq) +{ + disable_cayman_irq(irq); +} + +struct hw_interrupt_type cayman_irq_type = { + .typename = "Cayman-IRQ", + .startup = startup_cayman_irq, + .shutdown = shutdown_cayman_irq, + .enable = enable_cayman_irq, + .disable = disable_cayman_irq, + .ack = ack_cayman_irq, + .end = end_cayman_irq, +}; + +int cayman_irq_demux(int evt) +{ + int irq = intc_evt_to_irq[evt]; + + if (irq == SMSC_IRQ) { + unsigned long status; + int i; + + status = ctrl_inl(EPLD_STATUS_BASE) & + ctrl_inl(EPLD_MASK_BASE) & 0xff; + if (status == 0) { + irq = -1; + } else { + for (i=0; i<8; i++) { + if (status & (1<= NR_INTC_IRQS + 24) && (irq < NR_INTC_IRQS + 32)) { + return sprintf(p, "(PCI2 %d)", irq - (NR_INTC_IRQS + 24)); + } + + return 0; +} +#endif + +void init_cayman_irq(void) +{ + int i; + + epld_virt = onchip_remap(EPLD_BASE, 1024, "EPLD"); + if (!epld_virt) { + printk(KERN_ERR "Cayman IRQ: Unable to remap EPLD\n"); + return; + } + + for (i=0; i + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Flash the LEDs + */ +#include + +/* +** It is supposed these functions to be used for a low level +** debugging (via Cayman LEDs), hence to be available as soon +** as possible. 
+** Unfortunately Cayman LEDs relies on Cayman EPLD to be mapped +** (this happen when IRQ are initialized... quite late). +** These triky dependencies should be removed. Temporary, it +** may be enough to NOP until EPLD is mapped. +*/ + +extern unsigned long epld_virt; + +#define LED_ADDR (epld_virt + 0x008) +#define HDSP2534_ADDR (epld_virt + 0x100) + +void mach_led(int position, int value) +{ + if (!epld_virt) + return; + + if (value) + ctrl_outl(0, LED_ADDR); + else + ctrl_outl(1, LED_ADDR); + +} + +void mach_alphanum(int position, unsigned char value) +{ + if (!epld_virt) + return; + + ctrl_outb(value, HDSP2534_ADDR + 0xe0 + (position << 2)); +} + +void mach_alphanum_brightness(int setting) +{ + ctrl_outb(setting & 7, HDSP2534_ADDR + 0xc0); +} diff --git a/arch/sh64/mach-cayman/setup.c b/arch/sh64/mach-cayman/setup.c new file mode 100644 index 000000000..53dfd6099 --- /dev/null +++ b/arch/sh64/mach-cayman/setup.c @@ -0,0 +1,209 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/mach-cayman/setup.c + * + * SH5 Cayman support + * + * This file handles the architecture-dependent parts of initialization + * + * Copyright David J. Mckay. + * Needs major work! + * + * benedict.gaster@superh.com: 3rd May 2002 + * Added support for ramdisk, removing statically linked romfs at the same time. + * + * lethal@linux-sh.org: 15th May 2003 + * Use the generic procfs cpuinfo interface, just return a valid board name. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RES_COUNT(res) ((sizeof((res))/sizeof(struct resource))) + +/* + * Platform Dependent Interrupt Priorities. 
+ */ + +/* Using defaults defined in irq.h */ +#define RES NO_PRIORITY /* Disabled */ +#define IR0 IRL0_PRIORITY /* IRLs */ +#define IR1 IRL1_PRIORITY +#define IR2 IRL2_PRIORITY +#define IR3 IRL3_PRIORITY +#define PCA INTA_PRIORITY /* PCI Ints */ +#define PCB INTB_PRIORITY +#define PCC INTC_PRIORITY +#define PCD INTD_PRIORITY +#define SER TOP_PRIORITY +#define ERR TOP_PRIORITY +#define PW0 TOP_PRIORITY +#define PW1 TOP_PRIORITY +#define PW2 TOP_PRIORITY +#define PW3 TOP_PRIORITY +#define DM0 NO_PRIORITY /* DMA Ints */ +#define DM1 NO_PRIORITY +#define DM2 NO_PRIORITY +#define DM3 NO_PRIORITY +#define DAE NO_PRIORITY +#define TU0 TIMER_PRIORITY /* TMU Ints */ +#define TU1 NO_PRIORITY +#define TU2 NO_PRIORITY +#define TI2 NO_PRIORITY +#define ATI NO_PRIORITY /* RTC Ints */ +#define PRI NO_PRIORITY +#define CUI RTC_PRIORITY +#define ERI SCIF_PRIORITY /* SCIF Ints */ +#define RXI SCIF_PRIORITY +#define BRI SCIF_PRIORITY +#define TXI SCIF_PRIORITY +#define ITI TOP_PRIORITY /* WDT Ints */ + +/* Setup for the SMSC FDC37C935 */ +#define SMSC_SUPERIO_BASE 0x04000000 +#define SMSC_CONFIG_PORT_ADDR 0x3f0 +#define SMSC_INDEX_PORT_ADDR SMSC_CONFIG_PORT_ADDR +#define SMSC_DATA_PORT_ADDR 0x3f1 + +#define SMSC_ENTER_CONFIG_KEY 0x55 +#define SMSC_EXIT_CONFIG_KEY 0xaa + +#define SMCS_LOGICAL_DEV_INDEX 0x07 +#define SMSC_DEVICE_ID_INDEX 0x20 +#define SMSC_DEVICE_REV_INDEX 0x21 +#define SMSC_ACTIVATE_INDEX 0x30 +#define SMSC_PRIMARY_INT_INDEX 0x70 +#define SMSC_SECONDARY_INT_INDEX 0x72 + +#define SMSC_KEYBOARD_DEVICE 7 + +#define SMSC_SUPERIO_READ_INDEXED(index) ({ \ + outb((index), SMSC_INDEX_PORT_ADDR); \ + inb(SMSC_DATA_PORT_ADDR); }) +#define SMSC_SUPERIO_WRITE_INDEXED(val, index) ({ \ + outb((index), SMSC_INDEX_PORT_ADDR); \ + outb((val), SMSC_DATA_PORT_ADDR); }) + +unsigned long smsc_superio_virt; + +/* + * Platform dependent structures: maps and parms block. 
+ */ +struct resource io_resources[] = { + /* To be updated with external devices */ +}; + +struct resource kram_resources[] = { + { "Kernel code", 0, 0 }, /* These must be last in the array */ + { "Kernel data", 0, 0 } /* These must be last in the array */ +}; + +struct resource xram_resources[] = { + /* To be updated with external devices */ +}; + +struct resource rom_resources[] = { + /* To be updated with external devices */ +}; + +struct sh64_platform platform_parms = { + .readonly_rootfs = 1, + .initial_root_dev = 0x0100, + .loader_type = 1, + .io_res_p = io_resources, + .io_res_count = RES_COUNT(io_resources), + .kram_res_p = kram_resources, + .kram_res_count = RES_COUNT(kram_resources), + .xram_res_p = xram_resources, + .xram_res_count = RES_COUNT(xram_resources), + .rom_res_p = rom_resources, + .rom_res_count = RES_COUNT(rom_resources), +}; + +int platform_int_priority[NR_INTC_IRQS] = { + IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD, /* IRQ 0- 7 */ + RES, RES, RES, RES, SER, ERR, PW3, PW2, /* IRQ 8-15 */ + PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES, /* IRQ 16-23 */ + RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 24-31 */ + TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI, /* IRQ 32-39 */ + RXI, BRI, TXI, RES, RES, RES, RES, RES, /* IRQ 40-47 */ + RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 48-55 */ + RES, RES, RES, RES, RES, RES, RES, ITI, /* IRQ 56-63 */ +}; + +static int __init smsc_superio_setup(void) +{ + unsigned char devid, devrev; + + smsc_superio_virt = onchip_remap(SMSC_SUPERIO_BASE, 1024, "SMSC SuperIO"); + if (!smsc_superio_virt) { + panic("Unable to remap SMSC SuperIO\n"); + } + + /* Initially the chip is in run state */ + /* Put it into configuration state */ + outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); + outb(SMSC_ENTER_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); + + /* Read device ID info */ + devid = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_ID_INDEX); + devrev = SMSC_SUPERIO_READ_INDEXED(SMSC_DEVICE_REV_INDEX); + printk("SMSC SuperIO devid %02x rev %02x\n", devid, devrev); + + /* Select the keyboard device */ + SMSC_SUPERIO_WRITE_INDEXED(SMSC_KEYBOARD_DEVICE, SMCS_LOGICAL_DEV_INDEX); + + /* enable it */ + SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_ACTIVATE_INDEX); + + /* Select the interrupts */ + /* On a PC keyboard is IRQ1, mouse is IRQ12 */ + SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX); + SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX); + + /* Exit the configuraton state */ + outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); + + return 0; +} + +/* This is grotty, but, because kernel is always referenced on the link line + * before any devices, this is safe. + */ +__initcall(smsc_superio_setup); + +void __init platform_setup(void) +{ + /* Cayman platform leaves the decision to head.S, for now */ + platform_parms.fpu_flags = fpu_in_use; +} + +void __init platform_monitor(void) +{ + /* Nothing yet .. */ +} + +void __init platform_reserve(void) +{ + /* Nothing yet .. */ +} + +const char *get_system_type(void) +{ + return "Hitachi Cayman"; +} + diff --git a/arch/sh64/mach-harp/Makefile b/arch/sh64/mach-harp/Makefile new file mode 100644 index 000000000..63f065bad --- /dev/null +++ b/arch/sh64/mach-harp/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the ST50 Harp specific parts of the kernel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). 
+# + +O_TARGET := harp.o + +obj-y := setup.o + +include $(TOPDIR)/Rules.make + diff --git a/arch/sh64/mach-harp/setup.c b/arch/sh64/mach-harp/setup.c new file mode 100644 index 000000000..3938a65c4 --- /dev/null +++ b/arch/sh64/mach-harp/setup.c @@ -0,0 +1,139 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/mach-harp/setup.c + * + * SH-5 Simulator Platform Support + * + * This file handles the architecture-dependent parts of initialization + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * benedict.gaster@superh.com: 3rd May 2002 + * Added support for ramdisk, removing statically linked romfs at the same time. * + * + * lethal@linux-sh.org: 15th May 2003 + * Use the generic procfs cpuinfo interface, just return a valid board name. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RES_COUNT(res) ((sizeof((res))/sizeof(struct resource))) + +/* + * Platform Dependent Interrupt Priorities. + */ + +/* Using defaults defined in irq.h */ +#define RES NO_PRIORITY /* Disabled */ +#define IR0 IRL0_PRIORITY /* IRLs */ +#define IR1 IRL1_PRIORITY +#define IR2 IRL2_PRIORITY +#define IR3 IRL3_PRIORITY +#define PCA INTA_PRIORITY /* PCI Ints */ +#define PCB INTB_PRIORITY +#define PCC INTC_PRIORITY +#define PCD INTD_PRIORITY +#define SER TOP_PRIORITY +#define ERR TOP_PRIORITY +#define PW0 TOP_PRIORITY +#define PW1 TOP_PRIORITY +#define PW2 TOP_PRIORITY +#define PW3 TOP_PRIORITY +#define DM0 NO_PRIORITY /* DMA Ints */ +#define DM1 NO_PRIORITY +#define DM2 NO_PRIORITY +#define DM3 NO_PRIORITY +#define DAE NO_PRIORITY +#define TU0 TIMER_PRIORITY /* TMU Ints */ +#define TU1 NO_PRIORITY +#define TU2 NO_PRIORITY +#define TI2 NO_PRIORITY +#define ATI NO_PRIORITY /* RTC Ints */ +#define PRI NO_PRIORITY +#define CUI RTC_PRIORITY +#define ERI SCIF_PRIORITY /* SCIF Ints */ +#define RXI SCIF_PRIORITY +#define BRI SCIF_PRIORITY +#define TXI SCIF_PRIORITY +#define ITI TOP_PRIORITY /* WDT Ints */ + +/* + * Platform dependent structures: maps and parms block. 
+ */ +struct resource io_resources[] = { + /* To be updated with external devices */ +}; + +struct resource kram_resources[] = { + { "Kernel code", 0, 0 }, /* These must be last in the array */ + { "Kernel data", 0, 0 } /* These must be last in the array */ +}; + +struct resource xram_resources[] = { + /* To be updated with external devices */ +}; + +struct resource rom_resources[] = { + /* To be updated with external devices */ +}; + +struct sh64_platform platform_parms = { + .readonly_rootfs = 1, + .initial_root_dev = 0x0100, + .loader_type = 1, + .io_res_p = io_resources, + .io_res_count = RES_COUNT(io_resources), + .kram_res_p = kram_resources, + .kram_res_count = RES_COUNT(kram_resources), + .xram_res_p = xram_resources, + .xram_res_count = RES_COUNT(xram_resources), + .rom_res_p = rom_resources, + .rom_res_count = RES_COUNT(rom_resources), +}; + +int platform_int_priority[NR_INTC_IRQS] = { + IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD, /* IRQ 0- 7 */ + RES, RES, RES, RES, SER, ERR, PW3, PW2, /* IRQ 8-15 */ + PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES, /* IRQ 16-23 */ + RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 24-31 */ + TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI, /* IRQ 32-39 */ + RXI, BRI, TXI, RES, RES, RES, RES, RES, /* IRQ 40-47 */ + RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 48-55 */ + RES, RES, RES, RES, RES, RES, RES, ITI, /* IRQ 56-63 */ +}; + +void __init platform_setup(void) +{ + /* Harp platform leaves the decision to head.S, for now */ + platform_parms.fpu_flags = fpu_in_use; +} + +void __init platform_monitor(void) +{ + /* Nothing yet .. */ +} + +void __init platform_reserve(void) +{ + /* Nothing yet .. */ +} + +const char *get_system_type(void) +{ + return "ST50 Harp"; +} + diff --git a/arch/sh64/mach-romram/Makefile b/arch/sh64/mach-romram/Makefile new file mode 100644 index 000000000..02d05c05a --- /dev/null +++ b/arch/sh64/mach-romram/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the SH-5 ROM/RAM specific parts of the kernel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +O_TARGET := romram.o + +obj-y := setup.o + +include $(TOPDIR)/Rules.make + diff --git a/arch/sh64/mach-romram/setup.c b/arch/sh64/mach-romram/setup.c new file mode 100644 index 000000000..a9ba03fc5 --- /dev/null +++ b/arch/sh64/mach-romram/setup.c @@ -0,0 +1,142 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/mach-romram/setup.c + * + * SH-5 ROM/RAM Platform Support + * + * This file handles the architecture-dependent parts of initialization + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * benedict.gaster@superh.com: 3rd May 2002 + * Added support for ramdisk, removing statically linked romfs at the same time. * + * + * lethal@linux-sh.org: 15th May 2003 + * Use the generic procfs cpuinfo interface, just return a valid board name. + * + * Sean.McGoogan@superh.com 17th Feb 2004 + * copied from arch/sh64/mach-harp/setup.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RES_COUNT(res) ((sizeof((res))/sizeof(struct resource))) + +/* + * Platform Dependent Interrupt Priorities. 
+ */ + +/* Using defaults defined in irq.h */ +#define RES NO_PRIORITY /* Disabled */ +#define IR0 IRL0_PRIORITY /* IRLs */ +#define IR1 IRL1_PRIORITY +#define IR2 IRL2_PRIORITY +#define IR3 IRL3_PRIORITY +#define PCA INTA_PRIORITY /* PCI Ints */ +#define PCB INTB_PRIORITY +#define PCC INTC_PRIORITY +#define PCD INTD_PRIORITY +#define SER TOP_PRIORITY +#define ERR TOP_PRIORITY +#define PW0 TOP_PRIORITY +#define PW1 TOP_PRIORITY +#define PW2 TOP_PRIORITY +#define PW3 TOP_PRIORITY +#define DM0 NO_PRIORITY /* DMA Ints */ +#define DM1 NO_PRIORITY +#define DM2 NO_PRIORITY +#define DM3 NO_PRIORITY +#define DAE NO_PRIORITY +#define TU0 TIMER_PRIORITY /* TMU Ints */ +#define TU1 NO_PRIORITY +#define TU2 NO_PRIORITY +#define TI2 NO_PRIORITY +#define ATI NO_PRIORITY /* RTC Ints */ +#define PRI NO_PRIORITY +#define CUI RTC_PRIORITY +#define ERI SCIF_PRIORITY /* SCIF Ints */ +#define RXI SCIF_PRIORITY +#define BRI SCIF_PRIORITY +#define TXI SCIF_PRIORITY +#define ITI TOP_PRIORITY /* WDT Ints */ + +/* + * Platform dependent structures: maps and parms block. + */ +struct resource io_resources[] = { + /* To be updated with external devices */ +}; + +struct resource kram_resources[] = { + { "Kernel code", 0, 0 }, /* These must be last in the array */ + { "Kernel data", 0, 0 } /* These must be last in the array */ +}; + +struct resource xram_resources[] = { + /* To be updated with external devices */ +}; + +struct resource rom_resources[] = { + /* To be updated with external devices */ +}; + +struct sh64_platform platform_parms = { + .readonly_rootfs = 1, + .initial_root_dev = 0x0100, + .loader_type = 1, + .io_res_p = io_resources, + .io_res_count = RES_COUNT(io_resources), + .kram_res_p = kram_resources, + .kram_res_count = RES_COUNT(kram_resources), + .xram_res_p = xram_resources, + .xram_res_count = RES_COUNT(xram_resources), + .rom_res_p = rom_resources, + .rom_res_count = RES_COUNT(rom_resources), +}; + +int platform_int_priority[NR_INTC_IRQS] = { + IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD, /* IRQ 0- 7 */ + RES, RES, RES, RES, SER, ERR, PW3, PW2, /* IRQ 8-15 */ + PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES, /* IRQ 16-23 */ + RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 24-31 */ + TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI, /* IRQ 32-39 */ + RXI, BRI, TXI, RES, RES, RES, RES, RES, /* IRQ 40-47 */ + RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 48-55 */ + RES, RES, RES, RES, RES, RES, RES, ITI, /* IRQ 56-63 */ +}; + +void __init platform_setup(void) +{ + /* ROM/RAM platform leaves the decision to head.S, for now */ + platform_parms.fpu_flags = fpu_in_use; +} + +void __init platform_monitor(void) +{ + /* Nothing yet .. */ +} + +void __init platform_reserve(void) +{ + /* Nothing yet .. */ +} + +const char *get_system_type(void) +{ + return "ROM/RAM"; +} + diff --git a/arch/sh64/mach-sim/Makefile b/arch/sh64/mach-sim/Makefile new file mode 100644 index 000000000..819c4078f --- /dev/null +++ b/arch/sh64/mach-sim/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the SH-5 Simulator specific parts of the kernel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). 
+# + +O_TARGET := sim.o + +obj-y := setup.o + +include $(TOPDIR)/Rules.make + diff --git a/arch/sh64/mach-sim/setup.c b/arch/sh64/mach-sim/setup.c new file mode 100644 index 000000000..a68639cb4 --- /dev/null +++ b/arch/sh64/mach-sim/setup.c @@ -0,0 +1,164 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/mach-sim/setup.c + * + * ST50 Simulator Platform Support + * + * This file handles the architecture-dependent parts of initialization + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * lethal@linux-sh.org: 15th May 2003 + * Use the generic procfs cpuinfo interface, just return a valid board name. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_BLK_DEV_INITRD +#include "../rootfs/rootfs.h" +#endif + +static __init void platform_monitor(void); +static __init void platform_setup(void); +static __init void platform_reserve(void); + + +#define PHYS_MEMORY CONFIG_MEMORY_SIZE_IN_MB*1024*1024 + +#if (PHYS_MEMORY < P1SEG_FOOTPRINT_RAM) +#error "Invalid kernel configuration. Physical memory below footprint requirements." +#endif + +#define RAM_DISK_START CONFIG_MEMORY_START+P1SEG_INITRD_BLOCK /* Top of 4MB */ +#ifdef PLATFORM_ROMFS_SIZE +#define RAM_DISK_SIZE (PAGE_ALIGN(PLATFORM_ROMFS_SIZE)) /* Variable Top */ +#if ((RAM_DISK_START + RAM_DISK_SIZE) > (CONFIG_MEMORY_START + PHYS_MEMORY)) +#error "Invalid kernel configuration. ROM RootFS exceeding physical memory." +#endif +#else +#define RAM_DISK_SIZE P1SEG_INITRD_BLOCK_SIZE /* Top of 4MB */ +#endif + +#define RES_COUNT(res) ((sizeof((res))/sizeof(struct resource))) + +/* + * Platform Dependent Interrupt Priorities. + */ + +/* Using defaults defined in irq.h */ +#define RES NO_PRIORITY /* Disabled */ +#define IR0 IRL0_PRIORITY /* IRLs */ +#define IR1 IRL1_PRIORITY +#define IR2 IRL2_PRIORITY +#define IR3 IRL3_PRIORITY +#define PCA INTA_PRIORITY /* PCI Ints */ +#define PCB INTB_PRIORITY +#define PCC INTC_PRIORITY +#define PCD INTD_PRIORITY +#define SER TOP_PRIORITY +#define ERR TOP_PRIORITY +#define PW0 TOP_PRIORITY +#define PW1 TOP_PRIORITY +#define PW2 TOP_PRIORITY +#define PW3 TOP_PRIORITY +#define DM0 NO_PRIORITY /* DMA Ints */ +#define DM1 NO_PRIORITY +#define DM2 NO_PRIORITY +#define DM3 NO_PRIORITY +#define DAE NO_PRIORITY +#define TU0 TIMER_PRIORITY /* TMU Ints */ +#define TU1 NO_PRIORITY +#define TU2 NO_PRIORITY +#define TI2 NO_PRIORITY +#define ATI NO_PRIORITY /* RTC Ints */ +#define PRI NO_PRIORITY +#define CUI RTC_PRIORITY +#define ERI SCIF_PRIORITY /* SCIF Ints */ +#define RXI SCIF_PRIORITY +#define BRI SCIF_PRIORITY +#define TXI SCIF_PRIORITY +#define ITI TOP_PRIORITY /* WDT Ints */ + +/* + * Platform dependent structures: maps and parms block. + */ +struct resource io_resources[] = { + /* Nothing yet .. */ +}; + +struct resource kram_resources[] = { + { "Kernel code", 0, 0 }, /* These must be last in the array */ + { "Kernel data", 0, 0 } /* These must be last in the array */ +}; + +struct resource xram_resources[] = { + /* Nothing yet .. */ +}; + +struct resource rom_resources[] = { + /* Nothing yet .. 
*/ +}; + +struct sh64_platform platform_parms = { + .readonly_rootfs = 1, + .initial_root_dev = 0x0100, + .loader_type = 1, + .initrd_start = RAM_DISK_START, + .initrd_size = RAM_DISK_SIZE, + .io_res_p = io_resources, + .io_res_count = RES_COUNT(io_resources), + .kram_res_p = kram_resources, + .kram_res_count = RES_COUNT(kram_resources), + .xram_res_p = xram_resources, + .xram_res_count = RES_COUNT(xram_resources), + .rom_res_p = rom_resources, + .rom_res_count = RES_COUNT(rom_resources), +}; + +int platform_int_priority[NR_IRQS] = { + IR0, IR1, IR2, IR3, PCA, PCB, PCC, PCD, /* IRQ 0- 7 */ + RES, RES, RES, RES, SER, ERR, PW3, PW2, /* IRQ 8-15 */ + PW1, PW0, DM0, DM1, DM2, DM3, DAE, RES, /* IRQ 16-23 */ + RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 24-31 */ + TU0, TU1, TU2, TI2, ATI, PRI, CUI, ERI, /* IRQ 32-39 */ + RXI, BRI, TXI, RES, RES, RES, RES, RES, /* IRQ 40-47 */ + RES, RES, RES, RES, RES, RES, RES, RES, /* IRQ 48-55 */ + RES, RES, RES, RES, RES, RES, RES, ITI, /* IRQ 56-63 */ +}; + +void __init platform_setup(void) +{ + /* Simulator platform leaves the decision to head.S */ + platform_parms.fpu_flags = fpu_in_use; +} + +void __init platform_monitor(void) +{ + /* Nothing yet .. */ +} + +void __init platform_reserve(void) +{ + /* Nothing yet .. */ +} + +const char *get_system_type(void) +{ + return "SH-5 Simulator"; +} + diff --git a/arch/sh64/mm/Makefile b/arch/sh64/mm/Makefile new file mode 100644 index 000000000..ff19378ac --- /dev/null +++ b/arch/sh64/mm/Makefile @@ -0,0 +1,44 @@ +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 2000, 2001 Paolo Alberelli +# Copyright (C) 2003, 2004 Paul Mundt +# +# Makefile for the sh64-specific parts of the Linux memory manager. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +obj-y := init.o fault.o ioremap.o extable.o cache.o tlbmiss.o tlb.o + +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o + +# Special flags for tlbmiss.o. This puts restrictions on the number of +# caller-save registers that the compiler can target when building this file. +# This is required because the code is called from a context in entry.S where +# very few registers have been saved in the exception handler (for speed +# reasons). +# The caller save registers that have been saved and which can be used are +# r2,r3,r4,r5 : argument passing +# r15, r18 : SP and LINK +# tr0-4 : allow all caller-save TR's. The compiler seems to be able to make +# use of them, so it's probably beneficial to performance to save them +# and have them available for it. +# +# The resources not listed below are callee save, i.e. the compiler is free to +# use any of them and will spill them to the stack itself. 
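+#
+# (Each -ffixed-<reg> below tells the compiler to treat that register as
+# unavailable for allocation, which is how the restriction described above
+# is enforced.)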
+ +CFLAGS_tlbmiss.o += -ffixed-r7 \ + -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \ + -ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \ + -ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \ + -ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \ + -ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \ + -ffixed-r41 -ffixed-r42 -ffixed-r43 \ + -ffixed-r60 -ffixed-r61 -ffixed-r62 \ + -fomit-frame-pointer + diff --git a/arch/sh64/mm/cache.c b/arch/sh64/mm/cache.c new file mode 100644 index 000000000..56fbbff5c --- /dev/null +++ b/arch/sh64/mm/cache.c @@ -0,0 +1,1055 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/mm/cache.c + * + * Original version Copyright (C) 2000, 2001 Paolo Alberelli + * Second version Copyright (C) benedict.gaster@superh.com 2002 + * Third version Copyright Richard.Curnow@superh.com 2003 + * Hacks to third version Copyright (C) 2003 Paul Mundt + */ + +/****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for flush_itlb_range */ + +#include + +/* This function is in entry.S */ +extern unsigned long switch_and_save_asid(unsigned long new_asid); + +/* Wired TLB entry for the D-cache */ +static unsigned long long dtlb_cache_slot; + +/** + * sh64_cache_init() + * + * This is pretty much just a straightforward clone of the SH + * detect_cpu_and_cache_system(). + * + * This function is responsible for setting up all of the cache + * info dynamically as well as taking care of CPU probing and + * setting up the relevant subtype data. + * + * FIXME: For the time being, we only really support the SH5-101 + * out of the box, and don't support dynamic probing for things + * like the SH5-103 or even cut2 of the SH5-101. Implement this + * later! + */ +int __init sh64_cache_init(void) +{ + /* + * First, setup some sane values for the I-cache. + */ + cpu_data->icache.ways = 4; + cpu_data->icache.sets = 256; + cpu_data->icache.linesz = L1_CACHE_BYTES; + + /* + * FIXME: This can probably be cleaned up a bit as well.. for example, + * do we really need the way shift _and_ the way_step_shift ?? Judging + * by the existing code, I would guess no.. is there any valid reason + * why we need to be tracking this around? + */ + cpu_data->icache.way_shift = 13; + cpu_data->icache.entry_shift = 5; + cpu_data->icache.set_shift = 4; + cpu_data->icache.way_step_shift = 16; + cpu_data->icache.asid_shift = 2; + + /* + * way offset = cache size / associativity, so just don't factor in + * associativity in the first place.. + */ + cpu_data->icache.way_ofs = cpu_data->icache.sets * + cpu_data->icache.linesz; + + cpu_data->icache.asid_mask = 0x3fc; + cpu_data->icache.idx_mask = 0x1fe0; + cpu_data->icache.epn_mask = 0xffffe000; + cpu_data->icache.flags = 0; + + /* + * Next, setup some sane values for the D-cache. + * + * On the SH5, these are pretty consistent with the I-cache settings, + * so we just copy over the existing definitions.. these can be fixed + * up later, especially if we add runtime CPU probing. + * + * Though in the meantime it saves us from having to duplicate all of + * the above definitions.. 
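+	 *
+	 * For reference, and assuming L1_CACHE_BYTES is 32 here: 4 ways x
+	 * 256 sets x 32-byte lines gives a 32KB cache (8KB per way), which
+	 * is consistent with way_shift = 13 and idx_mask = 0x1fe0 above.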
+ */ + cpu_data->dcache = cpu_data->icache; + + /* + * Setup any cache-related flags here + */ +#if defined(CONFIG_DCACHE_WRITE_THROUGH) + set_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)); +#elif defined(CONFIG_DCACHE_WRITE_BACK) + set_bit(SH_CACHE_MODE_WB, &(cpu_data->dcache.flags)); +#endif + + /* + * We also need to reserve a slot for the D-cache in the DTLB, so we + * do this now .. + */ + dtlb_cache_slot = sh64_get_wired_dtlb_entry(); + + return 0; +} + +/*##########################################################################*/ + +/* From here onwards, a rewrite of the implementation, + by Richard.Curnow@superh.com. + + The major changes in this compared to the old version are; + 1. use more selective purging through OCBP instead of using ALLOCO to purge + by natural replacement. This avoids purging out unrelated cache lines + that happen to be in the same set. + 2. exploit the APIs copy_user_page and clear_user_page better + 3. be more selective about I-cache purging, in particular use invalidate_all + more sparingly. + + */ + +/*########################################################################## + SUPPORT FUNCTIONS + ##########################################################################*/ + +/****************************************************************************/ +/* The following group of functions deal with mapping and unmapping a temporary + page into the DTLB slot that have been set aside for our exclusive use. */ +/* In order to accomplish this, we use the generic interface for adding and + removing a wired slot entry as defined in arch/sh64/mm/tlb.c */ +/****************************************************************************/ + +static unsigned long slot_own_flags; + +static inline void sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, unsigned long paddr) +{ + local_irq_save(slot_own_flags); + sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr); +} + +static inline void sh64_teardown_dtlb_cache_slot(void) +{ + sh64_teardown_tlb_slot(dtlb_cache_slot); + local_irq_restore(slot_own_flags); +} + +/****************************************************************************/ + +#ifndef CONFIG_ICACHE_DISABLED + +static void __inline__ sh64_icache_inv_all(void) +{ + unsigned long long addr, flag, data; + unsigned int flags; + + addr=ICCR0; + flag=ICCR0_ICI; + data=0; + + /* Make this a critical section for safety (probably not strictly necessary.) */ + local_irq_save(flags); + + /* Without %1 it gets unexplicably wrong */ + asm volatile("getcfg %3, 0, %0\n\t" + "or %0, %2, %0\n\t" + "putcfg %3, 0, %0\n\t" + "synci" + : "=&r" (data) + : "0" (data), "r" (flag), "r" (addr)); + + local_irq_restore(flags); +} + +static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end) +{ + /* Invalidate range of addresses [start,end] from the I-cache, where + * the addresses lie in the kernel superpage. 
*/ + + unsigned long long ullend, addr, aligned_start; +#if (NEFF == 32) + aligned_start = (unsigned long long)(signed long long)(signed long) start; +#else +#error "NEFF != 32" +#endif + aligned_start &= L1_CACHE_ALIGN_MASK; + addr = aligned_start; +#if (NEFF == 32) + ullend = (unsigned long long) (signed long long) (signed long) end; +#else +#error "NEFF != 32" +#endif + while (addr <= ullend) { + asm __volatile__ ("icbi %0, 0" : : "r" (addr)); + addr += L1_CACHE_BYTES; + } +} + +static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr) +{ + /* If we get called, we know that vma->vm_flags contains VM_EXEC. + Also, eaddr is page-aligned. */ + + unsigned long long addr, end_addr; + unsigned long flags = 0; + unsigned long running_asid, vma_asid; + addr = eaddr; + end_addr = addr + PAGE_SIZE; + + /* Check whether we can use the current ASID for the I-cache + invalidation. For example, if we're called via + access_process_vm->flush_cache_page->here, (e.g. when reading from + /proc), 'running_asid' will be that of the reader, not of the + victim. + + Also, note the risk that we might get pre-empted between the ASID + compare and blocking IRQs, and before we regain control, the + pid->ASID mapping changes. However, the whole cache will get + invalidated when the mapping is renewed, so the worst that can + happen is that the loop below ends up invalidating somebody else's + cache entries. + */ + + running_asid = get_asid(); + vma_asid = (vma->vm_mm->context & MMU_CONTEXT_ASID_MASK); + if (running_asid != vma_asid) { + local_irq_save(flags); + switch_and_save_asid(vma_asid); + } + while (addr < end_addr) { + /* Worth unrolling a little */ + asm __volatile__("icbi %0, 0" : : "r" (addr)); + asm __volatile__("icbi %0, 32" : : "r" (addr)); + asm __volatile__("icbi %0, 64" : : "r" (addr)); + asm __volatile__("icbi %0, 96" : : "r" (addr)); + addr += 128; + } + if (running_asid != vma_asid) { + switch_and_save_asid(running_asid); + local_irq_restore(flags); + } +} + +/****************************************************************************/ + +static void sh64_icache_inv_user_page_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + /* Used for invalidating big chunks of I-cache, i.e. assume the range + is whole pages. If 'start' or 'end' is not page aligned, the code + is conservative and invalidates to the ends of the enclosing pages. + This is functionally OK, just a performance loss. */ + + /* See the comments below in sh64_dcache_purge_user_range() regarding + the choice of algorithm. However, for the I-cache option (2) isn't + available because there are no physical tags so aliases can't be + resolved. The icbi instruction has to be used through the user + mapping. Because icbi is cheaper than ocbp on a cache hit, it + would be cheaper to use the selective code for a large range than is + possible with the D-cache. Just assume 64 for now as a working + figure. 
+ */ + + int n_pages; + + if (!mm) return; + + n_pages = ((end - start) >> PAGE_SHIFT); + if (n_pages >= 64) { + sh64_icache_inv_all(); + } else { + unsigned long aligned_start; + unsigned long eaddr; + unsigned long after_last_page_start; + unsigned long mm_asid, current_asid; + unsigned long long flags = 0ULL; + + mm_asid = mm->context & MMU_CONTEXT_ASID_MASK; + current_asid = get_asid(); + + if (mm_asid != current_asid) { + /* Switch ASID and run the invalidate loop under cli */ + local_irq_save(flags); + switch_and_save_asid(mm_asid); + } + + aligned_start = start & PAGE_MASK; + after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK); + + while (aligned_start < after_last_page_start) { + struct vm_area_struct *vma; + unsigned long vma_end; + vma = find_vma(mm, aligned_start); + if (!vma || (aligned_start <= vma->vm_end)) { + /* Avoid getting stuck in an error condition */ + aligned_start += PAGE_SIZE; + continue; + } + vma_end = vma->vm_end; + if (vma->vm_flags & VM_EXEC) { + /* Executable */ + eaddr = aligned_start; + while (eaddr < vma_end) { + sh64_icache_inv_user_page(vma, eaddr); + eaddr += PAGE_SIZE; + } + } + aligned_start = vma->vm_end; /* Skip to start of next region */ + } + if (mm_asid != current_asid) { + switch_and_save_asid(current_asid); + local_irq_restore(flags); + } + } +} + +static void sh64_icache_inv_user_small_range(struct mm_struct *mm, + unsigned long start, int len) +{ + + /* Invalidate a small range of user context I-cache, not necessarily + page (or even cache-line) aligned. */ + + unsigned long long eaddr = start; + unsigned long long eaddr_end = start + len; + unsigned long current_asid, mm_asid; + unsigned long long flags; + unsigned long long epage_start; + + /* Since this is used inside ptrace, the ASID in the mm context + typically won't match current_asid. We'll have to switch ASID to do + this. For safety, and given that the range will be small, do all + this under cli. + + Note, there is a hazard that the ASID in mm->context is no longer + actually associated with mm, i.e. if the mm->context has started a + new cycle since mm was last active. However, this is just a + performance issue: all that happens is that we invalidate lines + belonging to another mm, so the owning process has to refill them + when that mm goes live again. mm itself can't have any cache + entries because there will have been a flush_cache_all when the new + mm->context cycle started. */ + + /* Align to start of cache line. Otherwise, suppose len==8 and start + was at 32N+28 : the last 4 bytes wouldn't get invalidated. */ + eaddr = start & L1_CACHE_ALIGN_MASK; + eaddr_end = start + len; + + local_irq_save(flags); + mm_asid = mm->context & MMU_CONTEXT_ASID_MASK; + current_asid = switch_and_save_asid(mm_asid); + + epage_start = eaddr & PAGE_MASK; + + while (eaddr < eaddr_end) + { + asm __volatile__("icbi %0, 0" : : "r" (eaddr)); + eaddr += L1_CACHE_BYTES; + } + switch_and_save_asid(current_asid); + local_irq_restore(flags); +} + +static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end) +{ + /* The icbi instruction never raises ITLBMISS. i.e. if there's not a + cache hit on the virtual tag the instruction ends there, without a + TLB lookup. */ + + unsigned long long aligned_start; + unsigned long long ull_end; + unsigned long long addr; + + ull_end = end; + + /* Just invalidate over the range using the natural addresses. TLB + miss handling will be OK (TBC). 
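Illustrative aside (not part of the patch): the loops in this family all share one addressing discipline, round the start of the range down to an L1 cache-line boundary, then step one line at a time issuing icbi/ocbp/ocbwb until the whole range is covered (hence the note above that len==8 starting at 32N+28 needs two lines, not one). A self-contained sketch of that arithmetic, with a hypothetical 32-byte line size standing in for L1_CACHE_BYTES and L1_CACHE_ALIGN_MASK, and the end address treated as inclusive as in the kernel-range loops:

#include <stdio.h>

#define LINE_BYTES 32UL                  /* hypothetical stand-in for L1_CACHE_BYTES      */
#define LINE_MASK  (~(LINE_BYTES - 1))   /* hypothetical stand-in for L1_CACHE_ALIGN_MASK */

/* How many per-line operations does a byte range cost once its start is
   rounded down to a line boundary?  'end' is the last byte of the range. */
static unsigned long lines_touched(unsigned long start, unsigned long end)
{
        unsigned long addr = start & LINE_MASK;
        unsigned long n = 0;

        while (addr <= end) {
                n++;
                addr += LINE_BYTES;
        }
        return n;
}

int main(void)
{
        /* the len==8 at 32N+28 case mentioned above straddles two lines */
        printf("%lu\n", lines_touched(0x1000 + 28, 0x1000 + 28 + 8 - 1));
        return 0;
}

Only the start needs explicit rounding; the loop condition itself carries the walk across the final partial line.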
Since it's for the current process, + either we're already in the right ASID context, or the ASIDs have + been recycled since we were last active in which case we might just + invalidate another processes I-cache entries : no worries, just a + performance drop for him. */ + aligned_start = start & L1_CACHE_ALIGN_MASK; + addr = aligned_start; + while (addr < ull_end) { + asm __volatile__ ("icbi %0, 0" : : "r" (addr)); + asm __volatile__ ("nop"); + asm __volatile__ ("nop"); + addr += L1_CACHE_BYTES; + } +} + +#endif /* !CONFIG_ICACHE_DISABLED */ + +/****************************************************************************/ + +#ifndef CONFIG_DCACHE_DISABLED + +/* Buffer used as the target of alloco instructions to purge data from cache + sets by natural eviction. -- RPC */ +#define DUMMY_ALLOCO_AREA_SIZE L1_CACHE_SIZE_BYTES + (1024 * 4) +static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, }; + +/****************************************************************************/ + +static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets) +{ + /* Purge all ways in a particular block of sets, specified by the base + set number and number of sets. Can handle wrap-around, if that's + needed. */ + + int dummy_buffer_base_set; + unsigned long long eaddr, eaddr0, eaddr1; + int j; + int set_offset; + + dummy_buffer_base_set = ((int)&dummy_alloco_area & cpu_data->dcache.idx_mask) >> cpu_data->dcache.entry_shift; + set_offset = sets_to_purge_base - dummy_buffer_base_set; + + for (j=0; jdcache.sets - 1); + eaddr0 = (unsigned long long)dummy_alloco_area + (set_offset << cpu_data->dcache.entry_shift); + + /* Do one alloco which hits the required set per cache way. For + write-back mode, this will purge the #ways resident lines. There's + little point unrolling this loop because the allocos stall more if + they're too close together. */ + eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways; + for (eaddr=eaddr0; eaddrdcache.way_ofs) { + asm __volatile__ ("alloco %0, 0" : : "r" (eaddr)); + } + + eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways; + for (eaddr=eaddr0; eaddrdcache.way_ofs) { + /* Load from each address. Required because alloco is a NOP if + the cache is write-through. Write-through is a config option. */ + if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags))) + *(volatile unsigned char *)(int)eaddr; + } + } + + /* Don't use OCBI to invalidate the lines. That costs cycles directly. + If the dummy block is just left resident, it will naturally get + evicted as required. */ + + return; +} + +/****************************************************************************/ + +static void sh64_dcache_purge_all(void) +{ + /* Purge the entire contents of the dcache. The most efficient way to + achieve this is to use alloco instructions on a region of unused + memory equal in size to the cache, thereby causing the current + contents to be discarded by natural eviction. The alternative, + namely reading every tag, setting up a mapping for the corresponding + page and doing an OCBP for the line, would be much more expensive. + */ + + sh64_dcache_purge_sets(0, cpu_data->dcache.sets); + + return; + +} + +/****************************************************************************/ + +static void sh64_dcache_purge_kernel_range(unsigned long start, unsigned long end) +{ + /* Purge the range of addresses [start,end] from the D-cache. The + addresses lie in the superpage mapping. 
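Illustrative aside (not part of the patch): the eviction trick in sh64_dcache_purge_sets() above is purely an addressing game, pick the offset into dummy_alloco_area that indexes the victim set, then touch that offset once per way (the ways being way_ofs bytes apart), so each alloco displaces whatever the set currently holds. A self-contained sketch of the index arithmetic under a hypothetical geometry (512 sets, 4 ways, 32-byte lines; the constants stand in for cpu_data->dcache.*):

#include <stdio.h>

#define ENTRY_SHIFT 5                                        /* log2(line size)    */
#define N_SETS      512
#define N_WAYS      4
#define IDX_MASK    ((N_SETS - 1) << ENTRY_SHIFT)
#define WAY_OFS     ((unsigned long)N_SETS << ENTRY_SHIFT)   /* bytes between ways */

/* Which set does an address index?  (Same arithmetic the purge code above
   applies to &dummy_alloco_area.) */
static int set_of(unsigned long addr)
{
        return (int)((addr & IDX_MASK) >> ENTRY_SHIFT);
}

int main(void)
{
        unsigned long dummy_base = 0x80010000UL;  /* stands in for dummy_alloco_area */
        int target_set = 100;
        int set_offset = (target_set - set_of(dummy_base)) & (N_SETS - 1);
        unsigned long eaddr0 = dummy_base + ((unsigned long)set_offset << ENTRY_SHIFT);
        int way;

        /* one address per way lands in 'target_set'; an alloco at each one
           evicts whatever that set currently holds */
        for (way = 0; way < N_WAYS; way++) {
                unsigned long a = eaddr0 + (unsigned long)way * WAY_OFS;
                printf("way %d -> 0x%lx (set %d)\n", way, a, set_of(a));
        }
        return 0;
}

Because the per-way addresses fan out across one whole cache's worth of address space from the chosen offset, the dummy area has to span the cache size plus some slack, which matches DUMMY_ALLOCO_AREA_SIZE above.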
There's no harm if we + overpurge at either end - just a small performance loss. */ + unsigned long long ullend, addr, aligned_start; +#if (NEFF == 32) + aligned_start = (unsigned long long)(signed long long)(signed long) start; +#else +#error "NEFF != 32" +#endif + aligned_start &= L1_CACHE_ALIGN_MASK; + addr = aligned_start; +#if (NEFF == 32) + ullend = (unsigned long long) (signed long long) (signed long) end; +#else +#error "NEFF != 32" +#endif + while (addr <= ullend) { + asm __volatile__ ("ocbp %0, 0" : : "r" (addr)); + addr += L1_CACHE_BYTES; + } + return; +} + +/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for + anything else in the kernel */ +#define MAGIC_PAGE0_START 0xffffffffec000000ULL + +static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr, unsigned long eaddr) +{ + /* Purge the physical page 'paddr' from the cache. It's known that any + cache lines requiring attention have the same page colour as the the + address 'eaddr'. + + This relies on the fact that the D-cache matches on physical tags + when no virtual tag matches. So we create an alias for the original + page and purge through that. (Alternatively, we could have done + this by switching ASID to match the original mapping and purged + through that, but that involves ASID switching cost + probably a + TLBMISS + refill anyway.) + */ + + unsigned long long magic_page_start; + unsigned long long magic_eaddr, magic_eaddr_end; + + magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK); + + /* As long as the kernel is not pre-emptible, this doesn't need to be + under cli/sti. */ + + sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr); + + magic_eaddr = magic_page_start; + magic_eaddr_end = magic_eaddr + PAGE_SIZE; + while (magic_eaddr < magic_eaddr_end) { + /* Little point in unrolling this loop - the OCBPs are blocking + and won't go any quicker (i.e. the loop overhead is parallel + to part of the OCBP execution.) */ + asm __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr)); + magic_eaddr += L1_CACHE_BYTES; + } + + sh64_teardown_dtlb_cache_slot(); +} + +/****************************************************************************/ + +static void sh64_dcache_purge_phy_page(unsigned long paddr) +{ + /* Pure a page given its physical start address, by creating a + temporary 1 page mapping and purging across that. Even if we know + the virtual address (& vma or mm) of the page, the method here is + more elegant because it avoids issues of coping with page faults on + the purge instructions (i.e. no special-case code required in the + critical path in the TLB miss handling). */ + + unsigned long long eaddr_start, eaddr, eaddr_end; + int i; + + /* As long as the kernel is not pre-emptible, this doesn't need to be + under cli/sti. 
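Illustrative aside (not part of the patch): the alias construction in sh64_dcache_purge_coloured_phy_page() above comes down to one masked add, keep the synonym (colour) bits of the user effective address and splice them onto the magic base, so the wired DTLB entry lands in the same cache sets that the user mapping used. A self-contained sketch, assuming one synonym bit above a 4 KiB page; the constants are hypothetical stand-ins for CACHE_OC_SYN_MASK and MAGIC_PAGE0_START:

#include <stdio.h>

#define OC_N_SYNBITS  1
#define OC_SYN_MASK   (((1UL << OC_N_SYNBITS) - 1) << 12)
#define MAGIC_START   0xec000000UL      /* plays the role of MAGIC_PAGE0_START */

/* Choose the alias page that has the same D-cache colour as 'eaddr', as the
   purge-through-alias code above does before wiring the DTLB slot. */
static unsigned long alias_for(unsigned long eaddr)
{
        return MAGIC_START + (eaddr & OC_SYN_MASK);
}

int main(void)
{
        printf("0x%lx\n", alias_for(0x4000f123UL));  /* colour 1 -> 0xec001000 */
        printf("0x%lx\n", alias_for(0x4000e123UL));  /* colour 0 -> 0xec000000 */
        return 0;
}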
*/ + + eaddr_start = MAGIC_PAGE0_START; + for (i=0; i < (1 << CACHE_OC_N_SYNBITS); i++) { + sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr); + + eaddr = eaddr_start; + eaddr_end = eaddr + PAGE_SIZE; + while (eaddr < eaddr_end) { + asm __volatile__ ("ocbp %0, 0" : : "r" (eaddr)); + eaddr += L1_CACHE_BYTES; + } + + sh64_teardown_dtlb_cache_slot(); + eaddr_start += PAGE_SIZE; + } +} + +static void sh64_dcache_purge_virt_page(struct mm_struct *mm, unsigned long eaddr) +{ + unsigned long phys; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + pte_t entry; + + pgd = pgd_offset(mm, eaddr); + pmd = pmd_offset(pgd, eaddr); + + if (pmd_none(*pmd) || pmd_bad(*pmd)) + return; + + pte = pte_offset_kernel(pmd, eaddr); + entry = *pte; + + if (pte_none(entry) || !pte_present(entry)) + return; + + phys = pte_val(entry) & PAGE_MASK; + + sh64_dcache_purge_phy_page(phys); +} + +static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + pte_t entry; + unsigned long paddr; + + /* NOTE : all the callers of this have mm->page_table_lock held, so the + following page table traversal is safe even on SMP/pre-emptible. */ + + if (!mm) return; /* No way to find physical address of page */ + pgd = pgd_offset(mm, eaddr); + if (pgd_bad(*pgd)) return; + + pmd = pmd_offset(pgd, eaddr); + if (pmd_none(*pmd) || pmd_bad(*pmd)) return; + + pte = pte_offset_kernel(pmd, eaddr); + entry = *pte; + if (pte_none(entry) || !pte_present(entry)) return; + + paddr = pte_val(entry) & PAGE_MASK; + + sh64_dcache_purge_coloured_phy_page(paddr, eaddr); + +} +/****************************************************************************/ + +static void sh64_dcache_purge_user_range(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + /* There are at least 5 choices for the implementation of this, with + pros (+), cons(-), comments(*): + + 1. ocbp each line in the range through the original user's ASID + + no lines spuriously evicted + - tlbmiss handling (must either handle faults on demand => extra + special-case code in tlbmiss critical path), or map the page in + advance (=> flush_tlb_range in advance to avoid multiple hits) + - ASID switching + - expensive for large ranges + + 2. temporarily map each page in the range to a special effective + address and ocbp through the temporary mapping; relies on the + fact that SH-5 OCB* always do TLB lookup and match on ptags (they + never look at the etags) + + no spurious evictions + - expensive for large ranges + * surely cheaper than (1) + + 3. walk all the lines in the cache, check the tags, if a match + occurs create a page mapping to ocbp the line through + + no spurious evictions + - tag inspection overhead + - (especially for small ranges) + - potential cost of setting up/tearing down page mapping for + every line that matches the range + * cost partly independent of range size + + 4. walk all the lines in the cache, check the tags, if a match + occurs use 4 * alloco to purge the line (+3 other probably + innocent victims) by natural eviction + + no tlb mapping overheads + - spurious evictions + - tag inspection overhead + + 5. implement like flush_cache_all + + no tag inspection overhead + - spurious evictions + - bad for small ranges + + (1) can be ruled out as more expensive than (2). (2) appears best + for small ranges. The choice between (3), (4) and (5) for large + ranges and the range size for the large/small boundary need + benchmarking to determine. 
+ + For now use approach (2) for small ranges and (5) for large ones. + + */ + + int n_pages; + + n_pages = ((end - start) >> PAGE_SHIFT); + if (n_pages >= 64) { +#if 1 + sh64_dcache_purge_all(); +#else + unsigned long long set, way; + unsigned long mm_asid = mm->context & MMU_CONTEXT_ASID_MASK; + for (set = 0; set < cpu_data->dcache.sets; set++) { + unsigned long long set_base_config_addr = CACHE_OC_ADDRESS_ARRAY + (set << cpu_data->dcache.set_shift); + for (way = 0; way < cpu_data->dcache.ways; way++) { + unsigned long long config_addr = set_base_config_addr + (way << cpu_data->dcache.way_step_shift); + unsigned long long tag0; + unsigned long line_valid; + + asm __volatile__("getcfg %1, 0, %0" : "=r" (tag0) : "r" (config_addr)); + line_valid = tag0 & SH_CACHE_VALID; + if (line_valid) { + unsigned long cache_asid; + unsigned long epn; + + cache_asid = (tag0 & cpu_data->dcache.asid_mask) >> cpu_data->dcache.asid_shift; + /* The next line needs some + explanation. The virtual tags + encode bits [31:13] of the virtual + address, bit [12] of the 'tag' being + implied by the cache set index. */ + epn = (tag0 & cpu_data->dcache.epn_mask) | ((set & 0x80) << cpu_data->dcache.entry_shift); + + if ((cache_asid == mm_asid) && (start <= epn) && (epn < end)) { + /* TODO : could optimise this + call by batching multiple + adjacent sets together. */ + sh64_dcache_purge_sets(set, 1); + break; /* Don't waste time inspecting other ways for this set */ + } + } + } + } +#endif + } else { + /* 'Small' range */ + unsigned long aligned_start; + unsigned long eaddr; + unsigned long last_page_start; + + aligned_start = start & PAGE_MASK; + /* 'end' is 1 byte beyond the end of the range */ + last_page_start = (end - 1) & PAGE_MASK; + + eaddr = aligned_start; + while (eaddr <= last_page_start) { + sh64_dcache_purge_user_page(mm, eaddr); + eaddr += PAGE_SIZE; + } + } + return; +} + +static void sh64_dcache_wback_current_user_range(unsigned long start, unsigned long end) +{ + unsigned long long aligned_start; + unsigned long long ull_end; + unsigned long long addr; + + ull_end = end; + + /* Just wback over the range using the natural addresses. TLB miss + handling will be OK (TBC) : the range has just been written to by + the signal frame setup code, so the PTEs must exist. + + Note, if we have CONFIG_PREEMPT and get preempted inside this loop, + it doesn't matter, even if the pid->ASID mapping changes whilst + we're away. In that case the cache will have been flushed when the + mapping was renewed. So the writebacks below will be nugatory (and + we'll doubtless have to fault the TLB entry/ies in again with the + new ASID), but it's a rare case. + */ + aligned_start = start & L1_CACHE_ALIGN_MASK; + addr = aligned_start; + while (addr < ull_end) { + asm __volatile__ ("ocbwb %0, 0" : : "r" (addr)); + addr += L1_CACHE_BYTES; + } +} + +#endif /* !CONFIG_DCACHE_DISABLED */ + +/****************************************************************************/ + +/* These *MUST* lie in an area of virtual address space that's otherwise unused. */ +#define UNIQUE_EADDR_START 0xe0000000UL +#define UNIQUE_EADDR_END 0xe8000000UL + +static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr, unsigned long paddr) +{ + /* Given a physical address paddr, and a user virtual address + user_eaddr which will eventually be mapped to it, create a one-off + kernel-private eaddr mapped to the same paddr. 
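Illustrative aside (not part of the patch): the small/large split above is driven by a simple page count, using the same working figure of 64 pages as the I-cache path earlier. A self-contained sketch of the classification and of how many per-page purges the small-range loop would otherwise issue (PAGE_SHIFT of 12 is a hypothetical stand-in):

#include <stdio.h>

#define PAGE_SHIFT_  12
#define PAGE_SIZE_   (1UL << PAGE_SHIFT_)
#define PAGE_MASK_   (~(PAGE_SIZE_ - 1))
#define BIG_RANGE    64     /* same working figure as the code above */

/* How many whole pages does [start, end) touch, and is it big enough to
   prefer a full purge over per-page purges? */
static void classify(unsigned long start, unsigned long end)
{
        unsigned long n_pages = (end - start) >> PAGE_SHIFT_;
        unsigned long first = start & PAGE_MASK_;
        unsigned long last  = (end - 1) & PAGE_MASK_;   /* 'end' is exclusive */
        unsigned long per_page_purges = ((last - first) >> PAGE_SHIFT_) + 1;

        printf("n_pages=%lu -> %s (%lu per-page purges otherwise)\n",
               n_pages, n_pages >= BIG_RANGE ? "purge all" : "per page",
               per_page_purges);
}

int main(void)
{
        classify(0x400000, 0x400000 + 3 * PAGE_SIZE_);     /* small range */
        classify(0x400000, 0x400000 + 200 * PAGE_SIZE_);   /* large range */
        return 0;
}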
This is used for + creating special destination pages for copy_user_page and + clear_user_page */ + + static unsigned long current_pointer = UNIQUE_EADDR_START; + unsigned long coloured_pointer; + + if (current_pointer == UNIQUE_EADDR_END) { + sh64_dcache_purge_all(); + current_pointer = UNIQUE_EADDR_START; + } + + coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) | (user_eaddr & CACHE_OC_SYN_MASK); + sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr); + + current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS); + + return coloured_pointer; +} + +/****************************************************************************/ + +static void sh64_copy_user_page_coloured(void *to, void *from, unsigned long address) +{ + void *coloured_to; + + /* Discard any existing cache entries of the wrong colour. These are + present quite often, if the kernel has recently used the page + internally, then given it up, then it's been allocated to the user. + */ + sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to); + + coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to)); + sh64_page_copy(from, coloured_to); + + sh64_teardown_dtlb_cache_slot(); +} + +static void sh64_clear_user_page_coloured(void *to, unsigned long address) +{ + void *coloured_to; + + /* Discard any existing kernel-originated lines of the wrong colour (as + above) */ + sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long) to); + + coloured_to = (void *) sh64_make_unique_eaddr(address, __pa(to)); + sh64_page_clear(coloured_to); + + sh64_teardown_dtlb_cache_slot(); +} + +/****************************************************************************/ + +/*########################################################################## + EXTERNALLY CALLABLE API. + ##########################################################################*/ + +/* These functions are described in Documentation/cachetlb.txt. + Each one of these functions varies in behaviour depending on whether the + I-cache and/or D-cache are configured out. + + Note that the Linux term 'flush' corresponds to what is termed 'purge' in + the sh/sh64 jargon for the D-cache, i.e. write back dirty data then + invalidate the cache lines, and 'invalidate' for the I-cache. + */ + +#undef FLUSH_TRACE + +void flush_cache_all(void) +{ + /* Invalidate the entire contents of both caches, after writing back to + memory any dirty data from the D-cache. */ + sh64_dcache_purge_all(); + sh64_icache_inv_all(); +} + +/****************************************************************************/ + +void flush_cache_mm(struct mm_struct *mm) +{ + /* Invalidate an entire user-address space from both caches, after + writing back dirty data (e.g. for shared mmap etc). */ + + /* This could be coded selectively by inspecting all the tags then + doing 4*alloco on any set containing a match (as for + flush_cache_range), but fork/exit/execve (where this is called from) + are expensive anyway. */ + + /* Have to do a purge here, despite the comments re I-cache below. + There could be odd-coloured dirty data associated with the mm still + in the cache - if this gets written out through natural eviction + after the kernel has reused the page there will be chaos. + */ + + sh64_dcache_purge_all(); + + /* The mm being torn down won't ever be active again, so any Icache + lines tagged with its ASID won't be visible for the rest of the + lifetime of this ASID cycle. Before the ASID gets reused, there + will be a flush_cache_all. Hence we don't need to touch the + I-cache. 
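Illustrative aside (not part of the patch): the allocator in sh64_make_unique_eaddr() above overlays the caller's synonym bits onto a cursor that advances by a full colour-group of pages, and purges the whole D-cache before wrapping, so every handout is a fresh, correctly coloured alias. A self-contained sketch with hypothetical constants (one synonym bit, 4 KiB pages) standing in for CACHE_OC_SYN_MASK, CACHE_OC_N_SYNBITS and the UNIQUE_EADDR_* bounds:

#include <stdio.h>

#define PAGE_SIZE_   4096UL
#define SYN_MASK     0x1000UL
#define N_SYNBITS    1
#define EADDR_START  0xe0000000UL
#define EADDR_END    0xe8000000UL

static unsigned long current_pointer = EADDR_START;

static unsigned long make_unique_eaddr(unsigned long user_eaddr)
{
        unsigned long coloured;

        if (current_pointer == EADDR_END) {
                /* the real code calls sh64_dcache_purge_all() before reuse */
                current_pointer = EADDR_START;
        }
        /* keep the user's colour bits, take the rest from the cursor */
        coloured = (current_pointer & ~SYN_MASK) | (user_eaddr & SYN_MASK);
        current_pointer += PAGE_SIZE_ << N_SYNBITS;
        return coloured;
}

int main(void)
{
        printf("0x%lx\n", make_unique_eaddr(0x00401000UL));  /* 0xe0001000 */
        printf("0x%lx\n", make_unique_eaddr(0x00402000UL));  /* 0xe0002000 */
        return 0;
}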
This is similar to the lack of action needed in + flush_tlb_mm - see fault.c. */ +} + +/****************************************************************************/ + +void flush_cache_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + + /* Invalidate (from both caches) the range [start,end) of virtual + addresses from the user address space specified by mm, after writing + back any dirty data. + + Note(1), 'end' is 1 byte beyond the end of the range to flush. + + Note(2), this is called with mm->page_table_lock held.*/ + + sh64_dcache_purge_user_range(mm, start, end); + sh64_icache_inv_user_page_range(mm, start, end); +} + +/****************************************************************************/ + +void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr) +{ + /* Invalidate any entries in either cache for the vma within the user + address space vma->vm_mm for the page starting at virtual address + 'eaddr'. This seems to be used primarily in breaking COW. Note, + the I-cache must be searched too in case the page in question is + both writable and being executed from (e.g. stack trampolines.) + + Note(1), this is called with mm->page_table_lock held. + */ + + sh64_dcache_purge_virt_page(vma->vm_mm, eaddr); + + if (vma->vm_flags & VM_EXEC) { + sh64_icache_inv_user_page(vma, eaddr); + } +} + +/****************************************************************************/ + +#ifndef CONFIG_DCACHE_DISABLED + +void copy_user_page(void *to, void *from, unsigned long address, struct page *page) +{ + /* 'from' and 'to' are kernel virtual addresses (within the superpage + mapping of the physical RAM). 'address' is the user virtual address + where the copy 'to' will be mapped after. This allows a custom + mapping to be used to ensure that the new copy is placed in the + right cache sets for the user to see it without having to bounce it + out via memory. Note however : the call to flush_page_to_ram in + (generic)/mm/memory.c:(break_cow) undoes all this good work in that one + very important case! + + TBD : can we guarantee that on every call, any cache entries for + 'from' are in the same colour sets as 'address' also? i.e. is this + always used just to deal with COW? (I suspect not). */ + + /* There are two possibilities here for when the page 'from' was last accessed: + * by the kernel : this is OK, no purge required. + * by the/a user (e.g. for break_COW) : need to purge. + + If the potential user mapping at 'address' is the same colour as + 'from' there is no need to purge any cache lines from the 'from' + page mapped into cache sets of colour 'address'. (The copy will be + accessing the page through 'from'). + */ + + if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0) { + sh64_dcache_purge_coloured_phy_page(__pa(from), address); + } + + if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) { + /* No synonym problem on destination */ + sh64_page_copy(from, to); + } else { + sh64_copy_user_page_coloured(to, from, address); + } + + /* Note, don't need to flush 'from' page from the cache again - it's + done anyway by the generic code */ +} + +void clear_user_page(void *to, unsigned long address, struct page *page) +{ + /* 'to' is a kernel virtual address (within the superpage + mapping of the physical RAM). 'address' is the user virtual address + where the 'to' page will be mapped after. 
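Illustrative aside (not part of the patch): both copy_user_page() below and clear_user_page() after it hinge on one test, whether the kernel mapping and the eventual user mapping fall in the same D-cache colour. A self-contained sketch of that predicate and the resulting decisions (the mask value is a hypothetical stand-in for CACHE_OC_SYN_MASK):

#include <stdio.h>

#define SYN_MASK 0x1000UL   /* hypothetical CACHE_OC_SYN_MASK */

/* 1 if kernel address 'kaddr' and user address 'uaddr' fall in the same
   D-cache colour, i.e. there is no synonym problem between the mappings. */
static int same_colour(unsigned long kaddr, unsigned long uaddr)
{
        return ((kaddr ^ uaddr) & SYN_MASK) == 0;
}

int main(void)
{
        unsigned long to = 0x80042000UL, from = 0x80043000UL, address = 0x00401000UL;

        if (!same_colour(from, address))
                printf("purge stale 'from' lines of the user's colour first\n");
        if (same_colour(to, address))
                printf("plain copy/clear of 'to' is safe\n");
        else
                printf("go through a coloured alias of 'to'\n");
        return 0;
}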
This allows a custom + mapping to be used to ensure that the new copy is placed in the + right cache sets for the user to see it without having to bounce it + out via memory. + */ + + if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0) { + /* No synonym problem on destination */ + sh64_page_clear(to); + } else { + sh64_clear_user_page_coloured(to, address); + } +} + +#endif /* !CONFIG_DCACHE_DISABLED */ + +/****************************************************************************/ + +void flush_dcache_page(struct page *page) +{ + sh64_dcache_purge_phy_page(page_to_phys(page)); + wmb(); +} + +/****************************************************************************/ + +void flush_icache_range(unsigned long start, unsigned long end) +{ + /* Flush the range [start,end] of kernel virtual adddress space from + the I-cache. The corresponding range must be purged from the + D-cache also because the SH-5 doesn't have cache snooping between + the caches. The addresses will be visible through the superpage + mapping, therefore it's guaranteed that there no cache entries for + the range in cache sets of the wrong colour. + + Primarily used for cohering the I-cache after a module has + been loaded. */ + + /* We also make sure to purge the same range from the D-cache since + flush_page_to_ram() won't be doing this for us! */ + + sh64_dcache_purge_kernel_range(start, end); + wmb(); + sh64_icache_inv_kernel_range(start, end); +} + +/****************************************************************************/ + +void flush_icache_user_range(struct vm_area_struct *vma, + struct page *page, unsigned long addr, int len) +{ + /* Flush the range of user (defined by vma->vm_mm) address space + starting at 'addr' for 'len' bytes from the cache. The range does + not straddle a page boundary, the unique physical page containing + the range is 'page'. This seems to be used mainly for invalidating + an address range following a poke into the program text through the + ptrace() call from another process (e.g. for BRK instruction + insertion). */ + + sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr); + mb(); + + if (vma->vm_flags & VM_EXEC) { + sh64_icache_inv_user_small_range(vma->vm_mm, addr, len); + } +} + +/*########################################################################## + ARCH/SH64 PRIVATE CALLABLE API. + ##########################################################################*/ + +void flush_cache_sigtramp(unsigned long start, unsigned long end) +{ + /* For the address range [start,end), write back the data from the + D-cache and invalidate the corresponding region of the I-cache for + the current process. Used to flush signal trampolines on the stack + to make them executable. */ + + sh64_dcache_wback_current_user_range(start, end); + wmb(); + sh64_icache_inv_current_user_range(start, end); +} + diff --git a/arch/sh64/mm/extable.c b/arch/sh64/mm/extable.c new file mode 100644 index 000000000..802eff88b --- /dev/null +++ b/arch/sh64/mm/extable.c @@ -0,0 +1,80 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/mm/extable.c + * + * Copyright (C) 2003 Richard Curnow + * Copyright (C) 2003, 2004 Paul Mundt + * + * Cloned from the 2.5 SH version.. 
+ */ +#include +#include +#include +#include + +extern unsigned long copy_user_memcpy, copy_user_memcpy_end, __copy_user_fixup; + +static const struct exception_table_entry __copy_user_fixup_ex = { + .fixup = (unsigned long)&__copy_user_fixup, +}; + +/* Some functions that may trap due to a bad user-mode address have too many loads + and stores in them to make it at all practical to label each one and put them all in + the main exception table. + + In particular, the fast memcpy routine is like this. It's fix-up is just to fall back + to a slow byte-at-a-time copy, which is handled the conventional way. So it's functionally + OK to just handle any trap occurring in the fast memcpy with that fixup. */ +static const struct exception_table_entry *check_exception_ranges(unsigned long addr) +{ + if ((addr >= (unsigned long)©_user_memcpy) && + (addr <= (unsigned long)©_user_memcpy_end)) + return &__copy_user_fixup_ex; + + return NULL; +} + +/* Simple binary search */ +const struct exception_table_entry * +search_extable(const struct exception_table_entry *first, + const struct exception_table_entry *last, + unsigned long value) +{ + const struct exception_table_entry *mid; + + mid = check_exception_ranges(value); + if (mid) + return mid; + + while (first <= last) { + long diff; + + mid = (last - first) / 2 + first; + diff = mid->insn - value; + if (diff == 0) + return mid; + else if (diff < 0) + first = mid+1; + else + last = mid-1; + } + + return NULL; +} + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(regs->pc); + if (fixup) { + regs->pc = fixup->fixup; + return 1; + } + + return 0; +} + diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c new file mode 100644 index 000000000..b5ab4a88c --- /dev/null +++ b/arch/sh64/mm/fault.c @@ -0,0 +1,591 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
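Illustrative aside (not part of the patch): apart from the special-cased copy_user_memcpy window, search_extable() above is a plain binary search over entries sorted by faulting-instruction address. A self-contained analogue (the table contents are made up for the example):

#include <stdio.h>

struct ex_entry { unsigned long insn, fixup; };

/* Same search logic as search_extable() above. */
static const struct ex_entry *search(const struct ex_entry *first,
                                     const struct ex_entry *last,
                                     unsigned long value)
{
        while (first <= last) {
                const struct ex_entry *mid = first + (last - first) / 2;
                long diff = (long)(mid->insn - value);

                if (diff == 0)
                        return mid;
                if (diff < 0)
                        first = mid + 1;
                else
                        last = mid - 1;
        }
        return NULL;
}

int main(void)
{
        static const struct ex_entry table[] = {
                { 0x1000, 0x9000 }, { 0x1040, 0x9010 }, { 0x10c0, 0x9020 },
        };
        const struct ex_entry *e = search(&table[0], &table[2], 0x1040);

        if (e)
                printf("fixup at 0x%lx\n", e->fixup);
        else
                printf("no fixup\n");
        return 0;
}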
+ * + * arch/sh64/mm/fault.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes) + * Copyright (C) 2003 Paul Mundt + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* required by inline asm statements */ + +#if defined(CONFIG_SH64_PROC_TLB) +#include +#include +/* Count numbers of tlb refills in each region */ +static unsigned long long calls_to_update_mmu_cache = 0ULL; +static unsigned long long calls_to_flush_tlb_page = 0ULL; +static unsigned long long calls_to_flush_tlb_range = 0ULL; +static unsigned long long calls_to_flush_tlb_mm = 0ULL; +static unsigned long long calls_to_flush_tlb_all = 0ULL; +unsigned long long calls_to_do_slow_page_fault = 0ULL; +unsigned long long calls_to_do_fast_page_fault = 0ULL; + +/* Count size of ranges for flush_tlb_range */ +static unsigned long long flush_tlb_range_1 = 0ULL; +static unsigned long long flush_tlb_range_2 = 0ULL; +static unsigned long long flush_tlb_range_3_4 = 0ULL; +static unsigned long long flush_tlb_range_5_7 = 0ULL; +static unsigned long long flush_tlb_range_8_11 = 0ULL; +static unsigned long long flush_tlb_range_12_15 = 0ULL; +static unsigned long long flush_tlb_range_16_up = 0ULL; + +static unsigned long long page_not_present = 0ULL; + +#endif + +extern void die(const char *,struct pt_regs *,long); + +#define PFLAG(val,flag) (( (val) & (flag) ) ? #flag : "" ) +#define PPROT(flag) PFLAG(pgprot_val(prot),flag) + +static inline void print_prots(pgprot_t prot) +{ + printk("prot is 0x%08lx\n",pgprot_val(prot)); + + printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ), + PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER)); +} + +static inline void print_vma(struct vm_area_struct *vma) +{ + printk("vma start 0x%08lx\n", vma->vm_start); + printk("vma end 0x%08lx\n", vma->vm_end); + + print_prots(vma->vm_page_prot); + printk("vm_flags 0x%08lx\n", vma->vm_flags); +} + +static inline void print_task(struct task_struct *tsk) +{ + printk("Task pid %d\n", tsk->pid); +} + +static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address) +{ + pgd_t *dir; + pmd_t *pmd; + pte_t *pte; + pte_t entry; + + dir = pgd_offset(mm, address); + if (pgd_none(*dir)) { + return NULL; + } + + pmd = pmd_offset(dir, address); + if (pmd_none(*pmd)) { + return NULL; + } + + pte = pte_offset_kernel(pmd, address); + entry = *pte; + + if (pte_none(entry)) { + return NULL; + } + if (!pte_present(entry)) { + return NULL; + } + + return pte; +} + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate + * routines. + */ +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess, + unsigned long textaccess, unsigned long address) +{ + struct task_struct *tsk; + struct mm_struct *mm; + struct vm_area_struct * vma; + const struct exception_table_entry *fixup; + pte_t *pte; + +#if defined(CONFIG_SH64_PROC_TLB) + ++calls_to_do_slow_page_fault; +#endif + + /* SIM + * Note this is now called with interrupts still disabled + * This is to cope with being called for a missing IO port + * address with interupts disabled. This should be fixed as + * soon as we have a better 'fast path' miss handler. + * + * Plus take care how you try and debug this stuff. 
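Illustrative aside (not part of the patch): the PFLAG()/PPROT() helpers above rely on preprocessor stringification, when the bit is set the macro evaluates to the flag's own name, otherwise to an empty string, which keeps the debug printk compact. A self-contained sketch with made-up bit values standing in for the real _PAGE_* definitions:

#include <stdio.h>

#define _PAGE_READ    0x1   /* hypothetical bit values for the example */
#define _PAGE_WRITE   0x2
#define _PAGE_EXECUTE 0x4

/* Same trick as PFLAG() above: stringify the flag name when set. */
#define PFLAG(val, flag) (((val) & (flag)) ? #flag : "")

int main(void)
{
        unsigned long prot = _PAGE_READ | _PAGE_EXECUTE;

        printf("%s %s %s\n", PFLAG(prot, _PAGE_READ),
               PFLAG(prot, _PAGE_WRITE), PFLAG(prot, _PAGE_EXECUTE));
        return 0;
}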
+ * For example, writing debug data to a port which you + * have just faulted on is not going to work. + */ + + tsk = current; + mm = tsk->mm; + + /* Not an IO address, so reenable interrupts */ + sti(); + + /* + * If we're in an interrupt or have no user + * context, we must not take the fault.. + */ + if (in_interrupt() || !mm) + goto no_context; + + /* TLB misses upon some cache flushes get done under cli() */ + down_read(&mm->mmap_sem); + + vma = find_vma(mm, address); + + if (!vma) { +#ifdef DEBUG_FAULT + print_task(tsk); + printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n", + __FUNCTION__,__LINE__, + address,regs->pc,textaccess,writeaccess); + show_regs(regs); +#endif + goto bad_area; + } + if (vma->vm_start <= address) { + goto good_area; + } + + if (!(vma->vm_flags & VM_GROWSDOWN)) { +#ifdef DEBUG_FAULT + print_task(tsk); + printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n", + __FUNCTION__,__LINE__, + address,regs->pc,textaccess,writeaccess); + show_regs(regs); + + print_vma(vma); +#endif + goto bad_area; + } + if (expand_stack(vma, address)) { +#ifdef DEBUG_FAULT + print_task(tsk); + printk("%s:%d fault, address is 0x%08x PC %016Lx textaccess %d writeaccess %d\n", + __FUNCTION__,__LINE__, + address,regs->pc,textaccess,writeaccess); + show_regs(regs); +#endif + goto bad_area; + } +/* + * Ok, we have a good vm_area for this memory access, so + * we can handle it.. + */ +good_area: + if (writeaccess) { + if (!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { + if (!(vma->vm_flags & (VM_READ | VM_EXEC))) + goto bad_area; + } + + if (textaccess) { + if (!(vma->vm_flags & VM_EXEC)) + goto bad_area; + } + + /* + * If for any reason at all we couldn't handle the fault, + * make sure we exit gracefully rather than endlessly redo + * the fault. + */ +survive: + switch (handle_mm_fault(mm, vma, address, writeaccess)) { + case 1: + tsk->min_flt++; + break; + case 2: + tsk->maj_flt++; + break; + case 0: + goto do_sigbus; + default: + goto out_of_memory; + } + /* If we get here, the page fault has been handled. Do the TLB refill + now from the newly-setup PTE, to avoid having to fault again right + away on the same instruction. */ + pte = lookup_pte (mm, address); + if (!pte) { + /* From empirical evidence, we can get here, due to + !pte_present(pte). (e.g. if a swap-in occurs, and the page + is swapped back out again before the process that wanted it + gets rescheduled?) */ + goto no_pte; + } + + __do_tlb_refill(address, textaccess, pte); + +no_pte: + + up_read(&mm->mmap_sem); + return; + +/* + * Something tried to access memory that isn't in our memory map.. + * Fix it, but check if it's kernel or user first.. + */ +bad_area: +#ifdef DEBUG_FAULT + printk("fault:bad area\n"); +#endif + up_read(&mm->mmap_sem); + + if (user_mode(regs)) { + printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx opcode=%08lx\n", + address, current->pid, current->comm, + (unsigned long) regs->pc, + *(unsigned long *)(u32)(regs->pc & ~3)); + show_regs(regs); + if (tsk->pid == 1) { + panic("INIT had user mode bad_area\n"); + } + tsk->thread.address = address; + tsk->thread.error_code = writeaccess; + force_sig(SIGSEGV, tsk); + return; + } + +no_context: +#ifdef DEBUG_FAULT + printk("fault:No context\n"); +#endif + /* Are we prepared to handle this kernel fault? */ + fixup = search_exception_tables(regs->pc); + if (fixup) { + regs->pc = fixup->fixup; + return; + } + +/* + * Oops. The kernel tried to access some bad page. 
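Illustrative aside (not part of the patch): the good_area: checks above reduce to a small predicate on the vma flags, writes need VM_WRITE, everything else needs VM_READ or VM_EXEC, and an instruction fetch additionally needs VM_EXEC. A self-contained restatement (the VM_* bit values are hypothetical stand-ins):

#include <stdio.h>

#define VM_READ  0x1
#define VM_WRITE 0x2
#define VM_EXEC  0x4

/* Returns 1 if the fault is permitted by the vma's flags, 0 for SIGSEGV,
   mirroring the branches above. */
static int access_ok_for(unsigned long vm_flags, int writeaccess, int textaccess)
{
        if (writeaccess) {
                if (!(vm_flags & VM_WRITE))
                        return 0;
        } else {
                if (!(vm_flags & (VM_READ | VM_EXEC)))
                        return 0;
        }
        if (textaccess && !(vm_flags & VM_EXEC))
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", access_ok_for(VM_READ, 1, 0));            /* write to read-only: 0 */
        printf("%d\n", access_ok_for(VM_READ | VM_EXEC, 0, 1));  /* instruction fetch: 1  */
        return 0;
}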
We'll have to + * terminate things with extreme prejudice. + * + */ + if (address < PAGE_SIZE) + printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference"); + else + printk(KERN_ALERT "Unable to handle kernel paging request"); + printk(" at virtual address %08lx\n", address); + printk(KERN_ALERT "pc = %08Lx%08Lx\n", regs->pc >> 32, regs->pc & 0xffffffff); + die("Oops", regs, writeaccess); + do_exit(SIGKILL); + +/* + * We ran out of memory, or some other thing happened to us that made + * us unable to handle the page fault gracefully. + */ +out_of_memory: + if (current->pid == 1) { + panic("INIT out of memory\n"); + yield(); + goto survive; + } + printk("fault:Out of memory\n"); + up_read(&mm->mmap_sem); + if (current->pid == 1) { + yield(); + down_read(&mm->mmap_sem); + goto survive; + } + printk("VM: killing process %s\n", tsk->comm); + if (user_mode(regs)) + do_exit(SIGKILL); + goto no_context; + +do_sigbus: + printk("fault:Do sigbus\n"); + up_read(&mm->mmap_sem); + + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + tsk->thread.address = address; + tsk->thread.error_code = writeaccess; + tsk->thread.trap_no = 14; + force_sig(SIGBUS, tsk); + + /* Kernel mode? Handle exceptions or die */ + if (!user_mode(regs)) + goto no_context; +} + + +void flush_tlb_all(void); + +void update_mmu_cache(struct vm_area_struct * vma, + unsigned long address, pte_t pte) +{ +#if defined(CONFIG_SH64_PROC_TLB) + ++calls_to_update_mmu_cache; +#endif + + /* + * This appears to get called once for every pte entry that gets + * established => I don't think it's efficient to try refilling the + * TLBs with the pages - some may not get accessed even. Also, for + * executable pages, it is impossible to determine reliably here which + * TLB they should be mapped into (or both even). + * + * So, just do nothing here and handle faults on demand. In the + * TLBMISS handling case, the refill is now done anyway after the pte + * has been fixed up, so that deals with most useful cases. + */ +} + +static void __flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + unsigned long long match, pteh=0, lpage; + unsigned long tlb; + struct mm_struct *mm; + + mm = vma->vm_mm; + + if (mm->context == NO_CONTEXT) + return; + + /* + * Sign-extend based on neff. + */ + lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page; + match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID; + match |= lpage; + + /* Do ITLB : don't bother for pages in non-exectutable VMAs */ + if (vma->vm_flags & VM_EXEC) { + for_each_itlb_entry(tlb) { + asm volatile ("getcfg %1, 0, %0" + : "=r" (pteh) + : "r" (tlb) ); + + if (pteh == match) { + __flush_tlb_slot(tlb); + break; + } + + } + } + + /* Do DTLB : any page could potentially be in here. 
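Illustrative aside (not part of the patch): the TLB scan in __flush_tlb_page() below works by reconstructing the PTEH value a matching entry would hold (sign-extended page, ASID field, valid bit) and comparing it against each slot read back with getcfg. A self-contained sketch of the match construction, with hypothetical field positions standing in for NEFF_SIGN, NEFF_MASK, PTEH_ASID_SHIFT and PTEH_VALID:

#include <stdio.h>

#define NEFF_SIGN        (1ULL << 31)     /* NEFF == 32 assumed          */
#define NEFF_MASK        (~0ULL << 32)
#define PTEH_ASID_SHIFT  2                /* hypothetical field position */
#define PTEH_VALID       1ULL
#define ASID_MASK        0xffUL

/* Build the PTEH value a matching ITLB/DTLB entry would hold for
   (page, asid), as __flush_tlb_page() does before scanning. */
static unsigned long long pteh_match(unsigned long long page, unsigned long asid)
{
        unsigned long long lpage = (page & NEFF_SIGN) ? (page | NEFF_MASK) : page;

        return lpage | ((asid & ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID;
}

int main(void)
{
        /* bit 31 is set, so the page sign-extends: ffffffffc0001015 */
        printf("%llx\n", pteh_match(0xc0001000ULL, 5));
        return 0;
}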
*/ + for_each_dtlb_entry(tlb) { + asm volatile ("getcfg %1, 0, %0" + : "=r" (pteh) + : "r" (tlb) ); + + if (pteh == match) { + __flush_tlb_slot(tlb); + break; + } + + } +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + unsigned long flags; + +#if defined(CONFIG_SH64_PROC_TLB) + ++calls_to_flush_tlb_page; +#endif + + if (vma->vm_mm) { + page &= PAGE_MASK; + local_irq_save(flags); + __flush_tlb_page(vma, page); + local_irq_restore(flags); + } +} + +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + unsigned long flags; + unsigned long long match, pteh=0, pteh_epn, pteh_low; + unsigned long tlb; + struct mm_struct *mm; + + mm = vma->vm_mm; + +#if defined(CONFIG_SH64_PROC_TLB) + ++calls_to_flush_tlb_range; + + { + unsigned long size = (end - 1) - start; + size >>= 12; /* divide by PAGE_SIZE */ + size++; /* end=start+4096 => 1 page */ + switch (size) { + case 1 : flush_tlb_range_1++; break; + case 2 : flush_tlb_range_2++; break; + case 3 ... 4 : flush_tlb_range_3_4++; break; + case 5 ... 7 : flush_tlb_range_5_7++; break; + case 8 ... 11 : flush_tlb_range_8_11++; break; + case 12 ... 15 : flush_tlb_range_12_15++; break; + default : flush_tlb_range_16_up++; break; + } + } +#endif + + if (mm->context == NO_CONTEXT) + return; + + local_irq_save(flags); + + start &= PAGE_MASK; + end &= PAGE_MASK; + + match = ((mm->context & MMU_CONTEXT_ASID_MASK) << PTEH_ASID_SHIFT) | PTEH_VALID; + + /* Flush ITLB */ + for_each_itlb_entry(tlb) { + asm volatile ("getcfg %1, 0, %0" + : "=r" (pteh) + : "r" (tlb) ); + + pteh_epn = pteh & PAGE_MASK; + pteh_low = pteh & ~PAGE_MASK; + + if (pteh_low == match && pteh_epn >= start && pteh_epn <= end) + __flush_tlb_slot(tlb); + } + + /* Flush DTLB */ + for_each_dtlb_entry(tlb) { + asm volatile ("getcfg %1, 0, %0" + : "=r" (pteh) + : "r" (tlb) ); + + pteh_epn = pteh & PAGE_MASK; + pteh_low = pteh & ~PAGE_MASK; + + if (pteh_low == match && pteh_epn >= start && pteh_epn <= end) + __flush_tlb_slot(tlb); + } + + local_irq_restore(flags); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + unsigned long flags; + +#if defined(CONFIG_SH64_PROC_TLB) + ++calls_to_flush_tlb_mm; +#endif + + if (mm->context == NO_CONTEXT) + return; + + local_irq_save(flags); + + mm->context=NO_CONTEXT; + if(mm==current->mm) + activate_context(mm); + + local_irq_restore(flags); + +} + +void flush_tlb_all(void) +{ + /* Invalidate all, including shared pages, excluding fixed TLBs */ + + unsigned long flags, tlb; + +#if defined(CONFIG_SH64_PROC_TLB) + ++calls_to_flush_tlb_all; +#endif + + local_irq_save(flags); + + /* Flush each ITLB entry */ + for_each_itlb_entry(tlb) { + __flush_tlb_slot(tlb); + } + + /* Flush each DTLB entry */ + for_each_dtlb_entry(tlb) { + __flush_tlb_slot(tlb); + } + + local_irq_restore(flags); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + /* FIXME: Optimize this later.. 
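Illustrative aside (not part of the patch): the CONFIG_SH64_PROC_TLB histogram above buckets flush_tlb_range() calls by span; note the rounding convention, the page count is computed from end - 1 so that end == start + 4096 counts as exactly one page. A self-contained sketch of the bucketing (4 KiB pages assumed):

#include <stdio.h>

/* Classify a flush_tlb_range() call by how many pages it spans, using the
   same arithmetic as the histogram code above. */
static const char *bucket(unsigned long start, unsigned long end)
{
        unsigned long size = ((end - 1) - start) >> 12;  /* divide by PAGE_SIZE */
        size++;                                          /* end=start+4096 => 1 */

        if (size == 1)   return "1";
        if (size == 2)   return "2";
        if (size <= 4)   return "3-4";
        if (size <= 7)   return "5-7";
        if (size <= 11)  return "8-11";
        if (size <= 15)  return "12-15";
        return "16+";
}

int main(void)
{
        printf("%s\n", bucket(0x400000, 0x400000 + 4096));      /* "1"   */
        printf("%s\n", bucket(0x400000, 0x400000 + 6 * 4096));  /* "5-7" */
        return 0;
}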
*/ + flush_tlb_all(); +} + +#if defined(CONFIG_SH64_PROC_TLB) +/* Procfs interface to read the performance information */ + +static int +tlb_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, void *data) +{ + int len=0; + len += sprintf(buf+len, "do_fast_page_fault called %12lld times\n", calls_to_do_fast_page_fault); + len += sprintf(buf+len, "do_slow_page_fault called %12lld times\n", calls_to_do_slow_page_fault); + len += sprintf(buf+len, "update_mmu_cache called %12lld times\n", calls_to_update_mmu_cache); + len += sprintf(buf+len, "flush_tlb_page called %12lld times\n", calls_to_flush_tlb_page); + len += sprintf(buf+len, "flush_tlb_range called %12lld times\n", calls_to_flush_tlb_range); + len += sprintf(buf+len, "flush_tlb_mm called %12lld times\n", calls_to_flush_tlb_mm); + len += sprintf(buf+len, "flush_tlb_all called %12lld times\n", calls_to_flush_tlb_all); + len += sprintf(buf+len, "flush_tlb_range_sizes\n" + " 1 : %12lld\n" + " 2 : %12lld\n" + " 3 - 4 : %12lld\n" + " 5 - 7 : %12lld\n" + " 8 - 11 : %12lld\n" + "12 - 15 : %12lld\n" + "16+ : %12lld\n", + flush_tlb_range_1, flush_tlb_range_2, flush_tlb_range_3_4, + flush_tlb_range_5_7, flush_tlb_range_8_11, flush_tlb_range_12_15, + flush_tlb_range_16_up); + len += sprintf(buf+len, "page not present %12lld times\n", page_not_present); + *eof = 1; + return len; +} + +static int __init register_proc_tlb(void) +{ + create_proc_read_entry("tlb", 0, NULL, tlb_proc_info, NULL); + return 0; +} + +__initcall(register_proc_tlb); + +#endif diff --git a/arch/sh64/mm/hugetlbpage.c b/arch/sh64/mm/hugetlbpage.c new file mode 100644 index 000000000..50b25735f --- /dev/null +++ b/arch/sh64/mm/hugetlbpage.c @@ -0,0 +1,264 @@ +/* + * arch/sh64/mm/hugetlbpage.c + * + * SuperH HugeTLB page support. + * + * Cloned from sparc64 by Paul Mundt. + * + * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd) { + pmd = pmd_alloc(mm, pgd, addr); + if (pmd) + pte = pte_alloc_map(mm, pmd, addr); + } + return pte; +} + +static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd) { + pmd = pmd_offset(pgd, addr); + if (pmd) + pte = pte_offset_map(pmd, addr); + } + return pte; +} + +#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0) + +static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, + struct page *page, pte_t * page_table, int write_access) +{ + unsigned long i; + pte_t entry; + + mm->rss += (HPAGE_SIZE / PAGE_SIZE); + + if (write_access) + entry = pte_mkwrite(pte_mkdirty(mk_pte(page, + vma->vm_page_prot))); + else + entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot)); + entry = pte_mkyoung(entry); + mk_pte_huge(entry); + + for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { + set_pte(page_table, entry); + page_table++; + + pte_val(entry) += PAGE_SIZE; + } +} + +/* + * This function checks for proper alignment of input addr and len parameters. 
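Illustrative aside (not part of the patch): set_huge_pte() above fans one huge page out across consecutive base-page PTEs, the same protection bits in every slot with the physical address advanced by PAGE_SIZE each time. A self-contained sketch of that fan-out under hypothetical sizes (4 KiB base pages, 1 MiB huge pages, i.e. 256 PTEs per huge page):

#include <stdio.h>

#define PAGE_SIZE_   4096UL
#define HPAGE_SIZE_  (1UL << 20)
#define HPAGE_PTES   (HPAGE_SIZE_ / PAGE_SIZE_)

int main(void)
{
        unsigned long phys = 0x10000000UL;   /* start of the huge page's frame */
        unsigned long i, entry;

        /* mirrors the set_huge_pte() loop: one slot per base page, the
           physical address stepped by PAGE_SIZE each time */
        for (i = 0, entry = phys; i < HPAGE_PTES; i++, entry += PAGE_SIZE_) {
                if (i < 2 || i == HPAGE_PTES - 1)
                        printf("pte[%3lu] -> 0x%lx\n", i, entry);
        }
        return 0;
}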
+ */ +int is_aligned_hugepage_range(unsigned long addr, unsigned long len) +{ + if (len & ~HPAGE_MASK) + return -EINVAL; + if (addr & ~HPAGE_MASK) + return -EINVAL; + return 0; +} + +int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, + struct vm_area_struct *vma) +{ + pte_t *src_pte, *dst_pte, entry; + struct page *ptepage; + unsigned long addr = vma->vm_start; + unsigned long end = vma->vm_end; + int i; + + while (addr < end) { + dst_pte = huge_pte_alloc(dst, addr); + if (!dst_pte) + goto nomem; + src_pte = huge_pte_offset(src, addr); + BUG_ON(!src_pte || pte_none(*src_pte)); + entry = *src_pte; + ptepage = pte_page(entry); + get_page(ptepage); + for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { + set_pte(dst_pte, entry); + pte_val(entry) += PAGE_SIZE; + dst_pte++; + } + dst->rss += (HPAGE_SIZE / PAGE_SIZE); + addr += HPAGE_SIZE; + } + return 0; + +nomem: + return -ENOMEM; +} + +int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, + struct page **pages, struct vm_area_struct **vmas, + unsigned long *position, int *length, int i) +{ + unsigned long vaddr = *position; + int remainder = *length; + + WARN_ON(!is_vm_hugetlb_page(vma)); + + while (vaddr < vma->vm_end && remainder) { + if (pages) { + pte_t *pte; + struct page *page; + + pte = huge_pte_offset(mm, vaddr); + + /* hugetlb should be locked, and hence, prefaulted */ + BUG_ON(!pte || pte_none(*pte)); + + page = pte_page(*pte); + + WARN_ON(!PageCompound(page)); + + get_page(page); + pages[i] = page; + } + + if (vmas) + vmas[i] = vma; + + vaddr += PAGE_SIZE; + --remainder; + ++i; + } + + *length = remainder; + *position = vaddr; + + return i; +} + +struct page *follow_huge_addr(struct mm_struct *mm, + unsigned long address, int write) +{ + return ERR_PTR(-EINVAL); +} + +int pmd_huge(pmd_t pmd) +{ + return 0; +} + +struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, + pmd_t *pmd, int write) +{ + return NULL; +} + +void unmap_hugepage_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long address; + pte_t *pte; + struct page *page; + int i; + + BUG_ON(start & (HPAGE_SIZE - 1)); + BUG_ON(end & (HPAGE_SIZE - 1)); + + for (address = start; address < end; address += HPAGE_SIZE) { + pte = huge_pte_offset(mm, address); + BUG_ON(!pte); + if (pte_none(*pte)) + continue; + page = pte_page(*pte); + put_page(page); + for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) { + pte_clear(pte); + pte++; + } + } + mm->rss -= (end - start) >> PAGE_SHIFT; + flush_tlb_range(vma, start, end); +} + +int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) +{ + struct mm_struct *mm = current->mm; + unsigned long addr; + int ret = 0; + + BUG_ON(vma->vm_start & ~HPAGE_MASK); + BUG_ON(vma->vm_end & ~HPAGE_MASK); + + spin_lock(&mm->page_table_lock); + for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { + unsigned long idx; + pte_t *pte = huge_pte_alloc(mm, addr); + struct page *page; + + if (!pte) { + ret = -ENOMEM; + goto out; + } + if (!pte_none(*pte)) + continue; + + idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) + + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT)); + page = find_get_page(mapping, idx); + if (!page) { + /* charge the fs quota first */ + if (hugetlb_get_quota(mapping)) { + ret = -ENOMEM; + goto out; + } + page = alloc_huge_page(); + if (!page) { + hugetlb_put_quota(mapping); + ret = -ENOMEM; + goto out; + } + ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC); + if (! 
ret) { + unlock_page(page); + } else { + hugetlb_put_quota(mapping); + free_huge_page(page); + goto out; + } + } + set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE); + } +out: + spin_unlock(&mm->page_table_lock); + return ret; +} diff --git a/arch/sh64/mm/init.c b/arch/sh64/mm/init.c new file mode 100644 index 000000000..17cdeb425 --- /dev/null +++ b/arch/sh64/mm/init.c @@ -0,0 +1,199 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/mm/init.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003, 2004 Paul Mundt + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#ifdef CONFIG_BLK_DEV_INITRD +#include +#endif + +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); + +/* + * Cache of MMU context last used. + */ +unsigned long mmu_context_cache; +pgd_t * mmu_pdtp_cache; +int after_bootmem = 0; + +/* + * BAD_PAGE is the page that is used for page faults when linux + * is out-of-memory. Older versions of linux just did a + * do_exit(), but using this instead means there is less risk + * for a process dying in kernel mode, possibly leaving an inode + * unused etc.. + * + * BAD_PAGETABLE is the accompanying page-table: it is initialized + * to point to BAD_PAGE entries. + * + * ZERO_PAGE is a special page that is used for zero-initialized + * data and COW. + */ + +extern unsigned char empty_zero_page[PAGE_SIZE]; +extern unsigned char empty_bad_page[PAGE_SIZE]; +extern pte_t empty_bad_pte_table[PTRS_PER_PTE]; +extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; + +extern char _text, _etext, _edata, __bss_start, _end; +extern char __init_begin, __init_end; + +/* It'd be good if these lines were in the standard header file. */ +#define START_PFN (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT) +#define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn) + + +void show_mem(void) +{ + int i, total = 0, reserved = 0; + int shared = 0, cached = 0; + + printk("Mem-info:\n"); + show_free_areas(); + printk("Free swap: %6ldkB\n",nr_swap_pages<<(PAGE_SHIFT-10)); + i = max_mapnr; + while (i-- > 0) { + total++; + if (PageReserved(mem_map+i)) + reserved++; + else if (PageSwapCache(mem_map+i)) + cached++; + else if (page_count(mem_map+i)) + shared += page_count(mem_map+i) - 1; + } + printk("%d pages of RAM\n",total); + printk("%d reserved pages\n",reserved); + printk("%d pages shared\n",shared); + printk("%d pages swap cached\n",cached); + printk("%ld pages in page table cache\n",pgtable_cache_size); +} + +/* + * paging_init() sets up the page tables. + * + * head.S already did a lot to set up address translation for the kernel. + * Here we comes with: + * . MMU enabled + * . ASID set (SR) + * . some 512MB regions being mapped of which the most relevant here is: + * . CACHED segment (ASID 0 [irrelevant], shared AND NOT user) + * . possible variable length regions being mapped as: + * . UNCACHED segment (ASID 0 [irrelevant], shared AND NOT user) + * . All of the memory regions are placed, independently from the platform + * on high addresses, above 0x80000000. + * . swapper_pg_dir is already cleared out by the .space directive + * in any case swapper does not require a real page directory since + * it's all kernel contained. + * + * Those pesky NULL-reference errors in the kernel are then + * dealt with by not mapping address 0x00000000 at all. 
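Illustrative aside (not part of the patch): the page-cache index computed in hugetlb_prefault() above combines two offsets measured in huge pages, how far into the vma the fault address lies plus the vma's own offset into the file (vm_pgoff is kept in base pages, hence the extra shift). A self-contained sketch with hypothetical shifts (PAGE_SHIFT 12, HPAGE_SHIFT 20):

#include <stdio.h>

#define PAGE_SHIFT_   12
#define HPAGE_SHIFT_  20

/* Same index arithmetic as the hugetlb_prefault() loop above. */
static unsigned long hugepage_index(unsigned long addr, unsigned long vm_start,
                                    unsigned long vm_pgoff)
{
        return ((addr - vm_start) >> HPAGE_SHIFT_) +
               (vm_pgoff >> (HPAGE_SHIFT_ - PAGE_SHIFT_));
}

int main(void)
{
        /* third huge page of a mapping that starts 512 MiB into the file */
        unsigned long vm_start = 0x40000000UL;
        unsigned long vm_pgoff = 0x20000000UL >> PAGE_SHIFT_;

        printf("%lu\n", hugepage_index(vm_start + 2 * (1UL << HPAGE_SHIFT_),
                                       vm_start, vm_pgoff));   /* 514 */
        return 0;
}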
+ * + */ +void __init paging_init(void) +{ + unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0}; + + pgd_init((unsigned long)swapper_pg_dir); + pgd_init((unsigned long)swapper_pg_dir + + sizeof(pgd_t) * USER_PTRS_PER_PGD); + + mmu_context_cache = MMU_CONTEXT_FIRST_VERSION; + + /* + * All memory is good as ZONE_NORMAL (fall-through) and ZONE_DMA. + */ + zones_size[ZONE_DMA] = MAX_LOW_PFN - START_PFN; + + free_area_init_node(0, NODE_DATA(0), 0, zones_size, __MEMORY_START >> PAGE_SHIFT, 0); + + /* XXX: MRB-remove - this doesn't seem sane, should this be done somewhere else ?*/ + mem_map = NODE_DATA(0)->node_mem_map; +} + +void __init mem_init(void) +{ + int codesize, reservedpages, datasize, initsize; + int tmp; + + max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN; + high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE); + + /* + * Clear the zero-page. + * This is not required but we might want to re-use + * this very page to pass boot parameters, one day. + */ + memset(empty_zero_page, 0, PAGE_SIZE); + + /* this will put all low memory onto the freelists */ + totalram_pages += free_all_bootmem_node(NODE_DATA(0)); + reservedpages = 0; + for (tmp = 0; tmp < num_physpages; tmp++) + /* + * Only count reserved RAM pages + */ + if (PageReserved(mem_map+tmp)) + reservedpages++; + + after_bootmem = 1; + + codesize = (unsigned long) &_etext - (unsigned long) &_text; + datasize = (unsigned long) &_edata - (unsigned long) &_etext; + initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; + + printk("Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", + (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), + max_mapnr << (PAGE_SHIFT-10), + codesize >> 10, + reservedpages << (PAGE_SHIFT-10), + datasize >> 10, + initsize >> 10); +} + +void free_initmem(void) +{ + unsigned long addr; + + addr = (unsigned long)(&__init_begin); + for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { + ClearPageReserved(virt_to_page(addr)); + set_page_count(virt_to_page(addr), 1); + free_page(addr); + totalram_pages++; + } + printk ("Freeing unused kernel memory: %ldk freed\n", (&__init_end - &__init_begin) >> 10); +} + +#ifdef CONFIG_BLK_DEV_INITRD +void free_initrd_mem(unsigned long start, unsigned long end) +{ + unsigned long p; + for (p = start; p < end; p += PAGE_SIZE) { + ClearPageReserved(virt_to_page(p)); + set_page_count(virt_to_page(p), 1); + free_page(p); + totalram_pages++; + } + printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); +} +#endif + diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c new file mode 100644 index 000000000..8e0549025 --- /dev/null +++ b/arch/sh64/mm/ioremap.c @@ -0,0 +1,469 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/mm/ioremap.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003, 2004 Paul Mundt + * + * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly + * derived from arch/i386/mm/ioremap.c . 
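Illustrative aside (not part of the patch): the sizes in mem_init()'s "Memory: ..." banner above are straight differences between linker symbols, reported in KiB. A self-contained sketch with made-up section addresses standing in for &_text, &_etext, &_edata and the __init markers:

#include <stdio.h>

int main(void)
{
        unsigned long text_start = 0x80008000UL, text_end = 0x80148000UL;
        unsigned long data_end   = 0x80188000UL;
        unsigned long init_start = 0x80190000UL, init_end = 0x801a0000UL;

        unsigned long codesize = text_end - text_start;
        unsigned long datasize = data_end - text_end;
        unsigned long initsize = init_end - init_start;

        /* same >> 10 conversion to KiB as the banner above */
        printf("%luk kernel code, %luk data, %luk init\n",
               codesize >> 10, datasize >> 10, initsize >> 10);
        return 0;
}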
+ * + * (C) Copyright 1995 1996 Linus Torvalds + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void shmedia_mapioaddr(unsigned long, unsigned long); +static unsigned long shmedia_ioremap(struct resource *, u32, int); + +static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, + unsigned long phys_addr, unsigned long flags) +{ + unsigned long end; + unsigned long pfn; + pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_READ | + _PAGE_WRITE | _PAGE_DIRTY | + _PAGE_ACCESSED | _PAGE_SHARED | flags); + + address &= ~PMD_MASK; + end = address + size; + if (end > PMD_SIZE) + end = PMD_SIZE; + if (address >= end) + BUG(); + + pfn = phys_addr >> PAGE_SHIFT; + + pr_debug(" %s: pte %p address %lx size %lx phys_addr %lx\n", + __FUNCTION__,pte,address,size,phys_addr); + + do { + if (!pte_none(*pte)) { + printk("remap_area_pte: page already exists\n"); + BUG(); + } + + set_pte(pte, pfn_pte(pfn, pgprot)); + address += PAGE_SIZE; + pfn++; + pte++; + } while (address && (address < end)); +} + +static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, + unsigned long phys_addr, unsigned long flags) +{ + unsigned long end; + + address &= ~PGDIR_MASK; + end = address + size; + + if (end > PGDIR_SIZE) + end = PGDIR_SIZE; + + phys_addr -= address; + + if (address >= end) + BUG(); + + do { + pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address); + if (!pte) + return -ENOMEM; + remap_area_pte(pte, address, end - address, address + phys_addr, flags); + address = (address + PMD_SIZE) & PMD_MASK; + pmd++; + } while (address && (address < end)); + return 0; +} + +static int remap_area_pages(unsigned long address, unsigned long phys_addr, + unsigned long size, unsigned long flags) +{ + int error; + pgd_t * dir; + unsigned long end = address + size; + + phys_addr -= address; + dir = pgd_offset_k(address); + flush_cache_all(); + if (address >= end) + BUG(); + spin_lock(&init_mm.page_table_lock); + do { + pmd_t *pmd = pmd_alloc(&init_mm, dir, address); + error = -ENOMEM; + if (!pmd) + break; + if (remap_area_pmd(pmd, address, end - address, + phys_addr + address, flags)) { + break; + } + error = 0; + address = (address + PGDIR_SIZE) & PGDIR_MASK; + dir++; + } while (address && (address < end)); + spin_unlock(&init_mm.page_table_lock); + flush_tlb_all(); + return 0; +} + +/* + * Generic mapping function (not visible outside): + */ + +/* + * Remap an arbitrary physical address space into the kernel virtual + * address space. Needed when the kernel wants to access high addresses + * directly. + * + * NOTE! We need to allow non-page-aligned mappings too: we will obviously + * have to convert them into an offset in a page-aligned mapping, but the + * caller shouldn't need to know that small detail. + */ +void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) +{ + void * addr; + struct vm_struct * area; + unsigned long offset, last_addr; + + /* Don't allow wraparound or zero size */ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr) + return NULL; + + /* + * Mappings have to be page-aligned + */ + offset = phys_addr & ~PAGE_MASK; + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(last_addr + 1) - phys_addr; + + /* + * Ok, go for it.. 
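Illustrative aside (not part of the patch): the fix-up at the top of __ioremap() above tolerates non-page-aligned physical addresses by mapping whole pages and returning a pointer offset into the first one; the size is grown so the last requested byte is still covered. A self-contained sketch of that arithmetic (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SIZE_     4096UL
#define PAGE_MASK_     (~(PAGE_SIZE_ - 1))
#define PAGE_ALIGN_(x) (((x) + PAGE_SIZE_ - 1) & PAGE_MASK_)

int main(void)
{
        unsigned long phys_addr = 0x1f000a40UL, size = 0x120UL;
        unsigned long last_addr = phys_addr + size - 1;
        unsigned long offset = phys_addr & ~PAGE_MASK_;

        /* map whole pages, remember where the caller's data really starts */
        phys_addr &= PAGE_MASK_;
        size = PAGE_ALIGN_(last_addr + 1) - phys_addr;

        printf("map 0x%lx bytes at 0x%lx, return base + 0x%lx\n",
               size, phys_addr, offset);
        return 0;
}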
+ */ + area = get_vm_area(size, VM_IOREMAP); + pr_debug("Get vm_area returns %p addr %p\n",area,area->addr); + if (!area) + return NULL; + area->phys_addr = phys_addr; + addr = area->addr; + if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) { + vunmap(addr); + return NULL; + } + return (void *) (offset + (char *)addr); +} + +void iounmap(void *addr) +{ + struct vm_struct *area; + + vfree((void *) (PAGE_MASK & (unsigned long) addr)); + area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr)); + if (!area) { + printk(KERN_ERR "iounmap: bad address %p\n", addr); + return; + } + + kfree(area); +} + +static struct resource shmedia_iomap = { + .name = "shmedia_iomap", + .start = IOBASE_VADDR, + .end = IOBASE_END - 1, +}; + +static void shmedia_mapioaddr(unsigned long pa, unsigned long va); +static void shmedia_unmapioaddr(unsigned long vaddr); +static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz); + +/* + * We have the same problem as the SPARC, so lets have the same comment: + * Our mini-allocator... + * Boy this is gross! We need it because we must map I/O for + * timers and interrupt controller before the kmalloc is available. + */ + +#define XNMLN 15 +#define XNRES 10 + +struct xresource { + struct resource xres; /* Must be first */ + int xflag; /* 1 == used */ + char xname[XNMLN+1]; +}; + +static struct xresource xresv[XNRES]; + +static struct xresource *xres_alloc(void) +{ + struct xresource *xrp; + int n; + + xrp = xresv; + for (n = 0; n < XNRES; n++) { + if (xrp->xflag == 0) { + xrp->xflag = 1; + return xrp; + } + xrp++; + } + return NULL; +} + +static void xres_free(struct xresource *xrp) +{ + xrp->xflag = 0; +} + +static struct resource *shmedia_find_resource(struct resource *root, + unsigned long vaddr) +{ + struct resource *res; + + for (res = root->child; res; res = res->sibling) + if (res->start <= vaddr && res->end >= vaddr) + return res; + + return NULL; +} + +static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size, + const char *name) +{ + static int printed_full = 0; + struct xresource *xres; + struct resource *res; + char *tack; + int tlen; + + if (name == NULL) name = "???"; + + if ((xres = xres_alloc()) != 0) { + tack = xres->xname; + res = &xres->xres; + } else { + if (!printed_full) { + printk("%s: done with statics, switching to kmalloc\n", + __FUNCTION__); + printed_full = 1; + } + tlen = strlen(name); + tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL); + if (!tack) + return -ENOMEM; + memset(tack, 0, sizeof(struct resource)); + res = (struct resource *) tack; + tack += sizeof (struct resource); + } + + strncpy(tack, name, XNMLN); + tack[XNMLN] = 0; + res->name = tack; + + return shmedia_ioremap(res, phys, size); +} + +static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz) +{ + unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK); + unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK; + unsigned long va; + unsigned int psz; + + if (allocate_resource(&shmedia_iomap, res, round_sz, + shmedia_iomap.start, shmedia_iomap.end, + PAGE_SIZE, NULL, NULL) != 0) { + panic("alloc_io_res(%s): cannot occupy\n", + (res->name != NULL)? res->name: "???"); + } + + va = res->start; + pa &= PAGE_MASK; + + psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE; + + /* log at boot time ... */ + printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n", + ((res->name != NULL) ? res->name : "???"), + psz, psz == 1 ? 
" " : "s", va, pa); + + for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) { + shmedia_mapioaddr(pa, va); + va += PAGE_SIZE; + pa += PAGE_SIZE; + } + + res->start += offset; + res->end = res->start + sz - 1; /* not strictly necessary.. */ + + return res->start; +} + +static void shmedia_free_io(struct resource *res) +{ + unsigned long len = res->end - res->start + 1; + + BUG_ON((len & (PAGE_SIZE - 1)) != 0); + + while (len) { + len -= PAGE_SIZE; + shmedia_unmapioaddr(res->start + len); + } + + release_resource(res); +} + +static void *sh64_get_page(void) +{ + extern int after_bootmem; + void *page; + + if (after_bootmem) { + page = (void *)get_zeroed_page(GFP_ATOMIC); + } else { + page = alloc_bootmem_pages(PAGE_SIZE); + } + + if (!page || ((unsigned long)page & ~PAGE_MASK)) + panic("sh64_get_page: Out of memory already?\n"); + + return page; +} + +static void shmedia_mapioaddr(unsigned long pa, unsigned long va) +{ + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep, pte; + pgprot_t prot; + unsigned long flags = 1; /* 1 = CB0-1 device */ + + pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va); + + pgdp = pgd_offset_k(va); + if (pgd_none(*pgdp) || !pgd_present(*pgdp)) { + pmdp = (pmd_t *)sh64_get_page(); + set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE)); + } + + pmdp = pmd_offset(pgdp, va); + if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) { + ptep = (pte_t *)sh64_get_page(); + set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE)); + } + + prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | + _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags); + + pte = pfn_pte(pa >> PAGE_SHIFT, prot); + ptep = pte_offset_kernel(pmdp, va); + + if (!pte_none(*ptep) && + pte_val(*ptep) != pte_val(pte)) + pte_ERROR(*ptep); + + set_pte(ptep, pte); + + flush_tlb_kernel_range(va, PAGE_SIZE); +} + +static void shmedia_unmapioaddr(unsigned long vaddr) +{ + pgd_t *pgdp; + pmd_t *pmdp; + pte_t *ptep; + + pgdp = pgd_offset_k(vaddr); + pmdp = pmd_offset(pgdp, vaddr); + + if (pmd_none(*pmdp) || pmd_bad(*pmdp)) + return; + + ptep = pte_offset_kernel(pmdp, vaddr); + + if (pte_none(*ptep) || !pte_present(*ptep)) + return; + + clear_page((void *)ptep); + pte_clear(ptep); +} + +unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name) +{ + if (size < PAGE_SIZE) + size = PAGE_SIZE; + + return shmedia_alloc_io(phys, size, name); +} + +void onchip_unmap(unsigned long vaddr) +{ + struct resource *res; + unsigned int psz; + + res = shmedia_find_resource(&shmedia_iomap, vaddr); + if (!res) { + printk(KERN_ERR "%s: Failed to free 0x%08lx\n", + __FUNCTION__, vaddr); + return; + } + + psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE; + + printk(KERN_DEBUG "unmapioaddr: %6s [%2d page%s] freed\n", + res->name, psz, psz == 1 ? 
" " : "s"); + + shmedia_free_io(res); + + if ((char *)res >= (char *)xresv && + (char *)res < (char *)&xresv[XNRES]) { + xres_free((struct xresource *)res); + } else { + kfree(res); + } +} + +#ifdef CONFIG_PROC_FS +static int +ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof, + void *data) +{ + char *p = buf, *e = buf + length; + struct resource *r; + const char *nm; + + for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) { + if (p + 32 >= e) /* Better than nothing */ + break; + if ((nm = r->name) == 0) nm = "???"; + p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm); + } + + return p-buf; +} +#endif /* CONFIG_PROC_FS */ + +static int __init register_proc_onchip(void) +{ +#ifdef CONFIG_PROC_FS + create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap); +#endif + return 0; +} + +__initcall(register_proc_onchip); diff --git a/arch/sh64/mm/tlb.c b/arch/sh64/mm/tlb.c new file mode 100644 index 000000000..d517e7d70 --- /dev/null +++ b/arch/sh64/mm/tlb.c @@ -0,0 +1,166 @@ +/* + * arch/sh64/mm/tlb.c + * + * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2003 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + */ +#include +#include +#include +#include +#include + +/** + * sh64_tlb_init + * + * Perform initial setup for the DTLB and ITLB. + */ +int __init sh64_tlb_init(void) +{ + /* Assign some sane DTLB defaults */ + cpu_data->dtlb.entries = 64; + cpu_data->dtlb.step = 0x10; + + cpu_data->dtlb.first = DTLB_FIXED | cpu_data->dtlb.step; + cpu_data->dtlb.next = cpu_data->dtlb.first; + + cpu_data->dtlb.last = DTLB_FIXED | + ((cpu_data->dtlb.entries - 1) * + cpu_data->dtlb.step); + + /* And again for the ITLB */ + cpu_data->itlb.entries = 64; + cpu_data->itlb.step = 0x10; + + cpu_data->itlb.first = ITLB_FIXED | cpu_data->itlb.step; + cpu_data->itlb.next = cpu_data->itlb.first; + cpu_data->itlb.last = ITLB_FIXED | + ((cpu_data->itlb.entries - 1) * + cpu_data->itlb.step); + + return 0; +} + +/** + * sh64_next_free_dtlb_entry + * + * Find the next available DTLB entry + */ +unsigned long long sh64_next_free_dtlb_entry(void) +{ + return cpu_data->dtlb.next; +} + +/** + * sh64_get_wired_dtlb_entry + * + * Allocate a wired (locked-in) entry in the DTLB + */ +unsigned long long sh64_get_wired_dtlb_entry(void) +{ + unsigned long long entry = sh64_next_free_dtlb_entry(); + + cpu_data->dtlb.first += cpu_data->dtlb.step; + cpu_data->dtlb.next += cpu_data->dtlb.step; + + return entry; +} + +/** + * sh64_put_wired_dtlb_entry + * + * @entry: Address of TLB slot. + * + * Free a wired (locked-in) entry in the DTLB. + * + * Works like a stack, last one to allocate must be first one to free. + */ +int sh64_put_wired_dtlb_entry(unsigned long long entry) +{ + __flush_tlb_slot(entry); + + /* + * We don't do any particularly useful tracking of wired entries, + * so this approach works like a stack .. last one to be allocated + * has to be the first one to be freed. + * + * We could potentially load wired entries into a list and work on + * rebalancing the list periodically (which also entails moving the + * contents of a TLB entry) .. though I have a feeling that this is + * more trouble than it's worth. + */ + + /* + * Entry must be valid .. we don't want any ITLB addresses! + */ + if (entry <= DTLB_FIXED) + return -EINVAL; + + /* + * Next, check if we're within range to be freed. 
(ie, must be the + * entry beneath the first 'free' entry! + */ + if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step)) + return -EINVAL; + + /* If we are, then bring this entry back into the list */ + cpu_data->dtlb.first -= cpu_data->dtlb.step; + cpu_data->dtlb.next = entry; + + return 0; +} + +/** + * sh64_setup_tlb_slot + * + * @config_addr: Address of TLB slot. + * @eaddr: Virtual address. + * @asid: Address Space Identifier. + * @paddr: Physical address. + * + * Load up a virtual<->physical translation for @eaddr<->@paddr in the + * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry). + */ +inline void sh64_setup_tlb_slot(unsigned long long config_addr, + unsigned long eaddr, + unsigned long asid, + unsigned long paddr) +{ + unsigned long long pteh, ptel; + + /* Sign extension */ +#if (NEFF == 32) + pteh = (unsigned long long)(signed long long)(signed long) eaddr; +#else +#error "Can't sign extend more than 32 bits yet" +#endif + pteh &= PAGE_MASK; + pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID; +#if (NEFF == 32) + ptel = (unsigned long long)(signed long long)(signed long) paddr; +#else +#error "Can't sign extend more than 32 bits yet" +#endif + ptel &= PAGE_MASK; + ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE); + + asm volatile("putcfg %0, 1, %1\n\t" + "putcfg %0, 0, %2\n" + : : "r" (config_addr), "r" (ptel), "r" (pteh)); +} + +/** + * sh64_teardown_tlb_slot + * + * @config_addr: Address of TLB slot. + * + * Teardown any existing mapping in the TLB slot @config_addr. + */ +inline void sh64_teardown_tlb_slot(unsigned long long config_addr) + __attribute__ ((alias("__flush_tlb_slot"))); + diff --git a/arch/sh64/mm/tlbmiss.c b/arch/sh64/mm/tlbmiss.c new file mode 100644 index 000000000..a69f7751c --- /dev/null +++ b/arch/sh64/mm/tlbmiss.c @@ -0,0 +1,282 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/mm/tlbmiss.c + * + * Original code from fault.c + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * Fast PTE->TLB refill path + * Copyright (C) 2003 Richard.Curnow@superh.com + * + * IMPORTANT NOTES : + * The do_fast_page_fault function is called from a context in entry.S where very few registers + * have been saved. In particular, the code in this file must be compiled not to use ANY + * caller-save regiseters that are not part of the restricted save set. Also, it means that + * code in this file must not make calls to functions elsewhere in the kernel, or else the + * excepting context will see corruption in its caller-save registers. Plus, the entry.S save + * area is non-reentrant, so this code has to run with SR.BL==1, i.e. no interrupts taken inside + * it and panic on any exception. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* required by inline asm statements */ + +/* Callable from fault.c, so not static */ +inline void __do_tlb_refill(unsigned long address, + unsigned long long is_text_not_data, pte_t *pte) +{ + unsigned long long ptel; + unsigned long long pteh=0; + struct tlb_info *tlbp; + unsigned long long next; + + /* Get PTEL first */ + ptel = pte_val(*pte); + + /* + * Set PTEH register + */ + pteh = address & MMU_VPN_MASK; + + /* Sign extend based on neff. 
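+	   With NEFF == 32 the fast path below relies on long being 32 bits
+	   on this port: the double cast truncates to 32 bits and then
+	   sign-extends bit 31, so, for example, 0x80001000 widens to
+	   0xffffffff80001000 before the ASID and valid bits are or'd in.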
*/ +#if (NEFF == 32) + /* Faster sign extension */ + pteh = (unsigned long long)(signed long long)(signed long)pteh; +#else + /* General case */ + pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh; +#endif + + /* Set the ASID. */ + pteh |= get_asid() << PTEH_ASID_SHIFT; + pteh |= PTEH_VALID; + + /* Set PTEL register, set_pte has performed the sign extension */ + ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ + ptel |= _PAGE_FLAGS_HARDWARE_DEFAULT; /* add default flags */ + + tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb); + next = tlbp->next; + __flush_tlb_slot(next); + asm volatile ("putcfg %0,1,%2\n\n\t" + "putcfg %0,0,%1\n" + : : "r" (next), "r" (pteh), "r" (ptel) ); + + next += TLB_STEP; + if (next > tlbp->last) next = tlbp->first; + tlbp->next = next; + +} + +static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long protection_flags, + unsigned long long textaccess, + unsigned long address) +{ + pgd_t *dir; + pmd_t *pmd; + static pte_t *pte; + pte_t entry; + + dir = pgd_offset_k(address); + pmd = pmd_offset(dir, address); + + if (pmd_none(*pmd)) { + return 0; + } + + if (pmd_bad(*pmd)) { + pmd_clear(pmd); + return 0; + } + + pte = pte_offset_kernel(pmd, address); + entry = *pte; + + if (pte_none(entry) || !pte_present(entry)) { + return 0; + } + + if ((pte_val(entry) & protection_flags) != protection_flags) { + return 0; + } + + __do_tlb_refill(address, textaccess, pte); + + return 1; +} + +static int handle_tlbmiss(struct mm_struct *mm, unsigned long long protection_flags, + unsigned long long textaccess, + unsigned long address) +{ + pgd_t *dir; + pmd_t *pmd; + pte_t *pte; + pte_t entry; + + /* NB. The PGD currently only contains a single entry - there is no + page table tree stored for the top half of the address space since + virtual pages in that region should never be mapped in user mode. + (In kernel mode, the only things in that region are the 512Mb super + page (locked in), and vmalloc (modules) + I/O device pages (handled + by handle_vmalloc_fault), so no PGD for the upper half is required + by kernel mode either). + + See how mm->pgd is allocated and initialised in pgd_alloc to see why + the next test is necessary. - RPC */ + if (address >= (unsigned long) TASK_SIZE) { + /* upper half - never has page table entries. */ + return 0; + } + dir = pgd_offset(mm, address); + if (pgd_none(*dir)) { + return 0; + } + if (!pgd_present(*dir)) { + return 0; + } + + pmd = pmd_offset(dir, address); + if (pmd_none(*pmd)) { + return 0; + } + if (!pmd_present(*pmd)) { + return 0; + } + pte = pte_offset_kernel(pmd, address); + entry = *pte; + if (pte_none(entry)) { + return 0; + } + if (!pte_present(entry)) { + return 0; + } + + /* If the page doesn't have sufficient protection bits set to service the + kind of fault being handled, there's not much point doing the TLB refill. + Punt the fault to the general handler. */ + if ((pte_val(entry) & protection_flags) != protection_flags) { + return 0; + } + + __do_tlb_refill(address, textaccess, pte); + + return 1; +} + +/* Put all this information into one structure so that everything is just arithmetic + relative to a single base address. This reduces the number of movi/shori pairs needed + just to load addresses of static data. 
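+
+   (do_fast_page_fault below hashes EXPEVT down to a 3-bit index and uses
+   it to pull both the required protection bits and the text/data flag
+   out of this table in one go.)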
*/ +struct expevt_lookup { + unsigned short protection_flags[8]; + unsigned char is_text_access[8]; + unsigned char is_write_access[8]; +}; + +#define PRU (1<<9) +#define PRW (1<<8) +#define PRX (1<<7) +#define PRR (1<<6) + +#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED) +#define YOUNG (_PAGE_ACCESSED) + +/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether + the fault happened in user mode or privileged mode. */ +static struct expevt_lookup expevt_lookup_table = { + .protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW}, + .is_text_access = {1, 1, 0, 0, 0, 0, 0, 0} +}; + +/* + This routine handles page faults that can be serviced just by refilling a + TLB entry from an existing page table entry. (This case represents a very + large majority of page faults.) Return 1 if the fault was successfully + handled. Return 0 if the fault could not be handled. (This leads into the + general fault handling in fault.c which deals with mapping file-backed + pages, stack growth, segmentation faults, swapping etc etc) + */ +asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt, + unsigned long address) +{ + struct task_struct *tsk; + struct mm_struct *mm; + unsigned long long textaccess; + unsigned long long protection_flags; + unsigned long long index; + unsigned long long expevt4; + + /* The next few lines implement a way of hashing EXPEVT into a small array index + which can be used to lookup parameters specific to the type of TLBMISS being + handled. Note: + ITLBMISS has EXPEVT==0xa40 + RTLBMISS has EXPEVT==0x040 + WTLBMISS has EXPEVT==0x060 + */ + + expevt4 = (expevt >> 4); + /* TODO : xor ssr_md into this expression too. Then we can check that PRU is set + when it needs to be. */ + index = expevt4 ^ (expevt4 >> 5); + index &= 7; + protection_flags = expevt_lookup_table.protection_flags[index]; + textaccess = expevt_lookup_table.is_text_access[index]; + +#ifdef CONFIG_SH64_PROC_TLB + ++calls_to_do_fast_page_fault; +#endif + + /* SIM + * Note this is now called with interrupts still disabled + * This is to cope with being called for a missing IO port + * address with interupts disabled. This should be fixed as + * soon as we have a better 'fast path' miss handler. + * + * Plus take care how you try and debug this stuff. + * For example, writing debug data to a port which you + * have just faulted on is not going to work. + */ + + tsk = current; + mm = tsk->mm; + + if ((address >= VMALLOC_START && address < VMALLOC_END) || + (address >= IOBASE_VADDR && address < IOBASE_END)) { + if (ssr_md) { + /* Process-contexts can never have this address range mapped */ + if (handle_vmalloc_fault(mm, protection_flags, textaccess, address)) { + return 1; + } + } + } else if (!in_interrupt() && mm) { + if (handle_tlbmiss(mm, protection_flags, textaccess, address)) { + return 1; + } + } + + return 0; +} + diff --git a/arch/sh64/oprofile/Kconfig b/arch/sh64/oprofile/Kconfig new file mode 100644 index 000000000..19d37730b --- /dev/null +++ b/arch/sh64/oprofile/Kconfig @@ -0,0 +1,23 @@ + +menu "Profiling support" + depends on EXPERIMENTAL + +config PROFILING + bool "Profiling support (EXPERIMENTAL)" + help + Say Y here to enable the extended profiling support mechanisms used + by profilers such as OProfile. + + +config OPROFILE + tristate "OProfile system profiling (EXPERIMENTAL)" + depends on PROFILING + help + OProfile is a profiling system capable of profiling the + whole system, include the kernel, kernel modules, libraries, + and applications. 
+ + If unsure, say N. + +endmenu + diff --git a/arch/sh64/oprofile/Makefile b/arch/sh64/oprofile/Makefile new file mode 100644 index 000000000..11a451f6a --- /dev/null +++ b/arch/sh64/oprofile/Makefile @@ -0,0 +1,12 @@ +obj-$(CONFIG_OPROFILE) += oprofile.o + +DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ + oprof.o cpu_buffer.o buffer_sync.o \ + event_buffer.o oprofile_files.o \ + oprofilefs.o oprofile_stats.o \ + timer_int.o ) + +profdrvr-y := op_model_null.o + +oprofile-y := $(DRIVER_OBJS) $(profdrvr-y) + diff --git a/arch/sh64/oprofile/op_model_null.c b/arch/sh64/oprofile/op_model_null.c new file mode 100644 index 000000000..f1c795fe9 --- /dev/null +++ b/arch/sh64/oprofile/op_model_null.c @@ -0,0 +1,23 @@ +/* + * arch/sh/oprofile/op_model_null.c + * + * Copyright (C) 2003 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include + +int __init oprofile_arch_init(struct oprofile_operations **ops) +{ + return -ENODEV; +} + +void oprofile_arch_exit(void) +{ +} + diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S new file mode 100644 index 000000000..b59884ef0 --- /dev/null +++ b/arch/sparc64/lib/clear_page.S @@ -0,0 +1,105 @@ +/* clear_page.S: UltraSparc optimized clear page. + * + * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com) + */ + +#include +#include +#include +#include +#include + + /* What we used to do was lock a TLB entry into a specific + * TLB slot, clear the page with interrupts disabled, then + * restore the original TLB entry. This was great for + * disturbing the TLB as little as possible, but it meant + * we had to keep interrupts disabled for a long time. + * + * Now, we simply use the normal TLB loading mechanism, + * and this makes the cpu choose a slot all by itself. + * Then we do a normal TLB flush on exit. We need only + * disable preemption during the clear. + */ + +#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS) +#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W) + + .text + + .globl _clear_page +_clear_page: /* %o0=dest */ + ba,pt %xcc, clear_page_common + clr %o4 + + /* This thing is pretty important, it shows up + * on the profiles via do_anonymous_page(). + */ + .align 32 + .globl clear_user_page +clear_user_page: /* %o0=dest, %o1=vaddr */ + lduw [%g6 + TI_PRE_COUNT], %o2 + sethi %uhi(PAGE_OFFSET), %g2 + sethi %hi(PAGE_SIZE), %o4 + + sllx %g2, 32, %g2 + sethi %uhi(TTE_BITS_TOP), %g3 + + sllx %g3, 32, %g3 + sub %o0, %g2, %g1 ! paddr + + or %g3, TTE_BITS_BOTTOM, %g3 + and %o1, %o4, %o0 ! vaddr D-cache alias bit + + or %g1, %g3, %g1 ! TTE data + sethi %hi(TLBTEMP_BASE), %o3 + + add %o2, 1, %o4 + add %o0, %o3, %o0 ! TTE vaddr + + /* Disable preemption. */ + mov TLB_TAG_ACCESS, %g3 + stw %o4, [%g6 + TI_PRE_COUNT] + + /* Load TLB entry. */ + rdpr %pstate, %o4 + wrpr %o4, PSTATE_IE, %pstate + stxa %o0, [%g3] ASI_DMMU + stxa %g1, [%g0] ASI_DTLB_DATA_IN + flush %g6 + wrpr %o4, 0x0, %pstate + + mov 1, %o4 + +clear_page_common: + VISEntryHalf + membar #StoreLoad | #StoreStore | #LoadStore + fzero %f0 + sethi %hi(PAGE_SIZE/64), %o1 + mov %o0, %g1 ! 
remember vaddr for tlbflush + fzero %f2 + or %o1, %lo(PAGE_SIZE/64), %o1 + faddd %f0, %f2, %f4 + fmuld %f0, %f2, %f6 + faddd %f0, %f2, %f8 + fmuld %f0, %f2, %f10 + + faddd %f0, %f2, %f12 + fmuld %f0, %f2, %f14 +1: stda %f0, [%o0 + %g0] ASI_BLK_P + subcc %o1, 1, %o1 + bne,pt %icc, 1b + add %o0, 0x40, %o0 + membar #Sync + VISExitHalf + + brz,pn %o4, out + nop + + stxa %g0, [%g1] ASI_DMMU_DEMAP + membar #Sync + stw %o2, [%g6 + TI_PRE_COUNT] + +out: retl + nop + diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S new file mode 100644 index 000000000..862eefb17 --- /dev/null +++ b/arch/sparc64/lib/copy_page.S @@ -0,0 +1,239 @@ +/* clear_page.S: UltraSparc optimized copy page. + * + * Copyright (C) 1996, 1998, 1999, 2000, 2004 David S. Miller (davem@redhat.com) + * Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com) + */ + +#include +#include +#include +#include +#include +#include + + /* What we used to do was lock a TLB entry into a specific + * TLB slot, clear the page with interrupts disabled, then + * restore the original TLB entry. This was great for + * disturbing the TLB as little as possible, but it meant + * we had to keep interrupts disabled for a long time. + * + * Now, we simply use the normal TLB loading mechanism, + * and this makes the cpu choose a slot all by itself. + * Then we do a normal TLB flush on exit. We need only + * disable preemption during the clear. + */ + +#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS) +#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W) +#define DCACHE_SIZE (PAGE_SIZE * 2) + +#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19) +#define PAGE_SIZE_REM 0x80 +#elif (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22) +#define PAGE_SIZE_REM 0x100 +#else +#error Wrong PAGE_SHIFT specified +#endif + +#define TOUCH(reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7) \ + fmovd %reg0, %f48; fmovd %reg1, %f50; \ + fmovd %reg2, %f52; fmovd %reg3, %f54; \ + fmovd %reg4, %f56; fmovd %reg5, %f58; \ + fmovd %reg6, %f60; fmovd %reg7, %f62; + + .text + + .align 32 + .globl copy_user_page +copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */ + lduw [%g6 + TI_PRE_COUNT], %o4 + sethi %uhi(PAGE_OFFSET), %g2 + sethi %hi(PAGE_SIZE), %o3 + + sllx %g2, 32, %g2 + sethi %uhi(TTE_BITS_TOP), %g3 + + sllx %g3, 32, %g3 + sub %o0, %g2, %g1 ! dest paddr + + sub %o1, %g2, %g2 ! src paddr + or %g3, TTE_BITS_BOTTOM, %g3 + + and %o2, %o3, %o0 ! vaddr D-cache alias bit + or %g1, %g3, %g1 ! dest TTE data + + or %g2, %g3, %g2 ! src TTE data + sethi %hi(TLBTEMP_BASE), %o3 + + sethi %hi(DCACHE_SIZE), %o1 + add %o0, %o3, %o0 ! dest TTE vaddr + + add %o4, 1, %o2 + add %o0, %o1, %o1 ! src TTE vaddr + + /* Disable preemption. */ + mov TLB_TAG_ACCESS, %g3 + stw %o2, [%g6 + TI_PRE_COUNT] + + /* Load TLB entries. 
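+	 * (PSTATE.IE is cleared across the two TAG ACCESS / DATA_IN pairs
+	 * below so an interrupt cannot disturb the D-MMU registers
+	 * mid-sequence.)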
*/ + rdpr %pstate, %o2 + wrpr %o2, PSTATE_IE, %pstate + stxa %o0, [%g3] ASI_DMMU + stxa %g1, [%g0] ASI_DTLB_DATA_IN + membar #Sync + stxa %o1, [%g3] ASI_DMMU + stxa %g2, [%g0] ASI_DTLB_DATA_IN + membar #Sync + wrpr %o2, 0x0, %pstate + + BRANCH_IF_ANY_CHEETAH(g3,o2,1f) + ba,pt %xcc, 9f + nop + +1: + VISEntryHalf + membar #StoreLoad | #StoreStore | #LoadStore + sethi %hi((PAGE_SIZE/64)-2), %o2 + mov %o0, %g1 + prefetch [%o1 + 0x000], #one_read + or %o2, %lo((PAGE_SIZE/64)-2), %o2 + prefetch [%o1 + 0x040], #one_read + prefetch [%o1 + 0x080], #one_read + prefetch [%o1 + 0x0c0], #one_read + ldd [%o1 + 0x000], %f0 + prefetch [%o1 + 0x100], #one_read + ldd [%o1 + 0x008], %f2 + prefetch [%o1 + 0x140], #one_read + ldd [%o1 + 0x010], %f4 + prefetch [%o1 + 0x180], #one_read + fmovd %f0, %f16 + ldd [%o1 + 0x018], %f6 + fmovd %f2, %f18 + ldd [%o1 + 0x020], %f8 + fmovd %f4, %f20 + ldd [%o1 + 0x028], %f10 + fmovd %f6, %f22 + ldd [%o1 + 0x030], %f12 + fmovd %f8, %f24 + ldd [%o1 + 0x038], %f14 + fmovd %f10, %f26 + ldd [%o1 + 0x040], %f0 +1: ldd [%o1 + 0x048], %f2 + fmovd %f12, %f28 + ldd [%o1 + 0x050], %f4 + fmovd %f14, %f30 + stda %f16, [%o0] ASI_BLK_P + ldd [%o1 + 0x058], %f6 + fmovd %f0, %f16 + ldd [%o1 + 0x060], %f8 + fmovd %f2, %f18 + ldd [%o1 + 0x068], %f10 + fmovd %f4, %f20 + ldd [%o1 + 0x070], %f12 + fmovd %f6, %f22 + ldd [%o1 + 0x078], %f14 + fmovd %f8, %f24 + ldd [%o1 + 0x080], %f0 + prefetch [%o1 + 0x180], #one_read + fmovd %f10, %f26 + subcc %o2, 1, %o2 + add %o0, 0x40, %o0 + bne,pt %xcc, 1b + add %o1, 0x40, %o1 + + ldd [%o1 + 0x048], %f2 + fmovd %f12, %f28 + ldd [%o1 + 0x050], %f4 + fmovd %f14, %f30 + stda %f16, [%o0] ASI_BLK_P + ldd [%o1 + 0x058], %f6 + fmovd %f0, %f16 + ldd [%o1 + 0x060], %f8 + fmovd %f2, %f18 + ldd [%o1 + 0x068], %f10 + fmovd %f4, %f20 + ldd [%o1 + 0x070], %f12 + fmovd %f6, %f22 + add %o0, 0x40, %o0 + ldd [%o1 + 0x078], %f14 + fmovd %f8, %f24 + fmovd %f10, %f26 + fmovd %f12, %f28 + fmovd %f14, %f30 + stda %f16, [%o0] ASI_BLK_P + membar #Sync + VISExitHalf + ba,pt %xcc, 5f + nop + +9: + VISEntry + ldub [%g6 + TI_FAULT_CODE], %g3 + mov %o0, %g1 + cmp %g3, 0 + rd %asi, %g3 + be,a,pt %icc, 1f + wr %g0, ASI_BLK_P, %asi + wr %g0, ASI_BLK_COMMIT_P, %asi +1: ldda [%o1] ASI_BLK_P, %f0 + add %o1, 0x40, %o1 + ldda [%o1] ASI_BLK_P, %f16 + add %o1, 0x40, %o1 + sethi %hi(PAGE_SIZE), %o2 +1: TOUCH(f0, f2, f4, f6, f8, f10, f12, f14) + ldda [%o1] ASI_BLK_P, %f32 + stda %f48, [%o0] %asi + add %o1, 0x40, %o1 + sub %o2, 0x40, %o2 + add %o0, 0x40, %o0 + TOUCH(f16, f18, f20, f22, f24, f26, f28, f30) + ldda [%o1] ASI_BLK_P, %f0 + stda %f48, [%o0] %asi + add %o1, 0x40, %o1 + sub %o2, 0x40, %o2 + add %o0, 0x40, %o0 + TOUCH(f32, f34, f36, f38, f40, f42, f44, f46) + ldda [%o1] ASI_BLK_P, %f16 + stda %f48, [%o0] %asi + sub %o2, 0x40, %o2 + add %o1, 0x40, %o1 + cmp %o2, PAGE_SIZE_REM + bne,pt %xcc, 1b + add %o0, 0x40, %o0 +#if (PAGE_SHIFT == 16) || (PAGE_SHIFT == 22) + TOUCH(f0, f2, f4, f6, f8, f10, f12, f14) + ldda [%o1] ASI_BLK_P, %f32 + stda %f48, [%o0] %asi + add %o1, 0x40, %o1 + sub %o2, 0x40, %o2 + add %o0, 0x40, %o0 + TOUCH(f16, f18, f20, f22, f24, f26, f28, f30) + ldda [%o1] ASI_BLK_P, %f0 + stda %f48, [%o0] %asi + add %o1, 0x40, %o1 + sub %o2, 0x40, %o2 + add %o0, 0x40, %o0 + membar #Sync + stda %f32, [%o0] %asi + add %o0, 0x40, %o0 + stda %f0, [%o0] %asi +#else + membar #Sync + stda %f0, [%o0] %asi + add %o0, 0x40, %o0 + stda %f16, [%o0] %asi +#endif + membar #Sync + wr %g3, 0x0, %asi + VISExit + +5: + stxa %g0, [%g1] ASI_DMMU_DEMAP + membar #Sync + + sethi %hi(DCACHE_SIZE), %g2 + stxa %g0, 
[%g1 + %g2] ASI_DMMU_DEMAP + membar #Sync + + retl + stw %o4, [%g6 + TI_PRE_COUNT] diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c new file mode 100644 index 000000000..420dfbafd --- /dev/null +++ b/arch/sparc64/lib/find_bit.c @@ -0,0 +1,125 @@ +#include + +/** + * find_next_bit - find the next set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = addr + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp &= (~0UL << offset); + if (size < 64) + goto found_first; + if (tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if ((tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp &= (~0UL >> (64 - size)); + if (tmp == 0UL) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + __ffs(tmp); +} + +/* find_next_zero_bit() finds the first zero bit in a bit string of length + * 'size' bits, starting the search at bit 'offset'. This is largely based + * on Linus's ALPHA routines, which are pretty portable BTW. + */ + +unsigned long find_next_zero_bit(unsigned long *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = addr + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (64-offset); + if (size < 64) + goto found_first; + if (~tmp) + goto found_middle; + size -= 64; + result += 64; + } + while (size & ~63UL) { + if (~(tmp = *(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp |= ~0UL << size; + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found_middle: + return result + ffz(tmp); +} + +unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = addr + (offset >> 6); + unsigned long result = offset & ~63UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 63UL; + if(offset) { + tmp = __swab64p(p++); + tmp |= (~0UL >> (64-offset)); + if(size < 64) + goto found_first; + if(~tmp) + goto found_middle; + size -= 64; + result += 64; + } + while(size & ~63) { + if(~(tmp = __swab64p(p++))) + goto found_middle; + result += 64; + size -= 64; + } + if(!size) + return result; + tmp = __swab64p(p); +found_first: + tmp |= (~0UL << size); + if (tmp == ~0UL) /* Are any bits zero? */ + return result + size; /* Nope. */ +found_middle: + return result + ffz(tmp); +} diff --git a/arch/sparc64/lib/splock.S b/arch/sparc64/lib/splock.S new file mode 100644 index 000000000..d17a3badd --- /dev/null +++ b/arch/sparc64/lib/splock.S @@ -0,0 +1,23 @@ +/* splock.S: Spinlock primitives too large to inline. + * + * Copyright (C) 2004 David S. Miller (davem@redhat.com) + */ + + .text + .align 64 + + .globl _raw_spin_lock_flags +_raw_spin_lock_flags: /* %o0 = lock_ptr, %o1 = irq_flags */ +1: ldstub [%o0], %g7 + brnz,pn %g7, 2f + membar #StoreLoad | #StoreStore + retl + nop + +2: rdpr %pil, %g2 ! Save PIL + wrpr %o1, %pil ! 
Set previous PIL +3: ldub [%o0], %g7 ! Spin on lock set + brnz,pt %g7, 3b + membar #LoadLoad + ba,pt %xcc, 1b ! Retry lock acquire + wrpr %g2, %pil ! Restore PIL diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c new file mode 100644 index 000000000..e2d79fc14 --- /dev/null +++ b/arch/sparc64/mm/tlb.c @@ -0,0 +1,158 @@ +/* arch/sparc64/mm/tlb.c + * + * Copyright (C) 2004 David S. Miller + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* Heavily inspired by the ppc64 code. */ + +DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = + { NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, }; + +void flush_tlb_pending(void) +{ + struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); + + if (mp->tlb_nr) { + unsigned long context = mp->mm->context; + + if (CTX_VALID(context)) { +#ifdef CONFIG_SMP + smp_flush_tlb_pending(mp->mm, mp->tlb_nr, + &mp->vaddrs[0]); +#else + __flush_tlb_pending(CTX_HWBITS(context), mp->tlb_nr, + &mp->vaddrs[0]); +#endif + } + mp->tlb_nr = 0; + } +} + +void tlb_batch_add(pte_t *ptep, pte_t orig) +{ + struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); + struct page *ptepage; + struct mm_struct *mm; + unsigned long vaddr, nr; + + ptepage = virt_to_page(ptep); + mm = (struct mm_struct *) ptepage->mapping; + + /* It is more efficient to let flush_tlb_kernel_range() + * handle these cases. + */ + if (mm == &init_mm) + return; + + vaddr = ptepage->index + + (((unsigned long)ptep & ~PAGE_MASK) * PTRS_PER_PTE); + if (pte_exec(orig)) + vaddr |= 0x1UL; + + if (pte_dirty(orig)) { + unsigned long paddr, pfn = pte_pfn(orig); + struct address_space *mapping; + struct page *page; + + if (!pfn_valid(pfn)) + goto no_cache_flush; + + page = pfn_to_page(pfn); + if (PageReserved(page)) + goto no_cache_flush; + + /* A real file page? */ + mapping = page_mapping(page); + if (!mapping) + goto no_cache_flush; + + paddr = (unsigned long) page_address(page); + if ((paddr ^ vaddr) & (1 << 13)) + flush_dcache_page_all(mm, page); + } + +no_cache_flush: + if (mp->tlb_frozen) + return; + + nr = mp->tlb_nr; + + if (unlikely(nr != 0 && mm != mp->mm)) { + flush_tlb_pending(); + nr = 0; + } + + if (nr == 0) + mp->mm = mm; + + mp->vaddrs[nr] = vaddr; + mp->tlb_nr = ++nr; + if (nr >= TLB_BATCH_NR) + flush_tlb_pending(); +} + +void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end) +{ + struct mmu_gather *mp = &__get_cpu_var(mmu_gathers); + unsigned long nr = mp->tlb_nr; + long s = start, e = end, vpte_base; + + if (mp->tlb_frozen) + return; + + /* Nobody should call us with start below VM hole and end above. + * See if it is really true. + */ + BUG_ON(s > e); + +#if 0 + /* Currently free_pgtables guarantees this. */ + s &= PMD_MASK; + e = (e + PMD_SIZE - 1) & PMD_MASK; +#endif + vpte_base = (tlb_type == spitfire ? 
+ VPTE_BASE_SPITFIRE : + VPTE_BASE_CHEETAH); + + if (unlikely(nr != 0 && mm != mp->mm)) { + flush_tlb_pending(); + nr = 0; + } + + if (nr == 0) + mp->mm = mm; + + start = vpte_base + (s >> (PAGE_SHIFT - 3)); + end = vpte_base + (e >> (PAGE_SHIFT - 3)); + while (start < end) { + mp->vaddrs[nr] = start; + mp->tlb_nr = ++nr; + if (nr >= TLB_BATCH_NR) { + flush_tlb_pending(); + nr = 0; + } + start += PAGE_SIZE; + } + if (nr) + flush_tlb_pending(); +} + +unsigned long __ptrs_per_pmd(void) +{ + if (test_thread_flag(TIF_32BIT)) + return (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT)); + return REAL_PTRS_PER_PMD; +} diff --git a/arch/um/drivers/cow_kern.c b/arch/um/drivers/cow_kern.c new file mode 100644 index 000000000..ad843f3b6 --- /dev/null +++ b/arch/um/drivers/cow_kern.c @@ -0,0 +1,630 @@ +#define COW_MAJOR 60 +#define MAJOR_NR COW_MAJOR + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "2_5compat.h" +#include "cow.h" +#include "ubd_user.h" + +#define COW_SHIFT 4 + +struct cow { + int count; + char *cow_path; + dev_t cow_dev; + struct block_device *cow_bdev; + char *backing_path; + dev_t backing_dev; + struct block_device *backing_bdev; + int sectorsize; + unsigned long *bitmap; + unsigned long bitmap_len; + int bitmap_offset; + int data_offset; + devfs_handle_t devfs; + struct semaphore sem; + struct semaphore io_sem; + atomic_t working; + spinlock_t io_lock; + struct buffer_head *bh; + struct buffer_head *bhtail; + void *end_io; +}; + +#define DEFAULT_COW { \ + .count = 0, \ + .cow_path = NULL, \ + .cow_dev = 0, \ + .backing_path = NULL, \ + .backing_dev = 0, \ + .bitmap = NULL, \ + .bitmap_len = 0, \ + .bitmap_offset = 0, \ + .data_offset = 0, \ + .devfs = NULL, \ + .working = ATOMIC_INIT(0), \ + .io_lock = SPIN_LOCK_UNLOCKED, \ +} + +#define MAX_DEV (8) +#define MAX_MINOR (MAX_DEV << COW_SHIFT) + +struct cow cow_dev[MAX_DEV] = { [ 0 ... MAX_DEV - 1 ] = DEFAULT_COW }; + +/* Not modified by this driver */ +static int blk_sizes[MAX_MINOR] = { [ 0 ... MAX_MINOR - 1 ] = BLOCK_SIZE }; +static int hardsect_sizes[MAX_MINOR] = { [ 0 ... MAX_MINOR - 1 ] = 512 }; + +/* Protected by cow_lock */ +static int sizes[MAX_MINOR] = { [ 0 ... MAX_MINOR - 1 ] = 0 }; + +static struct hd_struct cow_part[MAX_MINOR] = + { [ 0 ... 
MAX_MINOR - 1 ] = { 0, 0, 0 } }; + +/* Protected by io_request_lock */ +static request_queue_t *cow_queue; + +static int cow_open(struct inode *inode, struct file *filp); +static int cow_release(struct inode * inode, struct file * file); +static int cow_ioctl(struct inode * inode, struct file * file, + unsigned int cmd, unsigned long arg); +static int cow_revalidate(kdev_t rdev); + +static struct block_device_operations cow_blops = { + .open = cow_open, + .release = cow_release, + .ioctl = cow_ioctl, + .revalidate = cow_revalidate, +}; + +/* Initialized in an initcall, and unchanged thereafter */ +devfs_handle_t cow_dir_handle; + +#define INIT_GENDISK(maj, name, parts, shift, bsizes, max, blops) \ +{ \ + .major = maj, \ + .major_name = name, \ + .minor_shift = shift, \ + .max_p = 1 << shift, \ + .part = parts, \ + .sizes = bsizes, \ + .nr_real = max, \ + .real_devices = NULL, \ + .next = NULL, \ + .fops = blops, \ + .de_arr = NULL, \ + .flags = 0 \ +} + +static spinlock_t cow_lock = SPIN_LOCK_UNLOCKED; + +static struct gendisk cow_gendisk = INIT_GENDISK(MAJOR_NR, "cow", cow_part, + COW_SHIFT, sizes, MAX_DEV, + &cow_blops); + +static int cow_add(int n) +{ + struct cow *dev = &cow_dev[n]; + char name[sizeof("nnnnnn\0")]; + int err = -ENODEV; + + if(dev->cow_path == NULL) + goto out; + + sprintf(name, "%d", n); + dev->devfs = devfs_register(cow_dir_handle, name, DEVFS_FL_REMOVABLE, + MAJOR_NR, n << COW_SHIFT, S_IFBLK | + S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP, + &cow_blops, NULL); + + init_MUTEX_LOCKED(&dev->sem); + init_MUTEX(&dev->io_sem); + + return(0); + + out: + return(err); +} + +/* + * Add buffer_head to back of pending list + */ +static void cow_add_bh(struct cow *cow, struct buffer_head *bh) +{ + unsigned long flags; + + spin_lock_irqsave(&cow->io_lock, flags); + if(cow->bhtail != NULL){ + cow->bhtail->b_reqnext = bh; + cow->bhtail = bh; + } + else { + cow->bh = bh; + cow->bhtail = bh; + } + spin_unlock_irqrestore(&cow->io_lock, flags); +} + +/* +* Grab first pending buffer +*/ +static struct buffer_head *cow_get_bh(struct cow *cow) +{ + struct buffer_head *bh; + + spin_lock_irq(&cow->io_lock); + bh = cow->bh; + if(bh != NULL){ + if(bh == cow->bhtail) + cow->bhtail = NULL; + cow->bh = bh->b_reqnext; + bh->b_reqnext = NULL; + } + spin_unlock_irq(&cow->io_lock); + + return(bh); +} + +static void cow_handle_bh(struct cow *cow, struct buffer_head *bh, + struct buffer_head **cow_bh, int ncow_bh) +{ + int i; + + if(ncow_bh > 0) + ll_rw_block(WRITE, ncow_bh, cow_bh); + + for(i = 0; i < ncow_bh ; i++){ + wait_on_buffer(cow_bh[i]); + brelse(cow_bh[i]); + } + + ll_rw_block(WRITE, 1, &bh); + brelse(bh); +} + +static struct buffer_head *cow_new_bh(struct cow *dev, int sector) +{ + struct buffer_head *bh; + + sector = (dev->bitmap_offset + sector / 8) / dev->sectorsize; + bh = getblk(dev->cow_dev, sector, dev->sectorsize); + memcpy(bh->b_data, dev->bitmap + sector / (8 * sizeof(dev->bitmap[0])), + dev->sectorsize); + return(bh); +} + +/* Copied from loop.c, needed to avoid deadlocking in make_request. 
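+
+   cow_make_request() queues writes to not-yet-copied sectors and ups
+   io_sem; this thread then marks the COW bitmap, writes the affected
+   bitmap blocks with ll_rw_block(), waits for them, and finally issues
+   the data write itself (see cow_handle_bh() above).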
*/ + +static int cow_thread(void *data) +{ + struct cow *dev = data; + struct buffer_head *bh; + + daemonize(); + exit_files(current); + + sprintf(current->comm, "cow%d", dev - cow_dev); + + spin_lock_irq(¤t->sigmask_lock); + sigfillset(¤t->blocked); + flush_signals(current); + spin_unlock_irq(¤t->sigmask_lock); + + atomic_inc(&dev->working); + + current->policy = SCHED_OTHER; + current->nice = -20; + + current->flags |= PF_NOIO; + + /* + * up sem, we are running + */ + up(&dev->sem); + + for(;;){ + int start, len, nbh, i, update_bitmap = 0; + struct buffer_head *cow_bh[2]; + + down_interruptible(&dev->io_sem); + /* + * could be upped because of tear-down, not because of + * pending work + */ + if(!atomic_read(&dev->working)) + break; + + bh = cow_get_bh(dev); + if(bh == NULL){ + printk(KERN_ERR "cow: missing bh\n"); + continue; + } + + start = bh->b_blocknr * bh->b_size / dev->sectorsize; + len = bh->b_size / dev->sectorsize; + for(i = 0; i < len ; i++){ + if(ubd_test_bit(start + i, + (unsigned char *) dev->bitmap)) + continue; + + update_bitmap = 1; + ubd_set_bit(start + i, (unsigned char *) dev->bitmap); + } + + cow_bh[0] = NULL; + cow_bh[1] = NULL; + nbh = 0; + if(update_bitmap){ + cow_bh[0] = cow_new_bh(dev, start); + nbh++; + if(start / dev->sectorsize != + (start + len) / dev->sectorsize){ + cow_bh[1] = cow_new_bh(dev, start + len); + nbh++; + } + } + + bh->b_dev = dev->cow_dev; + bh->b_blocknr += dev->data_offset / dev->sectorsize; + + cow_handle_bh(dev, bh, cow_bh, nbh); + + /* + * upped both for pending work and tear-down, lo_pending + * will hit zero then + */ + if(atomic_dec_and_test(&dev->working)) + break; + } + + up(&dev->sem); + return(0); +} + +static int cow_make_request(request_queue_t *q, int rw, struct buffer_head *bh) +{ + struct cow *dev; + int n, minor; + + minor = MINOR(bh->b_rdev); + n = minor >> COW_SHIFT; + dev = &cow_dev[n]; + + dev->end_io = NULL; + if(ubd_test_bit(bh->b_rsector, (unsigned char *) dev->bitmap)){ + bh->b_rdev = dev->cow_dev; + bh->b_rsector += dev->data_offset / dev->sectorsize; + } + else if(rw == WRITE){ + bh->b_dev = dev->cow_dev; + bh->b_blocknr += dev->data_offset / dev->sectorsize; + + cow_add_bh(dev, bh); + up(&dev->io_sem); + return(0); + } + else { + bh->b_rdev = dev->backing_dev; + } + + return(1); +} + +int cow_init(void) +{ + int i; + + cow_dir_handle = devfs_mk_dir (NULL, "cow", NULL); + if (devfs_register_blkdev(MAJOR_NR, "cow", &cow_blops)) { + printk(KERN_ERR "cow: unable to get major %d\n", MAJOR_NR); + return -1; + } + read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */ + blksize_size[MAJOR_NR] = blk_sizes; + blk_size[MAJOR_NR] = sizes; + INIT_HARDSECT(hardsect_size, MAJOR_NR, hardsect_sizes); + + cow_queue = BLK_DEFAULT_QUEUE(MAJOR_NR); + blk_init_queue(cow_queue, NULL); + INIT_ELV(cow_queue, &cow_queue->elevator); + blk_queue_make_request(cow_queue, cow_make_request); + + add_gendisk(&cow_gendisk); + + for(i=0;i 0){ + n = (left > blocksize) ? blocksize : left; + + bh = bread(dev, block, (n < 512) ? 
512 : n); + if(bh == NULL) + return(-EIO); + + n -= offset; + memcpy(&buf[cur], bh->b_data + offset, n); + block++; + left -= n; + cur += n; + offset = 0; + brelse(bh); + } + + return(count); +} + +static int cow_open(struct inode *inode, struct file *filp) +{ + int (*dev_ioctl)(struct inode *, struct file *, unsigned int, + unsigned long); + mm_segment_t fs; + struct cow *dev; + __u64 size; + __u32 version, align; + time_t mtime; + char *backing_file; + int n, offset, err = 0; + + n = DEVICE_NR(inode->i_rdev); + if(n >= MAX_DEV) + return(-ENODEV); + dev = &cow_dev[n]; + offset = n << COW_SHIFT; + + spin_lock(&cow_lock); + + if(dev->count == 0){ + dev->cow_dev = name_to_kdev_t(dev->cow_path); + if(dev->cow_dev == 0){ + printk(KERN_ERR "cow_open - name_to_kdev_t(\"%s\") " + "failed\n", dev->cow_path); + err = -ENODEV; + } + + dev->backing_dev = name_to_kdev_t(dev->backing_path); + if(dev->backing_dev == 0){ + printk(KERN_ERR "cow_open - name_to_kdev_t(\"%s\") " + "failed\n", dev->backing_path); + err = -ENODEV; + } + + if(err) + goto out; + + dev->cow_bdev = bdget(dev->cow_dev); + if(dev->cow_bdev == NULL){ + printk(KERN_ERR "cow_open - bdget(\"%s\") failed\n", + dev->cow_path); + err = -ENOMEM; + } + dev->backing_bdev = bdget(dev->backing_dev); + if(dev->backing_bdev == NULL){ + printk(KERN_ERR "cow_open - bdget(\"%s\") failed\n", + dev->backing_path); + err = -ENOMEM; + } + + if(err) + goto out; + + err = blkdev_get(dev->cow_bdev, FMODE_READ|FMODE_WRITE, 0, + BDEV_RAW); + if(err){ + printk("cow_open - blkdev_get of COW device failed, " + "error = %d\n", err); + goto out; + } + + err = blkdev_get(dev->backing_bdev, FMODE_READ, 0, BDEV_RAW); + if(err){ + printk("cow_open - blkdev_get of backing device " + "failed, error = %d\n", err); + goto out; + } + + err = read_cow_header(reader, &dev->cow_dev, &version, + &backing_file, &mtime, &size, + &dev->sectorsize, &align, + &dev->bitmap_offset); + if(err){ + printk(KERN_ERR "cow_open - read_cow_header failed, " + "err = %d\n", err); + goto out; + } + + cow_sizes(version, size, dev->sectorsize, align, + dev->bitmap_offset, &dev->bitmap_len, + &dev->data_offset); + dev->bitmap = (void *) vmalloc(dev->bitmap_len); + if(dev->bitmap == NULL){ + err = -ENOMEM; + printk(KERN_ERR "Failed to vmalloc COW bitmap\n"); + goto out; + } + flush_tlb_kernel_vm(); + + err = reader(dev->bitmap_offset, (char *) dev->bitmap, + dev->bitmap_len, &dev->cow_dev); + if(err < 0){ + printk(KERN_ERR "Failed to read COW bitmap\n"); + vfree(dev->bitmap); + goto out; + } + + dev_ioctl = dev->backing_bdev->bd_op->ioctl; + fs = get_fs(); + set_fs(KERNEL_DS); + err = (*dev_ioctl)(inode, filp, BLKGETSIZE, + (unsigned long) &sizes[offset]); + set_fs(fs); + if(err){ + printk(KERN_ERR "cow_open - BLKGETSIZE failed, " + "error = %d\n", err); + goto out; + } + + kernel_thread(cow_thread, dev, + CLONE_FS | CLONE_FILES | CLONE_SIGHAND); + down(&dev->sem); + } + dev->count++; + out: + spin_unlock(&cow_lock); + return(err); +} + +static int cow_release(struct inode * inode, struct file * file) +{ + struct cow *dev; + int n, err; + + n = DEVICE_NR(inode->i_rdev); + if(n >= MAX_DEV) + return(-ENODEV); + dev = &cow_dev[n]; + + spin_lock(&cow_lock); + + if(--dev->count > 0) + goto out; + + err = blkdev_put(dev->cow_bdev, BDEV_RAW); + if(err) + printk("cow_release - blkdev_put of cow device failed, " + "error = %d\n", err); + bdput(dev->cow_bdev); + dev->cow_bdev = 0; + + err = blkdev_put(dev->backing_bdev, BDEV_RAW); + if(err) + printk("cow_release - blkdev_put of backing device failed, " + 
"error = %d\n", err); + bdput(dev->backing_bdev); + dev->backing_bdev = 0; + + out: + spin_unlock(&cow_lock); + return(0); +} + +static int cow_ioctl(struct inode * inode, struct file * file, + unsigned int cmd, unsigned long arg) +{ + struct cow *dev; + int (*dev_ioctl)(struct inode *, struct file *, unsigned int, + unsigned long); + int n; + + n = DEVICE_NR(inode->i_rdev); + if(n >= MAX_DEV) + return(-ENODEV); + dev = &cow_dev[n]; + + dev_ioctl = dev->backing_bdev->bd_op->ioctl; + return((*dev_ioctl)(inode, file, cmd, arg)); +} + +static int cow_revalidate(kdev_t rdev) +{ + printk(KERN_ERR "Need to implement cow_revalidate\n"); + return(0); +} + +static int parse_unit(char **ptr) +{ + char *str = *ptr, *end; + int n = -1; + + if(isdigit(*str)) { + n = simple_strtoul(str, &end, 0); + if(end == str) + return(-1); + *ptr = end; + } + else if (('a' <= *str) && (*str <= 'h')) { + n = *str - 'a'; + str++; + *ptr = str; + } + return(n); +} + +static int cow_setup(char *str) +{ + struct cow *dev; + char *cow_name, *backing_name; + int unit; + + unit = parse_unit(&str); + if(unit < 0){ + printk(KERN_ERR "cow_setup - Couldn't parse unit number\n"); + return(1); + } + + if(*str != '='){ + printk(KERN_ERR "cow_setup - Missing '=' after unit " + "number\n"); + return(1); + } + str++; + + cow_name = str; + backing_name = strchr(str, ','); + if(backing_name == NULL){ + printk(KERN_ERR "cow_setup - missing backing device name\n"); + return(0); + } + *backing_name = '\0'; + backing_name++; + + spin_lock(&cow_lock); + + dev = &cow_dev[unit]; + dev->cow_path = cow_name; + dev->backing_path = backing_name; + + spin_unlock(&cow_lock); + return(0); +} + +__setup("cow", cow_setup); + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/drivers/cow_sys.h b/arch/um/drivers/cow_sys.h new file mode 100644 index 000000000..ce251f083 --- /dev/null +++ b/arch/um/drivers/cow_sys.h @@ -0,0 +1,48 @@ +#ifndef __COW_SYS_H__ +#define __COW_SYS_H__ + +#include "kern_util.h" +#include "user_util.h" +#include "os.h" +#include "user.h" + +static inline void *cow_malloc(int size) +{ + return(um_kmalloc(size)); +} + +static inline void cow_free(void *ptr) +{ + kfree(ptr); +} + +#define cow_printf printk + +static inline char *cow_strdup(char *str) +{ + return(uml_strdup(str)); +} + +static inline int cow_seek_file(int fd, __u64 offset) +{ + return(os_seek_file(fd, offset)); +} + +static inline int cow_file_size(char *file, __u64 *size_out) +{ + return(os_file_size(file, size_out)); +} + +static inline int cow_write_file(int fd, char *buf, int size) +{ + return(os_write_file(fd, buf, size)); +} + +#endif + +/* + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/drivers/net_kern.c.orig b/arch/um/drivers/net_kern.c.orig new file mode 100644 index 000000000..4c2ee09b0 --- /dev/null +++ b/arch/um/drivers/net_kern.c.orig @@ -0,0 +1,870 @@ +/* + * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and + * James Leu (jleu@mindspring.net). + * Copyright (C) 2001 by various other people who didn't put their name here. + * Licensed under the GPL. 
+ */ + +#include "linux/config.h" +#include "linux/kernel.h" +#include "linux/netdevice.h" +#include "linux/rtnetlink.h" +#include "linux/skbuff.h" +#include "linux/socket.h" +#include "linux/spinlock.h" +#include "linux/module.h" +#include "linux/init.h" +#include "linux/etherdevice.h" +#include "linux/list.h" +#include "linux/inetdevice.h" +#include "linux/ctype.h" +#include "linux/bootmem.h" +#include "user_util.h" +#include "kern_util.h" +#include "net_kern.h" +#include "net_user.h" +#include "mconsole_kern.h" +#include "init.h" +#include "irq_user.h" + +static spinlock_t opened_lock = SPIN_LOCK_UNLOCKED; +LIST_HEAD(opened); + +static int uml_net_rx(struct net_device *dev) +{ + struct uml_net_private *lp = dev->priv; + int pkt_len; + struct sk_buff *skb; + + /* If we can't allocate memory, try again next round. */ + if ((skb = dev_alloc_skb(dev->mtu)) == NULL) { + lp->stats.rx_dropped++; + return 0; + } + + skb->dev = dev; + skb_put(skb, dev->mtu); + skb->mac.raw = skb->data; + pkt_len = (*lp->read)(lp->fd, &skb, lp); + + if (pkt_len > 0) { + skb_trim(skb, pkt_len); + skb->protocol = (*lp->protocol)(skb); + netif_rx(skb); + + lp->stats.rx_bytes += skb->len; + lp->stats.rx_packets++; + return pkt_len; + } + + kfree_skb(skb); + return pkt_len; +} + +void uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct uml_net_private *lp = dev->priv; + int err; + + if(!netif_running(dev)) + return; + + spin_lock(&lp->lock); + while((err = uml_net_rx(dev)) > 0) ; + if(err < 0) { + printk(KERN_ERR + "Device '%s' read returned %d, shutting it down\n", + dev->name, err); + dev_close(dev); + goto out; + } + reactivate_fd(lp->fd, UM_ETH_IRQ); + + out: + spin_unlock(&lp->lock); +} + +static int uml_net_open(struct net_device *dev) +{ + struct uml_net_private *lp = dev->priv; + char addr[sizeof("255.255.255.255\0")]; + int err; + + spin_lock(&lp->lock); + + if(lp->fd >= 0){ + err = -ENXIO; + goto out; + } + + if(!lp->have_mac){ + dev_ip_addr(dev, addr, &lp->mac[2]); + set_ether_mac(dev, lp->mac); + } + + lp->fd = (*lp->open)(&lp->user); + if(lp->fd < 0){ + err = lp->fd; + goto out; + } + + err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt, + SA_INTERRUPT | SA_SHIRQ, dev->name, dev); + if(err != 0){ + printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err); + if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user); + lp->fd = -1; + err = -ENETUNREACH; + } + + lp->tl.data = (unsigned long) &lp->user; + netif_start_queue(dev); + + spin_lock(&opened_lock); + list_add(&lp->list, &opened); + spin_unlock(&opened_lock); + MOD_INC_USE_COUNT; + out: + spin_unlock(&lp->lock); + return(err); +} + +static int uml_net_close(struct net_device *dev) +{ + struct uml_net_private *lp = dev->priv; + + netif_stop_queue(dev); + spin_lock(&lp->lock); + + free_irq(dev->irq, dev); + if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user); + lp->fd = -1; + spin_lock(&opened_lock); + list_del(&lp->list); + spin_unlock(&opened_lock); + + MOD_DEC_USE_COUNT; + spin_unlock(&lp->lock); + return 0; +} + +static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct uml_net_private *lp = dev->priv; + unsigned long flags; + int len; + + netif_stop_queue(dev); + + spin_lock_irqsave(&lp->lock, flags); + + len = (*lp->write)(lp->fd, &skb, lp); + + if(len == skb->len) { + lp->stats.tx_packets++; + lp->stats.tx_bytes += skb->len; + dev->trans_start = jiffies; + netif_start_queue(dev); + + /* this is normally done in the interrupt when tx finishes 
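+		   (these virtual devices write the frame straight out
+		   through a host file descriptor and have no tx-completion
+		   interrupt, so the queue is woken as soon as the write
+		   returns)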
*/ + netif_wake_queue(dev); + } + else if(len == 0){ + netif_start_queue(dev); + lp->stats.tx_dropped++; + } + else { + netif_start_queue(dev); + printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len); + } + + spin_unlock_irqrestore(&lp->lock, flags); + + dev_kfree_skb(skb); + + return 0; +} + +static struct net_device_stats *uml_net_get_stats(struct net_device *dev) +{ + struct uml_net_private *lp = dev->priv; + return &lp->stats; +} + +static void uml_net_set_multicast_list(struct net_device *dev) +{ + if (dev->flags & IFF_PROMISC) return; + else if (dev->mc_count) dev->flags |= IFF_ALLMULTI; + else dev->flags &= ~IFF_ALLMULTI; +} + +static void uml_net_tx_timeout(struct net_device *dev) +{ + dev->trans_start = jiffies; + netif_wake_queue(dev); +} + +static int uml_net_set_mac(struct net_device *dev, void *addr) +{ + struct uml_net_private *lp = dev->priv; + struct sockaddr *hwaddr = addr; + + spin_lock(&lp->lock); + memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN); + spin_unlock(&lp->lock); + + return(0); +} + +static int uml_net_change_mtu(struct net_device *dev, int new_mtu) +{ + struct uml_net_private *lp = dev->priv; + int err = 0; + + spin_lock(&lp->lock); + + new_mtu = (*lp->set_mtu)(new_mtu, &lp->user); + if(new_mtu < 0){ + err = new_mtu; + goto out; + } + + dev->mtu = new_mtu; + + out: + spin_unlock(&lp->lock); + return err; +} + +static int uml_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return(-EINVAL); +} + +void uml_net_user_timer_expire(unsigned long _conn) +{ +#ifdef undef + struct connection *conn = (struct connection *)_conn; + + dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn); + do_connect(conn); +#endif +} + +/* + * default do nothing hard header packet routines for struct net_device init. + * real ethernet transports will overwrite with real routines. 
+ */ +static int uml_net_hard_header(struct sk_buff *skb, struct net_device *dev, + unsigned short type, void *daddr, void *saddr, unsigned len) +{ + return(0); /* no change */ +} + +static int uml_net_rebuild_header(struct sk_buff *skb) +{ + return(0); /* ignore */ +} + +static int uml_net_header_cache(struct neighbour *neigh, struct hh_cache *hh) +{ + return(-1); /* fail */ +} + +static void uml_net_header_cache_update(struct hh_cache *hh, + struct net_device *dev, unsigned char * haddr) +{ + /* ignore */ +} + +static int uml_net_header_parse(struct sk_buff *skb, unsigned char *haddr) +{ + return(0); /* nothing */ +} + +static spinlock_t devices_lock = SPIN_LOCK_UNLOCKED; +static struct list_head devices = LIST_HEAD_INIT(devices); + +static int eth_configure(int n, void *init, char *mac, + struct transport *transport) +{ + struct uml_net *device; + struct net_device *dev; + struct uml_net_private *lp; + int err, size; + + size = transport->private_size + sizeof(struct uml_net_private) + + sizeof(((struct uml_net_private *) 0)->user); + + device = kmalloc(sizeof(*device), GFP_KERNEL); + if (device == NULL) { + printk(KERN_ERR "eth_configure failed to allocate uml_net\n"); + return(1); + } + + memset(device, 0, sizeof(*device)); + INIT_LIST_HEAD(&device->list); + device->index = n; + + spin_lock(&devices_lock); + list_add(&device->list, &devices); + spin_unlock(&devices_lock); + + if (setup_etheraddr(mac, device->mac)) + device->have_mac = 1; + + printk(KERN_INFO "Netdevice %d ", n); + if (device->have_mac) + printk("(%02x:%02x:%02x:%02x:%02x:%02x) ", + device->mac[0], device->mac[1], + device->mac[2], device->mac[3], + device->mac[4], device->mac[5]); + printk(": "); + dev = alloc_etherdev(size); + if (dev == NULL) { + printk(KERN_ERR "eth_configure: failed to allocate device\n"); + return 1; + } + + /* If this name ends up conflicting with an existing registered + * netdevice, that is OK, register_netdev{,ice}() will notice this + * and fail. + */ + snprintf(dev->name, sizeof(dev->name), "eth%d", n); + device->dev = dev; + + dev->hard_header = uml_net_hard_header; + dev->rebuild_header = uml_net_rebuild_header; + dev->hard_header_cache = uml_net_header_cache; + dev->header_cache_update= uml_net_header_cache_update; + dev->hard_header_parse = uml_net_header_parse; + + (*transport->kern->init)(dev, init); + + dev->mtu = transport->user->max_packet; + dev->open = uml_net_open; + dev->hard_start_xmit = uml_net_start_xmit; + dev->stop = uml_net_close; + dev->get_stats = uml_net_get_stats; + dev->set_multicast_list = uml_net_set_multicast_list; + dev->tx_timeout = uml_net_tx_timeout; + dev->set_mac_address = uml_net_set_mac; + dev->change_mtu = uml_net_change_mtu; + dev->do_ioctl = uml_net_ioctl; + dev->watchdog_timeo = (HZ >> 1); + dev->irq = UM_ETH_IRQ; + + rtnl_lock(); + err = register_netdevice(dev); + rtnl_unlock(); + if (err) { + device->dev = NULL; + /* XXX: should we call ->remove() here? 
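+ * At this point transport->user->init has not run yet (it is only
+ * called further down, after register_netdevice() succeeds), so there
+ * is presumably nothing on the user side for ->remove() to undo.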
*/ + free_netdev(dev); + return 1; + } + lp = dev->priv; + + INIT_LIST_HEAD(&lp->list); + spin_lock_init(&lp->lock); + lp->dev = dev; + lp->fd = -1; + lp->mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0 }; + lp->have_mac = device->have_mac; + lp->protocol = transport->kern->protocol; + lp->open = transport->user->open; + lp->close = transport->user->close; + lp->remove = transport->user->remove; + lp->read = transport->kern->read; + lp->write = transport->kern->write; + lp->add_address = transport->user->add_address; + lp->delete_address = transport->user->delete_address; + lp->set_mtu = transport->user->set_mtu; + + init_timer(&lp->tl); + lp->tl.function = uml_net_user_timer_expire; + if (lp->have_mac) + memcpy(lp->mac, device->mac, sizeof(lp->mac)); + + if (transport->user->init) + (*transport->user->init)(&lp->user, dev); + + if (device->have_mac) + set_ether_mac(dev, device->mac); + return(0); +} + +static struct uml_net *find_device(int n) +{ + struct uml_net *device; + struct list_head *ele; + + spin_lock(&devices_lock); + list_for_each(ele, &devices){ + device = list_entry(ele, struct uml_net, list); + if(device->index == n) + goto out; + } + device = NULL; + out: + spin_unlock(&devices_lock); + return(device); +} + +static int eth_parse(char *str, int *index_out, char **str_out) +{ + char *end; + int n; + + n = simple_strtoul(str, &end, 0); + if(end == str){ + printk(KERN_ERR "eth_setup: Failed to parse '%s'\n", str); + return(1); + } + if(n < 0){ + printk(KERN_ERR "eth_setup: device %d is negative\n", n); + return(1); + } + str = end; + if(*str != '='){ + printk(KERN_ERR + "eth_setup: expected '=' after device number\n"); + return(1); + } + str++; + if(find_device(n)){ + printk(KERN_ERR "eth_setup: Device %d already configured\n", + n); + return(1); + } + if(index_out) *index_out = n; + *str_out = str; + return(0); +} + +struct eth_init { + struct list_head list; + char *init; + int index; +}; + +/* Filled in at boot time. Will need locking if the transports become + * modular. 
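+ * register_transport() adds entries and eth_setup_common() walks the
+ * list, in both cases without taking any lock.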
+ */ +struct list_head transports = LIST_HEAD_INIT(transports); + +/* Filled in during early boot */ +struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line); + +static int check_transport(struct transport *transport, char *eth, int n, + void **init_out, char **mac_out) +{ + int len; + + len = strlen(transport->name); + if(strncmp(eth, transport->name, len)) + return(0); + + eth += len; + if(*eth == ',') + eth++; + else if(*eth != '\0') + return(0); + + *init_out = kmalloc(transport->setup_size, GFP_KERNEL); + if(*init_out == NULL) + return(1); + + if(!transport->setup(eth, mac_out, *init_out)){ + kfree(*init_out); + *init_out = NULL; + } + return(1); +} + +void register_transport(struct transport *new) +{ + struct list_head *ele, *next; + struct eth_init *eth; + void *init; + char *mac = NULL; + int match; + + list_add(&new->list, &transports); + + list_for_each_safe(ele, next, ð_cmd_line){ + eth = list_entry(ele, struct eth_init, list); + match = check_transport(new, eth->init, eth->index, &init, + &mac); + if(!match) + continue; + else if(init != NULL){ + eth_configure(eth->index, init, mac, new); + kfree(init); + } + list_del(ð->list); + } +} + +static int eth_setup_common(char *str, int index) +{ + struct list_head *ele; + struct transport *transport; + void *init; + char *mac = NULL; + + list_for_each(ele, &transports){ + transport = list_entry(ele, struct transport, list); + if(!check_transport(transport, str, index, &init, &mac)) + continue; + if(init != NULL){ + eth_configure(index, init, mac, transport); + kfree(init); + } + return(1); + } + return(0); +} + +static int eth_setup(char *str) +{ + struct eth_init *new; + int n, err; + + err = eth_parse(str, &n, &str); + if(err) return(1); + + new = alloc_bootmem(sizeof(new)); + if (new == NULL){ + printk("eth_init : alloc_bootmem failed\n"); + return(1); + } + + INIT_LIST_HEAD(&new->list); + new->index = n; + new->init = str; + + list_add_tail(&new->list, ð_cmd_line); + return(1); +} + +__setup("eth", eth_setup); +__uml_help(eth_setup, +"eth[0-9]+=,\n" +" Configure a network device.\n\n" +); + +static int eth_init(void) +{ + struct list_head *ele, *next; + struct eth_init *eth; + + list_for_each_safe(ele, next, ð_cmd_line){ + eth = list_entry(ele, struct eth_init, list); + + if(eth_setup_common(eth->init, eth->index)) + list_del(ð->list); + } + + return(1); +} + +__initcall(eth_init); + +static int net_config(char *str) +{ + int n, err; + + err = eth_parse(str, &n, &str); + if(err) return(err); + + str = uml_strdup(str); + if(str == NULL){ + printk(KERN_ERR "net_config failed to strdup string\n"); + return(-1); + } + err = !eth_setup_common(str, n); + if(err) + kfree(str); + return(err); +} + +static int net_remove(char *str) +{ + struct uml_net *device; + struct net_device *dev; + struct uml_net_private *lp; + char *end; + int n; + + n = simple_strtoul(str, &end, 0); + if((*end != '\0') || (end == str)) + return(-1); + + device = find_device(n); + if(device == NULL) + return(0); + + dev = device->dev; + lp = dev->priv; + if(lp->fd > 0) return(-1); + if(lp->remove != NULL) (*lp->remove)(&lp->user); + unregister_netdev(dev); + + list_del(&device->list); + free_netdev(device); + return(0); +} + +static struct mc_device net_mc = { + .name = "eth", + .config = net_config, + .get_config = NULL, + .remove = net_remove, +}; + +static int uml_inetaddr_event(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct in_ifaddr *ifa = ptr; + u32 addr = ifa->ifa_address; + u32 netmask = ifa->ifa_mask; + struct net_device 
*dev = ifa->ifa_dev->dev; + struct uml_net_private *lp; + void (*proc)(unsigned char *, unsigned char *, void *); + unsigned char addr_buf[4], netmask_buf[4]; + + if(dev->open != uml_net_open) return(NOTIFY_DONE); + + lp = dev->priv; + + proc = NULL; + switch (event){ + case NETDEV_UP: + proc = lp->add_address; + break; + case NETDEV_DOWN: + proc = lp->delete_address; + break; + } + if(proc != NULL){ + addr_buf[0] = addr & 0xff; + addr_buf[1] = (addr >> 8) & 0xff; + addr_buf[2] = (addr >> 16) & 0xff; + addr_buf[3] = addr >> 24; + netmask_buf[0] = netmask & 0xff; + netmask_buf[1] = (netmask >> 8) & 0xff; + netmask_buf[2] = (netmask >> 16) & 0xff; + netmask_buf[3] = netmask >> 24; + (*proc)(addr_buf, netmask_buf, &lp->user); + } + return(NOTIFY_DONE); +} + +struct notifier_block uml_inetaddr_notifier = { + .notifier_call = uml_inetaddr_event, +}; + +static int uml_net_init(void) +{ + struct list_head *ele; + struct uml_net_private *lp; + struct in_device *ip; + struct in_ifaddr *in; + + mconsole_register_dev(&net_mc); + register_inetaddr_notifier(¨_inetaddr_notifier); + + /* Devices may have been opened already, so the uml_inetaddr_notifier + * didn't get a chance to run for them. This fakes it so that + * addresses which have already been set up get handled properly. + */ + list_for_each(ele, &opened){ + lp = list_entry(ele, struct uml_net_private, list); + ip = lp->dev->ip_ptr; + if(ip == NULL) continue; + in = ip->ifa_list; + while(in != NULL){ + uml_inetaddr_event(NULL, NETDEV_UP, in); + in = in->ifa_next; + } + } + + return(0); +} + +__initcall(uml_net_init); + +static void close_devices(void) +{ + struct list_head *ele; + struct uml_net_private *lp; + + list_for_each(ele, &opened){ + lp = list_entry(ele, struct uml_net_private, list); + if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user); + if(lp->remove != NULL) (*lp->remove)(&lp->user); + } +} + +__uml_exitcall(close_devices); + +int setup_etheraddr(char *str, unsigned char *addr) +{ + char *end; + int i; + + if(str == NULL) + return(0); + for(i=0;i<6;i++){ + addr[i] = simple_strtoul(str, &end, 16); + if((end == str) || + ((*end != ':') && (*end != ',') && (*end != '\0'))){ + printk(KERN_ERR + "setup_etheraddr: failed to parse '%s' " + "as an ethernet address\n", str); + return(0); + } + str = end + 1; + } + if(addr[0] & 1){ + printk(KERN_ERR + "Attempt to assign a broadcast ethernet address to a " + "device disallowed\n"); + return(0); + } + return(1); +} + +void dev_ip_addr(void *d, char *buf, char *bin_buf) +{ + struct net_device *dev = d; + struct in_device *ip = dev->ip_ptr; + struct in_ifaddr *in; + u32 addr; + + if((ip == NULL) || ((in = ip->ifa_list) == NULL)){ + printk(KERN_WARNING "dev_ip_addr - device not assigned an " + "IP address\n"); + return; + } + addr = in->ifa_address; + sprintf(buf, "%d.%d.%d.%d", addr & 0xff, (addr >> 8) & 0xff, + (addr >> 16) & 0xff, addr >> 24); + if(bin_buf){ + bin_buf[0] = addr & 0xff; + bin_buf[1] = (addr >> 8) & 0xff; + bin_buf[2] = (addr >> 16) & 0xff; + bin_buf[3] = addr >> 24; + } +} + +void set_ether_mac(void *d, unsigned char *addr) +{ + struct net_device *dev = d; + + memcpy(dev->dev_addr, addr, ETH_ALEN); +} + +struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra) +{ + if((skb != NULL) && (skb_tailroom(skb) < extra)){ + struct sk_buff *skb2; + + skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC); + dev_kfree_skb(skb); + skb = skb2; + } + if(skb != NULL) skb_put(skb, extra); + return(skb); +} + +void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *, + 
void *), + void *arg) +{ + struct net_device *dev = d; + struct in_device *ip = dev->ip_ptr; + struct in_ifaddr *in; + unsigned char address[4], netmask[4]; + + if(ip == NULL) return; + in = ip->ifa_list; + while(in != NULL){ + address[0] = in->ifa_address & 0xff; + address[1] = (in->ifa_address >> 8) & 0xff; + address[2] = (in->ifa_address >> 16) & 0xff; + address[3] = in->ifa_address >> 24; + netmask[0] = in->ifa_mask & 0xff; + netmask[1] = (in->ifa_mask >> 8) & 0xff; + netmask[2] = (in->ifa_mask >> 16) & 0xff; + netmask[3] = in->ifa_mask >> 24; + (*cb)(address, netmask, arg); + in = in->ifa_next; + } +} + +int dev_netmask(void *d, void *m) +{ + struct net_device *dev = d; + struct in_device *ip = dev->ip_ptr; + struct in_ifaddr *in; + __u32 *mask_out = m; + + if(ip == NULL) + return(1); + + in = ip->ifa_list; + if(in == NULL) + return(1); + + *mask_out = in->ifa_mask; + return(0); +} + +void *get_output_buffer(int *len_out) +{ + void *ret; + + ret = (void *) __get_free_pages(GFP_KERNEL, 0); + if(ret) *len_out = PAGE_SIZE; + else *len_out = 0; + return(ret); +} + +void free_output_buffer(void *buffer) +{ + free_pages((unsigned long) buffer, 0); +} + +int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out, + char **gate_addr) +{ + char *remain; + + remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL); + if(remain != NULL){ + printk("tap_setup_common - Extra garbage on specification : " + "'%s'\n", remain); + return(1); + } + + return(0); +} + +unsigned short eth_protocol(struct sk_buff *skb) +{ + return(eth_type_trans(skb, skb->dev)); +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/aio.h b/arch/um/include/aio.h new file mode 100644 index 000000000..6096f4f4e --- /dev/null +++ b/arch/um/include/aio.h @@ -0,0 +1,36 @@ +/* + * Copyright (C) 2004 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#ifndef AIO_H__ +#define AIO_H__ + +enum aio_type { AIO_READ, AIO_WRITE, AIO_MMAP }; + +struct aio_thread_reply { + void *data; + int err; +}; + +struct aio_context { + int reply_fd; +}; + +#define INIT_AIO_CONTEXT { .reply_fd = -1 } + +extern int submit_aio(enum aio_type type, int fd, char *buf, int len, + unsigned long long offset, int reply_fd, void *data); + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/filehandle.h b/arch/um/include/filehandle.h new file mode 100644 index 000000000..adc51087a --- /dev/null +++ b/arch/um/include/filehandle.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) + * Licensed under the GPL + */ + +#ifndef __FILEHANDLE_H__ +#define __FILEHANDLE_H__ + +#include "linux/list.h" +#include "linux/fs.h" +#include "os.h" + +struct file_handle { + struct list_head list; + int fd; + char *(*get_name)(struct inode *); + struct inode *inode; + struct openflags flags; +}; + +extern struct file_handle bad_filehandle; + +extern int open_file(char *name, struct openflags flags, int mode); +extern void *open_dir(char *file); +extern int open_filehandle(char *name, struct openflags flags, int mode, + struct file_handle *fh); +extern int read_file(struct file_handle *fh, unsigned long long offset, + char *buf, int len); +extern int write_file(struct file_handle *fh, unsigned long long offset, + const char *buf, int len); +extern int truncate_file(struct file_handle *fh, unsigned long long size); +extern int close_file(struct file_handle *fh); +extern void not_reclaimable(struct file_handle *fh); +extern void is_reclaimable(struct file_handle *fh, + char *(name_proc)(struct inode *), + struct inode *inode); +extern int filehandle_fd(struct file_handle *fh); +extern int make_pipe(struct file_handle *fhs); + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/include/skas_ptregs.h b/arch/um/include/skas_ptregs.h new file mode 100644 index 000000000..afd5fc34f --- /dev/null +++ b/arch/um/include/skas_ptregs.h @@ -0,0 +1,26 @@ +/* Automatically generated by arch/um/kernel/skas/util/mk_ptregs */ + +#ifndef __SKAS_PT_REGS_ +#define __SKAS_PT_REGS_ + +#define HOST_FRAME_SIZE 17 +#define HOST_FP_SIZE 27 +#define HOST_XFP_SIZE 128 +#define HOST_IP 12 +#define HOST_SP 15 +#define HOST_EFLAGS 14 +#define HOST_EAX 6 +#define HOST_EBX 0 +#define HOST_ECX 1 +#define HOST_EDX 2 +#define HOST_ESI 3 +#define HOST_EDI 4 +#define HOST_EBP 5 +#define HOST_CS 13 +#define HOST_SS 16 +#define HOST_DS 7 +#define HOST_FS 9 +#define HOST_ES 8 +#define HOST_GS 10 + +#endif diff --git a/arch/um/kernel/filehandle.c b/arch/um/kernel/filehandle.c new file mode 100644 index 000000000..a44dccf08 --- /dev/null +++ b/arch/um/kernel/filehandle.c @@ -0,0 +1,250 @@ +/* + * Copyright (C) 2004 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#include "linux/slab.h" +#include "linux/list.h" +#include "linux/spinlock.h" +#include "linux/fs.h" +#include "linux/errno.h" +#include "filehandle.h" +#include "os.h" +#include "kern_util.h" + +static spinlock_t open_files_lock = SPIN_LOCK_UNLOCKED; +static struct list_head open_files = LIST_HEAD_INIT(open_files); + +#define NUM_RECLAIM 128 + +static void reclaim_fds(void) +{ + struct file_handle *victim; + int closed = NUM_RECLAIM; + + spin_lock(&open_files_lock); + while(!list_empty(&open_files) && closed--){ + victim = list_entry(open_files.prev, struct file_handle, list); + os_close_file(victim->fd); + victim->fd = -1; + 
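+ /* active_handle() will reopen it by name and re-add it to open_files
+  * the next time the handle is used.
+  */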
list_del_init(&victim->list); + } + spin_unlock(&open_files_lock); +} + +int open_file(char *name, struct openflags flags, int mode) +{ + int fd; + + fd = os_open_file(name, flags, mode); + if(fd != -EMFILE) + return(fd); + + reclaim_fds(); + fd = os_open_file(name, flags, mode); + + return(fd); +} + +void *open_dir(char *file) +{ + void *dir; + int err; + + dir = os_open_dir(file, &err); + if(dir != NULL) + return(dir); + if(err != -EMFILE) + return(ERR_PTR(err)); + + reclaim_fds(); + + dir = os_open_dir(file, &err); + if(dir == NULL) + dir = ERR_PTR(err); + + return(dir); +} + +void not_reclaimable(struct file_handle *fh) +{ + char *name; + + if(fh->get_name == NULL) + return; + + if(list_empty(&fh->list)){ + name = (*fh->get_name)(fh->inode); + if(name != NULL){ + fh->fd = open_file(name, fh->flags, 0); + kfree(name); + } + else printk("File descriptor %d has no name\n", fh->fd); + } + else { + spin_lock(&open_files_lock); + list_del_init(&fh->list); + spin_unlock(&open_files_lock); + } +} + +void is_reclaimable(struct file_handle *fh, char *(name_proc)(struct inode *), + struct inode *inode) +{ + fh->get_name = name_proc; + fh->inode = inode; + + spin_lock(&open_files_lock); + list_add(&fh->list, &open_files); + spin_unlock(&open_files_lock); +} + +static int active_handle(struct file_handle *fh) +{ + int fd; + char *name; + + if(!list_empty(&fh->list)) + list_move(&fh->list, &open_files); + + if(fh->fd != -1) + return(0); + + if(fh->inode == NULL) + return(-ENOENT); + + name = (*fh->get_name)(fh->inode); + if(name == NULL) + return(-ENOMEM); + + fd = open_file(name, fh->flags, 0); + kfree(name); + if(fd < 0) + return(fd); + + fh->fd = fd; + is_reclaimable(fh, fh->get_name, fh->inode); + + return(0); +} + +int filehandle_fd(struct file_handle *fh) +{ + int err; + + err = active_handle(fh); + if(err) + return(err); + + return(fh->fd); +} + +static void init_fh(struct file_handle *fh, int fd, struct openflags flags) +{ + flags.c = 0; + *fh = ((struct file_handle) { .list = LIST_HEAD_INIT(fh->list), + .fd = fd, + .get_name = NULL, + .inode = NULL, + .flags = flags }); +} + +int open_filehandle(char *name, struct openflags flags, int mode, + struct file_handle *fh) +{ + int fd; + + fd = open_file(name, flags, mode); + if(fd < 0) + return(fd); + + init_fh(fh, fd, flags); + return(0); +} + +int close_file(struct file_handle *fh) +{ + spin_lock(&open_files_lock); + list_del(&fh->list); + spin_unlock(&open_files_lock); + + os_close_file(fh->fd); + + fh->fd = -1; + return(0); +} + +int read_file(struct file_handle *fh, unsigned long long offset, char *buf, + int len) +{ + int err; + + err = active_handle(fh); + if(err) + return(err); + + err = os_seek_file(fh->fd, offset); + if(err) + return(err); + + return(os_read_file(fh->fd, buf, len)); +} + +int write_file(struct file_handle *fh, unsigned long long offset, + const char *buf, int len) +{ + int err; + + err = active_handle(fh); + if(err) + return(err); + + if(offset != -1) + err = os_seek_file(fh->fd, offset); + if(err) + return(err); + + return(os_write_file(fh->fd, buf, len)); +} + +int truncate_file(struct file_handle *fh, unsigned long long size) +{ + int err; + + err = active_handle(fh); + if(err) + return(err); + + return(os_truncate_fd(fh->fd, size)); +} + +int make_pipe(struct file_handle *fhs) +{ + int fds[2], err; + + err = os_pipe(fds, 1, 1); + if(err && (err != -EMFILE)) + return(err); + + if(err){ + reclaim_fds(); + err = os_pipe(fds, 1, 1); + } + if(err) + return(err); + + init_fh(&fhs[0], fds[0], OPENFLAGS()); + 
init_fh(&fhs[1], fds[1], OPENFLAGS()); + return(0); +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/kernel/smp.c.orig b/arch/um/kernel/smp.c.orig new file mode 100644 index 000000000..34f826c08 --- /dev/null +++ b/arch/um/kernel/smp.c.orig @@ -0,0 +1,302 @@ +/* + * Copyright (C) 2000 Jeff Dike (jdike@karaya.com) + * Licensed under the GPL + */ + +#include "linux/config.h" + +#ifdef CONFIG_SMP + +#include "linux/sched.h" +#include "linux/module.h" +#include "linux/threads.h" +#include "linux/interrupt.h" +#include "linux/err.h" +#include "asm/smp.h" +#include "asm/processor.h" +#include "asm/spinlock.h" +#include "asm/hardirq.h" +#include "user_util.h" +#include "kern_util.h" +#include "kern.h" +#include "irq_user.h" +#include "os.h" + +/* CPU online map, set by smp_boot_cpus */ +unsigned long cpu_online_map = cpumask_of_cpu(0); + +EXPORT_SYMBOL(cpu_online_map); + +/* Per CPU bogomips and other parameters + * The only piece used here is the ipi pipe, which is set before SMP is + * started and never changed. + */ +struct cpuinfo_um cpu_data[NR_CPUS]; + +spinlock_t um_bh_lock = SPIN_LOCK_UNLOCKED; + +atomic_t global_bh_count; + +/* Not used by UML */ +unsigned char global_irq_holder = NO_PROC_ID; +unsigned volatile long global_irq_lock; + +/* Set when the idlers are all forked */ +int smp_threads_ready = 0; + +/* A statistic, can be a little off */ +int num_reschedules_sent = 0; + +/* Small, random number, never changed */ +unsigned long cache_decay_ticks = 5; + +/* Not changed after boot */ +struct task_struct *idle_threads[NR_CPUS]; + +void smp_send_reschedule(int cpu) +{ + write(cpu_data[cpu].ipi_pipe[1], "R", 1); + num_reschedules_sent++; +} + +static void show(char * str) +{ + int cpu = smp_processor_id(); + + printk(KERN_INFO "\n%s, CPU %d:\n", str, cpu); +} + +#define MAXCOUNT 100000000 + +static inline void wait_on_bh(void) +{ + int count = MAXCOUNT; + do { + if (!--count) { + show("wait_on_bh"); + count = ~0; + } + /* nothing .. wait for the other bh's to go away */ + } while (atomic_read(&global_bh_count) != 0); +} + +/* + * This is called when we want to synchronize with + * bottom half handlers. We need to wait until + * no other CPU is executing any bottom half handler. + * + * Don't wait if we're already running in an interrupt + * context or are inside a bh handler. 
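+ * (inside a bh handler the count includes ourselves, so the loop below
+ * would never terminate)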
+ */ +void synchronize_bh(void) +{ + if (atomic_read(&global_bh_count) && !in_interrupt()) + wait_on_bh(); +} + +void smp_send_stop(void) +{ + int i; + + printk(KERN_INFO "Stopping all CPUs..."); + for(i = 0; i < num_online_cpus(); i++){ + if(i == current->thread_info->cpu) + continue; + write(cpu_data[i].ipi_pipe[1], "S", 1); + } + printk("done\n"); +} + +static cpumask_t smp_commenced_mask; +static cpumask_t smp_callin_map = CPU_MASK_NONE; + +static int idle_proc(void *cpup) +{ + int cpu = (int) cpup, err; + + err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1); + if(err) + panic("CPU#%d failed to create IPI pipe, errno = %d", cpu, + -err); + + activate_ipi(cpu_data[cpu].ipi_pipe[0], + current->thread.mode.tt.extern_pid); + + wmb(); + if (cpu_test_and_set(cpu, &smp_callin_map)) { + printk("huh, CPU#%d already present??\n", cpu); + BUG(); + } + + while (!cpu_isset(cpu, &smp_commenced_mask)) + cpu_relax(); + + cpu_set(cpu, cpu_online_map); + default_idle(); + return(0); +} + +static struct task_struct *idle_thread(int cpu) +{ + struct task_struct *new_task; + unsigned char c; + + current->thread.request.u.thread.proc = idle_proc; + current->thread.request.u.thread.arg = (void *) cpu; + new_task = do_fork(CLONE_VM | CLONE_IDLETASK, 0, NULL, 0, NULL, NULL); + if(IS_ERR(new_task)) panic("do_fork failed in idle_thread"); + + cpu_tasks[cpu] = ((struct cpu_task) + { .pid = new_task->thread.mode.tt.extern_pid, + .task = new_task } ); + idle_threads[cpu] = new_task; + CHOOSE_MODE(write(new_task->thread.mode.tt.switch_pipe[1], &c, + sizeof(c)), + ({ panic("skas mode doesn't support SMP"); })); + return(new_task); +} + +void smp_prepare_cpus(unsigned int maxcpus) +{ + struct task_struct *idle; + unsigned long waittime; + int err, cpu; + + cpu_set(0, cpu_online_map); + cpu_set(0, smp_callin_map); + + err = os_pipe(cpu_data[0].ipi_pipe, 1, 1); + if(err) panic("CPU#0 failed to create IPI pipe, errno = %d", -err); + + activate_ipi(cpu_data[0].ipi_pipe[0], + current->thread.mode.tt.extern_pid); + + for(cpu = 1; cpu < ncpus; cpu++){ + printk("Booting processor %d...\n", cpu); + + idle = idle_thread(cpu); + + init_idle(idle, cpu); + unhash_process(idle); + + waittime = 200000000; + while (waittime-- && !cpu_isset(cpu, smp_callin_map)) + cpu_relax(); + + if (cpu_isset(cpu, smp_callin_map)) + printk("done\n"); + else printk("failed\n"); + } +} + +void smp_prepare_boot_cpu(void) +{ + cpu_set(smp_processor_id(), cpu_online_map); +} + +int __cpu_up(unsigned int cpu) +{ + cpu_set(cpu, smp_commenced_mask); + while (!cpu_isset(cpu, cpu_online_map)) + mb(); + return(0); +} + +int setup_profiling_timer(unsigned int multiplier) +{ + printk(KERN_INFO "setup_profiling_timer\n"); + return(0); +} + +void smp_call_function_slave(int cpu); + +void IPI_handler(int cpu) +{ + unsigned char c; + int fd; + + fd = cpu_data[cpu].ipi_pipe[0]; + while (read(fd, &c, 1) == 1) { + switch (c) { + case 'C': + smp_call_function_slave(cpu); + break; + + case 'R': + set_tsk_need_resched(current); + break; + + case 'S': + printk("CPU#%d stopping\n", cpu); + while(1) + pause(); + break; + + default: + printk("CPU#%d received unknown IPI [%c]!\n", cpu, c); + break; + } + } +} + +int hard_smp_processor_id(void) +{ + return(pid_to_processor_id(os_getpid())); +} + +static spinlock_t call_lock = SPIN_LOCK_UNLOCKED; +static atomic_t scf_started; +static atomic_t scf_finished; +static void (*func)(void *info); +static void *info; + +void smp_call_function_slave(int cpu) +{ + atomic_inc(&scf_started); + (*func)(info); + atomic_inc(&scf_finished); +} + +int 
smp_call_function(void (*_func)(void *info), void *_info, int nonatomic, + int wait) +{ + int cpus = num_online_cpus() - 1; + int i; + + if (!cpus) + return 0; + + spin_lock_bh(&call_lock); + atomic_set(&scf_started, 0); + atomic_set(&scf_finished, 0); + func = _func; + info = _info; + + for (i=0;ithread_info->cpu) && + cpu_isset(i, cpu_online_map)) + write(cpu_data[i].ipi_pipe[1], "C", 1); + + while (atomic_read(&scf_started) != cpus) + barrier(); + + if (wait) + while (atomic_read(&scf_finished) != cpus) + barrier(); + + spin_unlock_bh(&call_lock); + return 0; +} + +#endif + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/kernel/user_syms.c b/arch/um/kernel/user_syms.c deleted file mode 100644 index 2d32ea3c5..000000000 --- a/arch/um/kernel/user_syms.c +++ /dev/null @@ -1,113 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "user_util.h" -#include "mem_user.h" -#include "uml-config.h" - -/* Had to steal this from linux/module.h because that file can't be included - * since this includes various user-level headers. - */ - -struct module_symbol -{ - unsigned long value; - const char *name; -}; - -/* Indirect stringification. */ - -#define __MODULE_STRING_1(x) #x -#define __MODULE_STRING(x) __MODULE_STRING_1(x) - -#if !defined(__AUTOCONF_INCLUDED__) - -#define __EXPORT_SYMBOL(sym,str) error config_must_be_included_before_module -#define EXPORT_SYMBOL(var) error config_must_be_included_before_module -#define EXPORT_SYMBOL_NOVERS(var) error config_must_be_included_before_module - -#elif !defined(UML_CONFIG_MODULES) - -#define __EXPORT_SYMBOL(sym,str) -#define EXPORT_SYMBOL(var) -#define EXPORT_SYMBOL_NOVERS(var) - -#else - -#define __EXPORT_SYMBOL(sym, str) \ -const char __kstrtab_##sym[] \ -__attribute__((section(".kstrtab"))) = str; \ -const struct module_symbol __ksymtab_##sym \ -__attribute__((section("__ksymtab"))) = \ -{ (unsigned long)&sym, __kstrtab_##sym } - -#if defined(__MODVERSIONS__) || !defined(UML_CONFIG_MODVERSIONS) -#define EXPORT_SYMBOL(var) __EXPORT_SYMBOL(var, __MODULE_STRING(var)) -#else -#define EXPORT_SYMBOL(var) __EXPORT_SYMBOL(var, __MODULE_STRING(__VERSIONED_SYMBOL(var))) -#endif - -#define EXPORT_SYMBOL_NOVERS(var) __EXPORT_SYMBOL(var, __MODULE_STRING(var)) - -#endif - -EXPORT_SYMBOL(__errno_location); - -EXPORT_SYMBOL(access); -EXPORT_SYMBOL(open); -EXPORT_SYMBOL(open64); -EXPORT_SYMBOL(close); -EXPORT_SYMBOL(read); -EXPORT_SYMBOL(write); -EXPORT_SYMBOL(dup2); -EXPORT_SYMBOL(__xstat); -EXPORT_SYMBOL(__lxstat); -EXPORT_SYMBOL(__lxstat64); -EXPORT_SYMBOL(lseek); -EXPORT_SYMBOL(lseek64); -EXPORT_SYMBOL(chown); -EXPORT_SYMBOL(truncate); -EXPORT_SYMBOL(utime); -EXPORT_SYMBOL(chmod); -EXPORT_SYMBOL(rename); -EXPORT_SYMBOL(__xmknod); - -EXPORT_SYMBOL(symlink); -EXPORT_SYMBOL(link); -EXPORT_SYMBOL(unlink); -EXPORT_SYMBOL(readlink); - -EXPORT_SYMBOL(mkdir); -EXPORT_SYMBOL(rmdir); -EXPORT_SYMBOL(opendir); -EXPORT_SYMBOL(readdir); -EXPORT_SYMBOL(closedir); -EXPORT_SYMBOL(seekdir); -EXPORT_SYMBOL(telldir); - -EXPORT_SYMBOL(ioctl); - -extern ssize_t pread64 (int __fd, void *__buf, size_t __nbytes, - __off64_t __offset); -extern ssize_t pwrite64 (int __fd, __const void 
*__buf, size_t __n, - __off64_t __offset); -EXPORT_SYMBOL(pread64); -EXPORT_SYMBOL(pwrite64); - -EXPORT_SYMBOL(statfs); -EXPORT_SYMBOL(statfs64); - -EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(getuid); - -EXPORT_SYMBOL(memset); -EXPORT_SYMBOL(strstr); - -EXPORT_SYMBOL(find_iomem); diff --git a/arch/um/os-Linux/aio.c b/arch/um/os-Linux/aio.c new file mode 100644 index 000000000..56b378276 --- /dev/null +++ b/arch/um/os-Linux/aio.c @@ -0,0 +1,404 @@ +/* + * Copyright (C) 2004 Jeff Dike (jdike@addtoit.com) + * Licensed under the GPL + */ + +#include +#include +#include +#include +#include +#include +#include "os.h" +#include "helper.h" +#include "aio.h" +#include "init.h" +#include "user.h" +#include "mode.h" + +struct aio_thread_req { + enum aio_type type; + int io_fd; + unsigned long long offset; + char *buf; + int len; + int reply_fd; + void *data; +}; + +static int aio_req_fd_r = -1; +static int aio_req_fd_w = -1; + +#if defined(HAVE_AIO_ABI) +#include + +/* If we have the headers, we are going to build with AIO enabled. + * If we don't have aio in libc, we define the necessary stubs here. + */ + +#if !defined(HAVE_AIO_LIBC) + +#define __NR_io_setup 245 +#define __NR_io_getevents 247 +#define __NR_io_submit 248 + +static long io_setup(int n, aio_context_t *ctxp) +{ + return(syscall(__NR_io_setup, n, ctxp)); +} + +static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp) +{ + return(syscall(__NR_io_submit, ctx, nr, iocbpp)); +} + +static long io_getevents(aio_context_t ctx_id, long min_nr, long nr, + struct io_event *events, struct timespec *timeout) +{ + return(syscall(__NR_io_getevents, ctx_id, min_nr, nr, events, timeout)); +} + +#endif + +/* The AIO_MMAP cases force the mmapped page into memory here + * rather than in whatever place first touches the data. I used + * to do this by touching the page, but that's delicate because + * gcc is prone to optimizing that away. So, what's done here + * is we read from the descriptor from which the page was + * mapped. The caller is required to pass an offset which is + * inside the page that was mapped. Thus, when the read + * returns, we know that the page is in the page cache, and + * that it now backs the mmapped area. 
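+ * See the AIO_MMAP case in do_aio() below: it submits a one-byte
+ * IOCB_CMD_PREAD at the caller's offset instead of a real transfer.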
+ */ + +static int do_aio(aio_context_t ctx, enum aio_type type, int fd, char *buf, + int len, unsigned long long offset, void *data) +{ + struct iocb iocb, *iocbp = &iocb; + char c; + int err; + + iocb = ((struct iocb) { .aio_data = (unsigned long) data, + .aio_reqprio = 0, + .aio_fildes = fd, + .aio_buf = (unsigned long) buf, + .aio_nbytes = len, + .aio_offset = offset, + .aio_reserved1 = 0, + .aio_reserved2 = 0, + .aio_reserved3 = 0 }); + + switch(type){ + case AIO_READ: + iocb.aio_lio_opcode = IOCB_CMD_PREAD; + err = io_submit(ctx, 1, &iocbp); + break; + case AIO_WRITE: + iocb.aio_lio_opcode = IOCB_CMD_PWRITE; + err = io_submit(ctx, 1, &iocbp); + break; + case AIO_MMAP: + iocb.aio_lio_opcode = IOCB_CMD_PREAD; + iocb.aio_buf = (unsigned long) &c; + iocb.aio_nbytes = sizeof(c); + err = io_submit(ctx, 1, &iocbp); + break; + default: + printk("Bogus op in do_aio - %d\n", type); + err = -EINVAL; + break; + } + if(err > 0) + err = 0; + + return(err); +} + +static aio_context_t ctx = 0; + +static int aio_thread(void *arg) +{ + struct aio_thread_reply reply; + struct io_event event; + int err, n, reply_fd; + + signal(SIGWINCH, SIG_IGN); + + while(1){ + n = io_getevents(ctx, 1, 1, &event, NULL); + if(n < 0){ + if(errno == EINTR) + continue; + printk("aio_thread - io_getevents failed, " + "errno = %d\n", errno); + } + else { + reply = ((struct aio_thread_reply) + { .data = (void *) event.data, + .err = event.res }); + reply_fd = + ((struct aio_context *) event.data)->reply_fd; + err = os_write_file(reply_fd, &reply, sizeof(reply)); + if(err != sizeof(reply)) + printk("not_aio_thread - write failed, " + "fd = %d, err = %d\n", + aio_req_fd_r, -err); + } + } + return(0); +} + +#endif + +static int do_not_aio(struct aio_thread_req *req) +{ + char c; + int err; + + switch(req->type){ + case AIO_READ: + err = os_seek_file(req->io_fd, req->offset); + if(err) + goto out; + + err = os_read_file(req->io_fd, req->buf, req->len); + break; + case AIO_WRITE: + err = os_seek_file(req->io_fd, req->offset); + if(err) + goto out; + + err = os_write_file(req->io_fd, req->buf, req->len); + break; + case AIO_MMAP: + err = os_seek_file(req->io_fd, req->offset); + if(err) + goto out; + + err = os_read_file(req->io_fd, &c, sizeof(c)); + break; + default: + printk("do_not_aio - bad request type : %d\n", req->type); + err = -EINVAL; + break; + } + + out: + return(err); +} + +static int not_aio_thread(void *arg) +{ + struct aio_thread_req req; + struct aio_thread_reply reply; + int err; + + signal(SIGWINCH, SIG_IGN); + while(1){ + err = os_read_file(aio_req_fd_r, &req, sizeof(req)); + if(err != sizeof(req)){ + if(err < 0) + printk("not_aio_thread - read failed, fd = %d, " + "err = %d\n", aio_req_fd_r, -err); + else { + printk("not_aio_thread - short read, fd = %d, " + "length = %d\n", aio_req_fd_r, err); + } + continue; + } + err = do_not_aio(&req); + reply = ((struct aio_thread_reply) { .data = req.data, + .err = err }); + err = os_write_file(req.reply_fd, &reply, sizeof(reply)); + if(err != sizeof(reply)) + printk("not_aio_thread - write failed, fd = %d, " + "err = %d\n", aio_req_fd_r, -err); + } +} + +static int aio_pid = -1; + +static int init_aio_24(void) +{ + unsigned long stack; + int fds[2], err; + + err = os_pipe(fds, 1, 1); + if(err) + goto out; + + aio_req_fd_w = fds[0]; + aio_req_fd_r = fds[1]; + err = run_helper_thread(not_aio_thread, NULL, + CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0); + if(err < 0) + goto out_close_pipe; + + aio_pid = err; + goto out; + + out_close_pipe: + os_close_file(fds[0]); + 
os_close_file(fds[1]); + aio_req_fd_w = -1; + aio_req_fd_r = -1; + out: + return(0); +} + +#ifdef HAVE_AIO_ABI +#define DEFAULT_24_AIO 0 +static int init_aio_26(void) +{ + unsigned long stack; + int err; + + if(io_setup(256, &ctx)){ + printk("aio_thread failed to initialize context, err = %d\n", + errno); + return(-errno); + } + + err = run_helper_thread(aio_thread, NULL, + CLONE_FILES | CLONE_VM | SIGCHLD, &stack, 0); + if(err < 0) + return(-errno); + + aio_pid = err; + err = 0; + out: + return(err); +} + +int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, + unsigned long long offset, int reply_fd, void *data) +{ + struct aio_thread_reply reply; + int err; + + ((struct aio_context *) data)->reply_fd = reply_fd; + + err = do_aio(ctx, type, io_fd, buf, len, offset, data); + if(err){ + reply = ((struct aio_thread_reply) { .data = data, + .err = err }); + err = os_write_file(reply_fd, &reply, sizeof(reply)); + if(err != sizeof(reply)) + printk("submit_aio_26 - write failed, " + "fd = %d, err = %d\n", reply_fd, -err); + else err = 0; + } + + return(err); +} + +#else +#define DEFAULT_24_AIO 1 +static int init_aio_26(void) +{ + return(-ENOSYS); +} + +int submit_aio_26(enum aio_type type, int io_fd, char *buf, int len, + unsigned long long offset, int reply_fd, void *data) +{ + return(-ENOSYS); +} +#endif + +static int aio_24 = DEFAULT_24_AIO; + +static int __init set_aio_24(char *name, int *add) +{ + aio_24 = 1; + return(0); +} + +__uml_setup("aio=2.4", set_aio_24, +"aio=2.4\n" +" This is used to force UML to use 2.4-style AIO even when 2.6 AIO is\n" +" available. 2.4 AIO is a single thread that handles one request at a\n" +" time, synchronously. 2.6 AIO is a thread which uses 2.5 AIO interface\n" +" to handle an arbitrary number of pending requests. 2.6 AIO is not\n" +" available in tt mode, on 2.4 hosts, or when UML is built with\n" +" /usr/include/linux/aio_abi no available.\n\n" +); + +static int init_aio(void) +{ + int err; + + CHOOSE_MODE(({ + if(!aio_24){ + printk("Disabling 2.6 AIO in tt mode\n"); + aio_24 = 1; + } }), (void) 0); + + if(!aio_24){ + err = init_aio_26(); + if(err && (errno == ENOSYS)){ + printk("2.6 AIO not supported on the host - " + "reverting to 2.4 AIO\n"); + aio_24 = 1; + } + else return(err); + } + + if(aio_24) + return(init_aio_24()); + + return(0); +} + +__initcall(init_aio); + +static void exit_aio(void) +{ + if(aio_pid != -1) + os_kill_process(aio_pid, 1); +} + +__uml_exitcall(exit_aio); + +int submit_aio_24(enum aio_type type, int io_fd, char *buf, int len, + unsigned long long offset, int reply_fd, void *data) +{ + struct aio_thread_req req = { .type = type, + .io_fd = io_fd, + .offset = offset, + .buf = buf, + .len = len, + .reply_fd = reply_fd, + .data = data, + }; + int err; + + err = os_write_file(aio_req_fd_w, &req, sizeof(req)); + if(err == sizeof(req)) + err = 0; + + return(err); +} + +int submit_aio(enum aio_type type, int io_fd, char *buf, int len, + unsigned long long offset, int reply_fd, void *data) +{ + if(aio_24) + return(submit_aio_24(type, io_fd, buf, len, offset, reply_fd, + data)); + else { + return(submit_aio_26(type, io_fd, buf, len, offset, reply_fd, + data)); + } +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
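+ *
+ * Rough usage sketch for the interface above (an assumption about the
+ * caller, not something defined in this file): pass a reply_fd, point
+ * 'data' at your own struct aio_context, and read the completion back
+ * from reply_fd:
+ *
+ *	struct aio_thread_reply reply;
+ *
+ *	submit_aio(AIO_READ, fd, buf, len, offset, reply_fd, &ctx);
+ *	os_read_file(reply_fd, &reply, sizeof(reply));
+ *	if(reply.err < 0)
+ *		return reply.err;
+ *
+ * reply.err is negative on failure and reply.data comes back pointing
+ * at the context that was passed in.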
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c new file mode 100644 index 000000000..cf30a39bc --- /dev/null +++ b/arch/um/os-Linux/time.c @@ -0,0 +1,21 @@ +#include +#include + +unsigned long long os_usecs(void) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + return((unsigned long long) tv.tv_sec * 1000000 + tv.tv_usec); +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. + * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/sys-i386/bitops.c b/arch/um/sys-i386/bitops.c new file mode 100644 index 000000000..97db3853d --- /dev/null +++ b/arch/um/sys-i386/bitops.c @@ -0,0 +1,70 @@ +#include +#include + +/** + * find_next_bit - find the first set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +int find_next_bit(const unsigned long *addr, int size, int offset) +{ + const unsigned long *p = addr + (offset >> 5); + int set = 0, bit = offset & 31, res; + + if (bit) { + /* + * Look for nonzero in the first 32 bits: + */ + __asm__("bsfl %1,%0\n\t" + "jne 1f\n\t" + "movl $32, %0\n" + "1:" + : "=r" (set) + : "r" (*p >> bit)); + if (set < (32 - bit)) + return set + offset; + set = 32 - bit; + p++; + } + /* + * No set bit yet, search remaining full words for a bit + */ + res = find_first_bit (p, size - 32 * (p - addr)); + return (offset + set + res); +} +EXPORT_SYMBOL(find_next_bit); + +/** + * find_next_zero_bit - find the first zero bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +int find_next_zero_bit(const unsigned long *addr, int size, int offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 5); + int set = 0, bit = offset & 31, res; + + if (bit) { + /* + * Look for zero in the first 32 bits. 
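+ * (bsfl finds the lowest set bit of its source, which here is the
+ * complemented word shifted down by 'bit', i.e. the first zero bit;
+ * the jne/movl pair substitutes 32 when the source is zero.)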
+ */ + __asm__("bsfl %1,%0\n\t" + "jne 1f\n\t" + "movl $32, %0\n" + "1:" + : "=r" (set) + : "r" (~(*p >> bit))); + if (set < (32 - bit)) + return set + offset; + set = 32 - bit; + p++; + } + /* + * No zero yet, search remaining full bytes for a zero + */ + res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr)); + return (offset + set + res); +} +EXPORT_SYMBOL(find_next_zero_bit); diff --git a/arch/um/sys-i386/extable.c b/arch/um/sys-i386/extable.c deleted file mode 100644 index 946e7ad6f..000000000 --- a/arch/um/sys-i386/extable.c +++ /dev/null @@ -1,30 +0,0 @@ -/* - * linux/arch/i386/mm/extable.c - */ - -#include -#include -#include -#include - -/* Simple binary search */ -const struct exception_table_entry * -search_extable(const struct exception_table_entry *first, - const struct exception_table_entry *last, - unsigned long value) -{ - while (first <= last) { - const struct exception_table_entry *mid; - long diff; - - mid = (last - first) / 2 + first; - diff = mid->insn - value; - if (diff == 0) - return mid; - else if (diff < 0) - first = mid+1; - else - last = mid-1; - } - return NULL; -} diff --git a/arch/um/sys-i386/semaphore.c b/arch/um/sys-i386/semaphore.c new file mode 100644 index 000000000..073912cfc --- /dev/null +++ b/arch/um/sys-i386/semaphore.c @@ -0,0 +1,301 @@ +/* + * i386 semaphore implementation. + * + * (C) Copyright 1999 Linus Torvalds + * + * Portions Copyright 1999 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * rw semaphores implemented November 1999 by Benjamin LaHaise + */ +#include +#include +#include +#include +#include + +/* + * Semaphores are implemented using a two-way counter: + * The "count" variable is decremented for each process + * that tries to acquire the semaphore, while the "sleeping" + * variable is a count of such acquires. + * + * Notably, the inline "up()" and "down()" functions can + * efficiently test if they need to do any extra work (up + * needs to do something only if count was negative before + * the increment operation. + * + * "sleeping" and the contention routine ordering is protected + * by the spinlock in the semaphore's waitqueue head. + * + * Note that these functions are only called when there is + * contention on the lock, and as such all this is the + * "non-critical" part of the whole semaphore business. The + * critical part is the inline stuff in + * where we want to avoid any extra jumps and calls. + */ + +/* + * Logic: + * - only on a boundary condition do we need to care. When we go + * from a negative count to a non-negative, we wake people up. + * - when we go from a non-negative count to a negative do we + * (a) synchronize with the "sleeper" count and (b) make sure + * that we're on the wakeup list before we synchronize so that + * we cannot lose wakeup events. + */ + +asmlinkage void __up(struct semaphore *sem) +{ + wake_up(&sem->wait); +} + +asmlinkage void __sched __down(struct semaphore * sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + unsigned long flags; + + tsk->state = TASK_UNINTERRUPTIBLE; + spin_lock_irqsave(&sem->wait.lock, flags); + add_wait_queue_exclusive_locked(&sem->wait, &wait); + + sem->sleepers++; + for (;;) { + int sleepers = sem->sleepers; + + /* + * Add "everybody else" into it. 
They aren't + * playing, because we own the spinlock in + * the wait_queue_head. + */ + if (!atomic_add_negative(sleepers - 1, &sem->count)) { + sem->sleepers = 0; + break; + } + sem->sleepers = 1; /* us - see -1 above */ + spin_unlock_irqrestore(&sem->wait.lock, flags); + + schedule(); + + spin_lock_irqsave(&sem->wait.lock, flags); + tsk->state = TASK_UNINTERRUPTIBLE; + } + remove_wait_queue_locked(&sem->wait, &wait); + wake_up_locked(&sem->wait); + spin_unlock_irqrestore(&sem->wait.lock, flags); + tsk->state = TASK_RUNNING; +} + +asmlinkage int __sched __down_interruptible(struct semaphore * sem) +{ + int retval = 0; + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + unsigned long flags; + + tsk->state = TASK_INTERRUPTIBLE; + spin_lock_irqsave(&sem->wait.lock, flags); + add_wait_queue_exclusive_locked(&sem->wait, &wait); + + sem->sleepers++; + for (;;) { + int sleepers = sem->sleepers; + + /* + * With signals pending, this turns into + * the trylock failure case - we won't be + * sleeping, and we* can't get the lock as + * it has contention. Just correct the count + * and exit. + */ + if (signal_pending(current)) { + retval = -EINTR; + sem->sleepers = 0; + atomic_add(sleepers, &sem->count); + break; + } + + /* + * Add "everybody else" into it. They aren't + * playing, because we own the spinlock in + * wait_queue_head. The "-1" is because we're + * still hoping to get the semaphore. + */ + if (!atomic_add_negative(sleepers - 1, &sem->count)) { + sem->sleepers = 0; + break; + } + sem->sleepers = 1; /* us - see -1 above */ + spin_unlock_irqrestore(&sem->wait.lock, flags); + + schedule(); + + spin_lock_irqsave(&sem->wait.lock, flags); + tsk->state = TASK_INTERRUPTIBLE; + } + remove_wait_queue_locked(&sem->wait, &wait); + wake_up_locked(&sem->wait); + spin_unlock_irqrestore(&sem->wait.lock, flags); + + tsk->state = TASK_RUNNING; + return retval; +} + +/* + * Trylock failed - make sure we correct for + * having decremented the count. + * + * We could have done the trylock with a + * single "cmpxchg" without failure cases, + * but then it wouldn't work on a 386. + */ +asmlinkage int __down_trylock(struct semaphore * sem) +{ + int sleepers; + unsigned long flags; + + spin_lock_irqsave(&sem->wait.lock, flags); + sleepers = sem->sleepers + 1; + sem->sleepers = 0; + + /* + * Add "everybody else" and us into it. They aren't + * playing, because we own the spinlock in the + * wait_queue_head. + */ + if (!atomic_add_negative(sleepers, &sem->count)) { + wake_up_locked(&sem->wait); + } + + spin_unlock_irqrestore(&sem->wait.lock, flags); + return 1; +} + + +/* + * The semaphore operations have a special calling sequence that + * allow us to do a simpler in-line version of them. These routines + * need to convert that sequence back into the C sequence when + * there is contention on the semaphore. + * + * %ecx contains the semaphore pointer on entry. Save the C-clobbered + * registers (%eax, %edx and %ecx) except %eax when used as a return + * value.. 
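+ * Note that pushing %ecx last also passes the argument: it leaves the
+ * semaphore pointer on top of the stack, where the asmlinkage C
+ * routines below expect their first parameter.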
+ */ +asm( +".section .sched.text\n" +".align 4\n" +".globl __down_failed\n" +"__down_failed:\n\t" +#if defined(CONFIG_FRAME_POINTER) + "pushl %ebp\n\t" + "movl %esp,%ebp\n\t" +#endif + "pushl %eax\n\t" + "pushl %edx\n\t" + "pushl %ecx\n\t" + "call __down\n\t" + "popl %ecx\n\t" + "popl %edx\n\t" + "popl %eax\n\t" +#if defined(CONFIG_FRAME_POINTER) + "movl %ebp,%esp\n\t" + "popl %ebp\n\t" +#endif + "ret" +); + +asm( +".section .sched.text\n" +".align 4\n" +".globl __down_failed_interruptible\n" +"__down_failed_interruptible:\n\t" +#if defined(CONFIG_FRAME_POINTER) + "pushl %ebp\n\t" + "movl %esp,%ebp\n\t" +#endif + "pushl %edx\n\t" + "pushl %ecx\n\t" + "call __down_interruptible\n\t" + "popl %ecx\n\t" + "popl %edx\n\t" +#if defined(CONFIG_FRAME_POINTER) + "movl %ebp,%esp\n\t" + "popl %ebp\n\t" +#endif + "ret" +); + +asm( +".section .sched.text\n" +".align 4\n" +".globl __down_failed_trylock\n" +"__down_failed_trylock:\n\t" +#if defined(CONFIG_FRAME_POINTER) + "pushl %ebp\n\t" + "movl %esp,%ebp\n\t" +#endif + "pushl %edx\n\t" + "pushl %ecx\n\t" + "call __down_trylock\n\t" + "popl %ecx\n\t" + "popl %edx\n\t" +#if defined(CONFIG_FRAME_POINTER) + "movl %ebp,%esp\n\t" + "popl %ebp\n\t" +#endif + "ret" +); + +asm( +".section .sched.text\n" +".align 4\n" +".globl __up_wakeup\n" +"__up_wakeup:\n\t" + "pushl %eax\n\t" + "pushl %edx\n\t" + "pushl %ecx\n\t" + "call __up\n\t" + "popl %ecx\n\t" + "popl %edx\n\t" + "popl %eax\n\t" + "ret" +); + +/* + * rw spinlock fallbacks + */ +#if defined(CONFIG_SMP) +asm( +".section .sched.text\n" +".align 4\n" +".globl __write_lock_failed\n" +"__write_lock_failed:\n\t" + LOCK "addl $" RW_LOCK_BIAS_STR ",(%eax)\n" +"1: rep; nop\n\t" + "cmpl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" + "jne 1b\n\t" + LOCK "subl $" RW_LOCK_BIAS_STR ",(%eax)\n\t" + "jnz __write_lock_failed\n\t" + "ret" +); + +asm( +".section .sched.text\n" +".align 4\n" +".globl __read_lock_failed\n" +"__read_lock_failed:\n\t" + LOCK "incl (%eax)\n" +"1: rep; nop\n\t" + "cmpl $1,(%eax)\n\t" + "js 1b\n\t" + LOCK "decl (%eax)\n\t" + "js __read_lock_failed\n\t" + "ret" +); +#endif diff --git a/arch/um/sys-i386/time.c b/arch/um/sys-i386/time.c new file mode 100644 index 000000000..a6a5ba755 --- /dev/null +++ b/arch/um/sys-i386/time.c @@ -0,0 +1,24 @@ +/* + * sys-i386/time.c + * Created 25.9.2002 Sapan Bhatia + * + */ + +unsigned long long time_stamp(void) +{ + unsigned long low, high; + + asm("rdtsc" : "=a" (low), "=d" (high)); + return((((unsigned long long) high) << 32) + low); +} + +/* + * Overrides for Emacs so that we follow Linus's tabbing style. + * Emacs will notice this stuff at the end of the file and automatically + * adjust the settings for this buffer only. This must remain at the end + * of the file. 
+ * --------------------------------------------------------------------------- + * Local variables: + * c-file-style: "linux" + * End: + */ diff --git a/arch/um/util/mk_constants b/arch/um/util/mk_constants new file mode 100644 index 0000000000000000000000000000000000000000..2cc98420452fa3a05d3af7f26b78a09eb60aeaff GIT binary patch literal 15114 zcmd6OeRLefmG7^6dU{$?Yc%?>E!(m!du%Yq9$AtR0vzy{u*DkNk!3yuozYB>rqO)4 zyGND@3C6-pSe6%fB*DpUJ~*6woXrQE4JTm}&c-Yxgd{H*LPB<5a>8bTm_Pyq9-9Ti zyz{H-9*w-op7+jqf4tIZP2F3!?!8rY>sEEu?HfmSU#ut!!lWVs0nKX0CIC-wkR_YY zix}2o6;?n8z_Lw8G=tBx0Qf`!XaFJrx)h-6Ce7f}34jmF$h$7>h-UCv9|QQXJ`%EA z;O^E8K86CA3WQvis{n>$!0_Fg!RLDb_)ss(;Imc%*50HUe0l-!3CrI5yq^u*+nB=o zeFbNtuaMnPa7xvglvhp-$hwI@_nmtVh-%b>5A|l7V}TCLi?%c*(lLd7aC&M1aN?+D z{1!k5fF%G<9@UH+6$z)<$452e^`M;n`RY;4I4^)#0C?k=W{`h7;@Hw4fNe*?naHp$ zL#R~!Og`>cy4UjjtW&b8o}Gndk=*k9wCh{Nv{Qm*O%}_g0Ape2?$K@A zt%1}=vQ~pXIkt-UWnVER9H*5Odjojmam~Der#ghUi=RDPU z@q~aWA>U2-{#vcp|J!SC{ny#&Ufnl7mMN8cjy0bOfyb()@rrKwAF7jACvj+ztoIVZ@(+>&Fr&Z%6t#;|0eV2F#k*g z&9~mI)n*^MB5(}q;!mwXvpL_Mx zwpy(=`?8UzY2>S<`eLnCyW*<+Eq|#Q`P+#2K&@80@{0T?i1-u{*GY;m5pf?8S4xV9 zh&VySE=lo2B2E%#J>}c zuMv6idGES!%sy&feXaHQBL{AM{<69ueYyjX6>pyv)gONNKh$H7-+r)GZ8>#5jqO_6 zM!@IR-|sp0RQ2~SJx^O4y9dWLWA+cVZ}HbRXJ684CvS_){-IWV(INdcc)#l-L!9KdnXR}7N@LCx#aojlJB8+T_2K8u9UTNy_R+0 zQfvF(J^RPE?it^2?LsnZ=bV!L=L^<&y{p!EtD5VSYrNhy*=8i|Qr5}gz@^q-jEwEE z_HErcV(lNjY6P1%URcMDTsktg6Px?zFk5$zjE!USz#L}#*yuPm56&S*#>TLD(;R&H z*0DXKdv;>;Idix@d&ft&k6`oA9BOpW#e1>&+&RRKk!=TdQk6aXN48u0M#ilDBilJ4 z8|H;fa6Csqm^rA;D2}Bq#5-8r*6^=t|>D&X$I%xm4NW`bVdsTeqyaWt{Ge( z=%>4c{{|MZ-cikH2T(bx8C(r8uIw=QO)@2QO$VasAjww^540A`}w_VckVfGR_|bHFg4ISuyNyH|3Lq6@7ggt z+q*06_wMG}DN8q;a~A&JRoOT&@TaBp|5+*j-&XmrdWTEMSWWX)U&yON3#7AFOAe-ofB zpcJ+r+Cq^nv-Tp9tufmzXunNKi$!*tdM**!wZty{loEEfdLt1%LW>tEf0@Ycpz_P9 zklIS@3Xx5a&q_h?7R9d;*&~#h6xq*^*%@!M#_JKlh(;HyfT8u7)9C`Vvk8fjJ4*$q zj~F=x>POhdK9$AyM}Vj<*nM9GqOF&(?3dCee#gT2`-u4kF^2j}9f&4M#5@`S)TsJ# z4T#EqU1b1K*^5pTh<1r)e2h#MQA_pjxZCgkGHdVE03+7w4I{diB3rA&h@M4JtzQ2S zqGwY^Yn9pc)UMSV96&V1Rgv0eYH1NuTvJoRshUGzVg{Q|Mw>>Tx34bT_= zhS?{XUGfgIH6ujaFW}j#R^xkifb{Pqh z|3EDF$@=O?nf)+n*WAnO?}4j3~9%6%=VMRmH(aDbJ>b(I4tTL_aw80iPW;9w@Pio8 zrGH$%h+TQN7{es=i>cjlVF*c0U%_^Lg4Al#(7B-NpOjD>v5!OndQ>sXqG+5j2E<;LP_KSB>-|z@>A4kN}4=|oqk zi2a@3&^0l{f2;$0qkv&F>2E23(LBIqDGB|(ZL)QmzPrV z*3e@yME{LTLQB8?r6!=IUus2P6D3IEXq4W+Wx%WyvuGKpQ@9GyGVnW6oGB;<&B|mJ zErWH6H)24`V1mwiR8VZuM|GfOOP%8C2*CjZh(Bon-;4nTbG?~4l!dt=uuRhmW#-Br%s~lt>+g>O<|fKilRb$2s{yb` zm{BJ>zR(1GhkF~d zmuaWx>UEo059VIkH$8fmZ4990raf2GtG8%#MXSu52XmjSwOapM76s55=Hy(_TD^w@ z8WgS5KTN{+%GRtmZ5l}cZ8T{kW0K=0{S_5;Xh?s`0L=Ze+OTPRl{&Q9^gNj3vf37X zE7cF6ZRmKX1$agUdKB|2{Ss<&4f~q2uNVVNOFD%e`c)jT^tdp)UHZpZmf>WGBp*=p z_poACf)6VC-*7Z-2|lFgUm+;gv`F%VqOYL_ld|+tMc>UM%qeOfNq$d}MpK~FNb-k@ z{svoFR84{>72RdsvLV2y75zN6x*}QqOwlhO@xiz#{kfvIlGt4?z;_gMc1HjuqOr*l zjV+0>tiBwrKrCf8YyxA5UuFWs^o^~H^hs{Exb^1z(^E$RBc@* z`muF6QEC!qu7cJTF9~3WIYX%TZK2csioUK1Xk95AaDP<)s~FI_N`enW^&fL5mz3ax zQK`Xd*;5Zi^{1)O8rf4PqKHuhX~mpLAVwi|cnM-Htqt&vSdeN39*hW;7cBIBv@T)u z7tLxw>ls(@?L9u)3VbRCbhlose~!9Y#EAX!m5tn@h!4ksV-a9;e2JKm@ugu`aYn{_ zBqV2Ke3^vgjEparkere66%vv>tj8IV0mMCDd)sWIe=JN!#o(XKWAgq=b6K42p|x zQj@F242rLj_12m*84vL@B;|TBY2s(fvW;TW#Meq_lR4vgh@T}XhY?T5fqS`Twl(Qo z!P>;D8mGy|CZMf_9W55w<8fe!hSwI?FVTRuHpv5K#dpz`2y8}IRG_V0Rx{A?Kpc2T z3`U|!*iZX1ju*Q+8fgQ%6C3oeH35mW0XzNk8juJS)b!7*Kw_PIn=y25FcW7>%7p#~ zDdl@)vJ26nHo#15(mxjg68%yi=#TLYcp%hnMGQy`N(KhH$Atz%w35UIsw%7`v5A@? 
zI@QKE$jxTO$s%zzv0JF@!psB`J3cRzA55?} zE;h>*B!&as%_&L*MnCHKX_*z3PkfP{RhO&Vel z6H93hYSJ__?f?=!q>Lm_Morswkyu7?k>t}+{U6D7xh(xzRCtBN3N|#7{CQOGU}>)e zpN;Anf-6_Iv-G*B{!K~^e8#D$4wfb*@yk*D4mNuAUEFOYUybT_ax8{u0g>cKb^VGc zkhqxL7D;|tN9Q+G;UU@L=+<_Q>eZ%aPa<)ZY$Kp3->CqdZPcC}c%2S(CVnFzP5-e0bcS=n%v7rAlrNx3 zQ~xsSb&A)TWI}(4^%hV?HQA+euiDwoEv1_5Mkkpn5sgwinmYhn0q#(`K5C}X^-&^( z16$J3;0`lbe6$02lj}*>LS@NY2GF%gAx=$V$!B?G*ByY`623-tEmp*fRnk~;y#{nG z3BaZ$JGgUR8i379`L@v26M!vBQKpi91)K6LI$r!Lm z;~2%4cL5h@z>)56>lf0&p9qCKXaL<0OE0MDI{d|>!)%`sQ_3C_u?*0K^cGTo*y`N=2lx6Gnt(5*F z32oFrMCb_#ZPHJX@}!iy;+gX2xyN5pn+vy z?E;j8G)q%BJ03am=utVa`~#eFEutNF8q5D)KqOc1G!fBwEEfBAm)w=CKyQS*mNOq9 zJEikQzEpLd)5QNdjsJU@UwH=CsqI2jMS;?;B=ioYU5Uh(#@EC(EhGW-);jfYGZ0f0rIQtnl^E^pYXO=TC`#vmU%P5K5F{+O&CKVl5)^o)(~8awp@Cphi^I=n%$uO6NtU?8r0C z(av_IL)p>{ERWXvOKk>LtmOU0raJB-lPa$a5Yf>9fkixT!5a#P_U(8x6d8MyYdCHgZ<3lFm%Qp^)wg^$BvY=Ie9K3nT&{Rl#dfXhoLug311Hb9-nM*P z=lH(uqLR*Lozf(-_C$5kwI@Bxvk#(b39_(f$XdixWXf3^UcT)5z15QEOqT3yFE2x? zPRSp(eE4pXI(ZwSuEfdxT}pWb%_%wpuJ6Mx{tm{xl;+ zGGJ{tT_jv0T&=L<1=>55uE4b`g>=R)+9e+q*S3ol-xA#m-!4`dPm-Au7H#EbJlFDk zx0>-$OwZV^TXs?M4ttq&q2SGHTmO*MfC48D`?h3QEs1W*7SltUtaLW(T5MmglrGvx zyOSIFub=-0_-_zZD^o5OQ7l@XJxK*EKbV_=TAovyEZClJSFAt{F_AoEDpe;-CJn}Q z+6d%4F(Oql&hQR7ekN~KoQhCoTJ(3=SkIr}*jSb~4131T(3m;L=bBvc-K>+#i4o3t zs9I@vqv+ziGnt29F4%6ml(A8@LJO2#TJbGfV_Q;Ffz(iHh}IF>hVV?__zF|YRZE!$-u1aBgrq0! zaQ=;r>>uAcHV&uc`EJ2332%$C>r7faP-FY3Xa5i^E9VJ|;grtMK}=M0IolQUWzOBt zjk8n9Se5xRxh#f#s%RJ4eHp)C`Ix4~9%M*XshY8-(^kRpgmdw$(l$%EvQ;gTz!P2t zGh$K(4rJfn%SXnny%%3hd!Zwf>xHls(g~;O+6Sw);A(l5bVf93a9~c=OyDi&d7-R5 z-ROmap_Z$Rl}lF(zHldj9%Bdgh!73ArqDcn+e5{tdHB~`MWq|Q-76Wfv6>>d}s`M1XS8{|JTx&I0HtI~K?88*rDfwbK z;nbf_7o4oHlk3YRTlSyu^kRI2b<6UlT}kI{rQJzN;l-?Iie%>5uEkCcd#y?fK<03! zU~`48D&T=F(rE{VP<7<0RF@-duMQUEM9yVBYr^q8lyf<$Tu@)k zCtPYskEq6DPWiZ8eZQVUC zY@5!HGrLee1bfD}-4YE{Zgolx9&46P-XO))ve8i$m3xag-Rkn((2{^rA+7?TQ8 zrLu3Qwr(5U;HM{%%BQ_NQrW{L?{HE6`YuwFrD|&0cEx(p2v{ysu3aF#%v1_KQV!>E z%C~2Hq;gKlLCP&>(|#H$J8$J&ZY)v~&K@qMi%te9`Xvvkto*qyLz!hCsR_?Riu(<2 zgi>Oew$g68G-;EX`$nr$o#0-j4hvryl1@yx_H+mpoDwCATA>;OU>pC>(^7_V@JN?& zXN=Y6=~F{>5j+HdF)ut0CY^|>R%EPNm}Gzw(9a!yK*t!kT?)YXqOhF5e9jHx;K+}$ zalFB;1a&EsG`v#3h$n{uKgQCXQe+HWm_q$_%J?_2UzkY8 z#6F-Nq`Uv9X56m=`+_ni(((B~10Cb^*-5-72~)_A!P6l>#wI=;g&6ZNg><(BbY$`B zG0k|D@e5(+HnjUA4RmX7(F}%hgghGQ80y9996J%i&wI2`Xam2`1Z^dqh~~2KNe(9fNIX$n80hX}%5&lNPvZLr% zB+N)-=|3b)PXy^FguZen(jQ2ek;`)4OPHP$Vtz}Q7f5m*OPD=37b6@Iqe90P@?)ej z#QfuLBv?^6&m_zUXNdVEVHpW6zMByA3q^UDA|eucE=9!A^<0Wz@f;xP^UImJ6q)Do zs84vJBvS;-=7>f4^3z~`Su>X+@@txUieRO%lhyRY!tTtlM?})_T1`j{+OMDk>l*q; zK_?hO684XRE{xER1TokO65ySQ_*g{P_(JgOqg@U1rU2aB0CS#%iSiH8<{RYS%ut0o z+=)9I##=#~@XL+*e2MlH*oz7i4~NXfTzakclhfcA0qnS0GdL0*F!4r%y|kkpe+=x6 zGlAbKF-m#u&6>fHB>f^~&fXI0Yc=jSHU|CCt|oAf0`OZM-rJC#5sZ!BPF` z$aR-$cAyz0c4W|BtY1}t`)4(S@%e-qPRDhN^pt-Wzf(G<8H{OWIm0hQdrjcA+I`C0 z`6SC9Q09ND_2g;%e$@c?B7xs3^S|Y)De_wasc$QyjQD8ipKf)&ehg7-)Mt=4cft6! zV*y50U^sxO@1E1@r`7r2j&Z+JZ$A?FV1rNOHwt$)maDJTzHnMOUyce5--{vR`kr6S zgOR;*_terC(;E=?5?eJbp~Sh z8yrjqtVj0jnByD*7msbdbfgYZOjt6&p6=dO8dRw;4 zeQL)q^%`MIyr1yk#N}I>Q}V66ohIJ~L9#UXppV>TC2PX-g0kR5iVgZ$mrPq@_N2pOz3qjx=ZOQ3`GUhfnfFQK{|2cO2O$6e literal 0 HcmV?d00001 diff --git a/arch/um/util/mk_task b/arch/um/util/mk_task new file mode 100644 index 0000000000000000000000000000000000000000..1f4f750215e417b31d1a97bea8929c375bef8996 GIT binary patch literal 31218 zcmd75dw3khl{ftBsUA&BYK=zHNUfW(?2&EBmSoEkvIGWfY=cbV6aU{APGrWAlY2Vn;W^1kjo~7kPQhWfrVsAAPKu63rT=A2^$E3EC~sEpI=q? 
zXiUC+pZ9y7?~kwjj8&(r>eQ)Ir_MQbs=E8CZ97lXG!4NOf`Eh$>BcC4Co(E$6g`Nb z56iI>H2|15dQdm`dkO%5VE{S+0U)jc@vC)%zZL-eF^{+_tsT@2{%)uN_+x#&nre$BME|%jJ@ZgPCJ%D?! z(T)EC&izvLCKnCmvM#4aDc@fO{vsR0h&q8kwnILyBx;5z;lzzzJX z1LwZunM2PTN3RBO^a=iVeF%8)J}ewRe~WG`thn+P-5|<^6~r{dMW)ADd z(Qhv-EWCWd(4&C2<-@E4U#{8$4 zAF0l7pNU_q8%G~~ePQ7TdauRw4^(KQX>11e9R1nf78azyaERqaU7=O9!{1+tv@_-AoSqe4Un%L$_V=vr>s#UVtnTkw z+1J;{dCauR>%VA4`jY;B4qfH?yZZkZ)z_{@&+4Ajot#rlmYj6Y7Z(=tr(UCU&i|W> zDu?BRzr1L(0vzAJMmK(Tjcz=9jc)uYz<>IdEhqH!oqpE2{XHW?BSY(Z)~{PPGQ56x zyr*xElkVA`EcNW9ZBn^|$M@s^yDIC}um68;W%z$=<^Q);{!bjE23JOzCKI5--x7?9 zFCZ%as#AamPX?qN4az9Zw$LN=-K18l=1VoFh{ni#h#AWi+tli31BHLM@ zO(tJ70Ab1GhZ+FUF3Ub20z`*QCP-(eOxAV+qU#EcoH)q#hBZK|iRi@KEs4306_?25 zMKM4eN4g?A*oUPu`K$$qWwO8+?=F|gH18&4a;^ptE1x9#Yij@_9Qqp@6l=}7WCmgl zAvu5zA%JAb0n`vlu&ZmIWqS=A#2Sh2qdK5B426K2)r#7EWN~y8@6b8bj7Yd1&|`ae zXL}703P+Z;0lH%E>IOhp%)HbB^tkNCL=4a^QW^PK2cWm#$l9N11&m08Hx9jzO*WLr zq4%??25)!_`Wm*;P-b#~)HQe`>!A;^7Y$`5*AjVyH#Q1=n8+K-Os*&L$SgZG@i3EX z1)!h!YbGCz0Q#x2t0#2=;pRN=zL&}Pg&gaB77%WY3Lvsj2ZY-`%Dc;Ifw1*iCf~`s z?e{QwUjztu{D8@34G4Gs8_#BGWSnx z7aP7pCf~#4Tdv^Ux05@ENlr8s2A(y6c=%>R12rrCnqr{?s9B{TUA!;MnW-QHk#RQC|ANGG2l1TF zRcaiFM;gqj93s(6IA;?OQPlf}y`6TVq2} zu7CGLfnLKD{Y`+;z&mX2TaADrb#y4v1N}oaRrVx;&{wEtYKKKvH&8pQsN$$a2(obK zr?SL)vye@rc73HpECkf9|7Vu?Z&_l*EKH|SJ5njJr5&gpd5R@EIQ?rkiAQUJ+D(-b z9|;o-MS$qXBEU~gAX7JBruL^%H^`oHz&t`j3ewFIp|uJ!%qh2kx^)UN&8d`!x?u&y z%&B}Hb?X%rH>dI*>P8gQE`DSHb)#%El<0>3r3k<*DLpNS?Kgl&=<0>)HYjSwIiD)j zOeiR%s5wDFx;d3EpzcHk)hOCFsum4J+er#C6>TRgh?IJ$J4HcpG13auZBkIXcr6Un zZB|gXqWe^m7fSSqZ`T5KTL_87ZJPK{BT%>X?FtM*XNnbl*FnFf8s32DQ?)>*8Q5BP zrs+)1qwXx#3`{4r2DUT)hoWwEFW5FPA_KliGOZe%vvorM}WGsRc{7NheF~* z>rBeX9))pKe6R%-XiU7j0jN7iRU0=QuTX(Dnx2Qcy{g(Kv7dwDL#HD4a2@bn9q88T zE)>^Mv@Rj9Is5Wapw3p>W~H1Us|=#B#K z(ZnwaO8qSo_iAE2DVSEd_iN(QY-W~}i^Kz(5}FKK6^Vy6@lzeB%l?EOQsNO!+{jMn zAD7@`n)nA|E+|xwYvKw4)b0BZnfrt$zQMx9-%IeX8k+8Jp)aDd%ZARL=*+9A-lfoA zud8x+B8aZ62VS=Tquvrf2m|%)ftYzI)ORqOnz&hyeXbrjWB~Vv>X%49)*r`PoOV+M z)Gz&}1h$&YlLp zwhq*{gJPk>(NwsTpY= zryw;W&7}%bGt%r)keZR^G6l7p^Jx#JZwb52dB=m9P?FjsXOJn`6iW2U8Dy?d_4>^D zlm~OADmfr0jk!wYt&@|+>{HOFIq!Ke`&G$tM28!IzFHcmTG7@BG|1Hnr%6{U&`?iC zQxu{z4L~vsm<>_!N)6D^sBpk6l!|DG`7$F~nt_HURn0)`a04(S2P0Z5<)?88$BV2E zJ=y@YM+e1$C=l)Q(TO88JwAcDaC9KLT6ykgB10;1zS?PeL#ZFk64 zb;N$(2<)l@#zoW;k!~O=S8yEr9}7+rf9%B|8<6WbdU;f)(JqQZC}EnZ;(A28SyCh( z37bx_i0BeFClZf^#jUJ)oXUMXEVV*(DLX0>PlSch2t<1n_*7VYw+o0a`(7J!pAL&J zbKrcPaWpLSRv?;Cg`W?L-^77vZ;C6v#EW5JksV`{0Fk(-RX6)o?Y3TM1jcUh!Sk)l*zT=9cv*Wki?I)CH=RICGuDH3 zSOhcHIDoMaYrl~lDp;SW2gW|)(^#^uVr~G=Ten4lv5)$>S6V3z82gy)fk+(GI(|hy z|1X)XYfDAqu-48+!q~^Piv$PtI?dWh{GagQ8#L?nW?<~Inv$OzHS0$vF!nhgzDcwG z#GZX#c10v^)htfSu{(VDHqELhzAyOYZ`Z60x%egRxs#+2Y1Sxv`xWhXO9($4vIaxI z*jKfCP9XeP$U4j({gd`*S^n{m^#H}}Yd-u$$odYuc$eSBr$W|s9EWfC@Y5mddX^91 zqaiCEuY#WsSr<@V?)J;S7_$C}^ak+DA*+r<6~M2Ctb@e&O|5r?^!+ttozM2}@$26z ztR3A|@P~x;0^1AnZxhyMDBA)231MBq{BQa7Zx>dKLlnTD7uH5fYyjUWtYze10Do0j z^X&gWYYaVz#9hLAwgniw*T;9SuwJN!?-$lH)$jwt`h7M0uxKw+9=@yHF6H47VQs0w zBK(-Jwy}JW|G2QWR>Mz-_G#9?Py33j|CF!}vVQ^mw6OLxRKZ7ub#^uUys)ObfU*0v z1v%a?3hPg-|2^$(9qixB!g@FRAHc5)>otx~0RL5Vyqoa%7e)M_ZqZ>K`+*idnJ5nH z*4HW04{G%izD~E+bOK}lqS56QiKleyN{YloT9?fKwQhZeBK<>&QY2p1t&dm3uj|}GWB;b@knsKW)>@ML@7gX2KTvPI#Q?^h(7q;{d$``3tA-z|w|>GI`AaP|z~-N* zx9;Q&d(xMP=j*Kl|5{Tv;njNUc;^2`J6E>< z*Lv&a2r%}HpMTJ_a-1>GYDt-Y*t9fi(dRTqkVN7-(>g>6{jCq*U|QTr7<*nz%MrcN zw34L%_lt0W67k|9d@(i7AN}@jG_7T1_n)+^Z0{!1dV$0Hs+idS|K0iKTT9326*R|_q{_Q3VohCf=YepQS7nzoiUP7q8&{{ zsu?<4jr>OhXkPOnE=pvCIPwf{xbL-q&c`j_>{

OI@#XTd3=13(&e=>jFYu6I@+) zy~Tp9Qr1 zRzdAzs10cOor1c>O(xLtyn=ef9P9m;f_lZ0W}xNwYC+m(7LsYSyr7`f;@_h{%ZsYy zfcS+DwERIq>%>K*lNgYvoL$@%rZ#Aldv7q37 z7Hpzkl#72Bhj;rci?$<%&ES8!*r4#68ES*=(dYfiF*$G%zX*c;SXE@n9=z|8j zztzbhVA+fPvWGP?gu^4t9`Qi~(Z{-gf8z;^j&F;5TY-*y1B(CF33PnNXOx(01UkN} zAVd7B3Fx>_L8f>s>)o%Qn7AYYbbL>-BQCxk13JF1^4i7i#Q6gSb&J<@pyL4r^@z7= zK*vLh&R(&s9_V;jjaPMbo{G=M#XFm(D8_BZ(Q8U7JjVqHi{dG^Ct@0 zBzX3u<52}|75_l!F$HZGx005hDrhI79o@hx0mM2T@sJ60PVo-4*ZU(t=d?fC;%hq4 zdAZ6n#JxPKGpiu#R5CYu0zQuLR0i!as!OPBBt`&b)Wgmf4>*BRaV65t69(3d`~Sx;HO(r2`F zOBC*ICT`+J+0th;OCozN$g~V%drljZWr>cM44qXXCv^cmEvGMGjC%=iY8?>oX^}>u zr#+BFnvb4NWj=HnG3e=HX)Ynjyq<2g5Hb;29tV2jx5x%QyaZ^xXGySlW~z=TWmqCu z6mMkHbi`j^&=pXy?;16aOXVbBpx9|!za17e8*@nj2-7*xwch>Jo%Lhi!RsN|WqmV|{826~qB zh(K>`^zFw1Z)yhg6{h%B2w2geBoS!){vx3ov8EAN6;*kLSY-jL8dYAM zTr#hUaUYNCc+c5~RdU-hl!zkw{iVRa*8;tLjk3AESaoxKP27+RB~){L&D@jYxU#vv z7L{kp=KA6)FD9GoYgNs~#i}--uT2@pHraTerJ#24av11qS5T)|5I~BG7E}hCv)KtN*Ib`r*IiS??rNK zlgf)Jj%`+XamBGyPmm?s700$HQo03w%~f0Jib}$s=mE|T0|Wi-Vx$S^@3`|gWFZpE};Lo&q%08JkSC3FMU}; zz2c#Epub0PtPjhGz;m)N`n$zdY<#cJ2XQ10^q;FBPHK`FWeW=-ps#<4R4M&as!gR* z`lnT!y7+Mo&_APQw4o?)6$PdYk@a7$5G!laKdYd)>1C7X&vf?6UYTwh{aKaOEpF3* z{#=jD>Jc9$lvjDZF!cImR5>Ij%69JW0Al@1#b;}Q{yEh$#EK};zh6PRcuOPDKd(?2 zqPqp?Kd@4wr)vKqQSDz2JgozpR!8N~tZwAo3WjENObte_6ui~GkJ~2)YIU>98<1kS zxE|w5#Q+XQ^{laqK3-#QVTqRBc(BG#<4dPQA`+n~7 zhY}&NI}G%%R*;V6FD(ZsTYCRcoM##O*RqT8$?_EX*D@eiP>;Bf61ho19P`(gGuGG(a0+hf zzd+5N3#pHSA?m+Kd3(KT_FSx>J~ewTQL|@2&7Mn@B(GDm$5xU&s$};vB{Sn{_9TNL zGV~88$R~=4p>qVHTrZ!%E!+O9#Dknq2Lts_2eJR^z-pXA9$e##BBF2W1(tCew*QDY znR0n8@6f6n`amxbS=kD#QBw4&UcgvmnVvh1HEo2X56`7D36tKxhRABxu-?PUzcPR| zhj_Lq+Oz^Fb^)<9?-Z{@fi>5EM z&CrP_dg%&at_z5-d9OnDz5to5{(eff@iUIwPD*#HnWS&5D^-3UC3&aLGRK?C?Dt4)vXvjU= zfo_%#CF-D`u(AqG2+?y_0!Qg(4lEUC7{EXe@5teDR#pv{K*nPL@t*sq|NIIjU1SgoMAXsQDS)+ngmw7n7r z2H0dM(JdYNfkD-!jNDHg7*bG9tZM`YhSiQ&LGEo2$TI{CtIB=K0eN6RByP~e8C}4@ z=+r8PEN;}q$+T(5&nYmZST%O70tIxIyL*Mt@(6Sm4>eSv8tz*UoUF18@z!=!pgQh$51gX1 zOz}w~^r5KSG9U2IK*Z#B`oLzN%W?yK;M9Q2+<_hltiIe39N4PzdgP}3z%~W-%I(2{ z(-hPvH{J(MSI~g?rzkM6T|w*Qrpdq#1&zv$y@4|nG|sK@fj23lH;VVMnh%{Uclic( zs)C!umRexoOa*P_*7(3#s@it(CC&>UIzzm#1sK>xPKOdZ#n&lOyH&N_ayMt-Yz6Iw zAq9H*S^dDDd1!4g(mEOk1`STq){AO@!CGlsB6qX_gLP|&^KJcrF*qu&iUNb{8Lj8k zZ0rUGN0eKoBlN?5AU1fCcr*qKj=$Y6lr?}s-`eQn1NFe*2`ow1@>P~Rk)#m%vH%8s zYhyxxqF)6?dU+rdp~lsKIk;7ku=yAgPW4Gp__p}?5V~|V5Fb2KRoQ+_l^sEqizC3` z8A?GJ;yeo&eA6-+pfJUD_HU`&c*Bo)qEsNx-KE=74vJ@HC?l8Ru5^1*T{jlK3WS53Ka;;JAU#zY5Q~*jiHNaYbe_j`61Xx?oU@})Rh5hLYWD2fogS?p7XaQ@@epxau zj>Z?Eb}>(enp^OF99wWk|FMU|lp==X1@wu2HRdb#xvY z1Rh{OdN|zmZ9byYwWWih+6~-w?n*Q>RGVO^1IvCo2yj!cdpK(5iWm+K6i6Nn#}p5A z8Aupz3YHxV9Sk=sMw()8D=^%mpg5xD5Kw9X`i3o01~A+naQwepfZ-12OJ=!g3`rp-dr*SH2t6%(4sqsfCp;6{oUdihD#5KP&(9I3N@U1RjWSuXIVqX{-84l!#5;wA55gkMH-XUP19#}VWyje&U zFfztF($#om2-q{^TT%_N!aCqe6X?}O&XJ-za$c|$exV*1IbSJth-YenkqZ>0i&Whr zWQebmG(XQ2o0@@UJEdi zQ_wo`A1s+yt&fTmm z7>(RO2GF2A76L{-b#NUWmPa&ka~m-7X`&H4SkSH)oRZ*;A#pdU{i>>aQ%G>vY~&j}(k>FWhQvP_z{uSy_d_8OA^1%N z^57o{N(AFo2SewK(QjS{P;R6@1jN|%CoSLsG14vmoelivgTr#f#bX?er#~x+dA>%R zV*(@3kOd;~p@{ecbDveD-WCypa`d0~%EF(Bh=-!Uh}`TDiQ6M0MB3zThe&)rB5o!q zw>w1Q&WIQ$_`JgW)rj~Vd;MRECwE1}42SdgBwi%$iHOb`VB`f(PLa4j0z>ww>6@)m zb6heEL=O)GAMFHUBR>{TvPVDR9Vx>%4Fml74!R+QT4xt2-STb-`rX5T8z-Wgu<8kq*28?zps9pT90~qy> zigkV1s~t2RD&tk8pp>}o+&PC0FH0| z=m<;3#AU?d$4%nm1I@tkt)G*5?WkoTO+NDiLucDfkMl`lL;vIm5Pf(ANX7wu>>4q{ zqXvi6{0Aam=Fa~6xfdE89|cyDg&U%R+shmLL*0-a*bq}4Q1R#uP3i!aDaqQ35@r!cm0=-0{Zy!u5~;t*7cWqnmL!p2)i!fhI-cz zbUx1<{o*Kaw*~Z!uaymruj9dcMsD+ymt%ZbL55j$JdCgBNqlnZ!%e{Wh=SS?ee?0a zYi+=$@vY)2l5m=jM0~ad7(d-V^DZ)Ni;yY4TMvxCi4Ou0<&Rl%Cm$gQC7Q*hQDFQ` 
zRWB}T1u(u#;cOQtb_3(P71SgC%=XS!r1Xg^*uq|ww@zF~=v)Pjqpf^AA2iwA%7Y9O z@-SSyI+JM_8)`^!|fr;Tjpqe7UgnuyH5dW(QnD9^Onc`&)m>5<5 zMoiov2PVc&9%C=!;;R5AHiS12Y8O8@fQj(|33ZFxYk-M~Qzg_R+Csp@iGhupW9ysA z)lgzUw6fl*L7(bOU}DQ}<)UO(i0Vy`;K{OyYZcTd>X~<);@3Ly=@>Ba&KD*9qvrm7n7HBRQkXL^^sQq+W6y?Q zw}q*?l^djQ&Y?7P%DAD|ZvYy}NcE@#i^0&>Zvgc3H&pFM+lc;Z1Mnek?@XM}Q18SA zzMn`BZ{k7)=;E)nz{EugGQ{6F_!lck>5qv^6%-SXv1{_t8V>hQ2wkS~+Qrj^k_ze; z7qtNsX|^vPs$h>vy;>7SGvmO=W5D>t<>E~iFfpq%6vS?hTt-2<;4|tIIhLko1M~8M zX6x((CJIW&nzBR1fK1s1ABu}{A}JLml6Dw6?XrH&IKYG-`-o8>e%}B*rbU~?n|Rpj zzT4!%ItZ5sYwlkvH}-V#QL4A^sXRmMmdA#C^(r6l82iDeWWAWk@j%N1N(kfftn1i= z3ThX>U<>~e$j?rmBzovia;K_Ce1rP*Vb#q6@ddX2BPC4(@{s%3qpH18afC-ud}v(k z;MqYR+9)pO@f9E1BoK-=X>`52(J5A78LJY?lV z=L@SU?_#s)c^Lbds&|?AJ^SZFY0*iXJ~Sf^G4JQ9WJd5AZG6d8V?Btj#o(BLe$TyLx8u1L=#VBJ-b$(@cBeY zJjoMD&vE@G5_g8gcDD6f&UOjz;CZCyIR{1Jt05RtM4Da{(&&Bg1kN$}7!R?Ncquun zVCcjjd+h{ZkHKd?$NnG}9Ahs9>gNEt_D2?=$8&(ks$S;kip1eM==CQ8jPgo!ry%;= ziNIT%f$d|jD?AH69&0U+2!=Ek2qkpuA)eO?%kx@17V;$?a5MZPobA>}*qd4(?6xlE zVX#JdoHCT?v69S<`MJH;2xYp(2m7ooMAhaWGaazDl8YVkXeUp*ZY0?0gX7kJljU7L zxY24N<|RI=P3@)90gNrxUXe6!g`v}NXu4K;q%WKZbno2=9N{^Su?fxkB?&n}3wnG9 zsXoyMb#(%6qYoO^>nwbVPo}9(Aa0T;5P1N-uYsezUAt@}(DWi#z1^ps1l*+oyT{Jd zEH3WG&hl%+`cHCmmpna2ZY~hqt-Vi0ysaqDj-9Qk(3ok>@+jCIzmqX*2RU+%55}#l zDOY=au-$r+968qqyR9&p`(_{Pv2LW2I8Rf7l3wd0?D6^XkXb0vXYD3)FYt$L!1`C# zy-*$^ z-Ah58k%i@%-W7yh?d$SP#sTul7Cld#!lc9fCPFIF>nt04mGxUpCl1BhFL!V}v$Ne-z_n78UKh4muk{B2_ zT65RQz>|Dhebd-op;huw`Zq%C)2hcg*ms9m06L#yr!E*^sB@tagW4&8b~3$^I>|>q z#m=YMwR49~W+U>M^U!;Zp+9roRLkO__cG|gm8;%-iV6#^#kxW#(6TB*bXwE(U4SLmbKhJ?S+We*PmR^138tlfYFHhdX}th)wGsXCQug!;rd<(Lsc*vAdE%i4Q&+= zh>{9oR&+EfEJP6VbHXa8sRbgs(FB^=BG6)~ihg!{u??Vgu{S^)3kLWtKPRmE+3wf! z!H&h;`uc`?v!1-@)Q_RCs}CaDT@Q3uQ62`Cv><5YxRsIvOOKa-dPsPXwMs7+AGPCr@>#;Rv%%KZy;{tzKlqVt3Y5@r`>+a)24T z9n!RxQ%ofVtIQoOOOJ&MwXRUl|R0T?U*|jWIeFH(gpo(|EOFM-(-p1Op zSCsFfnogw#`A#Z#WPEI}=%iNw1Oz|Aj9$Glx=qrBfmAx$}1EK*8~bX83B1V{Zr`$g4_*fK;-OoOCm8>42(M znkhQTwC(1m@;;toKIM2`zBn|)cX1Sw)6UQgk(51$_XJQV6_Y964OgWFK$`7JW(`om z74+Z7DeE75FI6rUom@#3IF=|K@Ji`qae8QmFZEIkQp9=(ypoeuRCxtAm&vDQ{eHPw zXWqx=W=U*_Cu#QmeTfiIoGcX`zYNErkSr#NRR>aqayID^sZjy!R6ggG?73v7%t{83 zEakHm1N5GgUA*Z|=aS?U$Htvb<#Q!xo)jtAA9F)ys)8Y4%Co&vv7GWL zPvx_Pj8k&+In`*vP5ZpeBqvoWo4q2R^SM`aynMMRg(C!z^OPB~J~iX`HTUO>v-`@9 z8fH$*B3(|9HRXbwc5MiZ0s=7=Y@30H{SxK1Rc}Trs zPrA9OY>7;@bLA|`Ir*wK&)JvCmvbe%1TQ(~r0ryReh!zrrIJ%z@46esPBGgU&d zY`eKhNwhD^q(QzHE|pI^fwc1GOuks^Dd*U#lkVXgYHX=KoQ&-jQZ|VzNeQ(}$;pgk zPr4-!mGZKml?wSx+MbpbDz~QH^vHU)djtws*Tl3gge`698~_LMgxS&`42B2}7Kg5VbNQ&X6m$`@zt zbUsHFUUG`LWX7f%^vGU&E^AXEV5;ai#bg#<#&OtpBr~OAKCgr&CA-Wb-u`3(MW>KS zrkt#kE1~G-rhRmtJMGLlN{*bYBuqBVvtT7BTj2ZsQnScrxvgBy`Qfo+#xBs8@(o{$ z1yht0Ick(&IpgGHBYMdd*~L^cHRIS6L62gVqZp;L$+1y8nNAn&5=x~5QUD6m#R5k* zKj##S<(!>KdL_a|Cz-L`T&jpzi5vRF(WfbAXdIOKE5l0&1#Hz5|~C_5xd83CU`xuUHw!k*7&i_SFW zh{j0|rDN5(`RI;3@nn!9%IJvZ) zPL`7J_PeFjj9qXGj*<#HIX8{TWNMZ}o_6M>l?C7277*r@COHpm+Z(4svdg)mlgcFB zET<%gqv%f0l;C=@``EE-%c=I3Gq>7j?b@{)Wjk41CxvXrot`1@-1OL}o%W>NwbM?n zR6KxEKI0UVxs=1Pk`gC1QJ%u=+0=sjoitK8cP{1a$5h%=nm{QPC4l5=;9pRqdvctn zGSjL}*`hS*b|x*=o`=a~DK*1>d6=?Gu9PY->8lD3r<((qL@JrduqTobsRZV!1X87p(qk3n4BIXw)9Y!GoZ_65 zMlN5Xevxn5%quCQAaR1GsTk&{JAy&v90@9@`R?#=w$iFprc}hGaf+-vNBvMLLOKfY zX51+@ULIeZiJAP=lwCq5pPTNH{}F@ITZ%L8w3?o?u3dCEYHMfmS!eC#X?NClVb`u7 z8e3b=W(L;}jSY3NzJRmCxjhQYBF-!nV`i49WptooqJeF7i;Za5n2?IXH5> zDSp|Uq$5{O*>g!nzPww8=cLNgUX+q<24sR=pfKc0zWSYYl~R$6$!4jk@bEsVwB@8hu-l}y28%7xpK+z z_B&pQDxKPz0~6?J_X?u-oKz`K^<7a#(uanfER~AxWVz%hM&_t>0~gsYVV*`p_LwGB zs&-##&>J2brKDx$QpF=7u%*FQCQo?=?#TECl9~2O(lm3-Jtu?0v>blBN~@=RyGNhF 
zrWYc$P&LZH-YP#Cw$v8VL$G;1Bv-Tp6Gt(TGqS>oob>RhHk8L!$v9^GyrgG)p6wMt z*ILa%yD*hg?9l#596ry=^o)y2M^`C(#S^uLp^y}c`6=JGaPxM?&D;5cbabVARPhKa;-tt> zQleb8ipku4-ku$Mw{g-J^3q#f>}17lR_i+HW@o)=FIVMt%{sIei;9+`%(3xsdA=-N zL^)q7&7F461!rxMQ=O$qVUc_&I~6xbI{eZ-R4nCsU5ziDT;J5$$>KCst6feznIuUl zPTFcR`(rXPzM;ZYYGS0z*@AQ*C{l%dCWF+0RL1cr0aFF(9Zh*zxx`2n%B0%n(#9^y zWr%I_ZG@F>?B26&^H$l7;-Yf(GMMrx6`V!%m~EOGt`U(+X6VrPu82`^Zv@J~#@|m>swToqrM=>==u6wCuF7Ws0gR+~F_nvDz#bPcm zT}rux4b=CZZRhOWyk~ERhHF(ICx2$CMv^4sXfJY zr!Q$KI%iIiZsq=>TXI;k=y;`k(J3NDKgXV$_lmZYo8w%x%egcspI4ZIwcAPbPC|6bjx{(b-1?vb%vL@J)eK1}`ri4JzY>`xFKB%7Ii#J* z@-!`{%_rNunPidX(qBwScBqLc6}A*P-(E^?7o8kupEQIh6pGFqK{u^j3AOqu!FRir zK~uvb=c8@&HJn~j`G8UxO9iX^Ag=8yO1A8;h@JUDKF5_Fr=L%y^aPa48fZ0T`N-b9 zv`7>pxi*xtOwV}+QXeIACHl~07*ZK0S*((x(wvjBOZH^ab8NY~oud!S>5{yBic1yG zo#p~tB9^RClEXb*I$C3+G)1Lk$xTUZy2xuunUwTYPOc1Wdp;Yu6$$}gy z-|TTt=IG=w%Co4KOLKGtQp80nN^^FOVA{zzbScUO+4G{~tq+_rPfkiX+Hy`(iTeQx zNq*6pqRx<}M9Qc?(X+{Ur&!DvX$yTrDA(@(N;6e-(k@4NpBJc($pa-v*)IBSi*x;) z9a{tE4#oX!lzve$UviW*Ry&Fecvf34`BH94n-W*0gw(7KDCOX|ISvlTo+Bsak?UtE z3=qvMltCT+9Xvj&T3=u;n z(kaU()0Z#G@suZH2Fg$HO8J71a>}D-kQU!}lPlV|;_j%`l=Qx2_>0cYBIZJ=n0Dn# zhzgQo=SV%M#7Xu`P6LK*3EK zz@$}GNU)M+;@ z7k8X664#tl^k_Wks83Qk%q^}GTSmwelMKPVs=S$Qaa*+L?CWVU1 z$mQZLSx{=LShkg%**0HWT``0dBSyPQv|lAxMLQL`AY7=7pFb{p&OPfaj!R*hqvf|N zH&mp@t=Q|^eK{xvDk8a$;sgq`$kX9`16iPD{ zttSnRlbf@hTw2C?_&4QoO|1MVP8?b;cgjuCp9^T%OJBIM4EMEm5dLE&=_CcFCs@dG zhl37>+%uSUoC3M6HX>bysu-{exL!3Ja@qwd!}mlfcdo~vpKbHS=#rfhFInf6l3AM2 zAjBu9F#Qmp zk86ZXNd>9tV`k^&^zglIwL{@6neCf*?v<)uMjB--*U6T>D`Yg1QJX-%&)j^z+>EwM z$>Ox5IGCfeNYi7h=_$h|sd5n*@(yH6$w>@(rK0*f6Qtyb6fl&_mz<%^r|uZ!v%y2O znixtS$ax2{>Tjutq3K+CDA3r|fL+8;(aEs9N)$3B47pS{Lvm|kXv)pG7%Jwacnvu- z_EeDzy&(ms59E?rH-#a_-8>AX)t}2#Y%^cN(4^;Kh&wb~unb9G%T5-PIqr7(V4*xI z_bdZg4sTF&a*_)wAIi8nw#?;Pg$xO>kN?+K7tbT{aRj~=f!{3C)OTQlOMc`7K!UGT z@yp3?i)recGQp)jBmxX`FWE20Z`Ewq0DdSd$md`FcoLd#S75vRh7DhG$Y=6dmu<2P z_n!H#2Y$zf<@n8;qni5GO>hPD@tXnsUJc9fTQ#Th{R36)@;-li{qNf_&2QZ74yk@r zx63QVua5Y2_>G)GNPQcpx*vQ?4L=bVl;byb=6!rD$Nupz%kil?(#n_J^P4`mhSYa> zf-AsF9t7p?yhb2#{6r`vZPCj-C9LT@B>-jDjnu!w({m zCoCtwX;-7ZXUFm2#d74|?N#{r#i`38>V9wq_`XW{80Gz)IIG^7s@-O1!j~_CqY6f@C0&9ZCs%Yn}DzY(9 z1lENCo_ZVc$Bg=gbGI7b#rpw1)*M__sQ>PwzT{2+tC3PKDp(<){!_4OfqG8Cs@K$K z3Rcn{d}B+<8KLx(%BSX4-|AAZ{3G>^g4qkDR}`#1gGT+KU}_>LJt67km&MdK#T3j3 zvDCa*Ff}LS{8n(IggK8DOwKKS%S%4(0myl&^7%zE$oZ#Wz7Q00o++4LB2(WcQ?UAC znf!jBq>m3hEnf1m)v8N?&qps_0(_8e@e=Y2l8cu(M&oh3ssu~IOBM@Dm@g4qyoCJ1 zG;C}TcqCNJq^v^y9KD96x;&71DbkB!EtP(9`X6D zA%-2kUZFf{Xam2(xxK1AJ_did3g$csF1EiPK)Qmz8HZ`RDsT(lTQ%Mq8l_&Yrss12 z#*Zxa?>hlnBCjsm{tQ&lhV>3+r7Qmf{bc38G{9={-G@(iA9oikv41U9t z3=QNh7W%_NlLp-A^P?#g!*LqGZ&vaHdc-%R9s3Q@&Dycw5ItL4oFC1o!+B(+&o9=$ zR0Cc-q#Jz3g|PdE_OH|cejanXpMQg<^gO#A!yguI)fUggu|D7BR{hP=yWfECA<|dH zkDpfM_aKI+NKe%_PG8a%=W8<>@M=|g^2ZDvOHT_A1p4FKgjV#2fLnc-^zu_#)%@KY zI`$i?^ouI_h~bhdok%;huR1^UhlRJlA)k9wnX2!mB2_GTrSjC&PztuaW$&&%_RbyW z?6qy!_SR#dGqzN+3PbCH^`4U+;(4NU-k#3nCzBalI@df42iMfwzIY_Vh(`Q z_G~_LTLoe>LCzvp>A6d+LFADE=Al#trOSC#BZZf`RVsy^iRcb~XDrXg9WigMK$L%V)}fOzE1sblKN#gWZpA8fS>f$gZ} z_~|=$ow|7^4|bfhZLhs|^Qk+x+3=ZDRmJbb8^6k1)wq4eoV~}Hb{Xp2l1a+w{Tmw& KSaHm!d;d3D4q=P{ literal 0 HcmV?d00001 diff --git a/arch/x86_64/kernel/domain.c b/arch/x86_64/kernel/domain.c new file mode 100644 index 000000000..0694958c7 --- /dev/null +++ b/arch/x86_64/kernel/domain.c @@ -0,0 +1,93 @@ +#include +#include + +/* Don't do any NUMA setup on Opteron right now. They seem to be + better off with flat scheduling. This is just for SMT. 
 */
+
+#ifdef CONFIG_SCHED_SMT
+
+static struct sched_group sched_group_cpus[NR_CPUS];
+static struct sched_group sched_group_phys[NR_CPUS];
+static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
+static DEFINE_PER_CPU(struct sched_domain, phys_domains);
+__init void arch_init_sched_domains(void)
+{
+	int i;
+	struct sched_group *first = NULL, *last = NULL;
+
+	/* Set up domains */
+	for_each_cpu(i) {
+		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
+		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
+
+		*cpu_domain = SD_SIBLING_INIT;
+		/* Disable SMT NICE for CMP */
+		/* RED-PEN use a generic flag */
+		if (cpu_data[i].x86_vendor == X86_VENDOR_AMD)
+			cpu_domain->flags &= ~SD_SHARE_CPUPOWER;
+		cpu_domain->span = cpu_sibling_map[i];
+		cpu_domain->parent = phys_domain;
+		cpu_domain->groups = &sched_group_cpus[i];
+
+		*phys_domain = SD_CPU_INIT;
+		phys_domain->span = cpu_possible_map;
+		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
+	}
+
+	/* Set up CPU (sibling) groups */
+	for_each_cpu(i) {
+		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
+		int j;
+		first = last = NULL;
+
+		if (i != first_cpu(cpu_domain->span))
+			continue;
+
+		for_each_cpu_mask(j, cpu_domain->span) {
+			struct sched_group *cpu = &sched_group_cpus[j];
+
+			cpus_clear(cpu->cpumask);
+			cpu_set(j, cpu->cpumask);
+			cpu->cpu_power = SCHED_LOAD_SCALE;
+
+			if (!first)
+				first = cpu;
+			if (last)
+				last->next = cpu;
+			last = cpu;
+		}
+		last->next = first;
+	}
+
+	first = last = NULL;
+	/* Set up physical groups */
+	for_each_cpu(i) {
+		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
+		struct sched_group *cpu = &sched_group_phys[i];
+
+		if (i != first_cpu(cpu_domain->span))
+			continue;
+
+		cpu->cpumask = cpu_domain->span;
+		/*
+		 * Make each extra sibling increase power by 10% of
+		 * the basic CPU. This is very arbitrary.
+		 */
+		cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
+
+		if (!first)
+			first = cpu;
+		if (last)
+			last->next = cpu;
+		last = cpu;
+	}
+	last->next = first;
+
+	mb();
+	for_each_cpu(i) {
+		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
+		cpu_attach_domain(cpu_domain, i);
+	}
+}
+
+#endif
diff --git a/crypto/khazad.c b/crypto/khazad.c
new file mode 100644
index 000000000..738cb0dd1
--- /dev/null
+++ b/crypto/khazad.c
@@ -0,0 +1,915 @@
+/*
+ * Cryptographic API.
+ *
+ * Khazad Algorithm
+ *
+ * The Khazad algorithm was developed by Paulo S. L. M. Barreto and
+ * Vincent Rijmen. It was a finalist in the NESSIE encryption contest.
+ *
+ * The original authors have disclaimed all copyright interest in this
+ * code and thus put it in the public domain. The subsequent authors
+ * have put this under the GNU General Public License.
+ *
+ * By Aaron Grothe ajgrothe@yahoo.com, August 1, 2004
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * + */ + +#include +#include +#include +#include +#include + +#define KHAZAD_KEY_SIZE 16 +#define KHAZAD_BLOCK_SIZE 8 +#define KHAZAD_ROUNDS 8 + +struct khazad_ctx { + u64 E[KHAZAD_ROUNDS + 1]; + u64 D[KHAZAD_ROUNDS + 1]; +}; + +static const u64 T0[256] = { + 0xbad3d268bbb96a01ULL, 0x54fc4d19e59a66b1ULL, 0x2f71bc93e26514cdULL, + 0x749ccdb925871b51ULL, 0x53f55102f7a257a4ULL, 0xd3686bb8d0d6be03ULL, + 0xd26b6fbdd6deb504ULL, 0x4dd72964b35285feULL, 0x50f05d0dfdba4aadULL, + 0xace98a26cf09e063ULL, 0x8d8a0e83091c9684ULL, 0xbfdcc679a5914d1aULL, + 0x7090ddad3da7374dULL, 0x52f65507f1aa5ca3ULL, 0x9ab352c87ba417e1ULL, + 0x4cd42d61b55a8ef9ULL, 0xea238f65460320acULL, 0xd56273a6c4e68411ULL, + 0x97a466f155cc68c2ULL, 0xd16e63b2dcc6a80dULL, 0x3355ccffaa85d099ULL, + 0x51f35908fbb241aaULL, 0x5bed712ac7e20f9cULL, 0xa6f7a204f359ae55ULL, + 0xde7f5f81febec120ULL, 0x48d83d75ad7aa2e5ULL, 0xa8e59a32d729cc7fULL, + 0x99b65ec771bc0ae8ULL, 0xdb704b90e096e63bULL, 0x3256c8faac8ddb9eULL, + 0xb7c4e65195d11522ULL, 0xfc19d72b32b3aaceULL, 0xe338ab48704b7393ULL, + 0x9ebf42dc63843bfdULL, 0x91ae7eef41fc52d0ULL, 0x9bb056cd7dac1ce6ULL, + 0xe23baf4d76437894ULL, 0xbbd0d66dbdb16106ULL, 0x41c319589b32f1daULL, + 0x6eb2a5cb7957e517ULL, 0xa5f2ae0bf941b35cULL, 0xcb400bc08016564bULL, + 0x6bbdb1da677fc20cULL, 0x95a26efb59dc7eccULL, 0xa1febe1fe1619f40ULL, + 0xf308eb1810cbc3e3ULL, 0xb1cefe4f81e12f30ULL, 0x0206080a0c10160eULL, + 0xcc4917db922e675eULL, 0xc45137f3a26e3f66ULL, 0x1d2774694ee8cf53ULL, + 0x143c504478a09c6cULL, 0xc3582be8b0560e73ULL, 0x63a591f2573f9a34ULL, + 0xda734f95e69eed3cULL, 0x5de76934d3d2358eULL, 0x5fe1613edfc22380ULL, + 0xdc79578bf2aed72eULL, 0x7d87e99413cf486eULL, 0xcd4a13de94266c59ULL, + 0x7f81e19e1fdf5e60ULL, 0x5aee752fc1ea049bULL, 0x6cb4adc17547f319ULL, + 0x5ce46d31d5da3e89ULL, 0xf704fb0c08ebefffULL, 0x266a98bed42d47f2ULL, + 0xff1cdb2438abb7c7ULL, 0xed2a937e543b11b9ULL, 0xe825876f4a1336a2ULL, + 0x9dba4ed3699c26f4ULL, 0x6fb1a1ce7f5fee10ULL, 0x8e8f028c03048b8dULL, + 0x192b647d56c8e34fULL, 0xa0fdba1ae7699447ULL, 0xf00de7171ad3deeaULL, + 0x89861e97113cba98ULL, 0x0f113c332278692dULL, 0x07091c1b12383115ULL, + 0xafec8629c511fd6aULL, 0xfb10cb30208b9bdbULL, 0x0818202830405838ULL, + 0x153f54417ea8976bULL, 0x0d1734392e687f23ULL, 0x040c101418202c1cULL, + 0x0103040506080b07ULL, 0x64ac8de94507ab21ULL, 0xdf7c5b84f8b6ca27ULL, + 0x769ac5b329970d5fULL, 0x798bf9800bef6472ULL, 0xdd7a538ef4a6dc29ULL, + 0x3d47f4c98ef5b2b3ULL, 0x163a584e74b08a62ULL, 0x3f41fcc382e5a4bdULL, + 0x3759dcebb2a5fc85ULL, 0x6db7a9c4734ff81eULL, 0x3848e0d890dd95a8ULL, + 0xb9d6de67b1a17708ULL, 0x7395d1a237bf2a44ULL, 0xe926836a4c1b3da5ULL, + 0x355fd4e1beb5ea8bULL, 0x55ff491ce3926db6ULL, 0x7193d9a83baf3c4aULL, + 0x7b8df18a07ff727cULL, 0x8c890a860f149d83ULL, 0x7296d5a731b72143ULL, + 0x88851a921734b19fULL, 0xf607ff090ee3e4f8ULL, 0x2a7ea882fc4d33d6ULL, + 0x3e42f8c684edafbaULL, 0x5ee2653bd9ca2887ULL, 0x27699cbbd2254cf5ULL, + 0x46ca0543890ac0cfULL, 0x0c14303c28607424ULL, 0x65af89ec430fa026ULL, + 0x68b8bdd56d67df05ULL, 0x61a399f85b2f8c3aULL, 0x03050c0f0a181d09ULL, + 0xc15e23e2bc46187dULL, 0x57f94116ef827bb8ULL, 0xd6677fa9cefe9918ULL, + 0xd976439aec86f035ULL, 0x58e87d25cdfa1295ULL, 0xd875479fea8efb32ULL, + 0x66aa85e34917bd2fULL, 0xd7647bacc8f6921fULL, 0x3a4ee8d29ccd83a6ULL, + 0xc84507cf8a0e4b42ULL, 0x3c44f0cc88fdb9b4ULL, 0xfa13cf35268390dcULL, + 0x96a762f453c463c5ULL, 0xa7f4a601f551a552ULL, 0x98b55ac277b401efULL, + 0xec29977b52331abeULL, 0xb8d5da62b7a97c0fULL, 0xc7543bfca876226fULL, + 0xaeef822cc319f66dULL, 0x69bbb9d06b6fd402ULL, 0x4bdd317aa762bfecULL, + 0xabe0963ddd31d176ULL, 
0xa9e69e37d121c778ULL, 0x67a981e64f1fb628ULL, + 0x0a1e28223c504e36ULL, 0x47c901468f02cbc8ULL, 0xf20bef1d16c3c8e4ULL, + 0xb5c2ee5b99c1032cULL, 0x226688aacc0d6beeULL, 0xe532b356647b4981ULL, + 0xee2f9f715e230cb0ULL, 0xbedfc27ca399461dULL, 0x2b7dac87fa4538d1ULL, + 0x819e3ebf217ce2a0ULL, 0x1236485a6c90a67eULL, 0x839836b52d6cf4aeULL, + 0x1b2d6c775ad8f541ULL, 0x0e1238362470622aULL, 0x23658cafca0560e9ULL, + 0xf502f30604fbf9f1ULL, 0x45cf094c8312ddc6ULL, 0x216384a5c61576e7ULL, + 0xce4f1fd19e3e7150ULL, 0x49db3970ab72a9e2ULL, 0x2c74b09ce87d09c4ULL, + 0xf916c33a2c9b8dd5ULL, 0xe637bf596e635488ULL, 0xb6c7e25493d91e25ULL, + 0x2878a088f05d25d8ULL, 0x17395c4b72b88165ULL, 0x829b32b02b64ffa9ULL, + 0x1a2e68725cd0fe46ULL, 0x8b80169d1d2cac96ULL, 0xfe1fdf213ea3bcc0ULL, + 0x8a8312981b24a791ULL, 0x091b242d3648533fULL, 0xc94603ca8c064045ULL, + 0x879426a1354cd8b2ULL, 0x4ed2256bb94a98f7ULL, 0xe13ea3427c5b659dULL, + 0x2e72b896e46d1fcaULL, 0xe431b75362734286ULL, 0xe03da7477a536e9aULL, + 0xeb208b60400b2babULL, 0x90ad7aea47f459d7ULL, 0xa4f1aa0eff49b85bULL, + 0x1e22786644f0d25aULL, 0x85922eab395ccebcULL, 0x60a09dfd5d27873dULL, + 0x0000000000000000ULL, 0x256f94b1de355afbULL, 0xf401f70302f3f2f6ULL, + 0xf10ee3121cdbd5edULL, 0x94a16afe5fd475cbULL, 0x0b1d2c273a584531ULL, + 0xe734bb5c686b5f8fULL, 0x759fc9bc238f1056ULL, 0xef2c9b74582b07b7ULL, + 0x345cd0e4b8bde18cULL, 0x3153c4f5a695c697ULL, 0xd46177a3c2ee8f16ULL, + 0xd06d67b7dacea30aULL, 0x869722a43344d3b5ULL, 0x7e82e59b19d75567ULL, + 0xadea8e23c901eb64ULL, 0xfd1ad32e34bba1c9ULL, 0x297ba48df6552edfULL, + 0x3050c0f0a09dcd90ULL, 0x3b4decd79ac588a1ULL, 0x9fbc46d9658c30faULL, + 0xf815c73f2a9386d2ULL, 0xc6573ff9ae7e2968ULL, 0x13354c5f6a98ad79ULL, + 0x060a181e14303a12ULL, 0x050f14111e28271bULL, 0xc55233f6a4663461ULL, + 0x113344556688bb77ULL, 0x7799c1b62f9f0658ULL, 0x7c84ed9115c74369ULL, + 0x7a8ef58f01f7797bULL, 0x7888fd850de76f75ULL, 0x365ad8eeb4adf782ULL, + 0x1c24706c48e0c454ULL, 0x394be4dd96d59eafULL, 0x59eb7920cbf21992ULL, + 0x1828607850c0e848ULL, 0x56fa4513e98a70bfULL, 0xb3c8f6458df1393eULL, + 0xb0cdfa4a87e92437ULL, 0x246c90b4d83d51fcULL, 0x206080a0c01d7de0ULL, + 0xb2cbf2408bf93239ULL, 0x92ab72e04be44fd9ULL, 0xa3f8b615ed71894eULL, + 0xc05d27e7ba4e137aULL, 0x44cc0d49851ad6c1ULL, 0x62a695f751379133ULL, + 0x103040506080b070ULL, 0xb4c1ea5e9fc9082bULL, 0x84912aae3f54c5bbULL, + 0x43c511529722e7d4ULL, 0x93a876e54dec44deULL, 0xc25b2fedb65e0574ULL, + 0x4ade357fa16ab4ebULL, 0xbddace73a9815b14ULL, 0x8f8c0689050c808aULL, + 0x2d77b499ee7502c3ULL, 0xbcd9ca76af895013ULL, 0x9cb94ad66f942df3ULL, + 0x6abeb5df6177c90bULL, 0x40c01d5d9d3afaddULL, 0xcf4c1bd498367a57ULL, + 0xa2fbb210eb798249ULL, 0x809d3aba2774e9a7ULL, 0x4fd1216ebf4293f0ULL, + 0x1f217c6342f8d95dULL, 0xca430fc5861e5d4cULL, 0xaae39238db39da71ULL, + 0x42c61557912aecd3ULL +}; + +static const u64 T1[256] = { + 0xd3ba68d2b9bb016aULL, 0xfc54194d9ae5b166ULL, 0x712f93bc65e2cd14ULL, + 0x9c74b9cd8725511bULL, 0xf5530251a2f7a457ULL, 0x68d3b86bd6d003beULL, + 0x6bd2bd6fded604b5ULL, 0xd74d642952b3fe85ULL, 0xf0500d5dbafdad4aULL, + 0xe9ac268a09cf63e0ULL, 0x8a8d830e1c098496ULL, 0xdcbf79c691a51a4dULL, + 0x9070addda73d4d37ULL, 0xf6520755aaf1a35cULL, 0xb39ac852a47be117ULL, + 0xd44c612d5ab5f98eULL, 0x23ea658f0346ac20ULL, 0x62d5a673e6c41184ULL, + 0xa497f166cc55c268ULL, 0x6ed1b263c6dc0da8ULL, 0x5533ffcc85aa99d0ULL, + 0xf3510859b2fbaa41ULL, 0xed5b2a71e2c79c0fULL, 0xf7a604a259f355aeULL, + 0x7fde815fbefe20c1ULL, 0xd848753d7aade5a2ULL, 0xe5a8329a29d77fccULL, + 0xb699c75ebc71e80aULL, 0x70db904b96e03be6ULL, 0x5632fac88dac9edbULL, + 0xc4b751e6d1952215ULL, 
0x19fc2bd7b332ceaaULL, 0x38e348ab4b709373ULL, + 0xbf9edc428463fd3bULL, 0xae91ef7efc41d052ULL, 0xb09bcd56ac7de61cULL, + 0x3be24daf43769478ULL, 0xd0bb6dd6b1bd0661ULL, 0xc3415819329bdaf1ULL, + 0xb26ecba5577917e5ULL, 0xf2a50bae41f95cb3ULL, 0x40cbc00b16804b56ULL, + 0xbd6bdab17f670cc2ULL, 0xa295fb6edc59cc7eULL, 0xfea11fbe61e1409fULL, + 0x08f318ebcb10e3c3ULL, 0xceb14ffee181302fULL, 0x06020a08100c0e16ULL, + 0x49ccdb172e925e67ULL, 0x51c4f3376ea2663fULL, 0x271d6974e84e53cfULL, + 0x3c144450a0786c9cULL, 0x58c3e82b56b0730eULL, 0xa563f2913f57349aULL, + 0x73da954f9ee63cedULL, 0xe75d3469d2d38e35ULL, 0xe15f3e61c2df8023ULL, + 0x79dc8b57aef22ed7ULL, 0x877d94e9cf136e48ULL, 0x4acdde132694596cULL, + 0x817f9ee1df1f605eULL, 0xee5a2f75eac19b04ULL, 0xb46cc1ad477519f3ULL, + 0xe45c316ddad5893eULL, 0x04f70cfbeb08ffefULL, 0x6a26be982dd4f247ULL, + 0x1cff24dbab38c7b7ULL, 0x2aed7e933b54b911ULL, 0x25e86f87134aa236ULL, + 0xba9dd34e9c69f426ULL, 0xb16fcea15f7f10eeULL, 0x8f8e8c0204038d8bULL, + 0x2b197d64c8564fe3ULL, 0xfda01aba69e74794ULL, 0x0df017e7d31aeadeULL, + 0x8689971e3c1198baULL, 0x110f333c78222d69ULL, 0x09071b1c38121531ULL, + 0xecaf298611c56afdULL, 0x10fb30cb8b20db9bULL, 0x1808282040303858ULL, + 0x3f154154a87e6b97ULL, 0x170d3934682e237fULL, 0x0c04141020181c2cULL, + 0x030105040806070bULL, 0xac64e98d074521abULL, 0x7cdf845bb6f827caULL, + 0x9a76b3c597295f0dULL, 0x8b7980f9ef0b7264ULL, 0x7add8e53a6f429dcULL, + 0x473dc9f4f58eb3b2ULL, 0x3a164e58b074628aULL, 0x413fc3fce582bda4ULL, + 0x5937ebdca5b285fcULL, 0xb76dc4a94f731ef8ULL, 0x4838d8e0dd90a895ULL, + 0xd6b967dea1b10877ULL, 0x9573a2d1bf37442aULL, 0x26e96a831b4ca53dULL, + 0x5f35e1d4b5be8beaULL, 0xff551c4992e3b66dULL, 0x9371a8d9af3b4a3cULL, + 0x8d7b8af1ff077c72ULL, 0x898c860a140f839dULL, 0x9672a7d5b7314321ULL, + 0x8588921a34179fb1ULL, 0x07f609ffe30ef8e4ULL, 0x7e2a82a84dfcd633ULL, + 0x423ec6f8ed84baafULL, 0xe25e3b65cad98728ULL, 0x6927bb9c25d2f54cULL, + 0xca4643050a89cfc0ULL, 0x140c3c3060282474ULL, 0xaf65ec890f4326a0ULL, + 0xb868d5bd676d05dfULL, 0xa361f8992f5b3a8cULL, 0x05030f0c180a091dULL, + 0x5ec1e22346bc7d18ULL, 0xf957164182efb87bULL, 0x67d6a97ffece1899ULL, + 0x76d99a4386ec35f0ULL, 0xe858257dfacd9512ULL, 0x75d89f478eea32fbULL, + 0xaa66e38517492fbdULL, 0x64d7ac7bf6c81f92ULL, 0x4e3ad2e8cd9ca683ULL, + 0x45c8cf070e8a424bULL, 0x443cccf0fd88b4b9ULL, 0x13fa35cf8326dc90ULL, + 0xa796f462c453c563ULL, 0xf4a701a651f552a5ULL, 0xb598c25ab477ef01ULL, + 0x29ec7b973352be1aULL, 0xd5b862daa9b70f7cULL, 0x54c7fc3b76a86f22ULL, + 0xefae2c8219c36df6ULL, 0xbb69d0b96f6b02d4ULL, 0xdd4b7a3162a7ecbfULL, + 0xe0ab3d9631dd76d1ULL, 0xe6a9379e21d178c7ULL, 0xa967e6811f4f28b6ULL, + 0x1e0a2228503c364eULL, 0xc9474601028fc8cbULL, 0x0bf21defc316e4c8ULL, + 0xc2b55beec1992c03ULL, 0x6622aa880dccee6bULL, 0x32e556b37b648149ULL, + 0x2fee719f235eb00cULL, 0xdfbe7cc299a31d46ULL, 0x7d2b87ac45fad138ULL, + 0x9e81bf3e7c21a0e2ULL, 0x36125a48906c7ea6ULL, 0x9883b5366c2daef4ULL, + 0x2d1b776cd85a41f5ULL, 0x120e363870242a62ULL, 0x6523af8c05cae960ULL, + 0x02f506f3fb04f1f9ULL, 0xcf454c091283c6ddULL, 0x6321a58415c6e776ULL, + 0x4fced11f3e9e5071ULL, 0xdb49703972abe2a9ULL, 0x742c9cb07de8c409ULL, + 0x16f93ac39b2cd58dULL, 0x37e659bf636e8854ULL, 0xc7b654e2d993251eULL, + 0x782888a05df0d825ULL, 0x39174b5cb8726581ULL, 0x9b82b032642ba9ffULL, + 0x2e1a7268d05c46feULL, 0x808b9d162c1d96acULL, 0x1ffe21dfa33ec0bcULL, + 0x838a9812241b91a7ULL, 0x1b092d2448363f53ULL, 0x46c9ca03068c4540ULL, + 0x9487a1264c35b2d8ULL, 0xd24e6b254ab9f798ULL, 0x3ee142a35b7c9d65ULL, + 0x722e96b86de4ca1fULL, 0x31e453b773628642ULL, 0x3de047a7537a9a6eULL, + 0x20eb608b0b40ab2bULL, 
0xad90ea7af447d759ULL, 0xf1a40eaa49ff5bb8ULL, + 0x221e6678f0445ad2ULL, 0x9285ab2e5c39bcceULL, 0xa060fd9d275d3d87ULL, + 0x0000000000000000ULL, 0x6f25b19435defb5aULL, 0x01f403f7f302f6f2ULL, + 0x0ef112e3db1cedd5ULL, 0xa194fe6ad45fcb75ULL, 0x1d0b272c583a3145ULL, + 0x34e75cbb6b688f5fULL, 0x9f75bcc98f235610ULL, 0x2cef749b2b58b707ULL, + 0x5c34e4d0bdb88ce1ULL, 0x5331f5c495a697c6ULL, 0x61d4a377eec2168fULL, + 0x6dd0b767ceda0aa3ULL, 0x9786a4224433b5d3ULL, 0x827e9be5d7196755ULL, + 0xeaad238e01c964ebULL, 0x1afd2ed3bb34c9a1ULL, 0x7b298da455f6df2eULL, + 0x5030f0c09da090cdULL, 0x4d3bd7ecc59aa188ULL, 0xbc9fd9468c65fa30ULL, + 0x15f83fc7932ad286ULL, 0x57c6f93f7eae6829ULL, 0x35135f4c986a79adULL, + 0x0a061e183014123aULL, 0x0f051114281e1b27ULL, 0x52c5f63366a46134ULL, + 0x33115544886677bbULL, 0x9977b6c19f2f5806ULL, 0x847c91edc7156943ULL, + 0x8e7a8ff5f7017b79ULL, 0x887885fde70d756fULL, 0x5a36eed8adb482f7ULL, + 0x241c6c70e04854c4ULL, 0x4b39dde4d596af9eULL, 0xeb592079f2cb9219ULL, + 0x28187860c05048e8ULL, 0xfa5613458ae9bf70ULL, 0xc8b345f6f18d3e39ULL, + 0xcdb04afae9873724ULL, 0x6c24b4903dd8fc51ULL, 0x6020a0801dc0e07dULL, + 0xcbb240f2f98b3932ULL, 0xab92e072e44bd94fULL, 0xf8a315b671ed4e89ULL, + 0x5dc0e7274eba7a13ULL, 0xcc44490d1a85c1d6ULL, 0xa662f79537513391ULL, + 0x30105040806070b0ULL, 0xc1b45eeac99f2b08ULL, 0x9184ae2a543fbbc5ULL, + 0xc54352112297d4e7ULL, 0xa893e576ec4dde44ULL, 0x5bc2ed2f5eb67405ULL, + 0xde4a7f356aa1ebb4ULL, 0xdabd73ce81a9145bULL, 0x8c8f89060c058a80ULL, + 0x772d99b475eec302ULL, 0xd9bc76ca89af1350ULL, 0xb99cd64a946ff32dULL, + 0xbe6adfb577610bc9ULL, 0xc0405d1d3a9dddfaULL, 0x4ccfd41b3698577aULL, + 0xfba210b279eb4982ULL, 0x9d80ba3a7427a7e9ULL, 0xd14f6e2142bff093ULL, + 0x211f637cf8425dd9ULL, 0x43cac50f1e864c5dULL, 0xe3aa389239db71daULL, + 0xc64257152a91d3ecULL +}; + +static const u64 T2[256] = { + 0xd268bad36a01bbb9ULL, 0x4d1954fc66b1e59aULL, 0xbc932f7114cde265ULL, + 0xcdb9749c1b512587ULL, 0x510253f557a4f7a2ULL, 0x6bb8d368be03d0d6ULL, + 0x6fbdd26bb504d6deULL, 0x29644dd785feb352ULL, 0x5d0d50f04aadfdbaULL, + 0x8a26ace9e063cf09ULL, 0x0e838d8a9684091cULL, 0xc679bfdc4d1aa591ULL, + 0xddad7090374d3da7ULL, 0x550752f65ca3f1aaULL, 0x52c89ab317e17ba4ULL, + 0x2d614cd48ef9b55aULL, 0x8f65ea2320ac4603ULL, 0x73a6d5628411c4e6ULL, + 0x66f197a468c255ccULL, 0x63b2d16ea80ddcc6ULL, 0xccff3355d099aa85ULL, + 0x590851f341aafbb2ULL, 0x712a5bed0f9cc7e2ULL, 0xa204a6f7ae55f359ULL, + 0x5f81de7fc120febeULL, 0x3d7548d8a2e5ad7aULL, 0x9a32a8e5cc7fd729ULL, + 0x5ec799b60ae871bcULL, 0x4b90db70e63be096ULL, 0xc8fa3256db9eac8dULL, + 0xe651b7c4152295d1ULL, 0xd72bfc19aace32b3ULL, 0xab48e3387393704bULL, + 0x42dc9ebf3bfd6384ULL, 0x7eef91ae52d041fcULL, 0x56cd9bb01ce67dacULL, + 0xaf4de23b78947643ULL, 0xd66dbbd06106bdb1ULL, 0x195841c3f1da9b32ULL, + 0xa5cb6eb2e5177957ULL, 0xae0ba5f2b35cf941ULL, 0x0bc0cb40564b8016ULL, + 0xb1da6bbdc20c677fULL, 0x6efb95a27ecc59dcULL, 0xbe1fa1fe9f40e161ULL, + 0xeb18f308c3e310cbULL, 0xfe4fb1ce2f3081e1ULL, 0x080a0206160e0c10ULL, + 0x17dbcc49675e922eULL, 0x37f3c4513f66a26eULL, 0x74691d27cf534ee8ULL, + 0x5044143c9c6c78a0ULL, 0x2be8c3580e73b056ULL, 0x91f263a59a34573fULL, + 0x4f95da73ed3ce69eULL, 0x69345de7358ed3d2ULL, 0x613e5fe12380dfc2ULL, + 0x578bdc79d72ef2aeULL, 0xe9947d87486e13cfULL, 0x13decd4a6c599426ULL, + 0xe19e7f815e601fdfULL, 0x752f5aee049bc1eaULL, 0xadc16cb4f3197547ULL, + 0x6d315ce43e89d5daULL, 0xfb0cf704efff08ebULL, 0x98be266a47f2d42dULL, + 0xdb24ff1cb7c738abULL, 0x937eed2a11b9543bULL, 0x876fe82536a24a13ULL, + 0x4ed39dba26f4699cULL, 0xa1ce6fb1ee107f5fULL, 0x028c8e8f8b8d0304ULL, + 0x647d192be34f56c8ULL, 
0xba1aa0fd9447e769ULL, 0xe717f00ddeea1ad3ULL, + 0x1e978986ba98113cULL, 0x3c330f11692d2278ULL, 0x1c1b070931151238ULL, + 0x8629afecfd6ac511ULL, 0xcb30fb109bdb208bULL, 0x2028081858383040ULL, + 0x5441153f976b7ea8ULL, 0x34390d177f232e68ULL, 0x1014040c2c1c1820ULL, + 0x040501030b070608ULL, 0x8de964acab214507ULL, 0x5b84df7cca27f8b6ULL, + 0xc5b3769a0d5f2997ULL, 0xf980798b64720befULL, 0x538edd7adc29f4a6ULL, + 0xf4c93d47b2b38ef5ULL, 0x584e163a8a6274b0ULL, 0xfcc33f41a4bd82e5ULL, + 0xdceb3759fc85b2a5ULL, 0xa9c46db7f81e734fULL, 0xe0d8384895a890ddULL, + 0xde67b9d67708b1a1ULL, 0xd1a273952a4437bfULL, 0x836ae9263da54c1bULL, + 0xd4e1355fea8bbeb5ULL, 0x491c55ff6db6e392ULL, 0xd9a871933c4a3bafULL, + 0xf18a7b8d727c07ffULL, 0x0a868c899d830f14ULL, 0xd5a77296214331b7ULL, + 0x1a928885b19f1734ULL, 0xff09f607e4f80ee3ULL, 0xa8822a7e33d6fc4dULL, + 0xf8c63e42afba84edULL, 0x653b5ee22887d9caULL, 0x9cbb27694cf5d225ULL, + 0x054346cac0cf890aULL, 0x303c0c1474242860ULL, 0x89ec65afa026430fULL, + 0xbdd568b8df056d67ULL, 0x99f861a38c3a5b2fULL, 0x0c0f03051d090a18ULL, + 0x23e2c15e187dbc46ULL, 0x411657f97bb8ef82ULL, 0x7fa9d6679918cefeULL, + 0x439ad976f035ec86ULL, 0x7d2558e81295cdfaULL, 0x479fd875fb32ea8eULL, + 0x85e366aabd2f4917ULL, 0x7bacd764921fc8f6ULL, 0xe8d23a4e83a69ccdULL, + 0x07cfc8454b428a0eULL, 0xf0cc3c44b9b488fdULL, 0xcf35fa1390dc2683ULL, + 0x62f496a763c553c4ULL, 0xa601a7f4a552f551ULL, 0x5ac298b501ef77b4ULL, + 0x977bec291abe5233ULL, 0xda62b8d57c0fb7a9ULL, 0x3bfcc754226fa876ULL, + 0x822caeeff66dc319ULL, 0xb9d069bbd4026b6fULL, 0x317a4bddbfeca762ULL, + 0x963dabe0d176dd31ULL, 0x9e37a9e6c778d121ULL, 0x81e667a9b6284f1fULL, + 0x28220a1e4e363c50ULL, 0x014647c9cbc88f02ULL, 0xef1df20bc8e416c3ULL, + 0xee5bb5c2032c99c1ULL, 0x88aa22666beecc0dULL, 0xb356e5324981647bULL, + 0x9f71ee2f0cb05e23ULL, 0xc27cbedf461da399ULL, 0xac872b7d38d1fa45ULL, + 0x3ebf819ee2a0217cULL, 0x485a1236a67e6c90ULL, 0x36b58398f4ae2d6cULL, + 0x6c771b2df5415ad8ULL, 0x38360e12622a2470ULL, 0x8caf236560e9ca05ULL, + 0xf306f502f9f104fbULL, 0x094c45cfddc68312ULL, 0x84a5216376e7c615ULL, + 0x1fd1ce4f71509e3eULL, 0x397049dba9e2ab72ULL, 0xb09c2c7409c4e87dULL, + 0xc33af9168dd52c9bULL, 0xbf59e63754886e63ULL, 0xe254b6c71e2593d9ULL, + 0xa088287825d8f05dULL, 0x5c4b1739816572b8ULL, 0x32b0829bffa92b64ULL, + 0x68721a2efe465cd0ULL, 0x169d8b80ac961d2cULL, 0xdf21fe1fbcc03ea3ULL, + 0x12988a83a7911b24ULL, 0x242d091b533f3648ULL, 0x03cac94640458c06ULL, + 0x26a18794d8b2354cULL, 0x256b4ed298f7b94aULL, 0xa342e13e659d7c5bULL, + 0xb8962e721fcae46dULL, 0xb753e43142866273ULL, 0xa747e03d6e9a7a53ULL, + 0x8b60eb202bab400bULL, 0x7aea90ad59d747f4ULL, 0xaa0ea4f1b85bff49ULL, + 0x78661e22d25a44f0ULL, 0x2eab8592cebc395cULL, 0x9dfd60a0873d5d27ULL, + 0x0000000000000000ULL, 0x94b1256f5afbde35ULL, 0xf703f401f2f602f3ULL, + 0xe312f10ed5ed1cdbULL, 0x6afe94a175cb5fd4ULL, 0x2c270b1d45313a58ULL, + 0xbb5ce7345f8f686bULL, 0xc9bc759f1056238fULL, 0x9b74ef2c07b7582bULL, + 0xd0e4345ce18cb8bdULL, 0xc4f53153c697a695ULL, 0x77a3d4618f16c2eeULL, + 0x67b7d06da30adaceULL, 0x22a48697d3b53344ULL, 0xe59b7e82556719d7ULL, + 0x8e23adeaeb64c901ULL, 0xd32efd1aa1c934bbULL, 0xa48d297b2edff655ULL, + 0xc0f03050cd90a09dULL, 0xecd73b4d88a19ac5ULL, 0x46d99fbc30fa658cULL, + 0xc73ff81586d22a93ULL, 0x3ff9c6572968ae7eULL, 0x4c5f1335ad796a98ULL, + 0x181e060a3a121430ULL, 0x1411050f271b1e28ULL, 0x33f6c5523461a466ULL, + 0x44551133bb776688ULL, 0xc1b6779906582f9fULL, 0xed917c84436915c7ULL, + 0xf58f7a8e797b01f7ULL, 0xfd8578886f750de7ULL, 0xd8ee365af782b4adULL, + 0x706c1c24c45448e0ULL, 0xe4dd394b9eaf96d5ULL, 0x792059eb1992cbf2ULL, + 0x60781828e84850c0ULL, 
0x451356fa70bfe98aULL, 0xf645b3c8393e8df1ULL, + 0xfa4ab0cd243787e9ULL, 0x90b4246c51fcd83dULL, 0x80a020607de0c01dULL, + 0xf240b2cb32398bf9ULL, 0x72e092ab4fd94be4ULL, 0xb615a3f8894eed71ULL, + 0x27e7c05d137aba4eULL, 0x0d4944ccd6c1851aULL, 0x95f762a691335137ULL, + 0x40501030b0706080ULL, 0xea5eb4c1082b9fc9ULL, 0x2aae8491c5bb3f54ULL, + 0x115243c5e7d49722ULL, 0x76e593a844de4decULL, 0x2fedc25b0574b65eULL, + 0x357f4adeb4eba16aULL, 0xce73bdda5b14a981ULL, 0x06898f8c808a050cULL, + 0xb4992d7702c3ee75ULL, 0xca76bcd95013af89ULL, 0x4ad69cb92df36f94ULL, + 0xb5df6abec90b6177ULL, 0x1d5d40c0fadd9d3aULL, 0x1bd4cf4c7a579836ULL, + 0xb210a2fb8249eb79ULL, 0x3aba809de9a72774ULL, 0x216e4fd193f0bf42ULL, + 0x7c631f21d95d42f8ULL, 0x0fc5ca435d4c861eULL, 0x9238aae3da71db39ULL, + 0x155742c6ecd3912aULL +}; + +static const u64 T3[256] = { + 0x68d2d3ba016ab9bbULL, 0x194dfc54b1669ae5ULL, 0x93bc712fcd1465e2ULL, + 0xb9cd9c74511b8725ULL, 0x0251f553a457a2f7ULL, 0xb86b68d303bed6d0ULL, + 0xbd6f6bd204b5ded6ULL, 0x6429d74dfe8552b3ULL, 0x0d5df050ad4abafdULL, + 0x268ae9ac63e009cfULL, 0x830e8a8d84961c09ULL, 0x79c6dcbf1a4d91a5ULL, + 0xaddd90704d37a73dULL, 0x0755f652a35caaf1ULL, 0xc852b39ae117a47bULL, + 0x612dd44cf98e5ab5ULL, 0x658f23eaac200346ULL, 0xa67362d51184e6c4ULL, + 0xf166a497c268cc55ULL, 0xb2636ed10da8c6dcULL, 0xffcc553399d085aaULL, + 0x0859f351aa41b2fbULL, 0x2a71ed5b9c0fe2c7ULL, 0x04a2f7a655ae59f3ULL, + 0x815f7fde20c1befeULL, 0x753dd848e5a27aadULL, 0x329ae5a87fcc29d7ULL, + 0xc75eb699e80abc71ULL, 0x904b70db3be696e0ULL, 0xfac856329edb8dacULL, + 0x51e6c4b72215d195ULL, 0x2bd719fcceaab332ULL, 0x48ab38e393734b70ULL, + 0xdc42bf9efd3b8463ULL, 0xef7eae91d052fc41ULL, 0xcd56b09be61cac7dULL, + 0x4daf3be294784376ULL, 0x6dd6d0bb0661b1bdULL, 0x5819c341daf1329bULL, + 0xcba5b26e17e55779ULL, 0x0baef2a55cb341f9ULL, 0xc00b40cb4b561680ULL, + 0xdab1bd6b0cc27f67ULL, 0xfb6ea295cc7edc59ULL, 0x1fbefea1409f61e1ULL, + 0x18eb08f3e3c3cb10ULL, 0x4ffeceb1302fe181ULL, 0x0a0806020e16100cULL, + 0xdb1749cc5e672e92ULL, 0xf33751c4663f6ea2ULL, 0x6974271d53cfe84eULL, + 0x44503c146c9ca078ULL, 0xe82b58c3730e56b0ULL, 0xf291a563349a3f57ULL, + 0x954f73da3ced9ee6ULL, 0x3469e75d8e35d2d3ULL, 0x3e61e15f8023c2dfULL, + 0x8b5779dc2ed7aef2ULL, 0x94e9877d6e48cf13ULL, 0xde134acd596c2694ULL, + 0x9ee1817f605edf1fULL, 0x2f75ee5a9b04eac1ULL, 0xc1adb46c19f34775ULL, + 0x316de45c893edad5ULL, 0x0cfb04f7ffefeb08ULL, 0xbe986a26f2472dd4ULL, + 0x24db1cffc7b7ab38ULL, 0x7e932aedb9113b54ULL, 0x6f8725e8a236134aULL, + 0xd34eba9df4269c69ULL, 0xcea1b16f10ee5f7fULL, 0x8c028f8e8d8b0403ULL, + 0x7d642b194fe3c856ULL, 0x1abafda0479469e7ULL, 0x17e70df0eaded31aULL, + 0x971e868998ba3c11ULL, 0x333c110f2d697822ULL, 0x1b1c090715313812ULL, + 0x2986ecaf6afd11c5ULL, 0x30cb10fbdb9b8b20ULL, 0x2820180838584030ULL, + 0x41543f156b97a87eULL, 0x3934170d237f682eULL, 0x14100c041c2c2018ULL, + 0x05040301070b0806ULL, 0xe98dac6421ab0745ULL, 0x845b7cdf27cab6f8ULL, + 0xb3c59a765f0d9729ULL, 0x80f98b797264ef0bULL, 0x8e537add29dca6f4ULL, + 0xc9f4473db3b2f58eULL, 0x4e583a16628ab074ULL, 0xc3fc413fbda4e582ULL, + 0xebdc593785fca5b2ULL, 0xc4a9b76d1ef84f73ULL, 0xd8e04838a895dd90ULL, + 0x67ded6b90877a1b1ULL, 0xa2d19573442abf37ULL, 0x6a8326e9a53d1b4cULL, + 0xe1d45f358beab5beULL, 0x1c49ff55b66d92e3ULL, 0xa8d993714a3caf3bULL, + 0x8af18d7b7c72ff07ULL, 0x860a898c839d140fULL, 0xa7d596724321b731ULL, + 0x921a85889fb13417ULL, 0x09ff07f6f8e4e30eULL, 0x82a87e2ad6334dfcULL, + 0xc6f8423ebaafed84ULL, 0x3b65e25e8728cad9ULL, 0xbb9c6927f54c25d2ULL, + 0x4305ca46cfc00a89ULL, 0x3c30140c24746028ULL, 0xec89af6526a00f43ULL, + 0xd5bdb86805df676dULL, 
0xf899a3613a8c2f5bULL, 0x0f0c0503091d180aULL, + 0xe2235ec17d1846bcULL, 0x1641f957b87b82efULL, 0xa97f67d61899feceULL, + 0x9a4376d935f086ecULL, 0x257de8589512facdULL, 0x9f4775d832fb8eeaULL, + 0xe385aa662fbd1749ULL, 0xac7b64d71f92f6c8ULL, 0xd2e84e3aa683cd9cULL, + 0xcf0745c8424b0e8aULL, 0xccf0443cb4b9fd88ULL, 0x35cf13fadc908326ULL, + 0xf462a796c563c453ULL, 0x01a6f4a752a551f5ULL, 0xc25ab598ef01b477ULL, + 0x7b9729ecbe1a3352ULL, 0x62dad5b80f7ca9b7ULL, 0xfc3b54c76f2276a8ULL, + 0x2c82efae6df619c3ULL, 0xd0b9bb6902d46f6bULL, 0x7a31dd4becbf62a7ULL, + 0x3d96e0ab76d131ddULL, 0x379ee6a978c721d1ULL, 0xe681a96728b61f4fULL, + 0x22281e0a364e503cULL, 0x4601c947c8cb028fULL, 0x1def0bf2e4c8c316ULL, + 0x5beec2b52c03c199ULL, 0xaa886622ee6b0dccULL, 0x56b332e581497b64ULL, + 0x719f2feeb00c235eULL, 0x7cc2dfbe1d4699a3ULL, 0x87ac7d2bd13845faULL, + 0xbf3e9e81a0e27c21ULL, 0x5a4836127ea6906cULL, 0xb5369883aef46c2dULL, + 0x776c2d1b41f5d85aULL, 0x3638120e2a627024ULL, 0xaf8c6523e96005caULL, + 0x06f302f5f1f9fb04ULL, 0x4c09cf45c6dd1283ULL, 0xa5846321e77615c6ULL, + 0xd11f4fce50713e9eULL, 0x7039db49e2a972abULL, 0x9cb0742cc4097de8ULL, + 0x3ac316f9d58d9b2cULL, 0x59bf37e68854636eULL, 0x54e2c7b6251ed993ULL, + 0x88a07828d8255df0ULL, 0x4b5c39176581b872ULL, 0xb0329b82a9ff642bULL, + 0x72682e1a46fed05cULL, 0x9d16808b96ac2c1dULL, 0x21df1ffec0bca33eULL, + 0x9812838a91a7241bULL, 0x2d241b093f534836ULL, 0xca0346c94540068cULL, + 0xa1269487b2d84c35ULL, 0x6b25d24ef7984ab9ULL, 0x42a33ee19d655b7cULL, + 0x96b8722eca1f6de4ULL, 0x53b731e486427362ULL, 0x47a73de09a6e537aULL, + 0x608b20ebab2b0b40ULL, 0xea7aad90d759f447ULL, 0x0eaaf1a45bb849ffULL, + 0x6678221e5ad2f044ULL, 0xab2e9285bcce5c39ULL, 0xfd9da0603d87275dULL, + 0x0000000000000000ULL, 0xb1946f25fb5a35deULL, 0x03f701f4f6f2f302ULL, + 0x12e30ef1edd5db1cULL, 0xfe6aa194cb75d45fULL, 0x272c1d0b3145583aULL, + 0x5cbb34e78f5f6b68ULL, 0xbcc99f7556108f23ULL, 0x749b2cefb7072b58ULL, + 0xe4d05c348ce1bdb8ULL, 0xf5c4533197c695a6ULL, 0xa37761d4168feec2ULL, + 0xb7676dd00aa3cedaULL, 0xa4229786b5d34433ULL, 0x9be5827e6755d719ULL, + 0x238eeaad64eb01c9ULL, 0x2ed31afdc9a1bb34ULL, 0x8da47b29df2e55f6ULL, + 0xf0c0503090cd9da0ULL, 0xd7ec4d3ba188c59aULL, 0xd946bc9ffa308c65ULL, + 0x3fc715f8d286932aULL, 0xf93f57c668297eaeULL, 0x5f4c351379ad986aULL, + 0x1e180a06123a3014ULL, 0x11140f051b27281eULL, 0xf63352c5613466a4ULL, + 0x5544331177bb8866ULL, 0xb6c1997758069f2fULL, 0x91ed847c6943c715ULL, + 0x8ff58e7a7b79f701ULL, 0x85fd8878756fe70dULL, 0xeed85a3682f7adb4ULL, + 0x6c70241c54c4e048ULL, 0xdde44b39af9ed596ULL, 0x2079eb599219f2cbULL, + 0x7860281848e8c050ULL, 0x1345fa56bf708ae9ULL, 0x45f6c8b33e39f18dULL, + 0x4afacdb03724e987ULL, 0xb4906c24fc513dd8ULL, 0xa0806020e07d1dc0ULL, + 0x40f2cbb23932f98bULL, 0xe072ab92d94fe44bULL, 0x15b6f8a34e8971edULL, + 0xe7275dc07a134ebaULL, 0x490dcc44c1d61a85ULL, 0xf795a66233913751ULL, + 0x5040301070b08060ULL, 0x5eeac1b42b08c99fULL, 0xae2a9184bbc5543fULL, + 0x5211c543d4e72297ULL, 0xe576a893de44ec4dULL, 0xed2f5bc274055eb6ULL, + 0x7f35de4aebb46aa1ULL, 0x73cedabd145b81a9ULL, 0x89068c8f8a800c05ULL, + 0x99b4772dc30275eeULL, 0x76cad9bc135089afULL, 0xd64ab99cf32d946fULL, + 0xdfb5be6a0bc97761ULL, 0x5d1dc040ddfa3a9dULL, 0xd41b4ccf577a3698ULL, + 0x10b2fba2498279ebULL, 0xba3a9d80a7e97427ULL, 0x6e21d14ff09342bfULL, + 0x637c211f5dd9f842ULL, 0xc50f43ca4c5d1e86ULL, 0x3892e3aa71da39dbULL, + 0x5715c642d3ec2a91ULL +}; + +static const u64 T4[256] = { + 0xbbb96a01bad3d268ULL, 0xe59a66b154fc4d19ULL, 0xe26514cd2f71bc93ULL, + 0x25871b51749ccdb9ULL, 0xf7a257a453f55102ULL, 0xd0d6be03d3686bb8ULL, + 0xd6deb504d26b6fbdULL, 
0xb35285fe4dd72964ULL, 0xfdba4aad50f05d0dULL, + 0xcf09e063ace98a26ULL, 0x091c96848d8a0e83ULL, 0xa5914d1abfdcc679ULL, + 0x3da7374d7090ddadULL, 0xf1aa5ca352f65507ULL, 0x7ba417e19ab352c8ULL, + 0xb55a8ef94cd42d61ULL, 0x460320acea238f65ULL, 0xc4e68411d56273a6ULL, + 0x55cc68c297a466f1ULL, 0xdcc6a80dd16e63b2ULL, 0xaa85d0993355ccffULL, + 0xfbb241aa51f35908ULL, 0xc7e20f9c5bed712aULL, 0xf359ae55a6f7a204ULL, + 0xfebec120de7f5f81ULL, 0xad7aa2e548d83d75ULL, 0xd729cc7fa8e59a32ULL, + 0x71bc0ae899b65ec7ULL, 0xe096e63bdb704b90ULL, 0xac8ddb9e3256c8faULL, + 0x95d11522b7c4e651ULL, 0x32b3aacefc19d72bULL, 0x704b7393e338ab48ULL, + 0x63843bfd9ebf42dcULL, 0x41fc52d091ae7eefULL, 0x7dac1ce69bb056cdULL, + 0x76437894e23baf4dULL, 0xbdb16106bbd0d66dULL, 0x9b32f1da41c31958ULL, + 0x7957e5176eb2a5cbULL, 0xf941b35ca5f2ae0bULL, 0x8016564bcb400bc0ULL, + 0x677fc20c6bbdb1daULL, 0x59dc7ecc95a26efbULL, 0xe1619f40a1febe1fULL, + 0x10cbc3e3f308eb18ULL, 0x81e12f30b1cefe4fULL, 0x0c10160e0206080aULL, + 0x922e675ecc4917dbULL, 0xa26e3f66c45137f3ULL, 0x4ee8cf531d277469ULL, + 0x78a09c6c143c5044ULL, 0xb0560e73c3582be8ULL, 0x573f9a3463a591f2ULL, + 0xe69eed3cda734f95ULL, 0xd3d2358e5de76934ULL, 0xdfc223805fe1613eULL, + 0xf2aed72edc79578bULL, 0x13cf486e7d87e994ULL, 0x94266c59cd4a13deULL, + 0x1fdf5e607f81e19eULL, 0xc1ea049b5aee752fULL, 0x7547f3196cb4adc1ULL, + 0xd5da3e895ce46d31ULL, 0x08ebeffff704fb0cULL, 0xd42d47f2266a98beULL, + 0x38abb7c7ff1cdb24ULL, 0x543b11b9ed2a937eULL, 0x4a1336a2e825876fULL, + 0x699c26f49dba4ed3ULL, 0x7f5fee106fb1a1ceULL, 0x03048b8d8e8f028cULL, + 0x56c8e34f192b647dULL, 0xe7699447a0fdba1aULL, 0x1ad3deeaf00de717ULL, + 0x113cba9889861e97ULL, 0x2278692d0f113c33ULL, 0x1238311507091c1bULL, + 0xc511fd6aafec8629ULL, 0x208b9bdbfb10cb30ULL, 0x3040583808182028ULL, + 0x7ea8976b153f5441ULL, 0x2e687f230d173439ULL, 0x18202c1c040c1014ULL, + 0x06080b0701030405ULL, 0x4507ab2164ac8de9ULL, 0xf8b6ca27df7c5b84ULL, + 0x29970d5f769ac5b3ULL, 0x0bef6472798bf980ULL, 0xf4a6dc29dd7a538eULL, + 0x8ef5b2b33d47f4c9ULL, 0x74b08a62163a584eULL, 0x82e5a4bd3f41fcc3ULL, + 0xb2a5fc853759dcebULL, 0x734ff81e6db7a9c4ULL, 0x90dd95a83848e0d8ULL, + 0xb1a17708b9d6de67ULL, 0x37bf2a447395d1a2ULL, 0x4c1b3da5e926836aULL, + 0xbeb5ea8b355fd4e1ULL, 0xe3926db655ff491cULL, 0x3baf3c4a7193d9a8ULL, + 0x07ff727c7b8df18aULL, 0x0f149d838c890a86ULL, 0x31b721437296d5a7ULL, + 0x1734b19f88851a92ULL, 0x0ee3e4f8f607ff09ULL, 0xfc4d33d62a7ea882ULL, + 0x84edafba3e42f8c6ULL, 0xd9ca28875ee2653bULL, 0xd2254cf527699cbbULL, + 0x890ac0cf46ca0543ULL, 0x286074240c14303cULL, 0x430fa02665af89ecULL, + 0x6d67df0568b8bdd5ULL, 0x5b2f8c3a61a399f8ULL, 0x0a181d0903050c0fULL, + 0xbc46187dc15e23e2ULL, 0xef827bb857f94116ULL, 0xcefe9918d6677fa9ULL, + 0xec86f035d976439aULL, 0xcdfa129558e87d25ULL, 0xea8efb32d875479fULL, + 0x4917bd2f66aa85e3ULL, 0xc8f6921fd7647bacULL, 0x9ccd83a63a4ee8d2ULL, + 0x8a0e4b42c84507cfULL, 0x88fdb9b43c44f0ccULL, 0x268390dcfa13cf35ULL, + 0x53c463c596a762f4ULL, 0xf551a552a7f4a601ULL, 0x77b401ef98b55ac2ULL, + 0x52331abeec29977bULL, 0xb7a97c0fb8d5da62ULL, 0xa876226fc7543bfcULL, + 0xc319f66daeef822cULL, 0x6b6fd40269bbb9d0ULL, 0xa762bfec4bdd317aULL, + 0xdd31d176abe0963dULL, 0xd121c778a9e69e37ULL, 0x4f1fb62867a981e6ULL, + 0x3c504e360a1e2822ULL, 0x8f02cbc847c90146ULL, 0x16c3c8e4f20bef1dULL, + 0x99c1032cb5c2ee5bULL, 0xcc0d6bee226688aaULL, 0x647b4981e532b356ULL, + 0x5e230cb0ee2f9f71ULL, 0xa399461dbedfc27cULL, 0xfa4538d12b7dac87ULL, + 0x217ce2a0819e3ebfULL, 0x6c90a67e1236485aULL, 0x2d6cf4ae839836b5ULL, + 0x5ad8f5411b2d6c77ULL, 0x2470622a0e123836ULL, 0xca0560e923658cafULL, + 0x04fbf9f1f502f306ULL, 
0x8312ddc645cf094cULL, 0xc61576e7216384a5ULL, + 0x9e3e7150ce4f1fd1ULL, 0xab72a9e249db3970ULL, 0xe87d09c42c74b09cULL, + 0x2c9b8dd5f916c33aULL, 0x6e635488e637bf59ULL, 0x93d91e25b6c7e254ULL, + 0xf05d25d82878a088ULL, 0x72b8816517395c4bULL, 0x2b64ffa9829b32b0ULL, + 0x5cd0fe461a2e6872ULL, 0x1d2cac968b80169dULL, 0x3ea3bcc0fe1fdf21ULL, + 0x1b24a7918a831298ULL, 0x3648533f091b242dULL, 0x8c064045c94603caULL, + 0x354cd8b2879426a1ULL, 0xb94a98f74ed2256bULL, 0x7c5b659de13ea342ULL, + 0xe46d1fca2e72b896ULL, 0x62734286e431b753ULL, 0x7a536e9ae03da747ULL, + 0x400b2babeb208b60ULL, 0x47f459d790ad7aeaULL, 0xff49b85ba4f1aa0eULL, + 0x44f0d25a1e227866ULL, 0x395ccebc85922eabULL, 0x5d27873d60a09dfdULL, + 0x0000000000000000ULL, 0xde355afb256f94b1ULL, 0x02f3f2f6f401f703ULL, + 0x1cdbd5edf10ee312ULL, 0x5fd475cb94a16afeULL, 0x3a5845310b1d2c27ULL, + 0x686b5f8fe734bb5cULL, 0x238f1056759fc9bcULL, 0x582b07b7ef2c9b74ULL, + 0xb8bde18c345cd0e4ULL, 0xa695c6973153c4f5ULL, 0xc2ee8f16d46177a3ULL, + 0xdacea30ad06d67b7ULL, 0x3344d3b5869722a4ULL, 0x19d755677e82e59bULL, + 0xc901eb64adea8e23ULL, 0x34bba1c9fd1ad32eULL, 0xf6552edf297ba48dULL, + 0xa09dcd903050c0f0ULL, 0x9ac588a13b4decd7ULL, 0x658c30fa9fbc46d9ULL, + 0x2a9386d2f815c73fULL, 0xae7e2968c6573ff9ULL, 0x6a98ad7913354c5fULL, + 0x14303a12060a181eULL, 0x1e28271b050f1411ULL, 0xa4663461c55233f6ULL, + 0x6688bb7711334455ULL, 0x2f9f06587799c1b6ULL, 0x15c743697c84ed91ULL, + 0x01f7797b7a8ef58fULL, 0x0de76f757888fd85ULL, 0xb4adf782365ad8eeULL, + 0x48e0c4541c24706cULL, 0x96d59eaf394be4ddULL, 0xcbf2199259eb7920ULL, + 0x50c0e84818286078ULL, 0xe98a70bf56fa4513ULL, 0x8df1393eb3c8f645ULL, + 0x87e92437b0cdfa4aULL, 0xd83d51fc246c90b4ULL, 0xc01d7de0206080a0ULL, + 0x8bf93239b2cbf240ULL, 0x4be44fd992ab72e0ULL, 0xed71894ea3f8b615ULL, + 0xba4e137ac05d27e7ULL, 0x851ad6c144cc0d49ULL, 0x5137913362a695f7ULL, + 0x6080b07010304050ULL, 0x9fc9082bb4c1ea5eULL, 0x3f54c5bb84912aaeULL, + 0x9722e7d443c51152ULL, 0x4dec44de93a876e5ULL, 0xb65e0574c25b2fedULL, + 0xa16ab4eb4ade357fULL, 0xa9815b14bddace73ULL, 0x050c808a8f8c0689ULL, + 0xee7502c32d77b499ULL, 0xaf895013bcd9ca76ULL, 0x6f942df39cb94ad6ULL, + 0x6177c90b6abeb5dfULL, 0x9d3afadd40c01d5dULL, 0x98367a57cf4c1bd4ULL, + 0xeb798249a2fbb210ULL, 0x2774e9a7809d3abaULL, 0xbf4293f04fd1216eULL, + 0x42f8d95d1f217c63ULL, 0x861e5d4cca430fc5ULL, 0xdb39da71aae39238ULL, + 0x912aecd342c61557ULL +}; + +static const u64 T5[256] = { + 0xb9bb016ad3ba68d2ULL, 0x9ae5b166fc54194dULL, 0x65e2cd14712f93bcULL, + 0x8725511b9c74b9cdULL, 0xa2f7a457f5530251ULL, 0xd6d003be68d3b86bULL, + 0xded604b56bd2bd6fULL, 0x52b3fe85d74d6429ULL, 0xbafdad4af0500d5dULL, + 0x09cf63e0e9ac268aULL, 0x1c0984968a8d830eULL, 0x91a51a4ddcbf79c6ULL, + 0xa73d4d379070adddULL, 0xaaf1a35cf6520755ULL, 0xa47be117b39ac852ULL, + 0x5ab5f98ed44c612dULL, 0x0346ac2023ea658fULL, 0xe6c4118462d5a673ULL, + 0xcc55c268a497f166ULL, 0xc6dc0da86ed1b263ULL, 0x85aa99d05533ffccULL, + 0xb2fbaa41f3510859ULL, 0xe2c79c0fed5b2a71ULL, 0x59f355aef7a604a2ULL, + 0xbefe20c17fde815fULL, 0x7aade5a2d848753dULL, 0x29d77fcce5a8329aULL, + 0xbc71e80ab699c75eULL, 0x96e03be670db904bULL, 0x8dac9edb5632fac8ULL, + 0xd1952215c4b751e6ULL, 0xb332ceaa19fc2bd7ULL, 0x4b70937338e348abULL, + 0x8463fd3bbf9edc42ULL, 0xfc41d052ae91ef7eULL, 0xac7de61cb09bcd56ULL, + 0x437694783be24dafULL, 0xb1bd0661d0bb6dd6ULL, 0x329bdaf1c3415819ULL, + 0x577917e5b26ecba5ULL, 0x41f95cb3f2a50baeULL, 0x16804b5640cbc00bULL, + 0x7f670cc2bd6bdab1ULL, 0xdc59cc7ea295fb6eULL, 0x61e1409ffea11fbeULL, + 0xcb10e3c308f318ebULL, 0xe181302fceb14ffeULL, 0x100c0e1606020a08ULL, + 0x2e925e6749ccdb17ULL, 
0x6ea2663f51c4f337ULL, 0xe84e53cf271d6974ULL, + 0xa0786c9c3c144450ULL, 0x56b0730e58c3e82bULL, 0x3f57349aa563f291ULL, + 0x9ee63ced73da954fULL, 0xd2d38e35e75d3469ULL, 0xc2df8023e15f3e61ULL, + 0xaef22ed779dc8b57ULL, 0xcf136e48877d94e9ULL, 0x2694596c4acdde13ULL, + 0xdf1f605e817f9ee1ULL, 0xeac19b04ee5a2f75ULL, 0x477519f3b46cc1adULL, + 0xdad5893ee45c316dULL, 0xeb08ffef04f70cfbULL, 0x2dd4f2476a26be98ULL, + 0xab38c7b71cff24dbULL, 0x3b54b9112aed7e93ULL, 0x134aa23625e86f87ULL, + 0x9c69f426ba9dd34eULL, 0x5f7f10eeb16fcea1ULL, 0x04038d8b8f8e8c02ULL, + 0xc8564fe32b197d64ULL, 0x69e74794fda01abaULL, 0xd31aeade0df017e7ULL, + 0x3c1198ba8689971eULL, 0x78222d69110f333cULL, 0x3812153109071b1cULL, + 0x11c56afdecaf2986ULL, 0x8b20db9b10fb30cbULL, 0x4030385818082820ULL, + 0xa87e6b973f154154ULL, 0x682e237f170d3934ULL, 0x20181c2c0c041410ULL, + 0x0806070b03010504ULL, 0x074521abac64e98dULL, 0xb6f827ca7cdf845bULL, + 0x97295f0d9a76b3c5ULL, 0xef0b72648b7980f9ULL, 0xa6f429dc7add8e53ULL, + 0xf58eb3b2473dc9f4ULL, 0xb074628a3a164e58ULL, 0xe582bda4413fc3fcULL, + 0xa5b285fc5937ebdcULL, 0x4f731ef8b76dc4a9ULL, 0xdd90a8954838d8e0ULL, + 0xa1b10877d6b967deULL, 0xbf37442a9573a2d1ULL, 0x1b4ca53d26e96a83ULL, + 0xb5be8bea5f35e1d4ULL, 0x92e3b66dff551c49ULL, 0xaf3b4a3c9371a8d9ULL, + 0xff077c728d7b8af1ULL, 0x140f839d898c860aULL, 0xb73143219672a7d5ULL, + 0x34179fb18588921aULL, 0xe30ef8e407f609ffULL, 0x4dfcd6337e2a82a8ULL, + 0xed84baaf423ec6f8ULL, 0xcad98728e25e3b65ULL, 0x25d2f54c6927bb9cULL, + 0x0a89cfc0ca464305ULL, 0x60282474140c3c30ULL, 0x0f4326a0af65ec89ULL, + 0x676d05dfb868d5bdULL, 0x2f5b3a8ca361f899ULL, 0x180a091d05030f0cULL, + 0x46bc7d185ec1e223ULL, 0x82efb87bf9571641ULL, 0xfece189967d6a97fULL, + 0x86ec35f076d99a43ULL, 0xfacd9512e858257dULL, 0x8eea32fb75d89f47ULL, + 0x17492fbdaa66e385ULL, 0xf6c81f9264d7ac7bULL, 0xcd9ca6834e3ad2e8ULL, + 0x0e8a424b45c8cf07ULL, 0xfd88b4b9443cccf0ULL, 0x8326dc9013fa35cfULL, + 0xc453c563a796f462ULL, 0x51f552a5f4a701a6ULL, 0xb477ef01b598c25aULL, + 0x3352be1a29ec7b97ULL, 0xa9b70f7cd5b862daULL, 0x76a86f2254c7fc3bULL, + 0x19c36df6efae2c82ULL, 0x6f6b02d4bb69d0b9ULL, 0x62a7ecbfdd4b7a31ULL, + 0x31dd76d1e0ab3d96ULL, 0x21d178c7e6a9379eULL, 0x1f4f28b6a967e681ULL, + 0x503c364e1e0a2228ULL, 0x028fc8cbc9474601ULL, 0xc316e4c80bf21defULL, + 0xc1992c03c2b55beeULL, 0x0dccee6b6622aa88ULL, 0x7b64814932e556b3ULL, + 0x235eb00c2fee719fULL, 0x99a31d46dfbe7cc2ULL, 0x45fad1387d2b87acULL, + 0x7c21a0e29e81bf3eULL, 0x906c7ea636125a48ULL, 0x6c2daef49883b536ULL, + 0xd85a41f52d1b776cULL, 0x70242a62120e3638ULL, 0x05cae9606523af8cULL, + 0xfb04f1f902f506f3ULL, 0x1283c6ddcf454c09ULL, 0x15c6e7766321a584ULL, + 0x3e9e50714fced11fULL, 0x72abe2a9db497039ULL, 0x7de8c409742c9cb0ULL, + 0x9b2cd58d16f93ac3ULL, 0x636e885437e659bfULL, 0xd993251ec7b654e2ULL, + 0x5df0d825782888a0ULL, 0xb872658139174b5cULL, 0x642ba9ff9b82b032ULL, + 0xd05c46fe2e1a7268ULL, 0x2c1d96ac808b9d16ULL, 0xa33ec0bc1ffe21dfULL, + 0x241b91a7838a9812ULL, 0x48363f531b092d24ULL, 0x068c454046c9ca03ULL, + 0x4c35b2d89487a126ULL, 0x4ab9f798d24e6b25ULL, 0x5b7c9d653ee142a3ULL, + 0x6de4ca1f722e96b8ULL, 0x7362864231e453b7ULL, 0x537a9a6e3de047a7ULL, + 0x0b40ab2b20eb608bULL, 0xf447d759ad90ea7aULL, 0x49ff5bb8f1a40eaaULL, + 0xf0445ad2221e6678ULL, 0x5c39bcce9285ab2eULL, 0x275d3d87a060fd9dULL, + 0x0000000000000000ULL, 0x35defb5a6f25b194ULL, 0xf302f6f201f403f7ULL, + 0xdb1cedd50ef112e3ULL, 0xd45fcb75a194fe6aULL, 0x583a31451d0b272cULL, + 0x6b688f5f34e75cbbULL, 0x8f2356109f75bcc9ULL, 0x2b58b7072cef749bULL, + 0xbdb88ce15c34e4d0ULL, 0x95a697c65331f5c4ULL, 0xeec2168f61d4a377ULL, + 0xceda0aa36dd0b767ULL, 
0x4433b5d39786a422ULL, 0xd7196755827e9be5ULL, + 0x01c964ebeaad238eULL, 0xbb34c9a11afd2ed3ULL, 0x55f6df2e7b298da4ULL, + 0x9da090cd5030f0c0ULL, 0xc59aa1884d3bd7ecULL, 0x8c65fa30bc9fd946ULL, + 0x932ad28615f83fc7ULL, 0x7eae682957c6f93fULL, 0x986a79ad35135f4cULL, + 0x3014123a0a061e18ULL, 0x281e1b270f051114ULL, 0x66a4613452c5f633ULL, + 0x886677bb33115544ULL, 0x9f2f58069977b6c1ULL, 0xc7156943847c91edULL, + 0xf7017b798e7a8ff5ULL, 0xe70d756f887885fdULL, 0xadb482f75a36eed8ULL, + 0xe04854c4241c6c70ULL, 0xd596af9e4b39dde4ULL, 0xf2cb9219eb592079ULL, + 0xc05048e828187860ULL, 0x8ae9bf70fa561345ULL, 0xf18d3e39c8b345f6ULL, + 0xe9873724cdb04afaULL, 0x3dd8fc516c24b490ULL, 0x1dc0e07d6020a080ULL, + 0xf98b3932cbb240f2ULL, 0xe44bd94fab92e072ULL, 0x71ed4e89f8a315b6ULL, + 0x4eba7a135dc0e727ULL, 0x1a85c1d6cc44490dULL, 0x37513391a662f795ULL, + 0x806070b030105040ULL, 0xc99f2b08c1b45eeaULL, 0x543fbbc59184ae2aULL, + 0x2297d4e7c5435211ULL, 0xec4dde44a893e576ULL, 0x5eb674055bc2ed2fULL, + 0x6aa1ebb4de4a7f35ULL, 0x81a9145bdabd73ceULL, 0x0c058a808c8f8906ULL, + 0x75eec302772d99b4ULL, 0x89af1350d9bc76caULL, 0x946ff32db99cd64aULL, + 0x77610bc9be6adfb5ULL, 0x3a9dddfac0405d1dULL, 0x3698577a4ccfd41bULL, + 0x79eb4982fba210b2ULL, 0x7427a7e99d80ba3aULL, 0x42bff093d14f6e21ULL, + 0xf8425dd9211f637cULL, 0x1e864c5d43cac50fULL, 0x39db71dae3aa3892ULL, + 0x2a91d3ecc6425715ULL +}; + +static const u64 T6[256] = { + 0x6a01bbb9d268bad3ULL, 0x66b1e59a4d1954fcULL, 0x14cde265bc932f71ULL, + 0x1b512587cdb9749cULL, 0x57a4f7a2510253f5ULL, 0xbe03d0d66bb8d368ULL, + 0xb504d6de6fbdd26bULL, 0x85feb35229644dd7ULL, 0x4aadfdba5d0d50f0ULL, + 0xe063cf098a26ace9ULL, 0x9684091c0e838d8aULL, 0x4d1aa591c679bfdcULL, + 0x374d3da7ddad7090ULL, 0x5ca3f1aa550752f6ULL, 0x17e17ba452c89ab3ULL, + 0x8ef9b55a2d614cd4ULL, 0x20ac46038f65ea23ULL, 0x8411c4e673a6d562ULL, + 0x68c255cc66f197a4ULL, 0xa80ddcc663b2d16eULL, 0xd099aa85ccff3355ULL, + 0x41aafbb2590851f3ULL, 0x0f9cc7e2712a5bedULL, 0xae55f359a204a6f7ULL, + 0xc120febe5f81de7fULL, 0xa2e5ad7a3d7548d8ULL, 0xcc7fd7299a32a8e5ULL, + 0x0ae871bc5ec799b6ULL, 0xe63be0964b90db70ULL, 0xdb9eac8dc8fa3256ULL, + 0x152295d1e651b7c4ULL, 0xaace32b3d72bfc19ULL, 0x7393704bab48e338ULL, + 0x3bfd638442dc9ebfULL, 0x52d041fc7eef91aeULL, 0x1ce67dac56cd9bb0ULL, + 0x78947643af4de23bULL, 0x6106bdb1d66dbbd0ULL, 0xf1da9b32195841c3ULL, + 0xe5177957a5cb6eb2ULL, 0xb35cf941ae0ba5f2ULL, 0x564b80160bc0cb40ULL, + 0xc20c677fb1da6bbdULL, 0x7ecc59dc6efb95a2ULL, 0x9f40e161be1fa1feULL, + 0xc3e310cbeb18f308ULL, 0x2f3081e1fe4fb1ceULL, 0x160e0c10080a0206ULL, + 0x675e922e17dbcc49ULL, 0x3f66a26e37f3c451ULL, 0xcf534ee874691d27ULL, + 0x9c6c78a05044143cULL, 0x0e73b0562be8c358ULL, 0x9a34573f91f263a5ULL, + 0xed3ce69e4f95da73ULL, 0x358ed3d269345de7ULL, 0x2380dfc2613e5fe1ULL, + 0xd72ef2ae578bdc79ULL, 0x486e13cfe9947d87ULL, 0x6c59942613decd4aULL, + 0x5e601fdfe19e7f81ULL, 0x049bc1ea752f5aeeULL, 0xf3197547adc16cb4ULL, + 0x3e89d5da6d315ce4ULL, 0xefff08ebfb0cf704ULL, 0x47f2d42d98be266aULL, + 0xb7c738abdb24ff1cULL, 0x11b9543b937eed2aULL, 0x36a24a13876fe825ULL, + 0x26f4699c4ed39dbaULL, 0xee107f5fa1ce6fb1ULL, 0x8b8d0304028c8e8fULL, + 0xe34f56c8647d192bULL, 0x9447e769ba1aa0fdULL, 0xdeea1ad3e717f00dULL, + 0xba98113c1e978986ULL, 0x692d22783c330f11ULL, 0x311512381c1b0709ULL, + 0xfd6ac5118629afecULL, 0x9bdb208bcb30fb10ULL, 0x5838304020280818ULL, + 0x976b7ea85441153fULL, 0x7f232e6834390d17ULL, 0x2c1c18201014040cULL, + 0x0b07060804050103ULL, 0xab2145078de964acULL, 0xca27f8b65b84df7cULL, + 0x0d5f2997c5b3769aULL, 0x64720beff980798bULL, 0xdc29f4a6538edd7aULL, + 0xb2b38ef5f4c93d47ULL, 
0x8a6274b0584e163aULL, 0xa4bd82e5fcc33f41ULL, + 0xfc85b2a5dceb3759ULL, 0xf81e734fa9c46db7ULL, 0x95a890dde0d83848ULL, + 0x7708b1a1de67b9d6ULL, 0x2a4437bfd1a27395ULL, 0x3da54c1b836ae926ULL, + 0xea8bbeb5d4e1355fULL, 0x6db6e392491c55ffULL, 0x3c4a3bafd9a87193ULL, + 0x727c07fff18a7b8dULL, 0x9d830f140a868c89ULL, 0x214331b7d5a77296ULL, + 0xb19f17341a928885ULL, 0xe4f80ee3ff09f607ULL, 0x33d6fc4da8822a7eULL, + 0xafba84edf8c63e42ULL, 0x2887d9ca653b5ee2ULL, 0x4cf5d2259cbb2769ULL, + 0xc0cf890a054346caULL, 0x74242860303c0c14ULL, 0xa026430f89ec65afULL, + 0xdf056d67bdd568b8ULL, 0x8c3a5b2f99f861a3ULL, 0x1d090a180c0f0305ULL, + 0x187dbc4623e2c15eULL, 0x7bb8ef82411657f9ULL, 0x9918cefe7fa9d667ULL, + 0xf035ec86439ad976ULL, 0x1295cdfa7d2558e8ULL, 0xfb32ea8e479fd875ULL, + 0xbd2f491785e366aaULL, 0x921fc8f67bacd764ULL, 0x83a69ccde8d23a4eULL, + 0x4b428a0e07cfc845ULL, 0xb9b488fdf0cc3c44ULL, 0x90dc2683cf35fa13ULL, + 0x63c553c462f496a7ULL, 0xa552f551a601a7f4ULL, 0x01ef77b45ac298b5ULL, + 0x1abe5233977bec29ULL, 0x7c0fb7a9da62b8d5ULL, 0x226fa8763bfcc754ULL, + 0xf66dc319822caeefULL, 0xd4026b6fb9d069bbULL, 0xbfeca762317a4bddULL, + 0xd176dd31963dabe0ULL, 0xc778d1219e37a9e6ULL, 0xb6284f1f81e667a9ULL, + 0x4e363c5028220a1eULL, 0xcbc88f02014647c9ULL, 0xc8e416c3ef1df20bULL, + 0x032c99c1ee5bb5c2ULL, 0x6beecc0d88aa2266ULL, 0x4981647bb356e532ULL, + 0x0cb05e239f71ee2fULL, 0x461da399c27cbedfULL, 0x38d1fa45ac872b7dULL, + 0xe2a0217c3ebf819eULL, 0xa67e6c90485a1236ULL, 0xf4ae2d6c36b58398ULL, + 0xf5415ad86c771b2dULL, 0x622a247038360e12ULL, 0x60e9ca058caf2365ULL, + 0xf9f104fbf306f502ULL, 0xddc68312094c45cfULL, 0x76e7c61584a52163ULL, + 0x71509e3e1fd1ce4fULL, 0xa9e2ab72397049dbULL, 0x09c4e87db09c2c74ULL, + 0x8dd52c9bc33af916ULL, 0x54886e63bf59e637ULL, 0x1e2593d9e254b6c7ULL, + 0x25d8f05da0882878ULL, 0x816572b85c4b1739ULL, 0xffa92b6432b0829bULL, + 0xfe465cd068721a2eULL, 0xac961d2c169d8b80ULL, 0xbcc03ea3df21fe1fULL, + 0xa7911b2412988a83ULL, 0x533f3648242d091bULL, 0x40458c0603cac946ULL, + 0xd8b2354c26a18794ULL, 0x98f7b94a256b4ed2ULL, 0x659d7c5ba342e13eULL, + 0x1fcae46db8962e72ULL, 0x42866273b753e431ULL, 0x6e9a7a53a747e03dULL, + 0x2bab400b8b60eb20ULL, 0x59d747f47aea90adULL, 0xb85bff49aa0ea4f1ULL, + 0xd25a44f078661e22ULL, 0xcebc395c2eab8592ULL, 0x873d5d279dfd60a0ULL, + 0x0000000000000000ULL, 0x5afbde3594b1256fULL, 0xf2f602f3f703f401ULL, + 0xd5ed1cdbe312f10eULL, 0x75cb5fd46afe94a1ULL, 0x45313a582c270b1dULL, + 0x5f8f686bbb5ce734ULL, 0x1056238fc9bc759fULL, 0x07b7582b9b74ef2cULL, + 0xe18cb8bdd0e4345cULL, 0xc697a695c4f53153ULL, 0x8f16c2ee77a3d461ULL, + 0xa30adace67b7d06dULL, 0xd3b5334422a48697ULL, 0x556719d7e59b7e82ULL, + 0xeb64c9018e23adeaULL, 0xa1c934bbd32efd1aULL, 0x2edff655a48d297bULL, + 0xcd90a09dc0f03050ULL, 0x88a19ac5ecd73b4dULL, 0x30fa658c46d99fbcULL, + 0x86d22a93c73ff815ULL, 0x2968ae7e3ff9c657ULL, 0xad796a984c5f1335ULL, + 0x3a121430181e060aULL, 0x271b1e281411050fULL, 0x3461a46633f6c552ULL, + 0xbb77668844551133ULL, 0x06582f9fc1b67799ULL, 0x436915c7ed917c84ULL, + 0x797b01f7f58f7a8eULL, 0x6f750de7fd857888ULL, 0xf782b4add8ee365aULL, + 0xc45448e0706c1c24ULL, 0x9eaf96d5e4dd394bULL, 0x1992cbf2792059ebULL, + 0xe84850c060781828ULL, 0x70bfe98a451356faULL, 0x393e8df1f645b3c8ULL, + 0x243787e9fa4ab0cdULL, 0x51fcd83d90b4246cULL, 0x7de0c01d80a02060ULL, + 0x32398bf9f240b2cbULL, 0x4fd94be472e092abULL, 0x894eed71b615a3f8ULL, + 0x137aba4e27e7c05dULL, 0xd6c1851a0d4944ccULL, 0x9133513795f762a6ULL, + 0xb070608040501030ULL, 0x082b9fc9ea5eb4c1ULL, 0xc5bb3f542aae8491ULL, + 0xe7d49722115243c5ULL, 0x44de4dec76e593a8ULL, 0x0574b65e2fedc25bULL, + 0xb4eba16a357f4adeULL, 
0x5b14a981ce73bddaULL, 0x808a050c06898f8cULL, + 0x02c3ee75b4992d77ULL, 0x5013af89ca76bcd9ULL, 0x2df36f944ad69cb9ULL, + 0xc90b6177b5df6abeULL, 0xfadd9d3a1d5d40c0ULL, 0x7a5798361bd4cf4cULL, + 0x8249eb79b210a2fbULL, 0xe9a727743aba809dULL, 0x93f0bf42216e4fd1ULL, + 0xd95d42f87c631f21ULL, 0x5d4c861e0fc5ca43ULL, 0xda71db399238aae3ULL, + 0xecd3912a155742c6ULL +}; + +static const u64 T7[256] = { + 0x016ab9bb68d2d3baULL, 0xb1669ae5194dfc54ULL, 0xcd1465e293bc712fULL, + 0x511b8725b9cd9c74ULL, 0xa457a2f70251f553ULL, 0x03bed6d0b86b68d3ULL, + 0x04b5ded6bd6f6bd2ULL, 0xfe8552b36429d74dULL, 0xad4abafd0d5df050ULL, + 0x63e009cf268ae9acULL, 0x84961c09830e8a8dULL, 0x1a4d91a579c6dcbfULL, + 0x4d37a73daddd9070ULL, 0xa35caaf10755f652ULL, 0xe117a47bc852b39aULL, + 0xf98e5ab5612dd44cULL, 0xac200346658f23eaULL, 0x1184e6c4a67362d5ULL, + 0xc268cc55f166a497ULL, 0x0da8c6dcb2636ed1ULL, 0x99d085aaffcc5533ULL, + 0xaa41b2fb0859f351ULL, 0x9c0fe2c72a71ed5bULL, 0x55ae59f304a2f7a6ULL, + 0x20c1befe815f7fdeULL, 0xe5a27aad753dd848ULL, 0x7fcc29d7329ae5a8ULL, + 0xe80abc71c75eb699ULL, 0x3be696e0904b70dbULL, 0x9edb8dacfac85632ULL, + 0x2215d19551e6c4b7ULL, 0xceaab3322bd719fcULL, 0x93734b7048ab38e3ULL, + 0xfd3b8463dc42bf9eULL, 0xd052fc41ef7eae91ULL, 0xe61cac7dcd56b09bULL, + 0x947843764daf3be2ULL, 0x0661b1bd6dd6d0bbULL, 0xdaf1329b5819c341ULL, + 0x17e55779cba5b26eULL, 0x5cb341f90baef2a5ULL, 0x4b561680c00b40cbULL, + 0x0cc27f67dab1bd6bULL, 0xcc7edc59fb6ea295ULL, 0x409f61e11fbefea1ULL, + 0xe3c3cb1018eb08f3ULL, 0x302fe1814ffeceb1ULL, 0x0e16100c0a080602ULL, + 0x5e672e92db1749ccULL, 0x663f6ea2f33751c4ULL, 0x53cfe84e6974271dULL, + 0x6c9ca07844503c14ULL, 0x730e56b0e82b58c3ULL, 0x349a3f57f291a563ULL, + 0x3ced9ee6954f73daULL, 0x8e35d2d33469e75dULL, 0x8023c2df3e61e15fULL, + 0x2ed7aef28b5779dcULL, 0x6e48cf1394e9877dULL, 0x596c2694de134acdULL, + 0x605edf1f9ee1817fULL, 0x9b04eac12f75ee5aULL, 0x19f34775c1adb46cULL, + 0x893edad5316de45cULL, 0xffefeb080cfb04f7ULL, 0xf2472dd4be986a26ULL, + 0xc7b7ab3824db1cffULL, 0xb9113b547e932aedULL, 0xa236134a6f8725e8ULL, + 0xf4269c69d34eba9dULL, 0x10ee5f7fcea1b16fULL, 0x8d8b04038c028f8eULL, + 0x4fe3c8567d642b19ULL, 0x479469e71abafda0ULL, 0xeaded31a17e70df0ULL, + 0x98ba3c11971e8689ULL, 0x2d697822333c110fULL, 0x153138121b1c0907ULL, + 0x6afd11c52986ecafULL, 0xdb9b8b2030cb10fbULL, 0x3858403028201808ULL, + 0x6b97a87e41543f15ULL, 0x237f682e3934170dULL, 0x1c2c201814100c04ULL, + 0x070b080605040301ULL, 0x21ab0745e98dac64ULL, 0x27cab6f8845b7cdfULL, + 0x5f0d9729b3c59a76ULL, 0x7264ef0b80f98b79ULL, 0x29dca6f48e537addULL, + 0xb3b2f58ec9f4473dULL, 0x628ab0744e583a16ULL, 0xbda4e582c3fc413fULL, + 0x85fca5b2ebdc5937ULL, 0x1ef84f73c4a9b76dULL, 0xa895dd90d8e04838ULL, + 0x0877a1b167ded6b9ULL, 0x442abf37a2d19573ULL, 0xa53d1b4c6a8326e9ULL, + 0x8beab5bee1d45f35ULL, 0xb66d92e31c49ff55ULL, 0x4a3caf3ba8d99371ULL, + 0x7c72ff078af18d7bULL, 0x839d140f860a898cULL, 0x4321b731a7d59672ULL, + 0x9fb13417921a8588ULL, 0xf8e4e30e09ff07f6ULL, 0xd6334dfc82a87e2aULL, + 0xbaafed84c6f8423eULL, 0x8728cad93b65e25eULL, 0xf54c25d2bb9c6927ULL, + 0xcfc00a894305ca46ULL, 0x247460283c30140cULL, 0x26a00f43ec89af65ULL, + 0x05df676dd5bdb868ULL, 0x3a8c2f5bf899a361ULL, 0x091d180a0f0c0503ULL, + 0x7d1846bce2235ec1ULL, 0xb87b82ef1641f957ULL, 0x1899fecea97f67d6ULL, + 0x35f086ec9a4376d9ULL, 0x9512facd257de858ULL, 0x32fb8eea9f4775d8ULL, + 0x2fbd1749e385aa66ULL, 0x1f92f6c8ac7b64d7ULL, 0xa683cd9cd2e84e3aULL, + 0x424b0e8acf0745c8ULL, 0xb4b9fd88ccf0443cULL, 0xdc90832635cf13faULL, + 0xc563c453f462a796ULL, 0x52a551f501a6f4a7ULL, 0xef01b477c25ab598ULL, + 0xbe1a33527b9729ecULL, 
0x0f7ca9b762dad5b8ULL, 0x6f2276a8fc3b54c7ULL, + 0x6df619c32c82efaeULL, 0x02d46f6bd0b9bb69ULL, 0xecbf62a77a31dd4bULL, + 0x76d131dd3d96e0abULL, 0x78c721d1379ee6a9ULL, 0x28b61f4fe681a967ULL, + 0x364e503c22281e0aULL, 0xc8cb028f4601c947ULL, 0xe4c8c3161def0bf2ULL, + 0x2c03c1995beec2b5ULL, 0xee6b0dccaa886622ULL, 0x81497b6456b332e5ULL, + 0xb00c235e719f2feeULL, 0x1d4699a37cc2dfbeULL, 0xd13845fa87ac7d2bULL, + 0xa0e27c21bf3e9e81ULL, 0x7ea6906c5a483612ULL, 0xaef46c2db5369883ULL, + 0x41f5d85a776c2d1bULL, 0x2a6270243638120eULL, 0xe96005caaf8c6523ULL, + 0xf1f9fb0406f302f5ULL, 0xc6dd12834c09cf45ULL, 0xe77615c6a5846321ULL, + 0x50713e9ed11f4fceULL, 0xe2a972ab7039db49ULL, 0xc4097de89cb0742cULL, + 0xd58d9b2c3ac316f9ULL, 0x8854636e59bf37e6ULL, 0x251ed99354e2c7b6ULL, + 0xd8255df088a07828ULL, 0x6581b8724b5c3917ULL, 0xa9ff642bb0329b82ULL, + 0x46fed05c72682e1aULL, 0x96ac2c1d9d16808bULL, 0xc0bca33e21df1ffeULL, + 0x91a7241b9812838aULL, 0x3f5348362d241b09ULL, 0x4540068cca0346c9ULL, + 0xb2d84c35a1269487ULL, 0xf7984ab96b25d24eULL, 0x9d655b7c42a33ee1ULL, + 0xca1f6de496b8722eULL, 0x8642736253b731e4ULL, 0x9a6e537a47a73de0ULL, + 0xab2b0b40608b20ebULL, 0xd759f447ea7aad90ULL, 0x5bb849ff0eaaf1a4ULL, + 0x5ad2f0446678221eULL, 0xbcce5c39ab2e9285ULL, 0x3d87275dfd9da060ULL, + 0x0000000000000000ULL, 0xfb5a35deb1946f25ULL, 0xf6f2f30203f701f4ULL, + 0xedd5db1c12e30ef1ULL, 0xcb75d45ffe6aa194ULL, 0x3145583a272c1d0bULL, + 0x8f5f6b685cbb34e7ULL, 0x56108f23bcc99f75ULL, 0xb7072b58749b2cefULL, + 0x8ce1bdb8e4d05c34ULL, 0x97c695a6f5c45331ULL, 0x168feec2a37761d4ULL, + 0x0aa3cedab7676dd0ULL, 0xb5d34433a4229786ULL, 0x6755d7199be5827eULL, + 0x64eb01c9238eeaadULL, 0xc9a1bb342ed31afdULL, 0xdf2e55f68da47b29ULL, + 0x90cd9da0f0c05030ULL, 0xa188c59ad7ec4d3bULL, 0xfa308c65d946bc9fULL, + 0xd286932a3fc715f8ULL, 0x68297eaef93f57c6ULL, 0x79ad986a5f4c3513ULL, + 0x123a30141e180a06ULL, 0x1b27281e11140f05ULL, 0x613466a4f63352c5ULL, + 0x77bb886655443311ULL, 0x58069f2fb6c19977ULL, 0x6943c71591ed847cULL, + 0x7b79f7018ff58e7aULL, 0x756fe70d85fd8878ULL, 0x82f7adb4eed85a36ULL, + 0x54c4e0486c70241cULL, 0xaf9ed596dde44b39ULL, 0x9219f2cb2079eb59ULL, + 0x48e8c05078602818ULL, 0xbf708ae91345fa56ULL, 0x3e39f18d45f6c8b3ULL, + 0x3724e9874afacdb0ULL, 0xfc513dd8b4906c24ULL, 0xe07d1dc0a0806020ULL, + 0x3932f98b40f2cbb2ULL, 0xd94fe44be072ab92ULL, 0x4e8971ed15b6f8a3ULL, + 0x7a134ebae7275dc0ULL, 0xc1d61a85490dcc44ULL, 0x33913751f795a662ULL, + 0x70b0806050403010ULL, 0x2b08c99f5eeac1b4ULL, 0xbbc5543fae2a9184ULL, + 0xd4e722975211c543ULL, 0xde44ec4de576a893ULL, 0x74055eb6ed2f5bc2ULL, + 0xebb46aa17f35de4aULL, 0x145b81a973cedabdULL, 0x8a800c0589068c8fULL, + 0xc30275ee99b4772dULL, 0x135089af76cad9bcULL, 0xf32d946fd64ab99cULL, + 0x0bc97761dfb5be6aULL, 0xddfa3a9d5d1dc040ULL, 0x577a3698d41b4ccfULL, + 0x498279eb10b2fba2ULL, 0xa7e97427ba3a9d80ULL, 0xf09342bf6e21d14fULL, + 0x5dd9f842637c211fULL, 0x4c5d1e86c50f43caULL, 0x71da39db3892e3aaULL, + 0xd3ec2a915715c642ULL +}; + +static const u64 c[KHAZAD_ROUNDS + 1] = { + 0xba542f7453d3d24dULL, 0x50ac8dbf70529a4cULL, 0xead597d133515ba6ULL, + 0xde48a899db32b7fcULL, 0xe39e919be2bb416eULL, 0xa5cb6b95a1f3b102ULL, + 0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL +}; + +static int khazad_setkey(void *ctx_arg, const u8 *in_key, + unsigned int key_len, u32 *flags) +{ + + struct khazad_ctx *ctx = ctx_arg; + int r; + const u64 *S = T7; + u64 K2, K1; + + if (key_len != 16) + { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + K2 = ((u64)in_key[ 0] << 56) ^ + ((u64)in_key[ 1] << 48) ^ + ((u64)in_key[ 2] << 40) ^ + ((u64)in_key[ 
3] << 32) ^ + ((u64)in_key[ 4] << 24) ^ + ((u64)in_key[ 5] << 16) ^ + ((u64)in_key[ 6] << 8) ^ + ((u64)in_key[ 7] ); + K1 = ((u64)in_key[ 8] << 56) ^ + ((u64)in_key[ 9] << 48) ^ + ((u64)in_key[10] << 40) ^ + ((u64)in_key[11] << 32) ^ + ((u64)in_key[12] << 24) ^ + ((u64)in_key[13] << 16) ^ + ((u64)in_key[14] << 8) ^ + ((u64)in_key[15] ); + + /* setup the encrypt key */ + for (r = 0; r <= KHAZAD_ROUNDS; r++) { + ctx->E[r] = T0[(int)(K1 >> 56) ] ^ + T1[(int)(K1 >> 48) & 0xff] ^ + T2[(int)(K1 >> 40) & 0xff] ^ + T3[(int)(K1 >> 32) & 0xff] ^ + T4[(int)(K1 >> 24) & 0xff] ^ + T5[(int)(K1 >> 16) & 0xff] ^ + T6[(int)(K1 >> 8) & 0xff] ^ + T7[(int)(K1 ) & 0xff] ^ + c[r] ^ K2; + K2 = K1; + K1 = ctx->E[r]; + } + /* Setup the decrypt key */ + ctx->D[0] = ctx->E[KHAZAD_ROUNDS]; + for (r = 1; r < KHAZAD_ROUNDS; r++) { + K1 = ctx->E[KHAZAD_ROUNDS - r]; + ctx->D[r] = T0[(int)S[(int)(K1 >> 56) ] & 0xff] ^ + T1[(int)S[(int)(K1 >> 48) & 0xff] & 0xff] ^ + T2[(int)S[(int)(K1 >> 40) & 0xff] & 0xff] ^ + T3[(int)S[(int)(K1 >> 32) & 0xff] & 0xff] ^ + T4[(int)S[(int)(K1 >> 24) & 0xff] & 0xff] ^ + T5[(int)S[(int)(K1 >> 16) & 0xff] & 0xff] ^ + T6[(int)S[(int)(K1 >> 8) & 0xff] & 0xff] ^ + T7[(int)S[(int)(K1 ) & 0xff] & 0xff]; + } + ctx->D[KHAZAD_ROUNDS] = ctx->E[0]; + + return 0; + +} + +static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1], + u8 *ciphertext, const u8 *plaintext) +{ + + int r; + u64 state; + + state = ((u64)plaintext[0] << 56) ^ + ((u64)plaintext[1] << 48) ^ + ((u64)plaintext[2] << 40) ^ + ((u64)plaintext[3] << 32) ^ + ((u64)plaintext[4] << 24) ^ + ((u64)plaintext[5] << 16) ^ + ((u64)plaintext[6] << 8) ^ + ((u64)plaintext[7] ) ^ + roundKey[0]; + + for (r = 1; r < KHAZAD_ROUNDS; r++) { + state = T0[(int)(state >> 56) ] ^ + T1[(int)(state >> 48) & 0xff] ^ + T2[(int)(state >> 40) & 0xff] ^ + T3[(int)(state >> 32) & 0xff] ^ + T4[(int)(state >> 24) & 0xff] ^ + T5[(int)(state >> 16) & 0xff] ^ + T6[(int)(state >> 8) & 0xff] ^ + T7[(int)(state ) & 0xff] ^ + roundKey[r]; + } + + state = (T0[(int)(state >> 56) ] & 0xff00000000000000ULL) ^ + (T1[(int)(state >> 48) & 0xff] & 0x00ff000000000000ULL) ^ + (T2[(int)(state >> 40) & 0xff] & 0x0000ff0000000000ULL) ^ + (T3[(int)(state >> 32) & 0xff] & 0x000000ff00000000ULL) ^ + (T4[(int)(state >> 24) & 0xff] & 0x00000000ff000000ULL) ^ + (T5[(int)(state >> 16) & 0xff] & 0x0000000000ff0000ULL) ^ + (T6[(int)(state >> 8) & 0xff] & 0x000000000000ff00ULL) ^ + (T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^ + roundKey[KHAZAD_ROUNDS]; + + ciphertext[0] = (u8)(state >> 56); + ciphertext[1] = (u8)(state >> 48); + ciphertext[2] = (u8)(state >> 40); + ciphertext[3] = (u8)(state >> 32); + ciphertext[4] = (u8)(state >> 24); + ciphertext[5] = (u8)(state >> 16); + ciphertext[6] = (u8)(state >> 8); + ciphertext[7] = (u8)(state ); + +} + +static void khazad_encrypt(void *ctx_arg, u8 *dst, const u8 *src) +{ + struct khazad_ctx *ctx = ctx_arg; + khazad_crypt(ctx->E, dst, src); +} + +static void khazad_decrypt(void *ctx_arg, u8 *dst, const u8 *src) +{ + struct khazad_ctx *ctx = ctx_arg; + khazad_crypt(ctx->D, dst, src); +} + +static struct crypto_alg khazad_alg = { + .cra_name = "khazad", + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = KHAZAD_BLOCK_SIZE, + .cra_ctxsize = sizeof (struct khazad_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(khazad_alg.cra_list), + .cra_u = { .cipher = { + .cia_min_keysize = KHAZAD_KEY_SIZE, + .cia_max_keysize = KHAZAD_KEY_SIZE, + .cia_setkey = khazad_setkey, + .cia_encrypt = khazad_encrypt, + .cia_decrypt = khazad_decrypt } 
} +}; + +static int __init init(void) +{ + int ret = 0; + + ret = crypto_register_alg(&khazad_alg); + return ret; +} + +static void __exit fini(void) +{ + crypto_unregister_alg(&khazad_alg); +} + + +module_init(init); +module_exit(fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Khazad Cryptographic Algorithm"); diff --git a/crypto/tea.c b/crypto/tea.c new file mode 100644 index 000000000..bf943294d --- /dev/null +++ b/crypto/tea.c @@ -0,0 +1,248 @@ +/* + * Cryptographic API. + * + * TEA and Xtended TEA Algorithms + * + * The TEA and Xtended TEA algorithms were developed by David Wheeler + * and Roger Needham at the Computer Laboratory of Cambridge University. + * + * Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include + +#define TEA_KEY_SIZE 16 +#define TEA_BLOCK_SIZE 8 +#define TEA_ROUNDS 32 +#define TEA_DELTA 0x9e3779b9 + +#define XTEA_KEY_SIZE 16 +#define XTEA_BLOCK_SIZE 8 +#define XTEA_ROUNDS 32 +#define XTEA_DELTA 0x9e3779b9 + +#define u32_in(x) le32_to_cpu(*(const u32 *)(x)) +#define u32_out(to, from) (*(u32 *)(to) = cpu_to_le32(from)) + +struct tea_ctx { + u32 KEY[4]; +}; + +struct xtea_ctx { + u32 KEY[4]; +}; + +static int tea_setkey(void *ctx_arg, const u8 *in_key, + unsigned int key_len, u32 *flags) +{ + + struct tea_ctx *ctx = ctx_arg; + + if (key_len != 16) + { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + ctx->KEY[0] = u32_in (in_key); + ctx->KEY[1] = u32_in (in_key + 4); + ctx->KEY[2] = u32_in (in_key + 8); + ctx->KEY[3] = u32_in (in_key + 12); + + return 0; + +} + +static void tea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) +{ + u32 y, z, n, sum = 0; + u32 k0, k1, k2, k3; + + struct tea_ctx *ctx = ctx_arg; + + y = u32_in (src); + z = u32_in (src + 4); + + k0 = ctx->KEY[0]; + k1 = ctx->KEY[1]; + k2 = ctx->KEY[2]; + k3 = ctx->KEY[3]; + + n = TEA_ROUNDS; + + while (n-- > 0) { + sum += TEA_DELTA; + y += ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1); + z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); + } + + u32_out (dst, y); + u32_out (dst + 4, z); +} + +static void tea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) +{ + u32 y, z, n, sum; + u32 k0, k1, k2, k3; + + struct tea_ctx *ctx = ctx_arg; + + y = u32_in (src); + z = u32_in (src + 4); + + k0 = ctx->KEY[0]; + k1 = ctx->KEY[1]; + k2 = ctx->KEY[2]; + k3 = ctx->KEY[3]; + + sum = TEA_DELTA << 5; + + n = TEA_ROUNDS; + + while (n-- > 0) { + z -= ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); + y -= ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1); + sum -= TEA_DELTA; + } + + u32_out (dst, y); + u32_out (dst + 4, z); + +} + +static int xtea_setkey(void *ctx_arg, const u8 *in_key, + unsigned int key_len, u32 *flags) +{ + + struct xtea_ctx *ctx = ctx_arg; + + if (key_len != 16) + { + *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; + return -EINVAL; + } + + ctx->KEY[0] = u32_in (in_key); + ctx->KEY[1] = u32_in (in_key + 4); + ctx->KEY[2] = u32_in (in_key + 8); + ctx->KEY[3] = u32_in (in_key + 12); + + return 0; + +} + +static void xtea_encrypt(void *ctx_arg, u8 *dst, const u8 *src) +{ + + u32 y, z, sum = 0; + u32 limit = XTEA_DELTA * XTEA_ROUNDS; + + struct xtea_ctx *ctx = ctx_arg; + + y = u32_in (src); + z = u32_in (src + 4); + + while (sum != limit) { + y += (z << 4 ^ z >> 5) + (z ^ sum) + 
ctx->KEY[sum&3]; + sum += TEA_DELTA; + z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; + } + + u32_out (dst, y); + u32_out (dst + 4, z); + +} + +static void xtea_decrypt(void *ctx_arg, u8 *dst, const u8 *src) +{ + + u32 y, z, sum; + struct tea_ctx *ctx = ctx_arg; + + y = u32_in (src); + z = u32_in (src + 4); + + sum = XTEA_DELTA * XTEA_ROUNDS; + + while (sum) { + z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3]; + sum -= XTEA_DELTA; + y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; + } + + u32_out (dst, y); + u32_out (dst + 4, z); + +} + +static struct crypto_alg tea_alg = { + .cra_name = "tea", + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = TEA_BLOCK_SIZE, + .cra_ctxsize = sizeof (struct tea_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(tea_alg.cra_list), + .cra_u = { .cipher = { + .cia_min_keysize = TEA_KEY_SIZE, + .cia_max_keysize = TEA_KEY_SIZE, + .cia_setkey = tea_setkey, + .cia_encrypt = tea_encrypt, + .cia_decrypt = tea_decrypt } } +}; + +static struct crypto_alg xtea_alg = { + .cra_name = "xtea", + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = XTEA_BLOCK_SIZE, + .cra_ctxsize = sizeof (struct xtea_ctx), + .cra_module = THIS_MODULE, + .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), + .cra_u = { .cipher = { + .cia_min_keysize = XTEA_KEY_SIZE, + .cia_max_keysize = XTEA_KEY_SIZE, + .cia_setkey = xtea_setkey, + .cia_encrypt = xtea_encrypt, + .cia_decrypt = xtea_decrypt } } +}; + +static int __init init(void) +{ + int ret = 0; + + ret = crypto_register_alg(&tea_alg); + if (ret < 0) + goto out; + + ret = crypto_register_alg(&xtea_alg); + if (ret < 0) { + crypto_unregister_alg(&tea_alg); + goto out; + } + +out: + return ret; +} + +static void __exit fini(void) +{ + crypto_unregister_alg(&tea_alg); + crypto_unregister_alg(&xtea_alg); +} + +MODULE_ALIAS("xtea"); + +module_init(init); +module_exit(fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TEA & XTEA Cryptographic Algorithms"); diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c new file mode 100644 index 000000000..0a0234129 --- /dev/null +++ b/drivers/block/sx8.c @@ -0,0 +1,1763 @@ +/* + * sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware + * + * Copyright 2004 Red Hat, Inc. + * + * Author/maintainer: Jeff Garzik + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Promise SATA SX8 block driver"); + +#if 0 +#define CARM_DEBUG +#define CARM_VERBOSE_DEBUG +#else +#undef CARM_DEBUG +#undef CARM_VERBOSE_DEBUG +#endif +#undef CARM_NDEBUG + +#define DRV_NAME "sx8" +#define DRV_VERSION "0.8" +#define PFX DRV_NAME ": " + +#define NEXT_RESP(idx) ((idx + 1) % RMSG_Q_LEN) + +/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */ +#define TAG_ENCODE(tag) (((tag) << 16) | 0xf) +#define TAG_DECODE(tag) (((tag) >> 16) & 0x1f) +#define TAG_VALID(tag) ((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32)) + +/* note: prints function name for you */ +#ifdef CARM_DEBUG +#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#ifdef CARM_VERBOSE_DEBUG +#define VPRINTK(fmt, args...) 
printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#else +#define VPRINTK(fmt, args...) +#endif /* CARM_VERBOSE_DEBUG */ +#else +#define DPRINTK(fmt, args...) +#define VPRINTK(fmt, args...) +#endif /* CARM_DEBUG */ + +#ifdef CARM_NDEBUG +#define assert(expr) +#else +#define assert(expr) \ + if(unlikely(!(expr))) { \ + printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr,__FILE__,__FUNCTION__,__LINE__); \ + } +#endif + +/* defines only for the constants which don't work well as enums */ +struct carm_host; + +enum { + /* adapter-wide limits */ + CARM_MAX_PORTS = 8, + CARM_SHM_SIZE = (4096 << 7), + CARM_MINORS_PER_MAJOR = 256 / CARM_MAX_PORTS, + CARM_MAX_WAIT_Q = CARM_MAX_PORTS + 1, + + /* command message queue limits */ + CARM_MAX_REQ = 64, /* max command msgs per host */ + CARM_MAX_Q = 1, /* one command at a time */ + CARM_MSG_LOW_WATER = (CARM_MAX_REQ / 4), /* refill mark */ + + /* S/G limits, host-wide and per-request */ + CARM_MAX_REQ_SG = 32, /* max s/g entries per request */ + CARM_SG_BOUNDARY = 0xffffUL, /* s/g segment boundary */ + CARM_MAX_HOST_SG = 600, /* max s/g entries per host */ + CARM_SG_LOW_WATER = (CARM_MAX_HOST_SG / 4), /* re-fill mark */ + + /* hardware registers */ + CARM_IHQP = 0x1c, + CARM_INT_STAT = 0x10, /* interrupt status */ + CARM_INT_MASK = 0x14, /* interrupt mask */ + CARM_HMUC = 0x18, /* host message unit control */ + RBUF_ADDR_LO = 0x20, /* response msg DMA buf low 32 bits */ + RBUF_ADDR_HI = 0x24, /* response msg DMA buf high 32 bits */ + RBUF_BYTE_SZ = 0x28, + CARM_RESP_IDX = 0x2c, + CARM_CMS0 = 0x30, /* command message size reg 0 */ + CARM_LMUC = 0x48, + CARM_HMPHA = 0x6c, + CARM_INITC = 0xb5, + + /* bits in CARM_INT_{STAT,MASK} */ + INT_RESERVED = 0xfffffff0, + INT_WATCHDOG = (1 << 3), /* watchdog timer */ + INT_Q_OVERFLOW = (1 << 2), /* cmd msg q overflow */ + INT_Q_AVAILABLE = (1 << 1), /* cmd msg q has free space */ + INT_RESPONSE = (1 << 0), /* response msg available */ + INT_ACK_MASK = INT_WATCHDOG | INT_Q_OVERFLOW, + INT_DEF_MASK = INT_RESERVED | INT_Q_OVERFLOW | + INT_RESPONSE, + + /* command messages, and related register bits */ + CARM_HAVE_RESP = 0x01, + CARM_MSG_READ = 1, + CARM_MSG_WRITE = 2, + CARM_MSG_VERIFY = 3, + CARM_MSG_GET_CAPACITY = 4, + CARM_MSG_FLUSH = 5, + CARM_MSG_IOCTL = 6, + CARM_MSG_ARRAY = 8, + CARM_MSG_MISC = 9, + CARM_CME = (1 << 2), + CARM_RME = (1 << 1), + CARM_WZBC = (1 << 0), + CARM_RMI = (1 << 0), + CARM_Q_FULL = (1 << 3), + CARM_MSG_SIZE = 288, + CARM_Q_LEN = 48, + + /* CARM_MSG_IOCTL messages */ + CARM_IOC_SCAN_CHAN = 5, /* scan channels for devices */ + CARM_IOC_GET_TCQ = 13, /* get tcq/ncq depth */ + CARM_IOC_SET_TCQ = 14, /* set tcq/ncq depth */ + + IOC_SCAN_CHAN_NODEV = 0x1f, + IOC_SCAN_CHAN_OFFSET = 0x40, + + /* CARM_MSG_ARRAY messages */ + CARM_ARRAY_INFO = 0, + + ARRAY_NO_EXIST = (1 << 31), + + /* response messages */ + RMSG_SZ = 8, /* sizeof(struct carm_response) */ + RMSG_Q_LEN = 48, /* resp. msg list length */ + RMSG_OK = 1, /* bit indicating msg was successful */ + /* length of entire resp. 
msg buffer */ + RBUF_LEN = RMSG_SZ * RMSG_Q_LEN, + + PDC_SHM_SIZE = (4096 << 7), /* length of entire h/w buffer */ + + /* CARM_MSG_MISC messages */ + MISC_GET_FW_VER = 2, + MISC_ALLOC_MEM = 3, + MISC_SET_TIME = 5, + + /* MISC_GET_FW_VER feature bits */ + FW_VER_4PORT = (1 << 2), /* 1=4 ports, 0=8 ports */ + FW_VER_NON_RAID = (1 << 1), /* 1=non-RAID firmware, 0=RAID */ + FW_VER_ZCR = (1 << 0), /* zero channel RAID (whatever that is) */ + + /* carm_host flags */ + FL_NON_RAID = FW_VER_NON_RAID, + FL_4PORT = FW_VER_4PORT, + FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT), + FL_DAC = (1 << 16), + FL_DYN_MAJOR = (1 << 17), +}; + +enum scatter_gather_types { + SGT_32BIT = 0, + SGT_64BIT = 1, +}; + +enum host_states { + HST_INVALID, /* invalid state; never used */ + HST_ALLOC_BUF, /* setting up master SHM area */ + HST_ERROR, /* we never leave here */ + HST_PORT_SCAN, /* start dev scan */ + HST_DEV_SCAN_START, /* start per-device probe */ + HST_DEV_SCAN, /* continue per-device probe */ + HST_DEV_ACTIVATE, /* activate devices we found */ + HST_PROBE_FINISHED, /* probe is complete */ + HST_PROBE_START, /* initiate probe */ + HST_SYNC_TIME, /* tell firmware what time it is */ + HST_GET_FW_VER, /* get firmware version, adapter port cnt */ +}; + +#ifdef CARM_DEBUG +static const char *state_name[] = { + "HST_INVALID", + "HST_ALLOC_BUF", + "HST_ERROR", + "HST_PORT_SCAN", + "HST_DEV_SCAN_START", + "HST_DEV_SCAN", + "HST_DEV_ACTIVATE", + "HST_PROBE_FINISHED", + "HST_PROBE_START", + "HST_SYNC_TIME", + "HST_GET_FW_VER", +}; +#endif + +struct carm_port { + unsigned int port_no; + unsigned int n_queued; + struct gendisk *disk; + struct carm_host *host; + + /* attached device characteristics */ + u64 capacity; + char name[41]; + u16 dev_geom_head; + u16 dev_geom_sect; + u16 dev_geom_cyl; +}; + +struct carm_request { + unsigned int tag; + int n_elem; + unsigned int msg_type; + unsigned int msg_subtype; + unsigned int msg_bucket; + struct request *rq; + struct carm_port *port; + struct scatterlist sg[CARM_MAX_REQ_SG]; +}; + +struct carm_host { + unsigned long flags; + void *mmio; + void *shm; + dma_addr_t shm_dma; + + int major; + int id; + char name[32]; + + spinlock_t lock; + struct pci_dev *pdev; + unsigned int state; + u32 fw_ver; + + request_queue_t *oob_q; + unsigned int n_oob; + + unsigned int hw_sg_used; + + unsigned int resp_idx; + + unsigned int wait_q_prod; + unsigned int wait_q_cons; + request_queue_t *wait_q[CARM_MAX_WAIT_Q]; + + unsigned int n_msgs; + u64 msg_alloc; + struct carm_request req[CARM_MAX_REQ]; + void *msg_base; + dma_addr_t msg_dma; + + int cur_scan_dev; + unsigned long dev_active; + unsigned long dev_present; + struct carm_port port[CARM_MAX_PORTS]; + + struct work_struct fsm_task; + + struct semaphore probe_sem; +}; + +struct carm_response { + u32 ret_handle; + u32 status; +} __attribute__((packed)); + +struct carm_msg_sg { + u32 start; + u32 len; +} __attribute__((packed)); + +struct carm_msg_rw { + u8 type; + u8 id; + u8 sg_count; + u8 sg_type; + u32 handle; + u32 lba; + u16 lba_count; + u16 lba_high; + struct carm_msg_sg sg[32]; +} __attribute__((packed)); + +struct carm_msg_allocbuf { + u8 type; + u8 subtype; + u8 n_sg; + u8 sg_type; + u32 handle; + u32 addr; + u32 len; + u32 evt_pool; + u32 n_evt; + u32 rbuf_pool; + u32 n_rbuf; + u32 msg_pool; + u32 n_msg; + struct carm_msg_sg sg[8]; +} __attribute__((packed)); + +struct carm_msg_ioctl { + u8 type; + u8 subtype; + u8 array_id; + u8 reserved1; + u32 handle; + u32 data_addr; + u32 reserved2; +} __attribute__((packed)); + +struct 
carm_msg_sync_time { + u8 type; + u8 subtype; + u16 reserved1; + u32 handle; + u32 reserved2; + u32 timestamp; +} __attribute__((packed)); + +struct carm_msg_get_fw_ver { + u8 type; + u8 subtype; + u16 reserved1; + u32 handle; + u32 data_addr; + u32 reserved2; +} __attribute__((packed)); + +struct carm_fw_ver { + u32 version; + u8 features; + u8 reserved1; + u16 reserved2; +} __attribute__((packed)); + +struct carm_array_info { + u32 size; + + u16 size_hi; + u16 stripe_size; + + u32 mode; + + u16 stripe_blk_sz; + u16 reserved1; + + u16 cyl; + u16 head; + + u16 sect; + u8 array_id; + u8 reserved2; + + char name[40]; + + u32 array_status; + + /* device list continues beyond this point? */ +} __attribute__((packed)); + +static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static void carm_remove_one (struct pci_dev *pdev); +static int carm_bdev_ioctl(struct inode *ino, struct file *fil, + unsigned int cmd, unsigned long arg); + +static struct pci_device_id carm_pci_tbl[] = { + { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, + { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, carm_pci_tbl); + +static struct pci_driver carm_driver = { + .name = DRV_NAME, + .id_table = carm_pci_tbl, + .probe = carm_init_one, + .remove = carm_remove_one, +}; + +static struct block_device_operations carm_bd_ops = { + .owner = THIS_MODULE, + .ioctl = carm_bdev_ioctl, +}; + +static unsigned int carm_host_id; +static unsigned long carm_major_alloc; + + + +static int carm_bdev_ioctl(struct inode *ino, struct file *fil, + unsigned int cmd, unsigned long arg) +{ + void __user *usermem = (void __user *) arg; + struct carm_port *port = ino->i_bdev->bd_disk->private_data; + struct hd_geometry geom; + + switch (cmd) { + case HDIO_GETGEO: + if (!usermem) + return -EINVAL; + + geom.heads = (u8) port->dev_geom_head; + geom.sectors = (u8) port->dev_geom_sect; + geom.cylinders = port->dev_geom_cyl; + geom.start = get_start_sect(ino->i_bdev); + + if (copy_to_user(usermem, &geom, sizeof(geom))) + return -EFAULT; + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE }; + +static inline int carm_lookup_bucket(u32 msg_size) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(msg_sizes); i++) + if (msg_size <= msg_sizes[i]) + return i; + + return -ENOENT; +} + +static void carm_init_buckets(void *mmio) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(msg_sizes); i++) + writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i)); +} + +static inline void *carm_ref_msg(struct carm_host *host, + unsigned int msg_idx) +{ + return host->msg_base + (msg_idx * CARM_MSG_SIZE); +} + +static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host, + unsigned int msg_idx) +{ + return host->msg_dma + (msg_idx * CARM_MSG_SIZE); +} + +static int carm_send_msg(struct carm_host *host, + struct carm_request *crq) +{ + void *mmio = host->mmio; + u32 msg = (u32) carm_ref_msg_dma(host, crq->tag); + u32 cm_bucket = crq->msg_bucket; + u32 tmp; + int rc = 0; + + VPRINTK("ENTER\n"); + + tmp = readl(mmio + CARM_HMUC); + if (tmp & CARM_Q_FULL) { +#if 0 + tmp = readl(mmio + CARM_INT_MASK); + tmp |= INT_Q_AVAILABLE; + writel(tmp, mmio + CARM_INT_MASK); + readl(mmio + CARM_INT_MASK); /* flush */ +#endif + DPRINTK("host msg queue full\n"); + rc = -EBUSY; + } else { + writel(msg | (cm_bucket << 1), mmio + CARM_IHQP); + readl(mmio + CARM_IHQP); /* flush */ + } + + return rc; +} + +static 
struct carm_request *carm_get_request(struct carm_host *host) +{ + unsigned int i; + + /* obey global hardware limit on S/G entries */ + if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG)) + return NULL; + + for (i = 0; i < CARM_MAX_Q; i++) + if ((host->msg_alloc & (1ULL << i)) == 0) { + struct carm_request *crq = &host->req[i]; + crq->port = NULL; + crq->n_elem = 0; + + host->msg_alloc |= (1ULL << i); + host->n_msgs++; + + assert(host->n_msgs <= CARM_MAX_REQ); + return crq; + } + + DPRINTK("no request available, returning NULL\n"); + return NULL; +} + +static int carm_put_request(struct carm_host *host, struct carm_request *crq) +{ + assert(crq->tag < CARM_MAX_Q); + + if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0)) + return -EINVAL; /* tried to clear a tag that was not active */ + + assert(host->hw_sg_used >= crq->n_elem); + + host->msg_alloc &= ~(1ULL << crq->tag); + host->hw_sg_used -= crq->n_elem; + host->n_msgs--; + + return 0; +} + +static struct carm_request *carm_get_special(struct carm_host *host) +{ + unsigned long flags; + struct carm_request *crq = NULL; + struct request *rq; + int tries = 5000; + + while (tries-- > 0) { + spin_lock_irqsave(&host->lock, flags); + crq = carm_get_request(host); + spin_unlock_irqrestore(&host->lock, flags); + + if (crq) + break; + msleep(10); + } + + if (!crq) + return NULL; + + rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL); + if (!rq) { + spin_lock_irqsave(&host->lock, flags); + carm_put_request(host, crq); + spin_unlock_irqrestore(&host->lock, flags); + return NULL; + } + + crq->rq = rq; + return crq; +} + +static int carm_array_info (struct carm_host *host, unsigned int array_idx) +{ + struct carm_msg_ioctl *ioc; + unsigned int idx; + u32 msg_data; + dma_addr_t msg_dma; + struct carm_request *crq; + int rc; + + crq = carm_get_special(host); + if (!crq) { + rc = -ENOMEM; + goto err_out; + } + + idx = crq->tag; + + ioc = carm_ref_msg(host, idx); + msg_dma = carm_ref_msg_dma(host, idx); + msg_data = (u32) (msg_dma + sizeof(struct carm_array_info)); + + crq->msg_type = CARM_MSG_ARRAY; + crq->msg_subtype = CARM_ARRAY_INFO; + rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) + + sizeof(struct carm_array_info)); + BUG_ON(rc < 0); + crq->msg_bucket = (u32) rc; + + memset(ioc, 0, sizeof(*ioc)); + ioc->type = CARM_MSG_ARRAY; + ioc->subtype = CARM_ARRAY_INFO; + ioc->array_id = (u8) array_idx; + ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); + ioc->data_addr = cpu_to_le32(msg_data); + + spin_lock_irq(&host->lock); + assert(host->state == HST_DEV_SCAN_START || + host->state == HST_DEV_SCAN); + spin_unlock_irq(&host->lock); + + DPRINTK("blk_insert_request, tag == %u\n", idx); + blk_insert_request(host->oob_q, crq->rq, 1, crq, 0); + + return 0; + +err_out: + spin_lock_irq(&host->lock); + host->state = HST_ERROR; + spin_unlock_irq(&host->lock); + return rc; +} + +typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *); + +static int carm_send_special (struct carm_host *host, carm_sspc_t func) +{ + struct carm_request *crq; + struct carm_msg_ioctl *ioc; + void *mem; + unsigned int idx, msg_size; + int rc; + + crq = carm_get_special(host); + if (!crq) + return -ENOMEM; + + idx = crq->tag; + + mem = carm_ref_msg(host, idx); + + msg_size = func(host, idx, mem); + + ioc = mem; + crq->msg_type = ioc->type; + crq->msg_subtype = ioc->subtype; + rc = carm_lookup_bucket(msg_size); + BUG_ON(rc < 0); + crq->msg_bucket = (u32) rc; + + DPRINTK("blk_insert_request, tag == %u\n", idx); + 
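+	/*
+	 * The request was pre-allocated from host->oob_q in carm_get_special();
+	 * queueing it here hands it back to that out-of-band queue with crq
+	 * riding along as the request's private data, so carm_oob_rq_fn() can
+	 * recover it via rq->special and pass it on to carm_send_msg().
+	 */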
blk_insert_request(host->oob_q, crq->rq, 1, crq, 0); + + return 0; +} + +static unsigned int carm_fill_sync_time(struct carm_host *host, + unsigned int idx, void *mem) +{ + struct timeval tv; + struct carm_msg_sync_time *st = mem; + + do_gettimeofday(&tv); + + memset(st, 0, sizeof(*st)); + st->type = CARM_MSG_MISC; + st->subtype = MISC_SET_TIME; + st->handle = cpu_to_le32(TAG_ENCODE(idx)); + st->timestamp = cpu_to_le32(tv.tv_sec); + + return sizeof(struct carm_msg_sync_time); +} + +static unsigned int carm_fill_alloc_buf(struct carm_host *host, + unsigned int idx, void *mem) +{ + struct carm_msg_allocbuf *ab = mem; + + memset(ab, 0, sizeof(*ab)); + ab->type = CARM_MSG_MISC; + ab->subtype = MISC_ALLOC_MEM; + ab->handle = cpu_to_le32(TAG_ENCODE(idx)); + ab->n_sg = 1; + ab->sg_type = SGT_32BIT; + ab->addr = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); + ab->len = cpu_to_le32(PDC_SHM_SIZE >> 1); + ab->evt_pool = cpu_to_le32(host->shm_dma + (16 * 1024)); + ab->n_evt = cpu_to_le32(1024); + ab->rbuf_pool = cpu_to_le32(host->shm_dma); + ab->n_rbuf = cpu_to_le32(RMSG_Q_LEN); + ab->msg_pool = cpu_to_le32(host->shm_dma + RBUF_LEN); + ab->n_msg = cpu_to_le32(CARM_Q_LEN); + ab->sg[0].start = cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1)); + ab->sg[0].len = cpu_to_le32(65536); + + return sizeof(struct carm_msg_allocbuf); +} + +static unsigned int carm_fill_scan_channels(struct carm_host *host, + unsigned int idx, void *mem) +{ + struct carm_msg_ioctl *ioc = mem; + u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + + IOC_SCAN_CHAN_OFFSET); + + memset(ioc, 0, sizeof(*ioc)); + ioc->type = CARM_MSG_IOCTL; + ioc->subtype = CARM_IOC_SCAN_CHAN; + ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); + ioc->data_addr = cpu_to_le32(msg_data); + + /* fill output data area with "no device" default values */ + mem += IOC_SCAN_CHAN_OFFSET; + memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS); + + return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS; +} + +static unsigned int carm_fill_get_fw_ver(struct carm_host *host, + unsigned int idx, void *mem) +{ + struct carm_msg_get_fw_ver *ioc = mem; + u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc)); + + memset(ioc, 0, sizeof(*ioc)); + ioc->type = CARM_MSG_MISC; + ioc->subtype = MISC_GET_FW_VER; + ioc->handle = cpu_to_le32(TAG_ENCODE(idx)); + ioc->data_addr = cpu_to_le32(msg_data); + + return sizeof(struct carm_msg_get_fw_ver) + + sizeof(struct carm_fw_ver); +} + +static inline void carm_end_request_queued(struct carm_host *host, + struct carm_request *crq, + int uptodate) +{ + struct request *req = crq->rq; + int rc; + + rc = end_that_request_first(req, uptodate, req->hard_nr_sectors); + assert(rc == 0); + + end_that_request_last(req); + + rc = carm_put_request(host, crq); + assert(rc == 0); +} + +static inline void carm_push_q (struct carm_host *host, request_queue_t *q) +{ + unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q; + + blk_stop_queue(q); + VPRINTK("STOPPED QUEUE %p\n", q); + + host->wait_q[idx] = q; + host->wait_q_prod++; + BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */ +} + +static inline request_queue_t *carm_pop_q(struct carm_host *host) +{ + unsigned int idx; + + if (host->wait_q_prod == host->wait_q_cons) + return NULL; + + idx = host->wait_q_cons % CARM_MAX_WAIT_Q; + host->wait_q_cons++; + + return host->wait_q[idx]; +} + +static inline void carm_round_robin(struct carm_host *host) +{ + request_queue_t *q = carm_pop_q(host); + if (q) { + blk_start_queue(q); + VPRINTK("STARTED QUEUE %p\n", q); + } +} + +static inline void 
carm_end_rq(struct carm_host *host, struct carm_request *crq, + int is_ok) +{ + carm_end_request_queued(host, crq, is_ok); + if (CARM_MAX_Q == 1) + carm_round_robin(host); + else if ((host->n_msgs <= CARM_MSG_LOW_WATER) && + (host->hw_sg_used <= CARM_SG_LOW_WATER)) { + carm_round_robin(host); + } +} + +static void carm_oob_rq_fn(request_queue_t *q) +{ + struct carm_host *host = q->queuedata; + struct carm_request *crq; + struct request *rq; + int rc; + + while (1) { + DPRINTK("get req\n"); + rq = elv_next_request(q); + if (!rq) + break; + + blkdev_dequeue_request(rq); + + crq = rq->special; + assert(crq != NULL); + assert(crq->rq == rq); + + crq->n_elem = 0; + + DPRINTK("send req\n"); + rc = carm_send_msg(host, crq); + if (rc) { + blk_requeue_request(q, rq); + carm_push_q(host, q); + return; /* call us again later, eventually */ + } + } +} + +static void carm_rq_fn(request_queue_t *q) +{ + struct carm_port *port = q->queuedata; + struct carm_host *host = port->host; + struct carm_msg_rw *msg; + struct carm_request *crq; + struct request *rq; + struct scatterlist *sg; + int writing = 0, pci_dir, i, n_elem, rc; + u32 tmp; + unsigned int msg_size; + +queue_one_request: + VPRINTK("get req\n"); + rq = elv_next_request(q); + if (!rq) + return; + + crq = carm_get_request(host); + if (!crq) { + carm_push_q(host, q); + return; /* call us again later, eventually */ + } + crq->rq = rq; + + blkdev_dequeue_request(rq); + + if (rq_data_dir(rq) == WRITE) { + writing = 1; + pci_dir = PCI_DMA_TODEVICE; + } else { + pci_dir = PCI_DMA_FROMDEVICE; + } + + /* get scatterlist from block layer */ + sg = &crq->sg[0]; + n_elem = blk_rq_map_sg(q, rq, sg); + if (n_elem <= 0) { + carm_end_rq(host, crq, 0); + return; /* request with no s/g entries? */ + } + + /* map scatterlist to PCI bus addresses */ + n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir); + if (n_elem <= 0) { + carm_end_rq(host, crq, 0); + return; /* request with no s/g entries? 
*/ + } + crq->n_elem = n_elem; + crq->port = port; + host->hw_sg_used += n_elem; + + /* + * build read/write message + */ + + VPRINTK("build msg\n"); + msg = (struct carm_msg_rw *) carm_ref_msg(host, crq->tag); + + if (writing) { + msg->type = CARM_MSG_WRITE; + crq->msg_type = CARM_MSG_WRITE; + } else { + msg->type = CARM_MSG_READ; + crq->msg_type = CARM_MSG_READ; + } + + msg->id = port->port_no; + msg->sg_count = n_elem; + msg->sg_type = SGT_32BIT; + msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag)); + msg->lba = cpu_to_le32(rq->sector & 0xffffffff); + tmp = (rq->sector >> 16) >> 16; + msg->lba_high = cpu_to_le16( (u16) tmp ); + msg->lba_count = cpu_to_le16(rq->nr_sectors); + + msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg); + for (i = 0; i < n_elem; i++) { + struct carm_msg_sg *carm_sg = &msg->sg[i]; + carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i])); + carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i])); + msg_size += sizeof(struct carm_msg_sg); + } + + rc = carm_lookup_bucket(msg_size); + BUG_ON(rc < 0); + crq->msg_bucket = (u32) rc; + + /* + * queue read/write message to hardware + */ + + VPRINTK("send msg, tag == %u\n", crq->tag); + rc = carm_send_msg(host, crq); + if (rc) { + carm_put_request(host, crq); + blk_requeue_request(q, rq); + carm_push_q(host, q); + return; /* call us again later, eventually */ + } + + goto queue_one_request; +} + +static void carm_handle_array_info(struct carm_host *host, + struct carm_request *crq, u8 *mem, + int is_ok) +{ + struct carm_port *port; + u8 *msg_data = mem + sizeof(struct carm_array_info); + struct carm_array_info *desc = (struct carm_array_info *) msg_data; + u64 lo, hi; + int cur_port; + size_t slen; + + DPRINTK("ENTER\n"); + + carm_end_rq(host, crq, is_ok); + + if (!is_ok) + goto out; + if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST) + goto out; + + cur_port = host->cur_scan_dev; + + /* should never occur */ + if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) { + printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n", + cur_port, (int) desc->array_id); + goto out; + } + + port = &host->port[cur_port]; + + lo = (u64) le32_to_cpu(desc->size); + hi = (u64) le32_to_cpu(desc->size_hi); + + port->capacity = lo | (hi << 32); + port->dev_geom_head = le16_to_cpu(desc->head); + port->dev_geom_sect = le16_to_cpu(desc->sect); + port->dev_geom_cyl = le16_to_cpu(desc->cyl); + + host->dev_active |= (1 << cur_port); + + strncpy(port->name, desc->name, sizeof(port->name)); + port->name[sizeof(port->name) - 1] = 0; + slen = strlen(port->name); + while (slen && (port->name[slen - 1] == ' ')) { + port->name[slen - 1] = 0; + slen--; + } + + printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n", + pci_name(host->pdev), port->port_no, + (unsigned long long) port->capacity); + printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n", + pci_name(host->pdev), port->port_no, port->name); + +out: + assert(host->state == HST_DEV_SCAN); + schedule_work(&host->fsm_task); +} + +static void carm_handle_scan_chan(struct carm_host *host, + struct carm_request *crq, u8 *mem, + int is_ok) +{ + u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET; + unsigned int i, dev_count = 0; + int new_state = HST_DEV_SCAN_START; + + DPRINTK("ENTER\n"); + + carm_end_rq(host, crq, is_ok); + + if (!is_ok) { + new_state = HST_ERROR; + goto out; + } + + /* TODO: scan and support non-disk devices */ + for (i = 0; i < 8; i++) + if (msg_data[i] == 0) { /* direct-access device (disk) */ + host->dev_present |= (1 << i); + dev_count++; + } + + printk(KERN_INFO DRV_NAME 
"(%s): found %u interesting devices\n", + pci_name(host->pdev), dev_count); + +out: + assert(host->state == HST_PORT_SCAN); + host->state = new_state; + schedule_work(&host->fsm_task); +} + +static void carm_handle_generic(struct carm_host *host, + struct carm_request *crq, int is_ok, + int cur_state, int next_state) +{ + DPRINTK("ENTER\n"); + + carm_end_rq(host, crq, is_ok); + + assert(host->state == cur_state); + if (is_ok) + host->state = next_state; + else + host->state = HST_ERROR; + schedule_work(&host->fsm_task); +} + +static inline void carm_handle_rw(struct carm_host *host, + struct carm_request *crq, int is_ok) +{ + int pci_dir; + + VPRINTK("ENTER\n"); + + if (rq_data_dir(crq->rq) == WRITE) + pci_dir = PCI_DMA_TODEVICE; + else + pci_dir = PCI_DMA_FROMDEVICE; + + pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir); + + carm_end_rq(host, crq, is_ok); +} + +static inline void carm_handle_resp(struct carm_host *host, + u32 ret_handle_le, u32 status) +{ + u32 handle = le32_to_cpu(ret_handle_le); + unsigned int msg_idx; + struct carm_request *crq; + int is_ok = (status == RMSG_OK); + u8 *mem; + + VPRINTK("ENTER, handle == 0x%x\n", handle); + + if (unlikely(!TAG_VALID(handle))) { + printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n", + pci_name(host->pdev), handle); + return; + } + + msg_idx = TAG_DECODE(handle); + VPRINTK("tag == %u\n", msg_idx); + + crq = &host->req[msg_idx]; + + /* fast path */ + if (likely(crq->msg_type == CARM_MSG_READ || + crq->msg_type == CARM_MSG_WRITE)) { + carm_handle_rw(host, crq, is_ok); + return; + } + + mem = carm_ref_msg(host, msg_idx); + + switch (crq->msg_type) { + case CARM_MSG_IOCTL: { + switch (crq->msg_subtype) { + case CARM_IOC_SCAN_CHAN: + carm_handle_scan_chan(host, crq, mem, is_ok); + break; + default: + /* unknown / invalid response */ + goto err_out; + } + break; + } + + case CARM_MSG_MISC: { + switch (crq->msg_subtype) { + case MISC_ALLOC_MEM: + carm_handle_generic(host, crq, is_ok, + HST_ALLOC_BUF, HST_SYNC_TIME); + break; + case MISC_SET_TIME: + carm_handle_generic(host, crq, is_ok, + HST_SYNC_TIME, HST_GET_FW_VER); + break; + case MISC_GET_FW_VER: { + struct carm_fw_ver *ver = (struct carm_fw_ver *) + mem + sizeof(struct carm_msg_get_fw_ver); + if (is_ok) { + host->fw_ver = le32_to_cpu(ver->version); + host->flags |= (ver->features & FL_FW_VER_MASK); + } + carm_handle_generic(host, crq, is_ok, + HST_GET_FW_VER, HST_PORT_SCAN); + break; + } + default: + /* unknown / invalid response */ + goto err_out; + } + break; + } + + case CARM_MSG_ARRAY: { + switch (crq->msg_subtype) { + case CARM_ARRAY_INFO: + carm_handle_array_info(host, crq, mem, is_ok); + break; + default: + /* unknown / invalid response */ + goto err_out; + } + break; + } + + default: + /* unknown / invalid response */ + goto err_out; + } + + return; + +err_out: + printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n", + pci_name(host->pdev), crq->msg_type, crq->msg_subtype); + carm_end_rq(host, crq, 0); +} + +static inline void carm_handle_responses(struct carm_host *host) +{ + void *mmio = host->mmio; + struct carm_response *resp = (struct carm_response *) host->shm; + unsigned int work = 0; + unsigned int idx = host->resp_idx % RMSG_Q_LEN; + + while (1) { + u32 status = le32_to_cpu(resp[idx].status); + + if (status == 0xffffffff) { + VPRINTK("ending response on index %u\n", idx); + writel(idx << 3, mmio + CARM_RESP_IDX); + break; + } + + /* response to a message we sent */ + else if ((status & (1 << 31)) == 0) { + VPRINTK("handling msg response on 
index %u\n", idx); + carm_handle_resp(host, resp[idx].ret_handle, status); + resp[idx].status = 0xffffffff; + } + + /* asynchronous events the hardware throws our way */ + else if ((status & 0xff000000) == (1 << 31)) { + u8 *evt_type_ptr = (u8 *) &resp[idx]; + u8 evt_type = *evt_type_ptr; + printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n", + pci_name(host->pdev), (int) evt_type); + resp[idx].status = 0xffffffff; + } + + idx = NEXT_RESP(idx); + work++; + } + + VPRINTK("EXIT, work==%u\n", work); + host->resp_idx += work; +} + +static irqreturn_t carm_interrupt(int irq, void *__host, struct pt_regs *regs) +{ + struct carm_host *host = __host; + void *mmio; + u32 mask; + int handled = 0; + unsigned long flags; + + if (!host) { + VPRINTK("no host\n"); + return IRQ_NONE; + } + + spin_lock_irqsave(&host->lock, flags); + + mmio = host->mmio; + + /* reading should also clear interrupts */ + mask = readl(mmio + CARM_INT_STAT); + + if (mask == 0 || mask == 0xffffffff) { + VPRINTK("no work, mask == 0x%x\n", mask); + goto out; + } + + if (mask & INT_ACK_MASK) + writel(mask, mmio + CARM_INT_STAT); + + if (unlikely(host->state == HST_INVALID)) { + VPRINTK("not initialized yet, mask = 0x%x\n", mask); + goto out; + } + + if (mask & CARM_HAVE_RESP) { + handled = 1; + carm_handle_responses(host); + } + +out: + spin_unlock_irqrestore(&host->lock, flags); + VPRINTK("EXIT\n"); + return IRQ_RETVAL(handled); +} + +static void carm_fsm_task (void *_data) +{ + struct carm_host *host = _data; + unsigned long flags; + unsigned int state; + int rc, i, next_dev; + int reschedule = 0; + int new_state = HST_INVALID; + + spin_lock_irqsave(&host->lock, flags); + state = host->state; + spin_unlock_irqrestore(&host->lock, flags); + + DPRINTK("ENTER, state == %s\n", state_name[state]); + + switch (state) { + case HST_PROBE_START: + new_state = HST_ALLOC_BUF; + reschedule = 1; + break; + + case HST_ALLOC_BUF: + rc = carm_send_special(host, carm_fill_alloc_buf); + if (rc) { + new_state = HST_ERROR; + reschedule = 1; + } + break; + + case HST_SYNC_TIME: + rc = carm_send_special(host, carm_fill_sync_time); + if (rc) { + new_state = HST_ERROR; + reschedule = 1; + } + break; + + case HST_GET_FW_VER: + rc = carm_send_special(host, carm_fill_get_fw_ver); + if (rc) { + new_state = HST_ERROR; + reschedule = 1; + } + break; + + case HST_PORT_SCAN: + rc = carm_send_special(host, carm_fill_scan_channels); + if (rc) { + new_state = HST_ERROR; + reschedule = 1; + } + break; + + case HST_DEV_SCAN_START: + host->cur_scan_dev = -1; + new_state = HST_DEV_SCAN; + reschedule = 1; + break; + + case HST_DEV_SCAN: + next_dev = -1; + for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++) + if (host->dev_present & (1 << i)) { + next_dev = i; + break; + } + + if (next_dev >= 0) { + host->cur_scan_dev = next_dev; + rc = carm_array_info(host, next_dev); + if (rc) { + new_state = HST_ERROR; + reschedule = 1; + } + } else { + new_state = HST_DEV_ACTIVATE; + reschedule = 1; + } + break; + + case HST_DEV_ACTIVATE: { + int activated = 0; + for (i = 0; i < CARM_MAX_PORTS; i++) + if (host->dev_active & (1 << i)) { + struct carm_port *port = &host->port[i]; + struct gendisk *disk = port->disk; + + set_capacity(disk, port->capacity); + add_disk(disk); + activated++; + } + + printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n", + pci_name(host->pdev), activated); + + new_state = HST_PROBE_FINISHED; + reschedule = 1; + break; + } + + case HST_PROBE_FINISHED: + up(&host->probe_sem); + break; + + case HST_ERROR: + /* FIXME: TODO */ + break; + + 
default: + /* should never occur */ + printk(KERN_ERR PFX "BUG: unknown state %d\n", state); + assert(0); + break; + } + + if (new_state != HST_INVALID) { + spin_lock_irqsave(&host->lock, flags); + host->state = new_state; + spin_unlock_irqrestore(&host->lock, flags); + } + if (reschedule) + schedule_work(&host->fsm_task); +} + +static int carm_init_wait(void *mmio, u32 bits, unsigned int test_bit) +{ + unsigned int i; + + for (i = 0; i < 50000; i++) { + u32 tmp = readl(mmio + CARM_LMUC); + udelay(100); + + if (test_bit) { + if ((tmp & bits) == bits) + return 0; + } else { + if ((tmp & bits) == 0) + return 0; + } + + cond_resched(); + } + + printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n", + bits, test_bit ? "yes" : "no"); + return -EBUSY; +} + +static void carm_init_responses(struct carm_host *host) +{ + void *mmio = host->mmio; + unsigned int i; + struct carm_response *resp = (struct carm_response *) host->shm; + + for (i = 0; i < RMSG_Q_LEN; i++) + resp[i].status = 0xffffffff; + + writel(0, mmio + CARM_RESP_IDX); +} + +static int carm_init_host(struct carm_host *host) +{ + void *mmio = host->mmio; + u32 tmp; + u8 tmp8; + int rc; + + DPRINTK("ENTER\n"); + + writel(0, mmio + CARM_INT_MASK); + + tmp8 = readb(mmio + CARM_INITC); + if (tmp8 & 0x01) { + tmp8 &= ~0x01; + writeb(tmp8, CARM_INITC); + readb(mmio + CARM_INITC); /* flush */ + + DPRINTK("snooze...\n"); + msleep(5000); + } + + tmp = readl(mmio + CARM_HMUC); + if (tmp & CARM_CME) { + DPRINTK("CME bit present, waiting\n"); + rc = carm_init_wait(mmio, CARM_CME, 1); + if (rc) { + DPRINTK("EXIT, carm_init_wait 1 failed\n"); + return rc; + } + } + if (tmp & CARM_RME) { + DPRINTK("RME bit present, waiting\n"); + rc = carm_init_wait(mmio, CARM_RME, 1); + if (rc) { + DPRINTK("EXIT, carm_init_wait 2 failed\n"); + return rc; + } + } + + tmp &= ~(CARM_RME | CARM_CME); + writel(tmp, mmio + CARM_HMUC); + readl(mmio + CARM_HMUC); /* flush */ + + rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0); + if (rc) { + DPRINTK("EXIT, carm_init_wait 3 failed\n"); + return rc; + } + + carm_init_buckets(mmio); + + writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO); + writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI); + writel(RBUF_LEN, mmio + RBUF_BYTE_SZ); + + tmp = readl(mmio + CARM_HMUC); + tmp |= (CARM_RME | CARM_CME | CARM_WZBC); + writel(tmp, mmio + CARM_HMUC); + readl(mmio + CARM_HMUC); /* flush */ + + rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1); + if (rc) { + DPRINTK("EXIT, carm_init_wait 4 failed\n"); + return rc; + } + + writel(0, mmio + CARM_HMPHA); + writel(INT_DEF_MASK, mmio + CARM_INT_MASK); + + carm_init_responses(host); + + /* start initialization, probing state machine */ + spin_lock_irq(&host->lock); + assert(host->state == HST_INVALID); + host->state = HST_PROBE_START; + spin_unlock_irq(&host->lock); + schedule_work(&host->fsm_task); + + DPRINTK("EXIT\n"); + return 0; +} + +static int carm_init_disks(struct carm_host *host) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < CARM_MAX_PORTS; i++) { + struct gendisk *disk; + request_queue_t *q; + struct carm_port *port; + + port = &host->port[i]; + port->host = host; + port->port_no = i; + + disk = alloc_disk(CARM_MINORS_PER_MAJOR); + if (!disk) { + rc = -ENOMEM; + break; + } + + port->disk = disk; + sprintf(disk->disk_name, DRV_NAME "%u_%u", host->id, i); + sprintf(disk->devfs_name, DRV_NAME "/%u_%u", host->id, i); + disk->major = host->major; + disk->first_minor = i * CARM_MINORS_PER_MAJOR; + disk->fops = &carm_bd_ops; + disk->private_data = 
port; + + q = blk_init_queue(carm_rq_fn, &host->lock); + if (!q) { + rc = -ENOMEM; + break; + } + disk->queue = q; + blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG); + blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG); + blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); + + q->queuedata = port; + } + + return rc; +} + +static void carm_free_disks(struct carm_host *host) +{ + unsigned int i; + + for (i = 0; i < CARM_MAX_PORTS; i++) { + struct gendisk *disk = host->port[i].disk; + if (disk) { + request_queue_t *q = disk->queue; + + if (disk->flags & GENHD_FL_UP) + del_gendisk(disk); + if (q) + blk_cleanup_queue(q); + put_disk(disk); + } + } +} + +static int carm_init_shm(struct carm_host *host) +{ + host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE, + &host->shm_dma); + if (!host->shm) + return -ENOMEM; + + host->msg_base = host->shm + RBUF_LEN; + host->msg_dma = host->shm_dma + RBUF_LEN; + + memset(host->shm, 0xff, RBUF_LEN); + memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN); + + return 0; +} + +static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static unsigned int printed_version; + struct carm_host *host; + unsigned int pci_dac; + int rc; + request_queue_t *q; + unsigned int i; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + +#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ + rc = pci_set_dma_mask(pdev, 0xffffffffffffffffULL); + if (!rc) { + rc = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL); + if (rc) { + printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n", + pci_name(pdev)); + goto err_out_regions; + } + pci_dac = 1; + } else { +#endif + rc = pci_set_dma_mask(pdev, 0xffffffffULL); + if (rc) { + printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n", + pci_name(pdev)); + goto err_out_regions; + } + pci_dac = 0; +#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */ + } +#endif + + host = kmalloc(sizeof(*host), GFP_KERNEL); + if (!host) { + printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n", + pci_name(pdev)); + rc = -ENOMEM; + goto err_out_regions; + } + + memset(host, 0, sizeof(*host)); + host->pdev = pdev; + host->flags = pci_dac ? 
FL_DAC : 0; + spin_lock_init(&host->lock); + INIT_WORK(&host->fsm_task, carm_fsm_task, host); + init_MUTEX_LOCKED(&host->probe_sem); + + for (i = 0; i < ARRAY_SIZE(host->req); i++) + host->req[i].tag = i; + + host->mmio = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!host->mmio) { + printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n", + pci_name(pdev)); + rc = -ENOMEM; + goto err_out_kfree; + } + + rc = carm_init_shm(host); + if (rc) { + printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n", + pci_name(pdev)); + goto err_out_iounmap; + } + + q = blk_init_queue(carm_oob_rq_fn, &host->lock); + if (!q) { + printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n", + pci_name(pdev)); + rc = -ENOMEM; + goto err_out_pci_free; + } + host->oob_q = q; + q->queuedata = host; + + /* + * Figure out which major to use: 160, 161, or dynamic + */ + if (!test_and_set_bit(0, &carm_major_alloc)) + host->major = 160; + else if (!test_and_set_bit(1, &carm_major_alloc)) + host->major = 161; + else + host->flags |= FL_DYN_MAJOR; + + host->id = carm_host_id; + sprintf(host->name, DRV_NAME "%d", carm_host_id); + + rc = register_blkdev(host->major, host->name); + if (rc < 0) + goto err_out_free_majors; + if (host->flags & FL_DYN_MAJOR) + host->major = rc; + + devfs_mk_dir(DRV_NAME); + + rc = carm_init_disks(host); + if (rc) + goto err_out_blkdev_disks; + + pci_set_master(pdev); + + rc = request_irq(pdev->irq, carm_interrupt, SA_SHIRQ, DRV_NAME, host); + if (rc) { + printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n", + pci_name(pdev)); + goto err_out_blkdev_disks; + } + + rc = carm_init_host(host); + if (rc) + goto err_out_free_irq; + + DPRINTK("waiting for probe_sem\n"); + down(&host->probe_sem); + + printk(KERN_INFO "%s: pci %s, ports %d, io %lx, irq %u, major %d\n", + host->name, pci_name(pdev), (int) CARM_MAX_PORTS, + pci_resource_start(pdev, 0), pdev->irq, host->major); + + carm_host_id++; + pci_set_drvdata(pdev, host); + return 0; + +err_out_free_irq: + free_irq(pdev->irq, host); +err_out_blkdev_disks: + carm_free_disks(host); + unregister_blkdev(host->major, host->name); +err_out_free_majors: + if (host->major == 160) + clear_bit(0, &carm_major_alloc); + else if (host->major == 161) + clear_bit(1, &carm_major_alloc); + blk_cleanup_queue(host->oob_q); +err_out_pci_free: + pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma); +err_out_iounmap: + iounmap(host->mmio); +err_out_kfree: + kfree(host); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + +static void carm_remove_one (struct pci_dev *pdev) +{ + struct carm_host *host = pci_get_drvdata(pdev); + + if (!host) { + printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n", + pci_name(pdev)); + return; + } + + free_irq(pdev->irq, host); + carm_free_disks(host); + devfs_remove(DRV_NAME); + unregister_blkdev(host->major, host->name); + if (host->major == 160) + clear_bit(0, &carm_major_alloc); + else if (host->major == 161) + clear_bit(1, &carm_major_alloc); + blk_cleanup_queue(host->oob_q); + pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma); + iounmap(host->mmio); + kfree(host); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +static int __init carm_init(void) +{ + return pci_module_init(&carm_driver); +} + +static void __exit carm_exit(void) +{ + pci_unregister_driver(&carm_driver); +} + +module_init(carm_init); +module_exit(carm_exit); + + diff --git a/drivers/char/drm/drm_irq.h 
b/drivers/char/drm/drm_irq.h new file mode 100644 index 000000000..1d1d95116 --- /dev/null +++ b/drivers/char/drm/drm_irq.h @@ -0,0 +1,371 @@ +/** + * \file drm_irq.h + * IRQ support + * + * \author Rickard E. (Rik) Faith + * \author Gareth Hughes + */ + +/* + * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com + * + * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" + +#include /* For task queue support */ + +#ifndef __HAVE_SHARED_IRQ +#define __HAVE_SHARED_IRQ 0 +#endif + +#if __HAVE_SHARED_IRQ +#define DRM_IRQ_TYPE SA_SHIRQ +#else +#define DRM_IRQ_TYPE 0 +#endif + +/** + * Get interrupt from bus id. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_irq_busid structure. + * \return zero on success or a negative number on failure. + * + * Finds the PCI device with the specified bus id and gets its IRQ number. + * This IOCTL is deprecated, and will now return EINVAL for any busid not equal + * to that of the device that this DRM instance attached to. + */ +int DRM(irq_by_busid)(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_irq_busid_t p; + + if (copy_from_user(&p, (drm_irq_busid_t *)arg, sizeof(p))) + return -EFAULT; + + if ((p.busnum >> 8) != dev->pci_domain || + (p.busnum & 0xff) != dev->pci_bus || + p.devnum != dev->pci_slot || + p.funcnum != dev->pci_func) + return -EINVAL; + + p.irq = dev->irq; + + DRM_DEBUG("%d:%d:%d => IRQ %d\n", + p.busnum, p.devnum, p.funcnum, p.irq); + if (copy_to_user((drm_irq_busid_t *)arg, &p, sizeof(p))) + return -EFAULT; + return 0; +} + +#if __HAVE_IRQ + +/** + * Install IRQ handler. + * + * \param dev DRM device. + * \param irq IRQ number. + * + * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver + * \c DRM(driver_irq_preinstall)() and \c DRM(driver_irq_postinstall)() functions + * before and after the installation. 
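+ * + * dev->irq_enabled is set (while holding dev->struct_sem) before request_irq() + * is called, and is cleared again under the same semaphore if the request fails.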
+ */ +int DRM(irq_install)( drm_device_t *dev ) +{ + int ret; + + if ( dev->irq == 0 ) + return -EINVAL; + + down( &dev->struct_sem ); + + /* Driver must have been initialized */ + if ( !dev->dev_private ) { + up( &dev->struct_sem ); + return -EINVAL; + } + + if ( dev->irq_enabled ) { + up( &dev->struct_sem ); + return -EBUSY; + } + dev->irq_enabled = 1; + up( &dev->struct_sem ); + + DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq ); + +#if __HAVE_DMA + dev->dma->next_buffer = NULL; + dev->dma->next_queue = NULL; + dev->dma->this_buffer = NULL; +#endif + +#if __HAVE_IRQ_BH + INIT_WORK(&dev->work, DRM(irq_immediate_bh), dev); +#endif + +#if __HAVE_VBL_IRQ + init_waitqueue_head(&dev->vbl_queue); + + spin_lock_init( &dev->vbl_lock ); + + INIT_LIST_HEAD( &dev->vbl_sigs.head ); + + dev->vbl_pending = 0; +#endif + + /* Before installing handler */ + DRM(driver_irq_preinstall)(dev); + + /* Install handler */ + ret = request_irq( dev->irq, DRM(irq_handler), + DRM_IRQ_TYPE, dev->devname, dev ); + if ( ret < 0 ) { + down( &dev->struct_sem ); + dev->irq_enabled = 0; + up( &dev->struct_sem ); + return ret; + } + + /* After installing handler */ + DRM(driver_irq_postinstall)(dev); + + return 0; +} + +/** + * Uninstall the IRQ handler. + * + * \param dev DRM device. + * + * Calls the driver's \c DRM(driver_irq_uninstall)() function, and stops the irq. + */ +int DRM(irq_uninstall)( drm_device_t *dev ) +{ + int irq_enabled; + + down( &dev->struct_sem ); + irq_enabled = dev->irq_enabled; + dev->irq_enabled = 0; + up( &dev->struct_sem ); + + if ( !irq_enabled ) + return -EINVAL; + + DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq ); + + DRM(driver_irq_uninstall)( dev ); + + free_irq( dev->irq, dev ); + + return 0; +} + +/** + * IRQ control ioctl. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param arg user argument, pointing to a drm_control structure. + * \return zero on success or a negative number on failure. + * + * Calls irq_install() or irq_uninstall() according to \p arg. + */ +int DRM(control)( struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_control_t ctl; + + if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) ) + return -EFAULT; + + switch ( ctl.func ) { + case DRM_INST_HANDLER: + if (dev->if_version < DRM_IF_VERSION(1, 2) && + ctl.irq != dev->irq) + return -EINVAL; + return DRM(irq_install)( dev ); + case DRM_UNINST_HANDLER: + return DRM(irq_uninstall)( dev ); + default: + return -EINVAL; + } +} + +#if __HAVE_VBL_IRQ + +/** + * Wait for VBLANK. + * + * \param inode device inode. + * \param filp file pointer. + * \param cmd command. + * \param data user argument, pointing to a drm_wait_vblank structure. + * \return zero on success or a negative number on failure. + * + * Verifies the IRQ is installed. + * + * If a signal is requested checks if this task has already scheduled the same signal + * for the same vblank sequence number - nothing to be done in + * that case. If the number of tasks waiting for the interrupt exceeds 100 the + * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this + * task. + * + * If a signal is not requested, then calls vblank_wait(). 
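+ * + * Relative requests are converted to absolute by adding the current value of + * drm_device::vbl_received to the requested sequence before processing.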
+ */ +int DRM(wait_vblank)( DRM_IOCTL_ARGS ) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_wait_vblank_t vblwait; + struct timeval now; + int ret = 0; + unsigned int flags; + + if (!dev->irq) + return -EINVAL; + + DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data, + sizeof(vblwait) ); + + switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) { + case _DRM_VBLANK_RELATIVE: + vblwait.request.sequence += atomic_read( &dev->vbl_received ); + vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; + case _DRM_VBLANK_ABSOLUTE: + break; + default: + return -EINVAL; + } + + flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + + if ( flags & _DRM_VBLANK_SIGNAL ) { + unsigned long irqflags; + drm_vbl_sig_t *vbl_sig; + + vblwait.reply.sequence = atomic_read( &dev->vbl_received ); + + spin_lock_irqsave( &dev->vbl_lock, irqflags ); + + /* Check if this task has already scheduled the same signal + * for the same vblank sequence number; nothing to be done in + * that case + */ + list_for_each_entry( vbl_sig, &dev->vbl_sigs.head, head ) { + if (vbl_sig->sequence == vblwait.request.sequence + && vbl_sig->info.si_signo == vblwait.request.signal + && vbl_sig->task == current) + { + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + goto done; + } + } + + if ( dev->vbl_pending >= 100 ) { + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + return -EBUSY; + } + + dev->vbl_pending++; + + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + + if ( !( vbl_sig = DRM_MALLOC( sizeof( drm_vbl_sig_t ) ) ) ) { + return -ENOMEM; + } + + memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) ); + + vbl_sig->sequence = vblwait.request.sequence; + vbl_sig->info.si_signo = vblwait.request.signal; + vbl_sig->task = current; + + spin_lock_irqsave( &dev->vbl_lock, irqflags ); + + list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head ); + + spin_unlock_irqrestore( &dev->vbl_lock, irqflags ); + } else { + ret = DRM(vblank_wait)( dev, &vblwait.request.sequence ); + + do_gettimeofday( &now ); + vblwait.reply.tval_sec = now.tv_sec; + vblwait.reply.tval_usec = now.tv_usec; + } + +done: + DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait, + sizeof(vblwait) ); + + return ret; +} + +/** + * Send the VBLANK signals. + * + * \param dev DRM device. + * + * Sends a signal for each task in drm_device::vbl_sigs and empties the list. + * + * If a signal is not requested, then calls vblank_wait(). 
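+ * + * Entries whose sequence number has not yet been reached (more than 1<<23 + * counts in the future) are left on the list for a later vblank.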
+ */ +void DRM(vbl_send_signals)( drm_device_t *dev ) +{ + struct list_head *list, *tmp; + drm_vbl_sig_t *vbl_sig; + unsigned int vbl_seq = atomic_read( &dev->vbl_received ); + unsigned long flags; + + spin_lock_irqsave( &dev->vbl_lock, flags ); + + list_for_each_safe( list, tmp, &dev->vbl_sigs.head ) { + vbl_sig = list_entry( list, drm_vbl_sig_t, head ); + if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) { + vbl_sig->info.si_code = vbl_seq; + send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task ); + + list_del( list ); + + DRM_FREE( vbl_sig, sizeof(*vbl_sig) ); + + dev->vbl_pending--; + } + } + + spin_unlock_irqrestore( &dev->vbl_lock, flags ); +} + +#endif /* __HAVE_VBL_IRQ */ + +#endif /* __HAVE_IRQ */ diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h new file mode 100644 index 000000000..3b5f8d3a2 --- /dev/null +++ b/drivers/char/drm/drm_pciids.h @@ -0,0 +1,203 @@ +/* + This file is auto-generated from the drm_pciids.txt in the DRM CVS + Please contact dri-devel@lists.sf.net to add new cards to this list +*/ +#define radeon_PCI_IDS \ + {0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C5A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5147, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5148, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x514F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5158, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5159, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x515A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5168, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5169, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5837, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5963, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5968, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define r128_PCI_IDS \ + {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4d46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5041, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5044, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5045, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5046, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5047, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5048, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5049, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x504F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5052, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5245, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5246, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5247, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x524b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x524c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x534d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define mga_PCI_IDS \ + {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define mach64_PCI_IDS \ + {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + 
{0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define sisdrv_PCI_IDS \ + {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x6300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define tdfx_PCI_IDS \ + {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define viadrv_PCI_IDS \ + {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define i810_PCI_IDS \ + {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define i830_PCI_IDS \ + {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x2562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define gamma_PCI_IDS \ + {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define savage_PCI_IDS \ + {0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0, 0, 0} + +#define ffb_PCI_IDS \ + {0, 0, 0} + diff --git a/drivers/char/ds1286.c b/drivers/char/ds1286.c new file mode 100644 index 000000000..bc042fb2d --- /dev/null +++ b/drivers/char/ds1286.c @@ 
-0,0 +1,578 @@ +/* + * DS1286 Real Time Clock interface for Linux + * + * Copyright (C) 1998, 1999, 2000 Ralf Baechle + * + * Based on code written by Paul Gortmaker. + * + * This driver allows use of the real time clock (built into nearly all + * computers) from user space. It exports the /dev/rtc interface supporting + * various ioctl() and also the /proc/rtc pseudo-file for status + * information. + * + * The ioctls can be used to set the interrupt behaviour and generation rate + * from the RTC via IRQ 8. Then the /dev/rtc interface can be used to make + * use of these timer interrupts, be they interval or alarm based. + * + * The /dev/rtc interface will block on reads until an interrupt has been + * received. If a RTC interrupt has already happened, it will output an + * unsigned long and then block. The output value contains the interrupt + * status in the low byte and the number of interrupts since the last read + * in the remaining high bytes. The /dev/rtc interface can also be used with + * the select(2) call. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DS1286_VERSION "1.0" + +/* + * We sponge a minor off of the misc major. No need slurping + * up another valuable major dev number for this. If you add + * an ioctl, make sure you don't conflict with SPARC's RTC + * ioctls. + */ + +static DECLARE_WAIT_QUEUE_HEAD(ds1286_wait); + +static ssize_t ds1286_read(struct file *file, char *buf, + size_t count, loff_t *ppos); + +static int ds1286_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg); + +static unsigned int ds1286_poll(struct file *file, poll_table *wait); + +static void ds1286_get_alm_time (struct rtc_time *alm_tm); +static void ds1286_get_time(struct rtc_time *rtc_tm); +static int ds1286_set_time(struct rtc_time *rtc_tm); + +static inline unsigned char ds1286_is_updating(void); + +static spinlock_t ds1286_lock = SPIN_LOCK_UNLOCKED; + +static int ds1286_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data); + +/* + * Bits in rtc_status. (7 bits of room for future expansion) + */ + +#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ +#define RTC_TIMER_ON 0x02 /* missed irq timer active */ + +static unsigned char ds1286_status; /* bitmapped status byte. */ + +static unsigned char days_in_mo[] = { + 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 +}; + +/* + * Now all the various file operations that we export. + */ + +static ssize_t ds1286_read(struct file *file, char *buf, + size_t count, loff_t *ppos) +{ + return -EIO; +} + +static int ds1286_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct rtc_time wtime; + + switch (cmd) { + case RTC_AIE_OFF: /* Mask alarm int. enab. bit */ + { + unsigned int flags; + unsigned char val; + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + spin_lock_irqsave(&ds1286_lock, flags); + val = rtc_read(RTC_CMD); + val |= RTC_TDM; + rtc_write(val, RTC_CMD); + spin_unlock_irqrestore(&ds1286_lock, flags); + + return 0; + } + case RTC_AIE_ON: /* Allow alarm interrupts. 
*/ + { + unsigned int flags; + unsigned char val; + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + spin_lock_irqsave(&ds1286_lock, flags); + val = rtc_read(RTC_CMD); + val &= ~RTC_TDM; + rtc_write(val, RTC_CMD); + spin_unlock_irqrestore(&ds1286_lock, flags); + + return 0; + } + case RTC_WIE_OFF: /* Mask watchdog int. enab. bit */ + { + unsigned int flags; + unsigned char val; + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + spin_lock_irqsave(&ds1286_lock, flags); + val = rtc_read(RTC_CMD); + val |= RTC_WAM; + rtc_write(val, RTC_CMD); + spin_unlock_irqrestore(&ds1286_lock, flags); + + return 0; + } + case RTC_WIE_ON: /* Allow watchdog interrupts. */ + { + unsigned int flags; + unsigned char val; + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + spin_lock_irqsave(&ds1286_lock, flags); + val = rtc_read(RTC_CMD); + val &= ~RTC_WAM; + rtc_write(val, RTC_CMD); + spin_unlock_irqrestore(&ds1286_lock, flags); + + return 0; + } + case RTC_ALM_READ: /* Read the present alarm time */ + { + /* + * This returns a struct rtc_time. Reading >= 0xc0 + * means "don't care" or "match all". Only the tm_hour, + * tm_min, and tm_sec values are filled in. + */ + + memset(&wtime, 0, sizeof(wtime)); + ds1286_get_alm_time(&wtime); + break; + } + case RTC_ALM_SET: /* Store a time into the alarm */ + { + /* + * This expects a struct rtc_time. Writing 0xff means + * "don't care" or "match all". Only the tm_hour, + * tm_min and tm_sec are used. + */ + unsigned char hrs, min, sec; + struct rtc_time alm_tm; + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + if (copy_from_user(&alm_tm, (struct rtc_time*)arg, + sizeof(struct rtc_time))) + return -EFAULT; + + hrs = alm_tm.tm_hour; + min = alm_tm.tm_min; + + if (hrs >= 24) + hrs = 0xff; + + if (min >= 60) + min = 0xff; + + BIN_TO_BCD(sec); + BIN_TO_BCD(min); + BIN_TO_BCD(hrs); + + spin_lock(&ds1286_lock); + rtc_write(hrs, RTC_HOURS_ALARM); + rtc_write(min, RTC_MINUTES_ALARM); + spin_unlock(&ds1286_lock); + + return 0; + } + case RTC_RD_TIME: /* Read the time/date from RTC */ + { + memset(&wtime, 0, sizeof(wtime)); + ds1286_get_time(&wtime); + break; + } + case RTC_SET_TIME: /* Set the RTC */ + { + struct rtc_time rtc_tm; + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, + sizeof(struct rtc_time))) + return -EFAULT; + + return ds1286_set_time(&rtc_tm); + } + default: + return -EINVAL; + } + return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0; +} + +/* + * We enforce only one user at a time here with the open/close. + * Also clear the previous interrupt data on an open, and clean + * up things on a close. + */ + +static int ds1286_open(struct inode *inode, struct file *file) +{ + spin_lock_irq(&ds1286_lock); + + if (ds1286_status & RTC_IS_OPEN) + goto out_busy; + + ds1286_status |= RTC_IS_OPEN; + + spin_unlock_irq(&ds1286_lock); + return 0; + +out_busy: + spin_lock_irq(&ds1286_lock); + return -EBUSY; +} + +static int ds1286_release(struct inode *inode, struct file *file) +{ + ds1286_status &= ~RTC_IS_OPEN; + + return 0; +} + +static unsigned int ds1286_poll(struct file *file, poll_table *wait) +{ + poll_wait(file, &ds1286_wait, wait); + + return 0; +} + +/* + * The various file operations we support. 
+ */ + +static struct file_operations ds1286_fops = { + .llseek = no_llseek, + .read = ds1286_read, + .poll = ds1286_poll, + .ioctl = ds1286_ioctl, + .open = ds1286_open, + .release = ds1286_release, +}; + +static struct miscdevice ds1286_dev= +{ + .minor = RTC_MINOR, + .name = "rtc", + .fops = &ds1286_fops, +}; + +static int __init ds1286_init(void) +{ + int err; + + printk(KERN_INFO "DS1286 Real Time Clock Driver v%s\n", DS1286_VERSION); + + err = misc_register(&ds1286_dev); + if (err) + goto out; + + if (!create_proc_read_entry("driver/rtc", 0, 0, ds1286_read_proc, NULL)) { + err = -ENOMEM; + + goto out_deregister; + } + + return 0; + +out_deregister: + misc_deregister(&ds1286_dev); + +out: + return err; +} + +static void __exit ds1286_exit(void) +{ + remove_proc_entry("driver/rtc", NULL); + misc_deregister(&ds1286_dev); +} + +static char *days[] = { + "***", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" +}; + +/* + * Info exported via "/proc/rtc". + */ +static int ds1286_proc_output(char *buf) +{ + char *p, *s; + struct rtc_time tm; + unsigned char hundredth, month, cmd, amode; + + p = buf; + + ds1286_get_time(&tm); + hundredth = rtc_read(RTC_HUNDREDTH_SECOND); + BCD_TO_BIN(hundredth); + + p += sprintf(p, + "rtc_time\t: %02d:%02d:%02d.%02d\n" + "rtc_date\t: %04d-%02d-%02d\n", + tm.tm_hour, tm.tm_min, tm.tm_sec, hundredth, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday); + + /* + * We implicitly assume 24hr mode here. Alarm values >= 0xc0 will + * match any value for that particular field. Values that are + * greater than a valid time, but less than 0xc0 shouldn't appear. + */ + ds1286_get_alm_time(&tm); + p += sprintf(p, "alarm\t\t: %s ", days[tm.tm_wday]); + if (tm.tm_hour <= 24) + p += sprintf(p, "%02d:", tm.tm_hour); + else + p += sprintf(p, "**:"); + + if (tm.tm_min <= 59) + p += sprintf(p, "%02d\n", tm.tm_min); + else + p += sprintf(p, "**\n"); + + month = rtc_read(RTC_MONTH); + p += sprintf(p, + "oscillator\t: %s\n" + "square_wave\t: %s\n", + (month & RTC_EOSC) ? "disabled" : "enabled", + (month & RTC_ESQW) ? "disabled" : "enabled"); + + amode = ((rtc_read(RTC_MINUTES_ALARM) & 0x80) >> 5) | + ((rtc_read(RTC_HOURS_ALARM) & 0x80) >> 6) | + ((rtc_read(RTC_DAY_ALARM) & 0x80) >> 7); + if (amode == 7) s = "each minute"; + else if (amode == 3) s = "minutes match"; + else if (amode == 1) s = "hours and minutes match"; + else if (amode == 0) s = "days, hours and minutes match"; + else s = "invalid"; + p += sprintf(p, "alarm_mode\t: %s\n", s); + + cmd = rtc_read(RTC_CMD); + p += sprintf(p, + "alarm_enable\t: %s\n" + "wdog_alarm\t: %s\n" + "alarm_mask\t: %s\n" + "wdog_alarm_mask\t: %s\n" + "interrupt_mode\t: %s\n" + "INTB_mode\t: %s_active\n" + "interrupt_pins\t: %s\n", + (cmd & RTC_TDF) ? "yes" : "no", + (cmd & RTC_WAF) ? "yes" : "no", + (cmd & RTC_TDM) ? "disabled" : "enabled", + (cmd & RTC_WAM) ? "disabled" : "enabled", + (cmd & RTC_PU_LVL) ? "pulse" : "level", + (cmd & RTC_IBH_LO) ? "low" : "high", + (cmd & RTC_IPSW) ? 
"unswapped" : "swapped"); + + return p - buf; +} + +static int ds1286_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = ds1286_proc_output (page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) + len = count; + if (len<0) + len = 0; + + return len; +} + +/* + * Returns true if a clock update is in progress + */ +static inline unsigned char ds1286_is_updating(void) +{ + return rtc_read(RTC_CMD) & RTC_TE; +} + + +static void ds1286_get_time(struct rtc_time *rtc_tm) +{ + unsigned char save_control; + unsigned int flags; + unsigned long uip_watchdog = jiffies; + + /* + * read RTC once any update in progress is done. The update + * can take just over 2ms. We wait 10 to 20ms. There is no need to + * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP. + * If you need to know *exactly* when a second has started, enable + * periodic update complete interrupts, (via ioctl) and then + * immediately read /dev/rtc which will block until you get the IRQ. + * Once the read clears, read the RTC time (again via ioctl). Easy. + */ + + if (ds1286_is_updating() != 0) + while (jiffies - uip_watchdog < 2*HZ/100) + barrier(); + + /* + * Only the values that we read from the RTC are set. We leave + * tm_wday, tm_yday and tm_isdst untouched. Even though the + * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated + * by the RTC when initially set to a non-zero value. + */ + spin_lock_irqsave(&ds1286_lock, flags); + save_control = rtc_read(RTC_CMD); + rtc_write((save_control|RTC_TE), RTC_CMD); + + rtc_tm->tm_sec = rtc_read(RTC_SECONDS); + rtc_tm->tm_min = rtc_read(RTC_MINUTES); + rtc_tm->tm_hour = rtc_read(RTC_HOURS) & 0x3f; + rtc_tm->tm_mday = rtc_read(RTC_DATE); + rtc_tm->tm_mon = rtc_read(RTC_MONTH) & 0x1f; + rtc_tm->tm_year = rtc_read(RTC_YEAR); + + rtc_write(save_control, RTC_CMD); + spin_unlock_irqrestore(&ds1286_lock, flags); + + BCD_TO_BIN(rtc_tm->tm_sec); + BCD_TO_BIN(rtc_tm->tm_min); + BCD_TO_BIN(rtc_tm->tm_hour); + BCD_TO_BIN(rtc_tm->tm_mday); + BCD_TO_BIN(rtc_tm->tm_mon); + BCD_TO_BIN(rtc_tm->tm_year); + + /* + * Account for differences between how the RTC uses the values + * and how they are defined in a struct rtc_time; + */ + if (rtc_tm->tm_year < 45) + rtc_tm->tm_year += 30; + if ((rtc_tm->tm_year += 40) < 70) + rtc_tm->tm_year += 100; + + rtc_tm->tm_mon--; +} + +static int ds1286_set_time(struct rtc_time *rtc_tm) +{ + unsigned char mon, day, hrs, min, sec, leap_yr; + unsigned char save_control; + unsigned int yrs, flags; + + + yrs = rtc_tm->tm_year + 1900; + mon = rtc_tm->tm_mon + 1; /* tm_mon starts at zero */ + day = rtc_tm->tm_mday; + hrs = rtc_tm->tm_hour; + min = rtc_tm->tm_min; + sec = rtc_tm->tm_sec; + + if (yrs < 1970) + return -EINVAL; + + leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400)); + + if ((mon > 12) || (day == 0)) + return -EINVAL; + + if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) + return -EINVAL; + + if ((hrs >= 24) || (min >= 60) || (sec >= 60)) + return -EINVAL; + + if ((yrs -= 1940) > 255) /* They are unsigned */ + return -EINVAL; + + if (yrs >= 100) + yrs -= 100; + + BIN_TO_BCD(sec); + BIN_TO_BCD(min); + BIN_TO_BCD(hrs); + BIN_TO_BCD(day); + BIN_TO_BCD(mon); + BIN_TO_BCD(yrs); + + spin_lock_irqsave(&ds1286_lock, flags); + save_control = rtc_read(RTC_CMD); + rtc_write((save_control|RTC_TE), RTC_CMD); + + rtc_write(yrs, RTC_YEAR); + rtc_write(mon, RTC_MONTH); + rtc_write(day, RTC_DATE); + rtc_write(hrs, RTC_HOURS); + rtc_write(min, RTC_MINUTES); + 
rtc_write(sec, RTC_SECONDS); + rtc_write(0, RTC_HUNDREDTH_SECOND); + + rtc_write(save_control, RTC_CMD); + spin_unlock_irqrestore(&ds1286_lock, flags); + + return 0; +} + +static void ds1286_get_alm_time(struct rtc_time *alm_tm) +{ + unsigned char cmd; + unsigned int flags; + + /* + * Only the values that we read from the RTC are set. That + * means only tm_wday, tm_hour, tm_min. + */ + spin_lock_irqsave(&ds1286_lock, flags); + alm_tm->tm_min = rtc_read(RTC_MINUTES_ALARM) & 0x7f; + alm_tm->tm_hour = rtc_read(RTC_HOURS_ALARM) & 0x1f; + alm_tm->tm_wday = rtc_read(RTC_DAY_ALARM) & 0x07; + cmd = rtc_read(RTC_CMD); + spin_unlock_irqrestore(&ds1286_lock, flags); + + BCD_TO_BIN(alm_tm->tm_min); + BCD_TO_BIN(alm_tm->tm_hour); + alm_tm->tm_sec = 0; +} + +module_init(ds1286_init); +module_exit(ds1286_exit); + +MODULE_AUTHOR("Ralf Baechle"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(RTC_MINOR); diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c new file mode 100644 index 000000000..cef6032b8 --- /dev/null +++ b/drivers/char/hpet.c @@ -0,0 +1,1076 @@ +/* + * Intel & MS High Precision Event Timer Implementation. + * Contributors: + * Venki Pallipadi + * Bob Picco + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * The High Precision Event Timer driver. + * This driver is closely modelled after the rtc.c driver. + * http://www.intel.com/labs/platcomp/hpet/hpetspec.htm + */ +#define HPET_USER_FREQ (64) +#define HPET_DRIFT (500) + +static u32 hpet_ntimer, hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; + +/* A lock for concurrent access by app and isr hpet activity. */ +static spinlock_t hpet_lock = SPIN_LOCK_UNLOCKED; +/* A lock for concurrent intermodule access to hpet and isr hpet activity. */ +static spinlock_t hpet_task_lock = SPIN_LOCK_UNLOCKED; + +struct hpet_dev { + struct hpets *hd_hpets; + struct hpet *hd_hpet; + struct hpet_timer *hd_timer; + unsigned long hd_ireqfreq; + unsigned long hd_irqdata; + wait_queue_head_t hd_waitqueue; + struct fasync_struct *hd_async_queue; + struct hpet_task *hd_task; + unsigned int hd_flags; + unsigned int hd_irq; + unsigned int hd_hdwirq; +}; + +struct hpets { + struct hpets *hp_next; + struct hpet *hp_hpet; + unsigned long hp_period; + unsigned long hp_delta; + unsigned int hp_ntimer; + unsigned int hp_which; + struct hpet_dev hp_dev[1]; +}; + +static struct hpets *hpets; + +#define HPET_OPEN 0x0001 +#define HPET_IE 0x0002 /* interrupt enabled */ +#define HPET_PERIODIC 0x0004 + +#if BITS_PER_LONG == 64 +#define write_counter(V, MC) writeq(V, MC) +#define read_counter(MC) readq(MC) +#else +#define write_counter(V, MC) writel(V, MC) +#define read_counter(MC) readl(MC) +#endif + +#ifndef readq +static unsigned long long __inline readq(void *addr) +{ + return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL); +} +#endif + +#ifndef writeq +static void __inline writeq(unsigned long long v, void *addr) +{ + writel(v & 0xffffffff, addr); + writel(v >> 32, addr + 4); +} +#endif + +static irqreturn_t hpet_interrupt(int irq, void *data, struct pt_regs *regs) +{ + struct hpet_dev *devp; + unsigned long isr; + + devp = data; + + spin_lock(&hpet_lock); + devp->hd_irqdata++; + + /* + * For non-periodic timers, increment the accumulator. + * This has the effect of treating non-periodic like periodic. 
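+ * The comparator is re-armed to the current main counter value plus the + * requested interval (plus the calibrated hp_delta slack), so a one-shot timer + * keeps firing at the requested rate.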
+ */ + if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) { + unsigned long m, t; + + t = devp->hd_ireqfreq; + m = read_counter(&devp->hd_hpet->hpet_mc); + write_counter(t + m + devp->hd_hpets->hp_delta, + &devp->hd_timer->hpet_compare); + } + + isr = (1 << (devp - devp->hd_hpets->hp_dev)); + writeq(isr, &devp->hd_hpet->hpet_isr); + spin_unlock(&hpet_lock); + + spin_lock(&hpet_task_lock); + if (devp->hd_task) + devp->hd_task->ht_func(devp->hd_task->ht_data); + spin_unlock(&hpet_task_lock); + + wake_up_interruptible(&devp->hd_waitqueue); + + kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN); + + return IRQ_HANDLED; +} + +static int hpet_open(struct inode *inode, struct file *file) +{ + struct hpet_dev *devp; + struct hpets *hpetp; + int i; + + spin_lock_irq(&hpet_lock); + + for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) + for (i = 0; i < hpetp->hp_ntimer; i++) + if (hpetp->hp_dev[i].hd_flags & HPET_OPEN + || hpetp->hp_dev[i].hd_task) + continue; + else { + devp = &hpetp->hp_dev[i]; + break; + } + + if (!devp) { + spin_unlock_irq(&hpet_lock); + return -EBUSY; + } + + file->private_data = devp; + devp->hd_irqdata = 0; + devp->hd_flags |= HPET_OPEN; + spin_unlock_irq(&hpet_lock); + + return 0; +} + +static ssize_t +hpet_read(struct file *file, char *buf, size_t count, loff_t * ppos) +{ + DECLARE_WAITQUEUE(wait, current); + unsigned long data; + ssize_t retval; + struct hpet_dev *devp; + + devp = file->private_data; + if (!devp->hd_ireqfreq) + return -EIO; + + if (count < sizeof(unsigned long)) + return -EINVAL; + + add_wait_queue(&devp->hd_waitqueue, &wait); + + do { + __set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_irq(&hpet_lock); + data = devp->hd_irqdata; + devp->hd_irqdata = 0; + spin_unlock_irq(&hpet_lock); + + if (data) + break; + else if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } else if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + + schedule(); + + } while (1); + + retval = put_user(data, (unsigned long *)buf); + if (!retval) + retval = sizeof(unsigned long); + out: + current->state = TASK_RUNNING; + remove_wait_queue(&devp->hd_waitqueue, &wait); + + return retval; +} + +static unsigned int hpet_poll(struct file *file, poll_table * wait) +{ + unsigned long v; + struct hpet_dev *devp; + + devp = file->private_data; + + if (!devp->hd_ireqfreq) + return 0; + + poll_wait(file, &devp->hd_waitqueue, wait); + + spin_lock_irq(&hpet_lock); + v = devp->hd_irqdata; + spin_unlock_irq(&hpet_lock); + + if (v != 0) + return POLLIN | POLLRDNORM; + + return 0; +} + +static int hpet_mmap(struct file *file, struct vm_area_struct *vma) +{ +#ifdef CONFIG_HPET_NOMMAP + return -ENOSYS; +#else + struct hpet_dev *devp; + unsigned long addr; + + if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) + return -EINVAL; + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + devp = file->private_data; + addr = (unsigned long)devp->hd_hpet; + + if (addr & (PAGE_SIZE - 1)) + return -ENOSYS; + + vma->vm_flags |= VM_IO; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + addr = __pa(addr); + + if (remap_page_range + (vma, vma->vm_start, addr, PAGE_SIZE, vma->vm_page_prot)) { + printk(KERN_ERR "remap_page_range failed in hpet.c\n"); + return -EAGAIN; + } + + return 0; +#endif +} + +static int hpet_fasync(int fd, struct file *file, int on) +{ + struct hpet_dev *devp; + + devp = file->private_data; + + if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0) + return 0; + else + return -EIO; +} + +static 
int hpet_release(struct inode *inode, struct file *file) +{ + struct hpet_dev *devp; + struct hpet_timer *timer; + int irq = 0; + + devp = file->private_data; + timer = devp->hd_timer; + + spin_lock_irq(&hpet_lock); + + writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK), + &timer->hpet_config); + + irq = devp->hd_irq; + devp->hd_irq = 0; + + devp->hd_ireqfreq = 0; + + if (devp->hd_flags & HPET_PERIODIC + && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) { + unsigned long v; + + v = readq(&timer->hpet_config); + v ^= Tn_TYPE_CNF_MASK; + writeq(v, &timer->hpet_config); + } + + devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC); + spin_unlock_irq(&hpet_lock); + + if (irq) + free_irq(irq, devp); + + if (file->f_flags & FASYNC) + hpet_fasync(-1, file, 0); + + file->private_data = 0; + return 0; +} + +static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int); + +static int +hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct hpet_dev *devp; + + devp = file->private_data; + return hpet_ioctl_common(devp, cmd, arg, 0); +} + +static int hpet_ioctl_ieon(struct hpet_dev *devp) +{ + struct hpet_timer *timer; + struct hpet *hpet; + struct hpets *hpetp; + int irq; + unsigned long g, v, t, m; + unsigned long flags, isr; + + timer = devp->hd_timer; + hpet = devp->hd_hpet; + hpetp = devp->hd_hpets; + + v = readq(&timer->hpet_config); + spin_lock_irq(&hpet_lock); + + if (devp->hd_flags & HPET_IE) { + spin_unlock_irq(&hpet_lock); + return -EBUSY; + } + + devp->hd_flags |= HPET_IE; + spin_unlock_irq(&hpet_lock); + + t = readq(&timer->hpet_config); + irq = devp->hd_hdwirq; + + if (irq) { + char name[7]; + + sprintf(name, "hpet%d", (int)(devp - hpetp->hp_dev)); + + if (request_irq + (irq, hpet_interrupt, SA_INTERRUPT, name, (void *)devp)) { + printk(KERN_ERR "hpet: IRQ %d is not free\n", irq); + irq = 0; + } + } + + if (irq == 0) { + spin_lock_irq(&hpet_lock); + devp->hd_flags ^= HPET_IE; + spin_unlock_irq(&hpet_lock); + return -EIO; + } + + devp->hd_irq = irq; + t = devp->hd_ireqfreq; + v = readq(&timer->hpet_config); + g = v | Tn_INT_ENB_CNF_MASK; + + if (devp->hd_flags & HPET_PERIODIC) { + write_counter(t, &timer->hpet_compare); + g |= Tn_TYPE_CNF_MASK; + v |= Tn_TYPE_CNF_MASK; + writeq(v, &timer->hpet_config); + v |= Tn_VAL_SET_CNF_MASK; + writeq(v, &timer->hpet_config); + local_irq_save(flags); + m = read_counter(&hpet->hpet_mc); + write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); + } else { + local_irq_save(flags); + m = read_counter(&hpet->hpet_mc); + write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); + } + + isr = (1 << (devp - hpets->hp_dev)); + writeq(isr, &hpet->hpet_isr); + writeq(g, &timer->hpet_config); + local_irq_restore(flags); + + return 0; +} + +static inline unsigned long hpet_time_div(unsigned long dis) +{ + unsigned long long m = 1000000000000000ULL; + + do_div(m, dis); + + return (unsigned long)m; +} + +static int +hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel) +{ + struct hpet_timer *timer; + struct hpet *hpet; + struct hpets *hpetp; + int err; + unsigned long v; + + switch (cmd) { + case HPET_IE_OFF: + case HPET_INFO: + case HPET_EPI: + case HPET_DPI: + case HPET_IRQFREQ: + timer = devp->hd_timer; + hpet = devp->hd_hpet; + hpetp = devp->hd_hpets; + break; + case HPET_IE_ON: + return hpet_ioctl_ieon(devp); + default: + return -EINVAL; + } + + err = 0; + + switch (cmd) { + case HPET_IE_OFF: + if ((devp->hd_flags & HPET_IE) == 0) + break; + v = 
readq(&timer->hpet_config); + v &= ~Tn_INT_ENB_CNF_MASK; + writeq(v, &timer->hpet_config); + if (devp->hd_irq) { + free_irq(devp->hd_irq, devp); + devp->hd_irq = 0; + } + devp->hd_flags ^= HPET_IE; + break; + case HPET_INFO: + { + struct hpet_info info; + + info.hi_ireqfreq = hpet_time_div(hpetp->hp_period * + devp->hd_ireqfreq); + info.hi_flags = + readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK; + info.hi_hpet = devp->hd_hpets->hp_which; + info.hi_timer = devp - devp->hd_hpets->hp_dev; + if (copy_to_user((void *)arg, &info, sizeof(info))) + err = -EFAULT; + break; + } + case HPET_EPI: + v = readq(&timer->hpet_config); + if ((v & Tn_PER_INT_CAP_MASK) == 0) { + err = -ENXIO; + break; + } + devp->hd_flags |= HPET_PERIODIC; + break; + case HPET_DPI: + v = readq(&timer->hpet_config); + if ((v & Tn_PER_INT_CAP_MASK) == 0) { + err = -ENXIO; + break; + } + if (devp->hd_flags & HPET_PERIODIC && + readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) { + v = readq(&timer->hpet_config); + v ^= Tn_TYPE_CNF_MASK; + writeq(v, &timer->hpet_config); + } + devp->hd_flags &= ~HPET_PERIODIC; + break; + case HPET_IRQFREQ: + if (!kernel && (arg > hpet_max_freq) && + !capable(CAP_SYS_RESOURCE)) { + err = -EACCES; + break; + } + + if (arg & (arg - 1)) { + err = -EINVAL; + break; + } + + devp->hd_ireqfreq = hpet_time_div(hpetp->hp_period * arg); + } + + return err; +} + +static struct file_operations hpet_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = hpet_read, + .poll = hpet_poll, + .ioctl = hpet_ioctl, + .open = hpet_open, + .release = hpet_release, + .fasync = hpet_fasync, + .mmap = hpet_mmap, +}; + +EXPORT_SYMBOL(hpet_alloc); +EXPORT_SYMBOL(hpet_register); +EXPORT_SYMBOL(hpet_unregister); +EXPORT_SYMBOL(hpet_control); + +int hpet_register(struct hpet_task *tp, int periodic) +{ + unsigned int i; + u64 mask; + struct hpet_timer *timer; + struct hpet_dev *devp; + struct hpets *hpetp; + + switch (periodic) { + case 1: + mask = Tn_PER_INT_CAP_MASK; + break; + case 0: + mask = 0; + break; + default: + return -EINVAL; + } + + spin_lock_irq(&hpet_task_lock); + spin_lock(&hpet_lock); + + for (devp = 0, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) + for (timer = hpetp->hp_hpet->hpet_timers, i = 0; + i < hpetp->hp_ntimer; i++, timer++) { + if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK) + != mask) + continue; + + devp = &hpetp->hp_dev[i]; + + if (devp->hd_flags & HPET_OPEN || devp->hd_task) { + devp = 0; + continue; + } + + tp->ht_opaque = devp; + devp->hd_task = tp; + break; + } + + spin_unlock(&hpet_lock); + spin_unlock_irq(&hpet_task_lock); + + if (tp->ht_opaque) + return 0; + else + return -EBUSY; +} + +static inline int hpet_tpcheck(struct hpet_task *tp) +{ + struct hpet_dev *devp; + struct hpets *hpetp; + + devp = tp->ht_opaque; + + if (!devp) + return -ENXIO; + + for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) + if (devp >= hpetp->hp_dev + && devp < (hpetp->hp_dev + hpetp->hp_ntimer) + && devp->hd_hpet == hpetp->hp_hpet) + return 0; + + return -ENXIO; +} + +int hpet_unregister(struct hpet_task *tp) +{ + struct hpet_dev *devp; + struct hpet_timer *timer; + int err; + + if ((err = hpet_tpcheck(tp))) + return err; + + spin_lock_irq(&hpet_task_lock); + spin_lock(&hpet_lock); + + devp = tp->ht_opaque; + if (devp->hd_task != tp) { + spin_unlock(&hpet_lock); + spin_unlock_irq(&hpet_task_lock); + return -ENXIO; + } + + timer = devp->hd_timer; + writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK), + &timer->hpet_config); + devp->hd_flags &= ~(HPET_IE | HPET_PERIODIC); + 
devp->hd_task = 0; + spin_unlock(&hpet_lock); + spin_unlock_irq(&hpet_task_lock); + + return 0; +} + +int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg) +{ + struct hpet_dev *devp; + int err; + + if ((err = hpet_tpcheck(tp))) + return err; + + spin_lock_irq(&hpet_lock); + devp = tp->ht_opaque; + if (devp->hd_task != tp) { + spin_unlock_irq(&hpet_lock); + return -ENXIO; + } + spin_unlock_irq(&hpet_lock); + return hpet_ioctl_common(devp, cmd, arg, 1); +} + +#ifdef CONFIG_TIME_INTERPOLATION + +static unsigned long hpet_offset, last_wall_hpet; +static long hpet_nsecs_per_cycle, hpet_cycles_per_sec; + +static unsigned long hpet_getoffset(void) +{ + return hpet_offset + (read_counter(&hpets->hp_hpet->hpet_mc) - + last_wall_hpet) * hpet_nsecs_per_cycle; +} + +static void hpet_update(long delta) +{ + unsigned long mc; + unsigned long offset; + + mc = read_counter(&hpets->hp_hpet->hpet_mc); + offset = hpet_offset + (mc - last_wall_hpet) * hpet_nsecs_per_cycle; + + if (delta < 0 || (unsigned long)delta < offset) + hpet_offset = offset - delta; + else + hpet_offset = 0; + last_wall_hpet = mc; +} + +static void hpet_reset(void) +{ + hpet_offset = 0; + last_wall_hpet = read_counter(&hpets->hp_hpet->hpet_mc); +} + +static struct time_interpolator hpet_interpolator = { + .get_offset = hpet_getoffset, + .update = hpet_update, + .reset = hpet_reset +}; + +#endif + +static ctl_table hpet_table[] = { + { + .ctl_name = 1, + .procname = "max-user-freq", + .data = &hpet_max_freq, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + {.ctl_name = 0} +}; + +static ctl_table hpet_root[] = { + { + .ctl_name = 1, + .procname = "hpet", + .maxlen = 0, + .mode = 0555, + .child = hpet_table, + }, + {.ctl_name = 0} +}; + +static ctl_table dev_root[] = { + { + .ctl_name = CTL_DEV, + .procname = "dev", + .maxlen = 0, + .mode = 0555, + .child = hpet_root, + }, + {.ctl_name = 0} +}; + +static struct ctl_table_header *sysctl_header; + +static void *hpet_start(struct seq_file *s, loff_t * pos) +{ + struct hpets *hpetp; + loff_t n; + + for (n = *pos, hpetp = hpets; hpetp; hpetp = hpetp->hp_next) + if (!n--) + return hpetp; + + return 0; +} + +static void *hpet_next(struct seq_file *s, void *v, loff_t * pos) +{ + struct hpets *hpetp; + + hpetp = v; + ++*pos; + return hpetp->hp_next; +} + +static void hpet_stop(struct seq_file *s, void *v) +{ + return; +} + +static int hpet_show(struct seq_file *s, void *v) +{ + struct hpets *hpetp; + struct hpet *hpet; + u64 cap, vendor, period; + + hpetp = v; + hpet = hpetp->hp_hpet; + + cap = readq(&hpet->hpet_cap); + period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >> + HPET_COUNTER_CLK_PERIOD_SHIFT; + vendor = (cap & HPET_VENDOR_ID_MASK) >> HPET_VENDOR_ID_SHIFT; + + seq_printf(s, + "HPET%d period = %d 10**-15 vendor = 0x%x number timer = %d\n", + hpetp->hp_which, (u32) period, (u32) vendor, + hpetp->hp_ntimer); + + return 0; +} + +static struct seq_operations hpet_seq_ops = { + .start = hpet_start, + .next = hpet_next, + .stop = hpet_stop, + .show = hpet_show +}; + +static int hpet_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &hpet_seq_ops); +} + +static struct file_operations hpet_proc_fops = { + .open = hpet_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +/* + * Adjustment for when arming the timer with + * initial conditions. That is, main counter + * ticks expired before interrupts are enabled. 
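+ * hpet_calibrate() below measures this slack by timing a read/re-arm loop + * against the main counter; the per-iteration average is stored in hp_delta + * and added whenever a comparator is armed.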
+ */ +#define TICK_CALIBRATE (1000UL) + +static unsigned long __init hpet_calibrate(struct hpets *hpetp) +{ + struct hpet_timer *timer; + unsigned long t, m, count, i, flags, start; + struct hpet_dev *devp; + int j; + struct hpet *hpet; + + for (timer = 0, j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; + j++, devp++) + if ((devp->hd_flags & HPET_OPEN) == 0) { + timer = devp->hd_timer; + break; + } + + if (!timer) + return 0; + + hpet = hpets->hp_hpet; + t = read_counter(&timer->hpet_compare); + + i = 0; + count = hpet_time_div(hpetp->hp_period * TICK_CALIBRATE); + + local_irq_save(flags); + + start = read_counter(&hpet->hpet_mc); + + do { + m = read_counter(&hpet->hpet_mc); + write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); + } while (i++, (m - start) < count); + + local_irq_restore(flags); + + return (m - start) / i; +} + +int __init hpet_alloc(struct hpet_data *hdp) +{ + u64 cap, mcfg; + struct hpet_dev *devp; + u32 i, ntimer; + struct hpets *hpetp; + size_t siz; + struct hpet *hpet; + static struct hpets *last __initdata = (struct hpets *)0; + + /* + * hpet_alloc can be called by platform dependent code. + * if platform dependent code has allocated the hpet + * ACPI also reports hpet, then we catch it here. + */ + for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) + if (hpetp->hp_hpet == (struct hpet *)(hdp->hd_address)) + return 0; + + siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) * + sizeof(struct hpet_dev)); + + hpetp = kmalloc(siz, GFP_KERNEL); + + if (!hpetp) + return -ENOMEM; + + memset(hpetp, 0, siz); + + hpetp->hp_which = hpet_nhpet++; + hpetp->hp_hpet = (struct hpet *)hdp->hd_address; + + hpetp->hp_ntimer = hdp->hd_nirqs; + + for (i = 0; i < hdp->hd_nirqs; i++) + hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i]; + + hpet = hpetp->hp_hpet; + + cap = readq(&hpet->hpet_cap); + + ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1; + + if (hpetp->hp_ntimer != ntimer) { + printk(KERN_WARNING "hpet: number irqs doesn't agree" + " with number of timers\n"); + kfree(hpetp); + return -ENODEV; + } + + if (last) + last->hp_next = hpetp; + else + hpets = hpetp; + + last = hpetp; + + hpetp->hp_period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >> + HPET_COUNTER_CLK_PERIOD_SHIFT; + + mcfg = readq(&hpet->hpet_config); + if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) { + write_counter(0L, &hpet->hpet_mc); + mcfg |= HPET_ENABLE_CNF_MASK; + writeq(mcfg, &hpet->hpet_config); + } + + for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; + i++, hpet_ntimer++, devp++) { + unsigned long v; + struct hpet_timer *timer; + + timer = &hpet->hpet_timers[devp - hpetp->hp_dev]; + v = readq(&timer->hpet_config); + + devp->hd_hpets = hpetp; + devp->hd_hpet = hpet; + devp->hd_timer = timer; + + /* + * If the timer was reserved by platform code, + * then make timer unavailable for opens. 
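+ * Such timers are flagged HPET_OPEN permanently and never get a wait queue, + * so hpet_open() and hpet_register() skip them.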
+ */ + if (hdp->hd_state & (1 << i)) { + devp->hd_flags = HPET_OPEN; + continue; + } + + init_waitqueue_head(&devp->hd_waitqueue); + } + + hpetp->hp_delta = hpet_calibrate(hpetp); + + return 0; +} + +static acpi_status __init hpet_resources(struct acpi_resource *res, void *data) +{ + struct hpet_data *hdp; + acpi_status status; + struct acpi_resource_address64 addr; + struct hpets *hpetp; + + hdp = data; + + status = acpi_resource_to_address64(res, &addr); + + if (ACPI_SUCCESS(status)) { + unsigned long size; + + size = addr.max_address_range - addr.min_address_range + 1; + hdp->hd_address = + (unsigned long)ioremap(addr.min_address_range, size); + + for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) + if (hpetp->hp_hpet == (struct hpet *)(hdp->hd_address)) + return -EBUSY; + } else if (res->id == ACPI_RSTYPE_EXT_IRQ) { + struct acpi_resource_ext_irq *irqp; + int i; + + irqp = &res->data.extended_irq; + + if (irqp->number_of_interrupts > 0) { + hdp->hd_nirqs = irqp->number_of_interrupts; + + for (i = 0; i < hdp->hd_nirqs; i++) +#ifdef CONFIG_IA64 + hdp->hd_irq[i] = + acpi_register_gsi(irqp->interrupts[i], + irqp->edge_level, + irqp->active_high_low); +#else + hdp->hd_irq[i] = irqp->interrupts[i]; +#endif + } + } + + return AE_OK; +} + +static int __init hpet_acpi_add(struct acpi_device *device) +{ + acpi_status result; + struct hpet_data data; + + memset(&data, 0, sizeof(data)); + + result = + acpi_walk_resources(device->handle, METHOD_NAME__CRS, + hpet_resources, &data); + + if (ACPI_FAILURE(result)) + return -ENODEV; + + if (!data.hd_address || !data.hd_nirqs) { + printk("%s: no address or irqs in _CRS\n", __FUNCTION__); + return -ENODEV; + } + + return hpet_alloc(&data); +} + +static int __init hpet_acpi_remove(struct acpi_device *device, int type) +{ + return 0; +} + +static struct acpi_driver hpet_acpi_driver __initdata = { + .name = "hpet", + .class = "", + .ids = "PNP0103", + .ops = { + .add = hpet_acpi_add, + .remove = hpet_acpi_remove, + }, +}; + +static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops }; + +static int __init hpet_init(void) +{ + struct proc_dir_entry *entry; + + (void)acpi_bus_register_driver(&hpet_acpi_driver); + + if (hpets) { + if (misc_register(&hpet_misc)) + return -ENODEV; + + entry = create_proc_entry("driver/hpet", 0, 0); + + if (entry) + entry->proc_fops = &hpet_proc_fops; + + sysctl_header = register_sysctl_table(dev_root, 0); + +#ifdef CONFIG_TIME_INTERPOLATION + { + struct hpet *hpet; + + hpet = hpets->hp_hpet; + hpet_cycles_per_sec = hpet_time_div(hpets->hp_period); + hpet_interpolator.frequency = hpet_cycles_per_sec; + hpet_interpolator.drift = hpet_cycles_per_sec * + HPET_DRIFT / 1000000; + hpet_nsecs_per_cycle = 1000000000 / hpet_cycles_per_sec; + register_time_interpolator(&hpet_interpolator); + } +#endif + return 0; + } else + return -ENODEV; +} + +static void __exit hpet_exit(void) +{ + acpi_bus_unregister_driver(&hpet_acpi_driver); + + if (hpets) { + unregister_sysctl_table(sysctl_header); + remove_proc_entry("driver/hpet", NULL); + } + + return; +} + +module_init(hpet_init); +module_exit(hpet_exit); +MODULE_AUTHOR("Bob Picco "); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c new file mode 100644 index 000000000..96ca634ca --- /dev/null +++ b/drivers/char/hvcs.c @@ -0,0 +1,1579 @@ +/* + * IBM eServer Hypervisor Virtual Console Server Device Driver + * Copyright (C) 2003, 2004 IBM Corp. + * Ryan S. 
Arnold (rsa@us.ibm.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Author(s) : Ryan S. Arnold + * + * This is the device driver for the IBM Hypervisor Virtual Console Server, + * "hvcs". The IBM hvcs provides a tty driver interface to allow Linux + * user space applications access to the system consoles of logically + * partitioned operating systems, e.g. Linux, running on the same partitioned + * Power5 ppc64 system. Physical hardware consoles per partition are not + * practical on this hardware so system consoles are accessed by this driver + * using inter-partition firmware interfaces to virtual terminal devices. + * + * A vty is known to the HMC as a "virtual serial server adapter". It is a + * virtual terminal device that is created by firmware upon partition creation + * to act as a partitioned OS's console device. + * + * Firmware dynamically (via hotplug) exposes vty-servers to a running ppc64 + * Linux system upon their creation by the HMC or their exposure during boot. + * The non-user interactive backend of this driver is implemented as a vio + * device driver so that it can receive notification of vty-server lifetimes + * after it registers with the vio bus to handle vty-server probe and remove + * callbacks. + * + * Many vty-servers can be configured to connect to one vty, but a vty can + * only be actively connected to by a single vty-server, in any manner, at one + * time. If the HMC is currently hosting the console for a target Linux + * partition; attempts to open the tty device to the partition's console using + * the hvcs on any partition will return -EBUSY with every open attempt until + * the HMC frees the connection between its vty-server and the desired + * partition's vty device. Conversely, a vty-server may only be connected to + * a single vty at one time even though it may have several configured vty + * partner possibilities. + * + * Firmware does not provide notification of vty partner changes to this + * driver. This means that an HMC Super Admin may add or remove partner vtys + * from a vty-server's partner list but the changes will not be signaled to + * the vty-server. Firmware only notifies the driver when a vty-server is + * added or removed from the system. To compensate for this deficiency, this + * driver implements a sysfs update attribute which provides a method for + * rescanning partner information upon a user's request. + * + * Each vty-server, prior to being exposed to this driver is reference counted + * using the 2.6 Linux kernel kobject construct. This kobject is also used by + * the vio bus to provide a vio device sysfs entry that this driver attaches + * device specific attributes to, including partner information. The vio bus + * framework also provides a sysfs entry for each vio driver. The hvcs driver + * provides driver attributes in this entry. 
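+ *
+ * As a rough illustration of how those sysfs pieces fit together (the
+ * exact paths depend on the vio bus layout of a particular system), an
+ * administrator could refresh partner information and then check a
+ * vty-server's connection state with something like:
+ *
+ *	echo 1 > /sys/bus/vio/drivers/hvcs/rescan
+ *	cat /sys/bus/vio/devices/<vty-server>/vterm_state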
+ * + * For direction on installation and usage of this driver please reference + * Documentation/powerpc/hvcs.txt. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * 1.0.0 -> 1.1.0 Added kernel_thread scheduling methodology to driver to + * replace wait_task constructs. + * + * 1.1.0 -> 1.2.0 Moved pi_buff initialization out of arch code into driver code + * and added locking to share this buffer between hvcs_struct instances. This + * is because the page_size kmalloc can't be done with a spin_lock held. + * + * Also added sysfs attribute to manually disconnect the vty-server from the vty + * due to stupid firmware behavior when opening the connection then sending data + * then then quickly closing the connection would cause data loss on the + * receiving side. This required some reordering of the termination code. + * + * Fixed the hangup scenario and fixed memory leaks on module_exit. + * + * 1.2.0 -> 1.3.0 Moved from manual kernel thread creation & execution to + * kthread construct which replaced in-kernel IPC for thread termination with + * kthread_stop and kthread_should_stop. Explicit wait_queue handling was + * removed because kthread handles this. Minor bug fix to postpone partner_info + * clearing on hvcs_close until adapter removal to preserve context data for + * printk on partner connection free. Added lock to protect hvcs_structs so + * that hvcs_struct instances aren't added or removed during list traversal. + * Cleaned up comment style, added spaces after commas, and broke function + * declaration lines to be under 80 columns. + */ +#define HVCS_DRIVER_VERSION "1.3.0" + +MODULE_AUTHOR("Ryan S. Arnold "); +MODULE_DESCRIPTION("IBM hvcs (Hypervisor Virtual Console Server) Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(HVCS_DRIVER_VERSION); + +/* + * Since the Linux TTY code does not currently (2-04-2004) support dynamic + * addition of tty derived devices and we shouldn't allocate thousands of + * tty_device pointers when the number of vty-server & vty partner connections + * will most often be much lower than this, we'll arbitrarily allocate + * HVCS_DEFAULT_SERVER_ADAPTERS tty_structs and cdev's by default when we + * register the tty_driver. This can be overridden using an insmod parameter. + */ +#define HVCS_DEFAULT_SERVER_ADAPTERS 64 + +/* + * The user can't insmod with more than HVCS_MAX_SERVER_ADAPTERS hvcs device + * nodes as a sanity check. Theoretically there can be over 1 Billion + * vty-server & vty partner connections. + */ +#define HVCS_MAX_SERVER_ADAPTERS 1024 + +/* + * We let Linux assign us a major number and we start the minors at zero. There + * is no intuitive mapping between minor number and the target partition. The + * mapping of minor number is related to the order the vty-servers are exposed + * to this driver via the hvcs_probe function. + */ +#define HVCS_MINOR_START 0 + +/* + * The hcall interface involves putting 8 chars into each of two registers. + * We load up those 2 registers (in arch/ppc64/hvconsole.c) by casting char[16] + * to long[2]. It would work without __ALIGNED__, but a little (tiny) bit + * slower because an unaligned load is slower than aligned load. + */ +#define __ALIGNED__ __attribute__((__aligned__(8))) + +/* Converged location code string length + 1 null terminator */ +#define CLC_LENGTH 80 + +/* + * How much data can firmware send with each hvc_put_chars()? 
Maybe this + * should be moved into an architecture specific area. + */ +#define HVCS_BUFF_LEN 16 + +/* + * This is the maximum amount of data we'll let the user send us (hvcs_write) at + * once in a chunk as a sanity check. + */ +#define HVCS_MAX_FROM_USER 4096 + +/* + * Be careful when adding flags to this line discipline. Don't add anything + * that will cause echoing or we'll go into recursive loop echoing chars back + * and forth with the console drivers. + */ +static struct termios hvcs_tty_termios = { + .c_iflag = IGNBRK | IGNPAR, + .c_oflag = OPOST, + .c_cflag = B38400 | CS8 | CREAD | HUPCL, + .c_cc = INIT_C_CC +}; + +/* + * This value is used to take the place of a command line parameter when the + * module is inserted. It starts as -1 and stays as such if the user doesn't + * specify a module insmod parameter. If they DO specify one then it is set to + * the value of the integer passed in. + */ +static int hvcs_parm_num_devs = -1; +module_param(hvcs_parm_num_devs, int, 0); + +char hvcs_driver_name[] = "hvcs"; +char hvcs_device_node[] = "hvcs"; +char hvcs_driver_string[] + = "IBM hvcs (Hypervisor Virtual Console Server) Driver"; + +/* Status of partner info rescan triggered via sysfs. */ +static int hvcs_rescan_status = 0; + +static struct tty_driver *hvcs_tty_driver; + +/* + * This is used to associate a vty-server, as it is exposed to this driver, with + * a preallocated tty_struct.index. The dev node and hvcs index numbers are not + * re-used after device removal otherwise removing and adding a new one would + * link a /dev/hvcs* entry to a different vty-server than it did before the + * removal. Incidentally, a newly exposed vty-server will always map to an + * incrementally higher /dev/hvcs* entry than the last exposed vty-server. + */ +static int hvcs_struct_count = -1; + +/* + * Used by the khvcsd to pick up I/O operations when the kernel_thread is + * already awake but potentially shifted to TASK_INTERRUPTIBLE state. + */ +static int hvcs_kicked = 0; + +/* Used the the kthread construct for task operations */ +static struct task_struct *hvcs_task; + +/* + * We allocate this for the use of all of the hvcs_structs when they fetch + * partner info. + */ +static unsigned long *hvcs_pi_buff; + +static spinlock_t hvcs_pi_lock; + +/* One vty-server per hvcs_struct */ +struct hvcs_struct { + spinlock_t lock; + + /* + * This index identifies this hvcs device as the complement to a + * specific tty index. + */ + unsigned int index; + + struct tty_struct *tty; + unsigned int open_count; + + /* + * Used to tell the driver kernel_thread what operations need to take + * place upon this hvcs_struct instance. + */ + int todo_mask; + + /* + * This buffer is required so that when hvcs_write_room() reports that + * it can send HVCS_BUFF_LEN characters that it will buffer the full + * HVCS_BUFF_LEN characters if need be. This is essential for opost + * writes since they do not do high level buffering and expect to be + * able to send what the driver commits to sending buffering + * [e.g. tab to space conversions in n_tty.c opost()]. + */ + char buffer[HVCS_BUFF_LEN]; + int chars_in_buffer; + + /* + * Any variable below the kobject is valid before a tty is connected and + * stays valid after the tty is disconnected. These shouldn't be + * whacked until the koject refcount reaches zero though some entries + * may be changed via sysfs initiatives. + */ + struct kobject kobj; /* ref count & hvcs_struct lifetime */ + int connected; /* is the vty-server currently connected to a vty? 
*/ + unsigned int p_unit_address; /* partner unit address */ + unsigned int p_partition_ID; /* partner partition ID */ + char p_location_code[CLC_LENGTH]; + struct list_head next; /* list management */ + struct vio_dev *vdev; +}; + +/* Required to back map a kobject to its containing object */ +#define from_kobj(kobj) container_of(kobj, struct hvcs_struct, kobj) + +static struct list_head hvcs_structs = LIST_HEAD_INIT(hvcs_structs); +static spinlock_t hvcs_structs_lock; + +static void hvcs_unthrottle(struct tty_struct *tty); +static void hvcs_throttle(struct tty_struct *tty); +static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance, + struct pt_regs *regs); + +static int hvcs_write(struct tty_struct *tty, int from_user, + const unsigned char *buf, int count); +static int hvcs_write_room(struct tty_struct *tty); +static int hvcs_chars_in_buffer(struct tty_struct *tty); + +static int hvcs_has_pi(struct hvcs_struct *hvcsd); +static void hvcs_set_pi(struct hvcs_partner_info *pi, + struct hvcs_struct *hvcsd); +static int hvcs_get_pi(struct hvcs_struct *hvcsd); +static int hvcs_rescan_devices_list(void); + +static int hvcs_partner_connect(struct hvcs_struct *hvcsd); +static void hvcs_partner_free(struct hvcs_struct *hvcsd); + +static int hvcs_enable_device(struct hvcs_struct *hvcsd, + uint32_t unit_address, unsigned int irq, struct vio_dev *dev); +static void hvcs_final_close(struct hvcs_struct *hvcsd); + +static void destroy_hvcs_struct(struct kobject *kobj); +static int hvcs_open(struct tty_struct *tty, struct file *filp); +static void hvcs_close(struct tty_struct *tty, struct file *filp); +static void hvcs_hangup(struct tty_struct * tty); + +static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd); +static void hvcs_remove_device_attrs(struct vio_dev *vdev); +static void hvcs_create_driver_attrs(void); +static void hvcs_remove_driver_attrs(void); + +static int __devinit hvcs_probe(struct vio_dev *dev, + const struct vio_device_id *id); +static int __devexit hvcs_remove(struct vio_dev *dev); +static int __init hvcs_module_init(void); +static void __exit hvcs_module_exit(void); + +#define HVCS_SCHED_READ 0x00000001 +#define HVCS_QUICK_READ 0x00000002 +#define HVCS_TRY_WRITE 0x00000004 +#define HVCS_READ_MASK (HVCS_SCHED_READ | HVCS_QUICK_READ) + +static void hvcs_kick(void) +{ + hvcs_kicked = 1; + wmb(); + wake_up_process(hvcs_task); +} + +static void hvcs_unthrottle(struct tty_struct *tty) +{ + struct hvcs_struct *hvcsd = tty->driver_data; + unsigned long flags; + + spin_lock_irqsave(&hvcsd->lock, flags); + hvcsd->todo_mask |= HVCS_SCHED_READ; + spin_unlock_irqrestore(&hvcsd->lock, flags); + hvcs_kick(); +} + +static void hvcs_throttle(struct tty_struct *tty) +{ + struct hvcs_struct *hvcsd = tty->driver_data; + unsigned long flags; + + spin_lock_irqsave(&hvcsd->lock, flags); + vio_disable_interrupts(hvcsd->vdev); + spin_unlock_irqrestore(&hvcsd->lock, flags); +} + +/* + * If the device is being removed we don't have to worry about this interrupt + * handler taking any further interrupts because they are disabled which means + * the hvcs_struct will always be valid in this handler. 
+ */
+static irqreturn_t hvcs_handle_interrupt(int irq, void *dev_instance,
+		struct pt_regs *regs)
+{
+	struct hvcs_struct *hvcsd = dev_instance;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	vio_disable_interrupts(hvcsd->vdev);
+	hvcsd->todo_mask |= HVCS_SCHED_READ;
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	hvcs_kick();
+
+	return IRQ_HANDLED;
+}
+
+/* This function must be called with the hvcsd->lock held */
+static void hvcs_try_write(struct hvcs_struct *hvcsd)
+{
+	unsigned int unit_address = hvcsd->vdev->unit_address;
+	struct tty_struct *tty = hvcsd->tty;
+	int sent;
+
+	if (hvcsd->todo_mask & HVCS_TRY_WRITE) {
+		/* won't send partial writes */
+		sent = hvc_put_chars(unit_address,
+				&hvcsd->buffer[0],
+				hvcsd->chars_in_buffer );
+		if (sent > 0) {
+			hvcsd->chars_in_buffer = 0;
+			wmb();
+			hvcsd->todo_mask &= ~(HVCS_TRY_WRITE);
+			wmb();
+
+			/*
+			 * We are still obligated to deliver the data to the
+			 * hypervisor even if the tty has been closed because
+			 * we committed to delivering it. But don't try to
+			 * wake a non-existent tty.
+			 */
+			if (tty) {
+				if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP))
+						&& tty->ldisc.write_wakeup)
+					(tty->ldisc.write_wakeup) (tty);
+				wake_up_interruptible(&tty->write_wait);
+			}
+		}
+	}
+}
+
+static int hvcs_io(struct hvcs_struct *hvcsd)
+{
+	unsigned int unit_address;
+	struct tty_struct *tty;
+	char buf[HVCS_BUFF_LEN] __ALIGNED__;
+	unsigned long flags;
+	int got;
+	int i;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+
+	unit_address = hvcsd->vdev->unit_address;
+	tty = hvcsd->tty;
+
+	hvcs_try_write(hvcsd);
+
+	if (!tty || test_bit(TTY_THROTTLED, &tty->flags)) {
+		hvcsd->todo_mask &= ~(HVCS_READ_MASK);
+		goto bail;
+	} else if (!(hvcsd->todo_mask & (HVCS_READ_MASK)))
+		goto bail;
+
+	/* remove the read masks */
+	hvcsd->todo_mask &= ~(HVCS_READ_MASK);
+
+	if ((tty->flip.count + HVCS_BUFF_LEN) < TTY_FLIPBUF_SIZE) {
+		got = hvc_get_chars(unit_address,
+				&buf[0],
+				HVCS_BUFF_LEN);
+		for (i=0;got && i<got;i++)
+			tty_insert_flip_char(tty, buf[i], TTY_NORMAL);
+	}
+
+	/* Give the TTY time to process the data we just sent. */
+	if (got)
+		hvcsd->todo_mask |= HVCS_QUICK_READ;
+
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	if (tty->flip.count) {
+		/* This is synch because tty->low_latency == 1 */
+		tty_flip_buffer_push(tty);
+	}
+
+	if (!got) {
+		/* Do this _after_ the flip_buffer_push */
+		spin_lock_irqsave(&hvcsd->lock, flags);
+		vio_enable_interrupts(hvcsd->vdev);
+		spin_unlock_irqrestore(&hvcsd->lock, flags);
+	}
+
+	return hvcsd->todo_mask;
+
+ bail:
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return hvcsd->todo_mask;
+}
+
+static int khvcsd(void *unused)
+{
+	struct hvcs_struct *hvcsd = NULL;
+	struct list_head *element;
+	struct list_head *safe_temp;
+	int hvcs_todo_mask;
+	unsigned long structs_flags;
+
+	__set_current_state(TASK_RUNNING);
+
+	do {
+		hvcs_todo_mask = 0;
+		hvcs_kicked = 0;
+		wmb();
+
+		spin_lock_irqsave(&hvcs_structs_lock, structs_flags);
+		list_for_each_safe(element, safe_temp, &hvcs_structs) {
+			hvcsd = list_entry(element, struct hvcs_struct, next);
+			hvcs_todo_mask |= hvcs_io(hvcsd);
+		}
+		spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags);
+
+		/*
+		 * If any of the hvcs adapters want to try a write or quick read
+		 * don't schedule(), yield a smidgen then execute the hvcs_io
+		 * thread again for those that want the write.
+ */ + if (hvcs_todo_mask & (HVCS_TRY_WRITE | HVCS_QUICK_READ)) { + yield(); + continue; + } + + set_current_state(TASK_INTERRUPTIBLE); + if (!hvcs_kicked) + schedule(); + __set_current_state(TASK_RUNNING); + } while (!kthread_should_stop()); + + return 0; +} + +static struct vio_device_id hvcs_driver_table[] __devinitdata= { + {"serial-server", "hvterm2"}, + { 0, } +}; +MODULE_DEVICE_TABLE(vio, hvcs_driver_table); + +/* callback when the kboject ref count reaches zero */ +static void destroy_hvcs_struct(struct kobject *kobj) +{ + struct hvcs_struct *hvcsd = from_kobj(kobj); + struct vio_dev *vdev; + unsigned long flags; + + spin_lock_irqsave(&hvcsd->lock, flags); + + /* the list_del poisons the pointers */ + list_del(&(hvcsd->next)); + + if (hvcsd->connected == 1) { + hvcs_partner_free(hvcsd); + printk(KERN_INFO "HVCS: Closed vty-server@%X and" + " partner vty@%X:%d connection.\n", + hvcsd->vdev->unit_address, + hvcsd->p_unit_address, + (unsigned int)hvcsd->p_partition_ID); + } + printk(KERN_INFO "HVCS: Destroyed hvcs_struct for vty-server@%X.\n", + hvcsd->vdev->unit_address); + + vdev = hvcsd->vdev; + hvcsd->vdev = NULL; + + hvcsd->p_unit_address = 0; + hvcsd->p_partition_ID = 0; + memset(&hvcsd->p_location_code[0], 0x00, CLC_LENGTH); + + spin_unlock_irqrestore(&hvcsd->lock, flags); + + hvcs_remove_device_attrs(vdev); + + kfree(hvcsd); +} + +/* This function must be called with hvcsd->lock held. */ +static void hvcs_final_close(struct hvcs_struct *hvcsd) +{ + vio_disable_interrupts(hvcsd->vdev); + free_irq(hvcsd->vdev->irq, hvcsd); + + hvcsd->todo_mask = 0; + + /* These two may be redundant if the operation was a close. */ + if (hvcsd->tty) { + hvcsd->tty->driver_data = NULL; + hvcsd->tty = NULL; + } + + hvcsd->open_count = 0; + + memset(&hvcsd->buffer[0], 0x00, HVCS_BUFF_LEN); + hvcsd->chars_in_buffer = 0; +} + +static struct kobj_type hvcs_kobj_type = { + .release = destroy_hvcs_struct, +}; + +static int __devinit hvcs_probe( + struct vio_dev *dev, + const struct vio_device_id *id) +{ + struct hvcs_struct *hvcsd; + unsigned long structs_flags; + + if (!dev || !id) { + printk(KERN_ERR "HVCS: probed with invalid parameter.\n"); + return -EPERM; + } + + hvcsd = kmalloc(sizeof(*hvcsd), GFP_KERNEL); + if (!hvcsd) { + return -ENODEV; + } + + /* hvcsd->tty is zeroed out with the memset */ + memset(hvcsd, 0x00, sizeof(*hvcsd)); + + hvcsd->lock = SPIN_LOCK_UNLOCKED; + /* Automatically incs the refcount the first time */ + kobject_init(&hvcsd->kobj); + /* Set up the callback for terminating the hvcs_struct's life */ + hvcsd->kobj.ktype = &hvcs_kobj_type; + + hvcsd->vdev = dev; + dev->dev.driver_data = hvcsd; + + hvcsd->index = ++hvcs_struct_count; + hvcsd->chars_in_buffer = 0; + hvcsd->todo_mask = 0; + hvcsd->connected = 0; + + /* + * This will populate the hvcs_struct's partner info fields for the + * first time. + */ + if (hvcs_get_pi(hvcsd)) { + printk(KERN_ERR "HVCS: Failed to fetch partner" + " info for vty-server@%X on device probe.\n", + hvcsd->vdev->unit_address); + } + + /* + * If a user app opens a tty that corresponds to this vty-server before + * the hvcs_struct has been added to the devices list then the user app + * will get -ENODEV. 
+ */ + + spin_lock_irqsave(&hvcs_structs_lock, structs_flags); + + list_add_tail(&(hvcsd->next), &hvcs_structs); + + spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags); + + hvcs_create_device_attrs(hvcsd); + + printk(KERN_INFO "HVCS: Added vty-server@%X.\n", dev->unit_address); + + /* + * DON'T enable interrupts here because there is no user to receive the + * data. + */ + return 0; +} + +static int __devexit hvcs_remove(struct vio_dev *dev) +{ + struct hvcs_struct *hvcsd = dev->dev.driver_data; + unsigned long flags; + struct kobject *kobjp; + struct tty_struct *tty; + + if (!hvcsd) + return -ENODEV; + + /* By this time the vty-server won't be getting any more interrups */ + + spin_lock_irqsave(&hvcsd->lock, flags); + + tty = hvcsd->tty; + + kobjp = &hvcsd->kobj; + + spin_unlock_irqrestore(&hvcsd->lock, flags); + + /* + * Let the last holder of this object cause it to be removed, which + * would probably be tty_hangup below. + */ + kobject_put (kobjp); + + /* + * The hangup is a scheduled function which will auto chain call + * hvcs_hangup. The tty should always be valid at this time unless a + * simultaneous tty close already cleaned up the hvcs_struct. + */ + if (tty) + tty_hangup(tty); + + printk(KERN_INFO "HVCS: vty-server@%X removed from the" + " vio bus.\n", dev->unit_address); + return 0; +}; + +static struct vio_driver hvcs_vio_driver = { + .name = hvcs_driver_name, + .id_table = hvcs_driver_table, + .probe = hvcs_probe, + .remove = hvcs_remove, +}; + +/* Only called from hvcs_get_pi please */ +static void hvcs_set_pi(struct hvcs_partner_info *pi, struct hvcs_struct *hvcsd) +{ + int clclength; + + hvcsd->p_unit_address = pi->unit_address; + hvcsd->p_partition_ID = pi->partition_ID; + clclength = strlen(&pi->location_code[0]); + if (clclength > CLC_LENGTH - 1) + clclength = CLC_LENGTH - 1; + + /* copy the null-term char too */ + strncpy(&hvcsd->p_location_code[0], + &pi->location_code[0], clclength + 1); +} + +/* + * Traverse the list and add the partner info that is found to the hvcs_struct + * struct entry. NOTE: At this time I know that partner info will return a + * single entry but in the future there may be multiple partner info entries per + * vty-server and you'll want to zero out that list and reset it. If for some + * reason you have an old version of this driver but there IS more than one + * partner info then hvcsd->p_* will hold the last partner info data from the + * firmware query. A good way to update this code would be to replace the three + * partner info fields in hvcs_struct with a list of hvcs_partner_info + * instances. + * + * This function must be called with the hvcsd->lock held. 
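+ *
+ * A rough sketch of that list-based layout mentioned above (illustrative
+ * only, not what is implemented below):
+ *
+ *	struct hvcs_struct {
+ *		...
+ *		struct list_head partner_list;	/* of struct hvcs_partner_info */
+ *	};
+ *
+ * with hvcs_get_pi() emptying and repopulating partner_list on each query.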
+ */ +static int hvcs_get_pi(struct hvcs_struct *hvcsd) +{ + /* struct hvcs_partner_info *head_pi = NULL; */ + struct hvcs_partner_info *pi = NULL; + unsigned int unit_address = hvcsd->vdev->unit_address; + struct list_head head; + unsigned long flags; + int retval; + + spin_lock_irqsave(&hvcs_pi_lock, flags); + if (!hvcs_pi_buff) { + spin_unlock_irqrestore(&hvcs_pi_lock, flags); + return -EFAULT; + } + retval = hvcs_get_partner_info(unit_address, &head, hvcs_pi_buff); + spin_unlock_irqrestore(&hvcs_pi_lock, flags); + if (retval) { + printk(KERN_ERR "HVCS: Failed to fetch partner" + " info for vty-server@%x.\n", unit_address); + return retval; + } + + /* nixes the values if the partner vty went away */ + hvcsd->p_unit_address = 0; + hvcsd->p_partition_ID = 0; + + list_for_each_entry(pi, &head, node) + hvcs_set_pi(pi, hvcsd); + + hvcs_free_partner_info(&head); + return 0; +} + +/* + * This function is executed by the driver "rescan" sysfs entry. It shouldn't + * be executed elsewhere, in order to prevent deadlock issues. + */ +static int hvcs_rescan_devices_list(void) +{ + struct hvcs_struct *hvcsd = NULL; + unsigned long flags; + unsigned long structs_flags; + + spin_lock_irqsave(&hvcs_structs_lock, structs_flags); + + list_for_each_entry(hvcsd, &hvcs_structs, next) { + spin_lock_irqsave(&hvcsd->lock, flags); + hvcs_get_pi(hvcsd); + spin_unlock_irqrestore(&hvcsd->lock, flags); + } + + spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags); + + return 0; +} + +/* + * Farm this off into its own function because it could be more complex once + * multiple partners support is added. This function should be called with + * the hvcsd->lock held. + */ +static int hvcs_has_pi(struct hvcs_struct *hvcsd) +{ + if ((!hvcsd->p_unit_address) || (!hvcsd->p_partition_ID)) + return 0; + return 1; +} + +/* + * NOTE: It is possible that the super admin removed a partner vty and then + * added a different vty as the new partner. + * + * This function must be called with the hvcsd->lock held. + */ +static int hvcs_partner_connect(struct hvcs_struct *hvcsd) +{ + int retval; + unsigned int unit_address = hvcsd->vdev->unit_address; + + /* + * If there wasn't any pi when the device was added it doesn't meant + * there isn't any now. This driver isn't notified when a new partner + * vty is added to a vty-server so we discover changes on our own. + * Please see comments in hvcs_register_connection() for justification + * of this bizarre code. + */ + retval = hvcs_register_connection(unit_address, + hvcsd->p_partition_ID, + hvcsd->p_unit_address); + if (!retval) { + hvcsd->connected = 1; + return 0; + } else if (retval != -EINVAL) + return retval; + + /* + * As per the spec re-get the pi and try again if -EINVAL after the + * first connection attempt. + */ + if (hvcs_get_pi(hvcsd)) + return -ENOMEM; + + if (!hvcs_has_pi(hvcsd)) + return -ENODEV; + + retval = hvcs_register_connection(unit_address, + hvcsd->p_partition_ID, + hvcsd->p_unit_address); + if (retval != -EINVAL) { + hvcsd->connected = 1; + return retval; + } + + /* + * EBUSY is the most likely scenario though the vty could have been + * removed or there really could be an hcall error due to the parameter + * data but thanks to ambiguous firmware return codes we can't really + * tell. + */ + printk(KERN_INFO "HVCS: vty-server or partner" + " vty is busy. 
Try again later.\n"); + return -EBUSY; +} + +/* This function must be called with the hvcsd->lock held */ +static void hvcs_partner_free(struct hvcs_struct *hvcsd) +{ + int retval; + do { + retval = hvcs_free_connection(hvcsd->vdev->unit_address); + } while (retval == -EBUSY); + hvcsd->connected = 0; +} + +/* This helper function must be called WITHOUT the hvcsd->lock held */ +static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address, + unsigned int irq, struct vio_dev *vdev) +{ + unsigned long flags; + + /* + * It is possible that the vty-server was removed between the time that + * the conn was registered and now. + */ + if (!request_irq(irq, &hvcs_handle_interrupt, + SA_INTERRUPT, "ibmhvcs", hvcsd)) { + /* + * It is possible the vty-server was removed after the irq was + * requested but before we have time to enable interrupts. + */ + if (vio_enable_interrupts(vdev) == H_Success) + return 0; + else { + printk(KERN_ERR "HVCS: int enable failed for" + " vty-server@%X.\n", unit_address); + free_irq(irq, hvcsd); + } + } else + printk(KERN_ERR "HVCS: irq req failed for" + " vty-server@%X.\n", unit_address); + + spin_lock_irqsave(&hvcsd->lock, flags); + hvcs_partner_free(hvcsd); + spin_unlock_irqrestore(&hvcsd->lock, flags); + + return -ENODEV; + +} + +/* + * This always increments the kobject ref count if the call is successful. + * Please remember to dec when you are done with the instance. + * + * NOTICE: Do NOT hold either the hvcs_struct.lock or hvcs_structs_lock when + * calling this function or you will get deadlock. + */ +struct hvcs_struct *hvcs_get_by_index(int index) +{ + struct hvcs_struct *hvcsd = NULL; + struct list_head *element; + struct list_head *safe_temp; + unsigned long flags; + unsigned long structs_flags; + + spin_lock_irqsave(&hvcs_structs_lock, structs_flags); + /* We can immediately discard OOB requests */ + if (index >= 0 && index < HVCS_MAX_SERVER_ADAPTERS) { + list_for_each_safe(element, safe_temp, &hvcs_structs) { + hvcsd = list_entry(element, struct hvcs_struct, next); + spin_lock_irqsave(&hvcsd->lock, flags); + if (hvcsd->index == index) { + kobject_get(&hvcsd->kobj); + spin_unlock_irqrestore(&hvcsd->lock, flags); + spin_unlock_irqrestore(&hvcs_structs_lock, + structs_flags); + return hvcsd; + } + spin_unlock_irqrestore(&hvcsd->lock, flags); + } + hvcsd = NULL; + } + + spin_unlock_irqrestore(&hvcs_structs_lock, structs_flags); + return hvcsd; +} + +/* + * This is invoked via the tty_open interface when a user app connects to the + * /dev node. + */ +static int hvcs_open(struct tty_struct *tty, struct file *filp) +{ + struct hvcs_struct *hvcsd = NULL; + int retval = 0; + unsigned long flags; + unsigned int irq; + struct vio_dev *vdev; + unsigned long unit_address; + + if (tty->driver_data) + goto fast_open; + + /* + * Is there a vty-server that shares the same index? + * This function increments the kobject index. + */ + if (!(hvcsd = hvcs_get_by_index(tty->index))) { + printk(KERN_WARNING "HVCS: open failed, no index.\n"); + return -ENODEV; + } + + spin_lock_irqsave(&hvcsd->lock, flags); + + if (hvcsd->connected == 0) + if ((retval = hvcs_partner_connect(hvcsd))) + goto error_release; + + hvcsd->open_count = 1; + hvcsd->tty = tty; + tty->driver_data = hvcsd; + + /* + * Set this driver to low latency so that we actually have a chance at + * catching a throttled TTY after we flip_buffer_push. 
Otherwise the + * flush_to_async may not execute until after the kernel_thread has + * yielded and resumed the next flip_buffer_push resulting in data + * loss. + */ + tty->low_latency = 1; + + memset(&hvcsd->buffer[0], 0x3F, HVCS_BUFF_LEN); + + /* + * Save these in the spinlock for the enable operations that need them + * outside of the spinlock. + */ + irq = hvcsd->vdev->irq; + vdev = hvcsd->vdev; + unit_address = hvcsd->vdev->unit_address; + + hvcsd->todo_mask |= HVCS_SCHED_READ; + spin_unlock_irqrestore(&hvcsd->lock, flags); + + /* + * This must be done outside of the spinlock because it requests irqs + * and will grab the spinlcok and free the connection if it fails. + */ + if ((hvcs_enable_device(hvcsd, unit_address, irq, vdev))) { + kobject_put(&hvcsd->kobj); + printk(KERN_WARNING "HVCS: enable device failed.\n"); + return -ENODEV; + } + + goto open_success; + +fast_open: + hvcsd = tty->driver_data; + + spin_lock_irqsave(&hvcsd->lock, flags); + if (!kobject_get(&hvcsd->kobj)) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + printk(KERN_ERR "HVCS: Kobject of open" + " hvcs doesn't exist.\n"); + return -EFAULT; /* Is this the right return value? */ + } + + hvcsd->open_count++; + + hvcsd->todo_mask |= HVCS_SCHED_READ; + spin_unlock_irqrestore(&hvcsd->lock, flags); +open_success: + hvcs_kick(); + + printk(KERN_INFO "HVCS: vty-server@%X opened.\n", + hvcsd->vdev->unit_address ); + + return 0; + +error_release: + spin_unlock_irqrestore(&hvcsd->lock, flags); + kobject_put(&hvcsd->kobj); + + printk(KERN_WARNING "HVCS: HVCS partner connect failed.\n"); + return retval; +} + +static void hvcs_close(struct tty_struct *tty, struct file *filp) +{ + struct hvcs_struct *hvcsd; + unsigned long flags; + struct kobject *kobjp; + + /* + * Is someone trying to close the file associated with this device after + * we have hung up? If so tty->driver_data wouldn't be valid. + */ + if (tty_hung_up_p(filp)) + return; + + /* + * No driver_data means that this close was probably issued after a + * failed hvcs_open by the tty layer's release_dev() api and we can just + * exit cleanly. + */ + if (!tty->driver_data) + return; + + hvcsd = tty->driver_data; + + spin_lock_irqsave(&hvcsd->lock, flags); + if (--hvcsd->open_count == 0) { + + /* + * This line is important because it tells hvcs_open that this + * device needs to be re-configured the next time hvcs_open is + * called. + */ + hvcsd->tty->driver_data = NULL; + + /* + * NULL this early so that the kernel_thread doesn't try to + * execute any operations on the TTY even though it is obligated + * to deliver any pending I/O to the hypervisor. + */ + hvcsd->tty = NULL; + + /* + * Block the close until all the buffered data has been + * delivered. + */ + while(hvcsd->chars_in_buffer) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + + /* + * Give the kernel thread the hvcs_struct so that it can + * try to deliver the remaining data but block the close + * operation by spinning in this function so that other + * tty operations have to wait. 
+ */ + yield(); + spin_lock_irqsave(&hvcsd->lock, flags); + } + + hvcs_final_close(hvcsd); + + } else if (hvcsd->open_count < 0) { + printk(KERN_ERR "HVCS: vty-server@%X open_count: %d" + " is missmanaged.\n", + hvcsd->vdev->unit_address, hvcsd->open_count); + } + kobjp = &hvcsd->kobj; + + spin_unlock_irqrestore(&hvcsd->lock, flags); + + kobject_put(kobjp); +} + +static void hvcs_hangup(struct tty_struct * tty) +{ + struct hvcs_struct *hvcsd = tty->driver_data; + unsigned long flags; + int temp_open_count; + struct kobject *kobjp; + + spin_lock_irqsave(&hvcsd->lock, flags); + /* Preserve this so that we know how many kobject refs to put */ + temp_open_count = hvcsd->open_count; + + /* + * Don't kobject put inside the spinlock because the destruction + * callback may use the spinlock and it may get called before the + * spinlock has been released. Get a pointer to the kobject and + * kobject_put on that instead. + */ + kobjp = &hvcsd->kobj; + + /* Calling this will drop any buffered data on the floor. */ + hvcs_final_close(hvcsd); + + spin_unlock_irqrestore(&hvcsd->lock, flags); + + /* + * We need to kobject_put() for every open_count we have since the + * tty_hangup() function doesn't invoke a close per open connection on a + * non-console device. + */ + while(temp_open_count) { + --temp_open_count; + /* + * The final put will trigger destruction of the hvcs_struct. + * NOTE: If this hangup was signaled from user space then the + * final put will never happen. + */ + kobject_put(kobjp); + } +} + +/* + * NOTE: This is almost always from_user since user level apps interact with the + * /dev nodes. I'm trusting that if hvcs_write gets called and interrupted by + * hvcs_remove (which removes the target device and executes tty_hangup()) that + * tty_hangup will allow hvcs_write time to complete execution before it + * terminates our device. + */ +static int hvcs_write(struct tty_struct *tty, int from_user, + const unsigned char *buf, int count) +{ + struct hvcs_struct *hvcsd = tty->driver_data; + unsigned int unit_address; + unsigned char *charbuf; + unsigned long flags; + int total_sent = 0; + int tosend = 0; + int result = 0; + + /* + * If they don't check the return code off of their open they may + * attempt this even if there is no connected device. + */ + if (!hvcsd) + return -ENODEV; + + /* Reasonable size to prevent user level flooding */ + if (count > HVCS_MAX_FROM_USER) { + printk(KERN_WARNING "HVCS write: count being truncated to" + " HVCS_MAX_FROM_USER.\n"); + count = HVCS_MAX_FROM_USER; + } + + if (!from_user) + charbuf = (unsigned char *)buf; + else { + charbuf = kmalloc(count, GFP_KERNEL); + if (!charbuf) { + printk(KERN_WARNING "HVCS: write -ENOMEM.\n"); + return -ENOMEM; + } + + if (copy_from_user(charbuf, buf, count)) { + kfree(charbuf); + printk(KERN_WARNING "HVCS: write -EFAULT.\n"); + return -EFAULT; + } + } + + spin_lock_irqsave(&hvcsd->lock, flags); + + /* + * Somehow an open succedded but the device was removed or the + * connection terminated between the vty-server and partner vty during + * the middle of a write operation? This is a crummy place to do this + * but we want to keep it all in the spinlock. 
+ */ + if (hvcsd->open_count <= 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + if (from_user) + kfree(charbuf); + return -ENODEV; + } + + unit_address = hvcsd->vdev->unit_address; + + while (count > 0) { + tosend = min(count, (HVCS_BUFF_LEN - hvcsd->chars_in_buffer)); + /* + * No more space, this probably means that the last call to + * hvcs_write() didn't succeed and the buffer was filled up. + */ + if (!tosend) + break; + + memcpy(&hvcsd->buffer[hvcsd->chars_in_buffer], + &charbuf[total_sent], + tosend); + + hvcsd->chars_in_buffer += tosend; + + result = 0; + + /* + * If this is true then we don't want to try writing to the + * hypervisor because that is the kernel_threads job now. We'll + * just add to the buffer. + */ + if (!(hvcsd->todo_mask & HVCS_TRY_WRITE)) + /* won't send partial writes */ + result = hvc_put_chars(unit_address, + &hvcsd->buffer[0], + hvcsd->chars_in_buffer); + + /* + * Since we know we have enough room in hvcsd->buffer for + * tosend we record that it was sent regardless of whether the + * hypervisor actually took it because we have it buffered. + */ + total_sent+=tosend; + count-=tosend; + if (result == 0) { + hvcsd->todo_mask |= HVCS_TRY_WRITE; + hvcs_kick(); + break; + } + + hvcsd->chars_in_buffer = 0; + /* + * Test after the chars_in_buffer reset otherwise this could + * deadlock our writes if hvc_put_chars fails. + */ + if (result < 0) + break; + } + + spin_unlock_irqrestore(&hvcsd->lock, flags); + if (from_user) + kfree(charbuf); + + if (result == -1) + return -EIO; + else + return total_sent; +} + +/* + * This is really asking how much can we guarentee that we can send or that we + * absolutely WILL BUFFER if we can't send it. This driver MUST honor the + * return value, hence the reason for hvcs_struct buffering. + */ +static int hvcs_write_room(struct tty_struct *tty) +{ + struct hvcs_struct *hvcsd = tty->driver_data; + unsigned long flags; + int retval; + + if (!hvcsd || hvcsd->open_count <= 0) + return 0; + + spin_lock_irqsave(&hvcsd->lock, flags); + retval = HVCS_BUFF_LEN - hvcsd->chars_in_buffer; + spin_unlock_irqrestore(&hvcsd->lock, flags); + return retval; +} + +static int hvcs_chars_in_buffer(struct tty_struct *tty) +{ + struct hvcs_struct *hvcsd = tty->driver_data; + unsigned long flags; + int retval; + + spin_lock_irqsave(&hvcsd->lock, flags); + retval = hvcsd->chars_in_buffer; + spin_unlock_irqrestore(&hvcsd->lock, flags); + return retval; +} + +static struct tty_operations hvcs_ops = { + .open = hvcs_open, + .close = hvcs_close, + .hangup = hvcs_hangup, + .write = hvcs_write, + .write_room = hvcs_write_room, + .chars_in_buffer = hvcs_chars_in_buffer, + .unthrottle = hvcs_unthrottle, + .throttle = hvcs_throttle, +}; + +static int __init hvcs_module_init(void) +{ + int rc; + int num_ttys_to_alloc; + + printk(KERN_INFO "Initializing %s\n", hvcs_driver_string); + + /* Has the user specified an overload with an insmod param? */ + if (hvcs_parm_num_devs <= 0 || + (hvcs_parm_num_devs > HVCS_MAX_SERVER_ADAPTERS)) { + num_ttys_to_alloc = HVCS_DEFAULT_SERVER_ADAPTERS; + } else + num_ttys_to_alloc = hvcs_parm_num_devs; + + hvcs_tty_driver = alloc_tty_driver(num_ttys_to_alloc); + if (!hvcs_tty_driver) + return -ENOMEM; + + hvcs_tty_driver->owner = THIS_MODULE; + + hvcs_tty_driver->driver_name = hvcs_driver_name; + hvcs_tty_driver->name = hvcs_device_node; + + /* + * We'll let the system assign us a major number, indicated by leaving + * it blank. 
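+	 * (Leaving hvcs_tty_driver->major at zero asks the tty core to pick
+	 * a free major for us when tty_register_driver() runs; the number it
+	 * chose can then be read back from /proc/devices.)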
+ */
+
+	hvcs_tty_driver->minor_start = HVCS_MINOR_START;
+	hvcs_tty_driver->type = TTY_DRIVER_TYPE_SYSTEM;
+
+	/*
+	 * We roll our own so that we DON'T ECHO.  We can't echo because the
+	 * device we are connecting to already echoes by default and this would
+	 * throw us into a horrible recursive echo-echo-echo loop.
+	 */
+	hvcs_tty_driver->init_termios = hvcs_tty_termios;
+	hvcs_tty_driver->flags = TTY_DRIVER_REAL_RAW;
+
+	tty_set_operations(hvcs_tty_driver, &hvcs_ops);
+
+	/*
+	 * The following call will result in sysfs entries that denote the
+	 * dynamically assigned major and minor numbers for our devices.
+	 */
+	rc = tty_register_driver(hvcs_tty_driver);
+	if (rc) {
+		printk(KERN_ERR "HVCS: registration as a tty driver failed.\n");
+		put_tty_driver(hvcs_tty_driver);
+		return rc;
+	}
+
+	hvcs_structs_lock = SPIN_LOCK_UNLOCKED;
+
+	hvcs_pi_lock = SPIN_LOCK_UNLOCKED;
+	hvcs_pi_buff = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+	hvcs_task = kthread_run(khvcsd, NULL, "khvcsd");
+	if (IS_ERR(hvcs_task)) {
+		printk("khvcsd creation failed.  Driver not loaded.\n");
+		kfree(hvcs_pi_buff);
+		put_tty_driver(hvcs_tty_driver);
+		return -EIO;
+	}
+
+	rc = vio_register_driver(&hvcs_vio_driver);
+
+	/*
+	 * This needs to be done AFTER the vio_register_driver() call or else
+	 * the kobjects won't be initialized properly.
+	 */
+	hvcs_create_driver_attrs();
+
+	printk(KERN_INFO "HVCS: driver module inserted.\n");
+
+	return rc;
+}
+
+static void __exit hvcs_module_exit(void)
+{
+	unsigned long flags;
+
+	/*
+	 * This driver receives hvcs_remove callbacks for each device upon
+	 * module removal.
+	 */
+
+	/*
+	 * This synchronous operation will wake the khvcsd kthread if it is
+	 * asleep and will return when khvcsd has terminated.
+	 */
+	kthread_stop(hvcs_task);
+
+	spin_lock_irqsave(&hvcs_pi_lock, flags);
+	kfree(hvcs_pi_buff);
+	hvcs_pi_buff = NULL;
+	spin_unlock_irqrestore(&hvcs_pi_lock, flags);
+
+	hvcs_remove_driver_attrs();
+
+	vio_unregister_driver(&hvcs_vio_driver);
+
+	tty_unregister_driver(hvcs_tty_driver);
+
+	put_tty_driver(hvcs_tty_driver);
+
+	printk(KERN_INFO "HVCS: driver module removed.\n");
+}
+
+module_init(hvcs_module_init);
+module_exit(hvcs_module_exit);
+
+static inline struct hvcs_struct *from_vio_dev(struct vio_dev *viod)
+{
+	return viod->dev.driver_data;
+}
+/* The sysfs interface for the driver and devices */
+
+static ssize_t hvcs_partner_vtys_show(struct device *dev, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%X\n", hvcsd->p_unit_address);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+static DEVICE_ATTR(partner_vtys, S_IRUGO, hvcs_partner_vtys_show, NULL);
+
+static ssize_t hvcs_partner_clcs_show(struct device *dev, char *buf)
+{
+	struct vio_dev *viod = to_vio_dev(dev);
+	struct hvcs_struct *hvcsd = from_vio_dev(viod);
+	unsigned long flags;
+	int retval;
+
+	spin_lock_irqsave(&hvcsd->lock, flags);
+	retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]);
+	spin_unlock_irqrestore(&hvcsd->lock, flags);
+	return retval;
+}
+static DEVICE_ATTR(partner_clcs, S_IRUGO, hvcs_partner_clcs_show, NULL);
+
+static ssize_t hvcs_current_vty_store(struct device *dev, const char * buf,
+		size_t count)
+{
+	/*
+	 * Don't need this feature at the present time because firmware doesn't
+	 * yet support multiple partners.
+ */ + printk(KERN_INFO "HVCS: Denied current_vty change: -EPERM.\n"); + return -EPERM; +} + +static ssize_t hvcs_current_vty_show(struct device *dev, char *buf) +{ + struct vio_dev *viod = to_vio_dev(dev); + struct hvcs_struct *hvcsd = from_vio_dev(viod); + unsigned long flags; + int retval; + + spin_lock_irqsave(&hvcsd->lock, flags); + retval = sprintf(buf, "%s\n", &hvcsd->p_location_code[0]); + spin_unlock_irqrestore(&hvcsd->lock, flags); + return retval; +} + +static DEVICE_ATTR(current_vty, + S_IRUGO | S_IWUSR, hvcs_current_vty_show, hvcs_current_vty_store); + +static ssize_t hvcs_vterm_state_store(struct device *dev, const char *buf, + size_t count) +{ + struct vio_dev *viod = to_vio_dev(dev); + struct hvcs_struct *hvcsd = from_vio_dev(viod); + unsigned long flags; + + /* writing a '0' to this sysfs entry will result in the disconnect. */ + if (simple_strtol(buf, NULL, 0) != 0) + return -EINVAL; + + spin_lock_irqsave(&hvcsd->lock, flags); + + if (hvcsd->open_count > 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + printk(KERN_INFO "HVCS: vterm state unchanged. " + "The hvcs device node is still in use.\n"); + return -EPERM; + } + + if (hvcsd->connected == 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + printk(KERN_INFO "HVCS: vterm state unchanged. The" + " vty-server is not connected to a vty.\n"); + return -EPERM; + } + + hvcs_partner_free(hvcsd); + printk(KERN_INFO "HVCS: Closed vty-server@%X and" + " partner vty@%X:%d connection.\n", + hvcsd->vdev->unit_address, + hvcsd->p_unit_address, + (unsigned int)hvcsd->p_partition_ID); + + spin_unlock_irqrestore(&hvcsd->lock, flags); + return count; +} + +static ssize_t hvcs_vterm_state_show(struct device *dev, char *buf) +{ + struct vio_dev *viod = to_vio_dev(dev); + struct hvcs_struct *hvcsd = from_vio_dev(viod); + unsigned long flags; + int retval; + + spin_lock_irqsave(&hvcsd->lock, flags); + retval = sprintf(buf, "%d\n", hvcsd->connected); + spin_unlock_irqrestore(&hvcsd->lock, flags); + return retval; +} +static DEVICE_ATTR(vterm_state, S_IRUGO | S_IWUSR, + hvcs_vterm_state_show, hvcs_vterm_state_store); + +static struct attribute *hvcs_attrs[] = { + &dev_attr_partner_vtys.attr, + &dev_attr_partner_clcs.attr, + &dev_attr_current_vty.attr, + &dev_attr_vterm_state.attr, + NULL, +}; + +static struct attribute_group hvcs_attr_group = { + .attrs = hvcs_attrs, +}; + +static void hvcs_create_device_attrs(struct hvcs_struct *hvcsd) +{ + struct vio_dev *vdev = hvcsd->vdev; + sysfs_create_group(&vdev->dev.kobj, &hvcs_attr_group); +} + +static void hvcs_remove_device_attrs(struct vio_dev *vdev) +{ + sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group); +} + +static ssize_t hvcs_rescan_show(struct device_driver *ddp, char *buf) +{ + /* A 1 means it is updating, a 0 means it is done updating */ + return snprintf(buf, PAGE_SIZE, "%d\n", hvcs_rescan_status); +} + +static ssize_t hvcs_rescan_store(struct device_driver *ddp, const char * buf, + size_t count) +{ + if ((simple_strtol(buf, NULL, 0) != 1) + && (hvcs_rescan_status != 0)) + return -EINVAL; + + hvcs_rescan_status = 1; + printk(KERN_INFO "HVCS: rescanning partner info for all" + " vty-servers.\n"); + hvcs_rescan_devices_list(); + hvcs_rescan_status = 0; + return count; +} +static DRIVER_ATTR(rescan, + S_IRUGO | S_IWUSR, hvcs_rescan_show, hvcs_rescan_store); + +static void hvcs_create_driver_attrs(void) +{ + struct device_driver *driverfs = &(hvcs_vio_driver.driver); + driver_create_file(driverfs, &driver_attr_rescan); +} + +static void hvcs_remove_driver_attrs(void) +{ + struct 
device_driver *driverfs = &(hvcs_vio_driver.driver); + driver_remove_file(driverfs, &driver_attr_rescan); +} diff --git a/drivers/char/ip27-rtc.c b/drivers/char/ip27-rtc.c new file mode 100644 index 000000000..3acdac3c9 --- /dev/null +++ b/drivers/char/ip27-rtc.c @@ -0,0 +1,327 @@ +/* + * Driver for the SGS-Thomson M48T35 Timekeeper RAM chip + * + * Real Time Clock interface for Linux + * + * TODO: Implement periodic interrupts. + * + * Copyright (C) 2000 Silicon Graphics, Inc. + * Written by Ulf Carlsson (ulfc@engr.sgi.com) + * + * Based on code written by Paul Gortmaker. + * + * This driver allows use of the real time clock (built into + * nearly all computers) from user space. It exports the /dev/rtc + * interface supporting various ioctl() and also the /proc/rtc + * pseudo-file for status information. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#define RTC_VERSION "1.09b" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int rtc_ioctl(struct inode *inode, struct file *file, + unsigned int cmd, unsigned long arg); + +static int rtc_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data); + +static void get_rtc_time(struct rtc_time *rtc_tm); + +/* + * Bits in rtc_status. (6 bits of room for future expansion) + */ + +#define RTC_IS_OPEN 0x01 /* means /dev/rtc is in use */ +#define RTC_TIMER_ON 0x02 /* missed irq timer active */ + +static unsigned char rtc_status; /* bitmapped status byte. */ +static unsigned long rtc_freq; /* Current periodic IRQ rate */ +static struct m48t35_rtc *rtc; + +/* + * If this driver ever becomes modularised, it will be really nice + * to make the epoch retain its value across module reload... 
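+ * (For instance, it could be exposed as a module parameter along the lines
+ * of module_param(epoch, ulong, 0444); that is just a sketch, not something
+ * this driver implements.)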
+ */ + +static unsigned long epoch = 1970; /* year corresponding to 0x00 */ + +static const unsigned char days_in_mo[] = +{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}; + +static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + + struct rtc_time wtime; + + switch (cmd) { + case RTC_RD_TIME: /* Read the time/date from RTC */ + { + get_rtc_time(&wtime); + break; + } + case RTC_SET_TIME: /* Set the RTC */ + { + struct rtc_time rtc_tm; + unsigned char mon, day, hrs, min, sec, leap_yr; + unsigned int yrs; + + if (!capable(CAP_SYS_TIME)) + return -EACCES; + + if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, + sizeof(struct rtc_time))) + return -EFAULT; + + yrs = rtc_tm.tm_year + 1900; + mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */ + day = rtc_tm.tm_mday; + hrs = rtc_tm.tm_hour; + min = rtc_tm.tm_min; + sec = rtc_tm.tm_sec; + + if (yrs < 1970) + return -EINVAL; + + leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400)); + + if ((mon > 12) || (day == 0)) + return -EINVAL; + + if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr))) + return -EINVAL; + + if ((hrs >= 24) || (min >= 60) || (sec >= 60)) + return -EINVAL; + + if ((yrs -= epoch) > 255) /* They are unsigned */ + return -EINVAL; + + if (yrs > 169) + return -EINVAL; + + if (yrs >= 100) + yrs -= 100; + + sec = BIN2BCD(sec); + min = BIN2BCD(min); + hrs = BIN2BCD(hrs); + day = BIN2BCD(day); + mon = BIN2BCD(mon); + yrs = BIN2BCD(yrs); + + spin_lock_irq(&rtc_lock); + rtc->control |= M48T35_RTC_SET; + rtc->year = yrs; + rtc->month = mon; + rtc->date = day; + rtc->hour = hrs; + rtc->min = min; + rtc->sec = sec; + rtc->control &= ~M48T35_RTC_SET; + spin_unlock_irq(&rtc_lock); + + return 0; + } + default: + return -EINVAL; + } + return copy_to_user((void *)arg, &wtime, sizeof wtime) ? -EFAULT : 0; +} + +/* + * We enforce only one user at a time here with the open/close. + * Also clear the previous interrupt data on an open, and clean + * up things on a close. + */ + +static int rtc_open(struct inode *inode, struct file *file) +{ + spin_lock_irq(&rtc_lock); + + if (rtc_status & RTC_IS_OPEN) { + spin_unlock_irq(&rtc_lock); + return -EBUSY; + } + + rtc_status |= RTC_IS_OPEN; + spin_unlock_irq(&rtc_lock); + + return 0; +} + +static int rtc_release(struct inode *inode, struct file *file) +{ + /* + * Turn off all interrupts once the device is no longer + * in use, and clear the data. + */ + + spin_lock_irq(&rtc_lock); + rtc_status &= ~RTC_IS_OPEN; + spin_unlock_irq(&rtc_lock); + + return 0; +} + +/* + * The various file operations we support. 
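+ *
+ * A minimal sketch of how user space exercises them (illustrative, not part
+ * of this driver):
+ *
+ *	int fd = open("/dev/rtc", O_RDONLY);
+ *	struct rtc_time tm;
+ *
+ *	if (fd >= 0 && ioctl(fd, RTC_RD_TIME, &tm) == 0)
+ *		printf("%02d:%02d:%02d\n", tm.tm_hour, tm.tm_min, tm.tm_sec);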
+ */ + +static struct file_operations rtc_fops = { + .owner = THIS_MODULE, + .ioctl = rtc_ioctl, + .open = rtc_open, + .release = rtc_release, +}; + +static struct miscdevice rtc_dev= +{ + RTC_MINOR, + "rtc", + &rtc_fops +}; + +static int __init rtc_init(void) +{ + rtc = (struct m48t35_rtc *) + (KL_CONFIG_CH_CONS_INFO(master_nasid)->memory_base + IOC3_BYTEBUS_DEV0); + + printk(KERN_INFO "Real Time Clock Driver v%s\n", RTC_VERSION); + if (misc_register(&rtc_dev)) { + printk(KERN_ERR "rtc: cannot register misc device.\n"); + return -ENODEV; + } + if (!create_proc_read_entry("driver/rtc", 0, NULL, rtc_read_proc, NULL)) { + printk(KERN_ERR "rtc: cannot create /proc/rtc.\n"); + misc_deregister(&rtc_dev); + return -ENOENT; + } + + rtc_freq = 1024; + + return 0; +} + +static void __exit rtc_exit (void) +{ + /* interrupts and timer disabled at this point by rtc_release */ + + remove_proc_entry ("rtc", NULL); + misc_deregister(&rtc_dev); +} + +module_init(rtc_init); +module_exit(rtc_exit); + +/* + * Info exported via "/proc/rtc". + */ + +static int rtc_get_status(char *buf) +{ + char *p; + struct rtc_time tm; + + /* + * Just emulate the standard /proc/rtc + */ + + p = buf; + + get_rtc_time(&tm); + + /* + * There is no way to tell if the luser has the RTC set for local + * time or for Universal Standard Time (GMT). Probably local though. + */ + p += sprintf(p, + "rtc_time\t: %02d:%02d:%02d\n" + "rtc_date\t: %04d-%02d-%02d\n" + "rtc_epoch\t: %04lu\n" + "24hr\t\t: yes\n", + tm.tm_hour, tm.tm_min, tm.tm_sec, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch); + + return p - buf; +} + +static int rtc_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + int len = rtc_get_status(page); + if (len <= off+count) *eof = 1; + *start = page + off; + len -= off; + if (len>count) len = count; + if (len<0) len = 0; + return len; +} + +static void get_rtc_time(struct rtc_time *rtc_tm) +{ + /* + * Do we need to wait for the last update to finish? + */ + + /* + * Only the values that we read from the RTC are set. We leave + * tm_wday, tm_yday and tm_isdst untouched. Even though the + * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated + * by the RTC when initially set to a non-zero value. + */ + spin_lock_irq(&rtc_lock); + rtc->control |= M48T35_RTC_READ; + rtc_tm->tm_sec = rtc->sec; + rtc_tm->tm_min = rtc->min; + rtc_tm->tm_hour = rtc->hour; + rtc_tm->tm_mday = rtc->date; + rtc_tm->tm_mon = rtc->month; + rtc_tm->tm_year = rtc->year; + rtc->control &= ~M48T35_RTC_READ; + spin_unlock_irq(&rtc_lock); + + rtc_tm->tm_sec = BCD2BIN(rtc_tm->tm_sec); + rtc_tm->tm_min = BCD2BIN(rtc_tm->tm_min); + rtc_tm->tm_hour = BCD2BIN(rtc_tm->tm_hour); + rtc_tm->tm_mday = BCD2BIN(rtc_tm->tm_mday); + rtc_tm->tm_mon = BCD2BIN(rtc_tm->tm_mon); + rtc_tm->tm_year = BCD2BIN(rtc_tm->tm_year); + + /* + * Account for differences between how the RTC uses the values + * and how they are defined in a struct rtc_time; + */ + if ((rtc_tm->tm_year += (epoch - 1900)) <= 69) + rtc_tm->tm_year += 100; + + rtc_tm->tm_mon--; +} diff --git a/drivers/char/lcd.h b/drivers/char/lcd.h new file mode 100644 index 000000000..8aed49850 --- /dev/null +++ b/drivers/char/lcd.h @@ -0,0 +1,184 @@ +/* + * LED, LCD and Button panel driver for Cobalt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 1996, 1997 by Andrew Bose + * + * Linux kernel version history: + * March 2001: Ported from 2.0.34 by Liam Davies + * + */ + +// function headers + +static int dqpoll(volatile unsigned long, volatile unsigned char ); +static int timeout(volatile unsigned long); + +#define LCD_CHARS_PER_LINE 40 +#define FLASH_SIZE 524288 +#define MAX_IDLE_TIME 120 + +struct lcd_display { + unsigned long buttons; + int size1; + int size2; + unsigned char line1[LCD_CHARS_PER_LINE]; + unsigned char line2[LCD_CHARS_PER_LINE]; + unsigned char cursor_address; + unsigned char character; + unsigned char leds; + unsigned char *RomImage; +}; + + + +#define LCD_DRIVER "Cobalt LCD Driver v2.10" + +#define kLCD_IR 0x0F000000 +#define kLCD_DR 0x0F000010 +#define kGPI 0x0D000000 +#define kLED 0x0C000000 + +#define kDD_R00 0x00 +#define kDD_R01 0x27 +#define kDD_R10 0x40 +#define kDD_R11 0x67 + +#define kLCD_Addr 0x00000080 + +#define LCDTimeoutValue 0xfff + + +// Flash definitions AMD 29F040 +#define kFlashBase 0x0FC00000 + +#define kFlash_Addr1 0x5555 +#define kFlash_Addr2 0x2AAA +#define kFlash_Data1 0xAA +#define kFlash_Data2 0x55 +#define kFlash_Prog 0xA0 +#define kFlash_Erase3 0x80 +#define kFlash_Erase6 0x10 +#define kFlash_Read 0xF0 + +#define kFlash_ID 0x90 +#define kFlash_VenAddr 0x00 +#define kFlash_DevAddr 0x01 +#define kFlash_VenID 0x01 +#define kFlash_DevID 0xA4 // 29F040 +//#define kFlash_DevID 0xAD // 29F016 + + +// Macros + +#define LCDWriteData(x) outl((x << 24), kLCD_DR) +#define LCDWriteInst(x) outl((x << 24), kLCD_IR) + +#define LCDReadData (inl(kLCD_DR) >> 24) +#define LCDReadInst (inl(kLCD_IR) >> 24) + +#define GPIRead (inl(kGPI) >> 24) + +#define LEDSet(x) outb((char)x, kLED) + +#define WRITE_GAL(x,y) outl(y, 0x04000000 | (x)) +#define BusyCheck() while ((LCDReadInst & 0x80) == 0x80) + +#define WRITE_FLASH(x,y) outb((char)y, kFlashBase | (x)) +#define READ_FLASH(x) (inb(kFlashBase | (x))) + + + +/* + * Function command codes for io_ctl. 
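+ *
+ * Each of these is passed as the command argument of an ioctl() on the
+ * panel's device node, e.g. ioctl(fd, LCD_Clear, 0) to clear the display
+ * (a sketch; the device node name depends on how the lcd driver registers
+ * itself).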
+ */ +#define LCD_On 1 +#define LCD_Off 2 +#define LCD_Clear 3 +#define LCD_Reset 4 +#define LCD_Cursor_Left 5 +#define LCD_Cursor_Right 6 +#define LCD_Disp_Left 7 +#define LCD_Disp_Right 8 +#define LCD_Get_Cursor 9 +#define LCD_Set_Cursor 10 +#define LCD_Home 11 +#define LCD_Read 12 +#define LCD_Write 13 +#define LCD_Cursor_Off 14 +#define LCD_Cursor_On 15 +#define LCD_Get_Cursor_Pos 16 +#define LCD_Set_Cursor_Pos 17 +#define LCD_Blink_Off 18 + +#define LED_Set 40 +#define LED_Bit_Set 41 +#define LED_Bit_Clear 42 + + +// Button defs +#define BUTTON_Read 50 + +// Flash command codes +#define FLASH_Erase 60 +#define FLASH_Burn 61 +#define FLASH_Read 62 + + +// Ethernet LINK check hackaroo +#define LINK_Check 90 +#define LINK_Check_2 91 + +// Button patterns _B - single layer lcd boards + +#define BUTTON_NONE 0x3F +#define BUTTON_NONE_B 0xFE + +#define BUTTON_Left 0x3B +#define BUTTON_Left_B 0xFA + +#define BUTTON_Right 0x37 +#define BUTTON_Right_B 0xDE + +#define BUTTON_Up 0x2F +#define BUTTON_Up_B 0xF6 + +#define BUTTON_Down 0x1F +#define BUTTON_Down_B 0xEE + +#define BUTTON_Next 0x3D +#define BUTTON_Next_B 0x7E + +#define BUTTON_Enter 0x3E +#define BUTTON_Enter_B 0xBE + +#define BUTTON_Reset_B 0xFC + + +// debounce constants + +#define BUTTON_SENSE 160000 +#define BUTTON_DEBOUNCE 5000 + + +// Galileo register stuff + +#define kGal_DevBank2Cfg 0x1466DB33 +#define kGal_DevBank2PReg 0x464 +#define kGal_DevBank3Cfg 0x146FDFFB +#define kGal_DevBank3PReg 0x468 + +// Network + +#define kIPADDR 1 +#define kNETMASK 2 +#define kGATEWAY 3 +#define kDNS 4 + +#define kClassA 5 +#define kClassB 6 +#define kClassC 7 + diff --git a/drivers/char/watchdog/ixp2000_wdt.c b/drivers/char/watchdog/ixp2000_wdt.c new file mode 100644 index 000000000..ebcaf79ce --- /dev/null +++ b/drivers/char/watchdog/ixp2000_wdt.c @@ -0,0 +1,219 @@ +/* + * drivers/watchdog/ixp2000_wdt.c + * + * Watchdog driver for Intel IXP2000 network processors + * + * Adapted from the IXP4xx watchdog driver by Lennert Buytenhek. + * The original version carries these notices: + * + * Author: Deepak Saxena + * + * Copyright 2004 (c) MontaVista, Software, Inc. + * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_WATCHDOG_NOWAYOUT +static int nowayout = 1; +#else +static int nowayout = 0; +#endif +static unsigned int heartbeat = 60; /* (secs) Default is 1 minute */ +static unsigned long wdt_status; + +#define WDT_IN_USE 0 +#define WDT_OK_TO_CLOSE 1 + +static unsigned long wdt_tick_rate; + +static void +wdt_enable(void) +{ + ixp2000_reg_write(IXP2000_RESET0, *(IXP2000_RESET0) | WDT_RESET_ENABLE); + ixp2000_reg_write(IXP2000_TWDE, WDT_ENABLE); + ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate); + ixp2000_reg_write(IXP2000_T4_CTL, TIMER_DIVIDER_256 | TIMER_ENABLE); +} + +static void +wdt_disable(void) +{ + ixp2000_reg_write(IXP2000_T4_CTL, 0); +} + +static void +wdt_keepalive(void) +{ + ixp2000_reg_write(IXP2000_T4_CLD, heartbeat * wdt_tick_rate); +} + +static int +ixp2000_wdt_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(WDT_IN_USE, &wdt_status)) + return -EBUSY; + + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + wdt_enable(); + + return nonseekable_open(inode, file); +} + +static ssize_t +ixp2000_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) +{ + if (len) { + if (!nowayout) { + size_t i; + + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + for (i = 0; i != len; i++) { + char c; + + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + set_bit(WDT_OK_TO_CLOSE, &wdt_status); + } + } + wdt_keepalive(); + } + + return len; +} + + +static struct watchdog_info ident = { + .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | + WDIOF_KEEPALIVEPING, + .identity = "IXP2000 Watchdog", +}; + +static int +ixp2000_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -ENOIOCTLCMD; + int time; + + switch (cmd) { + case WDIOC_GETSUPPORT: + ret = copy_to_user((struct watchdog_info *)arg, &ident, + sizeof(ident)) ? 
-EFAULT : 0; + break; + + case WDIOC_GETSTATUS: + ret = put_user(0, (int *)arg); + break; + + case WDIOC_GETBOOTSTATUS: + ret = put_user(0, (int *)arg); + break; + + case WDIOC_SETTIMEOUT: + ret = get_user(time, (int *)arg); + if (ret) + break; + + if (time <= 0 || time > 60) { + ret = -EINVAL; + break; + } + + heartbeat = time; + wdt_keepalive(); + /* Fall through */ + + case WDIOC_GETTIMEOUT: + ret = put_user(heartbeat, (int *)arg); + break; + + case WDIOC_KEEPALIVE: + wdt_enable(); + ret = 0; + break; + } + + return ret; +} + +static int +ixp2000_wdt_release(struct inode *inode, struct file *file) +{ + if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) { + wdt_disable(); + } else { + printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " + "timer will not stop\n"); + } + + clear_bit(WDT_IN_USE, &wdt_status); + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + return 0; +} + + +static struct file_operations ixp2000_wdt_fops = +{ + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = ixp2000_wdt_write, + .ioctl = ixp2000_wdt_ioctl, + .open = ixp2000_wdt_open, + .release = ixp2000_wdt_release, +}; + +static struct miscdevice ixp2000_wdt_miscdev = +{ + .minor = WATCHDOG_MINOR, + .name = "IXP2000 Watchdog", + .fops = &ixp2000_wdt_fops, +}; + +static int __init ixp2000_wdt_init(void) +{ + wdt_tick_rate = (*IXP2000_T1_CLD * HZ)/ 256; + + return misc_register(&ixp2000_wdt_miscdev); +} + +static void __exit ixp2000_wdt_exit(void) +{ + misc_deregister(&ixp2000_wdt_miscdev); +} + +module_init(ixp2000_wdt_init); +module_exit(ixp2000_wdt_exit); + +MODULE_AUTHOR("Deepak Saxena"); +MODULE_DESCRIPTION("IXP2000 Network Processor Watchdog"); + +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 60s)"); + +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); + diff --git a/drivers/char/watchdog/ixp4xx_wdt.c b/drivers/char/watchdog/ixp4xx_wdt.c new file mode 100644 index 000000000..79493650f --- /dev/null +++ b/drivers/char/watchdog/ixp4xx_wdt.c @@ -0,0 +1,233 @@ +/* + * drivers/watchdog/ixp4xx_wdt.c + * + * Watchdog driver for Intel IXP4xx network processors + * + * Author: Deepak Saxena + * + * Copyright 2004 (c) MontaVista, Software, Inc. + * Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_WATCHDOG_NOWAYOUT +static int nowayout = 1; +#else +static int nowayout = 0; +#endif +static int heartbeat = 60; /* (secs) Default is 1 minute */ +static unsigned long wdt_status; +static unsigned long boot_status; + +#define WDT_TICK_RATE (IXP4XX_PERIPHERAL_BUS_CLOCK * 1000000UL) + +#define WDT_IN_USE 0 +#define WDT_OK_TO_CLOSE 1 + +static void +wdt_enable(void) +{ + *IXP4XX_OSWK = IXP4XX_WDT_KEY; + *IXP4XX_OSWE = 0; + *IXP4XX_OSWT = WDT_TICK_RATE * heartbeat; + *IXP4XX_OSWE = IXP4XX_WDT_COUNT_ENABLE | IXP4XX_WDT_RESET_ENABLE; + *IXP4XX_OSWK = 0; +} + +static void +wdt_disable(void) +{ + *IXP4XX_OSWK = IXP4XX_WDT_KEY; + *IXP4XX_OSWE = 0; + *IXP4XX_OSWK = 0; +} + +static int +ixp4xx_wdt_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(WDT_IN_USE, &wdt_status)) + return -EBUSY; + + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + wdt_enable(); + + return 0; +} + +static ssize_t +ixp4xx_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) +{ + /* Can't seek (pwrite) on this device */ + if (ppos != &file->f_pos) + return -ESPIPE; + + if (len) { + if (!nowayout) { + size_t i; + + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + for (i = 0; i != len; i++) { + char c; + + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + set_bit(WDT_OK_TO_CLOSE, &wdt_status); + } + } + wdt_enable(); + } + + return len; +} + +static struct watchdog_info ident = { + .options = WDIOF_CARDRESET | WDIOF_MAGICCLOSE | + WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .identity = "IXP4xx Watchdog", +}; + + +static int +ixp4xx_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret = -ENOIOCTLCMD; + int time; + + switch (cmd) { + case WDIOC_GETSUPPORT: + ret = copy_to_user((struct watchdog_info *)arg, &ident, + sizeof(ident)) ? -EFAULT : 0; + break; + + case WDIOC_GETSTATUS: + ret = put_user(0, (int *)arg); + break; + + case WDIOC_GETBOOTSTATUS: + ret = put_user(boot_status, (int *)arg); + break; + + case WDIOC_SETTIMEOUT: + ret = get_user(time, (int *)arg); + if (ret) + break; + + if (time <= 0 || time > 60) { + ret = -EINVAL; + break; + } + + heartbeat = time; + wdt_enable(); + /* Fall through */ + + case WDIOC_GETTIMEOUT: + ret = put_user(heartbeat, (int *)arg); + break; + + case WDIOC_KEEPALIVE: + wdt_enable(); + ret = 0; + break; + } + return ret; +} + +static int +ixp4xx_wdt_release(struct inode *inode, struct file *file) +{ + if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) { + wdt_disable(); + } else { + printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " + "timer will not stop\n"); + } + + clear_bit(WDT_IN_USE, &wdt_status); + clear_bit(WDT_OK_TO_CLOSE, &wdt_status); + + return 0; +} + + +static struct file_operations ixp4xx_wdt_fops = +{ + .owner = THIS_MODULE, + .write = ixp4xx_wdt_write, + .ioctl = ixp4xx_wdt_ioctl, + .open = ixp4xx_wdt_open, + .release = ixp4xx_wdt_release, +}; + +static struct miscdevice ixp4xx_wdt_miscdev = +{ + .minor = WATCHDOG_MINOR, + .name = "IXP4xx Watchdog", + .fops = &ixp4xx_wdt_fops, +}; + +static int __init ixp4xx_wdt_init(void) +{ + int ret; + unsigned long processor_id; + + asm("mrc p15, 0, %0, cr0, cr0, 0;" : "=r"(processor_id) :); + if (!(processor_id & 0xf)) { + printk("IXP4xx Watchdog: Rev.
A0 CPU detected - " + "watchdog disabled\n"); + + return -ENODEV; + } + + ret = misc_register(&ixp4xx_wdt_miscdev); + if (ret == 0) + printk("IXP4xx Watchdog Timer: heartbeat %d sec\n", heartbeat); + + boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ? + WDIOF_CARDRESET : 0; + + return ret; +} + +static void __exit ixp4xx_wdt_exit(void) +{ + misc_deregister(&ixp4xx_wdt_miscdev); +} + + +module_init(ixp4xx_wdt_init); +module_exit(ixp4xx_wdt_exit); + +MODULE_AUTHOR("Deepak Saxena ); +MODULE_DESCRIPTION("IXP4xx Network Processor Watchdog"); + +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 60s)"); + +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); + diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c new file mode 100644 index 000000000..61457c505 --- /dev/null +++ b/drivers/firmware/pcdp.c @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2002, 2003, 2004 Hewlett-Packard Co. + * Khalid Aziz + * Alex Williamson + * Bjorn Helgaas + * + * Parse the EFI PCDP table to locate the console device. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "pcdp.h" + +static inline int +uart_irq_supported(int rev, struct pcdp_uart *uart) +{ + if (rev < 3) + return uart->pci_func & PCDP_UART_IRQ; + return uart->flags & PCDP_UART_IRQ; +} + +static inline int +uart_pci(int rev, struct pcdp_uart *uart) +{ + if (rev < 3) + return uart->pci_func & PCDP_UART_PCI; + return uart->flags & PCDP_UART_PCI; +} + +static inline int +uart_active_high_low(int rev, struct pcdp_uart *uart) +{ + if (uart_pci(rev, uart) || uart->flags & PCDP_UART_ACTIVE_LOW) + return ACPI_ACTIVE_LOW; + return ACPI_ACTIVE_HIGH; +} + +static inline int +uart_edge_level(int rev, struct pcdp_uart *uart) +{ + if (uart_pci(rev, uart)) + return ACPI_LEVEL_SENSITIVE; + if (rev < 3 || uart->flags & PCDP_UART_EDGE_SENSITIVE) + return ACPI_EDGE_SENSITIVE; + return ACPI_LEVEL_SENSITIVE; +} + +static void __init +setup_serial_console(int rev, struct pcdp_uart *uart) +{ +#ifdef CONFIG_SERIAL_8250_CONSOLE + struct uart_port port; + static char options[16]; + int mapsize = 64; + + memset(&port, 0, sizeof(port)); + port.uartclk = uart->clock_rate; + if (!port.uartclk) /* some FW doesn't supply this */ + port.uartclk = BASE_BAUD * 16; + + if (uart->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + port.mapbase = uart->addr.address; + port.membase = ioremap(port.mapbase, mapsize); + if (!port.membase) { + printk(KERN_ERR "%s: couldn't ioremap 0x%lx-0x%lx\n", + __FUNCTION__, port.mapbase, port.mapbase + mapsize); + return; + } + port.iotype = UPIO_MEM; + } else if (uart->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + port.iobase = uart->addr.address; + port.iotype = UPIO_PORT; + } else + return; + + switch (uart->pci_prog_intfc) { + case 0x0: port.type = PORT_8250; break; + case 0x1: port.type = PORT_16450; break; + case 0x2: port.type = PORT_16550; break; + case 0x3: port.type = PORT_16650; break; + case 0x4: port.type = PORT_16750; break; + case 0x5: port.type = PORT_16850; break; + case 0x6: port.type = PORT_16C950; break; + default: port.type = PORT_UNKNOWN; break; + } + + port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; + + if (uart_irq_supported(rev, uart)) { + port.irq = acpi_register_gsi(uart->gsi, + uart_active_high_low(rev, uart), + uart_edge_level(rev, uart)); + port.flags |= UPF_AUTO_IRQ; /* some FW reported 
wrong GSI */ + if (uart_pci(rev, uart)) + port.flags |= UPF_SHARE_IRQ; + } + + if (early_serial_setup(&port) < 0) + return; + + snprintf(options, sizeof(options), "%lun%d", uart->baud, + uart->bits ? uart->bits : 8); + add_preferred_console("ttyS", port.line, options); + + printk(KERN_INFO "PCDP: serial console at %s 0x%lx (ttyS%d, options %s)\n", + port.iotype == UPIO_MEM ? "MMIO" : "I/O", + uart->addr.address, port.line, options); +#endif +} + +static void __init +setup_vga_console(struct pcdp_vga *vga) +{ +#ifdef CONFIG_VT +#ifdef CONFIG_VGA_CONSOLE + if (efi_mem_type(0xA0000) == EFI_CONVENTIONAL_MEMORY) { + printk(KERN_ERR "PCDP: VGA selected, but frame buffer is not MMIO!\n"); + return; + } + + conswitchp = &vga_con; + printk(KERN_INFO "PCDP: VGA console\n"); +#endif +#endif +} + +void __init +efi_setup_pcdp_console(char *cmdline) +{ + struct pcdp *pcdp; + struct pcdp_uart *uart; + struct pcdp_device *dev, *end; + int i, serial = 0; + + pcdp = efi.hcdp; + if (!pcdp) + return; + + printk(KERN_INFO "PCDP: v%d at 0x%p\n", pcdp->rev, pcdp); + + if (pcdp->rev < 3) { + if (strstr(cmdline, "console=ttyS0") || efi_uart_console_only()) + serial = 1; + } + + for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { + if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) { + if (uart->type == PCDP_CONSOLE_UART) { + setup_serial_console(pcdp->rev, uart); + return; + } + } + } + + end = (struct pcdp_device *) ((u8 *) pcdp + pcdp->length); + for (dev = (struct pcdp_device *) (pcdp->uart + pcdp->num_uarts); + dev < end; + dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) { + if (dev->flags & PCDP_PRIMARY_CONSOLE) { + if (dev->type == PCDP_CONSOLE_VGA) { + setup_vga_console((struct pcdp_vga *) dev); + return; + } + } + } +} + +#ifdef CONFIG_IA64_EARLY_PRINTK_UART +unsigned long +hcdp_early_uart (void) +{ + efi_system_table_t *systab; + efi_config_table_t *config_tables; + unsigned long addr = 0; + struct pcdp *pcdp = 0; + struct pcdp_uart *uart; + int i; + + systab = (efi_system_table_t *) ia64_boot_param->efi_systab; + if (!systab) + return 0; + systab = __va(systab); + + config_tables = (efi_config_table_t *) systab->tables; + if (!config_tables) + return 0; + config_tables = __va(config_tables); + + for (i = 0; i < systab->nr_tables; i++) { + if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { + pcdp = (struct pcdp *) config_tables[i].table; + break; + } + } + if (!pcdp) + return 0; + pcdp = __va(pcdp); + + for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { + if (uart->type == PCDP_CONSOLE_UART) { + addr = uart->addr.address; + break; + } + } + return addr; +} +#endif /* CONFIG_IA64_EARLY_PRINTK_UART */ diff --git a/drivers/firmware/pcdp.h b/drivers/firmware/pcdp.h new file mode 100644 index 000000000..4217c3b1c --- /dev/null +++ b/drivers/firmware/pcdp.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2002, 2004 Hewlett-Packard Co. 
+ * Khalid Aziz + * Bjorn Helgaas + * + * Definitions for PCDP-defined console devices + * + * v1.0a: http://www.dig64.org/specifications/DIG64_HCDPv10a_01.pdf + * v2.0: http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf + */ + +#define PCDP_CONSOLE 0 +#define PCDP_DEBUG 1 +#define PCDP_CONSOLE_OUTPUT 2 +#define PCDP_CONSOLE_INPUT 3 + +#define PCDP_UART (0 << 3) +#define PCDP_VGA (1 << 3) +#define PCDP_USB (2 << 3) + +/* pcdp_uart.type and pcdp_device.type */ +#define PCDP_CONSOLE_UART (PCDP_UART | PCDP_CONSOLE) +#define PCDP_DEBUG_UART (PCDP_UART | PCDP_DEBUG) +#define PCDP_CONSOLE_VGA (PCDP_VGA | PCDP_CONSOLE_OUTPUT) +#define PCDP_CONSOLE_USB (PCDP_USB | PCDP_CONSOLE_INPUT) + +/* pcdp_uart.flags */ +#define PCDP_UART_EDGE_SENSITIVE (1 << 0) +#define PCDP_UART_ACTIVE_LOW (1 << 1) +#define PCDP_UART_PRIMARY_CONSOLE (1 << 2) +#define PCDP_UART_IRQ (1 << 6) /* in pci_func for rev < 3 */ +#define PCDP_UART_PCI (1 << 7) /* in pci_func for rev < 3 */ + +struct pcdp_uart { + u8 type; + u8 bits; + u8 parity; + u8 stop_bits; + u8 pci_seg; + u8 pci_bus; + u8 pci_dev; + u8 pci_func; + u64 baud; + struct acpi_generic_address addr; + u16 pci_dev_id; + u16 pci_vendor_id; + u32 gsi; + u32 clock_rate; + u8 pci_prog_intfc; + u8 flags; +}; + +struct pcdp_vga { + u8 count; /* address space descriptors */ +}; + +/* pcdp_device.flags */ +#define PCDP_PRIMARY_CONSOLE 1 + +struct pcdp_device { + u8 type; + u8 flags; + u16 length; + u16 efi_index; +}; + +struct pcdp { + u8 signature[4]; + u32 length; + u8 rev; /* PCDP v2.0 is rev 3 */ + u8 chksum; + u8 oemid[6]; + u8 oem_tabid[8]; + u32 oem_rev; + u8 creator_id[4]; + u32 creator_rev; + u32 num_uarts; + struct pcdp_uart uart[0]; /* actual size is num_uarts */ + /* remainder of table is pcdp_device structures */ +}; diff --git a/drivers/i2c/busses/i2c-ixp4xx.c b/drivers/i2c/busses/i2c-ixp4xx.c new file mode 100644 index 000000000..d8bfd59c1 --- /dev/null +++ b/drivers/i2c/busses/i2c-ixp4xx.c @@ -0,0 +1,181 @@ +/* + * drivers/i2c/i2c-adap-ixp4xx.c + * + * Intel's IXP4xx XScale NPU chipsets (IXP420, 421, 422, 425) do not have + * an on board I2C controller but provide 16 GPIO pins that are often + * used to create an I2C bus. This driver provides an i2c_adapter + * interface that plugs in under algo_bit and drives the GPIO pins + * as instructed by the alogorithm driver. + * + * Author: Deepak Saxena + * + * Copyright (c) 2003-2004 MontaVista Software Inc. + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * NOTE: Since different platforms will use different GPIO pins for + * I2C, this driver uses an IXP4xx-specific platform_data + * pointer to pass the GPIO numbers to the driver. This + * allows us to support all the different IXP4xx platforms + * w/o having to put #ifdefs in this driver. + * + * See arch/arm/mach-ixp4xx/ixdp425.c for an example of building a + * device list and filling in the ixp4xx_i2c_pins data structure + * that is passed as the platform_data to this driver. 
+ */ + +#include +#ifdef CONFIG_I2C_DEBUG_BUS +#define DEBUG 1 +#endif + +#include +#include +#include +#include +#include +#include + +#include /* Pick up IXP4xx-specific bits */ + +static inline int ixp4xx_scl_pin(void *data) +{ + return ((struct ixp4xx_i2c_pins*)data)->scl_pin; +} + +static inline int ixp4xx_sda_pin(void *data) +{ + return ((struct ixp4xx_i2c_pins*)data)->sda_pin; +} + +static void ixp4xx_bit_setscl(void *data, int val) +{ + gpio_line_set(ixp4xx_scl_pin(data), 0); + gpio_line_config(ixp4xx_scl_pin(data), + val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT ); +} + +static void ixp4xx_bit_setsda(void *data, int val) +{ + gpio_line_set(ixp4xx_sda_pin(data), 0); + gpio_line_config(ixp4xx_sda_pin(data), + val ? IXP4XX_GPIO_IN : IXP4XX_GPIO_OUT ); +} + +static int ixp4xx_bit_getscl(void *data) +{ + int scl; + + gpio_line_config(ixp4xx_scl_pin(data), IXP4XX_GPIO_IN ); + gpio_line_get(ixp4xx_scl_pin(data), &scl); + + return scl; +} + +static int ixp4xx_bit_getsda(void *data) +{ + int sda; + + gpio_line_config(ixp4xx_sda_pin(data), IXP4XX_GPIO_IN ); + gpio_line_get(ixp4xx_sda_pin(data), &sda); + + return sda; +} + +struct ixp4xx_i2c_data { + struct ixp4xx_i2c_pins *gpio_pins; + struct i2c_adapter adapter; + struct i2c_algo_bit_data algo_data; +}; + +static int ixp4xx_i2c_remove(struct device *dev) +{ + struct platform_device *plat_dev = to_platform_device(dev); + struct ixp4xx_i2c_data *drv_data = dev_get_drvdata(&plat_dev->dev); + + dev_set_drvdata(&plat_dev->dev, NULL); + + i2c_bit_del_bus(&drv_data->adapter); + + kfree(drv_data); + + return 0; +} + +static int ixp4xx_i2c_probe(struct device *dev) +{ + int err; + struct platform_device *plat_dev = to_platform_device(dev); + struct ixp4xx_i2c_pins *gpio = plat_dev->dev.platform_data; + struct ixp4xx_i2c_data *drv_data = + kmalloc(sizeof(struct ixp4xx_i2c_data), GFP_KERNEL); + + if(!drv_data) + return -ENOMEM; + + memzero(drv_data, sizeof(struct ixp4xx_i2c_data)); + drv_data->gpio_pins = gpio; + + /* + * We could make a lot of these structures static, but + * certain platforms may have multiple GPIO-based I2C + * buses for various device domains, so we need per-device + * algo_data->data. 
+ */ + drv_data->algo_data.data = gpio; + drv_data->algo_data.setsda = ixp4xx_bit_setsda; + drv_data->algo_data.setscl = ixp4xx_bit_setscl; + drv_data->algo_data.getsda = ixp4xx_bit_getsda; + drv_data->algo_data.getscl = ixp4xx_bit_getscl; + drv_data->algo_data.udelay = 10; + drv_data->algo_data.mdelay = 10; + drv_data->algo_data.timeout = 100; + + drv_data->adapter.id = I2C_HW_B_IXP4XX, + drv_data->adapter.algo_data = &drv_data->algo_data, + + drv_data->adapter.dev.parent = &plat_dev->dev; + + gpio_line_config(gpio->scl_pin, IXP4XX_GPIO_IN); + gpio_line_config(gpio->sda_pin, IXP4XX_GPIO_IN); + gpio_line_set(gpio->scl_pin, 0); + gpio_line_set(gpio->sda_pin, 0); + + if ((err = i2c_bit_add_bus(&drv_data->adapter) != 0)) { + printk(KERN_ERR "ERROR: Could not install %s\n", dev->bus_id); + + kfree(drv_data); + return err; + } + + dev_set_drvdata(&plat_dev->dev, drv_data); + + return 0; +} + +static struct device_driver ixp4xx_i2c_driver = { + .name = "IXP4XX-I2C", + .bus = &platform_bus_type, + .probe = ixp4xx_i2c_probe, + .remove = ixp4xx_i2c_remove, +}; + +static int __init ixp4xx_i2c_init(void) +{ + return driver_register(&ixp4xx_i2c_driver); +} + +static void __exit ixp4xx_i2c_exit(void) +{ + driver_unregister(&ixp4xx_i2c_driver); +} + +module_init(ixp4xx_i2c_init); +module_exit(ixp4xx_i2c_exit); + +MODULE_DESCRIPTION("GPIO-based I2C adapter for IXP4xx systems"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Deepak Saxena "); + diff --git a/drivers/i2c/chips/adm1025.c b/drivers/i2c/chips/adm1025.c new file mode 100644 index 000000000..d38336fbc --- /dev/null +++ b/drivers/i2c/chips/adm1025.c @@ -0,0 +1,570 @@ +/* + * adm1025.c + * + * Copyright (C) 2000 Chen-Yuan Wu + * Copyright (C) 2003-2004 Jean Delvare + * + * The ADM1025 is a sensor chip made by Analog Devices. It reports up to 6 + * voltages (including its own power source) and up to two temperatures + * (its own plus up to one external one). Voltages are scaled internally + * (which is not the common way) with ratios such that the nominal value + * of each voltage correspond to a register value of 192 (which means a + * resolution of about 0.5% of the nominal value). Temperature values are + * reported with a 1 deg resolution and a 3 deg accuracy. Complete + * datasheet can be obtained from Analog's website at: + * http://www.analog.com/Analog_Root/productPage/productHome/0,2121,ADM1025,00.html + * + * This driver also supports the ADM1025A, which differs from the ADM1025 + * only in that it has "open-drain VID inputs while the ADM1025 has + * on-chip 100k pull-ups on the VID inputs". It doesn't make any + * difference for us. + * + * This driver also supports the NE1619, a sensor chip made by Philips. + * That chip is similar to the ADM1025A, with a few differences. The only + * difference that matters to us is that the NE1619 has only two possible + * addresses while the ADM1025A has a third one. Complete datasheet can be + * obtained from Philips's website at: + * http://www.semiconductors.philips.com/pip/NE1619DS.html + * + * Since the ADM1025 was the first chipset supported by this driver, most + * comments will refer to this chipset, but are actually general and + * concern all supported chipsets, unless mentioned otherwise. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Addresses to scan + * ADM1025 and ADM1025A have three possible addresses: 0x2c, 0x2d and 0x2e. + * NE1619 has two possible addresses: 0x2c and 0x2d. + */ + +static unsigned short normal_i2c[] = { I2C_CLIENT_END }; +static unsigned short normal_i2c_range[] = { 0x2c, 0x2e, I2C_CLIENT_END }; +static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END }; +static unsigned int normal_isa_range[] = { I2C_CLIENT_ISA_END }; + +/* + * Insmod parameters + */ + +SENSORS_INSMOD_2(adm1025, ne1619); + +/* + * The ADM1025 registers + */ + +#define ADM1025_REG_MAN_ID 0x3E +#define ADM1025_REG_CHIP_ID 0x3F +#define ADM1025_REG_CONFIG 0x40 +#define ADM1025_REG_STATUS1 0x41 +#define ADM1025_REG_STATUS2 0x42 +#define ADM1025_REG_IN(nr) (0x20 + (nr)) +#define ADM1025_REG_IN_MAX(nr) (0x2B + (nr) * 2) +#define ADM1025_REG_IN_MIN(nr) (0x2C + (nr) * 2) +#define ADM1025_REG_TEMP(nr) (0x26 + (nr)) +#define ADM1025_REG_TEMP_HIGH(nr) (0x37 + (nr) * 2) +#define ADM1025_REG_TEMP_LOW(nr) (0x38 + (nr) * 2) +#define ADM1025_REG_VID 0x47 +#define ADM1025_REG_VID4 0x49 + +/* + * Conversions and various macros + * The ADM1025 uses signed 8-bit values for temperatures. + */ + +static int in_scale[6] = { 2500, 2250, 3300, 5000, 12000, 3300 }; + +#define IN_FROM_REG(reg,scale) (((reg) * (scale) + 96) / 192) +#define IN_TO_REG(val,scale) ((val) <= 0 ? 0 : \ + (val) * 192 >= (scale) * 255 ? 255 : \ + ((val) * 192 + (scale)/2) / (scale)) + +#define TEMP_FROM_REG(reg) ((reg) * 1000) +#define TEMP_TO_REG(val) ((val) <= -127500 ? -128 : \ + (val) >= 126500 ? 127 : \ + (((val) < 0 ? 
(val)-500 : (val)+500) / 1000)) + +/* + * Functions declaration + */ + +static int adm1025_attach_adapter(struct i2c_adapter *adapter); +static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind); +static void adm1025_init_client(struct i2c_client *client); +static int adm1025_detach_client(struct i2c_client *client); +static struct adm1025_data *adm1025_update_device(struct device *dev); + +/* + * Driver data (common to all clients) + */ + +static struct i2c_driver adm1025_driver = { + .owner = THIS_MODULE, + .name = "adm1025", + .id = I2C_DRIVERID_ADM1025, + .flags = I2C_DF_NOTIFY, + .attach_adapter = adm1025_attach_adapter, + .detach_client = adm1025_detach_client, +}; + +/* + * Client data (each client gets its own) + */ + +struct adm1025_data { + struct i2c_client client; + struct semaphore update_lock; + char valid; /* zero until following fields are valid */ + unsigned long last_updated; /* in jiffies */ + + u8 in[6]; /* register value */ + u8 in_max[6]; /* register value */ + u8 in_min[6]; /* register value */ + s8 temp[2]; /* register value */ + s8 temp_min[2]; /* register value */ + s8 temp_max[2]; /* register value */ + u16 alarms; /* register values, combined */ + u8 vid; /* register values, combined */ + u8 vrm; +}; + +/* + * Internal variables + */ + +static int adm1025_id = 0; + +/* + * Sysfs stuff + */ + +#define show_in(offset) \ +static ssize_t show_in##offset(struct device *dev, char *buf) \ +{ \ + struct adm1025_data *data = adm1025_update_device(dev); \ + return sprintf(buf, "%u\n", IN_FROM_REG(data->in[offset], \ + in_scale[offset])); \ +} \ +static ssize_t show_in##offset##_min(struct device *dev, char *buf) \ +{ \ + struct adm1025_data *data = adm1025_update_device(dev); \ + return sprintf(buf, "%u\n", IN_FROM_REG(data->in_min[offset], \ + in_scale[offset])); \ +} \ +static ssize_t show_in##offset##_max(struct device *dev, char *buf) \ +{ \ + struct adm1025_data *data = adm1025_update_device(dev); \ + return sprintf(buf, "%u\n", IN_FROM_REG(data->in_max[offset], \ + in_scale[offset])); \ +} \ +static DEVICE_ATTR(in##offset##_input, S_IRUGO, show_in##offset, NULL); +show_in(0); +show_in(1); +show_in(2); +show_in(3); +show_in(4); +show_in(5); + +#define show_temp(offset) \ +static ssize_t show_temp##offset(struct device *dev, char *buf) \ +{ \ + struct adm1025_data *data = adm1025_update_device(dev); \ + return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[offset-1])); \ +} \ +static ssize_t show_temp##offset##_min(struct device *dev, char *buf) \ +{ \ + struct adm1025_data *data = adm1025_update_device(dev); \ + return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[offset-1])); \ +} \ +static ssize_t show_temp##offset##_max(struct device *dev, char *buf) \ +{ \ + struct adm1025_data *data = adm1025_update_device(dev); \ + return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[offset-1])); \ +}\ +static DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp##offset, NULL); +show_temp(1); +show_temp(2); + +#define set_in(offset) \ +static ssize_t set_in##offset##_min(struct device *dev, const char *buf, \ + size_t count) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct adm1025_data *data = i2c_get_clientdata(client); \ + data->in_min[offset] = IN_TO_REG(simple_strtol(buf, NULL, 10), \ + in_scale[offset]); \ + i2c_smbus_write_byte_data(client, ADM1025_REG_IN_MIN(offset), \ + data->in_min[offset]); \ + return count; \ +} \ +static ssize_t set_in##offset##_max(struct device *dev, const char *buf, \ + size_t count) \ +{ \ + struct 
i2c_client *client = to_i2c_client(dev); \ + struct adm1025_data *data = i2c_get_clientdata(client); \ + data->in_max[offset] = IN_TO_REG(simple_strtol(buf, NULL, 10), \ + in_scale[offset]); \ + i2c_smbus_write_byte_data(client, ADM1025_REG_IN_MAX(offset), \ + data->in_max[offset]); \ + return count; \ +} \ +static DEVICE_ATTR(in##offset##_min, S_IWUSR | S_IRUGO, \ + show_in##offset##_min, set_in##offset##_min); \ +static DEVICE_ATTR(in##offset##_max, S_IWUSR | S_IRUGO, \ + show_in##offset##_max, set_in##offset##_max); +set_in(0); +set_in(1); +set_in(2); +set_in(3); +set_in(4); +set_in(5); + +#define set_temp(offset) \ +static ssize_t set_temp##offset##_min(struct device *dev, const char *buf, \ + size_t count) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct adm1025_data *data = i2c_get_clientdata(client); \ + data->temp_min[offset-1] = TEMP_TO_REG(simple_strtol(buf, NULL, 10)); \ + i2c_smbus_write_byte_data(client, ADM1025_REG_TEMP_LOW(offset-1), \ + data->temp_min[offset-1]); \ + return count; \ +} \ +static ssize_t set_temp##offset##_max(struct device *dev, const char *buf, \ + size_t count) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct adm1025_data *data = i2c_get_clientdata(client); \ + data->temp_max[offset-1] = TEMP_TO_REG(simple_strtol(buf, NULL, 10)); \ + i2c_smbus_write_byte_data(client, ADM1025_REG_TEMP_HIGH(offset-1), \ + data->temp_max[offset-1]); \ + return count; \ +} \ +static DEVICE_ATTR(temp##offset##_min, S_IWUSR | S_IRUGO, \ + show_temp##offset##_min, set_temp##offset##_min); \ +static DEVICE_ATTR(temp##offset##_max, S_IWUSR | S_IRUGO, \ + show_temp##offset##_max, set_temp##offset##_max); +set_temp(1); +set_temp(2); + +static ssize_t show_alarms(struct device *dev, char *buf) +{ + struct adm1025_data *data = adm1025_update_device(dev); + return sprintf(buf, "%u\n", data->alarms); +} +static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); + +static ssize_t show_vid(struct device *dev, char *buf) +{ + struct adm1025_data *data = adm1025_update_device(dev); + return sprintf(buf, "%u\n", vid_from_reg(data->vid, data->vrm)); +} +static DEVICE_ATTR(in1_ref, S_IRUGO, show_vid, NULL); + +static ssize_t show_vrm(struct device *dev, char *buf) +{ + struct adm1025_data *data = adm1025_update_device(dev); + return sprintf(buf, "%u\n", data->vrm); +} +static ssize_t set_vrm(struct device *dev, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1025_data *data = i2c_get_clientdata(client); + data->vrm = simple_strtoul(buf, NULL, 10); + return count; +} +static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm); + +/* + * Real code + */ + +static int adm1025_attach_adapter(struct i2c_adapter *adapter) +{ + if (!(adapter->class & I2C_CLASS_HWMON)) + return 0; + return i2c_detect(adapter, &addr_data, adm1025_detect); +} + +/* + * The following function does more than just detection. If detection + * succeeds, it also registers the new chip. + */ +static int adm1025_detect(struct i2c_adapter *adapter, int address, int kind) +{ + struct i2c_client *new_client; + struct adm1025_data *data; + int err = 0; + const char *name = ""; + u8 config; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + goto exit; + + if (!(data = kmalloc(sizeof(struct adm1025_data), GFP_KERNEL))) { + err = -ENOMEM; + goto exit; + } + memset(data, 0, sizeof(struct adm1025_data)); + + /* The common I2C client data is placed right before the + ADM1025-specific data. 
*/ + new_client = &data->client; + i2c_set_clientdata(new_client, data); + new_client->addr = address; + new_client->adapter = adapter; + new_client->driver = &adm1025_driver; + new_client->flags = 0; + + /* + * Now we do the remaining detection. A negative kind means that + * the driver was loaded with no force parameter (default), so we + * must both detect and identify the chip. A zero kind means that + * the driver was loaded with the force parameter, the detection + * step shall be skipped. A positive kind means that the driver + * was loaded with the force parameter and a given kind of chip is + * requested, so both the detection and the identification steps + * are skipped. + */ + config = i2c_smbus_read_byte_data(new_client, ADM1025_REG_CONFIG); + if (kind < 0) { /* detection */ + if ((config & 0x80) != 0x00 + || (i2c_smbus_read_byte_data(new_client, + ADM1025_REG_STATUS1) & 0xC0) != 0x00 + || (i2c_smbus_read_byte_data(new_client, + ADM1025_REG_STATUS2) & 0xBC) != 0x00) { + dev_dbg(&adapter->dev, + "ADM1025 detection failed at 0x%02x.\n", + address); + goto exit_free; + } + } + + if (kind <= 0) { /* identification */ + u8 man_id, chip_id; + + man_id = i2c_smbus_read_byte_data(new_client, + ADM1025_REG_MAN_ID); + chip_id = i2c_smbus_read_byte_data(new_client, + ADM1025_REG_CHIP_ID); + + if (man_id == 0x41) { /* Analog Devices */ + if ((chip_id & 0xF0) == 0x20) { /* ADM1025/ADM1025A */ + kind = adm1025; + } + } else + if (man_id == 0xA1) { /* Philips */ + if (address != 0x2E + && (chip_id & 0xF0) == 0x20) { /* NE1619 */ + kind = ne1619; + } + } + + if (kind <= 0) { /* identification failed */ + dev_info(&adapter->dev, + "Unsupported chip (man_id=0x%02X, " + "chip_id=0x%02X).\n", man_id, chip_id); + goto exit_free; + } + } + + if (kind == adm1025) { + name = "adm1025"; + } else if (kind == ne1619) { + name = "ne1619"; + } + + /* We can fill in the remaining client fields */ + strlcpy(new_client->name, name, I2C_NAME_SIZE); + new_client->id = adm1025_id++; + data->valid = 0; + init_MUTEX(&data->update_lock); + + /* Tell the I2C layer a new client has arrived */ + if ((err = i2c_attach_client(new_client))) + goto exit_free; + + /* Initialize the ADM1025 chip */ + adm1025_init_client(new_client); + + /* Register sysfs hooks */ + device_create_file(&new_client->dev, &dev_attr_in0_input); + device_create_file(&new_client->dev, &dev_attr_in1_input); + device_create_file(&new_client->dev, &dev_attr_in2_input); + device_create_file(&new_client->dev, &dev_attr_in3_input); + device_create_file(&new_client->dev, &dev_attr_in5_input); + device_create_file(&new_client->dev, &dev_attr_in0_min); + device_create_file(&new_client->dev, &dev_attr_in1_min); + device_create_file(&new_client->dev, &dev_attr_in2_min); + device_create_file(&new_client->dev, &dev_attr_in3_min); + device_create_file(&new_client->dev, &dev_attr_in5_min); + device_create_file(&new_client->dev, &dev_attr_in0_max); + device_create_file(&new_client->dev, &dev_attr_in1_max); + device_create_file(&new_client->dev, &dev_attr_in2_max); + device_create_file(&new_client->dev, &dev_attr_in3_max); + device_create_file(&new_client->dev, &dev_attr_in5_max); + device_create_file(&new_client->dev, &dev_attr_temp1_input); + device_create_file(&new_client->dev, &dev_attr_temp2_input); + device_create_file(&new_client->dev, &dev_attr_temp1_min); + device_create_file(&new_client->dev, &dev_attr_temp2_min); + device_create_file(&new_client->dev, &dev_attr_temp1_max); + device_create_file(&new_client->dev, &dev_attr_temp2_max); + 
device_create_file(&new_client->dev, &dev_attr_alarms); + device_create_file(&new_client->dev, &dev_attr_in1_ref); + device_create_file(&new_client->dev, &dev_attr_vrm); + + /* Pin 11 is either in4 (+12V) or VID4 */ + if (!(config & 0x20)) { + device_create_file(&new_client->dev, &dev_attr_in4_input); + device_create_file(&new_client->dev, &dev_attr_in4_min); + device_create_file(&new_client->dev, &dev_attr_in4_max); + } + + return 0; + +exit_free: + kfree(data); +exit: + return err; +} + +static void adm1025_init_client(struct i2c_client *client) +{ + u8 reg; + struct adm1025_data *data = i2c_get_clientdata(client); + int i; + + data->vrm = 82; + + /* + * Set high limits + * Usually we avoid setting limits on driver init, but it happens + * that the ADM1025 comes with stupid default limits (all registers + * set to 0). In case the chip has not gone through any limit + * setting yet, we better set the high limits to the max so that + * no alarm triggers. + */ + for (i=0; i<6; i++) { + reg = i2c_smbus_read_byte_data(client, + ADM1025_REG_IN_MAX(i)); + if (reg == 0) + i2c_smbus_write_byte_data(client, + ADM1025_REG_IN_MAX(i), + 0xFF); + } + for (i=0; i<2; i++) { + reg = i2c_smbus_read_byte_data(client, + ADM1025_REG_TEMP_HIGH(i)); + if (reg == 0) + i2c_smbus_write_byte_data(client, + ADM1025_REG_TEMP_HIGH(i), + 0x7F); + } + + /* + * Start the conversions + */ + reg = i2c_smbus_read_byte_data(client, ADM1025_REG_CONFIG); + if (!(reg & 0x01)) + i2c_smbus_write_byte_data(client, ADM1025_REG_CONFIG, + (reg&0x7E)|0x01); +} + +static int adm1025_detach_client(struct i2c_client *client) +{ + int err; + + if ((err = i2c_detach_client(client))) { + dev_err(&client->dev, "Client deregistration failed, " + "client not detached.\n"); + return err; + } + + kfree(i2c_get_clientdata(client)); + return 0; +} + +static struct adm1025_data *adm1025_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1025_data *data = i2c_get_clientdata(client); + + down(&data->update_lock); + + if ((jiffies - data->last_updated > HZ * 2) || + (jiffies < data->last_updated) || + !data->valid) { + int i; + + dev_dbg(&client->dev, "Updating data.\n"); + for (i=0; i<6; i++) { + data->in[i] = i2c_smbus_read_byte_data(client, + ADM1025_REG_IN(i)); + data->in_min[i] = i2c_smbus_read_byte_data(client, + ADM1025_REG_IN_MIN(i)); + data->in_max[i] = i2c_smbus_read_byte_data(client, + ADM1025_REG_IN_MAX(i)); + } + for (i=0; i<2; i++) { + data->temp[i] = i2c_smbus_read_byte_data(client, + ADM1025_REG_TEMP(i)); + data->temp_min[i] = i2c_smbus_read_byte_data(client, + ADM1025_REG_TEMP_LOW(i)); + data->temp_max[i] = i2c_smbus_read_byte_data(client, + ADM1025_REG_TEMP_HIGH(i)); + } + data->alarms = i2c_smbus_read_byte_data(client, + ADM1025_REG_STATUS1) + | (i2c_smbus_read_byte_data(client, + ADM1025_REG_STATUS2) << 8); + data->vid = (i2c_smbus_read_byte_data(client, + ADM1025_REG_VID) & 0x0f) + | ((i2c_smbus_read_byte_data(client, + ADM1025_REG_VID4) & 0x01) << 4); + + data->last_updated = jiffies; + data->valid = 1; + } + + up(&data->update_lock); + + return data; +} + +static int __init sensors_adm1025_init(void) +{ + return i2c_add_driver(&adm1025_driver); +} + +static void __exit sensors_adm1025_exit(void) +{ + i2c_del_driver(&adm1025_driver); +} + +MODULE_AUTHOR("Jean Delvare "); +MODULE_DESCRIPTION("ADM1025 driver"); +MODULE_LICENSE("GPL"); + +module_init(sensors_adm1025_init); +module_exit(sensors_adm1025_exit); diff --git a/drivers/i2c/chips/adm1031.c b/drivers/i2c/chips/adm1031.c new 
file mode 100644 index 000000000..23c323e80 --- /dev/null +++ b/drivers/i2c/chips/adm1031.c @@ -0,0 +1,985 @@ +/* + adm1031.c - Part of lm_sensors, Linux kernel modules for hardware + monitoring + Based on lm75.c and lm85.c + Supports adm1030 / adm1031 + Copyright (C) 2004 Alexandre d'Alton + Reworked by Jean Delvare + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include +#include +#include +#include +#include + +/* Following macros takes channel parameter starting from 0 to 2 */ +#define ADM1031_REG_FAN_SPEED(nr) (0x08 + (nr)) +#define ADM1031_REG_FAN_DIV(nr) (0x20 + (nr)) +#define ADM1031_REG_PWM (0x22) +#define ADM1031_REG_FAN_MIN(nr) (0x10 + (nr)) + +#define ADM1031_REG_TEMP_MAX(nr) (0x14 + 4*(nr)) +#define ADM1031_REG_TEMP_MIN(nr) (0x15 + 4*(nr)) +#define ADM1031_REG_TEMP_CRIT(nr) (0x16 + 4*(nr)) + +#define ADM1031_REG_TEMP(nr) (0xa + (nr)) +#define ADM1031_REG_AUTO_TEMP(nr) (0x24 + (nr)) + +#define ADM1031_REG_STATUS(nr) (0x2 + (nr)) + +#define ADM1031_REG_CONF1 0x0 +#define ADM1031_REG_CONF2 0x1 +#define ADM1031_REG_EXT_TEMP 0x6 + +#define ADM1031_CONF1_MONITOR_ENABLE 0x01 /* Monitoring enable */ +#define ADM1031_CONF1_PWM_INVERT 0x08 /* PWM Invert */ +#define ADM1031_CONF1_AUTO_MODE 0x80 /* Auto FAN */ + +#define ADM1031_CONF2_PWM1_ENABLE 0x01 +#define ADM1031_CONF2_PWM2_ENABLE 0x02 +#define ADM1031_CONF2_TACH1_ENABLE 0x04 +#define ADM1031_CONF2_TACH2_ENABLE 0x08 +#define ADM1031_CONF2_TEMP_ENABLE(chan) (0x10 << (chan)) + +/* Addresses to scan */ +static unsigned short normal_i2c[] = { I2C_CLIENT_END }; +static unsigned short normal_i2c_range[] = { 0x2c, 0x2e, I2C_CLIENT_END }; +static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END }; +static unsigned int normal_isa_range[] = { I2C_CLIENT_ISA_END }; + +/* Insmod parameters */ +SENSORS_INSMOD_2(adm1030, adm1031); + +typedef u8 auto_chan_table_t[8][2]; + +/* Each client has this additional data */ +struct adm1031_data { + struct i2c_client client; + struct semaphore update_lock; + int chip_type; + char valid; /* !=0 if following fields are valid */ + unsigned long last_updated; /* In jiffies */ + /* The chan_select_table contains the possible configurations for + * auto fan control. 
+ */ + auto_chan_table_t *chan_select_table; + u16 alarm; + u8 conf1; + u8 conf2; + u8 fan[2]; + u8 fan_div[2]; + u8 fan_min[2]; + u8 pwm[2]; + u8 old_pwm[2]; + s8 temp[3]; + u8 ext_temp[3]; + u8 auto_temp[3]; + u8 auto_temp_min[3]; + u8 auto_temp_off[3]; + u8 auto_temp_max[3]; + s8 temp_min[3]; + s8 temp_max[3]; + s8 temp_crit[3]; +}; + +static int adm1031_attach_adapter(struct i2c_adapter *adapter); +static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind); +static void adm1031_init_client(struct i2c_client *client); +static int adm1031_detach_client(struct i2c_client *client); +static struct adm1031_data *adm1031_update_device(struct device *dev); + +/* This is the driver that will be inserted */ +static struct i2c_driver adm1031_driver = { + .owner = THIS_MODULE, + .name = "adm1031", + .flags = I2C_DF_NOTIFY, + .attach_adapter = adm1031_attach_adapter, + .detach_client = adm1031_detach_client, +}; + +static int adm1031_id; + +static inline u8 adm1031_read_value(struct i2c_client *client, u8 reg) +{ + return i2c_smbus_read_byte_data(client, reg); +} + +static inline int +adm1031_write_value(struct i2c_client *client, u8 reg, unsigned int value) +{ + return i2c_smbus_write_byte_data(client, reg, value); +} + + +#define TEMP_TO_REG(val) (((val) < 0 ? ((val - 500) / 1000) : \ + ((val + 500) / 1000))) + +#define TEMP_FROM_REG(val) ((val) * 1000) + +#define TEMP_FROM_REG_EXT(val, ext) (TEMP_FROM_REG(val) + (ext) * 125) + +#define FAN_FROM_REG(reg, div) ((reg) ? (11250 * 60) / ((reg) * (div)) : 0) + +static int FAN_TO_REG(int reg, int div) +{ + int tmp; + tmp = FAN_FROM_REG(SENSORS_LIMIT(reg, 0, 65535), div); + return tmp > 255 ? 255 : tmp; +} + +#define FAN_DIV_FROM_REG(reg) (1<<(((reg)&0xc0)>>6)) + +#define PWM_TO_REG(val) (SENSORS_LIMIT((val), 0, 255) >> 4) +#define PWM_FROM_REG(val) ((val) << 4) + +#define FAN_CHAN_FROM_REG(reg) (((reg) >> 5) & 7) +#define FAN_CHAN_TO_REG(val, reg) \ + (((reg) & 0x1F) | (((val) << 5) & 0xe0)) + +#define AUTO_TEMP_MIN_TO_REG(val, reg) \ + ((((val)/500) & 0xf8)|((reg) & 0x7)) +#define AUTO_TEMP_RANGE_FROM_REG(reg) (5000 * (1<< ((reg)&0x7))) +#define AUTO_TEMP_MIN_FROM_REG(reg) (1000 * ((((reg) >> 3) & 0x1f) << 2)) + +#define AUTO_TEMP_MIN_FROM_REG_DEG(reg) ((((reg) >> 3) & 0x1f) << 2) + +#define AUTO_TEMP_OFF_FROM_REG(reg) \ + (AUTO_TEMP_MIN_FROM_REG(reg) - 5000) + +#define AUTO_TEMP_MAX_FROM_REG(reg) \ + (AUTO_TEMP_RANGE_FROM_REG(reg) + \ + AUTO_TEMP_MIN_FROM_REG(reg)) + +static int AUTO_TEMP_MAX_TO_REG(int val, int reg, int pwm) +{ + int ret; + int range = val - AUTO_TEMP_MIN_FROM_REG(reg); + + range = ((val - AUTO_TEMP_MIN_FROM_REG(reg))*10)/(16 - pwm); + ret = ((reg & 0xf8) | + (range < 10000 ? 0 : + range < 20000 ? 1 : + range < 40000 ? 2 : range < 80000 ? 3 : 4)); + return ret; +} + +/* FAN auto control */ +#define GET_FAN_AUTO_BITFIELD(data, idx) \ + (*(data)->chan_select_table)[FAN_CHAN_FROM_REG((data)->conf1)][idx%2] + +/* The tables below contains the possible values for the auto fan + * control bitfields. the index in the table is the register value. + * MSb is the auto fan control enable bit, so the four first entries + * in the table disables auto fan control when both bitfields are zero. 
+ */ +static auto_chan_table_t auto_channel_select_table_adm1031 = { + {0, 0}, {0, 0}, {0, 0}, {0, 0}, + {2 /*0b010 */ , 4 /*0b100 */ }, + {2 /*0b010 */ , 2 /*0b010 */ }, + {4 /*0b100 */ , 4 /*0b100 */ }, + {7 /*0b111 */ , 7 /*0b111 */ }, +}; + +static auto_chan_table_t auto_channel_select_table_adm1030 = { + {0, 0}, {0, 0}, {0, 0}, {0, 0}, + {2 /*0b10 */ , 0}, + {0xff /*invalid */ , 0}, + {0xff /*invalid */ , 0}, + {3 /*0b11 */ , 0}, +}; + +/* That function checks if a bitfield is valid and returns the other bitfield + * nearest match if no exact match where found. + */ +static int +get_fan_auto_nearest(struct adm1031_data *data, + int chan, u8 val, u8 reg, u8 * new_reg) +{ + int i; + int first_match = -1, exact_match = -1; + u8 other_reg_val = + (*data->chan_select_table)[FAN_CHAN_FROM_REG(reg)][chan ? 0 : 1]; + + if (val == 0) { + *new_reg = 0; + return 0; + } + + for (i = 0; i < 8; i++) { + if ((val == (*data->chan_select_table)[i][chan]) && + ((*data->chan_select_table)[i][chan ? 0 : 1] == + other_reg_val)) { + /* We found an exact match */ + exact_match = i; + break; + } else if (val == (*data->chan_select_table)[i][chan] && + first_match == -1) { + /* Save the first match in case of an exact match has not been + * found + */ + first_match = i; + } + } + + if (exact_match >= 0) { + *new_reg = exact_match; + } else if (first_match >= 0) { + *new_reg = first_match; + } else { + return -EINVAL; + } + return 0; +} + +static ssize_t show_fan_auto_channel(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", GET_FAN_AUTO_BITFIELD(data, nr)); +} + +static ssize_t +set_fan_auto_channel(struct device *dev, const char *buf, size_t count, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + int val; + u8 reg; + int ret; + u8 old_fan_mode; + + old_fan_mode = data->conf1; + + down(&data->update_lock); + val = simple_strtol(buf, NULL, 10); + + if ((ret = get_fan_auto_nearest(data, nr, val, data->conf1, ®))) { + up(&data->update_lock); + return ret; + } + if (((data->conf1 = FAN_CHAN_TO_REG(reg, data->conf1)) & ADM1031_CONF1_AUTO_MODE) ^ + (old_fan_mode & ADM1031_CONF1_AUTO_MODE)) { + if (data->conf1 & ADM1031_CONF1_AUTO_MODE){ + /* Switch to Auto Fan Mode + * Save PWM registers + * Set PWM registers to 33% Both */ + data->old_pwm[0] = data->pwm[0]; + data->old_pwm[1] = data->pwm[1]; + adm1031_write_value(client, ADM1031_REG_PWM, 0x55); + } else { + /* Switch to Manual Mode */ + data->pwm[0] = data->old_pwm[0]; + data->pwm[1] = data->old_pwm[1]; + /* Restore PWM registers */ + adm1031_write_value(client, ADM1031_REG_PWM, + data->pwm[0] | (data->pwm[1] << 4)); + } + } + data->conf1 = FAN_CHAN_TO_REG(reg, data->conf1); + adm1031_write_value(client, ADM1031_REG_CONF1, data->conf1); + up(&data->update_lock); + return count; +} + +#define fan_auto_channel_offset(offset) \ +static ssize_t show_fan_auto_channel_##offset (struct device *dev, char *buf) \ +{ \ + return show_fan_auto_channel(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t set_fan_auto_channel_##offset (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + return set_fan_auto_channel(dev, buf, count, 0x##offset - 1); \ +} \ +static DEVICE_ATTR(auto_fan##offset##_channel, S_IRUGO | S_IWUSR, \ + show_fan_auto_channel_##offset, \ + set_fan_auto_channel_##offset) + +fan_auto_channel_offset(1); +fan_auto_channel_offset(2); + +/* Auto Temps */ +static ssize_t show_auto_temp_off(struct device 
*dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", + AUTO_TEMP_OFF_FROM_REG(data->auto_temp[nr])); +} +static ssize_t show_auto_temp_min(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", + AUTO_TEMP_MIN_FROM_REG(data->auto_temp[nr])); +} +static ssize_t +set_auto_temp_min(struct device *dev, const char *buf, size_t count, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + int val; + + down(&data->update_lock); + val = simple_strtol(buf, NULL, 10); + data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); + adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), + data->auto_temp[nr]); + up(&data->update_lock); + return count; +} +static ssize_t show_auto_temp_max(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", + AUTO_TEMP_MAX_FROM_REG(data->auto_temp[nr])); +} +static ssize_t +set_auto_temp_max(struct device *dev, const char *buf, size_t count, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + int val; + + down(&data->update_lock); + val = simple_strtol(buf, NULL, 10); + data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], data->pwm[nr]); + adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), + data->temp_max[nr]); + up(&data->update_lock); + return count; +} + +#define auto_temp_reg(offset) \ +static ssize_t show_auto_temp_##offset##_off (struct device *dev, char *buf) \ +{ \ + return show_auto_temp_off(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t show_auto_temp_##offset##_min (struct device *dev, char *buf) \ +{ \ + return show_auto_temp_min(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t show_auto_temp_##offset##_max (struct device *dev, char *buf) \ +{ \ + return show_auto_temp_max(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t set_auto_temp_##offset##_min (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + return set_auto_temp_min(dev, buf, count, 0x##offset - 1); \ +} \ +static ssize_t set_auto_temp_##offset##_max (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + return set_auto_temp_max(dev, buf, count, 0x##offset - 1); \ +} \ +static DEVICE_ATTR(auto_temp##offset##_off, S_IRUGO, \ + show_auto_temp_##offset##_off, NULL); \ +static DEVICE_ATTR(auto_temp##offset##_min, S_IRUGO | S_IWUSR, \ + show_auto_temp_##offset##_min, set_auto_temp_##offset##_min);\ +static DEVICE_ATTR(auto_temp##offset##_max, S_IRUGO | S_IWUSR, \ + show_auto_temp_##offset##_max, set_auto_temp_##offset##_max) + +auto_temp_reg(1); +auto_temp_reg(2); +auto_temp_reg(3); + +/* pwm */ +static ssize_t show_pwm(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm[nr])); +} +static ssize_t +set_pwm(struct device *dev, const char *buf, size_t count, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + int val; + int reg; + down(&data->update_lock); + val = simple_strtol(buf, NULL, 10); + if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) && + (((val>>4) & 0xf) != 5)) { + /* In automatic mode, the only PWM accepted is 33% */ + up(&data->update_lock); + return -EINVAL; + } + data->pwm[nr] = PWM_TO_REG(val); + reg = 
adm1031_read_value(client, ADM1031_REG_PWM); + adm1031_write_value(client, ADM1031_REG_PWM, + nr ? ((data->pwm[nr] << 4) & 0xf0) | (reg & 0xf) + : (data->pwm[nr] & 0xf) | (reg & 0xf0)); + up(&data->update_lock); + return count; +} + +#define pwm_reg(offset) \ +static ssize_t show_pwm_##offset (struct device *dev, char *buf) \ +{ \ + return show_pwm(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t set_pwm_##offset (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + return set_pwm(dev, buf, count, 0x##offset - 1); \ +} \ +static DEVICE_ATTR(fan##offset##_pwm, S_IRUGO | S_IWUSR, \ + show_pwm_##offset, set_pwm_##offset) + +pwm_reg(1); +pwm_reg(2); + +/* Fans */ + +/* + * That function checks the cases where the fan reading is not + * relevent. It is used to provide 0 as fan reading when the fan is + * not supposed to run + */ +static int trust_fan_readings(struct adm1031_data *data, int chan) +{ + int res = 0; + + if (data->conf1 & ADM1031_CONF1_AUTO_MODE) { + switch (data->conf1 & 0x60) { + case 0x00: /* remote temp1 controls fan1 remote temp2 controls fan2 */ + res = data->temp[chan+1] >= + AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[chan+1]); + break; + case 0x20: /* remote temp1 controls both fans */ + res = + data->temp[1] >= + AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[1]); + break; + case 0x40: /* remote temp2 controls both fans */ + res = + data->temp[2] >= + AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[2]); + break; + case 0x60: /* max controls both fans */ + res = + data->temp[0] >= + AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[0]) + || data->temp[1] >= + AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[1]) + || (data->chip_type == adm1031 + && data->temp[2] >= + AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[2])); + break; + } + } else { + res = data->pwm[chan] > 0; + } + return res; +} + + +static ssize_t show_fan(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + int value; + + value = trust_fan_readings(data, nr) ? FAN_FROM_REG(data->fan[nr], + FAN_DIV_FROM_REG(data->fan_div[nr])) : 0; + return sprintf(buf, "%d\n", value); +} + +static ssize_t show_fan_div(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", FAN_DIV_FROM_REG(data->fan_div[nr])); +} +static ssize_t show_fan_min(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", + FAN_FROM_REG(data->fan_min[nr], + FAN_DIV_FROM_REG(data->fan_div[nr]))); +} +static ssize_t +set_fan_min(struct device *dev, const char *buf, size_t count, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + int val; + + down(&data->update_lock); + val = simple_strtol(buf, NULL, 10); + if (val) { + data->fan_min[nr] = + FAN_TO_REG(val, FAN_DIV_FROM_REG(data->fan_div[nr])); + } else { + data->fan_min[nr] = 0xff; + } + adm1031_write_value(client, ADM1031_REG_FAN_MIN(nr), data->fan_min[nr]); + up(&data->update_lock); + return count; +} +static ssize_t +set_fan_div(struct device *dev, const char *buf, size_t count, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + int val; + u8 tmp; + int old_div = FAN_DIV_FROM_REG(data->fan_div[nr]); + int new_min; + + val = simple_strtol(buf, NULL, 10); + tmp = val == 8 ? 0xc0 : + val == 4 ? 0x80 : + val == 2 ? 0x40 : + val == 1 ? 
0x00 : + 0xff; + if (tmp == 0xff) + return -EINVAL; + down(&data->update_lock); + data->fan_div[nr] = (tmp & 0xC0) | (0x3f & data->fan_div[nr]); + new_min = data->fan_min[nr] * old_div / + FAN_DIV_FROM_REG(data->fan_div[nr]); + data->fan_min[nr] = new_min > 0xff ? 0xff : new_min; + data->fan[nr] = data->fan[nr] * old_div / + FAN_DIV_FROM_REG(data->fan_div[nr]); + + adm1031_write_value(client, ADM1031_REG_FAN_DIV(nr), + data->fan_div[nr]); + adm1031_write_value(client, ADM1031_REG_FAN_MIN(nr), + data->fan_min[nr]); + up(&data->update_lock); + return count; +} + +#define fan_offset(offset) \ +static ssize_t show_fan_##offset (struct device *dev, char *buf) \ +{ \ + return show_fan(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t show_fan_##offset##_min (struct device *dev, char *buf) \ +{ \ + return show_fan_min(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t show_fan_##offset##_div (struct device *dev, char *buf) \ +{ \ + return show_fan_div(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t set_fan_##offset##_min (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + return set_fan_min(dev, buf, count, 0x##offset - 1); \ +} \ +static ssize_t set_fan_##offset##_div (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + return set_fan_div(dev, buf, count, 0x##offset - 1); \ +} \ +static DEVICE_ATTR(fan##offset##_input, S_IRUGO, show_fan_##offset, \ + NULL); \ +static DEVICE_ATTR(fan##offset##_min, S_IRUGO | S_IWUSR, \ + show_fan_##offset##_min, set_fan_##offset##_min); \ +static DEVICE_ATTR(fan##offset##_div, S_IRUGO | S_IWUSR, \ + show_fan_##offset##_div, set_fan_##offset##_div); \ +static DEVICE_ATTR(auto_fan##offset##_min_pwm, S_IRUGO | S_IWUSR, \ + show_pwm_##offset, set_pwm_##offset) + +fan_offset(1); +fan_offset(2); + + +/* Temps */ +static ssize_t show_temp(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + int ext; + ext = nr == 0 ? + ((data->ext_temp[nr] >> 6) & 0x3) * 2 : + (((data->ext_temp[nr] >> ((nr - 1) * 3)) & 7)); + return sprintf(buf, "%d\n", TEMP_FROM_REG_EXT(data->temp[nr], ext)); +} +static ssize_t show_temp_min(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr])); +} +static ssize_t show_temp_max(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr])); +} +static ssize_t show_temp_crit(struct device *dev, char *buf, int nr) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_crit[nr])); +} +static ssize_t +set_temp_min(struct device *dev, const char *buf, size_t count, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + int val; + + val = simple_strtol(buf, NULL, 10); + val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875); + down(&data->update_lock); + data->temp_min[nr] = TEMP_TO_REG(val); + adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), + data->temp_min[nr]); + up(&data->update_lock); + return count; +} +static ssize_t +set_temp_max(struct device *dev, const char *buf, size_t count, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + int val; + + val = simple_strtol(buf, NULL, 10); + val = SENSORS_LIMIT(val, -55000, nr == 0 ? 
127750 : 127875); + down(&data->update_lock); + data->temp_max[nr] = TEMP_TO_REG(val); + adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), + data->temp_max[nr]); + up(&data->update_lock); + return count; +} +static ssize_t +set_temp_crit(struct device *dev, const char *buf, size_t count, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + int val; + + val = simple_strtol(buf, NULL, 10); + val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875); + down(&data->update_lock); + data->temp_crit[nr] = TEMP_TO_REG(val); + adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), + data->temp_crit[nr]); + up(&data->update_lock); + return count; +} + +#define temp_reg(offset) \ +static ssize_t show_temp_##offset (struct device *dev, char *buf) \ +{ \ + return show_temp(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t show_temp_##offset##_min (struct device *dev, char *buf) \ +{ \ + return show_temp_min(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t show_temp_##offset##_max (struct device *dev, char *buf) \ +{ \ + return show_temp_max(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t show_temp_##offset##_crit (struct device *dev, char *buf) \ +{ \ + return show_temp_crit(dev, buf, 0x##offset - 1); \ +} \ +static ssize_t set_temp_##offset##_min (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + return set_temp_min(dev, buf, count, 0x##offset - 1); \ +} \ +static ssize_t set_temp_##offset##_max (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + return set_temp_max(dev, buf, count, 0x##offset - 1); \ +} \ +static ssize_t set_temp_##offset##_crit (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + return set_temp_crit(dev, buf, count, 0x##offset - 1); \ +} \ +static DEVICE_ATTR(temp##offset##_input, S_IRUGO, show_temp_##offset, \ + NULL); \ +static DEVICE_ATTR(temp##offset##_min, S_IRUGO | S_IWUSR, \ + show_temp_##offset##_min, set_temp_##offset##_min); \ +static DEVICE_ATTR(temp##offset##_max, S_IRUGO | S_IWUSR, \ + show_temp_##offset##_max, set_temp_##offset##_max); \ +static DEVICE_ATTR(temp##offset##_crit, S_IRUGO | S_IWUSR, \ + show_temp_##offset##_crit, set_temp_##offset##_crit) + +temp_reg(1); +temp_reg(2); +temp_reg(3); + +/* Alarms */ +static ssize_t show_alarms(struct device *dev, char *buf) +{ + struct adm1031_data *data = adm1031_update_device(dev); + return sprintf(buf, "%d\n", data->alarm); +} + +static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); + + +static int adm1031_attach_adapter(struct i2c_adapter *adapter) +{ + if (!(adapter->class & I2C_CLASS_HWMON)) + return 0; + return i2c_detect(adapter, &addr_data, adm1031_detect); +} + +/* This function is called by i2c_detect */ +static int adm1031_detect(struct i2c_adapter *adapter, int address, int kind) +{ + struct i2c_client *new_client; + struct adm1031_data *data; + int err = 0; + const char *name = ""; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + goto exit; + + if (!(data = kmalloc(sizeof(struct adm1031_data), GFP_KERNEL))) { + err = -ENOMEM; + goto exit; + } + memset(data, 0, sizeof(struct adm1031_data)); + + new_client = &data->client; + i2c_set_clientdata(new_client, data); + new_client->addr = address; + new_client->adapter = adapter; + new_client->driver = &adm1031_driver; + new_client->flags = 0; + + if (kind < 0) { + int id, co; + id = i2c_smbus_read_byte_data(new_client, 0x3d); + co = i2c_smbus_read_byte_data(new_client, 0x3e); + + if (!((id == 0x31 || id == 0x30) && co == 0x41)) 
+ goto exit_free; + kind = (id == 0x30) ? adm1030 : adm1031; + } + + if (kind <= 0) + kind = adm1031; + + /* Given the detected chip type, set the chip name and the + * auto fan control helper table. */ + if (kind == adm1030) { + name = "adm1030"; + data->chan_select_table = &auto_channel_select_table_adm1030; + } else if (kind == adm1031) { + name = "adm1031"; + data->chan_select_table = &auto_channel_select_table_adm1031; + } + data->chip_type = kind; + + strlcpy(new_client->name, name, I2C_NAME_SIZE); + + new_client->id = adm1031_id++; + data->valid = 0; + init_MUTEX(&data->update_lock); + + /* Tell the I2C layer a new client has arrived */ + if ((err = i2c_attach_client(new_client))) + goto exit_free; + + /* Initialize the ADM1031 chip */ + adm1031_init_client(new_client); + + /* Register sysfs hooks */ + device_create_file(&new_client->dev, &dev_attr_fan1_input); + device_create_file(&new_client->dev, &dev_attr_fan1_div); + device_create_file(&new_client->dev, &dev_attr_fan1_min); + device_create_file(&new_client->dev, &dev_attr_fan1_pwm); + device_create_file(&new_client->dev, &dev_attr_auto_fan1_channel); + device_create_file(&new_client->dev, &dev_attr_temp1_input); + device_create_file(&new_client->dev, &dev_attr_temp1_min); + device_create_file(&new_client->dev, &dev_attr_temp1_max); + device_create_file(&new_client->dev, &dev_attr_temp1_crit); + device_create_file(&new_client->dev, &dev_attr_temp2_input); + device_create_file(&new_client->dev, &dev_attr_temp2_min); + device_create_file(&new_client->dev, &dev_attr_temp2_max); + device_create_file(&new_client->dev, &dev_attr_temp2_crit); + + device_create_file(&new_client->dev, &dev_attr_auto_temp1_off); + device_create_file(&new_client->dev, &dev_attr_auto_temp1_min); + device_create_file(&new_client->dev, &dev_attr_auto_temp1_max); + + device_create_file(&new_client->dev, &dev_attr_auto_temp2_off); + device_create_file(&new_client->dev, &dev_attr_auto_temp2_min); + device_create_file(&new_client->dev, &dev_attr_auto_temp2_max); + + device_create_file(&new_client->dev, &dev_attr_auto_fan1_min_pwm); + + device_create_file(&new_client->dev, &dev_attr_alarms); + + if (kind == adm1031) { + device_create_file(&new_client->dev, &dev_attr_fan2_input); + device_create_file(&new_client->dev, &dev_attr_fan2_div); + device_create_file(&new_client->dev, &dev_attr_fan2_min); + device_create_file(&new_client->dev, &dev_attr_fan2_pwm); + device_create_file(&new_client->dev, + &dev_attr_auto_fan2_channel); + device_create_file(&new_client->dev, &dev_attr_temp3_input); + device_create_file(&new_client->dev, &dev_attr_temp3_min); + device_create_file(&new_client->dev, &dev_attr_temp3_max); + device_create_file(&new_client->dev, &dev_attr_temp3_crit); + device_create_file(&new_client->dev, &dev_attr_auto_temp3_off); + device_create_file(&new_client->dev, &dev_attr_auto_temp3_min); + device_create_file(&new_client->dev, &dev_attr_auto_temp3_max); + device_create_file(&new_client->dev, &dev_attr_auto_fan2_min_pwm); + } + + return 0; + +exit_free: + kfree(new_client); +exit: + return err; +} + +static int adm1031_detach_client(struct i2c_client *client) +{ + int ret; + if ((ret = i2c_detach_client(client)) != 0) { + return ret; + } + kfree(client); + return 0; +} + +static void adm1031_init_client(struct i2c_client *client) +{ + unsigned int read_val; + unsigned int mask; + struct adm1031_data *data = i2c_get_clientdata(client); + + mask = (ADM1031_CONF2_PWM1_ENABLE | ADM1031_CONF2_TACH1_ENABLE); + if (data->chip_type == adm1031) { + mask |= 
(ADM1031_CONF2_PWM2_ENABLE | + ADM1031_CONF2_TACH2_ENABLE); + } + /* Initialize the ADM1031 chip (enables fan speed reading ) */ + read_val = adm1031_read_value(client, ADM1031_REG_CONF2); + if ((read_val | mask) != read_val) { + adm1031_write_value(client, ADM1031_REG_CONF2, read_val | mask); + } + + read_val = adm1031_read_value(client, ADM1031_REG_CONF1); + if ((read_val | ADM1031_CONF1_MONITOR_ENABLE) != read_val) { + adm1031_write_value(client, ADM1031_REG_CONF1, read_val | + ADM1031_CONF1_MONITOR_ENABLE); + } + +} + +static struct adm1031_data *adm1031_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct adm1031_data *data = i2c_get_clientdata(client); + int chan; + + down(&data->update_lock); + + if ((jiffies - data->last_updated > HZ + HZ / 2) || + (jiffies < data->last_updated) || !data->valid) { + + dev_dbg(&client->dev, "Starting adm1031 update\n"); + for (chan = 0; + chan < ((data->chip_type == adm1031) ? 3 : 2); chan++) { + u8 oldh, newh; + + oldh = + adm1031_read_value(client, ADM1031_REG_TEMP(chan)); + data->ext_temp[chan] = + adm1031_read_value(client, ADM1031_REG_EXT_TEMP); + newh = + adm1031_read_value(client, ADM1031_REG_TEMP(chan)); + if (newh != oldh) { + data->ext_temp[chan] = + adm1031_read_value(client, + ADM1031_REG_EXT_TEMP); +#ifdef DEBUG + oldh = + adm1031_read_value(client, + ADM1031_REG_TEMP(chan)); + + /* oldh is actually newer */ + if (newh != oldh) + dev_warn(&client->dev, + "Remote temperature may be " + "wrong.\n"); +#endif + } + data->temp[chan] = newh; + + data->temp_min[chan] = + adm1031_read_value(client, + ADM1031_REG_TEMP_MIN(chan)); + data->temp_max[chan] = + adm1031_read_value(client, + ADM1031_REG_TEMP_MAX(chan)); + data->temp_crit[chan] = + adm1031_read_value(client, + ADM1031_REG_TEMP_CRIT(chan)); + data->auto_temp[chan] = + adm1031_read_value(client, + ADM1031_REG_AUTO_TEMP(chan)); + + } + + data->conf1 = adm1031_read_value(client, ADM1031_REG_CONF1); + data->conf2 = adm1031_read_value(client, ADM1031_REG_CONF2); + + data->alarm = adm1031_read_value(client, ADM1031_REG_STATUS(0)) + | (adm1031_read_value(client, ADM1031_REG_STATUS(1)) + << 8); + if (data->chip_type == adm1030) { + data->alarm &= 0xc0ff; + } + + for (chan=0; chan<(data->chip_type == adm1030 ? 1 : 2); chan++) { + data->fan_div[chan] = + adm1031_read_value(client, ADM1031_REG_FAN_DIV(chan)); + data->fan_min[chan] = + adm1031_read_value(client, ADM1031_REG_FAN_MIN(chan)); + data->fan[chan] = + adm1031_read_value(client, ADM1031_REG_FAN_SPEED(chan)); + data->pwm[chan] = + 0xf & (adm1031_read_value(client, ADM1031_REG_PWM) >> + (4*chan)); + } + data->last_updated = jiffies; + data->valid = 1; + } + + up(&data->update_lock); + + return data; +} + +static int __init sensors_adm1031_init(void) +{ + return i2c_add_driver(&adm1031_driver); +} + +static void __exit sensors_adm1031_exit(void) +{ + i2c_del_driver(&adm1031_driver); +} + +MODULE_AUTHOR("Alexandre d'Alton "); +MODULE_DESCRIPTION("ADM1031/ADM1030 driver"); +MODULE_LICENSE("GPL"); + +module_init(sensors_adm1031_init); +module_exit(sensors_adm1031_exit); diff --git a/drivers/i2c/chips/lm77.c b/drivers/i2c/chips/lm77.c new file mode 100644 index 000000000..a6fc78105 --- /dev/null +++ b/drivers/i2c/chips/lm77.c @@ -0,0 +1,413 @@ +/* + lm77.c - Part of lm_sensors, Linux kernel modules for hardware + monitoring + + Copyright (c) 2004 Andras BALI + + Heavily based on lm75.c by Frodo Looijaard . 
The LM77 + is a temperature sensor and thermal window comparator with 0.5 deg + resolution made by National Semiconductor. Complete datasheet can be + obtained at their site: + http://www.national.com/pf/LM/LM77.html + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +#include +#include +#include +#include +#include +#include + + +/* Addresses to scan */ +static unsigned short normal_i2c[] = { I2C_CLIENT_END }; +static unsigned short normal_i2c_range[] = { 0x48, 0x4b, I2C_CLIENT_END }; +static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END }; +static unsigned int normal_isa_range[] = { I2C_CLIENT_ISA_END }; + +/* Insmod parameters */ +SENSORS_INSMOD_1(lm77); + +/* The LM77 registers */ +#define LM77_REG_TEMP 0x00 +#define LM77_REG_CONF 0x01 +#define LM77_REG_TEMP_HYST 0x02 +#define LM77_REG_TEMP_CRIT 0x03 +#define LM77_REG_TEMP_MIN 0x04 +#define LM77_REG_TEMP_MAX 0x05 + +/* Each client has this additional data */ +struct lm77_data { + struct i2c_client client; + struct semaphore update_lock; + char valid; + unsigned long last_updated; /* In jiffies */ + int temp_input; /* Temperatures */ + int temp_crit; + int temp_min; + int temp_max; + int temp_hyst; + u8 alarms; +}; + +static int lm77_attach_adapter(struct i2c_adapter *adapter); +static int lm77_detect(struct i2c_adapter *adapter, int address, int kind); +static void lm77_init_client(struct i2c_client *client); +static int lm77_detach_client(struct i2c_client *client); +static u16 lm77_read_value(struct i2c_client *client, u8 reg); +static int lm77_write_value(struct i2c_client *client, u8 reg, u16 value); + +static struct lm77_data *lm77_update_device(struct device *dev); + + +/* This is the driver that will be inserted */ +static struct i2c_driver lm77_driver = { + .owner = THIS_MODULE, + .name = "lm77", + .flags = I2C_DF_NOTIFY, + .attach_adapter = lm77_attach_adapter, + .detach_client = lm77_detach_client, +}; + +static int lm77_id = 0; + +/* straight from the datasheet */ +#define LM77_TEMP_MIN (-55000) +#define LM77_TEMP_MAX 125000 + +/* In the temperature registers, the low 3 bits are not part of the + temperature values; they are the status bits. 
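As a concrete illustration of that encoding, the standalone sketch below mirrors the arithmetic of the LM77_TEMP_TO_REG()/LM77_TEMP_FROM_REG() helpers defined just below. The function names and the 25.5 degC value are illustrative only, not part of the driver, and only an in-range positive temperature is shown.

#include <stdio.h>
#include <stdint.h>

/* temperatures are handled in millidegrees C; the register keeps
   0.5 degC steps above the three low status bits */
static uint16_t temp_to_reg(int mdeg)
{
        return (uint16_t)((mdeg / 500) * 8);    /* 0.5 degC steps, shifted past the status bits */
}

static int temp_from_reg(uint16_t reg)
{
        return ((int)reg / 8) * 500;            /* drop the status bits, back to millidegrees */
}

int main(void)
{
        int mdeg = 25500;                       /* 25.5 degC */
        uint16_t reg = temp_to_reg(mdeg);

        printf("%d mC -> reg 0x%04x -> %d mC\n", mdeg, reg, temp_from_reg(reg));
        /* prints: 25500 mC -> reg 0x0198 -> 25500 mC */
        return 0;
}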
*/ +static inline u16 LM77_TEMP_TO_REG(int temp) +{ + int ntemp = SENSORS_LIMIT(temp, LM77_TEMP_MIN, LM77_TEMP_MAX); + return (u16)((ntemp / 500) * 8); +} + +static inline int LM77_TEMP_FROM_REG(u16 reg) +{ + return ((int)reg / 8) * 500; +} + +/* sysfs stuff */ + +/* read routines for temperature limits */ +#define show(value) \ +static ssize_t show_##value(struct device *dev, char *buf) \ +{ \ + struct lm77_data *data = lm77_update_device(dev); \ + return sprintf(buf, "%d\n", data->value); \ +} + +show(temp_input); +show(temp_crit); +show(temp_min); +show(temp_max); +show(alarms); + +/* read routines for hysteresis values */ +static ssize_t show_temp_crit_hyst(struct device *dev, char *buf) +{ + struct lm77_data *data = lm77_update_device(dev); + return sprintf(buf, "%d\n", data->temp_crit - data->temp_hyst); +} +static ssize_t show_temp_min_hyst(struct device *dev, char *buf) +{ + struct lm77_data *data = lm77_update_device(dev); + return sprintf(buf, "%d\n", data->temp_min + data->temp_hyst); +} +static ssize_t show_temp_max_hyst(struct device *dev, char *buf) +{ + struct lm77_data *data = lm77_update_device(dev); + return sprintf(buf, "%d\n", data->temp_max - data->temp_hyst); +} + +/* write routines */ +#define set(value, reg) \ +static ssize_t set_##value(struct device *dev, const char *buf, size_t count) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct lm77_data *data = i2c_get_clientdata(client); \ + data->value = simple_strtoul(buf, NULL, 10); \ + lm77_write_value(client, reg, LM77_TEMP_TO_REG(data->value)); \ + return count; \ +} + +set(temp_min, LM77_REG_TEMP_MIN); +set(temp_max, LM77_REG_TEMP_MAX); + +/* hysteresis is stored as a relative value on the chip, so it has to be + converted first */ +static ssize_t set_temp_crit_hyst(struct device *dev, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lm77_data *data = i2c_get_clientdata(client); + data->temp_hyst = data->temp_crit - simple_strtoul(buf, NULL, 10); + lm77_write_value(client, LM77_REG_TEMP_HYST, + LM77_TEMP_TO_REG(data->temp_hyst)); + return count; +} + +/* preserve hysteresis when setting T_crit */ +static ssize_t set_temp_crit(struct device *dev, const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lm77_data *data = i2c_get_clientdata(client); + int oldcrithyst = data->temp_crit - data->temp_hyst; + data->temp_crit = simple_strtoul(buf, NULL, 10); + data->temp_hyst = data->temp_crit - oldcrithyst; + lm77_write_value(client, LM77_REG_TEMP_CRIT, + LM77_TEMP_TO_REG(data->temp_crit)); + lm77_write_value(client, LM77_REG_TEMP_HYST, + LM77_TEMP_TO_REG(data->temp_hyst)); + return count; +} + +static DEVICE_ATTR(temp1_input, S_IRUGO, + show_temp_input, NULL); +static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, + show_temp_crit, set_temp_crit); +static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, + show_temp_min, set_temp_min); +static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, + show_temp_max, set_temp_max); + +static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, + show_temp_crit_hyst, set_temp_crit_hyst); +static DEVICE_ATTR(temp1_min_hyst, S_IRUGO, + show_temp_min_hyst, NULL); +static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, + show_temp_max_hyst, NULL); + +static DEVICE_ATTR(alarms, S_IRUGO, + show_alarms, NULL); + +static int lm77_attach_adapter(struct i2c_adapter *adapter) +{ + if (!(adapter->class & I2C_CLASS_HWMON)) + return 0; + return i2c_detect(adapter, &addr_data, lm77_detect); +} + +/* This function is called by 
i2c_detect */ +static int lm77_detect(struct i2c_adapter *adapter, int address, int kind) +{ + struct i2c_client *new_client; + struct lm77_data *data; + int err = 0; + const char *name = ""; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA)) + goto exit; + + /* OK. For now, we presume we have a valid client. We now create the + client structure, even though we cannot fill it completely yet. + But it allows us to access lm77_{read,write}_value. */ + if (!(data = kmalloc(sizeof(struct lm77_data), GFP_KERNEL))) { + err = -ENOMEM; + goto exit; + } + memset(data, 0, sizeof(struct lm77_data)); + + new_client = &data->client; + i2c_set_clientdata(new_client, data); + new_client->addr = address; + new_client->adapter = adapter; + new_client->driver = &lm77_driver; + new_client->flags = 0; + + /* Here comes the remaining detection. Since the LM77 has no + register dedicated to identification, we have to rely on the + following tricks: + + 1. the high 4 bits represent the sign and thus they should + always be the same + 2. the high 3 bits are unused in the configuration register + 3. addresses 0x06 and 0x07 return the last read value + 4. registers cycling over 8-address boundaries + + Word-sized registers are high-byte first. */ + if (kind < 0) { + int i, cur, conf, hyst, crit, min, max; + + /* addresses cycling */ + cur = i2c_smbus_read_word_data(new_client, 0); + conf = i2c_smbus_read_byte_data(new_client, 1); + hyst = i2c_smbus_read_word_data(new_client, 2); + crit = i2c_smbus_read_word_data(new_client, 3); + min = i2c_smbus_read_word_data(new_client, 4); + max = i2c_smbus_read_word_data(new_client, 5); + for (i = 8; i <= 0xff; i += 8) + if (i2c_smbus_read_byte_data(new_client, i + 1) != conf + || i2c_smbus_read_word_data(new_client, i + 2) != hyst + || i2c_smbus_read_word_data(new_client, i + 3) != crit + || i2c_smbus_read_word_data(new_client, i + 4) != min + || i2c_smbus_read_word_data(new_client, i + 5) != max) + goto exit_free; + + /* sign bits */ + if (((cur & 0x00f0) != 0xf0 && (cur & 0x00f0) != 0x0) + || ((hyst & 0x00f0) != 0xf0 && (hyst & 0x00f0) != 0x0) + || ((crit & 0x00f0) != 0xf0 && (crit & 0x00f0) != 0x0) + || ((min & 0x00f0) != 0xf0 && (min & 0x00f0) != 0x0) + || ((max & 0x00f0) != 0xf0 && (max & 0x00f0) != 0x0)) + goto exit_free; + + /* unused bits */ + if (conf & 0xe0) + goto exit_free; + + /* 0x06 and 0x07 return the last read value */ + cur = i2c_smbus_read_word_data(new_client, 0); + if (i2c_smbus_read_word_data(new_client, 6) != cur + || i2c_smbus_read_word_data(new_client, 7) != cur) + goto exit_free; + hyst = i2c_smbus_read_word_data(new_client, 2); + if (i2c_smbus_read_word_data(new_client, 6) != hyst + || i2c_smbus_read_word_data(new_client, 7) != hyst) + goto exit_free; + min = i2c_smbus_read_word_data(new_client, 4); + if (i2c_smbus_read_word_data(new_client, 6) != min + || i2c_smbus_read_word_data(new_client, 7) != min) + goto exit_free; + + } + + /* Determine the chip type - only one kind supported! 
*/ + if (kind <= 0) + kind = lm77; + + if (kind == lm77) { + name = "lm77"; + } + + /* Fill in the remaining client fields and put it into the global list */ + strlcpy(new_client->name, name, I2C_NAME_SIZE); + + new_client->id = lm77_id++; + data->valid = 0; + init_MUTEX(&data->update_lock); + + /* Tell the I2C layer a new client has arrived */ + if ((err = i2c_attach_client(new_client))) + goto exit_free; + + /* Initialize the LM77 chip */ + lm77_init_client(new_client); + + /* Register sysfs hooks */ + device_create_file(&new_client->dev, &dev_attr_temp1_input); + device_create_file(&new_client->dev, &dev_attr_temp1_crit); + device_create_file(&new_client->dev, &dev_attr_temp1_min); + device_create_file(&new_client->dev, &dev_attr_temp1_max); + device_create_file(&new_client->dev, &dev_attr_temp1_crit_hyst); + device_create_file(&new_client->dev, &dev_attr_temp1_min_hyst); + device_create_file(&new_client->dev, &dev_attr_temp1_max_hyst); + device_create_file(&new_client->dev, &dev_attr_alarms); + return 0; + +exit_free: + kfree(data); +exit: + return err; +} + +static int lm77_detach_client(struct i2c_client *client) +{ + i2c_detach_client(client); + kfree(i2c_get_clientdata(client)); + return 0; +} + +/* All registers are word-sized, except for the configuration register. + The LM77 uses the high-byte first convention. */ +static u16 lm77_read_value(struct i2c_client *client, u8 reg) +{ + if (reg == LM77_REG_CONF) + return i2c_smbus_read_byte_data(client, reg); + else + return swab16(i2c_smbus_read_word_data(client, reg)); +} + +static int lm77_write_value(struct i2c_client *client, u8 reg, u16 value) +{ + if (reg == LM77_REG_CONF) + return i2c_smbus_write_byte_data(client, reg, value); + else + return i2c_smbus_write_word_data(client, reg, swab16(value)); +} + +static void lm77_init_client(struct i2c_client *client) +{ + /* Initialize the LM77 chip - turn off shutdown mode */ + int conf = lm77_read_value(client, LM77_REG_CONF); + if (conf & 1) + lm77_write_value(client, LM77_REG_CONF, conf & 0xfe); +} + +static struct lm77_data *lm77_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lm77_data *data = i2c_get_clientdata(client); + + down(&data->update_lock); + + if ((jiffies - data->last_updated > HZ + HZ / 2) || + (jiffies < data->last_updated) || !data->valid) { + dev_dbg(&client->dev, "Starting lm77 update\n"); + data->temp_input = + LM77_TEMP_FROM_REG(lm77_read_value(client, + LM77_REG_TEMP)); + data->temp_hyst = + LM77_TEMP_FROM_REG(lm77_read_value(client, + LM77_REG_TEMP_HYST)); + data->temp_crit = + LM77_TEMP_FROM_REG(lm77_read_value(client, + LM77_REG_TEMP_CRIT)); + data->temp_min = + LM77_TEMP_FROM_REG(lm77_read_value(client, + LM77_REG_TEMP_MIN)); + data->temp_max = + LM77_TEMP_FROM_REG(lm77_read_value(client, + LM77_REG_TEMP_MAX)); + data->alarms = + lm77_read_value(client, LM77_REG_TEMP) & 0x0007; + data->last_updated = jiffies; + data->valid = 1; + } + + up(&data->update_lock); + + return data; +} + +static int __init sensors_lm77_init(void) +{ + return i2c_add_driver(&lm77_driver); +} + +static void __exit sensors_lm77_exit(void) +{ + i2c_del_driver(&lm77_driver); +} + +MODULE_AUTHOR("Andras BALI "); +MODULE_DESCRIPTION("LM77 driver"); +MODULE_LICENSE("GPL"); + +module_init(sensors_lm77_init); +module_exit(sensors_lm77_exit); diff --git a/drivers/i2c/chips/max1619.c b/drivers/i2c/chips/max1619.c new file mode 100644 index 000000000..0f8a5ac5c --- /dev/null +++ b/drivers/i2c/chips/max1619.c @@ -0,0 +1,378 @@ +/* + * max1619.c - 
Part of lm_sensors, Linux kernel modules for hardware + * monitoring + * Copyright (C) 2003-2004 Alexey Fisher + * Jean Delvare + * + * Based on the lm90 driver. The MAX1619 is a sensor chip made by Maxim. + * It reports up to two temperatures (its own plus up to + * one external one). Complete datasheet can be + * obtained from Maxim's website at: + * http://pdfserv.maxim-ic.com/en/ds/MAX1619.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#include +#include +#include +#include +#include +#include + + +static unsigned short normal_i2c[] = { I2C_CLIENT_END }; +static unsigned short normal_i2c_range[] = { 0x18, 0x1a, 0x29, 0x2b, + 0x4c, 0x4e, I2C_CLIENT_END }; +static unsigned int normal_isa[] = { I2C_CLIENT_ISA_END }; +static unsigned int normal_isa_range[] = { I2C_CLIENT_ISA_END }; + +/* + * Insmod parameters + */ + +SENSORS_INSMOD_1(max1619); + +/* + * The MAX1619 registers + */ + +#define MAX1619_REG_R_MAN_ID 0xFE +#define MAX1619_REG_R_CHIP_ID 0xFF +#define MAX1619_REG_R_CONFIG 0x03 +#define MAX1619_REG_W_CONFIG 0x09 +#define MAX1619_REG_R_CONVRATE 0x04 +#define MAX1619_REG_W_CONVRATE 0x0A +#define MAX1619_REG_R_STATUS 0x02 +#define MAX1619_REG_R_LOCAL_TEMP 0x00 +#define MAX1619_REG_R_REMOTE_TEMP 0x01 +#define MAX1619_REG_R_REMOTE_HIGH 0x07 +#define MAX1619_REG_W_REMOTE_HIGH 0x0D +#define MAX1619_REG_R_REMOTE_LOW 0x08 +#define MAX1619_REG_W_REMOTE_LOW 0x0E +#define MAX1619_REG_R_REMOTE_CRIT 0x10 +#define MAX1619_REG_W_REMOTE_CRIT 0x12 +#define MAX1619_REG_R_TCRIT_HYST 0x11 +#define MAX1619_REG_W_TCRIT_HYST 0x13 + +/* + * Conversions and various macros + */ + +#define TEMP_FROM_REG(val) ((val & 0x80 ? val-0x100 : val) * 1000) +#define TEMP_TO_REG(val) ((val < 0 ? 
val+0x100*1000 : val) / 1000) + +/* + * Functions declaration + */ + +static int max1619_attach_adapter(struct i2c_adapter *adapter); +static int max1619_detect(struct i2c_adapter *adapter, int address, + int kind); +static void max1619_init_client(struct i2c_client *client); +static int max1619_detach_client(struct i2c_client *client); +static struct max1619_data *max1619_update_device(struct device *dev); + +/* + * Driver data (common to all clients) + */ + +static struct i2c_driver max1619_driver = { + .owner = THIS_MODULE, + .name = "max1619", + .flags = I2C_DF_NOTIFY, + .attach_adapter = max1619_attach_adapter, + .detach_client = max1619_detach_client, +}; + +/* + * Client data (each client gets its own) + */ + +struct max1619_data { + struct i2c_client client; + struct semaphore update_lock; + char valid; /* zero until following fields are valid */ + unsigned long last_updated; /* in jiffies */ + + /* registers values */ + u8 temp_input1; /* local */ + u8 temp_input2, temp_low2, temp_high2; /* remote */ + u8 temp_crit2; + u8 temp_hyst2; + u8 alarms; +}; + +/* + * Internal variables + */ + +static int max1619_id = 0; + +/* + * Sysfs stuff + */ + +#define show_temp(value) \ +static ssize_t show_##value(struct device *dev, char *buf) \ +{ \ + struct max1619_data *data = max1619_update_device(dev); \ + return sprintf(buf, "%d\n", TEMP_FROM_REG(data->value)); \ +} +show_temp(temp_input1); +show_temp(temp_input2); +show_temp(temp_low2); +show_temp(temp_high2); +show_temp(temp_crit2); +show_temp(temp_hyst2); + +#define set_temp2(value, reg) \ +static ssize_t set_##value(struct device *dev, const char *buf, \ + size_t count) \ +{ \ + struct i2c_client *client = to_i2c_client(dev); \ + struct max1619_data *data = i2c_get_clientdata(client); \ + data->value = TEMP_TO_REG(simple_strtol(buf, NULL, 10)); \ + i2c_smbus_write_byte_data(client, reg, data->value); \ + return count; \ +} + +set_temp2(temp_low2, MAX1619_REG_W_REMOTE_LOW); +set_temp2(temp_high2, MAX1619_REG_W_REMOTE_HIGH); +set_temp2(temp_crit2, MAX1619_REG_W_REMOTE_CRIT); +set_temp2(temp_hyst2, MAX1619_REG_W_TCRIT_HYST); + +static ssize_t show_alarms(struct device *dev, char *buf) +{ + struct max1619_data *data = max1619_update_device(dev); + return sprintf(buf, "%d\n", data->alarms); +} + +static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL); +static DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input2, NULL); +static DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_low2, + set_temp_low2); +static DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_high2, + set_temp_high2); +static DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit2, + set_temp_crit2); +static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp_hyst2, + set_temp_hyst2); +static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL); + +/* + * Real code + */ + +static int max1619_attach_adapter(struct i2c_adapter *adapter) +{ + if (!(adapter->class & I2C_CLASS_HWMON)) + return 0; + return i2c_detect(adapter, &addr_data, max1619_detect); +} + +/* + * The following function does more than just detection. If detection + * succeeds, it also registers the new chip. 
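The TEMP_FROM_REG()/TEMP_TO_REG() macros above treat the register value as an 8-bit two's-complement count of whole degrees Celsius and convert it to and from the millidegree values used in sysfs. A standalone sketch of that round trip (illustrative function names and arbitrary demo values, not part of the driver):

#include <stdio.h>
#include <stdint.h>

static int temp_from_reg(uint8_t reg)
{
        /* sign-extend the 8-bit register value, then scale to millidegrees */
        return (reg & 0x80 ? (int)reg - 0x100 : reg) * 1000;
}

static uint8_t temp_to_reg(int mdeg)
{
        /* negative millidegree values wrap back into the 8-bit range */
        return (uint8_t)((mdeg < 0 ? mdeg + 0x100 * 1000 : mdeg) / 1000);
}

int main(void)
{
        printf("0xE7 -> %d mC -> 0x%02X\n", temp_from_reg(0xe7), temp_to_reg(-25000));
        printf("0x32 -> %d mC -> 0x%02X\n", temp_from_reg(0x32), temp_to_reg(50000));
        /* prints: 0xE7 -> -25000 mC -> 0xE7
                   0x32 -> 50000 mC -> 0x32 */
        return 0;
}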
+ */ +static int max1619_detect(struct i2c_adapter *adapter, int address, int kind) +{ + struct i2c_client *new_client; + struct max1619_data *data; + int err = 0; + const char *name = ""; + u8 reg_config=0, reg_convrate=0, reg_status=0; + u8 man_id, chip_id; + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + goto exit; + + if (!(data = kmalloc(sizeof(struct max1619_data), GFP_KERNEL))) { + err = -ENOMEM; + goto exit; + } + memset(data, 0, sizeof(struct max1619_data)); + + /* The common I2C client data is placed right before the + MAX1619-specific data. */ + new_client = &data->client; + i2c_set_clientdata(new_client, data); + new_client->addr = address; + new_client->adapter = adapter; + new_client->driver = &max1619_driver; + new_client->flags = 0; + + /* + * Now we do the remaining detection. A negative kind means that + * the driver was loaded with no force parameter (default), so we + * must both detect and identify the chip. A zero kind means that + * the driver was loaded with the force parameter, the detection + * step shall be skipped. A positive kind means that the driver + * was loaded with the force parameter and a given kind of chip is + * requested, so both the detection and the identification steps + * are skipped. + */ + if (kind < 0) { /* detection */ + reg_config = i2c_smbus_read_byte_data(new_client, + MAX1619_REG_R_CONFIG); + reg_convrate = i2c_smbus_read_byte_data(new_client, + MAX1619_REG_R_CONVRATE); + reg_status = i2c_smbus_read_byte_data(new_client, + MAX1619_REG_R_STATUS); + if ((reg_config & 0x03) != 0x00 + || reg_convrate > 0x07 || (reg_status & 0x61 ) !=0x00) { + dev_dbg(&adapter->dev, + "MAX1619 detection failed at 0x%02x.\n", + address); + goto exit_free; + } + } + + if (kind <= 0) { /* identification */ + + man_id = i2c_smbus_read_byte_data(new_client, + MAX1619_REG_R_MAN_ID); + chip_id = i2c_smbus_read_byte_data(new_client, + MAX1619_REG_R_CHIP_ID); + + if ((man_id == 0x4D) && (chip_id == 0x04)){ + kind = max1619; + } + } + + if (kind <= 0) { /* identification failed */ + dev_info(&adapter->dev, + "Unsupported chip (man_id=0x%02X, " + "chip_id=0x%02X).\n", man_id, chip_id); + goto exit_free; + } + + + if (kind == max1619){ + name = "max1619"; + } + + /* We can fill in the remaining client fields */ + strlcpy(new_client->name, name, I2C_NAME_SIZE); + new_client->id = max1619_id++; + data->valid = 0; + init_MUTEX(&data->update_lock); + + /* Tell the I2C layer a new client has arrived */ + if ((err = i2c_attach_client(new_client))) + goto exit_free; + + /* Initialize the MAX1619 chip */ + max1619_init_client(new_client); + + /* Register sysfs hooks */ + device_create_file(&new_client->dev, &dev_attr_temp1_input); + device_create_file(&new_client->dev, &dev_attr_temp2_input); + device_create_file(&new_client->dev, &dev_attr_temp2_min); + device_create_file(&new_client->dev, &dev_attr_temp2_max); + device_create_file(&new_client->dev, &dev_attr_temp2_crit); + device_create_file(&new_client->dev, &dev_attr_temp2_crit_hyst); + device_create_file(&new_client->dev, &dev_attr_alarms); + + return 0; + +exit_free: + kfree(data); +exit: + return err; +} + +static void max1619_init_client(struct i2c_client *client) +{ + u8 config; + + /* + * Start the conversions. 
+ */ + i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONVRATE, + 5); /* 2 Hz */ + config = i2c_smbus_read_byte_data(client, MAX1619_REG_R_CONFIG); + if (config & 0x40) + i2c_smbus_write_byte_data(client, MAX1619_REG_W_CONFIG, + config & 0xBF); /* run */ +} + +static int max1619_detach_client(struct i2c_client *client) +{ + int err; + + if ((err = i2c_detach_client(client))) { + dev_err(&client->dev, "Client deregistration failed, " + "client not detached.\n"); + return err; + } + + kfree(i2c_get_clientdata(client)); + return 0; +} + +static struct max1619_data *max1619_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max1619_data *data = i2c_get_clientdata(client); + + down(&data->update_lock); + + if ((jiffies - data->last_updated > HZ * 2) || + (jiffies < data->last_updated) || + !data->valid) { + + dev_dbg(&client->dev, "Updating max1619 data.\n"); + data->temp_input1 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_LOCAL_TEMP); + data->temp_input2 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_REMOTE_TEMP); + data->temp_high2 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_REMOTE_HIGH); + data->temp_low2 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_REMOTE_LOW); + data->temp_crit2 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_REMOTE_CRIT); + data->temp_hyst2 = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_TCRIT_HYST); + data->alarms = i2c_smbus_read_byte_data(client, + MAX1619_REG_R_STATUS); + + data->last_updated = jiffies; + data->valid = 1; + } + + up(&data->update_lock); + + return data; +} + +static int __init sensors_max1619_init(void) +{ + return i2c_add_driver(&max1619_driver); +} + +static void __exit sensors_max1619_exit(void) +{ + i2c_del_driver(&max1619_driver); +} + +MODULE_AUTHOR("Alexey Fisher and" + "Jean Delvare "); +MODULE_DESCRIPTION("MAX1619 sensor driver"); +MODULE_LICENSE("GPL"); + +module_init(sensors_max1619_init); +module_exit(sensors_max1619_exit); diff --git a/drivers/i2c/chips/rtc8564.c b/drivers/i2c/chips/rtc8564.c new file mode 100644 index 000000000..0fa55d45e --- /dev/null +++ b/drivers/i2c/chips/rtc8564.c @@ -0,0 +1,396 @@ +/* + * linux/drivers/i2c/chips/rtc8564.c + * + * Copyright (C) 2002-2004 Stefan Eletzhofer + * + * based on linux/drivers/acron/char/pcf8583.c + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Driver for system3's EPSON RTC 8564 chip + */ +#include +#include +#include +#include +#include +#include /* get the user-level API */ +#include +#include + +#include "rtc8564.h" + +#ifdef DEBUG +# define _DBG(x, fmt, args...) do{ if (debug>=x) printk(KERN_DEBUG"%s: " fmt "\n", __FUNCTION__, ##args); } while(0); +#else +# define _DBG(x, fmt, args...) 
do { } while(0); +#endif + +#define _DBGRTCTM(x, rtctm) if (debug>=x) printk("%s: secs=%d, mins=%d, hours=%d, mday=%d, " \ + "mon=%d, year=%d, wday=%d VL=%d\n", __FUNCTION__, \ + (rtctm).secs, (rtctm).mins, (rtctm).hours, (rtctm).mday, \ + (rtctm).mon, (rtctm).year, (rtctm).wday, (rtctm).vl); + +struct rtc8564_data { + struct i2c_client client; + u16 ctrl; +}; + +static inline u8 _rtc8564_ctrl1(struct i2c_client *client) +{ + struct rtc8564_data *data = i2c_get_clientdata(client); + return data->ctrl & 0xff; +} +static inline u8 _rtc8564_ctrl2(struct i2c_client *client) +{ + struct rtc8564_data *data = i2c_get_clientdata(client); + return (data->ctrl & 0xff00) >> 8; +} + +#define CTRL1(c) _rtc8564_ctrl1(c) +#define CTRL2(c) _rtc8564_ctrl2(c) + +#define BCD_TO_BIN(val) (((val)&15) + ((val)>>4)*10) +#define BIN_TO_BCD(val) ((((val)/10)<<4) + (val)%10) + +static int debug = 0; +MODULE_PARM(debug, "i"); + +static struct i2c_driver rtc8564_driver; + +static unsigned short ignore[] = { I2C_CLIENT_END }; +static unsigned short normal_addr[] = { 0x51, I2C_CLIENT_END }; + +static struct i2c_client_address_data addr_data = { + .normal_i2c = normal_addr, + .normal_i2c_range = ignore, + .probe = ignore, + .probe_range = ignore, + .ignore = ignore, + .ignore_range = ignore, + .force = ignore, +}; + +static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem); +static int rtc8564_write_mem(struct i2c_client *client, struct mem *mem); + +static int rtc8564_read(struct i2c_client *client, unsigned char adr, + unsigned char *buf, unsigned char len) +{ + int ret = -EIO; + unsigned char addr[1] = { adr }; + struct i2c_msg msgs[2] = { + {client->addr, 0, 1, addr}, + {client->addr, I2C_M_RD, len, buf} + }; + + _DBG(1, "client=%p, adr=%d, buf=%p, len=%d", client, adr, buf, len); + + if (!buf || !client) { + ret = -EINVAL; + goto done; + } + + ret = i2c_transfer(client->adapter, msgs, 2); + if (ret == 2) { + ret = 0; + } + +done: + return ret; +} + +static int rtc8564_write(struct i2c_client *client, unsigned char adr, + unsigned char *data, unsigned char len) +{ + int ret = 0; + unsigned char _data[16]; + struct i2c_msg wr; + int i; + + if (!client || !data || len > 15) { + ret = -EINVAL; + goto done; + } + + _DBG(1, "client=%p, adr=%d, buf=%p, len=%d", client, adr, data, len); + + _data[0] = adr; + for (i = 0; i < len; i++) { + _data[i + 1] = data[i]; + _DBG(5, "data[%d] = 0x%02x (%d)", i, data[i], data[i]); + } + + wr.addr = client->addr; + wr.flags = 0; + wr.len = len + 1; + wr.buf = _data; + + ret = i2c_transfer(client->adapter, &wr, 1); + if (ret == 1) { + ret = 0; + } + +done: + return ret; +} + +static int rtc8564_attach(struct i2c_adapter *adap, int addr, int kind) +{ + int ret; + struct i2c_client *new_client; + struct rtc8564_data *d; + unsigned char data[10]; + unsigned char ad[1] = { 0 }; + struct i2c_msg ctrl_wr[1] = { + {addr, 0, 2, data} + }; + struct i2c_msg ctrl_rd[2] = { + {addr, 0, 1, ad}, + {addr, I2C_M_RD, 2, data} + }; + + d = kmalloc(sizeof(struct rtc8564_data), GFP_KERNEL); + if (!d) { + ret = -ENOMEM; + goto done; + } + memset(d, 0, sizeof(struct rtc8564_data)); + new_client = &d->client; + + strlcpy(new_client->name, "RTC8564", I2C_NAME_SIZE); + i2c_set_clientdata(new_client, d); + new_client->id = rtc8564_driver.id; + new_client->flags = I2C_CLIENT_ALLOW_USE | I2C_DF_NOTIFY; + new_client->addr = addr; + new_client->adapter = adap; + new_client->driver = &rtc8564_driver; + + _DBG(1, "client=%p", new_client); + _DBG(1, "client.id=%d", new_client->id); + + /* init ctrl1 reg 
*/ + data[0] = 0; + data[1] = 0; + ret = i2c_transfer(new_client->adapter, ctrl_wr, 1); + if (ret != 1) { + printk(KERN_INFO "rtc8564: cant init ctrl1\n"); + ret = -ENODEV; + goto done; + } + + /* read back ctrl1 and ctrl2 */ + ret = i2c_transfer(new_client->adapter, ctrl_rd, 2); + if (ret != 2) { + printk(KERN_INFO "rtc8564: cant read ctrl\n"); + ret = -ENODEV; + goto done; + } + + d->ctrl = data[0] | (data[1] << 8); + + _DBG(1, "RTC8564_REG_CTRL1=%02x, RTC8564_REG_CTRL2=%02x", + data[0], data[1]); + + ret = i2c_attach_client(new_client); +done: + if (ret) { + kfree(d); + } + return ret; +} + +static int rtc8564_probe(struct i2c_adapter *adap) +{ + return i2c_probe(adap, &addr_data, rtc8564_attach); +} + +static int rtc8564_detach(struct i2c_client *client) +{ + i2c_detach_client(client); + kfree(i2c_get_clientdata(client)); + return 0; +} + +static int rtc8564_get_datetime(struct i2c_client *client, struct rtc_tm *dt) +{ + int ret = -EIO; + unsigned char buf[15]; + + _DBG(1, "client=%p, dt=%p", client, dt); + + if (!dt || !client) + return -EINVAL; + + memset(buf, 0, sizeof(buf)); + + ret = rtc8564_read(client, 0, buf, 15); + if (ret) + return ret; + + /* century stored in minute alarm reg */ + dt->year = BCD_TO_BIN(buf[RTC8564_REG_YEAR]); + dt->year += 100 * BCD_TO_BIN(buf[RTC8564_REG_AL_MIN] & 0x3f); + dt->mday = BCD_TO_BIN(buf[RTC8564_REG_DAY] & 0x3f); + dt->wday = BCD_TO_BIN(buf[RTC8564_REG_WDAY] & 7); + dt->mon = BCD_TO_BIN(buf[RTC8564_REG_MON_CENT] & 0x1f); + + dt->secs = BCD_TO_BIN(buf[RTC8564_REG_SEC] & 0x7f); + dt->vl = (buf[RTC8564_REG_SEC] & 0x80) == 0x80; + dt->mins = BCD_TO_BIN(buf[RTC8564_REG_MIN] & 0x7f); + dt->hours = BCD_TO_BIN(buf[RTC8564_REG_HR] & 0x3f); + + _DBGRTCTM(2, *dt); + + return 0; +} + +static int +rtc8564_set_datetime(struct i2c_client *client, struct rtc_tm *dt, int datetoo) +{ + int ret, len = 5; + unsigned char buf[15]; + + _DBG(1, "client=%p, dt=%p", client, dt); + + if (!dt || !client) + return -EINVAL; + + _DBGRTCTM(2, *dt); + + buf[RTC8564_REG_CTRL1] = CTRL1(client) | RTC8564_CTRL1_STOP; + buf[RTC8564_REG_CTRL2] = CTRL2(client); + buf[RTC8564_REG_SEC] = BIN_TO_BCD(dt->secs); + buf[RTC8564_REG_MIN] = BIN_TO_BCD(dt->mins); + buf[RTC8564_REG_HR] = BIN_TO_BCD(dt->hours); + + if (datetoo) { + len += 5; + buf[RTC8564_REG_DAY] = BIN_TO_BCD(dt->mday); + buf[RTC8564_REG_WDAY] = BIN_TO_BCD(dt->wday); + buf[RTC8564_REG_MON_CENT] = BIN_TO_BCD(dt->mon) & 0x1f; + /* century stored in minute alarm reg */ + buf[RTC8564_REG_YEAR] = BIN_TO_BCD(dt->year % 100); + buf[RTC8564_REG_AL_MIN] = BIN_TO_BCD(dt->year / 100); + } + + ret = rtc8564_write(client, 0, buf, len); + if (ret) { + _DBG(1, "error writing data! %d", ret); + } + + buf[RTC8564_REG_CTRL1] = CTRL1(client); + ret = rtc8564_write(client, 0, buf, 1); + if (ret) { + _DBG(1, "error writing data! 
%d", ret); + } + + return ret; +} + +static int rtc8564_get_ctrl(struct i2c_client *client, unsigned int *ctrl) +{ + struct rtc8564_data *data = i2c_get_clientdata(client); + + if (!ctrl || !client) + return -1; + + *ctrl = data->ctrl; + return 0; +} + +static int rtc8564_set_ctrl(struct i2c_client *client, unsigned int *ctrl) +{ + struct rtc8564_data *data = i2c_get_clientdata(client); + unsigned char buf[2]; + + if (!ctrl || !client) + return -1; + + buf[0] = *ctrl & 0xff; + buf[1] = (*ctrl & 0xff00) >> 8; + data->ctrl = *ctrl; + + return rtc8564_write(client, 0, buf, 2); +} + +static int rtc8564_read_mem(struct i2c_client *client, struct mem *mem) +{ + + if (!mem || !client) + return -EINVAL; + + return rtc8564_read(client, mem->loc, mem->data, mem->nr); +} + +static int rtc8564_write_mem(struct i2c_client *client, struct mem *mem) +{ + + if (!mem || !client) + return -EINVAL; + + return rtc8564_write(client, mem->loc, mem->data, mem->nr); +} + +static int +rtc8564_command(struct i2c_client *client, unsigned int cmd, void *arg) +{ + + _DBG(1, "cmd=%d", cmd); + + switch (cmd) { + case RTC_GETDATETIME: + return rtc8564_get_datetime(client, arg); + + case RTC_SETTIME: + return rtc8564_set_datetime(client, arg, 0); + + case RTC_SETDATETIME: + return rtc8564_set_datetime(client, arg, 1); + + case RTC_GETCTRL: + return rtc8564_get_ctrl(client, arg); + + case RTC_SETCTRL: + return rtc8564_set_ctrl(client, arg); + + case MEM_READ: + return rtc8564_read_mem(client, arg); + + case MEM_WRITE: + return rtc8564_write_mem(client, arg); + + default: + return -EINVAL; + } +} + +static struct i2c_driver rtc8564_driver = { + .owner = THIS_MODULE, + .name = "RTC8564", + .id = I2C_DRIVERID_RTC8564, + .flags = I2C_DF_NOTIFY, + .attach_adapter = rtc8564_probe, + .detach_client = rtc8564_detach, + .command = rtc8564_command +}; + +static __init int rtc8564_init(void) +{ + return i2c_add_driver(&rtc8564_driver); +} + +static __exit void rtc8564_exit(void) +{ + i2c_del_driver(&rtc8564_driver); +} + +MODULE_AUTHOR("Stefan Eletzhofer "); +MODULE_DESCRIPTION("EPSON RTC8564 Driver"); +MODULE_LICENSE("GPL"); + +module_init(rtc8564_init); +module_exit(rtc8564_exit); diff --git a/drivers/i2c/chips/rtc8564.h b/drivers/i2c/chips/rtc8564.h new file mode 100644 index 000000000..e5342d10b --- /dev/null +++ b/drivers/i2c/chips/rtc8564.h @@ -0,0 +1,78 @@ +/* + * linux/drivers/i2c/chips/rtc8564.h + * + * Copyright (C) 2002-2004 Stefan Eletzhofer + * + * based on linux/drivers/acron/char/pcf8583.h + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
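Every time and date field in the register map below is stored as BCD, and rtc8564_get_datetime()/rtc8564_set_datetime() above additionally pack the century into the minute alarm register. A standalone sketch of that packing, reusing the same arithmetic as the driver's BCD_TO_BIN()/BIN_TO_BCD() macros (the year 2004 is an arbitrary demo value):

#include <stdio.h>

#define BCD_TO_BIN(val) (((val)&15) + ((val)>>4)*10)
#define BIN_TO_BCD(val) ((((val)/10)<<4) + (val)%10)

int main(void)
{
        unsigned int year = 2004;

        /* the year register holds only two BCD digits, so the century
           goes into the minute alarm register */
        unsigned char reg_year  = BIN_TO_BCD(year % 100);       /* 0x04 */
        unsigned char reg_almin = BIN_TO_BCD(year / 100);       /* 0x20 */

        unsigned int read_back = BCD_TO_BIN(reg_year) +
                                 100 * BCD_TO_BIN(reg_almin & 0x3f);

        printf("regs 0x%02x/0x%02x -> %u\n", reg_year, reg_almin, read_back);
        /* prints: regs 0x04/0x20 -> 2004 */
        return 0;
}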
+ */ +struct rtc_tm { + unsigned char secs; + unsigned char mins; + unsigned char hours; + unsigned char mday; + unsigned char mon; + unsigned short year; /* xxxx 4 digits :) */ + unsigned char wday; + unsigned char vl; +}; + +struct mem { + unsigned int loc; + unsigned int nr; + unsigned char *data; +}; + +#define RTC_GETDATETIME 0 +#define RTC_SETTIME 1 +#define RTC_SETDATETIME 2 +#define RTC_GETCTRL 3 +#define RTC_SETCTRL 4 +#define MEM_READ 5 +#define MEM_WRITE 6 + +#define RTC8564_REG_CTRL1 0x0 /* T 0 S 0 | T 0 0 0 */ +#define RTC8564_REG_CTRL2 0x1 /* 0 0 0 TI/TP | AF TF AIE TIE */ +#define RTC8564_REG_SEC 0x2 /* VL 4 2 1 | 8 4 2 1 */ +#define RTC8564_REG_MIN 0x3 /* x 4 2 1 | 8 4 2 1 */ +#define RTC8564_REG_HR 0x4 /* x x 2 1 | 8 4 2 1 */ +#define RTC8564_REG_DAY 0x5 /* x x 2 1 | 8 4 2 1 */ +#define RTC8564_REG_WDAY 0x6 /* x x x x | x 4 2 1 */ +#define RTC8564_REG_MON_CENT 0x7 /* C x x 1 | 8 4 2 1 */ +#define RTC8564_REG_YEAR 0x8 /* 8 4 2 1 | 8 4 2 1 */ +#define RTC8564_REG_AL_MIN 0x9 /* AE 4 2 1 | 8 4 2 1 */ +#define RTC8564_REG_AL_HR 0xa /* AE 4 2 1 | 8 4 2 1 */ +#define RTC8564_REG_AL_DAY 0xb /* AE x 2 1 | 8 4 2 1 */ +#define RTC8564_REG_AL_WDAY 0xc /* AE x x x | x 4 2 1 */ +#define RTC8564_REG_CLKOUT 0xd /* FE x x x | x x FD1 FD0 */ +#define RTC8564_REG_TCTL 0xe /* TE x x x | x x FD1 FD0 */ +#define RTC8564_REG_TIMER 0xf /* 8 bit binary */ + +/* Control reg */ +#define RTC8564_CTRL1_TEST1 (1<<3) +#define RTC8564_CTRL1_STOP (1<<5) +#define RTC8564_CTRL1_TEST2 (1<<7) + +#define RTC8564_CTRL2_TIE (1<<0) +#define RTC8564_CTRL2_AIE (1<<1) +#define RTC8564_CTRL2_TF (1<<2) +#define RTC8564_CTRL2_AF (1<<3) +#define RTC8564_CTRL2_TI_TP (1<<4) + +/* CLKOUT frequencies */ +#define RTC8564_FD_32768HZ (0x0) +#define RTC8564_FD_1024HZ (0x1) +#define RTC8564_FD_32 (0x2) +#define RTC8564_FD_1HZ (0x3) + +/* Timer CTRL */ +#define RTC8564_TD_4096HZ (0x0) +#define RTC8564_TD_64HZ (0x1) +#define RTC8564_TD_1HZ (0x2) +#define RTC8564_TD_1_60HZ (0x3) + +#define I2C_DRIVERID_RTC8564 0xf000 diff --git a/drivers/ide/h8300/ide-h8300.c b/drivers/ide/h8300/ide-h8300.c new file mode 100644 index 000000000..fb91cb8bf --- /dev/null +++ b/drivers/ide/h8300/ide-h8300.c @@ -0,0 +1,119 @@ +/* + * drivers/ide/ide-h8300.c + * H8/300 generic IDE interface + */ + +#include +#include +#include + +#include +#include + +#define bswap(d) \ +({ \ + u16 r; \ + __asm__("mov.b %w1,r1h\n\t" \ + "mov.b %x1,r1l\n\t" \ + "mov.w r1,%0" \ + :"=r"(r) \ + :"r"(d) \ + :"er1"); \ + (r); \ +}) + +static void mm_outw(u16 d, unsigned long a) +{ + __asm__("mov.b %w0,r2h\n\t" + "mov.b %x0,r2l\n\t" + "mov.w r2,@%1" + : + :"r"(d),"r"(a) + :"er2"); +} + +static u16 mm_inw(unsigned long a) +{ + register u16 r __asm__("er0"); + __asm__("mov.w @%1,r2\n\t" + "mov.b r2l,%x0\n\t" + "mov.b r2h,%w0" + :"=r"(r) + :"r"(a) + :"er2"); + return r; +} + +static void mm_outsw(unsigned long addr, void *buf, u32 len) +{ + unsigned short *bp = (unsigned short *)buf; + for (; len > 0; len--, bp++) + *(volatile u16 *)addr = bswap(*bp); +} + +static void mm_insw(unsigned long addr, void *buf, u32 len) +{ + unsigned short *bp = (unsigned short *)buf; + for (; len > 0; len--, bp++) + *bp = bswap(*(volatile u16 *)addr); +} + +#define H8300_IDE_GAP (2) + +static inline void hw_setup(hw_regs_t *hw) +{ + int i; + + memset(hw, 0, sizeof(hw_regs_t)); + for (i = 0; i <= IDE_STATUS_OFFSET; i++) + hw->io_ports[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i; + hw->io_ports[IDE_CONTROL_OFFSET] = CONFIG_H8300_IDE_ALT; + hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ; + hw->dma = NO_DMA; 
+ hw->chipset = ide_generic; +} + +static inline void hwif_setup(ide_hwif_t *hwif) +{ + default_hwif_iops(hwif); + + hwif->mmio = 2; + hwif->OUTW = mm_outw; + hwif->OUTSW = mm_outsw; + hwif->INW = mm_inw; + hwif->INSW = mm_insw; + hwif->OUTL = NULL; + hwif->INL = NULL; + hwif->OUTSL = NULL; + hwif->INSL = NULL; +} + +void __init h8300_ide_init(void) +{ + hw_regs_t hw; + ide_hwif_t *hwif; + int idx; + + if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300")) + goto out_busy; + if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) { + release_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8); + goto out_busy; + } + + hw_setup(&hw); + + /* register if */ + idx = ide_register_hw(&hw, &hwif); + if (idx == -1) { + printk(KERN_ERR "ide-h8300: IDE I/F register failed\n"); + return; + } + + hwif_setup(hwif); + printk(KERN_INFO "ide%d: H8/300 generic IDE interface\n", idx); + return; + +out_busy: + printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); +} diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c new file mode 100644 index 000000000..17212b420 --- /dev/null +++ b/drivers/md/dm-exception-store.c @@ -0,0 +1,648 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" +#include "dm-snap.h" +#include "dm-io.h" +#include "kcopyd.h" + +#include +#include +#include +#include + +/*----------------------------------------------------------------- + * Persistent snapshots, by persistent we mean that the snapshot + * will survive a reboot. + *---------------------------------------------------------------*/ + +/* + * We need to store a record of which parts of the origin have + * been copied to the snapshot device. The snapshot code + * requires that we copy exception chunks to chunk aligned areas + * of the COW store. It makes sense therefore, to store the + * metadata in chunk size blocks. + * + * There is no backward or forward compatibility implemented, + * snapshots with different disk versions than the kernel will + * not be usable. It is expected that "lvcreate" will blank out + * the start of a fresh COW device before calling the snapshot + * constructor. + * + * The first chunk of the COW device just contains the header. + * After this there is a chunk filled with exception metadata, + * followed by as many exception chunks as can fit in the + * metadata areas. + * + * All on disk structures are in little-endian format. The end + * of the exceptions info is indicated by an exception with a + * new_chunk of 0, which is invalid since it would point to the + * header chunk. + */ + +/* + * Magic for persistent snapshots: "SnAp" - Feeble isn't it. + */ +#define SNAP_MAGIC 0x70416e53 + +/* + * The on-disk version of the metadata. + */ +#define SNAPSHOT_DISK_VERSION 1 + +struct disk_header { + uint32_t magic; + + /* + * Is this snapshot valid. There is no way of recovering + * an invalid snapshot. + */ + uint32_t valid; + + /* + * Simple, incrementing version. no backward + * compatibility. + */ + uint32_t version; + + /* In sectors */ + uint32_t chunk_size; +}; + +struct disk_exception { + uint64_t old_chunk; + uint64_t new_chunk; +}; + +struct commit_callback { + void (*callback)(void *, int success); + void *context; +}; + +/* + * The top level structure for a persistent exception store. 
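A small standalone sketch of the chunk layout described above; it reuses the arithmetic of area_io() and dm_create_persistent() further down in this file. The 16-sector (8 KiB) chunk size is only an example, and the program itself is illustrative, not part of the driver.

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9                          /* 512-byte sectors */

/* on-disk exception record, as defined above: two little-endian u64s */
struct disk_exception {
        uint64_t old_chunk;
        uint64_t new_chunk;
};

int main(void)
{
        unsigned int chunk_size = 16;           /* example: 16 sectors = 8 KiB chunks */
        unsigned int per_area = (chunk_size << SECTOR_SHIFT) /
                                sizeof(struct disk_exception);
        unsigned int area;

        printf("%u exceptions per metadata area\n", per_area);

        /* chunk 0 is the header; metadata area N comes just before the
           per_area data chunks it describes, as in area_io() */
        for (area = 0; area < 3; area++)
                printf("metadata area %u -> chunk %u\n",
                       area, 1 + (per_area + 1) * area);
        return 0;
}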
+ */ +struct pstore { + struct dm_snapshot *snap; /* up pointer to my snapshot */ + int version; + int valid; + uint32_t chunk_size; + uint32_t exceptions_per_area; + + /* + * Now that we have an asynchronous kcopyd there is no + * need for large chunk sizes, so it wont hurt to have a + * whole chunks worth of metadata in memory at once. + */ + void *area; + + /* + * Used to keep track of which metadata area the data in + * 'chunk' refers to. + */ + uint32_t current_area; + + /* + * The next free chunk for an exception. + */ + uint32_t next_free; + + /* + * The index of next free exception in the current + * metadata area. + */ + uint32_t current_committed; + + atomic_t pending_count; + uint32_t callback_count; + struct commit_callback *callbacks; +}; + +static inline unsigned int sectors_to_pages(unsigned int sectors) +{ + return sectors / (PAGE_SIZE >> 9); +} + +static int alloc_area(struct pstore *ps) +{ + int r = -ENOMEM; + size_t len; + + len = ps->chunk_size << SECTOR_SHIFT; + + /* + * Allocate the chunk_size block of memory that will hold + * a single metadata area. + */ + ps->area = vmalloc(len); + if (!ps->area) + return r; + + return 0; +} + +static void free_area(struct pstore *ps) +{ + vfree(ps->area); +} + +/* + * Read or write a chunk aligned and sized block of data from a device. + */ +static int chunk_io(struct pstore *ps, uint32_t chunk, int rw) +{ + struct io_region where; + unsigned long bits; + + where.bdev = ps->snap->cow->bdev; + where.sector = ps->chunk_size * chunk; + where.count = ps->chunk_size; + + return dm_io_sync_vm(1, &where, rw, ps->area, &bits); +} + +/* + * Read or write a metadata area. Remembering to skip the first + * chunk which holds the header. + */ +static int area_io(struct pstore *ps, uint32_t area, int rw) +{ + int r; + uint32_t chunk; + + /* convert a metadata area index to a chunk index */ + chunk = 1 + ((ps->exceptions_per_area + 1) * area); + + r = chunk_io(ps, chunk, rw); + if (r) + return r; + + ps->current_area = area; + return 0; +} + +static int zero_area(struct pstore *ps, uint32_t area) +{ + memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT); + return area_io(ps, area, WRITE); +} + +static int read_header(struct pstore *ps, int *new_snapshot) +{ + int r; + struct disk_header *dh; + + r = chunk_io(ps, 0, READ); + if (r) + return r; + + dh = (struct disk_header *) ps->area; + + if (le32_to_cpu(dh->magic) == 0) { + *new_snapshot = 1; + + } else if (le32_to_cpu(dh->magic) == SNAP_MAGIC) { + *new_snapshot = 0; + ps->valid = le32_to_cpu(dh->valid); + ps->version = le32_to_cpu(dh->version); + ps->chunk_size = le32_to_cpu(dh->chunk_size); + + } else { + DMWARN("Invalid/corrupt snapshot"); + r = -ENXIO; + } + + return r; +} + +static int write_header(struct pstore *ps) +{ + struct disk_header *dh; + + memset(ps->area, 0, ps->chunk_size << SECTOR_SHIFT); + + dh = (struct disk_header *) ps->area; + dh->magic = cpu_to_le32(SNAP_MAGIC); + dh->valid = cpu_to_le32(ps->valid); + dh->version = cpu_to_le32(ps->version); + dh->chunk_size = cpu_to_le32(ps->chunk_size); + + return chunk_io(ps, 0, WRITE); +} + +/* + * Access functions for the disk exceptions, these do the endian conversions. 
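The read_exception()/write_exception() helpers below lean on the kernel's le64_to_cpu()/cpu_to_le64() to keep the on-disk records little-endian regardless of host byte order. As a standalone illustration of what that byte order means, a sketch that packs and unpacks one record explicitly (illustrative helpers, not part of the driver):

#include <stdio.h>
#include <stdint.h>

/* store a u64 least-significant byte first, the on-disk byte order */
static void put_le64(uint8_t *p, uint64_t v)
{
        int i;

        for (i = 0; i < 8; i++)
                p[i] = (uint8_t)(v >> (8 * i));
}

static uint64_t get_le64(const uint8_t *p)
{
        uint64_t v = 0;
        int i;

        for (i = 0; i < 8; i++)
                v |= (uint64_t)p[i] << (8 * i);
        return v;
}

int main(void)
{
        uint8_t rec[16];                /* old_chunk followed by new_chunk */

        put_le64(rec, 42);              /* arbitrary demo values */
        put_le64(rec + 8, 1234);
        printf("old=%llu new=%llu\n",
               (unsigned long long)get_le64(rec),
               (unsigned long long)get_le64(rec + 8));
        /* prints: old=42 new=1234 */
        return 0;
}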
+ */ +static struct disk_exception *get_exception(struct pstore *ps, uint32_t index) +{ + if (index >= ps->exceptions_per_area) + return NULL; + + return ((struct disk_exception *) ps->area) + index; +} + +static int read_exception(struct pstore *ps, + uint32_t index, struct disk_exception *result) +{ + struct disk_exception *e; + + e = get_exception(ps, index); + if (!e) + return -EINVAL; + + /* copy it */ + result->old_chunk = le64_to_cpu(e->old_chunk); + result->new_chunk = le64_to_cpu(e->new_chunk); + + return 0; +} + +static int write_exception(struct pstore *ps, + uint32_t index, struct disk_exception *de) +{ + struct disk_exception *e; + + e = get_exception(ps, index); + if (!e) + return -EINVAL; + + /* copy it */ + e->old_chunk = cpu_to_le64(de->old_chunk); + e->new_chunk = cpu_to_le64(de->new_chunk); + + return 0; +} + +/* + * Registers the exceptions that are present in the current area. + * 'full' is filled in to indicate if the area has been + * filled. + */ +static int insert_exceptions(struct pstore *ps, int *full) +{ + int r; + unsigned int i; + struct disk_exception de; + + /* presume the area is full */ + *full = 1; + + for (i = 0; i < ps->exceptions_per_area; i++) { + r = read_exception(ps, i, &de); + + if (r) + return r; + + /* + * If the new_chunk is pointing at the start of + * the COW device, where the first metadata area + * is we know that we've hit the end of the + * exceptions. Therefore the area is not full. + */ + if (de.new_chunk == 0LL) { + ps->current_committed = i; + *full = 0; + break; + } + + /* + * Keep track of the start of the free chunks. + */ + if (ps->next_free <= de.new_chunk) + ps->next_free = de.new_chunk + 1; + + /* + * Otherwise we add the exception to the snapshot. + */ + r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk); + if (r) + return r; + } + + return 0; +} + +static int read_exceptions(struct pstore *ps) +{ + uint32_t area; + int r, full = 1; + + /* + * Keeping reading chunks and inserting exceptions until + * we find a partially full area. + */ + for (area = 0; full; area++) { + r = area_io(ps, area, READ); + if (r) + return r; + + r = insert_exceptions(ps, &full); + if (r) + return r; + } + + return 0; +} + +static inline struct pstore *get_info(struct exception_store *store) +{ + return (struct pstore *) store->context; +} + +static void persistent_fraction_full(struct exception_store *store, + sector_t *numerator, sector_t *denominator) +{ + *numerator = get_info(store)->next_free * store->snap->chunk_size; + *denominator = get_dev_size(store->snap->cow->bdev); +} + +static void persistent_destroy(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + + dm_io_put(sectors_to_pages(ps->chunk_size)); + vfree(ps->callbacks); + free_area(ps); + kfree(ps); +} + +static int persistent_read_metadata(struct exception_store *store) +{ + int r, new_snapshot; + struct pstore *ps = get_info(store); + + /* + * Read the snapshot header. + */ + r = read_header(ps, &new_snapshot); + if (r) + return r; + + /* + * Do we need to setup a new snapshot ? + */ + if (new_snapshot) { + r = write_header(ps); + if (r) { + DMWARN("write_header failed"); + return r; + } + + r = zero_area(ps, 0); + if (r) { + DMWARN("zero_area(0) failed"); + return r; + } + + } else { + /* + * Sanity checks. 
+ */ + if (!ps->valid) { + DMWARN("snapshot is marked invalid"); + return -EINVAL; + } + + if (ps->version != SNAPSHOT_DISK_VERSION) { + DMWARN("unable to handle snapshot disk version %d", + ps->version); + return -EINVAL; + } + + /* + * Read the metadata. + */ + r = read_exceptions(ps); + if (r) + return r; + } + + return 0; +} + +static int persistent_prepare(struct exception_store *store, + struct exception *e) +{ + struct pstore *ps = get_info(store); + uint32_t stride; + sector_t size = get_dev_size(store->snap->cow->bdev); + + /* Is there enough room ? */ + if (size < ((ps->next_free + 1) * store->snap->chunk_size)) + return -ENOSPC; + + e->new_chunk = ps->next_free; + + /* + * Move onto the next free pending, making sure to take + * into account the location of the metadata chunks. + */ + stride = (ps->exceptions_per_area + 1); + if ((++ps->next_free % stride) == 1) + ps->next_free++; + + atomic_inc(&ps->pending_count); + return 0; +} + +static void persistent_commit(struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context) +{ + int r; + unsigned int i; + struct pstore *ps = get_info(store); + struct disk_exception de; + struct commit_callback *cb; + + de.old_chunk = e->old_chunk; + de.new_chunk = e->new_chunk; + write_exception(ps, ps->current_committed++, &de); + + /* + * Add the callback to the back of the array. This code + * is the only place where the callback array is + * manipulated, and we know that it will never be called + * multiple times concurrently. + */ + cb = ps->callbacks + ps->callback_count++; + cb->callback = callback; + cb->context = callback_context; + + /* + * If there are no more exceptions in flight, or we have + * filled this metadata area we commit the exceptions to + * disk. + */ + if (atomic_dec_and_test(&ps->pending_count) || + (ps->current_committed == ps->exceptions_per_area)) { + r = area_io(ps, ps->current_area, WRITE); + if (r) + ps->valid = 0; + + for (i = 0; i < ps->callback_count; i++) { + cb = ps->callbacks + i; + cb->callback(cb->context, r == 0 ? 1 : 0); + } + + ps->callback_count = 0; + } + + /* + * Have we completely filled the current area ? + */ + if (ps->current_committed == ps->exceptions_per_area) { + ps->current_committed = 0; + r = zero_area(ps, ps->current_area + 1); + if (r) + ps->valid = 0; + } +} + +static void persistent_drop(struct exception_store *store) +{ + struct pstore *ps = get_info(store); + + ps->valid = 0; + if (write_header(ps)) + DMWARN("write header failed"); +} + +int dm_create_persistent(struct exception_store *store, uint32_t chunk_size) +{ + int r; + struct pstore *ps; + + r = dm_io_get(sectors_to_pages(chunk_size)); + if (r) + return r; + + /* allocate the pstore */ + ps = kmalloc(sizeof(*ps), GFP_KERNEL); + if (!ps) { + r = -ENOMEM; + goto bad; + } + + ps->snap = store->snap; + ps->valid = 1; + ps->version = SNAPSHOT_DISK_VERSION; + ps->chunk_size = chunk_size; + ps->exceptions_per_area = (chunk_size << SECTOR_SHIFT) / + sizeof(struct disk_exception); + ps->next_free = 2; /* skipping the header and first area */ + ps->current_committed = 0; + + r = alloc_area(ps); + if (r) + goto bad; + + /* + * Allocate space for all the callbacks. 
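+ * One slot per exception in a metadata area is sufficient:
+ * persistent_commit() drains the callback array whenever the
+ * last pending exception completes or the area fills up.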
+ */ + ps->callback_count = 0; + atomic_set(&ps->pending_count, 0); + ps->callbacks = dm_vcalloc(ps->exceptions_per_area, + sizeof(*ps->callbacks)); + + if (!ps->callbacks) { + r = -ENOMEM; + goto bad; + } + + store->destroy = persistent_destroy; + store->read_metadata = persistent_read_metadata; + store->prepare_exception = persistent_prepare; + store->commit_exception = persistent_commit; + store->drop_snapshot = persistent_drop; + store->fraction_full = persistent_fraction_full; + store->context = ps; + + return 0; + + bad: + dm_io_put(sectors_to_pages(chunk_size)); + if (ps) { + if (ps->area) + free_area(ps); + + kfree(ps); + } + return r; +} + +/*----------------------------------------------------------------- + * Implementation of the store for non-persistent snapshots. + *---------------------------------------------------------------*/ +struct transient_c { + sector_t next_free; +}; + +static void transient_destroy(struct exception_store *store) +{ + kfree(store->context); +} + +static int transient_read_metadata(struct exception_store *store) +{ + return 0; +} + +static int transient_prepare(struct exception_store *store, struct exception *e) +{ + struct transient_c *tc = (struct transient_c *) store->context; + sector_t size = get_dev_size(store->snap->cow->bdev); + + if (size < (tc->next_free + store->snap->chunk_size)) + return -1; + + e->new_chunk = sector_to_chunk(store->snap, tc->next_free); + tc->next_free += store->snap->chunk_size; + + return 0; +} + +static void transient_commit(struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context) +{ + /* Just succeed */ + callback(callback_context, 1); +} + +static void transient_fraction_full(struct exception_store *store, + sector_t *numerator, sector_t *denominator) +{ + *numerator = ((struct transient_c *) store->context)->next_free; + *denominator = get_dev_size(store->snap->cow->bdev); +} + +int dm_create_transient(struct exception_store *store, + struct dm_snapshot *s, int blocksize) +{ + struct transient_c *tc; + + memset(store, 0, sizeof(*store)); + store->destroy = transient_destroy; + store->read_metadata = transient_read_metadata; + store->prepare_exception = transient_prepare; + store->commit_exception = transient_commit; + store->fraction_full = transient_fraction_full; + store->snap = s; + + tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL); + if (!tc) + return -ENOMEM; + + tc->next_free = 0; + store->context = tc; + + return 0; +} diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c new file mode 100644 index 000000000..9f3fb61fd --- /dev/null +++ b/drivers/md/dm-io.c @@ -0,0 +1,647 @@ +/* + * Copyright (C) 2003 Sistina Software + * + * This file is released under the GPL. 
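+ *
+ * Synchronous and asynchronous io to one or more regions of one
+ * or more block devices, used by the exception store, dirty log
+ * and mirror code.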
+ */ + +#include "dm-io.h" + +#include +#include +#include +#include +#include + +#define BIO_POOL_SIZE 256 + + +/*----------------------------------------------------------------- + * Bio set, move this to bio.c + *---------------------------------------------------------------*/ +#define BV_NAME_SIZE 16 +struct biovec_pool { + int nr_vecs; + char name[BV_NAME_SIZE]; + kmem_cache_t *slab; + mempool_t *pool; + atomic_t allocated; /* FIXME: debug */ +}; + +#define BIOVEC_NR_POOLS 6 +struct bio_set { + char name[BV_NAME_SIZE]; + kmem_cache_t *bio_slab; + mempool_t *bio_pool; + struct biovec_pool pools[BIOVEC_NR_POOLS]; +}; + +static void bio_set_exit(struct bio_set *bs) +{ + unsigned i; + struct biovec_pool *bp; + + if (bs->bio_pool) + mempool_destroy(bs->bio_pool); + + if (bs->bio_slab) + kmem_cache_destroy(bs->bio_slab); + + for (i = 0; i < BIOVEC_NR_POOLS; i++) { + bp = bs->pools + i; + if (bp->pool) + mempool_destroy(bp->pool); + + if (bp->slab) + kmem_cache_destroy(bp->slab); + } +} + +static void mk_name(char *str, size_t len, const char *prefix, unsigned count) +{ + snprintf(str, len, "%s-%u", prefix, count); +} + +static int bio_set_init(struct bio_set *bs, const char *slab_prefix, + unsigned pool_entries, unsigned scale) +{ + /* FIXME: this must match bvec_index(), why not go the + * whole hog and have a pool per power of 2 ? */ + static unsigned _vec_lengths[BIOVEC_NR_POOLS] = { + 1, 4, 16, 64, 128, BIO_MAX_PAGES + }; + + + unsigned i, size; + struct biovec_pool *bp; + + /* zero the bs so we can tear down properly on error */ + memset(bs, 0, sizeof(*bs)); + + /* + * Set up the bio pool. + */ + snprintf(bs->name, sizeof(bs->name), "%s-bio", slab_prefix); + + bs->bio_slab = kmem_cache_create(bs->name, sizeof(struct bio), 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + if (!bs->bio_slab) { + DMWARN("can't init bio slab"); + goto bad; + } + + bs->bio_pool = mempool_create(pool_entries, mempool_alloc_slab, + mempool_free_slab, bs->bio_slab); + if (!bs->bio_pool) { + DMWARN("can't init bio pool"); + goto bad; + } + + /* + * Set up the biovec pools. + */ + for (i = 0; i < BIOVEC_NR_POOLS; i++) { + bp = bs->pools + i; + bp->nr_vecs = _vec_lengths[i]; + atomic_set(&bp->allocated, 1); /* FIXME: debug */ + + + size = bp->nr_vecs * sizeof(struct bio_vec); + + mk_name(bp->name, sizeof(bp->name), slab_prefix, i); + bp->slab = kmem_cache_create(bp->name, size, 0, + SLAB_HWCACHE_ALIGN, NULL, NULL); + if (!bp->slab) { + DMWARN("can't init biovec slab cache"); + goto bad; + } + + if (i >= scale) + pool_entries >>= 1; + + bp->pool = mempool_create(pool_entries, mempool_alloc_slab, + mempool_free_slab, bp->slab); + if (!bp->pool) { + DMWARN("can't init biovec mempool"); + goto bad; + } + } + + return 0; + + bad: + bio_set_exit(bs); + return -ENOMEM; +} + +/* FIXME: blech */ +static inline unsigned bvec_index(unsigned nr) +{ + switch (nr) { + case 1: return 0; + case 2 ... 4: return 1; + case 5 ... 16: return 2; + case 17 ... 64: return 3; + case 65 ... 128:return 4; + case 129 ... 
BIO_MAX_PAGES: return 5; + } + + BUG(); + return 0; +} + +static inline void bs_bio_init(struct bio *bio) +{ + bio->bi_next = NULL; + bio->bi_flags = 1 << BIO_UPTODATE; + bio->bi_rw = 0; + bio->bi_vcnt = 0; + bio->bi_idx = 0; + bio->bi_phys_segments = 0; + bio->bi_hw_segments = 0; + bio->bi_size = 0; + bio->bi_max_vecs = 0; + bio->bi_end_io = NULL; + atomic_set(&bio->bi_cnt, 1); + bio->bi_private = NULL; +} + +static unsigned _bio_count = 0; +struct bio *bio_set_alloc(struct bio_set *bs, int gfp_mask, int nr_iovecs) +{ + struct biovec_pool *bp; + struct bio_vec *bv = NULL; + unsigned long idx; + struct bio *bio; + + bio = mempool_alloc(bs->bio_pool, gfp_mask); + if (unlikely(!bio)) + return NULL; + + bio_init(bio); + + if (likely(nr_iovecs)) { + idx = bvec_index(nr_iovecs); + bp = bs->pools + idx; + bv = mempool_alloc(bp->pool, gfp_mask); + if (!bv) { + mempool_free(bio, bs->bio_pool); + return NULL; + } + + memset(bv, 0, bp->nr_vecs * sizeof(*bv)); + bio->bi_flags |= idx << BIO_POOL_OFFSET; + bio->bi_max_vecs = bp->nr_vecs; + atomic_inc(&bp->allocated); + } + + bio->bi_io_vec = bv; + return bio; +} + +static void bio_set_free(struct bio_set *bs, struct bio *bio) +{ + struct biovec_pool *bp = bs->pools + BIO_POOL_IDX(bio); + + if (atomic_dec_and_test(&bp->allocated)) + BUG(); + + mempool_free(bio->bi_io_vec, bp->pool); + mempool_free(bio, bs->bio_pool); +} + +/*----------------------------------------------------------------- + * dm-io proper + *---------------------------------------------------------------*/ +static struct bio_set _bios; + +/* FIXME: can we shrink this ? */ +struct io { + unsigned long error; + atomic_t count; + struct task_struct *sleeper; + io_notify_fn callback; + void *context; +}; + +/* + * io contexts are only dynamically allocated for asynchronous + * io. Since async io is likely to be the majority of io we'll + * have the same number of io contexts as buffer heads ! (FIXME: + * must reduce this). + */ +static unsigned _num_ios; +static mempool_t *_io_pool; + +static void *alloc_io(int gfp_mask, void *pool_data) +{ + return kmalloc(sizeof(struct io), gfp_mask); +} + +static void free_io(void *element, void *pool_data) +{ + kfree(element); +} + +static unsigned int pages_to_ios(unsigned int pages) +{ + return 4 * pages; /* too many ? */ +} + +static int resize_pool(unsigned int new_ios) +{ + int r = 0; + + if (_io_pool) { + if (new_ios == 0) { + /* free off the pool */ + mempool_destroy(_io_pool); + _io_pool = NULL; + bio_set_exit(&_bios); + + } else { + /* resize the pool */ + r = mempool_resize(_io_pool, new_ios, GFP_KERNEL); + } + + } else { + /* create new pool */ + _io_pool = mempool_create(new_ios, alloc_io, free_io, NULL); + if (!_io_pool) + r = -ENOMEM; + + r = bio_set_init(&_bios, "dm-io", 512, 1); + if (r) { + mempool_destroy(_io_pool); + _io_pool = NULL; + } + } + + if (!r) + _num_ios = new_ios; + + return r; +} + +int dm_io_get(unsigned int num_pages) +{ + return resize_pool(_num_ios + pages_to_ios(num_pages)); +} + +void dm_io_put(unsigned int num_pages) +{ + resize_pool(_num_ios - pages_to_ios(num_pages)); +} + +/*----------------------------------------------------------------- + * We need to keep track of which region a bio is doing io for. + * In order to save a memory allocation we store this the last + * bvec which we know is unused (blech). 
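+ * do_region() allocates a spare bvec beyond those used for data,
+ * and the region number is stashed in that bvec's bv_len field.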
+ *---------------------------------------------------------------*/ +static inline void bio_set_region(struct bio *bio, unsigned region) +{ + bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len = region; +} + +static inline unsigned bio_get_region(struct bio *bio) +{ + return bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len; +} + +/*----------------------------------------------------------------- + * We need an io object to keep track of the number of bios that + * have been dispatched for a particular io. + *---------------------------------------------------------------*/ +static void dec_count(struct io *io, unsigned int region, int error) +{ + if (error) + set_bit(region, &io->error); + + if (atomic_dec_and_test(&io->count)) { + if (io->sleeper) + wake_up_process(io->sleeper); + + else { + int r = io->error; + io_notify_fn fn = io->callback; + void *context = io->context; + + mempool_free(io, _io_pool); + fn(r, context); + } + } +} + +/* FIXME Move this to bio.h? */ +static void zero_fill_bio(struct bio *bio) +{ + unsigned long flags; + struct bio_vec *bv; + int i; + + bio_for_each_segment(bv, bio, i) { + char *data = bvec_kmap_irq(bv, &flags); + memset(data, 0, bv->bv_len); + flush_dcache_page(bv->bv_page); + bvec_kunmap_irq(data, &flags); + } +} + +static int endio(struct bio *bio, unsigned int done, int error) +{ + struct io *io = (struct io *) bio->bi_private; + + /* keep going until we've finished */ + if (bio->bi_size) + return 1; + + if (error && bio_data_dir(bio) == READ) + zero_fill_bio(bio); + + dec_count(io, bio_get_region(bio), error); + bio_put(bio); + + return 0; +} + +static void bio_dtr(struct bio *bio) +{ + _bio_count--; + bio_set_free(&_bios, bio); +} + +/*----------------------------------------------------------------- + * These little objects provide an abstraction for getting a new + * destination page for io. + *---------------------------------------------------------------*/ +struct dpages { + void (*get_page)(struct dpages *dp, + struct page **p, unsigned long *len, unsigned *offset); + void (*next_page)(struct dpages *dp); + + unsigned context_u; + void *context_ptr; +}; + +/* + * Functions for getting the pages from a list. + */ +static void list_get_page(struct dpages *dp, + struct page **p, unsigned long *len, unsigned *offset) +{ + unsigned o = dp->context_u; + struct page_list *pl = (struct page_list *) dp->context_ptr; + + *p = pl->page; + *len = PAGE_SIZE - o; + *offset = o; +} + +static void list_next_page(struct dpages *dp) +{ + struct page_list *pl = (struct page_list *) dp->context_ptr; + dp->context_ptr = pl->next; + dp->context_u = 0; +} + +static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset) +{ + dp->get_page = list_get_page; + dp->next_page = list_next_page; + dp->context_u = offset; + dp->context_ptr = pl; +} + +/* + * Functions for getting the pages from a bvec. 
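+ * get_page() returns the current bio_vec's page, length and
+ * offset; next_page() steps to the next element of the array.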
+ */ +static void bvec_get_page(struct dpages *dp, + struct page **p, unsigned long *len, unsigned *offset) +{ + struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; + *p = bvec->bv_page; + *len = bvec->bv_len; + *offset = bvec->bv_offset; +} + +static void bvec_next_page(struct dpages *dp) +{ + struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; + dp->context_ptr = bvec + 1; +} + +static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec) +{ + dp->get_page = bvec_get_page; + dp->next_page = bvec_next_page; + dp->context_ptr = bvec; +} + +static void vm_get_page(struct dpages *dp, + struct page **p, unsigned long *len, unsigned *offset) +{ + *p = vmalloc_to_page(dp->context_ptr); + *offset = dp->context_u; + *len = PAGE_SIZE - dp->context_u; +} + +static void vm_next_page(struct dpages *dp) +{ + dp->context_ptr += PAGE_SIZE - dp->context_u; + dp->context_u = 0; +} + +static void vm_dp_init(struct dpages *dp, void *data) +{ + dp->get_page = vm_get_page; + dp->next_page = vm_next_page; + dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1); + dp->context_ptr = data; +} + +/*----------------------------------------------------------------- + * IO routines that accept a list of pages. + *---------------------------------------------------------------*/ +static void do_region(int rw, unsigned int region, struct io_region *where, + struct dpages *dp, struct io *io) +{ + struct bio *bio; + struct page *page; + unsigned long len; + unsigned offset; + unsigned num_bvecs; + sector_t remaining = where->count; + + while (remaining) { + /* + * Allocate a suitably sized bio, we add an extra + * bvec for bio_get/set_region(). + */ + num_bvecs = (remaining / (PAGE_SIZE >> 9)) + 2; + _bio_count++; + bio = bio_set_alloc(&_bios, GFP_NOIO, num_bvecs); + bio->bi_sector = where->sector + (where->count - remaining); + bio->bi_bdev = where->bdev; + bio->bi_end_io = endio; + bio->bi_private = io; + bio->bi_destructor = bio_dtr; + bio_set_region(bio, region); + + /* + * Try and add as many pages as possible. + */ + while (remaining) { + dp->get_page(dp, &page, &len, &offset); + len = min(len, to_bytes(remaining)); + if (!bio_add_page(bio, page, len, offset)) + break; + + offset = 0; + remaining -= to_sector(len); + dp->next_page(dp); + } + + atomic_inc(&io->count); + submit_bio(rw, bio); + } +} + +static void dispatch_io(int rw, unsigned int num_regions, + struct io_region *where, struct dpages *dp, + struct io *io, int sync) +{ + int i; + struct dpages old_pages = *dp; + + if (sync) + rw |= (1 << BIO_RW_SYNC); + + /* + * For multiple regions we need to be careful to rewind + * the dp object for each call to do_region. + */ + for (i = 0; i < num_regions; i++) { + *dp = old_pages; + if (where[i].count) + do_region(rw, i, where + i, dp, io); + } + + /* + * Drop the extra refence that we were holding to avoid + * the io being completed too early. 
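+ * (The count was initialised to 1 by sync_io()/async_io() before
+ * dispatch_io() was entered.)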
+ */ + dec_count(io, 0, 0); +} + +static int sync_io(unsigned int num_regions, struct io_region *where, + int rw, struct dpages *dp, unsigned long *error_bits) +{ + struct io io; + + if (num_regions > 1 && rw != WRITE) { + WARN_ON(1); + return -EIO; + } + + io.error = 0; + atomic_set(&io.count, 1); /* see dispatch_io() */ + io.sleeper = current; + + dispatch_io(rw, num_regions, where, dp, &io, 1); + + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + + if (!atomic_read(&io.count) || signal_pending(current)) + break; + + io_schedule(); + } + set_current_state(TASK_RUNNING); + + if (atomic_read(&io.count)) + return -EINTR; + + *error_bits = io.error; + return io.error ? -EIO : 0; +} + +static int async_io(unsigned int num_regions, struct io_region *where, int rw, + struct dpages *dp, io_notify_fn fn, void *context) +{ + struct io *io; + + if (num_regions > 1 && rw != WRITE) { + WARN_ON(1); + fn(1, context); + return -EIO; + } + + io = mempool_alloc(_io_pool, GFP_NOIO); + io->error = 0; + atomic_set(&io->count, 1); /* see dispatch_io() */ + io->sleeper = NULL; + io->callback = fn; + io->context = context; + + dispatch_io(rw, num_regions, where, dp, io, 0); + return 0; +} + +int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, + struct page_list *pl, unsigned int offset, + unsigned long *error_bits) +{ + struct dpages dp; + list_dp_init(&dp, pl, offset); + return sync_io(num_regions, where, rw, &dp, error_bits); +} + +int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw, + struct bio_vec *bvec, unsigned long *error_bits) +{ + struct dpages dp; + bvec_dp_init(&dp, bvec); + return sync_io(num_regions, where, rw, &dp, error_bits); +} + +int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw, + void *data, unsigned long *error_bits) +{ + struct dpages dp; + vm_dp_init(&dp, data); + return sync_io(num_regions, where, rw, &dp, error_bits); +} + +int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, + struct page_list *pl, unsigned int offset, + io_notify_fn fn, void *context) +{ + struct dpages dp; + list_dp_init(&dp, pl, offset); + return async_io(num_regions, where, rw, &dp, fn, context); +} + +int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw, + struct bio_vec *bvec, io_notify_fn fn, void *context) +{ + struct dpages dp; + bvec_dp_init(&dp, bvec); + return async_io(num_regions, where, rw, &dp, fn, context); +} + +int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw, + void *data, io_notify_fn fn, void *context) +{ + struct dpages dp; + vm_dp_init(&dp, data); + return async_io(num_regions, where, rw, &dp, fn, context); +} + +EXPORT_SYMBOL(dm_io_get); +EXPORT_SYMBOL(dm_io_put); +EXPORT_SYMBOL(dm_io_sync); +EXPORT_SYMBOL(dm_io_async); +EXPORT_SYMBOL(dm_io_sync_bvec); +EXPORT_SYMBOL(dm_io_async_bvec); +EXPORT_SYMBOL(dm_io_sync_vm); +EXPORT_SYMBOL(dm_io_async_vm); diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h new file mode 100644 index 000000000..1a77f3265 --- /dev/null +++ b/drivers/md/dm-io.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2003 Sistina Software + * + * This file is released under the GPL. 
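+ *
+ * Interface to the dm-io layer: reserve pages with dm_io_get(),
+ * then issue synchronous or asynchronous io to an array of
+ * io_region descriptions.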
+ */ + +#ifndef _DM_IO_H +#define _DM_IO_H + +#include "dm.h" + +/* FIXME make this configurable */ +#define DM_MAX_IO_REGIONS 8 + +struct io_region { + struct block_device *bdev; + sector_t sector; + sector_t count; +}; + +struct page_list { + struct page_list *next; + struct page *page; +}; + + +/* + * 'error' is a bitset, with each bit indicating whether an error + * occurred doing io to the corresponding region. + */ +typedef void (*io_notify_fn)(unsigned long error, void *context); + + +/* + * Before anyone uses the IO interface they should call + * dm_io_get(), specifying roughly how many pages they are + * expecting to perform io on concurrently. + * + * This function may block. + */ +int dm_io_get(unsigned int num_pages); +void dm_io_put(unsigned int num_pages); + +/* + * Synchronous IO. + * + * Please ensure that the rw flag in the next two functions is + * either READ or WRITE, ie. we don't take READA. Any + * regions with a zero count field will be ignored. + */ +int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, + struct page_list *pl, unsigned int offset, + unsigned long *error_bits); + +int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw, + struct bio_vec *bvec, unsigned long *error_bits); + +int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw, + void *data, unsigned long *error_bits); + +/* + * Aynchronous IO. + * + * The 'where' array may be safely allocated on the stack since + * the function takes a copy. + */ +int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, + struct page_list *pl, unsigned int offset, + io_notify_fn fn, void *context); + +int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw, + struct bio_vec *bvec, io_notify_fn fn, void *context); + +int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw, + void *data, io_notify_fn fn, void *context); + +#endif diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c new file mode 100644 index 000000000..7a1c77dee --- /dev/null +++ b/drivers/md/dm-log.c @@ -0,0 +1,629 @@ +/* + * Copyright (C) 2003 Sistina Software + * + * This file is released under the LGPL. 
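+ *
+ * Dirty region logging for dm-raid1: a registry of pluggable
+ * dirty_log_type implementations plus the built-in "core"
+ * (in-memory) and "disk" (persistent) log types.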
+ */ + +#include +#include +#include +#include + +#include "dm-log.h" +#include "dm-io.h" + +static LIST_HEAD(_log_types); +static spinlock_t _lock = SPIN_LOCK_UNLOCKED; + +int dm_register_dirty_log_type(struct dirty_log_type *type) +{ + if (!try_module_get(type->module)) + return -EINVAL; + + spin_lock(&_lock); + type->use_count = 0; + list_add(&type->list, &_log_types); + spin_unlock(&_lock); + + return 0; +} + +int dm_unregister_dirty_log_type(struct dirty_log_type *type) +{ + spin_lock(&_lock); + + if (type->use_count) + DMWARN("Attempt to unregister a log type that is still in use"); + else { + list_del(&type->list); + module_put(type->module); + } + + spin_unlock(&_lock); + + return 0; +} + +static struct dirty_log_type *get_type(const char *type_name) +{ + struct dirty_log_type *type; + + spin_lock(&_lock); + list_for_each_entry (type, &_log_types, list) + if (!strcmp(type_name, type->name)) { + type->use_count++; + spin_unlock(&_lock); + return type; + } + + spin_unlock(&_lock); + return NULL; +} + +static void put_type(struct dirty_log_type *type) +{ + spin_lock(&_lock); + type->use_count--; + spin_unlock(&_lock); +} + +struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti, + unsigned int argc, char **argv) +{ + struct dirty_log_type *type; + struct dirty_log *log; + + log = kmalloc(sizeof(*log), GFP_KERNEL); + if (!log) + return NULL; + + type = get_type(type_name); + if (!type) { + kfree(log); + return NULL; + } + + log->type = type; + if (type->ctr(log, ti, argc, argv)) { + kfree(log); + put_type(type); + return NULL; + } + + return log; +} + +void dm_destroy_dirty_log(struct dirty_log *log) +{ + log->type->dtr(log); + put_type(log->type); + kfree(log); +} + +/*----------------------------------------------------------------- + * Persistent and core logs share a lot of their implementation. + * FIXME: need a reload method to be called from a resume + *---------------------------------------------------------------*/ +/* + * Magic for persistent mirrors: "MiRr" + */ +#define MIRROR_MAGIC 0x4D695272 + +/* + * The on-disk version of the metadata. + */ +#define MIRROR_DISK_VERSION 1 +#define LOG_OFFSET 2 + +struct log_header { + uint32_t magic; + + /* + * Simple, incrementing version. no backward + * compatibility. + */ + uint32_t version; + sector_t nr_regions; +}; + +struct log_c { + struct dm_target *ti; + int touched; + sector_t region_size; + unsigned int region_count; + region_t sync_count; + + unsigned bitset_uint32_count; + uint32_t *clean_bits; + uint32_t *sync_bits; + uint32_t *recovering_bits; /* FIXME: this seems excessive */ + + int sync_search; + + /* + * Disk log fields + */ + struct dm_dev *log_dev; + struct log_header header; + + struct io_region header_location; + struct log_header *disk_header; + + struct io_region bits_location; + uint32_t *disk_bits; +}; + +/* + * The touched member needs to be updated every time we access + * one of the bitsets. + */ +static inline int log_test_bit(uint32_t *bs, unsigned bit) +{ + return test_bit(bit, (unsigned long *) bs) ? 
1 : 0; +} + +static inline void log_set_bit(struct log_c *l, + uint32_t *bs, unsigned bit) +{ + set_bit(bit, (unsigned long *) bs); + l->touched = 1; +} + +static inline void log_clear_bit(struct log_c *l, + uint32_t *bs, unsigned bit) +{ + clear_bit(bit, (unsigned long *) bs); + l->touched = 1; +} + +/*---------------------------------------------------------------- + * Header IO + *--------------------------------------------------------------*/ +static void header_to_disk(struct log_header *core, struct log_header *disk) +{ + disk->magic = cpu_to_le32(core->magic); + disk->version = cpu_to_le32(core->version); + disk->nr_regions = cpu_to_le64(core->nr_regions); +} + +static void header_from_disk(struct log_header *core, struct log_header *disk) +{ + core->magic = le32_to_cpu(disk->magic); + core->version = le32_to_cpu(disk->version); + core->nr_regions = le64_to_cpu(disk->nr_regions); +} + +static int read_header(struct log_c *log) +{ + int r; + unsigned long ebits; + + r = dm_io_sync_vm(1, &log->header_location, READ, + log->disk_header, &ebits); + if (r) + return r; + + header_from_disk(&log->header, log->disk_header); + + if (log->header.magic != MIRROR_MAGIC) { + log->header.magic = MIRROR_MAGIC; + log->header.version = MIRROR_DISK_VERSION; + log->header.nr_regions = 0; + } + + if (log->header.version != MIRROR_DISK_VERSION) { + DMWARN("incompatible disk log version"); + return -EINVAL; + } + + return 0; +} + +static inline int write_header(struct log_c *log) +{ + unsigned long ebits; + + header_to_disk(&log->header, log->disk_header); + return dm_io_sync_vm(1, &log->header_location, WRITE, + log->disk_header, &ebits); +} + +/*---------------------------------------------------------------- + * Bits IO + *--------------------------------------------------------------*/ +static inline void bits_to_core(uint32_t *core, uint32_t *disk, unsigned count) +{ + unsigned i; + + for (i = 0; i < count; i++) + core[i] = le32_to_cpu(disk[i]); +} + +static inline void bits_to_disk(uint32_t *core, uint32_t *disk, unsigned count) +{ + unsigned i; + + /* copy across the clean/dirty bitset */ + for (i = 0; i < count; i++) + disk[i] = cpu_to_le32(core[i]); +} + +static int read_bits(struct log_c *log) +{ + int r; + unsigned long ebits; + + r = dm_io_sync_vm(1, &log->bits_location, READ, + log->disk_bits, &ebits); + if (r) + return r; + + bits_to_core(log->clean_bits, log->disk_bits, + log->bitset_uint32_count); + return 0; +} + +static int write_bits(struct log_c *log) +{ + unsigned long ebits; + bits_to_disk(log->clean_bits, log->disk_bits, + log->bitset_uint32_count); + return dm_io_sync_vm(1, &log->bits_location, WRITE, + log->disk_bits, &ebits); +} + +/*---------------------------------------------------------------- + * constructor/destructor + *--------------------------------------------------------------*/ +#define BYTE_SHIFT 3 +static int core_ctr(struct dirty_log *log, struct dm_target *ti, + unsigned int argc, char **argv) +{ + struct log_c *lc; + sector_t region_size; + unsigned int region_count; + size_t bitset_size; + + if (argc != 1) { + DMWARN("wrong number of arguments to log_c"); + return -EINVAL; + } + + if (sscanf(argv[0], SECTOR_FORMAT, ®ion_size) != 1) { + DMWARN("invalid region size string"); + return -EINVAL; + } + + region_count = dm_div_up(ti->len, region_size); + + lc = kmalloc(sizeof(*lc), GFP_KERNEL); + if (!lc) { + DMWARN("couldn't allocate core log"); + return -ENOMEM; + } + + lc->ti = ti; + lc->touched = 0; + lc->region_size = region_size; + lc->region_count = 
region_count; + + /* + * Work out how many words we need to hold the bitset. + */ + bitset_size = dm_round_up(region_count, + sizeof(*lc->clean_bits) << BYTE_SHIFT); + bitset_size >>= BYTE_SHIFT; + + lc->bitset_uint32_count = bitset_size / 4; + lc->clean_bits = vmalloc(bitset_size); + if (!lc->clean_bits) { + DMWARN("couldn't allocate clean bitset"); + kfree(lc); + return -ENOMEM; + } + memset(lc->clean_bits, -1, bitset_size); + + lc->sync_bits = vmalloc(bitset_size); + if (!lc->sync_bits) { + DMWARN("couldn't allocate sync bitset"); + vfree(lc->clean_bits); + kfree(lc); + return -ENOMEM; + } + memset(lc->sync_bits, 0, bitset_size); + lc->sync_count = 0; + + lc->recovering_bits = vmalloc(bitset_size); + if (!lc->recovering_bits) { + DMWARN("couldn't allocate sync bitset"); + vfree(lc->sync_bits); + vfree(lc->clean_bits); + kfree(lc); + return -ENOMEM; + } + memset(lc->recovering_bits, 0, bitset_size); + lc->sync_search = 0; + log->context = lc; + return 0; +} + +static void core_dtr(struct dirty_log *log) +{ + struct log_c *lc = (struct log_c *) log->context; + vfree(lc->clean_bits); + vfree(lc->sync_bits); + vfree(lc->recovering_bits); + kfree(lc); +} + +static int disk_ctr(struct dirty_log *log, struct dm_target *ti, + unsigned int argc, char **argv) +{ + int r; + size_t size; + struct log_c *lc; + struct dm_dev *dev; + + if (argc != 2) { + DMWARN("wrong number of arguments to log_d"); + return -EINVAL; + } + + r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */, + FMODE_READ | FMODE_WRITE, &dev); + if (r) + return r; + + r = core_ctr(log, ti, argc - 1, argv + 1); + if (r) { + dm_put_device(ti, dev); + return r; + } + + lc = (struct log_c *) log->context; + lc->log_dev = dev; + + /* setup the disk header fields */ + lc->header_location.bdev = lc->log_dev->bdev; + lc->header_location.sector = 0; + lc->header_location.count = 1; + + /* + * We can't read less than this amount, even though we'll + * not be using most of this space. 
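+ * (dm_io_sync_vm() transfers whole 512 byte sectors, so the
+ * header buffer is padded to a full sector even though
+ * struct log_header is much smaller.)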
+ */ + lc->disk_header = vmalloc(1 << SECTOR_SHIFT); + if (!lc->disk_header) + goto bad; + + /* setup the disk bitset fields */ + lc->bits_location.bdev = lc->log_dev->bdev; + lc->bits_location.sector = LOG_OFFSET; + + size = dm_round_up(lc->bitset_uint32_count * sizeof(uint32_t), + 1 << SECTOR_SHIFT); + lc->bits_location.count = size >> SECTOR_SHIFT; + lc->disk_bits = vmalloc(size); + if (!lc->disk_bits) { + vfree(lc->disk_header); + goto bad; + } + return 0; + + bad: + dm_put_device(ti, lc->log_dev); + core_dtr(log); + return -ENOMEM; +} + +static void disk_dtr(struct dirty_log *log) +{ + struct log_c *lc = (struct log_c *) log->context; + dm_put_device(lc->ti, lc->log_dev); + vfree(lc->disk_header); + vfree(lc->disk_bits); + core_dtr(log); +} + +static int count_bits32(uint32_t *addr, unsigned size) +{ + int count = 0, i; + + for (i = 0; i < size; i++) { + count += hweight32(*(addr+i)); + } + return count; +} + +static int disk_resume(struct dirty_log *log) +{ + int r; + unsigned i; + struct log_c *lc = (struct log_c *) log->context; + size_t size = lc->bitset_uint32_count * sizeof(uint32_t); + + /* read the disk header */ + r = read_header(lc); + if (r) + return r; + + /* read the bits */ + r = read_bits(lc); + if (r) + return r; + + /* zero any new bits if the mirror has grown */ + for (i = lc->header.nr_regions; i < lc->region_count; i++) + /* FIXME: amazingly inefficient */ + log_clear_bit(lc, lc->clean_bits, i); + + /* copy clean across to sync */ + memcpy(lc->sync_bits, lc->clean_bits, size); + lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count); + + /* write the bits */ + r = write_bits(lc); + if (r) + return r; + + /* set the correct number of regions in the header */ + lc->header.nr_regions = lc->region_count; + + /* write the new header */ + return write_header(lc); +} + +static sector_t core_get_region_size(struct dirty_log *log) +{ + struct log_c *lc = (struct log_c *) log->context; + return lc->region_size; +} + +static int core_is_clean(struct dirty_log *log, region_t region) +{ + struct log_c *lc = (struct log_c *) log->context; + return log_test_bit(lc->clean_bits, region); +} + +static int core_in_sync(struct dirty_log *log, region_t region, int block) +{ + struct log_c *lc = (struct log_c *) log->context; + return log_test_bit(lc->sync_bits, region); +} + +static int core_flush(struct dirty_log *log) +{ + /* no op */ + return 0; +} + +static int disk_flush(struct dirty_log *log) +{ + int r; + struct log_c *lc = (struct log_c *) log->context; + + /* only write if the log has changed */ + if (!lc->touched) + return 0; + + r = write_bits(lc); + if (!r) + lc->touched = 0; + + return r; +} + +static void core_mark_region(struct dirty_log *log, region_t region) +{ + struct log_c *lc = (struct log_c *) log->context; + log_clear_bit(lc, lc->clean_bits, region); +} + +static void core_clear_region(struct dirty_log *log, region_t region) +{ + struct log_c *lc = (struct log_c *) log->context; + log_set_bit(lc, lc->clean_bits, region); +} + +static int core_get_resync_work(struct dirty_log *log, region_t *region) +{ + struct log_c *lc = (struct log_c *) log->context; + + if (lc->sync_search >= lc->region_count) + return 0; + + do { + *region = find_next_zero_bit((unsigned long *) lc->sync_bits, + lc->region_count, + lc->sync_search); + lc->sync_search = *region + 1; + + if (*region == lc->region_count) + return 0; + + } while (log_test_bit(lc->recovering_bits, *region)); + + log_set_bit(lc, lc->recovering_bits, *region); + return 1; +} + +static void 
core_complete_resync_work(struct dirty_log *log, region_t region, + int success) +{ + struct log_c *lc = (struct log_c *) log->context; + + log_clear_bit(lc, lc->recovering_bits, region); + if (success) { + log_set_bit(lc, lc->sync_bits, region); + lc->sync_count++; + } +} + +static region_t core_get_sync_count(struct dirty_log *log) +{ + struct log_c *lc = (struct log_c *) log->context; + + return lc->sync_count; +} + +static struct dirty_log_type _core_type = { + .name = "core", + .module = THIS_MODULE, + .ctr = core_ctr, + .dtr = core_dtr, + .get_region_size = core_get_region_size, + .is_clean = core_is_clean, + .in_sync = core_in_sync, + .flush = core_flush, + .mark_region = core_mark_region, + .clear_region = core_clear_region, + .get_resync_work = core_get_resync_work, + .complete_resync_work = core_complete_resync_work, + .get_sync_count = core_get_sync_count +}; + +static struct dirty_log_type _disk_type = { + .name = "disk", + .module = THIS_MODULE, + .ctr = disk_ctr, + .dtr = disk_dtr, + .suspend = disk_flush, + .resume = disk_resume, + .get_region_size = core_get_region_size, + .is_clean = core_is_clean, + .in_sync = core_in_sync, + .flush = disk_flush, + .mark_region = core_mark_region, + .clear_region = core_clear_region, + .get_resync_work = core_get_resync_work, + .complete_resync_work = core_complete_resync_work, + .get_sync_count = core_get_sync_count +}; + +int __init dm_dirty_log_init(void) +{ + int r; + + r = dm_register_dirty_log_type(&_core_type); + if (r) + DMWARN("couldn't register core log"); + + r = dm_register_dirty_log_type(&_disk_type); + if (r) { + DMWARN("couldn't register disk type"); + dm_unregister_dirty_log_type(&_core_type); + } + + return r; +} + +void dm_dirty_log_exit(void) +{ + dm_unregister_dirty_log_type(&_disk_type); + dm_unregister_dirty_log_type(&_core_type); +} + +EXPORT_SYMBOL(dm_register_dirty_log_type); +EXPORT_SYMBOL(dm_unregister_dirty_log_type); +EXPORT_SYMBOL(dm_create_dirty_log); +EXPORT_SYMBOL(dm_destroy_dirty_log); diff --git a/drivers/md/dm-log.h b/drivers/md/dm-log.h new file mode 100644 index 000000000..ced4ebacd --- /dev/null +++ b/drivers/md/dm-log.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2003 Sistina Software + * + * This file is released under the LGPL. + */ + +#ifndef DM_DIRTY_LOG +#define DM_DIRTY_LOG + +#include "dm.h" + +typedef sector_t region_t; + +struct dirty_log_type; + +struct dirty_log { + struct dirty_log_type *type; + void *context; +}; + +struct dirty_log_type { + struct list_head list; + const char *name; + struct module *module; + unsigned int use_count; + + int (*ctr)(struct dirty_log *log, struct dm_target *ti, + unsigned int argc, char **argv); + void (*dtr)(struct dirty_log *log); + + /* + * There are times when we don't want the log to touch + * the disk. + */ + int (*suspend)(struct dirty_log *log); + int (*resume)(struct dirty_log *log); + + /* + * Retrieves the smallest size of region that the log can + * deal with. + */ + sector_t (*get_region_size)(struct dirty_log *log); + + /* + * A predicate to say whether a region is clean or not. + * May block. + */ + int (*is_clean)(struct dirty_log *log, region_t region); + + /* + * Returns: 0, 1, -EWOULDBLOCK, < 0 + * + * A predicate function to check the area given by + * [sector, sector + len) is in sync. + * + * If -EWOULDBLOCK is returned the state of the region is + * unknown, typically this will result in a read being + * passed to a daemon to deal with, since a daemon is + * allowed to block. 
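+ * 'can_block' says whether the log may block to find out; the
+ * mirror passes 0 on the read path and 1 on the write path.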
+ */ + int (*in_sync)(struct dirty_log *log, region_t region, int can_block); + + /* + * Flush the current log state (eg, to disk). This + * function may block. + */ + int (*flush)(struct dirty_log *log); + + /* + * Mark an area as clean or dirty. These functions may + * block, though for performance reasons blocking should + * be extremely rare (eg, allocating another chunk of + * memory for some reason). + */ + void (*mark_region)(struct dirty_log *log, region_t region); + void (*clear_region)(struct dirty_log *log, region_t region); + + /* + * Returns: <0 (error), 0 (no region), 1 (region) + * + * The mirrord will need perform recovery on regions of + * the mirror that are in the NOSYNC state. This + * function asks the log to tell the caller about the + * next region that this machine should recover. + * + * Do not confuse this function with 'in_sync()', one + * tells you if an area is synchronised, the other + * assigns recovery work. + */ + int (*get_resync_work)(struct dirty_log *log, region_t *region); + + /* + * This notifies the log that the resync of an area has + * been completed. The log should then mark this region + * as CLEAN. + */ + void (*complete_resync_work)(struct dirty_log *log, + region_t region, int success); + + /* + * Returns the number of regions that are in sync. + */ + region_t (*get_sync_count)(struct dirty_log *log); +}; + +int dm_register_dirty_log_type(struct dirty_log_type *type); +int dm_unregister_dirty_log_type(struct dirty_log_type *type); + + +/* + * Make sure you use these two functions, rather than calling + * type->constructor/destructor() directly. + */ +struct dirty_log *dm_create_dirty_log(const char *type_name, struct dm_target *ti, + unsigned int argc, char **argv); +void dm_destroy_dirty_log(struct dirty_log *log); + +/* + * init/exit functions. + */ +int dm_dirty_log_init(void); +void dm_dirty_log_exit(void); + +#endif diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c new file mode 100644 index 000000000..843e9b83d --- /dev/null +++ b/drivers/md/dm-raid1.c @@ -0,0 +1,1278 @@ +/* + * Copyright (C) 2003 Sistina Software Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" +#include "dm-bio-list.h" +#include "dm-io.h" +#include "dm-log.h" +#include "kcopyd.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct workqueue_struct *_kmirrord_wq; +static struct work_struct _kmirrord_work; + +static inline void wake(void) +{ + queue_work(_kmirrord_wq, &_kmirrord_work); +} + +/*----------------------------------------------------------------- + * Region hash + * + * The mirror splits itself up into discrete regions. Each + * region can be in one of three states: clean, dirty, + * nosync. There is no need to put clean regions in the hash. + * + * In addition to being present in the hash table a region _may_ + * be present on one of three lists. + * + * clean_regions: Regions on this list have no io pending to + * them, they are in sync, we are no longer interested in them, + * they are dull. rh_update_states() will remove them from the + * hash table. + * + * quiesced_regions: These regions have been spun down, ready + * for recovery. rh_recovery_start() will remove regions from + * this list and hand them to kmirrord, which will schedule the + * recovery io with kcopyd. + * + * recovered_regions: Regions that kcopyd has successfully + * recovered. 
rh_update_states() will now schedule any delayed + * io, up the recovery_count, and remove the region from the + * hash. + * + * There are 2 locks: + * A rw spin lock 'hash_lock' protects just the hash table, + * this is never held in write mode from interrupt context, + * which I believe means that we only have to disable irqs when + * doing a write lock. + * + * An ordinary spin lock 'region_lock' that protects the three + * lists in the region_hash, with the 'state', 'list' and + * 'bhs_delayed' fields of the regions. This is used from irq + * context, so all other uses will have to suspend local irqs. + *---------------------------------------------------------------*/ +struct mirror_set; +struct region_hash { + struct mirror_set *ms; + sector_t region_size; + unsigned region_shift; + + /* holds persistent region state */ + struct dirty_log *log; + + /* hash table */ + rwlock_t hash_lock; + mempool_t *region_pool; + unsigned int mask; + unsigned int nr_buckets; + struct list_head *buckets; + + spinlock_t region_lock; + struct semaphore recovery_count; + struct list_head clean_regions; + struct list_head quiesced_regions; + struct list_head recovered_regions; +}; + +enum { + RH_CLEAN, + RH_DIRTY, + RH_NOSYNC, + RH_RECOVERING +}; + +struct region { + struct region_hash *rh; /* FIXME: can we get rid of this ? */ + region_t key; + int state; + + struct list_head hash_list; + struct list_head list; + + atomic_t pending; + struct bio_list delayed_bios; +}; + +/* + * Conversion fns + */ +static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio) +{ + return bio->bi_sector >> rh->region_shift; +} + +static inline sector_t region_to_sector(struct region_hash *rh, region_t region) +{ + return region << rh->region_shift; +} + +/* FIXME move this */ +static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); + +static void *region_alloc(int gfp_mask, void *pool_data) +{ + return kmalloc(sizeof(struct region), gfp_mask); +} + +static void region_free(void *element, void *pool_data) +{ + kfree(element); +} + +#define MIN_REGIONS 64 +#define MAX_RECOVERY 1 +static int rh_init(struct region_hash *rh, struct mirror_set *ms, + struct dirty_log *log, sector_t region_size, + region_t nr_regions) +{ + unsigned int nr_buckets, max_buckets; + size_t i; + + /* + * Calculate a suitable number of buckets for our hash + * table. 
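+ * Aim for roughly 64 regions per bucket, with the bucket count
+ * kept a power of two so lookups can use a simple mask.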
+ */ + max_buckets = nr_regions >> 6; + for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1) + ; + nr_buckets >>= 1; + + rh->ms = ms; + rh->log = log; + rh->region_size = region_size; + rh->region_shift = ffs(region_size) - 1; + rwlock_init(&rh->hash_lock); + rh->mask = nr_buckets - 1; + rh->nr_buckets = nr_buckets; + + rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets)); + if (!rh->buckets) { + DMERR("unable to allocate region hash memory"); + return -ENOMEM; + } + + for (i = 0; i < nr_buckets; i++) + INIT_LIST_HEAD(rh->buckets + i); + + spin_lock_init(&rh->region_lock); + sema_init(&rh->recovery_count, 0); + INIT_LIST_HEAD(&rh->clean_regions); + INIT_LIST_HEAD(&rh->quiesced_regions); + INIT_LIST_HEAD(&rh->recovered_regions); + + rh->region_pool = mempool_create(MIN_REGIONS, region_alloc, + region_free, NULL); + if (!rh->region_pool) { + vfree(rh->buckets); + rh->buckets = NULL; + return -ENOMEM; + } + + return 0; +} + +static void rh_exit(struct region_hash *rh) +{ + unsigned int h; + struct region *reg, *nreg; + + BUG_ON(!list_empty(&rh->quiesced_regions)); + for (h = 0; h < rh->nr_buckets; h++) { + list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) { + BUG_ON(atomic_read(®->pending)); + mempool_free(reg, rh->region_pool); + } + } + + if (rh->log) + dm_destroy_dirty_log(rh->log); + if (rh->region_pool) + mempool_destroy(rh->region_pool); + vfree(rh->buckets); +} + +#define RH_HASH_MULT 2654435387U + +static inline unsigned int rh_hash(struct region_hash *rh, region_t region) +{ + return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask; +} + +static struct region *__rh_lookup(struct region_hash *rh, region_t region) +{ + struct region *reg; + + list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list) + if (reg->key == region) + return reg; + + return NULL; +} + +static void __rh_insert(struct region_hash *rh, struct region *reg) +{ + unsigned int h = rh_hash(rh, reg->key); + list_add(®->hash_list, rh->buckets + h); +} + +static struct region *__rh_alloc(struct region_hash *rh, region_t region) +{ + struct region *reg, *nreg; + + read_unlock(&rh->hash_lock); + nreg = mempool_alloc(rh->region_pool, GFP_NOIO); + nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? + RH_CLEAN : RH_NOSYNC; + nreg->rh = rh; + nreg->key = region; + + INIT_LIST_HEAD(&nreg->list); + + atomic_set(&nreg->pending, 0); + bio_list_init(&nreg->delayed_bios); + write_lock_irq(&rh->hash_lock); + + reg = __rh_lookup(rh, region); + if (reg) + /* we lost the race */ + mempool_free(nreg, rh->region_pool); + + else { + __rh_insert(rh, nreg); + if (nreg->state == RH_CLEAN) { + spin_lock_irq(&rh->region_lock); + list_add(&nreg->list, &rh->clean_regions); + spin_unlock_irq(&rh->region_lock); + } + reg = nreg; + } + write_unlock_irq(&rh->hash_lock); + read_lock(&rh->hash_lock); + + return reg; +} + +static inline struct region *__rh_find(struct region_hash *rh, region_t region) +{ + struct region *reg; + + reg = __rh_lookup(rh, region); + if (!reg) + reg = __rh_alloc(rh, region); + + return reg; +} + +static int rh_state(struct region_hash *rh, region_t region, int may_block) +{ + int r; + struct region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_lookup(rh, region); + read_unlock(&rh->hash_lock); + + if (reg) + return reg->state; + + /* + * The region wasn't in the hash, so we fall back to the + * dirty log. + */ + r = rh->log->type->in_sync(rh->log, region, may_block); + + /* + * Any error from the dirty log (eg. 
-EWOULDBLOCK) gets + * taken as a RH_NOSYNC + */ + return r == 1 ? RH_CLEAN : RH_NOSYNC; +} + +static inline int rh_in_sync(struct region_hash *rh, + region_t region, int may_block) +{ + int state = rh_state(rh, region, may_block); + return state == RH_CLEAN || state == RH_DIRTY; +} + +static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list) +{ + struct bio *bio; + + while ((bio = bio_list_pop(bio_list))) { + queue_bio(ms, bio, WRITE); + } +} + +static void rh_update_states(struct region_hash *rh) +{ + struct region *reg, *next; + + LIST_HEAD(clean); + LIST_HEAD(recovered); + + /* + * Quickly grab the lists. + */ + write_lock_irq(&rh->hash_lock); + spin_lock(&rh->region_lock); + if (!list_empty(&rh->clean_regions)) { + list_splice(&rh->clean_regions, &clean); + INIT_LIST_HEAD(&rh->clean_regions); + + list_for_each_entry (reg, &clean, list) { + rh->log->type->clear_region(rh->log, reg->key); + list_del(®->hash_list); + } + } + + if (!list_empty(&rh->recovered_regions)) { + list_splice(&rh->recovered_regions, &recovered); + INIT_LIST_HEAD(&rh->recovered_regions); + + list_for_each_entry (reg, &recovered, list) + list_del(®->hash_list); + } + spin_unlock(&rh->region_lock); + write_unlock_irq(&rh->hash_lock); + + /* + * All the regions on the recovered and clean lists have + * now been pulled out of the system, so no need to do + * any more locking. + */ + list_for_each_entry_safe (reg, next, &recovered, list) { + rh->log->type->clear_region(rh->log, reg->key); + rh->log->type->complete_resync_work(rh->log, reg->key, 1); + dispatch_bios(rh->ms, ®->delayed_bios); + up(&rh->recovery_count); + mempool_free(reg, rh->region_pool); + } + + if (!list_empty(&recovered)) + rh->log->type->flush(rh->log); + + list_for_each_entry_safe (reg, next, &clean, list) + mempool_free(reg, rh->region_pool); +} + +static void rh_inc(struct region_hash *rh, region_t region) +{ + struct region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_find(rh, region); + if (reg->state == RH_CLEAN) { + rh->log->type->mark_region(rh->log, reg->key); + + spin_lock_irq(&rh->region_lock); + reg->state = RH_DIRTY; + list_del_init(®->list); /* take off the clean list */ + spin_unlock_irq(&rh->region_lock); + } + + atomic_inc(®->pending); + read_unlock(&rh->hash_lock); +} + +static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios) +{ + struct bio *bio; + + for (bio = bios->head; bio; bio = bio->bi_next) + rh_inc(rh, bio_to_region(rh, bio)); +} + +static void rh_dec(struct region_hash *rh, region_t region) +{ + unsigned long flags; + struct region *reg; + int should_wake = 0; + + read_lock(&rh->hash_lock); + reg = __rh_lookup(rh, region); + read_unlock(&rh->hash_lock); + + if (atomic_dec_and_test(®->pending)) { + spin_lock_irqsave(&rh->region_lock, flags); + if (reg->state == RH_RECOVERING) { + list_add_tail(®->list, &rh->quiesced_regions); + } else { + reg->state = RH_CLEAN; + list_add(®->list, &rh->clean_regions); + } + spin_unlock_irqrestore(&rh->region_lock, flags); + should_wake = 1; + } + + if (should_wake) + wake(); +} + +/* + * Starts quiescing a region in preparation for recovery. + */ +static int __rh_recovery_prepare(struct region_hash *rh) +{ + int r; + struct region *reg; + region_t region; + + /* + * Ask the dirty log what's next. + */ + r = rh->log->type->get_resync_work(rh->log, ®ion); + if (r <= 0) + return r; + + /* + * Get this region, and start it quiescing by setting the + * recovering flag. 
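+ * If the region already has no pending io it goes straight onto
+ * the quiesced list; otherwise rh_dec() moves it there once the
+ * last pending io completes.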
+ */ + read_lock(&rh->hash_lock); + reg = __rh_find(rh, region); + read_unlock(&rh->hash_lock); + + spin_lock_irq(&rh->region_lock); + reg->state = RH_RECOVERING; + + /* Already quiesced ? */ + if (atomic_read(®->pending)) + list_del_init(®->list); + + else { + list_del_init(®->list); + list_add(®->list, &rh->quiesced_regions); + } + spin_unlock_irq(&rh->region_lock); + + return 1; +} + +static void rh_recovery_prepare(struct region_hash *rh) +{ + while (!down_trylock(&rh->recovery_count)) + if (__rh_recovery_prepare(rh) <= 0) { + up(&rh->recovery_count); + break; + } +} + +/* + * Returns any quiesced regions. + */ +static struct region *rh_recovery_start(struct region_hash *rh) +{ + struct region *reg = NULL; + + spin_lock_irq(&rh->region_lock); + if (!list_empty(&rh->quiesced_regions)) { + reg = list_entry(rh->quiesced_regions.next, + struct region, list); + list_del_init(®->list); /* remove from the quiesced list */ + } + spin_unlock_irq(&rh->region_lock); + + return reg; +} + +/* FIXME: success ignored for now */ +static void rh_recovery_end(struct region *reg, int success) +{ + struct region_hash *rh = reg->rh; + + spin_lock_irq(&rh->region_lock); + list_add(®->list, ®->rh->recovered_regions); + spin_unlock_irq(&rh->region_lock); + + wake(); +} + +static void rh_flush(struct region_hash *rh) +{ + rh->log->type->flush(rh->log); +} + +static void rh_delay(struct region_hash *rh, struct bio *bio) +{ + struct region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_find(rh, bio_to_region(rh, bio)); + bio_list_add(®->delayed_bios, bio); + read_unlock(&rh->hash_lock); +} + +static void rh_stop_recovery(struct region_hash *rh) +{ + int i; + + /* wait for any recovering regions */ + for (i = 0; i < MAX_RECOVERY; i++) + down(&rh->recovery_count); +} + +static void rh_start_recovery(struct region_hash *rh) +{ + int i; + + for (i = 0; i < MAX_RECOVERY; i++) + up(&rh->recovery_count); + + wake(); +} + +/*----------------------------------------------------------------- + * Mirror set structures. + *---------------------------------------------------------------*/ +struct mirror { + atomic_t error_count; + struct dm_dev *dev; + sector_t offset; +}; + +struct mirror_set { + struct dm_target *ti; + struct list_head list; + struct region_hash rh; + struct kcopyd_client *kcopyd_client; + + spinlock_t lock; /* protects the next two lists */ + struct bio_list reads; + struct bio_list writes; + + /* recovery */ + region_t nr_regions; + int in_sync; + + unsigned int nr_mirrors; + struct mirror mirror[0]; +}; + +/* + * Every mirror should look like this one. + */ +#define DEFAULT_MIRROR 0 + +/* + * This is yucky. We squirrel the mirror_set struct away inside + * bi_next for write buffers. This is safe since the bh + * doesn't get submitted to the lower levels of block layer. + */ +static struct mirror_set *bio_get_ms(struct bio *bio) +{ + return (struct mirror_set *) bio->bi_next; +} + +static void bio_set_ms(struct bio *bio, struct mirror_set *ms) +{ + bio->bi_next = (struct bio *) ms; +} + +/*----------------------------------------------------------------- + * Recovery. + * + * When a mirror is first activated we may find that some regions + * are in the no-sync state. We have to recover these by + * recopying from the default mirror to all the others. 
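+ *
+ * Each region is quiesced by the region hash, handed to kcopyd to
+ * copy from the default mirror to the others, and marked in-sync
+ * when recovery_complete() reports that the copy has finished.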
+ *---------------------------------------------------------------*/ +static void recovery_complete(int read_err, unsigned int write_err, + void *context) +{ + struct region *reg = (struct region *) context; + + /* FIXME: better error handling */ + rh_recovery_end(reg, read_err || write_err); +} + +static int recover(struct mirror_set *ms, struct region *reg) +{ + int r; + unsigned int i; + struct io_region from, to[ms->nr_mirrors - 1], *dest; + struct mirror *m; + unsigned long flags = 0; + + /* fill in the source */ + m = ms->mirror + DEFAULT_MIRROR; + from.bdev = m->dev->bdev; + from.sector = m->offset + region_to_sector(reg->rh, reg->key); + if (reg->key == (ms->nr_regions - 1)) { + /* + * The final region may be smaller than + * region_size. + */ + from.count = ms->ti->len & (reg->rh->region_size - 1); + if (!from.count) + from.count = reg->rh->region_size; + } else + from.count = reg->rh->region_size; + + /* fill in the destinations */ + for (i = 0, dest = to; i < ms->nr_mirrors; i++) { + if (i == DEFAULT_MIRROR) + continue; + + m = ms->mirror + i; + dest->bdev = m->dev->bdev; + dest->sector = m->offset + region_to_sector(reg->rh, reg->key); + dest->count = from.count; + dest++; + } + + /* hand to kcopyd */ + set_bit(KCOPYD_IGNORE_ERROR, &flags); + r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags, + recovery_complete, reg); + + return r; +} + +static void do_recovery(struct mirror_set *ms) +{ + int r; + struct region *reg; + struct dirty_log *log = ms->rh.log; + + /* + * Start quiescing some regions. + */ + rh_recovery_prepare(&ms->rh); + + /* + * Copy any already quiesced regions. + */ + while ((reg = rh_recovery_start(&ms->rh))) { + r = recover(ms, reg); + if (r) + rh_recovery_end(reg, 0); + } + + /* + * Update the in sync flag. + */ + if (!ms->in_sync && + (log->type->get_sync_count(log) == ms->nr_regions)) { + /* the sync is complete */ + dm_table_event(ms->ti->table); + ms->in_sync = 1; + } +} + +/*----------------------------------------------------------------- + * Reads + *---------------------------------------------------------------*/ +static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) +{ + /* FIXME: add read balancing */ + return ms->mirror + DEFAULT_MIRROR; +} + +/* + * remap a buffer to a particular mirror. + */ +static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio) +{ + bio->bi_bdev = m->dev->bdev; + bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin); +} + +static void do_reads(struct mirror_set *ms, struct bio_list *reads) +{ + region_t region; + struct bio *bio; + struct mirror *m; + + while ((bio = bio_list_pop(reads))) { + region = bio_to_region(&ms->rh, bio); + + /* + * We can only read balance if the region is in sync. + */ + if (rh_in_sync(&ms->rh, region, 0)) + m = choose_mirror(ms, bio->bi_sector); + else + m = ms->mirror + DEFAULT_MIRROR; + + map_bio(ms, m, bio); + generic_make_request(bio); + } +} + +/*----------------------------------------------------------------- + * Writes. 
+ * + * We do different things with the write io depending on the + * state of the region that it's in: + * + * SYNC: increment pending, use kcopyd to write to *all* mirrors + * RECOVERING: delay the io until recovery completes + * NOSYNC: increment pending, just write to the default mirror + *---------------------------------------------------------------*/ +static void write_callback(unsigned long error, void *context) +{ + unsigned int i; + int uptodate = 1; + struct bio *bio = (struct bio *) context; + struct mirror_set *ms; + + ms = bio_get_ms(bio); + bio_set_ms(bio, NULL); + + /* + * NOTE: We don't decrement the pending count here, + * instead it is done by the targets endio function. + * This way we handle both writes to SYNC and NOSYNC + * regions with the same code. + */ + + if (error) { + /* + * only error the io if all mirrors failed. + * FIXME: bogus + */ + uptodate = 0; + for (i = 0; i < ms->nr_mirrors; i++) + if (!test_bit(i, &error)) { + uptodate = 1; + break; + } + } + bio_endio(bio, bio->bi_size, 0); +} + +static void do_write(struct mirror_set *ms, struct bio *bio) +{ + unsigned int i; + struct io_region io[ms->nr_mirrors]; + struct mirror *m; + + for (i = 0; i < ms->nr_mirrors; i++) { + m = ms->mirror + i; + + io[i].bdev = m->dev->bdev; + io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin); + io[i].count = bio->bi_size >> 9; + } + + bio_set_ms(bio, ms); + dm_io_async_bvec(ms->nr_mirrors, io, WRITE, + bio->bi_io_vec + bio->bi_idx, + write_callback, bio); +} + +static void do_writes(struct mirror_set *ms, struct bio_list *writes) +{ + int state; + struct bio *bio; + struct bio_list sync, nosync, recover, *this_list = NULL; + + if (!writes->head) + return; + + /* + * Classify each write. + */ + bio_list_init(&sync); + bio_list_init(&nosync); + bio_list_init(&recover); + + while ((bio = bio_list_pop(writes))) { + state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1); + switch (state) { + case RH_CLEAN: + case RH_DIRTY: + this_list = &sync; + break; + + case RH_NOSYNC: + this_list = &nosync; + break; + + case RH_RECOVERING: + this_list = &recover; + break; + } + + bio_list_add(this_list, bio); + } + + /* + * Increment the pending counts for any regions that will + * be written to (writes to recover regions are going to + * be delayed). + */ + rh_inc_pending(&ms->rh, &sync); + rh_inc_pending(&ms->rh, &nosync); + rh_flush(&ms->rh); + + /* + * Dispatch io. 
+ */ + while ((bio = bio_list_pop(&sync))) + do_write(ms, bio); + + while ((bio = bio_list_pop(&recover))) + rh_delay(&ms->rh, bio); + + while ((bio = bio_list_pop(&nosync))) { + map_bio(ms, ms->mirror + DEFAULT_MIRROR, bio); + generic_make_request(bio); + } +} + +/*----------------------------------------------------------------- + * kmirrord + *---------------------------------------------------------------*/ +static LIST_HEAD(_mirror_sets); +static DECLARE_RWSEM(_mirror_sets_lock); + +static void do_mirror(struct mirror_set *ms) +{ + struct bio_list reads, writes; + + spin_lock(&ms->lock); + reads = ms->reads; + writes = ms->writes; + bio_list_init(&ms->reads); + bio_list_init(&ms->writes); + spin_unlock(&ms->lock); + + rh_update_states(&ms->rh); + do_recovery(ms); + do_reads(ms, &reads); + do_writes(ms, &writes); +} + +static void do_work(void *ignored) +{ + struct mirror_set *ms; + + down_read(&_mirror_sets_lock); + list_for_each_entry (ms, &_mirror_sets, list) + do_mirror(ms); + up_read(&_mirror_sets_lock); +} + +/*----------------------------------------------------------------- + * Target functions + *---------------------------------------------------------------*/ +static struct mirror_set *alloc_context(unsigned int nr_mirrors, + sector_t region_size, + struct dm_target *ti, + struct dirty_log *dl) +{ + size_t len; + struct mirror_set *ms = NULL; + + if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors)) + return NULL; + + len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); + + ms = kmalloc(len, GFP_KERNEL); + if (!ms) { + ti->error = "dm-mirror: Cannot allocate mirror context"; + return NULL; + } + + memset(ms, 0, len); + spin_lock_init(&ms->lock); + + ms->ti = ti; + ms->nr_mirrors = nr_mirrors; + ms->nr_regions = dm_div_up(ti->len, region_size); + ms->in_sync = 0; + + if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { + ti->error = "dm-mirror: Error creating dirty region hash"; + kfree(ms); + return NULL; + } + + return ms; +} + +static void free_context(struct mirror_set *ms, struct dm_target *ti, + unsigned int m) +{ + while (m--) + dm_put_device(ti, ms->mirror[m].dev); + + rh_exit(&ms->rh); + kfree(ms); +} + +static inline int _check_region_size(struct dm_target *ti, sector_t size) +{ + return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) || + size > ti->len); +} + +static int get_mirror(struct mirror_set *ms, struct dm_target *ti, + unsigned int mirror, char **argv) +{ + sector_t offset; + + if (sscanf(argv[1], SECTOR_FORMAT, &offset) != 1) { + ti->error = "dm-mirror: Invalid offset"; + return -EINVAL; + } + + if (dm_get_device(ti, argv[0], offset, ti->len, + dm_table_get_mode(ti->table), + &ms->mirror[mirror].dev)) { + ti->error = "dm-mirror: Device lookup failure"; + return -ENXIO; + } + + ms->mirror[mirror].offset = offset; + + return 0; +} + +static int add_mirror_set(struct mirror_set *ms) +{ + down_write(&_mirror_sets_lock); + list_add_tail(&ms->list, &_mirror_sets); + up_write(&_mirror_sets_lock); + wake(); + + return 0; +} + +static void del_mirror_set(struct mirror_set *ms) +{ + down_write(&_mirror_sets_lock); + list_del(&ms->list); + up_write(&_mirror_sets_lock); +} + +/* + * Create dirty log: log_type #log_params + */ +static struct dirty_log *create_dirty_log(struct dm_target *ti, + unsigned int argc, char **argv, + unsigned int *args_used) +{ + unsigned int param_count; + struct dirty_log *dl; + + if (argc < 2) { + ti->error = "dm-mirror: Insufficient mirror log arguments"; + return NULL; + } + + if (sscanf(argv[1], "%u", 
&param_count) != 1) {
+ ti->error = "dm-mirror: Invalid mirror log argument count";
+ return NULL;
+ }
+
+ *args_used = 2 + param_count;
+
+ if (argc < *args_used) {
+ ti->error = "dm-mirror: Insufficient mirror log arguments";
+ return NULL;
+ }
+
+ dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
+ if (!dl) {
+ ti->error = "dm-mirror: Error creating mirror dirty log";
+ return NULL;
+ }
+
+ if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
+ ti->error = "dm-mirror: Invalid region size";
+ dm_destroy_dirty_log(dl);
+ return NULL;
+ }
+
+ return dl;
+}
+
+/*
+ * Construct a mirror mapping:
+ *
+ * log_type #log_params
+ * #mirrors [mirror_path offset]{2,}
+ *
+ * For now, #log_params = 1, log_type = "core"
+ *
+ */
+#define DM_IO_PAGES 64
+static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ int r;
+ unsigned int nr_mirrors, m, args_used;
+ struct mirror_set *ms;
+ struct dirty_log *dl;
+
+ dl = create_dirty_log(ti, argc, argv, &args_used);
+ if (!dl)
+ return -EINVAL;
+
+ argv += args_used;
+ argc -= args_used;
+
+ if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
+ nr_mirrors < 2) {
+ ti->error = "dm-mirror: Invalid number of mirrors";
+ dm_destroy_dirty_log(dl);
+ return -EINVAL;
+ }
+
+ argv++, argc--;
+
+ if (argc != nr_mirrors * 2) {
+ ti->error = "dm-mirror: Wrong number of mirror arguments";
+ dm_destroy_dirty_log(dl);
+ return -EINVAL;
+ }
+
+ ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
+ if (!ms) {
+ dm_destroy_dirty_log(dl);
+ return -ENOMEM;
+ }
+
+ /* Get the mirror parameter sets */
+ for (m = 0; m < nr_mirrors; m++) {
+ r = get_mirror(ms, ti, m, argv);
+ if (r) {
+ free_context(ms, ti, m);
+ return r;
+ }
+ argv += 2;
+ argc -= 2;
+ }
+
+ ti->private = ms;
+
+ r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
+ if (r) {
+ free_context(ms, ti, ms->nr_mirrors);
+ return r;
+ }
+
+ add_mirror_set(ms);
+ return 0;
+}
+
+static void mirror_dtr(struct dm_target *ti)
+{
+ struct mirror_set *ms = (struct mirror_set *) ti->private;
+
+ del_mirror_set(ms);
+ kcopyd_client_destroy(ms->kcopyd_client);
+ free_context(ms, ti, ms->nr_mirrors);
+}
+
+static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
+{
+ int should_wake = 0;
+ struct bio_list *bl;
+
+ bl = (rw == WRITE) ? &ms->writes : &ms->reads;
+ spin_lock(&ms->lock);
+ should_wake = !(bl->head);
+ bio_list_add(bl, bio);
+ spin_unlock(&ms->lock);
+
+ if (should_wake)
+ wake();
+}
+
+/*
+ * Mirror mapping function
+ */
+static int mirror_map(struct dm_target *ti, struct bio *bio,
+ union map_info *map_context)
+{
+ int r, rw = bio_rw(bio);
+ struct mirror *m;
+ struct mirror_set *ms = ti->private;
+
+ map_context->ll = bio->bi_sector >> ms->rh.region_shift;
+
+ if (rw == WRITE) {
+ queue_bio(ms, bio, rw);
+ return 0;
+ }
+
+ r = ms->rh.log->type->in_sync(ms->rh.log,
+ bio_to_region(&ms->rh, bio), 0);
+ if (r < 0 && r != -EWOULDBLOCK)
+ return r;
+
+ if (r == -EWOULDBLOCK) /* FIXME: ugly */
+ r = 0;
+
+ /*
+ * We don't want to fast track a recovery just for a read
+ * ahead. So we just let it silently fail.
+ * FIXME: get rid of this.
+ */ + if (!r && rw == READA) + return -EIO; + + if (!r) { + /* Pass this io over to the daemon */ + queue_bio(ms, bio, rw); + return 0; + } + + m = choose_mirror(ms, bio->bi_sector); + if (!m) + return -EIO; + + map_bio(ms, m, bio); + return 1; +} + +static int mirror_end_io(struct dm_target *ti, struct bio *bio, + int error, union map_info *map_context) +{ + int rw = bio_rw(bio); + struct mirror_set *ms = (struct mirror_set *) ti->private; + region_t region = map_context->ll; + + /* + * We need to dec pending if this was a write. + */ + if (rw == WRITE) + rh_dec(&ms->rh, region); + + return 0; +} + +static void mirror_suspend(struct dm_target *ti) +{ + struct mirror_set *ms = (struct mirror_set *) ti->private; + struct dirty_log *log = ms->rh.log; + rh_stop_recovery(&ms->rh); + if (log->type->suspend && log->type->suspend(log)) + /* FIXME: need better error handling */ + DMWARN("log suspend failed"); +} + +static void mirror_resume(struct dm_target *ti) +{ + struct mirror_set *ms = (struct mirror_set *) ti->private; + struct dirty_log *log = ms->rh.log; + if (log->type->resume && log->type->resume(log)) + /* FIXME: need better error handling */ + DMWARN("log resume failed"); + rh_start_recovery(&ms->rh); +} + +static int mirror_status(struct dm_target *ti, status_type_t type, + char *result, unsigned int maxlen) +{ + char buffer[32]; + unsigned int m, sz = 0; + struct mirror_set *ms = (struct mirror_set *) ti->private; + +#define EMIT(x...) sz += ((sz >= maxlen) ? \ + 0 : scnprintf(result + sz, maxlen - sz, x)) + + switch (type) { + case STATUSTYPE_INFO: + EMIT("%d ", ms->nr_mirrors); + + for (m = 0; m < ms->nr_mirrors; m++) { + format_dev_t(buffer, ms->mirror[m].dev->bdev->bd_dev); + EMIT("%s ", buffer); + } + + EMIT(SECTOR_FORMAT "/" SECTOR_FORMAT, + ms->rh.log->type->get_sync_count(ms->rh.log), + ms->nr_regions); + break; + + case STATUSTYPE_TABLE: + EMIT("%s 1 " SECTOR_FORMAT " %d ", + ms->rh.log->type->name, ms->rh.region_size, + ms->nr_mirrors); + + for (m = 0; m < ms->nr_mirrors; m++) { + format_dev_t(buffer, ms->mirror[m].dev->bdev->bd_dev); + EMIT("%s " SECTOR_FORMAT " ", + buffer, ms->mirror[m].offset); + } + } + + return 0; +} + +static struct target_type mirror_target = { + .name = "mirror", + .version = {1, 0, 1}, + .module = THIS_MODULE, + .ctr = mirror_ctr, + .dtr = mirror_dtr, + .map = mirror_map, + .end_io = mirror_end_io, + .suspend = mirror_suspend, + .resume = mirror_resume, + .status = mirror_status, +}; + +static int __init dm_mirror_init(void) +{ + int r; + + r = dm_dirty_log_init(); + if (r) + return r; + + _kmirrord_wq = create_workqueue("kmirrord"); + if (!_kmirrord_wq) { + DMERR("couldn't start kmirrord"); + dm_dirty_log_exit(); + return r; + } + INIT_WORK(&_kmirrord_work, do_work, NULL); + + r = dm_register_target(&mirror_target); + if (r < 0) { + DMERR("%s: Failed to register mirror target", + mirror_target.name); + dm_dirty_log_exit(); + destroy_workqueue(_kmirrord_wq); + } + + return r; +} + +static void __exit dm_mirror_exit(void) +{ + int r; + + r = dm_unregister_target(&mirror_target); + if (r < 0) + DMERR("%s: unregister failed %d", mirror_target.name, r); + + destroy_workqueue(_kmirrord_wq); + dm_dirty_log_exit(); +} + +/* Module hooks */ +module_init(dm_mirror_init); +module_exit(dm_mirror_exit); + +MODULE_DESCRIPTION(DM_NAME " mirror target"); +MODULE_AUTHOR("Joe Thornber"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c new file mode 100644 index 000000000..36691ab1e --- /dev/null +++ b/drivers/md/dm-snap.c @@ -0,0 
+1,1213 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dm-snap.h" +#include "dm-bio-list.h" +#include "kcopyd.h" + +/* + * The percentage increment we will wake up users at + */ +#define WAKE_UP_PERCENT 5 + +/* + * kcopyd priority of snapshot operations + */ +#define SNAPSHOT_COPY_PRIORITY 2 + +/* + * Each snapshot reserves this many pages for io + */ +#define SNAPSHOT_PAGES 256 + +struct pending_exception { + struct exception e; + + /* + * Origin buffers waiting for this to complete are held + * in a bio list + */ + struct bio_list origin_bios; + struct bio_list snapshot_bios; + + /* + * Other pending_exceptions that are processing this + * chunk. When this list is empty, we know we can + * complete the origins. + */ + struct list_head siblings; + + /* Pointer back to snapshot context */ + struct dm_snapshot *snap; + + /* + * 1 indicates the exception has already been sent to + * kcopyd. + */ + int started; +}; + +/* + * Hash table mapping origin volumes to lists of snapshots and + * a lock to protect it + */ +static kmem_cache_t *exception_cache; +static kmem_cache_t *pending_cache; +static mempool_t *pending_pool; + +/* + * One of these per registered origin, held in the snapshot_origins hash + */ +struct origin { + /* The origin device */ + struct block_device *bdev; + + struct list_head hash_list; + + /* List of snapshots for this origin */ + struct list_head snapshots; +}; + +/* + * Size of the hash table for origin volumes. If we make this + * the size of the minors list then it should be nearly perfect + */ +#define ORIGIN_HASH_SIZE 256 +#define ORIGIN_MASK 0xFF +static struct list_head *_origins; +static struct rw_semaphore _origins_lock; + +static int init_origin_hash(void) +{ + int i; + + _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), + GFP_KERNEL); + if (!_origins) { + DMERR("Device mapper: Snapshot: unable to allocate memory"); + return -ENOMEM; + } + + for (i = 0; i < ORIGIN_HASH_SIZE; i++) + INIT_LIST_HEAD(_origins + i); + init_rwsem(&_origins_lock); + + return 0; +} + +static void exit_origin_hash(void) +{ + kfree(_origins); +} + +static inline unsigned int origin_hash(struct block_device *bdev) +{ + return bdev->bd_dev & ORIGIN_MASK; +} + +static struct origin *__lookup_origin(struct block_device *origin) +{ + struct list_head *ol; + struct origin *o; + + ol = &_origins[origin_hash(origin)]; + list_for_each_entry (o, ol, hash_list) + if (bdev_equal(o->bdev, origin)) + return o; + + return NULL; +} + +static void __insert_origin(struct origin *o) +{ + struct list_head *sl = &_origins[origin_hash(o->bdev)]; + list_add_tail(&o->hash_list, sl); +} + +/* + * Make a note of the snapshot and its origin so we can look it + * up when the origin has a write on it. 
+ */ +static int register_snapshot(struct dm_snapshot *snap) +{ + struct origin *o; + struct block_device *bdev = snap->origin->bdev; + + down_write(&_origins_lock); + o = __lookup_origin(bdev); + + if (!o) { + /* New origin */ + o = kmalloc(sizeof(*o), GFP_KERNEL); + if (!o) { + up_write(&_origins_lock); + return -ENOMEM; + } + + /* Initialise the struct */ + INIT_LIST_HEAD(&o->snapshots); + o->bdev = bdev; + + __insert_origin(o); + } + + list_add_tail(&snap->list, &o->snapshots); + + up_write(&_origins_lock); + return 0; +} + +static void unregister_snapshot(struct dm_snapshot *s) +{ + struct origin *o; + + down_write(&_origins_lock); + o = __lookup_origin(s->origin->bdev); + + list_del(&s->list); + if (list_empty(&o->snapshots)) { + list_del(&o->hash_list); + kfree(o); + } + + up_write(&_origins_lock); +} + +/* + * Implementation of the exception hash tables. + */ +static int init_exception_table(struct exception_table *et, uint32_t size) +{ + unsigned int i; + + et->hash_mask = size - 1; + et->table = dm_vcalloc(size, sizeof(struct list_head)); + if (!et->table) + return -ENOMEM; + + for (i = 0; i < size; i++) + INIT_LIST_HEAD(et->table + i); + + return 0; +} + +static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem) +{ + struct list_head *slot; + struct exception *ex, *next; + int i, size; + + size = et->hash_mask + 1; + for (i = 0; i < size; i++) { + slot = et->table + i; + + list_for_each_entry_safe (ex, next, slot, hash_list) + kmem_cache_free(mem, ex); + } + + vfree(et->table); +} + +static inline uint32_t exception_hash(struct exception_table *et, chunk_t chunk) +{ + return chunk & et->hash_mask; +} + +static void insert_exception(struct exception_table *eh, struct exception *e) +{ + struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)]; + list_add(&e->hash_list, l); +} + +static inline void remove_exception(struct exception *e) +{ + list_del(&e->hash_list); +} + +/* + * Return the exception data for a sector, or NULL if not + * remapped. + */ +static struct exception *lookup_exception(struct exception_table *et, + chunk_t chunk) +{ + struct list_head *slot; + struct exception *e; + + slot = &et->table[exception_hash(et, chunk)]; + list_for_each_entry (e, slot, hash_list) + if (e->old_chunk == chunk) + return e; + + return NULL; +} + +static inline struct exception *alloc_exception(void) +{ + struct exception *e; + + e = kmem_cache_alloc(exception_cache, GFP_NOIO); + if (!e) + e = kmem_cache_alloc(exception_cache, GFP_ATOMIC); + + return e; +} + +static inline void free_exception(struct exception *e) +{ + kmem_cache_free(exception_cache, e); +} + +static inline struct pending_exception *alloc_pending_exception(void) +{ + return mempool_alloc(pending_pool, GFP_NOIO); +} + +static inline void free_pending_exception(struct pending_exception *pe) +{ + mempool_free(pe, pending_pool); +} + +int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new) +{ + struct exception *e; + + e = alloc_exception(); + if (!e) + return -ENOMEM; + + e->old_chunk = old; + e->new_chunk = new; + insert_exception(&s->complete, e); + return 0; +} + +/* + * Hard coded magic. + */ +static int calc_max_buckets(void) +{ + /* use a fixed size of 2MB */ + unsigned long mem = 2 * 1024 * 1024; + mem /= sizeof(struct list_head); + + return mem; +} + +/* + * Rounds a number down to a power of 2. + */ +static inline uint32_t round_down(uint32_t n) +{ + while (n & (n - 1)) + n &= (n - 1); + return n; +} + +/* + * Allocate room for a suitable hash table. 
+ */ +static int init_hash_tables(struct dm_snapshot *s) +{ + sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; + + /* + * Calculate based on the size of the original volume or + * the COW volume... + */ + cow_dev_size = get_dev_size(s->cow->bdev); + origin_dev_size = get_dev_size(s->origin->bdev); + max_buckets = calc_max_buckets(); + + hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift; + hash_size = min(hash_size, max_buckets); + + /* Round it down to a power of 2 */ + hash_size = round_down(hash_size); + if (init_exception_table(&s->complete, hash_size)) + return -ENOMEM; + + /* + * Allocate hash table for in-flight exceptions + * Make this smaller than the real hash table + */ + hash_size >>= 3; + if (hash_size < 64) + hash_size = 64; + + if (init_exception_table(&s->pending, hash_size)) { + exit_exception_table(&s->complete, exception_cache); + return -ENOMEM; + } + + return 0; +} + +/* + * Round a number up to the nearest 'size' boundary. size must + * be a power of 2. + */ +static inline ulong round_up(ulong n, ulong size) +{ + size--; + return (n + size) & ~size; +} + +/* + * Construct a snapshot mapping:
+ * <origin_dev> <COW-dev> <p/n> <chunk-size>
+ */ +static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + struct dm_snapshot *s; + unsigned long chunk_size; + int r = -EINVAL; + char persistent; + char *origin_path; + char *cow_path; + char *value; + int blocksize; + + if (argc < 4) { + ti->error = "dm-snapshot: requires exactly 4 arguments"; + r = -EINVAL; + goto bad1; + } + + origin_path = argv[0]; + cow_path = argv[1]; + persistent = toupper(*argv[2]); + + if (persistent != 'P' && persistent != 'N') { + ti->error = "Persistent flag is not P or N"; + r = -EINVAL; + goto bad1; + } + + chunk_size = simple_strtoul(argv[3], &value, 10); + if (chunk_size == 0 || value == NULL) { + ti->error = "Invalid chunk size"; + r = -EINVAL; + goto bad1; + } + + s = kmalloc(sizeof(*s), GFP_KERNEL); + if (s == NULL) { + ti->error = "Cannot allocate snapshot context private " + "structure"; + r = -ENOMEM; + goto bad1; + } + + r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin); + if (r) { + ti->error = "Cannot get origin device"; + goto bad2; + } + + r = dm_get_device(ti, cow_path, 0, 0, + FMODE_READ | FMODE_WRITE, &s->cow); + if (r) { + dm_put_device(ti, s->origin); + ti->error = "Cannot get COW device"; + goto bad2; + } + + /* + * Chunk size must be multiple of page size. Silently + * round up if it's not. + */ + chunk_size = round_up(chunk_size, PAGE_SIZE >> 9); + + /* Validate the chunk size against the device block size */ + blocksize = s->cow->bdev->bd_disk->queue->hardsect_size; + if (chunk_size % (blocksize >> 9)) { + ti->error = "Chunk size is not a multiple of device blocksize"; + r = -EINVAL; + goto bad3; + } + + /* Check chunk_size is a power of 2 */ + if (chunk_size & (chunk_size - 1)) { + ti->error = "Chunk size is not a power of 2"; + r = -EINVAL; + goto bad3; + } + + s->chunk_size = chunk_size; + s->chunk_mask = chunk_size - 1; + s->type = persistent; + s->chunk_shift = ffs(chunk_size) - 1; + + s->valid = 1; + s->have_metadata = 0; + s->last_percent = 0; + init_rwsem(&s->lock); + s->table = ti->table; + + /* Allocate hash table for COW data */ + if (init_hash_tables(s)) { + ti->error = "Unable to allocate hash table space"; + r = -ENOMEM; + goto bad3; + } + + /* + * Check the persistent flag - done here because we need the iobuf + * to check the LV header + */ + s->store.snap = s; + + if (persistent == 'P') + r = dm_create_persistent(&s->store, chunk_size); + else + r = dm_create_transient(&s->store, s, blocksize); + + if (r) { + ti->error = "Couldn't create exception store"; + r = -EINVAL; + goto bad4; + } + + r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client); + if (r) { + ti->error = "Could not create kcopyd client"; + goto bad5; + } + + /* Add snapshot to the list of snapshots for this origin */ + if (register_snapshot(s)) { + r = -EINVAL; + ti->error = "Cannot register snapshot origin"; + goto bad6; + } + + ti->private = s; + ti->split_io = chunk_size; + + return 0; + + bad6: + kcopyd_client_destroy(s->kcopyd_client); + + bad5: + s->store.destroy(&s->store); + + bad4: + exit_exception_table(&s->pending, pending_cache); + exit_exception_table(&s->complete, exception_cache); + + bad3: + dm_put_device(ti, s->cow); + dm_put_device(ti, s->origin); + + bad2: + kfree(s); + + bad1: + return r; +} + +static void snapshot_dtr(struct dm_target *ti) +{ + struct dm_snapshot *s = (struct dm_snapshot *) ti->private; + + unregister_snapshot(s); + + exit_exception_table(&s->pending, pending_cache); + exit_exception_table(&s->complete, exception_cache); + + /* Deallocate memory used */ + 
s->store.destroy(&s->store); + + dm_put_device(ti, s->origin); + dm_put_device(ti, s->cow); + kcopyd_client_destroy(s->kcopyd_client); + kfree(s); +} + +/* + * Flush a list of buffers. + */ +static void flush_bios(struct bio *bio) +{ + struct bio *n; + + while (bio) { + n = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = n; + } +} + +/* + * Error a list of buffers. + */ +static void error_bios(struct bio *bio) +{ + struct bio *n; + + while (bio) { + n = bio->bi_next; + bio->bi_next = NULL; + bio_io_error(bio, bio->bi_size); + bio = n; + } +} + +static struct bio *__flush_bios(struct pending_exception *pe) +{ + struct pending_exception *sibling; + + if (list_empty(&pe->siblings)) + return bio_list_get(&pe->origin_bios); + + sibling = list_entry(pe->siblings.next, + struct pending_exception, siblings); + + list_del(&pe->siblings); + + /* This is fine as long as kcopyd is single-threaded. If kcopyd + * becomes multi-threaded, we'll need some locking here. + */ + bio_list_merge(&sibling->origin_bios, &pe->origin_bios); + + return NULL; +} + +static void pending_complete(struct pending_exception *pe, int success) +{ + struct exception *e; + struct dm_snapshot *s = pe->snap; + struct bio *flush = NULL; + + if (success) { + e = alloc_exception(); + if (!e) { + DMWARN("Unable to allocate exception."); + down_write(&s->lock); + s->store.drop_snapshot(&s->store); + s->valid = 0; + flush = __flush_bios(pe); + up_write(&s->lock); + + error_bios(bio_list_get(&pe->snapshot_bios)); + goto out; + } + *e = pe->e; + + /* + * Add a proper exception, and remove the + * in-flight exception from the list. + */ + down_write(&s->lock); + insert_exception(&s->complete, e); + remove_exception(&pe->e); + flush = __flush_bios(pe); + + /* Submit any pending write bios */ + up_write(&s->lock); + + flush_bios(bio_list_get(&pe->snapshot_bios)); + } else { + /* Read/write error - snapshot is unusable */ + down_write(&s->lock); + if (s->valid) + DMERR("Error reading/writing snapshot"); + s->store.drop_snapshot(&s->store); + s->valid = 0; + remove_exception(&pe->e); + flush = __flush_bios(pe); + up_write(&s->lock); + + error_bios(bio_list_get(&pe->snapshot_bios)); + + dm_table_event(s->table); + } + + out: + free_pending_exception(pe); + + if (flush) + flush_bios(flush); +} + +static void commit_callback(void *context, int success) +{ + struct pending_exception *pe = (struct pending_exception *) context; + pending_complete(pe, success); +} + +/* + * Called when the copy I/O has finished. kcopyd actually runs + * this code so don't block. + */ +static void copy_callback(int read_err, unsigned int write_err, void *context) +{ + struct pending_exception *pe = (struct pending_exception *) context; + struct dm_snapshot *s = pe->snap; + + if (read_err || write_err) + pending_complete(pe, 0); + + else + /* Update the metadata if we are persistent */ + s->store.commit_exception(&s->store, &pe->e, commit_callback, + pe); +} + +/* + * Dispatches the copy operation to kcopyd. 
+ */ +static inline void start_copy(struct pending_exception *pe) +{ + struct dm_snapshot *s = pe->snap; + struct io_region src, dest; + struct block_device *bdev = s->origin->bdev; + sector_t dev_size; + + dev_size = get_dev_size(bdev); + + src.bdev = bdev; + src.sector = chunk_to_sector(s, pe->e.old_chunk); + src.count = min(s->chunk_size, dev_size - src.sector); + + dest.bdev = s->cow->bdev; + dest.sector = chunk_to_sector(s, pe->e.new_chunk); + dest.count = src.count; + + /* Hand over to kcopyd */ + kcopyd_copy(s->kcopyd_client, + &src, 1, &dest, 0, copy_callback, pe); +} + +/* + * Looks to see if this snapshot already has a pending exception + * for this chunk, otherwise it allocates a new one and inserts + * it into the pending table. + * + * NOTE: a write lock must be held on snap->lock before calling + * this. + */ +static struct pending_exception * +__find_pending_exception(struct dm_snapshot *s, struct bio *bio) +{ + struct exception *e; + struct pending_exception *pe; + chunk_t chunk = sector_to_chunk(s, bio->bi_sector); + + /* + * Is there a pending exception for this already ? + */ + e = lookup_exception(&s->pending, chunk); + if (e) { + /* cast the exception to a pending exception */ + pe = container_of(e, struct pending_exception, e); + + } else { + /* + * Create a new pending exception, we don't want + * to hold the lock while we do this. + */ + up_write(&s->lock); + pe = alloc_pending_exception(); + down_write(&s->lock); + + e = lookup_exception(&s->pending, chunk); + if (e) { + free_pending_exception(pe); + pe = container_of(e, struct pending_exception, e); + } else { + pe->e.old_chunk = chunk; + bio_list_init(&pe->origin_bios); + bio_list_init(&pe->snapshot_bios); + INIT_LIST_HEAD(&pe->siblings); + pe->snap = s; + pe->started = 0; + + if (s->store.prepare_exception(&s->store, &pe->e)) { + free_pending_exception(pe); + s->valid = 0; + return NULL; + } + + insert_exception(&s->pending, &pe->e); + } + } + + return pe; +} + +static inline void remap_exception(struct dm_snapshot *s, struct exception *e, + struct bio *bio) +{ + bio->bi_bdev = s->cow->bdev; + bio->bi_sector = chunk_to_sector(s, e->new_chunk) + + (bio->bi_sector & s->chunk_mask); +} + +static int snapshot_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + struct exception *e; + struct dm_snapshot *s = (struct dm_snapshot *) ti->private; + int r = 1; + chunk_t chunk; + struct pending_exception *pe; + + chunk = sector_to_chunk(s, bio->bi_sector); + + /* Full snapshots are not usable */ + if (!s->valid) + return -1; + + /* + * Write to snapshot - higher level takes care of RW/RO + * flags so we should only get this if we are + * writeable. 
+ */ + if (bio_rw(bio) == WRITE) { + + /* FIXME: should only take write lock if we need + * to copy an exception */ + down_write(&s->lock); + + /* If the block is already remapped - use that, else remap it */ + e = lookup_exception(&s->complete, chunk); + if (e) { + remap_exception(s, e, bio); + up_write(&s->lock); + + } else { + pe = __find_pending_exception(s, bio); + + if (!pe) { + if (s->store.drop_snapshot) + s->store.drop_snapshot(&s->store); + s->valid = 0; + r = -EIO; + up_write(&s->lock); + } else { + remap_exception(s, &pe->e, bio); + bio_list_add(&pe->snapshot_bios, bio); + + if (!pe->started) { + /* this is protected by snap->lock */ + pe->started = 1; + up_write(&s->lock); + start_copy(pe); + } else + up_write(&s->lock); + r = 0; + } + } + + } else { + /* + * FIXME: this read path scares me because we + * always use the origin when we have a pending + * exception. However I can't think of a + * situation where this is wrong - ejt. + */ + + /* Do reads */ + down_read(&s->lock); + + /* See if it it has been remapped */ + e = lookup_exception(&s->complete, chunk); + if (e) + remap_exception(s, e, bio); + else + bio->bi_bdev = s->origin->bdev; + + up_read(&s->lock); + } + + return r; +} + +static void snapshot_resume(struct dm_target *ti) +{ + struct dm_snapshot *s = (struct dm_snapshot *) ti->private; + + if (s->have_metadata) + return; + + if (s->store.read_metadata(&s->store)) { + down_write(&s->lock); + s->valid = 0; + up_write(&s->lock); + } + + s->have_metadata = 1; +} + +static int snapshot_status(struct dm_target *ti, status_type_t type, + char *result, unsigned int maxlen) +{ + struct dm_snapshot *snap = (struct dm_snapshot *) ti->private; + char cow[32]; + char org[32]; + + switch (type) { + case STATUSTYPE_INFO: + if (!snap->valid) + snprintf(result, maxlen, "Invalid"); + else { + if (snap->store.fraction_full) { + sector_t numerator, denominator; + snap->store.fraction_full(&snap->store, + &numerator, + &denominator); + snprintf(result, maxlen, + SECTOR_FORMAT "/" SECTOR_FORMAT, + numerator, denominator); + } + else + snprintf(result, maxlen, "Unknown"); + } + break; + + case STATUSTYPE_TABLE: + /* + * kdevname returns a static pointer so we need + * to make private copies if the output is to + * make sense. + */ + format_dev_t(cow, snap->cow->bdev->bd_dev); + format_dev_t(org, snap->origin->bdev->bd_dev); + snprintf(result, maxlen, "%s %s %c " SECTOR_FORMAT, org, cow, + snap->type, snap->chunk_size); + break; + } + + return 0; +} + +/*----------------------------------------------------------------- + * Origin methods + *---------------------------------------------------------------*/ +static void list_merge(struct list_head *l1, struct list_head *l2) +{ + struct list_head *l1_n, *l2_p; + + l1_n = l1->next; + l2_p = l2->prev; + + l1->next = l2; + l2->prev = l1; + + l2_p->next = l1_n; + l1_n->prev = l2_p; +} + +static int __origin_write(struct list_head *snapshots, struct bio *bio) +{ + int r = 1, first = 1; + struct dm_snapshot *snap; + struct exception *e; + struct pending_exception *pe, *last = NULL; + chunk_t chunk; + + /* Do all the snapshots on this origin */ + list_for_each_entry (snap, snapshots, list) { + + /* Only deal with valid snapshots */ + if (!snap->valid) + continue; + + down_write(&snap->lock); + + /* + * Remember, different snapshots can have + * different chunk sizes. + */ + chunk = sector_to_chunk(snap, bio->bi_sector); + + /* + * Check exception table to see if block + * is already remapped in this snapshot + * and trigger an exception if not. 
+ */ + e = lookup_exception(&snap->complete, chunk); + if (!e) { + pe = __find_pending_exception(snap, bio); + if (!pe) { + snap->store.drop_snapshot(&snap->store); + snap->valid = 0; + + } else { + if (last) + list_merge(&pe->siblings, + &last->siblings); + + last = pe; + r = 0; + } + } + + up_write(&snap->lock); + } + + /* + * Now that we have a complete pe list we can start the copying. + */ + if (last) { + pe = last; + do { + down_write(&pe->snap->lock); + if (first) + bio_list_add(&pe->origin_bios, bio); + if (!pe->started) { + pe->started = 1; + up_write(&pe->snap->lock); + start_copy(pe); + } else + up_write(&pe->snap->lock); + first = 0; + pe = list_entry(pe->siblings.next, + struct pending_exception, siblings); + + } while (pe != last); + } + + return r; +} + +/* + * Called on a write from the origin driver. + */ +static int do_origin(struct dm_dev *origin, struct bio *bio) +{ + struct origin *o; + int r = 1; + + down_read(&_origins_lock); + o = __lookup_origin(origin->bdev); + if (o) + r = __origin_write(&o->snapshots, bio); + up_read(&_origins_lock); + + return r; +} + +/* + * Origin: maps a linear range of a device, with hooks for snapshotting. + */ + +/* + * Construct an origin mapping: + * The context for an origin is merely a 'struct dm_dev *' + * pointing to the real device. + */ +static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + int r; + struct dm_dev *dev; + + if (argc != 1) { + ti->error = "dm-origin: incorrect number of arguments"; + return -EINVAL; + } + + r = dm_get_device(ti, argv[0], 0, ti->len, + dm_table_get_mode(ti->table), &dev); + if (r) { + ti->error = "Cannot get target device"; + return r; + } + + ti->private = dev; + return 0; +} + +static void origin_dtr(struct dm_target *ti) +{ + struct dm_dev *dev = (struct dm_dev *) ti->private; + dm_put_device(ti, dev); +} + +static int origin_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + struct dm_dev *dev = (struct dm_dev *) ti->private; + bio->bi_bdev = dev->bdev; + + /* Only tell snapshots if this is a write */ + return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : 1; +} + +#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) + +/* + * Set the target "split_io" field to the minimum of all the snapshots' + * chunk sizes. 
+ */ +static void origin_resume(struct dm_target *ti) +{ + struct dm_dev *dev = (struct dm_dev *) ti->private; + struct dm_snapshot *snap; + struct origin *o; + chunk_t chunk_size = 0; + + down_read(&_origins_lock); + o = __lookup_origin(dev->bdev); + if (o) + list_for_each_entry (snap, &o->snapshots, list) + chunk_size = min_not_zero(chunk_size, snap->chunk_size); + up_read(&_origins_lock); + + ti->split_io = chunk_size; +} + +static int origin_status(struct dm_target *ti, status_type_t type, char *result, + unsigned int maxlen) +{ + struct dm_dev *dev = (struct dm_dev *) ti->private; + char buffer[32]; + + switch (type) { + case STATUSTYPE_INFO: + result[0] = '\0'; + break; + + case STATUSTYPE_TABLE: + format_dev_t(buffer, dev->bdev->bd_dev); + snprintf(result, maxlen, "%s", buffer); + break; + } + + return 0; +} + +static struct target_type origin_target = { + .name = "snapshot-origin", + .version = {1, 0, 1}, + .module = THIS_MODULE, + .ctr = origin_ctr, + .dtr = origin_dtr, + .map = origin_map, + .resume = origin_resume, + .status = origin_status, +}; + +static struct target_type snapshot_target = { + .name = "snapshot", + .version = {1, 0, 1}, + .module = THIS_MODULE, + .ctr = snapshot_ctr, + .dtr = snapshot_dtr, + .map = snapshot_map, + .resume = snapshot_resume, + .status = snapshot_status, +}; + +static int __init dm_snapshot_init(void) +{ + int r; + + r = dm_register_target(&snapshot_target); + if (r) { + DMERR("snapshot target register failed %d", r); + return r; + } + + r = dm_register_target(&origin_target); + if (r < 0) { + DMERR("Device mapper: Origin: register failed %d\n", r); + goto bad1; + } + + r = init_origin_hash(); + if (r) { + DMERR("init_origin_hash failed."); + goto bad2; + } + + exception_cache = kmem_cache_create("dm-snapshot-ex", + sizeof(struct exception), + __alignof__(struct exception), + 0, NULL, NULL); + if (!exception_cache) { + DMERR("Couldn't create exception cache."); + r = -ENOMEM; + goto bad3; + } + + pending_cache = + kmem_cache_create("dm-snapshot-in", + sizeof(struct pending_exception), + __alignof__(struct pending_exception), + 0, NULL, NULL); + if (!pending_cache) { + DMERR("Couldn't create pending cache."); + r = -ENOMEM; + goto bad4; + } + + pending_pool = mempool_create(128, mempool_alloc_slab, + mempool_free_slab, pending_cache); + if (!pending_pool) { + DMERR("Couldn't create pending pool."); + r = -ENOMEM; + goto bad5; + } + + return 0; + + bad5: + kmem_cache_destroy(pending_cache); + bad4: + kmem_cache_destroy(exception_cache); + bad3: + exit_origin_hash(); + bad2: + dm_unregister_target(&origin_target); + bad1: + dm_unregister_target(&snapshot_target); + return r; +} + +static void __exit dm_snapshot_exit(void) +{ + int r; + + r = dm_unregister_target(&snapshot_target); + if (r) + DMERR("snapshot unregister failed %d", r); + + r = dm_unregister_target(&origin_target); + if (r) + DMERR("origin unregister failed %d", r); + + exit_origin_hash(); + mempool_destroy(pending_pool); + kmem_cache_destroy(pending_cache); + kmem_cache_destroy(exception_cache); +} + +/* Module hooks */ +module_init(dm_snapshot_init); +module_exit(dm_snapshot_exit); + +MODULE_DESCRIPTION(DM_NAME " snapshot target"); +MODULE_AUTHOR("Joe Thornber"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h new file mode 100644 index 000000000..375aa24d4 --- /dev/null +++ b/drivers/md/dm-snap.h @@ -0,0 +1,161 @@ +/* + * dm-snapshot.c + * + * Copyright (C) 2001-2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. 
+ */ + +#ifndef DM_SNAPSHOT_H +#define DM_SNAPSHOT_H + +#include "dm.h" +#include + +struct exception_table { + uint32_t hash_mask; + struct list_head *table; +}; + +/* + * The snapshot code deals with largish chunks of the disk at a + * time. Typically 64k - 256k. + */ +/* FIXME: can we get away with limiting these to a uint32_t ? */ +typedef sector_t chunk_t; + +/* + * An exception is used where an old chunk of data has been + * replaced by a new one. + */ +struct exception { + struct list_head hash_list; + + chunk_t old_chunk; + chunk_t new_chunk; +}; + +/* + * Abstraction to handle the meta/layout of exception stores (the + * COW device). + */ +struct exception_store { + + /* + * Destroys this object when you've finished with it. + */ + void (*destroy) (struct exception_store *store); + + /* + * The target shouldn't read the COW device until this is + * called. + */ + int (*read_metadata) (struct exception_store *store); + + /* + * Find somewhere to store the next exception. + */ + int (*prepare_exception) (struct exception_store *store, + struct exception *e); + + /* + * Update the metadata with this exception. + */ + void (*commit_exception) (struct exception_store *store, + struct exception *e, + void (*callback) (void *, int success), + void *callback_context); + + /* + * The snapshot is invalid, note this in the metadata. + */ + void (*drop_snapshot) (struct exception_store *store); + + /* + * Return how full the snapshot is. + */ + void (*fraction_full) (struct exception_store *store, + sector_t *numerator, + sector_t *denominator); + + struct dm_snapshot *snap; + void *context; +}; + +struct dm_snapshot { + struct rw_semaphore lock; + struct dm_table *table; + + struct dm_dev *origin; + struct dm_dev *cow; + + /* List of snapshots per Origin */ + struct list_head list; + + /* Size of data blocks saved - must be a power of 2 */ + chunk_t chunk_size; + chunk_t chunk_mask; + chunk_t chunk_shift; + + /* You can't use a snapshot if this is 0 (e.g. if full) */ + int valid; + int have_metadata; + + /* Used for display of table */ + char type; + + /* The last percentage we notified */ + int last_percent; + + struct exception_table pending; + struct exception_table complete; + + /* The on disk metadata handler */ + struct exception_store store; + + struct kcopyd_client *kcopyd_client; +}; + +/* + * Used by the exception stores to load exceptions hen + * initialising. + */ +int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new); + +/* + * Constructor and destructor for the default persistent + * store. + */ +int dm_create_persistent(struct exception_store *store, uint32_t chunk_size); + +int dm_create_transient(struct exception_store *store, + struct dm_snapshot *s, int blocksize); + +/* + * Return the number of sectors in the device. + */ +static inline sector_t get_dev_size(struct block_device *bdev) +{ + return bdev->bd_inode->i_size >> SECTOR_SHIFT; +} + +static inline chunk_t sector_to_chunk(struct dm_snapshot *s, sector_t sector) +{ + return (sector & ~s->chunk_mask) >> s->chunk_shift; +} + +static inline sector_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk) +{ + return chunk << s->chunk_shift; +} + +static inline int bdev_equal(struct block_device *lhs, struct block_device *rhs) +{ + /* + * There is only ever one instance of a particular block + * device so we can compare pointers safely. 
+ */ + return lhs == rhs; +} + +#endif diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c new file mode 100644 index 000000000..725f2c812 --- /dev/null +++ b/drivers/md/dm-zero.c @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2003 Christophe Saout + * + * This file is released under the GPL. + */ + +#include "dm.h" + +#include +#include +#include + +/* + * Construct a dummy mapping that only returns zeros + */ +static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + if (argc != 0) { + ti->error = "dm-zero: No arguments required"; + return -EINVAL; + } + + return 0; +} + +/* + * Fills the bio pages with zeros + */ +static void zero_fill_bio(struct bio *bio) +{ + unsigned long flags; + struct bio_vec *bv; + int i; + + bio_for_each_segment(bv, bio, i) { + char *data = bvec_kmap_irq(bv, &flags); + memset(data, 0, bv->bv_len); + flush_dcache_page(bv->bv_page); + bvec_kunmap_irq(data, &flags); + } +} + +/* + * Return zeros only on reads + */ +static int zero_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + switch(bio_rw(bio)) { + case READ: + zero_fill_bio(bio); + break; + case READA: + /* readahead of null bytes only wastes buffer cache */ + return -EIO; + case WRITE: + /* writes get silently dropped */ + break; + } + + bio_endio(bio, bio->bi_size, 0); + + /* accepted bio, don't make new request */ + return 0; +} + +static struct target_type zero_target = { + .name = "zero", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = zero_ctr, + .map = zero_map, +}; + +int __init dm_zero_init(void) +{ + int r = dm_register_target(&zero_target); + + if (r < 0) + DMERR("zero: register failed %d", r); + + return r; +} + +void __exit dm_zero_exit(void) +{ + int r = dm_unregister_target(&zero_target); + + if (r < 0) + DMERR("zero: unregister failed %d", r); +} + +module_init(dm_zero_init) +module_exit(dm_zero_exit) + +MODULE_AUTHOR("Christophe Saout "); +MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c new file mode 100644 index 000000000..40e46944e --- /dev/null +++ b/drivers/md/kcopyd.c @@ -0,0 +1,699 @@ +/* + * Copyright (C) 2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + * + * Kcopyd provides a simple interface for copying an area of one + * block-device to one or more other block-devices, with an asynchronous + * completion notification. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kcopyd.h" + +/* FIXME: this is only needed for the DMERR macros */ +#include "dm.h" + +static struct workqueue_struct *_kcopyd_wq; +static struct work_struct _kcopyd_work; + +static inline void wake(void) +{ + queue_work(_kcopyd_wq, &_kcopyd_work); +} + +/*----------------------------------------------------------------- + * Each kcopyd client has its own little pool of preallocated + * pages for kcopyd io. 
+ *---------------------------------------------------------------*/ +struct kcopyd_client { + struct list_head list; + + spinlock_t lock; + struct page_list *pages; + unsigned int nr_pages; + unsigned int nr_free_pages; +}; + +static struct page_list *alloc_pl(void) +{ + struct page_list *pl; + + pl = kmalloc(sizeof(*pl), GFP_KERNEL); + if (!pl) + return NULL; + + pl->page = alloc_page(GFP_KERNEL); + if (!pl->page) { + kfree(pl); + return NULL; + } + + return pl; +} + +static void free_pl(struct page_list *pl) +{ + __free_page(pl->page); + kfree(pl); +} + +static int kcopyd_get_pages(struct kcopyd_client *kc, + unsigned int nr, struct page_list **pages) +{ + struct page_list *pl; + + spin_lock(&kc->lock); + if (kc->nr_free_pages < nr) { + spin_unlock(&kc->lock); + return -ENOMEM; + } + + kc->nr_free_pages -= nr; + for (*pages = pl = kc->pages; --nr; pl = pl->next) + ; + + kc->pages = pl->next; + pl->next = 0; + + spin_unlock(&kc->lock); + + return 0; +} + +static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl) +{ + struct page_list *cursor; + + spin_lock(&kc->lock); + for (cursor = pl; cursor->next; cursor = cursor->next) + kc->nr_free_pages++; + + kc->nr_free_pages++; + cursor->next = kc->pages; + kc->pages = pl; + spin_unlock(&kc->lock); +} + +/* + * These three functions resize the page pool. + */ +static void drop_pages(struct page_list *pl) +{ + struct page_list *next; + + while (pl) { + next = pl->next; + free_pl(pl); + pl = next; + } +} + +static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr) +{ + unsigned int i; + struct page_list *pl = NULL, *next; + + for (i = 0; i < nr; i++) { + next = alloc_pl(); + if (!next) { + if (pl) + drop_pages(pl); + return -ENOMEM; + } + next->next = pl; + pl = next; + } + + kcopyd_put_pages(kc, pl); + kc->nr_pages += nr; + return 0; +} + +static void client_free_pages(struct kcopyd_client *kc) +{ + BUG_ON(kc->nr_free_pages != kc->nr_pages); + drop_pages(kc->pages); + kc->pages = NULL; + kc->nr_free_pages = kc->nr_pages = 0; +} + +/*----------------------------------------------------------------- + * kcopyd_jobs need to be allocated by the *clients* of kcopyd, + * for this reason we use a mempool to prevent the client from + * ever having to do io (which could cause a deadlock). + *---------------------------------------------------------------*/ +struct kcopyd_job { + struct kcopyd_client *kc; + struct list_head list; + unsigned long flags; + + /* + * Error state of the job. + */ + int read_err; + unsigned int write_err; + + /* + * Either READ or WRITE + */ + int rw; + struct io_region source; + + /* + * The destinations for the transfer. + */ + unsigned int num_dests; + struct io_region dests[KCOPYD_MAX_REGIONS]; + + sector_t offset; + unsigned int nr_pages; + struct page_list *pages; + + /* + * Set this to ensure you are notified when the job has + * completed. 'context' is for callback to use. + */ + kcopyd_notify_fn fn; + void *context; + + /* + * These fields are only used if the job has been split + * into more manageable parts. + */ + struct semaphore lock; + atomic_t sub_jobs; + sector_t progress; +}; + +/* FIXME: this should scale with the number of pages */ +#define MIN_JOBS 512 + +static kmem_cache_t *_job_cache; +static mempool_t *_job_pool; + +/* + * We maintain three lists of jobs: + * + * i) jobs waiting for pages + * ii) jobs that have pages, and are waiting for the io to be issued. + * iii) jobs that have completed. + * + * All three of these are protected by job_lock. 
+ */ +static spinlock_t _job_lock = SPIN_LOCK_UNLOCKED; + +static LIST_HEAD(_complete_jobs); +static LIST_HEAD(_io_jobs); +static LIST_HEAD(_pages_jobs); + +static int jobs_init(void) +{ + _job_cache = kmem_cache_create("kcopyd-jobs", + sizeof(struct kcopyd_job), + __alignof__(struct kcopyd_job), + 0, NULL, NULL); + if (!_job_cache) + return -ENOMEM; + + _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab, + mempool_free_slab, _job_cache); + if (!_job_pool) { + kmem_cache_destroy(_job_cache); + return -ENOMEM; + } + + return 0; +} + +static void jobs_exit(void) +{ + BUG_ON(!list_empty(&_complete_jobs)); + BUG_ON(!list_empty(&_io_jobs)); + BUG_ON(!list_empty(&_pages_jobs)); + + mempool_destroy(_job_pool); + kmem_cache_destroy(_job_cache); + _job_pool = NULL; + _job_cache = NULL; +} + +/* + * Functions to push and pop a job onto the head of a given job + * list. + */ +static inline struct kcopyd_job *pop(struct list_head *jobs) +{ + struct kcopyd_job *job = NULL; + unsigned long flags; + + spin_lock_irqsave(&_job_lock, flags); + + if (!list_empty(jobs)) { + job = list_entry(jobs->next, struct kcopyd_job, list); + list_del(&job->list); + } + spin_unlock_irqrestore(&_job_lock, flags); + + return job; +} + +static inline void push(struct list_head *jobs, struct kcopyd_job *job) +{ + unsigned long flags; + + spin_lock_irqsave(&_job_lock, flags); + list_add_tail(&job->list, jobs); + spin_unlock_irqrestore(&_job_lock, flags); +} + +/* + * These three functions process 1 item from the corresponding + * job list. + * + * They return: + * < 0: error + * 0: success + * > 0: can't process yet. + */ +static int run_complete_job(struct kcopyd_job *job) +{ + void *context = job->context; + int read_err = job->read_err; + unsigned int write_err = job->write_err; + kcopyd_notify_fn fn = job->fn; + + kcopyd_put_pages(job->kc, job->pages); + mempool_free(job, _job_pool); + fn(read_err, write_err, context); + return 0; +} + +static void complete_io(unsigned long error, void *context) +{ + struct kcopyd_job *job = (struct kcopyd_job *) context; + + if (error) { + if (job->rw == WRITE) + job->write_err &= error; + else + job->read_err = 1; + + if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) { + push(&_complete_jobs, job); + wake(); + return; + } + } + + if (job->rw == WRITE) + push(&_complete_jobs, job); + + else { + job->rw = WRITE; + push(&_io_jobs, job); + } + + wake(); +} + +/* + * Request io on as many buffer heads as we can currently get for + * a particular job. + */ +static int run_io_job(struct kcopyd_job *job) +{ + int r; + + if (job->rw == READ) + r = dm_io_async(1, &job->source, job->rw, + job->pages, + job->offset, complete_io, job); + + else + r = dm_io_async(job->num_dests, job->dests, job->rw, + job->pages, + job->offset, complete_io, job); + + return r; +} + +static int run_pages_job(struct kcopyd_job *job) +{ + int r; + + job->nr_pages = dm_div_up(job->dests[0].count + job->offset, + PAGE_SIZE >> 9); + r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages); + if (!r) { + /* this job is ready for io */ + push(&_io_jobs, job); + return 0; + } + + if (r == -ENOMEM) + /* can't complete now */ + return 1; + + return r; +} + +/* + * Run through a list for as long as possible. Returns the count + * of successful jobs. 
+ */ +static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *)) +{ + struct kcopyd_job *job; + int r, count = 0; + + while ((job = pop(jobs))) { + + r = fn(job); + + if (r < 0) { + /* error this rogue job */ + if (job->rw == WRITE) + job->write_err = (unsigned int) -1; + else + job->read_err = 1; + push(&_complete_jobs, job); + break; + } + + if (r > 0) { + /* + * We couldn't service this job ATM, so + * push this job back onto the list. + */ + push(jobs, job); + break; + } + + count++; + } + + return count; +} + +/* + * kcopyd does this every time it's woken up. + */ +static void do_work(void *ignored) +{ + /* + * The order that these are called is *very* important. + * complete jobs can free some pages for pages jobs. + * Pages jobs when successful will jump onto the io jobs + * list. io jobs call wake when they complete and it all + * starts again. + */ + process_jobs(&_complete_jobs, run_complete_job); + process_jobs(&_pages_jobs, run_pages_job); + process_jobs(&_io_jobs, run_io_job); +} + +/* + * If we are copying a small region we just dispatch a single job + * to do the copy, otherwise the io has to be split up into many + * jobs. + */ +static void dispatch_job(struct kcopyd_job *job) +{ + push(&_pages_jobs, job); + wake(); +} + +#define SUB_JOB_SIZE 128 +static void segment_complete(int read_err, + unsigned int write_err, void *context) +{ + /* FIXME: tidy this function */ + sector_t progress = 0; + sector_t count = 0; + struct kcopyd_job *job = (struct kcopyd_job *) context; + + down(&job->lock); + + /* update the error */ + if (read_err) + job->read_err = 1; + + if (write_err) + job->write_err &= write_err; + + /* + * Only dispatch more work if there hasn't been an error. + */ + if ((!job->read_err && !job->write_err) || + test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) { + /* get the next chunk of work */ + progress = job->progress; + count = job->source.count - progress; + if (count) { + if (count > SUB_JOB_SIZE) + count = SUB_JOB_SIZE; + + job->progress += count; + } + } + up(&job->lock); + + if (count) { + int i; + struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO); + + *sub_job = *job; + sub_job->source.sector += progress; + sub_job->source.count = count; + + for (i = 0; i < job->num_dests; i++) { + sub_job->dests[i].sector += progress; + sub_job->dests[i].count = count; + } + + sub_job->fn = segment_complete; + sub_job->context = job; + dispatch_job(sub_job); + + } else if (atomic_dec_and_test(&job->sub_jobs)) { + + /* + * To avoid a race we must keep the job around + * until after the notify function has completed. + * Otherwise the client may try and stop the job + * after we've completed. + */ + job->fn(read_err, write_err, job->context); + mempool_free(job, _job_pool); + } +} + +/* + * Create some little jobs that will do the move between + * them. + */ +#define SPLIT_COUNT 8 +static void split_job(struct kcopyd_job *job) +{ + int i; + + atomic_set(&job->sub_jobs, SPLIT_COUNT); + for (i = 0; i < SPLIT_COUNT; i++) + segment_complete(0, 0u, job); +} + +int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from, + unsigned int num_dests, struct io_region *dests, + unsigned int flags, kcopyd_notify_fn fn, void *context) +{ + struct kcopyd_job *job; + + /* + * Allocate a new job. + */ + job = mempool_alloc(_job_pool, GFP_NOIO); + + /* + * set up for the read. 
+ */ + job->kc = kc; + job->flags = flags; + job->read_err = 0; + job->write_err = 0; + job->rw = READ; + + job->source = *from; + + job->num_dests = num_dests; + memcpy(&job->dests, dests, sizeof(*dests) * num_dests); + + job->offset = 0; + job->nr_pages = 0; + job->pages = NULL; + + job->fn = fn; + job->context = context; + + if (job->source.count < SUB_JOB_SIZE) + dispatch_job(job); + + else { + init_MUTEX(&job->lock); + job->progress = 0; + split_job(job); + } + + return 0; +} + +/* + * Cancels a kcopyd job, eg. someone might be deactivating a + * mirror. + */ +int kcopyd_cancel(struct kcopyd_job *job, int block) +{ + /* FIXME: finish */ + return -1; +} + +/*----------------------------------------------------------------- + * Unit setup + *---------------------------------------------------------------*/ +static DECLARE_MUTEX(_client_lock); +static LIST_HEAD(_clients); + +static int client_add(struct kcopyd_client *kc) +{ + down(&_client_lock); + list_add(&kc->list, &_clients); + up(&_client_lock); + return 0; +} + +static void client_del(struct kcopyd_client *kc) +{ + down(&_client_lock); + list_del(&kc->list); + up(&_client_lock); +} + +static DECLARE_MUTEX(kcopyd_init_lock); +static int kcopyd_clients = 0; + +static int kcopyd_init(void) +{ + int r; + + down(&kcopyd_init_lock); + + if (kcopyd_clients) { + /* Already initialized. */ + kcopyd_clients++; + up(&kcopyd_init_lock); + return 0; + } + + r = jobs_init(); + if (r) { + up(&kcopyd_init_lock); + return r; + } + + _kcopyd_wq = create_singlethread_workqueue("kcopyd"); + if (!_kcopyd_wq) { + jobs_exit(); + up(&kcopyd_init_lock); + return -ENOMEM; + } + + kcopyd_clients++; + INIT_WORK(&_kcopyd_work, do_work, NULL); + up(&kcopyd_init_lock); + return 0; +} + +static void kcopyd_exit(void) +{ + down(&kcopyd_init_lock); + kcopyd_clients--; + if (!kcopyd_clients) { + jobs_exit(); + destroy_workqueue(_kcopyd_wq); + _kcopyd_wq = NULL; + } + up(&kcopyd_init_lock); +} + +int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result) +{ + int r = 0; + struct kcopyd_client *kc; + + r = kcopyd_init(); + if (r) + return r; + + kc = kmalloc(sizeof(*kc), GFP_KERNEL); + if (!kc) { + kcopyd_exit(); + return -ENOMEM; + } + + kc->lock = SPIN_LOCK_UNLOCKED; + kc->pages = NULL; + kc->nr_pages = kc->nr_free_pages = 0; + r = client_alloc_pages(kc, nr_pages); + if (r) { + kfree(kc); + kcopyd_exit(); + return r; + } + + r = dm_io_get(nr_pages); + if (r) { + client_free_pages(kc); + kfree(kc); + kcopyd_exit(); + return r; + } + + r = client_add(kc); + if (r) { + dm_io_put(nr_pages); + client_free_pages(kc); + kfree(kc); + kcopyd_exit(); + return r; + } + + *result = kc; + return 0; +} + +void kcopyd_client_destroy(struct kcopyd_client *kc) +{ + dm_io_put(kc->nr_pages); + client_free_pages(kc); + client_del(kc); + kfree(kc); + kcopyd_exit(); +} + +EXPORT_SYMBOL(kcopyd_client_create); +EXPORT_SYMBOL(kcopyd_client_destroy); +EXPORT_SYMBOL(kcopyd_copy); +EXPORT_SYMBOL(kcopyd_cancel); diff --git a/drivers/md/kcopyd.h b/drivers/md/kcopyd.h new file mode 100644 index 000000000..4621ea055 --- /dev/null +++ b/drivers/md/kcopyd.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2001 Sistina Software + * + * This file is released under the GPL. + * + * Kcopyd provides a simple interface for copying an area of one + * block-device to one or more other block-devices, with an asynchronous + * completion notification. 
+ */ + +#ifndef DM_KCOPYD_H +#define DM_KCOPYD_H + +#include "dm-io.h" + +/* FIXME: make this configurable */ +#define KCOPYD_MAX_REGIONS 8 + +#define KCOPYD_IGNORE_ERROR 1 + +/* + * To use kcopyd you must first create a kcopyd client object. + */ +struct kcopyd_client; +int kcopyd_client_create(unsigned int num_pages, struct kcopyd_client **result); +void kcopyd_client_destroy(struct kcopyd_client *kc); + +/* + * Submit a copy job to kcopyd. This is built on top of the + * previous three fns. + * + * read_err is a boolean, + * write_err is a bitset, with 1 bit for each destination region + */ +typedef void (*kcopyd_notify_fn)(int read_err, + unsigned int write_err, void *context); + +int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from, + unsigned int num_dests, struct io_region *dests, + unsigned int flags, kcopyd_notify_fn fn, void *context); + +#endif diff --git a/drivers/media/video/ovcamchip/Makefile b/drivers/media/video/ovcamchip/Makefile new file mode 100644 index 000000000..bca41ad93 --- /dev/null +++ b/drivers/media/video/ovcamchip/Makefile @@ -0,0 +1,4 @@ +ovcamchip-objs := ovcamchip_core.o ov6x20.o ov6x30.o ov7x10.o ov7x20.o \ + ov76be.o + +obj-$(CONFIG_VIDEO_OVCAMCHIP) += ovcamchip.o diff --git a/drivers/media/video/ovcamchip/ov6x20.c b/drivers/media/video/ovcamchip/ov6x20.c new file mode 100644 index 000000000..3433619ad --- /dev/null +++ b/drivers/media/video/ovcamchip/ov6x20.c @@ -0,0 +1,415 @@ +/* OmniVision OV6620/OV6120 Camera Chip Support Code + * + * Copyright (c) 1999-2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. + */ + +#define DEBUG + +#include +#include "ovcamchip_priv.h" + +/* Registers */ +#define REG_GAIN 0x00 /* gain [5:0] */ +#define REG_BLUE 0x01 /* blue gain */ +#define REG_RED 0x02 /* red gain */ +#define REG_SAT 0x03 /* saturation */ +#define REG_CNT 0x05 /* Y contrast */ +#define REG_BRT 0x06 /* Y brightness */ +#define REG_WB_BLUE 0x0C /* WB blue ratio [5:0] */ +#define REG_WB_RED 0x0D /* WB red ratio [5:0] */ +#define REG_EXP 0x10 /* exposure */ + +/* Window parameters */ +#define HWSBASE 0x38 +#define HWEBASE 0x3A +#define VWSBASE 0x05 +#define VWEBASE 0x06 + +struct ov6x20 { + int auto_brt; + int auto_exp; + int backlight; + int bandfilt; + int mirror; +}; + +/* Initial values for use with OV511/OV511+ cameras */ +static struct ovcamchip_regvals regvals_init_6x20_511[] = { + { 0x12, 0x80 }, /* reset */ + { 0x11, 0x01 }, + { 0x03, 0x60 }, + { 0x05, 0x7f }, /* For when autoadjust is off */ + { 0x07, 0xa8 }, + { 0x0c, 0x24 }, + { 0x0d, 0x24 }, + { 0x0f, 0x15 }, /* COMS */ + { 0x10, 0x75 }, /* AEC Exposure time */ + { 0x12, 0x24 }, /* Enable AGC and AWB */ + { 0x14, 0x04 }, + { 0x16, 0x03 }, + { 0x26, 0xb2 }, /* BLC enable */ + /* 0x28: 0x05 Selects RGB format if RGB on */ + { 0x28, 0x05 }, + { 0x2a, 0x04 }, /* Disable framerate adjust */ + { 0x2d, 0x99 }, + { 0x33, 0xa0 }, /* Color Processing Parameter */ + { 0x34, 0xd2 }, /* Max A/D range */ + { 0x38, 0x8b }, + { 0x39, 0x40 }, + + { 0x3c, 0x39 }, /* Enable AEC mode changing */ + { 0x3c, 0x3c }, /* Change AEC mode */ + { 0x3c, 0x24 }, /* Disable AEC mode changing */ + + { 0x3d, 0x80 }, + /* These next two registers (0x4a, 0x4b) are undocumented. 
They + * control the color balance */ + { 0x4a, 0x80 }, + { 0x4b, 0x80 }, + { 0x4d, 0xd2 }, /* This reduces noise a bit */ + { 0x4e, 0xc1 }, + { 0x4f, 0x04 }, + { 0xff, 0xff }, /* END MARKER */ +}; + +/* Initial values for use with OV518 cameras */ +static struct ovcamchip_regvals regvals_init_6x20_518[] = { + { 0x12, 0x80 }, /* Do a reset */ + { 0x03, 0xc0 }, /* Saturation */ + { 0x05, 0x8a }, /* Contrast */ + { 0x0c, 0x24 }, /* AWB blue */ + { 0x0d, 0x24 }, /* AWB red */ + { 0x0e, 0x8d }, /* Additional 2x gain */ + { 0x0f, 0x25 }, /* Black expanding level = 1.3V */ + { 0x11, 0x01 }, /* Clock div. */ + { 0x12, 0x24 }, /* Enable AGC and AWB */ + { 0x13, 0x01 }, /* (default) */ + { 0x14, 0x80 }, /* Set reserved bit 7 */ + { 0x15, 0x01 }, /* (default) */ + { 0x16, 0x03 }, /* (default) */ + { 0x17, 0x38 }, /* (default) */ + { 0x18, 0xea }, /* (default) */ + { 0x19, 0x04 }, + { 0x1a, 0x93 }, + { 0x1b, 0x00 }, /* (default) */ + { 0x1e, 0xc4 }, /* (default) */ + { 0x1f, 0x04 }, /* (default) */ + { 0x20, 0x20 }, /* Enable 1st stage aperture correction */ + { 0x21, 0x10 }, /* Y offset */ + { 0x22, 0x88 }, /* U offset */ + { 0x23, 0xc0 }, /* Set XTAL power level */ + { 0x24, 0x53 }, /* AEC bright ratio */ + { 0x25, 0x7a }, /* AEC black ratio */ + { 0x26, 0xb2 }, /* BLC enable */ + { 0x27, 0xa2 }, /* Full output range */ + { 0x28, 0x01 }, /* (default) */ + { 0x29, 0x00 }, /* (default) */ + { 0x2a, 0x84 }, /* (default) */ + { 0x2b, 0xa8 }, /* Set custom frame rate */ + { 0x2c, 0xa0 }, /* (reserved) */ + { 0x2d, 0x95 }, /* Enable banding filter */ + { 0x2e, 0x88 }, /* V offset */ + { 0x33, 0x22 }, /* Luminance gamma on */ + { 0x34, 0xc7 }, /* A/D bias */ + { 0x36, 0x12 }, /* (reserved) */ + { 0x37, 0x63 }, /* (reserved) */ + { 0x38, 0x8b }, /* Quick AEC/AEB */ + { 0x39, 0x00 }, /* (default) */ + { 0x3a, 0x0f }, /* (default) */ + { 0x3b, 0x3c }, /* (default) */ + { 0x3c, 0x5c }, /* AEC controls */ + { 0x3d, 0x80 }, /* Drop 1 (bad) frame when AEC change */ + { 0x3e, 0x80 }, /* (default) */ + { 0x3f, 0x02 }, /* (default) */ + { 0x40, 0x10 }, /* (reserved) */ + { 0x41, 0x10 }, /* (reserved) */ + { 0x42, 0x00 }, /* (reserved) */ + { 0x43, 0x7f }, /* (reserved) */ + { 0x44, 0x80 }, /* (reserved) */ + { 0x45, 0x1c }, /* (reserved) */ + { 0x46, 0x1c }, /* (reserved) */ + { 0x47, 0x80 }, /* (reserved) */ + { 0x48, 0x5f }, /* (reserved) */ + { 0x49, 0x00 }, /* (reserved) */ + { 0x4a, 0x00 }, /* Color balance (undocumented) */ + { 0x4b, 0x80 }, /* Color balance (undocumented) */ + { 0x4c, 0x58 }, /* (reserved) */ + { 0x4d, 0xd2 }, /* U *= .938, V *= .838 */ + { 0x4e, 0xa0 }, /* (default) */ + { 0x4f, 0x04 }, /* UV 3-point average */ + { 0x50, 0xff }, /* (reserved) */ + { 0x51, 0x58 }, /* (reserved) */ + { 0x52, 0xc0 }, /* (reserved) */ + { 0x53, 0x42 }, /* (reserved) */ + { 0x27, 0xa6 }, /* Enable manual offset adj. (reg 21 & 22) */ + { 0x12, 0x20 }, + { 0x12, 0x24 }, + + { 0xff, 0xff }, /* END MARKER */ +}; + +/* This initializes the OV6x20 camera chip and relevant variables. 
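+ * It writes the OV511- or OV518-specific register table, depending on
+ * which bridge adapter the chip sits behind, then allocates the per-chip
+ * ov6x20 state and defaults auto-brightness and auto-exposure to on.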
*/ +static int ov6x20_init(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x20 *s; + int rc; + + DDEBUG(4, &c->dev, "entered"); + + switch (c->adapter->id) { + case I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV511: + rc = ov_write_regvals(c, regvals_init_6x20_511); + break; + case I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518: + rc = ov_write_regvals(c, regvals_init_6x20_518); + break; + default: + dev_err(&c->dev, "ov6x20: Unsupported adapter\n"); + rc = -ENODEV; + } + + if (rc < 0) + return rc; + + ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL); + if (!s) + return -ENOMEM; + memset(s, 0, sizeof *s); + + s->auto_brt = 1; + s->auto_exp = 1; + + return rc; +} + +static int ov6x20_free(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + kfree(ov->spriv); + return 0; +} + +static int ov6x20_set_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x20 *s = ov->spriv; + int rc; + int v = ctl->value; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_write(c, REG_CNT, v >> 8); + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_write(c, REG_BRT, v >> 8); + break; + case OVCAMCHIP_CID_SAT: + rc = ov_write(c, REG_SAT, v >> 8); + break; + case OVCAMCHIP_CID_HUE: + rc = ov_write(c, REG_RED, 0xFF - (v >> 8)); + if (rc < 0) + goto out; + + rc = ov_write(c, REG_BLUE, v >> 8); + break; + case OVCAMCHIP_CID_EXP: + rc = ov_write(c, REG_EXP, v); + break; + case OVCAMCHIP_CID_FREQ: + { + int sixty = (v == 60); + + rc = ov_write(c, 0x2b, sixty?0xa8:0x28); + if (rc < 0) + goto out; + + rc = ov_write(c, 0x2a, sixty?0x84:0xa4); + break; + } + case OVCAMCHIP_CID_BANDFILT: + rc = ov_write_mask(c, 0x2d, v?0x04:0x00, 0x04); + s->bandfilt = v; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + rc = ov_write_mask(c, 0x2d, v?0x10:0x00, 0x10); + s->auto_brt = v; + break; + case OVCAMCHIP_CID_AUTOEXP: + rc = ov_write_mask(c, 0x13, v?0x01:0x00, 0x01); + s->auto_exp = v; + break; + case OVCAMCHIP_CID_BACKLIGHT: + { + rc = ov_write_mask(c, 0x4e, v?0xe0:0xc0, 0xe0); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x29, v?0x08:0x00, 0x08); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x0e, v?0x80:0x00, 0x80); + s->backlight = v; + break; + } + case OVCAMCHIP_CID_MIRROR: + rc = ov_write_mask(c, 0x12, v?0x40:0x00, 0x40); + s->mirror = v; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + +out: + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, v, rc); + return rc; +} + +static int ov6x20_get_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x20 *s = ov->spriv; + int rc = 0; + unsigned char val = 0; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_read(c, REG_CNT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_read(c, REG_BRT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_read(c, REG_SAT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_HUE: + rc = ov_read(c, REG_BLUE, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_EXP: + rc = ov_read(c, REG_EXP, &val); + ctl->value = val; + break; + case OVCAMCHIP_CID_BANDFILT: + ctl->value = s->bandfilt; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + ctl->value = s->auto_brt; + break; + case OVCAMCHIP_CID_AUTOEXP: + ctl->value = s->auto_exp; + break; + case OVCAMCHIP_CID_BACKLIGHT: + ctl->value = s->backlight; + break; + case OVCAMCHIP_CID_MIRROR: 
+ ctl->value = s->mirror; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, ctl->value, rc); + return rc; +} + +static int ov6x20_mode_init(struct i2c_client *c, struct ovcamchip_window *win) +{ + /******** QCIF-specific regs ********/ + + ov_write(c, 0x14, win->quarter?0x24:0x04); + + /******** Palette-specific regs ********/ + + /* OV518 needs 8 bit multiplexed in color mode, and 16 bit in B&W */ + if (c->adapter->id == (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518)) { + if (win->format == VIDEO_PALETTE_GREY) + ov_write_mask(c, 0x13, 0x00, 0x20); + else + ov_write_mask(c, 0x13, 0x20, 0x20); + } else { + if (win->format == VIDEO_PALETTE_GREY) + ov_write_mask(c, 0x13, 0x20, 0x20); + else + ov_write_mask(c, 0x13, 0x00, 0x20); + } + + /******** Clock programming ********/ + + /* The OV6620 needs special handling. This prevents the + * severe banding that normally occurs */ + + /* Clock down */ + ov_write(c, 0x2a, 0x04); + + ov_write(c, 0x11, win->clockdiv); + + ov_write(c, 0x2a, 0x84); + /* This next setting is critical. It seems to improve + * the gain or the contrast. The "reserved" bits seem + * to have some effect in this case. */ + ov_write(c, 0x2d, 0x85); /* FIXME: This messes up banding filter */ + + return 0; +} + +static int ov6x20_set_window(struct i2c_client *c, struct ovcamchip_window *win) +{ + int ret, hwscale, vwscale; + + ret = ov6x20_mode_init(c, win); + if (ret < 0) + return ret; + + if (win->quarter) { + hwscale = 0; + vwscale = 0; + } else { + hwscale = 1; + vwscale = 1; /* The datasheet says 0; it's wrong */ + } + + ov_write(c, 0x17, HWSBASE + (win->x >> hwscale)); + ov_write(c, 0x18, HWEBASE + ((win->x + win->width) >> hwscale)); + ov_write(c, 0x19, VWSBASE + (win->y >> vwscale)); + ov_write(c, 0x1a, VWEBASE + ((win->y + win->height) >> vwscale)); + + return 0; +} + +static int ov6x20_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + switch (cmd) { + case OVCAMCHIP_CMD_S_CTRL: + return ov6x20_set_control(c, arg); + case OVCAMCHIP_CMD_G_CTRL: + return ov6x20_get_control(c, arg); + case OVCAMCHIP_CMD_S_MODE: + return ov6x20_set_window(c, arg); + default: + DDEBUG(2, &c->dev, "command not supported: %d", cmd); + return -ENOIOCTLCMD; + } +} + +struct ovcamchip_ops ov6x20_ops = { + .init = ov6x20_init, + .free = ov6x20_free, + .command = ov6x20_command, +}; diff --git a/drivers/media/video/ovcamchip/ov6x30.c b/drivers/media/video/ovcamchip/ov6x30.c new file mode 100644 index 000000000..44a842379 --- /dev/null +++ b/drivers/media/video/ovcamchip/ov6x30.c @@ -0,0 +1,374 @@ +/* OmniVision OV6630/OV6130 Camera Chip Support Code + * + * Copyright (c) 1999-2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. 
+ */ + +#define DEBUG + +#include +#include "ovcamchip_priv.h" + +/* Registers */ +#define REG_GAIN 0x00 /* gain [5:0] */ +#define REG_BLUE 0x01 /* blue gain */ +#define REG_RED 0x02 /* red gain */ +#define REG_SAT 0x03 /* saturation [7:3] */ +#define REG_CNT 0x05 /* Y contrast [3:0] */ +#define REG_BRT 0x06 /* Y brightness */ +#define REG_SHARP 0x07 /* sharpness */ +#define REG_WB_BLUE 0x0C /* WB blue ratio [5:0] */ +#define REG_WB_RED 0x0D /* WB red ratio [5:0] */ +#define REG_EXP 0x10 /* exposure */ + +/* Window parameters */ +#define HWSBASE 0x38 +#define HWEBASE 0x3A +#define VWSBASE 0x05 +#define VWEBASE 0x06 + +struct ov6x30 { + int auto_brt; + int auto_exp; + int backlight; + int bandfilt; + int mirror; +}; + +static struct ovcamchip_regvals regvals_init_6x30[] = { + { 0x12, 0x80 }, /* reset */ + { 0x00, 0x1f }, /* Gain */ + { 0x01, 0x99 }, /* Blue gain */ + { 0x02, 0x7c }, /* Red gain */ + { 0x03, 0xc0 }, /* Saturation */ + { 0x05, 0x0a }, /* Contrast */ + { 0x06, 0x95 }, /* Brightness */ + { 0x07, 0x2d }, /* Sharpness */ + { 0x0c, 0x20 }, + { 0x0d, 0x20 }, + { 0x0e, 0x20 }, + { 0x0f, 0x05 }, + { 0x10, 0x9a }, /* "exposure check" */ + { 0x11, 0x00 }, /* Pixel clock = fastest */ + { 0x12, 0x24 }, /* Enable AGC and AWB */ + { 0x13, 0x21 }, + { 0x14, 0x80 }, + { 0x15, 0x01 }, + { 0x16, 0x03 }, + { 0x17, 0x38 }, + { 0x18, 0xea }, + { 0x19, 0x04 }, + { 0x1a, 0x93 }, + { 0x1b, 0x00 }, + { 0x1e, 0xc4 }, + { 0x1f, 0x04 }, + { 0x20, 0x20 }, + { 0x21, 0x10 }, + { 0x22, 0x88 }, + { 0x23, 0xc0 }, /* Crystal circuit power level */ + { 0x25, 0x9a }, /* Increase AEC black pixel ratio */ + { 0x26, 0xb2 }, /* BLC enable */ + { 0x27, 0xa2 }, + { 0x28, 0x00 }, + { 0x29, 0x00 }, + { 0x2a, 0x84 }, /* (keep) */ + { 0x2b, 0xa8 }, /* (keep) */ + { 0x2c, 0xa0 }, + { 0x2d, 0x95 }, /* Enable auto-brightness */ + { 0x2e, 0x88 }, + { 0x33, 0x26 }, + { 0x34, 0x03 }, + { 0x36, 0x8f }, + { 0x37, 0x80 }, + { 0x38, 0x83 }, + { 0x39, 0x80 }, + { 0x3a, 0x0f }, + { 0x3b, 0x3c }, + { 0x3c, 0x1a }, + { 0x3d, 0x80 }, + { 0x3e, 0x80 }, + { 0x3f, 0x0e }, + { 0x40, 0x00 }, /* White bal */ + { 0x41, 0x00 }, /* White bal */ + { 0x42, 0x80 }, + { 0x43, 0x3f }, /* White bal */ + { 0x44, 0x80 }, + { 0x45, 0x20 }, + { 0x46, 0x20 }, + { 0x47, 0x80 }, + { 0x48, 0x7f }, + { 0x49, 0x00 }, + { 0x4a, 0x00 }, + { 0x4b, 0x80 }, + { 0x4c, 0xd0 }, + { 0x4d, 0x10 }, /* U = 0.563u, V = 0.714v */ + { 0x4e, 0x40 }, + { 0x4f, 0x07 }, /* UV average mode, color killer: strongest */ + { 0x50, 0xff }, + { 0x54, 0x23 }, /* Max AGC gain: 18dB */ + { 0x55, 0xff }, + { 0x56, 0x12 }, + { 0x57, 0x81 }, /* (default) */ + { 0x58, 0x75 }, + { 0x59, 0x01 }, /* AGC dark current compensation: +1 */ + { 0x5a, 0x2c }, + { 0x5b, 0x0f }, /* AWB chrominance levels */ + { 0x5c, 0x10 }, + { 0x3d, 0x80 }, + { 0x27, 0xa6 }, + /* Toggle AWB off and on */ + { 0x12, 0x20 }, + { 0x12, 0x24 }, + + { 0xff, 0xff }, /* END MARKER */ +}; + +/* This initializes the OV6x30 camera chip and relevant variables. 
*/ +static int ov6x30_init(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x30 *s; + int rc; + + DDEBUG(4, &c->dev, "entered"); + + rc = ov_write_regvals(c, regvals_init_6x30); + if (rc < 0) + return rc; + + ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL); + if (!s) + return -ENOMEM; + memset(s, 0, sizeof *s); + + s->auto_brt = 1; + s->auto_exp = 1; + + return rc; +} + +static int ov6x30_free(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + kfree(ov->spriv); + return 0; +} + +static int ov6x30_set_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x30 *s = ov->spriv; + int rc; + int v = ctl->value; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_write_mask(c, REG_CNT, v >> 12, 0x0f); + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_write(c, REG_BRT, v >> 8); + break; + case OVCAMCHIP_CID_SAT: + rc = ov_write(c, REG_SAT, v >> 8); + break; + case OVCAMCHIP_CID_HUE: + rc = ov_write(c, REG_RED, 0xFF - (v >> 8)); + if (rc < 0) + goto out; + + rc = ov_write(c, REG_BLUE, v >> 8); + break; + case OVCAMCHIP_CID_EXP: + rc = ov_write(c, REG_EXP, v); + break; + case OVCAMCHIP_CID_FREQ: + { + int sixty = (v == 60); + + rc = ov_write(c, 0x2b, sixty?0xa8:0x28); + if (rc < 0) + goto out; + + rc = ov_write(c, 0x2a, sixty?0x84:0xa4); + break; + } + case OVCAMCHIP_CID_BANDFILT: + rc = ov_write_mask(c, 0x2d, v?0x04:0x00, 0x04); + s->bandfilt = v; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + rc = ov_write_mask(c, 0x2d, v?0x10:0x00, 0x10); + s->auto_brt = v; + break; + case OVCAMCHIP_CID_AUTOEXP: + rc = ov_write_mask(c, 0x28, v?0x00:0x10, 0x10); + s->auto_exp = v; + break; + case OVCAMCHIP_CID_BACKLIGHT: + { + rc = ov_write_mask(c, 0x4e, v?0x80:0x60, 0xe0); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x29, v?0x08:0x00, 0x08); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x28, v?0x02:0x00, 0x02); + s->backlight = v; + break; + } + case OVCAMCHIP_CID_MIRROR: + rc = ov_write_mask(c, 0x12, v?0x40:0x00, 0x40); + s->mirror = v; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + +out: + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, v, rc); + return rc; +} + +static int ov6x30_get_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x30 *s = ov->spriv; + int rc = 0; + unsigned char val = 0; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_read(c, REG_CNT, &val); + ctl->value = (val & 0x0f) << 12; + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_read(c, REG_BRT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_read(c, REG_SAT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_HUE: + rc = ov_read(c, REG_BLUE, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_EXP: + rc = ov_read(c, REG_EXP, &val); + ctl->value = val; + break; + case OVCAMCHIP_CID_BANDFILT: + ctl->value = s->bandfilt; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + ctl->value = s->auto_brt; + break; + case OVCAMCHIP_CID_AUTOEXP: + ctl->value = s->auto_exp; + break; + case OVCAMCHIP_CID_BACKLIGHT: + ctl->value = s->backlight; + break; + case OVCAMCHIP_CID_MIRROR: + ctl->value = s->mirror; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, ctl->value, rc); + return rc; +} + +static int 
ov6x30_mode_init(struct i2c_client *c, struct ovcamchip_window *win) +{ + /******** QCIF-specific regs ********/ + + ov_write_mask(c, 0x14, win->quarter?0x20:0x00, 0x20); + + /******** Palette-specific regs ********/ + + if (win->format == VIDEO_PALETTE_GREY) { + if (c->adapter->id == (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518)) { + /* Do nothing - we're already in 8-bit mode */ + } else { + ov_write_mask(c, 0x13, 0x20, 0x20); + } + } else { + /* The OV518 needs special treatment. Although both the OV518 + * and the OV6630 support a 16-bit video bus, only the 8 bit Y + * bus is actually used. The UV bus is tied to ground. + * Therefore, the OV6630 needs to be in 8-bit multiplexed + * output mode */ + + if (c->adapter->id == (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518)) { + /* Do nothing - we want to stay in 8-bit mode */ + /* Warning: Messing with reg 0x13 breaks OV518 color */ + } else { + ov_write_mask(c, 0x13, 0x00, 0x20); + } + } + + /******** Clock programming ********/ + + ov_write(c, 0x11, win->clockdiv); + + return 0; +} + +static int ov6x30_set_window(struct i2c_client *c, struct ovcamchip_window *win) +{ + int ret, hwscale, vwscale; + + ret = ov6x30_mode_init(c, win); + if (ret < 0) + return ret; + + if (win->quarter) { + hwscale = 0; + vwscale = 0; + } else { + hwscale = 1; + vwscale = 1; /* The datasheet says 0; it's wrong */ + } + + ov_write(c, 0x17, HWSBASE + (win->x >> hwscale)); + ov_write(c, 0x18, HWEBASE + ((win->x + win->width) >> hwscale)); + ov_write(c, 0x19, VWSBASE + (win->y >> vwscale)); + ov_write(c, 0x1a, VWEBASE + ((win->y + win->height) >> vwscale)); + + return 0; +} + +static int ov6x30_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + switch (cmd) { + case OVCAMCHIP_CMD_S_CTRL: + return ov6x30_set_control(c, arg); + case OVCAMCHIP_CMD_G_CTRL: + return ov6x30_get_control(c, arg); + case OVCAMCHIP_CMD_S_MODE: + return ov6x30_set_window(c, arg); + default: + DDEBUG(2, &c->dev, "command not supported: %d", cmd); + return -ENOIOCTLCMD; + } +} + +struct ovcamchip_ops ov6x30_ops = { + .init = ov6x30_init, + .free = ov6x30_free, + .command = ov6x30_command, +}; diff --git a/drivers/media/video/ovcamchip/ov76be.c b/drivers/media/video/ovcamchip/ov76be.c new file mode 100644 index 000000000..29bbdc05e --- /dev/null +++ b/drivers/media/video/ovcamchip/ov76be.c @@ -0,0 +1,303 @@ +/* OmniVision OV76BE Camera Chip Support Code + * + * Copyright (c) 1999-2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. + */ + +#define DEBUG + +#include +#include "ovcamchip_priv.h" + +/* OV7610 registers: Since the OV76BE is undocumented, we'll settle for these + * for now. 
*/ +#define REG_GAIN 0x00 /* gain [5:0] */ +#define REG_BLUE 0x01 /* blue channel balance */ +#define REG_RED 0x02 /* red channel balance */ +#define REG_SAT 0x03 /* saturation */ +#define REG_CNT 0x05 /* Y contrast */ +#define REG_BRT 0x06 /* Y brightness */ +#define REG_BLUE_BIAS 0x0C /* blue channel bias [5:0] */ +#define REG_RED_BIAS 0x0D /* red channel bias [5:0] */ +#define REG_GAMMA_COEFF 0x0E /* gamma settings */ +#define REG_WB_RANGE 0x0F /* AEC/ALC/S-AWB settings */ +#define REG_EXP 0x10 /* manual exposure setting */ +#define REG_CLOCK 0x11 /* polarity/clock prescaler */ +#define REG_FIELD_DIVIDE 0x16 /* field interval/mode settings */ +#define REG_HWIN_START 0x17 /* horizontal window start */ +#define REG_HWIN_END 0x18 /* horizontal window end */ +#define REG_VWIN_START 0x19 /* vertical window start */ +#define REG_VWIN_END 0x1A /* vertical window end */ +#define REG_PIXEL_SHIFT 0x1B /* pixel shift */ +#define REG_YOFFSET 0x21 /* Y channel offset */ +#define REG_UOFFSET 0x22 /* U channel offset */ +#define REG_ECW 0x24 /* exposure white level for AEC */ +#define REG_ECB 0x25 /* exposure black level for AEC */ +#define REG_FRAMERATE_H 0x2A /* frame rate MSB + misc */ +#define REG_FRAMERATE_L 0x2B /* frame rate LSB */ +#define REG_ALC 0x2C /* Auto Level Control settings */ +#define REG_VOFFSET 0x2E /* V channel offset adjustment */ +#define REG_ARRAY_BIAS 0x2F /* array bias -- don't change */ +#define REG_YGAMMA 0x33 /* misc gamma settings [7:6] */ +#define REG_BIAS_ADJUST 0x34 /* misc bias settings */ + +/* Window parameters */ +#define HWSBASE 0x38 +#define HWEBASE 0x3a +#define VWSBASE 0x05 +#define VWEBASE 0x05 + +struct ov76be { + int auto_brt; + int auto_exp; + int bandfilt; + int mirror; +}; + +/* NOTE: These are the same as the 7x10 settings, but should eventually be + * optimized for the OV76BE */ +static struct ovcamchip_regvals regvals_init_76be[] = { + { 0x10, 0xff }, + { 0x16, 0x03 }, + { 0x28, 0x24 }, + { 0x2b, 0xac }, + { 0x12, 0x00 }, + { 0x38, 0x81 }, + { 0x28, 0x24 }, /* 0c */ + { 0x0f, 0x85 }, /* lg's setting */ + { 0x15, 0x01 }, + { 0x20, 0x1c }, + { 0x23, 0x2a }, + { 0x24, 0x10 }, + { 0x25, 0x8a }, + { 0x26, 0xa2 }, + { 0x27, 0xc2 }, + { 0x2a, 0x04 }, + { 0x2c, 0xfe }, + { 0x2d, 0x93 }, + { 0x30, 0x71 }, + { 0x31, 0x60 }, + { 0x32, 0x26 }, + { 0x33, 0x20 }, + { 0x34, 0x48 }, + { 0x12, 0x24 }, + { 0x11, 0x01 }, + { 0x0c, 0x24 }, + { 0x0d, 0x24 }, + { 0xff, 0xff }, /* END MARKER */ +}; + +/* This initializes the OV76be camera chip and relevant variables. 
*/ +static int ov76be_init(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov76be *s; + int rc; + + DDEBUG(4, &c->dev, "entered"); + + rc = ov_write_regvals(c, regvals_init_76be); + if (rc < 0) + return rc; + + ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL); + if (!s) + return -ENOMEM; + memset(s, 0, sizeof *s); + + s->auto_brt = 1; + s->auto_exp = 1; + + return rc; +} + +static int ov76be_free(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + kfree(ov->spriv); + return 0; +} + +static int ov76be_set_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov76be *s = ov->spriv; + int rc; + int v = ctl->value; + + switch (ctl->id) { + case OVCAMCHIP_CID_BRIGHT: + rc = ov_write(c, REG_BRT, v >> 8); + break; + case OVCAMCHIP_CID_SAT: + rc = ov_write(c, REG_SAT, v >> 8); + break; + case OVCAMCHIP_CID_EXP: + rc = ov_write(c, REG_EXP, v); + break; + case OVCAMCHIP_CID_FREQ: + { + int sixty = (v == 60); + + rc = ov_write_mask(c, 0x2a, sixty?0x00:0x80, 0x80); + if (rc < 0) + goto out; + + rc = ov_write(c, 0x2b, sixty?0x00:0xac); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x76, 0x01, 0x01); + break; + } + case OVCAMCHIP_CID_BANDFILT: + rc = ov_write_mask(c, 0x2d, v?0x04:0x00, 0x04); + s->bandfilt = v; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + rc = ov_write_mask(c, 0x2d, v?0x10:0x00, 0x10); + s->auto_brt = v; + break; + case OVCAMCHIP_CID_AUTOEXP: + rc = ov_write_mask(c, 0x13, v?0x01:0x00, 0x01); + s->auto_exp = v; + break; + case OVCAMCHIP_CID_MIRROR: + rc = ov_write_mask(c, 0x12, v?0x40:0x00, 0x40); + s->mirror = v; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + +out: + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, v, rc); + return rc; +} + +static int ov76be_get_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov76be *s = ov->spriv; + int rc = 0; + unsigned char val = 0; + + switch (ctl->id) { + case OVCAMCHIP_CID_BRIGHT: + rc = ov_read(c, REG_BRT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_read(c, REG_SAT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_EXP: + rc = ov_read(c, REG_EXP, &val); + ctl->value = val; + break; + case OVCAMCHIP_CID_BANDFILT: + ctl->value = s->bandfilt; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + ctl->value = s->auto_brt; + break; + case OVCAMCHIP_CID_AUTOEXP: + ctl->value = s->auto_exp; + break; + case OVCAMCHIP_CID_MIRROR: + ctl->value = s->mirror; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, ctl->value, rc); + return rc; +} + +static int ov76be_mode_init(struct i2c_client *c, struct ovcamchip_window *win) +{ + int qvga = win->quarter; + + /******** QVGA-specific regs ********/ + + ov_write(c, 0x14, qvga?0xa4:0x84); + + /******** Palette-specific regs ********/ + + if (win->format == VIDEO_PALETTE_GREY) { + ov_write_mask(c, 0x0e, 0x40, 0x40); + ov_write_mask(c, 0x13, 0x20, 0x20); + } else { + ov_write_mask(c, 0x0e, 0x00, 0x40); + ov_write_mask(c, 0x13, 0x00, 0x20); + } + + /******** Clock programming ********/ + + ov_write(c, 0x11, win->clockdiv); + + /******** Resolution-specific ********/ + + if (win->width == 640 && win->height == 480) + ov_write(c, 0x35, 0x9e); + else + ov_write(c, 0x35, 0x1e); + + return 0; +} + +static int 
ov76be_set_window(struct i2c_client *c, struct ovcamchip_window *win) +{ + int ret, hwscale, vwscale; + + ret = ov76be_mode_init(c, win); + if (ret < 0) + return ret; + + if (win->quarter) { + hwscale = 1; + vwscale = 0; + } else { + hwscale = 2; + vwscale = 1; + } + + ov_write(c, 0x17, HWSBASE + (win->x >> hwscale)); + ov_write(c, 0x18, HWEBASE + ((win->x + win->width) >> hwscale)); + ov_write(c, 0x19, VWSBASE + (win->y >> vwscale)); + ov_write(c, 0x1a, VWEBASE + ((win->y + win->height) >> vwscale)); + + return 0; +} + +static int ov76be_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + switch (cmd) { + case OVCAMCHIP_CMD_S_CTRL: + return ov76be_set_control(c, arg); + case OVCAMCHIP_CMD_G_CTRL: + return ov76be_get_control(c, arg); + case OVCAMCHIP_CMD_S_MODE: + return ov76be_set_window(c, arg); + default: + DDEBUG(2, &c->dev, "command not supported: %d", cmd); + return -ENOIOCTLCMD; + } +} + +struct ovcamchip_ops ov76be_ops = { + .init = ov76be_init, + .free = ov76be_free, + .command = ov76be_command, +}; diff --git a/drivers/media/video/ovcamchip/ov7x10.c b/drivers/media/video/ovcamchip/ov7x10.c new file mode 100644 index 000000000..6c383d4b1 --- /dev/null +++ b/drivers/media/video/ovcamchip/ov7x10.c @@ -0,0 +1,335 @@ +/* OmniVision OV7610/OV7110 Camera Chip Support Code + * + * Copyright (c) 1999-2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * Color fixes by by Orion Sky Lawlor (2/26/2000) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. + */ + +#define DEBUG + +#include +#include "ovcamchip_priv.h" + +/* Registers */ +#define REG_GAIN 0x00 /* gain [5:0] */ +#define REG_BLUE 0x01 /* blue channel balance */ +#define REG_RED 0x02 /* red channel balance */ +#define REG_SAT 0x03 /* saturation */ +#define REG_CNT 0x05 /* Y contrast */ +#define REG_BRT 0x06 /* Y brightness */ +#define REG_BLUE_BIAS 0x0C /* blue channel bias [5:0] */ +#define REG_RED_BIAS 0x0D /* red channel bias [5:0] */ +#define REG_GAMMA_COEFF 0x0E /* gamma settings */ +#define REG_WB_RANGE 0x0F /* AEC/ALC/S-AWB settings */ +#define REG_EXP 0x10 /* manual exposure setting */ +#define REG_CLOCK 0x11 /* polarity/clock prescaler */ +#define REG_FIELD_DIVIDE 0x16 /* field interval/mode settings */ +#define REG_HWIN_START 0x17 /* horizontal window start */ +#define REG_HWIN_END 0x18 /* horizontal window end */ +#define REG_VWIN_START 0x19 /* vertical window start */ +#define REG_VWIN_END 0x1A /* vertical window end */ +#define REG_PIXEL_SHIFT 0x1B /* pixel shift */ +#define REG_YOFFSET 0x21 /* Y channel offset */ +#define REG_UOFFSET 0x22 /* U channel offset */ +#define REG_ECW 0x24 /* exposure white level for AEC */ +#define REG_ECB 0x25 /* exposure black level for AEC */ +#define REG_FRAMERATE_H 0x2A /* frame rate MSB + misc */ +#define REG_FRAMERATE_L 0x2B /* frame rate LSB */ +#define REG_ALC 0x2C /* Auto Level Control settings */ +#define REG_VOFFSET 0x2E /* V channel offset adjustment */ +#define REG_ARRAY_BIAS 0x2F /* array bias -- don't change */ +#define REG_YGAMMA 0x33 /* misc gamma settings [7:6] */ +#define REG_BIAS_ADJUST 0x34 /* misc bias settings */ + +/* Window parameters */ +#define HWSBASE 0x38 +#define HWEBASE 0x3a +#define VWSBASE 0x05 +#define VWEBASE 0x05 + +struct ov7x10 { + int auto_brt; + int auto_exp; + int bandfilt; + 
int mirror; +}; + +/* Lawrence Glaister reports: + * + * Register 0x0f in the 7610 has the following effects: + * + * 0x85 (AEC method 1): Best overall, good contrast range + * 0x45 (AEC method 2): Very overexposed + * 0xa5 (spec sheet default): Ok, but the black level is + * shifted resulting in loss of contrast + * 0x05 (old driver setting): very overexposed, too much + * contrast + */ +static struct ovcamchip_regvals regvals_init_7x10[] = { + { 0x10, 0xff }, + { 0x16, 0x03 }, + { 0x28, 0x24 }, + { 0x2b, 0xac }, + { 0x12, 0x00 }, + { 0x38, 0x81 }, + { 0x28, 0x24 }, /* 0c */ + { 0x0f, 0x85 }, /* lg's setting */ + { 0x15, 0x01 }, + { 0x20, 0x1c }, + { 0x23, 0x2a }, + { 0x24, 0x10 }, + { 0x25, 0x8a }, + { 0x26, 0xa2 }, + { 0x27, 0xc2 }, + { 0x2a, 0x04 }, + { 0x2c, 0xfe }, + { 0x2d, 0x93 }, + { 0x30, 0x71 }, + { 0x31, 0x60 }, + { 0x32, 0x26 }, + { 0x33, 0x20 }, + { 0x34, 0x48 }, + { 0x12, 0x24 }, + { 0x11, 0x01 }, + { 0x0c, 0x24 }, + { 0x0d, 0x24 }, + { 0xff, 0xff }, /* END MARKER */ +}; + +/* This initializes the OV7x10 camera chip and relevant variables. */ +static int ov7x10_init(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x10 *s; + int rc; + + DDEBUG(4, &c->dev, "entered"); + + rc = ov_write_regvals(c, regvals_init_7x10); + if (rc < 0) + return rc; + + ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL); + if (!s) + return -ENOMEM; + memset(s, 0, sizeof *s); + + s->auto_brt = 1; + s->auto_exp = 1; + + return rc; +} + +static int ov7x10_free(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + kfree(ov->spriv); + return 0; +} + +static int ov7x10_set_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x10 *s = ov->spriv; + int rc; + int v = ctl->value; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_write(c, REG_CNT, v >> 8); + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_write(c, REG_BRT, v >> 8); + break; + case OVCAMCHIP_CID_SAT: + rc = ov_write(c, REG_SAT, v >> 8); + break; + case OVCAMCHIP_CID_HUE: + rc = ov_write(c, REG_RED, 0xFF - (v >> 8)); + if (rc < 0) + goto out; + + rc = ov_write(c, REG_BLUE, v >> 8); + break; + case OVCAMCHIP_CID_EXP: + rc = ov_write(c, REG_EXP, v); + break; + case OVCAMCHIP_CID_FREQ: + { + int sixty = (v == 60); + + rc = ov_write_mask(c, 0x2a, sixty?0x00:0x80, 0x80); + if (rc < 0) + goto out; + + rc = ov_write(c, 0x2b, sixty?0x00:0xac); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x13, 0x10, 0x10); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x13, 0x00, 0x10); + break; + } + case OVCAMCHIP_CID_BANDFILT: + rc = ov_write_mask(c, 0x2d, v?0x04:0x00, 0x04); + s->bandfilt = v; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + rc = ov_write_mask(c, 0x2d, v?0x10:0x00, 0x10); + s->auto_brt = v; + break; + case OVCAMCHIP_CID_AUTOEXP: + rc = ov_write_mask(c, 0x29, v?0x00:0x80, 0x80); + s->auto_exp = v; + break; + case OVCAMCHIP_CID_MIRROR: + rc = ov_write_mask(c, 0x12, v?0x40:0x00, 0x40); + s->mirror = v; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + +out: + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, v, rc); + return rc; +} + +static int ov7x10_get_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x10 *s = ov->spriv; + int rc = 0; + unsigned char val = 0; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_read(c, REG_CNT, &val); + ctl->value = val << 8; + break; + 
case OVCAMCHIP_CID_BRIGHT: + rc = ov_read(c, REG_BRT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_read(c, REG_SAT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_HUE: + rc = ov_read(c, REG_BLUE, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_EXP: + rc = ov_read(c, REG_EXP, &val); + ctl->value = val; + break; + case OVCAMCHIP_CID_BANDFILT: + ctl->value = s->bandfilt; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + ctl->value = s->auto_brt; + break; + case OVCAMCHIP_CID_AUTOEXP: + ctl->value = s->auto_exp; + break; + case OVCAMCHIP_CID_MIRROR: + ctl->value = s->mirror; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, ctl->value, rc); + return rc; +} + +static int ov7x10_mode_init(struct i2c_client *c, struct ovcamchip_window *win) +{ + int qvga = win->quarter; + + /******** QVGA-specific regs ********/ + + ov_write(c, 0x14, qvga?0x24:0x04); + + /******** Palette-specific regs ********/ + + if (win->format == VIDEO_PALETTE_GREY) { + ov_write_mask(c, 0x0e, 0x40, 0x40); + ov_write_mask(c, 0x13, 0x20, 0x20); + } else { + ov_write_mask(c, 0x0e, 0x00, 0x40); + ov_write_mask(c, 0x13, 0x00, 0x20); + } + + /******** Clock programming ********/ + + ov_write(c, 0x11, win->clockdiv); + + /******** Resolution-specific ********/ + + if (win->width == 640 && win->height == 480) + ov_write(c, 0x35, 0x9e); + else + ov_write(c, 0x35, 0x1e); + + return 0; +} + +static int ov7x10_set_window(struct i2c_client *c, struct ovcamchip_window *win) +{ + int ret, hwscale, vwscale; + + ret = ov7x10_mode_init(c, win); + if (ret < 0) + return ret; + + if (win->quarter) { + hwscale = 1; + vwscale = 0; + } else { + hwscale = 2; + vwscale = 1; + } + + ov_write(c, 0x17, HWSBASE + (win->x >> hwscale)); + ov_write(c, 0x18, HWEBASE + ((win->x + win->width) >> hwscale)); + ov_write(c, 0x19, VWSBASE + (win->y >> vwscale)); + ov_write(c, 0x1a, VWEBASE + ((win->y + win->height) >> vwscale)); + + return 0; +} + +static int ov7x10_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + switch (cmd) { + case OVCAMCHIP_CMD_S_CTRL: + return ov7x10_set_control(c, arg); + case OVCAMCHIP_CMD_G_CTRL: + return ov7x10_get_control(c, arg); + case OVCAMCHIP_CMD_S_MODE: + return ov7x10_set_window(c, arg); + default: + DDEBUG(2, &c->dev, "command not supported: %d", cmd); + return -ENOIOCTLCMD; + } +} + +struct ovcamchip_ops ov7x10_ops = { + .init = ov7x10_init, + .free = ov7x10_free, + .command = ov7x10_command, +}; diff --git a/drivers/media/video/ovcamchip/ov7x20.c b/drivers/media/video/ovcamchip/ov7x20.c new file mode 100644 index 000000000..3c8c48f33 --- /dev/null +++ b/drivers/media/video/ovcamchip/ov7x20.c @@ -0,0 +1,455 @@ +/* OmniVision OV7620/OV7120 Camera Chip Support Code + * + * Copyright (c) 1999-2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * OV7620 fixes by Charl P. Botha + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. 
+ */ + +#define DEBUG + +#include +#include "ovcamchip_priv.h" + +/* Registers */ +#define REG_GAIN 0x00 /* gain [5:0] */ +#define REG_BLUE 0x01 /* blue gain */ +#define REG_RED 0x02 /* red gain */ +#define REG_SAT 0x03 /* saturation */ +#define REG_BRT 0x06 /* Y brightness */ +#define REG_SHARP 0x07 /* analog sharpness */ +#define REG_BLUE_BIAS 0x0C /* WB blue ratio [5:0] */ +#define REG_RED_BIAS 0x0D /* WB red ratio [5:0] */ +#define REG_EXP 0x10 /* exposure */ + +/* Default control settings. Values are in terms of V4L2 controls. */ +#define OV7120_DFL_BRIGHT 0x60 +#define OV7620_DFL_BRIGHT 0x60 +#define OV7120_DFL_SAT 0xb0 +#define OV7620_DFL_SAT 0xc0 +#define DFL_AUTO_EXP 1 +#define DFL_AUTO_GAIN 1 +#define OV7120_DFL_GAIN 0x00 +#define OV7620_DFL_GAIN 0x00 +/* NOTE: Since autoexposure is the default, these aren't programmed into the + * OV7x20 chip. They are just here because V4L2 expects a default */ +#define OV7120_DFL_EXP 0x7f +#define OV7620_DFL_EXP 0x7f + +/* Window parameters */ +#define HWSBASE 0x2F /* From 7620.SET (spec is wrong) */ +#define HWEBASE 0x2F +#define VWSBASE 0x05 +#define VWEBASE 0x05 + +struct ov7x20 { + int auto_brt; + int auto_exp; + int auto_gain; + int backlight; + int bandfilt; + int mirror; +}; + +/* Contrast look-up table */ +static unsigned char ctab[] = { + 0x01, 0x05, 0x09, 0x11, 0x15, 0x35, 0x37, 0x57, + 0x5b, 0xa5, 0xa7, 0xc7, 0xc9, 0xcf, 0xef, 0xff +}; + +/* Settings for (Black & White) OV7120 camera chip */ +static struct ovcamchip_regvals regvals_init_7120[] = { + { 0x12, 0x80 }, /* reset */ + { 0x13, 0x00 }, /* Autoadjust off */ + { 0x12, 0x20 }, /* Disable AWB */ + { 0x13, DFL_AUTO_GAIN?0x01:0x00 }, /* Autoadjust on (if desired) */ + { 0x00, OV7120_DFL_GAIN }, + { 0x01, 0x80 }, + { 0x02, 0x80 }, + { 0x03, OV7120_DFL_SAT }, + { 0x06, OV7120_DFL_BRIGHT }, + { 0x07, 0x00 }, + { 0x0c, 0x20 }, + { 0x0d, 0x20 }, + { 0x11, 0x01 }, + { 0x14, 0x84 }, + { 0x15, 0x01 }, + { 0x16, 0x03 }, + { 0x17, 0x2f }, + { 0x18, 0xcf }, + { 0x19, 0x06 }, + { 0x1a, 0xf5 }, + { 0x1b, 0x00 }, + { 0x20, 0x08 }, + { 0x21, 0x80 }, + { 0x22, 0x80 }, + { 0x23, 0x00 }, + { 0x26, 0xa0 }, + { 0x27, 0xfa }, + { 0x28, 0x20 }, /* DON'T set bit 6. 
It is for the OV7620 only */ + { 0x29, DFL_AUTO_EXP?0x00:0x80 }, + { 0x2a, 0x10 }, + { 0x2b, 0x00 }, + { 0x2c, 0x88 }, + { 0x2d, 0x95 }, + { 0x2e, 0x80 }, + { 0x2f, 0x44 }, + { 0x60, 0x20 }, + { 0x61, 0x02 }, + { 0x62, 0x5f }, + { 0x63, 0xd5 }, + { 0x64, 0x57 }, + { 0x65, 0x83 }, /* OV says "don't change this value" */ + { 0x66, 0x55 }, + { 0x67, 0x92 }, + { 0x68, 0xcf }, + { 0x69, 0x76 }, + { 0x6a, 0x22 }, + { 0x6b, 0xe2 }, + { 0x6c, 0x40 }, + { 0x6d, 0x48 }, + { 0x6e, 0x80 }, + { 0x6f, 0x0d }, + { 0x70, 0x89 }, + { 0x71, 0x00 }, + { 0x72, 0x14 }, + { 0x73, 0x54 }, + { 0x74, 0xa0 }, + { 0x75, 0x8e }, + { 0x76, 0x00 }, + { 0x77, 0xff }, + { 0x78, 0x80 }, + { 0x79, 0x80 }, + { 0x7a, 0x80 }, + { 0x7b, 0xe6 }, + { 0x7c, 0x00 }, + { 0x24, 0x3a }, + { 0x25, 0x60 }, + { 0xff, 0xff }, /* END MARKER */ +}; + +/* Settings for (color) OV7620 camera chip */ +static struct ovcamchip_regvals regvals_init_7620[] = { + { 0x12, 0x80 }, /* reset */ + { 0x00, OV7620_DFL_GAIN }, + { 0x01, 0x80 }, + { 0x02, 0x80 }, + { 0x03, OV7620_DFL_SAT }, + { 0x06, OV7620_DFL_BRIGHT }, + { 0x07, 0x00 }, + { 0x0c, 0x24 }, + { 0x0c, 0x24 }, + { 0x0d, 0x24 }, + { 0x11, 0x01 }, + { 0x12, 0x24 }, + { 0x13, DFL_AUTO_GAIN?0x01:0x00 }, + { 0x14, 0x84 }, + { 0x15, 0x01 }, + { 0x16, 0x03 }, + { 0x17, 0x2f }, + { 0x18, 0xcf }, + { 0x19, 0x06 }, + { 0x1a, 0xf5 }, + { 0x1b, 0x00 }, + { 0x20, 0x18 }, + { 0x21, 0x80 }, + { 0x22, 0x80 }, + { 0x23, 0x00 }, + { 0x26, 0xa2 }, + { 0x27, 0xea }, + { 0x28, 0x20 }, + { 0x29, DFL_AUTO_EXP?0x00:0x80 }, + { 0x2a, 0x10 }, + { 0x2b, 0x00 }, + { 0x2c, 0x88 }, + { 0x2d, 0x91 }, + { 0x2e, 0x80 }, + { 0x2f, 0x44 }, + { 0x60, 0x27 }, + { 0x61, 0x02 }, + { 0x62, 0x5f }, + { 0x63, 0xd5 }, + { 0x64, 0x57 }, + { 0x65, 0x83 }, + { 0x66, 0x55 }, + { 0x67, 0x92 }, + { 0x68, 0xcf }, + { 0x69, 0x76 }, + { 0x6a, 0x22 }, + { 0x6b, 0x00 }, + { 0x6c, 0x02 }, + { 0x6d, 0x44 }, + { 0x6e, 0x80 }, + { 0x6f, 0x1d }, + { 0x70, 0x8b }, + { 0x71, 0x00 }, + { 0x72, 0x14 }, + { 0x73, 0x54 }, + { 0x74, 0x00 }, + { 0x75, 0x8e }, + { 0x76, 0x00 }, + { 0x77, 0xff }, + { 0x78, 0x80 }, + { 0x79, 0x80 }, + { 0x7a, 0x80 }, + { 0x7b, 0xe2 }, + { 0x7c, 0x00 }, + { 0xff, 0xff }, /* END MARKER */ +}; + +/* Returns index into the specified look-up table, with 'n' elements, for which + * the value is greater than or equal to "val". If a match isn't found, (n-1) + * is returned. The entries in the table must be in ascending order. */ +static inline int ov7x20_lut_find(unsigned char lut[], int n, unsigned char val) +{ + int i = 0; + + while (lut[i] < val && i < n) + i++; + + return i; +} + +/* This initializes the OV7x20 camera chip and relevant variables. 
*/ +static int ov7x20_init(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x20 *s; + int rc; + + DDEBUG(4, &c->dev, "entered"); + + if (ov->mono) + rc = ov_write_regvals(c, regvals_init_7120); + else + rc = ov_write_regvals(c, regvals_init_7620); + + if (rc < 0) + return rc; + + ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL); + if (!s) + return -ENOMEM; + memset(s, 0, sizeof *s); + + s->auto_brt = 1; + s->auto_exp = DFL_AUTO_EXP; + s->auto_gain = DFL_AUTO_GAIN; + + return 0; +} + +static int ov7x20_free(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + kfree(ov->spriv); + return 0; +} + +static int ov7x20_set_v4l1_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x20 *s = ov->spriv; + int rc; + int v = ctl->value; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + { + /* Use Y gamma control instead. Bit 0 enables it. */ + rc = ov_write(c, 0x64, ctab[v >> 12]); + break; + } + case OVCAMCHIP_CID_BRIGHT: + /* 7620 doesn't like manual changes when in auto mode */ + if (!s->auto_brt) + rc = ov_write(c, REG_BRT, v >> 8); + else + rc = 0; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_write(c, REG_SAT, v >> 8); + break; + case OVCAMCHIP_CID_EXP: + if (!s->auto_exp) + rc = ov_write(c, REG_EXP, v); + else + rc = -EBUSY; + break; + case OVCAMCHIP_CID_FREQ: + { + int sixty = (v == 60); + + rc = ov_write_mask(c, 0x2a, sixty?0x00:0x80, 0x80); + if (rc < 0) + goto out; + + rc = ov_write(c, 0x2b, sixty?0x00:0xac); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x76, 0x01, 0x01); + break; + } + case OVCAMCHIP_CID_BANDFILT: + rc = ov_write_mask(c, 0x2d, v?0x04:0x00, 0x04); + s->bandfilt = v; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + rc = ov_write_mask(c, 0x2d, v?0x10:0x00, 0x10); + s->auto_brt = v; + break; + case OVCAMCHIP_CID_AUTOEXP: + rc = ov_write_mask(c, 0x13, v?0x01:0x00, 0x01); + s->auto_exp = v; + break; + case OVCAMCHIP_CID_BACKLIGHT: + { + rc = ov_write_mask(c, 0x68, v?0xe0:0xc0, 0xe0); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x29, v?0x08:0x00, 0x08); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x28, v?0x02:0x00, 0x02); + s->backlight = v; + break; + } + case OVCAMCHIP_CID_MIRROR: + rc = ov_write_mask(c, 0x12, v?0x40:0x00, 0x40); + s->mirror = v; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + +out: + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, v, rc); + return rc; +} + +static int ov7x20_get_v4l1_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x20 *s = ov->spriv; + int rc = 0; + unsigned char val = 0; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_read(c, 0x64, &val); + ctl->value = ov7x20_lut_find(ctab, 16, val) << 12; + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_read(c, REG_BRT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_read(c, REG_SAT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_EXP: + rc = ov_read(c, REG_EXP, &val); + ctl->value = val; + break; + case OVCAMCHIP_CID_BANDFILT: + ctl->value = s->bandfilt; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + ctl->value = s->auto_brt; + break; + case OVCAMCHIP_CID_AUTOEXP: + ctl->value = s->auto_exp; + break; + case OVCAMCHIP_CID_BACKLIGHT: + ctl->value = s->backlight; + break; + case OVCAMCHIP_CID_MIRROR: + ctl->value = s->mirror; + break; + default: + DDEBUG(2, &c->dev, "control not 
supported: %d", ctl->id); + return -EPERM; + } + + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, ctl->value, rc); + return rc; +} + +static int ov7x20_mode_init(struct i2c_client *c, struct ovcamchip_window *win) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + int qvga = win->quarter; + + /******** QVGA-specific regs ********/ + ov_write_mask(c, 0x14, qvga?0x20:0x00, 0x20); + ov_write_mask(c, 0x28, qvga?0x00:0x20, 0x20); + ov_write(c, 0x24, qvga?0x20:0x3a); + ov_write(c, 0x25, qvga?0x30:0x60); + ov_write_mask(c, 0x2d, qvga?0x40:0x00, 0x40); + if (!ov->mono) + ov_write_mask(c, 0x67, qvga?0xf0:0x90, 0xf0); + ov_write_mask(c, 0x74, qvga?0x20:0x00, 0x20); + + /******** Clock programming ********/ + + ov_write(c, 0x11, win->clockdiv); + + return 0; +} + +static int ov7x20_set_window(struct i2c_client *c, struct ovcamchip_window *win) +{ + int ret, hwscale, vwscale; + + ret = ov7x20_mode_init(c, win); + if (ret < 0) + return ret; + + if (win->quarter) { + hwscale = 1; + vwscale = 0; + } else { + hwscale = 2; + vwscale = 1; + } + + ov_write(c, 0x17, HWSBASE + (win->x >> hwscale)); + ov_write(c, 0x18, HWEBASE + ((win->x + win->width) >> hwscale)); + ov_write(c, 0x19, VWSBASE + (win->y >> vwscale)); + ov_write(c, 0x1a, VWEBASE + ((win->y + win->height) >> vwscale)); + + return 0; +} + +static int ov7x20_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + switch (cmd) { + case OVCAMCHIP_CMD_S_CTRL: + return ov7x20_set_v4l1_control(c, arg); + case OVCAMCHIP_CMD_G_CTRL: + return ov7x20_get_v4l1_control(c, arg); + case OVCAMCHIP_CMD_S_MODE: + return ov7x20_set_window(c, arg); + default: + DDEBUG(2, &c->dev, "command not supported: %d", cmd); + return -ENOIOCTLCMD; + } +} + +struct ovcamchip_ops ov7x20_ops = { + .init = ov7x20_init, + .free = ov7x20_free, + .command = ov7x20_command, +}; diff --git a/drivers/media/video/ovcamchip/ovcamchip_core.c b/drivers/media/video/ovcamchip/ovcamchip_core.c new file mode 100644 index 000000000..d88956a26 --- /dev/null +++ b/drivers/media/video/ovcamchip/ovcamchip_core.c @@ -0,0 +1,446 @@ +/* Shared Code for OmniVision Camera Chip Drivers + * + * Copyright (c) 2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. + */ + +#define DEBUG + +#include +#include +#include +#include +#include "ovcamchip_priv.h" + +#define DRIVER_VERSION "v2.27 for Linux 2.6" +#define DRIVER_AUTHOR "Mark McClelland " +#define DRIVER_DESC "OV camera chip I2C driver" + +#define PINFO(fmt, args...) printk(KERN_INFO "ovcamchip: " fmt "\n" , ## args); +#define PERROR(fmt, args...) printk(KERN_ERR "ovcamchip: " fmt "\n" , ## args); + +#ifdef DEBUG +int ovcamchip_debug = 0; +static int debug; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, + "Debug level: 0=none, 1=inits, 2=warning, 3=config, 4=functions, 5=all"); +#endif + +/* By default, let bridge driver tell us if chip is monochrome. mono=0 + * will ignore that and always treat chips as color. mono=1 will force + * monochrome mode for all chips. 
*/ +static int mono = -1; +module_param(mono, int, 0); +MODULE_PARM_DESC(mono, + "1=chips are monochrome (OVx1xx), 0=force color, -1=autodetect (default)"); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); + +/* Registers common to all chips, that are needed for detection */ +#define GENERIC_REG_ID_HIGH 0x1C /* manufacturer ID MSB */ +#define GENERIC_REG_ID_LOW 0x1D /* manufacturer ID LSB */ +#define GENERIC_REG_COM_I 0x29 /* misc ID bits */ + +extern struct ovcamchip_ops ov6x20_ops; +extern struct ovcamchip_ops ov6x30_ops; +extern struct ovcamchip_ops ov7x10_ops; +extern struct ovcamchip_ops ov7x20_ops; +extern struct ovcamchip_ops ov76be_ops; + +static char *chip_names[NUM_CC_TYPES] = { + [CC_UNKNOWN] = "Unknown chip", + [CC_OV76BE] = "OV76BE", + [CC_OV7610] = "OV7610", + [CC_OV7620] = "OV7620", + [CC_OV7620AE] = "OV7620AE", + [CC_OV6620] = "OV6620", + [CC_OV6630] = "OV6630", + [CC_OV6630AE] = "OV6630AE", + [CC_OV6630AF] = "OV6630AF", +}; + +/* Forward declarations */ +static struct i2c_driver driver; +static struct i2c_client client_template; + +/* ----------------------------------------------------------------------- */ + +int ov_write_regvals(struct i2c_client *c, struct ovcamchip_regvals *rvals) +{ + int rc; + + while (rvals->reg != 0xff) { + rc = ov_write(c, rvals->reg, rvals->val); + if (rc < 0) + return rc; + rvals++; + } + + return 0; +} + +/* Writes bits at positions specified by mask to an I2C reg. Bits that are in + * the same position as 1's in "mask" are cleared and set to "value". Bits + * that are in the same position as 0's in "mask" are preserved, regardless + * of their respective state in "value". + */ +int ov_write_mask(struct i2c_client *c, + unsigned char reg, + unsigned char value, + unsigned char mask) +{ + int rc; + unsigned char oldval, newval; + + if (mask == 0xff) { + newval = value; + } else { + rc = ov_read(c, reg, &oldval); + if (rc < 0) + return rc; + + oldval &= (~mask); /* Clear the masked bits */ + value &= mask; /* Enforce mask on value */ + newval = oldval | value; /* Set the desired bits */ + } + + return ov_write(c, reg, newval); +} + +/* ----------------------------------------------------------------------- */ + +/* Reset the chip and ensure that I2C is synchronized. Returns <0 if failure. + */ +static int init_camchip(struct i2c_client *c) +{ + int i, success; + unsigned char high, low; + + /* Reset the chip */ + ov_write(c, 0x12, 0x80); + + /* Wait for it to initialize */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1 + 150 * HZ / 1000); + + for (i = 0, success = 0; i < I2C_DETECT_RETRIES && !success; i++) { + if (ov_read(c, GENERIC_REG_ID_HIGH, &high) >= 0) { + if (ov_read(c, GENERIC_REG_ID_LOW, &low) >= 0) { + if (high == 0x7F && low == 0xA2) { + success = 1; + continue; + } + } + } + + /* Reset the chip */ + ov_write(c, 0x12, 0x80); + + /* Wait for it to initialize */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(1 + 150 * HZ / 1000); + + /* Dummy read to sync I2C */ + ov_read(c, 0x00, &low); + } + + if (!success) + return -EIO; + + PDEBUG(1, "I2C synced in %d attempt(s)", i); + + return 0; +} + +/* This detects the OV7610, OV7620, or OV76BE chip. 
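+ * Detection keys off the low two bits of COM_I (register 0x29): a value of
+ * 3 identifies an OV7610, 0 identifies an OV7620, and 1 means either an
+ * OV7620AE or an OV76BE, which are then told apart by bit 0 of register
+ * 0x15; any other combination is rejected as an unknown chip.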
*/ +static int ov7xx0_detect(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + int rc; + unsigned char val; + + PDEBUG(4, ""); + + /* Detect chip (sub)type */ + rc = ov_read(c, GENERIC_REG_COM_I, &val); + if (rc < 0) { + PERROR("Error detecting ov7xx0 type"); + return rc; + } + + if ((val & 3) == 3) { + PINFO("Camera chip is an OV7610"); + ov->subtype = CC_OV7610; + } else if ((val & 3) == 1) { + rc = ov_read(c, 0x15, &val); + if (rc < 0) { + PERROR("Error detecting ov7xx0 type"); + return rc; + } + + if (val & 1) { + PINFO("Camera chip is an OV7620AE"); + /* OV7620 is a close enough match for now. There are + * some definite differences though, so this should be + * fixed */ + ov->subtype = CC_OV7620; + } else { + PINFO("Camera chip is an OV76BE"); + ov->subtype = CC_OV76BE; + } + } else if ((val & 3) == 0) { + PINFO("Camera chip is an OV7620"); + ov->subtype = CC_OV7620; + } else { + PERROR("Unknown camera chip version: %d", val & 3); + return -ENOSYS; + } + + if (ov->subtype == CC_OV76BE) + ov->sops = &ov76be_ops; + else if (ov->subtype == CC_OV7620) + ov->sops = &ov7x20_ops; + else + ov->sops = &ov7x10_ops; + + return 0; +} + +/* This detects the OV6620, OV6630, OV6630AE, or OV6630AF chip. */ +static int ov6xx0_detect(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + int rc; + unsigned char val; + + PDEBUG(4, ""); + + /* Detect chip (sub)type */ + rc = ov_read(c, GENERIC_REG_COM_I, &val); + if (rc < 0) { + PERROR("Error detecting ov6xx0 type"); + return -1; + } + + if ((val & 3) == 0) { + ov->subtype = CC_OV6630; + PINFO("Camera chip is an OV6630"); + } else if ((val & 3) == 1) { + ov->subtype = CC_OV6620; + PINFO("Camera chip is an OV6620"); + } else if ((val & 3) == 2) { + ov->subtype = CC_OV6630; + PINFO("Camera chip is an OV6630AE"); + } else if ((val & 3) == 3) { + ov->subtype = CC_OV6630; + PINFO("Camera chip is an OV6630AF"); + } + + if (ov->subtype == CC_OV6620) + ov->sops = &ov6x20_ops; + else + ov->sops = &ov6x30_ops; + + return 0; +} + +static int ovcamchip_detect(struct i2c_client *c) +{ + /* Ideally we would just try a single register write and see if it NAKs. + * That isn't possible since the OV518 can't report I2C transaction + * failures. So, we have to try to initialize the chip (i.e. reset it + * and check the ID registers) to detect its presence. */ + + /* Test for 7xx0 */ + PDEBUG(3, "Testing for 0V7xx0"); + c->addr = OV7xx0_SID; + if (init_camchip(c) < 0) { + /* Test for 6xx0 */ + PDEBUG(3, "Testing for 0V6xx0"); + c->addr = OV6xx0_SID; + if (init_camchip(c) < 0) { + return -ENODEV; + } else { + if (ov6xx0_detect(c) < 0) { + PERROR("Failed to init OV6xx0"); + return -EIO; + } + } + } else { + if (ov7xx0_detect(c) < 0) { + PERROR("Failed to init OV7xx0"); + return -EIO; + } + } + + return 0; +} + +/* ----------------------------------------------------------------------- */ + +static int ovcamchip_attach(struct i2c_adapter *adap) +{ + int rc = 0; + struct ovcamchip *ov; + struct i2c_client *c; + + /* I2C is not a PnP bus, so we can never be certain that we're talking + * to the right chip. To prevent damage to EEPROMS and such, only + * attach to adapters that are known to contain OV camera chips. 
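+ * Currently that means the OV511, OV518, OVFX2 and W9968CF SMBus
+ * adapters; anything else is rejected below.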
*/ + + switch (adap->id) { + case (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV511): + case (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518): + case (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OVFX2): + case (I2C_ALGO_SMBUS | I2C_HW_SMBUS_W9968CF): + PDEBUG(1, "Adapter ID 0x%06x accepted", adap->id); + break; + default: + PDEBUG(1, "Adapter ID 0x%06x rejected", adap->id); + return -ENODEV; + } + + c = kmalloc(sizeof *c, GFP_KERNEL); + if (!c) { + rc = -ENOMEM; + goto no_client; + } + memcpy(c, &client_template, sizeof *c); + c->adapter = adap; + strcpy(i2c_clientname(c), "OV????"); + + ov = kmalloc(sizeof *ov, GFP_KERNEL); + if (!ov) { + rc = -ENOMEM; + goto no_ov; + } + memset(ov, 0, sizeof *ov); + i2c_set_clientdata(c, ov); + + rc = ovcamchip_detect(c); + if (rc < 0) + goto error; + + strcpy(i2c_clientname(c), chip_names[ov->subtype]); + + PDEBUG(1, "Camera chip detection complete"); + + i2c_attach_client(c); + + return rc; +error: + kfree(ov); +no_ov: + kfree(c); +no_client: + PDEBUG(1, "returning %d", rc); + return rc; +} + +static int ovcamchip_detach(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + int rc; + + rc = ov->sops->free(c); + if (rc < 0) + return rc; + + i2c_detach_client(c); + + kfree(ov); + kfree(c); + return 0; +} + +static int ovcamchip_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + if (!ov->initialized && + cmd != OVCAMCHIP_CMD_Q_SUBTYPE && + cmd != OVCAMCHIP_CMD_INITIALIZE) { + dev_err(&c->dev, "ERROR: Camera chip not initialized yet!\n"); + return -EPERM; + } + + switch (cmd) { + case OVCAMCHIP_CMD_Q_SUBTYPE: + { + *(int *)arg = ov->subtype; + return 0; + } + case OVCAMCHIP_CMD_INITIALIZE: + { + int rc; + + if (mono == -1) + ov->mono = *(int *)arg; + else + ov->mono = mono; + + if (ov->mono) { + if (ov->subtype != CC_OV7620) + dev_warn(&c->dev, "Warning: Monochrome not " + "implemented for this chip\n"); + else + dev_info(&c->dev, "Initializing chip as " + "monochrome\n"); + } + + rc = ov->sops->init(c); + if (rc < 0) + return rc; + + ov->initialized = 1; + return 0; + } + default: + return ov->sops->command(c, cmd, arg); + } +} + +/* ----------------------------------------------------------------------- */ + +static struct i2c_driver driver = { + .owner = THIS_MODULE, + .name = "ovcamchip", + .id = I2C_DRIVERID_OVCAMCHIP, + .class = I2C_CLASS_CAM_DIGITAL, + .flags = I2C_DF_NOTIFY, + .attach_adapter = ovcamchip_attach, + .detach_client = ovcamchip_detach, + .command = ovcamchip_command, +}; + +static struct i2c_client client_template = { + I2C_DEVNAME("(unset)"), + .id = -1, + .driver = &driver, +}; + +static int __init ovcamchip_init(void) +{ +#ifdef DEBUG + ovcamchip_debug = debug; +#endif + + PINFO(DRIVER_VERSION " : " DRIVER_DESC); + return i2c_add_driver(&driver); +} + +static void __exit ovcamchip_exit(void) +{ + i2c_del_driver(&driver); +} + +module_init(ovcamchip_init); +module_exit(ovcamchip_exit); diff --git a/drivers/media/video/ovcamchip/ovcamchip_priv.h b/drivers/media/video/ovcamchip/ovcamchip_priv.h new file mode 100644 index 000000000..575e612a5 --- /dev/null +++ b/drivers/media/video/ovcamchip/ovcamchip_priv.h @@ -0,0 +1,87 @@ +/* OmniVision* camera chip driver private definitions for core code and + * chip-specific code + * + * Copyright (c) 1999-2004 Mark McClelland + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your 
+ * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. + * + * * OmniVision is a trademark of OmniVision Technologies, Inc. This driver + * is not sponsored or developed by them. + */ + +#ifndef __LINUX_OVCAMCHIP_PRIV_H +#define __LINUX_OVCAMCHIP_PRIV_H + +#include + +#ifdef DEBUG +extern int ovcamchip_debug; +#endif + +#define PDEBUG(level, fmt, args...) \ + if (ovcamchip_debug >= (level)) pr_debug("[%s:%d] " fmt "\n", \ + __FUNCTION__, __LINE__ , ## args) + +#define DDEBUG(level, dev, fmt, args...) \ + if (ovcamchip_debug >= (level)) dev_dbg(dev, "[%s:%d] " fmt "\n", \ + __FUNCTION__, __LINE__ , ## args) + +/* Number of times to retry chip detection. Increase this if you are getting + * "Failed to init camera chip" */ +#define I2C_DETECT_RETRIES 10 + +struct ovcamchip_regvals { + unsigned char reg; + unsigned char val; +}; + +struct ovcamchip_ops { + int (*init)(struct i2c_client *); + int (*free)(struct i2c_client *); + int (*command)(struct i2c_client *, unsigned int, void *); +}; + +struct ovcamchip { + struct ovcamchip_ops *sops; + void *spriv; /* Private data for OV7x10.c etc... */ + int subtype; /* = SEN_OV7610 etc... */ + int mono; /* Monochrome chip? (invalid until init) */ + int initialized; /* OVCAMCHIP_CMD_INITIALIZE was successful */ +}; + +/* --------------------------------- */ +/* I2C I/O */ +/* --------------------------------- */ + +static inline int ov_read(struct i2c_client *c, unsigned char reg, + unsigned char *value) +{ + int rc; + + rc = i2c_smbus_read_byte_data(c, reg); + *value = (unsigned char) rc; + return rc; +} + +static inline int ov_write(struct i2c_client *c, unsigned char reg, + unsigned char value ) +{ + return i2c_smbus_write_byte_data(c, reg, value); +} + +/* --------------------------------- */ +/* FUNCTION PROTOTYPES */ +/* --------------------------------- */ + +/* Functions in ovcamchip_core.c */ + +extern int ov_write_regvals(struct i2c_client *c, + struct ovcamchip_regvals *rvals); + +extern int ov_write_mask(struct i2c_client *c, unsigned char reg, + unsigned char value, unsigned char mask); + +#endif diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c new file mode 100644 index 000000000..d1a785628 --- /dev/null +++ b/drivers/mtd/chips/cfi_util.c @@ -0,0 +1,92 @@ +/* + * Common Flash Interface support: + * Generic utility functions not dependant on command set + * + * Copyright (C) 2002 Red Hat + * Copyright (C) 2003 STMicroelectronics Limited + * + * This code is covered by the GPL. 
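+ * It provides cfi_read_pri(), which reads a chip's primary extended query
+ * table into a freshly allocated buffer, and cfi_fixup(), which applies
+ * fixup hooks whose manufacturer and device IDs match the probed chip.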
+ * + * $Id: cfi_util.c,v 1.4 2004/07/14 08:38:44 dwmw2 Exp $ + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +struct cfi_extquery * +cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name) +{ + struct cfi_private *cfi = map->fldrv_priv; + __u32 base = 0; // cfi->chips[0].start; + int ofs_factor = cfi->interleave * cfi->device_type; + int i; + struct cfi_extquery *extp = NULL; + + printk(" %s Extended Query Table at 0x%4.4X\n", name, adr); + if (!adr) + goto out; + + /* Switch it into Query Mode */ + cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); + + extp = kmalloc(size, GFP_KERNEL); + if (!extp) { + printk(KERN_ERR "Failed to allocate memory\n"); + goto out; + } + + /* Read in the Extended Query Table */ + for (i=0; iMajorVersion != '1' || + (extp->MinorVersion < '0' || extp->MinorVersion > '3')) { + printk(KERN_WARNING " Unknown %s Extended Query " + "version %c.%c.\n", name, extp->MajorVersion, + extp->MinorVersion); + kfree(extp); + extp = NULL; + goto out; + } + +out: + /* Make sure it's in read mode */ + cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xff, 0, base, map, cfi, cfi->device_type, NULL); + + return extp; +} + +EXPORT_SYMBOL(cfi_read_pri); + +void cfi_fixup(struct map_info *map, struct cfi_fixup* fixups) +{ + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_fixup *f; + + for (f=fixups; f->fixup; f++) { + if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) && + ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) { + f->fixup(map, f->param); + } + } +} + +EXPORT_SYMBOL(cfi_fixup); + +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c new file mode 100644 index 000000000..5f66e9bfb --- /dev/null +++ b/drivers/mtd/devices/phram.c @@ -0,0 +1,362 @@ +/** + * + * $Id: phram.c,v 1.1 2003/08/21 17:52:30 joern Exp $ + * + * Copyright (c) Jochen Schaeuble + * 07/2003 rewritten by Joern Engel + * + * DISCLAIMER: This driver makes use of Rusty's excellent module code, + * so it will not work for 2.4 without changes and it wont work for 2.4 + * as a module without major changes. Oh well! + * + * Usage: + * + * one commend line parameter per device, each in the form: + * phram=,, + * may be up to 63 characters. + * and can be octal, decimal or hexadecimal. If followed + * by "k", "M" or "G", the numbers will be interpreted as kilo, mega or + * gigabytes. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define ERROR(fmt, args...) printk(KERN_ERR "phram: " fmt , ## args) + +struct phram_mtd_list { + struct list_head list; + struct mtd_info *mtdinfo; +}; + +static LIST_HEAD(phram_list); + + + +int phram_erase(struct mtd_info *mtd, struct erase_info *instr) +{ + u_char *start = (u_char *)mtd->priv; + + if (instr->addr + instr->len > mtd->size) + return -EINVAL; + + memset(start + instr->addr, 0xff, instr->len); + + /* This'll catch a few races. Free the thing before returning :) + * I don't feel at all ashamed. This kind of thing is possible anyway + * with flash, but unlikely. 
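+ * Since the "erase" is only a memset of 0xff over RAM, the request is
+ * marked MTD_ERASE_DONE right away and the caller's callback is invoked
+ * if one was supplied (otherwise the erase_info is freed).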
+ */ + + instr->state = MTD_ERASE_DONE; + + if (instr->callback) + (*(instr->callback))(instr); + else + kfree(instr); + + return 0; +} + +int phram_point(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char **mtdbuf) +{ + u_char *start = (u_char *)mtd->priv; + + if (from + len > mtd->size) + return -EINVAL; + + *mtdbuf = start + from; + *retlen = len; + return 0; +} + +void phram_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from, size_t len) +{ +} + +int phram_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + u_char *start = (u_char *)mtd->priv; + + if (from + len > mtd->size) + return -EINVAL; + + memcpy(buf, start + from, len); + + *retlen = len; + return 0; +} + +int phram_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + u_char *start = (u_char *)mtd->priv; + + if (to + len > mtd->size) + return -EINVAL; + + memcpy(start + to, buf, len); + + *retlen = len; + return 0; +} + + + +static void unregister_devices(void) +{ + struct phram_mtd_list *this; + + list_for_each_entry(this, &phram_list, list) { + del_mtd_device(this->mtdinfo); + iounmap(this->mtdinfo->priv); + kfree(this->mtdinfo); + kfree(this); + } +} + +static int register_device(char *name, unsigned long start, unsigned long len) +{ + struct phram_mtd_list *new; + int ret = -ENOMEM; + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + goto out0; + + new->mtdinfo = kmalloc(sizeof(struct mtd_info), GFP_KERNEL); + if (!new->mtdinfo) + goto out1; + + memset(new->mtdinfo, 0, sizeof(struct mtd_info)); + + ret = -EIO; + new->mtdinfo->priv = ioremap(start, len); + if (!new->mtdinfo->priv) { + ERROR("ioremap failed\n"); + goto out2; + } + + + new->mtdinfo->name = name; + new->mtdinfo->size = len; + new->mtdinfo->flags = MTD_CAP_RAM | MTD_ERASEABLE | MTD_VOLATILE; + new->mtdinfo->erase = phram_erase; + new->mtdinfo->point = phram_point; + new->mtdinfo->unpoint = phram_unpoint; + new->mtdinfo->read = phram_read; + new->mtdinfo->write = phram_write; + new->mtdinfo->owner = THIS_MODULE; + new->mtdinfo->type = MTD_RAM; + new->mtdinfo->erasesize = 0x0; + + ret = -EAGAIN; + if (add_mtd_device(new->mtdinfo)) { + ERROR("Failed to register new device\n"); + goto out3; + } + + list_add_tail(&new->list, &phram_list); + return 0; + +out3: + iounmap(new->mtdinfo->priv); +out2: + kfree(new->mtdinfo); +out1: + kfree(new); +out0: + return ret; +} + +static int ustrtoul(const char *cp, char **endp, unsigned int base) +{ + unsigned long result = simple_strtoul(cp, endp, base); + + switch (**endp) { + case 'G': + result *= 1024; + case 'M': + result *= 1024; + case 'k': + result *= 1024; + endp++; + } + return result; +} + +static int parse_num32(uint32_t *num32, const char *token) +{ + char *endp; + unsigned long n; + + n = ustrtoul(token, &endp, 0); + if (*endp) + return -EINVAL; + + *num32 = n; + return 0; +} + +static int parse_name(char **pname, const char *token) +{ + size_t len; + char *name; + + len = strlen(token) + 1; + if (len > 64) + return -ENOSPC; + + name = kmalloc(len, GFP_KERNEL); + if (!name) + return -ENOMEM; + + strcpy(name, token); + + *pname = name; + return 0; +} + +#define parse_err(fmt, args...) 
do { \ + ERROR(fmt , ## args); \ + return 0; \ +} while (0) + +static int phram_setup(const char *val, struct kernel_param *kp) +{ + char buf[64+12+12], *str = buf; + char *token[3]; + char *name; + uint32_t start; + uint32_t len; + int i, ret; + + if (strnlen(val, sizeof(str)) >= sizeof(str)) + parse_err("parameter too long\n"); + + strcpy(str, val); + + for (i=0; i<3; i++) + token[i] = strsep(&str, ","); + + if (str) + parse_err("too many arguments\n"); + + if (!token[2]) + parse_err("not enough arguments\n"); + + ret = parse_name(&name, token[0]); + if (ret == -ENOMEM) + parse_err("out of memory\n"); + if (ret == -ENOSPC) + parse_err("name too long\n"); + if (ret) + return 0; + + ret = parse_num32(&start, token[1]); + if (ret) + parse_err("illegal start address\n"); + + ret = parse_num32(&len, token[2]); + if (ret) + parse_err("illegal device length\n"); + + register_device(name, start, len); + + return 0; +} + +module_param_call(phram, phram_setup, NULL, NULL, 000); +MODULE_PARM_DESC(phram, "Memory region to map. \"map=,\""); + +/* + * Just for compatibility with slram, this is horrible and should go someday. + */ +static int __init slram_setup(const char *val, struct kernel_param *kp) +{ + char buf[256], *str = buf; + + if (!val || !val[0]) + parse_err("no arguments to \"slram=\"\n"); + + if (strnlen(val, sizeof(str)) >= sizeof(str)) + parse_err("parameter too long\n"); + + strcpy(str, val); + + while (str) { + char *token[3]; + char *name; + uint32_t start; + uint32_t len; + int i, ret; + + for (i=0; i<3; i++) { + token[i] = strsep(&str, ","); + if (token[i]) + continue; + parse_err("wrong number of arguments to \"slram=\"\n"); + } + + /* name */ + ret = parse_name(&name, token[0]); + if (ret == -ENOMEM) + parse_err("of memory\n"); + if (ret == -ENOSPC) + parse_err("too long\n"); + if (ret) + return 1; + + /* start */ + ret = parse_num32(&start, token[1]); + if (ret) + parse_err("illegal start address\n"); + + /* len */ + if (token[2][0] == '+') + ret = parse_num32(&len, token[2] + 1); + else + ret = parse_num32(&len, token[2]); + + if (ret) + parse_err("illegal device length\n"); + + if (token[2][0] != '+') { + if (len < start) + parse_err("end < start\n"); + len -= start; + } + + register_device(name, start, len); + } + return 1; +} + +module_param_call(slram, slram_setup, NULL, NULL, 000); +MODULE_PARM_DESC(slram, "List of memory regions to map. \"map=,\""); + + +int __init init_phram(void) +{ + printk(KERN_ERR "phram loaded\n"); + return 0; +} + +static void __exit cleanup_phram(void) +{ + unregister_devices(); +} + +module_init(init_phram); +module_exit(cleanup_phram); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jörn Engel "); +MODULE_DESCRIPTION("MTD driver for physical RAM"); diff --git a/drivers/mtd/maps/db1550-flash.c b/drivers/mtd/maps/db1550-flash.c new file mode 100644 index 000000000..b2504047c --- /dev/null +++ b/drivers/mtd/maps/db1550-flash.c @@ -0,0 +1,188 @@ +/* + * Flash memory access on Alchemy Db1550 board + * + * $Id: db1550-flash.c,v 1.3 2004/07/14 17:45:40 dwmw2 Exp $ + * + * (C) 2004 Embedded Edge, LLC, based on db1550-flash.c: + * (C) 2003 Pete Popov + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#ifdef DEBUG_RW +#define DBG(x...) printk(x) +#else +#define DBG(x...) 
+#endif + +static unsigned long window_addr; +static unsigned long window_size; + + +static struct map_info db1550_map = { + .name = "Db1550 flash", +}; + +static unsigned char flash_bankwidth = 4; + +/* + * Support only 64MB NOR Flash parts + */ + +#if defined(CONFIG_MTD_DB1550_BOOT) && defined(CONFIG_MTD_DB1550_USER) +#define DB1550_BOTH_BANKS +#elif defined(CONFIG_MTD_DB1550_BOOT) && !defined(CONFIG_MTD_DB1550_USER) +#define DB1550_BOOT_ONLY +#elif !defined(CONFIG_MTD_DB1550_BOOT) && defined(CONFIG_MTD_DB1550_USER) +#define DB1550_USER_ONLY +#endif + +#ifdef DB1550_BOTH_BANKS +/* both banks will be used. Combine the first bank and the first + * part of the second bank together into a single jffs/jffs2 + * partition. + */ +static struct mtd_partition db1550_partitions[] = { + /* assume boot[2:0]:swap is '0000' or '1000', which translates to: + * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash + * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash + */ + { + .name = "User FS", + .size = (0x1FC00000 - 0x18000000), + .offset = 0x0000000 + },{ + .name = "yamon", + .size = 0x0100000, + .offset = MTDPART_OFS_APPEND, + .mask_flags = MTD_WRITEABLE + },{ + .name = "raw kernel", + .size = (0x300000 - 0x40000), /* last 256KB is yamon env */ + .offset = MTDPART_OFS_APPEND, + } +}; +#elif defined(DB1550_BOOT_ONLY) +static struct mtd_partition db1550_partitions[] = { + /* assume boot[2:0]:swap is '0000' or '1000', which translates to: + * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash + */ + { + .name = "User FS", + .size = 0x03c00000, + .offset = 0x0000000 + },{ + .name = "yamon", + .size = 0x0100000, + .offset = MTDPART_OFS_APPEND, + .mask_flags = MTD_WRITEABLE + },{ + .name = "raw kernel", + .size = (0x300000-0x40000), /* last 256KB is yamon env */ + .offset = MTDPART_OFS_APPEND, + } +}; +#elif defined(DB1550_USER_ONLY) +static struct mtd_partition db1550_partitions[] = { + /* assume boot[2:0]:swap is '0000' or '1000', which translates to: + * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash + */ + { + .name = "User FS", + .size = (0x4000000 - 0x200000), /* reserve 2MB for raw kernel */ + .offset = 0x0000000 + },{ + .name = "raw kernel", + .size = MTDPART_SIZ_FULL, + .offset = MTDPART_OFS_APPEND, + } +}; +#else +#error MTD_DB1550 define combo error /* should never happen */ +#endif + +#define NB_OF(x) (sizeof(x)/sizeof(x[0])) + +static struct mtd_info *mymtd; + +/* + * Probe the flash density and setup window address and size + * based on user CONFIG options. There are times when we don't + * want the MTD driver to be probing the boot or user flash, + * so having the option to enable only one bank is important. + */ +int setup_flash_params(void) +{ +#if defined(DB1550_BOTH_BANKS) + window_addr = 0x18000000; + window_size = 0x8000000; +#elif defined(DB1550_BOOT_ONLY) + window_addr = 0x1C000000; + window_size = 0x4000000; +#else /* USER ONLY */ + window_addr = 0x1E000000; + window_size = 0x4000000; +#endif + return 0; +} + +int __init db1550_mtd_init(void) +{ + struct mtd_partition *parts; + int nb_parts = 0; + + /* Default flash bankwidth */ + db1550_map.bankwidth = flash_bankwidth; + + if (setup_flash_params()) + return -ENXIO; + + /* + * Static partition definition selection + */ + parts = db1550_partitions; + nb_parts = NB_OF(db1550_partitions); + db1550_map.size = window_size; + + /* + * Now let's probe for the actual flash. Do it here since + * specific machine settings might have been set above. 
+ */ + printk(KERN_NOTICE "Pb1550 flash: probing %d-bit flash bus\n", + db1550_map.bankwidth*8); + db1550_map.virt = + (unsigned long)ioremap(window_addr, window_size); + mymtd = do_map_probe("cfi_probe", &db1550_map); + if (!mymtd) return -ENXIO; + mymtd->owner = THIS_MODULE; + + add_mtd_partitions(mymtd, parts, nb_parts); + return 0; +} + +static void __exit db1550_mtd_cleanup(void) +{ + if (mymtd) { + del_mtd_partitions(mymtd); + map_destroy(mymtd); + } +} + +module_init(db1550_mtd_init); +module_exit(db1550_mtd_cleanup); + +MODULE_AUTHOR("Embedded Edge, LLC"); +MODULE_DESCRIPTION("Db1550 mtd map driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/maps/db1x00-flash.c b/drivers/mtd/maps/db1x00-flash.c new file mode 100644 index 000000000..388e14b6b --- /dev/null +++ b/drivers/mtd/maps/db1x00-flash.c @@ -0,0 +1,219 @@ +/* + * Flash memory access on Alchemy Db1xxx boards + * + * $Id: db1x00-flash.c,v 1.3 2004/07/14 17:45:40 dwmw2 Exp $ + * + * (C) 2003 Pete Popov + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#ifdef DEBUG_RW +#define DBG(x...) printk(x) +#else +#define DBG(x...) +#endif + +static unsigned long window_addr; +static unsigned long window_size; +static unsigned long flash_size; + +static BCSR * const bcsr = (BCSR *)0xAE000000; +static unsigned char flash_bankwidth = 4; + +/* + * The Db1x boards support different flash densities. We setup + * the mtd_partition structures below for default of 64Mbit + * flash densities, and override the partitions sizes, if + * necessary, after we check the board status register. + */ + +#ifdef DB1X00_BOTH_BANKS +/* both banks will be used. Combine the first bank and the first + * part of the second bank together into a single jffs/jffs2 + * partition. + */ +static struct mtd_partition db1x00_partitions[] = { + { + .name = "User FS", + .size = 0x1c00000, + .offset = 0x0000000 + },{ + .name = "yamon", + .size = 0x0100000, + .offset = MTDPART_OFS_APPEND, + .mask_flags = MTD_WRITEABLE + },{ + .name = "raw kernel", + .size = (0x300000-0x40000), /* last 256KB is env */ + .offset = MTDPART_OFS_APPEND, + } +}; +#elif defined(DB1X00_BOOT_ONLY) +static struct mtd_partition db1x00_partitions[] = { + { + .name = "User FS", + .size = 0x00c00000, + .offset = 0x0000000 + },{ + .name = "yamon", + .size = 0x0100000, + .offset = MTDPART_OFS_APPEND, + .mask_flags = MTD_WRITEABLE + },{ + .name = "raw kernel", + .size = (0x300000-0x40000), /* last 256KB is env */ + .offset = MTDPART_OFS_APPEND, + } +}; +#elif defined(DB1X00_USER_ONLY) +static struct mtd_partition db1x00_partitions[] = { + { + .name = "User FS", + .size = 0x0e00000, + .offset = 0x0000000 + },{ + .name = "raw kernel", + .size = MTDPART_SIZ_FULL, + .offset = MTDPART_OFS_APPEND, + } +}; +#else +#error MTD_DB1X00 define combo error /* should never happen */ +#endif +#define NB_OF(x) (sizeof(x)/sizeof(x[0])) + +#define NAME "Db1x00 Linux Flash" + +static struct map_info db1xxx_mtd_map = { + .name = NAME, +}; + +static struct mtd_partition *parsed_parts; +static struct mtd_info *db1xxx_mtd; + +/* + * Probe the flash density and setup window address and size + * based on user CONFIG options. There are times when we don't + * want the MTD driver to be probing the boot or user flash, + * so having the option to enable only one bank is important. 
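+ *
+ * setup_flash_params() below decodes bits 15:14 of the board status
+ * register: 0 selects 64Mbit parts (8MB each), 1 selects 128Mbit parts
+ * (16MB each), 2 selects 256Mbit parts (64MB each, boot bank only) and
+ * any other value makes the probe fail.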
+ */ +int setup_flash_params(void) +{ + switch ((bcsr->status >> 14) & 0x3) { + case 0: /* 64Mbit devices */ + flash_size = 0x800000; /* 8MB per part */ +#if defined(DB1X00_BOTH_BANKS) + window_addr = 0x1E000000; + window_size = 0x2000000; +#elif defined(DB1X00_BOOT_ONLY) + window_addr = 0x1F000000; + window_size = 0x1000000; +#else /* USER ONLY */ + window_addr = 0x1E000000; + window_size = 0x1000000; +#endif + break; + case 1: + /* 128 Mbit devices */ + flash_size = 0x1000000; /* 16MB per part */ +#if defined(DB1X00_BOTH_BANKS) + window_addr = 0x1C000000; + window_size = 0x4000000; + /* USERFS from 0x1C00 0000 to 0x1FC0 0000 */ + db1x00_partitions[0].size = 0x3C00000; +#elif defined(DB1X00_BOOT_ONLY) + window_addr = 0x1E000000; + window_size = 0x2000000; + /* USERFS from 0x1E00 0000 to 0x1FC0 0000 */ + db1x00_partitions[0].size = 0x1C00000; +#else /* USER ONLY */ + window_addr = 0x1C000000; + window_size = 0x2000000; + /* USERFS from 0x1C00 0000 to 0x1DE00000 */ + db1x00_partitions[0].size = 0x1DE0000; +#endif + break; + case 2: + /* 256 Mbit devices */ + flash_size = 0x4000000; /* 64MB per part */ +#if defined(DB1X00_BOTH_BANKS) + return 1; +#elif defined(DB1X00_BOOT_ONLY) + /* Boot ROM flash bank only; no user bank */ + window_addr = 0x1C000000; + window_size = 0x4000000; + /* USERFS from 0x1C00 0000 to 0x1FC00000 */ + db1x00_partitions[0].size = 0x3C00000; +#else /* USER ONLY */ + return 1; +#endif + break; + default: + return 1; + } + db1xxx_mtd_map.size = window_size; + db1xxx_mtd_map.bankwidth = flash_bankwidth; + db1xxx_mtd_map.phys = window_addr; + db1xxx_mtd_map.bankwidth = flash_bankwidth; + return 0; +} + +int __init db1x00_mtd_init(void) +{ + struct mtd_partition *parts; + int nb_parts = 0; + + if (setup_flash_params()) + return -ENXIO; + + /* + * Static partition definition selection + */ + parts = db1x00_partitions; + nb_parts = NB_OF(db1x00_partitions); + + /* + * Now let's probe for the actual flash. Do it here since + * specific machine settings might have been set above. + */ + printk(KERN_NOTICE "Db1xxx flash: probing %d-bit flash bus\n", + db1xxx_mtd_map.bankwidth*8); + db1xxx_mtd_map.virt = (unsigned long)ioremap(window_addr, window_size); + db1xxx_mtd = do_map_probe("cfi_probe", &db1xxx_mtd_map); + if (!db1xxx_mtd) return -ENXIO; + db1xxx_mtd->owner = THIS_MODULE; + + add_mtd_partitions(db1xxx_mtd, parts, nb_parts); + return 0; +} + +static void __exit db1x00_mtd_cleanup(void) +{ + if (db1xxx_mtd) { + del_mtd_partitions(db1xxx_mtd); + map_destroy(db1xxx_mtd); + if (parsed_parts) + kfree(parsed_parts); + } +} + +module_init(db1x00_mtd_init); +module_exit(db1x00_mtd_cleanup); + +MODULE_AUTHOR("Pete Popov"); +MODULE_DESCRIPTION("Db1x00 mtd map driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/maps/dmv182.c b/drivers/mtd/maps/dmv182.c new file mode 100644 index 000000000..cdb9c1bc9 --- /dev/null +++ b/drivers/mtd/maps/dmv182.c @@ -0,0 +1,150 @@ + +/* + * drivers/mtd/maps/svme182.c + * + * Flash map driver for the Dy4 SVME182 board + * + * $Id: dmv182.c,v 1.3 2004/07/14 17:45:40 dwmw2 Exp $ + * + * Copyright 2003-2004, TimeSys Corporation + * + * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This driver currently handles only the 16MiB user flash bank 1 on the + * board. It does not provide access to bank 0 (contains the Dy4 FFW), bank 2 + * (VxWorks boot), or the optional 48MiB expansion flash. + * + * scott.wood@timesys.com: On the newer boards with 128MiB flash, it + * now supports the first 96MiB (the boot flash bank containing FFW + * is excluded). The VxWorks loader is in partition 1. + */ + +#define FLASH_BASE_ADDR 0xf0000000 +#define FLASH_BANK_SIZE (128*1024*1024) + +MODULE_AUTHOR("Scott Wood, TimeSys Corporation "); +MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board"); +MODULE_LICENSE("GPL"); + +static struct map_info svme182_map = { + .name = "Dy4 SVME182", + .bankwidth = 32, + .size = 128 * 1024 * 1024 +}; + +#define BOOTIMAGE_PART_SIZE ((6*1024*1024)-RESERVED_PART_SIZE) + +// Allow 6MiB for the kernel +#define NEW_BOOTIMAGE_PART_SIZE (6 * 1024 * 1024) +// Allow 1MiB for the bootloader +#define NEW_BOOTLOADER_PART_SIZE (1024 * 1024) +// Use the remaining 9MiB at the end of flash for the RFS +#define NEW_RFS_PART_SIZE (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \ + NEW_BOOTIMAGE_PART_SIZE) + +static struct mtd_partition svme182_partitions[] = { + // The Lower PABS is only 128KiB, but the partition code doesn't + // like partitions that don't end on the largest erase block + // size of the device, even if all of the erase blocks in the + // partition are small ones. The hardware should prevent + // writes to the actual PABS areas. + { + name: "Lower PABS and CPU 0 bootloader or kernel", + size: 6*1024*1024, + offset: 0, + }, + { + name: "Root Filesystem", + size: 10*1024*1024, + offset: MTDPART_OFS_NXTBLK + }, + { + name: "CPU1 Bootloader", + size: 1024*1024, + offset: MTDPART_OFS_NXTBLK, + }, + { + name: "Extra", + size: 110*1024*1024, + offset: MTDPART_OFS_NXTBLK + }, + { + name: "Foundation Firmware and Upper PABS", + size: 1024*1024, + offset: MTDPART_OFS_NXTBLK, + mask_flags: MTD_WRITEABLE // read-only + } +}; + +static struct mtd_info *this_mtd; + +static int __init init_svme182(void) +{ + struct mtd_partition *partitions; + int num_parts = sizeof(svme182_partitions) / sizeof(struct mtd_partition); + + partitions = svme182_partitions; + + svme182_map.virt = + (unsigned long)ioremap(FLASH_BASE_ADDR, svme182_map.size); + + if (svme182_map.virt == 0) { + printk("Failed to ioremap FLASH memory area.\n"); + return -EIO; + } + + simple_map_init(&svme182_map); + + this_mtd = do_map_probe("cfi_probe", &svme182_map); + if (!this_mtd) + { + iounmap((void *)svme182_map.virt); + return -ENXIO; + } + + printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n", + this_mtd->size >> 20, FLASH_BASE_ADDR); + + this_mtd->owner = THIS_MODULE; + add_mtd_partitions(this_mtd, partitions, num_parts); + + return 0; +} + +static void __exit cleanup_svme182(void) +{ + if (this_mtd) + { + del_mtd_partitions(this_mtd); + map_destroy(this_mtd); + } + + if (svme182_map.virt) + { + iounmap((void *)svme182_map.virt); + svme182_map.virt = 0; + } + + return; +} + +module_init(init_svme182); +module_exit(cleanup_svme182); diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c new file mode 100644 index 000000000..ec2612b0d --- /dev/null +++ b/drivers/mtd/maps/ichxrom.c @@ -0,0 +1,407 @@ +/* + * ichxrom.c + * + * Normal mappings of chips in physical memory + * $Id: ichxrom.c,v 1.7 2004/07/14 18:14:09 eric Exp $ + */ + +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define xstr(s) str(s) +#define str(s) #s +#define MOD_NAME xstr(KBUILD_BASENAME) + +#define MTD_DEV_NAME_LENGTH 16 + +#define RESERVE_MEM_REGION 0 + + +#define MANUFACTURER_INTEL 0x0089 +#define I82802AB 0x00ad +#define I82802AC 0x00ac + +#define ICHX_FWH_REGION_START 0xFF000000UL +#define ICHX_FWH_REGION_SIZE 0x01000000UL +#define BIOS_CNTL 0x4e +#define FWH_DEC_EN1 0xE3 +#define FWH_DEC_EN2 0xF0 +#define FWH_SEL1 0xE8 +#define FWH_SEL2 0xEE + +struct ichxrom_map_info { + struct map_info map; + struct mtd_info *mtd; + unsigned long window_addr; + struct pci_dev *pdev; + struct resource window_rsrc; + struct resource rom_rsrc; + char mtd_name[MTD_DEV_NAME_LENGTH]; +}; + +static inline unsigned long addr(struct map_info *map, unsigned long ofs) +{ + unsigned long offset; + offset = ((8*1024*1024) - map->size) + ofs; + if (offset >= (4*1024*1024)) { + offset += 0x400000; + } + return map->map_priv_1 + 0x400000 + offset; +} + +static inline unsigned long dbg_addr(struct map_info *map, unsigned long addr) +{ + return addr - map->map_priv_1 + ICHX_FWH_REGION_START; +} + +static map_word ichxrom_read(struct map_info *map, unsigned long ofs) +{ + map_word val; + int i; + switch(map->bankwidth) { + case 1: val.x[0] = __raw_readb(addr(map, ofs)); break; + case 2: val.x[0] = __raw_readw(addr(map, ofs)); break; + case 4: val.x[0] = __raw_readl(addr(map, ofs)); break; +#if BITS_PER_LONG >= 64 + case 8: val.x[0] = __raw_readq(addr(map, ofs)); break; +#endif + default: val.x[0] = 0; break; + } + for(i = 1; i < map_words(map); i++) { + val.x[i] = 0; + } + return val; +} + +static void ichxrom_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) +{ + memcpy_fromio(to, addr(map, from), len); +} + +static void ichxrom_write(struct map_info *map, map_word d, unsigned long ofs) +{ + switch(map->bankwidth) { + case 1: __raw_writeb(d.x[0], addr(map,ofs)); break; + case 2: __raw_writew(d.x[0], addr(map,ofs)); break; + case 4: __raw_writel(d.x[0], addr(map,ofs)); break; +#if BITS_PER_LONG >= 64 + case 8: __raw_writeq(d.x[0], addr(map,ofs)); break; +#endif + } + mb(); +} + +static void ichxrom_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) +{ + memcpy_toio(addr(map, to), from, len); +} + +static struct ichxrom_map_info ichxrom_map = { + .map = { + .name = MOD_NAME, + .phys = NO_XIP, + .size = 0, + .bankwidth = 1, + .read = ichxrom_read, + .copy_from = ichxrom_copy_from, + .write = ichxrom_write, + .copy_to = ichxrom_copy_to, + /* Firmware hubs only use vpp when being programmed + * in a factory setting. So in-place programming + * needs to use a different method. 
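+ *
+ * Write protection is handled instead through the FWH block-lock
+ * registers: for each 64KiB block, ichxrom_set_lock_state() below writes
+ * the requested state (FWH_DENY_WRITE, FWH_IMMUTABLE, FWH_DENY_READ, or
+ * 0 to unlock) to the register that sits 4MiB below the block's data
+ * address, hence the "- 0x400000 + 2" offset in its loop.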
+ */ + }, + /* remaining fields of structure are initialized to 0 */ +}; + +enum fwh_lock_state { + FWH_DENY_WRITE = 1, + FWH_IMMUTABLE = 2, + FWH_DENY_READ = 4, +}; + +static void ichxrom_cleanup(struct ichxrom_map_info *info) +{ + u16 word; + + /* Disable writes through the rom window */ + pci_read_config_word(info->pdev, BIOS_CNTL, &word); + pci_write_config_word(info->pdev, BIOS_CNTL, word & ~1); + + if (info->mtd) { + del_mtd_device(info->mtd); + map_destroy(info->mtd); + info->mtd = NULL; + info->map.virt = 0; + } + if (info->rom_rsrc.parent) + release_resource(&info->rom_rsrc); + if (info->window_rsrc.parent) + release_resource(&info->window_rsrc); + + if (info->window_addr) { + iounmap((void *)(info->window_addr)); + info->window_addr = 0; + } +} + + +static int ichxrom_set_lock_state(struct mtd_info *mtd, loff_t ofs, size_t len, + enum fwh_lock_state state) +{ + struct map_info *map = mtd->priv; + unsigned long start = ofs; + unsigned long end = start + len -1; + + /* FIXME do I need to guard against concurrency here? */ + /* round down to 64K boundaries */ + start = start & ~0xFFFF; + end = end & ~0xFFFF; + while (start <= end) { + unsigned long ctrl_addr; + ctrl_addr = addr(map, start) - 0x400000 + 2; + writeb(state, ctrl_addr); + start = start + 0x10000; + } + return 0; +} + +static int ichxrom_lock(struct mtd_info *mtd, loff_t ofs, size_t len) +{ + return ichxrom_set_lock_state(mtd, ofs, len, FWH_DENY_WRITE); +} + +static int ichxrom_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) +{ + return ichxrom_set_lock_state(mtd, ofs, len, 0); +} + +static int __devinit ichxrom_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + u16 word; + struct ichxrom_map_info *info = &ichxrom_map; + unsigned long map_size; + static char *probes[] = { "cfi_probe", "jedec_probe" }; + struct cfi_private *cfi; + + /* For now I just handle the ichx and I assume there + * are not a lot of resources up at the top of the address + * space. It is possible to handle other devices in the + * top 16MB but it is very painful. Also since + * you can only really attach a FWH to an ICHX there + * a number of simplifications you can make. + * + * Also you can page firmware hubs if an 8MB window isn't enough + * but don't currently handle that case either. + */ + + info->pdev = pdev; + + /* + * Try to reserve the window mem region. If this fails then + * it is likely due to the window being "reseved" by the BIOS. + */ + info->window_rsrc.name = MOD_NAME; + info->window_rsrc.start = ICHX_FWH_REGION_START; + info->window_rsrc.end = ICHX_FWH_REGION_START + ICHX_FWH_REGION_SIZE - 1; + info->window_rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + if (request_resource(&iomem_resource, &info->window_rsrc)) { + info->window_rsrc.parent = NULL; + printk(KERN_ERR MOD_NAME + " %s(): Unable to register resource" + " 0x%.08lx-0x%.08lx - kernel bug?\n", + __func__, + info->window_rsrc.start, info->window_rsrc.end); + } + + /* Enable writes through the rom window */ + pci_read_config_word(pdev, BIOS_CNTL, &word); + if (!(word & 1) && (word & (1<<1))) { + /* The BIOS will generate an error if I enable + * this device, so don't even try. + */ + printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n"); + goto failed; + } + pci_write_config_word(pdev, BIOS_CNTL, word | 1); + + + /* Map the firmware hub into my address space. */ + /* Does this use too much virtual address space? 
*/ + info->window_addr = (unsigned long)ioremap( + ICHX_FWH_REGION_START, ICHX_FWH_REGION_SIZE); + if (!info->window_addr) { + printk(KERN_ERR "Failed to ioremap\n"); + goto failed; + } + + /* For now assume the firmware has setup all relevant firmware + * windows. We don't have enough information to handle this case + * intelligently. + */ + + /* FIXME select the firmware hub and enable a window to it. */ + + info->mtd = NULL; + info->map.map_priv_1 = info->window_addr; + + /* Loop through the possible bankwidths */ + for(ichxrom_map.map.bankwidth = 4; ichxrom_map.map.bankwidth; ichxrom_map.map.bankwidth >>= 1) { + map_size = ICHX_FWH_REGION_SIZE; + while(!info->mtd && (map_size > 0)) { + int i; + info->map.size = map_size; + for(i = 0; i < sizeof(probes)/sizeof(char *); i++) { + info->mtd = do_map_probe(probes[i], &ichxrom_map.map); + if (info->mtd) + break; + } + map_size -= 512*1024; + } + if (info->mtd) + break; + } + if (!info->mtd) { + goto failed; + } + cfi = ichxrom_map.map.fldrv_priv; + if ((cfi->mfr == MANUFACTURER_INTEL) && ( + (cfi->id == I82802AB) || + (cfi->id == I82802AC))) + { + /* If it is a firmware hub put in the special lock + * and unlock routines. + */ + info->mtd->lock = ichxrom_lock; + info->mtd->unlock = ichxrom_unlock; + } + if (info->mtd->size > info->map.size) { + printk(KERN_WARNING MOD_NAME " rom(%u) larger than window(%u). fixing...\n", + info->mtd->size, info->map.size); + info->mtd->size = info->map.size; + } + + info->mtd->owner = THIS_MODULE; + add_mtd_device(info->mtd); + + if (info->window_rsrc.parent) { + /* + * Registering the MTD device in iomem may not be possible + * if there is a BIOS "reserved" and BUSY range. If this + * fails then continue anyway. + */ + snprintf(info->mtd_name, MTD_DEV_NAME_LENGTH, + "mtd%d", info->mtd->index); + + info->rom_rsrc.name = info->mtd_name; + info->rom_rsrc.start = ICHX_FWH_REGION_START + + ICHX_FWH_REGION_SIZE - map_size; + info->rom_rsrc.end = ICHX_FWH_REGION_START + + ICHX_FWH_REGION_SIZE; + info->rom_rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + if (request_resource(&info->window_rsrc, &info->rom_rsrc)) { + printk(KERN_ERR MOD_NAME + ": cannot reserve MTD resource\n"); + info->rom_rsrc.parent = NULL; + } + } + + return 0; + + failed: + ichxrom_cleanup(info); + return -ENODEV; +} + + +static void __devexit ichxrom_remove_one (struct pci_dev *pdev) +{ + struct ichxrom_map_info *info = &ichxrom_map; + u16 word; + + del_mtd_device(info->mtd); + map_destroy(info->mtd); + info->mtd = NULL; + info->map.map_priv_1 = 0; + + iounmap((void *)(info->window_addr)); + info->window_addr = 0; + + /* Disable writes through the rom window */ + pci_read_config_word(pdev, BIOS_CNTL, &word); + pci_write_config_word(pdev, BIOS_CNTL, word & ~1); + +#if RESERVE_MEM_REGION + release_mem_region(ICHX_FWH_REGION_START, ICHX_FWH_REGION_SIZE); +#endif +} + +static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, + PCI_ANY_ID, PCI_ANY_ID, }, + { 0, }, +}; + +MODULE_DEVICE_TABLE(pci, ichxrom_pci_tbl); + +#if 0 +static struct pci_driver ichxrom_driver = { + .name = MOD_NAME, + .id_table = ichxrom_pci_tbl, + .probe = ichxrom_init_one, + .remove = 
ichxrom_remove_one, +}; +#endif + +static struct pci_dev *mydev; +int __init init_ichxrom(void) +{ + struct pci_dev *pdev; + struct pci_device_id *id; + + pdev = NULL; + for (id = ichxrom_pci_tbl; id->vendor; id++) { + pdev = pci_find_device(id->vendor, id->device, NULL); + if (pdev) { + break; + } + } + if (pdev) { + mydev = pdev; + return ichxrom_init_one(pdev, &ichxrom_pci_tbl[0]); + } + return -ENXIO; +#if 0 + return pci_module_init(&ichxrom_driver); +#endif +} + +static void __exit cleanup_ichxrom(void) +{ + ichxrom_remove_one(mydev); +} + +module_init(init_ichxrom); +module_exit(cleanup_ichxrom); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biederman "); +MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ICHX southbridge"); diff --git a/drivers/mtd/maps/integrator-flash-v24.c b/drivers/mtd/maps/integrator-flash-v24.c new file mode 100644 index 000000000..945d7c910 --- /dev/null +++ b/drivers/mtd/maps/integrator-flash-v24.c @@ -0,0 +1,258 @@ +/*====================================================================== + + drivers/mtd/maps/armflash.c: ARM Flash Layout/Partitioning + + Copyright (C) 2000 ARM Limited + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + This is access code for flashes using ARM's flash partitioning + standards. + + $Id: integrator-flash-v24.c,v 1.13 2004/07/12 21:59:44 dwmw2 Exp $ + +======================================================================*/ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +// board specific stuff - sorry, it should be in arch/arm/mach-*. 
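+// Each supported board variant below (CONFIG_ARCH_INTEGRATOR and
+// CONFIG_ARCH_P720T) supplies FLASH_BASE/FLASH_SIZE plus its own
+// armflash_flash_init/exit/wp and armflash_set_vpp hooks; the P720T
+// versions are empty stubs.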
+#ifdef CONFIG_ARCH_INTEGRATOR + +#define FLASH_BASE INTEGRATOR_FLASH_BASE +#define FLASH_SIZE INTEGRATOR_FLASH_SIZE + +#define FLASH_PART_SIZE 0x400000 + +#define SC_CTRLC (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLC_OFFSET) +#define SC_CTRLS (IO_ADDRESS(INTEGRATOR_SC_BASE) + INTEGRATOR_SC_CTRLS_OFFSET) +#define EBI_CSR1 (IO_ADDRESS(INTEGRATOR_EBI_BASE) + INTEGRATOR_EBI_CSR1_OFFSET) +#define EBI_LOCK (IO_ADDRESS(INTEGRATOR_EBI_BASE) + INTEGRATOR_EBI_LOCK_OFFSET) + +/* + * Initialise the flash access systems: + * - Disable VPP + * - Assert WP + * - Set write enable bit in EBI reg + */ +static void armflash_flash_init(void) +{ + unsigned int tmp; + + __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP, SC_CTRLC); + + tmp = __raw_readl(EBI_CSR1) | INTEGRATOR_EBI_WRITE_ENABLE; + __raw_writel(tmp, EBI_CSR1); + + if (!(__raw_readl(EBI_CSR1) & INTEGRATOR_EBI_WRITE_ENABLE)) { + __raw_writel(0xa05f, EBI_LOCK); + __raw_writel(tmp, EBI_CSR1); + __raw_writel(0, EBI_LOCK); + } +} + +/* + * Shutdown the flash access systems: + * - Disable VPP + * - Assert WP + * - Clear write enable bit in EBI reg + */ +static void armflash_flash_exit(void) +{ + unsigned int tmp; + + __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN | INTEGRATOR_SC_CTRL_nFLWP, SC_CTRLC); + + /* + * Clear the write enable bit in system controller EBI register. + */ + tmp = __raw_readl(EBI_CSR1) & ~INTEGRATOR_EBI_WRITE_ENABLE; + __raw_writel(tmp, EBI_CSR1); + + if (__raw_readl(EBI_CSR1) & INTEGRATOR_EBI_WRITE_ENABLE) { + __raw_writel(0xa05f, EBI_LOCK); + __raw_writel(tmp, EBI_CSR1); + __raw_writel(0, EBI_LOCK); + } +} + +static void armflash_flash_wp(int on) +{ + unsigned int reg; + + if (on) + reg = SC_CTRLC; + else + reg = SC_CTRLS; + + __raw_writel(INTEGRATOR_SC_CTRL_nFLWP, reg); +} + +static void armflash_set_vpp(struct map_info *map, int on) +{ + unsigned int reg; + + if (on) + reg = SC_CTRLS; + else + reg = SC_CTRLC; + + __raw_writel(INTEGRATOR_SC_CTRL_nFLVPPEN, reg); +} +#endif + +#ifdef CONFIG_ARCH_P720T + +#define FLASH_BASE (0x04000000) +#define FLASH_SIZE (64*1024*1024) + +#define FLASH_PART_SIZE (4*1024*1024) +#define FLASH_BLOCK_SIZE (128*1024) + +static void armflash_flash_init(void) +{ +} + +static void armflash_flash_exit(void) +{ +} + +static void armflash_flash_wp(int on) +{ +} + +static void armflash_set_vpp(struct map_info *map, int on) +{ +} +#endif + + +static struct map_info armflash_map = +{ + .name = "AFS", + .set_vpp = armflash_set_vpp, + .phys = FLASH_BASE, +}; + +static struct mtd_info *mtd; +static struct mtd_partition *parts; +static const char *probes[] = { "RedBoot", "afs", NULL }; + +static int __init armflash_cfi_init(void *base, u_int size) +{ + int ret; + + armflash_flash_init(); + armflash_flash_wp(1); + + /* + * look for CFI based flash parts fitted to this board + */ + armflash_map.size = size; + armflash_map.bankwidth = 4; + armflash_map.virt = (unsigned long) base; + + simple_map_init(&armflash_map); + + /* + * Also, the CFI layer automatically works out what size + * of chips we have, and does the necessary identification + * for us automatically. + */ + mtd = do_map_probe("cfi_probe", &armflash_map); + if (!mtd) + return -ENXIO; + + mtd->owner = THIS_MODULE; + + ret = parse_mtd_partitions(mtd, probes, &parts, (void *)0); + if (ret > 0) { + ret = add_mtd_partitions(mtd, parts, ret); + if (ret) + printk(KERN_ERR "mtd partition registration " + "failed: %d\n", ret); + } + + /* + * If we got an error, free all resources. 
+ */ + if (ret < 0) { + del_mtd_partitions(mtd); + map_destroy(mtd); + } + + return ret; +} + +static void armflash_cfi_exit(void) +{ + if (mtd) { + del_mtd_partitions(mtd); + map_destroy(mtd); + } + if (parts) + kfree(parts); +} + +static int __init armflash_init(void) +{ + int err = -EBUSY; + void *base; + + if (request_mem_region(FLASH_BASE, FLASH_SIZE, "flash") == NULL) + goto out; + + base = ioremap(FLASH_BASE, FLASH_SIZE); + err = -ENOMEM; + if (base == NULL) + goto release; + + err = armflash_cfi_init(base, FLASH_SIZE); + if (err) { + iounmap(base); +release: + release_mem_region(FLASH_BASE, FLASH_SIZE); + } +out: + return err; +} + +static void __exit armflash_exit(void) +{ + armflash_cfi_exit(); + iounmap((void *)armflash_map.virt); + release_mem_region(FLASH_BASE, FLASH_SIZE); + armflash_flash_exit(); +} + +module_init(armflash_init); +module_exit(armflash_exit); + +MODULE_AUTHOR("ARM Ltd"); +MODULE_DESCRIPTION("ARM Integrator CFI map driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c new file mode 100644 index 000000000..a10f92126 --- /dev/null +++ b/drivers/mtd/maps/ixp4xx.c @@ -0,0 +1,244 @@ +/* + * $Id: ixp4xx.c,v 1.1 2004/05/13 22:21:26 dsaxena Exp $ + * + * drivers/mtd/maps/ixp4xx.c + * + * MTD Map file for IXP4XX based systems. Please do not make per-board + * changes in here. If your board needs special setup, do it in your + * platform level code in arch/arm/mach-ixp4xx/board-setup.c + * + * Original Author: Intel Corporation + * Maintainer: Deepak Saxena + * + * Copyright (C) 2002 Intel Corporation + * Copyright (C) 2003-2004 MontaVista Software, Inc. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifndef __ARMEB__ +#define BYTE0(h) ((h) & 0xFF) +#define BYTE1(h) (((h) >> 8) & 0xFF) +#else +#define BYTE0(h) (((h) >> 8) & 0xFF) +#define BYTE1(h) ((h) & 0xFF) +#endif + +static __u16 +ixp4xx_read16(struct map_info *map, unsigned long ofs) +{ + return *(__u16 *) (map->map_priv_1 + ofs); +} + +/* + * The IXP4xx expansion bus only allows 16-bit wide acceses + * when attached to a 16-bit wide device (such as the 28F128J3A), + * so we can't just memcpy_fromio(). + */ +static void +ixp4xx_copy_from(struct map_info *map, void *to, + unsigned long from, ssize_t len) +{ + int i; + u8 *dest = (u8 *) to; + u16 *src = (u16 *) (map->map_priv_1 + from); + u16 data; + + for (i = 0; i < (len / 2); i++) { + data = src[i]; + dest[i * 2] = BYTE0(data); + dest[i * 2 + 1] = BYTE1(data); + } + + if (len & 1) + dest[len - 1] = BYTE0(src[i]); +} + +static void +ixp4xx_write16(struct map_info *map, __u16 d, unsigned long adr) +{ + *(__u16 *) (map->map_priv_1 + adr) = d; +} + +struct ixp4xx_flash_info { + struct mtd_info *mtd; + struct map_info map; + struct mtd_partition *partitions; + struct resource *res; +}; + +static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; + +static int +ixp4xx_flash_remove(struct device *_dev) +{ + struct platform_device *dev = to_platform_device(_dev); + struct flash_platform_data *plat = dev->dev.platform_data; + struct ixp4xx_flash_info *info = dev_get_drvdata(&dev->dev); + + dev_set_drvdata(&dev->dev, NULL); + + if(!info) + return 0; + + /* + * This is required for a soft reboot to work. 
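+ * The write below issues command 0xff which, on the Intel-style CFI parts
+ * this driver targets (such as the 28F128J3A mentioned above), should put
+ * the chip back into read-array mode so the bootloader can read it after
+ * a warm reset.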
+ */ + ixp4xx_write16(&info->map, 0xff, 0x55 * 0x2); + + if (info->mtd) { + del_mtd_partitions(info->mtd); + map_destroy(info->mtd); + } + if (info->map.map_priv_1) + iounmap((void *) info->map.map_priv_1); + + if (info->partitions) + kfree(info->partitions); + + if (info->res) { + release_resource(info->res); + kfree(info->res); + } + + if (plat->exit) + plat->exit(); + + /* Disable flash write */ + *IXP4XX_EXP_CS0 &= ~IXP4XX_FLASH_WRITABLE; + + return 0; +} + +static int ixp4xx_flash_probe(struct device *_dev) +{ + struct platform_device *dev = to_platform_device(_dev); + struct flash_platform_data *plat = dev->dev.platform_data; + struct ixp4xx_flash_info *info; + int err = -1; + + if (!plat) + return -ENODEV; + + if (plat->init) { + err = plat->init(); + if (err) + return err; + } + + info = kmalloc(sizeof(struct ixp4xx_flash_info), GFP_KERNEL); + if(!info) { + err = -ENOMEM; + goto Error; + } + memzero(info, sizeof(struct ixp4xx_flash_info)); + + dev_set_drvdata(&dev->dev, info); + + /* + * Enable flash write + * TODO: Move this out to board specific code + */ + *IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE; + + /* + * Tell the MTD layer we're not 1:1 mapped so that it does + * not attempt to do a direct access on us. + */ + info->map.phys = NO_XIP; + info->map.size = dev->resource->end - dev->resource->start + 1; + + /* + * We only support 16-bit accesses for now. If and when + * any board use 8-bit access, we'll fixup the driver to + * handle that. + */ + info->map.buswidth = 2; + info->map.name = dev->dev.bus_id; + info->map.read16 = ixp4xx_read16, + info->map.write16 = ixp4xx_write16, + info->map.copy_from = ixp4xx_copy_from, + + info->res = request_mem_region(dev->resource->start, + dev->resource->end - dev->resource->start + 1, + "IXP4XXFlash"); + if (!info->res) { + printk(KERN_ERR "IXP4XXFlash: Could not reserve memory region\n"); + err = -ENOMEM; + goto Error; + } + + info->map.map_priv_1 = + (unsigned long) ioremap(dev->resource->start, + dev->resource->end - dev->resource->start + 1); + if (!info->map.map_priv_1) { + printk(KERN_ERR "IXP4XXFlash: Failed to ioremap region\n"); + err = -EIO; + goto Error; + } + + info->mtd = do_map_probe(plat->map_name, &info->map); + if (!info->mtd) { + printk(KERN_ERR "IXP4XXFlash: map_probe failed\n"); + err = -ENXIO; + goto Error; + } + info->mtd->owner = THIS_MODULE; + + err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0); + if (err > 0) { + err = add_mtd_partitions(info->mtd, info->partitions, err); + if(err) + printk(KERN_ERR "Could not parse partitions\n"); + } + + if (err) + goto Error; + + return 0; + +Error: + ixp4xx_flash_remove(_dev); + return err; +} + +static struct device_driver ixp4xx_flash_driver = { + .name = "IXP4XX-Flash", + .bus = &platform_bus_type, + .probe = ixp4xx_flash_probe, + .remove = ixp4xx_flash_remove, +}; + +static int __init ixp4xx_flash_init(void) +{ + return driver_register(&ixp4xx_flash_driver); +} + +static void __exit ixp4xx_flash_exit(void) +{ + driver_unregister(&ixp4xx_flash_driver); +} + + +module_init(ixp4xx_flash_init); +module_exit(ixp4xx_flash_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems") +MODULE_AUTHOR("Deepak Saxena"); + diff --git a/drivers/mtd/maps/mpc1211.c b/drivers/mtd/maps/mpc1211.c new file mode 100644 index 000000000..cc55200a2 --- /dev/null +++ b/drivers/mtd/maps/mpc1211.c @@ -0,0 +1,81 @@ +/* + * Flash on MPC-1211 + * + * $Id: mpc1211.c,v 1.3 2004/07/14 17:45:40 dwmw2 Exp $ + * + * (C) 2002 Interface, Saito.K & Jeanne + * + 
* GPL'd + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static struct mtd_info *flash_mtd; +static struct mtd_partition *parsed_parts; + +struct map_info mpc1211_flash_map = { + .name = "MPC-1211 FLASH", + .size = 0x80000, + .bankwidth = 1, +}; + +static struct mtd_partition mpc1211_partitions[] = { + { + .name = "IPL & ETH-BOOT", + .offset = 0x00000000, + .size = 0x10000, + }, + { + .name = "Flash FS", + .offset = 0x00010000, + .size = MTDPART_SIZ_FULL, + } +}; + +static int __init init_mpc1211_maps(void) +{ + int nr_parts; + + mpc1211_flash_map.phys = 0; + mpc1211_flash_map.virt = P2SEGADDR(0); + + simple_map_init(&mpc1211_flash_map); + + printk(KERN_NOTICE "Probing for flash chips at 0x00000000:\n"); + flash_mtd = do_map_probe("jedec_probe", &mpc1211_flash_map); + if (!flash_mtd) { + printk(KERN_NOTICE "Flash chips not detected at either possible location.\n"); + return -ENXIO; + } + printk(KERN_NOTICE "MPC-1211: Flash at 0x%08lx\n", mpc1211_flash_map.virt & 0x1fffffff); + flash_mtd->module = THIS_MODULE; + + parsed_parts = mpc1211_partitions; + nr_parts = ARRAY_SIZE(mpc1211_partitions); + + add_mtd_partitions(flash_mtd, parsed_parts, nr_parts); + return 0; +} + +static void __exit cleanup_mpc1211_maps(void) +{ + if (parsed_parts) + del_mtd_partitions(flash_mtd); + else + del_mtd_device(flash_mtd); + map_destroy(flash_mtd); +} + +module_init(init_mpc1211_maps); +module_exit(cleanup_mpc1211_maps); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Saito.K & Jeanne "); +MODULE_DESCRIPTION("MTD map driver for MPC-1211 boards. Interface"); diff --git a/drivers/mtd/maps/omap-toto-flash.c b/drivers/mtd/maps/omap-toto-flash.c new file mode 100644 index 000000000..4262f1c03 --- /dev/null +++ b/drivers/mtd/maps/omap-toto-flash.c @@ -0,0 +1,137 @@ +/* + * NOR Flash memory access on TI Toto board + * + * jzhang@ti.com (C) 2003 Texas Instruments. + * + * (C) 2002 MontVista Software, Inc. 
+ * + * $Id: omap-toto-flash.c,v 1.2 2004/07/12 21:59:44 dwmw2 Exp $ + */ + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include + + +#ifndef CONFIG_ARCH_OMAP +#error This is for OMAP architecture only +#endif + +//these lines need be moved to a hardware header file +#define OMAP_TOTO_FLASH_BASE 0xd8000000 +#define OMAP_TOTO_FLASH_SIZE 0x80000 + +static struct map_info omap_toto_map_flash = { + .name = "OMAP Toto flash", + .bankwidth = 2, + .virt = OMAP_TOTO_FLASH_BASE, +}; + + +static struct mtd_partition toto_flash_partitions[] = { + { + .name = "BootLoader", + .size = 0x00040000, /* hopefully u-boot will stay 128k + 128*/ + .offset = 0, + .mask_flags = MTD_WRITEABLE, /* force read-only */ + }, { + .name = "ReservedSpace", + .size = 0x00030000, + .offset = MTDPART_OFS_APPEND, + //mask_flags: MTD_WRITEABLE, /* force read-only */ + }, { + .name = "EnvArea", /* bottom 64KiB for env vars */ + .size = MTDPART_SIZ_FULL, + .offset = MTDPART_OFS_APPEND, + } +}; + +static struct mtd_partition *parsed_parts; + +static struct mtd_info *flash_mtd; + +static int __init init_flash (void) +{ + + struct mtd_partition *parts; + int nb_parts = 0; + int parsed_nr_parts = 0; + const char *part_type; + + /* + * Static partition definition selection + */ + part_type = "static"; + + parts = toto_flash_partitions; + nb_parts = ARRAY_SIZE(toto_flash_partitions); + omap_toto_map_flash.size = OMAP_TOTO_FLASH_SIZE; + omap_toto_map_flash.phys = virt_to_phys(OMAP_TOTO_FLASH_BASE); + + simple_map_init(&omap_toto_map_flash); + /* + * Now let's probe for the actual flash. Do it here since + * specific machine settings might have been set above. + */ + printk(KERN_NOTICE "OMAP toto flash: probing %d-bit flash bus\n", + omap_toto_map_flash.bankwidth*8); + flash_mtd = do_map_probe("jedec_probe", &omap_toto_map_flash); + if (!flash_mtd) + return -ENXIO; + + if (parsed_nr_parts > 0) { + parts = parsed_parts; + nb_parts = parsed_nr_parts; + } + + if (nb_parts == 0) { + printk(KERN_NOTICE "OMAP toto flash: no partition info available," + "registering whole flash at once\n"); + if (add_mtd_device(flash_mtd)){ + return -ENXIO; + } + } else { + printk(KERN_NOTICE "Using %s partition definition\n", + part_type); + return add_mtd_partitions(flash_mtd, parts, nb_parts); + } + return 0; +} + +int __init omap_toto_mtd_init(void) +{ + int status; + + if (status = init_flash()) { + printk(KERN_ERR "OMAP Toto Flash: unable to init map for toto flash\n"); + } + return status; +} + +static void __exit omap_toto_mtd_cleanup(void) +{ + if (flash_mtd) { + del_mtd_partitions(flash_mtd); + map_destroy(flash_mtd); + if (parsed_parts) + kfree(parsed_parts); + } +} + +module_init(omap_toto_mtd_init); +module_exit(omap_toto_mtd_cleanup); + +MODULE_AUTHOR("Jian Zhang"); +MODULE_DESCRIPTION("OMAP Toto board map driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/maps/pb1550-flash.c b/drivers/mtd/maps/pb1550-flash.c new file mode 100644 index 000000000..4747fbc1f --- /dev/null +++ b/drivers/mtd/maps/pb1550-flash.c @@ -0,0 +1,204 @@ +/* + * Flash memory access on Alchemy Pb1550 board + * + * $Id: pb1550-flash.c,v 1.4 2004/07/14 17:45:40 dwmw2 Exp $ + * + * (C) 2004 Embedded Edge, LLC, based on pb1550-flash.c: + * (C) 2003 Pete Popov + * + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#ifdef DEBUG_RW +#define DBG(x...) printk(x) +#else +#define DBG(x...) 
+#endif + +static unsigned long window_addr; +static unsigned long window_size; + + +static struct map_info pb1550_map = { + .name = "Pb1550 flash", +}; + +static unsigned char flash_bankwidth = 4; + +/* + * Support only 64MB NOR Flash parts + */ + +#ifdef PB1550_BOTH_BANKS +/* both banks will be used. Combine the first bank and the first + * part of the second bank together into a single jffs/jffs2 + * partition. + */ +static struct mtd_partition pb1550_partitions[] = { + /* assume boot[2:0]:swap is '0000' or '1000', which translates to: + * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash + * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash + */ + { + .name = "User FS", + .size = (0x1FC00000 - 0x18000000), + .offset = 0x0000000 + },{ + .name = "yamon", + .size = 0x0100000, + .offset = MTDPART_OFS_APPEND, + .mask_flags = MTD_WRITEABLE + },{ + .name = "raw kernel", + .size = (0x300000 - 0x40000), /* last 256KB is yamon env */ + .offset = MTDPART_OFS_APPEND, + } +}; +#elif defined(PB1550_BOOT_ONLY) +static struct mtd_partition pb1550_partitions[] = { + /* assume boot[2:0]:swap is '0000' or '1000', which translates to: + * 1C00 0000 1FFF FFFF CE0 64MB Boot NOR Flash + */ + { + .name = "User FS", + .size = 0x03c00000, + .offset = 0x0000000 + },{ + .name = "yamon", + .size = 0x0100000, + .offset = MTDPART_OFS_APPEND, + .mask_flags = MTD_WRITEABLE + },{ + .name = "raw kernel", + .size = (0x300000-0x40000), /* last 256KB is yamon env */ + .offset = MTDPART_OFS_APPEND, + } +}; +#elif defined(PB1550_USER_ONLY) +static struct mtd_partition pb1550_partitions[] = { + /* assume boot[2:0]:swap is '0000' or '1000', which translates to: + * 1800 0000 1BFF FFFF CE0 64MB Param NOR Flash + */ + { + .name = "User FS", + .size = (0x4000000 - 0x200000), /* reserve 2MB for raw kernel */ + .offset = 0x0000000 + },{ + .name = "raw kernel", + .size = MTDPART_SIZ_FULL, + .offset = MTDPART_OFS_APPEND, + } +}; +#else +#error MTD_PB1550 define combo error /* should never happen */ +#endif + +#define NB_OF(x) (sizeof(x)/sizeof(x[0])) + +static struct mtd_info *mymtd; + +/* + * Probe the flash density and setup window address and size + * based on user CONFIG options. There are times when we don't + * want the MTD driver to be probing the boot or user flash, + * so having the option to enable only one bank is important. 
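+ *
+ * As a worked example of the decode in the switch below: with
+ * PB1550_BOOT_ONLY configured, a boot:swap value of 0, 1, 8 or 9 maps
+ * the 64MB boot bank at 0x1C000000 with a 0x4000000 window, while
+ * PB1550_BOTH_BANKS instead starts the window at 0x18000000 and spans
+ * 0x8000000 so both parts are covered by a single map.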
+ */ +int setup_flash_params(void) +{ + u16 boot_swapboot; + boot_swapboot = (au_readl(MEM_STSTAT) & (0x7<<1)) | + ((bcsr->status >> 6) & 0x1); + printk("Pb1550 MTD: boot:swap %d\n", boot_swapboot); + + switch (boot_swapboot) { + case 0: /* 512Mbit devices, both enabled */ + case 1: + case 8: + case 9: +#if defined(PB1550_BOTH_BANKS) + window_addr = 0x18000000; + window_size = 0x8000000; +#elif defined(PB1550_BOOT_ONLY) + window_addr = 0x1C000000; + window_size = 0x4000000; +#else /* USER ONLY */ + window_addr = 0x1E000000; + window_size = 0x4000000; +#endif + break; + case 0xC: + case 0xD: + case 0xE: + case 0xF: + /* 64 MB Boot NOR Flash is disabled */ + /* and the start address is moved to 0x0C00000 */ + window_addr = 0x0C000000; + window_size = 0x4000000; + default: + printk("Pb1550 MTD: unsupported boot:swap setting\n"); + return 1; + } + return 0; +} + +int __init pb1550_mtd_init(void) +{ + struct mtd_partition *parts; + int nb_parts = 0; + + /* Default flash bankwidth */ + pb1550_map.bankwidth = flash_bankwidth; + + if (setup_flash_params()) + return -ENXIO; + + /* + * Static partition definition selection + */ + parts = pb1550_partitions; + nb_parts = NB_OF(pb1550_partitions); + pb1550_map.size = window_size; + + /* + * Now let's probe for the actual flash. Do it here since + * specific machine settings might have been set above. + */ + printk(KERN_NOTICE "Pb1550 flash: probing %d-bit flash bus\n", + pb1550_map.bankwidth*8); + pb1550_map.virt = + (unsigned long)ioremap(window_addr, window_size); + mymtd = do_map_probe("cfi_probe", &pb1550_map); + if (!mymtd) return -ENXIO; + mymtd->owner = THIS_MODULE; + + add_mtd_partitions(mymtd, parts, nb_parts); + return 0; +} + +static void __exit pb1550_mtd_cleanup(void) +{ + if (mymtd) { + del_mtd_partitions(mymtd); + map_destroy(mymtd); + } +} + +module_init(pb1550_mtd_init); +module_exit(pb1550_mtd_cleanup); + +MODULE_AUTHOR("Embedded Edge, LLC"); +MODULE_DESCRIPTION("Pb1550 mtd map driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/maps/sbc8240.c b/drivers/mtd/maps/sbc8240.c new file mode 100644 index 000000000..da684d338 --- /dev/null +++ b/drivers/mtd/maps/sbc8240.c @@ -0,0 +1,247 @@ +/* + * Handle mapping of the flash memory access routines on the SBC8240 board. + * + * Carolyn Smith, Tektronix, Inc. + * + * This code is GPLed + * + * $Id: sbc8240.c,v 1.4 2004/07/12 22:38:29 dwmw2 Exp $ + * + */ + +/* + * The SBC8240 has 2 flash banks. + * Bank 0 is a 512 KiB AMD AM29F040B; 8 x 64 KiB sectors. + * It contains the U-Boot code (7 sectors) and the environment (1 sector). + * Bank 1 is 4 x 1 MiB AMD AM29LV800BT; 15 x 64 KiB sectors, 1 x 32 KiB sector, + * 2 x 8 KiB sectors, 1 x 16 KiB sectors. + * Both parts are JEDEC compatible. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#ifdef CONFIG_MTD_PARTITIONS +#include +#endif + +#define DEBUG + +#ifdef DEBUG +# define debugk(fmt,args...) printk(fmt ,##args) +#else +# define debugk(fmt,args...) 
+#endif + + +#define WINDOW_ADDR0 0xFFF00000 /* 512 KiB */ +#define WINDOW_SIZE0 0x00080000 +#define BUSWIDTH0 1 + +#define WINDOW_ADDR1 0xFF000000 /* 4 MiB */ +#define WINDOW_SIZE1 0x00400000 +#define BUSWIDTH1 8 + +#define MSG_PREFIX "sbc8240:" /* prefix for our printk()'s */ +#define MTDID "sbc8240-%d" /* for mtdparts= partitioning */ + + +static struct map_info sbc8240_map[2] = { + { + .name = "sbc8240 Flash Bank #0", + .size = WINDOW_SIZE0, + .bankwidth = BUSWIDTH0, + }, + { + .name = "sbc8240 Flash Bank #1", + .size = WINDOW_SIZE1, + .bankwidth = BUSWIDTH1, + } +}; + +#define NUM_FLASH_BANKS (sizeof(sbc8240_map) / sizeof(struct map_info)) + +/* + * The following defines the partition layout of SBC8240 boards. + * + * See include/linux/mtd/partitions.h for definition of the + * mtd_partition structure. + * + * The *_max_flash_size is the maximum possible mapped flash size + * which is not necessarily the actual flash size. It must correspond + * to the value specified in the mapping definition defined by the + * "struct map_desc *_io_desc" for the corresponding machine. + */ + +#ifdef CONFIG_MTD_PARTITIONS + +static struct mtd_partition sbc8240_uboot_partitions [] = { + /* Bank 0 */ + { + .name = "U-boot", /* U-Boot Firmware */ + .offset = 0, + .size = 0x00070000, /* 7 x 64 KiB sectors */ + .mask_flags = MTD_WRITEABLE, /* force read-only */ + }, + { + .name = "environment", /* U-Boot environment */ + .offset = 0x00070000, + .size = 0x00010000, /* 1 x 64 KiB sector */ + }, +}; + +static struct mtd_partition sbc8240_fs_partitions [] = { + { + .name = "jffs", /* JFFS filesystem */ + .offset = 0, + .size = 0x003C0000, /* 4 * 15 * 64KiB */ + }, + { + .name = "tmp32", + .offset = 0x003C0000, + .size = 0x00020000, /* 4 * 32KiB */ + }, + { + .name = "tmp8a", + .offset = 0x003E0000, + .size = 0x00008000, /* 4 * 8KiB */ + }, + { + .name = "tmp8b", + .offset = 0x003E8000, + .size = 0x00008000, /* 4 * 8KiB */ + }, + { + .name = "tmp16", + .offset = 0x003F0000, + .size = 0x00010000, /* 4 * 16KiB */ + } +}; + +#define NB_OF(x) (sizeof (x) / sizeof (x[0])) + +/* trivial struct to describe partition information */ +struct mtd_part_def +{ + int nums; + unsigned char *type; + struct mtd_partition* mtd_part; +}; + +static struct mtd_info *sbc8240_mtd[NUM_FLASH_BANKS]; +static struct mtd_part_def sbc8240_part_banks[NUM_FLASH_BANKS]; + + +#endif /* CONFIG_MTD_PARTITIONS */ + + +int __init init_sbc8240_mtd (void) +{ + static struct _cjs { + u_long addr; + u_long size; + } pt[NUM_FLASH_BANKS] = { + { + .addr = WINDOW_ADDR0, + .size = WINDOW_SIZE0 + }, + { + .addr = WINDOW_ADDR1, + .size = WINDOW_SIZE1 + }, + }; + + int devicesfound = 0; + int i; + + for (i = 0; i < NUM_FLASH_BANKS; i++) { + printk (KERN_NOTICE MSG_PREFIX + "Probing 0x%08lx at 0x%08lx\n", pt[i].size, pt[i].addr); + + sbc8240_map[i].map_priv_1 = + (unsigned long) ioremap (pt[i].addr, pt[i].size); + if (!sbc8240_map[i].map_priv_1) { + printk (MSG_PREFIX "failed to ioremap\n"); + return -EIO; + } + simple_map_init(&sbc8240_mtd[i]); + + sbc8240_mtd[i] = do_map_probe("jedec_probe", &sbc8240_map[i]); + + if (sbc8240_mtd[i]) { + sbc8240_mtd[i]->module = THIS_MODULE; + devicesfound++; + } + } + + if (!devicesfound) { + printk(KERN_NOTICE MSG_PREFIX + "No suppported flash chips found!\n"); + return -ENXIO; + } + +#ifdef CONFIG_MTD_PARTITIONS + sbc8240_part_banks[0].mtd_part = sbc8240_uboot_partitions; + sbc8240_part_banks[0].type = "static image"; + sbc8240_part_banks[0].nums = NB_OF(sbc8240_uboot_partitions); + sbc8240_part_banks[1].mtd_part = 
sbc8240_fs_partitions; + sbc8240_part_banks[1].type = "static file system"; + sbc8240_part_banks[1].nums = NB_OF(sbc8240_fs_partitions); + + for (i = 0; i < NUM_FLASH_BANKS; i++) { + + if (!sbc8240_mtd[i]) continue; + if (sbc8240_part_banks[i].nums == 0) { + printk (KERN_NOTICE MSG_PREFIX + "No partition info available, registering whole device\n"); + add_mtd_device(sbc8240_mtd[i]); + } else { + printk (KERN_NOTICE MSG_PREFIX + "Using %s partition definition\n", sbc8240_part_banks[i].mtd_part->name); + add_mtd_partitions (sbc8240_mtd[i], + sbc8240_part_banks[i].mtd_part, + sbc8240_part_banks[i].nums); + } + } +#else + printk(KERN_NOTICE MSG_PREFIX + "Registering %d flash banks at once\n", devicesfound); + + for (i = 0; i < devicesfound; i++) { + add_mtd_device(sbc8240_mtd[i]); + } +#endif /* CONFIG_MTD_PARTITIONS */ + + return devicesfound == 0 ? -ENXIO : 0; +} + +static void __exit cleanup_sbc8240_mtd (void) +{ + int i; + + for (i = 0; i < NUM_FLASH_BANKS; i++) { + if (sbc8240_mtd[i]) { + del_mtd_device (sbc8240_mtd[i]); + map_destroy (sbc8240_mtd[i]); + } + if (sbc8240_map[i].map_priv_1) { + iounmap ((void *) sbc8240_map[i].map_priv_1); + sbc8240_map[i].map_priv_1 = 0; + } + } +} + +module_init (init_sbc8240_mtd); +module_exit (cleanup_sbc8240_mtd); + +MODULE_LICENSE ("GPL"); +MODULE_AUTHOR ("Carolyn Smith "); +MODULE_DESCRIPTION ("MTD map driver for SBC8240 boards"); + diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c new file mode 100644 index 000000000..1901302d4 --- /dev/null +++ b/drivers/mtd/maps/wr_sbc82xx_flash.c @@ -0,0 +1,167 @@ +/* + * $Id: wr_sbc82xx_flash.c,v 1.1 2004/06/07 10:21:32 dwmw2 Exp $ + * + * Map for flash chips on Wind River PowerQUICC II SBC82xx board. + * + * Copyright (C) 2004 Red Hat, Inc. + * + * Author: David Woodhouse + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static struct mtd_info *sbcmtd[3]; +static struct mtd_partition *sbcmtd_parts[3]; + +struct map_info sbc82xx_flash_map[3] = { + {.name = "Boot flash"}, + {.name = "Alternate boot flash"}, + {.name = "User flash"} +}; + +static struct mtd_partition smallflash_parts[] = { + { + .name = "space", + .size = 0x100000, + .offset = 0, + }, { + .name = "bootloader", + .size = MTDPART_SIZ_FULL, + .offset = MTDPART_OFS_APPEND, + } +}; + +static struct mtd_partition bigflash_parts[] = { + { + .name = "bootloader", + .size = 0x80000, + .offset = 0, + }, { + .name = "file system", + .size = MTDPART_SIZ_FULL, + .offset = MTDPART_OFS_APPEND, + } +}; + +static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL}; + +int __init init_sbc82xx_flash(void) +{ + volatile memctl8260_t *mc = &immr->im_memctl; + int bigflash; + int i; + + /* First, register the boot flash, whichever we're booting from */ + if ((mc->memc_br0 & 0x00001800) == 0x00001800) { + bigflash = 0; + } else if ((mc->memc_br0 & 0x00001800) == 0x00000800) { + bigflash = 1; + } else { + printk(KERN_WARNING "Bus Controller register BR0 is %08x. 
Cannot determine flash configuration\n", mc->memc_br0); + return 1; + } + + /* Set parameters for the big flash chip (CS6 or CS0) */ + sbc82xx_flash_map[bigflash].buswidth = 4; + sbc82xx_flash_map[bigflash].size = 0x4000000; + + /* Set parameters for the small flash chip (CS0 or CS6) */ + sbc82xx_flash_map[!bigflash].buswidth = 1; + sbc82xx_flash_map[!bigflash].size = 0x200000; + + /* Set parameters for the user flash chip (CS1) */ + sbc82xx_flash_map[2].buswidth = 4; + sbc82xx_flash_map[2].size = 0x4000000; + + sbc82xx_flash_map[0].phys = mc->memc_br0 & 0xffff8000; + sbc82xx_flash_map[1].phys = mc->memc_br6 & 0xffff8000; + sbc82xx_flash_map[2].phys = mc->memc_br1 & 0xffff8000; + + for (i=0; i<3; i++) { + int8_t flashcs[3] = { 0, 6, 1 }; + int nr_parts; + + printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d", + sbc82xx_flash_map[i].name, sbc82xx_flash_map[i].size >> 20, flashcs[i]); + if (!sbc82xx_flash_map[i].phys) { + /* We know it can't be at zero. */ + printk("): disabled by bootloader.\n"); + continue; + } + printk(" at %08lx)\n", sbc82xx_flash_map[i].phys); + + sbc82xx_flash_map[i].virt = (unsigned long)ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size); + + if (!sbc82xx_flash_map[i].virt) { + printk("Failed to ioremap\n"); + continue; + } + + simple_map_init(&sbc82xx_flash_map[i]); + + sbcmtd[i] = do_map_probe("cfi_probe", &sbc82xx_flash_map[i]); + + if (!sbcmtd[i]) + continue; + + sbcmtd[i]->owner = THIS_MODULE; + + nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes, + &sbcmtd_parts[i], 0); + if (nr_parts > 0) { + add_mtd_partitions (sbcmtd[i], sbcmtd_parts[i], nr_parts); + continue; + } + + /* No partitioning detected. Use default */ + if (i == 2) { + add_mtd_device(sbcmtd[i]); + } else if (i == bigflash) { + add_mtd_partitions (sbcmtd[i], bigflash_parts, ARRAY_SIZE(bigflash_parts)); + } else { + add_mtd_partitions (sbcmtd[i], smallflash_parts, ARRAY_SIZE(smallflash_parts)); + } + } + return 0; +} + +static void __exit cleanup_sbc82xx_flash(void) +{ + int i; + + for (i=0; i<3; i++) { + if (!sbcmtd[i]) + continue; + + if (i<2 || sbcmtd_parts[i]) + del_mtd_partitions(sbcmtd[i]); + else + del_mtd_device(sbcmtd[i]); + + kfree(sbcmtd_parts[i]); + map_destroy(sbcmtd[i]); + + iounmap((void *)sbc82xx_flash_map[i].virt); + sbc82xx_flash_map[i].virt = 0; + } +} + +module_init(init_sbc82xx_flash); +module_exit(cleanup_sbc82xx_flash); + + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Woodhouse "); +MODULE_DESCRIPTION("Flash map driver for WindRiver PowerQUICC II"); diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c new file mode 100644 index 000000000..8e4da6506 --- /dev/null +++ b/drivers/mtd/nand/au1550nd.c @@ -0,0 +1,394 @@ +/* + * drivers/mtd/nand/au1550nd.c + * + * Copyright (C) 2004 Embedded Edge, LLC + * + * $Id: au1550nd.c,v 1.5 2004/05/17 07:19:35 ppopov Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MIPS_PB1550 +#include +#endif +#ifdef CONFIG_MIPS_DB1550 +#include +#endif + + +/* + * MTD structure for NAND controller + */ +static struct mtd_info *au1550_mtd = NULL; +static volatile u32 p_nand; +static int nand_width = 1; /* default, only x8 supported for now */ + +/* Internal buffers. 
Page buffer and oob buffer for one block*/ +static u_char data_buf[512 + 16]; +static u_char oob_buf[16 * 32]; + +/* + * Define partitions for flash device + */ +const static struct mtd_partition partition_info[] = { +#ifdef CONFIG_MIPS_PB1550 +#define NUM_PARTITIONS 2 + { + .name = "Pb1550 NAND FS 0", + .offset = 0, + .size = 8*1024*1024 + }, + { + .name = "Pb1550 NAND FS 1", + .offset = MTDPART_OFS_APPEND, + .size = MTDPART_SIZ_FULL + } +#endif +#ifdef CONFIG_MIPS_DB1550 +#define NUM_PARTITIONS 2 + { + .name = "Db1550 NAND FS 0", + .offset = 0, + .size = 8*1024*1024 + }, + { + .name = "Db1550 NAND FS 1", + .offset = MTDPART_OFS_APPEND, + .size = MTDPART_SIZ_FULL + } +#endif +}; + +static inline void write_cmd_reg(u8 cmd) +{ + if (nand_width) + *((volatile u8 *)(p_nand + MEM_STNAND_CMD)) = cmd; + else + *((volatile u16 *)(p_nand + MEM_STNAND_CMD)) = cmd; + au_sync(); +} + +static inline void write_addr_reg(u8 addr) +{ + if (nand_width) + *((volatile u8 *)(p_nand + MEM_STNAND_ADDR)) = addr; + else + *((volatile u16 *)(p_nand + MEM_STNAND_ADDR)) = addr; + au_sync(); +} + +static inline void write_data_reg(u8 data) +{ + if (nand_width) + *((volatile u8 *)(p_nand + MEM_STNAND_DATA)) = data; + else + *((volatile u16 *)(p_nand + MEM_STNAND_DATA)) = data; + au_sync(); +} + +static inline u32 read_data_reg(void) +{ + u32 data; + if (nand_width) { + data = *((volatile u8 *)(p_nand + MEM_STNAND_DATA)); + au_sync(); + } + else { + data = *((volatile u16 *)(p_nand + MEM_STNAND_DATA)); + au_sync(); + } + return data; +} + +void au1550_hwcontrol(struct mtd_info *mtd, int cmd) +{ +} + +int au1550_device_ready(struct mtd_info *mtd) +{ + int ready; + ready = (au_readl(MEM_STSTAT) & 0x1) ? 1 : 0; + return ready; +} + +static u_char au1550_nand_read_byte(struct mtd_info *mtd) +{ + u_char ret; + ret = read_data_reg(); + return ret; +} + +static void au1550_nand_write_byte(struct mtd_info *mtd, u_char byte) +{ + write_data_reg((u8)byte); +} + +static void +au1550_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + + for (i=0; ipriv; + + /* + * Write out the command to the device. 
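+ *
+ * Note that for NAND_CMD_SEQIN on these small-page parts the column
+ * pointer is steered first, as done below: columns beyond the data
+ * area issue READOOB, columns below 256 issue READ0, and the rest
+ * issue READ1 with the column rebased by 256.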
+ */ + if (command == NAND_CMD_SEQIN) { + int readcmd; + + if (column >= mtd->oobblock) { + /* OOB area */ + column -= mtd->oobblock; + readcmd = NAND_CMD_READOOB; + } else if (column < 256) { + /* First 256 bytes --> READ0 */ + readcmd = NAND_CMD_READ0; + } else { + column -= 256; + readcmd = NAND_CMD_READ1; + } + write_cmd_reg(readcmd); + } + write_cmd_reg(command); + + if (column != -1 || page_addr != -1) { + + /* Serially input address */ + if (column != -1) + write_addr_reg(column); + if (page_addr != -1) { + write_addr_reg((unsigned char) (page_addr & 0xff)); + write_addr_reg(((page_addr >> 8) & 0xff)); + /* One more address cycle for higher density devices */ + if (mtd->size & 0x0c000000) + write_addr_reg((unsigned char) ((page_addr >> 16) & 0x0f)); + } + } + + switch (command) { + + case NAND_CMD_PAGEPROG: + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: + case NAND_CMD_SEQIN: + case NAND_CMD_STATUS: + break; + + case NAND_CMD_RESET: + if (this->dev_ready) + break; + udelay(this->chip_delay); + write_cmd_reg(NAND_CMD_STATUS); + while ( !(read_data_reg() & 0x40)); + return; + + /* This applies to read commands */ + default: + udelay (this->chip_delay); + } + + /* wait until command is processed */ + while (!this->dev_ready(mtd)); +} + + +/* + * Main initialization routine + */ +int __init au1550_init (void) +{ + struct nand_chip *this; + u16 boot_swapboot = 0; /* default value */ + u32 mem_time; + + /* Allocate memory for MTD device structure and private data */ + au1550_mtd = kmalloc (sizeof(struct mtd_info) + + sizeof (struct nand_chip), GFP_KERNEL); + if (!au1550_mtd) { + printk ("Unable to allocate NAND MTD dev structure.\n"); + return -ENOMEM; + } + + /* Get pointer to private data */ + this = (struct nand_chip *) (&au1550_mtd[1]); + + /* Initialize structures */ + memset((char *) au1550_mtd, 0, sizeof(struct mtd_info)); + memset((char *) this, 0, sizeof(struct nand_chip)); + + /* Link the private data with the MTD structure */ + au1550_mtd->priv = this; + + /* disable interrupts */ + au_writel(au_readl(MEM_STNDCTL) & ~(1<<8), MEM_STNDCTL); + + /* disable NAND boot */ + au_writel(au_readl(MEM_STNDCTL) & ~(1<<0), MEM_STNDCTL); + +#ifdef CONFIG_MIPS_PB1550 + /* set gpio206 high */ + au_writel(au_readl(GPIO2_DIR) & ~(1<<6), GPIO2_DIR); + + boot_swapboot = (au_readl(MEM_STSTAT) & (0x7<<1)) | + ((bcsr->status >> 6) & 0x1); + switch (boot_swapboot) { + case 0: + case 2: + case 8: + case 0xC: + case 0xD: + /* x16 NAND Flash */ + nand_width = 0; + printk("Pb1550 NAND: 16-bit NAND not supported by MTD\n"); + break; + case 1: + case 9: + case 3: + case 0xE: + case 0xF: + /* x8 NAND Flash */ + nand_width = 1; + break; + default: + printk("Pb1550 NAND: bad boot:swap\n"); + kfree(au1550_mtd); + return 1; + } + + /* Configure RCE1 - should be done by YAMON */ + au_writel(0x5 | (nand_width << 22), MEM_STCFG1); + au_writel(NAND_TIMING, MEM_STTIME1); + mem_time = au_readl(MEM_STTIME1); + au_sync(); + + /* setup and enable chip select */ + /* we really need to decode offsets only up till 0x20 */ + au_writel((1<<28) | (NAND_PHYS_ADDR>>4) | + (((NAND_PHYS_ADDR + 0x1000)-1) & (0x3fff<<18)>>18), + MEM_STADDR1); + au_sync(); +#endif + +#ifdef CONFIG_MIPS_DB1550 + /* Configure RCE1 - should be done by YAMON */ + au_writel(0x00400005, MEM_STCFG1); + au_writel(0x00007774, MEM_STTIME1); + au_writel(0x12000FFF, MEM_STADDR1); +#endif + + p_nand = (volatile struct nand_regs *)ioremap(NAND_PHYS_ADDR, 0x1000); + + /* Set address of hardware control function */ + this->hwcontrol = au1550_hwcontrol; + 
this->dev_ready = au1550_device_ready; + /* 30 us command delay time */ + this->chip_delay = 30; + + this->cmdfunc = au1550_nand_command; + this->select_chip = au1550_nand_select_chip; + this->write_byte = au1550_nand_write_byte; + this->read_byte = au1550_nand_read_byte; + this->write_buf = au1550_nand_write_buf; + this->read_buf = au1550_nand_read_buf; + this->verify_buf = au1550_nand_verify_buf; + this->eccmode = NAND_ECC_SOFT; + + /* Set internal data buffer */ + this->data_buf = data_buf; + this->oob_buf = oob_buf; + + /* Scan to find existence of the device */ + if (nand_scan (au1550_mtd, 1)) { + kfree (au1550_mtd); + return -ENXIO; + } + + /* Register the partitions */ + add_mtd_partitions(au1550_mtd, partition_info, NUM_PARTITIONS); + + return 0; +} + +module_init(au1550_init); + +/* + * Clean up routine + */ +#ifdef MODULE +static void __exit au1550_cleanup (void) +{ + struct nand_chip *this = (struct nand_chip *) &au1550_mtd[1]; + + iounmap ((void *)p_nand); + + /* Unregister partitions */ + del_mtd_partitions(au1550_mtd); + + /* Unregister the device */ + del_mtd_device (au1550_mtd); + + /* Free the MTD device structure */ + kfree (au1550_mtd); +} +module_exit(au1550_cleanup); +#endif + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Embedded Edge, LLC"); +MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board"); diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c new file mode 100644 index 000000000..677c21685 --- /dev/null +++ b/drivers/mtd/nand/diskonchip.c @@ -0,0 +1,1237 @@ +/* + * drivers/mtd/nand/diskonchip.c + * + * (C) 2003 Red Hat, Inc. + * + * Author: David Woodhouse + * + * Interface to generic NAND code for M-Systems DiskOnChip devices + * + * $Id: diskonchip.c,v 1.23 2004/07/13 00:14:35 dbrown Exp $ + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* Where to look for the devices? */ +#ifndef CONFIG_MTD_DOCPROBE_ADDRESS +#define CONFIG_MTD_DOCPROBE_ADDRESS 0 +#endif + +static unsigned long __initdata doc_locations[] = { +#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__) +#ifdef CONFIG_MTD_DOCPROBE_HIGH + 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000, + 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000, + 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000, + 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000, + 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000, +#else /* CONFIG_MTD_DOCPROBE_HIGH */ + 0xc8000, 0xca000, 0xcc000, 0xce000, + 0xd0000, 0xd2000, 0xd4000, 0xd6000, + 0xd8000, 0xda000, 0xdc000, 0xde000, + 0xe0000, 0xe2000, 0xe4000, 0xe6000, + 0xe8000, 0xea000, 0xec000, 0xee000, +#endif /* CONFIG_MTD_DOCPROBE_HIGH */ +#elif defined(__PPC__) + 0xe4000000, +#elif defined(CONFIG_MOMENCO_OCELOT) + 0x2f000000, + 0xff000000, +#elif defined(CONFIG_MOMENCO_OCELOT_G) || defined (CONFIG_MOMENCO_OCELOT_C) + 0xff000000, +##else +#warning Unknown architecture for DiskOnChip. No default probe locations defined +#endif + 0xffffffff }; + +static struct mtd_info *doclist = NULL; + +struct doc_priv { + unsigned long virtadr; + unsigned long physadr; + u_char ChipID; + u_char CDSNControl; + int chips_per_floor; /* The number of chips detected on each floor */ + int curfloor; + int curchip; + int mh0_page; + int mh1_page; + struct mtd_info *nextdoc; +}; + +/* Max number of eraseblocks to scan (from start of device) for the (I)NFTL + MediaHeader. The spec says to just keep going, I think, but that's just + silly. 
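+   In practice the header lives in one of the first few eraseblocks, so a
+   bound of 8 is assumed to be plenty; find_media_headers() below scans at
+   most MAX_MEDIAHEADER_SCAN blocks, or to the end of the device,
+   whichever comes first.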
*/ +#define MAX_MEDIAHEADER_SCAN 8 + +/* This is the syndrome computed by the HW ecc generator upon reading an empty + page, one with all 0xff for data and stored ecc code. */ +static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a }; +/* This is the ecc value computed by the HW ecc generator upon writing an empty + page, one with all 0xff for data. */ +static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 }; + +#define INFTL_BBT_RESERVED_BLOCKS 4 + +#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil) +#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k) + +static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd); +static void doc200x_select_chip(struct mtd_info *mtd, int chip); + +static int debug=0; +MODULE_PARM(debug, "i"); + +static int try_dword=1; +MODULE_PARM(try_dword, "i"); + +static int no_ecc_failures=0; +MODULE_PARM(no_ecc_failures, "i"); + +static int no_autopart=0; +MODULE_PARM(no_autopart, "i"); + +#ifdef MTD_NAND_DISKONCHIP_BBTWRITE +static int inftl_bbt_write=1; +#else +static int inftl_bbt_write=0; +#endif +MODULE_PARM(inftl_bbt_write, "i"); + +static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS; +MODULE_PARM(doc_config_location, "l"); +MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip"); + +static void DoC_Delay(struct doc_priv *doc, unsigned short cycles) +{ + volatile char dummy; + int i; + + for (i = 0; i < cycles; i++) { + if (DoC_is_Millennium(doc)) + dummy = ReadDOC(doc->virtadr, NOP); + else + dummy = ReadDOC(doc->virtadr, DOCStatus); + } + +} +/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ +static int _DoC_WaitReady(struct doc_priv *doc) +{ + unsigned long docptr = doc->virtadr; + unsigned long timeo = jiffies + (HZ * 10); + + if(debug) printk("_DoC_WaitReady...\n"); + /* Out-of-line routine to wait for chip response */ + while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { + if (time_after(jiffies, timeo)) { + printk("_DoC_WaitReady timed out.\n"); + return -EIO; + } + udelay(1); + cond_resched(); + } + + return 0; +} + +static inline int DoC_WaitReady(struct doc_priv *doc) +{ + unsigned long docptr = doc->virtadr; + int ret = 0; + + DoC_Delay(doc, 4); + + if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) + /* Call the out-of-line routine to wait */ + ret = _DoC_WaitReady(doc); + + DoC_Delay(doc, 2); + if(debug) printk("DoC_WaitReady OK\n"); + return ret; +} + +static void doc2000_write_byte(struct mtd_info *mtd, u_char datum) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + if(debug)printk("write_byte %02x\n", datum); + WriteDOC(datum, docptr, CDSNSlowIO); + WriteDOC(datum, docptr, 2k_CDSN_IO); +} + +static u_char doc2000_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + u_char ret; + + ReadDOC(docptr, CDSNSlowIO); + DoC_Delay(doc, 2); + ret = ReadDOC(docptr, 2k_CDSN_IO); + if (debug) printk("read_byte returns %02x\n", ret); + return ret; +} + +static void doc2000_writebuf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + if (debug)printk("writebuf of %d bytes: ", len); + for (i=0; i < len; i++) { + WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i); + if (debug && i < 16) + 
printk("%02x ", buf[i]); + } + if (debug) printk("\n"); +} + +static void doc2000_readbuf(struct mtd_info *mtd, + u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + if (debug)printk("readbuf of %d bytes: ", len); + + for (i=0; i < len; i++) { + buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i); + } +} + +static void doc2000_readbuf_dword(struct mtd_info *mtd, + u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + if (debug) printk("readbuf_dword of %d bytes: ", len); + + if (unlikely((((unsigned long)buf)|len) & 3)) { + for (i=0; i < len; i++) { + *(uint8_t *)(&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i); + } + } else { + for (i=0; i < len; i+=4) { + *(uint32_t*)(&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i); + } + } +} + +static int doc2000_verifybuf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + for (i=0; i < len; i++) + if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO)) + return -EFAULT; + return 0; +} + +static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + uint16_t ret; + + doc200x_select_chip(mtd, nr); + doc200x_hwcontrol(mtd, NAND_CTL_SETCLE); + this->write_byte(mtd, NAND_CMD_READID); + doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE); + doc200x_hwcontrol(mtd, NAND_CTL_SETALE); + this->write_byte(mtd, 0); + doc200x_hwcontrol(mtd, NAND_CTL_CLRALE); + + ret = this->read_byte(mtd) << 8; + ret |= this->read_byte(mtd); + + if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) { + /* First chip probe. See if we get same results by 32-bit access */ + union { + uint32_t dword; + uint8_t byte[4]; + } ident; + unsigned long docptr = doc->virtadr; + + doc200x_hwcontrol(mtd, NAND_CTL_SETCLE); + doc2000_write_byte(mtd, NAND_CMD_READID); + doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE); + doc200x_hwcontrol(mtd, NAND_CTL_SETALE); + doc2000_write_byte(mtd, 0); + doc200x_hwcontrol(mtd, NAND_CTL_CLRALE); + + ident.dword = readl(docptr + DoC_2k_CDSN_IO); + if (((ident.byte[0] << 8) | ident.byte[1]) == ret) { + printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n"); + this->read_buf = &doc2000_readbuf_dword; + } + } + + return ret; +} + +static void __init doc2000_count_chips(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + uint16_t mfrid; + int i; + + /* Max 4 chips per floor on DiskOnChip 2000 */ + doc->chips_per_floor = 4; + + /* Find out what the first chip is */ + mfrid = doc200x_ident_chip(mtd, 0); + + /* Find how many chips in each floor. 
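+   This is done by issuing READID to chip selects 1..3 and comparing the
+   returned manufacturer/device id against that of chip 0; the first
+   mismatch gives the number of chips per floor.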
*/ + for (i = 1; i < 4; i++) { + if (doc200x_ident_chip(mtd, i) != mfrid) + break; + } + doc->chips_per_floor = i; + printk(KERN_DEBUG "Detected %d chips per floor.\n", i); +} + +static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this, int state) +{ + struct doc_priv *doc = (void *)this->priv; + + int status; + + DoC_WaitReady(doc); + this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); + DoC_WaitReady(doc); + status = (int)this->read_byte(mtd); + + return status; +} + +static void doc2001_write_byte(struct mtd_info *mtd, u_char datum) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + WriteDOC(datum, docptr, CDSNSlowIO); + WriteDOC(datum, docptr, Mil_CDSN_IO); + WriteDOC(datum, docptr, WritePipeTerm); +} + +static u_char doc2001_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + //ReadDOC(docptr, CDSNSlowIO); + /* 11.4.5 -- delay twice to allow extended length cycle */ + DoC_Delay(doc, 2); + ReadDOC(docptr, ReadPipeInit); + //return ReadDOC(docptr, Mil_CDSN_IO); + return ReadDOC(docptr, LastDataRead); +} + +static void doc2001_writebuf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + for (i=0; i < len; i++) + WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i); + /* Terminate write pipeline */ + WriteDOC(0x00, docptr, WritePipeTerm); +} + +static void doc2001_readbuf(struct mtd_info *mtd, + u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + /* Start read pipeline */ + ReadDOC(docptr, ReadPipeInit); + + for (i=0; i < len-1; i++) + buf[i] = ReadDOC(docptr, Mil_CDSN_IO); + + /* Terminate read pipeline */ + buf[i] = ReadDOC(docptr, LastDataRead); +} + +static int doc2001_verifybuf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + /* Start read pipeline */ + ReadDOC(docptr, ReadPipeInit); + + for (i=0; i < len-1; i++) + if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) { + ReadDOC(docptr, LastDataRead); + return i; + } + if (buf[i] != ReadDOC(docptr, LastDataRead)) + return i; + return 0; +} + +static void doc200x_select_chip(struct mtd_info *mtd, int chip) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int floor = 0; + + /* 11.4.4 -- deassert CE before changing chip */ + doc200x_hwcontrol(mtd, NAND_CTL_CLRNCE); + + if(debug)printk("select chip (%d)\n", chip); + + if (chip == -1) + return; + + floor = chip / doc->chips_per_floor; + chip -= (floor * doc->chips_per_floor); + + WriteDOC(floor, docptr, FloorSelect); + WriteDOC(chip, docptr, CDSNDeviceSelect); + + doc200x_hwcontrol(mtd, NAND_CTL_SETNCE); + + doc->curchip = chip; + doc->curfloor = floor; +} + +static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + switch(cmd) { + case NAND_CTL_SETNCE: + doc->CDSNControl |= CDSN_CTRL_CE; + break; + case NAND_CTL_CLRNCE: + doc->CDSNControl &= ~CDSN_CTRL_CE; + break; + case NAND_CTL_SETCLE: + doc->CDSNControl |= CDSN_CTRL_CLE; + 
break; + case NAND_CTL_CLRCLE: + doc->CDSNControl &= ~CDSN_CTRL_CLE; + break; + case NAND_CTL_SETALE: + doc->CDSNControl |= CDSN_CTRL_ALE; + break; + case NAND_CTL_CLRALE: + doc->CDSNControl &= ~CDSN_CTRL_ALE; + break; + case NAND_CTL_SETWP: + doc->CDSNControl |= CDSN_CTRL_WP; + break; + case NAND_CTL_CLRWP: + doc->CDSNControl &= ~CDSN_CTRL_WP; + break; + } + if (debug)printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl); + WriteDOC(doc->CDSNControl, docptr, CDSNControl); + /* 11.4.3 -- 4 NOPs after CSDNControl write */ + DoC_Delay(doc, 4); +} + +static int doc200x_dev_ready(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + /* 11.4.2 -- must NOP four times before checking FR/B# */ + DoC_Delay(doc, 4); + if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { + if(debug) + printk("not ready\n"); + return 0; + } + /* 11.4.2 -- Must NOP twice if it's ready */ + DoC_Delay(doc, 2); + if (debug)printk("was ready\n"); + return 1; +} + +static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) +{ + /* This is our last resort if we couldn't find or create a BBT. Just + pretend all blocks are good. */ + return 0; +} + +static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + /* Prime the ECC engine */ + switch(mode) { + case NAND_ECC_READ: + WriteDOC(DOC_ECC_RESET, docptr, ECCConf); + WriteDOC(DOC_ECC_EN, docptr, ECCConf); + break; + case NAND_ECC_WRITE: + WriteDOC(DOC_ECC_RESET, docptr, ECCConf); + WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf); + break; + } +} + +/* This code is only called on write */ +static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, + unsigned char *ecc_code) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + int emptymatch = 1; + + /* flush the pipeline */ + if (DoC_is_2000(doc)) { + WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl); + WriteDOC(0, docptr, 2k_CDSN_IO); + WriteDOC(0, docptr, 2k_CDSN_IO); + WriteDOC(0, docptr, 2k_CDSN_IO); + WriteDOC(doc->CDSNControl, docptr, CDSNControl); + } else { + WriteDOC(0, docptr, NOP); + WriteDOC(0, docptr, NOP); + WriteDOC(0, docptr, NOP); + } + + for (i = 0; i < 6; i++) { + ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i); + if (ecc_code[i] != empty_write_ecc[i]) + emptymatch = 0; + } + WriteDOC(DOC_ECC_DIS, docptr, ECCConf); +#if 0 + /* If emptymatch=1, we might have an all-0xff data buffer. Check. */ + if (emptymatch) { + /* Note: this somewhat expensive test should not be triggered + often. It could be optimized away by examining the data in + the writebuf routine, and remembering the result. */ + for (i = 0; i < 512; i++) { + if (dat[i] == 0xff) continue; + emptymatch = 0; + break; + } + } + /* If emptymatch still =1, we do have an all-0xff data buffer. + Return all-0xff ecc value instead of the computed one, so + it'll look just like a freshly-erased page. 
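+      (The read-side counterpart of this check is always compiled in:
+      doc200x_correct_data() below compares the syndrome and the stored
+      ECC against the empty-page patterns before reporting a failure.)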
*/ + if (emptymatch) memset(ecc_code, 0xff, 6); +#endif + return 0; +} + +static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) +{ + int i, ret = 0; + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + volatile u_char dummy; + int emptymatch = 1; + + /* flush the pipeline */ + if (DoC_is_2000(doc)) { + dummy = ReadDOC(docptr, 2k_ECCStatus); + dummy = ReadDOC(docptr, 2k_ECCStatus); + dummy = ReadDOC(docptr, 2k_ECCStatus); + } else { + dummy = ReadDOC(docptr, ECCConf); + dummy = ReadDOC(docptr, ECCConf); + dummy = ReadDOC(docptr, ECCConf); + } + + /* Error occured ? */ + if (dummy & 0x80) { + for (i = 0; i < 6; i++) { + calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i); + if (calc_ecc[i] != empty_read_syndrome[i]) + emptymatch = 0; + } + /* If emptymatch=1, the read syndrome is consistent with an + all-0xff data and stored ecc block. Check the stored ecc. */ + if (emptymatch) { + for (i = 0; i < 6; i++) { + if (read_ecc[i] == 0xff) continue; + emptymatch = 0; + break; + } + } + /* If emptymatch still =1, check the data block. */ + if (emptymatch) { + /* Note: this somewhat expensive test should not be triggered + often. It could be optimized away by examining the data in + the readbuf routine, and remembering the result. */ + for (i = 0; i < 512; i++) { + if (dat[i] == 0xff) continue; + emptymatch = 0; + break; + } + } + /* If emptymatch still =1, this is almost certainly a freshly- + erased block, in which case the ECC will not come out right. + We'll suppress the error and tell the caller everything's + OK. Because it is. */ + if (!emptymatch) ret = doc_decode_ecc (dat, calc_ecc); + if (ret > 0) + printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret); + } + WriteDOC(DOC_ECC_DIS, docptr, ECCConf); + if (no_ecc_failures && (ret == -1)) { + printk(KERN_ERR "suppressing ECC failure\n"); + ret = 0; + } + return ret; +} + +//u_char mydatabuf[528]; + +static struct nand_oobinfo doc200x_oobinfo = { + .useecc = MTD_NANDECC_AUTOPLACE, + .eccbytes = 6, + .eccpos = {0, 1, 2, 3, 4, 5}, + .oobfree = { {8, 8} } +}; + +/* Find the (I)NFTL Media Header, and optionally also the mirror media header. + On sucessful return, buf will contain a copy of the media header for + further processing. id is the string to scan for, and will presumably be + either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media + header. The page #s of the found media headers are placed in mh0_page and + mh1_page in the DOC private structure. 
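+   As used by the two partition scanners below: nftl_partscan() asks for
+   "ANAND" with findmirror=1, while inftl_partscan() asks for "BNAND" with
+   findmirror=0 and derives the mirror page itself (mh0_page plus 4096
+   bytes expressed in pages).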
*/ +static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, + const char *id, int findmirror) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + int offs, end = (MAX_MEDIAHEADER_SCAN << this->phys_erase_shift); + int ret, retlen; + + end = min(end, mtd->size); // paranoia + for (offs = 0; offs < end; offs += mtd->erasesize) { + ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf); + if (retlen != mtd->oobblock) continue; + if (ret) { + printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", + offs); + } + if (memcmp(buf, id, 6)) continue; + printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs); + if (doc->mh0_page == -1) { + doc->mh0_page = offs >> this->page_shift; + if (!findmirror) return 1; + continue; + } + doc->mh1_page = offs >> this->page_shift; + return 2; + } + if (doc->mh0_page == -1) { + printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id); + return 0; + } + /* Only one mediaheader was found. We want buf to contain a + mediaheader on return, so we'll have to re-read the one we found. */ + offs = doc->mh0_page << this->page_shift; + ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf); + if (retlen != mtd->oobblock) { + /* Insanity. Give up. */ + printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n"); + return 0; + } + return 1; +} + +static inline int __init nftl_partscan(struct mtd_info *mtd, + struct mtd_partition *parts) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + u_char *buf = this->data_buf; + struct NFTLMediaHeader *mh = (struct NFTLMediaHeader *) buf; + const int psize = 1 << this->page_shift; + int blocks, maxblocks; + int offs, numheaders; + + if (!(numheaders=find_media_headers(mtd, buf, "ANAND", 1))) return 0; + +//#ifdef CONFIG_MTD_DEBUG_VERBOSE +// if (CONFIG_MTD_DEBUG_VERBOSE >= 2) + printk(KERN_INFO " DataOrgID = %s\n" + " NumEraseUnits = %d\n" + " FirstPhysicalEUN = %d\n" + " FormattedSize = %d\n" + " UnitSizeFactor = %d\n", + mh->DataOrgID, mh->NumEraseUnits, + mh->FirstPhysicalEUN, mh->FormattedSize, + mh->UnitSizeFactor); +//#endif + + blocks = mtd->size >> this->phys_erase_shift; + maxblocks = min(32768, mtd->erasesize - psize); + + if (mh->UnitSizeFactor == 0x00) { + /* Auto-determine UnitSizeFactor. The constraints are: + - There can be at most 32768 virtual blocks. + - There can be at most (virtual block size - page size) + virtual blocks (because MediaHeader+BBT must fit in 1). + */ + mh->UnitSizeFactor = 0xff; + while (blocks > maxblocks) { + blocks >>= 1; + maxblocks = min(32768, (maxblocks << 1) + psize); + mh->UnitSizeFactor--; + } + printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor); + } + + /* NOTE: The lines below modify internal variables of the NAND and MTD + layers; variables with have already been configured by nand_scan. + Unfortunately, we didn't know before this point what these values + should be. Thus, this code is somewhat dependant on the exact + implementation of the NAND layer. */ + if (mh->UnitSizeFactor != 0xff) { + this->bbt_erase_shift += (0xff - mh->UnitSizeFactor); + mtd->erasesize <<= (0xff - mh->UnitSizeFactor); + printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize); + blocks = mtd->size >> this->bbt_erase_shift; + maxblocks = min(32768, mtd->erasesize - psize); + } + + if (blocks > maxblocks) { + printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size. 
Aborting.\n", mh->UnitSizeFactor); + return 0; + } + + /* Skip past the media headers. */ + offs = max(doc->mh0_page, doc->mh1_page); + offs <<= this->page_shift; + offs += mtd->erasesize; + + //parts[0].name = " DiskOnChip Boot / Media Header partition"; + //parts[0].offset = 0; + //parts[0].size = offs; + + parts[0].name = " DiskOnChip BDTL partition"; + parts[0].offset = offs; + parts[0].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift; + + offs += parts[0].size; + if (offs < mtd->size) { + parts[1].name = " DiskOnChip Remainder partition"; + parts[1].offset = offs; + parts[1].size = mtd->size - offs; + return 2; + } + return 1; +} + +/* This is a stripped-down copy of the code in inftlmount.c */ +static inline int __init inftl_partscan(struct mtd_info *mtd, + struct mtd_partition *parts) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + u_char *buf = this->data_buf; + struct INFTLMediaHeader *mh = (struct INFTLMediaHeader *) buf; + struct INFTLPartition *ip; + int numparts = 0; + int blocks; + int vshift, lastvunit = 0; + int i; + int end = mtd->size; + + if (inftl_bbt_write) + end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift); + + if (!find_media_headers(mtd, buf, "BNAND", 0)) return 0; + doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift); + + mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks); + mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions); + mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions); + mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits); + mh->FormatFlags = le32_to_cpu(mh->FormatFlags); + mh->PercentUsed = le32_to_cpu(mh->PercentUsed); + +//#ifdef CONFIG_MTD_DEBUG_VERBOSE +// if (CONFIG_MTD_DEBUG_VERBOSE >= 2) + printk(KERN_INFO " bootRecordID = %s\n" + " NoOfBootImageBlocks = %d\n" + " NoOfBinaryPartitions = %d\n" + " NoOfBDTLPartitions = %d\n" + " BlockMultiplerBits = %d\n" + " FormatFlgs = %d\n" + " OsakVersion = 0x%x\n" + " PercentUsed = %d\n", + mh->bootRecordID, mh->NoOfBootImageBlocks, + mh->NoOfBinaryPartitions, + mh->NoOfBDTLPartitions, + mh->BlockMultiplierBits, mh->FormatFlags, + mh->OsakVersion, mh->PercentUsed); +//#endif + + vshift = this->phys_erase_shift + mh->BlockMultiplierBits; + + blocks = mtd->size >> vshift; + if (blocks > 32768) { + printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits); + return 0; + } + + blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift); + if (inftl_bbt_write && (blocks > mtd->erasesize)) { + printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported. 
FIX ME!\n"); + return 0; + } + + /* Scan the partitions */ + for (i = 0; (i < 4); i++) { + ip = &(mh->Partitions[i]); + ip->virtualUnits = le32_to_cpu(ip->virtualUnits); + ip->firstUnit = le32_to_cpu(ip->firstUnit); + ip->lastUnit = le32_to_cpu(ip->lastUnit); + ip->flags = le32_to_cpu(ip->flags); + ip->spareUnits = le32_to_cpu(ip->spareUnits); + ip->Reserved0 = le32_to_cpu(ip->Reserved0); + +//#ifdef CONFIG_MTD_DEBUG_VERBOSE +// if (CONFIG_MTD_DEBUG_VERBOSE >= 2) + printk(KERN_INFO " PARTITION[%d] ->\n" + " virtualUnits = %d\n" + " firstUnit = %d\n" + " lastUnit = %d\n" + " flags = 0x%x\n" + " spareUnits = %d\n", + i, ip->virtualUnits, ip->firstUnit, + ip->lastUnit, ip->flags, + ip->spareUnits); +//#endif + +/* + if ((i == 0) && (ip->firstUnit > 0)) { + parts[0].name = " DiskOnChip IPL / Media Header partition"; + parts[0].offset = 0; + parts[0].size = mtd->erasesize * ip->firstUnit; + numparts = 1; + } +*/ + + if (ip->flags & INFTL_BINARY) + parts[numparts].name = " DiskOnChip BDK partition"; + else + parts[numparts].name = " DiskOnChip BDTL partition"; + parts[numparts].offset = ip->firstUnit << vshift; + parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift; + numparts++; + if (ip->lastUnit > lastvunit) lastvunit = ip->lastUnit; + if (ip->flags & INFTL_LAST) break; + } + lastvunit++; + if ((lastvunit << vshift) < end) { + parts[numparts].name = " DiskOnChip Remainder partition"; + parts[numparts].offset = lastvunit << vshift; + parts[numparts].size = end - parts[numparts].offset; + numparts++; + } + return numparts; +} + +static int __init nftl_scan_bbt(struct mtd_info *mtd) +{ + int ret, numparts; + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + struct mtd_partition parts[2]; + + memset((char *) parts, 0, sizeof(parts)); + /* On NFTL, we have to find the media headers before we can read the + BBTs, since they're stored in the media header eraseblocks. */ + numparts = nftl_partscan(mtd, parts); + if (!numparts) return -EIO; + this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT | + NAND_BBT_SAVECONTENT | NAND_BBT_WRITE | + NAND_BBT_VERSION; + this->bbt_td->veroffs = 7; + this->bbt_td->pages[0] = doc->mh0_page + 1; + if (doc->mh1_page != -1) { + this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT | + NAND_BBT_SAVECONTENT | NAND_BBT_WRITE | + NAND_BBT_VERSION; + this->bbt_md->veroffs = 7; + this->bbt_md->pages[0] = doc->mh1_page + 1; + } else { + this->bbt_md = NULL; + } + + /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set. + At least as nand_bbt.c is currently written. */ + if ((ret = nand_scan_bbt(mtd, NULL))) + return ret; + add_mtd_device(mtd); +#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE) + if (!no_autopart) add_mtd_partitions(mtd, parts, numparts); +#endif + return 0; +} + +static int __init inftl_scan_bbt(struct mtd_info *mtd) +{ + int ret, numparts; + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + struct mtd_partition parts[5]; + + if (this->numchips > doc->chips_per_floor) { + printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n"); + return -EIO; + } + + if (mtd->size == (8<<20)) { +#if 0 +/* This doesn't seem to work for me. I get ECC errors on every page. */ + /* The Millennium 8MiB is actually an NFTL device! 
*/ + mtd->name = "DiskOnChip Millennium 8MiB (NFTL)"; + return nftl_scan_bbt(mtd); +#endif + printk(KERN_ERR "DiskOnChip Millennium 8MiB is not supported.\n"); + return -EIO; + } + + this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | + NAND_BBT_VERSION; + if (inftl_bbt_write) + this->bbt_td->options |= NAND_BBT_WRITE; + this->bbt_td->offs = 8; + this->bbt_td->len = 8; + this->bbt_td->veroffs = 7; + this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS; + this->bbt_td->reserved_block_code = 0x01; + this->bbt_td->pattern = "MSYS_BBT"; + + this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | + NAND_BBT_VERSION; + if (inftl_bbt_write) + this->bbt_md->options |= NAND_BBT_WRITE; + this->bbt_md->offs = 8; + this->bbt_md->len = 8; + this->bbt_md->veroffs = 7; + this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS; + this->bbt_md->reserved_block_code = 0x01; + this->bbt_md->pattern = "TBB_SYSM"; + + /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set. + At least as nand_bbt.c is currently written. */ + if ((ret = nand_scan_bbt(mtd, NULL))) + return ret; + memset((char *) parts, 0, sizeof(parts)); + numparts = inftl_partscan(mtd, parts); + /* At least for now, require the INFTL Media Header. We could probably + do without it for non-INFTL use, since all it gives us is + autopartitioning, but I want to give it more thought. */ + if (!numparts) return -EIO; + add_mtd_device(mtd); +#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE) + if (!no_autopart) add_mtd_partitions(mtd, parts, numparts); +#endif + return 0; +} + +static inline int __init doc2000_init(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + + this->write_byte = doc2000_write_byte; + this->read_byte = doc2000_read_byte; + this->write_buf = doc2000_writebuf; + this->read_buf = doc2000_readbuf; + this->verify_buf = doc2000_verifybuf; + this->scan_bbt = nftl_scan_bbt; + + doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO; + doc2000_count_chips(mtd); + mtd->name = "DiskOnChip 2000 (NFTL Model)"; + return (4 * doc->chips_per_floor); +} + +static inline int __init doc2001_init(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + + this->write_byte = doc2001_write_byte; + this->read_byte = doc2001_read_byte; + this->write_buf = doc2001_writebuf; + this->read_buf = doc2001_readbuf; + this->verify_buf = doc2001_verifybuf; + this->scan_bbt = inftl_scan_bbt; + + ReadDOC(doc->virtadr, ChipID); + ReadDOC(doc->virtadr, ChipID); + ReadDOC(doc->virtadr, ChipID); + if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) { + /* It's not a Millennium; it's one of the newer + DiskOnChip 2000 units with a similar ASIC. + Treat it like a Millennium, except that it + can have multiple chips. 
*/ + doc2000_count_chips(mtd); + mtd->name = "DiskOnChip 2000 (INFTL Model)"; + return (4 * doc->chips_per_floor); + } else { + /* Bog-standard Millennium */ + doc->chips_per_floor = 1; + mtd->name = "DiskOnChip Millennium"; + return 1; + } +} + +static inline int __init doc_probe(unsigned long physadr) +{ + unsigned char ChipID; + struct mtd_info *mtd; + struct nand_chip *nand; + struct doc_priv *doc; + unsigned long virtadr; + unsigned char save_control; + unsigned char tmp, tmpb, tmpc; + int reg, len, numchips; + int ret = 0; + + virtadr = (unsigned long)ioremap(physadr, DOC_IOREMAP_LEN); + if (!virtadr) { + printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr); + return -EIO; + } + + /* It's not possible to cleanly detect the DiskOnChip - the + * bootup procedure will put the device into reset mode, and + * it's not possible to talk to it without actually writing + * to the DOCControl register. So we store the current contents + * of the DOCControl register's location, in case we later decide + * that it's not a DiskOnChip, and want to put it back how we + * found it. + */ + save_control = ReadDOC(virtadr, DOCControl); + + /* Reset the DiskOnChip ASIC */ + WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, + virtadr, DOCControl); + WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, + virtadr, DOCControl); + + /* Enable the DiskOnChip ASIC */ + WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, + virtadr, DOCControl); + WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, + virtadr, DOCControl); + + ChipID = ReadDOC(virtadr, ChipID); + + switch(ChipID) { + case DOC_ChipID_Doc2k: + reg = DoC_2k_ECCStatus; + break; + case DOC_ChipID_DocMil: + reg = DoC_ECCConf; + break; + default: + ret = -ENODEV; + goto notfound; + } + /* Check the TOGGLE bit in the ECC register */ + tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT; + tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT; + tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT; + if ((tmp == tmpb) || (tmp != tmpc)) { + printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr); + ret = -ENODEV; + goto notfound; + } + + for (mtd = doclist; mtd; mtd = doc->nextdoc) { + nand = mtd->priv; + doc = (void *)nand->priv; + /* Use the alias resolution register to determine if this is + in fact the same DOC aliased to a new address. If writes + to one chip's alias resolution register change the value on + the other chip, they're the same chip. 
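+	   The probe below does this non-destructively: it writes the
+	   complement of the current value through the new window, checks
+	   whether the previously registered chip's window follows it, and
+	   then restores the original value before deciding.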
*/ + unsigned char oldval = ReadDOC(doc->virtadr, AliasResolution); + unsigned char newval = ReadDOC(virtadr, AliasResolution); + if (oldval != newval) + continue; + WriteDOC(~newval, virtadr, AliasResolution); + oldval = ReadDOC(doc->virtadr, AliasResolution); + WriteDOC(newval, virtadr, AliasResolution); // restore it + newval = ~newval; + if (oldval == newval) { + //printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr); + goto notfound; + } + } + + printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr); + + len = sizeof(struct mtd_info) + + sizeof(struct nand_chip) + + sizeof(struct doc_priv) + + (2 * sizeof(struct nand_bbt_descr)); + mtd = kmalloc(len, GFP_KERNEL); + if (!mtd) { + printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len); + ret = -ENOMEM; + goto fail; + } + memset(mtd, 0, len); + + nand = (struct nand_chip *) (mtd + 1); + doc = (struct doc_priv *) (nand + 1); + nand->bbt_td = (struct nand_bbt_descr *) (doc + 1); + nand->bbt_md = nand->bbt_td + 1; + + mtd->priv = (void *) nand; + mtd->owner = THIS_MODULE; + + nand->priv = (void *) doc; + nand->select_chip = doc200x_select_chip; + nand->hwcontrol = doc200x_hwcontrol; + nand->dev_ready = doc200x_dev_ready; + nand->waitfunc = doc200x_wait; + nand->block_bad = doc200x_block_bad; + nand->enable_hwecc = doc200x_enable_hwecc; + nand->calculate_ecc = doc200x_calculate_ecc; + nand->correct_data = doc200x_correct_data; + //nand->data_buf + nand->autooob = &doc200x_oobinfo; + nand->eccmode = NAND_ECC_HW6_512; + nand->options = NAND_USE_FLASH_BBT | NAND_HWECC_SYNDROME; + + doc->physadr = physadr; + doc->virtadr = virtadr; + doc->ChipID = ChipID; + doc->curfloor = -1; + doc->curchip = -1; + doc->mh0_page = -1; + doc->mh1_page = -1; + doc->nextdoc = doclist; + + if (ChipID == DOC_ChipID_Doc2k) + numchips = doc2000_init(mtd); + else + numchips = doc2001_init(mtd); + + if ((ret = nand_scan(mtd, numchips))) { + /* DBB note: i believe nand_release is necessary here, as + buffers may have been allocated in nand_base. Check with + Thomas. FIX ME! */ + /* nand_release will call del_mtd_device, but we haven't yet + added it. This is handled without incident by + del_mtd_device, as far as I can tell. */ + nand_release(mtd); + kfree(mtd); + goto fail; + } + + /* Success! */ + doclist = mtd; + return 0; + +notfound: + /* Put back the contents of the DOCControl register, in case it's not + actually a DiskOnChip. */ + WriteDOC(save_control, virtadr, DOCControl); +fail: + iounmap((void *)virtadr); + return ret; +} + +int __init init_nanddoc(void) +{ + int i; + + if (doc_config_location) { + printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location); + return doc_probe(doc_config_location); + } else { + for (i=0; (doc_locations[i] != 0xffffffff); i++) { + doc_probe(doc_locations[i]); + } + } + /* No banner message any more. Print a message if no DiskOnChip + found, so the user knows we at least tried. 
*/ + if (!doclist) { + printk(KERN_INFO "No valid DiskOnChip devices found\n"); + return -ENODEV; + } + return 0; +} + +void __exit cleanup_nanddoc(void) +{ + struct mtd_info *mtd, *nextmtd; + struct nand_chip *nand; + struct doc_priv *doc; + + for (mtd = doclist; mtd; mtd = nextmtd) { + nand = mtd->priv; + doc = (void *)nand->priv; + + nextmtd = doc->nextdoc; + nand_release(mtd); + iounmap((void *)doc->virtadr); + kfree(mtd); + } +} + +module_init(init_nanddoc); +module_exit(cleanup_nanddoc); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Woodhouse "); +MODULE_DESCRIPTION("M-Systems DiskOnChip 2000 and Millennium device driver\n"); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c new file mode 100644 index 000000000..596bc8f70 --- /dev/null +++ b/drivers/mtd/nand/nand_base.c @@ -0,0 +1,2581 @@ +/* + * drivers/mtd/nand.c + * + * Overview: + * This is the generic MTD driver for NAND flash devices. It should be + * capable of working with almost all NAND chips currently available. + * Basic support for AG-AND chips is provided. + * + * Additional technical information is available on + * http://www.linux-mtd.infradead.org/tech/nand.html + * + * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) + * 2002 Thomas Gleixner (tglx@linutronix.de) + * + * 02-08-2004 tglx: support for strange chips, which cannot auto increment + * pages on read / read_oob + * + * 03-17-2004 tglx: Check ready before auto increment check. Simon Bayes + * pointed this out, as he marked an auto increment capable chip + * as NOAUTOINCR in the board driver. + * Make reads over block boundaries work too + * + * 04-14-2004 tglx: first working version for 2k page size chips + * + * 05-19-2004 tglx: Basic support for Renesas AG-AND chips + * + * Credits: + * David Woodhouse for adding multichip support + * + * Aleph One Ltd. and Toby Churchill Ltd. for supporting the + * rework for 2K page size chips + * + * TODO: + * Enable cached programming for 2k page size chips + * Check, if mtd->ecctype should be set to MTD_ECC_HW + * if we have HW ecc support. + * The AG-AND chips have nice features for speed improvement, + * which are not supported yet. Read / program 4 pages in one go. + * + * $Id: nand_base.c,v 1.113 2004/07/14 16:31:31 gleixner Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE) +#include +#endif + +/* Define default oob placement schemes for large and small page devices */ +static struct nand_oobinfo nand_oob_8 = { + .useecc = MTD_NANDECC_AUTOPLACE, + .eccbytes = 3, + .eccpos = {0, 1, 2}, + .oobfree = { {3, 2}, {6, 2} } +}; + +static struct nand_oobinfo nand_oob_16 = { + .useecc = MTD_NANDECC_AUTOPLACE, + .eccbytes = 6, + .eccpos = {0, 1, 2, 3, 6, 7}, + .oobfree = { {8, 8} } +}; + +static struct nand_oobinfo nand_oob_64 = { + .useecc = MTD_NANDECC_AUTOPLACE, + .eccbytes = 24, + .eccpos = { + 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63}, + .oobfree = { {2, 38} } +}; + +/* This is used for padding purposes in nand_write_oob */ +static u_char ffchars[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; + +/* + * NAND low-level MTD interface functions + */ +static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len); +static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len); +static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len); + +static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf); +static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len, + size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel); +static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf); +static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf); +static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len, + size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel); +static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char *buf); +static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t * retlen); +static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel); +static int nand_erase (struct mtd_info *mtd, struct erase_info *instr); +static void nand_sync (struct mtd_info *mtd); + +/* Some internal functions */ +static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, u_char *oob_buf, + struct nand_oobinfo *oobsel, int mode); +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE +static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages, + u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode); +#else +#define nand_verify_pages(...) 
(0) +#endif + +static void nand_get_chip (struct nand_chip *this, struct mtd_info *mtd, int new_state); + +/** + * nand_release_chip - [GENERIC] release chip + * @mtd: MTD device structure + * + * Deselect, release chip lock and wake up anyone waiting on the device + */ +static void nand_release_chip (struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + + /* De-select the NAND device */ + this->select_chip(mtd, -1); + /* Release the chip */ + spin_lock_bh (&this->chip_lock); + this->state = FL_READY; + wake_up (&this->wq); + spin_unlock_bh (&this->chip_lock); +} + +/** + * nand_read_byte - [DEFAULT] read one byte from the chip + * @mtd: MTD device structure + * + * Default read function for 8bit buswith + */ +static u_char nand_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + return readb(this->IO_ADDR_R); +} + +/** + * nand_write_byte - [DEFAULT] write one byte to the chip + * @mtd: MTD device structure + * @byte: pointer to data byte to write + * + * Default write function for 8it buswith + */ +static void nand_write_byte(struct mtd_info *mtd, u_char byte) +{ + struct nand_chip *this = mtd->priv; + writeb(byte, this->IO_ADDR_W); +} + +/** + * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip + * @mtd: MTD device structure + * + * Default read function for 16bit buswith with + * endianess conversion + */ +static u_char nand_read_byte16(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + return (u_char) cpu_to_le16(readw(this->IO_ADDR_R)); +} + +/** + * nand_write_byte16 - [DEFAULT] write one byte endianess aware to the chip + * @mtd: MTD device structure + * @byte: pointer to data byte to write + * + * Default write function for 16bit buswith with + * endianess conversion + */ +static void nand_write_byte16(struct mtd_info *mtd, u_char byte) +{ + struct nand_chip *this = mtd->priv; + writew(le16_to_cpu((u16) byte), this->IO_ADDR_W); +} + +/** + * nand_read_word - [DEFAULT] read one word from the chip + * @mtd: MTD device structure + * + * Default read function for 16bit buswith without + * endianess conversion + */ +static u16 nand_read_word(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + return readw(this->IO_ADDR_R); +} + +/** + * nand_write_word - [DEFAULT] write one word to the chip + * @mtd: MTD device structure + * @word: data word to write + * + * Default write function for 16bit buswith without + * endianess conversion + */ +static void nand_write_word(struct mtd_info *mtd, u16 word) +{ + struct nand_chip *this = mtd->priv; + writew(word, this->IO_ADDR_W); +} + +/** + * nand_select_chip - [DEFAULT] control CE line + * @mtd: MTD device structure + * @chip: chipnumber to select, -1 for deselect + * + * Default select function for 1 chip devices. 
+ */ +static void nand_select_chip(struct mtd_info *mtd, int chip) +{ + struct nand_chip *this = mtd->priv; + switch(chip) { + case -1: + this->hwcontrol(mtd, NAND_CTL_CLRNCE); + break; + case 0: + this->hwcontrol(mtd, NAND_CTL_SETNCE); + break; + + default: + BUG(); + } +} + +/** + * nand_write_buf - [DEFAULT] write buffer to chip + * @mtd: MTD device structure + * @buf: data buffer + * @len: number of bytes to write + * + * Default write function for 8bit buswith + */ +static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; i<len; i++) + writeb(buf[i], this->IO_ADDR_W); +} + +/** + * nand_read_buf - [DEFAULT] read chip data into buffer + * @mtd: MTD device structure + * @buf: buffer to store date + * @len: number of bytes to read + * + * Default read function for 8bit buswith + */ +static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; i<len; i++) + buf[i] = readb(this->IO_ADDR_R); +} + +/** + * nand_verify_buf - [DEFAULT] Verify chip data against buffer + * @mtd: MTD device structure + * @buf: buffer containing the data to compare + * @len: number of bytes to compare + * + * Default verify function for 8bit buswith + */ +static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; i<len; i++) + if (buf[i] != readb(this->IO_ADDR_R)) + return -EFAULT; + + return 0; +} + +/** + * nand_write_buf16 - [DEFAULT] write buffer to chip + * @mtd: MTD device structure + * @buf: data buffer + * @len: number of bytes to write + * + * Default write function for 16bit buswith + */ +static void nand_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + u16 *p = (u16 *) buf; + len >>= 1; + + for (i=0; i<len; i++) + writew(p[i], this->IO_ADDR_W); + +} + +/** + * nand_read_buf16 - [DEFAULT] read chip data into buffer + * @mtd: MTD device structure + * @buf: buffer to store date + * @len: number of bytes to read + * + * Default read function for 16bit buswith + */ +static void nand_read_buf16(struct mtd_info *mtd, u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + u16 *p = (u16 *) buf; + len >>= 1; + + for (i=0; i<len; i++) + p[i] = readw(this->IO_ADDR_R); +} + +/** + * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer + * @mtd: MTD device structure + * @buf: buffer containing the data to compare + * @len: number of bytes to compare + * + * Default verify function for 16bit buswith + */ +static int nand_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + u16 *p = (u16 *) buf; + len >>= 1; + + for (i=0; i<len; i++) + if (p[i] != readw(this->IO_ADDR_R)) + return -EFAULT; + + return 0; +} + +/** + * nand_block_bad - [DEFAULT] Read bad block marker from the chip + * @mtd: MTD device structure + * @ofs: offset from device start + * @getchip: 0, if the chip is already selected + * + * Check, if the block is bad.
+ */ +static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) +{ + int page, chipnr, res = 0; + struct nand_chip *this = mtd->priv; + u16 bad; + + if (getchip) { + page = (int)(ofs >> this->page_shift); + chipnr = (int)(ofs >> this->chip_shift); + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd, FL_READING); + + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + } else + page = (int) ofs; + + if (this->options & NAND_BUSWIDTH_16) { + this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos & 0xFE, page & this->pagemask); + bad = cpu_to_le16(this->read_word(mtd)); + if (this->badblockpos & 0x1) + bad >>= 1; + if ((bad & 0xFF) != 0xff) + res = 1; + } else { + this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos, page & this->pagemask); + if (this->read_byte(mtd) != 0xff) + res = 1; + } + + if (getchip) { + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + } + + return res; +} + +/** + * nand_default_block_markbad - [DEFAULT] mark a block bad + * @mtd: MTD device structure + * @ofs: offset from device start + * + * This is the default implementation, which can be overridden by + * a hardware specific driver. +*/ +static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ + struct nand_chip *this = mtd->priv; + u_char buf[2] = {0, 0}; + size_t retlen; + int block; + + /* Get block number */ + block = ((int) ofs) >> this->bbt_erase_shift; + this->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); + + /* Do we have a flash based bad block table ? */ + if (this->options & NAND_USE_FLASH_BBT) + return nand_update_bbt (mtd, ofs); + + /* We write two bytes, so we dont have to mess with 16 bit access */ + ofs += mtd->oobsize + (this->badblockpos & ~0x01); + return nand_write_oob (mtd, ofs , 2, &retlen, buf); +} + +/** + * nand_check_wp - [GENERIC] check if the chip is write protected + * @mtd: MTD device structure + * Check, if the device is write protected + * + * The function expects, that the device is already selected + */ +static int nand_check_wp (struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + /* Check the WP bit */ + this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1); + return (this->read_byte(mtd) & 0x80) ? 0 : 1; +} + +/** + * nand_block_checkbad - [GENERIC] Check if a block is marked bad + * @mtd: MTD device structure + * @ofs: offset from device start + * @getchip: 0, if the chip is already selected + * @allowbbt: 1, if its allowed to access the bbt area + * + * Check, if the block is bad. Either by reading the bad block table or + * calling of the scan function. + */ +static int nand_block_checkbad (struct mtd_info *mtd, loff_t ofs, int getchip, int allowbbt) +{ + struct nand_chip *this = mtd->priv; + + if (!this->bbt) + return this->block_bad(mtd, ofs, getchip); + + /* Return info from the table */ + return nand_isbad_bbt (mtd, ofs, allowbbt); +} + +/** + * nand_command - [DEFAULT] Send command to NAND device + * @mtd: MTD device structure + * @command: the command to be sent + * @column: the column address for this command, -1 if none + * @page_addr: the page address for this command, -1 if none + * + * Send command to NAND device. 
This function is used for small page + * devices (256/512 Bytes per page) + */ +static void nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr) +{ + register struct nand_chip *this = mtd->priv; + + /* Begin command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_SETCLE); + /* + * Write out the command to the device. + */ + if (command == NAND_CMD_SEQIN) { + int readcmd; + + if (column >= mtd->oobblock) { + /* OOB area */ + column -= mtd->oobblock; + readcmd = NAND_CMD_READOOB; + } else if (column < 256) { + /* First 256 bytes --> READ0 */ + readcmd = NAND_CMD_READ0; + } else { + column -= 256; + readcmd = NAND_CMD_READ1; + } + this->write_byte(mtd, readcmd); + } + this->write_byte(mtd, command); + + /* Set ALE and clear CLE to start address cycle */ + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + + if (column != -1 || page_addr != -1) { + this->hwcontrol(mtd, NAND_CTL_SETALE); + + /* Serially input address */ + if (column != -1) { + /* Adjust columns for 16 bit buswidth */ + if (this->options & NAND_BUSWIDTH_16) + column >>= 1; + this->write_byte(mtd, column); + } + if (page_addr != -1) { + this->write_byte(mtd, (unsigned char) (page_addr & 0xff)); + this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff)); + /* One more address cycle for higher density devices */ + if (this->chipsize & 0x0c000000) + this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f)); + } + /* Latch in address */ + this->hwcontrol(mtd, NAND_CTL_CLRALE); + } + + /* + * program and erase have their own busy handlers + * status and sequential in needs no delay + */ + switch (command) { + + case NAND_CMD_PAGEPROG: + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: + case NAND_CMD_SEQIN: + case NAND_CMD_STATUS: + return; + + case NAND_CMD_RESET: + if (this->dev_ready) + break; + udelay(this->chip_delay); + this->hwcontrol(mtd, NAND_CTL_SETCLE); + this->write_byte(mtd, NAND_CMD_STATUS); + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + while ( !(this->read_byte(mtd) & 0x40)); + return; + + /* This applies to read commands */ + default: + /* + * If we don't have access to the busy pin, we apply the given + * command delay + */ + if (!this->dev_ready) { + udelay (this->chip_delay); + return; + } + } + + /* Apply this short delay always to ensure that we do wait tWB in + * any case on any machine. */ + ndelay (100); + /* wait until command is processed */ + while (!this->dev_ready(mtd)); +} + +/** + * nand_command_lp - [DEFAULT] Send command to NAND large page device + * @mtd: MTD device structure + * @command: the command to be sent + * @column: the column address for this command, -1 if none + * @page_addr: the page address for this command, -1 if none + * + * Send command to NAND device. This is the version for the new large page devices + * We dont have the seperate regions as we have in the small page devices. + * We must emulate NAND_CMD_READOOB to keep the code compatible. + * + */ +static void nand_command_lp (struct mtd_info *mtd, unsigned command, int column, int page_addr) +{ + register struct nand_chip *this = mtd->priv; + + /* Emulate NAND_CMD_READOOB */ + if (command == NAND_CMD_READOOB) { + column += mtd->oobblock; + command = NAND_CMD_READ0; + } + + + /* Begin command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_SETCLE); + /* Write out the command to the device. 
*/ + this->write_byte(mtd, command); + /* End command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + + if (column != -1 || page_addr != -1) { + this->hwcontrol(mtd, NAND_CTL_SETALE); + + /* Serially input address */ + if (column != -1) { + /* Adjust columns for 16 bit buswidth */ + if (this->options & NAND_BUSWIDTH_16) + column >>= 1; + this->write_byte(mtd, column & 0xff); + this->write_byte(mtd, column >> 8); + } + if (page_addr != -1) { + this->write_byte(mtd, (unsigned char) (page_addr & 0xff)); + this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff)); + /* One more address cycle for devices > 128MiB */ + if (this->chipsize > (128 << 20)) + this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0xff)); + } + /* Latch in address */ + this->hwcontrol(mtd, NAND_CTL_CLRALE); + } + + /* + * program and erase have their own busy handlers + * status and sequential in needs no delay + */ + switch (command) { + + case NAND_CMD_CACHEDPROG: + case NAND_CMD_PAGEPROG: + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: + case NAND_CMD_SEQIN: + case NAND_CMD_STATUS: + return; + + + case NAND_CMD_RESET: + if (this->dev_ready) + break; + udelay(this->chip_delay); + this->hwcontrol(mtd, NAND_CTL_SETCLE); + this->write_byte(mtd, NAND_CMD_STATUS); + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + while ( !(this->read_byte(mtd) & 0x40)); + return; + + case NAND_CMD_READ0: + /* Begin command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_SETCLE); + /* Write out the start read command */ + this->write_byte(mtd, NAND_CMD_READSTART); + /* End command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + /* Fall through into ready check */ + + /* This applies to read commands */ + default: + /* + * If we don't have access to the busy pin, we apply the given + * command delay + */ + if (!this->dev_ready) { + udelay (this->chip_delay); + return; + } + } + + /* Apply this short delay always to ensure that we do wait tWB in + * any case on any machine. */ + ndelay (100); + /* wait until command is processed */ + while (!this->dev_ready(mtd)); +} + +/** + * nand_get_chip - [GENERIC] Get chip for selected access + * @this: the nand chip descriptor + * @mtd: MTD device structure + * @new_state: the state which is requested + * + * Get the device and lock it for exclusive access + */ +static void nand_get_chip (struct nand_chip *this, struct mtd_info *mtd, int new_state) +{ + + DECLARE_WAITQUEUE (wait, current); + + /* + * Grab the lock and see if the device is available + */ +retry: + spin_lock_bh (&this->chip_lock); + + if (this->state == FL_READY) { + this->state = new_state; + spin_unlock_bh (&this->chip_lock); + return; + } + + set_current_state (TASK_UNINTERRUPTIBLE); + add_wait_queue (&this->wq, &wait); + spin_unlock_bh (&this->chip_lock); + schedule (); + remove_wait_queue (&this->wq, &wait); + goto retry; +} + +/** + * nand_wait - [DEFAULT] wait until the command is done + * @mtd: MTD device structure + * @this: NAND chip structure + * @state: state to select the max. timeout value + * + * Wait for command done. This applies to erase and program only + * Erase can take up to 400ms and program up to 20ms according to + * general NAND and SmartMedia specs + * +*/ +static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state) +{ + + unsigned long timeo = jiffies; + int status; + + if (state == FL_ERASING) + timeo += (HZ * 400) / 1000; + else + timeo += (HZ * 20) / 1000; + + /* Apply this short delay always to ensure that we do wait tWB in + * any case on any machine. 
*/ + ndelay (100); + + spin_lock_bh (&this->chip_lock); + if ((state == FL_ERASING) && (this->options & NAND_IS_AND)) + this->cmdfunc (mtd, NAND_CMD_STATUS_MULTI, -1, -1); + else + this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1); + + while (time_before(jiffies, timeo)) { + /* Check, if we were interrupted */ + if (this->state != state) { + spin_unlock_bh (&this->chip_lock); + return 0; + } + if (this->dev_ready) { + if (this->dev_ready(mtd)) + break; + } + if (this->read_byte(mtd) & NAND_STATUS_READY) + break; + + spin_unlock_bh (&this->chip_lock); + yield (); + spin_lock_bh (&this->chip_lock); + } + status = (int) this->read_byte(mtd); + spin_unlock_bh (&this->chip_lock); + + return status; +} + +/** + * nand_write_page - [GENERIC] write one page + * @mtd: MTD device structure + * @this: NAND chip structure + * @page: startpage inside the chip, must be called with (page & this->pagemask) + * @oob_buf: out of band data buffer + * @oobsel: out of band selecttion structre + * @cached: 1 = enable cached programming if supported by chip + * + * Nand_page_program function is used for write and writev ! + * This function will always program a full page of data + * If you call it with a non page aligned buffer, you're lost :) + * + * Cached programming is not supported yet. + */ +static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, + u_char *oob_buf, struct nand_oobinfo *oobsel, int cached) +{ + int i, status; + u_char ecc_code[8]; + int eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE; + int *oob_config = oobsel->eccpos; + int datidx = 0, eccidx = 0, eccsteps = this->eccsteps; + int eccbytes = 0; + + /* FIXME: Enable cached programming */ + cached = 0; + + /* Send command to begin auto page programming */ + this->cmdfunc (mtd, NAND_CMD_SEQIN, 0x00, page); + + /* Write out complete page of data, take care of eccmode */ + switch (eccmode) { + /* No ecc, write all */ + case NAND_ECC_NONE: + printk (KERN_WARNING "Writing data without ECC to NAND-FLASH is not recommended\n"); + this->write_buf(mtd, this->data_poi, mtd->oobblock); + break; + + /* Software ecc 3/256, write all */ + case NAND_ECC_SOFT: + for (; eccsteps; eccsteps--) { + this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code); + for (i = 0; i < 3; i++, eccidx++) + oob_buf[oob_config[eccidx]] = ecc_code[i]; + datidx += this->eccsize; + } + this->write_buf(mtd, this->data_poi, mtd->oobblock); + break; + + /* Hardware ecc 8 byte / 512 byte data */ + case NAND_ECC_HW8_512: + eccbytes += 2; + /* Hardware ecc 6 byte / 512 byte data */ + case NAND_ECC_HW6_512: + eccbytes += 3; + /* Hardware ecc 3 byte / 256 data */ + /* Hardware ecc 3 byte / 512 byte data */ + case NAND_ECC_HW3_256: + case NAND_ECC_HW3_512: + eccbytes += 3; + for (; eccsteps; eccsteps--) { + /* enable hardware ecc logic for write */ + this->enable_hwecc(mtd, NAND_ECC_WRITE); + this->write_buf(mtd, &this->data_poi[datidx], this->eccsize); + this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code); + for (i = 0; i < eccbytes; i++, eccidx++) + oob_buf[oob_config[eccidx]] = ecc_code[i]; + /* If the hardware ecc provides syndromes then + * the ecc code must be written immidiately after + * the data bytes (words) */ + if (this->options & NAND_HWECC_SYNDROME) + this->write_buf(mtd, ecc_code, eccbytes); + + datidx += this->eccsize; + } + break; + + default: + printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode); + BUG(); + } + + /* Write out OOB data */ + if (this->options & NAND_HWECC_SYNDROME) + this->write_buf(mtd, 
&oob_buf[oobsel->eccbytes], mtd->oobsize - oobsel->eccbytes); + else + this->write_buf(mtd, oob_buf, mtd->oobsize); + + /* Send command to actually program the data */ + this->cmdfunc (mtd, cached ? NAND_CMD_CACHEDPROG : NAND_CMD_PAGEPROG, -1, -1); + + if (!cached) { + /* call wait ready function */ + status = this->waitfunc (mtd, this, FL_WRITING); + /* See if device thinks it succeeded */ + if (status & 0x01) { + DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write, page 0x%08x, ", __FUNCTION__, page); + return -EIO; + } + } else { + /* FIXME: Implement cached programming ! */ + /* wait until cache is ready*/ + // status = this->waitfunc (mtd, this, FL_CACHEDRPG); + } + return 0; +} + +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE +/** + * nand_verify_pages - [GENERIC] verify the chip contents after a write + * @mtd: MTD device structure + * @this: NAND chip structure + * @page: startpage inside the chip, must be called with (page & this->pagemask) + * @numpages: number of pages to verify + * @oob_buf: out of band data buffer + * @oobsel: out of band selecttion structre + * @chipnr: number of the current chip + * @oobmode: 1 = full buffer verify, 0 = ecc only + * + * The NAND device assumes that it is always writing to a cleanly erased page. + * Hence, it performs its internal write verification only on bits that + * transitioned from 1 to 0. The device does NOT verify the whole page on a + * byte by byte basis. It is possible that the page was not completely erased + * or the page is becoming unusable due to wear. The read with ECC would catch + * the error later when the ECC page check fails, but we would rather catch + * it early in the page write stage. Better to write no data than invalid data. + */ +static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages, + u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode) +{ + int i, j, datidx = 0, oobofs = 0, res = -EIO; + int eccsteps = this->eccsteps; + int hweccbytes; + u_char oobdata[64]; + + hweccbytes = (this->options & NAND_HWECC_SYNDROME) ? (oobsel->eccbytes / eccsteps) : 0; + + /* Send command to read back the first page */ + this->cmdfunc (mtd, NAND_CMD_READ0, 0, page); + + for(;;) { + for (j = 0; j < eccsteps; j++) { + /* Loop through and verify the data */ + if (this->verify_buf(mtd, &this->data_poi[datidx], mtd->eccsize)) { + DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page); + goto out; + } + datidx += mtd->eccsize; + /* Have we a hw generator layout ? 
*/ + if (!hweccbytes) + continue; + if (this->verify_buf(mtd, &this->oob_buf[oobofs], hweccbytes)) { + DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page); + goto out; + } + oobofs += hweccbytes; + } + + /* check, if we must compare all data or if we just have to + * compare the ecc bytes + */ + if (oobmode) { + if (this->verify_buf(mtd, &oob_buf[oobofs], mtd->oobsize - hweccbytes * eccsteps)) { + DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page); + goto out; + } + } else { + /* Read always, else autoincrement fails */ + this->read_buf(mtd, oobdata, mtd->oobsize - hweccbytes * eccsteps); + + if (oobsel->useecc != MTD_NANDECC_OFF && !hweccbytes) { + int ecccnt = oobsel->eccbytes; + + for (i = 0; i < ecccnt; i++) { + int idx = oobsel->eccpos[i]; + if (oobdata[idx] != oob_buf[oobofs + idx] ) { + DEBUG (MTD_DEBUG_LEVEL0, + "%s: Failed ECC write " + "verify, page 0x%08x, " "%6i bytes were succesful\n", __FUNCTION__, page, i); + goto out; + } + } + } + } + oobofs += mtd->oobsize - hweccbytes * eccsteps; + page++; + numpages--; + + /* Apply delay or wait for ready/busy pin + * Do this before the AUTOINCR check, so no problems + * arise if a chip which does auto increment + * is marked as NOAUTOINCR by the board driver. + * Do this also before returning, so the chip is + * ready for the next command. + */ + if (!this->dev_ready) + udelay (this->chip_delay); + else + while (!this->dev_ready(mtd)); + + /* All done, return happy */ + if (!numpages) + return 0; + + + /* Check, if the chip supports auto page increment */ + if (!NAND_CANAUTOINCR(this)) + this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page); + } + /* + * Terminate the read command. We come here in case of an error + * So we must issue a reset command. 
+ */ +out: + this->cmdfunc (mtd, NAND_CMD_RESET, -1, -1); + return res; +} +#endif + +/** + * nand_read - [MTD Interface] MTD compability function for nand_read_ecc + * @mtd: MTD device structure + * @from: offset to read from + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put data + * + * This function simply calls nand_read_ecc with oob buffer and oobsel = NULL +*/ +static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf) +{ + return nand_read_ecc (mtd, from, len, retlen, buf, NULL, NULL); +} + + +/** + * nand_read_ecc - [MTD Interface] Read data with ECC + * @mtd: MTD device structure + * @from: offset to read from + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put data + * @oob_buf: filesystem supplied oob data buffer + * @oobsel: oob selection structure + * + * NAND read with ECC + */ +static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len, + size_t * retlen, u_char * buf, u_char * oob_buf, struct nand_oobinfo *oobsel) +{ + int i, j, col, realpage, page, end, ecc, chipnr, sndcmd = 1; + int read = 0, oob = 0, ecc_status = 0, ecc_failed = 0; + struct nand_chip *this = mtd->priv; + u_char *data_poi, *oob_data = oob_buf; + u_char ecc_calc[32]; + u_char ecc_code[32]; + int eccmode, eccsteps; + int *oob_config, datidx; + int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1; + int eccbytes = 3; + int compareecc = 1; + int oobreadlen; + + + DEBUG (MTD_DEBUG_LEVEL3, "nand_read_ecc: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); + + /* Do not allow reads past end of device */ + if ((from + len) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: Attempt read beyond end of device\n"); + *retlen = 0; + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd ,FL_READING); + + /* use userspace supplied oobinfo, if zero */ + if (oobsel == NULL) + oobsel = &mtd->oobinfo; + + /* Autoplace of oob data ? Use the default placement scheme */ + if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) + oobsel = this->autooob; + + eccmode = oobsel->useecc ? 
this->eccmode : NAND_ECC_NONE; + oob_config = oobsel->eccpos; + + /* Select the NAND device */ + chipnr = (int)(from >> this->chip_shift); + this->select_chip(mtd, chipnr); + + /* First we calculate the starting page */ + realpage = (int) (from >> this->page_shift); + page = realpage & this->pagemask; + + /* Get raw starting column */ + col = from & (mtd->oobblock - 1); + + end = mtd->oobblock; + ecc = this->eccsize; + switch (eccmode) { + case NAND_ECC_HW6_512: /* Hardware ECC 6 byte / 512 byte data */ + eccbytes = 6; + break; + case NAND_ECC_HW8_512: /* Hardware ECC 8 byte / 512 byte data */ + eccbytes = 8; + break; + case NAND_ECC_NONE: + compareecc = 0; + break; + } + + if (this->options & NAND_HWECC_SYNDROME) + compareecc = 0; + + oobreadlen = mtd->oobsize; + if (this->options & NAND_HWECC_SYNDROME) + oobreadlen -= oobsel->eccbytes; + + /* Loop until all data read */ + while (read < len) { + + int aligned = (!col && (len - read) >= end); + /* + * If the read is not page aligned, we have to read into data buffer + * due to ecc, else we read into return buffer direct + */ + if (aligned) + data_poi = &buf[read]; + else + data_poi = this->data_buf; + + /* Check, if we have this page in the buffer + * + * FIXME: Make it work when we must provide oob data too, + * check the usage of data_buf oob field + */ + if (realpage == this->pagebuf && !oob_buf) { + /* aligned read ? */ + if (aligned) + memcpy (data_poi, this->data_buf, end); + goto readdata; + } + + /* Check, if we must send the read command */ + if (sndcmd) { + this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page); + sndcmd = 0; + } + + /* get oob area, if we have no oob buffer from fs-driver */ + if (!oob_buf || oobsel->useecc == MTD_NANDECC_AUTOPLACE) + oob_data = &this->data_buf[end]; + + eccsteps = this->eccsteps; + + switch (eccmode) { + case NAND_ECC_NONE: { /* No ECC, Read in a page */ + static unsigned long lastwhinge = 0; + if ((lastwhinge / HZ) != (jiffies / HZ)) { + printk (KERN_WARNING "Reading data from NAND FLASH without ECC is not recommended\n"); + lastwhinge = jiffies; + } + this->read_buf(mtd, data_poi, end); + break; + } + + case NAND_ECC_SOFT: /* Software ECC 3/256: Read in a page + oob data */ + this->read_buf(mtd, data_poi, end); + for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=3, datidx += ecc) + this->calculate_ecc(mtd, &data_poi[datidx], &ecc_calc[i]); + break; + + case NAND_ECC_HW3_256: /* Hardware ECC 3 byte /256 byte data */ + case NAND_ECC_HW3_512: /* Hardware ECC 3 byte /512 byte data */ + case NAND_ECC_HW6_512: /* Hardware ECC 6 byte / 512 byte data */ + case NAND_ECC_HW8_512: /* Hardware ECC 8 byte / 512 byte data */ + for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=eccbytes, datidx += ecc) { + this->enable_hwecc(mtd, NAND_ECC_READ); + this->read_buf(mtd, &data_poi[datidx], ecc); + + /* HW ecc with syndrome calculation must read the + * syndrome from flash immidiately after the data */ + if (!compareecc) { + /* Some hw ecc generators need to know when the + * syndrome is read from flash */ + this->enable_hwecc(mtd, NAND_ECC_READSYN); + this->read_buf(mtd, &oob_data[i], eccbytes); + /* We calc error correction directly, it checks the hw + * generator for an error, reads back the syndrome and + * does the error correction on the fly */ + if (this->correct_data(mtd, &data_poi[datidx], &oob_data[i], &ecc_code[i]) == -1) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: " + "Failed ECC read, page 0x%08x on chip %d\n", page, chipnr); + ecc_failed++; + } + } else { + this->calculate_ecc(mtd, &data_poi[datidx], 
&ecc_calc[i]); + } + } + break; + + default: + printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode); + BUG(); + } + + /* read oobdata */ + this->read_buf(mtd, &oob_data[mtd->oobsize - oobreadlen], oobreadlen); + + /* Skip ECC check, if not requested (ECC_NONE or HW_ECC with syndromes) */ + if (!compareecc) + goto readoob; + + /* Pick the ECC bytes out of the oob data */ + for (j = 0; j < oobsel->eccbytes; j++) + ecc_code[j] = oob_data[oob_config[j]]; + + /* correct data, if neccecary */ + for (i = 0, j = 0, datidx = 0; i < this->eccsteps; i++, datidx += ecc) { + ecc_status = this->correct_data(mtd, &data_poi[datidx], &ecc_code[j], &ecc_calc[j]); + + /* Get next chunk of ecc bytes */ + j += eccbytes; + + /* Check, if we have a fs supplied oob-buffer, + * This is the legacy mode. Used by YAFFS1 + * Should go away some day + */ + if (oob_buf && oobsel->useecc == MTD_NANDECC_PLACE) { + int *p = (int *)(&oob_data[mtd->oobsize]); + p[i] = ecc_status; + } + + if (ecc_status == -1) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: " "Failed ECC read, page 0x%08x\n", page); + ecc_failed++; + } + } + + readoob: + /* check, if we have a fs supplied oob-buffer */ + if (oob_buf) { + /* without autoplace. Legacy mode used by YAFFS1 */ + switch(oobsel->useecc) { + case MTD_NANDECC_AUTOPLACE: + /* Walk through the autoplace chunks */ + for (i = 0, j = 0; j < mtd->oobavail; i++) { + int from = oobsel->oobfree[i][0]; + int num = oobsel->oobfree[i][1]; + memcpy(&oob_buf[oob], &oob_data[from], num); + j+= num; + } + oob += mtd->oobavail; + break; + case MTD_NANDECC_PLACE: + /* YAFFS1 legacy mode */ + oob_data += this->eccsteps * sizeof (int); + default: + oob_data += mtd->oobsize; + } + } + readdata: + /* Partial page read, transfer data into fs buffer */ + if (!aligned) { + for (j = col; j < end && read < len; j++) + buf[read++] = data_poi[j]; + this->pagebuf = realpage; + } else + read += mtd->oobblock; + + /* Apply delay or wait for ready/busy pin + * Do this before the AUTOINCR check, so no problems + * arise if a chip which does auto increment + * is marked as NOAUTOINCR by the board driver. + */ + if (!this->dev_ready) + udelay (this->chip_delay); + else + while (!this->dev_ready(mtd)); + + if (read == len) + break; + + /* For subsequent reads align to page boundary. */ + col = 0; + /* Increment page address */ + realpage++; + + page = realpage & this->pagemask; + /* Check, if we cross a chip boundary */ + if (!page) { + chipnr++; + this->select_chip(mtd, -1); + this->select_chip(mtd, chipnr); + } + /* Check, if the chip supports auto page increment + * or if we have hit a block boundary. + */ + if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) + sndcmd = 1; + } + + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + /* + * Return success, if no ECC failures, else -EIO + * fs driver will take care of that, because + * retlen == desired len and result == -EIO + */ + *retlen = read; + return ecc_failed ? 
-EIO : 0; +} + +/** + * nand_read_oob - [MTD Interface] NAND read out-of-band + * @mtd: MTD device structure + * @from: offset to read from + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put data + * + * NAND read out-of-band data from the spare area + */ +static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf) +{ + int i, col, page, chipnr; + struct nand_chip *this = mtd->priv; + int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1; + + DEBUG (MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); + + /* Shift to get page */ + page = (int)(from >> this->page_shift); + chipnr = (int)(from >> this->chip_shift); + + /* Mask to get column */ + col = from & (mtd->oobsize - 1); + + /* Initialize return length value */ + *retlen = 0; + + /* Do not allow reads past end of device */ + if ((from + len) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_read_oob: Attempt read beyond end of device\n"); + *retlen = 0; + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd , FL_READING); + + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + + /* Send the read command */ + this->cmdfunc (mtd, NAND_CMD_READOOB, col, page & this->pagemask); + /* + * Read the data, if we read more than one page + * oob data, let the device transfer the data ! + */ + i = 0; + while (i < len) { + int thislen = mtd->oobsize - col; + thislen = min_t(int, thislen, len); + this->read_buf(mtd, &buf[i], thislen); + i += thislen; + + /* Apply delay or wait for ready/busy pin + * Do this before the AUTOINCR check, so no problems + * arise if a chip which does auto increment + * is marked as NOAUTOINCR by the board driver. + */ + if (!this->dev_ready) + udelay (this->chip_delay); + else + while (!this->dev_ready(mtd)); + + /* Read more ? */ + if (i < len) { + page++; + col = 0; + + /* Check, if we cross a chip boundary */ + if (!(page & this->pagemask)) { + chipnr++; + this->select_chip(mtd, -1); + this->select_chip(mtd, chipnr); + } + + /* Check, if the chip supports auto page increment + * or if we have hit a block boundary. 
+ */ + if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) { + /* For subsequent page reads set offset to 0 */ + this->cmdfunc (mtd, NAND_CMD_READOOB, 0x0, page & this->pagemask); + } + } + } + + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + /* Return happy */ + *retlen = len; + return 0; +} + +/** + * nand_read_raw - [GENERIC] Read raw data including oob into buffer + * @mtd: MTD device structure + * @buf: temporary buffer + * @from: offset to read from + * @len: number of bytes to read + * @ooblen: number of oob data bytes to read + * + * Read raw data including oob into buffer + */ +int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_t len, size_t ooblen) +{ + struct nand_chip *this = mtd->priv; + int page = (int) (from >> this->page_shift); + int chip = (int) (from >> this->chip_shift); + int sndcmd = 1; + int cnt = 0; + int pagesize = mtd->oobblock + mtd->oobsize; + int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1; + + /* Do not allow reads past end of device */ + if ((from + len) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_read_raw: Attempt read beyond end of device\n"); + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd , FL_READING); + + this->select_chip (mtd, chip); + + /* Add requested oob length */ + len += ooblen; + + while (len) { + if (sndcmd) + this->cmdfunc (mtd, NAND_CMD_READ0, 0, page & this->pagemask); + sndcmd = 0; + + this->read_buf (mtd, &buf[cnt], pagesize); + + len -= pagesize; + cnt += pagesize; + page++; + + if (!this->dev_ready) + udelay (this->chip_delay); + else + while (!this->dev_ready(mtd)); + + /* Check, if the chip supports auto page increment */ + if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) + sndcmd = 1; + } + + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + return 0; +} + + +/** + * nand_prepare_oobbuf - [GENERIC] Prepare the out of band buffer + * @mtd: MTD device structure + * @fsbuf: buffer given by fs driver + * @oobsel: out of band selection structre + * @autoplace: 1 = place given buffer into the oob bytes + * @numpages: number of pages to prepare + * + * Return: + * 1. Filesystem buffer available and autoplacement is off, + * return filesystem buffer + * 2. No filesystem buffer or autoplace is off, return internal + * buffer + * 3. Filesystem buffer is given and autoplace selected + * put data from fs buffer into internal buffer and + * retrun internal buffer + * + * Note: The internal buffer is filled with 0xff. This must + * be done only once, when no autoplacement happens + * Autoplacement sets the buffer dirty flag, which + * forces the 0xff fill before using the buffer again. 
+ * +*/ +static u_char * nand_prepare_oobbuf (struct mtd_info *mtd, u_char *fsbuf, struct nand_oobinfo *oobsel, + int autoplace, int numpages) +{ + struct nand_chip *this = mtd->priv; + int i, len, ofs; + + /* Zero copy fs supplied buffer */ + if (fsbuf && !autoplace) + return fsbuf; + + /* Check, if the buffer must be filled with ff again */ + if (this->oobdirty) { + memset (this->oob_buf, 0xff, + mtd->oobsize << (this->phys_erase_shift - this->page_shift)); + this->oobdirty = 0; + } + + /* If we have no autoplacement or no fs buffer use the internal one */ + if (!autoplace || !fsbuf) + return this->oob_buf; + + /* Walk through the pages and place the data */ + this->oobdirty = 1; + ofs = 0; + while (numpages--) { + for (i = 0, len = 0; len < mtd->oobavail; i++) { + int to = ofs + oobsel->oobfree[i][0]; + int num = oobsel->oobfree[i][1]; + memcpy (&this->oob_buf[to], fsbuf, num); + len += num; + fsbuf += num; + } + ofs += mtd->oobavail; + } + return this->oob_buf; +} + +#define NOTALIGNED(x) (x & (mtd->oobblock-1)) != 0 + +/** + * nand_write - [MTD Interface] compability function for nand_write_ecc + * @mtd: MTD device structure + * @to: offset to write to + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of written bytes + * @buf: the data to write + * + * This function simply calls nand_write_ecc with oob buffer and oobsel = NULL + * +*/ +static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf) +{ + return (nand_write_ecc (mtd, to, len, retlen, buf, NULL, NULL)); +} + +/** + * nand_write_ecc - [MTD Interface] NAND write with ECC + * @mtd: MTD device structure + * @to: offset to write to + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of written bytes + * @buf: the data to write + * @eccbuf: filesystem supplied oob data buffer + * @oobsel: oob selection structure + * + * NAND write with ECC + */ +static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len, + size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel) +{ + int startpage, page, ret = -EIO, oob = 0, written = 0, chipnr; + int autoplace = 0, numpages, totalpages; + struct nand_chip *this = mtd->priv; + u_char *oobbuf, *bufstart; + int ppblock = (1 << (this->phys_erase_shift - this->page_shift)); + + DEBUG (MTD_DEBUG_LEVEL3, "nand_write_ecc: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); + + /* Initialize retlen, in case of early exit */ + *retlen = 0; + + /* Do not allow write past end of device */ + if ((to + len) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: Attempt to write past end of page\n"); + return -EINVAL; + } + + /* reject writes, which are not page aligned */ + if (NOTALIGNED (to) || NOTALIGNED(len)) { + printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n"); + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd, FL_WRITING); + + /* Calculate chipnr */ + chipnr = (int)(to >> this->chip_shift); + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + + /* Check, if it is write protected */ + if (nand_check_wp(mtd)) + goto out; + + /* if oobsel is NULL, use chip defaults */ + if (oobsel == NULL) + oobsel = &mtd->oobinfo; + + /* Autoplace of oob data ? 
Use the default placement scheme */ + if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) { + oobsel = this->autooob; + autoplace = 1; + } + + /* Setup variables and oob buffer */ + totalpages = len >> this->page_shift; + page = (int) (to >> this->page_shift); + /* Invalidate the page cache, if we write to the cached page */ + if (page <= this->pagebuf && this->pagebuf < (page + totalpages)) + this->pagebuf = -1; + + /* Set it relative to chip */ + page &= this->pagemask; + startpage = page; + /* Calc number of pages we can write in one go */ + numpages = min (ppblock - (startpage & (ppblock - 1)), totalpages); + oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel, autoplace, numpages); + bufstart = (u_char *)buf; + + /* Loop until all data is written */ + while (written < len) { + + this->data_poi = (u_char*) &buf[written]; + /* Write one page. If this is the last page to write + * or the last page in this block, then use the + * real pageprogram command, else select cached programming + * if supported by the chip. + */ + ret = nand_write_page (mtd, this, page, &oobbuf[oob], oobsel, (--numpages > 0)); + if (ret) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: write_page failed %d\n", ret); + goto out; + } + /* Next oob page */ + oob += mtd->oobsize; + /* Update written bytes count */ + written += mtd->oobblock; + if (written == len) + goto cmp; + + /* Increment page address */ + page++; + + /* Have we hit a block boundary ? Then we have to verify and + * if verify is ok, we have to setup the oob buffer for + * the next pages. + */ + if (!(page & (ppblock - 1))){ + int ofs; + this->data_poi = bufstart; + ret = nand_verify_pages (mtd, this, startpage, + page - startpage, + oobbuf, oobsel, chipnr, (eccbuf != NULL)); + if (ret) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret); + goto out; + } + *retlen = written; + + ofs = autoplace ? 
mtd->oobavail : mtd->oobsize; + if (eccbuf) + eccbuf += (page - startpage) * ofs; + totalpages -= page - startpage; + numpages = min (totalpages, ppblock); + page &= this->pagemask; + startpage = page; + oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel, + autoplace, numpages); + /* Check, if we cross a chip boundary */ + if (!page) { + chipnr++; + this->select_chip(mtd, -1); + this->select_chip(mtd, chipnr); + } + } + } + /* Verify the remaining pages */ +cmp: + this->data_poi = bufstart; + ret = nand_verify_pages (mtd, this, startpage, totalpages, + oobbuf, oobsel, chipnr, (eccbuf != NULL)); + if (!ret) + *retlen = written; + else + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret); + +out: + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + return ret; +} + + +/** + * nand_write_oob - [MTD Interface] NAND write out-of-band + * @mtd: MTD device structure + * @to: offset to write to + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of written bytes + * @buf: the data to write + * + * NAND write out-of-band + */ +static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf) +{ + int column, page, status, ret = -EIO, chipnr; + struct nand_chip *this = mtd->priv; + + DEBUG (MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); + + /* Shift to get page */ + page = (int) (to >> this->page_shift); + chipnr = (int) (to >> this->chip_shift); + + /* Mask to get column */ + column = to & (mtd->oobsize - 1); + + /* Initialize return length value */ + *retlen = 0; + + /* Do not allow write past end of page */ + if ((column + len) > mtd->oobsize) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: Attempt to write past end of page\n"); + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd, FL_WRITING); + + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + + /* Reset the chip. Some chips (like the Toshiba TC5832DC found + in one of my DiskOnChip 2000 test units) will clear the whole + data page too if we don't do this. I have no clue why, but + I seem to have 'fixed' it in the doc2000 driver in + August 1999. dwmw2. 
*/ + this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + + /* Check, if it is write protected */ + if (nand_check_wp(mtd)) + goto out; + + /* Invalidate the page cache, if we write to the cached page */ + if (page == this->pagebuf) + this->pagebuf = -1; + + if (NAND_MUST_PAD(this)) { + /* Write out desired data */ + this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock, page & this->pagemask); + /* prepad 0xff for partial programming */ + this->write_buf(mtd, ffchars, column); + /* write data */ + this->write_buf(mtd, buf, len); + /* postpad 0xff for partial programming */ + this->write_buf(mtd, ffchars, mtd->oobsize - (len+column)); + } else { + /* Write out desired data */ + this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock + column, page & this->pagemask); + /* write data */ + this->write_buf(mtd, buf, len); + } + /* Send command to program the OOB data */ + this->cmdfunc (mtd, NAND_CMD_PAGEPROG, -1, -1); + + status = this->waitfunc (mtd, this, FL_WRITING); + + /* See if device thinks it succeeded */ + if (status & 0x01) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write, page 0x%08x\n", page); + ret = -EIO; + goto out; + } + /* Return happy */ + *retlen = len; + +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE + /* Send command to read back the data */ + this->cmdfunc (mtd, NAND_CMD_READOOB, column, page & this->pagemask); + + if (this->verify_buf(mtd, buf, len)) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write verify, page 0x%08x\n", page); + ret = -EIO; + goto out; + } +#endif + ret = 0; +out: + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + return ret; +} + + +/** + * nand_writev - [MTD Interface] compabilty function for nand_writev_ecc + * @mtd: MTD device structure + * @vecs: the iovectors to write + * @count: number of vectors + * @to: offset to write to + * @retlen: pointer to variable to store the number of written bytes + * + * NAND write with kvec. 
This just calls the ecc function + */ +static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, + loff_t to, size_t * retlen) +{ + return (nand_writev_ecc (mtd, vecs, count, to, retlen, NULL, NULL)); +} + +/** + * nand_writev_ecc - [MTD Interface] write with iovec with ecc + * @mtd: MTD device structure + * @vecs: the iovectors to write + * @count: number of vectors + * @to: offset to write to + * @retlen: pointer to variable to store the number of written bytes + * @eccbuf: filesystem supplied oob data buffer + * @oobsel: oob selection structure + * + * NAND write with iovec with ecc + */ +static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, + loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel) +{ + int i, page, len, total_len, ret = -EIO, written = 0, chipnr; + int oob, numpages, autoplace = 0, startpage; + struct nand_chip *this = mtd->priv; + int ppblock = (1 << (this->phys_erase_shift - this->page_shift)); + u_char *oobbuf, *bufstart; + + /* Preset written len for early exit */ + *retlen = 0; + + /* Calculate total length of data */ + total_len = 0; + for (i = 0; i < count; i++) + total_len += (int) vecs[i].iov_len; + + DEBUG (MTD_DEBUG_LEVEL3, + "nand_writev: to = 0x%08x, len = %i, count = %ld\n", (unsigned int) to, (unsigned int) total_len, count); + + /* Do not allow write past end of page */ + if ((to + total_len) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_writev: Attempted write past end of device\n"); + return -EINVAL; + } + + /* reject writes, which are not page aligned */ + if (NOTALIGNED (to) || NOTALIGNED(total_len)) { + printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n"); + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd, FL_WRITING); + + /* Get the current chip-nr */ + chipnr = (int) (to >> this->chip_shift); + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + + /* Check, if it is write protected */ + if (nand_check_wp(mtd)) + goto out; + + /* if oobsel is NULL, use chip defaults */ + if (oobsel == NULL) + oobsel = &mtd->oobinfo; + + /* Autoplace of oob data ? Use the default placement scheme */ + if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) { + oobsel = this->autooob; + autoplace = 1; + } + + /* Setup start page */ + page = (int) (to >> this->page_shift); + /* Invalidate the page cache, if we write to the cached page */ + if (page <= this->pagebuf && this->pagebuf < ((to + total_len) >> this->page_shift)) + this->pagebuf = -1; + + startpage = page & this->pagemask; + + /* Loop until all kvec' data has been written */ + len = 0; + while (count) { + /* If the given tuple is >= pagesize then + * write it out from the iov + */ + if ((vecs->iov_len - len) >= mtd->oobblock) { + /* Calc number of pages we can write + * out of this iov in one go */ + numpages = (vecs->iov_len - len) >> this->page_shift; + /* Do not cross block boundaries */ + numpages = min (ppblock - (startpage & (ppblock - 1)), numpages); + oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages); + bufstart = (u_char *)vecs->iov_base; + bufstart += len; + this->data_poi = bufstart; + oob = 0; + for (i = 1; i <= numpages; i++) { + /* Write one page. If this is the last page to write + * then use the real pageprogram command, else select + * cached programming if supported by the chip. 
+ */ + ret = nand_write_page (mtd, this, page & this->pagemask, + &oobbuf[oob], oobsel, i != numpages); + if (ret) + goto out; + this->data_poi += mtd->oobblock; + len += mtd->oobblock; + oob += mtd->oobsize; + page++; + } + /* Check, if we have to switch to the next tuple */ + if (len >= (int) vecs->iov_len) { + vecs++; + len = 0; + count--; + } + } else { + /* We must use the internal buffer, read data out of each + * tuple until we have a full page to write + */ + int cnt = 0; + while (cnt < mtd->oobblock) { + if (vecs->iov_base != NULL && vecs->iov_len) + this->data_buf[cnt++] = ((u_char *) vecs->iov_base)[len++]; + /* Check, if we have to switch to the next tuple */ + if (len >= (int) vecs->iov_len) { + vecs++; + len = 0; + count--; + } + } + this->pagebuf = page; + this->data_poi = this->data_buf; + bufstart = this->data_poi; + numpages = 1; + oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages); + ret = nand_write_page (mtd, this, page & this->pagemask, + oobbuf, oobsel, 0); + if (ret) + goto out; + page++; + } + + this->data_poi = bufstart; + ret = nand_verify_pages (mtd, this, startpage, numpages, oobbuf, oobsel, chipnr, 0); + if (ret) + goto out; + + written += mtd->oobblock * numpages; + /* All done ? */ + if (!count) + break; + + startpage = page & this->pagemask; + /* Check, if we cross a chip boundary */ + if (!startpage) { + chipnr++; + this->select_chip(mtd, -1); + this->select_chip(mtd, chipnr); + } + } + ret = 0; +out: + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + *retlen = written; + return ret; +} + +/** + * single_erease_cmd - [GENERIC] NAND standard block erase command function + * @mtd: MTD device structure + * @page: the page address of the block which will be erased + * + * Standard erase command for NAND chips + */ +static void single_erase_cmd (struct mtd_info *mtd, int page) +{ + struct nand_chip *this = mtd->priv; + /* Send commands to erase a block */ + this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page); + this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1); +} + +/** + * multi_erease_cmd - [GENERIC] AND specific block erase command function + * @mtd: MTD device structure + * @page: the page address of the block which will be erased + * + * AND multi block erase command function + * Erase 4 consecutive blocks + */ +static void multi_erase_cmd (struct mtd_info *mtd, int page) +{ + struct nand_chip *this = mtd->priv; + /* Send commands to erase a block */ + this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++); + this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++); + this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++); + this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page); + this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1); +} + +/** + * nand_erase - [MTD Interface] erase block(s) + * @mtd: MTD device structure + * @instr: erase instruction + * + * Erase one ore more blocks + */ +static int nand_erase (struct mtd_info *mtd, struct erase_info *instr) +{ + return nand_erase_nand (mtd, instr, 0); +} + +/** + * nand_erase_intern - [NAND Interface] erase block(s) + * @mtd: MTD device structure + * @instr: erase instruction + * @allowbbt: allow erasing the bbt area + * + * Erase one ore more blocks + */ +int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbbt) +{ + int page, len, status, pages_per_block, ret, chipnr; + struct nand_chip *this = mtd->priv; + + DEBUG (MTD_DEBUG_LEVEL3, + "nand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len); + + /* Start address must align on 
block boundary */ + if (instr->addr & ((1 << this->phys_erase_shift) - 1)) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n"); + return -EINVAL; + } + + /* Length must align on block boundary */ + if (instr->len & ((1 << this->phys_erase_shift) - 1)) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Length not block aligned\n"); + return -EINVAL; + } + + /* Do not allow erase past end of device */ + if ((instr->len + instr->addr) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Erase past end of device\n"); + return -EINVAL; + } + + instr->fail_addr = 0xffffffff; + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd, FL_ERASING); + + /* Shift to get first page */ + page = (int) (instr->addr >> this->page_shift); + chipnr = (int) (instr->addr >> this->chip_shift); + + /* Calculate pages in each block */ + pages_per_block = 1 << (this->phys_erase_shift - this->page_shift); + + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + + /* Check the WP bit */ + /* Check, if it is write protected */ + if (nand_check_wp(mtd)) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Device is write protected!!!\n"); + instr->state = MTD_ERASE_FAILED; + goto erase_exit; + } + + /* Loop through the pages */ + len = instr->len; + + instr->state = MTD_ERASING; + + while (len) { + /* Check if we have a bad block, we do not erase bad blocks ! */ + if (nand_block_checkbad(mtd, ((loff_t) page) << this->page_shift, 0, allowbbt)) { + printk (KERN_WARNING "nand_erase: attempt to erase a bad block at page 0x%08x\n", page); + instr->state = MTD_ERASE_FAILED; + goto erase_exit; + } + + /* Invalidate the page cache, if we erase the block which contains + the current cached page */ + if (page <= this->pagebuf && this->pagebuf < (page + pages_per_block)) + this->pagebuf = -1; + + this->erase_cmd (mtd, page & this->pagemask); + + status = this->waitfunc (mtd, this, FL_ERASING); + + /* See if block erase succeeded */ + if (status & 0x01) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: " "Failed erase, page 0x%08x\n", page); + instr->state = MTD_ERASE_FAILED; + instr->fail_addr = (page << this->page_shift); + goto erase_exit; + } + + /* Increment page address and decrement length */ + len -= (1 << this->phys_erase_shift); + page += pages_per_block; + + /* Check, if we cross a chip boundary */ + if (len && !(page & this->pagemask)) { + chipnr++; + this->select_chip(mtd, -1); + this->select_chip(mtd, chipnr); + } + } + instr->state = MTD_ERASE_DONE; + +erase_exit: + + ret = instr->state == MTD_ERASE_DONE ? 
0 : -EIO; + /* Do call back function */ + if (!ret && instr->callback) + instr->callback (instr); + + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + /* Return more or less happy */ + return ret; +} + +/** + * nand_sync - [MTD Interface] sync + * @mtd: MTD device structure + * + * Sync is actually a wait for chip ready function + */ +static void nand_sync (struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + DECLARE_WAITQUEUE (wait, current); + + DEBUG (MTD_DEBUG_LEVEL3, "nand_sync: called\n"); + +retry: + /* Grab the spinlock */ + spin_lock_bh (&this->chip_lock); + + /* See what's going on */ + switch (this->state) { + case FL_READY: + case FL_SYNCING: + this->state = FL_SYNCING; + spin_unlock_bh (&this->chip_lock); + break; + + default: + /* Not an idle state */ + add_wait_queue (&this->wq, &wait); + spin_unlock_bh (&this->chip_lock); + schedule (); + + remove_wait_queue (&this->wq, &wait); + goto retry; + } + + /* Lock the device */ + spin_lock_bh (&this->chip_lock); + + /* Set the device to be ready again */ + if (this->state == FL_SYNCING) { + this->state = FL_READY; + wake_up (&this->wq); + } + + /* Unlock the device */ + spin_unlock_bh (&this->chip_lock); +} + + +/** + * nand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad + * @mtd: MTD device structure + * @ofs: offset relative to mtd start + */ +static int nand_block_isbad (struct mtd_info *mtd, loff_t ofs) +{ + /* Check for invalid offset */ + if (ofs > mtd->size) + return -EINVAL; + + return nand_block_checkbad (mtd, ofs, 1, 0); +} + +/** + * nand_block_markbad - [MTD Interface] Mark the block at the given offset as bad + * @mtd: MTD device structure + * @ofs: offset relative to mtd start + */ +static int nand_block_markbad (struct mtd_info *mtd, loff_t ofs) +{ + struct nand_chip *this = mtd->priv; + int ret; + + if ((ret = nand_block_isbad(mtd, ofs))) { + /* If it was bad already, return success and do nothing. */ + if (ret > 0) + return 0; + return ret; + } + + return this->block_markbad(mtd, ofs); +} + +/** + * nand_scan - [NAND Interface] Scan for the NAND device + * @mtd: MTD device structure + * @maxchips: Number of chips to scan for + * + * This fills out all the not initialized function pointers + * with the defaults. + * The flash ID is read and the mtd/chip structures are + * filled with the appropriate values. Buffers are allocated if + * they are not provided by the board driver + * + */ +int nand_scan (struct mtd_info *mtd, int maxchips) +{ + int i, j, nand_maf_id, nand_dev_id, busw; + struct nand_chip *this = mtd->priv; + + /* Get buswidth to select the correct functions*/ + busw = this->options & NAND_BUSWIDTH_16; + + /* check for proper chip_delay setup, set 20us if not */ + if (!this->chip_delay) + this->chip_delay = 20; + + /* check, if a user supplied command function given */ + if (this->cmdfunc == NULL) + this->cmdfunc = nand_command; + + /* check, if a user supplied wait function given */ + if (this->waitfunc == NULL) + this->waitfunc = nand_wait; + + if (!this->select_chip) + this->select_chip = nand_select_chip; + if (!this->write_byte) + this->write_byte = busw ? nand_write_byte16 : nand_write_byte; + if (!this->read_byte) + this->read_byte = busw ? 
nand_read_byte16 : nand_read_byte; + if (!this->write_word) + this->write_word = nand_write_word; + if (!this->read_word) + this->read_word = nand_read_word; + if (!this->block_bad) + this->block_bad = nand_block_bad; + if (!this->block_markbad) + this->block_markbad = nand_default_block_markbad; + if (!this->write_buf) + this->write_buf = busw ? nand_write_buf16 : nand_write_buf; + if (!this->read_buf) + this->read_buf = busw ? nand_read_buf16 : nand_read_buf; + if (!this->verify_buf) + this->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf; + if (!this->scan_bbt) + this->scan_bbt = nand_default_bbt; + + /* Select the device */ + this->select_chip(mtd, 0); + + /* Send the command for reading device ID */ + this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1); + + /* Read manufacturer and device IDs */ + nand_maf_id = this->read_byte(mtd); + nand_dev_id = this->read_byte(mtd); + + /* Print and store flash device information */ + for (i = 0; nand_flash_ids[i].name != NULL; i++) { + + if (nand_dev_id != nand_flash_ids[i].id) + continue; + + if (!mtd->name) mtd->name = nand_flash_ids[i].name; + this->chipsize = nand_flash_ids[i].chipsize << 20; + + /* New devices have all the information in additional id bytes */ + if (!nand_flash_ids[i].pagesize) { + int extid; + /* The 3rd id byte contains non relevant data ATM */ + extid = this->read_byte(mtd); + /* The 4th id byte is the important one */ + extid = this->read_byte(mtd); + /* Calc pagesize */ + mtd->oobblock = 1024 << (extid & 0x3); + extid >>= 2; + /* Calc oobsize */ + mtd->oobsize = (8 << (extid & 0x03)) * (mtd->oobblock / 512); + extid >>= 2; + /* Calc blocksize. Blocksize is multiples of 64KiB */ + mtd->erasesize = (64 * 1024) << (extid & 0x03); + extid >>= 2; + /* Get buswidth information */ + busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; + + } else { + /* Old devices have this data hardcoded in the + * device id table */ + mtd->erasesize = nand_flash_ids[i].erasesize; + mtd->oobblock = nand_flash_ids[i].pagesize; + mtd->oobsize = mtd->oobblock / 32; + busw = nand_flash_ids[i].options & NAND_BUSWIDTH_16; + } + + /* Check, if buswidth is correct. Hardware drivers should set + * this correct ! */ + if (busw != (this->options & NAND_BUSWIDTH_16)) { + printk (KERN_INFO "NAND device: Manufacturer ID:" + " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id, + nand_manuf_ids[i].name , mtd->name); + printk (KERN_WARNING + "NAND bus width %d instead %d bit\n", + (this->options & NAND_BUSWIDTH_16) ? 16 : 8, + busw ? 16 : 8); + this->select_chip(mtd, -1); + return 1; + } + + /* Calculate the address shift from the page size */ + this->page_shift = ffs(mtd->oobblock) - 1; + this->bbt_erase_shift = this->phys_erase_shift = ffs(mtd->erasesize) - 1; + this->chip_shift = ffs(this->chipsize) - 1; + + /* Set the bad block position */ + this->badblockpos = mtd->oobblock > 512 ? + NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS; + + /* Get chip options, preserve non chip based options */ + this->options &= ~NAND_CHIPOPTIONS_MSK; + this->options |= nand_flash_ids[i].options & NAND_CHIPOPTIONS_MSK; + /* Set this as a default. Board drivers can override it, if neccecary */ + this->options |= NAND_NO_AUTOINCR; + /* Check if this is a not a samsung device. Do not clear the options + * for chips which are not having an extended id. 
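+ /*
+  * Worked example with an assumed 4th id byte of 0x15, decoded the same
+  * way as the loop above: 2048 byte page, 64 byte oob, 128 KiB block,
+  * 8 bit bus. The value is illustrative, not taken from a real probe.
+  */
+ static void decode_extid_example(void)
+ {
+ 	int extid = 0x15;
+ 	int oobblock, oobsize, erasesize, buswidth16;
+ 
+ 	oobblock = 1024 << (extid & 0x03);			/* 2048 */
+ 	extid >>= 2;
+ 	oobsize = (8 << (extid & 0x03)) * (oobblock / 512);	/* 64 */
+ 	extid >>= 2;
+ 	erasesize = (64 * 1024) << (extid & 0x03);		/* 128 KiB */
+ 	extid >>= 2;
+ 	buswidth16 = extid & 0x01;				/* 0: x8 bus */
+ }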
+ */ + if (nand_maf_id != NAND_MFR_SAMSUNG && !nand_flash_ids[i].pagesize) + this->options &= ~NAND_SAMSUNG_LP_OPTIONS; + + /* Check for AND chips with 4 page planes */ + if (this->options & NAND_4PAGE_ARRAY) + this->erase_cmd = multi_erase_cmd; + else + this->erase_cmd = single_erase_cmd; + + /* Do not replace user supplied command function ! */ + if (mtd->oobblock > 512 && this->cmdfunc == nand_command) + this->cmdfunc = nand_command_lp; + + /* Try to identify manufacturer */ + for (j = 0; nand_manuf_ids[j].id != 0x0; j++) { + if (nand_manuf_ids[j].id == nand_maf_id) + break; + } + printk (KERN_INFO "NAND device: Manufacturer ID:" + " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id, + nand_manuf_ids[j].name , nand_flash_ids[i].name); + break; + } + + if (!nand_flash_ids[i].name) { + printk (KERN_WARNING "No NAND device found!!!\n"); + this->select_chip(mtd, -1); + return 1; + } + + for (i=1; i < maxchips; i++) { + this->select_chip(mtd, i); + + /* Send the command for reading device ID */ + this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1); + + /* Read manufacturer and device IDs */ + if (nand_maf_id != this->read_byte(mtd) || + nand_dev_id != this->read_byte(mtd)) + break; + } + if (i > 1) + printk(KERN_INFO "%d NAND chips detected\n", i); + + /* Allocate buffers, if neccecary */ + if (!this->oob_buf) { + size_t len; + len = mtd->oobsize << (this->phys_erase_shift - this->page_shift); + this->oob_buf = kmalloc (len, GFP_KERNEL); + if (!this->oob_buf) { + printk (KERN_ERR "nand_scan(): Cannot allocate oob_buf\n"); + return -ENOMEM; + } + this->options |= NAND_OOBBUF_ALLOC; + } + + if (!this->data_buf) { + size_t len; + len = mtd->oobblock + mtd->oobsize; + this->data_buf = kmalloc (len, GFP_KERNEL); + if (!this->data_buf) { + if (this->options & NAND_OOBBUF_ALLOC) + kfree (this->oob_buf); + printk (KERN_ERR "nand_scan(): Cannot allocate data_buf\n"); + return -ENOMEM; + } + this->options |= NAND_DATABUF_ALLOC; + } + + /* Store the number of chips and calc total size for mtd */ + this->numchips = i; + mtd->size = i * this->chipsize; + /* Convert chipsize to number of pages per chip -1. 
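+ /*
+  * Illustrative numbers for the ffs() based shifts above, assuming a
+  * 32 MiB chip with 512 byte pages and 16 KiB erase blocks (roughly a
+  * K9F5608 class device):
+  */
+ static void shift_example(void)
+ {
+ 	int page_shift = ffs(512) - 1;			/* 9,  1 << 9  == 512 */
+ 	int erase_shift = ffs(16 * 1024) - 1;		/* 14, 1 << 14 == 16 KiB */
+ 	int chip_shift = ffs(32 << 20) - 1;		/* 25, 1 << 25 == 32 MiB */
+ 	int pagemask = ((32 << 20) >> page_shift) - 1;	/* 0xffff, as computed below */
+ }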
*/ + this->pagemask = (this->chipsize >> this->page_shift) - 1; + /* Preset the internal oob buffer */ + memset(this->oob_buf, 0xff, mtd->oobsize << (this->phys_erase_shift - this->page_shift)); + + /* If no default placement scheme is given, select an + * appropriate one */ + if (!this->autooob) { + /* Select the appropriate default oob placement scheme for + * placement agnostic filesystems */ + switch (mtd->oobsize) { + case 8: + this->autooob = &nand_oob_8; + break; + case 16: + this->autooob = &nand_oob_16; + break; + case 64: + this->autooob = &nand_oob_64; + break; + default: + printk (KERN_WARNING "No oob scheme defined for oobsize %d\n", + mtd->oobsize); + BUG(); + } + } + + /* The number of bytes available for the filesystem to place fs dependend + * oob data */ + if (this->options & NAND_BUSWIDTH_16) { + mtd->oobavail = mtd->oobsize - (this->autooob->eccbytes + 2); + if (this->autooob->eccbytes & 0x01) + mtd->oobavail--; + } else + mtd->oobavail = mtd->oobsize - (this->autooob->eccbytes + 1); + + /* + * check ECC mode, default to software + * if 3byte/512byte hardware ECC is selected and we have 256 byte pagesize + * fallback to software ECC + */ + this->eccsize = 256; /* set default eccsize */ + + switch (this->eccmode) { + + case NAND_ECC_HW3_512: + case NAND_ECC_HW6_512: + case NAND_ECC_HW8_512: + if (mtd->oobblock == 256) { + printk (KERN_WARNING "512 byte HW ECC not possible on 256 Byte pagesize, fallback to SW ECC \n"); + this->eccmode = NAND_ECC_SOFT; + this->calculate_ecc = nand_calculate_ecc; + this->correct_data = nand_correct_data; + break; + } else + this->eccsize = 512; /* set eccsize to 512 and fall through for function check */ + + case NAND_ECC_HW3_256: + if (this->calculate_ecc && this->correct_data && this->enable_hwecc) + break; + printk (KERN_WARNING "No ECC functions supplied, Hardware ECC not possible\n"); + BUG(); + + case NAND_ECC_NONE: + printk (KERN_WARNING "NAND_ECC_NONE selected by board driver. 
This is not recommended !!\n"); + this->eccmode = NAND_ECC_NONE; + break; + + case NAND_ECC_SOFT: + this->calculate_ecc = nand_calculate_ecc; + this->correct_data = nand_correct_data; + break; + + default: + printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode); + BUG(); + } + + mtd->eccsize = this->eccsize; + + /* Set the number of read / write steps for one page to ensure ECC generation */ + switch (this->eccmode) { + case NAND_ECC_HW3_512: + case NAND_ECC_HW6_512: + case NAND_ECC_HW8_512: + this->eccsteps = mtd->oobblock / 512; + break; + case NAND_ECC_HW3_256: + case NAND_ECC_SOFT: + this->eccsteps = mtd->oobblock / 256; + break; + + case NAND_ECC_NONE: + this->eccsteps = 1; + break; + } + + /* Initialize state, waitqueue and spinlock */ + this->state = FL_READY; + init_waitqueue_head (&this->wq); + spin_lock_init (&this->chip_lock); + + /* De-select the device */ + this->select_chip(mtd, -1); + + /* Invalidate the pagebuffer reference */ + this->pagebuf = -1; + + /* Fill in remaining MTD driver data */ + mtd->type = MTD_NANDFLASH; + mtd->flags = MTD_CAP_NANDFLASH | MTD_ECC; + mtd->ecctype = MTD_ECC_SW; + mtd->erase = nand_erase; + mtd->point = NULL; + mtd->unpoint = NULL; + mtd->read = nand_read; + mtd->write = nand_write; + mtd->read_ecc = nand_read_ecc; + mtd->write_ecc = nand_write_ecc; + mtd->read_oob = nand_read_oob; + mtd->write_oob = nand_write_oob; + mtd->readv = NULL; + mtd->writev = nand_writev; + mtd->writev_ecc = nand_writev_ecc; + mtd->sync = nand_sync; + mtd->lock = NULL; + mtd->unlock = NULL; + mtd->suspend = NULL; + mtd->resume = NULL; + mtd->block_isbad = nand_block_isbad; + mtd->block_markbad = nand_block_markbad; + + /* and make the autooob the default one */ + memcpy(&mtd->oobinfo, this->autooob, sizeof(mtd->oobinfo)); + + mtd->owner = THIS_MODULE; + + /* Build bad block table */ + return this->scan_bbt (mtd); +} + +/** + * nand_release - [NAND Interface] Free resources held by the NAND device + * @mtd: MTD device structure +*/ +void nand_release (struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + +#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE) + /* Unregister partitions */ + del_mtd_partitions (mtd); +#endif + /* Unregister the device */ + del_mtd_device (mtd); + + /* Free bad block table memory, if allocated */ + if (this->bbt) + kfree (this->bbt); + /* Buffer allocated by nand_scan ? */ + if (this->options & NAND_OOBBUF_ALLOC) + kfree (this->oob_buf); + /* Buffer allocated by nand_scan ? */ + if (this->options & NAND_DATABUF_ALLOC) + kfree (this->data_buf); +} + +EXPORT_SYMBOL (nand_scan); +EXPORT_SYMBOL (nand_release); + +MODULE_LICENSE ("GPL"); +MODULE_AUTHOR ("Steven J. Hill , Thomas Gleixner "); +MODULE_DESCRIPTION ("Generic NAND flash driver code"); diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c new file mode 100644 index 000000000..2642e1151 --- /dev/null +++ b/drivers/mtd/nand/nand_bbt.c @@ -0,0 +1,1053 @@ +/* + * drivers/mtd/nand_bbt.c + * + * Overview: + * Bad block table support for the NAND driver + * + * Copyright (C) 2004 Thomas Gleixner (tglx@linutronix.de) + * + * $Id: nand_bbt.c,v 1.24 2004/06/28 08:25:35 gleixner Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Description: + * + * When nand_scan_bbt is called, then it tries to find the bad block table + * depending on the options in the bbt descriptor(s). 
If a bbt is found + * then the contents are read and the memory based bbt is created. If a + * mirrored bbt is selected then the mirror is searched too and the + * versions are compared. If the mirror has a greater version number + * than the mirror bbt is used to build the memory based bbt. + * If the tables are not versioned, then we "or" the bad block information. + * If one of the bbt's is out of date or does not exist it is (re)created. + * If no bbt exists at all then the device is scanned for factory marked + * good / bad blocks and the bad block tables are created. + * + * For manufacturer created bbts like the one found on M-SYS DOC devices + * the bbt is searched and read but never created + * + * The autogenerated bad block table is located in the last good blocks + * of the device. The table is mirrored, so it can be updated eventually. + * The table is marked in the oob area with an ident pattern and a version + * number which indicates which of both tables is more up to date. + * + * The table uses 2 bits per block + * 11b: block is good + * 00b: block is factory marked bad + * 01b, 10b: block is marked bad due to wear + * + * The memory bad block table uses the following scheme: + * 00b: block is good + * 01b: block is marked bad due to wear + * 10b: block is reserved (to protect the bbt area) + * 11b: block is factory marked bad + * + * Multichip devices like DOC store the bad block info per floor. + * + * Following assumptions are made: + * - bbts start at a page boundary, if autolocated on a block boundary + * - the space neccecary for a bbt in FLASH does not exceed a block boundary + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +/** + * check_pattern - [GENERIC] check if a pattern is in the buffer + * @buf: the buffer to search + * @len: the length of buffer to search + * @paglen: the pagelength + * @td: search pattern descriptor + * + * Check for a pattern at the given place. Used to search bad block + * tables and good / bad block identifiers. + * If the SCAN_EMPTY option is set then check, if all bytes except the + * pattern area contain 0xff + * +*/ +static int check_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) +{ + int i, end; + uint8_t *p = buf; + + end = paglen + td->offs; + if (td->options & NAND_BBT_SCANEMPTY) { + for (i = 0; i < end; i++) { + if (p[i] != 0xff) + return -1; + } + } + p += end; + + /* Compare the pattern */ + for (i = 0; i < td->len; i++) { + if (p[i] != td->pattern[i]) + return -1; + } + + p += td->len; + end += td->len; + if (td->options & NAND_BBT_SCANEMPTY) { + for (i = end; i < len; i++) { + if (*p++ != 0xff) + return -1; + } + } + return 0; +} + +/** + * read_bbt - [GENERIC] Read the bad block table starting from page + * @mtd: MTD device structure + * @buf: temporary buffer + * @page: the starting page + * @num: the number of bbt descriptors to read + * @bits: number of bits per block + * @offs: offset in the memory table + * + * Read the bad block table starting from page. 
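+ /*
+  * Illustrative use of check_pattern() with assumed values: the generic
+  * small page marker defined near the end of this file is a single 0xff
+  * byte at offset 5 of the oob area, so with a 512 byte page the pattern
+  * is expected at buf[512 + 5].
+  */
+ static uint8_t example_ff_pattern[] = { 0xff };
+ static struct nand_bbt_descr example_smallpage = {
+ 	.options = 0,
+ 	.offs	 = 5,
+ 	.len	 = 1,
+ 	.pattern = example_ff_pattern,
+ };
+ /* check_pattern(buf, 512 + 16, 512, &example_smallpage) returns 0 for a good block */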
+ * + */ +static int read_bbt (struct mtd_info *mtd, uint8_t *buf, int page, int num, + int bits, int offs, int reserved_block_code) +{ + int res, i, j, act = 0; + struct nand_chip *this = mtd->priv; + size_t retlen, len, totlen; + loff_t from; + uint8_t msk = (uint8_t) ((1 << bits) - 1); + + totlen = (num * bits) >> 3; + from = ((loff_t)page) << this->page_shift; + + while (totlen) { + len = min (totlen, (size_t) (1 << this->bbt_erase_shift)); + res = mtd->read_ecc (mtd, from, len, &retlen, buf, NULL, this->autooob); + if (res < 0) { + if (retlen != len) { + printk (KERN_INFO "nand_bbt: Error reading bad block table\n"); + return res; + } + printk (KERN_WARNING "nand_bbt: ECC error while reading bad block table\n"); + } + + /* Analyse data */ + for (i = 0; i < len; i++) { + uint8_t dat = buf[i]; + for (j = 0; j < 8; j += bits, act += 2) { + uint8_t tmp = (dat >> j) & msk; + if (tmp == msk) + continue; + if (reserved_block_code && + (tmp == reserved_block_code)) { + printk (KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n", + ((offs << 2) + (act >> 1)) << this->bbt_erase_shift); + this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); + continue; + } + /* Leave it for now, if its matured we can move this + * message to MTD_DEBUG_LEVEL0 */ + printk (KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n", + ((offs << 2) + (act >> 1)) << this->bbt_erase_shift); + /* Factory marked bad or worn out ? */ + if (tmp == 0) + this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); + else + this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06); + } + } + totlen -= len; + from += len; + } + return 0; +} + +/** + * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page + * @mtd: MTD device structure + * @buf: temporary buffer + * @td: descriptor for the bad block table + * @chip: read the table for a specific chip, -1 read all chips. + * Applies only if NAND_BBT_PERCHIP option is set + * + * Read the bad block table for all chips starting at a given page + * We assume that the bbt bits are in consecutive order. +*/ +static int read_abs_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip) +{ + struct nand_chip *this = mtd->priv; + int res = 0, i; + int bits; + + bits = td->options & NAND_BBT_NRBITS_MSK; + if (td->options & NAND_BBT_PERCHIP) { + int offs = 0; + for (i = 0; i < this->numchips; i++) { + if (chip == -1 || chip == i) + res = read_bbt (mtd, buf, td->pages[i], this->chipsize >> this->bbt_erase_shift, bits, offs, td->reserved_block_code); + if (res) + return res; + offs += this->chipsize >> (this->bbt_erase_shift + 2); + } + } else { + res = read_bbt (mtd, buf, td->pages[0], mtd->size >> this->bbt_erase_shift, bits, 0, td->reserved_block_code); + if (res) + return res; + } + return 0; +} + +/** + * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page + * @mtd: MTD device structure + * @buf: temporary buffer + * @td: descriptor for the bad block table + * @md: descriptor for the bad block table mirror + * + * Read the bad block table(s) for all chips starting at a given page + * We assume that the bbt bits are in consecutive order. 
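+ /*
+  * Sketch of the translation read_bbt() performs for one 2 bit flash
+  * table entry, following the scheme in the file header: 11b is good,
+  * 00b is factory bad, the reserved block code marks the bbt area and
+  * anything else is treated as worn out.
+  */
+ static uint8_t flash_to_mem_bbt(uint8_t flash_entry, uint8_t reserved_code)
+ {
+ 	if (flash_entry == 0x03)
+ 		return 0x00;	/* good: no bits set in the memory table */
+ 	if (reserved_code && flash_entry == reserved_code)
+ 		return 0x02;	/* reserved: protects the bbt blocks */
+ 	if (flash_entry == 0x00)
+ 		return 0x03;	/* factory marked bad */
+ 	return 0x01;		/* marked bad due to wear */
+ }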
+ * +*/ +static int read_abs_bbts (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, + struct nand_bbt_descr *md) +{ + struct nand_chip *this = mtd->priv; + + /* Read the primary version, if available */ + if (td->options & NAND_BBT_VERSION) { + nand_read_raw (mtd, buf, td->pages[0] << this->page_shift, mtd->oobblock, mtd->oobsize); + td->version[0] = buf[mtd->oobblock + td->veroffs]; + printk (KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", td->pages[0], td->version[0]); + } + + /* Read the mirror version, if available */ + if (md && (md->options & NAND_BBT_VERSION)) { + nand_read_raw (mtd, buf, md->pages[0] << this->page_shift, mtd->oobblock, mtd->oobsize); + md->version[0] = buf[mtd->oobblock + md->veroffs]; + printk (KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", md->pages[0], md->version[0]); + } + + return 1; +} + +/** + * create_bbt - [GENERIC] Create a bad block table by scanning the device + * @mtd: MTD device structure + * @buf: temporary buffer + * @bd: descriptor for the good/bad block search pattern + * @chip: create the table for a specific chip, -1 read all chips. + * Applies only if NAND_BBT_PERCHIP option is set + * + * Create a bad block table by scanning the device + * for the given good/bad block identify pattern + */ +static void create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip) +{ + struct nand_chip *this = mtd->priv; + int i, j, numblocks, len, scanlen; + int startblock; + loff_t from; + size_t readlen, ooblen; + + printk (KERN_INFO "Scanning device for bad blocks\n"); + + if (bd->options & NAND_BBT_SCANALLPAGES) + len = 1 << (this->bbt_erase_shift - this->page_shift); + else { + if (bd->options & NAND_BBT_SCAN2NDPAGE) + len = 2; + else + len = 1; + } + scanlen = mtd->oobblock + mtd->oobsize; + readlen = len * mtd->oobblock; + ooblen = len * mtd->oobsize; + + if (chip == -1) { + /* Note that numblocks is 2 * (real numblocks) here, see i+=2 below as it + * makes shifting and masking less painful */ + numblocks = mtd->size >> (this->bbt_erase_shift - 1); + startblock = 0; + from = 0; + } else { + if (chip >= this->numchips) { + printk (KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n", + chip + 1, this->numchips); + return; + } + numblocks = this->chipsize >> (this->bbt_erase_shift - 1); + startblock = chip * numblocks; + numblocks += startblock; + from = startblock << (this->bbt_erase_shift - 1); + } + + for (i = startblock; i < numblocks;) { + nand_read_raw (mtd, buf, from, readlen, ooblen); + for (j = 0; j < len; j++) { + if (check_pattern (&buf[j * scanlen], scanlen, mtd->oobblock, bd)) { + this->bbt[i >> 3] |= 0x03 << (i & 0x6); + printk (KERN_WARNING "Bad eraseblock %d at 0x%08x\n", + i >> 1, (unsigned int) from); + break; + } + } + i += 2; + from += (1 << this->bbt_erase_shift); + } +} + +/** + * search_bbt - [GENERIC] scan the device for a specific bad block table + * @mtd: MTD device structure + * @buf: temporary buffer + * @td: descriptor for the bad block table + * + * Read the bad block table by searching for a given ident pattern. + * Search is preformed either from the beginning up or from the end of + * the device downwards. The search starts always at the start of a + * block. + * If the option NAND_BBT_PERCHIP is given, each chip is searched + * for a bbt, which contains the bad block information of this chip. + * This is neccecary to provide support for certain DOC devices. + * + * The bbt ident pattern resides in the oob area of the first page + * in a block. 
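+ /*
+  * Worked example for the 2 bit per block bookkeeping used by
+  * create_bbt() above (block number assumed): real block 5 is tracked at
+  * index i = 10, i.e. byte i >> 3 == 1 and bit offset i & 0x6 == 2, so
+  * marking it bad sets bits 3:2 of bbt[1].
+  */
+ static void mark_block_bad_example(uint8_t *bbt)
+ {
+ 	int block = 5;
+ 	int i = block << 1;			/* two table bits per block */
+ 
+ 	bbt[i >> 3] |= 0x03 << (i & 0x6);	/* bbt[1] |= 0x0c */
+ }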
+ */ +static int search_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td) +{ + struct nand_chip *this = mtd->priv; + int i, chips; + int bits, startblock, block, dir; + int scanlen = mtd->oobblock + mtd->oobsize; + int bbtblocks; + + /* Search direction top -> down ? */ + if (td->options & NAND_BBT_LASTBLOCK) { + startblock = (mtd->size >> this->bbt_erase_shift) -1; + dir = -1; + } else { + startblock = 0; + dir = 1; + } + + /* Do we have a bbt per chip ? */ + if (td->options & NAND_BBT_PERCHIP) { + chips = this->numchips; + bbtblocks = this->chipsize >> this->bbt_erase_shift; + startblock &= bbtblocks - 1; + } else { + chips = 1; + bbtblocks = mtd->size >> this->bbt_erase_shift; + } + + /* Number of bits for each erase block in the bbt */ + bits = td->options & NAND_BBT_NRBITS_MSK; + + for (i = 0; i < chips; i++) { + /* Reset version information */ + td->version[i] = 0; + td->pages[i] = -1; + /* Scan the maximum number of blocks */ + for (block = 0; block < td->maxblocks; block++) { + int actblock = startblock + dir * block; + /* Read first page */ + nand_read_raw (mtd, buf, actblock << this->bbt_erase_shift, mtd->oobblock, mtd->oobsize); + if (!check_pattern(buf, scanlen, mtd->oobblock, td)) { + td->pages[i] = actblock << (this->bbt_erase_shift - this->page_shift); + if (td->options & NAND_BBT_VERSION) { + td->version[i] = buf[mtd->oobblock + td->veroffs]; + } + break; + } + } + startblock += this->chipsize >> this->bbt_erase_shift; + } + /* Check, if we found a bbt for each requested chip */ + for (i = 0; i < chips; i++) { + if (td->pages[i] == -1) + printk (KERN_WARNING "Bad block table not found for chip %d\n", i); + else + printk (KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i], td->version[i]); + } + return 0; +} + +/** + * search_read_bbts - [GENERIC] scan the device for bad block table(s) + * @mtd: MTD device structure + * @buf: temporary buffer + * @td: descriptor for the bad block table + * @md: descriptor for the bad block table mirror + * + * Search and read the bad block table(s) +*/ +static int search_read_bbts (struct mtd_info *mtd, uint8_t *buf, + struct nand_bbt_descr *td, struct nand_bbt_descr *md) +{ + /* Search the primary table */ + search_bbt (mtd, buf, td); + + /* Search the mirror table */ + if (md) + search_bbt (mtd, buf, md); + + /* Force result check */ + return 1; +} + + +/** + * write_bbt - [GENERIC] (Re)write the bad block table + * + * @mtd: MTD device structure + * @buf: temporary buffer + * @td: descriptor for the bad block table + * @md: descriptor for the bad block table mirror + * @chipsel: selector for a specific chip, -1 for all + * + * (Re)write the bad block table + * +*/ +static int write_bbt (struct mtd_info *mtd, uint8_t *buf, + struct nand_bbt_descr *td, struct nand_bbt_descr *md, int chipsel) +{ + struct nand_chip *this = mtd->priv; + struct nand_oobinfo oobinfo; + struct erase_info einfo; + int i, j, res, chip = 0; + int bits, startblock, dir, page, offs, numblocks, sft, sftmsk; + int nrchips, bbtoffs, pageoffs; + uint8_t msk[4]; + uint8_t rcode = td->reserved_block_code; + size_t retlen, len = 0; + loff_t to; + + if (!rcode) + rcode = 0xff; + /* Write bad block table per chip rather than per device ? */ + if (td->options & NAND_BBT_PERCHIP) { + numblocks = (int) (this->chipsize >> this->bbt_erase_shift); + /* Full device write or specific chip ? 
*/ + if (chipsel == -1) { + nrchips = this->numchips; + } else { + nrchips = chipsel + 1; + chip = chipsel; + } + } else { + numblocks = (int) (mtd->size >> this->bbt_erase_shift); + nrchips = 1; + } + + /* Loop through the chips */ + for (; chip < nrchips; chip++) { + + /* There was already a version of the table, reuse the page + * This applies for absolute placement too, as we have the + * page nr. in td->pages. + */ + if (td->pages[chip] != -1) { + page = td->pages[chip]; + goto write; + } + + /* Automatic placement of the bad block table */ + /* Search direction top -> down ? */ + if (td->options & NAND_BBT_LASTBLOCK) { + startblock = numblocks * (chip + 1) - 1; + dir = -1; + } else { + startblock = chip * numblocks; + dir = 1; + } + + for (i = 0; i < td->maxblocks; i++) { + int block = startblock + dir * i; + /* Check, if the block is bad */ + switch ((this->bbt[block >> 2] >> (2 * (block & 0x03))) & 0x03) { + case 0x01: + case 0x03: + continue; + } + page = block << (this->bbt_erase_shift - this->page_shift); + /* Check, if the block is used by the mirror table */ + if (!md || md->pages[chip] != page) + goto write; + } + printk (KERN_ERR "No space left to write bad block table\n"); + return -ENOSPC; +write: + + /* Set up shift count and masks for the flash table */ + bits = td->options & NAND_BBT_NRBITS_MSK; + switch (bits) { + case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01; msk[2] = ~rcode; msk[3] = 0x01; break; + case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01; msk[2] = ~rcode; msk[3] = 0x03; break; + case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C; msk[2] = ~rcode; msk[3] = 0x0f; break; + case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F; msk[2] = ~rcode; msk[3] = 0xff; break; + default: return -EINVAL; + } + + bbtoffs = chip * (numblocks >> 2); + + to = ((loff_t) page) << this->page_shift; + + memcpy (&oobinfo, this->autooob, sizeof(oobinfo)); + oobinfo.useecc = MTD_NANDECC_PLACEONLY; + + /* Must we save the block contents ? */ + if (td->options & NAND_BBT_SAVECONTENT) { + /* Make it block aligned */ + to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1)); + len = 1 << this->bbt_erase_shift; + res = mtd->read_ecc (mtd, to, len, &retlen, buf, &buf[len], &oobinfo); + if (res < 0) { + if (retlen != len) { + printk (KERN_INFO "nand_bbt: Error reading block for writing the bad block table\n"); + return res; + } + printk (KERN_WARNING "nand_bbt: ECC error while reading block for writing bad block table\n"); + } + /* Calc the byte offset in the buffer */ + pageoffs = page - (int)(to >> this->page_shift); + offs = pageoffs << this->page_shift; + /* Preset the bbt area with 0xff */ + memset (&buf[offs], 0xff, (size_t)(numblocks >> sft)); + /* Preset the bbt's oob area with 0xff */ + memset (&buf[len + pageoffs * mtd->oobsize], 0xff, + ((len >> this->page_shift) - pageoffs) * mtd->oobsize); + if (td->options & NAND_BBT_VERSION) { + buf[len + (pageoffs * mtd->oobsize) + td->veroffs] = td->version[chip]; + } + } else { + /* Calc length */ + len = (size_t) (numblocks >> sft); + /* Make it page aligned ! 
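+ /*
+  * Worked example for the page alignment done below, with assumed
+  * geometry: 1024 blocks at 2 bits each need 256 bytes, which is padded
+  * up to one full 512 byte page before the table is written to flash.
+  */
+ static size_t bbt_len_example(void)
+ {
+ 	size_t oobblock = 512;			/* assumed page size */
+ 	size_t len = 1024 >> 2;			/* 2 bits per block: 256 bytes */
+ 
+ 	len = (len + (oobblock - 1)) & ~(oobblock - 1);	/* 512 */
+ 	return len;
+ }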
*/ + len = (len + (mtd->oobblock-1)) & ~(mtd->oobblock-1); + /* Preset the buffer with 0xff */ + memset (buf, 0xff, len + (len >> this->page_shift) * mtd->oobsize); + offs = 0; + /* Pattern is located in oob area of first page */ + memcpy (&buf[len + td->offs], td->pattern, td->len); + if (td->options & NAND_BBT_VERSION) { + buf[len + td->veroffs] = td->version[chip]; + } + } + + /* walk through the memory table */ + for (i = 0; i < numblocks; ) { + uint8_t dat; + dat = this->bbt[bbtoffs + (i >> 2)]; + for (j = 0; j < 4; j++ , i++) { + int sftcnt = (i << (3 - sft)) & sftmsk; + /* Do not store the reserved bbt blocks ! */ + buf[offs + (i >> sft)] &= ~(msk[dat & 0x03] << sftcnt); + dat >>= 2; + } + } + + memset (&einfo, 0, sizeof (einfo)); + einfo.mtd = mtd; + einfo.addr = (unsigned long) to; + einfo.len = 1 << this->bbt_erase_shift; + res = nand_erase_nand (mtd, &einfo, 1); + if (res < 0) { + printk (KERN_WARNING "nand_bbt: Error during block erase: %d\n", res); + return res; + } + + res = mtd->write_ecc (mtd, to, len, &retlen, buf, &buf[len], &oobinfo); + if (res < 0) { + printk (KERN_WARNING "nand_bbt: Error while writing bad block table %d\n", res); + return res; + } + printk (KERN_DEBUG "Bad block table written to 0x%08x, version 0x%02X\n", + (unsigned int) to, td->version[chip]); + + /* Mark it as used */ + td->pages[chip] = page; + } + return 0; +} + +/** + * nand_memory_bbt - [GENERIC] create a memory based bad block table + * @mtd: MTD device structure + * @bd: descriptor for the good/bad block search pattern + * + * The function creates a memory based bbt by scanning the device + * for manufacturer / software marked good / bad blocks +*/ +static int nand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd) +{ + struct nand_chip *this = mtd->priv; + + /* Ensure that we only scan for the pattern and nothing else */ + bd->options = 0; + create_bbt (mtd, this->data_buf, bd, -1); + return 0; +} + +/** + * check_create - [GENERIC] create and write bbt(s) if neccecary + * @mtd: MTD device structure + * @buf: temporary buffer + * @bd: descriptor for the good/bad block search pattern + * + * The function checks the results of the previous call to read_bbt + * and creates / updates the bbt(s) if neccecary + * Creation is neccecary if no bbt was found for the chip/device + * Update is neccecary if one of the tables is missing or the + * version nr. of one table is less than the other +*/ +static int check_create (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd) +{ + int i, chips, writeops, chipsel, res; + struct nand_chip *this = mtd->priv; + struct nand_bbt_descr *td = this->bbt_td; + struct nand_bbt_descr *md = this->bbt_md; + struct nand_bbt_descr *rd, *rd2; + + /* Do we have a bbt per chip ? */ + if (td->options & NAND_BBT_PERCHIP) + chips = this->numchips; + else + chips = 1; + + for (i = 0; i < chips; i++) { + writeops = 0; + rd = NULL; + rd2 = NULL; + /* Per chip or per device ? */ + chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1; + /* Mirrored table avilable ? 
*/ + if (md) { + if (td->pages[i] == -1 && md->pages[i] == -1) { + writeops = 0x03; + goto create; + } + + if (td->pages[i] == -1) { + rd = md; + td->version[i] = md->version[i]; + writeops = 1; + goto writecheck; + } + + if (md->pages[i] == -1) { + rd = td; + md->version[i] = td->version[i]; + writeops = 2; + goto writecheck; + } + + if (td->version[i] == md->version[i]) { + rd = td; + if (!(td->options & NAND_BBT_VERSION)) + rd2 = md; + goto writecheck; + } + + if (((int8_t) (td->version[i] - md->version[i])) > 0) { + rd = td; + md->version[i] = td->version[i]; + writeops = 2; + } else { + rd = md; + td->version[i] = md->version[i]; + writeops = 1; + } + + goto writecheck; + + } else { + if (td->pages[i] == -1) { + writeops = 0x01; + goto create; + } + rd = td; + goto writecheck; + } +create: + /* Create the bad block table by scanning the device ? */ + if (!(td->options & NAND_BBT_CREATE)) + continue; + + /* Create the table in memory by scanning the chip(s) */ + create_bbt (mtd, buf, bd, chipsel); + + td->version[i] = 1; + if (md) + md->version[i] = 1; +writecheck: + /* read back first ? */ + if (rd) + read_abs_bbt (mtd, buf, rd, chipsel); + /* If they weren't versioned, read both. */ + if (rd2) + read_abs_bbt (mtd, buf, rd2, chipsel); + + /* Write the bad block table to the device ? */ + if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { + res = write_bbt (mtd, buf, td, md, chipsel); + if (res < 0) + return res; + } + + /* Write the mirror bad block table to the device ? */ + if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { + res = write_bbt (mtd, buf, md, td, chipsel); + if (res < 0) + return res; + } + } + return 0; +} + +/** + * mark_bbt_regions - [GENERIC] mark the bad block table regions + * @mtd: MTD device structure + * @td: bad block table descriptor + * + * The bad block table regions are marked as "bad" to prevent + * accidental erasures / writes. The regions are identified by + * the mark 0x02. +*/ +static void mark_bbt_region (struct mtd_info *mtd, struct nand_bbt_descr *td) +{ + struct nand_chip *this = mtd->priv; + int i, j, chips, block, nrblocks, update; + uint8_t oldval, newval; + + /* Do we have a bbt per chip ? */ + if (td->options & NAND_BBT_PERCHIP) { + chips = this->numchips; + nrblocks = (int)(this->chipsize >> this->bbt_erase_shift); + } else { + chips = 1; + nrblocks = (int)(mtd->size >> this->bbt_erase_shift); + } + + for (i = 0; i < chips; i++) { + if ((td->options & NAND_BBT_ABSPAGE) || + !(td->options & NAND_BBT_WRITE)) { + if (td->pages[i] == -1) continue; + block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift); + block <<= 1; + oldval = this->bbt[(block >> 3)]; + newval = oldval | (0x2 << (block & 0x06)); + this->bbt[(block >> 3)] = newval; + if ((oldval != newval) && td->reserved_block_code) + nand_update_bbt(mtd, block << (this->bbt_erase_shift - 1)); + continue; + } + update = 0; + if (td->options & NAND_BBT_LASTBLOCK) + block = ((i + 1) * nrblocks) - td->maxblocks; + else + block = i * nrblocks; + block <<= 1; + for (j = 0; j < td->maxblocks; j++) { + oldval = this->bbt[(block >> 3)]; + newval = oldval | (0x2 << (block & 0x06)); + this->bbt[(block >> 3)] = newval; + if (oldval != newval) update = 1; + block += 2; + } + /* If we want reserved blocks to be recorded to flash, and some + new ones have been marked, then we need to update the stored + bbts. This should only happen once. 
*/ + if (update && td->reserved_block_code) + nand_update_bbt(mtd, (block - 2) << (this->bbt_erase_shift - 1)); + } +} + +/** + * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s) + * @mtd: MTD device structure + * @bd: descriptor for the good/bad block search pattern + * + * The function checks, if a bad block table(s) is/are already + * available. If not it scans the device for manufacturer + * marked good / bad blocks and writes the bad block table(s) to + * the selected place. + * + * The bad block table memory is allocated here. It must be freed + * by calling the nand_free_bbt function. + * +*/ +int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd) +{ + struct nand_chip *this = mtd->priv; + int len, res = 0; + uint8_t *buf; + struct nand_bbt_descr *td = this->bbt_td; + struct nand_bbt_descr *md = this->bbt_md; + + len = mtd->size >> (this->bbt_erase_shift + 2); + /* Allocate memory (2bit per block) */ + this->bbt = (uint8_t *) kmalloc (len, GFP_KERNEL); + if (!this->bbt) { + printk (KERN_ERR "nand_scan_bbt: Out of memory\n"); + return -ENOMEM; + } + /* Clear the memory bad block table */ + memset (this->bbt, 0x00, len); + + /* If no primary table decriptor is given, scan the device + * to build a memory based bad block table + */ + if (!td) + return nand_memory_bbt(mtd, bd); + + /* Allocate a temporary buffer for one eraseblock incl. oob */ + len = (1 << this->bbt_erase_shift); + len += (len >> this->page_shift) * mtd->oobsize; + buf = kmalloc (len, GFP_KERNEL); + if (!buf) { + printk (KERN_ERR "nand_bbt: Out of memory\n"); + kfree (this->bbt); + this->bbt = NULL; + return -ENOMEM; + } + + /* Is the bbt at a given page ? */ + if (td->options & NAND_BBT_ABSPAGE) { + res = read_abs_bbts (mtd, buf, td, md); + } else { + /* Search the bad block table using a pattern in oob */ + res = search_read_bbts (mtd, buf, td, md); + } + + if (res) + res = check_create (mtd, buf, bd); + + /* Prevent the bbt regions from erasing / writing */ + mark_bbt_region (mtd, td); + if (md) + mark_bbt_region (mtd, md); + + kfree (buf); + return res; +} + + +/** + * nand_update_bbt - [NAND Interface] update bad block table(s) + * @mtd: MTD device structure + * @offs: the offset of the newly marked block + * + * The function updates the bad block table(s) +*/ +int nand_update_bbt (struct mtd_info *mtd, loff_t offs) +{ + struct nand_chip *this = mtd->priv; + int len, res = 0, writeops = 0; + int chip, chipsel; + uint8_t *buf; + struct nand_bbt_descr *td = this->bbt_td; + struct nand_bbt_descr *md = this->bbt_md; + + if (!this->bbt || !td) + return -EINVAL; + + len = mtd->size >> (this->bbt_erase_shift + 2); + /* Allocate a temporary buffer for one eraseblock incl. oob */ + len = (1 << this->bbt_erase_shift); + len += (len >> this->page_shift) * mtd->oobsize; + buf = kmalloc (len, GFP_KERNEL); + if (!buf) { + printk (KERN_ERR "nand_update_bbt: Out of memory\n"); + return -ENOMEM; + } + + writeops = md != NULL ? 0x03 : 0x01; + + /* Do we have a bbt per chip ? */ + if (td->options & NAND_BBT_PERCHIP) { + chip = (int) (offs >> this->chip_shift); + chipsel = chip; + } else { + chip = 0; + chipsel = -1; + } + + td->version[chip]++; + if (md) + md->version[chip]++; + + /* Write the bad block table to the device ? */ + if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { + res = write_bbt (mtd, buf, td, md, chipsel); + if (res < 0) + goto out; + } + /* Write the mirror bad block table to the device ? 
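+ /*
+  * Worked example for the bbt allocation in nand_scan_bbt() above,
+  * assuming a 64 MiB device with 16 KiB erase blocks: 4096 blocks at
+  * 2 bits per block give 64 MiB >> (14 + 2) == 1024 bytes of table.
+  */
+ static size_t bbt_mem_size_example(void)
+ {
+ 	size_t size = 64 << 20;			/* assumed device size */
+ 	int bbt_erase_shift = 14;		/* assumed 16 KiB blocks */
+ 
+ 	return size >> (bbt_erase_shift + 2);	/* 1024 bytes */
+ }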
*/ + if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { + res = write_bbt (mtd, buf, md, td, chipsel); + } + +out: + kfree (buf); + return res; +} + +/* Define some generic bad / good block scan pattern which are used + * while scanning a device for factory marked good / bad blocks + * + * The memory based patterns just + */ +static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; + +static struct nand_bbt_descr smallpage_memorybased = { + .options = 0, + .offs = 5, + .len = 1, + .pattern = scan_ff_pattern +}; + +static struct nand_bbt_descr largepage_memorybased = { + .options = 0, + .offs = 0, + .len = 2, + .pattern = scan_ff_pattern +}; + +static struct nand_bbt_descr smallpage_flashbased = { + .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, + .offs = 5, + .len = 1, + .pattern = scan_ff_pattern +}; + +static struct nand_bbt_descr largepage_flashbased = { + .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, + .offs = 0, + .len = 2, + .pattern = scan_ff_pattern +}; + +static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; + +static struct nand_bbt_descr agand_flashbased = { + .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, + .offs = 0x20, + .len = 6, + .pattern = scan_agand_pattern +}; + +/* Generic flash bbt decriptors +*/ +static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; +static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; + +static struct nand_bbt_descr bbt_main_descr = { + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE + | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, + .offs = 8, + .len = 4, + .veroffs = 12, + .maxblocks = 4, + .pattern = bbt_pattern +}; + +static struct nand_bbt_descr bbt_mirror_descr = { + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE + | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, + .offs = 8, + .len = 4, + .veroffs = 12, + .maxblocks = 4, + .pattern = mirror_pattern +}; + +/** + * nand_default_bbt - [NAND Interface] Select a default bad block table for the device + * @mtd: MTD device structure + * + * This function selects the default bad block table + * support for the device and calls the nand_scan_bbt function + * +*/ +int nand_default_bbt (struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + + /* Default for AG-AND. We must use a flash based + * bad block table as the devices have factory marked + * _good_ blocks. Erasing those blocks leads to loss + * of the good / bad information, so we _must_ store + * this information in a good / bad table during + * startup + */ + if (this->options & NAND_IS_AND) { + /* Use the default pattern descriptors */ + if (!this->bbt_td) { + this->bbt_td = &bbt_main_descr; + this->bbt_md = &bbt_mirror_descr; + } + this->options |= NAND_USE_FLASH_BBT; + return nand_scan_bbt (mtd, &agand_flashbased); + } + + /* Is a flash based bad block table requested ? 
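+ /*
+  * Hypothetical board specific descriptor, for illustration only: a
+  * driver that keeps its table at a fixed page can set NAND_BBT_ABSPAGE
+  * and fill in pages[] before nand_scan() runs, instead of relying on
+  * the generic "Bbt0"/"1tbB" descriptors above. The pattern, page number
+  * and field values here are assumptions, not defaults of this driver.
+  */
+ static uint8_t example_bbt_pattern[] = { 'M', 'B', 'b', 't' };
+ 
+ static struct nand_bbt_descr example_board_bbt_descr = {
+ 	.options   = NAND_BBT_ABSPAGE | NAND_BBT_WRITE | NAND_BBT_2BIT | NAND_BBT_VERSION,
+ 	.pages     = { 2048 },			/* assumed fixed bbt page */
+ 	.offs	   = 8,
+ 	.len	   = 4,
+ 	.veroffs   = 12,
+ 	.maxblocks = 4,
+ 	.pattern   = example_bbt_pattern,
+ };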
*/ + if (this->options & NAND_USE_FLASH_BBT) { + /* Use the default pattern descriptors */ + if (!this->bbt_td) { + this->bbt_td = &bbt_main_descr; + this->bbt_md = &bbt_mirror_descr; + } + if (mtd->oobblock > 512) + return nand_scan_bbt (mtd, &largepage_flashbased); + else + return nand_scan_bbt (mtd, &smallpage_flashbased); + } else { + this->bbt_td = NULL; + this->bbt_md = NULL; + if (mtd->oobblock > 512) + return nand_scan_bbt (mtd, &largepage_memorybased); + else + return nand_scan_bbt (mtd, &smallpage_memorybased); + } +} + +/** + * nand_isbad_bbt - [NAND Interface] Check if a block is bad + * @mtd: MTD device structure + * @offs: offset in the device + * @allowbbt: allow access to bad block table region + * +*/ +int nand_isbad_bbt (struct mtd_info *mtd, loff_t offs, int allowbbt) +{ + struct nand_chip *this = mtd->priv; + int block; + uint8_t res; + + /* Get block number * 2 */ + block = (int) (offs >> (this->bbt_erase_shift - 1)); + res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; + + DEBUG (MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n", + (unsigned int)offs, res, block >> 1); + + switch ((int)res) { + case 0x00: return 0; + case 0x01: return 1; + case 0x02: return allowbbt ? 0 : 1; + } + return 1; +} + +EXPORT_SYMBOL (nand_scan_bbt); +EXPORT_SYMBOL (nand_default_bbt); diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c new file mode 100644 index 000000000..9c356a0a7 --- /dev/null +++ b/drivers/mtd/nand/ppchameleonevb.c @@ -0,0 +1,430 @@ +/* + * drivers/mtd/nand/ppchameleonevb.c + * + * Copyright (C) 2003 DAVE Srl (info@wawnet.biz) + * + * Derived from drivers/mtd/nand/edb7312.c + * + * + * $Id: ppchameleonevb.c,v 1.2 2004/05/05 22:09:54 gleixner Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Overview: + * This is a device driver for the NAND flash devices found on the + * PPChameleon/PPChameleonEVB system. 
+ * PPChameleon options (autodetected): + * - BA model: no NAND + * - ME model: 32MB (Samsung K9F5608U0B) + * - HI model: 128MB (Samsung K9F1G08UOM) + * PPChameleonEVB options: + * - 32MB (Samsung K9F5608U0B) + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#undef USE_READY_BUSY_PIN +#define USE_READY_BUSY_PIN +/* see datasheets (tR) */ +#define NAND_BIG_DELAY_US 25 +#define NAND_SMALL_DELAY_US 10 + +/* handy sizes */ +#define SZ_4M 0x00400000 +#define NAND_SMALL_SIZE 0x02000000 +#define NAND_MTD_NAME "ppchameleon-nand" +#define NAND_EVB_MTD_NAME "ppchameleonevb-nand" + +/* GPIO pins used to drive NAND chip mounted on processor module */ +#define NAND_nCE_GPIO_PIN (0x80000000 >> 1) +#define NAND_CLE_GPIO_PIN (0x80000000 >> 2) +#define NAND_ALE_GPIO_PIN (0x80000000 >> 3) +#define NAND_RB_GPIO_PIN (0x80000000 >> 4) +/* GPIO pins used to drive NAND chip mounted on EVB */ +#define NAND_EVB_nCE_GPIO_PIN (0x80000000 >> 14) +#define NAND_EVB_CLE_GPIO_PIN (0x80000000 >> 15) +#define NAND_EVB_ALE_GPIO_PIN (0x80000000 >> 16) +#define NAND_EVB_RB_GPIO_PIN (0x80000000 >> 31) + +/* + * MTD structure for PPChameleonEVB board + */ +static struct mtd_info *ppchameleon_mtd = NULL; +static struct mtd_info *ppchameleonevb_mtd = NULL; + +/* + * Module stuff + */ +static int ppchameleon_fio_pbase = CFG_NAND0_PADDR; +static int ppchameleonevb_fio_pbase = CFG_NAND1_PADDR; + +#ifdef MODULE +MODULE_PARM(ppchameleon_fio_pbase, "i"); +__setup("ppchameleon_fio_pbase=",ppchameleon_fio_pbase); +MODULE_PARM(ppchameleonevb_fio_pbase, "i"); +__setup("ppchameleonevb_fio_pbase=",ppchameleonevb_fio_pbase); +#endif + +/* Internal buffers. Page buffer and oob buffer for one block */ +static u_char data_buf[2048 + 64]; +static u_char oob_buf[64 * 64]; +static u_char data_buf_evb[512 + 16]; +static u_char oob_buf_evb[16 * 32]; + +#ifdef CONFIG_MTD_PARTITIONS +/* + * Define static partitions for flash devices + */ +static struct mtd_partition partition_info_hi[] = { + { name: "PPChameleon HI Nand Flash", + offset: 0, + size: 128*1024*1024 } +}; + +static struct mtd_partition partition_info_me[] = { + { name: "PPChameleon ME Nand Flash", + offset: 0, + size: 32*1024*1024 } +}; + +static struct mtd_partition partition_info_evb[] = { + { name: "PPChameleonEVB Nand Flash", + offset: 0, + size: 32*1024*1024 } +}; + +#define NUM_PARTITIONS 1 + +extern int parse_cmdline_partitions(struct mtd_info *master, + struct mtd_partition **pparts, + const char *mtd_id); +#endif + + +/* + * hardware specific access to control-lines + */ +static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd) +{ + switch(cmd) { + + case NAND_CTL_SETCLE: + MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR); + break; + case NAND_CTL_CLRCLE: + MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR); + break; + case NAND_CTL_SETALE: + MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR); + break; + case NAND_CTL_CLRALE: + MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR); + break; + case NAND_CTL_SETNCE: + MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR); + break; + case NAND_CTL_CLRNCE: + MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR); + break; + } +} + +static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd) +{ + switch(cmd) { + + case NAND_CTL_SETCLE: + MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR); + break; + case NAND_CTL_CLRCLE: + MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR); + break; + case NAND_CTL_SETALE: + MACRO_NAND_CTL_SETALE((unsigned 
long)CFG_NAND1_PADDR); + break; + case NAND_CTL_CLRALE: + MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR); + break; + case NAND_CTL_SETNCE: + MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR); + break; + case NAND_CTL_CLRNCE: + MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR); + break; + } +} + +#ifdef USE_READY_BUSY_PIN +/* + * read device ready pin + */ +static int ppchameleon_device_ready(struct mtd_info *minfo) +{ + if (in_be32((volatile unsigned*)GPIO0_IR) & NAND_RB_GPIO_PIN) + return 1; + return 0; +} + +static int ppchameleonevb_device_ready(struct mtd_info *minfo) +{ + if (in_be32((volatile unsigned*)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN) + return 1; + return 0; +} +#endif + +#ifdef CONFIG_MTD_PARTITIONS +const char *part_probes[] = { "cmdlinepart", NULL }; +const char *part_probes_evb[] = { "cmdlinepart", NULL }; +#endif + +/* + * Main initialization routine + */ +static int __init ppchameleonevb_init (void) +{ + struct nand_chip *this; + const char *part_type = 0; + int mtd_parts_nb = 0; + struct mtd_partition *mtd_parts = 0; + int ppchameleon_fio_base; + int ppchameleonevb_fio_base; + + + /********************************* + * Processor module NAND (if any) * + *********************************/ + /* Allocate memory for MTD device structure and private data */ + ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) + + sizeof(struct nand_chip), + GFP_KERNEL); + if (!ppchameleon_mtd) { + printk("Unable to allocate PPChameleon NAND MTD device structure.\n"); + return -ENOMEM; + } + + /* map physical address */ + ppchameleon_fio_base = (unsigned long)ioremap(ppchameleon_fio_pbase, SZ_4M); + if(!ppchameleon_fio_base) { + printk("ioremap PPChameleon NAND flash failed\n"); + kfree(ppchameleon_mtd); + return -EIO; + } + + /* Get pointer to private data */ + this = (struct nand_chip *) (&ppchameleon_mtd[1]); + + /* Initialize structures */ + memset((char *) ppchameleon_mtd, 0, sizeof(struct mtd_info)); + memset((char *) this, 0, sizeof(struct nand_chip)); + + /* Link the private data with the MTD structure */ + ppchameleon_mtd->priv = this; + + /* Initialize GPIOs */ + /* Pin mapping for NAND chip */ + /* + CE GPIO_01 + CLE GPIO_02 + ALE GPIO_03 + R/B GPIO_04 + */ + /* output select */ + out_be32((volatile unsigned*)GPIO0_OSRH, in_be32((volatile unsigned*)GPIO0_OSRH) & 0xC0FFFFFF); + /* three-state select */ + out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xC0FFFFFF); + /* enable output driver */ + out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN); +#ifdef USE_READY_BUSY_PIN + /* three-state select */ + out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xFF3FFFFF); + /* high-impedecence */ + out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) & (~NAND_RB_GPIO_PIN)); + /* input select */ + out_be32((volatile unsigned*)GPIO0_ISR1H, (in_be32((volatile unsigned*)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000); +#endif + + /* insert callbacks */ + this->IO_ADDR_R = ppchameleon_fio_base; + this->IO_ADDR_W = ppchameleon_fio_base; + this->hwcontrol = ppchameleon_hwcontrol; +#ifdef USE_READY_BUSY_PIN + this->dev_ready = ppchameleon_device_ready; +#endif + this->chip_delay = NAND_BIG_DELAY_US; + /* ECC mode */ + this->eccmode = NAND_ECC_SOFT; + + /* Set internal data buffer */ + this->data_buf = data_buf; + this->oob_buf = oob_buf; + + /* Scan to find existence of the device (it could not be mounted) */ + if 
(nand_scan (ppchameleon_mtd, 1)) { + iounmap((void *)ppchameleon_fio_base); + kfree (ppchameleon_mtd); + goto nand_evb_init; + } + +#ifndef USE_READY_BUSY_PIN + /* Adjust delay if necessary */ + if (ppchameleon_mtd->size == NAND_SMALL_SIZE) + this->chip_delay = NAND_SMALL_DELAY_US; +#endif + +#ifdef CONFIG_MTD_PARTITIONS + ppchameleon_mtd->name = "ppchameleon-nand"; + mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0); + if (mtd_parts_nb > 0) + part_type = "command line"; + else + mtd_parts_nb = 0; +#endif + if (mtd_parts_nb == 0) + { + if (ppchameleon_mtd->size == NAND_SMALL_SIZE) + mtd_parts = partition_info_me; + else + mtd_parts = partition_info_hi; + mtd_parts_nb = NUM_PARTITIONS; + part_type = "static"; + } + + /* Register the partitions */ + printk(KERN_NOTICE "Using %s partition definition\n", part_type); + add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb); + +nand_evb_init: + /**************************** + * EVB NAND (always present) * + ****************************/ + /* Allocate memory for MTD device structure and private data */ + ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + + sizeof(struct nand_chip), + GFP_KERNEL); + if (!ppchameleonevb_mtd) { + printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n"); + return -ENOMEM; + } + + /* map physical address */ + ppchameleonevb_fio_base = (unsigned long)ioremap(ppchameleonevb_fio_pbase, SZ_4M); + if(!ppchameleonevb_fio_base) { + printk("ioremap PPChameleonEVB NAND flash failed\n"); + kfree(ppchameleonevb_mtd); + return -EIO; + } + + /* Get pointer to private data */ + this = (struct nand_chip *) (&ppchameleonevb_mtd[1]); + + /* Initialize structures */ + memset((char *) ppchameleonevb_mtd, 0, sizeof(struct mtd_info)); + memset((char *) this, 0, sizeof(struct nand_chip)); + + /* Link the private data with the MTD structure */ + ppchameleonevb_mtd->priv = this; + + /* Initialize GPIOs */ + /* Pin mapping for NAND chip */ + /* + CE GPIO_14 + CLE GPIO_15 + ALE GPIO_16 + R/B GPIO_31 + */ + /* output select */ + out_be32((volatile unsigned*)GPIO0_OSRH, in_be32((volatile unsigned*)GPIO0_OSRH) & 0xFFFFFFF0); + out_be32((volatile unsigned*)GPIO0_OSRL, in_be32((volatile unsigned*)GPIO0_OSRL) & 0x3FFFFFFF); + /* three-state select */ + out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xFFFFFFF0); + out_be32((volatile unsigned*)GPIO0_TSRL, in_be32((volatile unsigned*)GPIO0_TSRL) & 0x3FFFFFFF); + /* enable output driver */ + out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN | NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN); +#ifdef USE_READY_BUSY_PIN + /* three-state select */ + out_be32((volatile unsigned*)GPIO0_TSRL, in_be32((volatile unsigned*)GPIO0_TSRL) & 0xFFFFFFFC); + /* high-impedecence */ + out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN)); + /* input select */ + out_be32((volatile unsigned*)GPIO0_ISR1L, (in_be32((volatile unsigned*)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001); +#endif + + + /* insert callbacks */ + this->IO_ADDR_R = ppchameleonevb_fio_base; + this->IO_ADDR_W = ppchameleonevb_fio_base; + this->hwcontrol = ppchameleonevb_hwcontrol; +#ifdef USE_READY_BUSY_PIN + this->dev_ready = ppchameleonevb_device_ready; +#endif + this->chip_delay = NAND_SMALL_DELAY_US; + + /* ECC mode */ + this->eccmode = NAND_ECC_SOFT; + + /* Set internal data buffer */ + this->data_buf = data_buf_evb; + this->oob_buf = oob_buf_evb; + + /* Scan to 
find existence of the device */ + if (nand_scan (ppchameleonevb_mtd, 1)) { + iounmap((void *)ppchameleonevb_fio_base); + kfree (ppchameleonevb_mtd); + return -ENXIO; + } + +#ifdef CONFIG_MTD_PARTITIONS + ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; + mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0); + if (mtd_parts_nb > 0) + part_type = "command line"; + else + mtd_parts_nb = 0; +#endif + if (mtd_parts_nb == 0) + { + mtd_parts = partition_info_evb; + mtd_parts_nb = NUM_PARTITIONS; + part_type = "static"; + } + + /* Register the partitions */ + printk(KERN_NOTICE "Using %s partition definition\n", part_type); + add_mtd_partitions(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb); + + /* Return happy */ + return 0; +} +module_init(ppchameleonevb_init); + +/* + * Clean up routine + */ +static void __exit ppchameleonevb_cleanup (void) +{ + struct nand_chip *this = (struct nand_chip *) &ppchameleonevb_mtd[1]; + + /* Unregister the device */ + del_mtd_device (ppchameleonevb_mtd); + + /* Free internal data buffer */ + kfree (this->data_buf); + + /* Free the MTD device structure */ + kfree (ppchameleonevb_mtd); +} +module_exit(ppchameleonevb_cleanup); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("DAVE Srl "); +MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board"); diff --git a/drivers/mtd/nand/toto.c b/drivers/mtd/nand/toto.c new file mode 100644 index 000000000..ecb9f3dc8 --- /dev/null +++ b/drivers/mtd/nand/toto.c @@ -0,0 +1,221 @@ +/* + * drivers/mtd/nand/toto.c + * + * Copyright (c) 2003 Texas Instruments + * + * Derived from drivers/mtd/autcpu12.c + * + * Copyright (c) 2002 Thomas Gleixner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Overview: + * This is a device driver for the NAND flash device found on the + * TI fido board. 
It supports 32MiB and 64MiB cards + * + * $Id: toto.c,v 1.2 2003/10/21 10:04:58 dwmw2 Exp $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * MTD structure for TOTO board + */ +static struct mtd_info *toto_mtd = NULL; + +static int toto_io_base = OMAP_FLASH_1_BASE; + +#define CONFIG_NAND_WORKAROUND 1 + +#define NAND_NCE 0x4000 +#define NAND_CLE 0x1000 +#define NAND_ALE 0x0002 +#define NAND_MASK (NAND_CLE | NAND_ALE | NAND_NCE) + +#define T_NAND_CTL_CLRALE(iob) gpiosetout(NAND_ALE, 0) +#define T_NAND_CTL_SETALE(iob) gpiosetout(NAND_ALE, NAND_ALE) +#ifdef CONFIG_NAND_WORKAROUND /* "some" dev boards busted, blue wired to rts2 :( */ +#define T_NAND_CTL_CLRCLE(iob) gpiosetout(NAND_CLE, 0); rts2setout(2, 2) +#define T_NAND_CTL_SETCLE(iob) gpiosetout(NAND_CLE, NAND_CLE); rts2setout(2, 0) +#else +#define T_NAND_CTL_CLRCLE(iob) gpiosetout(NAND_CLE, 0) +#define T_NAND_CTL_SETCLE(iob) gpiosetout(NAND_CLE, NAND_CLE) +#endif +#define T_NAND_CTL_SETNCE(iob) gpiosetout(NAND_NCE, 0) +#define T_NAND_CTL_CLRNCE(iob) gpiosetout(NAND_NCE, NAND_NCE) + +/* + * Define partitions for flash devices + */ + +static struct mtd_partition partition_info64M[] = { + { .name = "toto kernel partition 1", + .offset = 0, + .size = 2 * SZ_1M }, + { .name = "toto file sys partition 2", + .offset = 2 * SZ_1M, + .size = 14 * SZ_1M }, + { .name = "toto user partition 3", + .offset = 16 * SZ_1M, + .size = 16 * SZ_1M }, + { .name = "toto devboard extra partition 4", + .offset = 32 * SZ_1M, + .size = 32 * SZ_1M }, +}; + +static struct mtd_partition partition_info32M[] = { + { .name = "toto kernel partition 1", + .offset = 0, + .size = 2 * SZ_1M }, + { .name = "toto file sys partition 2", + .offset = 2 * SZ_1M, + .size = 14 * SZ_1M }, + { .name = "toto user partition 3", + .offset = 16 * SZ_1M, + .size = 16 * SZ_1M }, +}; + +#define NUM_PARTITIONS32M 3 +#define NUM_PARTITIONS64M 4 +/* + * hardware specific access to control-lines +*/ + +static void toto_hwcontrol(struct mtd_info *mtd, int cmd) +{ + + udelay(1); /* hopefully enough time for tc make proceding write to clear */ + switch(cmd){ + + case NAND_CTL_SETCLE: T_NAND_CTL_SETCLE(cmd); break; + case NAND_CTL_CLRCLE: T_NAND_CTL_CLRCLE(cmd); break; + + case NAND_CTL_SETALE: T_NAND_CTL_SETALE(cmd); break; + case NAND_CTL_CLRALE: T_NAND_CTL_CLRALE(cmd); break; + + case NAND_CTL_SETNCE: T_NAND_CTL_SETNCE(cmd); break; + case NAND_CTL_CLRNCE: T_NAND_CTL_CLRNCE(cmd); break; + } + udelay(1); /* allow time to ensure gpio state to over take memory write */ +} + +/* + * Main initialization routine + */ +int __init toto_init (void) +{ + struct nand_chip *this; + int err = 0; + + /* Allocate memory for MTD device structure and private data */ + toto_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip), + GFP_KERNEL); + if (!toto_mtd) { + printk (KERN_WARNING "Unable to allocate toto NAND MTD device structure.\n"); + err = -ENOMEM; + goto out; + } + + /* Get pointer to private data */ + this = (struct nand_chip *) (&toto_mtd[1]); + + /* Initialize structures */ + memset((char *) toto_mtd, 0, sizeof(struct mtd_info)); + memset((char *) this, 0, sizeof(struct nand_chip)); + + /* Link the private data with the MTD structure */ + toto_mtd->priv = this; + + /* Set address of NAND IO lines */ + this->IO_ADDR_R = toto_io_base; + this->IO_ADDR_W = toto_io_base; + this->hwcontrol = toto_hwcontrol; + this->dev_ready = NULL; + /* 25 us command delay time */ + this->chip_delay = 30; + this->eccmode 
= NAND_ECC_SOFT; + + /* Scan to find existance of the device */ + if (nand_scan (toto_mtd, 1)) { + err = -ENXIO; + goto out_mtd; + } + + /* Allocate memory for internal data buffer */ + this->data_buf = kmalloc (sizeof(u_char) * (toto_mtd->oobblock + toto_mtd->oobsize), GFP_KERNEL); + if (!this->data_buf) { + printk (KERN_WARNING "Unable to allocate NAND data buffer for toto.\n"); + err = -ENOMEM; + goto out_mtd; + } + + /* Register the partitions */ + switch(toto_mtd->size){ + case SZ_64M: add_mtd_partitions(toto_mtd, partition_info64M, NUM_PARTITIONS64M); break; + case SZ_32M: add_mtd_partitions(toto_mtd, partition_info32M, NUM_PARTITIONS32M); break; + default: { + printk (KERN_WARNING "Unsupported Nand device\n"); + err = -ENXIO; + goto out_buf; + } + } + + gpioreserve(NAND_MASK); /* claim our gpios */ + archflashwp(0,0); /* open up flash for writing */ + + goto out; + +out_buf: + kfree (this->data_buf); +out_mtd: + kfree (toto_mtd); +out: + return err; +} + +module_init(toto_init); + +/* + * Clean up routine + */ +static void __exit toto_cleanup (void) +{ + struct nand_chip *this = (struct nand_chip *) &toto_mtd[1]; + + /* Unregister partitions */ + del_mtd_partitions(toto_mtd); + + /* Unregister the device */ + del_mtd_device (toto_mtd); + + /* Free internal data buffers */ + kfree (this->data_buf); + + /* Free the MTD device structure */ + kfree (toto_mtd); + + /* stop flash writes */ + archflashwp(0,1); + + /* release gpios to system */ + gpiorelease(NAND_MASK); +} +module_exit(toto_cleanup); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Richard Woodruff "); +MODULE_DESCRIPTION("Glue layer for NAND flash on toto board"); diff --git a/drivers/mtd/nand/tx4925ndfmc.c b/drivers/mtd/nand/tx4925ndfmc.c new file mode 100644 index 000000000..94a616561 --- /dev/null +++ b/drivers/mtd/nand/tx4925ndfmc.c @@ -0,0 +1,442 @@ +/* + * drivers/mtd/tx4925ndfmc.c + * + * Overview: + * This is a device driver for the NAND flash device found on the + * Toshiba RBTX4925 reference board, which is a SmartMediaCard. It supports + * 16MiB, 32MiB and 64MiB cards. + * + * Author: MontaVista Software, Inc. source@mvista.com + * + * Derived from drivers/mtd/autcpu12.c + * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) + * + * $Id: tx4925ndfmc.c,v 1.2 2004/03/27 19:55:53 gleixner Exp $ + * + * Copyright (C) 2001 Toshiba Corporation + * + * 2003 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern struct nand_oobinfo jffs2_oobinfo; + +/* + * MTD structure for RBTX4925 board + */ +static struct mtd_info *tx4925ndfmc_mtd = NULL; + +/* + * Module stuff + */ +#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE) +#define tx4925ndfmc_init init_module +#define tx4925ndfmc_cleanup cleanup_module +#endif + +/* + * Define partitions for flash devices + */ + +static struct mtd_partition partition_info16k[] = { + { .name = "RBTX4925 flash partition 1", + .offset = 0, + .size = 8 * 0x00100000 }, + { .name = "RBTX4925 flash partition 2", + .offset = 8 * 0x00100000, + .size = 8 * 0x00100000 }, +}; + +static struct mtd_partition partition_info32k[] = { + { .name = "RBTX4925 flash partition 1", + .offset = 0, + .size = 8 * 0x00100000 }, + { .name = "RBTX4925 flash partition 2", + .offset = 8 * 0x00100000, + .size = 24 * 0x00100000 }, +}; + +static struct mtd_partition partition_info64k[] = { + { .name = "User FS", + .offset = 0, + .size = 16 * 0x00100000 }, + { .name = "RBTX4925 flash partition 2", + .offset = 16 * 0x00100000, + .size = 48 * 0x00100000}, +}; + +static struct mtd_partition partition_info128k[] = { + { .name = "Skip bad section", + .offset = 0, + .size = 16 * 0x00100000 }, + { .name = "User FS", + .offset = 16 * 0x00100000, + .size = 112 * 0x00100000 }, +}; +#define NUM_PARTITIONS16K 2 +#define NUM_PARTITIONS32K 2 +#define NUM_PARTITIONS64K 2 +#define NUM_PARTITIONS128K 2 + +/* + * hardware specific access to control-lines +*/ +static void tx4925ndfmc_hwcontrol(struct mtd_info *mtd, int cmd) +{ + + switch(cmd){ + + case NAND_CTL_SETCLE: + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_CLE; + break; + case NAND_CTL_CLRCLE: + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_CLE; + break; + case NAND_CTL_SETALE: + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ALE; + break; + case NAND_CTL_CLRALE: + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ALE; + break; + case NAND_CTL_SETNCE: + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_CE; + break; + case NAND_CTL_CLRNCE: + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_CE; + break; + case NAND_CTL_SETWP: + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_WE; + break; + case NAND_CTL_CLRWP: + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_WE; + break; + } +} + +/* +* read device ready pin +*/ +static int tx4925ndfmc_device_ready(struct mtd_info *mtd) +{ + int ready; + ready = (tx4925_ndfmcptr->sr & TX4925_NDSFR_BUSY) ? 
0 : 1; + return ready; +} +void tx4925ndfmc_enable_hwecc(struct mtd_info *mtd, int mode) +{ + /* reset first */ + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_MASK; + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK; + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_ENAB; +} +static void tx4925ndfmc_disable_ecc(void) +{ + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK; +} +static void tx4925ndfmc_enable_read_ecc(void) +{ + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK; + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_READ; +} +void tx4925ndfmc_readecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code){ + int i; + u_char *ecc = ecc_code; + tx4925ndfmc_enable_read_ecc(); + for (i = 0;i < 6;i++,ecc++) + *ecc = tx4925_read_nfmc(&(tx4925_ndfmcptr->dtr)); + tx4925ndfmc_disable_ecc(); +} +void tx4925ndfmc_device_setup(void) +{ + + *(unsigned char *)0xbb005000 &= ~0x08; + + /* reset NDFMC */ + tx4925_ndfmcptr->rstr |= TX4925_NDFRSTR_RST; + while (tx4925_ndfmcptr->rstr & TX4925_NDFRSTR_RST); + + /* setup BusSeparete, Hold Time, Strobe Pulse Width */ + tx4925_ndfmcptr->mcr = TX4925_BSPRT ? TX4925_NDFMCR_BSPRT : 0; + tx4925_ndfmcptr->spr = TX4925_HOLD << 4 | TX4925_SPW; +} +static u_char tx4925ndfmc_nand_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + return tx4925_read_nfmc(this->IO_ADDR_R); +} + +static void tx4925ndfmc_nand_write_byte(struct mtd_info *mtd, u_char byte) +{ + struct nand_chip *this = mtd->priv; + tx4925_write_nfmc(byte, this->IO_ADDR_W); +} + +static void tx4925ndfmc_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; i<len; i++) + tx4925_write_nfmc(buf[i], this->IO_ADDR_W); +} + +static void tx4925ndfmc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; i<len; i++) + buf[i] = tx4925_read_nfmc(this->IO_ADDR_R); +} + +static int tx4925ndfmc_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; i<len; i++) + if (buf[i] != tx4925_read_nfmc(this->IO_ADDR_R)) + return -EFAULT; + + return 0; +} + +/* + * Send command to NAND device + */ +static void tx4925ndfmc_nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr) +{ + register struct nand_chip *this = mtd->priv; + + /* Begin command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_SETCLE); + /* + * Write out the command to the device.
+ */ + if (command == NAND_CMD_SEQIN) { + int readcmd; + + if (column >= mtd->oobblock) { + /* OOB area */ + column -= mtd->oobblock; + readcmd = NAND_CMD_READOOB; + } else if (column < 256) { + /* First 256 bytes --> READ0 */ + readcmd = NAND_CMD_READ0; + } else { + column -= 256; + readcmd = NAND_CMD_READ1; + } + this->write_byte(mtd, readcmd); + } + this->write_byte(mtd, command); + + /* Set ALE and clear CLE to start address cycle */ + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + + if (column != -1 || page_addr != -1) { + this->hwcontrol(mtd, NAND_CTL_SETALE); + + /* Serially input address */ + if (column != -1) + this->write_byte(mtd, column); + if (page_addr != -1) { + this->write_byte(mtd, (unsigned char) (page_addr & 0xff)); + this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff)); + /* One more address cycle for higher density devices */ + if (mtd->size & 0x0c000000) + this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f)); + } + /* Latch in address */ + this->hwcontrol(mtd, NAND_CTL_CLRALE); + } + + /* + * program and erase have their own busy handlers + * status and sequential in needs no delay + */ + switch (command) { + + case NAND_CMD_PAGEPROG: + /* Turn off WE */ + this->hwcontrol (mtd, NAND_CTL_CLRWP); + return; + + case NAND_CMD_SEQIN: + /* Turn on WE */ + this->hwcontrol (mtd, NAND_CTL_SETWP); + return; + + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: + case NAND_CMD_STATUS: + return; + + case NAND_CMD_RESET: + if (this->dev_ready) + break; + this->hwcontrol(mtd, NAND_CTL_SETCLE); + this->write_byte(mtd, NAND_CMD_STATUS); + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + while ( !(this->read_byte(mtd) & 0x40)); + return; + + /* This applies to read commands */ + default: + /* + * If we don't have access to the busy pin, we apply the given + * command delay + */ + if (!this->dev_ready) { + udelay (this->chip_delay); + return; + } + } + + /* wait until command is processed */ + while (!this->dev_ready(mtd)); +} + +#ifdef CONFIG_MTD_CMDLINE_PARTS +extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partitio +n **pparts, char *); +#endif + +/* + * Main initialization routine + */ +extern int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); +int __init tx4925ndfmc_init (void) +{ + struct nand_chip *this; + int err = 0; + + /* Allocate memory for MTD device structure and private data */ + tx4925ndfmc_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip), + GFP_KERNEL); + if (!tx4925ndfmc_mtd) { + printk ("Unable to allocate RBTX4925 NAND MTD device structure.\n"); + err = -ENOMEM; + goto out; + } + + tx4925ndfmc_device_setup(); + + /* io is indirect via a register so don't need to ioremap address */ + + /* Get pointer to private data */ + this = (struct nand_chip *) (&tx4925ndfmc_mtd[1]); + + /* Initialize structures */ + memset((char *) tx4925ndfmc_mtd, 0, sizeof(struct mtd_info)); + memset((char *) this, 0, sizeof(struct nand_chip)); + + /* Link the private data with the MTD structure */ + tx4925ndfmc_mtd->priv = this; + + /* Set address of NAND IO lines */ + this->IO_ADDR_R = (unsigned long)&(tx4925_ndfmcptr->dtr); + this->IO_ADDR_W = (unsigned long)&(tx4925_ndfmcptr->dtr); + this->hwcontrol = tx4925ndfmc_hwcontrol; + this->enable_hwecc = tx4925ndfmc_enable_hwecc; + this->calculate_ecc = tx4925ndfmc_readecc; + this->correct_data = nand_correct_data; + this->eccmode = NAND_ECC_HW6_512; + this->dev_ready = tx4925ndfmc_device_ready; + /* 20 us command delay time */ + this->chip_delay = 20; + 
this->read_byte = tx4925ndfmc_nand_read_byte; + this->write_byte = tx4925ndfmc_nand_write_byte; + this->cmdfunc = tx4925ndfmc_nand_command; + this->write_buf = tx4925ndfmc_nand_write_buf; + this->read_buf = tx4925ndfmc_nand_read_buf; + this->verify_buf = tx4925ndfmc_nand_verify_buf; + + /* Scan to find existance of the device */ + if (nand_scan (tx4925ndfmc_mtd, 1)) { + err = -ENXIO; + goto out_ior; + } + + /* Allocate memory for internal data buffer */ + this->data_buf = kmalloc (sizeof(u_char) * (tx4925ndfmc_mtd->oobblock + tx4925ndfmc_mtd->oobsize), GFP_KERNEL); + if (!this->data_buf) { + printk ("Unable to allocate NAND data buffer for RBTX4925.\n"); + err = -ENOMEM; + goto out_ior; + } + + /* Register the partitions */ +#ifdef CONFIG_MTD_CMDLINE_PARTS + { + int mtd_parts_nb = 0; + struct mtd_partition *mtd_parts = 0; + mtd_parts_nb = parse_cmdline_partitions(tx4925ndfmc_mtd, &mtd_parts, "tx4925ndfmc"); + if (mtd_parts_nb > 0) + add_mtd_partitions(tx4925ndfmc_mtd, mtd_parts, mtd_parts_nb); + else + add_mtd_device(tx4925ndfmc_mtd); + } +#else /* ifdef CONFIG_MTD_CMDLINE_PARTS */ + switch(tx4925ndfmc_mtd->size){ + case 0x01000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info16k, NUM_PARTITIONS16K); break; + case 0x02000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info32k, NUM_PARTITIONS32K); break; + case 0x04000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info64k, NUM_PARTITIONS64K); break; + case 0x08000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info128k, NUM_PARTITIONS128K); break; + default: { + printk ("Unsupported SmartMedia device\n"); + err = -ENXIO; + goto out_buf; + } + } +#endif /* ifdef CONFIG_MTD_CMDLINE_PARTS */ + goto out; + +out_buf: + kfree (this->data_buf); +out_ior: +out: + return err; +} + +module_init(tx4925ndfmc_init); + +/* + * Clean up routine + */ +#ifdef MODULE +static void __exit tx4925ndfmc_cleanup (void) +{ + struct nand_chip *this = (struct nand_chip *) &tx4925ndfmc_mtd[1]; + + /* Unregister partitions */ + del_mtd_partitions(tx4925ndfmc_mtd); + + /* Unregister the device */ + del_mtd_device (tx4925ndfmc_mtd); + + /* Free internal data buffers */ + kfree (this->data_buf); + + /* Free the MTD device structure */ + kfree (tx4925ndfmc_mtd); +} +module_exit(tx4925ndfmc_cleanup); +#endif + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alice Hennessy "); +MODULE_DESCRIPTION("Glue layer for SmartMediaCard on Toshiba RBTX4925"); diff --git a/drivers/mtd/nand/tx4938ndfmc.c b/drivers/mtd/nand/tx4938ndfmc.c new file mode 100644 index 000000000..f2375b241 --- /dev/null +++ b/drivers/mtd/nand/tx4938ndfmc.c @@ -0,0 +1,422 @@ +/* + * drivers/mtd/nand/tx4938ndfmc.c + * + * Overview: + * This is a device driver for the NAND flash device connected to + * TX4938 internal NAND Memory Controller. + * TX4938 NDFMC is almost same as TX4925 NDFMC, but register size are 64 bit. + * + * Author: source@mvista.com + * + * Based on spia.c by Steven J. Hill + * + * $Id: tx4938ndfmc.c,v 1.2 2004/03/27 19:55:53 gleixner Exp $ + * + * Copyright (C) 2000-2001 Toshiba Corporation + * + * 2003 (c) MontaVista Software, Inc. This file is licensed under the + * terms of the GNU General Public License version 2. This program is + * licensed "as is" without any warranty of any kind, whether express + * or implied. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern struct nand_oobinfo jffs2_oobinfo; + +/* + * MTD structure for TX4938 NDFMC + */ +static struct mtd_info *tx4938ndfmc_mtd; + +/* + * Define partitions for flash device + */ +#define flush_wb() (void)tx4938_ndfmcptr->mcr; + +#define NUM_PARTITIONS 3 +#define NUMBER_OF_CIS_BLOCKS 24 +#define SIZE_OF_BLOCK 0x00004000 +#define NUMBER_OF_BLOCK_PER_ZONE 1024 +#define SIZE_OF_ZONE (NUMBER_OF_BLOCK_PER_ZONE * SIZE_OF_BLOCK) +#ifndef CONFIG_MTD_CMDLINE_PARTS +/* + * You can use the following sample of MTD partitions + * on the NAND Flash Memory 32MB or more. + * + * The following figure shows the image of the sample partition on + * the 32MB NAND Flash Memory. + * + * Block No. + * 0 +-----------------------------+ ------ + * | CIS | ^ + * 24 +-----------------------------+ | + * | kernel image | | Zone 0 + * | | | + * +-----------------------------+ | + * 1023 | unused area | v + * +-----------------------------+ ------ + * 1024 | JFFS2 | ^ + * | | | + * | | | Zone 1 + * | | | + * | | | + * | | v + * 2047 +-----------------------------+ ------ + * + */ +static struct mtd_partition partition_info[NUM_PARTITIONS] = { + { + .name = "RBTX4938 CIS Area", + .offset = 0, + .size = (NUMBER_OF_CIS_BLOCKS * SIZE_OF_BLOCK), + .mask_flags = MTD_WRITEABLE /* This partition is NOT writable */ + }, + { + .name = "RBTX4938 kernel image", + .offset = MTDPART_OFS_APPEND, + .size = 8 * 0x00100000, /* 8MB (Depends on size of kernel image) */ + .mask_flags = MTD_WRITEABLE /* This partition is NOT writable */ + }, + { + .name = "Root FS (JFFS2)", + .offset = (0 + SIZE_OF_ZONE), /* start address of next zone */ + .size = MTDPART_SIZ_FULL + }, +}; +#endif + +static void tx4938ndfmc_hwcontrol(struct mtd_info *mtd, int cmd) +{ + switch (cmd) { + case NAND_CTL_SETCLE: + tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_CLE; + break; + case NAND_CTL_CLRCLE: + tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_CLE; + break; + case NAND_CTL_SETALE: + tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_ALE; + break; + case NAND_CTL_CLRALE: + tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_ALE; + break; + /* TX4938_NDFMCR_CE bit is 0:high 1:low */ + case NAND_CTL_SETNCE: + tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_CE; + break; + case NAND_CTL_CLRNCE: + tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_CE; + break; + case NAND_CTL_SETWP: + tx4938_ndfmcptr->mcr |= TX4938_NDFMCR_WE; + break; + case NAND_CTL_CLRWP: + tx4938_ndfmcptr->mcr &= ~TX4938_NDFMCR_WE; + break; + } +} +static int tx4938ndfmc_dev_ready(struct mtd_info *mtd) +{ + flush_wb(); + return !(tx4938_ndfmcptr->sr & TX4938_NDFSR_BUSY); +} +static void tx4938ndfmc_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) +{ + u32 mcr = tx4938_ndfmcptr->mcr; + mcr &= ~TX4938_NDFMCR_ECC_ALL; + tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF; + tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_READ; + ecc_code[1] = tx4938_ndfmcptr->dtr; + ecc_code[0] = tx4938_ndfmcptr->dtr; + ecc_code[2] = tx4938_ndfmcptr->dtr; + tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF; +} +static void tx4938ndfmc_enable_hwecc(struct mtd_info *mtd, int mode) +{ + u32 mcr = tx4938_ndfmcptr->mcr; + mcr &= ~TX4938_NDFMCR_ECC_ALL; + tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_RESET; + tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_OFF; + tx4938_ndfmcptr->mcr = mcr | TX4938_NDFMCR_ECC_ON; +} + +static u_char tx4938ndfmc_nand_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + return 
tx4938_read_nfmc(this->IO_ADDR_R); +} + +static void tx4938ndfmc_nand_write_byte(struct mtd_info *mtd, u_char byte) +{ + struct nand_chip *this = mtd->priv; + tx4938_write_nfmc(byte, this->IO_ADDR_W); +} + +static void tx4938ndfmc_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; i<len; i++) + tx4938_write_nfmc(buf[i], this->IO_ADDR_W); +} + +static void tx4938ndfmc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; i<len; i++) + buf[i] = tx4938_read_nfmc(this->IO_ADDR_R); +} + +static int tx4938ndfmc_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; i<len; i++) + if (buf[i] != tx4938_read_nfmc(this->IO_ADDR_R)) + return -EFAULT; + + return 0; +} + +/* + * Send command to NAND device + */ +static void tx4938ndfmc_nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr) +{ + register struct nand_chip *this = mtd->priv; + + /* Begin command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_SETCLE); + /* + * Write out the command to the device. + */ + if (command == NAND_CMD_SEQIN) { + int readcmd; + + if (column >= mtd->oobblock) { + /* OOB area */ + column -= mtd->oobblock; + readcmd = NAND_CMD_READOOB; + } else if (column < 256) { + /* First 256 bytes --> READ0 */ + readcmd = NAND_CMD_READ0; + } else { + column -= 256; + readcmd = NAND_CMD_READ1; + } + this->write_byte(mtd, readcmd); + } + this->write_byte(mtd, command); + + /* Set ALE and clear CLE to start address cycle */ + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + + if (column != -1 || page_addr != -1) { + this->hwcontrol(mtd, NAND_CTL_SETALE); + + /* Serially input address */ + if (column != -1) + this->write_byte(mtd, column); + if (page_addr != -1) { + this->write_byte(mtd, (unsigned char) (page_addr & 0xff)); + this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff)); + /* One more address cycle for higher density devices */ + if (mtd->size & 0x0c000000) + this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f)); + } + /* Latch in address */ + this->hwcontrol(mtd, NAND_CTL_CLRALE); + } + + /* + * program and erase have their own busy handlers + * status and sequential in needs no delay + */ + switch (command) { + + case NAND_CMD_PAGEPROG: + /* Turn off WE */ + this->hwcontrol (mtd, NAND_CTL_CLRWP); + return; + + case NAND_CMD_SEQIN: + /* Turn on WE */ + this->hwcontrol (mtd, NAND_CTL_SETWP); + return; + + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: + case NAND_CMD_STATUS: + return; + + case NAND_CMD_RESET: + if (this->dev_ready) + break; + this->hwcontrol(mtd, NAND_CTL_SETCLE); + this->write_byte(mtd, NAND_CMD_STATUS); + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + while ( !(this->read_byte(mtd) & 0x40)); + return; + + /* This applies to read commands */ + default: + /* + * If we don't have access to the busy pin, we apply the given + * command delay + */ + if (!this->dev_ready) { + udelay (this->chip_delay); + return; + } + } + + /* wait until command is processed */ + while (!this->dev_ready(mtd)); +} + +#ifdef CONFIG_MTD_CMDLINE_PARTS +extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, char *); +#endif +/* + * Main initialization routine + */ +int __init tx4938ndfmc_init (void) +{ + struct nand_chip *this; + int bsprt = 0, hold = 0xf, spw = 0xf; + int protected = 0; + + if ((*rbtx4938_piosel_ptr & 0x0c) != 0x08) { + printk("TX4938 NDFMC: disabled by IOC PIOSEL\n"); + return -ENODEV; + } + bsprt = 1; + hold = 2; + spw = 9 - 1; /* 8 GBUSCLK = 80ns (@ GBUSCLK 100MHz)
*/ + + if ((tx4938_ccfgptr->pcfg & + (TX4938_PCFG_ATA_SEL|TX4938_PCFG_ISA_SEL|TX4938_PCFG_NDF_SEL)) + != TX4938_PCFG_NDF_SEL) { + printk("TX4938 NDFMC: disabled by PCFG.\n"); + return -ENODEV; + } + + /* reset NDFMC */ + tx4938_ndfmcptr->rstr |= TX4938_NDFRSTR_RST; + while (tx4938_ndfmcptr->rstr & TX4938_NDFRSTR_RST) + ; + /* setup BusSeparete, Hold Time, Strobe Pulse Width */ + tx4938_ndfmcptr->mcr = bsprt ? TX4938_NDFMCR_BSPRT : 0; + tx4938_ndfmcptr->spr = hold << 4 | spw; + + /* Allocate memory for MTD device structure and private data */ + tx4938ndfmc_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip), + GFP_KERNEL); + if (!tx4938ndfmc_mtd) { + printk ("Unable to allocate TX4938 NDFMC MTD device structure.\n"); + return -ENOMEM; + } + + /* Get pointer to private data */ + this = (struct nand_chip *) (&tx4938ndfmc_mtd[1]); + + /* Initialize structures */ + memset((char *) tx4938ndfmc_mtd, 0, sizeof(struct mtd_info)); + memset((char *) this, 0, sizeof(struct nand_chip)); + + /* Link the private data with the MTD structure */ + tx4938ndfmc_mtd->priv = this; + + /* Set address of NAND IO lines */ + this->IO_ADDR_R = (unsigned long)&tx4938_ndfmcptr->dtr; + this->IO_ADDR_W = (unsigned long)&tx4938_ndfmcptr->dtr; + this->hwcontrol = tx4938ndfmc_hwcontrol; + this->dev_ready = tx4938ndfmc_dev_ready; + this->calculate_ecc = tx4938ndfmc_calculate_ecc; + this->correct_data = nand_correct_data; + this->enable_hwecc = tx4938ndfmc_enable_hwecc; + this->eccmode = NAND_ECC_HW3_256; + this->chip_delay = 100; + this->read_byte = tx4938ndfmc_nand_read_byte; + this->write_byte = tx4938ndfmc_nand_write_byte; + this->cmdfunc = tx4938ndfmc_nand_command; + this->write_buf = tx4938ndfmc_nand_write_buf; + this->read_buf = tx4938ndfmc_nand_read_buf; + this->verify_buf = tx4938ndfmc_nand_verify_buf; + + /* Scan to find existance of the device */ + if (nand_scan (tx4938ndfmc_mtd, 1)) { + kfree (tx4938ndfmc_mtd); + return -ENXIO; + } + + /* Allocate memory for internal data buffer */ + this->data_buf = kmalloc (sizeof(u_char) * (tx4938ndfmc_mtd->oobblock + tx4938ndfmc_mtd->oobsize), GFP_KERNEL); + if (!this->data_buf) { + printk ("Unable to allocate NAND data buffer for TX4938.\n"); + kfree (tx4938ndfmc_mtd); + return -ENOMEM; + } + + if (protected) { + printk(KERN_INFO "TX4938 NDFMC: write protected.\n"); + tx4938ndfmc_mtd->flags &= ~(MTD_WRITEABLE | MTD_ERASEABLE); + } + +#ifdef CONFIG_MTD_CMDLINE_PARTS + { + int mtd_parts_nb = 0; + struct mtd_partition *mtd_parts = 0; + mtd_parts_nb = parse_cmdline_partitions(tx4938ndfmc_mtd, &mtd_parts, "tx4938ndfmc"); + if (mtd_parts_nb > 0) + add_mtd_partitions(tx4938ndfmc_mtd, mtd_parts, mtd_parts_nb); + else + add_mtd_device(tx4938ndfmc_mtd); + } +#else + add_mtd_partitions(tx4938ndfmc_mtd, partition_info, NUM_PARTITIONS ); +#endif + + return 0; +} +module_init(tx4938ndfmc_init); + +/* + * Clean up routine + */ +static void __exit tx4938ndfmc_cleanup (void) +{ + struct nand_chip *this = (struct nand_chip *) tx4938ndfmc_mtd->priv; + + /* Unregister the device */ +#ifdef CONFIG_MTD_CMDLINE_PARTS + del_mtd_partitions(tx4938ndfmc_mtd); +#endif + del_mtd_device (tx4938ndfmc_mtd); + + /* Free the MTD device structure */ + kfree (tx4938ndfmc_mtd); + + /* Free internal data buffer */ + kfree (this->data_buf); +} +module_exit(tx4938ndfmc_cleanup); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alice Hennessy "); +MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on TX4938 NDFMC"); diff --git a/drivers/net/fec_8xx/Kconfig b/drivers/net/fec_8xx/Kconfig new file 
mode 100644 index 000000000..db36ac3ea --- /dev/null +++ b/drivers/net/fec_8xx/Kconfig @@ -0,0 +1,14 @@ +config FEC_8XX + tristate "Motorola 8xx FEC driver" + depends on NET_ETHERNET && 8xx && (NETTA || NETPHONE) + select MII + +config FEC_8XX_GENERIC_PHY + bool "Support any generic PHY" + depends on FEC_8XX + default y + +config FEC_8XX_DM9161_PHY + bool "Support DM9161 PHY" + depends on FEC_8XX + default n diff --git a/drivers/net/fec_8xx/Makefile b/drivers/net/fec_8xx/Makefile new file mode 100644 index 000000000..70c54f8c4 --- /dev/null +++ b/drivers/net/fec_8xx/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for the Motorola 8xx FEC ethernet controller +# + +obj-$(CONFIG_FEC_8XX) += fec_8xx.o + +fec_8xx-objs := fec_main.o fec_mii.o + +# the platform instantatiation objects +ifeq ($(CONFIG_NETTA),y) +fec_8xx-objs += fec_8xx-netta.o +endif diff --git a/drivers/net/fec_8xx/fec_8xx-netta.c b/drivers/net/fec_8xx/fec_8xx-netta.c new file mode 100644 index 000000000..7d73661aa --- /dev/null +++ b/drivers/net/fec_8xx/fec_8xx-netta.c @@ -0,0 +1,153 @@ +/* + * FEC instantatiation file for NETTA + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "fec_8xx.h" + +/*************************************************/ + +static struct fec_platform_info fec1_info = { + .fec_no = 0, + .use_mdio = 1, + .phy_addr = 8, + .fec_irq = SIU_LEVEL1, + .phy_irq = CPM_IRQ_OFFSET + CPMVEC_PIO_PC6, + .rx_ring = 128, + .tx_ring = 16, + .rx_copybreak = 240, + .use_napi = 1, + .napi_weight = 17, +}; + +static struct fec_platform_info fec2_info = { + .fec_no = 1, + .use_mdio = 1, + .phy_addr = 2, + .fec_irq = SIU_LEVEL3, + .phy_irq = CPM_IRQ_OFFSET + CPMVEC_PIO_PC7, + .rx_ring = 128, + .tx_ring = 16, + .rx_copybreak = 240, + .use_napi = 1, + .napi_weight = 17, +}; + +static struct net_device *fec1_dev; +static struct net_device *fec2_dev; + +/* XXX custom u-boot & Linux startup needed */ +extern const char *__fw_getenv(const char *var); + +/* access ports */ +#define setbits32(_addr, _v) __fec_out32(&(_addr), __fec_in32(&(_addr)) | (_v)) +#define clrbits32(_addr, _v) __fec_out32(&(_addr), __fec_in32(&(_addr)) & ~(_v)) + +#define setbits16(_addr, _v) __fec_out16(&(_addr), __fec_in16(&(_addr)) | (_v)) +#define clrbits16(_addr, _v) __fec_out16(&(_addr), __fec_in16(&(_addr)) & ~(_v)) + +int fec_8xx_platform_init(void) +{ + immap_t *immap = (immap_t *)IMAP_ADDR; + bd_t *bd = (bd_t *) __res; + const char *s; + char *e; + int i; + + /* use MDC for MII */ + setbits16(immap->im_ioport.iop_pdpar, 0x0080); + clrbits16(immap->im_ioport.iop_pddir, 0x0080); + + /* configure FEC1 pins */ + setbits16(immap->im_ioport.iop_papar, 0xe810); + setbits16(immap->im_ioport.iop_padir, 0x0810); + clrbits16(immap->im_ioport.iop_padir, 0xe000); + + setbits32(immap->im_cpm.cp_pbpar, 0x00000001); + clrbits32(immap->im_cpm.cp_pbdir, 0x00000001); + + setbits32(immap->im_cpm.cp_cptr, 0x00000100); + clrbits32(immap->im_cpm.cp_cptr, 0x00000050); + + clrbits16(immap->im_ioport.iop_pcpar, 0x0200); + clrbits16(immap->im_ioport.iop_pcdir, 0x0200); + clrbits16(immap->im_ioport.iop_pcso, 0x0200); + setbits16(immap->im_ioport.iop_pcint, 0x0200); + + /* configure FEC2 pins */ + setbits32(immap->im_cpm.cp_pepar, 0x00039620); + setbits32(immap->im_cpm.cp_pedir, 0x00039620); + setbits32(immap->im_cpm.cp_peso, 0x00031000); + 
clrbits32(immap->im_cpm.cp_peso, 0x00008620); + + setbits32(immap->im_cpm.cp_cptr, 0x00000080); + clrbits32(immap->im_cpm.cp_cptr, 0x00000028); + + clrbits16(immap->im_ioport.iop_pcpar, 0x0200); + clrbits16(immap->im_ioport.iop_pcdir, 0x0200); + clrbits16(immap->im_ioport.iop_pcso, 0x0200); + setbits16(immap->im_ioport.iop_pcint, 0x0200); + + /* fill up */ + fec1_info.sys_clk = bd->bi_intfreq; + fec2_info.sys_clk = bd->bi_intfreq; + + s = __fw_getenv("ethaddr"); + if (s != NULL) { + for (i = 0; i < 6; i++) { + fec1_info.macaddr[i] = simple_strtoul(s, &e, 16); + if (*e) + s = e + 1; + } + } + + s = __fw_getenv("eth1addr"); + if (s != NULL) { + for (i = 0; i < 6; i++) { + fec2_info.macaddr[i] = simple_strtoul(s, &e, 16); + if (*e) + s = e + 1; + } + } + + fec_8xx_init_one(&fec1_info, &fec1_dev); + fec_8xx_init_one(&fec2_info, &fec2_dev); + + return fec1_dev != NULL && fec2_dev != NULL ? 0 : -1; +} + +void fec_8xx_platform_cleanup(void) +{ + if (fec2_dev != NULL) + fec_8xx_cleanup_one(fec2_dev); + + if (fec1_dev != NULL) + fec_8xx_cleanup_one(fec1_dev); +} diff --git a/drivers/net/fec_8xx/fec_8xx.h b/drivers/net/fec_8xx/fec_8xx.h new file mode 100644 index 000000000..5af60b0f9 --- /dev/null +++ b/drivers/net/fec_8xx/fec_8xx.h @@ -0,0 +1,218 @@ +#ifndef FEC_8XX_H +#define FEC_8XX_H + +#include +#include + +#include + +/* HW info */ + +/* CRC polynomium used by the FEC for the multicast group filtering */ +#define FEC_CRC_POLY 0x04C11DB7 + +#define MII_ADVERTISE_HALF (ADVERTISE_100HALF | \ + ADVERTISE_10HALF | ADVERTISE_CSMA) +#define MII_ADVERTISE_ALL (ADVERTISE_100FULL | \ + ADVERTISE_10FULL | MII_ADVERTISE_HALF) + +/* Interrupt events/masks. +*/ +#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ +#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ +#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ +#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ +#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ +#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ +#define FEC_ENET_RXF 0x02000000U /* Full frame received */ +#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ +#define FEC_ENET_MII 0x00800000U /* MII interrupt */ +#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ + +#define FEC_ECNTRL_PINMUX 0x00000004 +#define FEC_ECNTRL_ETHER_EN 0x00000002 +#define FEC_ECNTRL_RESET 0x00000001 + +#define FEC_RCNTRL_BC_REJ 0x00000010 +#define FEC_RCNTRL_PROM 0x00000008 +#define FEC_RCNTRL_MII_MODE 0x00000004 +#define FEC_RCNTRL_DRT 0x00000002 +#define FEC_RCNTRL_LOOP 0x00000001 + +#define FEC_TCNTRL_FDEN 0x00000004 +#define FEC_TCNTRL_HBC 0x00000002 +#define FEC_TCNTRL_GTS 0x00000001 + +/* values for MII phy_status */ + +#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */ +#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */ +#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */ +#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */ +#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */ +#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */ +#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */ + +#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */ +#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */ +#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */ +#define PHY_STAT_SPMASK 0xf000 /* mask for speed */ +#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */ +#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */ +#define PHY_STAT_100HDX 0x4000 
/* 100 Mbit half duplex selected */ +#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */ + +typedef struct phy_info { + unsigned int id; + const char *name; + void (*startup) (struct net_device * dev); + void (*shutdown) (struct net_device * dev); + void (*ack_int) (struct net_device * dev); +} phy_info_t; + +/* The FEC stores dest/src/type, data, and checksum for receive packets. + */ +#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */ +#define MIN_MTU 46 /* this is data size */ +#define CRC_LEN 4 + +#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN) +#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN) + +/* Must be a multiple of 4 */ +#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE+3) & ~3) +/* This is needed so that invalidate_xxx wont invalidate too much */ +#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE) + +/* platform interface */ + +struct fec_platform_info { + int fec_no; /* FEC index */ + int use_mdio; /* use external MII */ + int phy_addr; /* the phy address */ + int fec_irq, phy_irq; /* the irq for the controller */ + int rx_ring, tx_ring; /* number of buffers on rx */ + int sys_clk; /* system clock */ + __u8 macaddr[6]; /* mac address */ + int rx_copybreak; /* limit we copy small frames */ + int use_napi; /* use NAPI */ + int napi_weight; /* NAPI weight */ +}; + +/* forward declaration */ +struct fec; + +struct fec_enet_private { + spinlock_t lock; /* during all ops except TX pckt processing */ + spinlock_t tx_lock; /* during fec_start_xmit and fec_tx */ + int fecno; + struct fec *fecp; + const struct fec_platform_info *fpi; + int rx_ring, tx_ring; + dma_addr_t ring_mem_addr; + void *ring_base; + struct sk_buff **rx_skbuff; + struct sk_buff **tx_skbuff; + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t *tx_bd_base; + cbd_t *dirty_tx; /* ring entries to be free()ed. 
*/ + cbd_t *cur_rx; + cbd_t *cur_tx; + int tx_free; + struct net_device_stats stats; + struct timer_list phy_timer_list; + const struct phy_info *phy; + unsigned int fec_phy_speed; + __u32 msg_enable; + struct mii_if_info mii_if; +}; + +/***************************************************************************/ + +void fec_restart(struct net_device *dev, int duplex, int speed); +void fec_stop(struct net_device *dev); + +/***************************************************************************/ + +int fec_mii_read(struct net_device *dev, int phy_id, int location); +void fec_mii_write(struct net_device *dev, int phy_id, int location, int value); + +int fec_mii_phy_id_detect(struct net_device *dev); +void fec_mii_startup(struct net_device *dev); +void fec_mii_shutdown(struct net_device *dev); +void fec_mii_ack_int(struct net_device *dev); + +void fec_mii_link_status_change_check(struct net_device *dev, int init_media); + +/***************************************************************************/ + +#define FEC1_NO 0x00 +#define FEC2_NO 0x01 +#define FEC3_NO 0x02 + +int fec_8xx_init_one(const struct fec_platform_info *fpi, + struct net_device **devp); +int fec_8xx_cleanup_one(struct net_device *dev); + +/***************************************************************************/ + +#define DRV_MODULE_NAME "fec_8xx" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "0.1" +#define DRV_MODULE_RELDATE "May 6, 2004" + +/***************************************************************************/ + +int fec_8xx_platform_init(void); +void fec_8xx_platform_cleanup(void); + +/***************************************************************************/ + +/* FEC access macros */ +#if defined(CONFIG_8xx) +/* for a 8xx __raw_xxx's are sufficient */ +#define __fec_out32(addr, x) __raw_writel(x, addr) +#define __fec_out16(addr, x) __raw_writew(x, addr) +#define __fec_in32(addr) __raw_readl(addr) +#define __fec_in16(addr) __raw_readw(addr) +#else +/* for others play it safe */ +#define __fec_out32(addr, x) out_be32(addr, x) +#define __fec_out16(addr, x) out_be16(addr, x) +#define __fec_in32(addr) in_be32(addr) +#define __fec_in16(addr) in_be16(addr) +#endif + +/* write */ +#define FW(_fecp, _reg, _v) __fec_out32(&(_fecp)->fec_ ## _reg, (_v)) + +/* read */ +#define FR(_fecp, _reg) __fec_in32(&(_fecp)->fec_ ## _reg) + +/* set bits */ +#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v)) + +/* clear bits */ +#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) + +/* buffer descriptor access macros */ + +/* write */ +#define CBDW_SC(_cbd, _sc) __fec_out16(&(_cbd)->cbd_sc, (_sc)) +#define CBDW_DATLEN(_cbd, _datlen) __fec_out16(&(_cbd)->cbd_datlen, (_datlen)) +#define CBDW_BUFADDR(_cbd, _bufaddr) __fec_out32(&(_cbd)->cbd_bufaddr, (_bufaddr)) + +/* read */ +#define CBDR_SC(_cbd) __fec_in16(&(_cbd)->cbd_sc) +#define CBDR_DATLEN(_cbd) __fec_in16(&(_cbd)->cbd_datlen) +#define CBDR_BUFADDR(_cbd) __fec_in32(&(_cbd)->cbd_bufaddr) + +/* set bits */ +#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc)) + +/* clear bits */ +#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc)) + +/***************************************************************************/ + +#endif diff --git a/drivers/net/fec_8xx/fec_main.c b/drivers/net/fec_8xx/fec_main.c new file mode 100644 index 000000000..1bf15eed6 --- /dev/null +++ b/drivers/net/fec_8xx/fec_main.c @@ -0,0 +1,1275 @@ +/* + * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. 
+ * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou + * + * Heavily based on original FEC driver by Dan Malek + * and modifications by Joakim Tjernlund + * + * Released under the GPL + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fec_8xx.h" + +/*************************************************/ + +#define FEC_MAX_MULTICAST_ADDRS 64 + +/*************************************************/ + +static char version[] __devinitdata = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n"; + +MODULE_AUTHOR("Pantelis Antoniou "); +MODULE_DESCRIPTION("Motorola 8xx FEC ethernet driver"); +MODULE_LICENSE("GPL"); + +MODULE_PARM(fec_8xx_debug, "i"); +MODULE_PARM_DESC(fec_8xx_debug, + "FEC 8xx bitmapped debugging message enable value"); + +int fec_8xx_debug = -1; /* -1 == use FEC_8XX_DEF_MSG_ENABLE as value */ + +/*************************************************/ + +/* + * Delay to wait for FEC reset command to complete (in us) + */ +#define FEC_RESET_DELAY 50 + +/*****************************************************************************************/ + +static void fec_whack_reset(fec_t * fecp) +{ + int i; + + /* + * Whack a reset. We should wait for this. + */ + FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET); + for (i = 0; + (FR(fecp, ecntrl) & FEC_ECNTRL_RESET) != 0 && i < FEC_RESET_DELAY; + i++) + udelay(1); + + if (i == FEC_RESET_DELAY) + printk(KERN_WARNING "FEC Reset timeout!\n"); + +} + +/****************************************************************************/ + +/* + * Transmitter timeout. + */ +#define TX_TIMEOUT (2*HZ) + +/****************************************************************************/ + +/* + * Returns the CRC needed when filling in the hash table for + * multicast group filtering + * pAddr must point to a MAC address (6 bytes) + */ +static __u32 fec_mulicast_calc_crc(char *pAddr) +{ + u8 byte; + int byte_count; + int bit_count; + __u32 crc = 0xffffffff; + u8 msb; + + for (byte_count = 0; byte_count < 6; byte_count++) { + byte = pAddr[byte_count]; + for (bit_count = 0; bit_count < 8; bit_count++) { + msb = crc >> 31; + crc <<= 1; + if (msb ^ (byte & 0x1)) { + crc ^= FEC_CRC_POLY; + } + byte >>= 1; + } + } + return (crc); +} + +/* + * Set or clear the multicast filter for this adaptor. + * Skeleton taken from sunlance driver. + * The CPM Ethernet implementation allows Multicast as well as individual + * MAC address filtering. Some of the drivers check to make sure it is + * a group multicast address, and discard those that are not. I guess I + * will do the same for now, but just remove the test if you want + * individual filtering as well (do the upper net layers want or support + * this kind of feature?). + */ +static void fec_set_multicast_list(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + fec_t *fecp = fep->fecp; + struct dev_mc_list *pmc; + __u32 crc; + int temp; + __u32 csrVal; + int hash_index; + __u32 hthi, htlo; + unsigned long flags; + + + if ((dev->flags & IFF_PROMISC) != 0) { + + spin_lock_irqsave(&fep->lock, flags); + FS(fecp, r_cntrl, FEC_RCNTRL_PROM); + spin_unlock_irqrestore(&fep->lock, flags); + + /* + * Log any net taps. 
+ */ + printk(KERN_WARNING DRV_MODULE_NAME + ": %s: Promiscuous mode enabled.\n", dev->name); + return; + + } + + if ((dev->flags & IFF_ALLMULTI) != 0 || + dev->mc_count > FEC_MAX_MULTICAST_ADDRS) { + /* + * Catch all multicast addresses, set the filter to all 1's. + */ + hthi = 0xffffffffU; + htlo = 0xffffffffU; + } else { + hthi = 0; + htlo = 0; + + /* + * Now populate the hash table + */ + for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next) { + crc = fec_mulicast_calc_crc(pmc->dmi_addr); + temp = (crc & 0x3f) >> 1; + hash_index = ((temp & 0x01) << 4) | + ((temp & 0x02) << 2) | + ((temp & 0x04)) | + ((temp & 0x08) >> 2) | + ((temp & 0x10) >> 4); + csrVal = (1 << hash_index); + if (crc & 1) + hthi |= csrVal; + else + htlo |= csrVal; + } + } + + spin_lock_irqsave(&fep->lock, flags); + FC(fecp, r_cntrl, FEC_RCNTRL_PROM); + FW(fecp, hash_table_high, hthi); + FW(fecp, hash_table_low, htlo); + spin_unlock_irqrestore(&fep->lock, flags); +} + +static int fec_set_mac_address(struct net_device *dev, void *addr) +{ + struct sockaddr *mac = addr; + struct fec_enet_private *fep = netdev_priv(dev); + struct fec *fecp = fep->fecp; + int i; + __u32 addrhi, addrlo; + unsigned long flags; + + /* Get pointer to SCC area in parameter RAM. */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = mac->sa_data[i]; + + /* + * Set station address. + */ + addrhi = ((__u32) dev->dev_addr[0] << 24) | + ((__u32) dev->dev_addr[1] << 16) | + ((__u32) dev->dev_addr[2] << 8) | + (__u32) dev->dev_addr[3]; + addrlo = ((__u32) dev->dev_addr[4] << 24) | + ((__u32) dev->dev_addr[5] << 16); + + spin_lock_irqsave(&fep->lock, flags); + FW(fecp, addr_low, addrhi); + FW(fecp, addr_high, addrlo); + spin_unlock_irqrestore(&fep->lock, flags); + + return 0; +} + +/* + * This function is called to start or restart the FEC during a link + * change. This only happens when switching between half and full + * duplex. + */ +void fec_restart(struct net_device *dev, int duplex, int speed) +{ +#ifdef CONFIG_DUET + immap_t *immap = (immap_t *) IMAP_ADDR; + __u32 cptr; +#endif + struct fec_enet_private *fep = netdev_priv(dev); + struct fec *fecp = fep->fecp; + const struct fec_platform_info *fpi = fep->fpi; + cbd_t *bdp; + struct sk_buff *skb; + int i; + __u32 addrhi, addrlo; + + fec_whack_reset(fep->fecp); + + /* + * Set station address. + */ + addrhi = ((__u32) dev->dev_addr[0] << 24) | + ((__u32) dev->dev_addr[1] << 16) | + ((__u32) dev->dev_addr[2] << 8) | + (__u32) dev->dev_addr[3]; + addrlo = ((__u32) dev->dev_addr[4] << 24) | + ((__u32) dev->dev_addr[5] << 16); + FW(fecp, addr_low, addrhi); + FW(fecp, addr_high, addrlo); + + /* + * Reset all multicast. + */ + FW(fecp, hash_table_high, 0); + FW(fecp, hash_table_low, 0); + + /* + * Set maximum receive buffer size. + */ + FW(fecp, r_buff_size, PKT_MAXBLR_SIZE); + FW(fecp, r_hash, PKT_MAXBUF_SIZE); + + /* + * Set receive and transmit descriptor base. + */ + FW(fecp, r_des_start, iopa((__u32) (fep->rx_bd_base))); + FW(fecp, x_des_start, iopa((__u32) (fep->tx_bd_base))); + + fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; + fep->tx_free = fep->tx_ring; + fep->cur_rx = fep->rx_bd_base; + + /* + * Reset SKB receive buffers + */ + for (i = 0; i < fep->rx_ring; i++) { + if ((skb = fep->rx_skbuff[i]) == NULL) + continue; + fep->rx_skbuff[i] = NULL; + dev_kfree_skb(skb); + } + + /* + * Initialize the receive buffer descriptors. 
+ */ + for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { + skb = dev_alloc_skb(ENET_RX_FRSIZE); + if (skb == NULL) { + printk(KERN_WARNING DRV_MODULE_NAME + ": %s Memory squeeze, unable to allocate skb\n", + dev->name); + fep->stats.rx_dropped++; + break; + } + fep->rx_skbuff[i] = skb; + skb->dev = dev; + CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE)); + CBDW_DATLEN(bdp, 0); /* zero */ + CBDW_SC(bdp, BD_ENET_RX_EMPTY | + ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP)); + } + /* + * if we failed, fillup remainder + */ + for (; i < fep->rx_ring; i++, bdp++) { + fep->rx_skbuff[i] = NULL; + CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP); + } + + /* + * Reset SKB transmit buffers. + */ + for (i = 0; i < fep->tx_ring; i++) { + if ((skb = fep->tx_skbuff[i]) == NULL) + continue; + fep->tx_skbuff[i] = NULL; + dev_kfree_skb(skb); + } + + /* + * ...and the same for transmit. + */ + for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) { + fep->tx_skbuff[i] = NULL; + CBDW_BUFADDR(bdp, virt_to_bus(NULL)); + CBDW_DATLEN(bdp, 0); + CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP); + } + + /* + * Enable big endian and don't care about SDMA FC. + */ + FW(fecp, fun_code, 0x78000000); + + /* + * Set MII speed. + */ + FW(fecp, mii_speed, fep->fec_phy_speed); + + /* + * Clear any outstanding interrupt. + */ + FW(fecp, ievent, 0xffc0); + FW(fecp, ivec, (fpi->fec_irq / 2) << 29); + + /* + * adjust to speed (only for DUET & RMII) + */ +#ifdef CONFIG_DUET + cptr = in_be32(&immap->im_cpm.cp_cptr); + switch (fpi->fec_no) { + case 0: + /* + * check if in RMII mode + */ + if ((cptr & 0x100) == 0) + break; + + if (speed == 10) + cptr |= 0x0000010; + else if (speed == 100) + cptr &= ~0x0000010; + break; + case 1: + /* + * check if in RMII mode + */ + if ((cptr & 0x80) == 0) + break; + + if (speed == 10) + cptr |= 0x0000008; + else if (speed == 100) + cptr &= ~0x0000008; + break; + default: + break; + } + out_be32(&immap->im_cpm.cp_cptr, cptr); +#endif + + FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ + /* + * adjust to duplex mode + */ + if (duplex) { + FC(fecp, r_cntrl, FEC_RCNTRL_DRT); + FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ + } else { + FS(fecp, r_cntrl, FEC_RCNTRL_DRT); + FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ + } + + /* + * Enable interrupts we wish to service. + */ + FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB | + FEC_ENET_RXF | FEC_ENET_RXB); + + /* + * And last, enable the transmit and receive processing. + */ + FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + FW(fecp, r_des_active, 0x01000000); +} + +void fec_stop(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + fec_t *fecp = fep->fecp; + struct sk_buff *skb; + int i; + + if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) + return; /* already down */ + + FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */ + for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) && + i < FEC_RESET_DELAY; i++) + udelay(1); + + if (i == FEC_RESET_DELAY) + printk(KERN_WARNING DRV_MODULE_NAME + ": %s FEC timeout on graceful transmit stop\n", + dev->name); + /* + * Disable FEC. Let only MII interrupts. + */ + FW(fecp, imask, 0); + FW(fecp, ecntrl, ~FEC_ECNTRL_ETHER_EN); + + /* + * Reset SKB transmit buffers. 
+ */ + for (i = 0; i < fep->tx_ring; i++) { + if ((skb = fep->tx_skbuff[i]) == NULL) + continue; + fep->tx_skbuff[i] = NULL; + dev_kfree_skb(skb); + } + + /* + * Reset SKB receive buffers + */ + for (i = 0; i < fep->rx_ring; i++) { + if ((skb = fep->rx_skbuff[i]) == NULL) + continue; + fep->rx_skbuff[i] = NULL; + dev_kfree_skb(skb); + } +} + +/* common receive function */ +static int fec_enet_rx_common(struct net_device *dev, int *budget) +{ + struct fec_enet_private *fep = netdev_priv(dev); + fec_t *fecp = fep->fecp; + const struct fec_platform_info *fpi = fep->fpi; + cbd_t *bdp; + struct sk_buff *skb, *skbn, *skbt; + int received = 0; + __u16 pkt_len, sc; + int curidx; + int rx_work_limit; + + if (fpi->use_napi) { + rx_work_limit = min(dev->quota, *budget); + + if (!netif_running(dev)) + return 0; + } + + /* + * First, grab all of the stats for the incoming packet. + * These get messed up if we get called due to a busy condition. + */ + bdp = fep->cur_rx; + + /* clear RX status bits for napi*/ + if (fpi->use_napi) + FW(fecp, ievent, FEC_ENET_RXF | FEC_ENET_RXB); + + while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) { + + curidx = bdp - fep->rx_bd_base; + + /* + * Since we have allocated space to hold a complete frame, + * the last indicator should be set. + */ + if ((sc & BD_ENET_RX_LAST) == 0) + printk(KERN_WARNING DRV_MODULE_NAME + ": %s rcv is not +last\n", + dev->name); + + /* + * Check for errors. + */ + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL | + BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) { + fep->stats.rx_errors++; + /* Frame too long or too short. */ + if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) + fep->stats.rx_length_errors++; + /* Frame alignment */ + if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL)) + fep->stats.rx_frame_errors++; + /* CRC Error */ + if (sc & BD_ENET_RX_CR) + fep->stats.rx_crc_errors++; + /* FIFO overrun */ + if (sc & BD_ENET_RX_OV) + fep->stats.rx_crc_errors++; + + skbn = fep->rx_skbuff[curidx]; + BUG_ON(skbn == NULL); + + } else { + + /* napi, got packet but no quota */ + if (fpi->use_napi && --rx_work_limit < 0) + break; + + skb = fep->rx_skbuff[curidx]; + BUG_ON(skb == NULL); + + /* + * Process the incoming frame. + */ + fep->stats.rx_packets++; + pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */ + fep->stats.rx_bytes += pkt_len + 4; + + if (pkt_len <= fpi->rx_copybreak) { + /* +2 to make IP header L1 cache aligned */ + skbn = dev_alloc_skb(pkt_len + 2); + if (skbn != NULL) { + skb_reserve(skbn, 2); /* align IP header */ + memcpy(skbn->data, skb->data, pkt_len); + /* swap */ + skbt = skb; + skb = skbn; + skbn = skbt; + } + } else + skbn = dev_alloc_skb(ENET_RX_FRSIZE); + + if (skbn != NULL) { + skb->dev = dev; + skb_put(skb, pkt_len); /* Make room */ + skb->protocol = eth_type_trans(skb, dev); + received++; + if (!fpi->use_napi) + netif_rx(skb); + else + netif_receive_skb(skb); + } else { + printk(KERN_WARNING DRV_MODULE_NAME + ": %s Memory squeeze, dropping packet.\n", + dev->name); + fep->stats.rx_dropped++; + skbn = skb; + } + } + + fep->rx_skbuff[curidx] = skbn; + CBDW_BUFADDR(bdp, dma_map_single(NULL, skbn->data, + L1_CACHE_ALIGN(PKT_MAXBUF_SIZE), + DMA_FROM_DEVICE)); + CBDW_DATLEN(bdp, 0); + CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY); + + /* + * Update BD pointer to next entry. + */ + if ((sc & BD_ENET_RX_WRAP) == 0) + bdp++; + else + bdp = fep->rx_bd_base; + + /* + * Doing this here will keep the FEC running while we process + * incoming frames. 
On a heavily loaded network, we should be + * able to keep up at the expense of system resources. + */ + FW(fecp, r_des_active, 0x01000000); + } + + fep->cur_rx = bdp; + + if (fpi->use_napi) { + dev->quota -= received; + *budget -= received; + + if (rx_work_limit < 0) + return 1; /* not done */ + + /* done */ + netif_rx_complete(dev); + + /* enable RX interrupt bits */ + FS(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); + } + + return 0; +} + +static void fec_enet_tx(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + cbd_t *bdp; + struct sk_buff *skb; + int dirtyidx, do_wake; + __u16 sc; + + spin_lock(&fep->lock); + bdp = fep->dirty_tx; + + do_wake = 0; + while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) { + + dirtyidx = bdp - fep->tx_bd_base; + + if (fep->tx_free == fep->tx_ring) + break; + + skb = fep->tx_skbuff[dirtyidx]; + + /* + * Check for errors. + */ + if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC | + BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) { + fep->stats.tx_errors++; + if (sc & BD_ENET_TX_HB) /* No heartbeat */ + fep->stats.tx_heartbeat_errors++; + if (sc & BD_ENET_TX_LC) /* Late collision */ + fep->stats.tx_window_errors++; + if (sc & BD_ENET_TX_RL) /* Retrans limit */ + fep->stats.tx_aborted_errors++; + if (sc & BD_ENET_TX_UN) /* Underrun */ + fep->stats.tx_fifo_errors++; + if (sc & BD_ENET_TX_CSL) /* Carrier lost */ + fep->stats.tx_carrier_errors++; + } else + fep->stats.tx_packets++; + + if (sc & BD_ENET_TX_READY) + printk(KERN_WARNING DRV_MODULE_NAME + ": %s HEY! Enet xmit interrupt and TX_READY.\n", + dev->name); + + /* + * Deferred means some collisions occurred during transmit, + * but we eventually sent the packet OK. + */ + if (sc & BD_ENET_TX_DEF) + fep->stats.collisions++; + + /* + * Free the sk buffer associated with this last transmit. + */ + dev_kfree_skb_irq(skb); + fep->tx_skbuff[dirtyidx] = NULL; + + /* + * Update pointer to next buffer descriptor to be transmitted. + */ + if ((sc & BD_ENET_TX_WRAP) == 0) + bdp++; + else + bdp = fep->tx_bd_base; + + /* + * Since we have freed up a buffer, the ring is no longer + * full. + */ + if (!fep->tx_free++) + do_wake = 1; + } + + fep->dirty_tx = bdp; + + spin_unlock(&fep->lock); + + if (do_wake && netif_queue_stopped(dev)) + netif_wake_queue(dev); +} + +/* + * The interrupt handler. + * This is called from the MPC core interrupt. + */ +static irqreturn_t +fec_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct fec_enet_private *fep; + const struct fec_platform_info *fpi; + fec_t *fecp; + __u32 int_events; + __u32 int_events_napi; + + if (unlikely(dev == NULL)) + return IRQ_NONE; + + fep = netdev_priv(dev); + fecp = fep->fecp; + fpi = fep->fpi; + + /* + * Get the interrupt events that caused us to be here. 
+ */ + while ((int_events = FR(fecp, ievent) & FR(fecp, imask)) != 0) { + + if (!fpi->use_napi) + FW(fecp, ievent, int_events); + else { + int_events_napi = int_events & ~(FEC_ENET_RXF | FEC_ENET_RXB); + FW(fecp, ievent, int_events_napi); + } + + if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR | + FEC_ENET_BABT | FEC_ENET_EBERR)) != 0) + printk(KERN_WARNING DRV_MODULE_NAME + ": %s FEC ERROR(s) 0x%x\n", + dev->name, int_events); + + if ((int_events & FEC_ENET_RXF) != 0) { + if (!fpi->use_napi) + fec_enet_rx_common(dev, NULL); + else { + if (netif_rx_schedule_prep(dev)) { + /* disable rx interrupts */ + FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); + __netif_rx_schedule(dev); + } else { + printk(KERN_ERR DRV_MODULE_NAME + ": %s driver bug! interrupt while in poll!\n", + dev->name); + FC(fecp, imask, FEC_ENET_RXF | FEC_ENET_RXB); + } + } + } + + if ((int_events & FEC_ENET_TXF) != 0) + fec_enet_tx(dev); + } + + return IRQ_HANDLED; +} + +/* This interrupt occurs when the PHY detects a link change. */ +static irqreturn_t +fec_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct fec_enet_private *fep; + const struct fec_platform_info *fpi; + + if (unlikely(dev == NULL)) + return IRQ_NONE; + + fep = netdev_priv(dev); + fpi = fep->fpi; + + if (!fpi->use_mdio) + return IRQ_NONE; + + /* + * Acknowledge the interrupt if possible. If we have not + * found the PHY yet we can't process or acknowledge the + * interrupt now. Instead we ignore this interrupt for now, + * which we can do since it is edge triggered. It will be + * acknowledged later by fec_enet_open(). + */ + if (!fep->phy) + return IRQ_NONE; + + fec_mii_ack_int(dev); + fec_mii_link_status_change_check(dev, 0); + + return IRQ_HANDLED; +} + + +/**********************************************************************************/ + +static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + fec_t *fecp = fep->fecp; + cbd_t *bdp; + int curidx; + unsigned long flags; + + spin_lock_irqsave(&fep->tx_lock, flags); + + /* + * Fill in a Tx ring entry + */ + bdp = fep->cur_tx; + + if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { + netif_stop_queue(dev); + spin_unlock_irqrestore(&fep->tx_lock, flags); + + /* + * Ooops. All transmit buffers are full. Bail out. + * This should not happen, since the tx queue should be stopped. + */ + printk(KERN_WARNING DRV_MODULE_NAME + ": %s tx queue full!.\n", dev->name); + return 1; + } + + curidx = bdp - fep->tx_bd_base; + /* + * Clear all of the status flags. + */ + CBDC_SC(bdp, BD_ENET_TX_STATS); + + /* + * Save skb pointer. + */ + fep->tx_skbuff[curidx] = skb; + + fep->stats.tx_bytes += skb->len; + + /* + * Push the data cache so the CPM does not get stale memory data. + */ + CBDW_BUFADDR(bdp, dma_map_single(NULL, skb->data, + skb->len, DMA_TO_DEVICE)); + CBDW_DATLEN(bdp, skb->len); + + dev->trans_start = jiffies; + + /* + * If this was the last BD in the ring, start at the beginning again. 
+ */ + if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) + fep->cur_tx++; + else + fep->cur_tx = fep->tx_bd_base; + + if (!--fep->tx_free) + netif_stop_queue(dev); + + /* + * Trigger transmission start + */ + CBDS_SC(bdp, BD_ENET_TX_READY | BD_ENET_TX_INTR | + BD_ENET_TX_LAST | BD_ENET_TX_TC); + FW(fecp, x_des_active, 0x01000000); + + spin_unlock_irqrestore(&fep->tx_lock, flags); + + return 0; +} + +static void fec_timeout(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + + fep->stats.tx_errors++; + + if (fep->tx_free) + netif_wake_queue(dev); + + /* check link status again */ + fec_mii_link_status_change_check(dev, 0); +} + +static int fec_enet_open(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + const struct fec_platform_info *fpi = fep->fpi; + unsigned long flags; + + /* Install our interrupt handler. */ + if (request_irq(fpi->fec_irq, fec_enet_interrupt, 0, "fec", dev) != 0) { + printk(KERN_ERR DRV_MODULE_NAME + ": %s Could not allocate FEC IRQ!", dev->name); + return -EINVAL; + } + + /* Install our phy interrupt handler */ + if (fpi->phy_irq != -1 && + request_irq(fpi->phy_irq, fec_mii_link_interrupt, 0, "fec-phy", + dev) != 0) { + printk(KERN_ERR DRV_MODULE_NAME + ": %s Could not allocate PHY IRQ!", dev->name); + free_irq(fpi->fec_irq, dev); + return -EINVAL; + } + + if (fpi->use_mdio) { + fec_mii_startup(dev); + netif_carrier_off(dev); + fec_mii_link_status_change_check(dev, 1); + } else { + spin_lock_irqsave(&fep->lock, flags); + fec_restart(dev, 1, 100); /* XXX this sucks */ + spin_unlock_irqrestore(&fep->lock, flags); + + netif_carrier_on(dev); + netif_start_queue(dev); + } + return 0; +} + +static int fec_enet_close(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + const struct fec_platform_info *fpi = fep->fpi; + unsigned long flags; + + netif_stop_queue(dev); + netif_carrier_off(dev); + + if (fpi->use_mdio) + fec_mii_shutdown(dev); + + spin_lock_irqsave(&fep->lock, flags); + fec_stop(dev); + spin_unlock_irqrestore(&fep->lock, flags); + + /* release any irqs */ + if (fpi->phy_irq != -1) + free_irq(fpi->phy_irq, dev); + free_irq(fpi->fec_irq, dev); + + return 0; +} + +static struct net_device_stats *fec_enet_get_stats(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + return &fep->stats; +} + +static int fec_enet_poll(struct net_device *dev, int *budget) +{ + return fec_enet_rx_common(dev, budget); +} + +/*************************************************************************/ + +static void fec_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, DRV_MODULE_NAME); + strcpy(info->version, DRV_MODULE_VERSION); +} + +static int fec_get_regs_len(struct net_device *dev) +{ + return sizeof(fec_t); +} + +static void fec_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct fec_enet_private *fep = netdev_priv(dev); + unsigned long flags; + + if (regs->len < sizeof(fec_t)) + return; + + regs->version = 0; + spin_lock_irqsave(&fep->lock, flags); + memcpy_fromio(p, fep->fecp, sizeof(fec_t)); + spin_unlock_irqrestore(&fep->lock, flags); +} + +static int fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct fec_enet_private *fep = netdev_priv(dev); + unsigned long flags; + int rc; + + spin_lock_irqsave(&fep->lock, flags); + rc = mii_ethtool_gset(&fep->mii_if, cmd); + spin_unlock_irqrestore(&fep->lock, flags); + + return rc; +} + +static int fec_set_settings(struct net_device *dev, 
struct ethtool_cmd *cmd) +{ + struct fec_enet_private *fep = netdev_priv(dev); + unsigned long flags; + int rc; + + spin_lock_irqsave(&fep->lock, flags); + rc = mii_ethtool_sset(&fep->mii_if, cmd); + spin_unlock_irqrestore(&fep->lock, flags); + + return rc; +} + +static int fec_nway_reset(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + return mii_nway_restart(&fep->mii_if); +} + +static __u32 fec_get_msglevel(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + return fep->msg_enable; +} + +static void fec_set_msglevel(struct net_device *dev, __u32 value) +{ + struct fec_enet_private *fep = netdev_priv(dev); + fep->msg_enable = value; +} + +static struct ethtool_ops fec_ethtool_ops = { + .get_drvinfo = fec_get_drvinfo, + .get_regs_len = fec_get_regs_len, + .get_settings = fec_get_settings, + .set_settings = fec_set_settings, + .nway_reset = fec_nway_reset, + .get_link = ethtool_op_get_link, + .get_msglevel = fec_get_msglevel, + .set_msglevel = fec_set_msglevel, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_csum, /* local! */ + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_regs = fec_get_regs, +}; + +static int fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct fec_enet_private *fep = netdev_priv(dev); + struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data; + unsigned long flags; + int rc; + + if (!netif_running(dev)) + return -EINVAL; + + spin_lock_irqsave(&fep->lock, flags); + rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); + spin_unlock_irqrestore(&fep->lock, flags); + return rc; +} + +int fec_8xx_init_one(const struct fec_platform_info *fpi, + struct net_device **devp) +{ + immap_t *immap = (immap_t *) IMAP_ADDR; + static int fec_8xx_version_printed = 0; + struct net_device *dev = NULL; + struct fec_enet_private *fep = NULL; + fec_t *fecp = NULL; + int i; + int err = 0; + int registered = 0; + __u32 siel; + + *devp = NULL; + + switch (fpi->fec_no) { + case 0: + fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec; + break; +#ifdef CONFIG_DUET + case 1: + fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec2; + break; +#endif + default: + return -EINVAL; + } + + if (fec_8xx_version_printed++ == 0) + printk(KERN_INFO "%s", version); + + i = sizeof(*fep) + (sizeof(struct sk_buff **) * + (fpi->rx_ring + fpi->tx_ring)); + + dev = alloc_etherdev(i); + if (!dev) { + err = -ENOMEM; + goto err; + } + SET_MODULE_OWNER(dev); + + fep = netdev_priv(dev); + + /* partial reset of FEC */ + fec_whack_reset(fecp); + + /* point rx_skbuff, tx_skbuff */ + fep->rx_skbuff = (struct sk_buff **)&fep[1]; + fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; + + fep->fecp = fecp; + fep->fpi = fpi; + + /* init locks */ + spin_lock_init(&fep->lock); + spin_lock_init(&fep->tx_lock); + + /* + * Set the Ethernet address. + */ + for (i = 0; i < 6; i++) + dev->dev_addr[i] = fpi->macaddr[i]; + + fep->ring_base = dma_alloc_coherent(NULL, + (fpi->tx_ring + fpi->rx_ring) * + sizeof(cbd_t), &fep->ring_mem_addr, + GFP_KERNEL); + if (fep->ring_base == NULL) { + printk(KERN_ERR DRV_MODULE_NAME + ": %s dma alloc failed.\n", dev->name); + err = -ENOMEM; + goto err; + } + + /* + * Set receive and transmit descriptor base. 
+ */ + fep->rx_bd_base = fep->ring_base; + fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring; + + /* initialize ring size variables */ + fep->tx_ring = fpi->tx_ring; + fep->rx_ring = fpi->rx_ring; + + /* SIU interrupt */ + if (fpi->phy_irq != -1 && + (fpi->phy_irq >= SIU_IRQ0 && fpi->phy_irq < SIU_LEVEL7)) { + + siel = in_be32(&immap->im_siu_conf.sc_siel); + if ((fpi->phy_irq & 1) == 0) + siel |= (0x80000000 >> fpi->phy_irq); + else + siel &= ~(0x80000000 >> (fpi->phy_irq & ~1)); + out_be32(&immap->im_siu_conf.sc_siel, siel); + } + + /* + * The FEC Ethernet specific entries in the device structure. + */ + dev->open = fec_enet_open; + dev->hard_start_xmit = fec_enet_start_xmit; + dev->tx_timeout = fec_timeout; + dev->watchdog_timeo = TX_TIMEOUT; + dev->stop = fec_enet_close; + dev->get_stats = fec_enet_get_stats; + dev->set_multicast_list = fec_set_multicast_list; + dev->set_mac_address = fec_set_mac_address; + if (fpi->use_napi) { + dev->poll = fec_enet_poll; + dev->weight = fpi->napi_weight; + } + dev->ethtool_ops = &fec_ethtool_ops; + dev->do_ioctl = fec_ioctl; + + fep->fec_phy_speed = + ((((fpi->sys_clk + 4999999) / 2500000) / 2) & 0x3F) << 1; + + init_timer(&fep->phy_timer_list); + + /* partial reset of FEC so that only MII works */ + FW(fecp, mii_speed, fep->fec_phy_speed); + FW(fecp, ievent, 0xffc0); + FW(fecp, ivec, (fpi->fec_irq / 2) << 29); + FW(fecp, imask, 0); + FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ + FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + + netif_carrier_off(dev); + + err = register_netdev(dev); + if (err != 0) + goto err; + registered = 1; + + if (fpi->use_mdio) { + fep->mii_if.dev = dev; + fep->mii_if.mdio_read = fec_mii_read; + fep->mii_if.mdio_write = fec_mii_write; + fep->mii_if.phy_id_mask = 0x1f; + fep->mii_if.reg_num_mask = 0x1f; + fep->mii_if.phy_id = fec_mii_phy_id_detect(dev); + } + + *devp = dev; + + return 0; + + err: + if (dev != NULL) { + if (fecp != NULL) + fec_whack_reset(fecp); + + if (registered) + unregister_netdev(dev); + + if (fep != NULL) { + if (fep->ring_base) + dma_free_coherent(NULL, + (fpi->tx_ring + + fpi->rx_ring) * + sizeof(cbd_t), fep->ring_base, + fep->ring_mem_addr); + } + free_netdev(dev); + } + return err; +} + +int fec_8xx_cleanup_one(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + fec_t *fecp = fep->fecp; + const struct fec_platform_info *fpi = fep->fpi; + + fec_whack_reset(fecp); + + unregister_netdev(dev); + + dma_free_coherent(NULL, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), + fep->ring_base, fep->ring_mem_addr); + + free_netdev(dev); + + return 0; +} + +/**************************************************************************************/ +/**************************************************************************************/ +/**************************************************************************************/ + +static int __init fec_8xx_init(void) +{ + return fec_8xx_platform_init(); +} + +static void __exit fec_8xx_cleanup(void) +{ + fec_8xx_platform_cleanup(); +} + +/**************************************************************************************/ +/**************************************************************************************/ +/**************************************************************************************/ + +module_init(fec_8xx_init); +module_exit(fec_8xx_cleanup); diff --git a/drivers/net/fec_8xx/fec_mii.c b/drivers/net/fec_8xx/fec_mii.c new file mode 100644 index 000000000..700233655 --- /dev/null +++ 
b/drivers/net/fec_8xx/fec_mii.c @@ -0,0 +1,380 @@ +/* + * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. + * + * Copyright (c) 2003 Intracom S.A. + * by Pantelis Antoniou + * + * Heavily based on original FEC driver by Dan Malek + * and modifications by Joakim Tjernlund + * + * Released under the GPL + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/*************************************************/ + +#include "fec_8xx.h" + +/*************************************************/ + +/* Make MII read/write commands for the FEC. +*/ +#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) +#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff)) +#define mk_mii_end 0 + +/*************************************************/ + +/* XXX both FECs use the MII interface of FEC1 */ +static spinlock_t fec_mii_lock = SPIN_LOCK_UNLOCKED; + +#define FEC_MII_LOOPS 10000 + +int fec_mii_read(struct net_device *dev, int phy_id, int location) +{ + struct fec_enet_private *fep = netdev_priv(dev); + fec_t *fecp; + int i, ret = -1; + unsigned long flags; + + /* XXX MII interface is only connected to FEC1 */ + fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec; + + spin_lock_irqsave(&fec_mii_lock, flags); + + if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) { + FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ + FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + FW(fecp, ievent, FEC_ENET_MII); + } + + /* Add PHY address to register command. */ + FW(fecp, mii_speed, fep->fec_phy_speed); + FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) { + FW(fecp, ievent, FEC_ENET_MII); + ret = FR(fecp, mii_data) & 0xffff; + } + + spin_unlock_irqrestore(&fec_mii_lock, flags); + + return ret; +} + +void fec_mii_write(struct net_device *dev, int phy_id, int location, int value) +{ + struct fec_enet_private *fep = netdev_priv(dev); + fec_t *fecp; + unsigned long flags; + int i; + + /* XXX MII interface is only connected to FEC1 */ + fecp = &((immap_t *) IMAP_ADDR)->im_cpm.cp_fec; + + spin_lock_irqsave(&fec_mii_lock, flags); + + if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0) { + FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ + FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); + FW(fecp, ievent, FEC_ENET_MII); + } + + /* Add PHY address to register command. */ + FW(fecp, mii_speed, fep->fec_phy_speed); /* always adapt mii speed */ + FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value)); + + for (i = 0; i < FEC_MII_LOOPS; i++) + if ((FR(fecp, ievent) & FEC_ENET_MII) != 0) + break; + + if (i < FEC_MII_LOOPS) + FW(fecp, ievent, FEC_ENET_MII); + + spin_unlock_irqrestore(&fec_mii_lock, flags); +} + +/*************************************************/ + +#ifdef CONFIG_FEC_8XX_GENERIC_PHY + +/* + * Generic PHY support. 
+ * Should work for all PHYs, but link change is detected by polling + */ + +static void generic_timer_callback(unsigned long data) +{ + struct net_device *dev = (struct net_device *)data; + struct fec_enet_private *fep = netdev_priv(dev); + + fep->phy_timer_list.expires = jiffies + HZ / 2; + + add_timer(&fep->phy_timer_list); + + fec_mii_link_status_change_check(dev, 0); +} + +static void generic_startup(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + + fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */ + fep->phy_timer_list.data = (unsigned long)dev; + fep->phy_timer_list.function = generic_timer_callback; + add_timer(&fep->phy_timer_list); +} + +static void generic_shutdown(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + + del_timer_sync(&fep->phy_timer_list); +} + +#endif + +#ifdef CONFIG_FEC_8XX_DM9161_PHY + +/* ------------------------------------------------------------------------- */ +/* The Davicom DM9161 is used on the NETTA board */ + +/* register definitions */ + +#define MII_DM9161_ACR 16 /* Aux. Config Register */ +#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */ +#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */ +#define MII_DM9161_INTR 21 /* Interrupt Register */ +#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */ +#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */ + +static void dm9161_startup(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + + fec_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000); +} + +static void dm9161_ack_int(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + + fec_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR); +} + +static void dm9161_shutdown(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + + fec_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00); +} + +#endif + +/**********************************************************************************/ + +static const struct phy_info phy_info[] = { +#ifdef CONFIG_FEC_8XX_DM9161_PHY + { + .id = 0x00181b88, + .name = "DM9161", + .startup = dm9161_startup, + .ack_int = dm9161_ack_int, + .shutdown = dm9161_shutdown, + }, +#endif +#ifdef CONFIG_FEC_8XX_GENERIC_PHY + { + .id = 0, + .name = "GENERIC", + .startup = generic_startup, + .shutdown = generic_shutdown, + }, +#endif +}; + +/**********************************************************************************/ + +int fec_mii_phy_id_detect(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + const struct fec_platform_info *fpi = fep->fpi; + int i, r, start, end, phytype, physubtype; + const struct phy_info *phy; + int phy_hwid, phy_id; + + /* if no MDIO */ + if (fpi->use_mdio == 0) + return -1; + + phy_hwid = -1; + fep->phy = NULL; + + /* auto-detect? 
*/ + if (fpi->phy_addr == -1) { + start = 0; + end = 32; + } else { /* direct */ + start = fpi->phy_addr; + end = start + 1; + } + + for (phy_id = start; phy_id < end; phy_id++) { + r = fec_mii_read(dev, phy_id, MII_PHYSID1); + if (r == -1 || (phytype = (r & 0xffff)) == 0xffff) + continue; + r = fec_mii_read(dev, phy_id, MII_PHYSID2); + if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff) + continue; + phy_hwid = (phytype << 16) | physubtype; + if (phy_hwid != -1) + break; + } + + if (phy_hwid == -1) { + printk(KERN_ERR DRV_MODULE_NAME + ": %s No PHY detected!\n", dev->name); + return -1; + } + + for (i = 0, phy = phy_info; i < sizeof(phy_info) / sizeof(phy_info[0]); + i++, phy++) + if (phy->id == (phy_hwid >> 4) || phy->id == 0) + break; + + if (i >= sizeof(phy_info) / sizeof(phy_info[0])) { + printk(KERN_ERR DRV_MODULE_NAME + ": %s PHY id 0x%08x is not supported!\n", + dev->name, phy_hwid); + return -1; + } + + fep->phy = phy; + + printk(KERN_INFO DRV_MODULE_NAME + ": %s Phy @ 0x%x, type %s (0x%08x)\n", + dev->name, phy_id, fep->phy->name, phy_hwid); + + return phy_id; +} + +void fec_mii_startup(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + const struct fec_platform_info *fpi = fep->fpi; + + if (!fpi->use_mdio || fep->phy == NULL) + return; + + if (fep->phy->startup == NULL) + return; + + (*fep->phy->startup) (dev); +} + +void fec_mii_shutdown(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + const struct fec_platform_info *fpi = fep->fpi; + + if (!fpi->use_mdio || fep->phy == NULL) + return; + + if (fep->phy->shutdown == NULL) + return; + + (*fep->phy->shutdown) (dev); +} + +void fec_mii_ack_int(struct net_device *dev) +{ + struct fec_enet_private *fep = netdev_priv(dev); + const struct fec_platform_info *fpi = fep->fpi; + + if (!fpi->use_mdio || fep->phy == NULL) + return; + + if (fep->phy->ack_int == NULL) + return; + + (*fep->phy->ack_int) (dev); +} + +/* helper function */ +static int mii_negotiated(struct mii_if_info *mii) +{ + int advert, lpa, val; + + if (!mii_link_ok(mii)) + return 0; + + val = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_BMSR); + if ((val & BMSR_ANEGCOMPLETE) == 0) + return 0; + + advert = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_ADVERTISE); + lpa = (*mii->mdio_read) (mii->dev, mii->phy_id, MII_LPA); + + return mii_nway_result(advert & lpa); +} + +void fec_mii_link_status_change_check(struct net_device *dev, int init_media) +{ + struct fec_enet_private *fep = netdev_priv(dev); + unsigned int media; + unsigned long flags; + + if (mii_check_media(&fep->mii_if, netif_msg_link(fep), init_media) == 0) + return; + + media = mii_negotiated(&fep->mii_if); + + if (netif_carrier_ok(dev)) { + spin_lock_irqsave(&fep->lock, flags); + fec_restart(dev, !!(media & ADVERTISE_FULL), + (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) ? + 100 : 10); + spin_unlock_irqrestore(&fep->lock, flags); + + netif_start_queue(dev); + } else { + netif_stop_queue(dev); + + spin_lock_irqsave(&fep->lock, flags); + fec_stop(dev); + spin_unlock_irqrestore(&fep->lock, flags); + + } +} diff --git a/drivers/net/ibm_emac/ibm_emac.h b/drivers/net/ibm_emac/ibm_emac.h new file mode 100644 index 000000000..5310033ad --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac.h @@ -0,0 +1,263 @@ +/* + * ibm_emac.h + * + * + * Armin Kuster akuster@mvista.com + * June, 2002 + * + * Copyright 2002 MontaVista Softare Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _IBM_EMAC_H_ +#define _IBM_EMAC_H_ +/* General defines needed for the driver */ + +/* Emac */ +typedef struct emac_regs { + u32 em0mr0; + u32 em0mr1; + u32 em0tmr0; + u32 em0tmr1; + u32 em0rmr; + u32 em0isr; + u32 em0iser; + u32 em0iahr; + u32 em0ialr; + u32 em0vtpid; + u32 em0vtci; + u32 em0ptr; + u32 em0iaht1; + u32 em0iaht2; + u32 em0iaht3; + u32 em0iaht4; + u32 em0gaht1; + u32 em0gaht2; + u32 em0gaht3; + u32 em0gaht4; + u32 em0lsah; + u32 em0lsal; + u32 em0ipgvr; + u32 em0stacr; + u32 em0trtr; + u32 em0rwmr; +} emac_t; + +/* MODE REG 0 */ +#define EMAC_M0_RXI 0x80000000 +#define EMAC_M0_TXI 0x40000000 +#define EMAC_M0_SRST 0x20000000 +#define EMAC_M0_TXE 0x10000000 +#define EMAC_M0_RXE 0x08000000 +#define EMAC_M0_WKE 0x04000000 + +/* MODE Reg 1 */ +#define EMAC_M1_FDE 0x80000000 +#define EMAC_M1_ILE 0x40000000 +#define EMAC_M1_VLE 0x20000000 +#define EMAC_M1_EIFC 0x10000000 +#define EMAC_M1_APP 0x08000000 +#define EMAC_M1_AEMI 0x02000000 +#define EMAC_M1_IST 0x01000000 +#define EMAC_M1_MF_1000GPCS 0x00c00000 /* Internal GPCS */ +#define EMAC_M1_MF_1000MBPS 0x00800000 /* External GPCS */ +#define EMAC_M1_MF_100MBPS 0x00400000 +#define EMAC_M1_RFS_16K 0x00280000 /* 000 for 512 byte */ +#define EMAC_M1_TR 0x00008000 +#ifdef CONFIG_IBM_EMAC4 +#define EMAC_M1_RFS_8K 0x00200000 +#define EMAC_M1_RFS_4K 0x00180000 +#define EMAC_M1_RFS_2K 0x00100000 +#define EMAC_M1_RFS_1K 0x00080000 +#define EMAC_M1_TX_FIFO_16K 0x00050000 /* 0's for 512 byte */ +#define EMAC_M1_TX_FIFO_8K 0x00040000 +#define EMAC_M1_TX_FIFO_4K 0x00030000 +#define EMAC_M1_TX_FIFO_2K 0x00020000 +#define EMAC_M1_TX_FIFO_1K 0x00010000 +#define EMAC_M1_TX_TR 0x00008000 +#define EMAC_M1_TX_MWSW 0x00001000 /* 0 wait for status */ +#define EMAC_M1_JUMBO_ENABLE 0x00000800 /* Upt to 9Kr status */ +#define EMAC_M1_OPB_CLK_66 0x00000008 /* 66Mhz */ +#define EMAC_M1_OPB_CLK_83 0x00000010 /* 83Mhz */ +#define EMAC_M1_OPB_CLK_100 0x00000018 /* 100Mhz */ +#define EMAC_M1_OPB_CLK_100P 0x00000020 /* 100Mhz+ */ +#else /* CONFIG_IBM_EMAC4 */ +#define EMAC_M1_RFS_4K 0x00300000 /* ~4k for 512 byte */ +#define EMAC_M1_RFS_2K 0x00200000 +#define EMAC_M1_RFS_1K 0x00100000 +#define EMAC_M1_TX_FIFO_2K 0x00080000 /* 0's for 512 byte */ +#define EMAC_M1_TX_FIFO_1K 0x00040000 +#define EMAC_M1_TR0_DEPEND 0x00010000 /* 0'x for single packet */ +#define EMAC_M1_TR1_DEPEND 0x00004000 +#define EMAC_M1_TR1_MULTI 0x00002000 +#define EMAC_M1_JUMBO_ENABLE 0x00001000 +#endif /* CONFIG_IBM_EMAC4 */ +#define EMAC_M1_BASE (EMAC_M1_TX_FIFO_2K | \ + EMAC_M1_APP | \ + EMAC_M1_TR) + +/* Transmit Mode Register 0 */ +#define EMAC_TMR0_GNP0 0x80000000 +#define EMAC_TMR0_GNP1 0x40000000 +#define EMAC_TMR0_GNPD 0x20000000 +#define EMAC_TMR0_FC 0x10000000 +#define EMAC_TMR0_TFAE_2_32 0x00000001 +#define EMAC_TMR0_TFAE_4_64 0x00000002 +#define EMAC_TMR0_TFAE_8_128 0x00000003 +#define EMAC_TMR0_TFAE_16_256 0x00000004 +#define EMAC_TMR0_TFAE_32_512 0x00000005 +#define EMAC_TMR0_TFAE_64_1024 0x00000006 +#define EMAC_TMR0_TFAE_128_2048 0x00000007 + +/* Receive Mode Register */ +#define EMAC_RMR_SP 0x80000000 +#define EMAC_RMR_SFCS 0x40000000 +#define EMAC_RMR_ARRP 0x20000000 +#define EMAC_RMR_ARP 0x10000000 +#define EMAC_RMR_AROP 0x08000000 +#define EMAC_RMR_ARPI 0x04000000 +#define EMAC_RMR_PPP 0x02000000 
+#define EMAC_RMR_PME 0x01000000 +#define EMAC_RMR_PMME 0x00800000 +#define EMAC_RMR_IAE 0x00400000 +#define EMAC_RMR_MIAE 0x00200000 +#define EMAC_RMR_BAE 0x00100000 +#define EMAC_RMR_MAE 0x00080000 +#define EMAC_RMR_RFAF_2_32 0x00000001 +#define EMAC_RMR_RFAF_4_64 0x00000002 +#define EMAC_RMR_RFAF_8_128 0x00000003 +#define EMAC_RMR_RFAF_16_256 0x00000004 +#define EMAC_RMR_RFAF_32_512 0x00000005 +#define EMAC_RMR_RFAF_64_1024 0x00000006 +#define EMAC_RMR_RFAF_128_2048 0x00000007 +#define EMAC_RMR_BASE (EMAC_RMR_IAE | EMAC_RMR_BAE) + +/* Interrupt Status & enable Regs */ +#define EMAC_ISR_OVR 0x02000000 +#define EMAC_ISR_PP 0x01000000 +#define EMAC_ISR_BP 0x00800000 +#define EMAC_ISR_RP 0x00400000 +#define EMAC_ISR_SE 0x00200000 +#define EMAC_ISR_ALE 0x00100000 +#define EMAC_ISR_BFCS 0x00080000 +#define EMAC_ISR_PTLE 0x00040000 +#define EMAC_ISR_ORE 0x00020000 +#define EMAC_ISR_IRE 0x00010000 +#define EMAC_ISR_DBDM 0x00000200 +#define EMAC_ISR_DB0 0x00000100 +#define EMAC_ISR_SE0 0x00000080 +#define EMAC_ISR_TE0 0x00000040 +#define EMAC_ISR_DB1 0x00000020 +#define EMAC_ISR_SE1 0x00000010 +#define EMAC_ISR_TE1 0x00000008 +#define EMAC_ISR_MOS 0x00000002 +#define EMAC_ISR_MOF 0x00000001 + +/* STA CONTROL REG */ +#define EMAC_STACR_OC 0x00008000 +#define EMAC_STACR_PHYE 0x00004000 +#define EMAC_STACR_WRITE 0x00002000 +#define EMAC_STACR_READ 0x00001000 +#define EMAC_STACR_CLK_83MHZ 0x00000800 /* 0's for 50Mhz */ +#define EMAC_STACR_CLK_66MHZ 0x00000400 +#define EMAC_STACR_CLK_100MHZ 0x00000C00 + +/* Transmit Request Threshold Register */ +#define EMAC_TRTR_1600 0x18000000 /* 0's for 64 Bytes */ +#define EMAC_TRTR_1024 0x0f000000 +#define EMAC_TRTR_512 0x07000000 +#define EMAC_TRTR_256 0x03000000 +#define EMAC_TRTR_192 0x10000000 +#define EMAC_TRTR_128 0x01000000 + +#define EMAC_TX_CTRL_GFCS 0x0200 +#define EMAC_TX_CTRL_GP 0x0100 +#define EMAC_TX_CTRL_ISA 0x0080 +#define EMAC_TX_CTRL_RSA 0x0040 +#define EMAC_TX_CTRL_IVT 0x0020 +#define EMAC_TX_CTRL_RVT 0x0010 +#define EMAC_TX_CTRL_TAH_CSUM 0x000e /* TAH only */ +#define EMAC_TX_CTRL_TAH_SEG4 0x000a /* TAH only */ +#define EMAC_TX_CTRL_TAH_SEG3 0x0008 /* TAH only */ +#define EMAC_TX_CTRL_TAH_SEG2 0x0006 /* TAH only */ +#define EMAC_TX_CTRL_TAH_SEG1 0x0004 /* TAH only */ +#define EMAC_TX_CTRL_TAH_SEG0 0x0002 /* TAH only */ +#define EMAC_TX_CTRL_TAH_DIS 0x0000 /* TAH only */ + +#define EMAC_TX_CTRL_DFLT ( \ + MAL_TX_CTRL_INTR | EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP ) + +/* madmal transmit status / Control bits */ +#define EMAC_TX_ST_BFCS 0x0200 +#define EMAC_TX_ST_BPP 0x0100 +#define EMAC_TX_ST_LCS 0x0080 +#define EMAC_TX_ST_ED 0x0040 +#define EMAC_TX_ST_EC 0x0020 +#define EMAC_TX_ST_LC 0x0010 +#define EMAC_TX_ST_MC 0x0008 +#define EMAC_TX_ST_SC 0x0004 +#define EMAC_TX_ST_UR 0x0002 +#define EMAC_TX_ST_SQE 0x0001 + +/* madmal receive status / Control bits */ +#define EMAC_RX_ST_OE 0x0200 +#define EMAC_RX_ST_PP 0x0100 +#define EMAC_RX_ST_BP 0x0080 +#define EMAC_RX_ST_RP 0x0040 +#define EMAC_RX_ST_SE 0x0020 +#define EMAC_RX_ST_AE 0x0010 +#define EMAC_RX_ST_BFCS 0x0008 +#define EMAC_RX_ST_PTL 0x0004 +#define EMAC_RX_ST_ORE 0x0002 +#define EMAC_RX_ST_IRE 0x0001 +#define EMAC_BAD_RX_PACKET 0x02ff +#define EMAC_CSUM_VER_ERROR 0x0003 + +/* identify a bad rx packet dependent on emac features */ +#ifdef CONFIG_IBM_EMAC4 +#define EMAC_IS_BAD_RX_PACKET(desc) \ + (((desc & (EMAC_BAD_RX_PACKET & ~EMAC_CSUM_VER_ERROR)) || \ + ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_ORE) || \ + ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_IRE))) +#else +#define 
EMAC_IS_BAD_RX_PACKET(desc) \ + (desc & EMAC_BAD_RX_PACKET) +#endif + +/* Revision specific EMAC register defaults */ +#ifdef CONFIG_IBM_EMAC4 +#define EMAC_M1_DEFAULT (EMAC_M1_BASE | \ + EMAC_M1_OPB_CLK_83 | \ + EMAC_M1_TX_MWSW) +#define EMAC_RMR_DEFAULT (EMAC_RMR_BASE | \ + EMAC_RMR_RFAF_128_2048) +#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP0 | \ + EMAC_TMR0_TFAE_128_2048) +#define EMAC_TRTR_DEFAULT EMAC_TRTR_1024 +#else /* !CONFIG_IBM_EMAC4 */ +#define EMAC_M1_DEFAULT EMAC_M1_BASE +#define EMAC_RMR_DEFAULT EMAC_RMR_BASE +#define EMAC_TMR0_XMIT EMAC_TMR0_GNP0 +#define EMAC_TRTR_DEFAULT EMAC_TRTR_1600 +#endif /* CONFIG_IBM_EMAC4 */ + +/* SoC implementation specific EMAC register defaults */ +#if defined(CONFIG_440GP) +#define EMAC_RWMR_DEFAULT 0x80009000 +#define EMAC_TMR0_DEFAULT 0x00000000 +#define EMAC_TMR1_DEFAULT 0xf8640000 +#elif defined(CONFIG_440GX) +#define EMAC_RWMR_DEFAULT 0x1000a200 +#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_128_2048 +#define EMAC_TMR1_DEFAULT 0x88810000 +#else +#define EMAC_RWMR_DEFAULT 0x0f002000 +#define EMAC_TMR0_DEFAULT 0x00000000 +#define EMAC_TMR1_DEFAULT 0x380f0000 +#endif /* CONFIG_440GP */ + +#endif diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h new file mode 100644 index 000000000..691ce4e5c --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_core.h @@ -0,0 +1,146 @@ +/* + * ibm_emac_core.h + * + * Ethernet driver for the built in ethernet on the IBM 405 PowerPC + * processor. + * + * Armin Kuster akuster@mvista.com + * Sept, 2001 + * + * Orignial driver + * Johnnie Peters + * jpeters@mvista.com + * + * Copyright 2000 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _IBM_EMAC_CORE_H_ +#define _IBM_EMAC_CORE_H_ + +#include +#include +#include /* For phys_addr_t */ + +#include "ibm_emac.h" +#include "ibm_emac_phy.h" +#include "ibm_emac_rgmii.h" +#include "ibm_emac_zmii.h" +#include "ibm_emac_mal.h" +#include "ibm_emac_tah.h" + +#ifndef CONFIG_IBM_EMAC_TXB +#define NUM_TX_BUFF 64 +#define NUM_RX_BUFF 64 +#else +#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB +#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB +#endif + +/* This does 16 byte alignment, exactly what we need. + * The packet length includes FCS, but we don't want to + * include that when passing upstream as it messes up + * bridging applications. + */ +#ifndef CONFIG_IBM_EMAC_SKBRES +#define SKB_RES 2 +#else +#define SKB_RES CONFIG_IBM_EMAC_SKBRES +#endif + +/* Note about alignement. alloc_skb() returns a cache line + * aligned buffer. However, dev_alloc_skb() will add 16 more + * bytes and "reserve" them, so our buffer will actually end + * on a half cache line. What we do is to use directly + * alloc_skb, allocate 16 more bytes to match the total amount + * allocated by dev_alloc_skb(), but we don't reserve. + */ +#define MAX_NUM_BUF_DESC 255 +#define DESC_BUF_SIZE 4080 /* max 4096-16 */ +#define DESC_BUF_SIZE_REG (DESC_BUF_SIZE / 16) + +/* Transmitter timeout. 
*/ +#define TX_TIMEOUT (2*HZ) + +/* MDIO latency delay */ +#define MDIO_DELAY 50 + +/* Power managment shift registers */ +#define IBM_CPM_EMMII 0 /* Shift value for MII */ +#define IBM_CPM_EMRX 1 /* Shift value for recv */ +#define IBM_CPM_EMTX 2 /* Shift value for MAC */ +#define IBM_CPM_EMAC(x) (((x)>>IBM_CPM_EMMII) | ((x)>>IBM_CPM_EMRX) | ((x)>>IBM_CPM_EMTX)) + +#define ENET_HEADER_SIZE 14 +#define ENET_FCS_SIZE 4 +#define ENET_DEF_MTU_SIZE 1500 +#define ENET_DEF_BUF_SIZE (ENET_DEF_MTU_SIZE + ENET_HEADER_SIZE + ENET_FCS_SIZE) +#define EMAC_MIN_FRAME 64 +#define EMAC_MAX_FRAME 9018 +#define EMAC_MIN_MTU (EMAC_MIN_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE) +#define EMAC_MAX_MTU (EMAC_MAX_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE) + +#ifdef CONFIG_IBM_EMAC_ERRMSG +void emac_serr_dump_0(struct net_device *dev); +void emac_serr_dump_1(struct net_device *dev); +void emac_err_dump(struct net_device *dev, int em0isr); +void emac_phy_dump(struct net_device *); +void emac_desc_dump(struct net_device *); +void emac_mac_dump(struct net_device *); +void emac_mal_dump(struct net_device *); +#else +#define emac_serr_dump_0(dev) do { } while (0) +#define emac_serr_dump_1(dev) do { } while (0) +#define emac_err_dump(dev,x) do { } while (0) +#define emac_phy_dump(dev) do { } while (0) +#define emac_desc_dump(dev) do { } while (0) +#define emac_mac_dump(dev) do { } while (0) +#define emac_mal_dump(dev) do { } while (0) +#endif + +struct ocp_enet_private { + struct sk_buff *tx_skb[NUM_TX_BUFF]; + struct sk_buff *rx_skb[NUM_RX_BUFF]; + struct mal_descriptor *tx_desc; + struct mal_descriptor *rx_desc; + struct mal_descriptor *rx_dirty; + struct net_device_stats stats; + int tx_cnt; + int rx_slot; + int dirty_rx; + int tx_slot; + int ack_slot; + int rx_buffer_size; + + struct mii_phy phy_mii; + int mii_phy_addr; + int want_autoneg; + int timer_ticks; + struct timer_list link_timer; + struct net_device *mdio_dev; + + struct ocp_device *rgmii_dev; + int rgmii_input; + + struct ocp_device *zmii_dev; + int zmii_input; + + struct ibm_ocp_mal *mal; + int mal_tx_chan, mal_rx_chan; + struct mal_commac commac; + + struct ocp_device *tah_dev; + + int opened; + int going_away; + int wol_irq; + emac_t *emacp; + struct ocp_device *ocpdev; + struct net_device *ndev; + spinlock_t lock; +}; +#endif /* _IBM_EMAC_CORE_H_ */ diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c new file mode 100644 index 000000000..c8512046c --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_debug.c @@ -0,0 +1,224 @@ +/* + * ibm_ocp_debug.c + * + * This has all the debug routines that where in *_enet.c + * + * Armin Kuster akuster@mvista.com + * April , 2002 + * + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include "ibm_ocp_mal.h" +#include "ibm_ocp_zmii.h" +#include "ibm_ocp_enet.h" + +extern int emac_phy_read(struct net_device *dev, int mii_id, int reg); + +void emac_phy_dump(struct net_device *dev) +{ + struct ocp_enet_private *fep = dev->priv; + unsigned long i; + uint data; + + printk(KERN_DEBUG " Prepare for Phy dump....\n"); + for (i = 0; i < 0x1A; i++) { + data = emac_phy_read(dev, fep->mii_phy_addr, i); + printk(KERN_DEBUG "Phy reg 0x%lx ==> %4x\n", i, data); + if (i == 0x07) + i = 0x0f; + } +} + +void emac_desc_dump(struct net_device *dev) +{ + struct ocp_enet_private *fep = dev->priv; + int curr_slot; + + printk(KERN_DEBUG + "dumping the receive descriptors: current slot is %d\n", + fep->rx_slot); + for (curr_slot = 0; curr_slot < NUM_RX_BUFF; curr_slot++) { + printk(KERN_DEBUG + "Desc %02d: status 0x%04x, length %3d, addr 0x%x\n", + curr_slot, fep->rx_desc[curr_slot].ctrl, + fep->rx_desc[curr_slot].data_len, + (unsigned int)fep->rx_desc[curr_slot].data_ptr); + } +} + +void emac_mac_dump(struct net_device *dev) +{ + struct ocp_enet_private *fep = dev->priv; + volatile emac_t *emacp = fep->emacp; + + printk(KERN_DEBUG "EMAC DEBUG ********** \n"); + printk(KERN_DEBUG "EMAC_M0 ==> 0x%x\n", in_be32(&emacp->em0mr0)); + printk(KERN_DEBUG "EMAC_M1 ==> 0x%x\n", in_be32(&emacp->em0mr1)); + printk(KERN_DEBUG "EMAC_TXM0==> 0x%x\n", in_be32(&emacp->em0tmr0)); + printk(KERN_DEBUG "EMAC_TXM1==> 0x%x\n", in_be32(&emacp->em0tmr1)); + printk(KERN_DEBUG "EMAC_RXM ==> 0x%x\n", in_be32(&emacp->em0rmr)); + printk(KERN_DEBUG "EMAC_ISR ==> 0x%x\n", in_be32(&emacp->em0isr)); + printk(KERN_DEBUG "EMAC_IER ==> 0x%x\n", in_be32(&emacp->em0iser)); + printk(KERN_DEBUG "EMAC_IAH ==> 0x%x\n", in_be32(&emacp->em0iahr)); + printk(KERN_DEBUG "EMAC_IAL ==> 0x%x\n", in_be32(&emacp->em0ialr)); + printk(KERN_DEBUG "EMAC_VLAN_TPID_REG ==> 0x%x\n", + in_be32(&emacp->em0vtpid)); +} + +void emac_mal_dump(struct net_device *dev) +{ + struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; + + printk(KERN_DEBUG " MAL DEBUG ********** \n"); + printk(KERN_DEBUG " MCR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALCR)); + printk(KERN_DEBUG " ESR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALESR)); + printk(KERN_DEBUG " IER ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALIER)); +#ifdef CONFIG_40x + printk(KERN_DEBUG " DBR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALDBR)); +#endif /* CONFIG_40x */ + printk(KERN_DEBUG " TXCASR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCASR)); + printk(KERN_DEBUG " TXCARR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCARR)); + printk(KERN_DEBUG " TXEOBISR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXEOBISR)); + printk(KERN_DEBUG " TXDEIR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXDEIR)); + printk(KERN_DEBUG " RXCASR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCASR)); + printk(KERN_DEBUG " RXCARR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCARR)); + printk(KERN_DEBUG " RXEOBISR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXEOBISR)); + printk(KERN_DEBUG " RXDEIR ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXDEIR)); + printk(KERN_DEBUG " TXCTP0R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP0R)); + printk(KERN_DEBUG " TXCTP1R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP1R)); + printk(KERN_DEBUG " TXCTP2R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP2R)); + 
printk(KERN_DEBUG " TXCTP3R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP3R)); + printk(KERN_DEBUG " RXCTP0R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP0R)); + printk(KERN_DEBUG " RXCTP1R ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP1R)); + printk(KERN_DEBUG " RCBS0 ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS0)); + printk(KERN_DEBUG " RCBS1 ==> 0x%x\n", + (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS1)); +} + +void emac_serr_dump_0(struct net_device *dev) +{ + struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; + unsigned long int mal_error, plb_error, plb_addr; + + mal_error = get_mal_dcrn(mal, DCRN_MALESR); + printk(KERN_DEBUG "ppc405_eth_serr: %s channel %ld \n", + (mal_error & 0x40000000) ? "Receive" : + "Transmit", (mal_error & 0x3e000000) >> 25); + printk(KERN_DEBUG " ----- latched error -----\n"); + if (mal_error & MALESR_DE) + printk(KERN_DEBUG " DE: descriptor error\n"); + if (mal_error & MALESR_OEN) + printk(KERN_DEBUG " ONE: OPB non-fullword error\n"); + if (mal_error & MALESR_OTE) + printk(KERN_DEBUG " OTE: OPB timeout error\n"); + if (mal_error & MALESR_OSE) + printk(KERN_DEBUG " OSE: OPB slave error\n"); + + if (mal_error & MALESR_PEIN) { + plb_error = mfdcr(DCRN_PLB0_BESR); + printk(KERN_DEBUG + " PEIN: PLB error, PLB0_BESR is 0x%x\n", + (unsigned int)plb_error); + plb_addr = mfdcr(DCRN_PLB0_BEAR); + printk(KERN_DEBUG + " PEIN: PLB error, PLB0_BEAR is 0x%x\n", + (unsigned int)plb_addr); + } +} + +void emac_serr_dump_1(struct net_device *dev) +{ + struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; + int mal_error = get_mal_dcrn(mal, DCRN_MALESR); + + printk(KERN_DEBUG " ----- cumulative errors -----\n"); + if (mal_error & MALESR_DEI) + printk(KERN_DEBUG " DEI: descriptor error interrupt\n"); + if (mal_error & MALESR_ONEI) + printk(KERN_DEBUG " OPB non-fullword error interrupt\n"); + if (mal_error & MALESR_OTEI) + printk(KERN_DEBUG " OTEI: timeout error interrupt\n"); + if (mal_error & MALESR_OSEI) + printk(KERN_DEBUG " OSEI: slave error interrupt\n"); + if (mal_error & MALESR_PBEI) + printk(KERN_DEBUG " PBEI: PLB bus error interrupt\n"); +} + +void emac_err_dump(struct net_device *dev, int em0isr) +{ + printk(KERN_DEBUG "%s: on-chip ethernet error:\n", dev->name); + + if (em0isr & EMAC_ISR_OVR) + printk(KERN_DEBUG " OVR: overrun\n"); + if (em0isr & EMAC_ISR_PP) + printk(KERN_DEBUG " PP: control pause packet\n"); + if (em0isr & EMAC_ISR_BP) + printk(KERN_DEBUG " BP: packet error\n"); + if (em0isr & EMAC_ISR_RP) + printk(KERN_DEBUG " RP: runt packet\n"); + if (em0isr & EMAC_ISR_SE) + printk(KERN_DEBUG " SE: short event\n"); + if (em0isr & EMAC_ISR_ALE) + printk(KERN_DEBUG " ALE: odd number of nibbles in packet\n"); + if (em0isr & EMAC_ISR_BFCS) + printk(KERN_DEBUG " BFCS: bad FCS\n"); + if (em0isr & EMAC_ISR_PTLE) + printk(KERN_DEBUG " PTLE: oversized packet\n"); + if (em0isr & EMAC_ISR_ORE) + printk(KERN_DEBUG + " ORE: packet length field > max allowed LLC\n"); + if (em0isr & EMAC_ISR_IRE) + printk(KERN_DEBUG " IRE: In Range error\n"); + if (em0isr & EMAC_ISR_DBDM) + printk(KERN_DEBUG " DBDM: xmit error or SQE\n"); + if (em0isr & EMAC_ISR_DB0) + printk(KERN_DEBUG " DB0: xmit error or SQE on TX channel 0\n"); + if (em0isr & EMAC_ISR_SE0) + printk(KERN_DEBUG + " SE0: Signal Quality Error test failure from TX channel 0\n"); + if (em0isr & EMAC_ISR_TE0) + printk(KERN_DEBUG " TE0: xmit channel 0 aborted\n"); + if (em0isr & EMAC_ISR_DB1) + printk(KERN_DEBUG " DB1: xmit error or SQE 
on TX channel \n"); + if (em0isr & EMAC_ISR_SE1) + printk(KERN_DEBUG + " SE1: Signal Quality Error test failure from TX channel 1\n"); + if (em0isr & EMAC_ISR_TE1) + printk(KERN_DEBUG " TE1: xmit channel 1 aborted\n"); + if (em0isr & EMAC_ISR_MOS) + printk(KERN_DEBUG " MOS\n"); + if (em0isr & EMAC_ISR_MOF) + printk(KERN_DEBUG " MOF\n"); + + emac_mac_dump(dev); + emac_mal_dump(dev); +} diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c new file mode 100644 index 000000000..02d847cfa --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_mal.c @@ -0,0 +1,467 @@ +/* + * ibm_ocp_mal.c + * + * Armin Kuster akuster@mvista.com + * Juen, 2002 + * + * Copyright 2002 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ibm_emac_mal.h" + +// Locking: Should we share a lock with the client ? The client could provide +// a lock pointer (optionally) in the commac structure... I don't think this is +// really necessary though + +/* This lock protects the commac list. On today UP implementations, it's + * really only used as IRQ protection in mal_{register,unregister}_commac() + */ +static rwlock_t mal_list_lock = RW_LOCK_UNLOCKED; + +int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac) +{ + unsigned long flags; + + write_lock_irqsave(&mal_list_lock, flags); + + /* Don't let multiple commacs claim the same channel */ + if ((mal->tx_chan_mask & commac->tx_chan_mask) || + (mal->rx_chan_mask & commac->rx_chan_mask)) { + write_unlock_irqrestore(&mal_list_lock, flags); + return -EBUSY; + } + + mal->tx_chan_mask |= commac->tx_chan_mask; + mal->rx_chan_mask |= commac->rx_chan_mask; + + list_add(&commac->list, &mal->commac); + + write_unlock_irqrestore(&mal_list_lock, flags); + + MOD_INC_USE_COUNT; + + return 0; +} + +int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac) +{ + unsigned long flags; + + write_lock_irqsave(&mal_list_lock, flags); + + mal->tx_chan_mask &= ~commac->tx_chan_mask; + mal->rx_chan_mask &= ~commac->rx_chan_mask; + + list_del_init(&commac->list); + + write_unlock_irqrestore(&mal_list_lock, flags); + + MOD_DEC_USE_COUNT; + + return 0; +} + +int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size) +{ + switch (channel) { + case 0: + set_mal_dcrn(mal, DCRN_MALRCBS0, size); + break; +#ifdef DCRN_MALRCBS1 + case 1: + set_mal_dcrn(mal, DCRN_MALRCBS1, size); + break; +#endif +#ifdef DCRN_MALRCBS2 + case 2: + set_mal_dcrn(mal, DCRN_MALRCBS2, size); + break; +#endif +#ifdef DCRN_MALRCBS3 + case 3: + set_mal_dcrn(mal, DCRN_MALRCBS3, size); + break; +#endif + default: + return -EINVAL; + } + + return 0; +} + +static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ibm_ocp_mal *mal = dev_instance; + unsigned long mal_error; + + /* + * This SERR applies to one of the devices on the MAL, here we charge + * it against the first EMAC registered for the MAL. 
+ */ + + mal_error = get_mal_dcrn(mal, DCRN_MALESR); + + printk(KERN_ERR "%s: System Error (MALESR=%lx)\n", + "MAL" /* FIXME: get the name right */ , mal_error); + + /* FIXME: decipher error */ + /* DIXME: distribute to commacs, if possible */ + + /* Clear the error status register */ + set_mal_dcrn(mal, DCRN_MALESR, mal_error); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ibm_ocp_mal *mal = dev_instance; + struct list_head *l; + unsigned long isr; + + isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR); + set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr); + + read_lock(&mal_list_lock); + list_for_each(l, &mal->commac) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + + if (isr & mc->tx_chan_mask) { + mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask); + } + } + read_unlock(&mal_list_lock); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ibm_ocp_mal *mal = dev_instance; + struct list_head *l; + unsigned long isr; + + isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR); + set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr); + + read_lock(&mal_list_lock); + list_for_each(l, &mal->commac) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + + if (isr & mc->rx_chan_mask) { + mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask); + } + } + read_unlock(&mal_list_lock); + + return IRQ_HANDLED; +} + +static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ibm_ocp_mal *mal = dev_instance; + struct list_head *l; + unsigned long deir; + + deir = get_mal_dcrn(mal, DCRN_MALTXDEIR); + + /* FIXME: print which MAL correctly */ + printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n", + "MAL", deir); + + read_lock(&mal_list_lock); + list_for_each(l, &mal->commac) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + + if (deir & mc->tx_chan_mask) { + mc->ops->txde(mc->dev, deir & mc->tx_chan_mask); + } + } + read_unlock(&mal_list_lock); + + return IRQ_HANDLED; +} + +/* + * This interrupt should be very rare at best. This occurs when + * the hardware has a problem with the receive descriptors. The manual + * states that it occurs when the hardware cannot the receive descriptor + * empty bit is not set. The recovery mechanism will be to + * traverse through the descriptors, handle any that are marked to be + * handled and reinitialize each along the way. At that point the driver + * will be restarted. + */ +static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ibm_ocp_mal *mal = dev_instance; + struct list_head *l; + unsigned long deir; + + deir = get_mal_dcrn(mal, DCRN_MALRXDEIR); + + /* + * This really is needed. This case encountered in stress testing. 
+ */ + if (deir == 0) + return IRQ_HANDLED; + + /* FIXME: print which MAL correctly */ + printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n", + "MAL", deir); + + read_lock(&mal_list_lock); + list_for_each(l, &mal->commac) { + struct mal_commac *mc = list_entry(l, struct mal_commac, list); + + if (deir & mc->rx_chan_mask) { + mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask); + } + } + read_unlock(&mal_list_lock); + + return IRQ_HANDLED; +} + +static int __init mal_probe(struct ocp_device *ocpdev) +{ + struct ibm_ocp_mal *mal = NULL; + struct ocp_func_mal_data *maldata; + int err = 0; + + maldata = (struct ocp_func_mal_data *)ocpdev->def->additions; + if (maldata == NULL) { + printk(KERN_ERR "mal%d: Missing additional datas !\n", + ocpdev->def->index); + return -ENODEV; + } + + mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL); + if (mal == NULL) { + printk(KERN_ERR + "mal%d: Out of memory allocating MAL structure !\n", + ocpdev->def->index); + return -ENOMEM; + } + memset(mal, 0, sizeof(*mal)); + + switch (ocpdev->def->index) { + case 0: + mal->dcrbase = DCRN_MAL_BASE; + break; +#ifdef DCRN_MAL1_BASE + case 1: + mal->dcrbase = DCRN_MAL1_BASE; + break; +#endif + default: + BUG(); + } + + /**************************/ + + INIT_LIST_HEAD(&mal->commac); + + set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF); + set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF); + + set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR); /* 384 */ + /* FIXME: Add delay */ + + /* Set the MAL configuration register */ + set_mal_dcrn(mal, DCRN_MALCR, + MALCR_PLBB | MALCR_OPBBL | MALCR_LEA | + MALCR_PLBLT_DEFAULT); + + /* It would be nice to allocate buffers separately for each + * channel, but we can't because the channels share the upper + * 13 bits of address lines. Each channels buffer must also + * be 4k aligned, so we allocate 4k for each channel. 
This is + * inefficient FIXME: do better, if possible */ + mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev, + MAL_DT_ALIGN * + maldata->num_tx_chans, + &mal->tx_phys_addr, GFP_KERNEL); + if (mal->tx_virt_addr == NULL) { + printk(KERN_ERR + "mal%d: Out of memory allocating MAL descriptors !\n", + ocpdev->def->index); + err = -ENOMEM; + goto fail; + } + + /* God, oh, god, I hate DCRs */ + set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr); +#ifdef DCRN_MALTXCTP1R + if (maldata->num_tx_chans > 1) + set_mal_dcrn(mal, DCRN_MALTXCTP1R, + mal->tx_phys_addr + MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP1R */ +#ifdef DCRN_MALTXCTP2R + if (maldata->num_tx_chans > 2) + set_mal_dcrn(mal, DCRN_MALTXCTP2R, + mal->tx_phys_addr + 2 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP2R */ +#ifdef DCRN_MALTXCTP3R + if (maldata->num_tx_chans > 3) + set_mal_dcrn(mal, DCRN_MALTXCTP3R, + mal->tx_phys_addr + 3 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP3R */ +#ifdef DCRN_MALTXCTP4R + if (maldata->num_tx_chans > 4) + set_mal_dcrn(mal, DCRN_MALTXCTP4R, + mal->tx_phys_addr + 4 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP4R */ +#ifdef DCRN_MALTXCTP5R + if (maldata->num_tx_chans > 5) + set_mal_dcrn(mal, DCRN_MALTXCTP5R, + mal->tx_phys_addr + 5 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP5R */ +#ifdef DCRN_MALTXCTP6R + if (maldata->num_tx_chans > 6) + set_mal_dcrn(mal, DCRN_MALTXCTP6R, + mal->tx_phys_addr + 6 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP6R */ +#ifdef DCRN_MALTXCTP7R + if (maldata->num_tx_chans > 7) + set_mal_dcrn(mal, DCRN_MALTXCTP7R, + mal->tx_phys_addr + 7 * MAL_DT_ALIGN); +#endif /* DCRN_MALTXCTP7R */ + + mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev, + MAL_DT_ALIGN * + maldata->num_rx_chans, + &mal->rx_phys_addr, GFP_KERNEL); + + set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr); +#ifdef DCRN_MALRXCTP1R + if (maldata->num_rx_chans > 1) + set_mal_dcrn(mal, DCRN_MALRXCTP1R, + mal->rx_phys_addr + MAL_DT_ALIGN); +#endif /* DCRN_MALRXCTP1R */ +#ifdef DCRN_MALRXCTP2R + if (maldata->num_rx_chans > 2) + set_mal_dcrn(mal, DCRN_MALRXCTP2R, + mal->rx_phys_addr + 2 * MAL_DT_ALIGN); +#endif /* DCRN_MALRXCTP2R */ +#ifdef DCRN_MALRXCTP3R + if (maldata->num_rx_chans > 3) + set_mal_dcrn(mal, DCRN_MALRXCTP3R, + mal->rx_phys_addr + 3 * MAL_DT_ALIGN); +#endif /* DCRN_MALRXCTP3R */ + + err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal); + if (err) + goto fail; + err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE ", mal); + if (err) + goto fail; + err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal); + if (err) + goto fail; + err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal); + if (err) + goto fail; + err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal); + if (err) + goto fail; + + set_mal_dcrn(mal, DCRN_MALIER, + MALIER_DE | MALIER_NE | MALIER_TE | + MALIER_OPBE | MALIER_PLBE); + + /* Advertise me to the rest of the world */ + ocp_set_drvdata(ocpdev, mal); + + printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n", + ocpdev->def->index, maldata->num_tx_chans, + maldata->num_rx_chans); + + return 0; + + fail: + /* FIXME: dispose requested IRQs ! 
*/ + if (err && mal) + kfree(mal); + return err; +} + +static void __exit mal_remove(struct ocp_device *ocpdev) +{ + struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev); + struct ocp_func_mal_data *maldata = ocpdev->def->additions; + + BUG_ON(!maldata); + + ocp_set_drvdata(ocpdev, NULL); + + /* FIXME: shut down the MAL, deal with dependency with emac */ + free_irq(maldata->serr_irq, mal); + free_irq(maldata->txde_irq, mal); + free_irq(maldata->txeob_irq, mal); + free_irq(maldata->rxde_irq, mal); + free_irq(maldata->rxeob_irq, mal); + + if (mal->tx_virt_addr) + dma_free_coherent(&ocpdev->dev, + MAL_DT_ALIGN * maldata->num_tx_chans, + mal->tx_virt_addr, mal->tx_phys_addr); + + if (mal->rx_virt_addr) + dma_free_coherent(&ocpdev->dev, + MAL_DT_ALIGN * maldata->num_rx_chans, + mal->rx_virt_addr, mal->rx_phys_addr); + + kfree(mal); +} + +/* Structure for a device driver */ +static struct ocp_device_id mal_ids[] = { + {.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL}, + {.vendor = OCP_VENDOR_INVALID} +}; + +static struct ocp_driver mal_driver = { + .name = "mal", + .id_table = mal_ids, + + .probe = mal_probe, + .remove = mal_remove, +}; + +static int __init init_mals(void) +{ + int rc; + + rc = ocp_register_driver(&mal_driver); + if (rc < 0) { + ocp_unregister_driver(&mal_driver); + return -ENODEV; + } + + return 0; +} + +static void __exit exit_mals(void) +{ + ocp_unregister_driver(&mal_driver); +} + +module_init(init_mals); +module_exit(exit_mals); diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h new file mode 100644 index 000000000..8e456ce5a --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_mal.h @@ -0,0 +1,130 @@ +#ifndef _IBM_EMAC_MAL_H +#define _IBM_EMAC_MAL_H + +#include + +#define MAL_DT_ALIGN (4096) /* Alignment for each channel's descriptor table */ + +#define MAL_CHAN_MASK(chan) (0x80000000 >> (chan)) + +/* MAL Buffer Descriptor structure */ +struct mal_descriptor { + unsigned short ctrl; /* MAL / Commac status control bits */ + short data_len; /* Max length is 4K-1 (12 bits) */ + unsigned char *data_ptr; /* pointer to actual data buffer */ +} __attribute__ ((packed)); + +/* the following defines are for the MadMAL status and control registers. 
*/ +/* MADMAL transmit and receive status/control bits */ +#define MAL_RX_CTRL_EMPTY 0x8000 +#define MAL_RX_CTRL_WRAP 0x4000 +#define MAL_RX_CTRL_CM 0x2000 +#define MAL_RX_CTRL_LAST 0x1000 +#define MAL_RX_CTRL_FIRST 0x0800 +#define MAL_RX_CTRL_INTR 0x0400 + +#define MAL_TX_CTRL_READY 0x8000 +#define MAL_TX_CTRL_WRAP 0x4000 +#define MAL_TX_CTRL_CM 0x2000 +#define MAL_TX_CTRL_LAST 0x1000 +#define MAL_TX_CTRL_INTR 0x0400 + +struct mal_commac_ops { + void (*txeob) (void *dev, u32 chanmask); + void (*txde) (void *dev, u32 chanmask); + void (*rxeob) (void *dev, u32 chanmask); + void (*rxde) (void *dev, u32 chanmask); +}; + +struct mal_commac { + struct mal_commac_ops *ops; + void *dev; + u32 tx_chan_mask, rx_chan_mask; + struct list_head list; +}; + +struct ibm_ocp_mal { + int dcrbase; + + struct list_head commac; + u32 tx_chan_mask, rx_chan_mask; + + dma_addr_t tx_phys_addr; + struct mal_descriptor *tx_virt_addr; + + dma_addr_t rx_phys_addr; + struct mal_descriptor *rx_virt_addr; +}; + +#define GET_MAL_STANZA(base,dcrn) \ + case base: \ + x = mfdcr(dcrn(base)); \ + break; + +#define SET_MAL_STANZA(base,dcrn, val) \ + case base: \ + mtdcr(dcrn(base), (val)); \ + break; + +#define GET_MAL0_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL_BASE,dcrn) +#define SET_MAL0_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL_BASE,dcrn,val) + +#ifdef DCRN_MAL1_BASE +#define GET_MAL1_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL1_BASE,dcrn) +#define SET_MAL1_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL1_BASE,dcrn,val) +#else /* ! DCRN_MAL1_BASE */ +#define GET_MAL1_STANZA(dcrn) +#define SET_MAL1_STANZA(dcrn,val) +#endif + +#define get_mal_dcrn(mal, dcrn) ({ \ + u32 x; \ + switch ((mal)->dcrbase) { \ + GET_MAL0_STANZA(dcrn) \ + GET_MAL1_STANZA(dcrn) \ + default: \ + BUG(); \ + } \ +x; }) + +#define set_mal_dcrn(mal, dcrn, val) do { \ + switch ((mal)->dcrbase) { \ + SET_MAL0_STANZA(dcrn,val) \ + SET_MAL1_STANZA(dcrn,val) \ + default: \ + BUG(); \ + } } while (0) + +static inline void mal_enable_tx_channels(struct ibm_ocp_mal *mal, u32 chanmask) +{ + set_mal_dcrn(mal, DCRN_MALTXCASR, + get_mal_dcrn(mal, DCRN_MALTXCASR) | chanmask); +} + +static inline void mal_disable_tx_channels(struct ibm_ocp_mal *mal, + u32 chanmask) +{ + set_mal_dcrn(mal, DCRN_MALTXCARR, chanmask); +} + +static inline void mal_enable_rx_channels(struct ibm_ocp_mal *mal, u32 chanmask) +{ + set_mal_dcrn(mal, DCRN_MALRXCASR, + get_mal_dcrn(mal, DCRN_MALRXCASR) | chanmask); +} + +static inline void mal_disable_rx_channels(struct ibm_ocp_mal *mal, + u32 chanmask) +{ + set_mal_dcrn(mal, DCRN_MALRXCARR, chanmask); +} + +extern int mal_register_commac(struct ibm_ocp_mal *mal, + struct mal_commac *commac); +extern int mal_unregister_commac(struct ibm_ocp_mal *mal, + struct mal_commac *commac); + +extern int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, + unsigned long size); + +#endif /* _IBM_EMAC_MAL_H */ diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c new file mode 100644 index 000000000..b439087df --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_phy.c @@ -0,0 +1,297 @@ +/* + * ibm_ocp_phy.c + * + * PHY drivers for the ibm ocp ethernet driver. Borrowed + * from sungem_phy.c, though I only kept the generic MII + * driver for now. 
+ * + * This file should be shared with other drivers or eventually + * merged as the "low level" part of miilib + * + * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org) + * + */ + +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ibm_emac_phy.h" + +static int reset_one_mii_phy(struct mii_phy *phy, int phy_id) +{ + u16 val; + int limit = 10000; + + val = __phy_read(phy, phy_id, MII_BMCR); + val &= ~BMCR_ISOLATE; + val |= BMCR_RESET; + __phy_write(phy, phy_id, MII_BMCR, val); + + udelay(100); + + while (limit--) { + val = __phy_read(phy, phy_id, MII_BMCR); + if ((val & BMCR_RESET) == 0) + break; + udelay(10); + } + if ((val & BMCR_ISOLATE) && limit > 0) + __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); + + return (limit <= 0); +} + +static int cis8201_init(struct mii_phy *phy) +{ + u16 epcr; + + epcr = phy_read(phy, MII_CIS8201_EPCR); + epcr &= ~EPCR_MODE_MASK; + + switch (phy->mode) { + case PHY_MODE_TBI: + epcr |= EPCR_TBI_MODE; + break; + case PHY_MODE_RTBI: + epcr |= EPCR_RTBI_MODE; + break; + case PHY_MODE_GMII: + epcr |= EPCR_GMII_MODE; + break; + case PHY_MODE_RGMII: + default: + epcr |= EPCR_RGMII_MODE; + } + + phy_write(phy, MII_CIS8201_EPCR, epcr); + + return 0; +} + +static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) +{ + u16 ctl, adv; + + phy->autoneg = 1; + phy->speed = SPEED_10; + phy->duplex = DUPLEX_HALF; + phy->pause = 0; + phy->advertising = advertise; + + /* Setup standard advertise */ + adv = phy_read(phy, MII_ADVERTISE); + adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (advertise & ADVERTISED_10baseT_Half) + adv |= ADVERTISE_10HALF; + if (advertise & ADVERTISED_10baseT_Full) + adv |= ADVERTISE_10FULL; + if (advertise & ADVERTISED_100baseT_Half) + adv |= ADVERTISE_100HALF; + if (advertise & ADVERTISED_100baseT_Full) + adv |= ADVERTISE_100FULL; + phy_write(phy, MII_ADVERTISE, adv); + + /* Start/Restart aneg */ + ctl = phy_read(phy, MII_BMCR); + ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); + phy_write(phy, MII_BMCR, ctl); + + return 0; +} + +static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) +{ + u16 ctl; + + phy->autoneg = 0; + phy->speed = speed; + phy->duplex = fd; + phy->pause = 0; + + ctl = phy_read(phy, MII_BMCR); + ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE); + + /* First reset the PHY */ + phy_write(phy, MII_BMCR, ctl | BMCR_RESET); + + /* Select speed & duplex */ + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + ctl |= BMCR_SPEED100; + break; + case SPEED_1000: + default: + return -EINVAL; + } + if (fd == DUPLEX_FULL) + ctl |= BMCR_FULLDPLX; + phy_write(phy, MII_BMCR, ctl); + + return 0; +} + +static int genmii_poll_link(struct mii_phy *phy) +{ + u16 status; + + (void)phy_read(phy, MII_BMSR); + status = phy_read(phy, MII_BMSR); + if ((status & BMSR_LSTATUS) == 0) + return 0; + if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) + return 0; + return 1; +} + +#define MII_CIS8201_ACSR 0x1c +#define ACSR_DUPLEX_STATUS 0x0020 +#define ACSR_SPEED_1000BASET 0x0010 +#define ACSR_SPEED_100BASET 0x0008 + +static int cis8201_read_link(struct mii_phy *phy) +{ + u16 acsr; + + if (phy->autoneg) { + acsr = phy_read(phy, MII_CIS8201_ACSR); + + if (acsr & ACSR_DUPLEX_STATUS) + phy->duplex = DUPLEX_FULL; + else + phy->duplex = DUPLEX_HALF; + if (acsr & ACSR_SPEED_1000BASET) { + phy->speed = SPEED_1000; + } else if (acsr & ACSR_SPEED_100BASET) + phy->speed = SPEED_100; + else + phy->speed = SPEED_10; + phy->pause = 0; + } + /* On non-aneg, 
we assume what we put in BMCR is the speed, + * though magic-aneg shouldn't prevent this case from occurring + */ + + return 0; +} + +static int genmii_read_link(struct mii_phy *phy) +{ + u16 lpa; + + if (phy->autoneg) { + lpa = phy_read(phy, MII_LPA); + + if (lpa & (LPA_10FULL | LPA_100FULL)) + phy->duplex = DUPLEX_FULL; + else + phy->duplex = DUPLEX_HALF; + if (lpa & (LPA_100FULL | LPA_100HALF)) + phy->speed = SPEED_100; + else + phy->speed = SPEED_10; + phy->pause = 0; + } + /* On non-aneg, we assume what we put in BMCR is the speed, + * though magic-aneg shouldn't prevent this case from occurring + */ + + return 0; +} + +#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII) +#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \ + SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full) + +/* CIS8201 phy ops */ +static struct mii_phy_ops cis8201_phy_ops = { + init:cis8201_init, + setup_aneg:genmii_setup_aneg, + setup_forced:genmii_setup_forced, + poll_link:genmii_poll_link, + read_link:cis8201_read_link +}; + +/* Generic implementation for most 10/100 PHYs */ +static struct mii_phy_ops generic_phy_ops = { + setup_aneg:genmii_setup_aneg, + setup_forced:genmii_setup_forced, + poll_link:genmii_poll_link, + read_link:genmii_read_link +}; + +static struct mii_phy_def cis8201_phy_def = { + phy_id:0x000fc410, + phy_id_mask:0x000ffff0, + name:"CIS8201 Gigabit Ethernet", + features:MII_GBIT_FEATURES, + magic_aneg:0, + ops:&cis8201_phy_ops +}; + +static struct mii_phy_def genmii_phy_def = { + phy_id:0x00000000, + phy_id_mask:0x00000000, + name:"Generic MII", + features:MII_BASIC_FEATURES, + magic_aneg:0, + ops:&generic_phy_ops +}; + +static struct mii_phy_def *mii_phy_table[] = { + &cis8201_phy_def, + &genmii_phy_def, + NULL +}; + +int mii_phy_probe(struct mii_phy *phy, int mii_id) +{ + int rc; + u32 id; + struct mii_phy_def *def; + int i; + + phy->autoneg = 0; + phy->advertising = 0; + phy->mii_id = mii_id; + phy->speed = 0; + phy->duplex = 0; + phy->pause = 0; + + /* Take PHY out of isloate mode and reset it. */ + rc = reset_one_mii_phy(phy, mii_id); + if (rc) + return -ENODEV; + + /* Read ID and find matching entry */ + id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)) + & 0xfffffff0; + for (i = 0; (def = mii_phy_table[i]) != NULL; i++) + if ((id & def->phy_id_mask) == def->phy_id) + break; + /* Should never be NULL (we have a generic entry), but... */ + if (def == NULL) + return -ENODEV; + + phy->def = def; + + /* Setup default advertising */ + phy->advertising = def->features; + + return 0; +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h new file mode 100644 index 000000000..49f188f4e --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h @@ -0,0 +1,65 @@ +/* + * Defines for the IBM RGMII bridge + * + * Based on ocp_zmii.h/ibm_emac_zmii.h + * Armin Kuster akuster@mvista.com + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef _IBM_EMAC_RGMII_H_ +#define _IBM_EMAC_RGMII_H_ + +#include + +/* RGMII bridge */ +typedef struct rgmii_regs { + u32 fer; /* Function enable register */ + u32 ssr; /* Speed select register */ +} rgmii_t; + +#define RGMII_INPUTS 4 + +/* RGMII device */ +struct ibm_ocp_rgmii { + struct rgmii_regs *base; + int mode[RGMII_INPUTS]; + int users; /* number of EMACs using this RGMII bridge */ +}; + +/* Fuctional Enable Reg */ +#define RGMII_FER_MASK(x) (0x00000007 << (4*x)) +#define RGMII_RTBI 0x00000004 +#define RGMII_RGMII 0x00000005 +#define RGMII_TBI 0x00000006 +#define RGMII_GMII 0x00000007 + +/* Speed Selection reg */ + +#define RGMII_SP2_100 0x00000002 +#define RGMII_SP2_1000 0x00000004 +#define RGMII_SP3_100 0x00000200 +#define RGMII_SP3_1000 0x00000400 + +#define RGMII_MII2_SPDMASK 0x00000007 +#define RGMII_MII3_SPDMASK 0x00000700 + +#define RGMII_MII2_100MB RGMII_SP2_100 & ~RGMII_SP2_1000 +#define RGMII_MII2_1000MB RGMII_SP2_1000 & ~RGMII_SP2_100 +#define RGMII_MII2_10MB ~(RGMII_SP2_100 | RGMII_SP2_1000) +#define RGMII_MII3_100MB RGMII_SP3_100 & ~RGMII_SP3_1000 +#define RGMII_MII3_1000MB RGMII_SP3_1000 & ~RGMII_SP3_100 +#define RGMII_MII3_10MB ~(RGMII_SP3_100 | RGMII_SP3_1000) + +#define RTBI 0 +#define RGMII 1 +#define TBI 2 +#define GMII 3 + +#endif /* _IBM_EMAC_RGMII_H_ */ diff --git a/drivers/net/ibm_emac/ibm_emac_tah.h b/drivers/net/ibm_emac/ibm_emac_tah.h new file mode 100644 index 000000000..ecfc69805 --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_tah.h @@ -0,0 +1,48 @@ +/* + * Defines for the IBM TAH + * + * Copyright 2004 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef _IBM_EMAC_TAH_H +#define _IBM_EMAC_TAH_H + +/* TAH */ +typedef struct tah_regs { + u32 tah_revid; + u32 pad[3]; + u32 tah_mr; + u32 tah_ssr0; + u32 tah_ssr1; + u32 tah_ssr2; + u32 tah_ssr3; + u32 tah_ssr4; + u32 tah_ssr5; + u32 tah_tsr; +} tah_t; + +/* TAH engine */ +#define TAH_MR_CVR 0x80000000 +#define TAH_MR_SR 0x40000000 +#define TAH_MR_ST_256 0x01000000 +#define TAH_MR_ST_512 0x02000000 +#define TAH_MR_ST_768 0x03000000 +#define TAH_MR_ST_1024 0x04000000 +#define TAH_MR_ST_1280 0x05000000 +#define TAH_MR_ST_1536 0x06000000 +#define TAH_MR_TFS_16KB 0x00000000 +#define TAH_MR_TFS_2KB 0x00200000 +#define TAH_MR_TFS_4KB 0x00400000 +#define TAH_MR_TFS_6KB 0x00600000 +#define TAH_MR_TFS_8KB 0x00800000 +#define TAH_MR_TFS_10KB 0x00a00000 +#define TAH_MR_DTFP 0x00100000 +#define TAH_MR_DIG 0x00080000 + +#endif /* _IBM_EMAC_TAH_H */ diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h new file mode 100644 index 000000000..6f6cd2a39 --- /dev/null +++ b/drivers/net/ibm_emac/ibm_emac_zmii.h @@ -0,0 +1,93 @@ +/* + * ocp_zmii.h + * + * Defines for the IBM ZMII bridge + * + * Armin Kuster akuster@mvista.com + * Dec, 2001 + * + * Copyright 2001 MontaVista Softare Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef _IBM_EMAC_ZMII_H_ +#define _IBM_EMAC_ZMII_H_ + +#include + +/* ZMII bridge registers */ +struct zmii_regs { + u32 fer; /* Function enable reg */ + u32 ssr; /* Speed select reg */ + u32 smiirs; /* SMII status reg */ +}; + +#define ZMII_INPUTS 4 + +/* ZMII device */ +struct ibm_ocp_zmii { + struct zmii_regs *base; + int mode[ZMII_INPUTS]; + int users; /* number of EMACs using this ZMII bridge */ +}; + +/* Fuctional Enable Reg */ + +#define ZMII_FER_MASK(x) (0xf0000000 >> (4*x)) + +#define ZMII_MDI0 0x80000000 +#define ZMII_SMII0 0x40000000 +#define ZMII_RMII0 0x20000000 +#define ZMII_MII0 0x10000000 +#define ZMII_MDI1 0x08000000 +#define ZMII_SMII1 0x04000000 +#define ZMII_RMII1 0x02000000 +#define ZMII_MII1 0x01000000 +#define ZMII_MDI2 0x00800000 +#define ZMII_SMII2 0x00400000 +#define ZMII_RMII2 0x00200000 +#define ZMII_MII2 0x00100000 +#define ZMII_MDI3 0x00080000 +#define ZMII_SMII3 0x00040000 +#define ZMII_RMII3 0x00020000 +#define ZMII_MII3 0x00010000 + +/* Speed Selection reg */ + +#define ZMII_SCI0 0x40000000 +#define ZMII_FSS0 0x20000000 +#define ZMII_SP0 0x10000000 +#define ZMII_SCI1 0x04000000 +#define ZMII_FSS1 0x02000000 +#define ZMII_SP1 0x01000000 +#define ZMII_SCI2 0x00400000 +#define ZMII_FSS2 0x00200000 +#define ZMII_SP2 0x00100000 +#define ZMII_SCI3 0x00040000 +#define ZMII_FSS3 0x00020000 +#define ZMII_SP3 0x00010000 + +#define ZMII_MII0_100MB ZMII_SP0 +#define ZMII_MII0_10MB ~ZMII_SP0 +#define ZMII_MII1_100MB ZMII_SP1 +#define ZMII_MII1_10MB ~ZMII_SP1 +#define ZMII_MII2_100MB ZMII_SP2 +#define ZMII_MII2_10MB ~ZMII_SP2 +#define ZMII_MII3_100MB ZMII_SP3 +#define ZMII_MII3_10MB ~ZMII_SP3 + +/* SMII Status reg */ + +#define ZMII_STS0 0xFF000000 /* EMAC0 smii status mask */ +#define ZMII_STS1 0x00FF0000 /* EMAC1 smii status mask */ + +#define SMII 0 +#define RMII 1 +#define MII 2 +#define MDI 3 + +#endif /* _IBM_EMAC_ZMII_H_ */ diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c new file mode 100644 index 000000000..86f34b5ec --- /dev/null +++ b/drivers/net/ne-h8300.c @@ -0,0 +1,666 @@ +/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */ +/* + original ne.c + Written 1992-94 by Donald Becker. + + Copyright 1993 United States Government as represented by the + Director, National Security Agency. + + This software may be used and distributed according to the terms + of the GNU General Public License, incorporated herein by reference. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 + + H8/300 modified + Yoshinori Sato +*/ + +static const char version1[] = +"ne-h8300.c:v1.00 2004/04/11 ysato\n"; + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "8390.h" + +/* Some defines that people can play with if so inclined. */ + +/* Do we perform extra sanity checks on stuff ? */ +/* #define NE_SANITY_CHECK */ + +/* Do we implement the read before write bugfix ? */ +/* #define NE_RW_BUGFIX */ + +/* Do we have a non std. amount of memory? (in units of 256 byte pages) */ +/* #define PACKETBUF_MEMSIZE 0x40 */ + +/* A zero-terminated list of I/O addresses to be probed at boot. */ + +/* ---- No user-serviceable parts below ---- */ + +#define NE_BASE (dev->base_addr) +#define NE_CMD 0x00 +#define NE_DATAPORT (ei_status.word16?0x20:0x10) /* NatSemi-defined port window offset. */ +#define NE_RESET (ei_status.word16?0x3f:0x1f) /* Issue a read to reset, a write to clear. 
*/ +#define NE_IO_EXTENT (ei_status.word16?0x40:0x20) + +#define NESM_START_PG 0x40 /* First page of TX buffer */ +#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ + +static int ne_probe1(struct net_device *dev, int ioaddr); + +static int ne_open(struct net_device *dev); +static int ne_close(struct net_device *dev); + +static void ne_reset_8390(struct net_device *dev); +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, + int ring_page); +static void ne_block_input(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); +static void ne_block_output(struct net_device *dev, const int count, + const unsigned char *buf, const int start_page); + + +static u32 reg_offset[16]; + +static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr) +{ + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); + int i; + unsigned char bus_width; + + bus_width = *(volatile unsigned char *)ABWCR; + bus_width &= 1 << ((base_addr >> 21) & 7); + + for (i = 0; i < sizeof(reg_offset) / sizeof(u32); i++) + if (bus_width == 0) + reg_offset[i] = i * 2 + 1; + else + reg_offset[i] = i; + + ei_local->reg_offset = reg_offset; + return 0; +} + +static int __initdata h8300_ne_count = 0; +#ifdef CONFIG_H8300H_H8MAX +static unsigned long __initdata h8300_ne_base[] = { 0x800600 }; +static int h8300_ne_irq[] = {EXT_IRQ4}; +#endif +#ifdef CONFIG_H8300H_AKI3068NET +static unsigned long __initdata h8300_ne_base[] = { 0x200000 }; +static int h8300_ne_irq[] = {EXT_IRQ5}; +#endif + +static inline int init_dev(struct net_device *dev) +{ + if (h8300_ne_count < (sizeof(h8300_ne_base) / sizeof(unsigned long))) { + dev->base_addr = h8300_ne_base[h8300_ne_count]; + dev->irq = h8300_ne_irq[h8300_ne_count]; + h8300_ne_count++; + return 0; + } else + return -ENODEV; +} + +/* Probe for various non-shared-memory ethercards. + + NEx000-clone boards have a Station Address PROM (SAPROM) in the packet + buffer memory space. NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of + the SAPROM, while other supposed NE2000 clones must be detected by their + SA prefix. + + Reading the SAPROM from a word-wide card with the 8390 set in byte-wide + mode results in doubled values, which can be detected and compensated for. + + The probe is also responsible for initializing the card and filling + in the 'dev' and 'ei_status' structures. + + We use the minimum memory size for some ethercard product lines, iff we can't + distinguish models. You can increase the packet buffer size by setting + PACKETBUF_MEMSIZE. Reported Cabletron packet buffer locations are: + E1010 starts at 0x100 and ends at 0x2000. + E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory") + E2010 starts at 0x100 and ends at 0x4000. + E2010-x starts at 0x100 and ends at 0xffff. */ + +static int __init do_ne_probe(struct net_device *dev) +{ + unsigned int base_addr = dev->base_addr; + + SET_MODULE_OWNER(dev); + + /* First check any supplied i/o locations. User knows best. */ + if (base_addr > 0x1ff) /* Check a single specified location. */ + return ne_probe1(dev, base_addr); + else if (base_addr != 0) /* Don't probe at all. 
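+ A nonzero address this low means the user asked us not to probe, so bail out with -ENXIO.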
*/ + return -ENXIO; + + return -ENODEV; +} + +static void cleanup_card(struct net_device *dev) +{ + free_irq(dev->irq, dev); + release_region(dev->base_addr, NE_IO_EXTENT); +} + +struct net_device * __init ne_probe(int unit) +{ + struct net_device *dev = alloc_ei_netdev(); + int err; + + if (!dev) + return ERR_PTR(-ENOMEM); + + if (init_dev(dev)) + return ERR_PTR(-ENODEV); + + sprintf(dev->name, "eth%d", unit); + netdev_boot_setup_check(dev); + + err = init_reg_offset(dev, dev->base_addr); + if (err) + goto out; + + err = do_ne_probe(dev); + if (err) + goto out; + err = register_netdev(dev); + if (err) + goto out1; + return dev; +out1: + cleanup_card(dev); +out: + free_netdev(dev); + return ERR_PTR(err); +} + +static int __init ne_probe1(struct net_device *dev, int ioaddr) +{ + int i; + unsigned char SA_prom[16]; + int wordlength = 2; + const char *name = NULL; + int start_page, stop_page; + int reg0, ret; + static unsigned version_printed; + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); + unsigned char bus_width; + + if (!request_region(ioaddr, NE_IO_EXTENT, dev->name)) + return -EBUSY; + + reg0 = inb_p(ioaddr); + if (reg0 == 0xFF) { + ret = -ENODEV; + goto err_out; + } + + /* Do a preliminary verification that we have a 8390. */ + { + int regd; + outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD); + regd = inb_p(ioaddr + EI_SHIFT(0x0d)); + outb_p(0xff, ioaddr + EI_SHIFT(0x0d)); + outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD); + inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ + if (inb_p(ioaddr + EN0_COUNTER0) != 0) { + outb_p(reg0, ioaddr + EI_SHIFT(0)); + outb_p(regd, ioaddr + EI_SHIFT(0x0d)); /* Restore the old values. */ + ret = -ENODEV; + goto err_out; + } + } + + if (ei_debug && version_printed++ == 0) + printk(KERN_INFO "%s", version1); + + printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr); + + /* Read the 16 bytes of station address PROM. + We must first initialize registers, similar to NS8390_init(eifdev, 0). + We can't reliably read the SAPROM address without this. + (I learned the hard way!). */ + { + struct {unsigned char value, offset; } program_seq[] = + { + {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/ + {0x48, EN0_DCFG}, /* Set byte-wide (0x48) access. */ + {0x00, EN0_RCNTLO}, /* Clear the count regs. */ + {0x00, EN0_RCNTHI}, + {0x00, EN0_IMR}, /* Mask completion irq. */ + {0xFF, EN0_ISR}, + {E8390_RXOFF, EN0_RXCR}, /* 0x20 Set to monitor */ + {E8390_TXOFF, EN0_TXCR}, /* 0x02 and loopback mode. */ + {32, EN0_RCNTLO}, + {0x00, EN0_RCNTHI}, + {0x00, EN0_RSARLO}, /* DMA starting at 0x0000. */ + {0x00, EN0_RSARHI}, + {E8390_RREAD+E8390_START, E8390_CMD}, + }; + + for (i = 0; i < sizeof(program_seq)/sizeof(program_seq[0]); i++) + outb_p(program_seq[i].value, ioaddr + program_seq[i].offset); + + } + bus_width = *(volatile unsigned char *)ABWCR; + bus_width &= 1 << ((ioaddr >> 21) & 7); + ei_status.word16 = (bus_width == 0); /* temporary setting */ + for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) { + SA_prom[i] = inb_p(ioaddr + NE_DATAPORT); + inb_p(ioaddr + NE_DATAPORT); /* dummy read */ + } + + start_page = NESM_START_PG; + stop_page = NESM_STOP_PG; + + if (bus_width) + wordlength = 1; + else + outb_p(0x49, ioaddr + EN0_DCFG); + + /* Set up the rest of the parameters. */ + name = (wordlength == 2) ? "NE2000" : "NE1000"; + + if (! dev->irq) { + printk(" failed to detect IRQ line.\n"); + ret = -EAGAIN; + goto err_out; + } + + /* Snarf the interrupt now. 
There's no point in waiting since we cannot + share and the board will usually be enabled. */ + ret = request_irq(dev->irq, ei_interrupt, 0, name, dev); + if (ret) { + printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret); + goto err_out; + } + + dev->base_addr = ioaddr; + + for(i = 0; i < ETHER_ADDR_LEN; i++) { + printk(" %2.2x", SA_prom[i]); + dev->dev_addr[i] = SA_prom[i]; + } + + printk("\n%s: %s found at %#x, using IRQ %d.\n", + dev->name, name, ioaddr, dev->irq); + + ei_status.name = name; + ei_status.tx_start_page = start_page; + ei_status.stop_page = stop_page; + ei_status.word16 = (wordlength == 2); + + ei_status.rx_start_page = start_page + TX_PAGES; +#ifdef PACKETBUF_MEMSIZE + /* Allow the packet buffer size to be overridden by know-it-alls. */ + ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE; +#endif + + ei_status.reset_8390 = &ne_reset_8390; + ei_status.block_input = &ne_block_input; + ei_status.block_output = &ne_block_output; + ei_status.get_8390_hdr = &ne_get_8390_hdr; + ei_status.priv = 0; + dev->open = &ne_open; + dev->stop = &ne_close; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = ei_poll; +#endif + NS8390_init(dev, 0); + return 0; + +err_out: + release_region(ioaddr, NE_IO_EXTENT); + return ret; +} + +static int ne_open(struct net_device *dev) +{ + ei_open(dev); + return 0; +} + +static int ne_close(struct net_device *dev) +{ + if (ei_debug > 1) + printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name); + ei_close(dev); + return 0; +} + +/* Hard reset the card. This used to pause for the same period that a + 8390 reset command required, but that shouldn't be necessary. */ + +static void ne_reset_8390(struct net_device *dev) +{ + unsigned long reset_start_time = jiffies; + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); + + if (ei_debug > 1) + printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); + + /* DON'T change these to inb_p/outb_p or reset will fail on clones. */ + outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET); + + ei_status.txing = 0; + ei_status.dmaing = 0; + + /* This check _should_not_ be necessary, omit eventually. */ + while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0) + if (jiffies - reset_start_time > 2*HZ/100) { + printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name); + break; + } + outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */ +} + +/* Grab the 8390 specific header. Similar to the block_input routine, but + we don't need to be concerned with ring wrap as the header will be at + the start of a page, so we optimize accordingly. */ + +static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) +{ + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); + /* This *shouldn't* happen. 
If it does, it's the last thing you'll see */ + + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD); + outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO); + outb_p(0, NE_BASE + EN0_RCNTHI); + outb_p(0, NE_BASE + EN0_RSARLO); /* On page boundary */ + outb_p(ring_page, NE_BASE + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); + + if (ei_status.word16) { + int len; + unsigned short *p = (unsigned short *)hdr; + for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--) + *p++ = inw(NE_BASE + NE_DATAPORT); + } else + insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); + + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */ + ei_status.dmaing &= ~0x01; + + le16_to_cpus(&hdr->count); +} + +/* Block input and output, similar to the Crynwr packet driver. If you + are porting to a new ethercard, look at the packet driver source for hints. + The NEx000 doesn't share the on-board packet memory -- you have to put + the packet out through the "remote DMA" dataport using outb. */ + +static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) +{ + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); +#ifdef NE_SANITY_CHECK + int xfer_count = count; +#endif + char *buf = skb->data; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_block_input " + "[DMAstat:%d][irqlock:%d].\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD); + outb_p(count & 0xff, NE_BASE + EN0_RCNTLO); + outb_p(count >> 8, NE_BASE + EN0_RCNTHI); + outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO); + outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); + if (ei_status.word16) + { + int len; + unsigned short *p = (unsigned short *)buf; + for (len = count>>1; len > 0; len--) + *p++ = inw(NE_BASE + NE_DATAPORT); + if (count & 0x01) + { + buf[count-1] = inb(NE_BASE + NE_DATAPORT); +#ifdef NE_SANITY_CHECK + xfer_count++; +#endif + } + } else { + insb(NE_BASE + NE_DATAPORT, buf, count); + } + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. If you see + this message you either 1) have a slightly incompatible clone + or 2) have noise/speed problems with your bus. */ + + if (ei_debug > 1) + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here + -- it's broken for Rx on some cards! */ + int high = inb_p(NE_BASE + EN0_RSARHI); + int low = inb_p(NE_BASE + EN0_RSARLO); + addr = (high << 8) + low; + if (((ring_offset + xfer_count) & 0xff) == low) + break; + } while (--tries > 0); + if (tries <= 0) + printk(KERN_WARNING "%s: RX transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, ring_offset + xfer_count, addr); + } +#endif + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. 
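+ Acknowledging RDC here clears the remote-DMA-complete status before the dmaing flag is dropped.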
*/ + ei_status.dmaing &= ~0x01; +} + +static void ne_block_output(struct net_device *dev, int count, + const unsigned char *buf, const int start_page) +{ + struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); + unsigned long dma_start; +#ifdef NE_SANITY_CHECK + int retries = 0; +#endif + + /* Round the count up for word writes. Do we need to do this? + What effect will an odd byte count have on the 8390? + I should check someday. */ + + if (ei_status.word16 && (count & 0x01)) + count++; + + /* This *shouldn't* happen. If it does, it's the last thing you'll see */ + if (ei_status.dmaing) + { + printk(KERN_EMERG "%s: DMAing conflict in ne_block_output." + "[DMAstat:%d][irqlock:%d]\n", + dev->name, ei_status.dmaing, ei_status.irqlock); + return; + } + ei_status.dmaing |= 0x01; + /* We should already be in page 0, but to be safe... */ + outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD); + +#ifdef NE_SANITY_CHECK +retry: +#endif + +#ifdef NE8390_RW_BUGFIX + /* Handle the read-before-write bug the same way as the + Crynwr packet driver -- the NatSemi method doesn't work. + Actually this doesn't always work either, but if you have + problems with your NEx000 this is better than nothing! */ + + outb_p(0x42, NE_BASE + EN0_RCNTLO); + outb_p(0x00, NE_BASE + EN0_RCNTHI); + outb_p(0x42, NE_BASE + EN0_RSARLO); + outb_p(0x00, NE_BASE + EN0_RSARHI); + outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD); + /* Make certain that the dummy read has occurred. */ + udelay(6); +#endif + + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); + + /* Now the normal output. */ + outb_p(count & 0xff, NE_BASE + EN0_RCNTLO); + outb_p(count >> 8, NE_BASE + EN0_RCNTHI); + outb_p(0x00, NE_BASE + EN0_RSARLO); + outb_p(start_page, NE_BASE + EN0_RSARHI); + + outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD); + if (ei_status.word16) { + int len; + unsigned short *p = (unsigned short *)buf; + for (len = count>>1; len > 0; len--) + outw(*p++, NE_BASE + NE_DATAPORT); + } else { + outsb(NE_BASE + NE_DATAPORT, buf, count); + } + + dma_start = jiffies; + +#ifdef NE_SANITY_CHECK + /* This was for the ALPHA version only, but enough people have + been encountering problems so it is still here. */ + + if (ei_debug > 1) + { + /* DMA termination address check... */ + int addr, tries = 20; + do { + int high = inb_p(NE_BASE + EN0_RSARHI); + int low = inb_p(NE_BASE + EN0_RSARLO); + addr = (high << 8) + low; + if ((start_page << 8) + count == addr) + break; + } while (--tries > 0); + + if (tries <= 0) + { + printk(KERN_WARNING "%s: Tx packet transfer address mismatch," + "%#4.4x (expected) vs. %#4.4x (actual).\n", + dev->name, (start_page << 8) + count, addr); + if (retries++ == 0) + goto retry; + } + } +#endif + + while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0) + if (jiffies - dma_start > 2*HZ/100) { /* 20ms */ + printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name); + ne_reset_8390(dev); + NS8390_init(dev,1); + break; + } + + outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. 
*/ + ei_status.dmaing &= ~0x01; + return; +} + + +#ifdef MODULE +#define MAX_NE_CARDS 1 /* Max number of NE cards per module */ +static struct net_device *dev_ne[MAX_NE_CARDS]; +static int io[MAX_NE_CARDS]; +static int irq[MAX_NE_CARDS]; +static int bad[MAX_NE_CARDS]; /* 0xbad = bad sig or no reset ack */ + +MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NE_CARDS) "i"); +MODULE_PARM(irq, "1-" __MODULE_STRING(MAX_NE_CARDS) "i"); +MODULE_PARM(bad, "1-" __MODULE_STRING(MAX_NE_CARDS) "i"); +MODULE_PARM_DESC(io, "I/O base address(es)"); +MODULE_PARM_DESC(irq, "IRQ number(s)"); +MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver"); +MODULE_LICENSE("GPL"); + +/* This is set up so that no ISA autoprobe takes place. We can't guarantee +that the ne2k probe is the last 8390 based probe to take place (as it +is at boot) and so the probe will get confused by any other 8390 cards. +ISA device autoprobes on a running machine are not recommended anyway. */ + +int init_module(void) +{ + int this_dev, found = 0; + int err; + + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + struct net_device *dev = alloc_ei_netdev(); + if (!dev) + break; + if (io[this_dev]) { + dev->irq = irq[this_dev]; + dev->mem_end = bad[this_dev]; + dev->base_addr = io[this_dev]; + } else { + dev->base_addr = h8300_ne_base[this_dev]; + dev->irq = h8300_ne_irq[this_dev]; + } + err = init_reg_offset(dev, dev->base_addr); + if (!err) { + if (do_ne_probe(dev) == 0) { + if (register_netdev(dev) == 0) { + dev_ne[found++] = dev; + continue; + } + cleanup_card(dev); + } + } + free_netdev(dev); + if (found) + break; + if (io[this_dev] != 0) + printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr); + else + printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n"); + return -ENXIO; + } + if (found) + return 0; + return -ENODEV; +} + +void cleanup_module(void) +{ + int this_dev; + + for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { + struct net_device *dev = dev_ne[this_dev]; + if (dev) { + unregister_netdev(dev); + cleanup_card(dev); + free_netdev(dev); + } + } +} +#endif /* MODULE */ diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c new file mode 100644 index 000000000..85fae0965 --- /dev/null +++ b/drivers/net/smc91x.c @@ -0,0 +1,2172 @@ +/* + * smc91x.c + * This is a driver for SMSC's 91C9x/91C1xx single-chip Ethernet devices. + * + * Copyright (C) 1996 by Erik Stahlman + * Copyright (C) 2001 Standard Microsystems Corporation + * Developed by Simple Network Magic Corporation + * Copyright (C) 2003 Monta Vista Software, Inc. + * Unified SMC91x driver by Nicolas Pitre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Arguments: + * io = for the base address + * irq = for the IRQ + * nowait = 0 for normal wait states, 1 eliminates additional wait states + * + * original author: + * Erik Stahlman + * + * hardware multicast code: + * Peter Cammaert + * + * contributors: + * Daris A Nevil + * Nicolas Pitre + * Russell King + * + * History: + * 08/20/00 Arnaldo Melo fix kfree(skb) in smc_hardware_send_packet + * 12/15/00 Christian Jullien fix "Warning: kfree_skb on hard IRQ" + * 03/16/01 Daris A Nevil modified smc9194.c for use with LAN91C111 + * 08/22/01 Scott Anderson merge changes from smc9194 to smc91111 + * 08/21/01 Pramod B Bhardwaj added support for RevB of LAN91C111 + * 12/20/01 Jeff Sutherland initial port to Xscale PXA with DMA support + * 04/07/03 Nicolas Pitre unified SMC91x driver, killed irq races, + * more bus abstraction, big cleanup, etc. + * 29/09/03 Russell King - add driver model support + * - ethtool support + * - convert to use generic MII interface + * - add link up/down notification + * - don't try to handle full negotiation in + * smc_phy_configure + * - clean up (and fix stack overrun) in PHY + * MII read/write functions + */ +static const char version[] = + "smc91x.c: v1.0, mar 07 2003 by Nicolas Pitre \n"; + +/* Debugging level */ +#ifndef SMC_DEBUG +#define SMC_DEBUG 0 +#endif + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "smc91x.h" + +#ifdef CONFIG_ISA +/* + * the LAN91C111 can be at any of the following port addresses. To change, + * for a slightly different card, you can add it to the array. Keep in + * mind that the array must end in zero. + */ +static unsigned int smc_portlist[] __initdata = { + 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0, 0x2C0, 0x2E0, + 0x300, 0x320, 0x340, 0x360, 0x380, 0x3A0, 0x3C0, 0x3E0, 0 +}; + +#ifndef SMC_IOADDR +# define SMC_IOADDR -1 +#endif +static unsigned long io = SMC_IOADDR; +module_param(io, ulong, 0400); +MODULE_PARM_DESC(io, "I/O base address"); + +#ifndef SMC_IRQ +# define SMC_IRQ -1 +#endif +static int irq = SMC_IRQ; +module_param(irq, int, 0400); +MODULE_PARM_DESC(irq, "IRQ number"); + +#endif /* CONFIG_ISA */ + +#ifndef SMC_NOWAIT +# define SMC_NOWAIT 0 +#endif +static int nowait = SMC_NOWAIT; +module_param(nowait, int, 0400); +MODULE_PARM_DESC(nowait, "set to 1 for no wait state"); + +/* + * Transmit timeout, default 5 seconds. + */ +static int watchdog = 5000; +module_param(watchdog, int, 0400); +MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); + +MODULE_LICENSE("GPL"); + +/* + * The internal workings of the driver. If you are changing anything + * here with the SMC stuff, you should have the datasheet and know + * what you are doing. + */ +#define CARDNAME "smc91x" + +/* + * Use power-down feature of the chip + */ +#define POWER_DOWN 1 + +/* + * Wait time for memory to be free. This probably shouldn't be + * tuned that much, as waiting for this means nothing else happens + * in the system + */ +#define MEMORY_WAIT_TIME 16 + +/* + * This selects whether TX packets are sent one by one to the SMC91x internal + * memory and throttled until transmission completes. 
This may prevent + * RX overruns a litle by keeping much of the memory free for RX packets + * but to the expense of reduced TX throughput and increased IRQ overhead. + * Note this is not a cure for a too slow data bus or too high IRQ latency. + */ +#define THROTTLE_TX_PKTS 0 + +/* + * The MII clock high/low times. 2x this number gives the MII clock period + * in microseconds. (was 50, but this gives 6.4ms for each MII transaction!) + */ +#define MII_DELAY 1 + +/* store this information for the driver.. */ +struct smc_local { + /* + * If I have to wait until memory is available to send a + * packet, I will store the skbuff here, until I get the + * desired memory. Then, I'll send it out and free it. + */ + struct sk_buff *saved_skb; + + /* + * these are things that the kernel wants me to keep, so users + * can find out semi-useless statistics of how well the card is + * performing + */ + struct net_device_stats stats; + + /* version/revision of the SMC91x chip */ + int version; + + /* Contains the current active transmission mode */ + int tcr_cur_mode; + + /* Contains the current active receive mode */ + int rcr_cur_mode; + + /* Contains the current active receive/phy mode */ + int rpc_cur_mode; + int ctl_rfduplx; + int ctl_rspeed; + + u32 msg_enable; + u32 phy_type; + struct mii_if_info mii; + spinlock_t lock; + +#ifdef SMC_USE_PXA_DMA + /* DMA needs the physical address of the chip */ + u_long physaddr; +#endif +}; + +#if SMC_DEBUG > 0 +#define DBG(n, args...) \ + do { \ + if (SMC_DEBUG >= (n)) \ + printk(KERN_DEBUG args); \ + } while (0) + +#define PRINTK(args...) printk(args) +#else +#define DBG(n, args...) do { } while(0) +#define PRINTK(args...) printk(KERN_DEBUG args) +#endif + +#if SMC_DEBUG > 3 +static void PRINT_PKT(u_char *buf, int length) +{ + int i; + int remainder; + int lines; + + lines = length / 16; + remainder = length % 16; + + for (i = 0; i < lines ; i ++) { + int cur; + for (cur = 0; cur < 8; cur++) { + u_char a, b; + a = *buf++; + b = *buf++; + printk("%02x%02x ", a, b); + } + printk("\n"); + } + for (i = 0; i < remainder/2 ; i++) { + u_char a, b; + a = *buf++; + b = *buf++; + printk("%02x%02x ", a, b); + } + printk("\n"); +} +#else +#define PRINT_PKT(x...) do { } while(0) +#endif + + +/* this enables an interrupt in the interrupt mask register */ +#define SMC_ENABLE_INT(x) do { \ + unsigned long flags; \ + unsigned char mask; \ + spin_lock_irqsave(&lp->lock, flags); \ + mask = SMC_GET_INT_MASK(); \ + mask |= (x); \ + SMC_SET_INT_MASK(mask); \ + spin_unlock_irqrestore(&lp->lock, flags); \ +} while (0) + +/* this disables an interrupt from the interrupt mask register */ +#define SMC_DISABLE_INT(x) do { \ + unsigned long flags; \ + unsigned char mask; \ + spin_lock_irqsave(&lp->lock, flags); \ + mask = SMC_GET_INT_MASK(); \ + mask &= ~(x); \ + SMC_SET_INT_MASK(mask); \ + spin_unlock_irqrestore(&lp->lock, flags); \ +} while (0) + +/* + * Wait while MMU is busy. This is usually in the order of a few nanosecs + * if at all, but let's avoid deadlocking the system if the hardware + * decides to go south. 
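+ * The wait is therefore bounded: after about two jiffies the macro prints a timeout message and gives up instead of spinning forever.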
+ */ +#define SMC_WAIT_MMU_BUSY() do { \ + if (unlikely(SMC_GET_MMU_CMD() & MC_BUSY)) { \ + unsigned long timeout = jiffies + 2; \ + while (SMC_GET_MMU_CMD() & MC_BUSY) { \ + if (time_after(jiffies, timeout)) { \ + printk("%s: timeout %s line %d\n", \ + dev->name, __FILE__, __LINE__); \ + break; \ + } \ + cpu_relax(); \ + } \ + } \ +} while (0) + + +/* + * this does a soft reset on the device + */ +static void smc_reset(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + unsigned int ctl, cfg; + + DBG(2, "%s: %s\n", dev->name, __FUNCTION__); + + /* + * This resets the registers mostly to defaults, but doesn't + * affect EEPROM. That seems unnecessary + */ + SMC_SELECT_BANK(0); + SMC_SET_RCR(RCR_SOFTRST); + + /* + * Setup the Configuration Register + * This is necessary because the CONFIG_REG is not affected + * by a soft reset + */ + SMC_SELECT_BANK(1); + + cfg = CONFIG_DEFAULT; + + /* + * Setup for fast accesses if requested. If the card/system + * can't handle it then there will be no recovery except for + * a hard reset or power cycle + */ + if (nowait) + cfg |= CONFIG_NO_WAIT; + + /* + * Release from possible power-down state + * Configuration register is not affected by Soft Reset + */ + cfg |= CONFIG_EPH_POWER_EN; + + SMC_SET_CONFIG(cfg); + + /* this should pause enough for the chip to be happy */ + /* + * elaborate? What does the chip _need_? --jgarzik + * + * This seems to be undocumented, but something the original + * driver(s) have always done. Suspect undocumented timing + * info/determined empirically. --rmk + */ + udelay(1); + + /* Disable transmit and receive functionality */ + SMC_SELECT_BANK(0); + SMC_SET_RCR(RCR_CLEAR); + SMC_SET_TCR(TCR_CLEAR); + + SMC_SELECT_BANK(1); + ctl = SMC_GET_CTL() | CTL_LE_ENABLE; + + /* + * Set the control register to automatically release successfully + * transmitted packets, to make the best use out of our limited + * memory + */ +#if ! THROTTLE_TX_PKTS + ctl |= CTL_AUTO_RELEASE; +#else + ctl &= ~CTL_AUTO_RELEASE; +#endif + SMC_SET_CTL(ctl); + + /* Disable all interrupts */ + SMC_SELECT_BANK(2); + SMC_SET_INT_MASK(0); + + /* Reset the MMU */ + SMC_SET_MMU_CMD(MC_RESET); + SMC_WAIT_MMU_BUSY(); +} + +/* + * Enable Interrupts, Receive, and Transmit + */ +static void smc_enable(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + struct smc_local *lp = netdev_priv(dev); + int mask; + + DBG(2, "%s: %s\n", dev->name, __FUNCTION__); + + /* see the header file for options in TCR/RCR DEFAULT */ + SMC_SELECT_BANK(0); + SMC_SET_TCR(lp->tcr_cur_mode); + SMC_SET_RCR(lp->rcr_cur_mode); + + /* now, enable interrupts */ + mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT; + if (lp->version >= (CHIP_91100 << 4)) + mask |= IM_MDINT; + SMC_SELECT_BANK(2); + SMC_SET_INT_MASK(mask); +} + +/* + * this puts the device in an inactive state + */ +static void smc_shutdown(unsigned long ioaddr) +{ + DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); + + /* no more interrupts for me */ + SMC_SELECT_BANK(2); + SMC_SET_INT_MASK(0); + + /* and tell the card to stay away from that nasty outside world */ + SMC_SELECT_BANK(0); + SMC_SET_RCR(RCR_CLEAR); + SMC_SET_TCR(TCR_CLEAR); + +#ifdef POWER_DOWN + /* finally, shut the chip down */ + SMC_SELECT_BANK(1); + SMC_SET_CONFIG(SMC_GET_CONFIG() & ~CONFIG_EPH_POWER_EN); +#endif +} + +/* + * This is the procedure to handle the receipt of a packet. 
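+ * One frame is popped from the RX FIFO; a frame with an error status only bumps the error counters, a good frame is copied into a freshly allocated skb and handed to netif_rx(), and the chip's packet memory is released in either case.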
+ */ +static inline void smc_rcv(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + unsigned int packet_number, status, packet_len; + + DBG(3, "%s: %s\n", dev->name, __FUNCTION__); + + packet_number = SMC_GET_RXFIFO(); + if (unlikely(packet_number & RXFIFO_REMPTY)) { + PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name); + return; + } + + /* read from start of packet */ + SMC_SET_PTR(PTR_READ | PTR_RCV | PTR_AUTOINC); + + /* First two words are status and packet length */ + SMC_GET_PKT_HDR(status, packet_len); + packet_len &= 0x07ff; /* mask off top bits */ + DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n", + dev->name, packet_number, status, + packet_len, packet_len); + + if (unlikely(status & RS_ERRORS)) { + lp->stats.rx_errors++; + if (status & RS_ALGNERR) + lp->stats.rx_frame_errors++; + if (status & (RS_TOOSHORT | RS_TOOLONG)) + lp->stats.rx_length_errors++; + if (status & RS_BADCRC) + lp->stats.rx_crc_errors++; + } else { + struct sk_buff *skb; + unsigned char *data; + unsigned int data_len; + + /* set multicast stats */ + if (status & RS_MULTICAST) + lp->stats.multicast++; + + /* + * Actual payload is packet_len - 4 (or 3 if odd byte). + * We want skb_reserve(2) and the final ctrl word + * (2 bytes, possibly containing the payload odd byte). + * Ence packet_len - 4 + 2 + 2. + */ + skb = dev_alloc_skb(packet_len); + if (unlikely(skb == NULL)) { + printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", + dev->name); + lp->stats.rx_dropped++; + goto done; + } + + /* Align IP header to 32 bits */ + skb_reserve(skb, 2); + + /* BUG: the LAN91C111 rev A never sets this bit. Force it. */ + if (lp->version == 0x90) + status |= RS_ODDFRAME; + + /* + * If odd length: packet_len - 3, + * otherwise packet_len - 4. + */ + data_len = packet_len - ((status & RS_ODDFRAME) ? 3 : 4); + data = skb_put(skb, data_len); + SMC_PULL_DATA(data, packet_len - 2); + + PRINT_PKT(data, packet_len - 2); + + dev->last_rx = jiffies; + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); + netif_rx(skb); + lp->stats.rx_packets++; + lp->stats.rx_bytes += data_len; + } + +done: + SMC_WAIT_MMU_BUSY(); + SMC_SET_MMU_CMD(MC_RELEASE); +} + +/* + * This is called to actually send a packet to the chip. + * Returns non-zero when successful. + */ +static void smc_hardware_send_packet(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + struct sk_buff *skb = lp->saved_skb; + unsigned int packet_no, len; + unsigned char *buf; + + DBG(3, "%s: %s\n", dev->name, __FUNCTION__); + + packet_no = SMC_GET_AR(); + if (unlikely(packet_no & AR_FAILED)) { + printk("%s: Memory allocation failed.\n", dev->name); + lp->saved_skb = NULL; + lp->stats.tx_errors++; + lp->stats.tx_fifo_errors++; + dev_kfree_skb_any(skb); + return; + } + + /* point to the beginning of the packet */ + SMC_SET_PN(packet_no); + SMC_SET_PTR(PTR_AUTOINC); + + buf = skb->data; + len = skb->len; + DBG(2, "%s: TX PNR 0x%x LENGTH 0x%04x (%d) BUF 0x%p\n", + dev->name, packet_no, len, len, buf); + PRINT_PKT(buf, len); + + /* + * Send the packet length (+6 for status words, length, and ctl. + * The card will pad to 64 bytes with zeroes if packet is too small. + */ + SMC_PUT_PKT_HDR(0, len + 6); + + /* send the actual data */ + SMC_PUSH_DATA(buf, len & ~1); + + /* Send final ctl word with the last byte if there is one */ + SMC_outw(((len & 1) ? 
(0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG); + + /* and let the chipset deal with it */ + SMC_SET_MMU_CMD(MC_ENQUEUE); + SMC_ACK_INT(IM_TX_EMPTY_INT); + + dev->trans_start = jiffies; + dev_kfree_skb_any(skb); + lp->saved_skb = NULL; + lp->stats.tx_packets++; + lp->stats.tx_bytes += len; +} + +/* + * Since I am not sure if I will have enough room in the chip's ram + * to store the packet, I call this routine which either sends it + * now, or set the card to generates an interrupt when ready + * for the packet. + */ +static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + unsigned int numPages, poll_count, status, saved_bank; + + DBG(3, "%s: %s\n", dev->name, __FUNCTION__); + + BUG_ON(lp->saved_skb != NULL); + lp->saved_skb = skb; + + /* + * The MMU wants the number of pages to be the number of 256 bytes + * 'pages', minus 1 (since a packet can't ever have 0 pages :)) + * + * The 91C111 ignores the size bits, but earlier models don't. + * + * Pkt size for allocating is data length +6 (for additional status + * words, length and ctl) + * + * If odd size then last byte is included in ctl word. + */ + numPages = ((skb->len & ~1) + (6 - 1)) >> 8; + if (unlikely(numPages > 7)) { + printk("%s: Far too big packet error.\n", dev->name); + lp->saved_skb = NULL; + lp->stats.tx_errors++; + lp->stats.tx_dropped++; + dev_kfree_skb(skb); + return 0; + } + + /* now, try to allocate the memory */ + saved_bank = SMC_CURRENT_BANK(); + SMC_SELECT_BANK(2); + SMC_SET_MMU_CMD(MC_ALLOC | numPages); + + /* + * Poll the chip for a short amount of time in case the + * allocation succeeds quickly. + */ + poll_count = MEMORY_WAIT_TIME; + do { + status = SMC_GET_INT(); + if (status & IM_ALLOC_INT) { + SMC_ACK_INT(IM_ALLOC_INT); + break; + } + } while (--poll_count); + + if (!poll_count) { + /* oh well, wait until the chip finds memory later */ + netif_stop_queue(dev); + DBG(2, "%s: TX memory allocation deferred.\n", dev->name); + SMC_ENABLE_INT(IM_ALLOC_INT); + } else { + /* + * Allocation succeeded: push packet to the chip's own memory + * immediately. + * + * If THROTTLE_TX_PKTS is selected that means we don't want + * more than a single TX packet taking up space in the chip's + * internal memory at all time, in which case we stop the + * queue right here until we're notified of TX completion. + * + * Otherwise we're quite happy to feed more TX packets right + * away for better TX throughput, in which case the queue is + * left active. + */ +#if THROTTLE_TX_PKTS + netif_stop_queue(dev); +#endif + smc_hardware_send_packet(dev); + SMC_ENABLE_INT(IM_TX_INT | IM_TX_EMPTY_INT); + } + + SMC_SELECT_BANK(saved_bank); + return 0; +} + +/* + * This handles a TX interrupt, which is only called when: + * - a TX error occurred, or + * - CTL_AUTO_RELEASE is not set and TX of a packet completed. 
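+ * It reads back the status word of the packet at the head of the TX FIFO, updates the error counters (warning about late collisions in particular), frees that packet from chip memory and then rewrites TCR to re-enable the transmitter.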
+ */ +static void smc_tx(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + struct smc_local *lp = netdev_priv(dev); + unsigned int saved_packet, packet_no, tx_status, pkt_len; + + DBG(3, "%s: %s\n", dev->name, __FUNCTION__); + + /* If the TX FIFO is empty then nothing to do */ + packet_no = SMC_GET_TXFIFO(); + if (unlikely(packet_no & TXFIFO_TEMPTY)) { + PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name); + return; + } + + /* select packet to read from */ + saved_packet = SMC_GET_PN(); + SMC_SET_PN(packet_no); + + /* read the first word (status word) from this packet */ + SMC_SET_PTR(PTR_AUTOINC | PTR_READ); + SMC_GET_PKT_HDR(tx_status, pkt_len); + DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n", + dev->name, tx_status, packet_no); + + if (!(tx_status & TS_SUCCESS)) + lp->stats.tx_errors++; + if (tx_status & TS_LOSTCAR) + lp->stats.tx_carrier_errors++; + + if (tx_status & TS_LATCOL) { + PRINTK("%s: late collision occurred on last xmit\n", dev->name); + lp->stats.tx_window_errors++; + if (!(lp->stats.tx_window_errors & 63) && net_ratelimit()) { + printk(KERN_INFO "%s: unexpectedly large numbers of " + "late collisions. Please check duplex " + "setting.\n", dev->name); + } + } + + /* kill the packet */ + SMC_WAIT_MMU_BUSY(); + SMC_SET_MMU_CMD(MC_FREEPKT); + + /* Don't restore Packet Number Reg until busy bit is cleared */ + SMC_WAIT_MMU_BUSY(); + SMC_SET_PN(saved_packet); + + /* re-enable transmit */ + SMC_SELECT_BANK(0); + SMC_SET_TCR(lp->tcr_cur_mode); + SMC_SELECT_BANK(2); +} + + +/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ + +static void smc_mii_out(struct net_device *dev, unsigned int val, int bits) +{ + unsigned long ioaddr = dev->base_addr; + unsigned int mii_reg, mask; + + mii_reg = SMC_GET_MII() & ~(MII_MCLK | MII_MDOE | MII_MDO); + mii_reg |= MII_MDOE; + + for (mask = 1 << (bits - 1); mask; mask >>= 1) { + if (val & mask) + mii_reg |= MII_MDO; + else + mii_reg &= ~MII_MDO; + + SMC_SET_MII(mii_reg); + udelay(MII_DELAY); + SMC_SET_MII(mii_reg | MII_MCLK); + udelay(MII_DELAY); + } +} + +static unsigned int smc_mii_in(struct net_device *dev, int bits) +{ + unsigned long ioaddr = dev->base_addr; + unsigned int mii_reg, mask, val; + + mii_reg = SMC_GET_MII() & ~(MII_MCLK | MII_MDOE | MII_MDO); + SMC_SET_MII(mii_reg); + + for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) { + if (SMC_GET_MII() & MII_MDI) + val |= mask; + + SMC_SET_MII(mii_reg); + udelay(MII_DELAY); + SMC_SET_MII(mii_reg | MII_MCLK); + udelay(MII_DELAY); + } + + return val; +} + +/* + * Reads a register from the MII Management serial interface + */ +static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg) +{ + unsigned long ioaddr = dev->base_addr; + unsigned int phydata, old_bank; + + /* Save the current bank, and select bank 3 */ + old_bank = SMC_CURRENT_BANK(); + SMC_SELECT_BANK(3); + + /* Idle - 32 ones */ + smc_mii_out(dev, 0xffffffff, 32); + + /* Start code (01) + read (10) + phyaddr + phyreg */ + smc_mii_out(dev, 6 << 10 | phyaddr << 5 | phyreg, 14); + + /* Turnaround (2bits) + phydata */ + phydata = smc_mii_in(dev, 18); + + /* Return to idle state */ + SMC_SET_MII(SMC_GET_MII() & ~(MII_MCLK|MII_MDOE|MII_MDO)); + + /* And select original bank */ + SMC_SELECT_BANK(old_bank); + + DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", + __FUNCTION__, phyaddr, phyreg, phydata); + + return phydata; +} + +/* + * Writes a register to the MII Management serial interface + */ +static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg, 
+ int phydata) +{ + unsigned long ioaddr = dev->base_addr; + unsigned int old_bank; + + /* Save the current bank, and select bank 3 */ + old_bank = SMC_CURRENT_BANK(); + SMC_SELECT_BANK(3); + + /* Idle - 32 ones */ + smc_mii_out(dev, 0xffffffff, 32); + + /* Start code (01) + write (01) + phyaddr + phyreg + turnaround + phydata */ + smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32); + + /* Return to idle state */ + SMC_SET_MII(SMC_GET_MII() & ~(MII_MCLK|MII_MDOE|MII_MDO)); + + /* And select original bank */ + SMC_SELECT_BANK(old_bank); + + DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", + __FUNCTION__, phyaddr, phyreg, phydata); +} + +/* + * Finds and reports the PHY address + */ +static void smc_detect_phy(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + int phyaddr; + + DBG(2, "%s: %s\n", dev->name, __FUNCTION__); + + lp->phy_type = 0; + + /* + * Scan all 32 PHY addresses if necessary, starting at + * PHY#1 to PHY#31, and then PHY#0 last. + */ + for (phyaddr = 1; phyaddr < 33; ++phyaddr) { + unsigned int id1, id2; + + /* Read the PHY identifiers */ + id1 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID1); + id2 = smc_phy_read(dev, phyaddr & 31, MII_PHYSID2); + + DBG(3, "%s: phy_id1=0x%x, phy_id2=0x%x\n", + dev->name, id1, id2); + + /* Make sure it is a valid identifier */ + if (id1 != 0x0000 && id1 != 0xffff && id1 != 0x8000 && + id2 != 0x0000 && id2 != 0xffff && id2 != 0x8000) { + /* Save the PHY's address */ + lp->mii.phy_id = phyaddr & 31; + lp->phy_type = id1 << 16 | id2; + break; + } + } +} + +/* + * Sets the PHY to a configuration as determined by the user + */ +static int smc_phy_fixed(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + int phyaddr = lp->mii.phy_id; + int bmcr, cfg1; + + DBG(3, "%s: %s\n", dev->name, __FUNCTION__); + + /* Enter Link Disable state */ + cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG); + cfg1 |= PHY_CFG1_LNKDIS; + smc_phy_write(dev, phyaddr, PHY_CFG1_REG, cfg1); + + /* + * Set our fixed capabilities + * Disable auto-negotiation + */ + bmcr = 0; + + if (lp->ctl_rfduplx) + bmcr |= BMCR_FULLDPLX; + + if (lp->ctl_rspeed == 100) + bmcr |= BMCR_SPEED100; + + /* Write our capabilities to the phy control register */ + smc_phy_write(dev, phyaddr, MII_BMCR, bmcr); + + /* Re-Configure the Receive/Phy Control register */ + SMC_SET_RPC(lp->rpc_cur_mode); + + return 1; +} + +/* + * smc_phy_reset - reset the phy + * @dev: net device + * @phy: phy address + * + * Issue a software reset for the specified PHY and + * wait up to 100ms for the reset to complete. We should + * not access the PHY for 50ms after issuing the reset. + * + * The time to wait appears to be dependent on the PHY. + * + * Must be called with lp->lock locked. 
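+ * The lock is dropped around each 50ms sleep and re-taken before BMCR is polled again; the return value is non-zero if the reset bit never cleared.
+ *
+ * Illustrative call sequence (a sketch only; smc_phy_configure() below does essentially this):
+ *
+ *	spin_lock_irq(&lp->lock);
+ *	if (smc_phy_reset(dev, lp->mii.phy_id))
+ *		printk("%s: PHY reset timed out\n", dev->name);
+ *	spin_unlock_irq(&lp->lock);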
+ */ +static int smc_phy_reset(struct net_device *dev, int phy) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned int bmcr; + int timeout; + + smc_phy_write(dev, phy, MII_BMCR, BMCR_RESET); + + for (timeout = 2; timeout; timeout--) { + spin_unlock_irq(&lp->lock); + msleep(50); + spin_lock_irq(&lp->lock); + + bmcr = smc_phy_read(dev, phy, MII_BMCR); + if (!(bmcr & BMCR_RESET)) + break; + } + + return bmcr & BMCR_RESET; +} + +/* + * smc_phy_powerdown - powerdown phy + * @dev: net device + * @phy: phy address + * + * Power down the specified PHY + */ +static void smc_phy_powerdown(struct net_device *dev, int phy) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned int bmcr; + + spin_lock_irq(&lp->lock); + bmcr = smc_phy_read(dev, phy, MII_BMCR); + smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); + spin_unlock_irq(&lp->lock); +} + +/* + * smc_phy_check_media - check the media status and adjust TCR + * @dev: net device + * @init: set true for initialisation + * + * Select duplex mode depending on negotiation state. This + * also updates our carrier state. + */ +static void smc_phy_check_media(struct net_device *dev, int init) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + + if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { + unsigned int old_bank; + + /* duplex state has changed */ + if (lp->mii.full_duplex) { + lp->tcr_cur_mode |= TCR_SWFDUP; + } else { + lp->tcr_cur_mode &= ~TCR_SWFDUP; + } + + old_bank = SMC_CURRENT_BANK(); + SMC_SELECT_BANK(0); + SMC_SET_TCR(lp->tcr_cur_mode); + SMC_SELECT_BANK(old_bank); + } +} + +/* + * Configures the specified PHY through the MII management interface + * using Autonegotiation. + * Calls smc_phy_fixed() if the user has requested a certain config. + * If RPC ANEG bit is set, the media selection is dependent purely on + * the selection by the MII (either in the MII BMCR reg or the result + * of autonegotiation.) If the RPC ANEG bit is cleared, the selection + * is controlled by the RPC SPEED and RPC DPLX bits. + */ +static void smc_phy_configure(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + int phyaddr = lp->mii.phy_id; + int my_phy_caps; /* My PHY capabilities */ + int my_ad_caps; /* My Advertised capabilities */ + int status; + + DBG(3, "%s:smc_program_phy()\n", dev->name); + + spin_lock_irq(&lp->lock); + + /* + * We should not be called if phy_type is zero. 
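+ * Check it anyway and silently drop out if no PHY was detected.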
+ */ + if (lp->phy_type == 0) + goto smc_phy_configure_exit; + + if (smc_phy_reset(dev, phyaddr)) { + printk("%s: PHY reset timed out\n", dev->name); + goto smc_phy_configure_exit; + } + + /* + * Enable PHY Interrupts (for register 18) + * Interrupts listed here are disabled + */ + smc_phy_write(dev, phyaddr, PHY_MASK_REG, + PHY_INT_LOSSSYNC | PHY_INT_CWRD | PHY_INT_SSD | + PHY_INT_ESD | PHY_INT_RPOL | PHY_INT_JAB | + PHY_INT_SPDDET | PHY_INT_DPLXDET); + + /* Configure the Receive/Phy Control register */ + SMC_SELECT_BANK(0); + SMC_SET_RPC(lp->rpc_cur_mode); + + /* If the user requested no auto neg, then go set his request */ + if (lp->mii.force_media) { + smc_phy_fixed(dev); + goto smc_phy_configure_exit; + } + + /* Copy our capabilities from MII_BMSR to MII_ADVERTISE */ + my_phy_caps = smc_phy_read(dev, phyaddr, MII_BMSR); + + if (!(my_phy_caps & BMSR_ANEGCAPABLE)) { + printk(KERN_INFO "Auto negotiation NOT supported\n"); + smc_phy_fixed(dev); + goto smc_phy_configure_exit; + } + + my_ad_caps = ADVERTISE_CSMA; /* I am CSMA capable */ + + if (my_phy_caps & BMSR_100BASE4) + my_ad_caps |= ADVERTISE_100BASE4; + if (my_phy_caps & BMSR_100FULL) + my_ad_caps |= ADVERTISE_100FULL; + if (my_phy_caps & BMSR_100HALF) + my_ad_caps |= ADVERTISE_100HALF; + if (my_phy_caps & BMSR_10FULL) + my_ad_caps |= ADVERTISE_10FULL; + if (my_phy_caps & BMSR_10HALF) + my_ad_caps |= ADVERTISE_10HALF; + + /* Disable capabilities not selected by our user */ + if (lp->ctl_rspeed != 100) + my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF); + + if (!lp->ctl_rfduplx) + my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL); + + /* Update our Auto-Neg Advertisement Register */ + smc_phy_write(dev, phyaddr, MII_ADVERTISE, my_ad_caps); + lp->mii.advertising = my_ad_caps; + + /* + * Read the register back. Without this, it appears that when + * auto-negotiation is restarted, sometimes it isn't ready and + * the link does not come up. + */ + status = smc_phy_read(dev, phyaddr, MII_ADVERTISE); + + DBG(2, "%s: phy caps=%x\n", dev->name, my_phy_caps); + DBG(2, "%s: phy advertised caps=%x\n", dev->name, my_ad_caps); + + /* Restart auto-negotiation process in order to advertise my caps */ + smc_phy_write(dev, phyaddr, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART); + + smc_phy_check_media(dev, 1); + +smc_phy_configure_exit: + spin_unlock_irq(&lp->lock); +} + +/* + * smc_phy_interrupt + * + * Purpose: Handle interrupts relating to PHY register 18. This is + * called from the "hard" interrupt handler under our private spinlock. + */ +static void smc_phy_interrupt(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + int phyaddr = lp->mii.phy_id; + int phy18; + + DBG(2, "%s: %s\n", dev->name, __FUNCTION__); + + if (lp->phy_type == 0) + return; + + for(;;) { + smc_phy_check_media(dev, 0); + + /* Read PHY Register 18, Status Output */ + phy18 = smc_phy_read(dev, phyaddr, PHY_INT_REG); + if ((phy18 & PHY_INT_INT) == 0) + break; + } +} + +/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/ + +static void smc_10bt_check_media(struct net_device *dev, int init) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + unsigned int old_carrier, new_carrier, old_bank; + + old_bank = SMC_CURRENT_BANK(); + SMC_SELECT_BANK(0); + old_carrier = netif_carrier_ok(dev) ? 1 : 0; + new_carrier = SMC_inw(ioaddr, EPH_STATUS_REG) & ES_LINK_OK ? 
1 : 0; + + if (init || (old_carrier != new_carrier)) { + if (!new_carrier) { + netif_carrier_off(dev); + } else { + netif_carrier_on(dev); + } + if (netif_msg_link(lp)) + printk(KERN_INFO "%s: link %s\n", dev->name, + new_carrier ? "up" : "down"); + } + SMC_SELECT_BANK(old_bank); +} + +static void smc_eph_interrupt(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + unsigned int old_bank, ctl; + + smc_10bt_check_media(dev, 0); + + old_bank = SMC_CURRENT_BANK(); + SMC_SELECT_BANK(1); + + ctl = SMC_GET_CTL(); + SMC_SET_CTL(ctl & ~CTL_LE_ENABLE); + SMC_SET_CTL(ctl); + + SMC_SELECT_BANK(old_bank); +} + +/* + * This is the main routine of the driver, to handle the device when + * it needs some attention. + */ +static irqreturn_t smc_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + unsigned long ioaddr = dev->base_addr; + struct smc_local *lp = netdev_priv(dev); + int status, mask, timeout, card_stats; + int saved_bank, saved_pointer; + + DBG(3, "%s: %s\n", dev->name, __FUNCTION__); + + saved_bank = SMC_CURRENT_BANK(); + SMC_SELECT_BANK(2); + saved_pointer = SMC_GET_PTR(); + mask = SMC_GET_INT_MASK(); + SMC_SET_INT_MASK(0); + + /* set a timeout value, so I don't stay here forever */ + timeout = 8; + + do { + status = SMC_GET_INT(); + + DBG(2, "%s: IRQ 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n", + dev->name, status, mask, + ({ int meminfo; SMC_SELECT_BANK(0); + meminfo = SMC_GET_MIR(); + SMC_SELECT_BANK(2); meminfo; }), + SMC_GET_FIFO()); + + status &= mask; + if (!status) + break; + + spin_lock(&lp->lock); + + if (status & IM_RCV_INT) { + DBG(3, "%s: RX irq\n", dev->name); + smc_rcv(dev); + } else if (status & IM_TX_INT) { + DBG(3, "%s: TX int\n", dev->name); + smc_tx(dev); + SMC_ACK_INT(IM_TX_INT); +#if THROTTLE_TX_PKTS + netif_wake_queue(dev); +#endif + } else if (status & IM_ALLOC_INT) { + DBG(3, "%s: Allocation irq\n", dev->name); + smc_hardware_send_packet(dev); + mask |= (IM_TX_INT | IM_TX_EMPTY_INT); + mask &= ~IM_ALLOC_INT; +#if ! THROTTLE_TX_PKTS + netif_wake_queue(dev); +#endif + } else if (status & IM_TX_EMPTY_INT) { + DBG(3, "%s: TX empty\n", dev->name); + mask &= ~IM_TX_EMPTY_INT; + + /* update stats */ + SMC_SELECT_BANK(0); + card_stats = SMC_GET_COUNTER(); + SMC_SELECT_BANK(2); + + /* single collisions */ + lp->stats.collisions += card_stats & 0xF; + card_stats >>= 4; + + /* multiple collisions */ + lp->stats.collisions += card_stats & 0xF; + } else if (status & IM_RX_OVRN_INT) { + DBG(1, "%s: RX overrun\n", dev->name); + SMC_ACK_INT(IM_RX_OVRN_INT); + lp->stats.rx_errors++; + lp->stats.rx_fifo_errors++; + } else if (status & IM_EPH_INT) { + smc_eph_interrupt(dev); + } else if (status & IM_MDINT) { + SMC_ACK_INT(IM_MDINT); + smc_phy_interrupt(dev); + } else if (status & IM_ERCV_INT) { + SMC_ACK_INT(IM_ERCV_INT); + PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name); + } + + spin_unlock(&lp->lock); + } while (--timeout); + + /* restore register states */ + SMC_SET_INT_MASK(mask); + SMC_SET_PTR(saved_pointer); + SMC_SELECT_BANK(saved_bank); + + DBG(3, "%s: Interrupt done (%d loops)\n", dev->name, 8-timeout); + + /* + * We return IRQ_HANDLED unconditionally here even if there was + * nothing to do. There is a possibility that a packet might + * get enqueued into the chip right after TX_EMPTY_INT is raised + * but just before the CPU acknowledges the IRQ. + * Better take an unneeded IRQ in some occasions than complexifying + * the code for all cases. + */ + return IRQ_HANDLED; +} + +/* Our watchdog timed out. 
Called by the networking layer */
+static void smc_timeout(struct net_device *dev)
+{
+	struct smc_local *lp = netdev_priv(dev);
+
+	DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+	smc_reset(dev);
+	smc_enable(dev);
+
+#if 0
+	/*
+	 * Reconfiguring the PHY doesn't seem like a bad idea here, but
+	 * it introduced a problem. Now that this is a timeout routine,
+	 * we are getting called from within an interrupt context.
+	 * smc_phy_configure() calls msleep() which calls
+	 * schedule_timeout() which calls schedule(). When schedule()
+	 * is called from an interrupt context, it prints out
+	 * "Scheduling in interrupt" and then calls BUG(). This is
+	 * obviously not desirable. This was worked around by removing
+	 * the call to smc_phy_configure() here because it didn't seem
+	 * absolutely necessary. Ultimately, if msleep() is
+	 * supposed to be usable from an interrupt context (which it
+	 * looks like it thinks it should handle), it should be fixed.
+	 */
+	if (lp->phy_type != 0)
+		smc_phy_configure(dev);
+#endif
+
+	/* clear anything saved */
+	if (lp->saved_skb != NULL) {
+		dev_kfree_skb (lp->saved_skb);
+		lp->saved_skb = NULL;
+		lp->stats.tx_errors++;
+		lp->stats.tx_aborted_errors++;
+	}
+	/* We can accept TX packets again */
+	dev->trans_start = jiffies;
+	netif_wake_queue(dev);
+}
+
+/*
+ * This sets the internal hardware table to filter out unwanted multicast
+ * packets before they take up memory.
+ *
+ * The SMC chip uses a hash table where the high 6 bits of the CRC of
+ * address are the offset into the table. If that bit is 1, then the
+ * multicast packet is accepted. Otherwise, it's dropped silently.
+ *
+ * To use the 6 bits as an offset into the table, the high 3 bits are the
+ * number of the 8 bit register, while the low 3 bits are the bit within
+ * that register.
+ *
+ * This routine is based very heavily on the one provided by Peter Cammaert.
+ */
+static void
+smc_setmulticast(unsigned long ioaddr, int count, struct dev_mc_list *addrs)
+{
+	int i;
+	unsigned char multicast_table[8];
+	struct dev_mc_list *cur_addr;
+
+	/* table for flipping the order of 3 bits */
+	static unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
+
+	/* start with a table of all zeros: reject all */
+	memset(multicast_table, 0, sizeof(multicast_table));
+
+	cur_addr = addrs;
+	for (i = 0; i < count; i++, cur_addr = cur_addr->next) {
+		int position;
+
+		/* do we have a pointer here? */
+		if (!cur_addr)
+			break;
+		/* make sure this is a multicast address - shouldn't this
+		   be a given if we have it here ? */
+		if (!(*cur_addr->dmi_addr & 1))
+			continue;
+
+		/* only use the low order bits */
+		position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
+
+		/* do some messy swapping to put the bit in the right spot */
+		multicast_table[invert3[position&7]] |=
+			(1<<invert3[(position>>3)&7]);
+
+	}
+	/* now, the table can be loaded into the chipset */
+	SMC_SELECT_BANK(3);
+	SMC_SET_MCAST(multicast_table);
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void smc_set_multicast_list(struct net_device *dev)
+{
+	struct smc_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+
+	DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
+
+	SMC_SELECT_BANK(0);
+	if (dev->flags & IFF_PROMISC) {
+		DBG(2, "%s: RCR_PRMS\n", dev->name);
+		lp->rcr_cur_mode |= RCR_PRMS;
+		SMC_SET_RCR(lp->rcr_cur_mode);
+	}
+
+/* BUG?
I never disable promiscuous mode if multicasting was turned on. + Now, I turn off promiscuous mode, but I don't do anything to multicasting + when promiscuous mode is turned on. +*/ + + /* + * Here, I am setting this to accept all multicast packets. + * I don't need to zero the multicast table, because the flag is + * checked before the table is + */ + else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) { + lp->rcr_cur_mode |= RCR_ALMUL; + SMC_SET_RCR(lp->rcr_cur_mode); + DBG(2, "%s: RCR_ALMUL\n", dev->name); + } + + /* + * We just get all multicast packets even if we only want them + * from one source. This will be changed at some future point. + */ + else if (dev->mc_count) { + /* support hardware multicasting */ + + /* be sure I get rid of flags I might have set */ + lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL); + SMC_SET_RCR(lp->rcr_cur_mode); + /* + * NOTE: this has to set the bank, so make sure it is the + * last thing called. The bank is set to zero at the top + */ + smc_setmulticast(ioaddr, dev->mc_count, dev->mc_list); + } else { + DBG(2, "%s: ~(RCR_PRMS|RCR_ALMUL)\n", dev->name); + lp->rcr_cur_mode &= ~(RCR_PRMS | RCR_ALMUL); + SMC_SET_RCR(lp->rcr_cur_mode); + + /* + * since I'm disabling all multicast entirely, I need to + * clear the multicast list + */ + SMC_SELECT_BANK(3); + SMC_CLEAR_MCAST(); + } +} + + +/* + * Open and Initialize the board + * + * Set up everything, reset the card, etc.. + */ +static int +smc_open(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + + DBG(2, "%s: %s\n", dev->name, __FUNCTION__); + + /* + * Check that the address is valid. If its not, refuse + * to bring the device up. The user must specify an + * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx + */ + if (!is_valid_ether_addr(dev->dev_addr)) { + DBG(2, (KERN_DEBUG "smc_open: no valid ethernet hw addr\n")); + return -EINVAL; + } + + /* clear out all the junk that was put here before... */ + lp->saved_skb = NULL; + + /* Setup the default Register Modes */ + lp->tcr_cur_mode = TCR_DEFAULT; + lp->rcr_cur_mode = RCR_DEFAULT; + lp->rpc_cur_mode = RPC_DEFAULT; + + /* + * If we are not using a MII interface, we need to + * monitor our own carrier signal to detect faults. + */ + if (lp->phy_type == 0) + lp->tcr_cur_mode |= TCR_MON_CSN; + + /* reset the hardware */ + smc_reset(dev); + smc_enable(dev); + + SMC_SELECT_BANK(1); + SMC_SET_MAC_ADDR(dev->dev_addr); + + /* Configure the PHY */ + if (lp->phy_type != 0) + smc_phy_configure(dev); + else { + spin_lock_irq(&lp->lock); + smc_10bt_check_media(dev, 1); + spin_unlock_irq(&lp->lock); + } + + /* + * make sure to initialize the link state with netif_carrier_off() + * somewhere, too --jgarzik + * + * smc_phy_configure() and smc_10bt_check_media() does that. --rmk + */ + netif_start_queue(dev); + return 0; +} + +/* + * smc_close + * + * this makes the board clean up everything that it can + * and not talk to the outside world. Caused by + * an 'ifconfig ethX down' + */ +static int smc_close(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + + DBG(2, "%s: %s\n", dev->name, __FUNCTION__); + + netif_stop_queue(dev); + netif_carrier_off(dev); + + /* clear everything */ + smc_shutdown(dev->base_addr); + + if (lp->phy_type != 0) + smc_phy_powerdown(dev, lp->mii.phy_id); + + return 0; +} + +/* + * Get the current statistics. + * This may be called with the card open or closed. 
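Returning to the filter math in smc_setmulticast() above: crc32_le() of the 6-byte multicast address supplies six hash bits, the low three (bit-reversed through invert3[]) pick one of the eight table bytes and the next three (also bit-reversed) pick the bit within that byte. The standalone sketch below reproduces the computation; it assumes the kernel's crc32_le() is the usual reflected CRC-32 (polynomial 0xedb88320, no final inversion), and the MAC address is just an example value:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Bitwise little-endian CRC-32, assumed to match the kernel's crc32_le(). */
static uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *p, size_t len)
{
	int bit;

	while (len--) {
		crc ^= *p++;
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	static const uint8_t invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };	/* example only */
	uint8_t table[8] = { 0 };
	unsigned position = crc32_le_sketch(~0u, mac, 6) & 0x3f;

	/* Same index swapping as the driver. */
	table[invert3[position & 7]] |= 1u << invert3[(position >> 3) & 7];
	printf("hashes to bit %u of filter byte %u\n",
	       invert3[(position >> 3) & 7], invert3[position & 7]);
	return 0;
}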
+ */ +static struct net_device_stats *smc_query_statistics(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + + DBG(2, "%s: %s\n", dev->name, __FUNCTION__); + + return &lp->stats; +} + +/* + * Ethtool support + */ +static int +smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smc_local *lp = netdev_priv(dev); + int ret; + + cmd->maxtxpkt = 1; + cmd->maxrxpkt = 1; + + if (lp->phy_type != 0) { + spin_lock_irq(&lp->lock); + ret = mii_ethtool_gset(&lp->mii, cmd); + spin_unlock_irq(&lp->lock); + } else { + cmd->supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_TP | SUPPORTED_AUI; + + if (lp->ctl_rspeed == 10) + cmd->speed = SPEED_10; + else if (lp->ctl_rspeed == 100) + cmd->speed = SPEED_100; + + cmd->autoneg = AUTONEG_DISABLE; + cmd->transceiver = XCVR_INTERNAL; + cmd->port = 0; + cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF; + + ret = 0; + } + + return ret; +} + +static int +smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct smc_local *lp = netdev_priv(dev); + int ret; + + if (lp->phy_type != 0) { + spin_lock_irq(&lp->lock); + ret = mii_ethtool_sset(&lp->mii, cmd); + spin_unlock_irq(&lp->lock); + } else { + if (cmd->autoneg != AUTONEG_DISABLE || + cmd->speed != SPEED_10 || + (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) || + (cmd->port != PORT_TP && cmd->port != PORT_AUI)) + return -EINVAL; + +// lp->port = cmd->port; + lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL; + +// if (netif_running(dev)) +// smc_set_port(dev); + + ret = 0; + } + + return ret; +} + +static void +smc_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strncpy(info->driver, CARDNAME, sizeof(info->driver)); + strncpy(info->version, version, sizeof(info->version)); + strncpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info)); +} + +static int smc_ethtool_nwayreset(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + int ret = -EINVAL; + + if (lp->phy_type != 0) { + spin_lock_irq(&lp->lock); + ret = mii_nway_restart(&lp->mii); + spin_unlock_irq(&lp->lock); + } + + return ret; +} + +static u32 smc_ethtool_getmsglevel(struct net_device *dev) +{ + struct smc_local *lp = netdev_priv(dev); + return lp->msg_enable; +} + +static void smc_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct smc_local *lp = netdev_priv(dev); + lp->msg_enable = level; +} + +static struct ethtool_ops smc_ethtool_ops = { + .get_settings = smc_ethtool_getsettings, + .set_settings = smc_ethtool_setsettings, + .get_drvinfo = smc_ethtool_getdrvinfo, + + .get_msglevel = smc_ethtool_getmsglevel, + .set_msglevel = smc_ethtool_setmsglevel, + .nway_reset = smc_ethtool_nwayreset, + .get_link = ethtool_op_get_link, +// .get_eeprom = smc_ethtool_geteeprom, +// .set_eeprom = smc_ethtool_seteeprom, +}; + +/* + * smc_findirq + * + * This routine has a simple purpose -- make the SMC chip generate an + * interrupt, so an auto-detect routine can detect it, and find the IRQ, + */ +/* + * does this still work? + * + * I just deleted auto_irq.c, since it was never built... + * --jgarzik + */ +static int __init smc_findirq(unsigned long ioaddr) +{ + int timeout = 20; + unsigned long cookie; + + DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); + + cookie = probe_irq_on(); + + /* + * What I try to do here is trigger an ALLOC_INT. This is done + * by allocating a small chunk of memory, which will give an interrupt + * when done. 
+ */ + /* enable ALLOCation interrupts ONLY */ + SMC_SELECT_BANK(2); + SMC_SET_INT_MASK(IM_ALLOC_INT); + + /* + * Allocate 512 bytes of memory. Note that the chip was just + * reset so all the memory is available + */ + SMC_SET_MMU_CMD(MC_ALLOC | 1); + + /* + * Wait until positive that the interrupt has been generated + */ + do { + int int_status; + udelay(10); + int_status = SMC_GET_INT(); + if (int_status & IM_ALLOC_INT) + break; /* got the interrupt */ + } while (--timeout); + + /* + * there is really nothing that I can do here if timeout fails, + * as autoirq_report will return a 0 anyway, which is what I + * want in this case. Plus, the clean up is needed in both + * cases. + */ + + /* and disable all interrupts again */ + SMC_SET_INT_MASK(0); + + /* and return what I found */ + return probe_irq_off(cookie); +} + +/* + * Function: smc_probe(unsigned long ioaddr) + * + * Purpose: + * Tests to see if a given ioaddr points to an SMC91x chip. + * Returns a 0 on success + * + * Algorithm: + * (1) see if the high byte of BANK_SELECT is 0x33 + * (2) compare the ioaddr with the base register's address + * (3) see if I recognize the chip ID in the appropriate register + * + * Here I do typical initialization tasks. + * + * o Initialize the structure if needed + * o print out my vanity message if not done so already + * o print out what type of hardware is detected + * o print out the ethernet address + * o find the IRQ + * o set up my private data + * o configure the dev structure with my subroutines + * o actually GRAB the irq. + * o GRAB the region + */ +static int __init smc_probe(struct net_device *dev, unsigned long ioaddr) +{ + struct smc_local *lp = netdev_priv(dev); + static int version_printed = 0; + int i, retval; + unsigned int val, revision_register; + const char *version_string; + + DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); + + /* First, see if the high byte is 0x33 */ + val = SMC_CURRENT_BANK(); + DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val); + if ((val & 0xFF00) != 0x3300) { + if ((val & 0xFF) == 0x33) { + printk(KERN_WARNING + "%s: Detected possible byte-swapped interface" + " at IOADDR 0x%lx\n", CARDNAME, ioaddr); + } + retval = -ENODEV; + goto err_out; + } + + /* + * The above MIGHT indicate a device, but I need to write to + * further test this. + */ + SMC_SELECT_BANK(0); + val = SMC_CURRENT_BANK(); + if ((val & 0xFF00) != 0x3300) { + retval = -ENODEV; + goto err_out; + } + + /* + * well, we've already written once, so hopefully another + * time won't hurt. This time, I need to switch the bank + * register to bank 1, so I can access the base address + * register + */ + SMC_SELECT_BANK(1); + val = SMC_GET_BASE(); + val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT; + if ((ioaddr & ((PAGE_SIZE-1)<> 4) & 0xF]; + if (!version_string || (revision_register & 0xff00) != 0x3300) { + /* I don't recognize this chip, so... */ + printk("%s: IO 0x%lx: Unrecognized revision register 0x%04x" + ", Contact author.\n", CARDNAME, + ioaddr, revision_register); + + retval = -ENODEV; + goto err_out; + } + + /* At this point I'll assume that the chip is an SMC91x. */ + if (version_printed++ == 0) + printk("%s", version); + + /* fill in some of the fields */ + dev->base_addr = ioaddr; + lp->version = revision_register & 0xff; + + /* Get the MAC address */ + SMC_SELECT_BANK(1); + SMC_GET_MAC_ADDR(dev->dev_addr); + + /* now, reset the chip, and put it into a known state */ + smc_reset(dev); + + /* + * If dev->irq is 0, then the device has to be banged on to see + * what the IRQ is. 
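The chip identification in smc_probe() above hinges on the bank-3 revision register: the high byte must read 0x33, the high nibble of the low byte indexes the chip_ids[] name table defined further down in smc91x.h, and the low nibble is the silicon revision later printed as "rev %d". A small illustrative decode (the register value here is made up):

#include <stdio.h>

int main(void)
{
	static const char *chip_names[16] = {
		[3] = "SMC91C90/91C92", [4] = "SMC91C94", [5] = "SMC91C95",
		[6] = "SMC91C96", [7] = "SMC91C100", [8] = "SMC91C100FD",
		[9] = "SMC91C11xFD",
	};
	unsigned reg = 0x3391;			/* hypothetical readback */
	unsigned chip = (reg >> 4) & 0xf;

	if ((reg & 0xff00) != 0x3300 || !chip_names[chip])
		printf("unrecognized revision register 0x%04x\n", reg);
	else
		printf("%s, revision %u\n", chip_names[chip], reg & 0xf);
	return 0;
}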
+ * + * This banging doesn't always detect the IRQ, for unknown reasons. + * a workaround is to reset the chip and try again. + * + * Interestingly, the DOS packet driver *SETS* the IRQ on the card to + * be what is requested on the command line. I don't do that, mostly + * because the card that I have uses a non-standard method of accessing + * the IRQs, and because this _should_ work in most configurations. + * + * Specifying an IRQ is done with the assumption that the user knows + * what (s)he is doing. No checking is done!!!! + */ + if (dev->irq < 1) { + int trials; + + trials = 3; + while (trials--) { + dev->irq = smc_findirq(ioaddr); + if (dev->irq) + break; + /* kick the card and try again */ + smc_reset(dev); + } + } + if (dev->irq == 0) { + printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n", + dev->name); + retval = -ENODEV; + goto err_out; + } + dev->irq = irq_canonicalize(dev->irq); + + /* Fill in the fields of the device structure with ethernet values. */ + ether_setup(dev); + + dev->open = smc_open; + dev->stop = smc_close; + dev->hard_start_xmit = smc_hard_start_xmit; + dev->tx_timeout = smc_timeout; + dev->watchdog_timeo = msecs_to_jiffies(watchdog); + dev->get_stats = smc_query_statistics; + dev->set_multicast_list = smc_set_multicast_list; + dev->ethtool_ops = &smc_ethtool_ops; + + spin_lock_init(&lp->lock); + lp->mii.phy_id_mask = 0x1f; + lp->mii.reg_num_mask = 0x1f; + lp->mii.force_media = 0; + lp->mii.full_duplex = 0; + lp->mii.dev = dev; + lp->mii.mdio_read = smc_phy_read; + lp->mii.mdio_write = smc_phy_write; + + /* + * Locate the phy, if any. + */ + if (lp->version >= (CHIP_91100 << 4)) + smc_detect_phy(dev); + + /* Set default parameters */ + lp->msg_enable = NETIF_MSG_LINK; + lp->ctl_rfduplx = 0; + lp->ctl_rspeed = 10; + + if (lp->version >= (CHIP_91100 << 4)) { + lp->ctl_rfduplx = 1; + lp->ctl_rspeed = 100; + } + + /* Grab the IRQ */ + retval = request_irq(dev->irq, &smc_interrupt, 0, dev->name, dev); + if (retval) + goto err_out; + + set_irq_type(dev->irq, IRQT_RISING); +#ifdef SMC_USE_PXA_DMA + { + int dma = pxa_request_dma(dev->name, DMA_PRIO_LOW, + smc_pxa_dma_irq, NULL); + if (dma >= 0) + dev->dma = dma; + } +#endif + + retval = register_netdev(dev); + if (retval == 0) { + /* now, print out the card info, in a short format.. */ + printk("%s: %s (rev %d) at %#lx IRQ %d", + dev->name, version_string, revision_register & 0x0f, + dev->base_addr, dev->irq); + + if (dev->dma != (unsigned char)-1) + printk(" DMA %d", dev->dma); + + printk("%s%s\n", nowait ? " [nowait]" : "", + THROTTLE_TX_PKTS ? " [throttle_tx]" : ""); + + if (!is_valid_ether_addr(dev->dev_addr)) { + printk("%s: Invalid ethernet MAC address. Please " + "set using ifconfig\n", dev->name); + } else { + /* Print the Ethernet address */ + printk("%s: Ethernet addr: ", dev->name); + for (i = 0; i < 5; i++) + printk("%2.2x:", dev->dev_addr[i]); + printk("%2.2x\n", dev->dev_addr[5]); + } + + if (lp->phy_type == 0) { + PRINTK("%s: No PHY found\n", dev->name); + } else if ((lp->phy_type & 0xfffffff0) == 0x0016f840) { + PRINTK("%s: PHY LAN83C183 (LAN91C111 Internal)\n", dev->name); + } else if ((lp->phy_type & 0xfffffff0) == 0x02821c50) { + PRINTK("%s: PHY LAN83C180\n", dev->name); + } + } + +err_out: +#ifdef SMC_USE_PXA_DMA + if (retval && dev->dma != (unsigned char)-1) + pxa_free_dma(dev->dma); +#endif + return retval; +} + +static int smc_enable_device(unsigned long attrib_phys) +{ + unsigned long flags; + unsigned char ecor, ecsr; + void *addr; + + /* + * Map the attribute space. 
This is overkill, but clean. + */ + addr = ioremap(attrib_phys, ATTRIB_SIZE); + if (!addr) + return -ENOMEM; + + /* + * Reset the device. We must disable IRQs around this + * since a reset causes the IRQ line become active. + */ + local_irq_save(flags); + ecor = readb(addr + (ECOR << SMC_IO_SHIFT)) & ~ECOR_RESET; + writeb(ecor | ECOR_RESET, addr + (ECOR << SMC_IO_SHIFT)); + readb(addr + (ECOR << SMC_IO_SHIFT)); + + /* + * Wait 100us for the chip to reset. + */ + udelay(100); + + /* + * The device will ignore all writes to the enable bit while + * reset is asserted, even if the reset bit is cleared in the + * same write. Must clear reset first, then enable the device. + */ + writeb(ecor, addr + (ECOR << SMC_IO_SHIFT)); + writeb(ecor | ECOR_ENABLE, addr + (ECOR << SMC_IO_SHIFT)); + + /* + * Set the appropriate byte/word mode. + */ + ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; +#ifndef SMC_CAN_USE_16BIT + ecsr |= ECSR_IOIS8; +#endif + writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); + local_irq_restore(flags); + + iounmap(addr); + + /* + * Wait for the chip to wake up. We could poll the control + * register in the main register space, but that isn't mapped + * yet. We know this is going to take 750us. + */ + msleep(1); + + return 0; +} + +/* + * smc_init(void) + * Input parameters: + * dev->base_addr == 0, try to find all possible locations + * dev->base_addr > 0x1ff, this is the address to check + * dev->base_addr == , return failure code + * + * Output: + * 0 --> there is a device + * anything else, error + */ +static int smc_drv_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev; + struct resource *res, *ext = NULL; + unsigned int *addr; + int ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENODEV; + goto out; + } + + /* + * Request the regions. 
+ */ + if (!request_mem_region(res->start, SMC_IO_EXTENT, "smc91x")) { + ret = -EBUSY; + goto out; + } + + ndev = alloc_etherdev(sizeof(struct smc_local)); + if (!ndev) { + printk("%s: could not allocate device.\n", CARDNAME); + ret = -ENOMEM; + goto release_1; + } + SET_MODULE_OWNER(ndev); + SET_NETDEV_DEV(ndev, dev); + + ndev->dma = (unsigned char)-1; + ndev->irq = platform_get_irq(pdev, 0); + + ext = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (ext) { + if (!request_mem_region(ext->start, ATTRIB_SIZE, ndev->name)) { + ret = -EBUSY; + goto release_1; + } + +#if defined(CONFIG_SA1100_ASSABET) + NCR_0 |= NCR_ENET_OSC_EN; +#endif + + ret = smc_enable_device(ext->start); + if (ret) + goto release_both; + } + + addr = ioremap(res->start, SMC_IO_EXTENT); + if (!addr) { + ret = -ENOMEM; + goto release_both; + } + + dev_set_drvdata(dev, ndev); + ret = smc_probe(ndev, (unsigned long)addr); + if (ret != 0) { + dev_set_drvdata(dev, NULL); + iounmap(addr); + release_both: + if (ext) + release_mem_region(ext->start, ATTRIB_SIZE); + free_netdev(ndev); + release_1: + release_mem_region(res->start, SMC_IO_EXTENT); + out: + printk("%s: not found (%d).\n", CARDNAME, ret); + } +#ifdef SMC_USE_PXA_DMA + else { + struct smc_local *lp = netdev_priv(ndev); + lp->physaddr = res->start; + } +#endif + + return ret; +} + +static int smc_drv_remove(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = dev_get_drvdata(dev); + struct resource *res; + + dev_set_drvdata(dev, NULL); + + unregister_netdev(ndev); + + free_irq(ndev->irq, ndev); + +#ifdef SMC_USE_PXA_DMA + if (ndev->dma != (unsigned char)-1) + pxa_free_dma(ndev->dma); +#endif + iounmap((void *)ndev->base_addr); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) + release_mem_region(res->start, ATTRIB_SIZE); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, SMC_IO_EXTENT); + + free_netdev(ndev); + + return 0; +} + +static int smc_drv_suspend(struct device *dev, u32 state, u32 level) +{ + struct net_device *ndev = dev_get_drvdata(dev); + + if (ndev && level == SUSPEND_DISABLE) { + if (netif_running(ndev)) { + netif_device_detach(ndev); + smc_shutdown(ndev->base_addr); + } + } + return 0; +} + +static int smc_drv_resume(struct device *dev, u32 level) +{ + struct platform_device *pdev = to_platform_device(dev); + struct net_device *ndev = dev_get_drvdata(dev); + + if (ndev && level == RESUME_ENABLE) { + struct smc_local *lp = netdev_priv(ndev); + unsigned long ioaddr = ndev->base_addr; + + if (pdev->num_resources == 3) + smc_enable_device(pdev->resource[2].start); + if (netif_running(ndev)) { + smc_reset(ndev); + smc_enable(ndev); + SMC_SELECT_BANK(1); + SMC_SET_MAC_ADDR(ndev->dev_addr); + if (lp->phy_type != 0) + smc_phy_configure(ndev); + netif_device_attach(ndev); + } + } + return 0; +} + +static struct device_driver smc_driver = { + .name = CARDNAME, + .bus = &platform_bus_type, + .probe = smc_drv_probe, + .remove = smc_drv_remove, + .suspend = smc_drv_suspend, + .resume = smc_drv_resume, +}; + +static int __init smc_init(void) +{ +#ifdef MODULE + if (io == -1) + printk(KERN_WARNING + "%s: You shouldn't use auto-probing with insmod!\n", + CARDNAME); +#endif + + return driver_register(&smc_driver); +} + +static void __exit smc_cleanup(void) +{ + driver_unregister(&smc_driver); +} + +module_init(smc_init); +module_exit(smc_cleanup); diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h new file mode 100644 index 000000000..7679022c0 --- 
/dev/null +++ b/drivers/net/smc91x.h @@ -0,0 +1,866 @@ +/*------------------------------------------------------------------------ + . smc91x.h - macros for SMSC's 91C9x/91C1xx single-chip Ethernet device. + . + . Copyright (C) 1996 by Erik Stahlman + . Copyright (C) 2001 Standard Microsystems Corporation + . Developed by Simple Network Magic Corporation + . Copyright (C) 2003 Monta Vista Software, Inc. + . Unified SMC91x driver by Nicolas Pitre + . + . This program is free software; you can redistribute it and/or modify + . it under the terms of the GNU General Public License as published by + . the Free Software Foundation; either version 2 of the License, or + . (at your option) any later version. + . + . This program is distributed in the hope that it will be useful, + . but WITHOUT ANY WARRANTY; without even the implied warranty of + . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + . GNU General Public License for more details. + . + . You should have received a copy of the GNU General Public License + . along with this program; if not, write to the Free Software + . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + . + . Information contained in this file was obtained from the LAN91C111 + . manual from SMC. To get a copy, if you really want one, you can find + . information under www.smsc.com. + . + . Authors + . Erik Stahlman + . Daris A Nevil + . Nicolas Pitre + . + ---------------------------------------------------------------------------*/ +#ifndef _SMC91X_H_ +#define _SMC91X_H_ + + +/* + * Define your architecture specific bus configuration parameters here. + */ + +#if defined(CONFIG_SA1100_GRAPHICSCLIENT) || \ + defined(CONFIG_SA1100_PFS168) || \ + defined(CONFIG_SA1100_FLEXANET) || \ + defined(CONFIG_SA1100_GRAPHICSMASTER) || \ + defined(CONFIG_ARCH_LUBBOCK) + +/* We can only do 16-bit reads and writes in the static memory space. */ +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 +#define SMC_NOWAIT 1 + +/* The first two address lines aren't connected... */ +#define SMC_IO_SHIFT 2 + +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) + +#elif defined(CONFIG_REDWOOD_5) || defined(CONFIG_REDWOOD_6) + +/* We can only do 16-bit reads and writes in the static memory space. */ +#define SMC_CAN_USE_8BIT 0 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 +#define SMC_NOWAIT 1 + +#define SMC_IO_SHIFT 0 + +#define SMC_inw(a, r) in_be16((volatile u16 *)((a) + (r))) +#define SMC_outw(v, a, r) out_be16((volatile u16 *)((a) + (r)), v) +#define SMC_insw(a, r, p, l) \ + do { \ + unsigned long __port = (a) + (r); \ + u16 *__p = (u16 *)(p); \ + int __l = (l); \ + insw(__port, __p, __l); \ + while (__l > 0) { \ + *__p = swab16(*__p); \ + __p++; \ + __l--; \ + } \ + } while (0) +#define SMC_outsw(a, r, p, l) \ + do { \ + unsigned long __port = (a) + (r); \ + u16 *__p = (u16 *)(p); \ + int __l = (l); \ + while (__l > 0) { \ + /* Believe it or not, the swab isn't needed. */ \ + outw( /* swab16 */ (*__p++), __port); \ + __l--; \ + } \ + } while (0) +#define set_irq_type(irq, type) + +#elif defined(CONFIG_SA1100_ASSABET) + +#include + +/* We can only do 8-bit reads and writes in the static memory space. */ +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 0 +#define SMC_CAN_USE_32BIT 0 +#define SMC_NOWAIT 1 + +/* The first two address lines aren't connected... 
*/ +#define SMC_IO_SHIFT 2 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) +#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) + +#elif defined(CONFIG_ARCH_INNOKOM) || \ + defined(CONFIG_MACH_MAINSTONE) || \ + defined(CONFIG_ARCH_PXA_IDP) || \ + defined(CONFIG_ARCH_RAMSES) + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_IO_SHIFT 0 +#define SMC_NOWAIT 1 +#define SMC_USE_PXA_DMA 1 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) + +/* We actually can't write halfwords properly if not word aligned */ +static inline void +SMC_outw(u16 val, unsigned long ioaddr, int reg) +{ + if (reg & 2) { + unsigned int v = val << 16; + v |= readl(ioaddr + (reg & ~2)) & 0xffff; + writel(v, ioaddr + (reg & ~2)); + } else { + writew(val, ioaddr + reg); + } +} + +#elif defined(CONFIG_ISA) + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 0 + +#define SMC_inb(a, r) inb((a) + (r)) +#define SMC_inw(a, r) inw((a) + (r)) +#define SMC_outb(v, a, r) outb(v, (a) + (r)) +#define SMC_outw(v, a, r) outw(v, (a) + (r)) +#define SMC_insw(a, r, p, l) insw((a) + (r), p, l) +#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l) + +#else + +#define SMC_CAN_USE_8BIT 1 +#define SMC_CAN_USE_16BIT 1 +#define SMC_CAN_USE_32BIT 1 +#define SMC_NOWAIT 1 + +#define SMC_inb(a, r) readb((a) + (r)) +#define SMC_inw(a, r) readw((a) + (r)) +#define SMC_inl(a, r) readl((a) + (r)) +#define SMC_outb(v, a, r) writeb(v, (a) + (r)) +#define SMC_outw(v, a, r) writew(v, (a) + (r)) +#define SMC_outl(v, a, r) writel(v, (a) + (r)) +#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) +#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) + +#define RPC_LSA_DEFAULT RPC_LED_100_10 +#define RPC_LSB_DEFAULT RPC_LED_TX_RX + +#endif + + +#ifdef SMC_USE_PXA_DMA +/* + * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is + * always happening in irq context so no need to worry about races. TX is + * different and probably not worth it for that reason, and not as critical + * as RX which can overrun memory and lose packets. 
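Several of the configurations above set SMC_IO_SHIFT because the chip's low address lines are not wired to the CPU bus, so chip register offset r must be presented at bus offset r << SMC_IO_SHIFT; the register macros later in this header apply that scaling. A trivial illustration of the resulting layout for a shift of 2:

#include <stdio.h>

/* With SMC_IO_SHIFT = 2 each 16-bit chip register lands four bus bytes
 * apart, e.g. the bank select register at chip offset 0xE shows up at
 * bus offset 0x38.  Illustration only.
 */
int main(void)
{
	unsigned shift = 2, reg;

	for (reg = 0; reg <= 0xE; reg += 2)
		printf("chip register 0x%02X -> bus offset 0x%02X\n",
		       reg, reg << shift);
	return 0;
}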
+ */ +#include +#include + +#ifdef SMC_insl +#undef SMC_insl +#define SMC_insl(a, r, p, l) \ + smc_pxa_dma_insl(a, lp->physaddr, r, dev->dma, p, l) +static inline void +smc_pxa_dma_insl(u_long ioaddr, u_long physaddr, int reg, int dma, + u_char *buf, int len) +{ + dma_addr_t dmabuf; + + /* fallback if no DMA available */ + if (dma == (unsigned char)-1) { + readsl(ioaddr + reg, buf, len); + return; + } + + /* 64 bit alignment is required for memory to memory DMA */ + if ((long)buf & 4) { + *((u32 *)buf)++ = SMC_inl(ioaddr, reg); + len--; + } + + len *= 4; + dmabuf = dma_map_single(NULL, buf, len, PCI_DMA_FROMDEVICE); + DCSR(dma) = DCSR_NODESC; + DTADR(dma) = dmabuf; + DSADR(dma) = physaddr + reg; + DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | + DCMD_WIDTH4 | (DCMD_LENGTH & len)); + DCSR(dma) = DCSR_NODESC | DCSR_RUN; + while (!(DCSR(dma) & DCSR_STOPSTATE)); + DCSR(dma) = 0; + dma_unmap_single(NULL, dmabuf, len, PCI_DMA_FROMDEVICE); +} +#endif + +#ifdef SMC_insw +#undef SMC_insw +#define SMC_insw(a, r, p, l) \ + smc_pxa_dma_insw(a, lp->physaddr, r, dev->dma, p, l) +static inline void +smc_pxa_dma_insw(u_long ioaddr, u_long physaddr, int reg, int dma, + u_char *buf, int len) +{ + dma_addr_t dmabuf; + + /* fallback if no DMA available */ + if (dma == (unsigned char)-1) { + readsw(ioaddr + reg, buf, len); + return; + } + + /* 64 bit alignment is required for memory to memory DMA */ + while ((long)buf & 6) { + *((u16 *)buf)++ = SMC_inw(ioaddr, reg); + len--; + } + + len *= 2; + dmabuf = dma_map_single(NULL, buf, len, PCI_DMA_FROMDEVICE); + DCSR(dma) = DCSR_NODESC; + DTADR(dma) = dmabuf; + DSADR(dma) = physaddr + reg; + DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | + DCMD_WIDTH2 | (DCMD_LENGTH & len)); + DCSR(dma) = DCSR_NODESC | DCSR_RUN; + while (!(DCSR(dma) & DCSR_STOPSTATE)); + DCSR(dma) = 0; + dma_unmap_single(NULL, dmabuf, len, PCI_DMA_FROMDEVICE); +} +#endif + +static void +smc_pxa_dma_irq(int dma, void *dummy, struct pt_regs *regs) +{ + DCSR(dma) = 0; +} +#endif /* SMC_USE_PXA_DMA */ + + +/* Because of bank switching, the LAN91x uses only 16 I/O ports */ +#ifndef SMC_IO_SHIFT +#define SMC_IO_SHIFT 0 +#endif +#define SMC_IO_EXTENT (16 << SMC_IO_SHIFT) + + +/* + . Bank Select Register: + . + . yyyy yyyy 0000 00xx + . xx = bank number + . yyyy yyyy = 0x33, for identification purposes. 
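That layout is exactly what the probe code keys on; here is a made-up readback decoded according to the comment above (illustration only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned bsr = 0x3302;		/* hypothetical bank select readback */

	if ((bsr & 0xff00) != 0x3300)
		printf("0x%04x: no SMC91x signature\n", bsr);
	else
		printf("0x%04x: SMC91x signature, bank %u selected\n", bsr, bsr & 3);
	return 0;
}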
+*/ +#define BANK_SELECT (14 << SMC_IO_SHIFT) + + +// Transmit Control Register +/* BANK 0 */ +#define TCR_REG SMC_REG(0x0000, 0) +#define TCR_ENABLE 0x0001 // When 1 we can transmit +#define TCR_LOOP 0x0002 // Controls output pin LBK +#define TCR_FORCOL 0x0004 // When 1 will force a collision +#define TCR_PAD_EN 0x0080 // When 1 will pad tx frames < 64 bytes w/0 +#define TCR_NOCRC 0x0100 // When 1 will not append CRC to tx frames +#define TCR_MON_CSN 0x0400 // When 1 tx monitors carrier +#define TCR_FDUPLX 0x0800 // When 1 enables full duplex operation +#define TCR_STP_SQET 0x1000 // When 1 stops tx if Signal Quality Error +#define TCR_EPH_LOOP 0x2000 // When 1 enables EPH block loopback +#define TCR_SWFDUP 0x8000 // When 1 enables Switched Full Duplex mode + +#define TCR_CLEAR 0 /* do NOTHING */ +/* the default settings for the TCR register : */ +#define TCR_DEFAULT (TCR_ENABLE | TCR_PAD_EN) + + +// EPH Status Register +/* BANK 0 */ +#define EPH_STATUS_REG SMC_REG(0x0002, 0) +#define ES_TX_SUC 0x0001 // Last TX was successful +#define ES_SNGL_COL 0x0002 // Single collision detected for last tx +#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx +#define ES_LTX_MULT 0x0008 // Last tx was a multicast +#define ES_16COL 0x0010 // 16 Collisions Reached +#define ES_SQET 0x0020 // Signal Quality Error Test +#define ES_LTXBRD 0x0040 // Last tx was a broadcast +#define ES_TXDEFR 0x0080 // Transmit Deferred +#define ES_LATCOL 0x0200 // Late collision detected on last tx +#define ES_LOSTCARR 0x0400 // Lost Carrier Sense +#define ES_EXC_DEF 0x0800 // Excessive Deferral +#define ES_CTR_ROL 0x1000 // Counter Roll Over indication +#define ES_LINK_OK 0x4000 // Driven by inverted value of nLNK pin +#define ES_TXUNRN 0x8000 // Tx Underrun + + +// Receive Control Register +/* BANK 0 */ +#define RCR_REG SMC_REG(0x0004, 0) +#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted +#define RCR_PRMS 0x0002 // Enable promiscuous mode +#define RCR_ALMUL 0x0004 // When set accepts all multicast frames +#define RCR_RXEN 0x0100 // IFF this is set, we can receive packets +#define RCR_STRIP_CRC 0x0200 // When set strips CRC from rx packets +#define RCR_ABORT_ENB 0x0200 // When set will abort rx on collision +#define RCR_FILT_CAR 0x0400 // When set filters leading 12 bit s of carrier +#define RCR_SOFTRST 0x8000 // resets the chip + +/* the normal settings for the RCR register : */ +#define RCR_DEFAULT (RCR_STRIP_CRC | RCR_RXEN) +#define RCR_CLEAR 0x0 // set it to a base state + + +// Counter Register +/* BANK 0 */ +#define COUNTER_REG SMC_REG(0x0006, 0) + + +// Memory Information Register +/* BANK 0 */ +#define MIR_REG SMC_REG(0x0008, 0) + + +// Receive/Phy Control Register +/* BANK 0 */ +#define RPC_REG SMC_REG(0x000A, 0) +#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode. 
+#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode +#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode +#define RPC_LSXA_SHFT 5 // Bits to shift LS2A,LS1A,LS0A to lsb +#define RPC_LSXB_SHFT 2 // Bits to get LS2B,LS1B,LS0B to lsb +#define RPC_LED_100_10 (0x00) // LED = 100Mbps OR's with 10Mbps link detect +#define RPC_LED_RES (0x01) // LED = Reserved +#define RPC_LED_10 (0x02) // LED = 10Mbps link detect +#define RPC_LED_FD (0x03) // LED = Full Duplex Mode +#define RPC_LED_TX_RX (0x04) // LED = TX or RX packet occurred +#define RPC_LED_100 (0x05) // LED = 100Mbps link dectect +#define RPC_LED_TX (0x06) // LED = TX packet occurred +#define RPC_LED_RX (0x07) // LED = RX packet occurred + +#ifndef RPC_LSA_DEFAULT +#define RPC_LSA_DEFAULT RPC_LED_100 +#endif +#ifndef RPC_LSB_DEFAULT +#define RPC_LSB_DEFAULT RPC_LED_FD +#endif + +#define RPC_DEFAULT (RPC_ANEG | (RPC_LSA_DEFAULT << RPC_LSXA_SHFT) | (RPC_LSB_DEFAULT << RPC_LSXB_SHFT) | RPC_SPEED | RPC_DPLX) + + +/* Bank 0 0x0C is reserved */ + +// Bank Select Register +/* All Banks */ +#define BSR_REG 0x000E + + +// Configuration Reg +/* BANK 1 */ +#define CONFIG_REG SMC_REG(0x0000, 1) +#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy +#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL +#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus +#define CONFIG_EPH_POWER_EN 0x8000 // When 0 EPH is placed into low power mode. + +// Default is powered-up, Internal Phy, Wait States, and pin nCNTRL=low +#define CONFIG_DEFAULT (CONFIG_EPH_POWER_EN) + + +// Base Address Register +/* BANK 1 */ +#define BASE_REG SMC_REG(0x0002, 1) + + +// Individual Address Registers +/* BANK 1 */ +#define ADDR0_REG SMC_REG(0x0004, 1) +#define ADDR1_REG SMC_REG(0x0006, 1) +#define ADDR2_REG SMC_REG(0x0008, 1) + + +// General Purpose Register +/* BANK 1 */ +#define GP_REG SMC_REG(0x000A, 1) + + +// Control Register +/* BANK 1 */ +#define CTL_REG SMC_REG(0x000C, 1) +#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received +#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically +#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt +#define CTL_CR_ENABLE 0x0040 // When 1 enables Counter Rollover interrupt +#define CTL_TE_ENABLE 0x0020 // When 1 enables Transmit Error interrupt +#define CTL_EEPROM_SELECT 0x0004 // Controls EEPROM reload & store +#define CTL_RELOAD 0x0002 // When set reads EEPROM into registers +#define CTL_STORE 0x0001 // When set stores registers into EEPROM + + +// MMU Command Register +/* BANK 2 */ +#define MMU_CMD_REG SMC_REG(0x0000, 2) +#define MC_BUSY 1 // When 1 the last release has not completed +#define MC_NOP (0<<5) // No Op +#define MC_ALLOC (1<<5) // OR with number of 256 byte packets +#define MC_RESET (2<<5) // Reset MMU to initial state +#define MC_REMOVE (3<<5) // Remove the current rx packet +#define MC_RELEASE (4<<5) // Remove and release the current rx packet +#define MC_FREEPKT (5<<5) // Release packet in PNR register +#define MC_ENQUEUE (6<<5) // Enqueue the packet for transmit +#define MC_RSTTXFIFO (7<<5) // Reset the TX FIFOs + + +// Packet Number Register +/* BANK 2 */ +#define PN_REG SMC_REG(0x0002, 2) + + +// Allocation Result Register +/* BANK 2 */ +#define AR_REG SMC_REG(0x0003, 2) +#define AR_FAILED 0x80 // Alocation Failed + + +// TX FIFO Ports Register +/* BANK 2 */ +#define TXFIFO_REG SMC_REG(0x0004, 2) +#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty + +// RX FIFO Ports Register +/* BANK 2 */ +#define RXFIFO_REG 
SMC_REG(0x0005, 2) +#define RXFIFO_REMPTY 0x80 // RX FIFO Empty + +#define FIFO_REG SMC_REG(0x0004, 2) + +// Pointer Register +/* BANK 2 */ +#define PTR_REG SMC_REG(0x0006, 2) +#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area +#define PTR_AUTOINC 0x4000 // Auto increment the pointer on each access +#define PTR_READ 0x2000 // When 1 the operation is a read + + +// Data Register +/* BANK 2 */ +#define DATA_REG SMC_REG(0x0008, 2) + + +// Interrupt Status/Acknowledge Register +/* BANK 2 */ +#define INT_REG SMC_REG(0x000C, 2) + + +// Interrupt Mask Register +/* BANK 2 */ +#define IM_REG SMC_REG(0x000D, 2) +#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt +#define IM_ERCV_INT 0x40 // Early Receive Interrupt +#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section +#define IM_RX_OVRN_INT 0x10 // Set by Receiver Overruns +#define IM_ALLOC_INT 0x08 // Set when allocation request is completed +#define IM_TX_EMPTY_INT 0x04 // Set if the TX FIFO goes empty +#define IM_TX_INT 0x02 // Transmit Interrupt +#define IM_RCV_INT 0x01 // Receive Interrupt + + +// Multicast Table Registers +/* BANK 3 */ +#define MCAST_REG1 SMC_REG(0x0000, 3) +#define MCAST_REG2 SMC_REG(0x0002, 3) +#define MCAST_REG3 SMC_REG(0x0004, 3) +#define MCAST_REG4 SMC_REG(0x0006, 3) + + +// Management Interface Register (MII) +/* BANK 3 */ +#define MII_REG SMC_REG(0x0008, 3) +#define MII_MSK_CRS100 0x4000 // Disables CRS100 detection during tx half dup +#define MII_MDOE 0x0008 // MII Output Enable +#define MII_MCLK 0x0004 // MII Clock, pin MDCLK +#define MII_MDI 0x0002 // MII Input, pin MDI +#define MII_MDO 0x0001 // MII Output, pin MDO + + +// Revision Register +/* BANK 3 */ +/* ( hi: chip id low: rev # ) */ +#define REV_REG SMC_REG(0x000A, 3) + + +// Early RCV Register +/* BANK 3 */ +/* this is NOT on SMC9192 */ +#define ERCV_REG SMC_REG(0x000C, 3) +#define ERCV_RCV_DISCRD 0x0080 // When 1 discards a packet being received +#define ERCV_THRESHOLD 0x001F // ERCV Threshold Mask + + +// External Register +/* BANK 7 */ +#define EXT_REG SMC_REG(0x0000, 7) + + +#define CHIP_9192 3 +#define CHIP_9194 4 +#define CHIP_9195 5 +#define CHIP_9196 6 +#define CHIP_91100 7 +#define CHIP_91100FD 8 +#define CHIP_91111FD 9 + +static const char * chip_ids[ 16 ] = { + NULL, NULL, NULL, + /* 3 */ "SMC91C90/91C92", + /* 4 */ "SMC91C94", + /* 5 */ "SMC91C95", + /* 6 */ "SMC91C96", + /* 7 */ "SMC91C100", + /* 8 */ "SMC91C100FD", + /* 9 */ "SMC91C11xFD", + NULL, NULL, NULL, + NULL, NULL, NULL}; + + +/* + . Transmit status bits +*/ +#define TS_SUCCESS 0x0001 +#define TS_LOSTCAR 0x0400 +#define TS_LATCOL 0x0200 +#define TS_16COL 0x0010 + +/* + . Receive status bits +*/ +#define RS_ALGNERR 0x8000 +#define RS_BRODCAST 0x4000 +#define RS_BADCRC 0x2000 +#define RS_ODDFRAME 0x1000 +#define RS_TOOLONG 0x0800 +#define RS_TOOSHORT 0x0400 +#define RS_MULTICAST 0x0001 +#define RS_ERRORS (RS_ALGNERR | RS_BADCRC | RS_TOOLONG | RS_TOOSHORT) + + +/* + * PHY IDs + * LAN83C183 == LAN91C111 Internal PHY + */ +#define PHY_LAN83C183 0x0016f840 +#define PHY_LAN83C180 0x02821c50 + +/* + * PHY Register Addresses (LAN91C111 Internal PHY) + * + * Generic PHY registers can be found in + * + * These phy registers are specific to our on-board phy. 
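For the PHY IDs just defined: smc_detect_phy() combines the two MII ID registers as id1 << 16 | id2, and smc_probe() masks off the low nibble (the PHY revision in the usual MII layout) before comparing against these constants. An illustrative decode with made-up ID register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t id1 = 0x0016, id2 = 0xf841;	/* hypothetical MII_PHYSID1/2 readback */
	uint32_t phy_type = (uint32_t)id1 << 16 | id2;

	if ((phy_type & 0xfffffff0) == 0x0016f840)	/* PHY_LAN83C183 */
		printf("LAN83C183 (LAN91C111 internal PHY), rev %u\n",
		       (unsigned)(phy_type & 0xf));
	else if ((phy_type & 0xfffffff0) == 0x02821c50)	/* PHY_LAN83C180 */
		printf("LAN83C180, rev %u\n", (unsigned)(phy_type & 0xf));
	else
		printf("unknown PHY id 0x%08x\n", (unsigned)phy_type);
	return 0;
}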
+ */ + +// PHY Configuration Register 1 +#define PHY_CFG1_REG 0x10 +#define PHY_CFG1_LNKDIS 0x8000 // 1=Rx Link Detect Function disabled +#define PHY_CFG1_XMTDIS 0x4000 // 1=TP Transmitter Disabled +#define PHY_CFG1_XMTPDN 0x2000 // 1=TP Transmitter Powered Down +#define PHY_CFG1_BYPSCR 0x0400 // 1=Bypass scrambler/descrambler +#define PHY_CFG1_UNSCDS 0x0200 // 1=Unscramble Idle Reception Disable +#define PHY_CFG1_EQLZR 0x0100 // 1=Rx Equalizer Disabled +#define PHY_CFG1_CABLE 0x0080 // 1=STP(150ohm), 0=UTP(100ohm) +#define PHY_CFG1_RLVL0 0x0040 // 1=Rx Squelch level reduced by 4.5db +#define PHY_CFG1_TLVL_SHIFT 2 // Transmit Output Level Adjust +#define PHY_CFG1_TLVL_MASK 0x003C +#define PHY_CFG1_TRF_MASK 0x0003 // Transmitter Rise/Fall time + + +// PHY Configuration Register 2 +#define PHY_CFG2_REG 0x11 +#define PHY_CFG2_APOLDIS 0x0020 // 1=Auto Polarity Correction disabled +#define PHY_CFG2_JABDIS 0x0010 // 1=Jabber disabled +#define PHY_CFG2_MREG 0x0008 // 1=Multiple register access (MII mgt) +#define PHY_CFG2_INTMDIO 0x0004 // 1=Interrupt signaled with MDIO pulseo + +// PHY Status Output (and Interrupt status) Register +#define PHY_INT_REG 0x12 // Status Output (Interrupt Status) +#define PHY_INT_INT 0x8000 // 1=bits have changed since last read +#define PHY_INT_LNKFAIL 0x4000 // 1=Link Not detected +#define PHY_INT_LOSSSYNC 0x2000 // 1=Descrambler has lost sync +#define PHY_INT_CWRD 0x1000 // 1=Invalid 4B5B code detected on rx +#define PHY_INT_SSD 0x0800 // 1=No Start Of Stream detected on rx +#define PHY_INT_ESD 0x0400 // 1=No End Of Stream detected on rx +#define PHY_INT_RPOL 0x0200 // 1=Reverse Polarity detected +#define PHY_INT_JAB 0x0100 // 1=Jabber detected +#define PHY_INT_SPDDET 0x0080 // 1=100Base-TX mode, 0=10Base-T mode +#define PHY_INT_DPLXDET 0x0040 // 1=Device in Full Duplex + +// PHY Interrupt/Status Mask Register +#define PHY_MASK_REG 0x13 // Interrupt Mask +// Uses the same bit definitions as PHY_INT_REG + + +/* + * SMC91C96 ethernet config and status registers. + * These are in the "attribute" space. + */ +#define ECOR 0x8000 +#define ECOR_RESET 0x80 +#define ECOR_LEVEL_IRQ 0x40 +#define ECOR_WR_ATTRIB 0x04 +#define ECOR_ENABLE 0x01 + +#define ECSR 0x8002 +#define ECSR_IOIS8 0x20 +#define ECSR_PWRDWN 0x04 +#define ECSR_INT 0x02 + +#define ATTRIB_SIZE ((64*1024) << SMC_IO_SHIFT) + + +/* + * Macros to abstract register access according to the data bus + * capabilities. Please use those and not the in/out primitives. + * Note: the following macros do *not* select the bank -- this must + * be done separately as needed in the main code. The SMC_REG() macro + * only uses the bank argument for debugging purposes (when enabled). 
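The accessors defined below all take the I/O base from a local variable named ioaddr in the calling function and, as noted above, never switch banks themselves; callers bracket accesses with a save/select/restore of the bank register. A small usage sketch, with a made-up function name, mirroring what smc_phy_check_media() does over in smc91x.c:

/* Illustrative caller: update the bank-0 TCR register and put the bank
 * register back the way it was found.  Relies on the SMC_* macros below
 * picking up the local "ioaddr" variable.
 */
static void sketch_update_tcr(unsigned long ioaddr, unsigned int tcr_mode)
{
	unsigned int old_bank;

	old_bank = SMC_CURRENT_BANK();	/* remember the caller's bank */
	SMC_SELECT_BANK(0);		/* TCR lives in bank 0        */
	SMC_SET_TCR(tcr_mode);
	SMC_SELECT_BANK(old_bank);	/* restore before returning   */
}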
+ */ + +#if SMC_DEBUG > 0 +#define SMC_REG(reg, bank) \ + ({ \ + int __b = SMC_CURRENT_BANK(); \ + if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \ + printk( "%s: bank reg screwed (0x%04x)\n", \ + CARDNAME, __b ); \ + BUG(); \ + } \ + reg<> 8) +#define SMC_GET_TXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) & 0xFF) +#define SMC_GET_RXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) >> 8) +#define SMC_GET_INT() (SMC_inw( ioaddr, INT_REG ) & 0xFF) +#define SMC_ACK_INT(x) \ + do { \ + unsigned long __flags; \ + int __mask; \ + local_irq_save(__flags); \ + __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \ + SMC_outw( __mask | (x), ioaddr, INT_REG ); \ + local_irq_restore(__flags); \ + } while (0) +#define SMC_GET_INT_MASK() (SMC_inw( ioaddr, INT_REG ) >> 8) +#define SMC_SET_INT_MASK(x) SMC_outw( (x) << 8, ioaddr, INT_REG ) +#endif + +#define SMC_CURRENT_BANK() SMC_inw( ioaddr, BANK_SELECT ) +#define SMC_SELECT_BANK(x) SMC_outw( x, ioaddr, BANK_SELECT ) +#define SMC_GET_BASE() SMC_inw( ioaddr, BASE_REG ) +#define SMC_SET_BASE(x) SMC_outw( x, ioaddr, BASE_REG ) +#define SMC_GET_CONFIG() SMC_inw( ioaddr, CONFIG_REG ) +#define SMC_SET_CONFIG(x) SMC_outw( x, ioaddr, CONFIG_REG ) +#define SMC_GET_COUNTER() SMC_inw( ioaddr, COUNTER_REG ) +#define SMC_GET_CTL() SMC_inw( ioaddr, CTL_REG ) +#define SMC_SET_CTL(x) SMC_outw( x, ioaddr, CTL_REG ) +#define SMC_GET_MII() SMC_inw( ioaddr, MII_REG ) +#define SMC_SET_MII(x) SMC_outw( x, ioaddr, MII_REG ) +#define SMC_GET_MIR() SMC_inw( ioaddr, MIR_REG ) +#define SMC_SET_MIR(x) SMC_outw( x, ioaddr, MIR_REG ) +#define SMC_GET_MMU_CMD() SMC_inw( ioaddr, MMU_CMD_REG ) +#define SMC_SET_MMU_CMD(x) SMC_outw( x, ioaddr, MMU_CMD_REG ) +#define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG ) +#define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG ) +#define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG ) +#define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG ) +#define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG ) +#define SMC_GET_REV() SMC_inw( ioaddr, REV_REG ) +#define SMC_GET_RPC() SMC_inw( ioaddr, RPC_REG ) +#define SMC_SET_RPC(x) SMC_outw( x, ioaddr, RPC_REG ) +#define SMC_GET_TCR() SMC_inw( ioaddr, TCR_REG ) +#define SMC_SET_TCR(x) SMC_outw( x, ioaddr, TCR_REG ) + +#ifndef SMC_GET_MAC_ADDR +#define SMC_GET_MAC_ADDR(addr) \ + do { \ + unsigned int __v; \ + __v = SMC_inw( ioaddr, ADDR0_REG ); \ + addr[0] = __v; addr[1] = __v >> 8; \ + __v = SMC_inw( ioaddr, ADDR1_REG ); \ + addr[2] = __v; addr[3] = __v >> 8; \ + __v = SMC_inw( ioaddr, ADDR2_REG ); \ + addr[4] = __v; addr[5] = __v >> 8; \ + } while (0) +#endif + +#define SMC_SET_MAC_ADDR(addr) \ + do { \ + SMC_outw( addr[0]|(addr[1] << 8), ioaddr, ADDR0_REG ); \ + SMC_outw( addr[2]|(addr[3] << 8), ioaddr, ADDR1_REG ); \ + SMC_outw( addr[4]|(addr[5] << 8), ioaddr, ADDR2_REG ); \ + } while (0) + +#define SMC_CLEAR_MCAST() \ + do { \ + SMC_outw( 0, ioaddr, MCAST_REG1 ); \ + SMC_outw( 0, ioaddr, MCAST_REG2 ); \ + SMC_outw( 0, ioaddr, MCAST_REG3 ); \ + SMC_outw( 0, ioaddr, MCAST_REG4 ); \ + } while (0) +#define SMC_SET_MCAST(x) \ + do { \ + unsigned char *mt = (x); \ + SMC_outw( mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1 ); \ + SMC_outw( mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2 ); \ + SMC_outw( mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3 ); \ + SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \ + } while (0) + +#if SMC_CAN_USE_32BIT +/* + * Some setups just can't write 8 or 16 bits reliably when not aligned + * to a 32 bit boundary. I tell you that exists! 
+ * We re-do the ones here that can be easily worked around if they can have + * their low parts written to 0 without adverse effects. + */ +#undef SMC_SELECT_BANK +#define SMC_SELECT_BANK(x) SMC_outl( (x)<<16, ioaddr, 12<> 16; \ + } while (0) +#else +#define SMC_PUT_PKT_HDR(status, length) \ + do { \ + SMC_outw( status, ioaddr, DATA_REG ); \ + SMC_outw( length, ioaddr, DATA_REG ); \ + } while (0) +#define SMC_GET_PKT_HDR(status, length) \ + do { \ + (status) = SMC_inw( ioaddr, DATA_REG ); \ + (length) = SMC_inw( ioaddr, DATA_REG ); \ + } while (0) +#endif + +#if SMC_CAN_USE_32BIT +#define SMC_PUSH_DATA(p, l) \ + do { \ + char *__ptr = (p); \ + int __len = (l); \ + if (__len >= 2 && (long)__ptr & 2) { \ + __len -= 2; \ + SMC_outw( *((u16 *)__ptr)++, ioaddr, DATA_REG );\ + } \ + SMC_outsl( ioaddr, DATA_REG, __ptr, __len >> 2); \ + if (__len & 2) { \ + __ptr += (__len & ~3); \ + SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \ + } \ + } while (0) +#define SMC_PULL_DATA(p, l) \ + do { \ + char *__ptr = (p); \ + int __len = (l); \ + if ((long)__ptr & 2) { \ + /* \ + * We want 32bit alignment here. \ + * Since some buses perform a full 32bit \ + * fetch even for 16bit data we can't use \ + * SMC_inw() here. Back both source (on chip \ + * and destination) pointers of 2 bytes. \ + */ \ + (long)__ptr &= ~2; \ + __len += 2; \ + SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \ + } \ + __len += 2; \ + SMC_insl( ioaddr, DATA_REG, __ptr, __len >> 2); \ + } while (0) +#elif SMC_CAN_USE_16BIT +#define SMC_PUSH_DATA(p, l) SMC_outsw( ioaddr, DATA_REG, p, (l) >> 1 ) +#define SMC_PULL_DATA(p, l) SMC_insw ( ioaddr, DATA_REG, p, (l) >> 1 ) +#elif SMC_CAN_USE_8BIT +#define SMC_PUSH_DATA(p, l) SMC_outsb( ioaddr, DATA_REG, p, l ) +#define SMC_PULL_DATA(p, l) SMC_insb ( ioaddr, DATA_REG, p, l ) +#endif + +#if ! SMC_CAN_USE_16BIT +#define SMC_outw(x, ioaddr, reg) \ + do { \ + unsigned int __val16 = (x); \ + SMC_outb( __val16, ioaddr, reg ); \ + SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ + } while (0) +#define SMC_inw(ioaddr, reg) \ + ({ \ + unsigned int __val16; \ + __val16 = SMC_inb( ioaddr, reg ); \ + __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \ + __val16; \ + }) +#endif + + +#endif /* _SMC91X_H_ */ diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c new file mode 100644 index 000000000..3356fd8de --- /dev/null +++ b/drivers/net/via-velocity.c @@ -0,0 +1,3277 @@ +/* + * This code is derived from the VIA reference driver (copyright message + * below) provided to Red Hat by VIA Networking Technologies, Inc. for + * addition to the Linux kernel. + * + * The code has been merged into one source file, cleaned up to follow + * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned + * for 64bit hardware platforms. + * + * TODO + * Big-endian support + * rx_copybreak/alignment + * Scatter gather + * More testing + * + * The changes are (c) Copyright 2004, Red Hat Inc. + * Additional fixes and clean up: Francois Romieu + * + * This source has not been verified for use in safety critical systems. + * + * Please direct queries about the revamped driver to the linux-kernel + * list not VIA. + * + * Original code: + * + * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. + * All rights reserved. + * + * This software may be redistributed and/or modified under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or + * any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Author: Chuang Liang-Shing, AJ Jiang + * + * Date: Jan 24, 2003 + * + * MODULE_LICENSE("GPL"); + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "via-velocity.h" + + +static int velocity_nics = 0; +static int msglevel = MSG_LEVEL_INFO; + + +static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +static struct ethtool_ops velocity_ethtool_ops; + +/* + Define module options +*/ + +MODULE_AUTHOR("VIA Networking Technologies, Inc."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"); + +#define VELOCITY_PARAM(N,D) \ + static const int N[MAX_UNITS]=OPTION_DEFAULT;\ + MODULE_PARM(N, "1-" __MODULE_STRING(MAX_UNITS) "i");\ + MODULE_PARM_DESC(N, D); + +#define RX_DESC_MIN 64 +#define RX_DESC_MAX 255 +#define RX_DESC_DEF 64 +VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors"); + +#define TX_DESC_MIN 16 +#define TX_DESC_MAX 256 +#define TX_DESC_DEF 64 +VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors"); + +#define VLAN_ID_MIN 0 +#define VLAN_ID_MAX 4095 +#define VLAN_ID_DEF 0 +/* VID_setting[] is used for setting the VID of NIC. + 0: default VID. + 1-4094: other VIDs. +*/ +VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID"); + +#define RX_THRESH_MIN 0 +#define RX_THRESH_MAX 3 +#define RX_THRESH_DEF 0 +/* rx_thresh[] is used for controlling the receive fifo threshold. + 0: indicate the rxfifo threshold is 128 bytes. + 1: indicate the rxfifo threshold is 512 bytes. + 2: indicate the rxfifo threshold is 1024 bytes. + 3: indicate the rxfifo threshold is store & forward. +*/ +VELOCITY_PARAM(rx_thresh, "Receive fifo threshold"); + +#define DMA_LENGTH_MIN 0 +#define DMA_LENGTH_MAX 7 +#define DMA_LENGTH_DEF 0 + +/* DMA_length[] is used for controlling the DMA length + 0: 8 DWORDs + 1: 16 DWORDs + 2: 32 DWORDs + 3: 64 DWORDs + 4: 128 DWORDs + 5: 256 DWORDs + 6: SF(flush till emply) + 7: SF(flush till emply) +*/ +VELOCITY_PARAM(DMA_length, "DMA length"); + +#define TAGGING_DEF 0 +/* enable_tagging[] is used for enabling 802.1Q VID tagging. + 0: disable VID seeting(default). + 1: enable VID setting. +*/ +VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging"); + +#define IP_ALIG_DEF 0 +/* IP_byte_align[] is used for IP header DWORD byte aligned + 0: indicate the IP header won't be DWORD byte aligned.(Default) . + 1: indicate the IP header will be DWORD byte aligned. + In some enviroment, the IP header should be DWORD byte aligned, + or the packet will be droped when we receive it. (eg: IPVS) +*/ +VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned"); + +#define TX_CSUM_DEF 1 +/* txcsum_offload[] is used for setting the checksum offload ability of NIC. + (We only support RX checksum offload now) + 0: disable csum_offload[checksum offload + 1: enable checksum offload. 
(Default) +*/ +VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload"); + +#define FLOW_CNTL_DEF 1 +#define FLOW_CNTL_MIN 1 +#define FLOW_CNTL_MAX 5 + +/* flow_control[] is used for setting the flow control ability of NIC. + 1: hardware deafult - AUTO (default). Use Hardware default value in ANAR. + 2: enable TX flow control. + 3: enable RX flow control. + 4: enable RX/TX flow control. + 5: disable +*/ +VELOCITY_PARAM(flow_control, "Enable flow control ability"); + +#define MED_LNK_DEF 0 +#define MED_LNK_MIN 0 +#define MED_LNK_MAX 4 +/* speed_duplex[] is used for setting the speed and duplex mode of NIC. + 0: indicate autonegotiation for both speed and duplex mode + 1: indicate 100Mbps half duplex mode + 2: indicate 100Mbps full duplex mode + 3: indicate 10Mbps half duplex mode + 4: indicate 10Mbps full duplex mode + + Note: + if EEPROM have been set to the force mode, this option is ignored + by driver. +*/ +VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); + +#define VAL_PKT_LEN_DEF 0 +/* ValPktLen[] is used for setting the checksum offload ability of NIC. + 0: Receive frame with invalid layer 2 length (Default) + 1: Drop frame with invalid layer 2 length +*/ +VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame"); + +#define WOL_OPT_DEF 0 +#define WOL_OPT_MIN 0 +#define WOL_OPT_MAX 7 +/* wol_opts[] is used for controlling wake on lan behavior. + 0: Wake up if recevied a magic packet. (Default) + 1: Wake up if link status is on/off. + 2: Wake up if recevied an arp packet. + 4: Wake up if recevied any unicast packet. + Those value can be sumed up to support more than one option. +*/ +VELOCITY_PARAM(wol_opts, "Wake On Lan options"); + +#define INT_WORKS_DEF 20 +#define INT_WORKS_MIN 10 +#define INT_WORKS_MAX 64 + +VELOCITY_PARAM(int_works, "Number of packets per interrupt services"); + +static int velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent); +static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info); +static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev); +static void velocity_print_info(struct velocity_info *vptr); +static int velocity_open(struct net_device *dev); +static int velocity_change_mtu(struct net_device *dev, int mtu); +static int velocity_xmit(struct sk_buff *skb, struct net_device *dev); +static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs); +static void velocity_set_multi(struct net_device *dev); +static struct net_device_stats *velocity_get_stats(struct net_device *dev); +static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int velocity_close(struct net_device *dev); +static int velocity_rx_srv(struct velocity_info *vptr, int status); +static int velocity_receive_frame(struct velocity_info *, int idx); +static int velocity_alloc_rx_buf(struct velocity_info *, int idx); +static void velocity_init_registers(struct velocity_info *vptr, enum velocity_init_type type); +static void velocity_free_rd_ring(struct velocity_info *vptr); +static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *); +static int velocity_soft_reset(struct velocity_info *vptr); +static void mii_init(struct velocity_info *vptr, u32 mii_status); +static u32 velocity_get_opt_media_mode(struct velocity_info *vptr); +static void velocity_print_link_status(struct velocity_info *vptr); +static void safe_disable_mii_autopoll(struct mac_regs * regs); +static void 
velocity_shutdown(struct velocity_info *vptr); +static void enable_flow_control_ability(struct velocity_info *vptr); +static void enable_mii_autopoll(struct mac_regs * regs); +static int velocity_mii_read(struct mac_regs *, u8 byIdx, u16 * pdata); +static int velocity_mii_write(struct mac_regs *, u8 byMiiAddr, u16 data); +static int velocity_set_wol(struct velocity_info *vptr); +static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context); +static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context); +static u32 mii_check_media_mode(struct mac_regs * regs); +static u32 check_connection_type(struct mac_regs * regs); +static void velocity_init_cam_filter(struct velocity_info *vptr); +static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status); + +#ifdef CONFIG_PM +static int velocity_suspend(struct pci_dev *pdev, u32 state); +static int velocity_resume(struct pci_dev *pdev); + +static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr); + +static struct notifier_block velocity_inetaddr_notifier = { + notifier_call:velocity_netdev_event, +}; + +#endif /* CONFIG_PM */ + +/* + * Internal board variants. At the moment we have only one + */ + +static struct velocity_info_tbl chip_info_table[] = { + {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1, 0x00FFFFFFUL}, + {0, NULL} +}; + +/* + * Describe the PCI device identifiers that we support in this + * device driver. Used for hotplug autoloading. + */ + +static struct pci_device_id velocity_id_table[] __devinitdata = { + {0x1106, 0x3119, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &chip_info_table[0]}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, velocity_id_table); + +/** + * get_chip_name - identifier to name + * @id: chip identifier + * + * Given a chip identifier return a suitable description. Returns + * a pointer a static string valid while the driver is loaded. + */ + +static char __devinit *get_chip_name(enum chip_type chip_id) +{ + int i; + for (i = 0; chip_info_table[i].name != NULL; i++) + if (chip_info_table[i].chip_id == chip_id) + break; + return chip_info_table[i].name; +} + +/** + * velocity_remove1 - device unplug + * @pdev: PCI device being removed + * + * Device unload callback. Called on an unplug or on module + * unload for each active device that is present. Disconnects + * the device from the network layer and frees all the resources + */ + +static void __devexit velocity_remove1(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct velocity_info *vptr = dev->priv; + + unregister_netdev(dev); + iounmap(vptr->mac_regs); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(dev); +} + +/** + * velocity_set_int_opt - parser for integer options + * @opt: pointer to option value + * @val: value the user requested (or -1 for default) + * @min: lowest value allowed + * @max: highest value allowed + * @def: default value + * @name: property name + * @dev: device name + * + * Set an integer property in the module options. This function does + * all the verification and checking as well as reporting so that + * we don't duplicate code for each option. 
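+ *
+ * For instance (numbers purely illustrative), an out of range request
+ * such as
+ *
+ *	velocity_set_int_opt(&opts->numrx, 1024, RX_DESC_MIN,
+ *		RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
+ *
+ * logs a notice and falls back to RX_DESC_DEF, while -1 (parameter
+ * left unset) silently selects the default.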
+ */ + +static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, char *devname) +{ + if (val == -1) + *opt = def; + else if (val < min || val > max) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n", + devname, name, min, max); + *opt = def; + } else { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n", + devname, name, val); + *opt = val; + } +} + +/** + * velocity_set_bool_opt - parser for boolean options + * @opt: pointer to option value + * @val: value the user requested (or -1 for default) + * @def: default value (yes/no) + * @flag: numeric value to set for true. + * @name: property name + * @dev: device name + * + * Set a boolean property in the module options. This function does + * all the verification and checking as well as reporting so that + * we don't duplicate code for each option. + */ + +static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, char *devname) +{ + (*opt) &= (~flag); + if (val == -1) + *opt |= (def ? flag : 0); + else if (val < 0 || val > 1) { + printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n", + devname, name); + *opt |= (def ? flag : 0); + } else { + printk(KERN_INFO "%s: set parameter %s to %s\n", + devname, name, val ? "TRUE" : "FALSE"); + *opt |= (val ? flag : 0); + } +} + +/** + * velocity_get_options - set options on device + * @opts: option structure for the device + * @index: index of option to use in module options array + * @devname: device name + * + * Turn the module and command options into a single structure + * for the current device + */ + +static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname) +{ + + velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname); + velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname); + velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname); + velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname); + velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting", devname); + velocity_set_bool_opt(&opts->flags, enable_tagging[index], TAGGING_DEF, VELOCITY_FLAGS_TAGGING, "enable_tagging", devname); + velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname); + velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); + velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); + velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); + velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); + velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); + velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname); + opts->numrx = (opts->numrx & 
~3); +} + +/** + * velocity_init_cam_filter - initialise CAM + * @vptr: velocity to program + * + * Initialize the content addressable memory used for filters. Load + * appropriately according to the presence of VLAN + */ + +static void velocity_init_cam_filter(struct velocity_info *vptr) +{ + struct mac_regs * regs = vptr->mac_regs; + + /* T urn on MCFG_PQEN, turn off MCFG_RTGOPT */ + WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, ®s->MCFG); + WORD_REG_BITS_ON(MCFG_VIDFR, ®s->MCFG); + + /* Disable all CAMs */ + memset(vptr->vCAMmask, 0, sizeof(u8) * 8); + memset(vptr->mCAMmask, 0, sizeof(u8) * 8); + mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); + mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + + /* Enable first VCAM */ + if (vptr->flags & VELOCITY_FLAGS_TAGGING) { + /* If Tagging option is enabled and VLAN ID is not zero, then + turn on MCFG_RTGOPT also */ + if (vptr->options.vid != 0) + WORD_REG_BITS_ON(MCFG_RTGOPT, ®s->MCFG); + + mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid), VELOCITY_VLAN_ID_CAM); + vptr->vCAMmask[0] |= 1; + mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); + } else { + u16 temp = 0; + mac_set_cam(regs, 0, (u8 *) &temp, VELOCITY_VLAN_ID_CAM); + temp = 1; + mac_set_cam_mask(regs, (u8 *) &temp, VELOCITY_VLAN_ID_CAM); + } +} + +/** + * velocity_rx_reset - handle a receive reset + * @vptr: velocity we are resetting + * + * Reset the ownership and status for the receive ring side. + * Hand all the receive queue to the NIC. + */ + +static void velocity_rx_reset(struct velocity_info *vptr) +{ + + struct mac_regs * regs = vptr->mac_regs; + int i; + + vptr->rd_used = vptr->rd_curr = 0; + + /* + * Init state, all RD entries belong to the NIC + */ + for (i = 0; i < vptr->options.numrx; ++i) + vptr->rd_ring[i].rdesc0.owner = cpu_to_le32(OWNED_BY_NIC); + + writew(vptr->options.numrx, ®s->RBRDU); + writel(vptr->rd_pool_dma, ®s->RDBaseLo); + writew(0, ®s->RDIdx); + writew(vptr->options.numrx - 1, ®s->RDCSize); +} + +/** + * velocity_init_registers - initialise MAC registers + * @vptr: velocity to init + * @type: type of initialisation (hot or cold) + * + * Initialise the MAC on a reset or on first set up on the + * hardware. + */ + +static void velocity_init_registers(struct velocity_info *vptr, + enum velocity_init_type type) +{ + struct mac_regs * regs = vptr->mac_regs; + int i, mii_status; + + mac_wol_reset(regs); + + switch (type) { + case VELOCITY_INIT_RESET: + case VELOCITY_INIT_WOL: + + netif_stop_queue(vptr->dev); + + /* + * Reset RX to prevent RX pointer not on the 4X location + */ + velocity_rx_reset(vptr); + mac_rx_queue_run(regs); + mac_rx_queue_wake(regs); + + mii_status = velocity_get_opt_media_mode(vptr); + if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { + velocity_print_link_status(vptr); + if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) + netif_wake_queue(vptr->dev); + } + + enable_flow_control_ability(vptr); + + mac_clear_isr(regs); + writel(CR0_STOP, ®s->CR0Clr); + writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), + ®s->CR0Set); + + break; + + case VELOCITY_INIT_COLD: + default: + /* + * Do reset + */ + velocity_soft_reset(vptr); + mdelay(5); + + mac_eeprom_reload(regs); + for (i = 0; i < 6; i++) { + writeb(vptr->dev->dev_addr[i], &(regs->PAR[i])); + } + /* + * clear Pre_ACPI bit. 
+ */ + BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA)); + mac_set_rx_thresh(regs, vptr->options.rx_thresh); + mac_set_dma_length(regs, vptr->options.DMA_length); + + writeb(WOLCFG_SAM | WOLCFG_SAB, ®s->WOLCFGSet); + /* + * Bback off algorithm use original IEEE standard + */ + BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), ®s->CFGB); + + /* + * Set packet filter: Receive directed and broadcast address + */ + velocity_set_multi(vptr->dev); + + /* + * Enable MII auto-polling + */ + enable_mii_autopoll(regs); + + vptr->int_mask = INT_MASK_DEF; + + writel(cpu_to_le32(vptr->rd_pool_dma), ®s->RDBaseLo); + writew(vptr->options.numrx - 1, ®s->RDCSize); + mac_rx_queue_run(regs); + mac_rx_queue_wake(regs); + + writew(vptr->options.numtx - 1, ®s->TDCSize); + + for (i = 0; i < vptr->num_txq; i++) { + writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); + mac_tx_queue_run(regs, i); + } + + velocity_init_cam_filter(vptr); + + init_flow_control_register(vptr); + + writel(CR0_STOP, ®s->CR0Clr); + writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), ®s->CR0Set); + + mii_status = velocity_get_opt_media_mode(vptr); + netif_stop_queue(vptr->dev); + mac_clear_isr(regs); + + mii_init(vptr, mii_status); + + if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { + velocity_print_link_status(vptr); + if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) + netif_wake_queue(vptr->dev); + } + + enable_flow_control_ability(vptr); + mac_hw_mibs_init(regs); + mac_write_int_mask(vptr->int_mask, regs); + mac_clear_isr(regs); + + } +} + +/** + * velocity_soft_reset - soft reset + * @vptr: velocity to reset + * + * Kick off a soft reset of the velocity adapter and then poll + * until the reset sequence has completed before returning. + */ + +static int velocity_soft_reset(struct velocity_info *vptr) +{ + struct mac_regs * regs = vptr->mac_regs; + int i = 0; + + writel(CR0_SFRST, ®s->CR0Set); + + for (i = 0; i < W_MAX_TIMEOUT; i++) { + udelay(5); + if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, ®s->CR0Set)) + break; + } + + if (i == W_MAX_TIMEOUT) { + writel(CR0_FORSRST, ®s->CR0Set); + /* FIXME: PCI POSTING */ + /* delay 2ms */ + mdelay(2); + } + return 0; +} + +/** + * velocity_found1 - set up discovered velocity card + * @pdev: PCI device + * @ent: PCI device table entry that matched + * + * Configure a discovered adapter from scratch. Return a negative + * errno error code on failure paths. + */ + +static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int first = 1; + struct net_device *dev; + int i; + struct velocity_info_tbl *info = (struct velocity_info_tbl *) ent->driver_data; + struct velocity_info *vptr; + struct mac_regs * regs; + int ret = -ENOMEM; + + if (velocity_nics++ >= MAX_UNITS) { + printk(KERN_NOTICE VELOCITY_NAME ": already found %d NICs.\n", + velocity_nics); + return -ENODEV; + } + + dev = alloc_etherdev(sizeof(struct velocity_info)); + + if (dev == NULL) { + printk(KERN_ERR VELOCITY_NAME ": allocate net device failed.\n"); + goto out; + } + + /* Chain it all together */ + + SET_MODULE_OWNER(dev); + SET_NETDEV_DEV(dev, &pdev->dev); + vptr = dev->priv; + + + if (first) { + printk(KERN_INFO "%s Ver. 
%s\n", + VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION); + printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n"); + printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n"); + first = 0; + } + + velocity_init_info(pdev, vptr, info); + + vptr->dev = dev; + + dev->priv = vptr; + dev->irq = pdev->irq; + + ret = pci_enable_device(pdev); + if (ret < 0) + goto err_free_dev; + + ret = velocity_get_pci_info(vptr, pdev); + if (ret < 0) { + printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n"); + goto err_disable; + } + + ret = pci_request_regions(pdev, VELOCITY_NAME); + if (ret < 0) { + printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n"); + goto err_disable; + } + + regs = ioremap(vptr->memaddr, vptr->io_size); + if (regs == NULL) { + ret = -EIO; + goto err_release_res; + } + + vptr->mac_regs = regs; + + mac_wol_reset(regs); + + dev->base_addr = vptr->ioaddr; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = readb(®s->PAR[i]); + + + velocity_get_options(&vptr->options, velocity_nics - 1, dev->name); + + /* + * Mask out the options cannot be set to the chip + */ + + vptr->options.flags &= info->flags; + + /* + * Enable the chip specified capbilities + */ + + vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL); + + vptr->wol_opts = vptr->options.wol_opts; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + + vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); + + dev->irq = pdev->irq; + dev->open = velocity_open; + dev->hard_start_xmit = velocity_xmit; + dev->stop = velocity_close; + dev->get_stats = velocity_get_stats; + dev->set_multicast_list = velocity_set_multi; + dev->do_ioctl = velocity_ioctl; + dev->ethtool_ops = &velocity_ethtool_ops; + dev->change_mtu = velocity_change_mtu; +#ifdef VELOCITY_ZERO_COPY_SUPPORT + dev->features |= NETIF_F_SG; +#endif + + if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) { + dev->features |= NETIF_F_HW_CSUM; + } + + ret = register_netdev(dev); + if (ret < 0) + goto err_iounmap; + + velocity_print_info(vptr); + pci_set_drvdata(pdev, dev); + + /* and leave the chip powered down */ + + pci_set_power_state(pdev, 3); +out: + return ret; + +err_iounmap: + iounmap(regs); +err_release_res: + pci_release_regions(pdev); +err_disable: + pci_disable_device(pdev); +err_free_dev: + free_netdev(dev); + goto out; +} + +/** + * velocity_print_info - per driver data + * @vptr: velocity + * + * Print per driver data as the kernel driver finds Velocity + * hardware + */ + +static void __devinit velocity_print_info(struct velocity_info *vptr) +{ + struct net_device *dev = vptr->dev; + + printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); + printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", + dev->name, + dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], + dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); +} + +/** + * velocity_init_info - init private data + * @pdev: PCI device + * @vptr: Velocity info + * @info: Board type + * + * Set up the initial velocity_info struct for the device that has been + * discovered. 
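+ *
+ * This runs early in the probe path, before any register access;
+ * a simplified sketch of the velocity_found1() sequence it sits in:
+ *
+ *	velocity_init_info(pdev, vptr, info);
+ *	ret = velocity_get_pci_info(vptr, pdev);
+ *	regs = ioremap(vptr->memaddr, vptr->io_size);
+ *	vptr->mac_regs = regs;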
+ */ + +static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info) +{ + memset(vptr, 0, sizeof(struct velocity_info)); + + vptr->pdev = pdev; + vptr->chip_id = info->chip_id; + vptr->io_size = info->io_size; + vptr->num_txq = info->txqueue; + vptr->multicast_limit = MCAM_SIZE; + + spin_lock_init(&vptr->lock); + spin_lock_init(&vptr->xmit_lock); +} + +/** + * velocity_get_pci_info - retrieve PCI info for device + * @vptr: velocity device + * @pdev: PCI device it matches + * + * Retrieve the PCI configuration space data that interests us from + * the kernel PCI layer + */ + +static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) +{ + + if(pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0) + return -EIO; + + pci_set_master(pdev); + + vptr->ioaddr = pci_resource_start(pdev, 0); + vptr->memaddr = pci_resource_start(pdev, 1); + + if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) + { + printk(KERN_ERR "%s: region #0 is not an I/O resource, aborting.\n", + pci_name(pdev)); + return -EINVAL; + } + + if((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) + { + printk(KERN_ERR "%s: region #1 is an I/O resource, aborting.\n", + pci_name(pdev)); + return -EINVAL; + } + + if(pci_resource_len(pdev, 1) < 256) + { + printk(KERN_ERR "%s: region #1 is too small.\n", + pci_name(pdev)); + return -EINVAL; + } + vptr->pdev = pdev; + + return 0; +} + +/** + * velocity_init_rings - set up DMA rings + * @vptr: Velocity to set up + * + * Allocate PCI mapped DMA rings for the receive and transmit layer + * to use. + */ + +static int velocity_init_rings(struct velocity_info *vptr) +{ + int i; + unsigned int psize; + unsigned int tsize; + dma_addr_t pool_dma; + u8 *pool; + + /* + * Allocate all RD/TD rings a single pool + */ + + psize = vptr->options.numrx * sizeof(struct rx_desc) + + vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; + + /* + * pci_alloc_consistent() fulfills the requirement for 64 bytes + * alignment + */ + pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma); + + if (pool == NULL) { + printk(KERN_ERR "%s : DMA memory allocation failed.\n", + vptr->dev->name); + return -ENOMEM; + } + + memset(pool, 0, psize); + + vptr->rd_ring = (struct rx_desc *) pool; + + vptr->rd_pool_dma = pool_dma; + + tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq; + vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize, + &vptr->tx_bufs_dma); + + if (vptr->tx_bufs == NULL) { + printk(KERN_ERR "%s: DMA memory allocation failed.\n", + vptr->dev->name); + pci_free_consistent(vptr->pdev, psize, pool, pool_dma); + return -ENOMEM; + } + + memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq); + + i = vptr->options.numrx * sizeof(struct rx_desc); + pool += i; + pool_dma += i; + for (i = 0; i < vptr->num_txq; i++) { + int offset = vptr->options.numtx * sizeof(struct tx_desc); + + vptr->td_pool_dma[i] = pool_dma; + vptr->td_rings[i] = (struct tx_desc *) pool; + pool += offset; + pool_dma += offset; + } + return 0; +} + +/** + * velocity_free_rings - free PCI ring pointers + * @vptr: Velocity to free from + * + * Clean up the PCI ring buffers allocated to this velocity. 
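+ *
+ * The sizes passed to pci_free_consistent() must mirror the single
+ * pool laid out by velocity_init_rings(), i.e. roughly
+ *
+ *	numrx * sizeof(struct rx_desc) +
+ *		numtx * sizeof(struct tx_desc) * num_txq
+ *
+ * for the descriptor pool, plus numtx * PKT_BUF_SZ * num_txq for the
+ * preallocated transmit bounce buffers.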
+ */ + +static void velocity_free_rings(struct velocity_info *vptr) +{ + int size; + + size = vptr->options.numrx * sizeof(struct rx_desc) + + vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; + + pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma); + + size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq; + + pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma); +} + +/** + * velocity_init_rd_ring - set up receive ring + * @vptr: velocity to configure + * + * Allocate and set up the receive buffers for each ring slot and + * assign them to the network adapter. + */ + +static int velocity_init_rd_ring(struct velocity_info *vptr) +{ + int i, ret = -ENOMEM; + struct rx_desc *rd; + struct velocity_rd_info *rd_info; + unsigned int rsize = sizeof(struct velocity_rd_info) * + vptr->options.numrx; + + vptr->rd_info = kmalloc(rsize, GFP_KERNEL); + if(vptr->rd_info == NULL) + goto out; + memset(vptr->rd_info, 0, rsize); + + /* Init the RD ring entries */ + for (i = 0; i < vptr->options.numrx; i++) { + rd = &(vptr->rd_ring[i]); + rd_info = &(vptr->rd_info[i]); + + ret = velocity_alloc_rx_buf(vptr, i); + if (ret < 0) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR + "%s: failed to allocate RX buffer.\n", + vptr->dev->name); + velocity_free_rd_ring(vptr); + goto out; + } + rd->rdesc0.owner = OWNED_BY_NIC; + } + vptr->rd_used = vptr->rd_curr = 0; +out: + return ret; +} + +/** + * velocity_free_rd_ring - set up receive ring + * @vptr: velocity to clean up + * + * Free the receive buffers for each ring slot and any + * attached socket buffers that need to go away. + */ + +static void velocity_free_rd_ring(struct velocity_info *vptr) +{ + int i; + + if (vptr->rd_info == NULL) + return; + + for (i = 0; i < vptr->options.numrx; i++) { + struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); + + if (!rd_info->skb_dma) + continue; + pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, + PCI_DMA_FROMDEVICE); + rd_info->skb_dma = (dma_addr_t) NULL; + + dev_kfree_skb(rd_info->skb); + rd_info->skb = NULL; + } + + kfree(vptr->rd_info); + vptr->rd_info = NULL; +} + +/** + * velocity_init_td_ring - set up transmit ring + * @vptr: velocity + * + * Set up the transmit ring and chain the ring pointers together. + * Returns zero on success or a negative posix errno code for + * failure. + */ + +static int velocity_init_td_ring(struct velocity_info *vptr) +{ + int i, j; + dma_addr_t curr; + struct tx_desc *td; + struct velocity_td_info *td_info; + unsigned int tsize = sizeof(struct velocity_td_info) * + vptr->options.numtx; + + /* Init the TD ring entries */ + for (j = 0; j < vptr->num_txq; j++) { + curr = vptr->td_pool_dma[j]; + + vptr->td_infos[j] = kmalloc(tsize, GFP_KERNEL); + if(vptr->td_infos[j] == NULL) + { + while(--j >= 0) + kfree(vptr->td_infos[j]); + return -ENOMEM; + } + memset(vptr->td_infos[j], 0, tsize); + + for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) { + td = &(vptr->td_rings[j][i]); + td_info = &(vptr->td_infos[j][i]); + td_info->buf = vptr->tx_bufs + (i + j) * PKT_BUF_SZ; + td_info->buf_dma = vptr->tx_bufs_dma + (i + j) * PKT_BUF_SZ; + } + vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0; + } + return 0; +} + +/* + * FIXME: could we merge this with velocity_free_tx_buf ? 
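+ * Both helpers boil down to much the same steps (sketch):
+ *
+ *	for (i = 0; i < tdinfo->nskb_dma; i++)
+ *		pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
+ *				 tdinfo->skb->len, PCI_DMA_TODEVICE);
+ *	dev_kfree_skb(tdinfo->skb);
+ *
+ * the differences being whether the preallocated bounce buffer is
+ * skipped and whether the _irq variant of kfree_skb is required.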
+ */ + +static void velocity_free_td_ring_entry(struct velocity_info *vptr, + int q, int n) +{ + struct velocity_td_info * td_info = &(vptr->td_infos[q][n]); + int i; + + if (td_info == NULL) + return; + + if (td_info->skb) { + for (i = 0; i < td_info->nskb_dma; i++) + { + if (td_info->skb_dma[i]) { + pci_unmap_single(vptr->pdev, td_info->skb_dma[i], + td_info->skb->len, PCI_DMA_TODEVICE); + td_info->skb_dma[i] = (dma_addr_t) NULL; + } + } + dev_kfree_skb(td_info->skb); + td_info->skb = NULL; + } +} + +/** + * velocity_free_td_ring - free td ring + * @vptr: velocity + * + * Free up the transmit ring for this particular velocity adapter. + * We free the ring contents but not the ring itself. + */ + +static void velocity_free_td_ring(struct velocity_info *vptr) +{ + int i, j; + + for (j = 0; j < vptr->num_txq; j++) { + if (vptr->td_infos[j] == NULL) + continue; + for (i = 0; i < vptr->options.numtx; i++) { + velocity_free_td_ring_entry(vptr, j, i); + + } + if (vptr->td_infos[j]) { + kfree(vptr->td_infos[j]); + vptr->td_infos[j] = NULL; + } + } +} + +/** + * velocity_rx_srv - service RX interrupt + * @vptr: velocity + * @status: adapter status (unused) + * + * Walk the receive ring of the velocity adapter and remove + * any received packets from the receive queue. Hand the ring + * slots back to the adapter for reuse. + */ + +static int velocity_rx_srv(struct velocity_info *vptr, int status) +{ + struct rx_desc *rd; + struct net_device_stats *stats = &vptr->stats; + struct mac_regs * regs = vptr->mac_regs; + int rd_curr = vptr->rd_curr; + int works = 0; + + while (1) { + + rd = &(vptr->rd_ring[rd_curr]); + + if ((vptr->rd_info[rd_curr]).skb == NULL) { + if (velocity_alloc_rx_buf(vptr, rd_curr) < 0) + break; + } + + if (works++ > 15) + break; + + if (rd->rdesc0.owner == OWNED_BY_NIC) + break; + + /* + * Don't drop CE or RL error frame although RXOK is off + * FIXME: need to handle copybreak + */ + if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { + if (velocity_receive_frame(vptr, rd_curr) == 0) { + if (velocity_alloc_rx_buf(vptr, rd_curr) < 0) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not allocate rx buf\n", vptr->dev->name); + break; + } + } else { + stats->rx_dropped++; + } + } else { + if (rd->rdesc0.RSR & RSR_CRC) + stats->rx_crc_errors++; + if (rd->rdesc0.RSR & RSR_FAE) + stats->rx_frame_errors++; + + stats->rx_dropped++; + } + + rd->inten = 1; + + if (++vptr->rd_used >= 4) { + int i, rd_prev = rd_curr; + for (i = 0; i < 4; i++) { + if (--rd_prev < 0) + rd_prev = vptr->options.numrx - 1; + + rd = &(vptr->rd_ring[rd_prev]); + rd->rdesc0.owner = OWNED_BY_NIC; + } + writew(4, &(regs->RBRDU)); + vptr->rd_used -= 4; + } + + vptr->dev->last_rx = jiffies; + + rd_curr++; + if (rd_curr >= vptr->options.numrx) + rd_curr = 0; + } + vptr->rd_curr = rd_curr; + VAR_USED(stats); + return works; +} + +/** + * velocity_rx_csum - checksum process + * @rd: receive packet descriptor + * @skb: network layer packet buffer + * + * Process the status bits for the received packet and determine + * if the checksum was computed and verified by the hardware + */ + +static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb) +{ + skb->ip_summed = CHECKSUM_NONE; + + if (rd->rdesc1.CSM & CSM_IPKT) { + if (rd->rdesc1.CSM & CSM_IPOK) { + if ((rd->rdesc1.CSM & CSM_TCPKT) || + (rd->rdesc1.CSM & CSM_UDPKT)) { + if (!(rd->rdesc1.CSM & CSM_TUPOK)) { + return; + } + } + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } +} + +/** + * 
velocity_receive_frame - received packet processor + * @vptr: velocity we are handling + * @idx: ring index + * + * A packet has arrived. We process the packet and if appropriate + * pass the frame up the network stack + */ + +static int velocity_receive_frame(struct velocity_info *vptr, int idx) +{ + struct net_device_stats *stats = &vptr->stats; + struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); + struct rx_desc *rd = &(vptr->rd_ring[idx]); + struct sk_buff *skb; + + if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { + VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name); + stats->rx_length_errors++; + return -EINVAL; + } + + if (rd->rdesc0.RSR & RSR_MAR) + vptr->stats.multicast++; + + skb = rd_info->skb; + skb->dev = vptr->dev; + + pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, + PCI_DMA_FROMDEVICE); + rd_info->skb_dma = (dma_addr_t) NULL; + rd_info->skb = NULL; + + /* FIXME - memmove ? */ + if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { + int i; + for (i = rd->rdesc0.len + 4; i >= 0; i--) + *(skb->data + i + 2) = *(skb->data + i); + skb->data += 2; + skb->tail += 2; + } + + skb_put(skb, (rd->rdesc0.len - 4)); + skb->protocol = eth_type_trans(skb, skb->dev); + + /* + * Drop frame not meeting IEEE 802.3 + */ + + if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { + if (rd->rdesc0.RSR & RSR_RL) { + stats->rx_length_errors++; + return -EINVAL; + } + } + + velocity_rx_csum(rd, skb); + + /* + * FIXME: need rx_copybreak handling + */ + + stats->rx_bytes += skb->len; + netif_rx(skb); + + return 0; +} + +/** + * velocity_alloc_rx_buf - allocate aligned receive buffer + * @vptr: velocity + * @idx: ring index + * + * Allocate a new full sized buffer for the reception of a frame and + * map it into PCI space for the hardware to use. The hardware + * requires *64* byte alignment of the buffer which makes life + * less fun than would be ideal. + */ + +static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) +{ + struct rx_desc *rd = &(vptr->rd_ring[idx]); + struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); + + rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64); + if (rd_info->skb == NULL) + return -ENOMEM; + + /* + * Do the gymnastics to get the buffer head for data at + * 64byte alignment. + */ + skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->tail & 63); + rd_info->skb->dev = vptr->dev; + rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->tail, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); + + /* + * Fill in the descriptor to match + */ + + *((u32 *) & (rd->rdesc0)) = 0; + rd->len = cpu_to_le32(vptr->rx_buf_sz); + rd->inten = 1; + rd->pa_low = cpu_to_le32(rd_info->skb_dma); + rd->pa_high = 0; + return 0; +} + +/** + * tx_srv - transmit interrupt service + * @vptr; Velocity + * @status: + * + * Scan the queues looking for transmitted packets that + * we can complete and clean up. 
Update any statistics as + * neccessary/ + */ + +static int velocity_tx_srv(struct velocity_info *vptr, u32 status) +{ + struct tx_desc *td; + int qnum; + int full = 0; + int idx; + int works = 0; + struct velocity_td_info *tdinfo; + struct net_device_stats *stats = &vptr->stats; + + for (qnum = 0; qnum < vptr->num_txq; qnum++) { + for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0; + idx = (idx + 1) % vptr->options.numtx) { + + /* + * Get Tx Descriptor + */ + td = &(vptr->td_rings[qnum][idx]); + tdinfo = &(vptr->td_infos[qnum][idx]); + + if (td->tdesc0.owner == OWNED_BY_NIC) + break; + + if ((works++ > 15)) + break; + + if (td->tdesc0.TSR & TSR0_TERR) { + stats->tx_errors++; + stats->tx_dropped++; + if (td->tdesc0.TSR & TSR0_CDH) + stats->tx_heartbeat_errors++; + if (td->tdesc0.TSR & TSR0_CRS) + stats->tx_carrier_errors++; + if (td->tdesc0.TSR & TSR0_ABT) + stats->tx_aborted_errors++; + if (td->tdesc0.TSR & TSR0_OWC) + stats->tx_window_errors++; + } else { + stats->tx_packets++; + stats->tx_bytes += tdinfo->skb->len; + } + velocity_free_tx_buf(vptr, tdinfo); + vptr->td_used[qnum]--; + } + vptr->td_tail[qnum] = idx; + + if (AVAIL_TD(vptr, qnum) < 1) { + full = 1; + } + } + /* + * Look to see if we should kick the transmit network + * layer for more work. + */ + if (netif_queue_stopped(vptr->dev) && (full == 0) + && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { + netif_wake_queue(vptr->dev); + } + return works; +} + +/** + * velocity_print_link_status - link status reporting + * @vptr: velocity to report on + * + * Turn the link status of the velocity card into a kernel log + * description of the new link state, detailing speed and duplex + * status + */ + +static void velocity_print_link_status(struct velocity_info *vptr) +{ + + if (vptr->mii_status & VELOCITY_LINK_FAIL) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name); + } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link autonegation", vptr->dev->name); + + if (vptr->mii_status & VELOCITY_SPEED_1000) + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); + else if (vptr->mii_status & VELOCITY_SPEED_100) + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps"); + else + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps"); + + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n"); + else + VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); + } else { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); + switch (vptr->options.spd_dpx) { + case SPD_DPX_100_HALF: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n"); + break; + case SPD_DPX_100_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n"); + break; + case SPD_DPX_10_HALF: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n"); + break; + case SPD_DPX_10_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n"); + break; + default: + break; + } + } +} + +/** + * velocity_error - handle error from controller + * @vptr: velocity + * @status: card status + * + * Process an error report from the hardware and attempt to recover + * the card itself. 
At the moment we cannot recover from some + * theoretically impossible errors but this could be fixed using + * the pci_device_failed logic to bounce the hardware + * + */ + +static void velocity_error(struct velocity_info *vptr, int status) +{ + + if (status & ISR_TXSTLI) { + struct mac_regs * regs = vptr->mac_regs; + + printk(KERN_ERR "TD structure errror TDindex=%hx\n", readw(®s->TDIdx[0])); + BYTE_REG_BITS_ON(TXESR_TDSTR, ®s->TXESR); + writew(TRDCSR_RUN, ®s->TDCSRClr); + netif_stop_queue(vptr->dev); + + /* FIXME: port over the pci_device_failed code and use it + here */ + } + + if (status & ISR_SRCI) { + struct mac_regs * regs = vptr->mac_regs; + int linked; + + if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + vptr->mii_status = check_connection_type(regs); + + /* + * If it is a 3119, disable frame bursting in + * halfduplex mode and enable it in fullduplex + * mode + */ + if (vptr->rev_id < REV_ID_VT3216_A0) { + if (vptr->mii_status | VELOCITY_DUPLEX_FULL) + BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); + else + BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); + } + /* + * Only enable CD heart beat counter in 10HD mode + */ + if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) { + BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); + } else { + BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); + } + } + /* + * Get link status from PHYSR0 + */ + linked = readb(®s->PHYSR0) & PHYSR0_LINKGD; + + if (linked) { + vptr->mii_status &= ~VELOCITY_LINK_FAIL; + } else { + vptr->mii_status |= VELOCITY_LINK_FAIL; + } + + velocity_print_link_status(vptr); + enable_flow_control_ability(vptr); + + /* + * Re-enable auto-polling because SRCI will disable + * auto-polling + */ + + enable_mii_autopoll(regs); + + if (vptr->mii_status & VELOCITY_LINK_FAIL) + netif_stop_queue(vptr->dev); + else + netif_wake_queue(vptr->dev); + + }; + if (status & ISR_MIBFI) + velocity_update_hw_mibs(vptr); + if (status & ISR_LSTEI) + mac_rx_queue_wake(vptr->mac_regs); +} + +/** + * velocity_free_tx_buf - free transmit buffer + * @vptr: velocity + * @tdinfo: buffer + * + * Release an transmit buffer. If the buffer was preallocated then + * recycle it, if not then unmap the buffer. + */ + +static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo) +{ + struct sk_buff *skb = tdinfo->skb; + int i; + + /* + * Don't unmap the pre-allocated tx_bufs + */ + if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) { + + for (i = 0; i < tdinfo->nskb_dma; i++) { +#ifdef VELOCITY_ZERO_COPY_SUPPORT + pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE); +#else + pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE); +#endif + tdinfo->skb_dma[i] = 0; + } + } + dev_kfree_skb_irq(skb); + tdinfo->skb = NULL; +} + +/** + * velocity_open - interface activation callback + * @dev: network layer device to open + * + * Called when the network layer brings the interface up. Returns + * a negative posix error code on failure, or zero on success. + * + * All the ring allocation and set up is done on open for this + * adapter to minimise memory usage when inactive + */ + +static int velocity_open(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + int ret; + + vptr->rx_buf_sz = (dev->mtu <= 1504 ? 
PKT_BUF_SZ : dev->mtu + 32); + + ret = velocity_init_rings(vptr); + if (ret < 0) + goto out; + + ret = velocity_init_rd_ring(vptr); + if (ret < 0) + goto err_free_desc_rings; + + ret = velocity_init_td_ring(vptr); + if (ret < 0) + goto err_free_rd_ring; + + /* Ensure chip is running */ + pci_set_power_state(vptr->pdev, 0); + + velocity_init_registers(vptr, VELOCITY_INIT_COLD); + + ret = request_irq(vptr->pdev->irq, &velocity_intr, SA_SHIRQ, + dev->name, dev); + if (ret < 0) { + /* Power down the chip */ + pci_set_power_state(vptr->pdev, 3); + goto err_free_td_ring; + } + + mac_enable_int(vptr->mac_regs); + netif_start_queue(dev); + vptr->flags |= VELOCITY_FLAGS_OPENED; +out: + return ret; + +err_free_td_ring: + velocity_free_td_ring(vptr); +err_free_rd_ring: + velocity_free_rd_ring(vptr); +err_free_desc_rings: + velocity_free_rings(vptr); + goto out; +} + +/** + * velocity_change_mtu - MTU change callback + * @dev: network device + * @new_mtu: desired MTU + * + * Handle requests from the networking layer for MTU change on + * this interface. It gets called on a change by the network layer. + * Return zero for success or negative posix error code. + */ + +static int velocity_change_mtu(struct net_device *dev, int new_mtu) +{ + struct velocity_info *vptr = dev->priv; + unsigned long flags; + int oldmtu = dev->mtu; + int ret = 0; + + if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", + vptr->dev->name); + return -EINVAL; + } + + if (new_mtu != oldmtu) { + spin_lock_irqsave(&vptr->lock, flags); + + netif_stop_queue(dev); + velocity_shutdown(vptr); + + velocity_free_td_ring(vptr); + velocity_free_rd_ring(vptr); + + dev->mtu = new_mtu; + if (new_mtu > 8192) + vptr->rx_buf_sz = 9 * 1024; + else if (new_mtu > 4096) + vptr->rx_buf_sz = 8192; + else + vptr->rx_buf_sz = 4 * 1024; + + ret = velocity_init_rd_ring(vptr); + if (ret < 0) + goto out_unlock; + + ret = velocity_init_td_ring(vptr); + if (ret < 0) + goto out_unlock; + + velocity_init_registers(vptr, VELOCITY_INIT_COLD); + + mac_enable_int(vptr->mac_regs); + netif_start_queue(dev); +out_unlock: + spin_unlock_irqrestore(&vptr->lock, flags); + } + + return ret; +} + +/** + * velocity_shutdown - shut down the chip + * @vptr: velocity to deactivate + * + * Shuts down the internal operations of the velocity and + * disables interrupts, autopolling, transmit and receive + */ + +static void velocity_shutdown(struct velocity_info *vptr) +{ + struct mac_regs * regs = vptr->mac_regs; + mac_disable_int(regs); + writel(CR0_STOP, ®s->CR0Set); + writew(0xFFFF, ®s->TDCSRClr); + writeb(0xFF, ®s->RDCSRClr); + safe_disable_mii_autopoll(regs); + mac_clear_isr(regs); +} + +/** + * velocity_close - close adapter callback + * @dev: network device + * + * Callback from the network layer when the velocity is being + * deactivated by the network layer + */ + +static int velocity_close(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + + netif_stop_queue(dev); + velocity_shutdown(vptr); + + if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) + velocity_get_ip(vptr); + if (dev->irq != 0) + free_irq(dev->irq, dev); + + /* Power down the chip */ + pci_set_power_state(vptr->pdev, 3); + + /* Free the resources */ + velocity_free_td_ring(vptr); + velocity_free_rd_ring(vptr); + velocity_free_rings(vptr); + + vptr->flags &= (~VELOCITY_FLAGS_OPENED); + return 0; +} + +/** + * velocity_xmit - transmit packet callback + * @skb: buffer to transmit + * @dev: network device + * + * Called 
by the networ layer to request a packet is queued to + * the velocity. Returns zero on success. + */ + +static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + int qnum = 0; + struct tx_desc *td_ptr; + struct velocity_td_info *tdinfo; + unsigned long flags; + int index; + + int pktlen = skb->len; + + spin_lock_irqsave(&vptr->lock, flags); + + index = vptr->td_curr[qnum]; + td_ptr = &(vptr->td_rings[qnum][index]); + tdinfo = &(vptr->td_infos[qnum][index]); + + td_ptr->tdesc1.TCPLS = TCPLS_NORMAL; + td_ptr->tdesc1.TCR = TCR0_TIC; + td_ptr->td_buf[0].queue = 0; + + /* + * Pad short frames. + */ + if (pktlen < ETH_ZLEN) { + /* Cannot occur until ZC support */ + if(skb_linearize(skb, GFP_ATOMIC)) + return 0; + pktlen = ETH_ZLEN; + memcpy(tdinfo->buf, skb->data, skb->len); + memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); + tdinfo->skb = skb; + tdinfo->skb_dma[0] = tdinfo->buf_dma; + td_ptr->tdesc0.pktsize = pktlen; + td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); + td_ptr->td_buf[0].pa_high = 0; + td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; + tdinfo->nskb_dma = 1; + td_ptr->tdesc1.CMDZ = 2; + } else +#ifdef VELOCITY_ZERO_COPY_SUPPORT + if (skb_shinfo(skb)->nr_frags > 0) { + int nfrags = skb_shinfo(skb)->nr_frags; + tdinfo->skb = skb; + if (nfrags > 6) { + skb_linearize(skb, GFP_ATOMIC); + memcpy(tdinfo->buf, skb->data, skb->len); + tdinfo->skb_dma[0] = tdinfo->buf_dma; + td_ptr->tdesc0.pktsize = + td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); + td_ptr->td_buf[0].pa_high = 0; + td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; + tdinfo->nskb_dma = 1; + td_ptr->tdesc1.CMDZ = 2; + } else { + int i = 0; + tdinfo->nskb_dma = 0; + tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE); + + td_ptr->tdesc0.pktsize = pktlen; + + /* FIXME: support 48bit DMA later */ + td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); + td_ptr->td_buf[i].pa_high = 0; + td_ptr->td_buf[i].bufsize = skb->len->skb->data_len; + + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *addr = ((void *) page_address(frag->page + frag->page_offset)); + + tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); + + td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); + td_ptr->td_buf[i + 1].pa_high = 0; + td_ptr->td_buf[i + 1].bufsize = frag->size; + } + tdinfo->nskb_dma = i - 1; + td_ptr->tdesc1.CMDZ = i; + } + + } else +#endif + { + /* + * Map the linear network buffer into PCI space and + * add it to the transmit ring. 
+ */ + tdinfo->skb = skb; + tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); + td_ptr->tdesc0.pktsize = pktlen; + td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); + td_ptr->td_buf[0].pa_high = 0; + td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; + tdinfo->nskb_dma = 1; + td_ptr->tdesc1.CMDZ = 2; + } + + if (vptr->flags & VELOCITY_FLAGS_TAGGING) { + td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff); + td_ptr->tdesc1.pqinf.priority = 0; + td_ptr->tdesc1.pqinf.CFI = 0; + td_ptr->tdesc1.TCR |= TCR0_VETAG; + } + + /* + * Handle hardware checksum + */ + if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) + && (skb->ip_summed == CHECKSUM_HW)) { + struct iphdr *ip = skb->nh.iph; + if (ip->protocol == IPPROTO_TCP) + td_ptr->tdesc1.TCR |= TCR0_TCPCK; + else if (ip->protocol == IPPROTO_UDP) + td_ptr->tdesc1.TCR |= (TCR0_UDPCK); + td_ptr->tdesc1.TCR |= TCR0_IPCK; + } + { + + int prev = index - 1; + + if (prev < 0) + prev = vptr->options.numtx - 1; + td_ptr->tdesc0.owner = OWNED_BY_NIC; + vptr->td_used[qnum]++; + vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; + + if (AVAIL_TD(vptr, qnum) < 1) + netif_stop_queue(dev); + + td_ptr = &(vptr->td_rings[qnum][prev]); + td_ptr->td_buf[0].queue = 1; + mac_tx_queue_wake(vptr->mac_regs, qnum); + } + dev->trans_start = jiffies; + spin_unlock_irqrestore(&vptr->lock, flags); + return 0; +} + +/** + * velocity_intr - interrupt callback + * @irq: interrupt number + * @dev_instance: interrupting device + * @pt_regs: CPU register state at interrupt + * + * Called whenever an interrupt is generated by the velocity + * adapter IRQ line. We may not be the source of the interrupt + * and need to identify initially if we are, and if not exit as + * efficiently as possible. + */ + +static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct net_device *dev = dev_instance; + struct velocity_info *vptr = dev->priv; + u32 isr_status; + int max_count = 0; + + + spin_lock(&vptr->lock); + isr_status = mac_read_isr(vptr->mac_regs); + + /* Not us ? */ + if (isr_status == 0) { + spin_unlock(&vptr->lock); + return IRQ_NONE; + } + + mac_disable_int(vptr->mac_regs); + + /* + * Keep processing the ISR until we have completed + * processing and the isr_status becomes zero + */ + + while (isr_status != 0) { + mac_write_isr(vptr->mac_regs, isr_status); + if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) + velocity_error(vptr, isr_status); + if (isr_status & (ISR_PRXI | ISR_PPRXI)) + max_count += velocity_rx_srv(vptr, isr_status); + if (isr_status & (ISR_PTXI | ISR_PPTXI)) + max_count += velocity_tx_srv(vptr, isr_status); + isr_status = mac_read_isr(vptr->mac_regs); + if (max_count > vptr->options.int_works) + { + printk(KERN_WARNING "%s: excessive work at interrupt.\n", + dev->name); + max_count = 0; + } + } + spin_unlock(&vptr->lock); + mac_enable_int(vptr->mac_regs); + return IRQ_HANDLED; + +} + + +/** + * ether_crc - ethernet CRC function + * + * Compute an ethernet CRC hash of the data block provided. This + * is not performance optimised but is not needed in performance + * critical code paths. + * + * FIXME: could we use shared code here ? + */ + +static inline u32 ether_crc(int length, unsigned char *data) +{ + static unsigned const ethernet_polynomial = 0x04c11db7U; + + int crc = -1; + + while (--length >= 0) { + unsigned char current_octet = *data++; + int bit; + for (bit = 0; bit < 8; bit++, current_octet >>= 1) { + crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ? 
ethernet_polynomial : 0); + } + } + return crc; +} + +/** + * velocity_set_multi - filter list change callback + * @dev: network device + * + * Called by the network layer when the filter lists need to change + * for a velocity adapter. Reload the CAMs with the new address + * filter ruleset. + */ + +static void velocity_set_multi(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + struct mac_regs * regs = vptr->mac_regs; + u8 rx_mode; + int i; + struct dev_mc_list *mclist; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + /* Unconditionally log net taps. */ + printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); + writel(0xffffffff, ®s->MARCAM[0]); + writel(0xffffffff, ®s->MARCAM[4]); + rx_mode = (RCR_AM | RCR_AB | RCR_PROM); + } else if ((dev->mc_count > vptr->multicast_limit) + || (dev->flags & IFF_ALLMULTI)) { + writel(0xffffffff, ®s->MARCAM[0]); + writel(0xffffffff, ®s->MARCAM[4]); + rx_mode = (RCR_AM | RCR_AB); + } else { + int offset = MCAM_SIZE - vptr->multicast_limit; + mac_get_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + + for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) { + mac_set_cam(regs, i + offset, mclist->dmi_addr, VELOCITY_MULTICAST_CAM); + vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); + } + + mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + rx_mode = (RCR_AM | RCR_AB); + } + if (dev->mtu > 1500) + rx_mode |= RCR_AL; + + BYTE_REG_BITS_ON(rx_mode, ®s->RCR); + +} + +/** + * velocity_get_status - statistics callback + * @dev: network device + * + * Callback from the network layer to allow driver statistics + * to be resynchronized with hardware collected state. In the + * case of the velocity we need to pull the MIB counters from + * the hardware into the counters before letting the network + * layer display them. + */ + +static struct net_device_stats *velocity_get_stats(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + + /* If the hardware is down, don't touch MII */ + if(!netif_running(dev)) + return &vptr->stats; + + spin_lock_irq(&vptr->lock); + velocity_update_hw_mibs(vptr); + spin_unlock_irq(&vptr->lock); + + vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts]; + vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts]; + vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors]; + +// unsigned long rx_dropped; /* no space in linux buffers */ + vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions]; + /* detailed rx_errors: */ +// unsigned long rx_length_errors; +// unsigned long rx_over_errors; /* receiver ring buff overflow */ + vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE]; +// unsigned long rx_frame_errors; /* recv'd frame alignment error */ +// unsigned long rx_fifo_errors; /* recv'r fifo overrun */ +// unsigned long rx_missed_errors; /* receiver missed packet */ + + /* detailed tx_errors */ +// unsigned long tx_fifo_errors; + + return &vptr->stats; +} + + +/** + * velocity_ioctl - ioctl entry point + * @dev: network device + * @rq: interface request ioctl + * @cmd: command code + * + * Called when the user issues an ioctl request to the network + * device in question. The velocity interface supports MII. 
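+ *
+ * From user space the PHY is reached with the standard
+ * SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG requests, along the lines of
+ * (sketch only, socket fd and device name illustrative):
+ *
+ *	struct ifreq ifr;
+ *	strcpy(ifr.ifr_name, "eth0");
+ *	ioctl(skfd, SIOCGMIIPHY, &ifr);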
+ */ + +static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct velocity_info *vptr = dev->priv; + int ret; + + /* If we are asked for information and the device is power + saving then we need to bring the device back up to talk to it */ + + if(!netif_running(dev)) + pci_set_power_state(vptr->pdev, 0); + + switch (cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + case SIOCGMIIREG: /* Read MII PHY register. */ + case SIOCSMIIREG: /* Write to MII PHY register. */ + ret = velocity_mii_ioctl(dev, rq, cmd); + break; + + default: + ret = -EOPNOTSUPP; + } + if(!netif_running(dev)) + pci_set_power_state(vptr->pdev, 3); + + + return ret; +} + +/* + * Definition for our device driver. The PCI layer interface + * uses this to handle all our card discover and plugging + */ + +static struct pci_driver velocity_driver = { + name:VELOCITY_NAME, + id_table:velocity_id_table, + probe:velocity_found1, + remove:velocity_remove1, +#ifdef CONFIG_PM + suspend:velocity_suspend, + resume:velocity_resume, +#endif +}; + +/** + * velocity_init_module - load time function + * + * Called when the velocity module is loaded. The PCI driver + * is registered with the PCI layer, and in turn will call + * the probe functions for each velocity adapter installed + * in the system. + */ + +static int __init velocity_init_module(void) +{ + int ret; + ret = pci_module_init(&velocity_driver); + +#ifdef CONFIG_PM + register_inetaddr_notifier(&velocity_inetaddr_notifier); +#endif + return ret; +} + +/** + * velocity_cleanup - module unload + * + * When the velocity hardware is unloaded this function is called. + * It will clean up the notifiers and the unregister the PCI + * driver interface for this hardware. This in turn cleans up + * all discovered interfaces before returning from the function + */ + +static void __exit velocity_cleanup_module(void) +{ +#ifdef CONFIG_PM + unregister_inetaddr_notifier(&velocity_inetaddr_notifier); +#endif + pci_unregister_driver(&velocity_driver); +} + +module_init(velocity_init_module); +module_exit(velocity_cleanup_module); + + +/* + * MII access , media link mode setting functions + */ + + +/** + * mii_init - set up MII + * @vptr: velocity adapter + * @mii_status: links tatus + * + * Set up the PHY for the current link state. + */ + +static void mii_init(struct velocity_info *vptr, u32 mii_status) +{ + u16 BMCR; + + switch (PHYID_GET_PHY_ID(vptr->phy_id)) { + case PHYID_CICADA_CS8201: + /* + * Reset to hardware default + */ + MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); + /* + * Turn on ECHODIS bit in NWay-forced full mode and turn it + * off it in NWay-forced half mode for NWay-forced v.s. + * legacy-forced issue. + */ + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + /* + * Turn on Link/Activity LED enable bit for CIS8201 + */ + MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs); + break; + case PHYID_VT3216_32BIT: + case PHYID_VT3216_64BIT: + /* + * Reset to hardware default + */ + MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); + /* + * Turn on ECHODIS bit in NWay-forced full mode and turn it + * off it in NWay-forced half mode for NWay-forced v.s. 
+ * legacy-forced issue + */ + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + break; + + case PHYID_MARVELL_1000: + case PHYID_MARVELL_1000S: + /* + * Assert CRS on Transmit + */ + MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); + /* + * Reset to hardware default + */ + MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); + break; + default: + ; + } + velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR); + if (BMCR & BMCR_ISO) { + BMCR &= ~BMCR_ISO; + velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR); + } +} + +/** + * safe_disable_mii_autopoll - autopoll off + * @regs: velocity registers + * + * Turn off the autopoll and wait for it to disable on the chip + */ + +static void safe_disable_mii_autopoll(struct mac_regs * regs) +{ + u16 ww; + + /* turn off MAUTO */ + writeb(0, ®s->MIICR); + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + udelay(1); + if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } +} + +/** + * enable_mii_autopoll - turn on autopolling + * @regs: velocity registers + * + * Enable the MII link status autopoll feature on the Velocity + * hardware. Wait for it to enable. + */ + +static void enable_mii_autopoll(struct mac_regs * regs) +{ + int ii; + + writeb(0, &(regs->MIICR)); + writeb(MIIADR_SWMPL, ®s->MIIADR); + + for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { + udelay(1); + if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } + + writeb(MIICR_MAUTO, ®s->MIICR); + + for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { + udelay(1); + if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } + +} + +/** + * velocity_mii_read - read MII data + * @regs: velocity registers + * @index: MII register index + * @data: buffer for received data + * + * Perform a single read of an MII 16bit register. Returns zero + * on success or -ETIMEDOUT if the PHY did not respond. + */ + +static int velocity_mii_read(struct mac_regs * regs, u8 index, u16 *data) +{ + u16 ww; + + /* + * Disable MIICR_MAUTO, so that mii addr can be set normally + */ + safe_disable_mii_autopoll(regs); + + writeb(index, ®s->MIIADR); + + BYTE_REG_BITS_ON(MIICR_RCMD, ®s->MIICR); + + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + if (!(readb(®s->MIICR) & MIICR_RCMD)) + break; + } + + *data = readw(®s->MIIDATA); + + enable_mii_autopoll(regs); + if (ww == W_MAX_TIMEOUT) + return -ETIMEDOUT; + return 0; +} + +/** + * velocity_mii_write - write MII data + * @regs: velocity registers + * @index: MII register index + * @data: 16bit data for the MII register + * + * Perform a single write to an MII 16bit register. Returns zero + * on success or -ETIMEDOUT if the PHY did not respond. 
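The W_MAX_TIMEOUT bound (0x0FFF, defined later in via-velocity.h) puts a rough ceiling on how long these MII helpers can spin. A back-of-the-envelope sketch, counting only the udelay() calls (the velocity_mii_read() command loop itself polls without a delay):

#include <stdio.h>

#define W_MAX_TIMEOUT	0x0FFFU		/* poll bound used by the MII helpers */

int main(void)
{
	/* safe_disable_mii_autopoll()/enable_mii_autopoll(): udelay(1) per iteration */
	unsigned long autopoll_us = W_MAX_TIMEOUT * 1;
	/* velocity_mii_write(): udelay(5) per iteration */
	unsigned long write_us = W_MAX_TIMEOUT * 5;

	printf("autopoll on/off wait, worst case : ~%lu us\n", autopoll_us);	/* ~4 ms  */
	printf("MII write completion, worst case : ~%lu us\n", write_us);	/* ~20 ms */
	return 0;
}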
+ */ + +static int velocity_mii_write(struct mac_regs * regs, u8 mii_addr, u16 data) +{ + u16 ww; + + /* + * Disable MIICR_MAUTO, so that mii addr can be set normally + */ + safe_disable_mii_autopoll(regs); + + /* MII reg offset */ + writeb(mii_addr, ®s->MIIADR); + /* set MII data */ + writew(data, ®s->MIIDATA); + + /* turn on MIICR_WCMD */ + BYTE_REG_BITS_ON(MIICR_WCMD, ®s->MIICR); + + /* W_MAX_TIMEOUT is the timeout period */ + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + udelay(5); + if (!(readb(®s->MIICR) & MIICR_WCMD)) + break; + } + enable_mii_autopoll(regs); + + if (ww == W_MAX_TIMEOUT) + return -ETIMEDOUT; + return 0; +} + +/** + * velocity_get_opt_media_mode - get media selection + * @vptr: velocity adapter + * + * Get the media mode stored in EEPROM or module options and load + * mii_status accordingly. The requested link state information + * is also returned. + */ + +static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) +{ + u32 status = 0; + + switch (vptr->options.spd_dpx) { + case SPD_DPX_AUTO: + status = VELOCITY_AUTONEG_ENABLE; + break; + case SPD_DPX_100_FULL: + status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL; + break; + case SPD_DPX_10_FULL: + status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL; + break; + case SPD_DPX_100_HALF: + status = VELOCITY_SPEED_100; + break; + case SPD_DPX_10_HALF: + status = VELOCITY_SPEED_10; + break; + } + vptr->mii_status = status; + return status; +} + +/** + * mii_set_auto_on - autonegotiate on + * @vptr: velocity + * + * Enable autonegotation on this interface + */ + +static void mii_set_auto_on(struct velocity_info *vptr) +{ + if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs)) + MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); + else + MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); +} + + +/* +static void mii_set_auto_off(struct velocity_info * vptr) +{ + MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); +} +*/ + +/** + * set_mii_flow_control - flow control setup + * @vptr: velocity interface + * + * Set up the flow control on this interface according to + * the supplied user/eeprom options. + */ + +static void set_mii_flow_control(struct velocity_info *vptr) +{ + /*Enable or Disable PAUSE in ANAR */ + switch (vptr->options.flow_cntl) { + case FLOW_CNTL_TX: + MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + break; + + case FLOW_CNTL_RX: + MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + break; + + case FLOW_CNTL_TX_RX: + MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + break; + + case FLOW_CNTL_DISABLE: + MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + break; + default: + break; + } +} + +/** + * velocity_set_media_mode - set media mode + * @mii_status: old MII link state + * + * Check the media link state and configure the flow control + * PHY and also velocity hardware setup accordingly. In particular + * we need to set up CD polling and frame bursting. 
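set_mii_flow_control() above advertises flow control by toggling the PAUSE and ASM_DIR bits in ANAR. A small standalone sketch that mirrors the same switch so the mapping can be read as a table (the FLOW_CNTL_* values here are placeholders; the real ones are defined elsewhere in the driver):

#include <stdio.h>
#include <stdbool.h>

/* Placeholder values for illustration; the driver defines these elsewhere. */
enum { FLOW_CNTL_DISABLE, FLOW_CNTL_TX, FLOW_CNTL_RX, FLOW_CNTL_TX_RX };

struct anar_bits { bool pause; bool asmdir; };

/* Mirror of the driver's set_mii_flow_control() switch. */
static struct anar_bits flow_to_anar(int flow_cntl)
{
	switch (flow_cntl) {
	case FLOW_CNTL_TX:	return (struct anar_bits){ .pause = false, .asmdir = true  };
	case FLOW_CNTL_RX:	return (struct anar_bits){ .pause = true,  .asmdir = true  };
	case FLOW_CNTL_TX_RX:	return (struct anar_bits){ .pause = true,  .asmdir = true  };
	default:		return (struct anar_bits){ .pause = false, .asmdir = false };
	}
}

int main(void)
{
	const char *names[] = { "disable", "tx", "rx", "tx+rx" };
	int i;

	for (i = 0; i < 4; i++) {
		struct anar_bits b = flow_to_anar(i);
		printf("%-8s PAUSE=%d ASM_DIR=%d\n", names[i], b.pause, b.asmdir);
	}
	return 0;
}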
+ */ + +static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) +{ + u32 curr_status; + struct mac_regs * regs = vptr->mac_regs; + + vptr->mii_status = mii_check_media_mode(vptr->mac_regs); + curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL); + + /* Set mii link status */ + set_mii_flow_control(vptr); + + /* + Check if new status is consisent with current status + if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) + || (mii_status==curr_status)) { + vptr->mii_status=mii_check_media_mode(vptr->mac_regs); + vptr->mii_status=check_connection_type(vptr->mac_regs); + VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n"); + return 0; + } + */ + + if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) { + MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); + } + + /* + * If connection type is AUTO + */ + if (mii_status & VELOCITY_AUTONEG_ENABLE) { + VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n"); + /* clear force MAC mode bit */ + BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); + /* set duplex mode of MAC according to duplex mode of MII */ + MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); + MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); + + /* enable AUTO-NEGO mode */ + mii_set_auto_on(vptr); + } else { + u16 ANAR; + u8 CHIPGCR; + + /* + * 1. if it's 3119, disable frame bursting in halfduplex mode + * and enable it in fullduplex mode + * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR + * 3. only enable CD heart beat counter in 10HD mode + */ + + /* set force MAC mode bit */ + BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); + + CHIPGCR = readb(®s->CHIPGCR); + CHIPGCR &= ~CHIPGCR_FCGMII; + + if (mii_status & VELOCITY_DUPLEX_FULL) { + CHIPGCR |= CHIPGCR_FCFDX; + writeb(CHIPGCR, ®s->CHIPGCR); + VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n"); + if (vptr->rev_id < REV_ID_VT3216_A0) + BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); + } else { + CHIPGCR &= ~CHIPGCR_FCFDX; + VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n"); + writeb(CHIPGCR, ®s->CHIPGCR); + if (vptr->rev_id < REV_ID_VT3216_A0) + BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); + } + + MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); + + if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) { + BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); + } else { + BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); + } + /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */ + velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR); + ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)); + if (mii_status & VELOCITY_SPEED_100) { + if (mii_status & VELOCITY_DUPLEX_FULL) + ANAR |= ANAR_TXFD; + else + ANAR |= ANAR_TX; + } else { + if (mii_status & VELOCITY_DUPLEX_FULL) + ANAR |= ANAR_10FD; + else + ANAR |= ANAR_10; + } + velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR); + /* enable AUTO-NEGO mode */ + mii_set_auto_on(vptr); + /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */ + } + /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */ + /* vptr->mii_status=check_connection_type(vptr->mac_regs); */ + return VELOCITY_LINK_CHANGE; +} + +/** + * mii_check_media_mode - check media state + * @regs: velocity registers + * + * Check the current MII status and determine the link status + * accordingly + */ + +static u32 
mii_check_media_mode(struct mac_regs * regs) +{ + u32 status = 0; + u16 ANAR; + + if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs)) + status |= VELOCITY_LINK_FAIL; + + if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs)) + status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; + else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs)) + status |= (VELOCITY_SPEED_1000); + else { + velocity_mii_read(regs, MII_REG_ANAR, &ANAR); + if (ANAR & ANAR_TXFD) + status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL); + else if (ANAR & ANAR_TX) + status |= VELOCITY_SPEED_100; + else if (ANAR & ANAR_10FD) + status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL); + else + status |= (VELOCITY_SPEED_10); + } + + if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { + velocity_mii_read(regs, MII_REG_ANAR, &ANAR); + if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) + == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { + if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) + status |= VELOCITY_AUTONEG_ENABLE; + } + } + + return status; +} + +static u32 check_connection_type(struct mac_regs * regs) +{ + u32 status = 0; + u8 PHYSR0; + u16 ANAR; + PHYSR0 = readb(®s->PHYSR0); + + /* + if (!(PHYSR0 & PHYSR0_LINKGD)) + status|=VELOCITY_LINK_FAIL; + */ + + if (PHYSR0 & PHYSR0_FDPX) + status |= VELOCITY_DUPLEX_FULL; + + if (PHYSR0 & PHYSR0_SPDG) + status |= VELOCITY_SPEED_1000; + if (PHYSR0 & PHYSR0_SPD10) + status |= VELOCITY_SPEED_10; + else + status |= VELOCITY_SPEED_100; + + if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { + velocity_mii_read(regs, MII_REG_ANAR, &ANAR); + if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) + == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { + if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) + status |= VELOCITY_AUTONEG_ENABLE; + } + } + + return status; +} + +/** + * enable_flow_control_ability - flow control + * @vptr: veloity to configure + * + * Set up flow control according to the flow control options + * determined by the eeprom/configuration. + */ + +static void enable_flow_control_ability(struct velocity_info *vptr) +{ + + struct mac_regs * regs = vptr->mac_regs; + + switch (vptr->options.flow_cntl) { + + case FLOW_CNTL_DEFAULT: + if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, ®s->PHYSR0)) + writel(CR0_FDXRFCEN, ®s->CR0Set); + else + writel(CR0_FDXRFCEN, ®s->CR0Clr); + + if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, ®s->PHYSR0)) + writel(CR0_FDXTFCEN, ®s->CR0Set); + else + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_TX: + writel(CR0_FDXTFCEN, ®s->CR0Set); + writel(CR0_FDXRFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_RX: + writel(CR0_FDXRFCEN, ®s->CR0Set); + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_TX_RX: + writel(CR0_FDXTFCEN, ®s->CR0Set); + writel(CR0_FDXRFCEN, ®s->CR0Set); + break; + + case FLOW_CNTL_DISABLE: + writel(CR0_FDXRFCEN, ®s->CR0Clr); + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + default: + break; + } + +} + + +/** + * velocity_ethtool_up - pre hook for ethtool + * @dev: network device + * + * Called before an ethtool operation. We need to make sure the + * chip is out of D3 state before we poke at it. + */ + +static int velocity_ethtool_up(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + if(!netif_running(dev)) + pci_set_power_state(vptr->pdev, 0); + return 0; +} + +/** + * velocity_ethtool_down - post hook for ethtool + * @dev: network device + * + * Called after an ethtool operation. 
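mii_check_media_mode() and check_connection_type() above both return a bitmask of VELOCITY_* link flags. A minimal sketch of decoding such a mask into text; the flag values below are placeholders for illustration only, the real definitions live elsewhere in via-velocity.h:

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit values for illustration only. */
#define VELOCITY_LINK_FAIL	0x00000001U
#define VELOCITY_SPEED_10	0x00000002U
#define VELOCITY_SPEED_100	0x00000004U
#define VELOCITY_SPEED_1000	0x00000008U
#define VELOCITY_DUPLEX_FULL	0x00000010U
#define VELOCITY_AUTONEG_ENABLE	0x00000020U

static void print_link_status(uint32_t status)
{
	if (status & VELOCITY_LINK_FAIL) {
		printf("link down\n");
		return;
	}
	printf("link up, %s Mb/s, %s duplex, %s\n",
	       (status & VELOCITY_SPEED_1000) ? "1000" :
	       (status & VELOCITY_SPEED_100)  ? "100"  : "10",
	       (status & VELOCITY_DUPLEX_FULL) ? "full" : "half",
	       (status & VELOCITY_AUTONEG_ENABLE) ? "autoneg" : "forced");
}

int main(void)
{
	print_link_status(VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL | VELOCITY_AUTONEG_ENABLE);
	print_link_status(VELOCITY_LINK_FAIL);
	return 0;
}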
Restore the chip back to D3 + * state if it isn't running. + */ + +static void velocity_ethtool_down(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + if(!netif_running(dev)) + pci_set_power_state(vptr->pdev, 3); +} + +static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct velocity_info *vptr = dev->priv; + struct mac_regs * regs = vptr->mac_regs; + u32 status; + status = check_connection_type(vptr->mac_regs); + + cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; + if (status & VELOCITY_SPEED_100) + cmd->speed = SPEED_100; + else + cmd->speed = SPEED_10; + cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + cmd->port = PORT_TP; + cmd->transceiver = XCVR_INTERNAL; + cmd->phy_address = readb(®s->MIIADR) & 0x1F; + + if (status & VELOCITY_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; + else + cmd->duplex = DUPLEX_HALF; + + return 0; +} + +static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct velocity_info *vptr = dev->priv; + u32 curr_status; + u32 new_status = 0; + int ret = 0; + + curr_status = check_connection_type(vptr->mac_regs); + curr_status &= (~VELOCITY_LINK_FAIL); + + new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); + new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); + new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); + new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0); + + if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) + ret = -EINVAL; + else + velocity_set_media_mode(vptr, new_status); + + return ret; +} + +static u32 velocity_get_link(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + struct mac_regs * regs = vptr->mac_regs; + return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 
0 : 1; +} + +static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct velocity_info *vptr = dev->priv; + strcpy(info->driver, VELOCITY_NAME); + strcpy(info->version, VELOCITY_VERSION); + strcpy(info->bus_info, vptr->pdev->slot_name); +} + +static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct velocity_info *vptr = dev->priv; + wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP; + wol->wolopts |= WAKE_MAGIC; + /* + if (vptr->wol_opts & VELOCITY_WOL_PHY) + wol.wolopts|=WAKE_PHY; + */ + if (vptr->wol_opts & VELOCITY_WOL_UCAST) + wol->wolopts |= WAKE_UCAST; + if (vptr->wol_opts & VELOCITY_WOL_ARP) + wol->wolopts |= WAKE_ARP; + memcpy(&wol->sopass, vptr->wol_passwd, 6); +} + +static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct velocity_info *vptr = dev->priv; + + if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP))) + return -EFAULT; + vptr->wol_opts = VELOCITY_WOL_MAGIC; + + /* + if (wol.wolopts & WAKE_PHY) { + vptr->wol_opts|=VELOCITY_WOL_PHY; + vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED; + } + */ + + if (wol->wolopts & WAKE_MAGIC) { + vptr->wol_opts |= VELOCITY_WOL_MAGIC; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + if (wol->wolopts & WAKE_UCAST) { + vptr->wol_opts |= VELOCITY_WOL_UCAST; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + if (wol->wolopts & WAKE_ARP) { + vptr->wol_opts |= VELOCITY_WOL_ARP; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + memcpy(vptr->wol_passwd, wol->sopass, 6); + return 0; +} + +static u32 velocity_get_msglevel(struct net_device *dev) +{ + return msglevel; +} + +static void velocity_set_msglevel(struct net_device *dev, u32 value) +{ + msglevel = value; +} + +static struct ethtool_ops velocity_ethtool_ops = { + .get_settings = velocity_get_settings, + .set_settings = velocity_set_settings, + .get_drvinfo = velocity_get_drvinfo, + .get_wol = velocity_ethtool_get_wol, + .set_wol = velocity_ethtool_set_wol, + .get_msglevel = velocity_get_msglevel, + .set_msglevel = velocity_set_msglevel, + .get_link = velocity_get_link, + .begin = velocity_ethtool_up, + .complete = velocity_ethtool_down +}; + +/** + * velocity_mii_ioctl - MII ioctl handler + * @dev: network device + * @ifr: the ifreq block for the ioctl + * @cmd: the command + * + * Process MII requests made via ioctl from the network layer. 
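The velocity_ethtool_ops table above is what the ETHTOOL_GSET ioctl path ends up calling via velocity_get_settings(). A minimal userspace sketch of querying those settings through the standard SIOCETHTOOL interface; the "eth0" interface name is an assumption and error handling is trimmed:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct ethtool_cmd ecmd;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	memset(&ecmd, 0, sizeof(ecmd));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name is an assumption */

	ecmd.cmd = ETHTOOL_GSET;			/* serviced by velocity_get_settings() */
	ifr.ifr_data = (char *) &ecmd;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("speed %u, duplex %u, autoneg %u\n",
		       (unsigned) ecmd.speed, (unsigned) ecmd.duplex,
		       (unsigned) ecmd.autoneg);
	else
		perror("SIOCETHTOOL");
	return 0;
}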
These + * are used by tools like kudzu to interrogate the link state of the + * hardware + */ + +static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct velocity_info *vptr = dev->priv; + struct mac_regs * regs = vptr->mac_regs; + unsigned long flags; + struct mii_ioctl_data *miidata = (struct mii_ioctl_data *) &(ifr->ifr_data); + int err; + + switch (cmd) { + case SIOCGMIIPHY: + miidata->phy_id = readb(®s->MIIADR) & 0x1f; + break; + case SIOCGMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if(velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0) + return -ETIMEDOUT; + break; + case SIOCSMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + spin_lock_irqsave(&vptr->lock, flags); + err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in); + spin_unlock_irqrestore(&vptr->lock, flags); + check_connection_type(vptr->mac_regs); + if(err) + return err; + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +#ifdef CONFIG_PM + +/** + * velocity_save_context - save registers + * @vptr: velocity + * @context: buffer for stored context + * + * Retrieve the current configuration from the velocity hardware + * and stash it in the context structure, for use by the context + * restore functions. This allows us to save things we need across + * power down states + */ + +static void velocity_save_context(struct velocity_info *vptr, struct velocity_context * context) +{ + struct mac_regs * regs = vptr->mac_regs; + u16 i; + u8 *ptr = (u8 *)regs; + + for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + + for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + + for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + +} + +/** + * velocity_restore_context - restore registers + * @vptr: velocity + * @context: buffer for stored context + * + * Reload the register configuration from the velocity context + * created by velocity_save_context. 
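Tools such as mii-tool reach the handler above through SIOCGMIIPHY/SIOCGMIIREG and place the mii_ioctl_data block inside the ifreq union, exactly as the cast in velocity_mii_ioctl() assumes. A minimal userspace sketch (interface name "eth0" assumed) that reads the PHY's BMSR:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	/* Same trick as the driver: the mii_ioctl_data block lives in the ifreq union. */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* interface name is an assumption */

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0) {		/* fills mii->phy_id */
		perror("SIOCGMIIPHY");
		return 1;
	}
	mii->reg_num = MII_BMSR;			/* basic mode status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0) {		/* fills mii->val_out */
		perror("SIOCGMIIREG");
		return 1;
	}
	printf("PHY %u BMSR = 0x%04x\n", mii->phy_id, mii->val_out);
	return 0;
}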
+ */ + +static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context) +{ + struct mac_regs * regs = vptr->mac_regs; + int i; + u8 *ptr = (u8 *)regs; + + for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4) { + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + } + + /* Just skip cr0 */ + for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) { + /* Clear */ + writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4); + /* Set */ + writeb(*((u8 *) (context->mac_reg + i)), ptr + i); + } + + for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4) { + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + } + + for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) { + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + } + + for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++) { + writeb(*((u8 *) (context->mac_reg + i)), ptr + i); + } + +} + +static int velocity_suspend(struct pci_dev *pdev, u32 state) +{ + struct velocity_info *vptr = pci_get_drvdata(pdev); + unsigned long flags; + + if(!netif_running(vptr->dev)) + return 0; + + netif_device_detach(vptr->dev); + + spin_lock_irqsave(&vptr->lock, flags); + pci_save_state(pdev, vptr->pci_state); +#ifdef ETHTOOL_GWOL + if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { + velocity_get_ip(vptr); + velocity_save_context(vptr, &vptr->context); + velocity_shutdown(vptr); + velocity_set_wol(vptr); + pci_enable_wake(pdev, 3, 1); + pci_set_power_state(pdev, 3); + } else { + velocity_save_context(vptr, &vptr->context); + velocity_shutdown(vptr); + pci_disable_device(pdev); + pci_set_power_state(pdev, state); + } +#else + pci_set_power_state(pdev, state); +#endif + spin_unlock_irqrestore(&vptr->lock, flags); + return 0; +} + +static int velocity_resume(struct pci_dev *pdev) +{ + struct velocity_info *vptr = pci_get_drvdata(pdev); + unsigned long flags; + int i; + + if(!netif_running(vptr->dev)) + return 0; + + pci_set_power_state(pdev, 0); + pci_enable_wake(pdev, 0, 0); + pci_restore_state(pdev, vptr->pci_state); + + mac_wol_reset(vptr->mac_regs); + + spin_lock_irqsave(&vptr->lock, flags); + velocity_restore_context(vptr, &vptr->context); + velocity_init_registers(vptr, VELOCITY_INIT_WOL); + mac_disable_int(vptr->mac_regs); + + velocity_tx_srv(vptr, 0); + + for (i = 0; i < vptr->num_txq; i++) { + if (vptr->td_used[i]) { + mac_tx_queue_wake(vptr->mac_regs, i); + } + } + + mac_enable_int(vptr->mac_regs); + spin_unlock_irqrestore(&vptr->lock, flags); + netif_device_attach(vptr->dev); + + return 0; +} + +static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; + struct net_device *dev; + struct velocity_info *vptr; + + if (ifa) { + dev = ifa->ifa_dev->dev; + vptr = dev->priv; + velocity_get_ip(vptr); + } + return NOTIFY_DONE; +} +#endif + +/* + * Purpose: Functions to set WOL. 
+ */ + +const static unsigned short crc16_tab[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; + + +static u32 mask_pattern[2][4] = { + {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */ + {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */ +}; + +/** + * ether_crc16 - compute ethernet CRC + * @len: buffer length + * @cp: buffer + * @crc16: initial CRC + * + * Compute a CRC value for a block of data. + * FIXME: can we use generic functions ? + */ + +static u16 ether_crc16(int len, u8 * cp, u16 crc16) +{ + while (len--) + crc16 = (crc16 >> 8) ^ crc16_tab[(crc16 ^ *cp++) & 0xff]; + return (crc16); +} + +/** + * bit_reverse - 16bit reverse + * @data: 16bit data t reverse + * + * Reverse the order of a 16bit value and return the reversed bits + */ + +static u16 bit_reverse(u16 data) +{ + u32 new = 0x00000000; + int ii; + + + for (ii = 0; ii < 16; ii++) { + new |= ((u32) (data & 1) << (31 - ii)); + data >>= 1; + } + + return (u16) (new >> 16); +} + +/** + * wol_calc_crc - WOL CRC + * @pattern: data pattern + * @mask_pattern: mask + * + * Compute the wake on lan crc hashes for the packet header + * we are interested in. 
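mask_pattern[0] above is the ARP wake pattern: wol_calc_crc() (below) consumes the mask one byte per eight packet bytes, LSB first, and only the selected bytes reach ether_crc16(). A small sketch, assuming a little-endian host as the u8 cast in velocity_set_wol() does, that lists which frame offsets the ARP mask selects; they land on the EtherType, the low byte of the ARP opcode and the target IP address:

#include <stdio.h>
#include <stdint.h>

/* ARP wake pattern mask, copied from the driver's mask_pattern[0]. */
static const uint32_t arp_mask[4] = { 0x00203000, 0x000003C0, 0x00000000, 0x00000000 };

int main(void)
{
	/* Same byte-wise view of the mask that wol_calc_crc() uses (little-endian host assumed). */
	const uint8_t *mask = (const uint8_t *) arp_mask;
	int i, j;

	for (i = 0; i < 16; i++) {
		uint8_t m = mask[i];
		for (j = 0; j < 8; j++, m >>= 1)
			if (m & 1)
				printf("frame byte %d is hashed\n", i * 8 + j);
	}
	/* Prints offsets 12,13 (EtherType), 21 (ARP opcode low byte) and 38-41 (target IP). */
	return 0;
}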
+ */ + +u16 wol_calc_crc(int size, u8 * pattern, u8 *mask_pattern) +{ + u16 crc = 0xFFFF; + u8 mask; + int i, j; + + for (i = 0; i < size; i++) { + mask = mask_pattern[i]; + + /* Skip this loop if the mask equals to zero */ + if (mask == 0x00) + continue; + + for (j = 0; j < 8; j++) { + if ((mask & 0x01) == 0) { + mask >>= 1; + continue; + } + mask >>= 1; + crc = ether_crc16(1, &(pattern[i * 8 + j]), crc); + } + } + /* Finally, invert the result once to get the correct data */ + crc = ~crc; + return bit_reverse(crc); +} + +/** + * velocity_set_wol - set up for wake on lan + * @vptr: velocity to set WOL status on + * + * Set a card up for wake on lan either by unicast or by + * ARP packet. + * + * FIXME: check static buffer is safe here + */ + +static int velocity_set_wol(struct velocity_info *vptr) +{ + struct mac_regs * regs = vptr->mac_regs; + static u8 buf[256]; + int i; + + writew(0xFFFF, ®s->WOLCRClr); + writeb(WOLCFG_SAB | WOLCFG_SAM, ®s->WOLCFGSet); + writew(WOLCR_MAGIC_EN, ®s->WOLCRSet); + + /* + if (vptr->wol_opts & VELOCITY_WOL_PHY) + writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), ®s->WOLCRSet); + */ + + if (vptr->wol_opts & VELOCITY_WOL_UCAST) { + writew(WOLCR_UNICAST_EN, ®s->WOLCRSet); + } + + if (vptr->wol_opts & VELOCITY_WOL_ARP) { + struct arp_packet *arp = (struct arp_packet *) buf; + u16 crc; + memset(buf, 0, sizeof(struct arp_packet) + 7); + + for (i = 0; i < 4; i++) + writel(mask_pattern[0][i], ®s->ByteMask[0][i]); + + arp->type = htons(ETH_P_ARP); + arp->ar_op = htons(1); + + memcpy(arp->ar_tip, vptr->ip_addr, 4); + + crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf, (u8 *) & mask_pattern[0][0]); + + writew(crc, ®s->PatternCRC[0]); + writew(WOLCR_ARP_EN, ®s->WOLCRSet); + } + + BYTE_REG_BITS_ON(PWCFG_WOLTYPE, ®s->PWCFGSet); + BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, ®s->PWCFGSet); + + writew(0x0FFF, ®s->WOLSRClr); + + if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { + if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) + MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); + + MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); + } + + if (vptr->mii_status & VELOCITY_SPEED_1000) + MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); + + BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); + + { + u8 GCR; + GCR = readb(®s->CHIPGCR); + GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX; + writeb(GCR, ®s->CHIPGCR); + } + + BYTE_REG_BITS_OFF(ISR_PWEI, ®s->ISR); + /* Turn on SWPTAG just before entering power mode */ + BYTE_REG_BITS_ON(STICKHW_SWPTAG, ®s->STICKHW); + /* Go to bed ..... */ + BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); + + return 0; +} + diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h new file mode 100644 index 000000000..2175b8696 --- /dev/null +++ b/drivers/net/via-velocity.h @@ -0,0 +1,1885 @@ +/* + * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. + * All rights reserved. + * + * This software may be redistributed and/or modified under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or + * any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * File: via-velocity.h + * + * Purpose: Header file to define driver's private structures. 
+ * + * Author: Chuang Liang-Shing, AJ Jiang + * + * Date: Jan 24, 2003 + */ + + +#ifndef VELOCITY_H +#define VELOCITY_H + +#define VELOCITY_TX_CSUM_SUPPORT + +#define VELOCITY_NAME "via-velocity" +#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver" +#define VELOCITY_VERSION "1.13" + +#define PKT_BUF_SZ 1540 + +#define MAX_UNITS 8 +#define OPTION_DEFAULT { [0 ... MAX_UNITS-1] = -1} + +#define REV_ID_VT6110 (0) +#define DEVICE_ID (0x3119) + +#define BYTE_REG_BITS_ON(x,p) do { writeb(readb((p))|(x),(p));} while (0) +#define WORD_REG_BITS_ON(x,p) do { writew(readw((p))|(x),(p));} while (0) +#define DWORD_REG_BITS_ON(x,p) do { writel(readl((p))|(x),(p));} while (0) + +#define BYTE_REG_BITS_IS_ON(x,p) (readb((p)) & (x)) +#define WORD_REG_BITS_IS_ON(x,p) (readw((p)) & (x)) +#define DWORD_REG_BITS_IS_ON(x,p) (readl((p)) & (x)) + +#define BYTE_REG_BITS_OFF(x,p) do { writeb(readb((p)) & (~(x)),(p));} while (0) +#define WORD_REG_BITS_OFF(x,p) do { writew(readw((p)) & (~(x)),(p));} while (0) +#define DWORD_REG_BITS_OFF(x,p) do { writel(readl((p)) & (~(x)),(p));} while (0) + +#define BYTE_REG_BITS_SET(x,m,p) do { writeb( (readb((p)) & (~(m))) |(x),(p));} while (0) +#define WORD_REG_BITS_SET(x,m,p) do { writew( (readw((p)) & (~(m))) |(x),(p));} while (0) +#define DWORD_REG_BITS_SET(x,m,p) do { writel( (readl((p)) & (~(m)))|(x),(p));} while (0) + +#define VAR_USED(p) do {(p)=(p);} while (0) + +/* + * Purpose: Structures for MAX RX/TX descriptors. + */ + + +#define B_OWNED_BY_CHIP 1 +#define B_OWNED_BY_HOST 0 + +/* + * Bits in the RSR0 register + */ + +#define RSR_DETAG 0x0080 +#define RSR_SNTAG 0x0040 +#define RSR_RXER 0x0020 +#define RSR_RL 0x0010 +#define RSR_CE 0x0008 +#define RSR_FAE 0x0004 +#define RSR_CRC 0x0002 +#define RSR_VIDM 0x0001 + +/* + * Bits in the RSR1 register + */ + +#define RSR_RXOK 0x8000 // rx OK +#define RSR_PFT 0x4000 // Perfect filtering address match +#define RSR_MAR 0x2000 // MAC accept multicast address packet +#define RSR_BAR 0x1000 // MAC accept broadcast address packet +#define RSR_PHY 0x0800 // MAC accept physical address packet +#define RSR_VTAG 0x0400 // 802.1p/1q tagging packet indicator +#define RSR_STP 0x0200 // start of packet +#define RSR_EDP 0x0100 // end of packet + +/* + * Bits in the RSR1 register + */ + +#define RSR1_RXOK 0x80 // rx OK +#define RSR1_PFT 0x40 // Perfect filtering address match +#define RSR1_MAR 0x20 // MAC accept multicast address packet +#define RSR1_BAR 0x10 // MAC accept broadcast address packet +#define RSR1_PHY 0x08 // MAC accept physical address packet +#define RSR1_VTAG 0x04 // 802.1p/1q tagging packet indicator +#define RSR1_STP 0x02 // start of packet +#define RSR1_EDP 0x01 // end of packet + +/* + * Bits in the CSM register + */ + +#define CSM_IPOK 0x40 //IP Checkusm validatiaon ok +#define CSM_TUPOK 0x20 //TCP/UDP Checkusm validatiaon ok +#define CSM_FRAG 0x10 //Fragment IP datagram +#define CSM_IPKT 0x04 //Received an IP packet +#define CSM_TCPKT 0x02 //Received a TCP packet +#define CSM_UDPKT 0x01 //Received a UDP packet + +/* + * Bits in the TSR0 register + */ + +#define TSR0_ABT 0x0080 // Tx abort because of excessive collision +#define TSR0_OWT 0x0040 // Jumbo frame Tx abort +#define TSR0_OWC 0x0020 // Out of window collision +#define TSR0_COLS 0x0010 // experience collision in this transmit event +#define TSR0_NCR3 0x0008 // collision retry counter[3] +#define TSR0_NCR2 0x0004 // collision retry counter[2] +#define TSR0_NCR1 0x0002 // collision retry counter[1] +#define TSR0_NCR0 
0x0001 // collision retry counter[0] +#define TSR0_TERR 0x8000 // +#define TSR0_FDX 0x4000 // current transaction is serviced by full duplex mode +#define TSR0_GMII 0x2000 // current transaction is serviced by GMII mode +#define TSR0_LNKFL 0x1000 // packet serviced during link down +#define TSR0_SHDN 0x0400 // shutdown case +#define TSR0_CRS 0x0200 // carrier sense lost +#define TSR0_CDH 0x0100 // AQE test fail (CD heartbeat) + +/* + * Bits in the TSR1 register + */ + +#define TSR1_TERR 0x80 // +#define TSR1_FDX 0x40 // current transaction is serviced by full duplex mode +#define TSR1_GMII 0x20 // current transaction is serviced by GMII mode +#define TSR1_LNKFL 0x10 // packet serviced during link down +#define TSR1_SHDN 0x04 // shutdown case +#define TSR1_CRS 0x02 // carrier sense lost +#define TSR1_CDH 0x01 // AQE test fail (CD heartbeat) + +// +// Bits in the TCR0 register +// +#define TCR0_TIC 0x80 // assert interrupt immediately while descriptor has been send complete +#define TCR0_PIC 0x40 // priority interrupt request, INA# is issued over adaptive interrupt scheme +#define TCR0_VETAG 0x20 // enable VLAN tag +#define TCR0_IPCK 0x10 // request IP checksum calculation. +#define TCR0_UDPCK 0x08 // request UDP checksum calculation. +#define TCR0_TCPCK 0x04 // request TCP checksum calculation. +#define TCR0_JMBO 0x02 // indicate a jumbo packet in GMAC side +#define TCR0_CRC 0x01 // disable CRC generation + +#define TCPLS_NORMAL 3 +#define TCPLS_START 2 +#define TCPLS_END 1 +#define TCPLS_MED 0 + + +// max transmit or receive buffer size +#define CB_RX_BUF_SIZE 2048UL // max buffer size + // NOTE: must be multiple of 4 + +#define CB_MAX_RD_NUM 512 // MAX # of RD +#define CB_MAX_TD_NUM 256 // MAX # of TD + +#define CB_INIT_RD_NUM_3119 128 // init # of RD, for setup VT3119 +#define CB_INIT_TD_NUM_3119 64 // init # of TD, for setup VT3119 + +#define CB_INIT_RD_NUM 128 // init # of RD, for setup default +#define CB_INIT_TD_NUM 64 // init # of TD, for setup default + +// for 3119 +#define CB_TD_RING_NUM 4 // # of TD rings. +#define CB_MAX_SEG_PER_PKT 7 // max data seg per packet (Tx) + + +/* + * If collisions excess 15 times , tx will abort, and + * if tx fifo underflow, tx will fail + * we should try to resend it + */ + +#define CB_MAX_TX_ABORT_RETRY 3 + +/* + * Receive descriptor + */ + +struct rdesc0 { + u16 RSR; /* Receive status */ + u16 len:14; /* Received packet length */ + u16 reserved:1; + u16 owner:1; /* Who owns this buffer ? 
*/ +}; + +struct rdesc1 { + u16 PQTAG; + u8 CSM; + u8 IPKT; +}; + +struct rx_desc { + struct rdesc0 rdesc0; + struct rdesc1 rdesc1; + u32 pa_low; /* Low 32 bit PCI address */ + u16 pa_high; /* Next 16 bit PCI address (48 total) */ + u16 len:15; /* Frame size */ + u16 inten:1; /* Enable interrupt */ +} __attribute__ ((__packed__)); + +/* + * Transmit descriptor + */ + +struct tdesc0 { + u16 TSR; /* Transmit status register */ + u16 pktsize:14; /* Size of frame */ + u16 reserved:1; + u16 owner:1; /* Who owns the buffer */ +}; + +struct pqinf { /* Priority queue info */ + u16 VID:12; + u16 CFI:1; + u16 priority:3; +} __attribute__ ((__packed__)); + +struct tdesc1 { + struct pqinf pqinf; + u8 TCR; + u8 TCPLS:2; + u8 reserved:2; + u8 CMDZ:4; +} __attribute__ ((__packed__)); + +struct td_buf { + u32 pa_low; + u16 pa_high; + u16 bufsize:14; + u16 reserved:1; + u16 queue:1; +} __attribute__ ((__packed__)); + +struct tx_desc { + struct tdesc0 tdesc0; + struct tdesc1 tdesc1; + struct td_buf td_buf[7]; +}; + +struct velocity_rd_info { + struct sk_buff *skb; + dma_addr_t skb_dma; +}; + +/** + * alloc_rd_info - allocate an rd info block + * + * Alocate and initialize a receive info structure used for keeping + * track of kernel side information related to each receive + * descriptor we are using + */ + +static inline struct velocity_rd_info *alloc_rd_info(void) +{ + struct velocity_rd_info *ptr; + if ((ptr = kmalloc(sizeof(struct velocity_rd_info), GFP_ATOMIC)) == NULL) + return NULL; + else { + memset(ptr, 0, sizeof(struct velocity_rd_info)); + return ptr; + } +} + +/* + * Used to track transmit side buffers. + */ + +struct velocity_td_info { + struct sk_buff *skb; + u8 *buf; + int nskb_dma; + dma_addr_t skb_dma[7]; + dma_addr_t buf_dma; +}; + +enum { + OWNED_BY_HOST = 0, + OWNED_BY_NIC = 1 +} velocity_owner; + + +/* + * MAC registers and macros. 
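The descriptors above are laid out to match what the hardware DMAs: 16 bytes per receive descriptor and 64 bytes per transmit descriptor (tdesc0 + tdesc1 + seven buffer pointers). A standalone sketch that restates the receive layout and checks its size, assuming a compiler that packs these bitfields the way the driver expects:

#include <assert.h>
#include <stdint.h>

struct rdesc0 {
	uint16_t RSR;			/* receive status */
	uint16_t len:14;		/* received packet length */
	uint16_t reserved:1;
	uint16_t owner:1;		/* chip vs. host ownership */
};

struct rdesc1 {
	uint16_t PQTAG;
	uint8_t  CSM;
	uint8_t  IPKT;
};

struct rx_desc {
	struct rdesc0 rdesc0;
	struct rdesc1 rdesc1;
	uint32_t pa_low;		/* low 32 bits of the buffer address */
	uint16_t pa_high;		/* next 16 bits (48-bit addressing) */
	uint16_t len:15;		/* buffer size */
	uint16_t inten:1;		/* interrupt enable */
} __attribute__ ((__packed__));

int main(void)
{
	/* 4 + 4 + 4 + 2 + 2 = 16 bytes per RX descriptor; the TX descriptor
	 * works out to 4 + 4 + 7 * 8 = 64 bytes the same way. */
	assert(sizeof(struct rx_desc) == 16);
	return 0;
}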
+ */ + + +#define MCAM_SIZE 64 +#define VCAM_SIZE 64 +#define TX_QUEUE_NO 4 + +#define MAX_HW_MIB_COUNTER 32 +#define VELOCITY_MIN_MTU (1514-14) +#define VELOCITY_MAX_MTU (9000) + +/* + * Registers in the MAC + */ + +#define MAC_REG_PAR 0x00 // physical address +#define MAC_REG_RCR 0x06 +#define MAC_REG_TCR 0x07 +#define MAC_REG_CR0_SET 0x08 +#define MAC_REG_CR1_SET 0x09 +#define MAC_REG_CR2_SET 0x0A +#define MAC_REG_CR3_SET 0x0B +#define MAC_REG_CR0_CLR 0x0C +#define MAC_REG_CR1_CLR 0x0D +#define MAC_REG_CR2_CLR 0x0E +#define MAC_REG_CR3_CLR 0x0F +#define MAC_REG_MAR 0x10 +#define MAC_REG_CAM 0x10 +#define MAC_REG_DEC_BASE_HI 0x18 +#define MAC_REG_DBF_BASE_HI 0x1C +#define MAC_REG_ISR_CTL 0x20 +#define MAC_REG_ISR_HOTMR 0x20 +#define MAC_REG_ISR_TSUPTHR 0x20 +#define MAC_REG_ISR_RSUPTHR 0x20 +#define MAC_REG_ISR_CTL1 0x21 +#define MAC_REG_TXE_SR 0x22 +#define MAC_REG_RXE_SR 0x23 +#define MAC_REG_ISR 0x24 +#define MAC_REG_ISR0 0x24 +#define MAC_REG_ISR1 0x25 +#define MAC_REG_ISR2 0x26 +#define MAC_REG_ISR3 0x27 +#define MAC_REG_IMR 0x28 +#define MAC_REG_IMR0 0x28 +#define MAC_REG_IMR1 0x29 +#define MAC_REG_IMR2 0x2A +#define MAC_REG_IMR3 0x2B +#define MAC_REG_TDCSR_SET 0x30 +#define MAC_REG_RDCSR_SET 0x32 +#define MAC_REG_TDCSR_CLR 0x34 +#define MAC_REG_RDCSR_CLR 0x36 +#define MAC_REG_RDBASE_LO 0x38 +#define MAC_REG_RDINDX 0x3C +#define MAC_REG_TDBASE_LO 0x40 +#define MAC_REG_RDCSIZE 0x50 +#define MAC_REG_TDCSIZE 0x52 +#define MAC_REG_TDINDX 0x54 +#define MAC_REG_TDIDX0 0x54 +#define MAC_REG_TDIDX1 0x56 +#define MAC_REG_TDIDX2 0x58 +#define MAC_REG_TDIDX3 0x5A +#define MAC_REG_PAUSE_TIMER 0x5C +#define MAC_REG_RBRDU 0x5E +#define MAC_REG_FIFO_TEST0 0x60 +#define MAC_REG_FIFO_TEST1 0x64 +#define MAC_REG_CAMADDR 0x68 +#define MAC_REG_CAMCR 0x69 +#define MAC_REG_GFTEST 0x6A +#define MAC_REG_FTSTCMD 0x6B +#define MAC_REG_MIICFG 0x6C +#define MAC_REG_MIISR 0x6D +#define MAC_REG_PHYSR0 0x6E +#define MAC_REG_PHYSR1 0x6F +#define MAC_REG_MIICR 0x70 +#define MAC_REG_MIIADR 0x71 +#define MAC_REG_MIIDATA 0x72 +#define MAC_REG_SOFT_TIMER0 0x74 +#define MAC_REG_SOFT_TIMER1 0x76 +#define MAC_REG_CFGA 0x78 +#define MAC_REG_CFGB 0x79 +#define MAC_REG_CFGC 0x7A +#define MAC_REG_CFGD 0x7B +#define MAC_REG_DCFG0 0x7C +#define MAC_REG_DCFG1 0x7D +#define MAC_REG_MCFG0 0x7E +#define MAC_REG_MCFG1 0x7F + +#define MAC_REG_TBIST 0x80 +#define MAC_REG_RBIST 0x81 +#define MAC_REG_PMCC 0x82 +#define MAC_REG_STICKHW 0x83 +#define MAC_REG_MIBCR 0x84 +#define MAC_REG_EERSV 0x85 +#define MAC_REG_REVID 0x86 +#define MAC_REG_MIBREAD 0x88 +#define MAC_REG_BPMA 0x8C +#define MAC_REG_EEWR_DATA 0x8C +#define MAC_REG_BPMD_WR 0x8F +#define MAC_REG_BPCMD 0x90 +#define MAC_REG_BPMD_RD 0x91 +#define MAC_REG_EECHKSUM 0x92 +#define MAC_REG_EECSR 0x93 +#define MAC_REG_EERD_DATA 0x94 +#define MAC_REG_EADDR 0x96 +#define MAC_REG_EMBCMD 0x97 +#define MAC_REG_JMPSR0 0x98 +#define MAC_REG_JMPSR1 0x99 +#define MAC_REG_JMPSR2 0x9A +#define MAC_REG_JMPSR3 0x9B +#define MAC_REG_CHIPGSR 0x9C +#define MAC_REG_TESTCFG 0x9D +#define MAC_REG_DEBUG 0x9E +#define MAC_REG_CHIPGCR 0x9F +#define MAC_REG_WOLCR0_SET 0xA0 +#define MAC_REG_WOLCR1_SET 0xA1 +#define MAC_REG_PWCFG_SET 0xA2 +#define MAC_REG_WOLCFG_SET 0xA3 +#define MAC_REG_WOLCR0_CLR 0xA4 +#define MAC_REG_WOLCR1_CLR 0xA5 +#define MAC_REG_PWCFG_CLR 0xA6 +#define MAC_REG_WOLCFG_CLR 0xA7 +#define MAC_REG_WOLSR0_SET 0xA8 +#define MAC_REG_WOLSR1_SET 0xA9 +#define MAC_REG_WOLSR0_CLR 0xAC +#define MAC_REG_WOLSR1_CLR 0xAD +#define MAC_REG_PATRN_CRC0 0xB0 +#define MAC_REG_PATRN_CRC1 0xB2 +#define 
MAC_REG_PATRN_CRC2 0xB4 +#define MAC_REG_PATRN_CRC3 0xB6 +#define MAC_REG_PATRN_CRC4 0xB8 +#define MAC_REG_PATRN_CRC5 0xBA +#define MAC_REG_PATRN_CRC6 0xBC +#define MAC_REG_PATRN_CRC7 0xBE +#define MAC_REG_BYTEMSK0_0 0xC0 +#define MAC_REG_BYTEMSK0_1 0xC4 +#define MAC_REG_BYTEMSK0_2 0xC8 +#define MAC_REG_BYTEMSK0_3 0xCC +#define MAC_REG_BYTEMSK1_0 0xD0 +#define MAC_REG_BYTEMSK1_1 0xD4 +#define MAC_REG_BYTEMSK1_2 0xD8 +#define MAC_REG_BYTEMSK1_3 0xDC +#define MAC_REG_BYTEMSK2_0 0xE0 +#define MAC_REG_BYTEMSK2_1 0xE4 +#define MAC_REG_BYTEMSK2_2 0xE8 +#define MAC_REG_BYTEMSK2_3 0xEC +#define MAC_REG_BYTEMSK3_0 0xF0 +#define MAC_REG_BYTEMSK3_1 0xF4 +#define MAC_REG_BYTEMSK3_2 0xF8 +#define MAC_REG_BYTEMSK3_3 0xFC + +/* + * Bits in the RCR register + */ + +#define RCR_AS 0x80 +#define RCR_AP 0x40 +#define RCR_AL 0x20 +#define RCR_PROM 0x10 +#define RCR_AB 0x08 +#define RCR_AM 0x04 +#define RCR_AR 0x02 +#define RCR_SEP 0x01 + +/* + * Bits in the TCR register + */ + +#define TCR_TB2BDIS 0x80 +#define TCR_COLTMC1 0x08 +#define TCR_COLTMC0 0x04 +#define TCR_LB1 0x02 /* loopback[1] */ +#define TCR_LB0 0x01 /* loopback[0] */ + +/* + * Bits in the CR0 register + */ + +#define CR0_TXON 0x00000008UL +#define CR0_RXON 0x00000004UL +#define CR0_STOP 0x00000002UL /* stop MAC, default = 1 */ +#define CR0_STRT 0x00000001UL /* start MAC */ +#define CR0_SFRST 0x00008000UL /* software reset */ +#define CR0_TM1EN 0x00004000UL +#define CR0_TM0EN 0x00002000UL +#define CR0_DPOLL 0x00000800UL /* disable rx/tx auto polling */ +#define CR0_DISAU 0x00000100UL +#define CR0_XONEN 0x00800000UL +#define CR0_FDXTFCEN 0x00400000UL /* full-duplex TX flow control enable */ +#define CR0_FDXRFCEN 0x00200000UL /* full-duplex RX flow control enable */ +#define CR0_HDXFCEN 0x00100000UL /* half-duplex flow control enable */ +#define CR0_XHITH1 0x00080000UL /* TX XON high threshold 1 */ +#define CR0_XHITH0 0x00040000UL /* TX XON high threshold 0 */ +#define CR0_XLTH1 0x00020000UL /* TX pause frame low threshold 1 */ +#define CR0_XLTH0 0x00010000UL /* TX pause frame low threshold 0 */ +#define CR0_GSPRST 0x80000000UL +#define CR0_FORSRST 0x40000000UL +#define CR0_FPHYRST 0x20000000UL +#define CR0_DIAG 0x10000000UL +#define CR0_INTPCTL 0x04000000UL +#define CR0_GINTMSK1 0x02000000UL +#define CR0_GINTMSK0 0x01000000UL + +/* + * Bits in the CR1 register + */ + +#define CR1_SFRST 0x80 /* software reset */ +#define CR1_TM1EN 0x40 +#define CR1_TM0EN 0x20 +#define CR1_DPOLL 0x08 /* disable rx/tx auto polling */ +#define CR1_DISAU 0x01 + +/* + * Bits in the CR2 register + */ + +#define CR2_XONEN 0x80 +#define CR2_FDXTFCEN 0x40 /* full-duplex TX flow control enable */ +#define CR2_FDXRFCEN 0x20 /* full-duplex RX flow control enable */ +#define CR2_HDXFCEN 0x10 /* half-duplex flow control enable */ +#define CR2_XHITH1 0x08 /* TX XON high threshold 1 */ +#define CR2_XHITH0 0x04 /* TX XON high threshold 0 */ +#define CR2_XLTH1 0x02 /* TX pause frame low threshold 1 */ +#define CR2_XLTH0 0x01 /* TX pause frame low threshold 0 */ + +/* + * Bits in the CR3 register + */ + +#define CR3_GSPRST 0x80 +#define CR3_FORSRST 0x40 +#define CR3_FPHYRST 0x20 +#define CR3_DIAG 0x10 +#define CR3_INTPCTL 0x04 +#define CR3_GINTMSK1 0x02 +#define CR3_GINTMSK0 0x01 + +#define ISRCTL_UDPINT 0x8000 +#define ISRCTL_TSUPDIS 0x4000 +#define ISRCTL_RSUPDIS 0x2000 +#define ISRCTL_PMSK1 0x1000 +#define ISRCTL_PMSK0 0x0800 +#define ISRCTL_INTPD 0x0400 +#define ISRCTL_HCRLD 0x0200 +#define ISRCTL_SCRLD 0x0100 + +/* + * Bits in the ISR_CTL1 register + */ + +#define 
ISRCTL1_UDPINT 0x80 +#define ISRCTL1_TSUPDIS 0x40 +#define ISRCTL1_RSUPDIS 0x20 +#define ISRCTL1_PMSK1 0x10 +#define ISRCTL1_PMSK0 0x08 +#define ISRCTL1_INTPD 0x04 +#define ISRCTL1_HCRLD 0x02 +#define ISRCTL1_SCRLD 0x01 + +/* + * Bits in the TXE_SR register + */ + +#define TXESR_TFDBS 0x08 +#define TXESR_TDWBS 0x04 +#define TXESR_TDRBS 0x02 +#define TXESR_TDSTR 0x01 + +/* + * Bits in the RXE_SR register + */ + +#define RXESR_RFDBS 0x08 +#define RXESR_RDWBS 0x04 +#define RXESR_RDRBS 0x02 +#define RXESR_RDSTR 0x01 + +/* + * Bits in the ISR register + */ + +#define ISR_ISR3 0x80000000UL +#define ISR_ISR2 0x40000000UL +#define ISR_ISR1 0x20000000UL +#define ISR_ISR0 0x10000000UL +#define ISR_TXSTLI 0x02000000UL +#define ISR_RXSTLI 0x01000000UL +#define ISR_HFLD 0x00800000UL +#define ISR_UDPI 0x00400000UL +#define ISR_MIBFI 0x00200000UL +#define ISR_SHDNI 0x00100000UL +#define ISR_PHYI 0x00080000UL +#define ISR_PWEI 0x00040000UL +#define ISR_TMR1I 0x00020000UL +#define ISR_TMR0I 0x00010000UL +#define ISR_SRCI 0x00008000UL +#define ISR_LSTPEI 0x00004000UL +#define ISR_LSTEI 0x00002000UL +#define ISR_OVFI 0x00001000UL +#define ISR_FLONI 0x00000800UL +#define ISR_RACEI 0x00000400UL +#define ISR_TXWB1I 0x00000200UL +#define ISR_TXWB0I 0x00000100UL +#define ISR_PTX3I 0x00000080UL +#define ISR_PTX2I 0x00000040UL +#define ISR_PTX1I 0x00000020UL +#define ISR_PTX0I 0x00000010UL +#define ISR_PTXI 0x00000008UL +#define ISR_PRXI 0x00000004UL +#define ISR_PPTXI 0x00000002UL +#define ISR_PPRXI 0x00000001UL + +/* + * Bits in the IMR register + */ + +#define IMR_TXSTLM 0x02000000UL +#define IMR_UDPIM 0x00400000UL +#define IMR_MIBFIM 0x00200000UL +#define IMR_SHDNIM 0x00100000UL +#define IMR_PHYIM 0x00080000UL +#define IMR_PWEIM 0x00040000UL +#define IMR_TMR1IM 0x00020000UL +#define IMR_TMR0IM 0x00010000UL + +#define IMR_SRCIM 0x00008000UL +#define IMR_LSTPEIM 0x00004000UL +#define IMR_LSTEIM 0x00002000UL +#define IMR_OVFIM 0x00001000UL +#define IMR_FLONIM 0x00000800UL +#define IMR_RACEIM 0x00000400UL +#define IMR_TXWB1IM 0x00000200UL +#define IMR_TXWB0IM 0x00000100UL + +#define IMR_PTX3IM 0x00000080UL +#define IMR_PTX2IM 0x00000040UL +#define IMR_PTX1IM 0x00000020UL +#define IMR_PTX0IM 0x00000010UL +#define IMR_PTXIM 0x00000008UL +#define IMR_PRXIM 0x00000004UL +#define IMR_PPTXIM 0x00000002UL +#define IMR_PPRXIM 0x00000001UL + +/* 0x0013FB0FUL = initial value of IMR */ + +#define INT_MASK_DEF (IMR_PPTXIM|IMR_PPRXIM|IMR_PTXIM|IMR_PRXIM|\ + IMR_PWEIM|IMR_TXWB0IM|IMR_TXWB1IM|IMR_FLONIM|\ + IMR_OVFIM|IMR_LSTEIM|IMR_LSTPEIM|IMR_SRCIM|IMR_MIBFIM|\ + IMR_SHDNIM|IMR_TMR1IM|IMR_TMR0IM|IMR_TXSTLM) + +/* + * Bits in the TDCSR0/1, RDCSR0 register + */ + +#define TRDCSR_DEAD 0x0008 +#define TRDCSR_WAK 0x0004 +#define TRDCSR_ACT 0x0002 +#define TRDCSR_RUN 0x0001 + +/* + * Bits in the CAMADDR register + */ + +#define CAMADDR_CAMEN 0x80 +#define CAMADDR_VCAMSL 0x40 + +/* + * Bits in the CAMCR register + */ + +#define CAMCR_PS1 0x80 +#define CAMCR_PS0 0x40 +#define CAMCR_AITRPKT 0x20 +#define CAMCR_AITR16 0x10 +#define CAMCR_CAMRD 0x08 +#define CAMCR_CAMWR 0x04 +#define CAMCR_PS_CAM_MASK 0x40 +#define CAMCR_PS_CAM_DATA 0x80 +#define CAMCR_PS_MAR 0x00 + +/* + * Bits in the MIICFG register + */ + +#define MIICFG_MPO1 0x80 +#define MIICFG_MPO0 0x40 +#define MIICFG_MFDC 0x20 + +/* + * Bits in the MIISR register + */ + +#define MIISR_MIDLE 0x80 + +/* + * Bits in the PHYSR0 register + */ + +#define PHYSR0_PHYRST 0x80 +#define PHYSR0_LINKGD 0x40 +#define PHYSR0_FDPX 0x10 +#define PHYSR0_SPDG 0x08 +#define PHYSR0_SPD10 0x04 +#define 
PHYSR0_RXFLC 0x02 +#define PHYSR0_TXFLC 0x01 + +/* + * Bits in the PHYSR1 register + */ + +#define PHYSR1_PHYTBI 0x01 + +/* + * Bits in the MIICR register + */ + +#define MIICR_MAUTO 0x80 +#define MIICR_RCMD 0x40 +#define MIICR_WCMD 0x20 +#define MIICR_MDPM 0x10 +#define MIICR_MOUT 0x08 +#define MIICR_MDO 0x04 +#define MIICR_MDI 0x02 +#define MIICR_MDC 0x01 + +/* + * Bits in the MIIADR register + */ + +#define MIIADR_SWMPL 0x80 + +/* + * Bits in the CFGA register + */ + +#define CFGA_PMHCTG 0x08 +#define CFGA_GPIO1PD 0x04 +#define CFGA_ABSHDN 0x02 +#define CFGA_PACPI 0x01 + +/* + * Bits in the CFGB register + */ + +#define CFGB_GTCKOPT 0x80 +#define CFGB_MIIOPT 0x40 +#define CFGB_CRSEOPT 0x20 +#define CFGB_OFSET 0x10 +#define CFGB_CRANDOM 0x08 +#define CFGB_CAP 0x04 +#define CFGB_MBA 0x02 +#define CFGB_BAKOPT 0x01 + +/* + * Bits in the CFGC register + */ + +#define CFGC_EELOAD 0x80 +#define CFGC_BROPT 0x40 +#define CFGC_DLYEN 0x20 +#define CFGC_DTSEL 0x10 +#define CFGC_BTSEL 0x08 +#define CFGC_BPS2 0x04 /* bootrom select[2] */ +#define CFGC_BPS1 0x02 /* bootrom select[1] */ +#define CFGC_BPS0 0x01 /* bootrom select[0] */ + +/* + * Bits in the CFGD register + */ + +#define CFGD_IODIS 0x80 +#define CFGD_MSLVDACEN 0x40 +#define CFGD_CFGDACEN 0x20 +#define CFGD_PCI64EN 0x10 +#define CFGD_HTMRL4 0x08 + +/* + * Bits in the DCFG1 register + */ + +#define DCFG_XMWI 0x8000 +#define DCFG_XMRM 0x4000 +#define DCFG_XMRL 0x2000 +#define DCFG_PERDIS 0x1000 +#define DCFG_MRWAIT 0x0400 +#define DCFG_MWWAIT 0x0200 +#define DCFG_LATMEN 0x0100 + +/* + * Bits in the MCFG0 register + */ + +#define MCFG_RXARB 0x0080 +#define MCFG_RFT1 0x0020 +#define MCFG_RFT0 0x0010 +#define MCFG_LOWTHOPT 0x0008 +#define MCFG_PQEN 0x0004 +#define MCFG_RTGOPT 0x0002 +#define MCFG_VIDFR 0x0001 + +/* + * Bits in the MCFG1 register + */ + +#define MCFG_TXARB 0x8000 +#define MCFG_TXQBK1 0x0800 +#define MCFG_TXQBK0 0x0400 +#define MCFG_TXQNOBK 0x0200 +#define MCFG_SNAPOPT 0x0100 + +/* + * Bits in the PMCC register + */ + +#define PMCC_DSI 0x80 +#define PMCC_D2_DIS 0x40 +#define PMCC_D1_DIS 0x20 +#define PMCC_D3C_EN 0x10 +#define PMCC_D3H_EN 0x08 +#define PMCC_D2_EN 0x04 +#define PMCC_D1_EN 0x02 +#define PMCC_D0_EN 0x01 + +/* + * Bits in STICKHW + */ + +#define STICKHW_SWPTAG 0x10 +#define STICKHW_WOLSR 0x08 +#define STICKHW_WOLEN 0x04 +#define STICKHW_DS1 0x02 /* R/W by software/cfg cycle */ +#define STICKHW_DS0 0x01 /* suspend well DS write port */ + +/* + * Bits in the MIBCR register + */ + +#define MIBCR_MIBISTOK 0x80 +#define MIBCR_MIBISTGO 0x40 +#define MIBCR_MIBINC 0x20 +#define MIBCR_MIBHI 0x10 +#define MIBCR_MIBFRZ 0x08 +#define MIBCR_MIBFLSH 0x04 +#define MIBCR_MPTRINI 0x02 +#define MIBCR_MIBCLR 0x01 + +/* + * Bits in the EERSV register + */ + +#define EERSV_BOOT_RPL ((u8) 0x01) /* Boot method selection for VT6110 */ + +#define EERSV_BOOT_MASK ((u8) 0x06) +#define EERSV_BOOT_INT19 ((u8) 0x00) +#define EERSV_BOOT_INT18 ((u8) 0x02) +#define EERSV_BOOT_LOCAL ((u8) 0x04) +#define EERSV_BOOT_BEV ((u8) 0x06) + + +/* + * Bits in BPCMD + */ + +#define BPCMD_BPDNE 0x80 +#define BPCMD_EBPWR 0x02 +#define BPCMD_EBPRD 0x01 + +/* + * Bits in the EECSR register + */ + +#define EECSR_EMBP 0x40 /* eeprom embeded programming */ +#define EECSR_RELOAD 0x20 /* eeprom content reload */ +#define EECSR_DPM 0x10 /* eeprom direct programming */ +#define EECSR_ECS 0x08 /* eeprom CS pin */ +#define EECSR_ECK 0x04 /* eeprom CK pin */ +#define EECSR_EDI 0x02 /* eeprom DI pin */ +#define EECSR_EDO 0x01 /* eeprom DO pin */ + +/* + * Bits in the EMBCMD 
register + */ + +#define EMBCMD_EDONE 0x80 +#define EMBCMD_EWDIS 0x08 +#define EMBCMD_EWEN 0x04 +#define EMBCMD_EWR 0x02 +#define EMBCMD_ERD 0x01 + +/* + * Bits in TESTCFG register + */ + +#define TESTCFG_HBDIS 0x80 + +/* + * Bits in CHIPGCR register + */ + +#define CHIPGCR_FCGMII 0x80 +#define CHIPGCR_FCFDX 0x40 +#define CHIPGCR_FCRESV 0x20 +#define CHIPGCR_FCMODE 0x10 +#define CHIPGCR_LPSOPT 0x08 +#define CHIPGCR_TM1US 0x04 +#define CHIPGCR_TM0US 0x02 +#define CHIPGCR_PHYINTEN 0x01 + +/* + * Bits in WOLCR0 + */ + +#define WOLCR_MSWOLEN7 0x0080 /* enable pattern match filtering */ +#define WOLCR_MSWOLEN6 0x0040 +#define WOLCR_MSWOLEN5 0x0020 +#define WOLCR_MSWOLEN4 0x0010 +#define WOLCR_MSWOLEN3 0x0008 +#define WOLCR_MSWOLEN2 0x0004 +#define WOLCR_MSWOLEN1 0x0002 +#define WOLCR_MSWOLEN0 0x0001 +#define WOLCR_ARP_EN 0x0001 + +/* + * Bits in WOLCR1 + */ + +#define WOLCR_LINKOFF_EN 0x0800 /* link off detected enable */ +#define WOLCR_LINKON_EN 0x0400 /* link on detected enable */ +#define WOLCR_MAGIC_EN 0x0200 /* magic packet filter enable */ +#define WOLCR_UNICAST_EN 0x0100 /* unicast filter enable */ + + +/* + * Bits in PWCFG + */ + +#define PWCFG_PHYPWOPT 0x80 /* internal MII I/F timing */ +#define PWCFG_PCISTICK 0x40 /* PCI sticky R/W enable */ +#define PWCFG_WOLTYPE 0x20 /* pulse(1) or button (0) */ +#define PWCFG_LEGCY_WOL 0x10 +#define PWCFG_PMCSR_PME_SR 0x08 +#define PWCFG_PMCSR_PME_EN 0x04 /* control by PCISTICK */ +#define PWCFG_LEGACY_WOLSR 0x02 /* Legacy WOL_SR shadow */ +#define PWCFG_LEGACY_WOLEN 0x01 /* Legacy WOL_EN shadow */ + +/* + * Bits in WOLCFG + */ + +#define WOLCFG_PMEOVR 0x80 /* for legacy use, force PMEEN always */ +#define WOLCFG_SAM 0x20 /* accept multicast case reset, default=0 */ +#define WOLCFG_SAB 0x10 /* accept broadcast case reset, default=0 */ +#define WOLCFG_SMIIACC 0x08 /* ?? */ +#define WOLCFG_SGENWH 0x02 +#define WOLCFG_PHYINTEN 0x01 /* 0:PHYINT trigger enable, 1:use internal MII + to report status change */ +/* + * Bits in WOLSR1 + */ + +#define WOLSR_LINKOFF_INT 0x0800 +#define WOLSR_LINKON_INT 0x0400 +#define WOLSR_MAGIC_INT 0x0200 +#define WOLSR_UNICAST_INT 0x0100 + +/* + * Ethernet address filter type + */ + +#define PKT_TYPE_NONE 0x0000 /* Turn off receiver */ +#define PKT_TYPE_DIRECTED 0x0001 /* obselete, directed address is always accepted */ +#define PKT_TYPE_MULTICAST 0x0002 +#define PKT_TYPE_ALL_MULTICAST 0x0004 +#define PKT_TYPE_BROADCAST 0x0008 +#define PKT_TYPE_PROMISCUOUS 0x0020 +#define PKT_TYPE_LONG 0x2000 /* NOTE.... the definition of LONG is >2048 bytes in our chip */ +#define PKT_TYPE_RUNT 0x4000 +#define PKT_TYPE_ERROR 0x8000 /* Accept error packets, e.g. CRC error */ + +/* + * Loopback mode + */ + +#define MAC_LB_NONE 0x00 +#define MAC_LB_INTERNAL 0x01 +#define MAC_LB_EXTERNAL 0x02 + +/* + * Enabled mask value of irq + */ + +#if defined(_SIM) +#define IMR_MASK_VALUE 0x0033FF0FUL /* initial value of IMR + set IMR0 to 0x0F according to spec */ + +#else +#define IMR_MASK_VALUE 0x0013FB0FUL /* initial value of IMR + ignore MIBFI,RACEI to + reduce intr. frequency + NOTE.... do not enable NoBuf int mask at driver driver + when (1) NoBuf -> RxThreshold = SF + (2) OK -> RxThreshold = original value + */ +#endif + +/* + * Revision id + */ + +#define REV_ID_VT3119_A0 0x00 +#define REV_ID_VT3119_A1 0x01 +#define REV_ID_VT3216_A0 0x10 + +/* + * Max time out delay time + */ + +#define W_MAX_TIMEOUT 0x0FFFU + + +/* + * MAC registers as a structure. 
Cannot be directly accessed this + * way but generates offsets for readl/writel() calls + */ + +struct mac_regs { + volatile u8 PAR[6]; /* 0x00 */ + volatile u8 RCR; + volatile u8 TCR; + + volatile u32 CR0Set; /* 0x08 */ + volatile u32 CR0Clr; /* 0x0C */ + + volatile u8 MARCAM[8]; /* 0x10 */ + + volatile u32 DecBaseHi; /* 0x18 */ + volatile u16 DbfBaseHi; /* 0x1C */ + volatile u16 reserved_1E; + + volatile u16 ISRCTL; /* 0x20 */ + volatile u8 TXESR; + volatile u8 RXESR; + + volatile u32 ISR; /* 0x24 */ + volatile u32 IMR; + + volatile u32 TDStatusPort; /* 0x2C */ + + volatile u16 TDCSRSet; /* 0x30 */ + volatile u8 RDCSRSet; + volatile u8 reserved_33; + volatile u16 TDCSRClr; + volatile u8 RDCSRClr; + volatile u8 reserved_37; + + volatile u32 RDBaseLo; /* 0x38 */ + volatile u16 RDIdx; /* 0x3C */ + volatile u16 reserved_3E; + + volatile u32 TDBaseLo[4]; /* 0x40 */ + + volatile u16 RDCSize; /* 0x50 */ + volatile u16 TDCSize; /* 0x52 */ + volatile u16 TDIdx[4]; /* 0x54 */ + volatile u16 tx_pause_timer; /* 0x5C */ + volatile u16 RBRDU; /* 0x5E */ + + volatile u32 FIFOTest0; /* 0x60 */ + volatile u32 FIFOTest1; /* 0x64 */ + + volatile u8 CAMADDR; /* 0x68 */ + volatile u8 CAMCR; /* 0x69 */ + volatile u8 GFTEST; /* 0x6A */ + volatile u8 FTSTCMD; /* 0x6B */ + + volatile u8 MIICFG; /* 0x6C */ + volatile u8 MIISR; + volatile u8 PHYSR0; + volatile u8 PHYSR1; + volatile u8 MIICR; + volatile u8 MIIADR; + volatile u16 MIIDATA; + + volatile u16 SoftTimer0; /* 0x74 */ + volatile u16 SoftTimer1; + + volatile u8 CFGA; /* 0x78 */ + volatile u8 CFGB; + volatile u8 CFGC; + volatile u8 CFGD; + + volatile u16 DCFG; /* 0x7C */ + volatile u16 MCFG; + + volatile u8 TBIST; /* 0x80 */ + volatile u8 RBIST; + volatile u8 PMCPORT; + volatile u8 STICKHW; + + volatile u8 MIBCR; /* 0x84 */ + volatile u8 reserved_85; + volatile u8 rev_id; + volatile u8 PORSTS; + + volatile u32 MIBData; /* 0x88 */ + + volatile u16 EEWrData; + + volatile u8 reserved_8E; + volatile u8 BPMDWr; + volatile u8 BPCMD; + volatile u8 BPMDRd; + + volatile u8 EECHKSUM; /* 0x92 */ + volatile u8 EECSR; + + volatile u16 EERdData; /* 0x94 */ + volatile u8 EADDR; + volatile u8 EMBCMD; + + + volatile u8 JMPSR0; /* 0x98 */ + volatile u8 JMPSR1; + volatile u8 JMPSR2; + volatile u8 JMPSR3; + volatile u8 CHIPGSR; /* 0x9C */ + volatile u8 TESTCFG; + volatile u8 DEBUG; + volatile u8 CHIPGCR; + + volatile u16 WOLCRSet; /* 0xA0 */ + volatile u8 PWCFGSet; + volatile u8 WOLCFGSet; + + volatile u16 WOLCRClr; /* 0xA4 */ + volatile u8 PWCFGCLR; + volatile u8 WOLCFGClr; + + volatile u16 WOLSRSet; /* 0xA8 */ + volatile u16 reserved_AA; + + volatile u16 WOLSRClr; /* 0xAC */ + volatile u16 reserved_AE; + + volatile u16 PatternCRC[8]; /* 0xB0 */ + volatile u32 ByteMask[4][4]; /* 0xC0 */ +} __attribute__ ((__packed__)); + + +enum hw_mib { + HW_MIB_ifRxAllPkts = 0, + HW_MIB_ifRxOkPkts, + HW_MIB_ifTxOkPkts, + HW_MIB_ifRxErrorPkts, + HW_MIB_ifRxRuntOkPkt, + HW_MIB_ifRxRuntErrPkt, + HW_MIB_ifRx64Pkts, + HW_MIB_ifTx64Pkts, + HW_MIB_ifRx65To127Pkts, + HW_MIB_ifTx65To127Pkts, + HW_MIB_ifRx128To255Pkts, + HW_MIB_ifTx128To255Pkts, + HW_MIB_ifRx256To511Pkts, + HW_MIB_ifTx256To511Pkts, + HW_MIB_ifRx512To1023Pkts, + HW_MIB_ifTx512To1023Pkts, + HW_MIB_ifRx1024To1518Pkts, + HW_MIB_ifTx1024To1518Pkts, + HW_MIB_ifTxEtherCollisions, + HW_MIB_ifRxPktCRCE, + HW_MIB_ifRxJumboPkts, + HW_MIB_ifTxJumboPkts, + HW_MIB_ifRxMacControlFrames, + HW_MIB_ifTxMacControlFrames, + HW_MIB_ifRxPktFAE, + HW_MIB_ifRxLongOkPkt, + HW_MIB_ifRxLongPktErrPkt, + HW_MIB_ifTXSQEErrors, + HW_MIB_ifRxNobuf, + 
HW_MIB_ifRxSymbolErrors, + HW_MIB_ifInRangeLengthErrors, + HW_MIB_ifLateCollisions, + HW_MIB_SIZE +}; + +enum chip_type { + CHIP_TYPE_VT6110 = 1, +}; + +struct velocity_info_tbl { + enum chip_type chip_id; + char *name; + int io_size; + int txqueue; + u32 flags; +}; + +#define mac_hw_mibs_init(regs) {\ + BYTE_REG_BITS_ON(MIBCR_MIBFRZ,&((regs)->MIBCR));\ + BYTE_REG_BITS_ON(MIBCR_MIBCLR,&((regs)->MIBCR));\ + do {}\ + while (BYTE_REG_BITS_IS_ON(MIBCR_MIBCLR,&((regs)->MIBCR)));\ + BYTE_REG_BITS_OFF(MIBCR_MIBFRZ,&((regs)->MIBCR));\ +} + +#define mac_read_isr(regs) readl(&((regs)->ISR)) +#define mac_write_isr(regs, x) writel((x),&((regs)->ISR)) +#define mac_clear_isr(regs) writel(0xffffffffL,&((regs)->ISR)) + +#define mac_write_int_mask(mask, regs) writel((mask),&((regs)->IMR)); +#define mac_disable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Clr)) +#define mac_enable_int(regs) writel(CR0_GINTMSK1,&((regs)->CR0Set)) + +#define mac_hw_mibs_read(regs, MIBs) {\ + int i;\ + BYTE_REG_BITS_ON(MIBCR_MPTRINI,&((regs)->MIBCR));\ + for (i=0;iMIBData));\ + }\ +} + +#define mac_set_dma_length(regs, n) {\ + BYTE_REG_BITS_SET((n),0x07,&((regs)->DCFG));\ +} + +#define mac_set_rx_thresh(regs, n) {\ + BYTE_REG_BITS_SET((n),(MCFG_RFT0|MCFG_RFT1),&((regs)->MCFG));\ +} + +#define mac_rx_queue_run(regs) {\ + writeb(TRDCSR_RUN, &((regs)->RDCSRSet));\ +} + +#define mac_rx_queue_wake(regs) {\ + writeb(TRDCSR_WAK, &((regs)->RDCSRSet));\ +} + +#define mac_tx_queue_run(regs, n) {\ + writew(TRDCSR_RUN<<((n)*4),&((regs)->TDCSRSet));\ +} + +#define mac_tx_queue_wake(regs, n) {\ + writew(TRDCSR_WAK<<(n*4),&((regs)->TDCSRSet));\ +} + +#define mac_eeprom_reload(regs) {\ + int i=0;\ + BYTE_REG_BITS_ON(EECSR_RELOAD,&((regs)->EECSR));\ + do {\ + udelay(10);\ + if (i++>0x1000) {\ + break;\ + }\ + }while (BYTE_REG_BITS_IS_ON(EECSR_RELOAD,&((regs)->EECSR)));\ +} + +enum velocity_cam_type { + VELOCITY_VLAN_ID_CAM = 0, + VELOCITY_MULTICAST_CAM +}; + +/** + * mac_get_cam_mask - Read a CAM mask + * @regs: register block for this velocity + * @mask: buffer to store mask + * @cam_type: CAM to fetch + * + * Fetch the mask bits of the selected CAM and store them into the + * provided mask buffer. 
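+ *
+ * Illustrative sketch only (the mask buffer and idx names below are
+ * placeholders, not part of this header): a caller that wants to
+ * enable one multicast CAM entry would typically read the mask,
+ * set that entry's bit and write it back:
+ *
+ *	u8 mask[8];
+ *	mac_get_cam_mask(regs, mask, VELOCITY_MULTICAST_CAM);
+ *	mask[idx / 8] |= 1 << (idx & 7);
+ *	mac_set_cam_mask(regs, mask, VELOCITY_MULTICAST_CAM);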
+ */ + +static inline void mac_get_cam_mask(struct mac_regs * regs, u8 * mask, enum velocity_cam_type cam_type) +{ + int i; + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + if (cam_type == VELOCITY_VLAN_ID_CAM) + writeb(CAMADDR_VCAMSL, ®s->CAMADDR); + else + writeb(0, ®s->CAMADDR); + + /* read mask */ + for (i = 0; i < 8; i++) + *mask++ = readb(&(regs->MARCAM[i])); + + /* disable CAMEN */ + writeb(0, ®s->CAMADDR); + + /* Select mar */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + +} + +/** + * mac_set_cam_mask - Set a CAM mask + * @regs: register block for this velocity + * @mask: CAM mask to load + * @cam_type: CAM to store + * + * Store a new mask into a CAM + */ + +static inline void mac_set_cam_mask(struct mac_regs * regs, u8 * mask, enum velocity_cam_type cam_type) +{ + int i; + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + if (cam_type == VELOCITY_VLAN_ID_CAM) + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, ®s->CAMADDR); + else + writeb(CAMADDR_CAMEN, ®s->CAMADDR); + + for (i = 0; i < 8; i++) { + writeb(*mask++, &(regs->MARCAM[i])); + } + /* disable CAMEN */ + writeb(0, ®s->CAMADDR); + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + +/** + * mac_set_cam - set CAM data + * @regs: register block of this velocity + * @idx: Cam index + * @addr: 2 or 6 bytes of CAM data + * @cam_type: CAM to load + * + * Load an address or vlan tag into a CAM + */ + +static inline void mac_set_cam(struct mac_regs * regs, int idx, u8 *addr, enum velocity_cam_type cam_type) +{ + int i; + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + idx &= (64 - 1); + + if (cam_type == VELOCITY_VLAN_ID_CAM) + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, ®s->CAMADDR); + else + writeb(CAMADDR_CAMEN | idx, ®s->CAMADDR); + + if (cam_type == VELOCITY_VLAN_ID_CAM) + writew(*((u16 *) addr), ®s->MARCAM[0]); + else { + for (i = 0; i < 6; i++) { + writeb(*addr++, &(regs->MARCAM[i])); + } + } + BYTE_REG_BITS_ON(CAMCR_CAMWR, ®s->CAMCR); + + udelay(10); + + writeb(0, ®s->CAMADDR); + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + +/** + * mac_get_cam - fetch CAM data + * @regs: register block of this velocity + * @idx: Cam index + * @addr: buffer to hold up to 6 bytes of CAM data + * @cam_type: CAM to load + * + * Load an address or vlan tag from a CAM into the buffer provided by + * the caller. VLAN tags are 2 bytes the address cam entries are 6. 
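+ *
+ * Illustrative sketch only (idx and addr are placeholder names):
+ * reading back a previously programmed address entry looks like
+ *
+ *	u8 addr[6];
+ *	mac_get_cam(regs, idx, addr, VELOCITY_MULTICAST_CAM);
+ *
+ * A VLAN tag entry is fetched the same way with VELOCITY_VLAN_ID_CAM
+ * and a 2 byte buffer.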
+ */ + +static inline void mac_get_cam(struct mac_regs * regs, int idx, u8 *addr, enum velocity_cam_type cam_type) +{ + int i; + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); + + idx &= (64 - 1); + + if (cam_type == VELOCITY_VLAN_ID_CAM) + writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, ®s->CAMADDR); + else + writeb(CAMADDR_CAMEN | idx, ®s->CAMADDR); + + BYTE_REG_BITS_ON(CAMCR_CAMRD, ®s->CAMCR); + + udelay(10); + + if (cam_type == VELOCITY_VLAN_ID_CAM) + *((u16 *) addr) = readw(&(regs->MARCAM[0])); + else + for (i = 0; i < 6; i++, addr++) + *((u8 *) addr) = readb(&(regs->MARCAM[i])); + + writeb(0, ®s->CAMADDR); + + /* Select CAM mask */ + BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, ®s->CAMCR); +} + +/** + * mac_wol_reset - reset WOL after exiting low power + * @regs: register block of this velocity + * + * Called after we drop out of wake on lan mode in order to + * reset the Wake on lan features. This function doesn't restore + * the rest of the logic from the result of sleep/wakeup + */ + +inline static void mac_wol_reset(struct mac_regs * regs) +{ + + /* Turn off SWPTAG right after leaving power mode */ + BYTE_REG_BITS_OFF(STICKHW_SWPTAG, ®s->STICKHW); + /* clear sticky bits */ + BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); + + BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, ®s->CHIPGCR); + BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); + /* disable force PME-enable */ + writeb(WOLCFG_PMEOVR, ®s->WOLCFGClr); + /* disable power-event config bit */ + writew(0xFFFF, ®s->WOLCRClr); + /* clear power status */ + writew(0xFFFF, ®s->WOLSRClr); +} + + +/* + * Header for WOL definitions. Used to compute hashes + */ + +typedef u8 MCAM_ADDR[ETH_ALEN]; + +struct arp_packet { + u8 dest_mac[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + u16 type; + u16 ar_hrd; + u16 ar_pro; + u8 ar_hln; + u8 ar_pln; + u16 ar_op; + u8 ar_sha[ETH_ALEN]; + u8 ar_sip[4]; + u8 ar_tha[ETH_ALEN]; + u8 ar_tip[4]; +} __attribute__ ((__packed__)); + +struct _magic_packet { + u8 dest_mac[6]; + u8 src_mac[6]; + u16 type; + u8 MAC[16][6]; + u8 password[6]; +} __attribute__ ((__packed__)); + +/* + * Store for chip context when saving and restoring status. Not + * all fields are saved/restored currently. + */ + +struct velocity_context { + u8 mac_reg[256]; + MCAM_ADDR cam_addr[MCAM_SIZE]; + u16 vcam[VCAM_SIZE]; + u32 cammask[2]; + u32 patcrc[2]; + u32 pattern[8]; +}; + + +/* + * MII registers. 
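+ *
+ * These offsets are used with velocity_mii_read()/velocity_mii_write()
+ * and with the MII_REG_BITS_* helper macros defined further down.
+ * As a rough example (not taken from this file), restarting
+ * autonegotiation on the PHY would be written as
+ *
+ *	MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);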
+ */ + + +/* + * Registers in the MII (offset unit is WORD) + */ + +#define MII_REG_BMCR 0x00 // physical address +#define MII_REG_BMSR 0x01 // +#define MII_REG_PHYID1 0x02 // OUI +#define MII_REG_PHYID2 0x03 // OUI + Module ID + REV ID +#define MII_REG_ANAR 0x04 // +#define MII_REG_ANLPAR 0x05 // +#define MII_REG_G1000CR 0x09 // +#define MII_REG_G1000SR 0x0A // +#define MII_REG_MODCFG 0x10 // +#define MII_REG_TCSR 0x16 // +#define MII_REG_PLED 0x1B // +// NS, MYSON only +#define MII_REG_PCR 0x17 // +// ESI only +#define MII_REG_PCSR 0x17 // +#define MII_REG_AUXCR 0x1C // + +// Marvell 88E1000/88E1000S +#define MII_REG_PSCR 0x10 // PHY specific control register + +// +// Bits in the BMCR register +// +#define BMCR_RESET 0x8000 // +#define BMCR_LBK 0x4000 // +#define BMCR_SPEED100 0x2000 // +#define BMCR_AUTO 0x1000 // +#define BMCR_PD 0x0800 // +#define BMCR_ISO 0x0400 // +#define BMCR_REAUTO 0x0200 // +#define BMCR_FDX 0x0100 // +#define BMCR_SPEED1G 0x0040 // +// +// Bits in the BMSR register +// +#define BMSR_AUTOCM 0x0020 // +#define BMSR_LNK 0x0004 // + +// +// Bits in the ANAR register +// +#define ANAR_ASMDIR 0x0800 // Asymmetric PAUSE support +#define ANAR_PAUSE 0x0400 // Symmetric PAUSE Support +#define ANAR_T4 0x0200 // +#define ANAR_TXFD 0x0100 // +#define ANAR_TX 0x0080 // +#define ANAR_10FD 0x0040 // +#define ANAR_10 0x0020 // +// +// Bits in the ANLPAR register +// +#define ANLPAR_ASMDIR 0x0800 // Asymmetric PAUSE support +#define ANLPAR_PAUSE 0x0400 // Symmetric PAUSE Support +#define ANLPAR_T4 0x0200 // +#define ANLPAR_TXFD 0x0100 // +#define ANLPAR_TX 0x0080 // +#define ANLPAR_10FD 0x0040 // +#define ANLPAR_10 0x0020 // + +// +// Bits in the G1000CR register +// +#define G1000CR_1000FD 0x0200 // PHY is 1000-T Full-duplex capable +#define G1000CR_1000 0x0100 // PHY is 1000-T Half-duplex capable + +// +// Bits in the G1000SR register +// +#define G1000SR_1000FD 0x0800 // LP PHY is 1000-T Full-duplex capable +#define G1000SR_1000 0x0400 // LP PHY is 1000-T Half-duplex capable + +#define TCSR_ECHODIS 0x2000 // +#define AUXCR_MDPPS 0x0004 // + +// Bits in the PLED register +#define PLED_LALBE 0x0004 // + +// Marvell 88E1000/88E1000S Bits in the PHY specific control register (10h) +#define PSCR_ACRSTX 0x0800 // Assert CRS on Transmit + +#define PHYID_CICADA_CS8201 0x000FC410UL +#define PHYID_VT3216_32BIT 0x000FC610UL +#define PHYID_VT3216_64BIT 0x000FC600UL +#define PHYID_MARVELL_1000 0x01410C50UL +#define PHYID_MARVELL_1000S 0x01410C40UL + +#define PHYID_REV_ID_MASK 0x0000000FUL + +#define PHYID_GET_PHY_REV_ID(i) ((i) & PHYID_REV_ID_MASK) +#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK) + +#define MII_REG_BITS_ON(x,i,p) do {\ + u16 w;\ + velocity_mii_read((p),(i),&(w));\ + (w)|=(x);\ + velocity_mii_write((p),(i),(w));\ +} while (0) + +#define MII_REG_BITS_OFF(x,i,p) do {\ + u16 w;\ + velocity_mii_read((p),(i),&(w));\ + (w)&=(~(x));\ + velocity_mii_write((p),(i),(w));\ +} while (0) + +#define MII_REG_BITS_IS_ON(x,i,p) ({\ + u16 w;\ + velocity_mii_read((p),(i),&(w));\ + ((int) ((w) & (x)));}) + +#define MII_GET_PHY_ID(p) ({\ + u32 id;\ + velocity_mii_read((p),MII_REG_PHYID2,(u16 *) &id);\ + velocity_mii_read((p),MII_REG_PHYID1,((u16 *) &id)+1);\ + (id);}) + +/* + * Inline debug routine + */ + + +enum velocity_msg_level { + MSG_LEVEL_ERR = 0, //Errors that will cause abnormal operation. + MSG_LEVEL_NOTICE = 1, //Some errors need users to be notified. + MSG_LEVEL_INFO = 2, //Normal message. + MSG_LEVEL_VERBOSE = 3, //Will report all trival errors. 
+ MSG_LEVEL_DEBUG = 4 //Only for debug purpose. +}; + +#ifdef VELOCITY_DEBUG +#define ASSERT(x) { \ + if (!(x)) { \ + printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\ + __FUNCTION__, __LINE__);\ + BUG(); \ + }\ +} +#define VELOCITY_DBG(p,args...) printk(p, ##args) +#else +#define ASSERT(x) +#define VELOCITY_DBG(x) +#endif + +#define VELOCITY_PRT(l, p, args...) do {if (l<=msglevel) printk( p ,##args);} while (0) + +#define VELOCITY_PRT_CAMMASK(p,t) {\ + int i;\ + if ((t)==VELOCITY_MULTICAST_CAM) {\ + for (i=0;i<(MCAM_SIZE/8);i++)\ + printk("%02X",(p)->mCAMmask[i]);\ + }\ + else {\ + for (i=0;i<(VCAM_SIZE/8);i++)\ + printk("%02X",(p)->vCAMmask[i]);\ + }\ + printk("\n");\ +} + + + +#define VELOCITY_WOL_MAGIC 0x00000000UL +#define VELOCITY_WOL_PHY 0x00000001UL +#define VELOCITY_WOL_ARP 0x00000002UL +#define VELOCITY_WOL_UCAST 0x00000004UL +#define VELOCITY_WOL_BCAST 0x00000010UL +#define VELOCITY_WOL_MCAST 0x00000020UL +#define VELOCITY_WOL_MAGIC_SEC 0x00000040UL + +/* + * Flags for options + */ + +#define VELOCITY_FLAGS_TAGGING 0x00000001UL +#define VELOCITY_FLAGS_TX_CSUM 0x00000002UL +#define VELOCITY_FLAGS_RX_CSUM 0x00000004UL +#define VELOCITY_FLAGS_IP_ALIGN 0x00000008UL +#define VELOCITY_FLAGS_VAL_PKT_LEN 0x00000010UL + +#define VELOCITY_FLAGS_FLOW_CTRL 0x01000000UL + +/* + * Flags for driver status + */ + +#define VELOCITY_FLAGS_OPENED 0x00010000UL +#define VELOCITY_FLAGS_VMNS_CONNECTED 0x00020000UL +#define VELOCITY_FLAGS_VMNS_COMMITTED 0x00040000UL +#define VELOCITY_FLAGS_WOL_ENABLED 0x00080000UL + +/* + * Flags for MII status + */ + +#define VELOCITY_LINK_FAIL 0x00000001UL +#define VELOCITY_SPEED_10 0x00000002UL +#define VELOCITY_SPEED_100 0x00000004UL +#define VELOCITY_SPEED_1000 0x00000008UL +#define VELOCITY_DUPLEX_FULL 0x00000010UL +#define VELOCITY_AUTONEG_ENABLE 0x00000020UL +#define VELOCITY_FORCED_BY_EEPROM 0x00000040UL + +/* + * For velocity_set_media_duplex + */ + +#define VELOCITY_LINK_CHANGE 0x00000001UL + +enum speed_opt { + SPD_DPX_AUTO = 0, + SPD_DPX_100_HALF = 1, + SPD_DPX_100_FULL = 2, + SPD_DPX_10_HALF = 3, + SPD_DPX_10_FULL = 4 +}; + +enum velocity_init_type { + VELOCITY_INIT_COLD = 0, + VELOCITY_INIT_RESET, + VELOCITY_INIT_WOL +}; + +enum velocity_flow_cntl_type { + FLOW_CNTL_DEFAULT = 1, + FLOW_CNTL_TX, + FLOW_CNTL_RX, + FLOW_CNTL_TX_RX, + FLOW_CNTL_DISABLE, +}; + +struct velocity_opt { + int numrx; /* Number of RX descriptors */ + int numtx; /* Number of TX descriptors */ + enum speed_opt spd_dpx; /* Media link mode */ + int vid; /* vlan id */ + int DMA_length; /* DMA length */ + int rx_thresh; /* RX_THRESH */ + int flow_cntl; + int wol_opts; /* Wake on lan options */ + int td_int_count; + int int_works; + int rx_bandwidth_hi; + int rx_bandwidth_lo; + int rx_bandwidth_en; + u32 flags; +}; + +struct velocity_info { + struct velocity_info *next; + struct velocity_info *prev; + + struct pci_dev *pdev; + struct net_device *dev; + struct net_device_stats stats; + +#if CONFIG_PM + u32 pci_state[16]; +#endif + + dma_addr_t rd_pool_dma; + dma_addr_t td_pool_dma[TX_QUEUE_NO]; + + dma_addr_t tx_bufs_dma; + u8 *tx_bufs; + + u8 ip_addr[4]; + enum chip_type chip_id; + + struct mac_regs * mac_regs; + unsigned long memaddr; + unsigned long ioaddr; + u32 io_size; + + u8 rev_id; + +#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->td_used[(q)])) + + int num_txq; + + volatile int td_used[TX_QUEUE_NO]; + int td_curr[TX_QUEUE_NO]; + int td_tail[TX_QUEUE_NO]; + struct tx_desc *td_rings[TX_QUEUE_NO]; + struct velocity_td_info *td_infos[TX_QUEUE_NO]; + + int rd_curr; + int 
rd_used; + struct rx_desc *rd_ring; + struct velocity_rd_info *rd_info; /* It's an array */ + +#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) + u32 mib_counter[MAX_HW_MIB_COUNTER]; + struct velocity_opt options; + + u32 int_mask; + + u32 flags; + + int rx_buf_sz; + u32 mii_status; + u32 phy_id; + int multicast_limit; + + u8 vCAMmask[(VCAM_SIZE / 8)]; + u8 mCAMmask[(MCAM_SIZE / 8)]; + + spinlock_t lock; + spinlock_t xmit_lock; + + int wol_opts; + u8 wol_passwd[6]; + + struct velocity_context context; + + u32 ticks; + u32 rx_bytes; + +}; + +/** + * velocity_get_ip - find an IP address for the device + * @vptr: Velocity to query + * + * Dig out an IP address for this interface so that we can + * configure wakeup with WOL for ARP. If there are multiple IP + * addresses on this chain then we use the first - multi-IP WOL is not + * supported. + * + * CHECK ME: locking + */ + +inline static int velocity_get_ip(struct velocity_info *vptr) +{ + struct in_device *in_dev = (struct in_device *) vptr->dev->ip_ptr; + struct in_ifaddr *ifa; + + if (in_dev != NULL) { + ifa = (struct in_ifaddr *) in_dev->ifa_list; + if (ifa != NULL) { + memcpy(vptr->ip_addr, &ifa->ifa_address, 4); + return 0; + } + } + return -ENOENT; +} + +/** + * velocity_update_hw_mibs - fetch MIB counters from chip + * @vptr: velocity to update + * + * The velocity hardware keeps certain counters in the hardware + * side. We need to read these when the user asks for statistics + * or when they overflow (causing an interrupt). The read of the + * statistic clears it, so we keep running master counters in user + * space. + */ + +static inline void velocity_update_hw_mibs(struct velocity_info *vptr) +{ + u32 tmp; + int i; + BYTE_REG_BITS_ON(MIBCR_MIBFLSH, &(vptr->mac_regs->MIBCR)); + + while (BYTE_REG_BITS_IS_ON(MIBCR_MIBFLSH, &(vptr->mac_regs->MIBCR))); + + BYTE_REG_BITS_ON(MIBCR_MPTRINI, &(vptr->mac_regs->MIBCR)); + for (i = 0; i < HW_MIB_SIZE; i++) { + tmp = readl(&(vptr->mac_regs->MIBData)) & 0x00FFFFFFUL; + vptr->mib_counter[i] += tmp; + } +} + +/** + * init_flow_control_register - set up flow control + * @vptr: velocity to configure + * + * Configure the flow control registers for this velocity device. + */ + +static inline void init_flow_control_register(struct velocity_info *vptr) +{ + struct mac_regs * regs = vptr->mac_regs; + + /* Set {XHITH1, XHITH0, XLTH1, XLTH0} in FlowCR1 to {1, 0, 1, 1} + depend on RD=64, and Turn on XNOEN in FlowCR1 */ + writel((CR0_XONEN | CR0_XHITH1 | CR0_XLTH1 | CR0_XLTH0), ®s->CR0Set); + writel((CR0_FDXTFCEN | CR0_FDXRFCEN | CR0_HDXFCEN | CR0_XHITH0), ®s->CR0Clr); + + /* Set TxPauseTimer to 0xFFFF */ + writew(0xFFFF, ®s->tx_pause_timer); + + /* Initialize RBRDU to Rx buffer count. */ + writew(vptr->options.numrx, ®s->RBRDU); +} + + +#endif diff --git a/drivers/net/wireless/prism54/prismcompat.h b/drivers/net/wireless/prism54/prismcompat.h new file mode 100644 index 000000000..ab6f83746 --- /dev/null +++ b/drivers/net/wireless/prism54/prismcompat.h @@ -0,0 +1,46 @@ +/* + * (C) 2004 Margit Schubert-While + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +/* + * Compatibility header file to aid support of different kernel versions + */ + +#ifdef PRISM54_COMPAT24 +#include "prismcompat24.h" +#else /* PRISM54_COMPAT24 */ + +#ifndef _PRISM_COMPAT_H +#define _PRISM_COMPAT_H + +#include +#include +#include +#include +#include +#include + +#if !defined(CONFIG_FW_LOADER) && !defined(CONFIG_FW_LOADER_MODULE) +#error Firmware Loading is not configured in the kernel ! +#endif + +#define prism54_synchronize_irq(irq) synchronize_irq(irq) + +#define PRISM_FW_PDEV &priv->pdev->dev + +#endif /* _PRISM_COMPAT_H */ +#endif /* PRISM54_COMPAT24 */ diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c new file mode 100644 index 000000000..694e13f43 --- /dev/null +++ b/drivers/pcmcia/pd6729.c @@ -0,0 +1,732 @@ +/* + * Driver for the Cirrus PD6729 PCI-PCMCIA bridge. + * + * Based on the i82092.c driver. + * + * This software may be used and distributed according to the terms of + * the GNU General Public License, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "pd6729.h" +#include "i82365.h" +#include "cirrus.h" + +MODULE_LICENSE("GPL"); + +#define MAX_SOCKETS 2 + +/* simple helper functions */ +/* External clock time, in nanoseconds. 120 ns = 8.33 MHz */ +#define to_cycles(ns) ((ns)/120) + +static spinlock_t port_lock = SPIN_LOCK_UNLOCKED; + +/* basic value read/write functions */ + +static unsigned char indirect_read(struct pd6729_socket *socket, unsigned short reg) +{ + unsigned long port; + unsigned char val; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg += socket->number * 0x40; + port = socket->io_base; + outb(reg, port); + val = inb(port + 1); + spin_unlock_irqrestore(&port_lock, flags); + + return val; +} + +static unsigned short indirect_read16(struct pd6729_socket *socket, unsigned short reg) +{ + unsigned long port; + unsigned short tmp; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg = reg + socket->number * 0x40; + port = socket->io_base; + outb(reg, port); + tmp = inb(port + 1); + reg++; + outb(reg, port); + tmp = tmp | (inb(port + 1) << 8); + spin_unlock_irqrestore(&port_lock, flags); + + return tmp; +} + +static void indirect_write(struct pd6729_socket *socket, unsigned short reg, unsigned char value) +{ + unsigned long port; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg = reg + socket->number * 0x40; + port = socket->io_base; + outb(reg, port); + outb(value, port + 1); + spin_unlock_irqrestore(&port_lock, flags); +} + +static void indirect_setbit(struct pd6729_socket *socket, unsigned short reg, unsigned char mask) +{ + unsigned long port; + unsigned char val; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg = reg + socket->number * 0x40; + port = socket->io_base; + outb(reg, port); + val = inb(port + 1); + val |= mask; + outb(reg, port); + outb(val, port + 1); + spin_unlock_irqrestore(&port_lock, flags); +} + +static void indirect_resetbit(struct pd6729_socket *socket, unsigned short reg, unsigned char mask) +{ + unsigned long port; + unsigned char val; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg = reg + socket->number * 0x40; + port = socket->io_base; + 
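+	/*
+	 * Indexed access, as in the helpers above: write the
+	 * socket-relative register number to the base port, then
+	 * read-modify-write its data byte at port + 1 (here with the
+	 * requested bits cleared).
+	 */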
outb(reg, port); + val = inb(port + 1); + val &= ~mask; + outb(reg, port); + outb(val, port + 1); + spin_unlock_irqrestore(&port_lock, flags); +} + +static void indirect_write16(struct pd6729_socket *socket, unsigned short reg, unsigned short value) +{ + unsigned long port; + unsigned char val; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg = reg + socket->number * 0x40; + port = socket->io_base; + + outb(reg, port); + val = value & 255; + outb(val, port + 1); + + reg++; + + outb(reg, port); + val = value >> 8; + outb(val, port + 1); + spin_unlock_irqrestore(&port_lock, flags); +} + +/* Interrupt handler functionality */ + +static irqreturn_t pd6729_interrupt(int irq, void *dev, struct pt_regs *regs) +{ + struct pd6729_socket *socket = (struct pd6729_socket *)dev; + int i; + int loopcount = 0; + int handled = 0; + unsigned int events, active = 0; + + while (1) { + loopcount++; + if (loopcount > 20) { + printk(KERN_ERR "pd6729: infinite eventloop in interrupt\n"); + break; + } + + active = 0; + + for (i = 0; i < MAX_SOCKETS; i++) { + unsigned int csc; + + /* card status change register */ + csc = indirect_read(&socket[i], I365_CSC); + if (csc == 0) /* no events on this socket */ + continue; + + handled = 1; + events = 0; + + if (csc & I365_CSC_DETECT) { + events |= SS_DETECT; + dprintk("Card detected in socket %i!\n", i); + } + + if (indirect_read(&socket[i], I365_INTCTL) & I365_PC_IOCARD) { + /* For IO/CARDS, bit 0 means "read the card" */ + events |= (csc & I365_CSC_STSCHG) ? SS_STSCHG : 0; + } else { + /* Check for battery/ready events */ + events |= (csc & I365_CSC_BVD1) ? SS_BATDEAD : 0; + events |= (csc & I365_CSC_BVD2) ? SS_BATWARN : 0; + events |= (csc & I365_CSC_READY) ? SS_READY : 0; + } + + if (events) { + pcmcia_parse_events(&socket[i].socket, events); + } + active |= events; + } + + if (active == 0) /* no more events to handle */ + break; + } + return IRQ_RETVAL(handled); +} + +/* socket functions */ + +static void set_bridge_state(struct pd6729_socket *socket) +{ + indirect_write(socket, I365_GBLCTL, 0x00); + indirect_write(socket, I365_GENCTL, 0x00); + + indirect_setbit(socket, I365_INTCTL, 0x08); +} + +static int pd6729_get_status(struct pcmcia_socket *sock, u_int *value) +{ + struct pd6729_socket *socket = container_of(sock, struct pd6729_socket, socket); + unsigned int status; + unsigned int data; + struct pd6729_socket *t; + + /* Interface Status Register */ + status = indirect_read(socket, I365_STATUS); + *value = 0; + + if ((status & I365_CS_DETECT) == I365_CS_DETECT) { + *value |= SS_DETECT; + } + + /* IO cards have a different meaning of bits 0,1 */ + /* Also notice the inverse-logic on the bits */ + if (indirect_read(socket, I365_INTCTL) & I365_PC_IOCARD) { + /* IO card */ + if (!(status & I365_CS_STSCHG)) + *value |= SS_STSCHG; + } else { + /* non I/O card */ + if (!(status & I365_CS_BVD1)) + *value |= SS_BATDEAD; + if (!(status & I365_CS_BVD2)) + *value |= SS_BATWARN; + } + + if (status & I365_CS_WRPROT) + *value |= SS_WRPROT; /* card is write protected */ + + if (status & I365_CS_READY) + *value |= SS_READY; /* card is not busy */ + + if (status & I365_CS_POWERON) + *value |= SS_POWERON; /* power is applied to the card */ + + t = (socket->number) ? socket : socket + 1; + indirect_write(t, PD67_EXT_INDEX, PD67_EXTERN_DATA); + data = indirect_read16(t, PD67_EXT_DATA); + *value |= (data & PD67_EXD_VS1(socket->number)) ? 
0 : SS_3VCARD; + + return 0; +} + + +static int pd6729_get_socket(struct pcmcia_socket *sock, socket_state_t *state) +{ + struct pd6729_socket *socket = container_of(sock, struct pd6729_socket, socket); + unsigned char reg, vcc, vpp; + + state->flags = 0; + state->Vcc = 0; + state->Vpp = 0; + state->io_irq = 0; + state->csc_mask = 0; + + /* First the power status of the socket */ + /* PCTRL - Power Control Register */ + reg = indirect_read(socket, I365_POWER); + + if (reg & I365_PWR_AUTO) + state->flags |= SS_PWR_AUTO; /* Automatic Power Switch */ + + if (reg & I365_PWR_OUT) + state->flags |= SS_OUTPUT_ENA; /* Output signals are enabled */ + + vcc = reg & I365_VCC_MASK; vpp = reg & I365_VPP1_MASK; + + if (reg & I365_VCC_5V) { + state->Vcc = (indirect_read(socket, PD67_MISC_CTL_1) & + PD67_MC1_VCC_3V) ? 33 : 50; + + if (vpp == I365_VPP1_5V) { + if (state->Vcc == 50) + state->Vpp = 50; + else + state->Vpp = 33; + } + if (vpp == I365_VPP1_12V) + state->Vpp = 120; + } + + /* Now the IO card, RESET flags and IO interrupt */ + /* IGENC, Interrupt and General Control */ + reg = indirect_read(socket, I365_INTCTL); + + if ((reg & I365_PC_RESET) == 0) + state->flags |= SS_RESET; + if (reg & I365_PC_IOCARD) + state->flags |= SS_IOCARD; /* This is an IO card */ + + /* Set the IRQ number */ + state->io_irq = socket->socket.pci_irq; + + /* Card status change */ + /* CSCICR, Card Status Change Interrupt Configuration */ + reg = indirect_read(socket, I365_CSCINT); + + if (reg & I365_CSC_DETECT) + state->csc_mask |= SS_DETECT; /* Card detect is enabled */ + + if (state->flags & SS_IOCARD) {/* IO Cards behave different */ + if (reg & I365_CSC_STSCHG) + state->csc_mask |= SS_STSCHG; + } else { + if (reg & I365_CSC_BVD1) + state->csc_mask |= SS_BATDEAD; + if (reg & I365_CSC_BVD2) + state->csc_mask |= SS_BATWARN; + if (reg & I365_CSC_READY) + state->csc_mask |= SS_READY; + } + + return 0; +} + +static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state) +{ + struct pd6729_socket *socket = container_of(sock, struct pd6729_socket, socket); + unsigned char reg; + + /* First, set the global controller options */ + + set_bridge_state(socket); + + /* Values for the IGENC register */ + + reg = 0; + /* The reset bit has "inverse" logic */ + if (!(state->flags & SS_RESET)) + reg = reg | I365_PC_RESET; + if (state->flags & SS_IOCARD) + reg = reg | I365_PC_IOCARD; + + /* IGENC, Interrupt and General Control Register */ + indirect_write(socket, I365_INTCTL, reg); + + /* Power registers */ + + reg = I365_PWR_NORESET; /* default: disable resetdrv on resume */ + + if (state->flags & SS_PWR_AUTO) { + dprintk("Auto power\n"); + reg |= I365_PWR_AUTO; /* automatic power mngmnt */ + } + if (state->flags & SS_OUTPUT_ENA) { + dprintk("Power Enabled\n"); + reg |= I365_PWR_OUT; /* enable power */ + } + + switch (state->Vcc) { + case 0: + break; + case 33: + dprintk("setting voltage to Vcc to 3.3V on socket %i\n", + socket->number); + reg |= I365_VCC_5V; + indirect_setbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); + break; + case 50: + dprintk("setting voltage to Vcc to 5V on socket %i\n", + socket->number); + reg |= I365_VCC_5V; + indirect_resetbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); + break; + default: + dprintk("pd6729: pd6729_set_socket called with invalid VCC power value: %i\n", + state->Vcc); + return -EINVAL; + } + + switch (state->Vpp) { + case 0: + dprintk("not setting Vpp on socket %i\n", socket->number); + break; + case 33: + case 50: + dprintk("setting Vpp to Vcc for socket %i\n", 
socket->number); + reg |= I365_VPP1_5V; + break; + case 120: + dprintk("setting Vpp to 12.0\n"); + reg |= I365_VPP1_12V; + break; + default: + dprintk("pd6729: pd6729_set_socket called with invalid VPP power value: %i\n", + state->Vpp); + return -EINVAL; + } + + /* only write if changed */ + if (reg != indirect_read(socket, I365_POWER)) + indirect_write(socket, I365_POWER, reg); + + /* Now, specifiy that all interrupts are to be done as PCI interrupts */ + indirect_write(socket, PD67_EXT_INDEX, PD67_EXT_CTL_1); + indirect_write(socket, PD67_EXT_DATA, PD67_EC1_INV_MGMT_IRQ | PD67_EC1_INV_CARD_IRQ); + + /* Enable specific interrupt events */ + + reg = 0x00; + if (state->csc_mask & SS_DETECT) { + reg |= I365_CSC_DETECT; + } + if (state->flags & SS_IOCARD) { + if (state->csc_mask & SS_STSCHG) + reg |= I365_CSC_STSCHG; + } else { + if (state->csc_mask & SS_BATDEAD) + reg |= I365_CSC_BVD1; + if (state->csc_mask & SS_BATWARN) + reg |= I365_CSC_BVD2; + if (state->csc_mask & SS_READY) + reg |= I365_CSC_READY; + } + reg |= 0x30; /* management IRQ: PCI INTA# = "irq 3" */ + indirect_write(socket, I365_CSCINT, reg); + + reg = indirect_read(socket, I365_INTCTL); + reg |= 0x03; /* card IRQ: PCI INTA# = "irq 3" */ + indirect_write(socket, I365_INTCTL, reg); + + /* now clear the (probably bogus) pending stuff by doing a dummy read */ + (void)indirect_read(socket, I365_CSC); + + return 0; +} + +static int pd6729_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) +{ + struct pd6729_socket *socket = container_of(sock, struct pd6729_socket, socket); + unsigned char map, ioctl; + + map = io->map; + + /* Check error conditions */ + if (map > 1) { + dprintk("pd6729_set_io_map with invalid map"); + return -EINVAL; + } + + /* Turn off the window before changing anything */ + if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_IO(map)) + indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_IO(map)); + +/* dprintk("set_io_map: Setting range to %x - %x\n", io->start, io->stop);*/ + + /* write the new values */ + indirect_write16(socket, I365_IO(map)+I365_W_START, io->start); + indirect_write16(socket, I365_IO(map)+I365_W_STOP, io->stop); + + ioctl = indirect_read(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map); + + if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map); + if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map); + if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map); + + indirect_write(socket, I365_IOCTL, ioctl); + + /* Turn the window back on if needed */ + if (io->flags & MAP_ACTIVE) + indirect_setbit(socket, I365_ADDRWIN, I365_ENA_IO(map)); + + return 0; +} + +static int pd6729_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem) +{ + struct pd6729_socket *socket = container_of(sock, struct pd6729_socket, socket); + unsigned short base, i; + unsigned char map; + + map = mem->map; + if (map > 4) { + printk("pd6729_set_mem_map: invalid map"); + return -EINVAL; + } + + if ((mem->sys_start > mem->sys_stop) || (mem->speed > 1000)) { + printk("pd6729_set_mem_map: invalid address / speed"); + /* printk("invalid mem map for socket %i : %lx to %lx with a start of %x\n", + sock, mem->sys_start, mem->sys_stop, mem->card_start); */ + return -EINVAL; + } + + /* Turn off the window before changing anything */ + if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_MEM(map)) + indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_MEM(map)); + + /* write the start address */ + base = I365_MEM(map); + i = (mem->sys_start >> 12) & 0x0fff; + if (mem->flags & MAP_16BIT) + i |= I365_MEM_16BIT; + if 
(mem->flags & MAP_0WS) + i |= I365_MEM_0WS; + indirect_write16(socket, base + I365_W_START, i); + + /* write the stop address */ + + i= (mem->sys_stop >> 12) & 0x0fff; + switch (to_cycles(mem->speed)) { + case 0: + break; + case 1: + i |= I365_MEM_WS0; + break; + case 2: + i |= I365_MEM_WS1; + break; + default: + i |= I365_MEM_WS1 | I365_MEM_WS0; + break; + } + + indirect_write16(socket, base + I365_W_STOP, i); + + /* Take care of high byte */ + indirect_write(socket, PD67_EXT_INDEX, PD67_MEM_PAGE(map)); + indirect_write(socket, PD67_EXT_DATA, mem->sys_start >> 24); + + /* card start */ + + i = ((mem->card_start - mem->sys_start) >> 12) & 0x3fff; + if (mem->flags & MAP_WRPROT) + i |= I365_MEM_WRPROT; + if (mem->flags & MAP_ATTRIB) { +/* dprintk("requesting attribute memory for socket %i\n", + socket->number);*/ + i |= I365_MEM_REG; + } else { +/* dprintk("requesting normal memory for socket %i\n", + socket->number);*/ + } + indirect_write16(socket, base + I365_W_OFF, i); + + /* Enable the window if necessary */ + if (mem->flags & MAP_ACTIVE) + indirect_setbit(socket, I365_ADDRWIN, I365_ENA_MEM(map)); + + return 0; +} + +static int pd6729_suspend(struct pcmcia_socket *sock) +{ + return pd6729_set_socket(sock, &dead_socket); +} + +static int pd6729_init(struct pcmcia_socket *sock) +{ + int i; + struct resource res = { .end = 0x0fff }; + pccard_io_map io = { 0, 0, 0, 0, 1 }; + pccard_mem_map mem = { .res = &res, .sys_stop = 0x0fff }; + + pd6729_set_socket(sock, &dead_socket); + for (i = 0; i < 2; i++) { + io.map = i; + pd6729_set_io_map(sock, &io); + } + for (i = 0; i < 5; i++) { + mem.map = i; + pd6729_set_mem_map(sock, &mem); + } + + return 0; +} + + +/* the pccard structure and its functions */ +static struct pccard_operations pd6729_operations = { + .init = pd6729_init, + .suspend = pd6729_suspend, + .get_status = pd6729_get_status, + .get_socket = pd6729_get_socket, + .set_socket = pd6729_set_socket, + .set_io_map = pd6729_set_io_map, + .set_mem_map = pd6729_set_mem_map, +}; + +static int __devinit pd6729_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + int i, j, ret; + char configbyte; + struct pd6729_socket *socket; + + socket = kmalloc(sizeof(struct pd6729_socket) * MAX_SOCKETS, GFP_KERNEL); + if (!socket) + return -ENOMEM; + + memset(socket, 0, sizeof(struct pd6729_socket) * MAX_SOCKETS); + + if ((ret = pci_enable_device(dev))) + goto err_out_free_mem; + + printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge at 0x%lx on irq %d\n", + pci_resource_start(dev, 0), dev->irq); + printk(KERN_INFO "pd6729: configured as a %d socket device.\n", MAX_SOCKETS); + /* Since we have no memory BARs some firmware we may not + have had PCI_COMMAND_MEM enabled, yet the device needs + it. 
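+	   In other words: the bridge is driven purely through I/O
+	   ports here, but it still needs the memory-space enable bit
+	   in its PCI command register, so it is switched on below if
+	   the firmware left it clear.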
*/ + pci_read_config_byte(dev, PCI_COMMAND, &configbyte); + if (!(configbyte & PCI_COMMAND_MEMORY)) { + printk(KERN_DEBUG "pd6729: Enabling PCI_COMMAND_MEMORY.\n"); + configbyte |= PCI_COMMAND_MEMORY; + pci_write_config_byte(dev, PCI_COMMAND, configbyte); + } + + ret = pci_request_regions(dev, "pd6729"); + if (ret) { + printk(KERN_INFO "pd6729: pci request region failed.\n"); + goto err_out_disable; + } + + for (i = 0; i < MAX_SOCKETS; i++) { + socket[i].io_base = pci_resource_start(dev, 0); + socket[i].socket.features |= SS_CAP_PCCARD; + socket[i].socket.map_size = 0x1000; + socket[i].socket.irq_mask = 0; + socket[i].socket.pci_irq = dev->irq; + socket[i].socket.owner = THIS_MODULE; + + socket[i].number = i; + + socket[i].socket.ops = &pd6729_operations; + socket[i].socket.dev.dev = &dev->dev; + socket[i].socket.driver_data = &socket[i]; + } + + pci_set_drvdata(dev, socket); + + /* Register the interrupt handler */ + if ((ret = request_irq(dev->irq, pd6729_interrupt, SA_SHIRQ, "pd6729", socket))) { + printk(KERN_ERR "pd6729: Failed to register irq %d, aborting\n", dev->irq); + goto err_out_free_res; + } + + for (i = 0; i < MAX_SOCKETS; i++) { + ret = pcmcia_register_socket(&socket[i].socket); + if (ret) { + printk(KERN_INFO "pd6729: pcmcia_register_socket failed.\n"); + for (j = 0; j < i ; j++) + pcmcia_unregister_socket(&socket[j].socket); + goto err_out_free_res2; + } + } + + return 0; + + err_out_free_res2: + free_irq(dev->irq, socket); + err_out_free_res: + pci_release_regions(dev); + err_out_disable: + pci_disable_device(dev); + + err_out_free_mem: + kfree(socket); + return ret; +} + +static void __devexit pd6729_pci_remove(struct pci_dev *dev) +{ + int i; + struct pd6729_socket *socket = pci_get_drvdata(dev); + + for (i = 0; i < MAX_SOCKETS; i++) + pcmcia_unregister_socket(&socket[i].socket); + + free_irq(dev->irq, socket); + pci_release_regions(dev); + pci_disable_device(dev); + + kfree(socket); +} + +static int pd6729_socket_suspend(struct pci_dev *dev, u32 state) +{ + return pcmcia_socket_dev_suspend(&dev->dev, state); +} + +static int pd6729_socket_resume(struct pci_dev *dev) +{ + return pcmcia_socket_dev_resume(&dev->dev); +} + +static struct pci_device_id pd6729_pci_ids[] = { + { + .vendor = PCI_VENDOR_ID_CIRRUS, + .device = PCI_DEVICE_ID_CIRRUS_6729, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; +MODULE_DEVICE_TABLE(pci, pd6729_pci_ids); + +static struct pci_driver pd6729_pci_drv = { + .name = "pd6729", + .id_table = pd6729_pci_ids, + .probe = pd6729_pci_probe, + .remove = __devexit_p(pd6729_pci_remove), + .suspend = pd6729_socket_suspend, + .resume = pd6729_socket_resume, +}; + +static int pd6729_module_init(void) +{ + return pci_module_init(&pd6729_pci_drv); +} + +static void pd6729_module_exit(void) +{ + pci_unregister_driver(&pd6729_pci_drv); +} + +module_init(pd6729_module_init); +module_exit(pd6729_module_exit); diff --git a/drivers/pcmcia/pd6729.h b/drivers/pcmcia/pd6729.h new file mode 100644 index 000000000..9e90520ef --- /dev/null +++ b/drivers/pcmcia/pd6729.h @@ -0,0 +1,28 @@ +#ifndef _INCLUDE_GUARD_PD6729_H_ +#define _INCLUDE_GUARD_PD6729_H_ + +/* Debuging defines */ +#ifdef NOTRACE +#define dprintk(fmt, args...) printk(fmt , ## args) +#else +#define dprintk(fmt, args...) 
do {} while (0) +#endif + +/* Flags for I365_GENCTL */ +#define I365_DF_VS1 0x40 /* DF-step Voltage Sense */ +#define I365_DF_VS2 0x80 + +/* Fields in PD67_EXTERN_DATA */ +#define PD67_EXD_VS1(s) (0x01 << ((s) << 1)) +#define PD67_EXD_VS2(s) (0x02 << ((s) << 1)) + + + + +struct pd6729_socket { + int number; + unsigned long io_base; /* base io address of the socket */ + struct pcmcia_socket socket; +}; + +#endif diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h new file mode 100644 index 000000000..e46cff345 --- /dev/null +++ b/drivers/pcmcia/pxa2xx_base.h @@ -0,0 +1,3 @@ +/* temporary measure */ +extern int pxa2xx_drv_pcmcia_probe(struct device *); + diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c new file mode 100644 index 000000000..23288e646 --- /dev/null +++ b/drivers/pcmcia/socket_sysfs.c @@ -0,0 +1,161 @@ +/* + * socket_sysfs.c -- most of socket-related sysfs output + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * (C) 2003 - 2004 Dominik Brodowski + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define IN_CARD_SERVICES +#include +#include +#include +#include +#include +#include +#include +#include +#include "cs_internal.h" + +#define to_socket(_dev) container_of(_dev, struct pcmcia_socket, dev) + +static ssize_t pccard_show_type(struct class_device *dev, char *buf) +{ + int val; + struct pcmcia_socket *s = to_socket(dev); + + if (!(s->state & SOCKET_PRESENT)) + return -ENODEV; + s->ops->get_status(s, &val); + if (val & SS_CARDBUS) + return sprintf(buf, "32-bit\n"); + if (val & SS_DETECT) + return sprintf(buf, "16-bit\n"); + return sprintf(buf, "invalid\n"); +} +static CLASS_DEVICE_ATTR(card_type, 0400, pccard_show_type, NULL); + +static ssize_t pccard_show_voltage(struct class_device *dev, char *buf) +{ + int val; + struct pcmcia_socket *s = to_socket(dev); + + if (!(s->state & SOCKET_PRESENT)) + return -ENODEV; + s->ops->get_status(s, &val); + if (val & SS_3VCARD) + return sprintf(buf, "3.3V\n"); + if (val & SS_XVCARD) + return sprintf(buf, "X.XV\n"); + return sprintf(buf, "5.0V\n"); +} +static CLASS_DEVICE_ATTR(card_voltage, 0400, pccard_show_voltage, NULL); + +static ssize_t pccard_show_vpp(struct class_device *dev, char *buf) +{ + struct pcmcia_socket *s = to_socket(dev); + if (!(s->state & SOCKET_PRESENT)) + return -ENODEV; + return sprintf(buf, "%d.%dV\n", s->socket.Vpp / 10, s->socket.Vpp % 10); +} +static CLASS_DEVICE_ATTR(card_vpp, 0400, pccard_show_vpp, NULL); + +static ssize_t pccard_show_vcc(struct class_device *dev, char *buf) +{ + struct pcmcia_socket *s = to_socket(dev); + if (!(s->state & SOCKET_PRESENT)) + return -ENODEV; + return sprintf(buf, "%d.%dV\n", s->socket.Vcc / 10, s->socket.Vcc % 10); +} +static CLASS_DEVICE_ATTR(card_vcc, 0400, pccard_show_vcc, NULL); + + +static ssize_t pccard_store_insert(struct class_device *dev, const char *buf, size_t count) +{ + ssize_t ret; + struct pcmcia_socket *s = to_socket(dev); + + if (!count) + return -EINVAL; + + ret = pcmcia_insert_card(s); + + return ret ? 
ret : count; +} +static CLASS_DEVICE_ATTR(card_insert, 0200, NULL, pccard_store_insert); + +static ssize_t pccard_store_eject(struct class_device *dev, const char *buf, size_t count) +{ + ssize_t ret; + struct pcmcia_socket *s = to_socket(dev); + + if (!count) + return -EINVAL; + + ret = pcmcia_eject_card(s); + + return ret ? ret : count; +} +static CLASS_DEVICE_ATTR(card_eject, 0200, NULL, pccard_store_eject); + + +static struct class_device_attribute *pccard_socket_attributes[] = { + &class_device_attr_card_type, + &class_device_attr_card_voltage, + &class_device_attr_card_vpp, + &class_device_attr_card_vcc, + &class_device_attr_card_insert, + &class_device_attr_card_eject, + NULL, +}; + +static int __devinit pccard_sysfs_add_socket(struct class_device *class_dev) +{ + struct class_device_attribute **attr; + int ret = 0; + + for (attr = pccard_socket_attributes; *attr; attr++) { + ret = class_device_create_file(class_dev, *attr); + if (ret) + break; + } + + return ret; +} + +static void __devexit pccard_sysfs_remove_socket(struct class_device *class_dev) +{ + struct class_device_attribute **attr; + + for (attr = pccard_socket_attributes; *attr; attr++) + class_device_remove_file(class_dev, *attr); +} + +struct class_interface pccard_sysfs_interface = { + .class = &pcmcia_socket_class, + .add = &pccard_sysfs_add_socket, + .remove = __devexit_p(&pccard_sysfs_remove_socket), +}; diff --git a/drivers/s390/net/ctcdbug.c b/drivers/s390/net/ctcdbug.c new file mode 100644 index 000000000..ba004e0aa --- /dev/null +++ b/drivers/s390/net/ctcdbug.c @@ -0,0 +1,83 @@ +/* + * + * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.1 $) + * + * Linux on zSeries OSA Express and HiperSockets support + * + * Copyright 2000,2003 IBM Corporation + * + * Author(s): Original Code written by + * Peter Tiedemann (ptiedem@de.ibm.com) + * + * $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include "ctcdbug.h" + +/** + * Debug Facility Stuff + */ +debug_info_t *dbf_setup = NULL; +debug_info_t *dbf_data = NULL; +debug_info_t *dbf_trace = NULL; + +DEFINE_PER_CPU(char[256], dbf_txt_buf); + +void +unregister_dbf_views(void) +{ + if (dbf_setup) + debug_unregister(dbf_setup); + if (dbf_data) + debug_unregister(dbf_data); + if (dbf_trace) + debug_unregister(dbf_trace); +} +int +register_dbf_views(void) +{ + dbf_setup = debug_register(CTC_DBF_SETUP_NAME, + CTC_DBF_SETUP_INDEX, + CTC_DBF_SETUP_NR_AREAS, + CTC_DBF_SETUP_LEN); + dbf_data = debug_register(CTC_DBF_DATA_NAME, + CTC_DBF_DATA_INDEX, + CTC_DBF_DATA_NR_AREAS, + CTC_DBF_DATA_LEN); + dbf_trace = debug_register(CTC_DBF_TRACE_NAME, + CTC_DBF_TRACE_INDEX, + CTC_DBF_TRACE_NR_AREAS, + CTC_DBF_TRACE_LEN); + + if ((dbf_setup == NULL) || (dbf_data == NULL) || + (dbf_trace == NULL)) { + unregister_dbf_views(); + return -ENOMEM; + } + debug_register_view(dbf_setup, &debug_hex_ascii_view); + debug_set_level(dbf_setup, CTC_DBF_SETUP_LEVEL); + + debug_register_view(dbf_data, &debug_hex_ascii_view); + debug_set_level(dbf_data, CTC_DBF_DATA_LEVEL); + + debug_register_view(dbf_trace, &debug_hex_ascii_view); + debug_set_level(dbf_trace, CTC_DBF_TRACE_LEVEL); + + return 0; +} + + diff --git a/drivers/s390/net/ctcdbug.h b/drivers/s390/net/ctcdbug.h new file mode 100644 index 000000000..447fd1abf --- /dev/null +++ b/drivers/s390/net/ctcdbug.h @@ -0,0 +1,123 @@ +/* + * + * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.1 $) + * + * Linux on zSeries OSA Express and HiperSockets support + * + * Copyright 2000,2003 IBM Corporation + * + * Author(s): Original Code written by + * Peter Tiedemann (ptiedem@de.ibm.com) + * + * $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#include +/** + * Debug Facility stuff + */ +#define CTC_DBF_SETUP_NAME "ctc_setup" +#define CTC_DBF_SETUP_LEN 16 +#define CTC_DBF_SETUP_INDEX 3 +#define CTC_DBF_SETUP_NR_AREAS 1 +#define CTC_DBF_SETUP_LEVEL 3 + +#define CTC_DBF_DATA_NAME "ctc_data" +#define CTC_DBF_DATA_LEN 128 +#define CTC_DBF_DATA_INDEX 3 +#define CTC_DBF_DATA_NR_AREAS 1 +#define CTC_DBF_DATA_LEVEL 2 + +#define CTC_DBF_TRACE_NAME "ctc_trace" +#define CTC_DBF_TRACE_LEN 16 +#define CTC_DBF_TRACE_INDEX 2 +#define CTC_DBF_TRACE_NR_AREAS 2 +#define CTC_DBF_TRACE_LEVEL 3 + +#define DBF_TEXT(name,level,text) \ + do { \ + debug_text_event(dbf_##name,level,text); \ + } while (0) + +#define DBF_HEX(name,level,addr,len) \ + do { \ + debug_event(dbf_##name,level,(void*)(addr),len); \ + } while (0) + +extern DEFINE_PER_CPU(char[256], dbf_txt_buf); +extern debug_info_t *dbf_setup; +extern debug_info_t *dbf_data; +extern debug_info_t *dbf_trace; + + +#define DBF_TEXT_(name,level,text...) 
\ + do { \ + char* dbf_txt_buf = get_cpu_var(dbf_txt_buf); \ + sprintf(dbf_txt_buf, text); \ + debug_text_event(dbf_##name,level,dbf_txt_buf); \ + put_cpu_var(dbf_txt_buf); \ + } while (0) + +#define DBF_SPRINTF(name,level,text...) \ + do { \ + debug_sprintf_event(dbf_trace, level, ##text ); \ + debug_sprintf_event(dbf_trace, level, text ); \ + } while (0) + + +int register_dbf_views(void); + +void unregister_dbf_views(void); + +/** + * some more debug stuff + */ + +#define HEXDUMP16(importance,header,ptr) \ +PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ + "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ + *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ + *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ + *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ + *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ + *(((char*)ptr)+12),*(((char*)ptr)+13), \ + *(((char*)ptr)+14),*(((char*)ptr)+15)); \ +PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ + "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ + *(((char*)ptr)+16),*(((char*)ptr)+17), \ + *(((char*)ptr)+18),*(((char*)ptr)+19), \ + *(((char*)ptr)+20),*(((char*)ptr)+21), \ + *(((char*)ptr)+22),*(((char*)ptr)+23), \ + *(((char*)ptr)+24),*(((char*)ptr)+25), \ + *(((char*)ptr)+26),*(((char*)ptr)+27), \ + *(((char*)ptr)+28),*(((char*)ptr)+29), \ + *(((char*)ptr)+30),*(((char*)ptr)+31)); + +static inline void +hex_dump(unsigned char *buf, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) { + if (i && !(i % 16)) + printk("\n"); + printk("%02x ", *(buf + i)); + } + printk("\n"); +} + diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c new file mode 100644 index 000000000..9f75c87c3 --- /dev/null +++ b/drivers/scsi/3w-9xxx.c @@ -0,0 +1,2153 @@ +/* + 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux. + + Written By: Adam Radford + + Copyright (C) 2004 Applied Micro Circuits Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. 
+ + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Bugs/Comments/Suggestions should be mailed to: + linuxraid@amcc.com + + For more information, goto: + http://www.amcc.com + + Note: This version of the driver does not contain a bundled firmware + image. + + History + ------- + 2.26.02.000 - Driver cleanup for kernel submission. + 2.26.02.001 - Replace schedule_timeout() calls with msleep(). +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "3w-9xxx.h" + +/* Globals */ +static const char *twa_driver_version="2.26.02.001"; +static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; +static unsigned int twa_device_extension_count; +static int twa_major = -1; +extern struct timezone sys_tz; + +/* Module parameters */ +MODULE_AUTHOR ("AMCC"); +MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver"); +MODULE_LICENSE("GPL"); + +/* Function prototypes */ +static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header); +static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id); +static char *twa_aen_severity_lookup(unsigned char severity_code); +static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id); +static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); +static int twa_chrdev_open(struct inode *inode, struct file *file); +static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host); +static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id); +static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id); +static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits, + u32 set_features, unsigned short current_fw_srl, + unsigned short current_fw_arch_id, + unsigned short current_fw_branch, + unsigned short current_fw_build, + unsigned short *fw_on_ctlr_srl, + unsigned short *fw_on_ctlr_arch_id, + unsigned short *fw_on_ctlr_branch, + unsigned short *fw_on_ctlr_build, + u32 *init_connect_result); +static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length); +static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds); +static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds); +static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal); +static int twa_reset_device_extension(TW_Device_Extension *tw_dev); +static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); +static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Apache *sglistarg); +static void 
twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); +static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); +static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id); + +/* Functions */ + +/* Show some statistics about the card */ +static ssize_t twa_show_stats(struct class_device *class_dev, char *buf) +{ + struct Scsi_Host *host = class_to_shost(class_dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + unsigned long flags = 0; + ssize_t len; + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + len = snprintf(buf, PAGE_SIZE, "Driver version: %s\n" + "Current commands posted: %4d\n" + "Max commands posted: %4d\n" + "Current pending commands: %4d\n" + "Max pending commands: %4d\n" + "Last sgl length: %4d\n" + "Max sgl length: %4d\n" + "Last sector count: %4d\n" + "Max sector count: %4d\n" + "SCSI Host Resets: %4d\n" + "SCSI Aborts/Timeouts: %4d\n" + "AEN's: %4d\n", + twa_driver_version, + tw_dev->posted_request_count, + tw_dev->max_posted_request_count, + tw_dev->pending_request_count, + tw_dev->max_pending_request_count, + tw_dev->sgl_entries, + tw_dev->max_sgl_entries, + tw_dev->sector_count, + tw_dev->max_sector_count, + tw_dev->num_resets, + tw_dev->num_aborts, + tw_dev->aen_count); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + return len; +} /* End twa_show_stats() */ + +/* This function will set a devices queue depth */ +static ssize_t twa_store_queue_depth(struct device *dev, const char *buf, size_t count) +{ + int queue_depth; + struct scsi_device *sdev = to_scsi_device(dev); + + queue_depth = simple_strtoul(buf, NULL, 0); + if (queue_depth > TW_Q_LENGTH-2) + return -EINVAL; + scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); + + return count; +} /* End twa_store_queue_depth() */ + +/* Create sysfs 'queue_depth' entry */ +static struct device_attribute twa_queue_depth_attr = { + .attr = { + .name = "queue_depth", + .mode = S_IRUSR | S_IWUSR, + }, + .store = twa_store_queue_depth +}; + +/* Device attributes initializer */ +static struct device_attribute *twa_dev_attrs[] = { + &twa_queue_depth_attr, + NULL, +}; + +/* Create sysfs 'stats' entry */ +static struct class_device_attribute twa_host_stats_attr = { + .attr = { + .name = "stats", + .mode = S_IRUGO, + }, + .show = twa_show_stats +}; + +/* Host attributes initializer */ +static struct class_device_attribute *twa_host_attrs[] = { + &twa_host_stats_attr, + NULL, +}; + +/* File operations struct for character device */ +static struct file_operations twa_fops = { + .owner = THIS_MODULE, + .ioctl = twa_chrdev_ioctl, + .open = twa_chrdev_open, + .release = NULL +}; + +/* This function will complete an aen request from the isr */ +static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Command_Apache_Header *header; + unsigned short aen; + int retval = 1; + + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; + tw_dev->posted_request_count--; + aen = header->status_block.error; + full_command_packet = tw_dev->command_packet_virt[request_id]; + command_packet = &full_command_packet->command.oldcommand; + + /* First check for internal completion of set param for time sync */ + if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) { + /* Keep reading the queue in case there are more aen's */ + if (twa_aen_read_queue(tw_dev, request_id)) + goto out2; + else { + retval = 0; + goto 
out; + } + } + + switch (aen) { + case TW_AEN_QUEUE_EMPTY: + /* Quit reading the queue if this is the last one */ + break; + case TW_AEN_SYNC_TIME_WITH_HOST: + twa_aen_sync_time(tw_dev, request_id); + retval = 0; + goto out; + default: + twa_aen_queue_event(tw_dev, header); + + /* If there are more aen's, keep reading the queue */ + if (twa_aen_read_queue(tw_dev, request_id)) + goto out2; + else { + retval = 0; + goto out; + } + } + retval = 0; +out2: + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); +out: + return retval; +} /* End twa_aen_complete() */ + +/* This function will drain aen queue */ +static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) +{ + int request_id = 0; + char cdb[TW_MAX_CDB_LEN]; + TW_SG_Apache sglist[1]; + int finished = 0, count = 0; + TW_Command_Full *full_command_packet; + TW_Command_Apache_Header *header; + unsigned short aen; + int first_reset = 0, queue = 0, retval = 1; + + if (no_check_reset) + first_reset = 0; + else + first_reset = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + + /* Initialize cdb */ + memset(&cdb, 0, TW_MAX_CDB_LEN); + cdb[0] = REQUEST_SENSE; /* opcode */ + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ + + /* Initialize sglist */ + memset(&sglist, 0, sizeof(TW_SG_Apache)); + sglist[0].length = TW_SECTOR_SIZE; + sglist[0].address = tw_dev->generic_buffer_phys[request_id]; + + if (sglist[0].address & TW_ALIGNMENT_9000_SGL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain"); + goto out; + } + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + do { + /* Send command to the board */ + if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense"); + goto out; + } + + /* Now poll for completion */ + if (twa_poll_response(tw_dev, request_id, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue"); + tw_dev->posted_request_count--; + goto out; + } + + tw_dev->posted_request_count--; + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; + aen = header->status_block.error; + queue = 0; + count++; + + switch (aen) { + case TW_AEN_QUEUE_EMPTY: + if (first_reset != 1) + goto out; + else + finished = 1; + break; + case TW_AEN_SOFT_RESET: + if (first_reset == 0) + first_reset = 1; + else + queue = 1; + break; + case TW_AEN_SYNC_TIME_WITH_HOST: + break; + default: + queue = 1; + } + + /* Now queue an event info */ + if (queue) + twa_aen_queue_event(tw_dev, header); + } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN)); + + if (count == TW_MAX_AEN_DRAIN) + goto out; + + retval = 0; +out: + tw_dev->state[request_id] = TW_S_INITIAL; + return retval; +} /* End twa_aen_drain_queue() */ + +/* This function will queue an event */ +static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header) +{ + u32 local_time; + struct timeval time; + TW_Event *event; + unsigned short aen; + char host[16]; + + tw_dev->aen_count++; + + /* Fill out event info */ + event = tw_dev->event_queue[tw_dev->error_index]; + + /* Check for clobber */ + host[0] = '\0'; + if (tw_dev->host) { + sprintf(host, " scsi%d:", tw_dev->host->host_no); + if (event->retrieved == TW_AEN_NOT_RETRIEVED) + tw_dev->aen_clobber = 1; + } + + aen = 
header->status_block.error; + memset(event, 0, sizeof(TW_Event)); + + event->severity = TW_SEV_OUT(header->status_block.severity__reserved); + do_gettimeofday(&time); + local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60)); + event->time_stamp_sec = local_time; + event->aen_code = aen; + event->retrieved = TW_AEN_NOT_RETRIEVED; + event->sequence_id = tw_dev->error_sequence_id; + tw_dev->error_sequence_id++; + + header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0'; + event->parameter_len = strlen(header->err_specific_desc); + memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len); + if (event->severity != TW_AEN_SEVERITY_DEBUG) + printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n", + host, + twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)), + TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, + twa_string_lookup(twa_aen_table, aen), + header->err_specific_desc); + else + tw_dev->aen_count--; + + if ((tw_dev->error_index + 1) == TW_Q_LENGTH) + tw_dev->event_queue_wrapped = 1; + tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH; +} /* End twa_aen_queue_event() */ + +/* This function will read the aen queue from the isr */ +static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) +{ + char cdb[TW_MAX_CDB_LEN]; + TW_SG_Apache sglist[1]; + TW_Command_Full *full_command_packet; + int retval = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + + /* Initialize cdb */ + memset(&cdb, 0, TW_MAX_CDB_LEN); + cdb[0] = REQUEST_SENSE; /* opcode */ + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ + + /* Initialize sglist */ + memset(&sglist, 0, sizeof(TW_SG_Apache)); + sglist[0].length = TW_SECTOR_SIZE; + sglist[0].address = tw_dev->generic_buffer_phys[request_id]; + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + /* Now post the command packet */ + if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue"); + goto out; + } + retval = 0; +out: + return retval; +} /* End twa_aen_read_queue() */ + +/* This function will look up an AEN severity string */ +static char *twa_aen_severity_lookup(unsigned char severity_code) +{ + char *retval = NULL; + + if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) || + (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG)) + goto out; + + retval = twa_aen_severity_table[severity_code]; +out: + return retval; +} /* End twa_aen_severity_lookup() */ + +/* This function will sync firmware time with the host time */ +static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id) +{ + u32 schedulertime; + struct timeval utc; + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Param_Apache *param; + u32 local_time; + + /* Fill out the command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + command_packet = &full_command_packet->command.oldcommand; + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM); + command_packet->request_id = request_id; + command_packet->byte8_offset.param.sgl[0].address = tw_dev->generic_buffer_phys[request_id]; + command_packet->byte8_offset.param.sgl[0].length = TW_SECTOR_SIZE; + command_packet->size = TW_COMMAND_SIZE; + command_packet->byte6_offset.parameter_count = 1; + + /* Setup the param */ + 
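+ /* SchedulerTime is a 32-bit count of local seconds since the most recent Sunday 00:00; the Unix epoch (Jan 1 1970) fell on a Thursday, so the conversion below subtracts three days (3 * 86400 s) before reducing modulo one week (604800 s). */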
param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; + memset(param, 0, TW_SECTOR_SIZE); + param->table_id = TW_TIMEKEEP_TABLE | 0x8000; /* Controller time keep table */ + param->parameter_id = 0x3; /* SchedulerTime */ + param->parameter_size_bytes = 4; + + /* Convert system time in UTC to local time seconds since last + Sunday 12:00AM */ + do_gettimeofday(&utc); + local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60)); + schedulertime = local_time - (3 * 86400); + schedulertime = schedulertime % 604800; + + memcpy(param->data, &schedulertime, sizeof(u32)); + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + /* Now post the command */ + twa_post_command_packet(tw_dev, request_id, 1); +} /* End twa_aen_sync_time() */ + +/* This function will allocate memory and check if it is correctly aligned */ +static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) +{ + int i; + dma_addr_t dma_handle; + unsigned long *cpu_addr; + int retval = 1; + + cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle); + if (!cpu_addr) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); + goto out; + } + + if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory"); + pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle); + goto out; + } + + memset(cpu_addr, 0, size*TW_Q_LENGTH); + + for (i = 0; i < TW_Q_LENGTH; i++) { + switch(which) { + case 0: + tw_dev->command_packet_phys[i] = dma_handle+(i*size); + tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size)); + break; + case 1: + tw_dev->generic_buffer_phys[i] = dma_handle+(i*size); + tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size)); + break; + } + } + retval = 0; +out: + return retval; +} /* End twa_allocate_memory() */ + +/* This function will check the status register for unexpected bits */ +static int twa_check_bits(u32 status_reg_value) +{ + int retval = 1; + + if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS) + goto out; + if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0) + goto out; + + retval = 0; +out: + return retval; +} /* End twa_check_bits() */ + +/* This function will check the srl and decide if we are compatible */ +static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed) +{ + int retval = 1; + unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0; + unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0; + u32 init_connect_result = 0; + + if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, + TW_EXTENDED_INIT_CONNECT, TW_CURRENT_FW_SRL, + TW_9000_ARCH_ID, TW_CURRENT_FW_BRANCH, + TW_CURRENT_FW_BUILD, &fw_on_ctlr_srl, + &fw_on_ctlr_arch_id, &fw_on_ctlr_branch, + &fw_on_ctlr_build, &init_connect_result)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL"); + goto out; + } + + tw_dev->working_srl = TW_CURRENT_FW_SRL; + tw_dev->working_branch = TW_CURRENT_FW_BRANCH; + tw_dev->working_build = TW_CURRENT_FW_BUILD; + + /* Try base mode compatibility */ + if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) { + if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, + TW_EXTENDED_INIT_CONNECT, + TW_BASE_FW_SRL, TW_9000_ARCH_ID, + TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD, + &fw_on_ctlr_srl, &fw_on_ctlr_arch_id, + &fw_on_ctlr_branch, &fw_on_ctlr_build, + &init_connect_result)) { + 
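+ /* Even the base-mode init-connection got no valid response, so report failure and let the reset sequence retry. */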
TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL"); + goto out; + } + if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) { + if (TW_CURRENT_FW_SRL > fw_on_ctlr_srl) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware"); + } else { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver"); + } + goto out; + } + tw_dev->working_srl = TW_BASE_FW_SRL; + tw_dev->working_branch = TW_BASE_FW_BRANCH; + tw_dev->working_build = TW_BASE_FW_BUILD; + } + retval = 0; +out: + return retval; +} /* End twa_check_srl() */ + +/* This function handles ioctl for the character device */ +static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +{ + long timeout; + unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; + dma_addr_t dma_handle; + int request_id = 0; + unsigned int sequence_id = 0; + unsigned char event_index, start_index; + TW_Ioctl_Driver_Command driver_command; + TW_Ioctl_Buf_Apache *tw_ioctl; + TW_Lock *tw_lock; + TW_Command_Full *full_command_packet; + TW_Compatibility_Info *tw_compat_info; + TW_Event *event; + struct timeval current_time; + u32 current_time_ms; + TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)]; + int retval = TW_IOCTL_ERROR_OS_EFAULT; + + /* Only let one of these through at a time */ + if (down_interruptible(&tw_dev->ioctl_sem)) { + retval = TW_IOCTL_ERROR_OS_EINTR; + goto out; + } + + /* First copy down the driver command */ + if (copy_from_user(&driver_command, (void *)arg, sizeof(TW_Ioctl_Driver_Command))) + goto out2; + + /* Check data buffer size */ + if (driver_command.buffer_length > TW_MAX_SECTORS * 512) { + retval = TW_IOCTL_ERROR_OS_EINVAL; + goto out2; + } + + /* Hardware can only do multiple of 512 byte transfers */ + data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511; + + /* Now allocate ioctl buf memory */ + cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle); + if (!cpu_addr) { + retval = TW_IOCTL_ERROR_OS_ENOMEM; + goto out2; + } + + tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr; + + /* Now copy down the entire ioctl */ + if (copy_from_user(tw_ioctl, (void *)arg, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1)) + goto out3; + + /* See which ioctl we are doing */ + switch (cmd) { + case TW_IOCTL_FIRMWARE_PASS_THROUGH: + spin_lock_irqsave(tw_dev->host->host_lock, flags); + twa_get_request_id(tw_dev, &request_id); + + /* Flag internal command */ + tw_dev->srb[request_id] = 0; + + /* Flag chrdev ioctl */ + tw_dev->chrdev_request_id = request_id; + + full_command_packet = &tw_ioctl->firmware_command; + + /* Load request id and sglist for both command types */ + twa_load_sgl(full_command_packet, request_id, dma_handle, data_buffer_length_adjusted); + + memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full)); + + /* Now post the command packet to the controller */ + twa_post_command_packet(tw_dev, request_id, 1); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ; + + /* Now wait for command to complete */ + timeout = wait_event_interruptible_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout); + + /* Check if we timed out, got a signal, or didn't get + an interrupt */ + if ((timeout <= 0) && 
(tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE)) { + /* Now we need to reset the board */ + if (timeout == TW_IOCTL_ERROR_OS_ERESTARTSYS) { + retval = timeout; + } else { + printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n", + tw_dev->host->host_no, TW_DRIVER, 0xc, + cmd); + retval = TW_IOCTL_ERROR_OS_EIO; + } + spin_lock_irqsave(tw_dev->host->host_lock, flags); + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + tw_dev->posted_request_count--; + twa_reset_device_extension(tw_dev); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + goto out3; + } + + /* Now copy in the command packet response */ + memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full)); + + /* Now complete the io */ + spin_lock_irqsave(tw_dev->host->host_lock, flags); + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + break; + case TW_IOCTL_GET_COMPATIBILITY_INFO: + tw_ioctl->driver_command.status = 0; + /* Copy compatibility struct into ioctl data buffer */ + tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer; + strncpy(tw_compat_info->driver_version, twa_driver_version, strlen(twa_driver_version)); + tw_compat_info->working_srl = tw_dev->working_srl; + tw_compat_info->working_branch = tw_dev->working_branch; + tw_compat_info->working_build = tw_dev->working_build; + break; + case TW_IOCTL_GET_LAST_EVENT: + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } else + tw_ioctl->driver_command.status = 0; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + tw_ioctl->driver_command.status = 0; + } + event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH; + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_FIRST_EVENT: + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } else + tw_ioctl->driver_command.status = 0; + event_index = tw_dev->error_index; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + tw_ioctl->driver_command.status = 0; + event_index = 0; + } + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_NEXT_EVENT: + event = (TW_Event *)tw_ioctl->data_buffer; + sequence_id = event->sequence_id; + tw_ioctl->driver_command.status = 0; + + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } + start_index = tw_dev->error_index; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + start_index = 0; + } + event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH; + + if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) { + if 
(tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER) + tw_dev->aen_clobber = 1; + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_PREVIOUS_EVENT: + event = (TW_Event *)tw_ioctl->data_buffer; + sequence_id = event->sequence_id; + tw_ioctl->driver_command.status = 0; + + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } + start_index = tw_dev->error_index; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + start_index = 0; + } + event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH; + + if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) { + if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER) + tw_dev->aen_clobber = 1; + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_LOCK: + tw_lock = (TW_Lock *)tw_ioctl->data_buffer; + do_gettimeofday(&current_time); + current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000); + + if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) { + tw_dev->ioctl_sem_lock = 1; + tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec; + tw_ioctl->driver_command.status = 0; + tw_lock->time_remaining_msec = tw_lock->timeout_msec; + } else { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED; + tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms; + } + break; + case TW_IOCTL_RELEASE_LOCK: + if (tw_dev->ioctl_sem_lock == 1) { + tw_dev->ioctl_sem_lock = 0; + tw_ioctl->driver_command.status = 0; + } else { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED; + } + break; + default: + retval = TW_IOCTL_ERROR_OS_ENOTTY; + goto out3; + } + + /* Now copy the entire response to userspace */ + if (copy_to_user((void *)arg, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0) + retval = 0; +out3: + /* Now free ioctl buf memory */ + pci_free_consistent(tw_dev->tw_pci_dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle); +out2: + up(&tw_dev->ioctl_sem); +out: + return retval; +} /* End twa_chrdev_ioctl() */ + +/* This function handles open for the character device */ +static int twa_chrdev_open(struct inode *inode, struct file *file) +{ + unsigned int minor_number; + int retval = TW_IOCTL_ERROR_OS_ENODEV; + + minor_number = iminor(inode); + if (minor_number >= twa_device_extension_count) + goto out; + retval = 0; +out: + return retval; +} /* End twa_chrdev_open() */ + +/* This function will print readable messages from status register errors */ +static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value) +{ + int retval = 1; + + /* Check for various error conditions and handle them appropriately */ + if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing"); + 
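+ /* The write below posts TW_CONTROL_CLEAR_PARITY_ERROR to the control register, which should clear the latched parity-error status bit. */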
writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_PCI_ABORT) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing"); + writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev)); + pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT); + } + + if (status_reg_value & TW_STATUS_QUEUE_ERROR) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing"); + writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "SBUF Write Error: clearing"); + writel(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) { + if (tw_dev->reset_print == 0) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing"); + tw_dev->reset_print = 1; + } + goto out; + } + retval = 0; +out: + return retval; +} /* End twa_decode_bits() */ + +/* This function will empty the response queue */ +static int twa_empty_response_queue(TW_Device_Extension *tw_dev) +{ + u32 status_reg_value, response_que_value; + int count = 0, retval = 1; + + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) { + response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + count++; + } + if (count == TW_MAX_RESPONSE_DRAIN) + goto out; + + retval = 0; +out: + return retval; +} /* End twa_empty_response_queue() */ + +/* This function passes sense keys from firmware to scsi layer */ +static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host) +{ + TW_Command_Full *full_command_packet; + unsigned short error; + int retval = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + /* Don't print error for Logical unit not supported during rollcall */ + error = full_command_packet->header.status_block.error; + if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) { + if (print_host) + printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n", + tw_dev->host->host_no, + TW_MESSAGE_SOURCE_CONTROLLER_ERROR, + full_command_packet->header.status_block.error, + twa_string_lookup(twa_error_table, + full_command_packet->header.status_block.error), + full_command_packet->header.err_specific_desc); + else + printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n", + TW_MESSAGE_SOURCE_CONTROLLER_ERROR, + full_command_packet->header.status_block.error, + twa_string_lookup(twa_error_table, + full_command_packet->header.status_block.error), + full_command_packet->header.err_specific_desc); + } + + if (copy_sense) { + memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH); + tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1); + retval = TW_ISR_DONT_RESULT; + goto out; + } + retval = 0; +out: + return retval; +} /* End twa_fill_sense() */ + +/* This function will free up device extension resources */ +static void twa_free_device_extension(TW_Device_Extension *tw_dev) +{ + if (tw_dev->command_packet_virt[0]) + pci_free_consistent(tw_dev->tw_pci_dev, + sizeof(TW_Command_Full)*TW_Q_LENGTH, + tw_dev->command_packet_virt[0], + tw_dev->command_packet_phys[0]); + + if 
(tw_dev->generic_buffer_virt[0]) + pci_free_consistent(tw_dev->tw_pci_dev, + TW_SECTOR_SIZE*TW_Q_LENGTH, + tw_dev->generic_buffer_virt[0], + tw_dev->generic_buffer_phys[0]); + + if (tw_dev->event_queue[0]) + kfree(tw_dev->event_queue[0]); +} /* End twa_free_device_extension() */ + +/* This function will free a request id */ +static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id) +{ + tw_dev->free_queue[tw_dev->free_tail] = request_id; + tw_dev->state[request_id] = TW_S_FINISHED; + tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; +} /* End twa_free_request_id() */ + +/* This function will get parameter table entries from the firmware */ +static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) +{ + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Param_Apache *param; + unsigned long param_value; + void *retval = NULL; + + /* Setup the command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + command_packet = &full_command_packet->command.oldcommand; + + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = TW_COMMAND_SIZE; + command_packet->request_id = request_id; + command_packet->byte6_offset.block_count = 1; + + /* Now setup the param */ + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; + memset(param, 0, TW_SECTOR_SIZE); + param->table_id = table_id | 0x8000; + param->parameter_id = parameter_id; + param->parameter_size_bytes = parameter_size_bytes; + param_value = tw_dev->generic_buffer_phys[request_id]; + + command_packet->byte8_offset.param.sgl[0].address = param_value; + command_packet->byte8_offset.param.sgl[0].length = TW_SECTOR_SIZE; + + /* Post the command packet to the board */ + twa_post_command_packet(tw_dev, request_id, 1); + + /* Poll for completion */ + if (twa_poll_response(tw_dev, request_id, 30)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param") + else + retval = (void *)&(param->data[0]); + + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_INITIAL; + + return retval; +} /* End twa_get_param() */ + +/* This function will assign an available request id */ +static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id) +{ + *request_id = tw_dev->free_queue[tw_dev->free_head]; + tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH; + tw_dev->state[*request_id] = TW_S_STARTED; +} /* End twa_get_request_id() */ + +/* This function will send an initconnection command to controller */ +static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits, + u32 set_features, unsigned short current_fw_srl, + unsigned short current_fw_arch_id, + unsigned short current_fw_branch, + unsigned short current_fw_build, + unsigned short *fw_on_ctlr_srl, + unsigned short *fw_on_ctlr_arch_id, + unsigned short *fw_on_ctlr_branch, + unsigned short *fw_on_ctlr_build, + u32 *init_connect_result) +{ + TW_Command_Full *full_command_packet; + TW_Initconnect *tw_initconnect; + int request_id = 0, retval = 1; + + /* Initialize InitConnection command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + full_command_packet->header.header_desc.size_header = 128; + + tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand; + 
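+ /* Fill in the INIT_CONNECTION packet: message credits, feature flags and, for extended connects, the driver's SRL/branch/build so the firmware can report its own levels back through the same buffer. */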
tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION); + tw_initconnect->request_id = request_id; + tw_initconnect->message_credits = message_credits; + tw_initconnect->features = set_features; +#if BITS_PER_LONG > 32 + /* Turn on 64-bit sgl support */ + tw_initconnect->features |= 1; +#endif + + if (set_features & TW_EXTENDED_INIT_CONNECT) { + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED; + tw_initconnect->fw_srl = current_fw_srl; + tw_initconnect->fw_arch_id = current_fw_arch_id; + tw_initconnect->fw_branch = current_fw_branch; + tw_initconnect->fw_build = current_fw_build; + } else + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE; + + /* Send command packet to the board */ + twa_post_command_packet(tw_dev, request_id, 1); + + /* Poll for completion */ + if (twa_poll_response(tw_dev, request_id, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection"); + } else { + if (set_features & TW_EXTENDED_INIT_CONNECT) { + *fw_on_ctlr_srl = tw_initconnect->fw_srl; + *fw_on_ctlr_arch_id = tw_initconnect->fw_arch_id; + *fw_on_ctlr_branch = tw_initconnect->fw_branch; + *fw_on_ctlr_build = tw_initconnect->fw_build; + *init_connect_result = tw_initconnect->result; + } + retval = 0; + } + + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_INITIAL; + + return retval; +} /* End twa_initconnection() */ + +/* This function will initialize the fields of a device extension */ +static int twa_initialize_device_extension(TW_Device_Extension *tw_dev) +{ + int i, retval = 1; + + /* Initialize command packet buffers */ + if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed"); + goto out; + } + + /* Initialize generic buffer */ + if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed"); + goto out; + } + + /* Allocate event info space */ + tw_dev->event_queue[0] = kmalloc(sizeof(TW_Event) * TW_Q_LENGTH, GFP_KERNEL); + if (!tw_dev->event_queue[0]) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed"); + goto out; + } + + memset(tw_dev->event_queue[0], 0, sizeof(TW_Event) * TW_Q_LENGTH); + + for (i = 0; i < TW_Q_LENGTH; i++) { + tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event))); + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + + tw_dev->pending_head = TW_Q_START; + tw_dev->pending_tail = TW_Q_START; + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->error_sequence_id = 1; + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + + init_MUTEX(&tw_dev->ioctl_sem); + init_waitqueue_head(&tw_dev->ioctl_wqueue); + + retval = 0; +out: + return retval; +} /* End twa_initialize_device_extension() */ + +/* This function is the interrupt service routine */ +static irqreturn_t twa_interrupt(int irq, void *dev_instance, struct pt_regs *regs) +{ + int request_id, error = 0; + u32 status_reg_value; + TW_Response_Queue response_que; + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance; + int handled = 0; + + /* Get the per adapter lock */ + spin_lock(tw_dev->host->host_lock); + + /* See if the interrupt matches this instance */ + if (tw_dev->tw_pci_dev->irq == (unsigned int)irq) { + + handled = 1; + + /* Read the registers */ + status_reg_value = 
readl(TW_STATUS_REG_ADDR(tw_dev)); + + /* Check if this is our interrupt, otherwise bail */ + if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT)) + goto twa_interrupt_bail; + + /* Check controller for errors */ + if (twa_check_bits(status_reg_value)) { + if (twa_decode_bits(tw_dev, status_reg_value)) { + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + } + + /* Handle host interrupt */ + if (status_reg_value & TW_STATUS_HOST_INTERRUPT) + TW_CLEAR_HOST_INTERRUPT(tw_dev); + + /* Handle attention interrupt */ + if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) { + TW_CLEAR_ATTENTION_INTERRUPT(tw_dev); + if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) { + twa_get_request_id(tw_dev, &request_id); + + error = twa_aen_read_queue(tw_dev, request_id); + if (error) { + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); + } + } + } + + /* Handle command interrupt */ + if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) { + TW_MASK_COMMAND_INTERRUPT(tw_dev); + /* Drain as many pending commands as we can */ + while (tw_dev->pending_request_count > 0) { + request_id = tw_dev->pending_queue[tw_dev->pending_head]; + if (tw_dev->state[request_id] != TW_S_PENDING) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending"); + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + if (twa_post_command_packet(tw_dev, request_id, 1)==0) { + tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH; + tw_dev->pending_request_count--; + } else { + /* If we get here, we will continue re-posting on the next command interrupt */ + break; + } + } + } + + /* Handle response interrupt */ + if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) { + + /* Drain the response queue from the board */ + while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) { + /* Complete the response */ + response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + request_id = TW_RESID_OUT(response_que.response_id); + full_command_packet = tw_dev->command_packet_virt[request_id]; + error = 0; + command_packet = &full_command_packet->command.oldcommand; + /* Check for command packet errors */ + if (full_command_packet->command.newcommand.status != 0) { + if (tw_dev->srb[request_id] != 0) { + error = twa_fill_sense(tw_dev, request_id, 1, 1); + } else { + /* Skip ioctl error prints */ + if (request_id != tw_dev->chrdev_request_id) { + error = twa_fill_sense(tw_dev, request_id, 0, 1); + } + } + } + + /* Check for correct state */ + if (tw_dev->state[request_id] != TW_S_POSTED) { + if (tw_dev->srb[request_id] != 0) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted"); + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + } + + /* Check for internal command completion */ + if (tw_dev->srb[request_id] == 0) { + if (request_id != tw_dev->chrdev_request_id) { + if (twa_aen_complete(tw_dev, request_id)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt"); + } else { + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + wake_up(&tw_dev->ioctl_wqueue); + } + } else { + twa_scsiop_execute_scsi_complete(tw_dev, request_id); + /* If no error command was a success */ + if (error == 0) { + tw_dev->srb[request_id]->result = (DID_OK << 16); + } + + /* If error, command failed */ + if (error == 1) { + /* Ask for a host reset */ + tw_dev->srb[request_id]->result = (DID_OK << 16) | 
(CHECK_CONDITION << 1); + } + + /* Now complete the io */ + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + tw_dev->posted_request_count--; + tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); + twa_unmap_scsi_data(tw_dev, request_id); + } + + /* Check for valid status after each drain */ + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + if (twa_check_bits(status_reg_value)) { + if (twa_decode_bits(tw_dev, status_reg_value)) { + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + } + } + } + } +twa_interrupt_bail: + spin_unlock(tw_dev->host->host_lock); + return IRQ_RETVAL(handled); +} /* End twa_interrupt() */ + +/* This function will load the request id and various sgls for ioctls */ +static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length) +{ + TW_Command *oldcommand; + TW_Command_Apache *newcommand; + TW_SG_Entry *sgl; + + if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { + newcommand = &full_command_packet->command.newcommand; + newcommand->request_id = request_id; + newcommand->sg_list[0].address = dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1; + newcommand->sg_list[0].length = length; + } else { + oldcommand = &full_command_packet->command.oldcommand; + oldcommand->request_id = request_id; + + if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) { + /* Load the sg list */ + sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset)); + sgl->address = dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1; + sgl->length = length; + } + } +} /* End twa_load_sgl() */ + +/* This function will perform a pci-dma mapping for a scatter gather list */ +static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) +{ + int use_sg; + struct scsi_cmnd *cmd = tw_dev->srb[request_id]; + struct pci_dev *pdev = tw_dev->tw_pci_dev; + int retval = 0; + + if (cmd->use_sg == 0) + goto out; + + use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, DMA_BIDIRECTIONAL); + + if (use_sg == 0) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list"); + goto out; + } + + cmd->SCp.phase = TW_PHASE_SGLIST; + cmd->SCp.have_data_in = use_sg; + retval = use_sg; +out: + return retval; +} /* End twa_map_scsi_sg_data() */ + +/* This function will perform a pci-dma map for a single buffer */ +static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int request_id) +{ + dma_addr_t mapping; + struct scsi_cmnd *cmd = tw_dev->srb[request_id]; + struct pci_dev *pdev = tw_dev->tw_pci_dev; + int retval = 0; + + if (cmd->request_bufflen == 0) { + retval = 0; + goto out; + } + + mapping = pci_map_single(pdev, cmd->request_buffer, cmd->request_bufflen, DMA_BIDIRECTIONAL); + + if (mapping == 0) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Failed to map page"); + goto out; + } + + cmd->SCp.phase = TW_PHASE_SINGLE; + cmd->SCp.have_data_in = mapping; + retval = mapping; +out: + return retval; +} /* End twa_map_scsi_single_data() */ + +/* This function will poll for a response interrupt of a request */ +static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) +{ + int retval = 1, found = 0, response_request_id; + TW_Response_Queue response_queue; + TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id]; + + if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) { + response_queue.value = 
readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + response_request_id = TW_RESID_OUT(response_queue.response_id); + if (request_id != response_request_id) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response"); + goto out; + } + if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { + if (full_command_packet->command.newcommand.status != 0) { + /* bad response */ + twa_fill_sense(tw_dev, request_id, 0, 0); + goto out; + } + found = 1; + } else { + if (full_command_packet->command.oldcommand.status != 0) { + /* bad response */ + twa_fill_sense(tw_dev, request_id, 0, 0); + goto out; + } + found = 1; + } + } + + if (found) + retval = 0; +out: + return retval; +} /* End twa_poll_response() */ + +/* This function will poll the status register for a flag */ +static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds) +{ + u32 status_reg_value; + unsigned long before; + int retval = 1; + + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + before = jiffies; + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + while ((status_reg_value & flag) != flag) { + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + if (time_after(jiffies, before + HZ * seconds)) + goto out; + + msleep(50); + } + retval = 0; +out: + return retval; +} /* End twa_poll_status() */ + +/* This function will poll the status register for disappearance of a flag */ +static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds) +{ + u32 status_reg_value; + unsigned long before; + int retval = 1; + + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + before = jiffies; + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + while ((status_reg_value & flag) != 0) { + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + if (time_after(jiffies, before + HZ * seconds)) + goto out; + + msleep(50); + } + retval = 0; +out: + return retval; +} /* End twa_poll_status_gone() */ + +/* This function will attempt to post a command packet to the board */ +static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal) +{ + u32 status_reg_value; + unsigned long command_que_value; + int retval = 1; + + command_que_value = tw_dev->command_packet_phys[request_id]; + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) { + + /* Only pend internal driver commands */ + if (!internal) { + retval = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + /* Couldn't post the command packet, so we do it later */ + if (tw_dev->state[request_id] != TW_S_PENDING) { + tw_dev->state[request_id] = TW_S_PENDING; + tw_dev->pending_request_count++; + if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) { + tw_dev->max_pending_request_count = tw_dev->pending_request_count; + } + tw_dev->pending_queue[tw_dev->pending_tail] = request_id; + tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH; + } + TW_UNMASK_COMMAND_INTERRUPT(tw_dev); + goto out; + } else { + /* We successfully posted the command packet */ 
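+ /* The command packet's bus address (offset by TW_COMMAND_OFFSET) is handed to the firmware through the command queue register; 64-bit kernels post it with a single writeq, 32-bit kernels fall back to writel. */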
+#if BITS_PER_LONG > 32 + writeq(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); +#else + writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); +#endif + tw_dev->state[request_id] = TW_S_POSTED; + tw_dev->posted_request_count++; + if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) { + tw_dev->max_posted_request_count = tw_dev->posted_request_count; + } + } + retval = 0; +out: + return retval; +} /* End twa_post_command_packet() */ + +/* This function will reset a device extension */ +static int twa_reset_device_extension(TW_Device_Extension *tw_dev) +{ + int i = 0; + int retval = 1; + + /* Abort all requests that are in progress */ + for (i = 0; i < TW_Q_LENGTH; i++) { + if ((tw_dev->state[i] != TW_S_FINISHED) && + (tw_dev->state[i] != TW_S_INITIAL) && + (tw_dev->state[i] != TW_S_COMPLETED)) { + if (tw_dev->srb[i]) { + tw_dev->srb[i]->result = (DID_RESET << 16); + tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); + twa_unmap_scsi_data(tw_dev, i); + } + } + } + + /* Reset queues and counts */ + for (i = 0; i < TW_Q_LENGTH; i++) { + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->posted_request_count = 0; + tw_dev->pending_request_count = 0; + tw_dev->pending_head = TW_Q_START; + tw_dev->pending_tail = TW_Q_START; + tw_dev->reset_print = 0; + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + tw_dev->flags = 0; + + TW_DISABLE_INTERRUPTS(tw_dev); + + if (twa_reset_sequence(tw_dev, 1)) + goto out; + + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); + + retval = 0; +out: + return retval; +} /* End twa_reset_device_extension() */ + +/* This function will reset a controller */ +static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset) +{ + int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset; + + while (tries < TW_MAX_RESET_TRIES) { + if (do_soft_reset) + TW_SOFT_RESET(tw_dev); + + /* Make sure controller is in a good state */ + if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? 
TW_STATUS_ATTENTION_INTERRUPT : 0), 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + /* Empty response queue */ + if (twa_empty_response_queue(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + flashed = 0; + + /* Check for compatibility/flash */ + if (twa_check_srl(tw_dev, &flashed)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } else { + if (flashed) { + tries++; + continue; + } + } + + /* Drain the AEN queue */ + if (twa_aen_drain_queue(tw_dev, soft_reset)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + /* If we got here, controller is in a good state */ + retval = 0; + goto out; + } +out: + return retval; +} /* End twa_reset_sequence() */ + +/* This function returns unit geometry in cylinders/heads/sectors */ +static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) +{ + int heads, sectors, cylinders; + TW_Device_Extension *tw_dev; + + tw_dev = (TW_Device_Extension *)sdev->host->hostdata; + + if (capacity >= 0x200000) { + heads = 255; + sectors = 63; + cylinders = sector_div(capacity, heads * sectors); + } else { + heads = 64; + sectors = 32; + cylinders = sector_div(capacity, heads * sectors); + } + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return 0; +} /* End twa_scsi_biosparam() */ + +/* This is the new scsi eh abort function */ +static int twa_scsi_eh_abort(struct scsi_cmnd *SCpnt) +{ + int i; + TW_Device_Extension *tw_dev = NULL; + int retval = FAILED; + + tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + spin_unlock_irq(tw_dev->host->host_lock); + + tw_dev->num_aborts++; + + /* If we find any IO's in process, we have to reset the card */ + for (i = 0; i < TW_Q_LENGTH; i++) { + if ((tw_dev->state[i] != TW_S_FINISHED) && (tw_dev->state[i] != TW_S_INITIAL)) { + printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Unit #%d: Command (0x%x) timed out, resetting card.\n", + tw_dev->host->host_no, TW_DRIVER, 0x2c, + SCpnt->device->id, SCpnt->cmnd[0]); + if (twa_reset_device_extension(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2a, "Controller reset failed during scsi abort"); + goto out; + } + break; + } + } + retval = SUCCESS; +out: + spin_lock_irq(tw_dev->host->host_lock); + return retval; +} /* End twa_scsi_eh_abort() */ + +/* This is the new scsi eh reset function */ +static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt) +{ + TW_Device_Extension *tw_dev = NULL; + int retval = FAILED; + + tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + spin_unlock_irq(tw_dev->host->host_lock); + + tw_dev->num_resets++; + + printk(KERN_WARNING "3w-9xxx: scsi%d: SCSI host reset started.\n", tw_dev->host->host_no); + + /* Now reset the card and some of the device extension data */ + if (twa_reset_device_extension(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset"); + goto out; + } + printk(KERN_WARNING "3w-9xxx: scsi%d: SCSI host reset succeeded.\n", tw_dev->host->host_no); + retval = SUCCESS; +out: + spin_lock_irq(tw_dev->host->host_lock); + return retval; +} /* End twa_scsi_eh_reset() */ + +/* This is the main scsi queue 
function to handle scsi opcodes */ +static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +{ + int request_id, retval; + TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + /* Save done function into scsi_cmnd struct */ + SCpnt->scsi_done = done; + + /* Get a free request id */ + twa_get_request_id(tw_dev, &request_id); + + /* Save the scsi command for use by the ISR */ + tw_dev->srb[request_id] = SCpnt; + + /* Initialize phase to zero */ + SCpnt->SCp.phase = TW_PHASE_INITIAL; + + retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); + switch (retval) { + case SCSI_MLQUEUE_HOST_BUSY: + twa_free_request_id(tw_dev, request_id); + break; + case 1: + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + SCpnt->result = (DID_ERROR << 16); + done(SCpnt); + } + + return retval; +} /* End twa_scsi_queue() */ + +/* This function hands scsi cdb's to the firmware */ +static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Apache *sglistarg) +{ + TW_Command_Full *full_command_packet; + TW_Command_Apache *command_packet; + u32 num_sectors = 0x0; + int i, sg_count; + struct scsi_cmnd *srb = NULL; + struct scatterlist *sglist = NULL; + u32 buffaddr = 0x0; + int retval = 1; + + if (tw_dev->srb[request_id]) { + if (tw_dev->srb[request_id]->request_buffer) { + sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer; + } + srb = tw_dev->srb[request_id]; + } + + /* Initialize command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + full_command_packet->header.header_desc.size_header = 128; + full_command_packet->header.status_block.error = 0; + full_command_packet->header.status_block.severity__reserved = 0; + + command_packet = &full_command_packet->command.newcommand; + command_packet->status = 0; + command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI); + + /* We forced 16 byte cdb use earlier */ + if (!cdb) + memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN); + else + memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN); + + if (srb) + command_packet->unit = srb->device->id; + else + command_packet->unit = 0; + + command_packet->request_id = request_id; + command_packet->sgl_offset = 16; + + if (!sglistarg) { + /* Map sglist from scsi layer to cmd packet */ + if (tw_dev->srb[request_id]->use_sg == 0) { + if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH) { + command_packet->sg_list[0].address = tw_dev->generic_buffer_phys[request_id]; + command_packet->sg_list[0].length = TW_MIN_SGL_LENGTH; + } else { + buffaddr = twa_map_scsi_single_data(tw_dev, request_id); + if (buffaddr == 0) + goto out; + + command_packet->sg_list[0].address = buffaddr; + command_packet->sg_list[0].length = tw_dev->srb[request_id]->request_bufflen; + } + command_packet->sgl_entries = 1; + + if (command_packet->sg_list[0].address & TW_ALIGNMENT_9000_SGL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2d, "Found unaligned address during execute scsi"); + goto out; + } + } + + if (tw_dev->srb[request_id]->use_sg > 0) { + sg_count = twa_map_scsi_sg_data(tw_dev, request_id); + if (sg_count == 0) + goto out; + + for (i = 0; i < sg_count; i++) { + command_packet->sg_list[i].address = sg_dma_address(&sglist[i]); + command_packet->sg_list[i].length = sg_dma_len(&sglist[i]); + if (command_packet->sg_list[i].address & TW_ALIGNMENT_9000_SGL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl 
address during execute scsi"); + goto out; + } + } + command_packet->sgl_entries = tw_dev->srb[request_id]->use_sg; + } + } else { + /* Internal cdb post */ + for (i = 0; i < use_sg; i++) { + command_packet->sg_list[i].address = sglistarg[i].address; + command_packet->sg_list[i].length = sglistarg[i].length; + if (command_packet->sg_list[i].address & TW_ALIGNMENT_9000_SGL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post"); + goto out; + } + } + command_packet->sgl_entries = use_sg; + } + + if (srb) { + if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6) + num_sectors = (u32)srb->cmnd[4]; + + if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) + num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8); + } + + /* Update sector statistic */ + tw_dev->sector_count = num_sectors; + if (tw_dev->sector_count > tw_dev->max_sector_count) + tw_dev->max_sector_count = tw_dev->sector_count; + + /* Update SG statistics */ + if (srb) { + tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg; + if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) + tw_dev->max_sgl_entries = tw_dev->sgl_entries; + } + + /* Now post the command to the board */ + if (srb) { + retval = twa_post_command_packet(tw_dev, request_id, 0); + } else { + twa_post_command_packet(tw_dev, request_id, 1); + retval = 0; + } +out: + return retval; +} /* End twa_scsiop_execute_scsi() */ + +/* This function completes an execute scsi operation */ +static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id) +{ + /* Copy the response if too small */ + if ((tw_dev->srb[request_id]->request_buffer) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) { + memcpy(tw_dev->srb[request_id]->request_buffer, + tw_dev->generic_buffer_virt[request_id], + tw_dev->srb[request_id]->request_bufflen); + } +} /* End twa_scsiop_execute_scsi_complete() */ + +/* This function tells the controller to shut down */ +static void __twa_shutdown(TW_Device_Extension *tw_dev) +{ + /* Disable interrupts */ + TW_DISABLE_INTERRUPTS(tw_dev); + + printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no); + + /* Tell the card we are shutting down */ + if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed"); + } else { + printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n"); + } + + /* Clear all interrupts just before exit */ + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); +} /* End __twa_shutdown() */ + +/* Wrapper for __twa_shutdown */ +static void twa_shutdown(struct device *dev) +{ + struct Scsi_Host *host = pci_get_drvdata(to_pci_dev(dev)); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + __twa_shutdown(tw_dev); +} /* End twa_shutdown() */ + +/* This function will look up a string */ +static char *twa_string_lookup(twa_message_type *table, unsigned int code) +{ + int index; + + for (index = 0; ((code != table[index].code) && + (table[index].text != (char *)0)); index++); + return(table[index].text); +} /* End twa_string_lookup() */ + +/* This function will perform a pci-dma unmap */ +static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) +{ + struct scsi_cmnd *cmd = tw_dev->srb[request_id]; + struct pci_dev *pdev = tw_dev->tw_pci_dev; + + switch(cmd->SCp.phase) { + case TW_PHASE_SINGLE: + pci_unmap_single(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL); + break; + case TW_PHASE_SGLIST: + 
pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL); + break; + } +} /* End twa_unmap_scsi_data() */ + +/* scsi_host_template initializer */ +static struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "3ware 9000 Storage Controller", + .queuecommand = twa_scsi_queue, + .eh_abort_handler = twa_scsi_eh_abort, + .eh_host_reset_handler = twa_scsi_eh_reset, + .bios_param = twa_scsi_biosparam, + .can_queue = TW_Q_LENGTH-2, + .this_id = -1, + .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH, + .max_sectors = TW_MAX_SECTORS, + .cmd_per_lun = TW_MAX_CMDS_PER_LUN, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = twa_host_attrs, + .sdev_attrs = twa_dev_attrs, + .emulated = 1 +}; + +/* This function will probe and initialize a card */ +static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) +{ + struct Scsi_Host *host = NULL; + TW_Device_Extension *tw_dev; + u32 mem_addr; + int retval = -ENODEV; + + retval = pci_enable_device(pdev); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device"); + goto out_disable_device; + } + + pci_set_master(pdev); + + retval = pci_set_dma_mask(pdev, TW_DMA_MASK); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); + goto out_disable_device; + } + + host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); + if (!host) { + TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension"); + retval = -ENOMEM; + goto out_disable_device; + } + tw_dev = (TW_Device_Extension *)host->hostdata; + + memset(tw_dev, 0, sizeof(TW_Device_Extension)); + + /* Save values to device extension */ + tw_dev->host = host; + tw_dev->tw_pci_dev = pdev; + + if (twa_initialize_device_extension(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension"); + goto out_free_device_extension; + } + + /* Request IO regions */ + retval = pci_request_regions(pdev, "3w-9xxx"); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region"); + goto out_free_device_extension; + } + + mem_addr = pci_resource_start(pdev, 1); + + /* Save base address */ + tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE); + if (!tw_dev->base_addr) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap"); + goto out_release_mem_region; + } + + /* Disable interrupts on the card */ + TW_DISABLE_INTERRUPTS(tw_dev); + + /* Initialize the card */ + if (twa_reset_sequence(tw_dev, 0)) + goto out_release_mem_region; + + /* Set host specific parameters */ + host->max_id = TW_MAX_UNITS; + host->max_cmd_len = TW_MAX_CDB_LEN; + + /* Luns and channels aren't supported by adapter */ + host->max_lun = 0; + host->max_channel = 0; + + /* Register the card with the kernel SCSI layer */ + retval = scsi_add_host(host, &pdev->dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed"); + goto out_release_mem_region; + } + + pci_set_drvdata(pdev, host); + + printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%x, IRQ: %d.\n", + host->host_no, mem_addr, pdev->irq); + printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n", + host->host_no, + (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE, + TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH), + (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE, + TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH), + *(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE, + TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)); + + /* Now setup 
the interrupt handler */ + retval = request_irq(pdev->irq, twa_interrupt, SA_SHIRQ, "3w-9xxx", tw_dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ"); + goto out_remove_host; + } + + twa_device_extension_list[twa_device_extension_count] = tw_dev; + twa_device_extension_count++; + + /* Re-enable interrupts on the card */ + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); + + /* Finally, scan the host */ + scsi_scan_host(host); + + if (twa_major == -1) { + if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0) + TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device"); + } + return 0; + +out_remove_host: + scsi_remove_host(host); +out_release_mem_region: + pci_release_regions(pdev); +out_free_device_extension: + twa_free_device_extension(tw_dev); + scsi_host_put(host); +out_disable_device: + pci_disable_device(pdev); + + return retval; +} /* End twa_probe() */ + +/* This function is called to remove a device */ +static void twa_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + scsi_remove_host(tw_dev->host); + + __twa_shutdown(tw_dev); + + /* Free up the IRQ */ + free_irq(tw_dev->tw_pci_dev->irq, tw_dev); + + /* Free up the mem region */ + pci_release_regions(pdev); + + /* Free up device extension resources */ + twa_free_device_extension(tw_dev); + + /* Unregister character device */ + if (twa_major >= 0) { + unregister_chrdev(twa_major, "twa"); + twa_major = -1; + } + + scsi_host_put(tw_dev->host); + pci_disable_device(pdev); + twa_device_extension_count--; +} /* End twa_remove() */ + +/* PCI Devices supported by this driver */ +static struct pci_device_id twa_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { } +}; +MODULE_DEVICE_TABLE(pci, twa_pci_tbl); + +/* pci_driver initializer */ +static struct pci_driver twa_driver = { + .name = "3w-9xxx", + .id_table = twa_pci_tbl, + .probe = twa_probe, + .remove = twa_remove, + .driver = { + .shutdown = twa_shutdown + } +}; + +/* This function is called on driver initialization */ +static int __init twa_init(void) +{ + printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", twa_driver_version); + + return pci_module_init(&twa_driver); +} /* End twa_init() */ + +/* This function is called on driver exit */ +static void __exit twa_exit(void) +{ + pci_unregister_driver(&twa_driver); +} /* End twa_exit() */ + +module_init(twa_init); +module_exit(twa_exit); + diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h new file mode 100644 index 000000000..3c91ce6ed --- /dev/null +++ b/drivers/scsi/3w-9xxx.h @@ -0,0 +1,704 @@ +/* + 3w-9xxx.h -- 3ware 9000 Storage Controller device driver for Linux. + + Written By: Adam Radford + + Copyright (C) 2004 Applied Micro Circuits Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Bugs/Comments/Suggestions should be mailed to: + linuxraid@amcc.com + + For more information, go to: + http://www.amcc.com +*/ + +#ifndef _3W_9XXX_H +#define _3W_9XXX_H + +/* AEN string type */ +typedef struct TAG_twa_message_type { + unsigned int code; + char* text; +} twa_message_type; + +/* AEN strings */ +static twa_message_type twa_aen_table[] = { + {0x0000, "AEN queue empty"}, + {0x0001, "Controller reset occurred"}, + {0x0002, "Degraded unit detected"}, + {0x0003, "Controller error occurred"}, + {0x0004, "Background rebuild failed"}, + {0x0005, "Background rebuild done"}, + {0x0006, "Incomplete unit detected"}, + {0x0007, "Background initialize done"}, + {0x0008, "Unclean shutdown detected"}, + {0x0009, "Drive timeout detected"}, + {0x000A, "Drive error detected"}, + {0x000B, "Rebuild started"}, + {0x000C, "Background initialize started"}, + {0x000D, "Entire logical unit was deleted"}, + {0x000E, "Background initialize failed"}, + {0x000F, "SMART attribute exceeded threshold"}, + {0x0010, "Power supply reported AC under range"}, + {0x0011, "Power supply reported DC out of range"}, + {0x0012, "Power supply reported a malfunction"}, + {0x0013, "Power supply predicted malfunction"}, + {0x0014, "Battery charge is below threshold"}, + {0x0015, "Fan speed is below threshold"}, + {0x0016, "Temperature sensor is above threshold"}, + {0x0017, "Power supply was removed"}, + {0x0018, "Power supply was inserted"}, + {0x0019, "Drive was removed from a bay"}, + {0x001A, "Drive was inserted into a bay"}, + {0x001B, "Drive bay cover door was opened"}, + {0x001C, "Drive bay cover door was closed"}, + {0x001D, "Product case was opened"}, + {0x0020, "Prepare for shutdown (power-off)"}, + {0x0021, "Downgrade UDMA mode to lower speed"}, + {0x0022, "Upgrade UDMA mode to higher speed"}, + {0x0023, "Sector repair completed"}, + {0x0024, "Sbuf memory test failed"}, + {0x0025, "Error flushing cached write data to array"}, + {0x0026, "Drive reported data ECC error"}, + {0x0027, "DCB has checksum error"}, + {0x0028, "DCB version is unsupported"}, + {0x0029, "Background verify started"}, + {0x002A, "Background verify failed"}, + {0x002B, 
"Background verify done"}, + {0x002C, "Bad sector overwritten during rebuild"}, + {0x002D, "Background rebuild error on source drive"}, + {0x002E, "Replace failed because replacement drive too small"}, + {0x002F, "Verify failed because array was never initialized"}, + {0x0030, "Unsupported ATA drive"}, + {0x0031, "Synchronize host/controller time"}, + {0x0032, "Spare capacity is inadequate for some units"}, + {0x0033, "Background migration started"}, + {0x0034, "Background migration failed"}, + {0x0035, "Background migration done"}, + {0x0036, "Verify detected and fixed data/parity mismatch"}, + {0x0037, "SO-DIMM incompatible"}, + {0x0038, "SO-DIMM not detected"}, + {0x0039, "Corrected Sbuf ECC error"}, + {0x003A, "Drive power on reset detected"}, + {0x003B, "Background rebuild paused"}, + {0x003C, "Background initialize paused"}, + {0x003D, "Background verify paused"}, + {0x003E, "Background migration paused"}, + {0x003F, "Corrupt flash file system detected"}, + {0x0040, "Flash file system repaired"}, + {0x0041, "Unit number assignments were lost"}, + {0x0042, "Error during read of primary DCB"}, + {0x0043, "Latent error found in backup DCB"}, + {0x00FC, "Recovered/finished array membership update"}, + {0x00FD, "Handler lockup"}, + {0x00FE, "Retrying PCI transfer"}, + {0x00FF, "AEN queue is full"}, + {0xFFFFFFFF, (char*) 0} +}; + +/* AEN severity table */ +static char *twa_aen_severity_table[] = +{ + "None", "ERROR", "WARNING", "INFO", "DEBUG", (char*) 0 +}; + +/* Error strings */ +static twa_message_type twa_error_table[] = { + {0x0100, "SGL entry contains zero data"}, + {0x0101, "Invalid command opcode"}, + {0x0102, "SGL entry has unaligned address"}, + {0x0103, "SGL size does not match command"}, + {0x0104, "SGL entry has illegal length"}, + {0x0105, "Command packet is not aligned"}, + {0x0106, "Invalid request ID"}, + {0x0107, "Duplicate request ID"}, + {0x0108, "ID not locked"}, + {0x0109, "LBA out of range"}, + {0x010A, "Logical unit not supported"}, + {0x010B, "Parameter table does not exist"}, + {0x010C, "Parameter index does not exist"}, + {0x010D, "Invalid field in CDB"}, + {0x010E, "Specified port has invalid drive"}, + {0x010F, "Parameter item size mismatch"}, + {0x0110, "Failed memory allocation"}, + {0x0111, "Memory request too large"}, + {0x0112, "Out of memory segments"}, + {0x0113, "Invalid address to deallocate"}, + {0x0114, "Out of memory"}, + {0x0115, "Out of heap"}, + {0x0120, "Double degrade"}, + {0x0121, "Drive not degraded"}, + {0x0122, "Reconstruct error"}, + {0x0123, "Replace not accepted"}, + {0x0124, "Replace drive capacity too small"}, + {0x0125, "Sector count not allowed"}, + {0x0126, "No spares left"}, + {0x0127, "Reconstruct error"}, + {0x0128, "Unit is offline"}, + {0x0129, "Cannot update status to DCB"}, + {0x0130, "Invalid stripe handle"}, + {0x0131, "Handle that was not locked"}, + {0x0132, "Handle that was not empty"}, + {0x0133, "Handle has different owner"}, + {0x0140, "IPR has parent"}, + {0x0150, "Illegal Pbuf address alignment"}, + {0x0151, "Illegal Pbuf transfer length"}, + {0x0152, "Illegal Sbuf address alignment"}, + {0x0153, "Illegal Sbuf transfer length"}, + {0x0160, "Command packet too large"}, + {0x0161, "SGL exceeds maximum length"}, + {0x0162, "SGL has too many entries"}, + {0x0170, "Insufficient resources for rebuilder"}, + {0x0171, "Verify error (data != parity)"}, + {0x0180, "Requested segment not in directory of this DCB"}, + {0x0181, "DCB segment has unsupported version"}, + {0x0182, "DCB segment has checksum error"}, + {0x0183, "DCB 
support (settings) segment invalid"}, + {0x0184, "DCB UDB (unit descriptor block) segment invalid"}, + {0x0185, "DCB GUID (globally unique identifier) segment invalid"}, + {0x01A0, "Could not clear Sbuf"}, + {0x01C0, "Flash identify failed"}, + {0x01C1, "Flash out of bounds"}, + {0x01C2, "Flash verify error"}, + {0x01C3, "Flash file object not found"}, + {0x01C4, "Flash file already present"}, + {0x01C5, "Flash file system full"}, + {0x01C6, "Flash file not present"}, + {0x01C7, "Flash file size error"}, + {0x01C8, "Bad flash file checksum"}, + {0x01CA, "Corrupt flash file system detected"}, + {0x01D0, "Invalid field in parameter list"}, + {0x01D1, "Parameter list length error"}, + {0x01D2, "Parameter item is not changeable"}, + {0x01D3, "Parameter item is not saveable"}, + {0x0200, "UDMA CRC error"}, + {0x0201, "Internal CRC error"}, + {0x0202, "Data ECC error"}, + {0x0203, "ADP level 1 error"}, + {0x0204, "Port timeout"}, + {0x0205, "Drive power on reset"}, + {0x0206, "ADP level 2 error"}, + {0x0207, "Soft reset failed"}, + {0x0208, "Drive not ready"}, + {0x0209, "Unclassified port error"}, + {0x020A, "Drive aborted command"}, + {0x0210, "Internal CRC error"}, + {0x0211, "PCI abort error"}, + {0x0212, "PCI parity error"}, + {0x0213, "Port handler error"}, + {0x0214, "Token interrupt count error"}, + {0x0215, "Timeout waiting for PCI transfer"}, + {0x0216, "Corrected buffer ECC"}, + {0x0217, "Uncorrected buffer ECC"}, + {0x0230, "Unsupported command during flash recovery"}, + {0x0231, "Next image buffer expected"}, + {0x0232, "Binary image architecture incompatible"}, + {0x0233, "Binary image has no signature"}, + {0x0234, "Binary image has bad checksum"}, + {0x0235, "Image downloaded overflowed buffer"}, + {0x0240, "I2C device not found"}, + {0x0241, "I2C transaction aborted"}, + {0x0242, "SO-DIMM parameter(s) incompatible using defaults"}, + {0x0243, "SO-DIMM unsupported"}, + {0x0248, "SPI transfer status error"}, + {0x0249, "SPI transfer timeout error"}, + {0x0250, "Invalid unit descriptor size in CreateUnit"}, + {0x0251, "Unit descriptor size exceeds data buffer in CreateUnit"}, + {0x0252, "Invalid value in CreateUnit descriptor"}, + {0x0253, "Inadequate disk space to support descriptor in CreateUnit"}, + {0x0254, "Unable to create data channel for this unit descriptor"}, + {0x0255, "CreateUnit descriptor specifies a drive already in use"}, + {0x0256, "Unable to write configuration to all disks during CreateUnit"}, + {0x0257, "CreateUnit does not support this descriptor version"}, + {0x0258, "Invalid subunit for RAID 0 or 5 in CreateUnit"}, + {0x0259, "Too many descriptors in CreateUnit"}, + {0x025A, "Invalid configuration specified in CreateUnit descriptor"}, + {0x025B, "Invalid LBA offset specified in CreateUnit descriptor"}, + {0x025C, "Invalid stripelet size specified in CreateUnit descriptor"}, + {0x0260, "SMART attribute exceeded threshold"}, + {0xFFFFFFFF, (char*) 0} +}; + +/* Control register bit definitions */ +#define TW_CONTROL_CLEAR_HOST_INTERRUPT 0x00080000 +#define TW_CONTROL_CLEAR_ATTENTION_INTERRUPT 0x00040000 +#define TW_CONTROL_MASK_COMMAND_INTERRUPT 0x00020000 +#define TW_CONTROL_MASK_RESPONSE_INTERRUPT 0x00010000 +#define TW_CONTROL_UNMASK_COMMAND_INTERRUPT 0x00008000 +#define TW_CONTROL_UNMASK_RESPONSE_INTERRUPT 0x00004000 +#define TW_CONTROL_CLEAR_ERROR_STATUS 0x00000200 +#define TW_CONTROL_ISSUE_SOFT_RESET 0x00000100 +#define TW_CONTROL_ENABLE_INTERRUPTS 0x00000080 +#define TW_CONTROL_DISABLE_INTERRUPTS 0x00000040 +#define TW_CONTROL_ISSUE_HOST_INTERRUPT 
0x00000020 +#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000 +#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000 +#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000 +#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008 + +/* Status register bit definitions */ +#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000 +#define TW_STATUS_MINOR_VERSION_MASK 0x0F000000 +#define TW_STATUS_PCI_PARITY_ERROR 0x00800000 +#define TW_STATUS_QUEUE_ERROR 0x00400000 +#define TW_STATUS_MICROCONTROLLER_ERROR 0x00200000 +#define TW_STATUS_PCI_ABORT 0x00100000 +#define TW_STATUS_HOST_INTERRUPT 0x00080000 +#define TW_STATUS_ATTENTION_INTERRUPT 0x00040000 +#define TW_STATUS_COMMAND_INTERRUPT 0x00020000 +#define TW_STATUS_RESPONSE_INTERRUPT 0x00010000 +#define TW_STATUS_COMMAND_QUEUE_FULL 0x00008000 +#define TW_STATUS_RESPONSE_QUEUE_EMPTY 0x00004000 +#define TW_STATUS_MICROCONTROLLER_READY 0x00002000 +#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000 +#define TW_STATUS_EXPECTED_BITS 0x00002000 +#define TW_STATUS_UNEXPECTED_BITS 0x00F00008 +#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008 +#define TW_STATUS_VALID_INTERRUPT 0x00DF0008 + +/* RESPONSE QUEUE BIT DEFINITIONS */ +#define TW_RESPONSE_ID_MASK 0x00000FF0 + +/* PCI related defines */ +#define TW_DEVICE_NAME "3w-9xxx" +#define TW_NUMDEVICES 1 +#define TW_PCI_CLEAR_PARITY_ERRORS 0xc100 +#define TW_PCI_CLEAR_PCI_ABORT 0x2000 + +/* Command packet opcodes used by the driver */ +#define TW_OP_INIT_CONNECTION 0x1 +#define TW_OP_GET_PARAM 0x12 +#define TW_OP_SET_PARAM 0x13 +#define TW_OP_EXECUTE_SCSI 0x10 +#define TW_OP_DOWNLOAD_FIRMWARE 0x16 +#define TW_OP_RESET 0x1C + +/* Asynchronous Event Notification (AEN) codes used by the driver */ +#define TW_AEN_QUEUE_EMPTY 0x0000 +#define TW_AEN_SOFT_RESET 0x0001 +#define TW_AEN_SYNC_TIME_WITH_HOST 0x031 +#define TW_AEN_SEVERITY_ERROR 0x1 +#define TW_AEN_SEVERITY_DEBUG 0x4 +#define TW_AEN_NOT_RETRIEVED 0x1 +#define TW_AEN_RETRIEVED 0x2 + +/* Command state defines */ +#define TW_S_INITIAL 0x1 /* Initial state */ +#define TW_S_STARTED 0x2 /* Id in use */ +#define TW_S_POSTED 0x4 /* Posted to the controller */ +#define TW_S_PENDING 0x8 /* Waiting to be posted in isr */ +#define TW_S_COMPLETED 0x10 /* Completed by isr */ +#define TW_S_FINISHED 0x20 /* I/O completely done */ + +/* Compatibility defines */ +#define TW_9000_ARCH_ID 0x5 +#define TW_CURRENT_FW_SRL 24 +#define TW_CURRENT_FW_BUILD 5 +#define TW_CURRENT_FW_BRANCH 1 + +/* Phase defines */ +#define TW_PHASE_INITIAL 0 +#define TW_PHASE_SINGLE 1 +#define TW_PHASE_SGLIST 2 + +/* Misc defines */ +#define TW_SECTOR_SIZE 512 +#define TW_ALIGNMENT_9000 4 /* 4 bytes */ +#define TW_ALIGNMENT_9000_SGL 0x3 +#define TW_MAX_UNITS 16 +#define TW_INIT_MESSAGE_CREDITS 0x100 +#define TW_INIT_COMMAND_PACKET_SIZE 0x3 +#define TW_INIT_COMMAND_PACKET_SIZE_EXTENDED 0x6 +#define TW_EXTENDED_INIT_CONNECT 0x2 +#define TW_BUNDLED_FW_SAFE_TO_FLASH 0x4 +#define TW_CTLR_FW_RECOMMENDS_FLASH 0x8 +#define TW_CTLR_FW_COMPATIBLE 0x2 +#define TW_BASE_FW_SRL 0x17 +#define TW_BASE_FW_BRANCH 0 +#define TW_BASE_FW_BUILD 1 +#if BITS_PER_LONG > 32 +#define TW_APACHE_MAX_SGL_LENGTH 72 +#define TW_ESCALADE_MAX_SGL_LENGTH 41 +#define TW_APACHE_CMD_PKT_SIZE 5 +#else +#define TW_APACHE_MAX_SGL_LENGTH 109 +#define TW_ESCALADE_MAX_SGL_LENGTH 62 +#define TW_APACHE_CMD_PKT_SIZE 4 +#endif +#define TW_ATA_PASS_SGL_MAX 60 +#define TW_Q_LENGTH 256 +#define TW_Q_START 0 +#define TW_MAX_SLOT 32 +#define TW_MAX_RESET_TRIES 2 +#define TW_MAX_CMDS_PER_LUN 254 +#define TW_MAX_RESPONSE_DRAIN 256 +#define TW_MAX_AEN_DRAIN 40 +#define TW_IN_IOCTL 2 
+#define TW_IN_CHRDEV_IOCTL 3 +#define TW_IN_ATTENTION_LOOP 4 +#define TW_MAX_SECTORS 256 +#define TW_AEN_WAIT_TIME 1000 +#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */ +#define TW_MAX_CDB_LEN 16 +#define TW_ISR_DONT_COMPLETE 2 +#define TW_ISR_DONT_RESULT 3 +#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */ +#define TW_IOCTL_CHRDEV_FREE -1 +#define TW_COMMAND_OFFSET 128 /* 128 bytes */ +#define TW_VERSION_TABLE 0x0402 +#define TW_TIMEKEEP_TABLE 0x040A +#define TW_INFORMATION_TABLE 0x0403 +#define TW_PARAM_FWVER 3 +#define TW_PARAM_FWVER_LENGTH 16 +#define TW_PARAM_BIOSVER 4 +#define TW_PARAM_BIOSVER_LENGTH 16 +#define TW_PARAM_PORTCOUNT 3 +#define TW_PARAM_PORTCOUNT_LENGTH 1 +#define TW_MIN_SGL_LENGTH 0x200 /* 512 bytes */ +#define TW_MAX_SENSE_LENGTH 256 +#define TW_EVENT_SOURCE_AEN 0x1000 +#define TW_EVENT_SOURCE_COMMAND 0x1001 +#define TW_EVENT_SOURCE_PCHIP 0x1002 +#define TW_EVENT_SOURCE_DRIVER 0x1003 +#define TW_IOCTL_GET_COMPATIBILITY_INFO 0x101 +#define TW_IOCTL_GET_LAST_EVENT 0x102 +#define TW_IOCTL_GET_FIRST_EVENT 0x103 +#define TW_IOCTL_GET_NEXT_EVENT 0x104 +#define TW_IOCTL_GET_PREVIOUS_EVENT 0x105 +#define TW_IOCTL_GET_LOCK 0x106 +#define TW_IOCTL_RELEASE_LOCK 0x107 +#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 +#define TW_IOCTL_ERROR_STATUS_NOT_LOCKED 0x1001 // Not locked +#define TW_IOCTL_ERROR_STATUS_LOCKED 0x1002 // Already locked +#define TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS 0x1003 // No more events +#define TW_IOCTL_ERROR_STATUS_AEN_CLOBBER 0x1004 // AEN clobber occurred +#define TW_IOCTL_ERROR_OS_EFAULT -EFAULT // Bad address +#define TW_IOCTL_ERROR_OS_EINTR -EINTR // Interrupted system call +#define TW_IOCTL_ERROR_OS_EINVAL -EINVAL // Invalid argument +#define TW_IOCTL_ERROR_OS_ENOMEM -ENOMEM // Out of memory +#define TW_IOCTL_ERROR_OS_ERESTARTSYS -ERESTARTSYS // Restart system call +#define TW_IOCTL_ERROR_OS_EIO -EIO // I/O error +#define TW_IOCTL_ERROR_OS_ENOTTY -ENOTTY // Not a typewriter +#define TW_IOCTL_ERROR_OS_ENODEV -ENODEV // No such device +#define TW_ALLOCATION_LENGTH 128 +#define TW_SENSE_DATA_LENGTH 18 +#define TW_STATUS_CHECK_CONDITION 2 +#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a +#define TW_ERROR_UNIT_OFFLINE 0x128 +#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3 +#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4 +#define TW_MESSAGE_SOURCE_LINUX_DRIVER 6 +#define TW_DRIVER TW_MESSAGE_SOURCE_LINUX_DRIVER +#define TW_MESSAGE_SOURCE_LINUX_OS 9 +#define TW_OS TW_MESSAGE_SOURCE_LINUX_OS +#if BITS_PER_LONG > 32 +#define TW_COMMAND_SIZE 5 +#define TW_DMA_MASK DMA_64BIT_MASK +#else +#define TW_COMMAND_SIZE 4 +#define TW_DMA_MASK DMA_32BIT_MASK +#endif +#ifndef PCI_DEVICE_ID_3WARE_9000 +#define PCI_DEVICE_ID_3WARE_9000 0x1002 +#endif + +/* Bitmask macros to eliminate bitfields */ + +/* opcode: 5, reserved: 3 */ +#define TW_OPRES_IN(x,y) ((x << 5) | (y & 0x1f)) +#define TW_OP_OUT(x) (x & 0x1f) + +/* opcode: 5, sgloffset: 3 */ +#define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f)) +#define TW_SGL_OUT(x) ((x >> 5) & 0x7) + +/* severity: 3, reserved: 5 */ +#define TW_SEV_OUT(x) (x & 0x7) + +/* reserved_1: 4, response_id: 8, reserved_2: 20 */ +#define TW_RESID_OUT(x) ((x >> 4) & 0xff) + +/* Macros */ +#define TW_CONTROL_REG_ADDR(x) (x->base_addr) +#define TW_STATUS_REG_ADDR(x) ((unsigned char *)x->base_addr + 0x4) +#if BITS_PER_LONG > 32 +#define TW_COMMAND_QUEUE_REG_ADDR(x) ((unsigned char *)x->base_addr + 0x20) +#else +#define TW_COMMAND_QUEUE_REG_ADDR(x) ((unsigned char *)x->base_addr + 0x8) +#endif +#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char 
*)x->base_addr + 0xC) +#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_DISABLE_INTERRUPTS(x) (writel(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) +#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) +#define TW_MASK_COMMAND_INTERRUPT(x) (writel(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_UNMASK_COMMAND_INTERRUPT(x) (writel(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x))) +#define TW_SOFT_RESET(x) (writel(TW_CONTROL_ISSUE_SOFT_RESET | \ + TW_CONTROL_CLEAR_HOST_INTERRUPT | \ + TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \ + TW_CONTROL_MASK_COMMAND_INTERRUPT | \ + TW_CONTROL_MASK_RESPONSE_INTERRUPT | \ + TW_CONTROL_CLEAR_ERROR_STATUS | \ + TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) +#define TW_PRINTK(h,a,b,c) { \ +if (h) \ +printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s.\n",h->host_no,a,b,c); \ +else \ +printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \ +} + +#pragma pack(1) + +/* Scatter Gather List Entry */ +typedef struct TAG_TW_SG_Entry { + unsigned long address; + u32 length; +} TW_SG_Entry; + +/* Command Packet */ +typedef struct TW_Command { + unsigned char opcode__sgloffset; + unsigned char size; + unsigned char request_id; + unsigned char unit__hostid; + /* Second DWORD */ + unsigned char status; + unsigned char flags; + union { + unsigned short block_count; + unsigned short parameter_count; + } byte6_offset; + union { + struct { + u32 lba; + TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH]; +#if BITS_PER_LONG > 32 + u32 padding[2]; /* pad to 512 bytes */ +#else + u32 padding; +#endif + } io; + struct { + TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH]; +#if BITS_PER_LONG > 32 + u32 padding[3]; +#else + u32 padding[2]; +#endif + } param; + } byte8_offset; +} TW_Command; + +/* Scatter gather element for 9000+ controllers */ +typedef struct TAG_TW_SG_Apache { + unsigned long address; + u32 length; +} TW_SG_Apache; + +/* Command Packet for 9000+ controllers */ +typedef struct TAG_TW_Command_Apache { + unsigned char opcode__reserved; + unsigned char unit; + unsigned short request_id; + unsigned char status; + unsigned char sgl_offset; + unsigned short sgl_entries; + unsigned char cdb[16]; + TW_SG_Apache sg_list[TW_APACHE_MAX_SGL_LENGTH]; +#if BITS_PER_LONG > 32 + unsigned char padding[8]; +#endif +} TW_Command_Apache; + +/* New command packet header */ +typedef struct TAG_TW_Command_Apache_Header { + unsigned char sense_data[TW_SENSE_DATA_LENGTH]; + struct { + char reserved[4]; + unsigned short error; + unsigned char padding; + unsigned char severity__reserved; + } status_block; + unsigned char err_specific_desc[98]; + struct { + unsigned char size_header; + unsigned short reserved; + unsigned char size_sense; + } header_desc; +} TW_Command_Apache_Header; + +/* This struct is a union of the 2 command packets */ +typedef struct TAG_TW_Command_Full { + TW_Command_Apache_Header header; + union { + TW_Command oldcommand; + TW_Command_Apache newcommand; + } command; +} TW_Command_Full; + +/* Initconnection structure */ +typedef struct TAG_TW_Initconnect { + unsigned char opcode__reserved; + unsigned char 
size; + unsigned char request_id; + unsigned char res2; + unsigned char status; + unsigned char flags; + unsigned short message_credits; + u32 features; + unsigned short fw_srl; + unsigned short fw_arch_id; + unsigned short fw_branch; + unsigned short fw_build; + u32 result; +} TW_Initconnect; + +/* Event info structure */ +typedef struct TAG_TW_Event +{ + unsigned int sequence_id; + unsigned int time_stamp_sec; + unsigned short aen_code; + unsigned char severity; + unsigned char retrieved; + unsigned char repeat_count; + unsigned char parameter_len; + unsigned char parameter_data[98]; +} TW_Event; + +typedef struct TAG_TW_Ioctl_Driver_Command { + unsigned int control_code; + unsigned int status; + unsigned int unique_id; + unsigned int sequence_id; + unsigned int os_specific; + unsigned int buffer_length; +} TW_Ioctl_Driver_Command; + +typedef struct TAG_TW_Ioctl_Apache { + TW_Ioctl_Driver_Command driver_command; + char padding[488]; + TW_Command_Full firmware_command; + char data_buffer[1]; +} TW_Ioctl_Buf_Apache; + +/* Lock structure for ioctl get/release lock */ +typedef struct TAG_TW_Lock { + unsigned long timeout_msec; + unsigned long time_remaining_msec; + unsigned long force_flag; +} TW_Lock; + +/* GetParam descriptor */ +typedef struct { + unsigned short table_id; + unsigned short parameter_id; + unsigned short parameter_size_bytes; + unsigned short actual_parameter_size_bytes; + unsigned char data[1]; +} TW_Param_Apache, *PTW_Param_Apache; + +/* Response queue */ +typedef union TAG_TW_Response_Queue { + u32 response_id; + u32 value; +} TW_Response_Queue; + +typedef struct TAG_TW_Info { + char *buffer; + int length; + int offset; + int position; +} TW_Info; + +/* Compatibility information structure */ +typedef struct TAG_TW_Compatibility_Info +{ + char driver_version[32]; + unsigned short working_srl; + unsigned short working_branch; + unsigned short working_build; +} TW_Compatibility_Info; + +typedef struct TAG_TW_Device_Extension { + u32 *base_addr; + unsigned long *generic_buffer_virt[TW_Q_LENGTH]; + unsigned long generic_buffer_phys[TW_Q_LENGTH]; + TW_Command_Full *command_packet_virt[TW_Q_LENGTH]; + unsigned long command_packet_phys[TW_Q_LENGTH]; + struct pci_dev *tw_pci_dev; + struct scsi_cmnd *srb[TW_Q_LENGTH]; + unsigned char free_queue[TW_Q_LENGTH]; + unsigned char free_head; + unsigned char free_tail; + unsigned char pending_queue[TW_Q_LENGTH]; + unsigned char pending_head; + unsigned char pending_tail; + int state[TW_Q_LENGTH]; + unsigned int posted_request_count; + unsigned int max_posted_request_count; + unsigned int pending_request_count; + unsigned int max_pending_request_count; + unsigned int max_sgl_entries; + unsigned int sgl_entries; + unsigned int num_aborts; + unsigned int num_resets; + unsigned int sector_count; + unsigned int max_sector_count; + unsigned int aen_count; + struct Scsi_Host *host; + long flags; + int reset_print; + TW_Event *event_queue[TW_Q_LENGTH]; + unsigned char error_index; + unsigned char event_queue_wrapped; + unsigned int error_sequence_id; + int ioctl_sem_lock; + u32 ioctl_msec; + int chrdev_request_id; + wait_queue_head_t ioctl_wqueue; + struct semaphore ioctl_sem; + char aen_clobber; + unsigned short working_srl; + unsigned short working_branch; + unsigned short working_build; +} TW_Device_Extension; + +#pragma pack() + +#endif /* _3W_9XXX_H */ + diff --git a/drivers/scsi/fdomain.h b/drivers/scsi/fdomain.h new file mode 100644 index 000000000..47021d9d4 --- /dev/null +++ b/drivers/scsi/fdomain.h @@ -0,0 +1,24 @@ +/* + * fdomain.c -- 
Future Domain TMC-16x0 SCSI driver + * Author: Rickard E. Faith, faith@cs.unc.edu + * Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +extern struct scsi_host_template fdomain_driver_template; +extern int fdomain_setup(char *str); +extern struct Scsi_Host *__fdomain_16x0_detect(struct scsi_host_template *tpnt ); +extern int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c new file mode 100644 index 000000000..e9c5098ed --- /dev/null +++ b/drivers/scsi/ipr.c @@ -0,0 +1,6021 @@ +/* + * ipr.c -- driver for IBM Power Linux RAID adapters + * + * Written By: Brian King, IBM Corporation + * + * Copyright (C) 2003, 2004 IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +/* + * Notes: + * + * This driver is used to control the following SCSI adapters: + * + * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B + * + * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter + * PCI-X Dual Channel Ultra 320 SCSI Adapter + * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card + * Embedded SCSI adapter on p615 and p655 systems + * + * Supported Hardware Features: + * - Ultra 320 SCSI controller + * - PCI-X host interface + * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine + * - Non-Volatile Write Cache + * - Supports attachment of non-RAID disks, tape, and optical devices + * - RAID Levels 0, 5, 10 + * - Hot spare + * - Background Parity Checking + * - Background Data Scrubbing + * - Ability to increase the capacity of an existing RAID 5 disk array + * by adding disks + * + * Driver Features: + * - Tagged command queuing + * - Adapter microcode download + * - PCI hot plug + * - SCSI device hot plug + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipr.h" + +/* + * Global Data + */ +static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head); +static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL; +static unsigned int ipr_max_speed = 1; +static int ipr_testmode = 0; +static spinlock_t ipr_driver_lock = SPIN_LOCK_UNLOCKED; + +/* This table describes the differences between DMA controller chips */ +static const struct ipr_chip_cfg_t ipr_chip_cfg[] = { + { /* Gemstone */ + .mailbox = 0x0042C, + .cache_line_size = 0x20, + { + .set_interrupt_mask_reg = 0x0022C, + .clr_interrupt_mask_reg = 0x00230, + .sense_interrupt_mask_reg = 0x0022C, + .clr_interrupt_reg = 0x00228, + .sense_interrupt_reg = 0x00224, + .ioarrin_reg = 0x00404, + .sense_uproc_interrupt_reg = 0x00214, + .set_uproc_interrupt_reg = 0x00214, + .clr_uproc_interrupt_reg = 0x00218 + } + }, + { /* Snipe */ + .mailbox = 0x0052C, + .cache_line_size = 0x20, + { + .set_interrupt_mask_reg = 0x00288, + .clr_interrupt_mask_reg = 0x0028C, + .sense_interrupt_mask_reg = 0x00288, + .clr_interrupt_reg = 0x00284, + .sense_interrupt_reg = 0x00280, + .ioarrin_reg = 0x00504, + .sense_uproc_interrupt_reg = 0x00290, + .set_uproc_interrupt_reg = 0x00290, + .clr_uproc_interrupt_reg = 0x00294 + } + }, +}; + +static int ipr_max_bus_speeds [] = { + IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE +}; + +MODULE_AUTHOR("Brian King "); +MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver"); +module_param_named(max_speed, ipr_max_speed, uint, 0); +MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320"); +module_param_named(log_level, ipr_log_level, uint, 0); +MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver"); +module_param_named(testmode, ipr_testmode, int, 0); +MODULE_PARM_DESC(testmode, "DANGEROUS!!! 
Allows unsupported configurations"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(IPR_DRIVER_VERSION); + +static const char *ipr_gpdd_dev_end_states[] = { + "Command complete", + "Terminated by host", + "Terminated by device reset", + "Terminated by bus reset", + "Unknown", + "Command not started" +}; + +static const char *ipr_gpdd_dev_bus_phases[] = { + "Bus free", + "Arbitration", + "Selection", + "Message out", + "Command", + "Message in", + "Data out", + "Data in", + "Status", + "Reselection", + "Unknown" +}; + +/* A constant array of IOASCs/URCs/Error Messages */ +static const +struct ipr_error_table_t ipr_error_table[] = { + {0x00000000, 1, 1, + "8155: An unknown error was received"}, + {0x00330000, 0, 0, + "Soft underlength error"}, + {0x005A0000, 0, 0, + "Command to be cancelled not found"}, + {0x00808000, 0, 0, + "Qualified success"}, + {0x01080000, 1, 1, + "FFFE: Soft device bus error recovered by the IOA"}, + {0x01170600, 0, 1, + "FFF9: Device sector reassign successful"}, + {0x01170900, 0, 1, + "FFF7: Media error recovered by device rewrite procedures"}, + {0x01180200, 0, 1, + "7001: IOA sector reassignment successful"}, + {0x01180500, 0, 1, + "FFF9: Soft media error. Sector reassignment recommended"}, + {0x01180600, 0, 1, + "FFF7: Media error recovered by IOA rewrite procedures"}, + {0x01418000, 0, 1, + "FF3D: Soft PCI bus error recovered by the IOA"}, + {0x01440000, 1, 1, + "FFF6: Device hardware error recovered by the IOA"}, + {0x01448100, 0, 1, + "FFF6: Device hardware error recovered by the device"}, + {0x01448200, 1, 1, + "FF3D: Soft IOA error recovered by the IOA"}, + {0x01448300, 0, 1, + "FFFA: Undefined device response recovered by the IOA"}, + {0x014A0000, 1, 1, + "FFF6: Device bus error, message or command phase"}, + {0x015D0000, 0, 1, + "FFF6: Failure prediction threshold exceeded"}, + {0x015D9200, 0, 1, + "8009: Impending cache battery pack failure"}, + {0x02040400, 0, 0, + "34FF: Disk device format in progress"}, + {0x023F0000, 0, 0, + "Synchronization required"}, + {0x024E0000, 0, 0, + "No ready, IOA shutdown"}, + {0x02670100, 0, 1, + "3020: Storage subsystem configuration error"}, + {0x03110B00, 0, 0, + "FFF5: Medium error, data unreadable, recommend reassign"}, + {0x03110C00, 0, 0, + "7000: Medium error, data unreadable, do not reassign"}, + {0x03310000, 0, 1, + "FFF3: Disk media format bad"}, + {0x04050000, 0, 1, + "3002: Addressed device failed to respond to selection"}, + {0x04080000, 1, 1, + "3100: Device bus error"}, + {0x04080100, 0, 1, + "3109: IOA timed out a device command"}, + {0x04088000, 0, 0, + "3120: SCSI bus is not operational"}, + {0x04118000, 0, 1, + "9000: IOA reserved area data check"}, + {0x04118100, 0, 1, + "9001: IOA reserved area invalid data pattern"}, + {0x04118200, 0, 1, + "9002: IOA reserved area LRC error"}, + {0x04320000, 0, 1, + "102E: Out of alternate sectors for disk storage"}, + {0x04330000, 1, 1, + "FFF4: Data transfer underlength error"}, + {0x04338000, 1, 1, + "FFF4: Data transfer overlength error"}, + {0x043E0100, 0, 1, + "3400: Logical unit failure"}, + {0x04408500, 0, 1, + "FFF4: Device microcode is corrupt"}, + {0x04418000, 1, 1, + "8150: PCI bus error"}, + {0x04430000, 1, 0, + "Unsupported device bus message received"}, + {0x04440000, 1, 1, + "FFF4: Disk device problem"}, + {0x04448200, 1, 1, + "8150: Permanent IOA failure"}, + {0x04448300, 0, 1, + "3010: Disk device returned wrong response to IOA"}, + {0x04448400, 0, 1, + "8151: IOA microcode error"}, + {0x04448500, 0, 0, + "Device bus status error"}, + {0x04448600, 0, 1, + 
"8157: IOA error requiring IOA reset to recover"}, + {0x04490000, 0, 0, + "Message reject received from the device"}, + {0x04449200, 0, 1, + "8008: A permanent cache battery pack failure occurred"}, + {0x0444A000, 0, 1, + "9090: Disk unit has been modified after the last known status"}, + {0x0444A200, 0, 1, + "9081: IOA detected device error"}, + {0x0444A300, 0, 1, + "9082: IOA detected device error"}, + {0x044A0000, 1, 1, + "3110: Device bus error, message or command phase"}, + {0x04670400, 0, 1, + "9091: Incorrect hardware configuration change has been detected"}, + {0x046E0000, 0, 1, + "FFF4: Command to logical unit failed"}, + {0x05240000, 1, 0, + "Illegal request, invalid request type or request packet"}, + {0x05250000, 0, 0, + "Illegal request, invalid resource handle"}, + {0x05260000, 0, 0, + "Illegal request, invalid field in parameter list"}, + {0x05260100, 0, 0, + "Illegal request, parameter not supported"}, + {0x05260200, 0, 0, + "Illegal request, parameter value invalid"}, + {0x052C0000, 0, 0, + "Illegal request, command sequence error"}, + {0x06040500, 0, 1, + "9031: Array protection temporarily suspended, protection resuming"}, + {0x06040600, 0, 1, + "9040: Array protection temporarily suspended, protection resuming"}, + {0x06290000, 0, 1, + "FFFB: SCSI bus was reset"}, + {0x06290500, 0, 0, + "FFFE: SCSI bus transition to single ended"}, + {0x06290600, 0, 0, + "FFFE: SCSI bus transition to LVD"}, + {0x06298000, 0, 1, + "FFFB: SCSI bus was reset by another initiator"}, + {0x063F0300, 0, 1, + "3029: A device replacement has occurred"}, + {0x064C8000, 0, 1, + "9051: IOA cache data exists for a missing or failed device"}, + {0x06670100, 0, 1, + "9025: Disk unit is not supported at its physical location"}, + {0x06670600, 0, 1, + "3020: IOA detected a SCSI bus configuration error"}, + {0x06678000, 0, 1, + "3150: SCSI bus configuration error"}, + {0x06690200, 0, 1, + "9041: Array protection temporarily suspended"}, + {0x066B0200, 0, 1, + "9030: Array no longer protected due to missing or failed disk unit"}, + {0x07270000, 0, 0, + "Failure due to other device"}, + {0x07278000, 0, 1, + "9008: IOA does not support functions expected by devices"}, + {0x07278100, 0, 1, + "9010: Cache data associated with attached devices cannot be found"}, + {0x07278200, 0, 1, + "9011: Cache data belongs to devices other than those attached"}, + {0x07278400, 0, 1, + "9020: Array missing 2 or more devices with only 1 device present"}, + {0x07278500, 0, 1, + "9021: Array missing 2 or more devices with 2 or more devices present"}, + {0x07278600, 0, 1, + "9022: Exposed array is missing a required device"}, + {0x07278700, 0, 1, + "9023: Array member(s) not at required physical locations"}, + {0x07278800, 0, 1, + "9024: Array not functional due to present hardware configuration"}, + {0x07278900, 0, 1, + "9026: Array not functional due to present hardware configuration"}, + {0x07278A00, 0, 1, + "9027: Array is missing a device and parity is out of sync"}, + {0x07278B00, 0, 1, + "9028: Maximum number of arrays already exist"}, + {0x07278C00, 0, 1, + "9050: Required cache data cannot be located for a disk unit"}, + {0x07278D00, 0, 1, + "9052: Cache data exists for a device that has been modified"}, + {0x07278F00, 0, 1, + "9054: IOA resources not available due to previous problems"}, + {0x07279100, 0, 1, + "9092: Disk unit requires initialization before use"}, + {0x07279200, 0, 1, + "9029: Incorrect hardware configuration change has been detected"}, + {0x07279600, 0, 1, + "9060: One or more disk pairs are missing 
from an array"}, + {0x07279700, 0, 1, + "9061: One or more disks are missing from an array"}, + {0x07279800, 0, 1, + "9062: One or more disks are missing from an array"}, + {0x07279900, 0, 1, + "9063: Maximum number of functional arrays has been exceeded"}, + {0x0B260000, 0, 0, + "Aborted command, invalid descriptor"}, + {0x0B5A0000, 0, 0, + "Command terminated by host"} +}; + +static const struct ipr_ses_table_entry ipr_ses_table[] = { + { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 }, + { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 }, + { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */ + { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */ + { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */ + { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */ + { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 }, + { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 }, + { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, + { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 }, + { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 }, + { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 }, + { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 } +}; + +/* + * Function Prototypes + */ +static int ipr_reset_alert(struct ipr_cmnd *); +static void ipr_process_ccn(struct ipr_cmnd *); +static void ipr_process_error(struct ipr_cmnd *); +static void ipr_reset_ioa_job(struct ipr_cmnd *); +static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *, + enum ipr_shutdown_type); + +#ifdef CONFIG_SCSI_IPR_TRACE +/** + * ipr_trc_hook - Add a trace entry to the driver trace + * @ipr_cmd: ipr command struct + * @type: trace type + * @add_data: additional data + * + * Return value: + * none + **/ +static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, + u8 type, u32 add_data) +{ + struct ipr_trace_entry *trace_entry; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++]; + trace_entry->time = jiffies; + trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; + trace_entry->type = type; + trace_entry->cmd_index = ipr_cmd->cmd_index; + trace_entry->res_handle = ipr_cmd->ioarcb.res_handle; + trace_entry->u.add_data = add_data; +} +#else +#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0) +#endif + +/** + * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + + memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); + ioarcb->write_data_transfer_length = 0; + ioarcb->read_data_transfer_length = 0; + ioarcb->write_ioadl_len = 0; + ioarcb->read_ioadl_len = 0; + ioasa->ioasc = 0; + ioasa->residual_data_len = 0; + + ipr_cmd->scsi_cmd = NULL; + ipr_cmd->sense_buffer[0] = 0; + ipr_cmd->dma_use_sg = 0; +} + +/** + * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd) +{ + ipr_reinit_ipr_cmnd(ipr_cmd); + ipr_cmd->u.scratch = 0; + init_timer(&ipr_cmd->timer); +} + +/** + * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block + * @ioa_cfg: ioa config struct + * + * Return value: + * pointer to ipr command struct + **/ +static +struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_cmnd *ipr_cmd; + + ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue); + list_del(&ipr_cmd->queue); + 
ipr_init_ipr_cmnd(ipr_cmd); + + return ipr_cmd; +} + +/** + * ipr_unmap_sglist - Unmap scatterlist if mapped + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * Return value: + * nothing + **/ +static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + + if (ipr_cmd->dma_use_sg) { + if (scsi_cmd->use_sg > 0) { + pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer, + scsi_cmd->use_sg, + scsi_cmd->sc_data_direction); + } else { + pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle, + scsi_cmd->request_bufflen, + scsi_cmd->sc_data_direction); + } + } +} + +/** + * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts + * @ioa_cfg: ioa config struct + * @clr_ints: interrupts to clear + * + * This function masks all interrupts on the adapter, then clears the + * interrupts specified in the mask + * + * Return value: + * none + **/ +static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg, + u32 clr_ints) +{ + volatile u32 int_reg; + + /* Stop new interrupts */ + ioa_cfg->allow_interrupts = 0; + + /* Set interrupt mask to stop all new interrupts */ + writel(~0, ioa_cfg->regs.set_interrupt_mask_reg); + + /* Clear any pending interrupts */ + writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); +} + +/** + * ipr_save_pcix_cmd_reg - Save PCI-X command register + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) +{ + int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); + + if (pcix_cmd_reg == 0) { + dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); + return -EIO; + } + + if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg, + &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { + dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n"); + return -EIO; + } + + ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO; + return 0; +} + +/** + * ipr_set_pcix_cmd_reg - Setup PCI-X command register + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) +{ + int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX); + + if (pcix_cmd_reg) { + if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg, + ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) { + dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n"); + return -EIO; + } + } else { + dev_err(&ioa_cfg->pdev->dev, + "Failed to setup PCI-X command register\n"); + return -EIO; + } + + return 0; +} + +/** + * ipr_scsi_eh_done - mid-layer done function for aborted ops + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler for + * ops generated by the SCSI mid-layer which are being aborted. + * + * Return value: + * none + **/ +static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + + scsi_cmd->result |= (DID_ERROR << 16); + + ipr_unmap_sglist(ioa_cfg, ipr_cmd); + scsi_cmd->scsi_done(scsi_cmd); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); +} + +/** + * ipr_fail_all_ops - Fails all outstanding ops. + * @ioa_cfg: ioa config struct + * + * This function fails all outstanding ops. 
+ * + * Return value: + * none + **/ +static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_cmnd *ipr_cmd, *temp; + + ENTER; + list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) { + list_del(&ipr_cmd->queue); + + ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET); + ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID); + + if (ipr_cmd->scsi_cmd) + ipr_cmd->done = ipr_scsi_eh_done; + + ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET); + del_timer(&ipr_cmd->timer); + ipr_cmd->done(ipr_cmd); + } + + LEAVE; +} + +/** + * ipr_do_req - Send driver initiated requests. + * @ipr_cmd: ipr command struct + * @done: done function + * @timeout_func: timeout function + * @timeout: timeout value + * + * This function sends the specified command to the adapter with the + * timeout given. The done function is invoked on command completion. + * + * Return value: + * none + **/ +static void ipr_do_req(struct ipr_cmnd *ipr_cmd, + void (*done) (struct ipr_cmnd *), + void (*timeout_func) (struct ipr_cmnd *), u32 timeout) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); + + ipr_cmd->done = done; + + ipr_cmd->timer.data = (unsigned long) ipr_cmd; + ipr_cmd->timer.expires = jiffies + timeout; + ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func; + + add_timer(&ipr_cmd->timer); + + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0); + + mb(); + writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), + ioa_cfg->regs.ioarrin_reg); +} + +/** + * ipr_internal_cmd_done - Op done function for an internally generated op. + * @ipr_cmd: ipr command struct + * + * This function is the op done function for an internally generated, + * blocking op. It simply wakes the sleeping thread. + * + * Return value: + * none + **/ +static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd) +{ + if (ipr_cmd->u.sibling) + ipr_cmd->u.sibling = NULL; + else + complete(&ipr_cmd->completion); +} + +/** + * ipr_send_blocking_cmd - Send command and sleep on its completion. + * @ipr_cmd: ipr command struct + * @timeout_func: function to invoke if command times out + * @timeout: timeout + * + * Return value: + * none + **/ +static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, + void (*timeout_func) (struct ipr_cmnd *ipr_cmd), + u32 timeout) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + init_completion(&ipr_cmd->completion); + ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout); + + spin_unlock_irq(ioa_cfg->host->host_lock); + wait_for_completion(&ipr_cmd->completion); + spin_lock_irq(ioa_cfg->host->host_lock); +} + +/** + * ipr_send_hcam - Send an HCAM to the adapter. + * @ioa_cfg: ioa config struct + * @type: HCAM type + * @hostrcb: hostrcb struct + * + * This function will send a Host Controlled Async command to the adapter. + * If HCAMs are currently not allowed to be issued to the adapter, it will + * place the hostrcb on the free queue. 
+ * + * Return value: + * none + **/ +static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioarcb *ioarcb; + + if (ioa_cfg->allow_cmds) { + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); + list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q); + + ipr_cmd->u.hostrcb = hostrcb; + ioarcb = &ipr_cmd->ioarcb; + + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM; + ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC; + ioarcb->cmd_pkt.cdb[1] = type; + ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff; + + ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam)); + ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ipr_cmd->ioadl[0].flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam)); + ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma); + + if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE) + ipr_cmd->done = ipr_process_ccn; + else + ipr_cmd->done = ipr_process_error; + + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR); + + mb(); + writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), + ioa_cfg->regs.ioarrin_reg); + } else { + list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); + } +} + +/** + * ipr_init_res_entry - Initialize a resource entry struct. + * @res: resource entry struct + * + * Return value: + * none + **/ +static void ipr_init_res_entry(struct ipr_resource_entry *res) +{ + res->needs_sync_complete = 1; + res->in_erp = 0; + res->add_to_ml = 0; + res->del_from_ml = 0; + res->resetting_device = 0; + res->tcq_active = 0; + res->qdepth = IPR_MAX_CMD_PER_LUN; + res->sdev = NULL; +} + +/** + * ipr_handle_config_change - Handle a config change from the adapter + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb + * + * Return value: + * none + **/ +static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_resource_entry *res = NULL; + struct ipr_config_table_entry *cfgte; + u32 is_ndn = 1; + + cfgte = &hostrcb->hcam.u.ccn.cfgte; + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr, + sizeof(cfgte->res_addr))) { + is_ndn = 0; + break; + } + } + + if (is_ndn) { + if (list_empty(&ioa_cfg->free_res_q)) { + ipr_send_hcam(ioa_cfg, + IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, + hostrcb); + return; + } + + res = list_entry(ioa_cfg->free_res_q.next, + struct ipr_resource_entry, queue); + + list_del(&res->queue); + ipr_init_res_entry(res); + list_add_tail(&res->queue, &ioa_cfg->used_res_q); + } + + memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); + + if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) { + if (res->sdev) { + res->sdev->hostdata = NULL; + res->del_from_ml = 1; + if (ioa_cfg->allow_ml_add_del) + schedule_work(&ioa_cfg->work_q); + } else + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + } else if (!res->sdev) { + res->add_to_ml = 1; + if (ioa_cfg->allow_ml_add_del) + schedule_work(&ioa_cfg->work_q); + } + + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); +} + +/** + * ipr_process_ccn - Op done function for a CCN. 
+ * @ipr_cmd: ipr command struct + * + * This function is the op done function for a configuration + * change notification host controlled async from the adapter. + * + * Return value: + * none + **/ +static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + list_del(&hostrcb->queue); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + + if (ioasc) { + if (ioasc != IPR_IOASC_IOA_WAS_RESET) + dev_err(&ioa_cfg->pdev->dev, + "Host RCB failed with IOASC: 0x%08X\n", ioasc); + + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); + } else { + ipr_handle_config_change(ioa_cfg, hostrcb); + } +} + +/** + * ipr_log_vpd - Log the passed VPD to the error log. + * @vpids: vendor/product id struct + * @serial_num: serial number string + * + * Return value: + * none + **/ +static void ipr_log_vpd(struct ipr_std_inq_vpids *vpids, u8 *serial_num) +{ + char buffer[max_t(int, sizeof(struct ipr_std_inq_vpids), + IPR_SERIAL_NUM_LEN) + 1]; + + memcpy(buffer, vpids, sizeof(struct ipr_std_inq_vpids)); + buffer[sizeof(struct ipr_std_inq_vpids)] = '\0'; + ipr_err("Vendor/Product ID: %s\n", buffer); + + memcpy(buffer, serial_num, IPR_SERIAL_NUM_LEN); + buffer[IPR_SERIAL_NUM_LEN] = '\0'; + ipr_err(" Serial Number: %s\n", buffer); +} + +/** + * ipr_log_cache_error - Log a cache error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + struct ipr_hostrcb_type_02_error *error = + &hostrcb->hcam.u.error.u.type_02_error; + + ipr_err("-----Current Configuration-----\n"); + ipr_err("Cache Directory Card Information:\n"); + ipr_log_vpd(&error->ioa_vpids, error->ioa_sn); + ipr_err("Adapter Card Information:\n"); + ipr_log_vpd(&error->cfc_vpids, error->cfc_sn); + + ipr_err("-----Expected Configuration-----\n"); + ipr_err("Cache Directory Card Information:\n"); + ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpids, + error->ioa_last_attached_to_cfc_sn); + ipr_err("Adapter Card Information:\n"); + ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpids, + error->cfc_last_attached_to_ioa_sn); + + ipr_err("Additional IOA Data: %08X %08X %08X\n", + be32_to_cpu(error->ioa_data[0]), + be32_to_cpu(error->ioa_data[1]), + be32_to_cpu(error->ioa_data[2])); +} + +/** + * ipr_log_config_error - Log a configuration error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int errors_logged, i; + struct ipr_hostrcb_device_data_entry *dev_entry; + struct ipr_hostrcb_type_03_error *error; + + error = &hostrcb->hcam.u.error.u.type_03_error; + errors_logged = be32_to_cpu(error->errors_logged); + + ipr_err("Device Errors Detected/Logged: %d/%d\n", + be32_to_cpu(error->errors_detected), errors_logged); + + dev_entry = error->dev_entry; + + for (i = 0; i < errors_logged; i++, dev_entry++) { + ipr_err_separator; + + if (dev_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) { + ipr_err("Device %d: missing\n", i + 1); + } else { + ipr_err("Device %d: %d:%d:%d:%d\n", i + 1, + ioa_cfg->host->host_no, dev_entry->dev_res_addr.bus, + dev_entry->dev_res_addr.target, dev_entry->dev_res_addr.lun); + } + ipr_log_vpd(&dev_entry->dev_vpids, dev_entry->dev_sn); + + ipr_err("-----New Device Information-----\n"); + ipr_log_vpd(&dev_entry->new_dev_vpids, dev_entry->new_dev_sn); + + ipr_err("Cache Directory Card Information:\n"); + ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpids, + dev_entry->ioa_last_with_dev_sn); + + ipr_err("Adapter Card Information:\n"); + ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpids, + dev_entry->cfc_last_with_dev_sn); + + ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n", + be32_to_cpu(dev_entry->ioa_data[0]), + be32_to_cpu(dev_entry->ioa_data[1]), + be32_to_cpu(dev_entry->ioa_data[2]), + be32_to_cpu(dev_entry->ioa_data[3]), + be32_to_cpu(dev_entry->ioa_data[4])); + } +} + +/** + * ipr_log_array_error - Log an array configuration error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int i; + struct ipr_hostrcb_type_04_error *error; + struct ipr_hostrcb_array_data_entry *array_entry; + u8 zero_sn[IPR_SERIAL_NUM_LEN]; + + memset(zero_sn, '0', IPR_SERIAL_NUM_LEN); + + error = &hostrcb->hcam.u.error.u.type_04_error; + + ipr_err_separator; + + ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n", + error->protection_level, + ioa_cfg->host->host_no, + error->last_func_vset_res_addr.bus, + error->last_func_vset_res_addr.target, + error->last_func_vset_res_addr.lun); + + ipr_err_separator; + + array_entry = error->array_member; + + for (i = 0; i < 18; i++) { + if (!memcmp(array_entry->serial_num, zero_sn, IPR_SERIAL_NUM_LEN)) + continue; + + if (error->exposed_mode_adn == i) { + ipr_err("Exposed Array Member %d:\n", i); + } else { + ipr_err("Array Member %d:\n", i); + } + + ipr_log_vpd(&array_entry->vpids, array_entry->serial_num); + + if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) { + ipr_err("Current Location: unknown\n"); + } else { + ipr_err("Current Location: %d:%d:%d:%d\n", + ioa_cfg->host->host_no, + array_entry->dev_res_addr.bus, + array_entry->dev_res_addr.target, + array_entry->dev_res_addr.lun); + } + + if (array_entry->dev_res_addr.bus >= IPR_MAX_NUM_BUSES) { + ipr_err("Expected Location: unknown\n"); + } else { + ipr_err("Expected Location: %d:%d:%d:%d\n", + ioa_cfg->host->host_no, + array_entry->expected_dev_res_addr.bus, + array_entry->expected_dev_res_addr.target, + array_entry->expected_dev_res_addr.lun); + } + + ipr_err_separator; + + if (i == 9) + array_entry = error->array_member2; + else + array_entry++; + } +} + +/** + * ipr_log_generic_error - Log an adapter error. 
+ * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * Return value: + * none + **/ +static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + int i; + int ioa_data_len = be32_to_cpu(hostrcb->hcam.length); + + if (ioa_data_len == 0) + return; + + ipr_err("IOA Error Data:\n"); + ipr_err("Offset 0 1 2 3 4 5 6 7 8 9 A B C D E F\n"); + + for (i = 0; i < ioa_data_len / 4; i += 4) { + ipr_err("%08X: %08X %08X %08X %08X\n", i*4, + be32_to_cpu(hostrcb->hcam.u.raw.data[i]), + be32_to_cpu(hostrcb->hcam.u.raw.data[i+1]), + be32_to_cpu(hostrcb->hcam.u.raw.data[i+2]), + be32_to_cpu(hostrcb->hcam.u.raw.data[i+3])); + } +} + +/** + * ipr_get_error - Find the specified IOASC in the ipr_error_table. + * @ioasc: IOASC + * + * This function will return the index into the ipr_error_table + * for the specified IOASC. If the IOASC is not in the table, + * 0 will be returned, which points to the entry used for unknown errors. + * + * Return value: + * index into the ipr_error_table + **/ +static u32 ipr_get_error(u32 ioasc) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++) + if (ipr_error_table[i].ioasc == ioasc) + return i; + + return 0; +} + +/** + * ipr_handle_log_data - Log an adapter error. + * @ioa_cfg: ioa config struct + * @hostrcb: hostrcb struct + * + * This function logs an adapter error to the system. + * + * Return value: + * none + **/ +static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_hostrcb *hostrcb) +{ + u32 ioasc; + int error_index; + + if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) + return; + + if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST) + dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n"); + + ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc); + + if (ioasc == IPR_IOASC_BUS_WAS_RESET || + ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) { + /* Tell the midlayer we had a bus reset so it will handle the UA properly */ + scsi_report_bus_reset(ioa_cfg->host, + hostrcb->hcam.u.error.failing_dev_res_addr.bus); + } + + error_index = ipr_get_error(ioasc); + + if (!ipr_error_table[error_index].log_hcam) + return; + + if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) { + ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr, + "%s\n", ipr_error_table[error_index].error); + } else { + dev_err(&ioa_cfg->pdev->dev, "%s\n", + ipr_error_table[error_index].error); + } + + /* Set indication we have logged an error */ + ioa_cfg->errors_logged++; + + if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) + return; + + switch (hostrcb->hcam.overlay_id) { + case IPR_HOST_RCB_OVERLAY_ID_1: + ipr_log_generic_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_2: + ipr_log_cache_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_3: + ipr_log_config_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_4: + case IPR_HOST_RCB_OVERLAY_ID_6: + ipr_log_array_error(ioa_cfg, hostrcb); + break; + case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: + ipr_log_generic_error(ioa_cfg, hostrcb); + break; + default: + dev_err(&ioa_cfg->pdev->dev, + "Unknown error received. Overlay ID: %d\n", + hostrcb->hcam.overlay_id); + break; + } +} + +/** + * ipr_process_error - Op done function for an adapter error log. + * @ipr_cmd: ipr command struct + * + * This function is the op done function for an error log host + * controlled async from the adapter. It will log the error and + * send the HCAM back to the adapter. 
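+ * If the HCAM itself completed with a non-zero IOASC other than + * IPR_IOASC_IOA_WAS_RESET, the failure is logged instead; in either case the + * hostrcb is sent back to the adapter via ipr_send_hcam() so that error + * notification continues. 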
+ * + * Return value: + * none + **/ +static void ipr_process_error(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + list_del(&hostrcb->queue); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + + if (!ioasc) { + ipr_handle_log_data(ioa_cfg, hostrcb); + } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) { + dev_err(&ioa_cfg->pdev->dev, + "Host RCB failed with IOASC: 0x%08X\n", ioasc); + } + + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); +} + +/** + * ipr_timeout - An internally generated op has timed out. + * @ipr_cmd: ipr command struct + * + * This function blocks host requests and initiates an + * adapter reset. + * + * Return value: + * none + **/ +static void ipr_timeout(struct ipr_cmnd *ipr_cmd) +{ + unsigned long lock_flags = 0; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, + "Adapter being reset due to command timeout.\n"); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +/** + * ipr_reset_reload - Reset/Reload the IOA + * @ioa_cfg: ioa config struct + * @shutdown_type: shutdown type + * + * This function resets the adapter and re-initializes it. + * This function assumes that all new host commands have been stopped. + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg, + enum ipr_shutdown_type shutdown_type) +{ + if (!ioa_cfg->in_reset_reload) + ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); + + spin_unlock_irq(ioa_cfg->host->host_lock); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irq(ioa_cfg->host->host_lock); + + /* If we got hit with a host reset while we were already resetting + the adapter for some reason, and the reset failed. */ + if (ioa_cfg->ioa_is_dead) { + ipr_trace; + return FAILED; + } + + return SUCCESS; +} + +/** + * ipr_find_ses_entry - Find matching SES in SES table + * @res: resource entry struct of SES + * + * Return value: + * pointer to SES table entry / NULL on failure + **/ +static const struct ipr_ses_table_entry * +ipr_find_ses_entry(struct ipr_resource_entry *res) +{ + int i, j, matches; + const struct ipr_ses_table_entry *ste = ipr_ses_table; + + for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) { + for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) { + if (ste->compare_product_id_byte[j] == 'X') { + if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j]) + matches++; + else + break; + } else + matches++; + } + + if (matches == IPR_PROD_ID_LEN) + return ste; + } + + return NULL; +} + +/** + * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus + * @ioa_cfg: ioa config struct + * @bus: SCSI bus + * @bus_width: bus width + * + * Return value: + * SCSI bus speed in units of 100KHz, 1600 is 160 MHz + * For a 2-byte wide SCSI bus, the maximum transfer speed is + * twice the maximum transfer rate (e.g. for a wide enabled bus, + * max 160MHz = max 320MB/sec). 
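+ * As a worked example (assuming max_bus_speed_limit is expressed in MB/sec), + * a SES entry limiting a 16-bit wide bus to 320 MB/sec yields + * (320 * 10) / (16 / 8) = 1600, i.e. 160 MHz. 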
+ **/ +static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width) +{ + struct ipr_resource_entry *res; + const struct ipr_ses_table_entry *ste; + u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width); + + /* Loop through each config table entry in the config table buffer */ + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data))) + continue; + + if (bus != res->cfgte.res_addr.bus) + continue; + + if (!(ste = ipr_find_ses_entry(res))) + continue; + + max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8); + } + + return max_xfer_rate; +} + +/** + * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA + * @ioa_cfg: ioa config struct + * @max_delay: max delay in micro-seconds to wait + * + * Waits for an IODEBUG ACK from the IOA, doing busy looping. + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay) +{ + volatile u32 pcii_reg; + int delay = 1; + + /* Read interrupt reg until IOA signals IO Debug Acknowledge */ + while (delay < max_delay) { + pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + + if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE) + return 0; + + /* udelay cannot be used if delay is more than a few milliseconds */ + if ((delay / 1000) > MAX_UDELAY_MS) + mdelay(delay / 1000); + else + udelay(delay); + + delay += delay; + } + return -EIO; +} + +/** + * ipr_get_ldump_data_section - Dump IOA memory + * @ioa_cfg: ioa config struct + * @start_addr: adapter address to dump + * @dest: destination kernel buffer + * @length_in_words: length to dump in 4 byte words + * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg, + u32 start_addr, + u32 *dest, u32 length_in_words) +{ + volatile u32 temp_pcii_reg; + int i, delay = 0; + + /* Write IOA interrupt reg starting LDUMP state */ + writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT), + ioa_cfg->regs.set_uproc_interrupt_reg); + + /* Wait for IO debug acknowledge */ + if (ipr_wait_iodbg_ack(ioa_cfg, + IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) { + dev_err(&ioa_cfg->pdev->dev, + "IOA dump long data transfer timeout\n"); + return -EIO; + } + + /* Signal LDUMP interlocked - clear IO debug ack */ + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, + ioa_cfg->regs.clr_interrupt_reg); + + /* Write Mailbox with starting address */ + writel(start_addr, ioa_cfg->ioa_mailbox); + + /* Signal address valid - clear IOA Reset alert */ + writel(IPR_UPROCI_RESET_ALERT, + ioa_cfg->regs.clr_uproc_interrupt_reg); + + for (i = 0; i < length_in_words; i++) { + /* Wait for IO debug acknowledge */ + if (ipr_wait_iodbg_ack(ioa_cfg, + IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) { + dev_err(&ioa_cfg->pdev->dev, + "IOA dump short data transfer timeout\n"); + return -EIO; + } + + /* Read data from mailbox and increment destination pointer */ + *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox)); + dest++; + + /* For all but the last word of data, signal data received */ + if (i < (length_in_words - 1)) { + /* Signal dump data received - Clear IO debug Ack */ + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, + ioa_cfg->regs.clr_interrupt_reg); + } + } + + /* Signal end of block transfer. 
Set reset alert then clear IO debug ack */ + writel(IPR_UPROCI_RESET_ALERT, + ioa_cfg->regs.set_uproc_interrupt_reg); + + writel(IPR_UPROCI_IO_DEBUG_ALERT, + ioa_cfg->regs.clr_uproc_interrupt_reg); + + /* Signal dump data received - Clear IO debug Ack */ + writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, + ioa_cfg->regs.clr_interrupt_reg); + + /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */ + while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) { + temp_pcii_reg = + readl(ioa_cfg->regs.sense_uproc_interrupt_reg); + + if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT)) + return 0; + + udelay(10); + delay += 10; + } + + return 0; +} + +#ifdef CONFIG_SCSI_IPR_DUMP +/** + * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer + * @ioa_cfg: ioa config struct + * @pci_address: adapter address + * @length: length of data to copy + * + * Copy data from PCI adapter to kernel buffer. + * Note: length MUST be a 4 byte multiple + * Return value: + * 0 on success / other on failure + **/ +static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg, + unsigned long pci_address, u32 length) +{ + int bytes_copied = 0; + int cur_len, rc, rem_len, rem_page_len; + u32 *page; + unsigned long lock_flags = 0; + struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump; + + while (bytes_copied < length && + (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) { + if (ioa_dump->page_offset >= PAGE_SIZE || + ioa_dump->page_offset == 0) { + page = (u32 *)__get_free_page(GFP_ATOMIC); + + if (!page) { + ipr_trace; + return bytes_copied; + } + + ioa_dump->page_offset = 0; + ioa_dump->ioa_data[ioa_dump->next_page_index] = page; + ioa_dump->next_page_index++; + } else + page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1]; + + rem_len = length - bytes_copied; + rem_page_len = PAGE_SIZE - ioa_dump->page_offset; + cur_len = min(rem_len, rem_page_len); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->sdt_state == ABORT_DUMP) { + rc = -EIO; + } else { + rc = ipr_get_ldump_data_section(ioa_cfg, + pci_address + bytes_copied, + &page[ioa_dump->page_offset / 4], + (cur_len / sizeof(u32))); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + if (!rc) { + ioa_dump->page_offset += cur_len; + bytes_copied += cur_len; + } else { + ipr_trace; + break; + } + schedule(); + } + + return bytes_copied; +} + +/** + * ipr_init_dump_entry_hdr - Initialize a dump entry header. + * @hdr: dump entry header struct + * + * Return value: + * nothing + **/ +static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr) +{ + hdr->eye_catcher = IPR_DUMP_EYE_CATCHER; + hdr->num_elems = 1; + hdr->offset = sizeof(*hdr); + hdr->status = IPR_DUMP_STATUS_SUCCESS; +} + +/** + * ipr_dump_ioa_type_data - Fill in the adapter type in the dump. 
+ * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; + + ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr); + driver_dump->ioa_type_entry.hdr.len = + sizeof(struct ipr_dump_ioa_type_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; + driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID; + driver_dump->ioa_type_entry.type = ioa_cfg->type; + driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) | + (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) | + ucode_vpd->minor_release[1]; + driver_dump->hdr.num_entries++; +} + +/** + * ipr_dump_version_data - Fill in the driver version in the dump. + * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr); + driver_dump->version_entry.hdr.len = + sizeof(struct ipr_dump_version_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; + driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID; + strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION); + driver_dump->hdr.num_entries++; +} + +/** + * ipr_dump_trace_data - Fill in the IOA trace in the dump. + * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr); + driver_dump->trace_entry.hdr.len = + sizeof(struct ipr_dump_trace_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; + driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID; + memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE); + driver_dump->hdr.num_entries++; +} + +/** + * ipr_dump_location_data - Fill in the IOA location in the dump. + * @ioa_cfg: ioa config struct + * @driver_dump: driver dump struct + * + * Return value: + * nothing + **/ +static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_driver_dump *driver_dump) +{ + ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr); + driver_dump->location_entry.hdr.len = + sizeof(struct ipr_dump_location_entry) - + sizeof(struct ipr_dump_entry_header); + driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII; + driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID; + strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id); + driver_dump->hdr.num_entries++; +} + +/** + * ipr_get_ioa_dump - Perform a dump of the driver and adapter. 
+ * @ioa_cfg: ioa config struct + * @dump: dump struct + * + * Return value: + * nothing + **/ +static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) +{ + unsigned long start_addr, sdt_word; + unsigned long lock_flags = 0; + struct ipr_driver_dump *driver_dump = &dump->driver_dump; + struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump; + u32 num_entries, start_off, end_off; + u32 bytes_to_copy, bytes_copied, rc; + struct ipr_sdt *sdt; + int i; + + ENTER; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->sdt_state != GET_DUMP) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + start_addr = readl(ioa_cfg->ioa_mailbox); + + if (!ipr_sdt_is_fmt2(start_addr)) { + dev_err(&ioa_cfg->pdev->dev, + "Invalid dump table format: %lx\n", start_addr); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n"); + + driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER; + + /* Initialize the overall dump header */ + driver_dump->hdr.len = sizeof(struct ipr_driver_dump); + driver_dump->hdr.num_entries = 1; + driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header); + driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS; + driver_dump->hdr.os = IPR_DUMP_OS_LINUX; + driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME; + + ipr_dump_version_data(ioa_cfg, driver_dump); + ipr_dump_location_data(ioa_cfg, driver_dump); + ipr_dump_ioa_type_data(ioa_cfg, driver_dump); + ipr_dump_trace_data(ioa_cfg, driver_dump); + + /* Update dump_header */ + driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header); + + /* IOA Dump entry */ + ipr_init_dump_entry_hdr(&ioa_dump->hdr); + ioa_dump->format = IPR_SDT_FMT2; + ioa_dump->hdr.len = 0; + ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY; + ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID; + + /* First entries in sdt are actually a list of dump addresses and + lengths to gather the real dump data. sdt represents the pointer + to the ioa generated dump table. Dump data will be extracted based + on entries in this table */ + sdt = &ioa_dump->sdt; + + rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (u32 *)sdt, + sizeof(struct ipr_sdt) / sizeof(u32)); + + /* Smart Dump table is ready to use and the first entry is valid */ + if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) { + dev_err(&ioa_cfg->pdev->dev, + "Dump of IOA failed. 
Dump table not valid: %d, %X.\n", + rc, be32_to_cpu(sdt->hdr.state)); + driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED; + ioa_cfg->sdt_state = DUMP_OBTAINED; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + num_entries = be32_to_cpu(sdt->hdr.num_entries_used); + + if (num_entries > IPR_NUM_SDT_ENTRIES) + num_entries = IPR_NUM_SDT_ENTRIES; + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + for (i = 0; i < num_entries; i++) { + if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) { + driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; + break; + } + + if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) { + sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset); + start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK; + end_off = be32_to_cpu(sdt->entry[i].end_offset); + + if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) { + bytes_to_copy = end_off - start_off; + if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) { + sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY; + continue; + } + + /* Copy data from adapter to driver buffers */ + bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word, + bytes_to_copy); + + ioa_dump->hdr.len += bytes_copied; + + if (bytes_copied != bytes_to_copy) { + driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS; + break; + } + } + } + } + + dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n"); + + /* Update dump_header */ + driver_dump->hdr.len += ioa_dump->hdr.len; + wmb(); + ioa_cfg->sdt_state = DUMP_OBTAINED; + LEAVE; +} + +#else +#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0) +#endif + +/** + * ipr_worker_thread - Worker thread + * @data: ioa config struct + * + * Called at task level from a work thread. This function takes care + * of adding and removing device from the mid-layer as configuration + * changes are detected by the adapter. 
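+ * If an adapter dump has been requested (sdt_state == GET_DUMP), the dump is + * fetched first; otherwise resources flagged del_from_ml are removed from the + * mid-layer and resources flagged add_to_ml are registered with it. 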
+ * + * Return value: + * nothing + **/ +static void ipr_worker_thread(void *data) +{ + unsigned long lock_flags; + struct ipr_resource_entry *res; + struct scsi_device *sdev; + struct ipr_dump *dump; + struct ipr_ioa_cfg *ioa_cfg = data; + u8 bus, target, lun; + int did_work; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->sdt_state == GET_DUMP) { + dump = ioa_cfg->dump; + if (!dump || !kobject_get(&dump->kobj)) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + ipr_get_ioa_dump(ioa_cfg, dump); + kobject_put(&dump->kobj); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->sdt_state == DUMP_OBTAINED) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + +restart: + do { + did_work = 0; + if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->del_from_ml && res->sdev) { + did_work = 1; + sdev = res->sdev; + if (!scsi_device_get(sdev)) { + res->sdev = NULL; + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_remove_device(sdev); + scsi_device_put(sdev); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + } + break; + } + } + } while(did_work); + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (res->add_to_ml) { + bus = res->cfgte.res_addr.bus; + target = res->cfgte.res_addr.target; + lun = res->cfgte.res_addr.lun; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_add_device(ioa_cfg->host, bus, target, lun); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + goto restart; + } + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +#ifdef CONFIG_SCSI_IPR_TRACE +/** + * ipr_read_trace - Dump the adapter trace + * @kobj: kobject struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_read_trace(struct kobject *kobj, char *buf, + loff_t off, size_t count) +{ + struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int size = IPR_TRACE_SIZE; + char *src = (char *)ioa_cfg->trace; + + if (off > size) + return 0; + if (off + count > size) { + size -= off; + count = size; + } + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + memcpy(buf, &src[off], count); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return count; +} + +static struct bin_attribute ipr_trace_attr = { + .attr = { + .name = "trace", + .mode = S_IRUGO, + }, + .size = 0, + .read = ipr_read_trace, +}; +#endif + +/** + * ipr_show_fw_version - Show the firmware version + * @class_dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; + unsigned long lock_flags = 0; + int len; + + 
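/* Format the firmware version from the cached page 3 inquiry VPD under the host lock */ + 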
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n", + ucode_vpd->major_release, ucode_vpd->card_type, + ucode_vpd->minor_release[0], + ucode_vpd->minor_release[1]); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct class_device_attribute ipr_fw_version_attr = { + .attr = { + .name = "fw_version", + .mode = S_IRUGO, + }, + .show = ipr_show_fw_version, +}; + +/** + * ipr_show_log_level - Show the adapter's error logging level + * @class_dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int len; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +/** + * ipr_store_log_level - Change the adapter's error logging level + * @class_dev: class device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_store_log_level(struct class_device *class_dev, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->log_level = simple_strtoul(buf, NULL, 10); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return strlen(buf); +} + +static struct class_device_attribute ipr_log_level_attr = { + .attr = { + .name = "log_level", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ipr_show_log_level, + .store = ipr_store_log_level +}; + +/** + * ipr_store_diagnostics - IOA Diagnostics interface + * @class_dev: class_device struct + * @buf: buffer + * @count: buffer size + * + * This function will reset the adapter and wait a reasonable + * amount of time for any errors that the adapter might log. 
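+ * A hypothetical invocation from userspace (sysfs path assumed here, not part + * of this patch) would be: echo 1 > /sys/class/scsi_host/host0/run_diagnostics 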
+ * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_diagnostics(struct class_device *class_dev, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags = 0; + int rc = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->errors_logged = 0; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + + if (ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + /* Wait for a second for any errors to be logged */ + schedule_timeout(HZ); + } else { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return -EIO; + } + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged) + rc = -EIO; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + return rc; +} + +static struct class_device_attribute ipr_diagnostics_attr = { + .attr = { + .name = "run_diagnostics", + .mode = S_IWUSR, + }, + .store = ipr_store_diagnostics +}; + +/** + * ipr_store_reset_adapter - Reset the adapter + * @class_dev: class_device struct + * @buf: buffer + * @count: buffer size + * + * This function will reset the adapter. + * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_reset_adapter(struct class_device *class_dev, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + unsigned long lock_flags; + int result = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (!ioa_cfg->in_reset_reload) + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + + return result; +} + +static struct class_device_attribute ipr_ioa_reset_attr = { + .attr = { + .name = "reset_host", + .mode = S_IWUSR, + }, + .store = ipr_store_reset_adapter +}; + +/** + * ipr_alloc_ucode_buffer - Allocates a microcode download buffer + * @buf_len: buffer length + * + * Allocates a DMA'able buffer in chunks and assembles a scatter/gather + * list to use for microcode download + * + * Return value: + * pointer to sglist / NULL on failure + **/ +static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len) +{ + int sg_size, order, bsize_elem, num_elem, i, j; + struct ipr_sglist *sglist; + struct scatterlist *scatterlist; + struct page *page; + + /* Get the minimum size per scatter/gather element */ + sg_size = buf_len / (IPR_MAX_SGLIST - 1); + + /* Get the actual size per element */ + order = get_order(sg_size); + + /* Determine the actual number of bytes per element */ + bsize_elem = PAGE_SIZE * (1 << order); + + /* Determine the actual number of sg entries needed */ + if (buf_len % bsize_elem) + num_elem = (buf_len / bsize_elem) + 1; + else + num_elem = buf_len / bsize_elem; + + /* Allocate a scatter/gather list for the DMA */ + sglist = kmalloc(sizeof(struct ipr_sglist) + + (sizeof(struct scatterlist) * (num_elem - 1)), + GFP_KERNEL); + + if (sglist == NULL) { + ipr_trace; + return NULL; + } + + memset(sglist, 0, 
sizeof(struct ipr_sglist) + + (sizeof(struct scatterlist) * (num_elem - 1))); + + scatterlist = sglist->scatterlist; + + sglist->order = order; + sglist->num_sg = num_elem; + + /* Allocate a bunch of sg elements */ + for (i = 0; i < num_elem; i++) { + page = alloc_pages(GFP_KERNEL, order); + if (!page) { + ipr_trace; + + /* Free up what we already allocated */ + for (j = i - 1; j >= 0; j--) + __free_pages(scatterlist[j].page, order); + kfree(sglist); + return NULL; + } + + scatterlist[i].page = page; + } + + return sglist; +} + +/** + * ipr_free_ucode_buffer - Frees a microcode download buffer + * @p_dnld: scatter/gather list pointer + * + * Free a DMA'able ucode download buffer previously allocated with + * ipr_alloc_ucode_buffer + * + * Return value: + * nothing + **/ +static void ipr_free_ucode_buffer(struct ipr_sglist *sglist) +{ + int i; + + for (i = 0; i < sglist->num_sg; i++) + __free_pages(sglist->scatterlist[i].page, sglist->order); + + kfree(sglist); +} + +/** + * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer + * @sglist: scatter/gather list pointer + * @buffer: buffer pointer + * @len: buffer length + * + * Copy a microcode image from a user buffer into a buffer allocated by + * ipr_alloc_ucode_buffer + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist, + u8 *buffer, u32 len) +{ + int bsize_elem, i, result = 0; + struct scatterlist *scatterlist; + void *kaddr; + + /* Determine the actual number of bytes per element */ + bsize_elem = PAGE_SIZE * (1 << sglist->order); + + scatterlist = sglist->scatterlist; + + for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) { + kaddr = kmap(scatterlist[i].page); + memcpy(kaddr, buffer, bsize_elem); + kunmap(scatterlist[i].page); + + scatterlist[i].length = bsize_elem; + + if (result != 0) { + ipr_trace; + return result; + } + } + + if (len % bsize_elem) { + kaddr = kmap(scatterlist[i].page); + memcpy(kaddr, buffer, len % bsize_elem); + kunmap(scatterlist[i].page); + + scatterlist[i].length = len % bsize_elem; + } + + sglist->buffer_len = len; + return result; +} + +/** + * ipr_map_ucode_buffer - Map a microcode download buffer + * @ipr_cmd: ipr command struct + * @sglist: scatter/gather list + * @len: total length of download buffer + * + * Maps a microcode download scatter/gather list for DMA and + * builds the IOADL. 
+ * + * Return value: + * 0 on success / -EIO on failure + **/ +static int ipr_map_ucode_buffer(struct ipr_cmnd *ipr_cmd, + struct ipr_sglist *sglist, int len) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct scatterlist *scatterlist = sglist->scatterlist; + int i; + + ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, scatterlist, + sglist->num_sg, DMA_TO_DEVICE); + + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->write_data_transfer_length = cpu_to_be32(len); + ioarcb->write_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + + for (i = 0; i < ipr_cmd->dma_use_sg; i++) { + ioadl[i].flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i])); + ioadl[i].address = + cpu_to_be32(sg_dma_address(&scatterlist[i])); + } + + if (likely(ipr_cmd->dma_use_sg)) { + ioadl[i-1].flags_and_data_len |= + cpu_to_be32(IPR_IOADL_FLAGS_LAST); + } + else { + dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); + return -EIO; + } + + return 0; +} + +/** + * ipr_store_update_fw - Update the firmware on the adapter + * @class_dev: class_device struct + * @buf: buffer + * @count: buffer size + * + * This function will update the firmware on the adapter. + * + * Return value: + * count on success / other on failure + **/ +static ssize_t ipr_store_update_fw(struct class_device *class_dev, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(class_dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct ipr_ucode_image_header *image_hdr; + const struct firmware *fw_entry; + struct ipr_sglist *sglist; + unsigned long lock_flags; + char fname[100]; + char *src; + int len, result, dnld_size; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + len = snprintf(fname, 99, "%s", buf); + fname[len-1] = '\0'; + + if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { + dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); + return -EIO; + } + + image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; + + if (be32_to_cpu(image_hdr->header_length) > fw_entry->size || + (ioa_cfg->vpd_cbs->page3_data.card_type && + ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) { + dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n"); + release_firmware(fw_entry); + return -EINVAL; + } + + src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); + dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); + sglist = ipr_alloc_ucode_buffer(dnld_size); + + if (!sglist) { + dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n"); + release_firmware(fw_entry); + return -ENOMEM; + } + + result = ipr_copy_ucode_buffer(sglist, src, dnld_size); + + if (result) { + dev_err(&ioa_cfg->pdev->dev, + "Microcode buffer copy to DMA buffer failed\n"); + ipr_free_ucode_buffer(sglist); + release_firmware(fw_entry); + return result; + } + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->ucode_sglist) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + dev_err(&ioa_cfg->pdev->dev, + "Microcode download already in progress\n"); + ipr_free_ucode_buffer(sglist); + release_firmware(fw_entry); + return -EIO; + } + + ioa_cfg->ucode_sglist = sglist; + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, 
!ioa_cfg->in_reset_reload); + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->ucode_sglist = NULL; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + ipr_free_ucode_buffer(sglist); + release_firmware(fw_entry); + + return count; +} + +static struct class_device_attribute ipr_update_fw_attr = { + .attr = { + .name = "update_fw", + .mode = S_IWUSR, + }, + .store = ipr_store_update_fw +}; + +static struct class_device_attribute *ipr_ioa_attrs[] = { + &ipr_fw_version_attr, + &ipr_log_level_attr, + &ipr_diagnostics_attr, + &ipr_ioa_reset_attr, + &ipr_update_fw_attr, + NULL, +}; + +#ifdef CONFIG_SCSI_IPR_DUMP +/** + * ipr_read_dump - Dump the adapter + * @kobj: kobject struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_read_dump(struct kobject *kobj, char *buf, + loff_t off, size_t count) +{ + struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + struct ipr_dump *dump; + unsigned long lock_flags = 0; + char *src; + int len; + size_t rc = count; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + dump = ioa_cfg->dump; + + if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump || !kobject_get(&dump->kobj)) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + if (off > dump->driver_dump.hdr.len) { + kobject_put(&dump->kobj); + return 0; + } + + if (off + count > dump->driver_dump.hdr.len) { + count = dump->driver_dump.hdr.len - off; + rc = count; + } + + if (count && off < sizeof(dump->driver_dump)) { + if (off + count > sizeof(dump->driver_dump)) + len = sizeof(dump->driver_dump) - off; + else + len = count; + src = (u8 *)&dump->driver_dump + off; + memcpy(buf, src, len); + buf += len; + off += len; + count -= len; + } + + off -= sizeof(dump->driver_dump); + + if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) { + if (off + count > offsetof(struct ipr_ioa_dump, ioa_data)) + len = offsetof(struct ipr_ioa_dump, ioa_data) - off; + else + len = count; + src = (u8 *)&dump->ioa_dump + off; + memcpy(buf, src, len); + buf += len; + off += len; + count -= len; + } + + off -= offsetof(struct ipr_ioa_dump, ioa_data); + + while (count) { + if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK)) + len = PAGE_ALIGN(off) - off; + else + len = count; + src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT]; + src += off & ~PAGE_MASK; + memcpy(buf, src, len); + buf += len; + off += len; + count -= len; + } + + kobject_put(&dump->kobj); + return rc; +} + +/** + * ipr_release_dump - Free adapter dump memory + * @kobj: kobject struct + * + * Return value: + * nothing + **/ +static void ipr_release_dump(struct kobject *kobj) +{ + struct ipr_dump *dump = container_of(kobj,struct ipr_dump,kobj); + struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; + unsigned long lock_flags = 0; + int i; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ioa_cfg->dump = NULL; + ioa_cfg->sdt_state = INACTIVE; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + for (i = 0; i < dump->ioa_dump.next_page_index; i++) + free_page((unsigned long) dump->ioa_dump.ioa_data[i]); + + kfree(dump); + LEAVE; +} + +static struct kobj_type ipr_dump_kobj_type = { + .release = 
ipr_release_dump, +}; + +/** + * ipr_alloc_dump - Prepare for adapter dump + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_dump *dump; + unsigned long lock_flags = 0; + + ENTER; + dump = kmalloc(sizeof(struct ipr_dump), GFP_KERNEL); + + if (!dump) { + ipr_err("Dump memory allocation failed\n"); + return -ENOMEM; + } + + memset(dump, 0, sizeof(struct ipr_dump)); + kobject_init(&dump->kobj); + dump->kobj.ktype = &ipr_dump_kobj_type; + dump->ioa_cfg = ioa_cfg; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (INACTIVE != ioa_cfg->sdt_state) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + kfree(dump); + return 0; + } + + ioa_cfg->dump = dump; + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) { + ioa_cfg->dump_taken = 1; + schedule_work(&ioa_cfg->work_q); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + LEAVE; + return 0; +} + +/** + * ipr_free_dump - Free adapter dump memory + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / other on failure + **/ +static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_dump *dump; + unsigned long lock_flags = 0; + + ENTER; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + dump = ioa_cfg->dump; + if (!dump) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; + } + + ioa_cfg->dump = NULL; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + kobject_put(&dump->kobj); + + LEAVE; + return 0; +} + +/** + * ipr_write_dump - Setup dump state of adapter + * @kobj: kobject struct + * @buf: buffer + * @off: offset + * @count: buffer size + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_write_dump(struct kobject *kobj, char *buf, + loff_t off, size_t count) +{ + struct class_device *cdev = container_of(kobj,struct class_device,kobj); + struct Scsi_Host *shost = class_to_shost(cdev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata; + int rc; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (buf[0] == '1') + rc = ipr_alloc_dump(ioa_cfg); + else if (buf[0] == '0') + rc = ipr_free_dump(ioa_cfg); + else + return -EINVAL; + + if (rc) + return rc; + else + return count; +} + +static struct bin_attribute ipr_dump_attr = { + .attr = { + .name = "dump", + .mode = S_IRUSR | S_IWUSR, + }, + .size = 0, + .read = ipr_read_dump, + .write = ipr_write_dump +}; +#else +static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }; +#endif + +/** + * ipr_store_queue_depth - Change the device's queue depth + * @dev: device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_store_queue_depth(struct device *dev, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + int qdepth = simple_strtoul(buf, NULL, 10); + int tagged = 0; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) { + res->qdepth = qdepth; + + if (ipr_is_gscsi(res) && res->tcq_active) + tagged = MSG_ORDERED_TAG; + + len = strlen(buf); + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + 
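/* Apply the new queue depth to the mid-layer outside the host lock */ + 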
scsi_adjust_queue_depth(sdev, tagged, qdepth); + return len; +} + +static struct device_attribute ipr_queue_depth_attr = { + .attr = { + .name = "queue_depth", + .mode = S_IRUSR | S_IWUSR, + }, + .store = ipr_store_queue_depth +}; + +/** + * ipr_show_tcq_enable - Show if the device is enabled for tcqing + * @dev: device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_tcq_enable(struct device *dev, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) + len = snprintf(buf, PAGE_SIZE, "%d\n", res->tcq_active); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +/** + * ipr_store_tcq_enable - Change the device's TCQing state + * @dev: device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_store_tcq_enable(struct device *dev, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + int tcq_active = simple_strtoul(buf, NULL, 10); + int qdepth = IPR_MAX_CMD_PER_LUN; + int tagged = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + res = (struct ipr_resource_entry *)sdev->hostdata; + + if (res) { + res->tcq_active = 0; + qdepth = res->qdepth; + + if (ipr_is_gscsi(res) && sdev->tagged_supported) { + if (tcq_active) { + tagged = MSG_ORDERED_TAG; + res->tcq_active = 1; + } + + len = strlen(buf); + } else if (tcq_active) { + len = -EINVAL; + } + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + scsi_adjust_queue_depth(sdev, tagged, qdepth); + return len; +} + +static struct device_attribute ipr_tcqing_attr = { + .attr = { + .name = "tcq_enable", + .mode = S_IRUSR | S_IWUSR, + }, + .store = ipr_store_tcq_enable, + .show = ipr_show_tcq_enable +}; + +/** + * ipr_show_adapter_handle - Show the adapter's resource handle for this device + * @dev: device struct + * @buf: buffer + * + * Return value: + * number of bytes printed to buffer + **/ +static ssize_t ipr_show_adapter_handle(struct device *dev, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + ssize_t len = -ENXIO; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *)sdev->hostdata; + if (res) + len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return len; +} + +static struct device_attribute ipr_adapter_handle_attr = { + .attr = { + .name = "adapter_handle", + .mode = S_IRUSR, + }, + .show = ipr_show_adapter_handle +}; + +static struct device_attribute *ipr_dev_attrs[] = { + &ipr_queue_depth_attr, + &ipr_tcqing_attr, + &ipr_adapter_handle_attr, + NULL, +}; + +/** + * ipr_biosparam - Return the HSC mapping + * @sdev: scsi device struct + * @block_device: block device pointer + * @capacity: capacity of the device + * @parm: Array containing returned HSC values. 
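+ *        e.g. for a hypothetical 8 GiB disk (16,777,216 512-byte sectors) the + *        returned values are heads=128, sectors=32, + *        cylinders=16777216/(128*32)=4096. 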
+ * + * This function generates the HSC parms that fdisk uses. + * We want to make sure we return something that places partitions + * on 4k boundaries for best performance with the IOA. + * + * Return value: + * 0 on success + **/ +static int ipr_biosparam(struct scsi_device *sdev, + struct block_device *block_device, + sector_t capacity, int *parm) +{ + int heads, sectors, cylinders; + + heads = 128; + sectors = 32; + + cylinders = capacity; + sector_div(cylinders, (128 * 32)); + + /* return result */ + parm[0] = heads; + parm[1] = sectors; + parm[2] = cylinders; + + return 0; +} + +/** + * ipr_slave_destroy - Unconfigure a SCSI device + * @sdev: scsi device struct + * + * Return value: + * nothing + **/ +static void ipr_slave_destroy(struct scsi_device *sdev) +{ + struct ipr_resource_entry *res; + struct ipr_ioa_cfg *ioa_cfg; + unsigned long lock_flags = 0; + + ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = (struct ipr_resource_entry *) sdev->hostdata; + if (res) { + sdev->hostdata = NULL; + res->sdev = NULL; + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); +} + +/** + * ipr_slave_configure - Configure a SCSI device + * @sdev: scsi device struct + * + * This function configures the specified scsi device. + * + * Return value: + * 0 on success + **/ +static int ipr_slave_configure(struct scsi_device *sdev) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + res = sdev->hostdata; + if (res) { + if (ipr_is_af_dasd_device(res)) + sdev->type = TYPE_RAID; + if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) + sdev->scsi_level = 4; + if (ipr_is_vset_device(res)) + sdev->timeout = IPR_VSET_RW_TIMEOUT; + + sdev->allow_restart = 1; + scsi_adjust_queue_depth(sdev, 0, res->qdepth); + } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return 0; +} + +/** + * ipr_slave_alloc - Prepare for commands to a device. + * @sdev: scsi device struct + * + * This function saves a pointer to the resource entry + * in the scsi device struct if the device exists. We + * can then use this pointer in ipr_queuecommand when + * handling new commands. 
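+ * The lookup matches the device's channel/id/lun against the resource table + * entries on the adapter's used resource queue. 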
+ * + * Return value: + * 0 on success + **/ +static int ipr_slave_alloc(struct scsi_device *sdev) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata; + struct ipr_resource_entry *res; + unsigned long lock_flags; + + sdev->hostdata = NULL; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if ((res->cfgte.res_addr.bus == sdev->channel) && + (res->cfgte.res_addr.target == sdev->id) && + (res->cfgte.res_addr.lun == sdev->lun)) { + res->sdev = sdev; + res->add_to_ml = 0; + sdev->hostdata = res; + res->needs_sync_complete = 1; + break; + } + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + return 0; +} + +/** + * ipr_eh_host_reset - Reset the host adapter + * @scsi_cmd: scsi command struct + * + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg; + int rc; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + + dev_err(&ioa_cfg->pdev->dev, + "Adapter being reset as a result of error recovery.\n"); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV); + + LEAVE; + return rc; +} + +/** + * ipr_eh_dev_reset - Reset the device + * @scsi_cmd: scsi command struct + * + * This function issues a device reset to the affected device. + * A LUN reset will be sent to the device first. If that does + * not work, a target reset will be sent. + * + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; + struct ipr_cmd_pkt *cmd_pkt; + u32 ioasc; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + res = scsi_cmd->device->hostdata; + + if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res))) + return FAILED; + + /* + * If we are currently going through reset/reload, return failed. This will force the + * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the + * reset to complete + */ + if (ioa_cfg->in_reset_reload) + return FAILED; + if (ioa_cfg->ioa_is_dead) + return FAILED; + + list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { + if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) { + if (ipr_cmd->scsi_cmd) + ipr_cmd->done = ipr_scsi_eh_done; + } + } + + res->resetting_device = 1; + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + + ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; + cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_RESET_DEVICE; + + ipr_sdev_err(scsi_cmd->device, "Resetting device\n"); + ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); + + ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + res->resetting_device = 0; + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + + LEAVE; + return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS); +} + +/** + * ipr_bus_reset_done - Op done function for bus reset. 
+ * @ipr_cmd: ipr command struct + * + * This function is the op done function for a bus reset + * + * Return value: + * none + **/ +static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_resource_entry *res; + + ENTER; + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle, + sizeof(res->cfgte.res_handle))) { + scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus); + break; + } + } + + /* + * If abort has not completed, indicate the reset has, else call the + * abort's done function to wake the sleeping eh thread + */ + if (ipr_cmd->u.sibling->u.sibling) + ipr_cmd->u.sibling->u.sibling = NULL; + else + ipr_cmd->u.sibling->done(ipr_cmd->u.sibling); + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + LEAVE; +} + +/** + * ipr_abort_timeout - An abort task has timed out + * @ipr_cmd: ipr command struct + * + * This function handles when an abort task times out. If this + * happens we issue a bus reset since we have resources tied + * up that must be freed before returning to the midlayer. + * + * Return value: + * none + **/ +static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_cmnd *reset_cmd; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_cmd_pkt *cmd_pkt; + unsigned long lock_flags = 0; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return; + } + + ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n"); + reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ipr_cmd->u.sibling = reset_cmd; + reset_cmd->u.sibling = ipr_cmd; + reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle; + cmd_pkt = &reset_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_RESET_DEVICE; + cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET; + + ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + LEAVE; +} + +/** + * ipr_cancel_op - Cancel specified op + * @scsi_cmd: scsi command struct + * + * This function cancels specified op. 
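+ * The op to cancel is identified by the PCI address of its IOARCB, which is + * passed to the adapter in an IPR_ABORT_TASK command. If the abort itself + * times out, ipr_abort_timeout() escalates to a bus reset. 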
+ * + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; + struct ipr_cmd_pkt *cmd_pkt; + u32 ioasc, ioarcb_addr; + int op_found = 0; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; + res = scsi_cmd->device->hostdata; + + if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res))) + return FAILED; + + list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) { + if (ipr_cmd->scsi_cmd == scsi_cmd) { + ipr_cmd->done = ipr_scsi_eh_done; + op_found = 1; + break; + } + } + + if (!op_found) + return SUCCESS; + + ioarcb_addr = be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr); + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle; + cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_ABORT_TASK; + cmd_pkt->cdb[2] = (ioarcb_addr >> 24) & 0xff; + cmd_pkt->cdb[3] = (ioarcb_addr >> 16) & 0xff; + cmd_pkt->cdb[4] = (ioarcb_addr >> 8) & 0xff; + cmd_pkt->cdb[5] = ioarcb_addr & 0xff; + ipr_cmd->u.sdev = scsi_cmd->device; + + ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]); + ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_ABORT_TASK_TIMEOUT); + ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + /* + * If the abort task timed out and we sent a bus reset, we will get + * one the following responses to the abort + */ + if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) { + ioasc = 0; + ipr_trace; + } + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + res->needs_sync_complete = 1; + + LEAVE; + return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS); +} + +/** + * ipr_eh_abort - Abort a single op + * @scsi_cmd: scsi command struct + * + * Return value: + * SUCCESS / FAILED + **/ +static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg; + + ENTER; + ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; + + /* If we are currently going through reset/reload, return failed. This will force the + mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the + reset to complete */ + if (ioa_cfg->in_reset_reload) + return FAILED; + if (ioa_cfg->ioa_is_dead) + return FAILED; + if (!scsi_cmd->device->hostdata) + return FAILED; + + LEAVE; + return ipr_cancel_op(scsi_cmd); +} + +/** + * ipr_handle_other_interrupt - Handle "other" interrupts + * @ioa_cfg: ioa config struct + * @int_reg: interrupt register + * + * Return value: + * IRQ_NONE / IRQ_HANDLED + **/ +static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg, + volatile u32 int_reg) +{ + irqreturn_t rc = IRQ_HANDLED; + + if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { + /* Mask the interrupt */ + writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg); + + /* Clear the interrupt */ + writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + + list_del(&ioa_cfg->reset_cmd->queue); + del_timer(&ioa_cfg->reset_cmd->timer); + ipr_reset_ioa_job(ioa_cfg->reset_cmd); + } else { + if (int_reg & IPR_PCII_IOA_UNIT_CHECKED) + ioa_cfg->ioa_unit_checked = 1; + else + dev_err(&ioa_cfg->pdev->dev, + "Permanent IOA failure. 
0x%08X\n", int_reg); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + ipr_mask_and_clear_interrupts(ioa_cfg, ~0); + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + } + + return rc; +} + +/** + * ipr_isr - Interrupt service routine + * @irq: irq number + * @devp: pointer to ioa config struct + * @regs: pt_regs struct + * + * Return value: + * IRQ_NONE / IRQ_HANDLED + **/ +static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs) +{ + struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp; + unsigned long lock_flags = 0; + volatile u32 int_reg, int_mask_reg; + u32 ioasc; + u16 cmd_index; + struct ipr_cmnd *ipr_cmd; + irqreturn_t rc = IRQ_NONE; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + /* If interrupts are disabled, ignore the interrupt */ + if (!ioa_cfg->allow_interrupts) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return IRQ_NONE; + } + + int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + + /* If an interrupt on the adapter did not occur, ignore it */ + if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) { + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return IRQ_NONE; + } + + while (1) { + ipr_cmd = NULL; + + while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) == + ioa_cfg->toggle_bit) { + + cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) & + IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT; + + if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) { + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n"); + + if (WAIT_FOR_DUMP == ioa_cfg->sdt_state) + ioa_cfg->sdt_state = GET_DUMP; + + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return IRQ_HANDLED; + } + + ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index]; + + ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc); + + list_del(&ipr_cmd->queue); + del_timer(&ipr_cmd->timer); + ipr_cmd->done(ipr_cmd); + + rc = IRQ_HANDLED; + + if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) { + ioa_cfg->hrrq_curr++; + } else { + ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start; + ioa_cfg->toggle_bit ^= 1u; + } + } + + if (ipr_cmd != NULL) { + /* Clear the PCI interrupt */ + writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg; + } else + break; + } + + if (unlikely(rc == IRQ_NONE)) + rc = ipr_handle_other_interrupt(ioa_cfg, int_reg); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + return rc; +} + +/** + * ipr_build_ioadl - Build a scatter/gather list and map the buffer + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * Return value: + * 0 on success / -1 on failure + **/ +static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + int i; + struct scatterlist *sglist; + u32 length; + u32 ioadl_flags = 0; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + + length = scsi_cmd->request_bufflen; + + if (length == 0) + return 0; + + if (scsi_cmd->use_sg) { + ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, + scsi_cmd->request_buffer, + scsi_cmd->use_sg, + scsi_cmd->sc_data_direction); + + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { + ioadl_flags = 
IPR_IOADL_FLAGS_WRITE; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->write_data_transfer_length = cpu_to_be32(length); + ioarcb->write_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_READ; + ioarcb->read_data_transfer_length = cpu_to_be32(length); + ioarcb->read_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg); + } + + sglist = scsi_cmd->request_buffer; + + for (i = 0; i < ipr_cmd->dma_use_sg; i++) { + ioadl[i].flags_and_data_len = + cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i])); + ioadl[i].address = + cpu_to_be32(sg_dma_address(&sglist[i])); + } + + if (likely(ipr_cmd->dma_use_sg)) { + ioadl[i-1].flags_and_data_len |= + cpu_to_be32(IPR_IOADL_FLAGS_LAST); + return 0; + } else + dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); + } else { + if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_WRITE; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->write_data_transfer_length = cpu_to_be32(length); + ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) { + ioadl_flags = IPR_IOADL_FLAGS_READ; + ioarcb->read_data_transfer_length = cpu_to_be32(length); + ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + } + + ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev, + scsi_cmd->request_buffer, length, + scsi_cmd->sc_data_direction); + + if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) { + ipr_cmd->dma_use_sg = 1; + ioadl[0].flags_and_data_len = + cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST); + ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle); + return 0; + } else + dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n"); + } + + return -1; +} + +/** + * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes + * @scsi_cmd: scsi command struct + * + * Return value: + * task attributes + **/ +static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd) +{ + u8 tag[2]; + u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK; + + if (scsi_populate_tag_msg(scsi_cmd, tag)) { + switch (tag[0]) { + case MSG_SIMPLE_TAG: + rc = IPR_FLAGS_LO_SIMPLE_TASK; + break; + case MSG_HEAD_TAG: + rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK; + break; + case MSG_ORDERED_TAG: + rc = IPR_FLAGS_LO_ORDERED_TASK; + break; + }; + } + + return rc; +} + +/** + * ipr_erp_done - Process completion of ERP for a device + * @ipr_cmd: ipr command struct + * + * This function copies the sense buffer into the scsi_cmd + * struct and pushes the scsi_done function. 
+ * + * Return value: + * nothing + **/ +static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_resource_entry *res = scsi_cmd->device->hostdata; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { + scsi_cmd->result |= (DID_ERROR << 16); + ipr_sdev_err(scsi_cmd->device, + "Request Sense failed with IOASC: 0x%08X\n", ioasc); + } else { + memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE); + } + + if (res) + res->needs_sync_complete = 1; + ipr_unmap_sglist(ioa_cfg, ipr_cmd); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + scsi_cmd->scsi_done(scsi_cmd); +} + +/** + * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP + * @ipr_cmd: ipr command struct + * + * Return value: + * none + **/ +static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioarcb *ioarcb; + struct ipr_ioasa *ioasa; + + ioarcb = &ipr_cmd->ioarcb; + ioasa = &ipr_cmd->ioasa; + + memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt)); + ioarcb->write_data_transfer_length = 0; + ioarcb->read_data_transfer_length = 0; + ioarcb->write_ioadl_len = 0; + ioarcb->read_ioadl_len = 0; + ioasa->ioasc = 0; + ioasa->residual_data_len = 0; +} + +/** + * ipr_erp_request_sense - Send request sense to a device + * @ipr_cmd: ipr command struct + * + * This function sends a request sense to a device as a result + * of a check condition. + * + * Return value: + * nothing + **/ +static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + + ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); + + cmd_pkt->request_type = IPR_RQTYPE_SCSICDB; + cmd_pkt->cdb[0] = REQUEST_SENSE; + cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE; + cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE; + cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; + cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ); + + ipr_cmd->ioadl[0].flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE); + ipr_cmd->ioadl[0].address = + cpu_to_be32(ipr_cmd->sense_buffer_dma); + + ipr_cmd->ioarcb.read_ioadl_len = + cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ipr_cmd->ioarcb.read_data_transfer_length = + cpu_to_be32(SCSI_SENSE_BUFFERSIZE); + + ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout, + IPR_REQUEST_SENSE_TIMEOUT * 2); +} + +/** + * ipr_erp_cancel_all - Send cancel all to a device + * @ipr_cmd: ipr command struct + * + * This function sends a cancel all to a device to clear the + * queue. If we are running TCQ on the device, QERR is set to 1, + * which means all outstanding ops have been dropped on the floor. + * Cancel all will return them to us. + * + * Return value: + * nothing + **/ +static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_resource_entry *res = scsi_cmd->device->hostdata; + struct ipr_cmd_pkt *cmd_pkt; + + res->in_erp = 1; + + ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); + + cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; + cmd_pkt->request_type = IPR_RQTYPE_IOACMD; + cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS; + + ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout, + IPR_CANCEL_ALL_TIMEOUT); +} + +/** + * ipr_dump_ioasa - Dump contents of IOASA + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler when ops + * fail. 
It will log the IOASA if appropriate. Only called + * for GPDD ops. + * + * Return value: + * none + **/ +static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + int i; + u16 data_len; + u32 ioasc; + struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + u32 *ioasa_data = (u32 *)ioasa; + int error_index; + + ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK; + + if (0 == ioasc) + return; + + if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL) + return; + + error_index = ipr_get_error(ioasc); + + if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) { + /* Don't log an error if the IOA already logged one */ + if (ioasa->ilid != 0) + return; + + if (ipr_error_table[error_index].log_ioasa == 0) + return; + } + + ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n", + ipr_error_table[error_index].error); + + if ((ioasa->u.gpdd.end_state <= ARRAY_SIZE(ipr_gpdd_dev_end_states)) && + (ioasa->u.gpdd.bus_phase <= ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) { + ipr_sdev_err(ipr_cmd->scsi_cmd->device, + "Device End state: %s Phase: %s\n", + ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state], + ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]); + } + + if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len)) + data_len = sizeof(struct ipr_ioasa); + else + data_len = be16_to_cpu(ioasa->ret_stat_len); + + ipr_err("IOASA Dump:\n"); + + for (i = 0; i < data_len / 4; i += 4) { + ipr_err("%08X: %08X %08X %08X %08X\n", i*4, + be32_to_cpu(ioasa_data[i]), + be32_to_cpu(ioasa_data[i+1]), + be32_to_cpu(ioasa_data[i+2]), + be32_to_cpu(ioasa_data[i+3])); + } +} + +/** + * ipr_gen_sense - Generate SCSI sense data from an IOASA + * @ioasa: IOASA + * @sense_buf: sense data buffer + * + * Return value: + * none + **/ +static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd) +{ + u32 failing_lba; + u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer; + struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata; + struct ipr_ioasa *ioasa = &ipr_cmd->ioasa; + u32 ioasc = be32_to_cpu(ioasa->ioasc); + + memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE); + + if (ioasc >= IPR_FIRST_DRIVER_IOASC) + return; + + ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION; + + if (ipr_is_vset_device(res) && + ioasc == IPR_IOASC_MED_DO_NOT_REALLOC && + ioasa->u.vset.failing_lba_hi != 0) { + sense_buf[0] = 0x72; + sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc); + sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc); + sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc); + + sense_buf[7] = 12; + sense_buf[8] = 0; + sense_buf[9] = 0x0A; + sense_buf[10] = 0x80; + + failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi); + + sense_buf[12] = (failing_lba & 0xff000000) >> 24; + sense_buf[13] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[14] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[15] = failing_lba & 0x000000ff; + + failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); + + sense_buf[16] = (failing_lba & 0xff000000) >> 24; + sense_buf[17] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[18] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[19] = failing_lba & 0x000000ff; + } else { + sense_buf[0] = 0x70; + sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc); + sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc); + sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc); + + /* Illegal request */ + if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) && + (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) { + sense_buf[7] = 10; /* additional length */ + + /* IOARCB was in error */ + if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24) + sense_buf[15] = 0xC0; + else /* Parameter data was invalid 
*/ + sense_buf[15] = 0x80; + + sense_buf[16] = + ((IPR_FIELD_POINTER_MASK & + be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff; + sense_buf[17] = + (IPR_FIELD_POINTER_MASK & + be32_to_cpu(ioasa->ioasc_specific)) & 0xff; + } else { + if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) { + if (ipr_is_vset_device(res)) + failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo); + else + failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba); + + sense_buf[0] |= 0x80; /* Or in the Valid bit */ + sense_buf[3] = (failing_lba & 0xff000000) >> 24; + sense_buf[4] = (failing_lba & 0x00ff0000) >> 16; + sense_buf[5] = (failing_lba & 0x0000ff00) >> 8; + sense_buf[6] = failing_lba & 0x000000ff; + } + + sense_buf[7] = 6; /* additional length */ + } + } +} + +/** + * ipr_erp_start - Process an error response for a SCSI op + * @ioa_cfg: ioa config struct + * @ipr_cmd: ipr command struct + * + * This function determines whether or not to initiate ERP + * on the affected device. + * + * Return value: + * nothing + **/ +static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_cmnd *ipr_cmd) +{ + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + struct ipr_resource_entry *res = scsi_cmd->device->hostdata; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + if (!res) { + ipr_scsi_eh_done(ipr_cmd); + return; + } + + if (ipr_is_gscsi(res)) + ipr_dump_ioasa(ioa_cfg, ipr_cmd); + else + ipr_gen_sense(ipr_cmd); + + switch (ioasc & IPR_IOASC_IOASC_MASK) { + case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST: + scsi_cmd->result |= (DID_ERROR << 16); + break; + case IPR_IOASC_IR_RESOURCE_HANDLE: + scsi_cmd->result |= (DID_NO_CONNECT << 16); + break; + case IPR_IOASC_HW_SEL_TIMEOUT: + scsi_cmd->result |= (DID_NO_CONNECT << 16); + res->needs_sync_complete = 1; + break; + case IPR_IOASC_SYNC_REQUIRED: + if (!res->in_erp) + res->needs_sync_complete = 1; + scsi_cmd->result |= (DID_IMM_RETRY << 16); + break; + case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */ + scsi_cmd->result |= (DID_PASSTHROUGH << 16); + break; + case IPR_IOASC_BUS_WAS_RESET: + case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: + /* + * Report the bus reset and ask for a retry. The device + * will give CC/UA the next command. 
+ */ + if (!res->resetting_device) + scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel); + scsi_cmd->result |= (DID_ERROR << 16); + res->needs_sync_complete = 1; + break; + case IPR_IOASC_HW_DEV_BUS_STATUS: + scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc); + if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) { + ipr_erp_cancel_all(ipr_cmd); + return; + } + break; + case IPR_IOASC_NR_INIT_CMD_REQUIRED: + break; + default: + scsi_cmd->result |= (DID_ERROR << 16); + if (!ipr_is_vset_device(res)) + res->needs_sync_complete = 1; + break; + } + + ipr_unmap_sglist(ioa_cfg, ipr_cmd); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + scsi_cmd->scsi_done(scsi_cmd); +} + +/** + * ipr_scsi_done - mid-layer done function + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler for + * ops generated by the SCSI mid-layer + * + * Return value: + * none + **/ +static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; + u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len); + + if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { + ipr_unmap_sglist(ioa_cfg, ipr_cmd); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + scsi_cmd->scsi_done(scsi_cmd); + } else + ipr_erp_start(ioa_cfg, ipr_cmd); +} + +/** + * ipr_save_ioafp_mode_select - Save adapters mode select data + * @ioa_cfg: ioa config struct + * @scsi_cmd: scsi command struct + * + * This function saves mode select data for the adapter to + * use following an adapter reset. + * + * Return value: + * 0 on success / SCSI_MLQUEUE_HOST_BUSY on failure + **/ +static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg, + struct scsi_cmnd *scsi_cmd) +{ + if (!ioa_cfg->saved_mode_pages) { + ioa_cfg->saved_mode_pages = kmalloc(sizeof(struct ipr_mode_pages), + GFP_ATOMIC); + if (!ioa_cfg->saved_mode_pages) { + dev_err(&ioa_cfg->pdev->dev, + "IOA mode select buffer allocation failed\n"); + return SCSI_MLQUEUE_HOST_BUSY; + } + } + + memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]); + ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4]; + return 0; +} + +/** + * ipr_queuecommand - Queue a mid-layer request + * @scsi_cmd: scsi command struct + * @done: done function + * + * This function queues a request generated by the mid-layer. + * + * Return value: + * 0 on success + * SCSI_MLQUEUE_DEVICE_BUSY if device is busy + * SCSI_MLQUEUE_HOST_BUSY if host is busy + **/ +static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, + void (*done) (struct scsi_cmnd *)) +{ + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; + struct ipr_ioarcb *ioarcb; + struct ipr_cmnd *ipr_cmd; + int rc = 0; + + scsi_cmd->scsi_done = done; + ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata; + res = scsi_cmd->device->hostdata; + scsi_cmd->result = (DID_OK << 16); + + /* + * We are currently blocking all devices due to a host reset + * We have told the host to stop giving us new requests, but + * ERP ops don't count. 
FIXME + */ + if (unlikely(!ioa_cfg->allow_cmds)) + return SCSI_MLQUEUE_HOST_BUSY; + + /* + * FIXME - Create scsi_set_host_offline interface + * and the ioa_is_dead check can be removed + */ + if (unlikely(ioa_cfg->ioa_is_dead || !res)) { + memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + scsi_cmd->result = (DID_NO_CONNECT << 16); + scsi_cmd->scsi_done(scsi_cmd); + return 0; + } + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ioarcb = &ipr_cmd->ioarcb; + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); + + memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len); + ipr_cmd->scsi_cmd = scsi_cmd; + ioarcb->res_handle = res->cfgte.res_handle; + ipr_cmd->done = ipr_scsi_done; + ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr)); + + if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) { + if (scsi_cmd->underflow == 0) + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK; + + if (res->needs_sync_complete) { + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE; + res->needs_sync_complete = 0; + } + + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; + ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR; + ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd); + } + + if (!ipr_is_gscsi(res) && scsi_cmd->cmnd[0] >= 0xC0) + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + + if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT) + rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd); + + if (likely(rc == 0)) + rc = ipr_build_ioadl(ioa_cfg, ipr_cmd); + + if (likely(rc == 0)) { + mb(); + writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr), + ioa_cfg->regs.ioarrin_reg); + } else { + list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + return SCSI_MLQUEUE_HOST_BUSY; + } + + return 0; +} + +/** + * ipr_info - Get information about the card/driver + * @scsi_host: scsi host struct + * + * Return value: + * pointer to buffer with description string + **/ +static const char * ipr_ioa_info(struct Scsi_Host *host) +{ + static char buffer[512]; + struct ipr_ioa_cfg *ioa_cfg; + unsigned long lock_flags = 0; + + ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata; + + spin_lock_irqsave(host->host_lock, lock_flags); + sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type); + spin_unlock_irqrestore(host->host_lock, lock_flags); + + return buffer; +} + +static struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "IPR", + .info = ipr_ioa_info, + .queuecommand = ipr_queuecommand, + .eh_abort_handler = ipr_eh_abort, + .eh_device_reset_handler = ipr_eh_dev_reset, + .eh_host_reset_handler = ipr_eh_host_reset, + .slave_alloc = ipr_slave_alloc, + .slave_configure = ipr_slave_configure, + .slave_destroy = ipr_slave_destroy, + .bios_param = ipr_biosparam, + .can_queue = IPR_MAX_COMMANDS, + .this_id = -1, + .sg_tablesize = IPR_MAX_SGLIST, + .max_sectors = IPR_MAX_SECTORS, + .cmd_per_lun = IPR_MAX_CMD_PER_LUN, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = ipr_ioa_attrs, + .sdev_attrs = ipr_dev_attrs, + .proc_name = IPR_NAME +}; + +#ifdef CONFIG_PPC_PSERIES +static const u16 ipr_blocked_processors[] = { + PV_NORTHSTAR, + PV_PULSAR, + PV_POWER4, + PV_ICESTAR, + PV_SSTAR, + PV_POWER4p, + PV_630, + PV_630p +}; + +/** + * ipr_invalid_adapter - Determine if this adapter is supported on this hardware + * @ioa_cfg: ioa cfg struct + * + * Adapters that use Gemstone revision < 3.1 do not work reliably on + * certain pSeries hardware. 
This function determines if the given + * adapter is in one of these configurations or not. + * + * Return value: + * 1 if adapter is not supported / 0 if adapter is supported + **/ +static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) +{ + u8 rev_id; + int i; + + if (ioa_cfg->type == 0x5702) { + if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID, + &rev_id) == PCIBIOS_SUCCESSFUL) { + if (rev_id < 4) { + for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){ + if (__is_processor(ipr_blocked_processors[i])) + return 1; + } + } + } + } + return 0; +} +#else +#define ipr_invalid_adapter(ioa_cfg) 0 +#endif + +/** + * ipr_ioa_bringdown_done - IOA bring down completion. + * @ipr_cmd: ipr command struct + * + * This function processes the completion of an adapter bring down. + * It wakes any reset sleepers. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ioa_cfg->in_reset_reload = 0; + ioa_cfg->reset_retries = 0; + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + wake_up_all(&ioa_cfg->reset_wait_q); + + spin_unlock_irq(ioa_cfg->host->host_lock); + scsi_unblock_requests(ioa_cfg->host); + spin_lock_irq(ioa_cfg->host->host_lock); + LEAVE; + + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioa_reset_done - IOA reset completion. + * @ipr_cmd: ipr command struct + * + * This function processes the completion of an adapter reset. + * It schedules any necessary mid-layer add/removes and + * wakes any reset sleepers. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_resource_entry *res; + struct ipr_hostrcb *hostrcb, *temp; + int i = 0; + + ENTER; + ioa_cfg->in_reset_reload = 0; + ioa_cfg->allow_cmds = 1; + ioa_cfg->reset_cmd = NULL; + + list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { + if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) { + ipr_trace; + schedule_work(&ioa_cfg->work_q); + break; + } + } + + list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) { + list_del(&hostrcb->queue); + if (i++ < IPR_NUM_LOG_HCAMS) + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); + else + ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); + } + + dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n"); + + ioa_cfg->reset_retries = 0; + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + wake_up_all(&ioa_cfg->reset_wait_q); + + spin_unlock_irq(ioa_cfg->host->host_lock); + scsi_unblock_requests(ioa_cfg->host); + spin_lock_irq(ioa_cfg->host->host_lock); + + if (!ioa_cfg->allow_cmds) + scsi_block_requests(ioa_cfg->host); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer + * @supported_dev: supported device struct + * @vpids: vendor product id struct + * + * Return value: + * none + **/ +static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev, + struct ipr_std_inq_vpids *vpids) +{ + memset(supported_dev, 0, sizeof(struct ipr_supported_device)); + memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids)); + supported_dev->num_records = 1; + supported_dev->data_length = + cpu_to_be16(sizeof(struct ipr_supported_device)); + supported_dev->reserved = 0; +} + +/** + * ipr_set_supported_devs - Send Set Supported Devices for a device + * @ipr_cmd: ipr command struct + * + * This function sends 
a Set Supported Devices to the adapter + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_resource_entry *res = ipr_cmd->u.res; + + ipr_cmd->job_step = ipr_ioa_reset_done; + + list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) { + if (!ipr_is_af_dasd_device(res)) + continue; + + ipr_cmd->u.res = res; + ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids); + + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + + ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES; + ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff; + + ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | + sizeof(struct ipr_supported_device)); + ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma + + offsetof(struct ipr_misc_cbs, supp_dev)); + ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ioarcb->write_data_transfer_length = + cpu_to_be32(sizeof(struct ipr_supported_device)); + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, + IPR_SET_SUP_DEVICE_TIMEOUT); + + ipr_cmd->job_step = ipr_set_supported_devs; + return IPR_RC_JOB_RETURN; + } + + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_get_mode_page - Locate specified mode page + * @mode_pages: mode page buffer + * @page_code: page code to find + * @len: minimum required length for mode page + * + * Return value: + * pointer to mode page / NULL on failure + **/ +static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages, + u32 page_code, u32 len) +{ + struct ipr_mode_page_hdr *mode_hdr; + u32 page_length; + u32 length; + + if (!mode_pages || (mode_pages->hdr.length == 0)) + return NULL; + + length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len; + mode_hdr = (struct ipr_mode_page_hdr *) + (mode_pages->data + mode_pages->hdr.block_desc_len); + + while (length) { + if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) { + if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr))) + return mode_hdr; + break; + } else { + page_length = (sizeof(struct ipr_mode_page_hdr) + + mode_hdr->page_length); + length -= page_length; + mode_hdr = (struct ipr_mode_page_hdr *) + ((unsigned long)mode_hdr + page_length); + } + } + return NULL; +} + +/** + * ipr_check_term_power - Check for term power errors + * @ioa_cfg: ioa config struct + * @mode_pages: IOAFP mode pages buffer + * + * Check the IOAFP's mode page 28 for term power errors + * + * Return value: + * nothing + **/ +static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_mode_pages *mode_pages) +{ + int i; + int entry_length; + struct ipr_dev_bus_entry *bus; + struct ipr_mode_page28 *mode_page; + + mode_page = ipr_get_mode_page(mode_pages, 0x28, + sizeof(struct ipr_mode_page28)); + + entry_length = mode_page->entry_length; + + bus = mode_page->bus; + + for (i = 0; i < mode_page->num_entries; i++) { + if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) { + dev_err(&ioa_cfg->pdev->dev, + "Term power is absent on scsi bus %d\n", + bus->res_addr.bus); + } + + bus = (struct ipr_dev_bus_entry *)((char *)bus + 
entry_length); + } +} + +/** + * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table + * @ioa_cfg: ioa config struct + * + * Looks through the config table checking for SES devices. If + * the SES device is in the SES table indicating a maximum SCSI + * bus speed, the speed is limited for the bus. + * + * Return value: + * none + **/ +static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) +{ + u32 max_xfer_rate; + int i; + + for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { + max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i, + ioa_cfg->bus_attr[i].bus_width); + + if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate) + ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate; + } +} + +/** + * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28 + * @ioa_cfg: ioa config struct + * @mode_pages: mode page 28 buffer + * + * Updates mode page 28 based on driver configuration + * + * Return value: + * none + **/ +static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, + struct ipr_mode_pages *mode_pages) +{ + int i, entry_length; + struct ipr_dev_bus_entry *bus; + struct ipr_bus_attributes *bus_attr; + struct ipr_mode_page28 *mode_page; + + mode_page = ipr_get_mode_page(mode_pages, 0x28, + sizeof(struct ipr_mode_page28)); + + entry_length = mode_page->entry_length; + + /* Loop for each device bus entry */ + for (i = 0, bus = mode_page->bus; + i < mode_page->num_entries; + i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) { + if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) { + dev_err(&ioa_cfg->pdev->dev, + "Invalid resource address reported: 0x%08X\n", + IPR_GET_PHYS_LOC(bus->res_addr)); + continue; + } + + bus_attr = &ioa_cfg->bus_attr[i]; + bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY; + bus->bus_width = bus_attr->bus_width; + bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate); + bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK; + if (bus_attr->qas_enabled) + bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS; + else + bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS; + } +} + +/** + * ipr_build_mode_select - Build a mode select command + * @ipr_cmd: ipr command struct + * @res_handle: resource handle to send command to + * @parm: Byte 2 of Mode Sense command + * @dma_addr: DMA buffer address + * @xfer_len: data transfer length + * + * Return value: + * none + **/ +static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd, + u32 res_handle, u8 parm, u32 dma_addr, + u8 xfer_len) +{ + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + + ioarcb->res_handle = res_handle; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ; + ioarcb->cmd_pkt.cdb[0] = MODE_SELECT; + ioarcb->cmd_pkt.cdb[1] = parm; + ioarcb->cmd_pkt.cdb[4] = xfer_len; + + ioadl->flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len); + ioadl->address = cpu_to_be32(dma_addr); + ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len); +} + +/** + * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA + * @ipr_cmd: ipr command struct + * + * This function sets up the SCSI bus attributes and sends + * a Mode Select for Page 28 to activate them. 
+ + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages; + int length; + + ENTER; + if (ioa_cfg->saved_mode_pages) { + memcpy(mode_pages, ioa_cfg->saved_mode_pages, + ioa_cfg->saved_mode_page_len); + length = ioa_cfg->saved_mode_page_len; + } else { + ipr_scsi_bus_speed_limit(ioa_cfg); + ipr_check_term_power(ioa_cfg, mode_pages); + ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages); + length = mode_pages->hdr.length + 1; + mode_pages->hdr.length = 0; + } + + ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages), + length); + + ipr_cmd->job_step = ipr_set_supported_devs; + ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next, + struct ipr_resource_entry, queue); + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_build_mode_sense - Builds a mode sense command + * @ipr_cmd: ipr command struct + * @res_handle: resource handle to send command to + * @parm: Byte 2 of mode sense command + * @dma_addr: DMA address of mode sense buffer + * @xfer_len: Size of DMA buffer + * + * Return value: + * none + **/ +static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd, + u32 res_handle, + u8 parm, u32 dma_addr, u8 xfer_len) +{ + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + + ioarcb->res_handle = res_handle; + ioarcb->cmd_pkt.cdb[0] = MODE_SENSE; + ioarcb->cmd_pkt.cdb[2] = parm; + ioarcb->cmd_pkt.cdb[4] = xfer_len; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + + ioadl->flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len); + ioadl->address = cpu_to_be32(dma_addr); + ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); +} + +/** + * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA + * @ipr_cmd: ipr command struct + * + * This function sends a Page 28 mode sense to the IOA to + * retrieve SCSI bus attributes. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), + 0x28, ioa_cfg->vpd_cbs_dma + + offsetof(struct ipr_misc_cbs, mode_pages), + sizeof(struct ipr_mode_pages)); + + ipr_cmd->job_step = ipr_ioafp_mode_select_page28; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_init_res_table - Initialize the resource table + * @ipr_cmd: ipr command struct + * + * This function looks through the existing resource table, comparing + * it with the config table. This function will take care of old/new + * devices and schedule adding/removing them from the mid-layer + * as appropriate. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_resource_entry *res, *temp; + struct ipr_config_table_entry *cfgte; + int found, i; + LIST_HEAD(old_res); + + ENTER; + if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ) + dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n"); + + list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue) + list_move_tail(&res->queue, &old_res); + + for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) { + cfgte = &ioa_cfg->cfg_table->dev[i]; + found = 0; + + list_for_each_entry_safe(res, temp, &old_res, queue) { + if (!memcmp(&res->cfgte.res_addr, + &cfgte->res_addr, sizeof(cfgte->res_addr))) { + list_move_tail(&res->queue, &ioa_cfg->used_res_q); + found = 1; + break; + } + } + + if (!found) { + if (list_empty(&ioa_cfg->free_res_q)) { + dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n"); + break; + } + + found = 1; + res = list_entry(ioa_cfg->free_res_q.next, + struct ipr_resource_entry, queue); + list_move_tail(&res->queue, &ioa_cfg->used_res_q); + ipr_init_res_entry(res); + res->add_to_ml = 1; + } + + if (found) + memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry)); + } + + list_for_each_entry_safe(res, temp, &old_res, queue) { + if (res->sdev) { + res->del_from_ml = 1; + list_move_tail(&res->queue, &ioa_cfg->used_res_q); + } else { + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + } + } + + ipr_cmd->job_step = ipr_ioafp_mode_sense_page28; + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Query IOA Configuration command + * to the adapter to retrieve the IOA configuration table. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data; + + ENTER; + dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n", + ucode_vpd->major_release, ucode_vpd->card_type, + ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]); + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + + ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG; + ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff; + + ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ioarcb->read_data_transfer_length = + cpu_to_be32(sizeof(struct ipr_config_table)); + + ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma); + ioadl->flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table)); + + ipr_cmd->job_step = ipr_init_res_table; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_inquiry - Send an Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This utility function sends an inquiry to the adapter. 
+ + * Return value: + * none + **/ +static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page, + u32 dma_addr, u8 xfer_len) +{ + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; + + ENTER; + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + + ioarcb->cmd_pkt.cdb[0] = INQUIRY; + ioarcb->cmd_pkt.cdb[1] = flags; + ioarcb->cmd_pkt.cdb[2] = page; + ioarcb->cmd_pkt.cdb[4] = xfer_len; + + ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc)); + ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len); + + ioadl->address = cpu_to_be32(dma_addr); + ioadl->flags_and_data_len = + cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len); + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + LEAVE; +} + +/** + * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a Page 3 inquiry to the adapter + * to retrieve software VPD information. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + char type[5]; + + ENTER; + + /* Grab the type out of the VPD and store it away */ + memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4); + type[4] = '\0'; + ioa_cfg->type = simple_strtoul((char *)type, NULL, 16); + + ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg; + + ipr_ioafp_inquiry(ipr_cmd, 1, 3, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data), + sizeof(struct ipr_inquiry_page3)); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter. + * @ipr_cmd: ipr command struct + * + * This function sends a standard inquiry to the adapter. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + ENTER; + ipr_cmd->job_step = ipr_ioafp_page3_inquiry; + + ipr_ioafp_inquiry(ipr_cmd, 0, 0, + ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd), + sizeof(struct ipr_ioa_vpd)); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ. + * @ipr_cmd: ipr command struct + * + * This function sends an Identify Host Request Response Queue + * command to establish the HRRQ with the adapter. 
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; + + ENTER; + dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); + + ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q; + ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + + ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ioarcb->cmd_pkt.cdb[2] = + ((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff; + ioarcb->cmd_pkt.cdb[3] = + ((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff; + ioarcb->cmd_pkt.cdb[4] = + ((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[5] = + ((u32) ioa_cfg->host_rrq_dma) & 0xff; + ioarcb->cmd_pkt.cdb[7] = + ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff; + ioarcb->cmd_pkt.cdb[8] = + (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff; + + ipr_cmd->job_step = ipr_ioafp_std_inquiry; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_timer_done - Adapter reset timer function + * @ipr_cmd: ipr command struct + * + * Description: This function is used in adapter reset processing + * for timing events. If the reset_cmd pointer in the IOA + * config struct is not this adapter's we are doing nested + * resets and fail_all_ops will take care of freeing the + * command block. + * + * Return value: + * none + **/ +static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + + if (ioa_cfg->reset_cmd == ipr_cmd) { + list_del(&ipr_cmd->queue); + ipr_cmd->done(ipr_cmd); + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); +} + +/** + * ipr_reset_start_timer - Start a timer for adapter reset job + * @ipr_cmd: ipr command struct + * @timeout: timeout value + * + * Description: This function is used in adapter reset processing + * for timing events. If the reset_cmd pointer in the IOA + * config struct is not this adapter's we are doing nested + * resets and fail_all_ops will take care of freeing the + * command block. + * + * Return value: + * none + **/ +static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, + unsigned long timeout) +{ + list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q); + ipr_cmd->done = ipr_reset_ioa_job; + + ipr_cmd->timer.data = (unsigned long) ipr_cmd; + ipr_cmd->timer.expires = jiffies + timeout; + ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done; + add_timer(&ipr_cmd->timer); +} + +/** + * ipr_init_ioa_mem - Initialize ioa_cfg control block + * @ioa_cfg: ioa cfg struct + * + * Return value: + * nothing + **/ +static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg) +{ + memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS); + + /* Initialize Host RRQ pointers */ + ioa_cfg->hrrq_start = ioa_cfg->host_rrq; + ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1]; + ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start; + ioa_cfg->toggle_bit = 1; + + /* Zero out config table */ + memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table)); +} + +/** + * ipr_reset_enable_ioa - Enable the IOA following a reset. + * @ipr_cmd: ipr command struct + * + * This function reinitializes some control blocks and + * enables destructive diagnostics on the adapter. 
+ * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + volatile u32 int_reg; + + ENTER; + ipr_cmd->job_step = ipr_ioafp_indentify_hrrq; + ipr_init_ioa_mem(ioa_cfg); + + ioa_cfg->allow_interrupts = 1; + int_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + + if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) { + writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED), + ioa_cfg->regs.clr_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + return IPR_RC_JOB_CONTINUE; + } + + /* Enable destructive diagnostics on IOA */ + writel(IPR_DOORBELL, ioa_cfg->regs.set_uproc_interrupt_reg); + + writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg); + int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); + + dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n"); + + ipr_cmd->timer.data = (unsigned long) ipr_cmd; + ipr_cmd->timer.expires = jiffies + IPR_OPERATIONAL_TIMEOUT; + ipr_cmd->timer.function = (void (*)(unsigned long))ipr_timeout; + add_timer(&ipr_cmd->timer); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_wait_for_dump - Wait for a dump to timeout. + * @ipr_cmd: ipr command struct + * + * This function is invoked when an adapter dump has run out + * of processing time. + * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + if (ioa_cfg->sdt_state == GET_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + + ipr_cmd->job_step = ipr_reset_alert; + + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_unit_check_no_data - Log a unit check/no data error log + * @ioa_cfg: ioa config struct + * + * Logs an error indicating the adapter unit checked, but for some + * reason, we were unable to fetch the unit check buffer. + * + * Return value: + * nothing + **/ +static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg) +{ + ioa_cfg->errors_logged++; + dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n"); +} + +/** + * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA + * @ioa_cfg: ioa config struct + * + * Fetches the unit check buffer from the adapter by clocking the data + * through the mailbox register. 
+ + * Return value: + * nothing + **/ +static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg) +{ + unsigned long mailbox; + struct ipr_hostrcb *hostrcb; + struct ipr_uc_sdt sdt; + int rc, length; + + mailbox = readl(ioa_cfg->ioa_mailbox); + + if (!ipr_sdt_is_fmt2(mailbox)) { + ipr_unit_check_no_data(ioa_cfg); + return; + } + + memset(&sdt, 0, sizeof(struct ipr_uc_sdt)); + rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (u32 *) &sdt, + (sizeof(struct ipr_uc_sdt)) / sizeof(u32)); + + if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) || + !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) { + ipr_unit_check_no_data(ioa_cfg); + return; + } + + /* Find length of the first sdt entry (UC buffer) */ + length = (be32_to_cpu(sdt.entry[0].end_offset) - + be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK; + + hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, + struct ipr_hostrcb, queue); + list_del(&hostrcb->queue); + memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); + + rc = ipr_get_ldump_data_section(ioa_cfg, + be32_to_cpu(sdt.entry[0].bar_str_offset), + (u32 *)&hostrcb->hcam, + min(length, (int)sizeof(hostrcb->hcam)) / sizeof(u32)); + + if (!rc) + ipr_handle_log_data(ioa_cfg, hostrcb); + else + ipr_unit_check_no_data(ioa_cfg); + + list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q); +} + +/** + * ipr_reset_restore_cfg_space - Restore PCI config space. + * @ipr_cmd: ipr command struct + * + * Description: This function restores the saved PCI config space of + * the adapter, fails all outstanding ops back to the callers, and + * fetches the dump/unit check if applicable to this reset. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc; + + ENTER; + rc = pci_restore_state(ioa_cfg->pdev, ioa_cfg->pci_cfg_buf); + + if (rc != PCIBIOS_SUCCESSFUL) { + ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + return IPR_RC_JOB_CONTINUE; + } + + if (ipr_set_pcix_cmd_reg(ioa_cfg)) { + ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + return IPR_RC_JOB_CONTINUE; + } + + ipr_fail_all_ops(ioa_cfg); + + if (ioa_cfg->ioa_unit_checked) { + ioa_cfg->ioa_unit_checked = 0; + ipr_get_unit_check_buffer(ioa_cfg); + ipr_cmd->job_step = ipr_reset_alert; + ipr_reset_start_timer(ipr_cmd, 0); + return IPR_RC_JOB_RETURN; + } + + if (ioa_cfg->in_ioa_bringdown) { + ipr_cmd->job_step = ipr_ioa_bringdown_done; + } else { + ipr_cmd->job_step = ipr_reset_enable_ioa; + + if (GET_DUMP == ioa_cfg->sdt_state) { + ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT); + ipr_cmd->job_step = ipr_reset_wait_for_dump; + schedule_work(&ioa_cfg->work_q); + return IPR_RC_JOB_RETURN; + } + } + + LEAVE; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_start_bist - Run BIST on the adapter. + * @ipr_cmd: ipr command struct + * + * Description: This function runs BIST on the adapter, then delays 2 seconds. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc; + + ENTER; + rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); + + if (rc != PCIBIOS_SUCCESSFUL) { + ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR); + rc = IPR_RC_JOB_CONTINUE; + } else { + ipr_cmd->job_step = ipr_reset_restore_cfg_space; + ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); + rc = IPR_RC_JOB_RETURN; + } + + LEAVE; + return rc; +} + +/** + * ipr_reset_allowed - Query whether or not IOA can be reset + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 if reset not allowed / non-zero if reset is allowed + **/ +static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg) +{ + volatile u32 temp_reg; + + temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg); + return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0); +} + +/** + * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA. + * @ipr_cmd: ipr command struct + * + * Description: This function waits for adapter permission to run BIST, + * then runs BIST. If the adapter does not give permission after a + * reasonable time, we will reset the adapter anyway. The impact of + * resetting the adapter without warning the adapter is the risk of + * losing the persistent error log on the adapter. If the adapter is + * reset while it is writing to the flash on the adapter, the flash + * segment will have bad ECC and be zeroed. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int rc = IPR_RC_JOB_RETURN; + + if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) { + ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT; + ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); + } else { + ipr_cmd->job_step = ipr_reset_start_bist; + rc = IPR_RC_JOB_CONTINUE; + } + + return rc; +} + +/** + * ipr_reset_alert_part2 - Alert the adapter of a pending reset + * @ipr_cmd: ipr command struct + * + * Description: This function alerts the adapter that it will be reset. + * If memory space is not currently enabled, proceed directly + * to running BIST on the adapter. The timer must always be started + * so we guarantee we do not run BIST from ipr_isr. + * + * Return value: + * IPR_RC_JOB_RETURN + **/ +static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + u16 cmd_reg; + int rc; + + ENTER; + rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg); + + if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) { + ipr_mask_and_clear_interrupts(ioa_cfg, ~0); + writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg); + ipr_cmd->job_step = ipr_reset_wait_to_start_bist; + } else { + ipr_cmd->job_step = ipr_reset_start_bist; + } + + ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT; + ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_ucode_download_done - Microcode download completion + * @ipr_cmd: ipr command struct + * + * Description: This function unmaps the microcode download buffer. 
+ * + * Return value: + * IPR_RC_JOB_CONTINUE + **/ +static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; + + pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist, + sglist->num_sg, DMA_TO_DEVICE); + + ipr_cmd->job_step = ipr_reset_alert; + return IPR_RC_JOB_CONTINUE; +} + +/** + * ipr_reset_ucode_download - Download microcode to the adapter + * @ipr_cmd: ipr command struct + * + * Description: This function checks to see if it there is microcode + * to download to the adapter. If there is, a download is performed. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + struct ipr_sglist *sglist = ioa_cfg->ucode_sglist; + + ENTER; + ipr_cmd->job_step = ipr_reset_alert; + + if (!sglist) + return IPR_RC_JOB_CONTINUE; + + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB; + ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER; + ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE; + ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16; + ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8; + ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff; + + if (ipr_map_ucode_buffer(ipr_cmd, sglist, sglist->buffer_len)) { + dev_err(&ioa_cfg->pdev->dev, + "Failed to map microcode download buffer\n"); + return IPR_RC_JOB_CONTINUE; + } + + ipr_cmd->job_step = ipr_reset_ucode_download_done; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, + IPR_WRITE_BUFFER_TIMEOUT); + + LEAVE; + return IPR_RC_JOB_RETURN; +} + +/** + * ipr_reset_shutdown_ioa - Shutdown the adapter + * @ipr_cmd: ipr command struct + * + * Description: This function issues an adapter shutdown of the + * specified type to the specified adapter as part of the + * adapter reset job. + * + * Return value: + * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN + **/ +static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type; + unsigned long timeout; + int rc = IPR_RC_JOB_CONTINUE; + + ENTER; + if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) { + ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE); + ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD; + ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN; + ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type; + + if (shutdown_type == IPR_SHUTDOWN_ABBREV) + timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT; + else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL) + timeout = IPR_INTERNAL_TIMEOUT; + else + timeout = IPR_SHUTDOWN_TIMEOUT; + + ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout); + + rc = IPR_RC_JOB_RETURN; + ipr_cmd->job_step = ipr_reset_ucode_download; + } else + ipr_cmd->job_step = ipr_reset_alert; + + LEAVE; + return rc; +} + +/** + * ipr_reset_ioa_job - Adapter reset job + * @ipr_cmd: ipr command struct + * + * Description: This function is the job router for the adapter reset job. 
+ * + * Return value: + * none + **/ +static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) +{ + u32 rc, ioasc; + unsigned long scratch = ipr_cmd->u.scratch; + struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + + do { + ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); + + if (ioa_cfg->reset_cmd != ipr_cmd) { + /* + * We are doing nested adapter resets and this is + * not the current reset job. + */ + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + return; + } + + if (IPR_IOASC_SENSE_KEY(ioasc)) { + dev_err(&ioa_cfg->pdev->dev, + "0x%02X failed with IOASC: 0x%08X\n", + ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc); + + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + return; + } + + ipr_reinit_ipr_cmnd(ipr_cmd); + ipr_cmd->u.scratch = scratch; + rc = ipr_cmd->job_step(ipr_cmd); + } while(rc == IPR_RC_JOB_CONTINUE); +} + +/** + * _ipr_initiate_ioa_reset - Initiate an adapter reset + * @ioa_cfg: ioa config struct + * @job_step: first job step of reset job + * @shutdown_type: shutdown type + * + * Description: This function will initiate the reset of the given adapter + * starting at the selected job step. + * If the caller needs to wait on the completion of the reset, + * the caller must sleep on the reset_wait_q. + * + * Return value: + * none + **/ +static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, + int (*job_step) (struct ipr_cmnd *), + enum ipr_shutdown_type shutdown_type) +{ + struct ipr_cmnd *ipr_cmd; + + ioa_cfg->in_reset_reload = 1; + ioa_cfg->allow_cmds = 0; + scsi_block_requests(ioa_cfg->host); + + ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); + ioa_cfg->reset_cmd = ipr_cmd; + ipr_cmd->job_step = job_step; + ipr_cmd->u.shutdown_type = shutdown_type; + + ipr_reset_ioa_job(ipr_cmd); +} + +/** + * ipr_initiate_ioa_reset - Initiate an adapter reset + * @ioa_cfg: ioa config struct + * @shutdown_type: shutdown type + * + * Description: This function will initiate the reset of the given adapter. + * If the caller needs to wait on the completion of the reset, + * the caller must sleep on the reset_wait_q. + * + * Return value: + * none + **/ +static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, + enum ipr_shutdown_type shutdown_type) +{ + if (ioa_cfg->ioa_is_dead) + return; + + if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + + if (ioa_cfg->reset_retries++ > IPR_NUM_RESET_RELOAD_RETRIES) { + dev_err(&ioa_cfg->pdev->dev, + "IOA taken offline - error recovery failed\n"); + + ioa_cfg->reset_retries = 0; + ioa_cfg->ioa_is_dead = 1; + + if (ioa_cfg->in_ioa_bringdown) { + ioa_cfg->reset_cmd = NULL; + ioa_cfg->in_reset_reload = 0; + ipr_fail_all_ops(ioa_cfg); + wake_up_all(&ioa_cfg->reset_wait_q); + + spin_unlock_irq(ioa_cfg->host->host_lock); + scsi_unblock_requests(ioa_cfg->host); + spin_lock_irq(ioa_cfg->host->host_lock); + return; + } else { + ioa_cfg->in_ioa_bringdown = 1; + shutdown_type = IPR_SHUTDOWN_NONE; + } + } + + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, + shutdown_type); +} + +/** + * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..) + * @ioa_cfg: ioa cfg struct + * + * Description: This is the second phase of adapter intialization + * This function takes care of initilizing the adapter to the point + * where it can accept new commands. 
+ + * Return value: + * 0 on sucess / -EIO on failure + **/ +static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg) +{ + int rc = 0; + unsigned long host_lock_flags = 0; + + ENTER; + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); + _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + + if (ioa_cfg->ioa_is_dead) { + rc = -EIO; + } else if (ipr_invalid_adapter(ioa_cfg)) { + if (!ipr_testmode) + rc = -EIO; + + dev_err(&ioa_cfg->pdev->dev, + "Adapter not supported in this hardware configuration.\n"); + } + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + + LEAVE; + return rc; +} + +/** + * ipr_free_cmd_blks - Frees command blocks allocated for an adapter + * @ioa_cfg: ioa config struct + * + * Return value: + * none + **/ +static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { + if (ioa_cfg->ipr_cmnd_list[i]) + pci_pool_free(ioa_cfg->ipr_cmd_pool, + ioa_cfg->ipr_cmnd_list[i], + ioa_cfg->ipr_cmnd_list_dma[i]); + + ioa_cfg->ipr_cmnd_list[i] = NULL; + } + + if (ioa_cfg->ipr_cmd_pool) + pci_pool_destroy (ioa_cfg->ipr_cmd_pool); + + ioa_cfg->ipr_cmd_pool = NULL; +} + +/** + * ipr_free_mem - Frees memory allocated for an adapter + * @ioa_cfg: ioa cfg struct + * + * Return value: + * nothing + **/ +static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + kfree(ioa_cfg->res_entries); + pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs), + ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma); + ipr_free_cmd_blks(ioa_cfg); + pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS, + ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma); + pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table), + ioa_cfg->cfg_table, + ioa_cfg->cfg_table_dma); + + for (i = 0; i < IPR_NUM_HCAMS; i++) { + pci_free_consistent(ioa_cfg->pdev, + sizeof(struct ipr_hostrcb), + ioa_cfg->hostrcb[i], + ioa_cfg->hostrcb_dma[i]); + } + + ipr_free_dump(ioa_cfg); + kfree(ioa_cfg->saved_mode_pages); + kfree(ioa_cfg->trace); +} + +/** + * ipr_free_all_resources - Free all allocated resources for an adapter. + * @ipr_cmd: ipr command struct + * + * This function frees all allocated resources for the + * specified adapter. 
+ * + * Return value: + * none + **/ +static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg) +{ + ENTER; + free_irq(ioa_cfg->pdev->irq, ioa_cfg); + iounmap((void *) ioa_cfg->hdw_dma_regs); + release_mem_region(ioa_cfg->hdw_dma_regs_pci, + pci_resource_len(ioa_cfg->pdev, 0)); + ipr_free_mem(ioa_cfg); + scsi_host_put(ioa_cfg->host); + LEAVE; +} + +/** + * ipr_alloc_cmd_blks - Allocate command blocks for an adapter + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / -ENOMEM on allocation failure + **/ +static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) +{ + struct ipr_cmnd *ipr_cmd; + struct ipr_ioarcb *ioarcb; + u32 dma_addr; + int i; + + ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, + sizeof(struct ipr_cmnd), 8, 0); + + if (!ioa_cfg->ipr_cmd_pool) + return -ENOMEM; + + for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { + ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr); + + if (!ipr_cmd) { + ipr_free_cmd_blks(ioa_cfg); + return -ENOMEM; + } + + memset(ipr_cmd, 0, sizeof(*ipr_cmd)); + ioa_cfg->ipr_cmnd_list[i] = ipr_cmd; + ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr; + + ioarcb = &ipr_cmd->ioarcb; + ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr); + ioarcb->host_response_handle = cpu_to_be32(i << 2); + ioarcb->write_ioadl_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl)); + ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr; + ioarcb->ioasa_host_pci_addr = + cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa)); + ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa)); + ipr_cmd->cmd_index = i; + ipr_cmd->ioa_cfg = ioa_cfg; + ipr_cmd->sense_buffer_dma = dma_addr + + offsetof(struct ipr_cmnd, sense_buffer); + + list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); + } + + return 0; +} + +/** + * ipr_alloc_mem - Allocate memory for an adapter + * @ioa_cfg: ioa config struct + * + * Return value: + * 0 on success / non-zero for error + **/ +static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + ENTER; + ioa_cfg->res_entries = kmalloc(sizeof(struct ipr_resource_entry) * + IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL); + + if (!ioa_cfg->res_entries) + goto cleanup; + + memset(ioa_cfg->res_entries, 0, + sizeof(struct ipr_resource_entry) * IPR_MAX_PHYSICAL_DEVS); + + for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++) + list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q); + + ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev, + sizeof(struct ipr_misc_cbs), + &ioa_cfg->vpd_cbs_dma); + + if (!ioa_cfg->vpd_cbs) + goto cleanup; + + if (ipr_alloc_cmd_blks(ioa_cfg)) + goto cleanup; + + ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev, + sizeof(u32) * IPR_NUM_CMD_BLKS, + &ioa_cfg->host_rrq_dma); + + if (!ioa_cfg->host_rrq) + goto cleanup; + + ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev, + sizeof(struct ipr_config_table), + &ioa_cfg->cfg_table_dma); + + if (!ioa_cfg->cfg_table) + goto cleanup; + + for (i = 0; i < IPR_NUM_HCAMS; i++) { + ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev, + sizeof(struct ipr_hostrcb), + &ioa_cfg->hostrcb_dma[i]); + + if (!ioa_cfg->hostrcb[i]) + goto cleanup; + + memset(ioa_cfg->hostrcb[i], 0, sizeof(struct ipr_hostrcb)); + ioa_cfg->hostrcb[i]->hostrcb_dma = + ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam); + list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q); + } + + ioa_cfg->trace = kmalloc(sizeof(struct ipr_trace_entry) * + IPR_NUM_TRACE_ENTRIES, GFP_KERNEL); + + if (!ioa_cfg->trace) + goto 
cleanup; + + memset(ioa_cfg->trace, 0, + sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES); + + LEAVE; + return 0; + +cleanup: + ipr_free_mem(ioa_cfg); + + LEAVE; + return -ENOMEM; +} + +/** + * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values + * @ioa_cfg: ioa config struct + * + * Return value: + * none + **/ +static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg) +{ + int i; + + for (i = 0; i < IPR_MAX_NUM_BUSES; i++) { + ioa_cfg->bus_attr[i].bus = i; + ioa_cfg->bus_attr[i].qas_enabled = 0; + ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH; + if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds)) + ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed]; + else + ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE; + } +} + +/** + * ipr_init_ioa_cfg - Initialize IOA config struct + * @ioa_cfg: ioa config struct + * @host: scsi host struct + * @pdev: PCI dev struct + * + * Return value: + * none + **/ +static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg, + struct Scsi_Host *host, struct pci_dev *pdev) +{ + ioa_cfg->host = host; + ioa_cfg->pdev = pdev; + ioa_cfg->log_level = ipr_log_level; + sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER); + sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL); + sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL); + sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL); + sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START); + sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL); + sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL); + sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL); + + INIT_LIST_HEAD(&ioa_cfg->free_q); + INIT_LIST_HEAD(&ioa_cfg->pending_q); + INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); + INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); + INIT_LIST_HEAD(&ioa_cfg->free_res_q); + INIT_LIST_HEAD(&ioa_cfg->used_res_q); + INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg); + init_waitqueue_head(&ioa_cfg->reset_wait_q); + ioa_cfg->sdt_state = INACTIVE; + + ipr_initialize_bus_attr(ioa_cfg); + + host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS; + host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET; + host->max_channel = IPR_MAX_BUS_TO_SCAN; + host->unique_id = host->host_no; + host->max_cmd_len = IPR_MAX_CDB_LEN; + pci_set_drvdata(pdev, ioa_cfg); + + memcpy(&ioa_cfg->regs, &ioa_cfg->chip_cfg->regs, sizeof(ioa_cfg->regs)); + + ioa_cfg->regs.set_interrupt_mask_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.clr_interrupt_mask_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.sense_interrupt_mask_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.clr_interrupt_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.sense_interrupt_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.ioarrin_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.sense_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.set_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs; + ioa_cfg->regs.clr_uproc_interrupt_reg += ioa_cfg->hdw_dma_regs; +} + +/** + * ipr_probe_ioa - Allocates memory and does first stage of initialization + * @pdev: PCI device struct + * @dev_id: PCI device id struct + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int __devinit ipr_probe_ioa(struct pci_dev *pdev, + const struct pci_device_id *dev_id) +{ + struct ipr_ioa_cfg *ioa_cfg; + struct Scsi_Host *host; + unsigned long ipr_regs, ipr_regs_pci; + u32 rc = PCIBIOS_SUCCESSFUL; + + ENTER; + + if ((rc = pci_enable_device(pdev))) { + dev_err(&pdev->dev, "Cannot enable adapter\n"); + return rc; + } + + dev_info(&pdev->dev, "Found IOA 
with IRQ: %d\n", pdev->irq); + + host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg)); + + if (!host) { + dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n"); + return -ENOMEM; + } + + ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata; + memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg)); + + ioa_cfg->chip_cfg = (const struct ipr_chip_cfg_t *)dev_id->driver_data; + + ipr_regs_pci = pci_resource_start(pdev, 0); + + if (!request_mem_region(ipr_regs_pci, + pci_resource_len(pdev, 0), IPR_NAME)) { + dev_err(&pdev->dev, + "Couldn't register memory range of registers\n"); + scsi_host_put(host); + return -ENOMEM; + } + + ipr_regs = (unsigned long)ioremap(ipr_regs_pci, + pci_resource_len(pdev, 0)); + + if (!ipr_regs) { + dev_err(&pdev->dev, + "Couldn't map memory range of registers\n"); + release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0)); + scsi_host_put(host); + return -ENOMEM; + } + + ioa_cfg->hdw_dma_regs = ipr_regs; + ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci; + ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs; + + ipr_init_ioa_cfg(ioa_cfg, host, pdev); + + pci_set_master(pdev); + rc = pci_set_dma_mask(pdev, 0xffffffff); + + if (rc != PCIBIOS_SUCCESSFUL) { + dev_err(&pdev->dev, "Failed to set PCI DMA mask\n"); + rc = -EIO; + goto cleanup_nomem; + } + + rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, + ioa_cfg->chip_cfg->cache_line_size); + + if (rc != PCIBIOS_SUCCESSFUL) { + dev_err(&pdev->dev, "Write of cache line size failed\n"); + rc = -EIO; + goto cleanup_nomem; + } + + /* Save away PCI config space for use following IOA reset */ + rc = pci_save_state(pdev, ioa_cfg->pci_cfg_buf); + + if (rc != PCIBIOS_SUCCESSFUL) { + dev_err(&pdev->dev, "Failed to save PCI config space\n"); + rc = -EIO; + goto cleanup_nomem; + } + + if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg))) + goto cleanup_nomem; + + if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg))) + goto cleanup_nomem; + + if ((rc = ipr_alloc_mem(ioa_cfg))) + goto cleanup; + + ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER); + rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg); + + if (rc) { + dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n", + pdev->irq, rc); + goto cleanup_nolog; + } + + spin_lock(&ipr_driver_lock); + list_add_tail(&ioa_cfg->queue, &ipr_ioa_head); + spin_unlock(&ipr_driver_lock); + + LEAVE; + return 0; + +cleanup: + dev_err(&pdev->dev, "Couldn't allocate enough memory for device driver!\n"); +cleanup_nolog: + ipr_free_mem(ioa_cfg); +cleanup_nomem: + iounmap((void *) ipr_regs); + release_mem_region(ipr_regs_pci, pci_resource_len(pdev, 0)); + scsi_host_put(host); + + return rc; +} + +/** + * ipr_scan_vsets - Scans for VSET devices + * @ioa_cfg: ioa config struct + * + * Description: Since the VSET resources do not follow SAM in that we can have + * sparse LUNs with no LUN 0, we have to scan for these ourselves. + * + * Return value: + * none + **/ +static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg) +{ + int target, lun; + + for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++) + for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ ) + scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun); +} + +/** + * ipr_initiate_ioa_bringdown - Bring down an adapter + * @ioa_cfg: ioa config struct + * @shutdown_type: shutdown type + * + * Description: This function will initiate bringing down the adapter. + * This consists of issuing an IOA shutdown to the adapter + * to flush the cache, and running BIST. 
+ * If the caller needs to wait on the completion of the reset, + * the caller must sleep on the reset_wait_q. + * + * Return value: + * none + **/ +static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg, + enum ipr_shutdown_type shutdown_type) +{ + ENTER; + if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + ioa_cfg->reset_retries = 0; + ioa_cfg->in_ioa_bringdown = 1; + ipr_initiate_ioa_reset(ioa_cfg, shutdown_type); + LEAVE; +} + +/** + * __ipr_remove - Remove a single adapter + * @pdev: pci device struct + * + * Adapter hot plug remove entry point. + * + * Return value: + * none + **/ +static void __ipr_remove(struct pci_dev *pdev) +{ + unsigned long host_lock_flags = 0; + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + ENTER; + + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); + + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); + + spin_lock(&ipr_driver_lock); + list_del(&ioa_cfg->queue); + spin_unlock(&ipr_driver_lock); + + if (ioa_cfg->sdt_state == ABORT_DUMP) + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); + + ipr_free_all_resources(ioa_cfg); + + LEAVE; +} + +/** + * ipr_remove - IOA hot plug remove entry point + * @pdev: pci device struct + * + * Adapter hot plug remove entry point. + * + * Return value: + * none + **/ +static void ipr_remove(struct pci_dev *pdev) +{ + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + + ENTER; + + ioa_cfg->allow_cmds = 0; + flush_scheduled_work(); + ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj, + &ipr_trace_attr); + ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj, + &ipr_dump_attr); + scsi_remove_host(ioa_cfg->host); + + __ipr_remove(pdev); + + LEAVE; +} + +/** + * ipr_probe - Adapter hot plug add entry point + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int __devinit ipr_probe(struct pci_dev *pdev, + const struct pci_device_id *dev_id) +{ + struct ipr_ioa_cfg *ioa_cfg; + int rc; + + rc = ipr_probe_ioa(pdev, dev_id); + + if (rc) + return rc; + + ioa_cfg = pci_get_drvdata(pdev); + rc = ipr_probe_ioa_part2(ioa_cfg); + + if (rc) { + __ipr_remove(pdev); + return rc; + } + + rc = scsi_add_host(ioa_cfg->host, &pdev->dev); + + if (rc) { + __ipr_remove(pdev); + return rc; + } + + rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj, + &ipr_trace_attr); + + if (rc) { + scsi_remove_host(ioa_cfg->host); + __ipr_remove(pdev); + return rc; + } + + rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj, + &ipr_dump_attr); + + if (rc) { + ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj, + &ipr_trace_attr); + scsi_remove_host(ioa_cfg->host); + __ipr_remove(pdev); + return rc; + } + + scsi_scan_host(ioa_cfg->host); + ipr_scan_vsets(ioa_cfg); + scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN); + ioa_cfg->allow_ml_add_del = 1; + schedule_work(&ioa_cfg->work_q); + return 0; +} + +/** + * ipr_shutdown - Shutdown handler. + * @dev: device struct + * + * This function is invoked upon system shutdown/reboot. It will issue + * an adapter shutdown to the adapter to flush the write cache. 
+ * + * Return value: + * none + **/ +static void ipr_shutdown(struct device *dev) +{ + struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(to_pci_dev(dev)); + unsigned long lock_flags = 0; + + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); +} + +static struct pci_device_id ipr_pci_table[] __devinitdata = { + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, + 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, + 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, + 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, + 0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, + 0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] }, + { } +}; +MODULE_DEVICE_TABLE(pci, ipr_pci_table); + +static struct pci_driver ipr_driver = { + .name = IPR_NAME, + .id_table = ipr_pci_table, + .probe = ipr_probe, + .remove = ipr_remove, + .driver = { + .shutdown = ipr_shutdown, + }, +}; + +/** + * ipr_init - Module entry point + * + * Return value: + * 0 on success / non-zero on failure + **/ +static int __init ipr_init(void) +{ + ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n", + IPR_DRIVER_VERSION, IPR_DRIVER_DATE); + + pci_register_driver(&ipr_driver); + + return 0; +} + +/** + * ipr_exit - Module unload + * + * Module unload entry point. + * + * Return value: + * none + **/ +static void __exit ipr_exit(void) +{ + pci_unregister_driver(&ipr_driver); +} + +module_init(ipr_init); +module_exit(ipr_exit); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h new file mode 100644 index 000000000..468c80796 --- /dev/null +++ b/drivers/scsi/ipr.h @@ -0,0 +1,1252 @@ +/* + * ipr.h -- driver for IBM Power Linux RAID adapters + * + * Written By: Brian King, IBM Corporation + * + * Copyright (C) 2003, 2004 IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#ifndef _IPR_H +#define _IPR_H + +#include +#include +#include +#include +#include +#ifdef CONFIG_KDB +#include +#endif + +/* + * Literals + */ +#define IPR_DRIVER_VERSION "2.0.7" +#define IPR_DRIVER_DATE "(May 21, 2004)" + +/* + * IPR_DBG_TRACE: Setting this to 1 will turn on some general function tracing + * resulting in a bunch of extra debugging printks to the console + * + * IPR_DEBUG: Setting this to 1 will turn on some error path tracing. + * Enables the ipr_trace macro. 
+ */ +#ifdef IPR_DEBUG_ALL +#define IPR_DEBUG 1 +#define IPR_DBG_TRACE 1 +#else +#define IPR_DEBUG 0 +#define IPR_DBG_TRACE 0 +#endif + +/* + * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding + * ops per device for devices not running tagged command queuing. + * This can be adjusted at runtime through sysfs device attributes. + */ +#define IPR_MAX_CMD_PER_LUN 6 + +/* + * IPR_NUM_BASE_CMD_BLKS: This defines the maximum number of + * ops the mid-layer can send to the adapter. + */ +#define IPR_NUM_BASE_CMD_BLKS 100 + +#define IPR_SUBS_DEV_ID_2780 0x0264 +#define IPR_SUBS_DEV_ID_5702 0x0266 +#define IPR_SUBS_DEV_ID_5703 0x0278 +#define IPR_SUBS_DEV_ID_572E 0x02D3 +#define IPR_SUBS_DEV_ID_573D 0x02D4 + +#define IPR_NAME "ipr" + +/* + * Return codes + */ +#define IPR_RC_JOB_CONTINUE 1 +#define IPR_RC_JOB_RETURN 2 + +/* + * IOASCs + */ +#define IPR_IOASC_NR_INIT_CMD_REQUIRED 0x02040200 +#define IPR_IOASC_SYNC_REQUIRED 0x023f0000 +#define IPR_IOASC_MED_DO_NOT_REALLOC 0x03110C00 +#define IPR_IOASC_HW_SEL_TIMEOUT 0x04050000 +#define IPR_IOASC_HW_DEV_BUS_STATUS 0x04448500 +#define IPR_IOASC_IOASC_MASK 0xFFFFFF00 +#define IPR_IOASC_SCSI_STATUS_MASK 0x000000FF +#define IPR_IOASC_IR_RESOURCE_HANDLE 0x05250000 +#define IPR_IOASC_BUS_WAS_RESET 0x06290000 +#define IPR_IOASC_BUS_WAS_RESET_BY_OTHER 0x06298000 +#define IPR_IOASC_ABORTED_CMD_TERM_BY_HOST 0x0B5A0000 + +#define IPR_FIRST_DRIVER_IOASC 0x10000000 +#define IPR_IOASC_IOA_WAS_RESET 0x10000001 +#define IPR_IOASC_PCI_ACCESS_ERROR 0x10000002 + +#define IPR_NUM_LOG_HCAMS 2 +#define IPR_NUM_CFG_CHG_HCAMS 2 +#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS) +#define IPR_MAX_NUM_TARGETS_PER_BUS 0x10 +#define IPR_MAX_NUM_LUNS_PER_TARGET 256 +#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET 8 +#define IPR_VSET_BUS 0xff +#define IPR_IOA_BUS 0xff +#define IPR_IOA_TARGET 0xff +#define IPR_IOA_LUN 0xff +#define IPR_MAX_NUM_BUSES 4 +#define IPR_MAX_BUS_TO_SCAN IPR_MAX_NUM_BUSES + +#define IPR_NUM_RESET_RELOAD_RETRIES 3 + +/* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */ +#define IPR_NUM_INTERNAL_CMD_BLKS (IPR_NUM_HCAMS + \ + ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 3) + +#define IPR_MAX_COMMANDS IPR_NUM_BASE_CMD_BLKS +#define IPR_NUM_CMD_BLKS (IPR_NUM_BASE_CMD_BLKS + \ + IPR_NUM_INTERNAL_CMD_BLKS) + +#define IPR_MAX_PHYSICAL_DEVS 192 + +#define IPR_MAX_SGLIST 64 +#define IPR_MAX_SECTORS 512 +#define IPR_MAX_CDB_LEN 16 + +#define IPR_DEFAULT_BUS_WIDTH 16 +#define IPR_80MBs_SCSI_RATE ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) +#define IPR_U160_SCSI_RATE ((160 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) +#define IPR_U320_SCSI_RATE ((320 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8)) +#define IPR_MAX_SCSI_RATE(width) ((320 * 10) / ((width) / 8)) + +#define IPR_IOA_RES_HANDLE 0xffffffff +#define IPR_IOA_RES_ADDR 0x00ffffff + +/* + * Adapter Commands + */ +#define IPR_RESET_DEVICE 0xC3 +#define IPR_RESET_TYPE_SELECT 0x80 +#define IPR_LUN_RESET 0x40 +#define IPR_TARGET_RESET 0x20 +#define IPR_BUS_RESET 0x10 +#define IPR_ID_HOST_RR_Q 0xC4 +#define IPR_QUERY_IOA_CONFIG 0xC5 +#define IPR_ABORT_TASK 0xC7 +#define IPR_CANCEL_ALL_REQUESTS 0xCE +#define IPR_HOST_CONTROLLED_ASYNC 0xCF +#define IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE 0x01 +#define IPR_HCAM_CDB_OP_CODE_LOG_DATA 0x02 +#define IPR_SET_SUPPORTED_DEVICES 0xFB +#define IPR_IOA_SHUTDOWN 0xF7 +#define IPR_WR_BUF_DOWNLOAD_AND_SAVE 0x05 + +/* + * Timeouts + */ +#define IPR_SHUTDOWN_TIMEOUT (10 * 60 * HZ) +#define IPR_VSET_RW_TIMEOUT (2 * 60 * HZ) +#define IPR_ABBREV_SHUTDOWN_TIMEOUT (10 * HZ) 
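As a quick worked check of the command-block budget defined above: with 2 log and 2 config-change HCAMs and 3 reset/reload retries, IPR_NUM_INTERNAL_CMD_BLKS works out to 4 + ((3 + 1) * 2) + 3 = 15, and IPR_NUM_CMD_BLKS to 100 + 15 = 115. A tiny standalone program that mirrors those formulas (illustrative only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            int num_hcams = 2 + 2;                          /* log + config-change HCAMs */
            int internal  = num_hcams + ((3 + 1) * 2) + 3;  /* HCAMs, reset, bringdown, ERP */
            int total     = 100 + internal;                 /* base blocks + internal blocks */

            printf("internal=%d total=%d\n", internal, total);  /* internal=15 total=115 */
            return 0;
    }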
+#define IPR_DEVICE_RESET_TIMEOUT (30 * HZ) +#define IPR_CANCEL_ALL_TIMEOUT (30 * HZ) +#define IPR_ABORT_TASK_TIMEOUT (30 * HZ) +#define IPR_INTERNAL_TIMEOUT (30 * HZ) +#define IPR_WRITE_BUFFER_TIMEOUT (10 * 60 * HZ) +#define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ) +#define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ) +#define IPR_OPERATIONAL_TIMEOUT (5 * 60 * HZ) +#define IPR_WAIT_FOR_RESET_TIMEOUT (2 * HZ) +#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10) +#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ) +#define IPR_DUMP_TIMEOUT (15 * HZ) + +/* + * SCSI Literals + */ +#define IPR_VENDOR_ID_LEN 8 +#define IPR_PROD_ID_LEN 16 +#define IPR_SERIAL_NUM_LEN 8 + +/* + * Hardware literals + */ +#define IPR_FMT2_MBX_ADDR_MASK 0x0fffffff +#define IPR_FMT2_MBX_BAR_SEL_MASK 0xf0000000 +#define IPR_FMT2_MKR_BAR_SEL_SHIFT 28 +#define IPR_GET_FMT2_BAR_SEL(mbx) \ +(((mbx) & IPR_FMT2_MBX_BAR_SEL_MASK) >> IPR_FMT2_MKR_BAR_SEL_SHIFT) +#define IPR_SDT_FMT2_BAR0_SEL 0x0 +#define IPR_SDT_FMT2_BAR1_SEL 0x1 +#define IPR_SDT_FMT2_BAR2_SEL 0x2 +#define IPR_SDT_FMT2_BAR3_SEL 0x3 +#define IPR_SDT_FMT2_BAR4_SEL 0x4 +#define IPR_SDT_FMT2_BAR5_SEL 0x5 +#define IPR_SDT_FMT2_EXP_ROM_SEL 0x8 +#define IPR_FMT2_SDT_READY_TO_USE 0xC4D4E3F2 +#define IPR_DOORBELL 0x82800000 + +#define IPR_PCII_IOA_TRANS_TO_OPER (0x80000000 >> 0) +#define IPR_PCII_IOARCB_XFER_FAILED (0x80000000 >> 3) +#define IPR_PCII_IOA_UNIT_CHECKED (0x80000000 >> 4) +#define IPR_PCII_NO_HOST_RRQ (0x80000000 >> 5) +#define IPR_PCII_CRITICAL_OPERATION (0x80000000 >> 6) +#define IPR_PCII_IO_DEBUG_ACKNOWLEDGE (0x80000000 >> 7) +#define IPR_PCII_IOARRIN_LOST (0x80000000 >> 27) +#define IPR_PCII_MMIO_ERROR (0x80000000 >> 28) +#define IPR_PCII_PROC_ERR_STATE (0x80000000 >> 29) +#define IPR_PCII_HRRQ_UPDATED (0x80000000 >> 30) +#define IPR_PCII_CORE_ISSUED_RST_REQ (0x80000000 >> 31) + +#define IPR_PCII_ERROR_INTERRUPTS \ +(IPR_PCII_IOARCB_XFER_FAILED | IPR_PCII_IOA_UNIT_CHECKED | \ +IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR) + +#define IPR_PCII_OPER_INTERRUPTS \ +(IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED | IPR_PCII_IOA_TRANS_TO_OPER) + +#define IPR_UPROCI_RESET_ALERT (0x80000000 >> 7) +#define IPR_UPROCI_IO_DEBUG_ALERT (0x80000000 >> 9) + +#define IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC 200000 /* 200 ms */ +#define IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC 200000 /* 200 ms */ + +/* + * Dump literals + */ +#define IPR_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024) +#define IPR_NUM_SDT_ENTRIES 511 +#define IPR_MAX_NUM_DUMP_PAGES ((IPR_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1) + +/* + * Misc literals + */ +#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST + +/* + * Adapter interface types + */ + +struct ipr_res_addr { + u8 reserved; + u8 bus; + u8 target; + u8 lun; +#define IPR_GET_PHYS_LOC(res_addr) \ + (((res_addr).bus << 16) | ((res_addr).target << 8) | (res_addr).lun) +}__attribute__((packed, aligned (4))); + +struct ipr_std_inq_vpids { + u8 vendor_id[IPR_VENDOR_ID_LEN]; + u8 product_id[IPR_PROD_ID_LEN]; +}__attribute__((packed)); + +struct ipr_std_inq_data { + u8 peri_qual_dev_type; +#define IPR_STD_INQ_PERI_QUAL(peri) ((peri) >> 5) +#define IPR_STD_INQ_PERI_DEV_TYPE(peri) ((peri) & 0x1F) + + u8 removeable_medium_rsvd; +#define IPR_STD_INQ_REMOVEABLE_MEDIUM 0x80 + +#define IPR_IS_DASD_DEVICE(std_inq) \ +((IPR_STD_INQ_PERI_DEV_TYPE((std_inq).peri_qual_dev_type) == TYPE_DISK) && \ +!(((std_inq).removeable_medium_rsvd) & IPR_STD_INQ_REMOVEABLE_MEDIUM)) + +#define IPR_IS_SES_DEVICE(std_inq) \ +(IPR_STD_INQ_PERI_DEV_TYPE((std_inq).peri_qual_dev_type) == TYPE_ENCLOSURE) + + u8 
version; + u8 aen_naca_fmt; + u8 additional_len; + u8 sccs_rsvd; + u8 bq_enc_multi; + u8 sync_cmdq_flags; + + struct ipr_std_inq_vpids vpids; + + u8 ros_rsvd_ram_rsvd[4]; + + u8 serial_num[IPR_SERIAL_NUM_LEN]; +}__attribute__ ((packed)); + +struct ipr_config_table_entry { + u8 service_level; + u8 array_id; + u8 flags; +#define IPR_IS_IOA_RESOURCE 0x80 +#define IPR_IS_ARRAY_MEMBER 0x20 +#define IPR_IS_HOT_SPARE 0x10 + + u8 rsvd_subtype; +#define IPR_RES_SUBTYPE(res) (((res)->cfgte.rsvd_subtype) & 0x0f) +#define IPR_SUBTYPE_AF_DASD 0 +#define IPR_SUBTYPE_GENERIC_SCSI 1 +#define IPR_SUBTYPE_VOLUME_SET 2 + + struct ipr_res_addr res_addr; + u32 res_handle; + u32 reserved4[2]; + struct ipr_std_inq_data std_inq_data; +}__attribute__ ((packed, aligned (4))); + +struct ipr_config_table_hdr { + u8 num_entries; + u8 flags; +#define IPR_UCODE_DOWNLOAD_REQ 0x10 + u16 reserved; +}__attribute__((packed, aligned (4))); + +struct ipr_config_table { + struct ipr_config_table_hdr hdr; + struct ipr_config_table_entry dev[IPR_MAX_PHYSICAL_DEVS]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_cfg_ch_not { + struct ipr_config_table_entry cfgte; + u8 reserved[936]; +}__attribute__((packed, aligned (4))); + +struct ipr_supported_device { + u16 data_length; + u8 reserved; + u8 num_records; + struct ipr_std_inq_vpids vpids; + u8 reserved2[16]; +}__attribute__((packed, aligned (4))); + +/* Command packet structure */ +struct ipr_cmd_pkt { + u16 reserved; /* Reserved by IOA */ + u8 request_type; +#define IPR_RQTYPE_SCSICDB 0x00 +#define IPR_RQTYPE_IOACMD 0x01 +#define IPR_RQTYPE_HCAM 0x02 + + u8 luntar_luntrn; + + u8 flags_hi; +#define IPR_FLAGS_HI_WRITE_NOT_READ 0x80 +#define IPR_FLAGS_HI_NO_ULEN_CHK 0x20 +#define IPR_FLAGS_HI_SYNC_OVERRIDE 0x10 +#define IPR_FLAGS_HI_SYNC_COMPLETE 0x08 +#define IPR_FLAGS_HI_NO_LINK_DESC 0x04 + + u8 flags_lo; +#define IPR_FLAGS_LO_ALIGNED_BFR 0x20 +#define IPR_FLAGS_LO_DELAY_AFTER_RST 0x10 +#define IPR_FLAGS_LO_UNTAGGED_TASK 0x00 +#define IPR_FLAGS_LO_SIMPLE_TASK 0x02 +#define IPR_FLAGS_LO_ORDERED_TASK 0x04 +#define IPR_FLAGS_LO_HEAD_OF_Q_TASK 0x06 +#define IPR_FLAGS_LO_ACA_TASK 0x08 + + u8 cdb[16]; + u16 timeout; +}__attribute__ ((packed, aligned(4))); + +/* IOA Request Control Block 128 bytes */ +struct ipr_ioarcb { + u32 ioarcb_host_pci_addr; + u32 reserved; + u32 res_handle; + u32 host_response_handle; + u32 reserved1; + u32 reserved2; + u32 reserved3; + + u32 write_data_transfer_length; + u32 read_data_transfer_length; + u32 write_ioadl_addr; + u32 write_ioadl_len; + u32 read_ioadl_addr; + u32 read_ioadl_len; + + u32 ioasa_host_pci_addr; + u16 ioasa_len; + u16 reserved4; + + struct ipr_cmd_pkt cmd_pkt; + + u32 add_cmd_parms_len; + u32 add_cmd_parms[10]; +}__attribute__((packed, aligned (4))); + +struct ipr_ioadl_desc { + u32 flags_and_data_len; +#define IPR_IOADL_FLAGS_MASK 0xff000000 +#define IPR_IOADL_GET_FLAGS(x) (be32_to_cpu(x) & IPR_IOADL_FLAGS_MASK) +#define IPR_IOADL_DATA_LEN_MASK 0x00ffffff +#define IPR_IOADL_GET_DATA_LEN(x) (be32_to_cpu(x) & IPR_IOADL_DATA_LEN_MASK) +#define IPR_IOADL_FLAGS_READ 0x48000000 +#define IPR_IOADL_FLAGS_READ_LAST 0x49000000 +#define IPR_IOADL_FLAGS_WRITE 0x68000000 +#define IPR_IOADL_FLAGS_WRITE_LAST 0x69000000 +#define IPR_IOADL_FLAGS_LAST 0x01000000 + + u32 address; +}__attribute__((packed, aligned (8))); + +struct ipr_ioasa_vset { + u32 failing_lba_hi; + u32 failing_lba_lo; + u32 ioa_data[22]; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa_af_dasd { + u32 failing_lba; +}__attribute__((packed, aligned (4))); + 
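The ipr_ioadl_desc word above packs an 8-bit flags field and a 24-bit byte count into a single u32, which the driver stores big-endian (the GET macros go through be32_to_cpu). A small host-order illustration of that split, reusing the mask and flag values from the macros above (standalone, not part of the patch; endian conversion omitted):

    #include <stdio.h>

    int main(void)
    {
            unsigned int write_last = 0x69000000;   /* IPR_IOADL_FLAGS_WRITE_LAST */
            unsigned int word = write_last | 512;   /* describe a 512-byte buffer */

            printf("flags=0x%08x len=%u\n",
                   word & 0xff000000,               /* IPR_IOADL_FLAGS_MASK    */
                   word & 0x00ffffff);              /* IPR_IOADL_DATA_LEN_MASK */
            return 0;                               /* flags=0x69000000 len=512 */
    }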
+struct ipr_ioasa_gpdd { + u8 end_state; + u8 bus_phase; + u16 reserved; + u32 ioa_data[23]; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa_raw { + u32 ioa_data[24]; +}__attribute__((packed, aligned (4))); + +struct ipr_ioasa { + u32 ioasc; +#define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24) +#define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16) +#define IPR_IOASC_SENSE_QUAL(ioasc) (((ioasc) & 0x0000ff00) >> 8) +#define IPR_IOASC_SENSE_STATUS(ioasc) ((ioasc) & 0x000000ff) + + u16 ret_stat_len; /* Length of the returned IOASA */ + + u16 avail_stat_len; /* Total Length of status available. */ + + u32 residual_data_len; /* number of bytes in the host data */ + /* buffers that were not used by the IOARCB command. */ + + u32 ilid; +#define IPR_NO_ILID 0 +#define IPR_DRIVER_ILID 0xffffffff + + u32 fd_ioasc; + + u32 fd_phys_locator; + + u32 fd_res_handle; + + u32 ioasc_specific; /* status code specific field */ +#define IPR_IOASC_SPECIFIC_MASK 0x00ffffff +#define IPR_FIELD_POINTER_VALID (0x80000000 >> 8) +#define IPR_FIELD_POINTER_MASK 0x0000ffff + + union { + struct ipr_ioasa_vset vset; + struct ipr_ioasa_af_dasd dasd; + struct ipr_ioasa_gpdd gpdd; + struct ipr_ioasa_raw raw; + } u; +}__attribute__((packed, aligned (4))); + +struct ipr_mode_parm_hdr { + u8 length; + u8 medium_type; + u8 device_spec_parms; + u8 block_desc_len; +}__attribute__((packed)); + +struct ipr_mode_pages { + struct ipr_mode_parm_hdr hdr; + u8 data[255 - sizeof(struct ipr_mode_parm_hdr)]; +}__attribute__((packed)); + +struct ipr_mode_page_hdr { + u8 ps_page_code; +#define IPR_MODE_PAGE_PS 0x80 +#define IPR_GET_MODE_PAGE_CODE(hdr) ((hdr)->ps_page_code & 0x3F) + u8 page_length; +}__attribute__ ((packed)); + +struct ipr_dev_bus_entry { + struct ipr_res_addr res_addr; + u8 flags; +#define IPR_SCSI_ATTR_ENABLE_QAS 0x80 +#define IPR_SCSI_ATTR_DISABLE_QAS 0x40 +#define IPR_SCSI_ATTR_QAS_MASK 0xC0 +#define IPR_SCSI_ATTR_ENABLE_TM 0x20 +#define IPR_SCSI_ATTR_NO_TERM_PWR 0x10 +#define IPR_SCSI_ATTR_TM_SUPPORTED 0x08 +#define IPR_SCSI_ATTR_LVD_TO_SE_NOT_ALLOWED 0x04 + + u8 scsi_id; + u8 bus_width; + u8 extended_reset_delay; +#define IPR_EXTENDED_RESET_DELAY 7 + + u32 max_xfer_rate; + + u8 spinup_delay; + u8 reserved3; + u16 reserved4; +}__attribute__((packed, aligned (4))); + +struct ipr_mode_page28 { + struct ipr_mode_page_hdr hdr; + u8 num_entries; + u8 entry_length; + struct ipr_dev_bus_entry bus[0]; +}__attribute__((packed)); + +struct ipr_ioa_vpd { + struct ipr_std_inq_data std_inq_data; + u8 ascii_part_num[12]; + u8 reserved[40]; + u8 ascii_plant_code[4]; +}__attribute__((packed)); + +struct ipr_inquiry_page3 { + u8 peri_qual_dev_type; + u8 page_code; + u8 reserved1; + u8 page_length; + u8 ascii_len; + u8 reserved2[3]; + u8 load_id[4]; + u8 major_release; + u8 card_type; + u8 minor_release[2]; + u8 ptf_number[4]; + u8 patch_number[4]; +}__attribute__((packed)); + +struct ipr_hostrcb_device_data_entry { + struct ipr_std_inq_vpids dev_vpids; + u8 dev_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_res_addr dev_res_addr; + struct ipr_std_inq_vpids new_dev_vpids; + u8 new_dev_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids ioa_last_with_dev_vpids; + u8 ioa_last_with_dev_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids cfc_last_with_dev_vpids; + u8 cfc_last_with_dev_sn[IPR_SERIAL_NUM_LEN]; + u32 ioa_data[5]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_array_data_entry { + struct ipr_std_inq_vpids vpids; + u8 serial_num[IPR_SERIAL_NUM_LEN]; + struct ipr_res_addr expected_dev_res_addr; + struct 
ipr_res_addr dev_res_addr; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_ff_error { + u32 ioa_data[246]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_01_error { + u32 seek_counter; + u32 read_counter; + u8 sense_data[32]; + u32 ioa_data[236]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_02_error { + struct ipr_std_inq_vpids ioa_vpids; + u8 ioa_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids cfc_vpids; + u8 cfc_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids ioa_last_attached_to_cfc_vpids; + u8 ioa_last_attached_to_cfc_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids cfc_last_attached_to_ioa_vpids; + u8 cfc_last_attached_to_ioa_sn[IPR_SERIAL_NUM_LEN]; + u32 ioa_data[3]; + u8 reserved[844]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_03_error { + struct ipr_std_inq_vpids ioa_vpids; + u8 ioa_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids cfc_vpids; + u8 cfc_sn[IPR_SERIAL_NUM_LEN]; + u32 errors_detected; + u32 errors_logged; + u8 ioa_data[12]; + struct ipr_hostrcb_device_data_entry dev_entry[3]; + u8 reserved[444]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_type_04_error { + struct ipr_std_inq_vpids ioa_vpids; + u8 ioa_sn[IPR_SERIAL_NUM_LEN]; + struct ipr_std_inq_vpids cfc_vpids; + u8 cfc_sn[IPR_SERIAL_NUM_LEN]; + u8 ioa_data[12]; + struct ipr_hostrcb_array_data_entry array_member[10]; + u32 exposed_mode_adn; + u32 array_id; + struct ipr_std_inq_vpids incomp_dev_vpids; + u8 incomp_dev_sn[IPR_SERIAL_NUM_LEN]; + u32 ioa_data2; + struct ipr_hostrcb_array_data_entry array_member2[8]; + struct ipr_res_addr last_func_vset_res_addr; + u8 vset_serial_num[IPR_SERIAL_NUM_LEN]; + u8 protection_level[8]; + u8 reserved[124]; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_error { + u32 failing_dev_ioasc; + struct ipr_res_addr failing_dev_res_addr; + u32 failing_dev_res_handle; + u32 prc; + union { + struct ipr_hostrcb_type_ff_error type_ff_error; + struct ipr_hostrcb_type_01_error type_01_error; + struct ipr_hostrcb_type_02_error type_02_error; + struct ipr_hostrcb_type_03_error type_03_error; + struct ipr_hostrcb_type_04_error type_04_error; + } u; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb_raw { + u32 data[sizeof(struct ipr_hostrcb_error)/sizeof(u32)]; +}__attribute__((packed, aligned (4))); + +struct ipr_hcam { + u8 op_code; +#define IPR_HOST_RCB_OP_CODE_CONFIG_CHANGE 0xE1 +#define IPR_HOST_RCB_OP_CODE_LOG_DATA 0xE2 + + u8 notify_type; +#define IPR_HOST_RCB_NOTIF_TYPE_EXISTING_CHANGED 0x00 +#define IPR_HOST_RCB_NOTIF_TYPE_NEW_ENTRY 0x01 +#define IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY 0x02 +#define IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY 0x10 +#define IPR_HOST_RCB_NOTIF_TYPE_INFORMATION_ENTRY 0x11 + + u8 notifications_lost; +#define IPR_HOST_RCB_NO_NOTIFICATIONS_LOST 0 +#define IPR_HOST_RCB_NOTIFICATIONS_LOST 0x80 + + u8 flags; +#define IPR_HOSTRCB_INTERNAL_OPER 0x80 +#define IPR_HOSTRCB_ERR_RESP_SENT 0x40 + + u8 overlay_id; +#define IPR_HOST_RCB_OVERLAY_ID_1 0x01 +#define IPR_HOST_RCB_OVERLAY_ID_2 0x02 +#define IPR_HOST_RCB_OVERLAY_ID_3 0x03 +#define IPR_HOST_RCB_OVERLAY_ID_4 0x04 +#define IPR_HOST_RCB_OVERLAY_ID_6 0x06 +#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF + + u8 reserved1[3]; + u32 ilid; + u32 time_since_last_ioa_reset; + u32 reserved2; + u32 length; + + union { + struct ipr_hostrcb_error error; + struct ipr_hostrcb_cfg_ch_not ccn; + struct ipr_hostrcb_raw raw; + } u; +}__attribute__((packed, aligned (4))); + +struct ipr_hostrcb { + struct ipr_hcam 
hcam; + u32 hostrcb_dma; + struct list_head queue; +}; + +/* IPR smart dump table structures */ +struct ipr_sdt_entry { + u32 bar_str_offset; + u32 end_offset; + u8 entry_byte; + u8 reserved[3]; + + u8 flags; +#define IPR_SDT_ENDIAN 0x80 +#define IPR_SDT_VALID_ENTRY 0x20 + + u8 resv; + u16 priority; +}__attribute__((packed, aligned (4))); + +struct ipr_sdt_header { + u32 state; + u32 num_entries; + u32 num_entries_used; + u32 dump_size; +}__attribute__((packed, aligned (4))); + +struct ipr_sdt { + struct ipr_sdt_header hdr; + struct ipr_sdt_entry entry[IPR_NUM_SDT_ENTRIES]; +}__attribute__((packed, aligned (4))); + +struct ipr_uc_sdt { + struct ipr_sdt_header hdr; + struct ipr_sdt_entry entry[1]; +}__attribute__((packed, aligned (4))); + +/* + * Driver types + */ +struct ipr_bus_attributes { + u8 bus; + u8 qas_enabled; + u8 bus_width; + u8 reserved; + u32 max_xfer_rate; +}; + +struct ipr_resource_entry { + struct ipr_config_table_entry cfgte; + u8 needs_sync_complete:1; + u8 in_erp:1; + u8 add_to_ml:1; + u8 del_from_ml:1; + u8 resetting_device:1; + u8 tcq_active:1; + + int qdepth; + struct scsi_device *sdev; + struct list_head queue; +}; + +struct ipr_resource_hdr { + u16 num_entries; + u16 reserved; +}; + +struct ipr_resource_table { + struct ipr_resource_hdr hdr; + struct ipr_resource_entry dev[IPR_MAX_PHYSICAL_DEVS]; +}; + +struct ipr_misc_cbs { + struct ipr_ioa_vpd ioa_vpd; + struct ipr_inquiry_page3 page3_data; + struct ipr_mode_pages mode_pages; + struct ipr_supported_device supp_dev; +}; + +struct ipr_interrupts { + unsigned long set_interrupt_mask_reg; + unsigned long clr_interrupt_mask_reg; + unsigned long sense_interrupt_mask_reg; + unsigned long clr_interrupt_reg; + + unsigned long sense_interrupt_reg; + unsigned long ioarrin_reg; + unsigned long sense_uproc_interrupt_reg; + unsigned long set_uproc_interrupt_reg; + unsigned long clr_uproc_interrupt_reg; +}; + +struct ipr_chip_cfg_t { + u32 mailbox; + u8 cache_line_size; + struct ipr_interrupts regs; +}; + +enum ipr_shutdown_type { + IPR_SHUTDOWN_NORMAL = 0x00, + IPR_SHUTDOWN_PREPARE_FOR_NORMAL = 0x40, + IPR_SHUTDOWN_ABBREV = 0x80, + IPR_SHUTDOWN_NONE = 0x100 +}; + +struct ipr_trace_entry { + u32 time; + + u8 op_code; + u8 type; +#define IPR_TRACE_START 0x00 +#define IPR_TRACE_FINISH 0xff + u16 cmd_index; + + u32 res_handle; + union { + u32 ioasc; + u32 add_data; + u32 res_addr; + } u; +}; + +struct ipr_sglist { + u32 order; + u32 num_sg; + u32 buffer_len; + struct scatterlist scatterlist[1]; +}; + +enum ipr_sdt_state { + INACTIVE, + WAIT_FOR_DUMP, + GET_DUMP, + ABORT_DUMP, + DUMP_OBTAINED +}; + +/* Per-controller data */ +struct ipr_ioa_cfg { + char eye_catcher[8]; +#define IPR_EYECATCHER "iprcfg" + + struct list_head queue; + + u8 allow_interrupts:1; + u8 in_reset_reload:1; + u8 in_ioa_bringdown:1; + u8 ioa_unit_checked:1; + u8 ioa_is_dead:1; + u8 dump_taken:1; + u8 allow_cmds:1; + u8 allow_ml_add_del:1; + + u16 type; /* CCIN of the card */ + + u8 log_level; +#define IPR_MAX_LOG_LEVEL 4 +#define IPR_DEFAULT_LOG_LEVEL 2 + +#define IPR_NUM_TRACE_INDEX_BITS 8 +#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) +#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) + char trace_start[8]; +#define IPR_TRACE_START_LABEL "trace" + struct ipr_trace_entry *trace; + u32 trace_index:IPR_NUM_TRACE_INDEX_BITS; + + /* + * Queue for free command blocks + */ + char ipr_free_label[8]; +#define IPR_FREEQ_LABEL "free-q" + struct list_head free_q; + + /* + * Queue for command blocks outstanding to the adapter + 
*/ + char ipr_pending_label[8]; +#define IPR_PENDQ_LABEL "pend-q" + struct list_head pending_q; + + char cfg_table_start[8]; +#define IPR_CFG_TBL_START "cfg" + struct ipr_config_table *cfg_table; + u32 cfg_table_dma; + + char resource_table_label[8]; +#define IPR_RES_TABLE_LABEL "res_tbl" + struct ipr_resource_entry *res_entries; + struct list_head free_res_q; + struct list_head used_res_q; + + char ipr_hcam_label[8]; +#define IPR_HCAM_LABEL "hcams" + struct ipr_hostrcb *hostrcb[IPR_NUM_HCAMS]; + u32 hostrcb_dma[IPR_NUM_HCAMS]; + struct list_head hostrcb_free_q; + struct list_head hostrcb_pending_q; + + u32 *host_rrq; + u32 host_rrq_dma; +#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc +#define IPR_HRRQ_RESP_BIT_SET 0x00000002 +#define IPR_HRRQ_TOGGLE_BIT 0x00000001 +#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2 + volatile u32 *hrrq_start; + volatile u32 *hrrq_end; + volatile u32 *hrrq_curr; + volatile u32 toggle_bit; + + struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES]; + + const struct ipr_chip_cfg_t *chip_cfg; + + unsigned long hdw_dma_regs; /* iomapped PCI memory space */ + unsigned long hdw_dma_regs_pci; /* raw PCI memory space */ + unsigned long ioa_mailbox; + struct ipr_interrupts regs; + + u32 pci_cfg_buf[64]; + u16 saved_pcix_cmd_reg; + u16 reset_retries; + + u32 errors_logged; + + struct Scsi_Host *host; + struct pci_dev *pdev; + struct ipr_sglist *ucode_sglist; + struct ipr_mode_pages *saved_mode_pages; + u8 saved_mode_page_len; + + struct work_struct work_q; + + wait_queue_head_t reset_wait_q; + + struct ipr_dump *dump; + enum ipr_sdt_state sdt_state; + + struct ipr_misc_cbs *vpd_cbs; + u32 vpd_cbs_dma; + + struct pci_pool *ipr_cmd_pool; + + struct ipr_cmnd *reset_cmd; + + char ipr_cmd_label[8]; +#define IPR_CMD_LABEL "ipr_cmnd" + struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS]; + u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS]; +}; + +struct ipr_cmnd { + struct ipr_ioarcb ioarcb; + struct ipr_ioasa ioasa; + struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES]; + struct list_head queue; + struct scsi_cmnd *scsi_cmd; + struct completion completion; + struct timer_list timer; + void (*done) (struct ipr_cmnd *); + int (*job_step) (struct ipr_cmnd *); + u16 cmd_index; + u8 sense_buffer[SCSI_SENSE_BUFFERSIZE]; + dma_addr_t sense_buffer_dma; + unsigned short dma_use_sg; + dma_addr_t dma_handle; + union { + enum ipr_shutdown_type shutdown_type; + struct ipr_hostrcb *hostrcb; + unsigned long time_left; + unsigned long scratch; + struct ipr_resource_entry *res; + struct ipr_cmnd *sibling; + struct scsi_device *sdev; + } u; + + struct ipr_ioa_cfg *ioa_cfg; +}; + +struct ipr_ses_table_entry { + char product_id[17]; + char compare_product_id_byte[17]; + u32 max_bus_speed_limit; /* MB/sec limit for this backplane */ +}; + +struct ipr_dump_header { + u32 eye_catcher; +#define IPR_DUMP_EYE_CATCHER 0xC5D4E3F2 + u32 len; + u32 num_entries; + u32 first_entry_offset; + u32 status; +#define IPR_DUMP_STATUS_SUCCESS 0 +#define IPR_DUMP_STATUS_QUAL_SUCCESS 2 +#define IPR_DUMP_STATUS_FAILED 0xffffffff + u32 os; +#define IPR_DUMP_OS_LINUX 0x4C4E5558 + u32 driver_name; +#define IPR_DUMP_DRIVER_NAME 0x49505232 +}__attribute__((packed, aligned (4))); + +struct ipr_dump_entry_header { + u32 eye_catcher; +#define IPR_DUMP_EYE_CATCHER 0xC5D4E3F2 + u32 len; + u32 num_elems; + u32 offset; + u32 data_type; +#define IPR_DUMP_DATA_TYPE_ASCII 0x41534349 +#define IPR_DUMP_DATA_TYPE_BINARY 0x42494E41 + u32 id; +#define IPR_DUMP_IOA_DUMP_ID 0x494F4131 +#define IPR_DUMP_LOCATION_ID 0x4C4F4341 +#define IPR_DUMP_TRACE_ID 
0x54524143 +#define IPR_DUMP_DRIVER_VERSION_ID 0x44525652 +#define IPR_DUMP_DRIVER_TYPE_ID 0x54595045 +#define IPR_DUMP_IOA_CTRL_BLK 0x494F4342 +#define IPR_DUMP_PEND_OPS 0x414F5053 + u32 status; +}__attribute__((packed, aligned (4))); + +struct ipr_dump_location_entry { + struct ipr_dump_entry_header hdr; + u8 location[BUS_ID_SIZE]; +}__attribute__((packed)); + +struct ipr_dump_trace_entry { + struct ipr_dump_entry_header hdr; + u32 trace[IPR_TRACE_SIZE / sizeof(u32)]; +}__attribute__((packed, aligned (4))); + +struct ipr_dump_version_entry { + struct ipr_dump_entry_header hdr; + u8 version[sizeof(IPR_DRIVER_VERSION)]; +}; + +struct ipr_dump_ioa_type_entry { + struct ipr_dump_entry_header hdr; + u32 type; + u32 fw_version; +}; + +struct ipr_driver_dump { + struct ipr_dump_header hdr; + struct ipr_dump_version_entry version_entry; + struct ipr_dump_location_entry location_entry; + struct ipr_dump_ioa_type_entry ioa_type_entry; + struct ipr_dump_trace_entry trace_entry; +}__attribute__((packed)); + +struct ipr_ioa_dump { + struct ipr_dump_entry_header hdr; + struct ipr_sdt sdt; + u32 *ioa_data[IPR_MAX_NUM_DUMP_PAGES]; + u32 reserved; + u32 next_page_index; + u32 page_offset; + u32 format; +#define IPR_SDT_FMT2 2 +#define IPR_SDT_UNKNOWN 3 +}__attribute__((packed, aligned (4))); + +struct ipr_dump { + struct kobject kobj; + struct ipr_ioa_cfg *ioa_cfg; + struct ipr_driver_dump driver_dump; + struct ipr_ioa_dump ioa_dump; +}; + +struct ipr_error_table_t { + u32 ioasc; + int log_ioasa; + int log_hcam; + char *error; +}; + +struct ipr_software_inq_lid_info { + u32 load_id; + u32 timestamp[3]; +}__attribute__((packed, aligned (4))); + +struct ipr_ucode_image_header { + u32 header_length; + u32 lid_table_offset; + u8 major_release; + u8 card_type; + u8 minor_release[2]; + u8 reserved[20]; + char eyecatcher[16]; + u32 num_lids; + struct ipr_software_inq_lid_info lid[1]; +}__attribute__((packed, aligned (4))); + +/* + * Macros + */ +#if IPR_DEBUG +#define IPR_DBG_CMD(CMD) do { CMD; } while (0) +#else +#define IPR_DBG_CMD(CMD) +#endif + +#define ipr_breakpoint_data KERN_ERR IPR_NAME\ +": %s: %s: Line: %d ioa_cfg: %p\n", __FILE__, \ +__FUNCTION__, __LINE__, ioa_cfg + +#if defined(CONFIG_KDB) && !defined(CONFIG_PPC_ISERIES) +#define ipr_breakpoint {printk(ipr_breakpoint_data); KDB_ENTER();} +#define ipr_breakpoint_or_die {printk(ipr_breakpoint_data); KDB_ENTER();} +#else +#define ipr_breakpoint +#define ipr_breakpoint_or_die panic(ipr_breakpoint_data) +#endif + +#ifdef CONFIG_SCSI_IPR_TRACE +#define ipr_create_trace_file(kobj, attr) sysfs_create_bin_file(kobj, attr) +#define ipr_remove_trace_file(kobj, attr) sysfs_remove_bin_file(kobj, attr) +#else +#define ipr_create_trace_file(kobj, attr) 0 +#define ipr_remove_trace_file(kobj, attr) do { } while(0) +#endif + +#ifdef CONFIG_SCSI_IPR_DUMP +#define ipr_create_dump_file(kobj, attr) sysfs_create_bin_file(kobj, attr) +#define ipr_remove_dump_file(kobj, attr) sysfs_remove_bin_file(kobj, attr) +#else +#define ipr_create_dump_file(kobj, attr) 0 +#define ipr_remove_dump_file(kobj, attr) do { } while(0) +#endif + +/* + * Error logging macros + */ +#define ipr_err(...) printk(KERN_ERR IPR_NAME ": "__VA_ARGS__) +#define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__) +#define ipr_crit(...) printk(KERN_CRIT IPR_NAME ": "__VA_ARGS__) +#define ipr_warn(...) printk(KERN_WARNING IPR_NAME": "__VA_ARGS__) +#define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)) + +#define ipr_sdev_printk(level, sdev, fmt, ...) 
\ + printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, sdev->host->host_no, \ + sdev->channel, sdev->id, sdev->lun, ##__VA_ARGS__) + +#define ipr_sdev_err(sdev, fmt, ...) \ + ipr_sdev_printk(KERN_ERR, sdev, fmt, ##__VA_ARGS__) + +#define ipr_sdev_info(sdev, fmt, ...) \ + ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__) + +#define ipr_sdev_dbg(sdev, fmt, ...) \ + IPR_DBG_CMD(ipr_sdev_printk(KERN_INFO, sdev, fmt, ##__VA_ARGS__)) + +#define ipr_res_printk(level, ioa_cfg, res, fmt, ...) \ + printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, ioa_cfg->host->host_no, \ + res.bus, res.target, res.lun, ##__VA_ARGS__) + +#define ipr_res_err(ioa_cfg, res, fmt, ...) \ + ipr_res_printk(KERN_ERR, ioa_cfg, res, fmt, ##__VA_ARGS__) +#define ipr_res_dbg(ioa_cfg, res, fmt, ...) \ + IPR_DBG_CMD(ipr_res_printk(KERN_INFO, ioa_cfg, res, fmt, ##__VA_ARGS__)) + +#define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\ + __FILE__, __FUNCTION__, __LINE__) + +#if IPR_DBG_TRACE +#define ENTER printk(KERN_INFO IPR_NAME": Entering %s\n", __FUNCTION__) +#define LEAVE printk(KERN_INFO IPR_NAME": Leaving %s\n", __FUNCTION__) +#else +#define ENTER +#define LEAVE +#endif + +#define ipr_err_separator \ +ipr_err("----------------------------------------------------------\n") + + +/* + * Inlines + */ + +/** + * ipr_is_ioa_resource - Determine if a resource is the IOA + * @res: resource entry struct + * + * Return value: + * 1 if IOA / 0 if not IOA + **/ +static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res) +{ + return (res->cfgte.flags & IPR_IS_IOA_RESOURCE) ? 1 : 0; +} + +/** + * ipr_is_af_dasd_device - Determine if a resource is an AF DASD + * @res: resource entry struct + * + * Return value: + * 1 if AF DASD / 0 if not AF DASD + **/ +static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res) +{ + if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && + !ipr_is_ioa_resource(res) && + IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_AF_DASD) + return 1; + else + return 0; +} + +/** + * ipr_is_vset_device - Determine if a resource is a VSET + * @res: resource entry struct + * + * Return value: + * 1 if VSET / 0 if not VSET + **/ +static inline int ipr_is_vset_device(struct ipr_resource_entry *res) +{ + if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) && + !ipr_is_ioa_resource(res) && + IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_VOLUME_SET) + return 1; + else + return 0; +} + +/** + * ipr_is_gscsi - Determine if a resource is a generic scsi resource + * @res: resource entry struct + * + * Return value: + * 1 if GSCSI / 0 if not GSCSI + **/ +static inline int ipr_is_gscsi(struct ipr_resource_entry *res) +{ + if (!ipr_is_ioa_resource(res) && + IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_SCSI) + return 1; + else + return 0; +} + +/** + * ipr_is_device - Determine if resource address is that of a device + * @res_addr: resource address struct + * + * Return value: + * 1 if AF / 0 if not AF + **/ +static inline int ipr_is_device(struct ipr_res_addr *res_addr) +{ + if ((res_addr->bus < IPR_MAX_NUM_BUSES) && + (res_addr->target < IPR_MAX_NUM_TARGETS_PER_BUS)) + return 1; + + return 0; +} + +/** + * ipr_sdt_is_fmt2 - Determine if a SDT address is in format 2 + * @sdt_word: SDT address + * + * Return value: + * 1 if format 2 / 0 if not + **/ +static inline int ipr_sdt_is_fmt2(u32 sdt_word) +{ + u32 bar_sel = IPR_GET_FMT2_BAR_SEL(sdt_word); + + switch (bar_sel) { + case IPR_SDT_FMT2_BAR0_SEL: + case IPR_SDT_FMT2_BAR1_SEL: + case IPR_SDT_FMT2_BAR2_SEL: + case IPR_SDT_FMT2_BAR3_SEL: + case IPR_SDT_FMT2_BAR4_SEL: + case IPR_SDT_FMT2_BAR5_SEL: + case 
IPR_SDT_FMT2_EXP_ROM_SEL: + return 1; + }; + + return 0; +} + +#endif diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c new file mode 100644 index 000000000..4277db3b6 --- /dev/null +++ b/drivers/scsi/pcmcia/sym53c500_cs.c @@ -0,0 +1,1042 @@ +/* +* sym53c500_cs.c Bob Tracy (rct@frus.com) +* +* A rewrite of the pcmcia-cs add-on driver for newer (circa 1997) +* New Media Bus Toaster PCMCIA SCSI cards using the Symbios Logic +* 53c500 controller: intended for use with 2.6 and later kernels. +* The pcmcia-cs add-on version of this driver is not supported +* beyond 2.4. It consisted of three files with history/copyright +* information as follows: +* +* SYM53C500.h +* Bob Tracy (rct@frus.com) +* Original by Tom Corner (tcorner@via.at). +* Adapted from NCR53c406a.h which is Copyrighted (C) 1994 +* Normunds Saumanis (normunds@rx.tech.swh.lv) +* +* SYM53C500.c +* Bob Tracy (rct@frus.com) +* Original driver by Tom Corner (tcorner@via.at) was adapted +* from NCR53c406a.c which is Copyrighted (C) 1994, 1995, 1996 +* Normunds Saumanis (normunds@fi.ibm.com) +* +* sym53c500.c +* Bob Tracy (rct@frus.com) +* Original by Tom Corner (tcorner@via.at) was adapted from a +* driver for the Qlogic SCSI card written by +* David Hinds (dhinds@allegro.stanford.edu). +* +* This program is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License as published by the +* Free Software Foundation; either version 2, or (at your option) any +* later version. +* +* This program is distributed in the hope that it will be useful, but +* WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +* General Public License for more details. +*/ + +#define SYM53C500_DEBUG 0 +#define VERBOSE_SYM53C500_DEBUG 0 + +/* +* Set this to 0 if you encounter kernel lockups while transferring +* data in PIO mode. Note this can be changed via "sysfs". +*/ +#define USE_FAST_PIO 1 + +/* =============== End of user configurable parameters ============== */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/* ================================================================== */ + +#ifdef PCMCIA_DEBUG +static int pc_debug = PCMCIA_DEBUG; +module_param(pc_debug, int, 0); +#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) +static char *version = +"sym53c500_cs.c 0.9b 2004/05/10 (Bob Tracy)"; +#else +#define DEBUG(n, args...) 
+#endif + +/* ================================================================== */ + +/* Parameters that can be set with 'insmod' */ + +/* Bit map of interrupts to choose from */ +static unsigned int irq_mask = 0xdeb8; /* 3, 6, 7, 9-12, 14, 15 */ +static int irq_list[4] = { -1 }; +static int num_irqs = 1; + +module_param(irq_mask, int, 0); +MODULE_PARM_DESC(irq_mask, "IRQ mask bits (default: 0xdeb8)"); +module_param_array(irq_list, int, num_irqs, 0); +MODULE_PARM_DESC(irq_list, "Comma-separated list of up to 4 IRQs to try (default: auto select)."); + +/* ================================================================== */ + +#define SYNC_MODE 0 /* Synchronous transfer mode */ + +/* Default configuration */ +#define C1_IMG 0x07 /* ID=7 */ +#define C2_IMG 0x48 /* FE SCSI2 */ +#define C3_IMG 0x20 /* CDB */ +#define C4_IMG 0x04 /* ANE */ +#define C5_IMG 0xa4 /* ? changed from b6= AA PI SIE POL */ +#define C7_IMG 0x80 /* added for SYM53C500 t. corner */ + +/* Hardware Registers: offsets from io_port (base) */ + +/* Control Register Set 0 */ +#define TC_LSB 0x00 /* transfer counter lsb */ +#define TC_MSB 0x01 /* transfer counter msb */ +#define SCSI_FIFO 0x02 /* scsi fifo register */ +#define CMD_REG 0x03 /* command register */ +#define STAT_REG 0x04 /* status register */ +#define DEST_ID 0x04 /* selection/reselection bus id */ +#define INT_REG 0x05 /* interrupt status register */ +#define SRTIMOUT 0x05 /* select/reselect timeout reg */ +#define SEQ_REG 0x06 /* sequence step register */ +#define SYNCPRD 0x06 /* synchronous transfer period */ +#define FIFO_FLAGS 0x07 /* indicates # of bytes in fifo */ +#define SYNCOFF 0x07 /* synchronous offset register */ +#define CONFIG1 0x08 /* configuration register */ +#define CLKCONV 0x09 /* clock conversion register */ +/* #define TESTREG 0x0A */ /* test mode register */ +#define CONFIG2 0x0B /* configuration 2 register */ +#define CONFIG3 0x0C /* configuration 3 register */ +#define CONFIG4 0x0D /* configuration 4 register */ +#define TC_HIGH 0x0E /* transfer counter high */ +/* #define FIFO_BOTTOM 0x0F */ /* reserve FIFO byte register */ + +/* Control Register Set 1 */ +/* #define JUMPER_SENSE 0x00 */ /* jumper sense port reg (r/w) */ +/* #define SRAM_PTR 0x01 */ /* SRAM address pointer reg (r/w) */ +/* #define SRAM_DATA 0x02 */ /* SRAM data register (r/w) */ +#define PIO_FIFO 0x04 /* PIO FIFO registers (r/w) */ +/* #define PIO_FIFO1 0x05 */ /* */ +/* #define PIO_FIFO2 0x06 */ /* */ +/* #define PIO_FIFO3 0x07 */ /* */ +#define PIO_STATUS 0x08 /* PIO status (r/w) */ +/* #define ATA_CMD 0x09 */ /* ATA command/status reg (r/w) */ +/* #define ATA_ERR 0x0A */ /* ATA features/error reg (r/w) */ +#define PIO_FLAG 0x0B /* PIO flag interrupt enable (r/w) */ +#define CONFIG5 0x09 /* configuration 5 register */ +/* #define SIGNATURE 0x0E */ /* signature register (r) */ +/* #define CONFIG6 0x0F */ /* configuration 6 register (r) */ +#define CONFIG7 0x0d + +/* select register set 0 */ +#define REG0(x) (outb(C4_IMG, (x) + CONFIG4)) +/* select register set 1 */ +#define REG1(x) outb(C7_IMG, (x) + CONFIG7); outb(C5_IMG, (x) + CONFIG5) + +#if SYM53C500_DEBUG +#define DEB(x) x +#else +#define DEB(x) +#endif + +#if VERBOSE_SYM53C500_DEBUG +#define VDEB(x) x +#else +#define VDEB(x) +#endif + +#define LOAD_DMA_COUNT(x, count) \ + outb(count & 0xff, (x) + TC_LSB); \ + outb((count >> 8) & 0xff, (x) + TC_MSB); \ + outb((count >> 16) & 0xff, (x) + TC_HIGH); + +/* Chip commands */ +#define DMA_OP 0x80 + +#define SCSI_NOP 0x00 +#define FLUSH_FIFO 0x01 +#define CHIP_RESET 0x02 
+#define SCSI_RESET 0x03 +#define RESELECT 0x40 +#define SELECT_NO_ATN 0x41 +#define SELECT_ATN 0x42 +#define SELECT_ATN_STOP 0x43 +#define ENABLE_SEL 0x44 +#define DISABLE_SEL 0x45 +#define SELECT_ATN3 0x46 +#define RESELECT3 0x47 +#define TRANSFER_INFO 0x10 +#define INIT_CMD_COMPLETE 0x11 +#define MSG_ACCEPT 0x12 +#define TRANSFER_PAD 0x18 +#define SET_ATN 0x1a +#define RESET_ATN 0x1b +#define SEND_MSG 0x20 +#define SEND_STATUS 0x21 +#define SEND_DATA 0x22 +#define DISCONN_SEQ 0x23 +#define TERMINATE_SEQ 0x24 +#define TARG_CMD_COMPLETE 0x25 +#define DISCONN 0x27 +#define RECV_MSG 0x28 +#define RECV_CMD 0x29 +#define RECV_DATA 0x2a +#define RECV_CMD_SEQ 0x2b +#define TARGET_ABORT_DMA 0x04 + +/* ================================================================== */ + +struct scsi_info_t { + dev_link_t link; + dev_node_t node; + struct Scsi_Host *host; + unsigned short manf_id; +}; + +/* +* Repository for per-instance host data. +*/ +struct sym53c500_data { + struct scsi_cmnd *current_SC; + int fast_pio; +}; + +enum Phase { + idle, + data_out, + data_in, + command_ph, + status_ph, + message_out, + message_in +}; + +/* ================================================================== */ + +/* +* Global (within this module) variables other than +* sym53c500_driver_template (the scsi_host_template). +*/ +static dev_link_t *dev_list; +static dev_info_t dev_info = "sym53c500_cs"; + +/* ================================================================== */ + +static void +chip_init(int io_port) +{ + REG1(io_port); + outb(0x01, io_port + PIO_STATUS); + outb(0x00, io_port + PIO_FLAG); + + outb(C4_IMG, io_port + CONFIG4); /* REG0(io_port); */ + outb(C3_IMG, io_port + CONFIG3); + outb(C2_IMG, io_port + CONFIG2); + outb(C1_IMG, io_port + CONFIG1); + + outb(0x05, io_port + CLKCONV); /* clock conversion factor */ + outb(0x9C, io_port + SRTIMOUT); /* Selection timeout */ + outb(0x05, io_port + SYNCPRD); /* Synchronous transfer period */ + outb(SYNC_MODE, io_port + SYNCOFF); /* synchronous mode */ +} + +static void +SYM53C500_int_host_reset(int io_port) +{ + outb(C4_IMG, io_port + CONFIG4); /* REG0(io_port); */ + outb(CHIP_RESET, io_port + CMD_REG); + outb(SCSI_NOP, io_port + CMD_REG); /* required after reset */ + outb(SCSI_RESET, io_port + CMD_REG); + chip_init(io_port); +} + +static __inline__ int +SYM53C500_pio_read(int fast_pio, int base, unsigned char *request, unsigned int reqlen) +{ + int i; + int len; /* current scsi fifo size */ + + REG1(base); + while (reqlen) { + i = inb(base + PIO_STATUS); + /* VDEB(printk("pio_status=%x\n", i)); */ + if (i & 0x80) + return 0; + + switch (i & 0x1e) { + default: + case 0x10: /* fifo empty */ + len = 0; + break; + case 0x0: + len = 1; + break; + case 0x8: /* fifo 1/3 full */ + len = 42; + break; + case 0xc: /* fifo 2/3 full */ + len = 84; + break; + case 0xe: /* fifo full */ + len = 128; + break; + } + + if ((i & 0x40) && len == 0) { /* fifo empty and interrupt occurred */ + return 0; + } + + if (len) { + if (len > reqlen) + len = reqlen; + + if (fast_pio && len > 3) { + insl(base + PIO_FIFO, request, len >> 2); + request += len & 0xfc; + reqlen -= len & 0xfc; + } else { + while (len--) { + *request++ = inb(base + PIO_FIFO); + reqlen--; + } + } + } + } + return 0; +} + +static __inline__ int +SYM53C500_pio_write(int fast_pio, int base, unsigned char *request, unsigned int reqlen) +{ + int i = 0; + int len; /* current scsi fifo size */ + + REG1(base); + while (reqlen && !(i & 0x40)) { + i = inb(base + PIO_STATUS); + /* VDEB(printk("pio_status=%x\n", i)); */ + if 
(i & 0x80) /* error */ + return 0; + + switch (i & 0x1e) { + case 0x10: + len = 128; + break; + case 0x0: + len = 84; + break; + case 0x8: + len = 42; + break; + case 0xc: + len = 1; + break; + default: + case 0xe: + len = 0; + break; + } + + if (len) { + if (len > reqlen) + len = reqlen; + + if (fast_pio && len > 3) { + outsl(base + PIO_FIFO, request, len >> 2); + request += len & 0xfc; + reqlen -= len & 0xfc; + } else { + while (len--) { + outb(*request++, base + PIO_FIFO); + reqlen--; + } + } + } + } + return 0; +} + +static irqreturn_t +SYM53C500_intr(int irq, void *dev_id, struct pt_regs *regs) +{ + unsigned long flags; + struct Scsi_Host *dev = dev_id; + DEB(unsigned char fifo_size;) + DEB(unsigned char seq_reg;) + unsigned char status, int_reg; + unsigned char pio_status; + struct scatterlist *sglist; + unsigned int sgcount; + int port_base = dev->io_port; + struct sym53c500_data *data = + (struct sym53c500_data *)dev->hostdata; + struct scsi_cmnd *curSC = data->current_SC; + int fast_pio = data->fast_pio; + + spin_lock_irqsave(dev->host_lock, flags); + + VDEB(printk("SYM53C500_intr called\n")); + + REG1(port_base); + pio_status = inb(port_base + PIO_STATUS); + REG0(port_base); + status = inb(port_base + STAT_REG); + DEB(seq_reg = inb(port_base + SEQ_REG)); + int_reg = inb(port_base + INT_REG); + DEB(fifo_size = inb(port_base + FIFO_FLAGS) & 0x1f); + +#if SYM53C500_DEBUG + printk("status=%02x, seq_reg=%02x, int_reg=%02x, fifo_size=%02x", + status, seq_reg, int_reg, fifo_size); + printk(", pio=%02x\n", pio_status); +#endif /* SYM53C500_DEBUG */ + + if (int_reg & 0x80) { /* SCSI reset intr */ + DEB(printk("SYM53C500: reset intr received\n")); + curSC->result = DID_RESET << 16; + goto idle_out; + } + + if (pio_status & 0x80) { + printk("SYM53C500: Warning: PIO error!\n"); + curSC->result = DID_ERROR << 16; + goto idle_out; + } + + if (status & 0x20) { /* Parity error */ + printk("SYM53C500: Warning: parity error!\n"); + curSC->result = DID_PARITY << 16; + goto idle_out; + } + + if (status & 0x40) { /* Gross error */ + printk("SYM53C500: Warning: gross error!\n"); + curSC->result = DID_ERROR << 16; + goto idle_out; + } + + if (int_reg & 0x20) { /* Disconnect */ + DEB(printk("SYM53C500: disconnect intr received\n")); + if (curSC->SCp.phase != message_in) { /* Unexpected disconnect */ + curSC->result = DID_NO_CONNECT << 16; + } else { /* Command complete, return status and message */ + curSC->result = (curSC->SCp.Status & 0xff) + | ((curSC->SCp.Message & 0xff) << 8) | (DID_OK << 16); + } + goto idle_out; + } + + switch (status & 0x07) { /* scsi phase */ + case 0x00: /* DATA-OUT */ + if (int_reg & 0x10) { /* Target requesting info transfer */ + curSC->SCp.phase = data_out; + VDEB(printk("SYM53C500: Data-Out phase\n")); + outb(FLUSH_FIFO, port_base + CMD_REG); + LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */ + outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); + if (!curSC->use_sg) /* Don't use scatter-gather */ + SYM53C500_pio_write(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen); + else { /* use scatter-gather */ + sgcount = curSC->use_sg; + sglist = curSC->request_buffer; + while (sgcount--) { + SYM53C500_pio_write(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length); + sglist++; + } + } + REG0(port_base); + } + break; + + case 0x01: /* DATA-IN */ + if (int_reg & 0x10) { /* Target requesting info transfer */ + curSC->SCp.phase = data_in; + VDEB(printk("SYM53C500: Data-In phase\n")); + outb(FLUSH_FIFO, port_base 
+ CMD_REG); + LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */ + outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); + if (!curSC->use_sg) /* Don't use scatter-gather */ + SYM53C500_pio_read(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen); + else { /* Use scatter-gather */ + sgcount = curSC->use_sg; + sglist = curSC->request_buffer; + while (sgcount--) { + SYM53C500_pio_read(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length); + sglist++; + } + } + REG0(port_base); + } + break; + + case 0x02: /* COMMAND */ + curSC->SCp.phase = command_ph; + printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n"); + break; + + case 0x03: /* STATUS */ + curSC->SCp.phase = status_ph; + VDEB(printk("SYM53C500: Status phase\n")); + outb(FLUSH_FIFO, port_base + CMD_REG); + outb(INIT_CMD_COMPLETE, port_base + CMD_REG); + break; + + case 0x04: /* Reserved */ + case 0x05: /* Reserved */ + printk("SYM53C500: WARNING: Reserved phase!!!\n"); + break; + + case 0x06: /* MESSAGE-OUT */ + DEB(printk("SYM53C500: Message-Out phase\n")); + curSC->SCp.phase = message_out; + outb(SET_ATN, port_base + CMD_REG); /* Reject the message */ + outb(MSG_ACCEPT, port_base + CMD_REG); + break; + + case 0x07: /* MESSAGE-IN */ + VDEB(printk("SYM53C500: Message-In phase\n")); + curSC->SCp.phase = message_in; + + curSC->SCp.Status = inb(port_base + SCSI_FIFO); + curSC->SCp.Message = inb(port_base + SCSI_FIFO); + + VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f)); + DEB(printk("Status = %02x Message = %02x\n", curSC->SCp.Status, curSC->SCp.Message)); + + if (curSC->SCp.Message == SAVE_POINTERS || curSC->SCp.Message == DISCONNECT) { + outb(SET_ATN, port_base + CMD_REG); /* Reject message */ + DEB(printk("Discarding SAVE_POINTERS message\n")); + } + outb(MSG_ACCEPT, port_base + CMD_REG); + break; + } +out: + spin_unlock_irqrestore(dev->host_lock, flags); + return IRQ_HANDLED; + +idle_out: + curSC->SCp.phase = idle; + curSC->scsi_done(curSC); + goto out; +} + +static void +SYM53C500_release(dev_link_t *link) +{ + struct scsi_info_t *info = link->priv; + struct Scsi_Host *shost = info->host; + + DEBUG(0, "SYM53C500_release(0x%p)\n", link); + + /* + * Do this before releasing/freeing resources. + */ + scsi_remove_host(shost); + + /* + * Interrupts getting hosed on card removal. Try + * the following code, mostly from qlogicfas.c. + */ + if (shost->irq) + free_irq(shost->irq, shost); + if (shost->dma_channel != 0xff) + free_dma(shost->dma_channel); + if (shost->io_port && shost->n_io_port) + release_region(shost->io_port, shost->n_io_port); + + link->dev = NULL; + + pcmcia_release_configuration(link->handle); + pcmcia_release_io(link->handle, &link->io); + pcmcia_release_irq(link->handle, &link->irq); + + link->state &= ~DEV_CONFIG; + + scsi_host_put(shost); +} /* SYM53C500_release */ + +static const char* +SYM53C500_info(struct Scsi_Host *SChost) +{ + static char info_msg[256]; + struct sym53c500_data *data = + (struct sym53c500_data *)SChost->hostdata; + + DEB(printk("SYM53C500_info called\n")); + (void)snprintf(info_msg, sizeof(info_msg), + "SYM53C500 at 0x%lx, IRQ %d, %s PIO mode.", + SChost->io_port, SChost->irq, data->fast_pio ? 
"fast" : "slow"); + return (info_msg); +} + +static int +SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +{ + int i; + int port_base = SCpnt->device->host->io_port; + struct sym53c500_data *data = + (struct sym53c500_data *)SCpnt->device->host->hostdata; + + VDEB(printk("SYM53C500_queue called\n")); + + DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", + SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id, + SCpnt->device->lun, SCpnt->request_bufflen)); + + VDEB(for (i = 0; i < SCpnt->cmd_len; i++) + printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i])); + VDEB(printk("\n")); + + data->current_SC = SCpnt; + data->current_SC->scsi_done = done; + data->current_SC->SCp.phase = command_ph; + data->current_SC->SCp.Status = 0; + data->current_SC->SCp.Message = 0; + + /* We are locked here already by the mid layer */ + REG0(port_base); + outb(SCpnt->device->id, port_base + DEST_ID); /* set destination */ + outb(FLUSH_FIFO, port_base + CMD_REG); /* reset the fifos */ + + for (i = 0; i < SCpnt->cmd_len; i++) { + outb(SCpnt->cmnd[i], port_base + SCSI_FIFO); + } + outb(SELECT_NO_ATN, port_base + CMD_REG); + + return 0; +} + +static int +SYM53C500_host_reset(struct scsi_cmnd *SCpnt) +{ + int port_base = SCpnt->device->host->io_port; + + DEB(printk("SYM53C500_host_reset called\n")); + SYM53C500_int_host_reset(port_base); + + return SUCCESS; +} + +static int +SYM53C500_biosparm(struct scsi_device *disk, + struct block_device *dev, + sector_t capacity, int *info_array) +{ + int size; + + DEB(printk("SYM53C500_biosparm called\n")); + + size = capacity; + info_array[0] = 64; /* heads */ + info_array[1] = 32; /* sectors */ + info_array[2] = size >> 11; /* cylinders */ + if (info_array[2] > 1024) { /* big disk */ + info_array[0] = 255; + info_array[1] = 63; + info_array[2] = size / (255 * 63); + } + return 0; +} + +static ssize_t +SYM53C500_show_pio(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *SHp = class_to_shost(cdev); + struct sym53c500_data *data = + (struct sym53c500_data *)SHp->hostdata; + + return snprintf(buf, 4, "%d\n", data->fast_pio); +} + +static ssize_t +SYM53C500_store_pio(struct class_device *cdev, const char *buf, size_t count) +{ + int pio; + struct Scsi_Host *SHp = class_to_shost(cdev); + struct sym53c500_data *data = + (struct sym53c500_data *)SHp->hostdata; + + pio = simple_strtoul(buf, NULL, 0); + if (pio == 0 || pio == 1) { + data->fast_pio = pio; + return count; + } + else + return -EINVAL; +} + +/* +* SCSI HBA device attributes we want to +* make available via sysfs. 
+*/ +static struct class_device_attribute SYM53C500_pio_attr = { + .attr = { + .name = "fast_pio", + .mode = (S_IRUGO | S_IWUSR), + }, + .show = SYM53C500_show_pio, + .store = SYM53C500_store_pio, +}; + +static struct class_device_attribute *SYM53C500_shost_attrs[] = { + &SYM53C500_pio_attr, + NULL, +}; + +/* +* scsi_host_template initializer +*/ +static struct scsi_host_template sym53c500_driver_template = { + .module = THIS_MODULE, + .name = "SYM53C500", + .info = SYM53C500_info, + .queuecommand = SYM53C500_queue, + .eh_host_reset_handler = SYM53C500_host_reset, + .bios_param = SYM53C500_biosparm, + .proc_name = "SYM53C500", + .can_queue = 1, + .this_id = 7, + .sg_tablesize = 32, + .cmd_per_lun = 1, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = SYM53C500_shost_attrs +}; + +#define CS_CHECK(fn, ret) \ +do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0) + +static void +SYM53C500_config(dev_link_t *link) +{ + client_handle_t handle = link->handle; + struct scsi_info_t *info = link->priv; + tuple_t tuple; + cisparse_t parse; + int i, last_ret, last_fn; + int irq_level, port_base; + unsigned short tuple_data[32]; + struct Scsi_Host *host; + struct scsi_host_template *tpnt = &sym53c500_driver_template; + struct sym53c500_data *data; + + DEBUG(0, "SYM53C500_config(0x%p)\n", link); + + tuple.TupleData = (cisdata_t *)tuple_data; + tuple.TupleDataMax = 64; + tuple.TupleOffset = 0; + tuple.DesiredTuple = CISTPL_CONFIG; + CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); + CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple)); + CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse)); + link->conf.ConfigBase = parse.config.base; + + tuple.DesiredTuple = CISTPL_MANFID; + if ((pcmcia_get_first_tuple(handle, &tuple) == CS_SUCCESS) && + (pcmcia_get_tuple_data(handle, &tuple) == CS_SUCCESS)) + info->manf_id = le16_to_cpu(tuple.TupleData[0]); + + /* Configure card */ + link->state |= DEV_CONFIG; + + tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY; + CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple)); + while (1) { + if (pcmcia_get_tuple_data(handle, &tuple) != 0 || + pcmcia_parse_tuple(handle, &tuple, &parse) != 0) + goto next_entry; + link->conf.ConfigIndex = parse.cftable_entry.index; + link->io.BasePort1 = parse.cftable_entry.io.win[0].base; + link->io.NumPorts1 = parse.cftable_entry.io.win[0].len; + + if (link->io.BasePort1 != 0) { + i = pcmcia_request_io(handle, &link->io); + if (i == CS_SUCCESS) + break; + } +next_entry: + CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple)); + } + + CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq)); + CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf)); + + /* + * That's the trouble with copying liberally from another driver. + * Some things probably aren't relevant, and I suspect this entire + * section dealing with manufacturer IDs can be scrapped. --rct + */ + if ((info->manf_id == MANFID_MACNICA) || + (info->manf_id == MANFID_PIONEER) || + (info->manf_id == 0x0098)) { + /* set ATAcmd */ + outb(0xb4, link->io.BasePort1 + 0xd); + outb(0x24, link->io.BasePort1 + 0x9); + outb(0x04, link->io.BasePort1 + 0xd); + } + + /* + * irq_level == 0 implies tpnt->can_queue == 0, which + * is not supported in 2.6. Thus, only irq_level > 0 + * will be allowed. 
+ * + * Possible port_base values are as follows: + * + * 0x130, 0x230, 0x280, 0x290, + * 0x320, 0x330, 0x340, 0x350 + */ + port_base = link->io.BasePort1; + irq_level = link->irq.AssignedIRQ; + + DEB(printk("SYM53C500: port_base=0x%x, irq=%d, fast_pio=%d\n", + port_base, irq_level, USE_FAST_PIO);) + + chip_init(port_base); + + host = scsi_host_alloc(tpnt, sizeof(struct sym53c500_data)); + if (!host) { + printk("SYM53C500: Unable to register host, giving up.\n"); + goto err_release; + } + + data = (struct sym53c500_data *)host->hostdata; + + if (irq_level > 0) { + if (request_irq(irq_level, SYM53C500_intr, 0, "SYM53C500", host)) { + printk("SYM53C500: unable to allocate IRQ %d\n", irq_level); + goto err_free_scsi; + } + DEB(printk("SYM53C500: allocated IRQ %d\n", irq_level)); + } else if (irq_level == 0) { + DEB(printk("SYM53C500: No interrupts detected\n")); + goto err_free_scsi; + } else { + DEB(printk("SYM53C500: Shouldn't get here!\n")); + goto err_free_scsi; + } + + host->unique_id = port_base; + host->irq = irq_level; + host->io_port = port_base; + host->n_io_port = 0x10; + host->dma_channel = -1; + + /* + * Note fast_pio is set to USE_FAST_PIO by + * default, but can be changed via "sysfs". + */ + data->fast_pio = USE_FAST_PIO; + + sprintf(info->node.dev_name, "scsi%d", host->host_no); + link->dev = &info->node; + info->host = host; + + if (scsi_add_host(host, NULL)) + goto err_free_irq; + + scsi_scan_host(host); + + goto out; /* SUCCESS */ + +err_free_irq: + free_irq(irq_level, host); +err_free_scsi: + scsi_host_put(host); +err_release: + release_region(port_base, 0x10); + printk(KERN_INFO "sym53c500_cs: no SCSI devices found\n"); + +out: + link->state &= ~DEV_CONFIG_PENDING; + return; + +cs_failed: + cs_error(link->handle, last_fn, last_ret); + SYM53C500_release(link); + return; +} /* SYM53C500_config */ + +static int +SYM53C500_event(event_t event, int priority, event_callback_args_t *args) +{ + dev_link_t *link = args->client_data; + struct scsi_info_t *info = link->priv; + + DEBUG(1, "SYM53C500_event(0x%06x)\n", event); + + switch (event) { + case CS_EVENT_CARD_REMOVAL: + link->state &= ~DEV_PRESENT; + if (link->state & DEV_CONFIG) + SYM53C500_release(link); + break; + case CS_EVENT_CARD_INSERTION: + link->state |= DEV_PRESENT | DEV_CONFIG_PENDING; + SYM53C500_config(link); + break; + case CS_EVENT_PM_SUSPEND: + link->state |= DEV_SUSPEND; + /* Fall through... */ + case CS_EVENT_RESET_PHYSICAL: + if (link->state & DEV_CONFIG) + pcmcia_release_configuration(link->handle); + break; + case CS_EVENT_PM_RESUME: + link->state &= ~DEV_SUSPEND; + /* Fall through... */ + case CS_EVENT_CARD_RESET: + if (link->state & DEV_CONFIG) { + pcmcia_request_configuration(link->handle, &link->conf); + /* See earlier comment about manufacturer IDs. */ + if ((info->manf_id == MANFID_MACNICA) || + (info->manf_id == MANFID_PIONEER) || + (info->manf_id == 0x0098)) { + outb(0x80, link->io.BasePort1 + 0xd); + outb(0x24, link->io.BasePort1 + 0x9); + outb(0x04, link->io.BasePort1 + 0xd); + } + /* + * If things don't work after a "resume", + * this is a good place to start looking. 
+ */ + SYM53C500_int_host_reset(link->io.BasePort1); + } + break; + } + return 0; +} /* SYM53C500_event */ + +static void +SYM53C500_detach(dev_link_t *link) +{ + dev_link_t **linkp; + + DEBUG(0, "SYM53C500_detach(0x%p)\n", link); + + /* Locate device structure */ + for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next) + if (*linkp == link) + break; + if (*linkp == NULL) + return; + + if (link->state & DEV_CONFIG) + SYM53C500_release(link); + + if (link->handle) + pcmcia_deregister_client(link->handle); + + /* Unlink device structure, free bits. */ + *linkp = link->next; + kfree(link->priv); + link->priv = NULL; +} /* SYM53C500_detach */ + +static dev_link_t * +SYM53C500_attach(void) +{ + struct scsi_info_t *info; + client_reg_t client_reg; + dev_link_t *link; + int i, ret; + + DEBUG(0, "SYM53C500_attach()\n"); + + /* Create new SCSI device */ + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return NULL; + memset(info, 0, sizeof(*info)); + link = &info->link; + link->priv = info; + link->io.NumPorts1 = 16; + link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; + link->io.IOAddrLines = 10; + link->irq.Attributes = IRQ_TYPE_EXCLUSIVE; + link->irq.IRQInfo1 = IRQ_INFO2_VALID | IRQ_LEVEL_ID; + if (irq_list[0] == -1) + link->irq.IRQInfo2 = irq_mask; + else + for (i = 0; i < 4; i++) + link->irq.IRQInfo2 |= 1 << irq_list[i]; + link->conf.Attributes = CONF_ENABLE_IRQ; + link->conf.Vcc = 50; + link->conf.IntType = INT_MEMORY_AND_IO; + link->conf.Present = PRESENT_OPTION; + + /* Register with Card Services */ + link->next = dev_list; + dev_list = link; + client_reg.dev_info = &dev_info; + client_reg.Attributes = INFO_IO_CLIENT | INFO_CARD_SHARE; + client_reg.event_handler = &SYM53C500_event; + client_reg.EventMask = CS_EVENT_RESET_REQUEST | CS_EVENT_CARD_RESET | + CS_EVENT_CARD_INSERTION | CS_EVENT_CARD_REMOVAL | + CS_EVENT_PM_SUSPEND | CS_EVENT_PM_RESUME; + client_reg.Version = 0x0210; + client_reg.event_callback_args.client_data = link; + ret = pcmcia_register_client(&link->handle, &client_reg); + if (ret != 0) { + cs_error(link->handle, RegisterClient, ret); + SYM53C500_detach(link); + return NULL; + } + + return link; +} /* SYM53C500_attach */ + +MODULE_AUTHOR("Bob Tracy "); +MODULE_DESCRIPTION("SYM53C500 PCMCIA SCSI driver"); +MODULE_LICENSE("GPL"); + +static struct pcmcia_driver sym53c500_cs_driver = { + .owner = THIS_MODULE, + .drv = { + .name = "sym53c500_cs", + }, + .attach = SYM53C500_attach, + .detach = SYM53C500_detach, +}; + +static int __init +init_sym53c500_cs(void) +{ + return pcmcia_register_driver(&sym53c500_cs_driver); +} + +static void __exit +exit_sym53c500_cs(void) +{ + pcmcia_unregister_driver(&sym53c500_cs_driver); +} + +module_init(init_sym53c500_cs); +module_exit(exit_sym53c500_cs); diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h new file mode 100644 index 000000000..f01cbd66c --- /dev/null +++ b/drivers/scsi/qlogicfas408.h @@ -0,0 +1,120 @@ +/* to be used by qlogicfas and qlogic_cs */ +#ifndef __QLOGICFAS408_H +#define __QLOGICFAS408_H + +/*----------------------------------------------------------------*/ +/* Configuration */ + +/* Set the following to max out the speed of the PIO PseudoDMA transfers, + again, 0 tends to be slower, but more stable. */ + +#define QL_TURBO_PDMA 1 + +/* This should be 1 to enable parity detection */ + +#define QL_ENABLE_PARITY 1 + +/* This will reset all devices when the driver is initialized (during bootup). 
+ The other linux drivers don't do this, but the DOS drivers do, and after + using DOS or some kind of crash or lockup this will bring things back + without requiring a cold boot. It does take some time to recover from a + reset, so it is slower, and I have seen timeouts so that devices weren't + recognized when this was set. */ + +#define QL_RESET_AT_START 0 + +/* crystal frequency in megahertz (for offset 5 and 9) + Please set this for your card. Most Qlogic cards are 40 Mhz. The + Control Concepts ISA (not VLB) is 24 Mhz */ + +#define XTALFREQ 40 + +/**********/ +/* DANGER! modify these at your own risk */ +/* SLOWCABLE can usually be reset to zero if you have a clean setup and + proper termination. The rest are for synchronous transfers and other + advanced features if your device can transfer faster than 5Mb/sec. + If you are really curious, email me for a quick howto until I have + something official */ +/**********/ + +/*****/ +/* config register 1 (offset 8) options */ +/* This needs to be set to 1 if your cabling is long or noisy */ +#define SLOWCABLE 1 + +/*****/ +/* offset 0xc */ +/* This will set fast (10Mhz) synchronous timing when set to 1 + For this to have an effect, FASTCLK must also be 1 */ +#define FASTSCSI 0 + +/* This when set to 1 will set a faster sync transfer rate */ +#define FASTCLK 0 /*(XTALFREQ>25?1:0)*/ + +/*****/ +/* offset 6 */ +/* This is the sync transfer divisor, XTALFREQ/X will be the maximum + achievable data rate (assuming the rest of the system is capable + and set properly) */ +#define SYNCXFRPD 5 /*(XTALFREQ/5)*/ + +/*****/ +/* offset 7 */ +/* This is the count of how many synchronous transfers can take place + i.e. how many reqs can occur before an ack is given. + The maximum value for this is 15, the upper bits can modify + REQ/ACK assertion and deassertion during synchronous transfers + If this is 0, the bus will only transfer asynchronously */ +#define SYNCOFFST 0 +/* for the curious, bits 7&6 control the deassertion delay in 1/2 cycles + of the 40Mhz clock. If FASTCLK is 1, specifying 01 (1/2) will + cause the deassertion to be early by 1/2 clock. Bits 5&4 control + the assertion delay, also in 1/2 clocks (FASTCLK is ignored here). 
*/ + +/*----------------------------------------------------------------*/ + +struct qlogicfas408_priv { + int qbase; /* Port */ + int qinitid; /* initiator ID */ + int qabort; /* Flag to cause an abort */ + int qlirq; /* IRQ being used */ + int int_type; /* type of irq, 2 for ISA board, 0 for PCMCIA */ + char qinfo[80]; /* description */ + Scsi_Cmnd *qlcmd; /* current command being processed */ + struct Scsi_Host *shost; /* pointer back to host */ + struct qlogicfas408_priv *next; /* next private struct */ +}; + +/* The qlogic card uses two register maps - These macros select which one */ +#define REG0 ( outb( inb( qbase + 0xd ) & 0x7f , qbase + 0xd ), outb( 4 , qbase + 0xd )) +#define REG1 ( outb( inb( qbase + 0xd ) | 0x80 , qbase + 0xd ), outb( 0xb4 | int_type, qbase + 0xd )) + +/* following is watchdog timeout in microseconds */ +#define WATCHDOG 5000000 + +/*----------------------------------------------------------------*/ +/* the following will set the monitor border color (useful to find + where something crashed or gets stuck at and as a simple profiler) */ + +#define rtrc(i) {} + +#define get_priv_by_cmd(x) (struct qlogicfas408_priv *)&((x)->device->host->hostdata[0]) +#define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0]) + +irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id, struct pt_regs *regs); +int qlogicfas408_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)); +int qlogicfas408_biosparam(struct scsi_device * disk, + struct block_device *dev, + sector_t capacity, int ip[]); +int qlogicfas408_abort(Scsi_Cmnd * cmd); +int qlogicfas408_bus_reset(Scsi_Cmnd * cmd); +int qlogicfas408_host_reset(Scsi_Cmnd * cmd); +int qlogicfas408_device_reset(Scsi_Cmnd * cmd); +const char *qlogicfas408_info(struct Scsi_Host *host); +int qlogicfas408_get_chip_type(int qbase, int int_type); +void qlogicfas408_setup(int qbase, int id, int int_type); +int qlogicfas408_detect(int qbase, int int_type); +void qlogicfas408_disable_ints(struct qlogicfas408_priv *priv); +#endif /* __QLOGICFAS408_H */ + diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c new file mode 100644 index 000000000..bf1932e49 --- /dev/null +++ b/drivers/scsi/sata_nv.c @@ -0,0 +1,355 @@ +/* + * sata_nv.c - NVIDIA nForce SATA + * + * Copyright 2004 NVIDIA Corp. All rights reserved. + * Copyright 2004 Andrew Chew + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. + * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. + * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include +#include + +#define DRV_NAME "sata_nv" +#define DRV_VERSION "0.01" + +#define NV_PORTS 2 +#define NV_PIO_MASK 0x1f +#define NV_UDMA_MASK 0x7f +#define NV_PORT0_BMDMA_REG_OFFSET 0x00 +#define NV_PORT1_BMDMA_REG_OFFSET 0x08 +#define NV_PORT0_SCR_REG_OFFSET 0x00 +#define NV_PORT1_SCR_REG_OFFSET 0x40 + +#define NV_INT_STATUS 0x10 +#define NV_INT_STATUS_PDEV_INT 0x01 +#define NV_INT_STATUS_PDEV_PM 0x02 +#define NV_INT_STATUS_PDEV_ADDED 0x04 +#define NV_INT_STATUS_PDEV_REMOVED 0x08 +#define NV_INT_STATUS_SDEV_INT 0x10 +#define NV_INT_STATUS_SDEV_PM 0x20 +#define NV_INT_STATUS_SDEV_ADDED 0x40 +#define NV_INT_STATUS_SDEV_REMOVED 0x80 +#define NV_INT_STATUS_PDEV_HOTPLUG (NV_INT_STATUS_PDEV_ADDED | \ + NV_INT_STATUS_PDEV_REMOVED) +#define NV_INT_STATUS_SDEV_HOTPLUG (NV_INT_STATUS_SDEV_ADDED | \ + NV_INT_STATUS_SDEV_REMOVED) +#define NV_INT_STATUS_HOTPLUG (NV_INT_STATUS_PDEV_HOTPLUG | \ + NV_INT_STATUS_SDEV_HOTPLUG) + +#define NV_INT_ENABLE 0x11 +#define NV_INT_ENABLE_PDEV_MASK 0x01 +#define NV_INT_ENABLE_PDEV_PM 0x02 +#define NV_INT_ENABLE_PDEV_ADDED 0x04 +#define NV_INT_ENABLE_PDEV_REMOVED 0x08 +#define NV_INT_ENABLE_SDEV_MASK 0x10 +#define NV_INT_ENABLE_SDEV_PM 0x20 +#define NV_INT_ENABLE_SDEV_ADDED 0x40 +#define NV_INT_ENABLE_SDEV_REMOVED 0x80 +#define NV_INT_ENABLE_PDEV_HOTPLUG (NV_INT_ENABLE_PDEV_ADDED | \ + NV_INT_ENABLE_PDEV_REMOVED) +#define NV_INT_ENABLE_SDEV_HOTPLUG (NV_INT_ENABLE_SDEV_ADDED | \ + NV_INT_ENABLE_SDEV_REMOVED) +#define NV_INT_ENABLE_HOTPLUG (NV_INT_ENABLE_PDEV_HOTPLUG | \ + NV_INT_ENABLE_SDEV_HOTPLUG) + +#define NV_INT_CONFIG 0x12 +#define NV_INT_CONFIG_METHD 0x01 // 0 = INT, 1 = SMI + +static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +irqreturn_t nv_interrupt (int irq, void *dev_instance, struct pt_regs *regs); +static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg); +static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static void nv_host_stop (struct ata_host_set *host_set); + +static struct pci_device_id nv_pci_tbl[] = { + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2, + PCI_ANY_ID, PCI_ANY_ID, }, + { 0, } /* terminate list */ +}; + +static struct pci_driver nv_pci_driver = { + .name = DRV_NAME, + .id_table = nv_pci_tbl, + .probe = nv_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template nv_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = ATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .dma_boundary = ATA_DMA_BOUNDARY, + .slave_configure = ata_scsi_slave_config, + .bios_param = ata_std_bios_param, +}; + 
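+/*
+ * Port operations: apart from the SCR register accessors, the hotplug-aware
+ * interrupt handler and nv_host_stop() below, the port is wired straight to
+ * the generic libata PIO/BMDMA helpers.
+ */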
+static struct ata_port_operations nv_ops = { + .port_disable = ata_port_disable, + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + .exec_command = ata_exec_command_pio, + .check_status = ata_check_status_pio, + .phy_reset = sata_phy_reset, + .bmdma_setup = ata_bmdma_setup_pio, + .bmdma_start = ata_bmdma_start_pio, + .qc_prep = ata_qc_prep, + .qc_issue = ata_qc_issue_prot, + .eng_timeout = ata_eng_timeout, + .irq_handler = nv_interrupt, + .irq_clear = ata_bmdma_irq_clear, + .scr_read = nv_scr_read, + .scr_write = nv_scr_write, + .port_start = ata_port_start, + .port_stop = ata_port_stop, + .host_stop = nv_host_stop, +}; + +MODULE_AUTHOR("NVIDIA"); +MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, nv_pci_tbl); + +irqreturn_t nv_interrupt (int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + unsigned int i; + unsigned int handled = 0; + unsigned long flags; + u8 intr_status; + u8 intr_enable; + + spin_lock_irqsave(&host_set->lock, flags); + + for (i = 0; i < host_set->n_ports; i++) { + struct ata_port *ap; + + ap = host_set->ports[i]; + if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && (!(qc->tf.ctl & ATA_NIEN))) + handled += ata_host_intr(ap, qc); + } + + intr_status = inb(ap->ioaddr.scr_addr + NV_INT_STATUS); + intr_enable = inb(ap->ioaddr.scr_addr + NV_INT_ENABLE); + + // Clear interrupt status. + outb(0xff, ap->ioaddr.scr_addr + NV_INT_STATUS); + + if (intr_status & NV_INT_STATUS_HOTPLUG) { + if (intr_status & NV_INT_STATUS_PDEV_ADDED) { + printk(KERN_WARNING "ata%u: " + "Primary device added\n", ap->id); + } + + if (intr_status & NV_INT_STATUS_PDEV_REMOVED) { + printk(KERN_WARNING "ata%u: " + "Primary device removed\n", ap->id); + } + + if (intr_status & NV_INT_STATUS_SDEV_ADDED) { + printk(KERN_WARNING "ata%u: " + "Secondary device added\n", ap->id); + } + + if (intr_status & NV_INT_STATUS_SDEV_REMOVED) { + printk(KERN_WARNING "ata%u: " + "Secondary device removed\n", ap->id); + } + } + } + + spin_unlock_irqrestore(&host_set->lock, flags); + + return IRQ_RETVAL(handled); +} + +static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + if (sc_reg > SCR_CONTROL) + return 0xffffffffU; + + return inl(ap->ioaddr.scr_addr + (sc_reg * 4)); +} + +static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +{ + if (sc_reg > SCR_CONTROL) + return; + + outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)); +} + +static void nv_host_stop (struct ata_host_set *host_set) +{ + int i; + + for (i = 0; i < host_set->n_ports; i++) { + u8 intr_mask; + + // Disable hotplug event interrupts.
+ intr_mask = inb(host_set->ports[i]->ioaddr.scr_addr + + NV_INT_ENABLE); + intr_mask &= ~(NV_INT_ENABLE_HOTPLUG); + outb(intr_mask, host_set->ports[i]->ioaddr.scr_addr + + NV_INT_ENABLE); + } +} + +static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version = 0; + struct ata_probe_ent *probe_ent = NULL; + int i; + int rc; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + INIT_LIST_HEAD(&probe_ent->node); + + probe_ent->pdev = pdev; + probe_ent->sht = &nv_sht; + probe_ent->host_flags = ATA_FLAG_SATA | + ATA_FLAG_SATA_RESET | + ATA_FLAG_SRST | + ATA_FLAG_NO_LEGACY; + probe_ent->port_ops = &nv_ops; + probe_ent->n_ports = NV_PORTS; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->pio_mask = NV_PIO_MASK; + probe_ent->udma_mask = NV_UDMA_MASK; + + probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); + ata_std_ports(&probe_ent->port[0]); + probe_ent->port[0].altstatus_addr = + probe_ent->port[0].ctl_addr = + pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; + probe_ent->port[0].bmdma_addr = + pci_resource_start(pdev, 4) | NV_PORT0_BMDMA_REG_OFFSET; + probe_ent->port[0].scr_addr = + pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET; + + probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); + ata_std_ports(&probe_ent->port[1]); + probe_ent->port[1].altstatus_addr = + probe_ent->port[1].ctl_addr = + pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; + probe_ent->port[1].bmdma_addr = + pci_resource_start(pdev, 4) | NV_PORT1_BMDMA_REG_OFFSET; + probe_ent->port[1].scr_addr = + pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET; + + pci_set_master(pdev); + + rc = ata_device_add(probe_ent); + if (rc != NV_PORTS) + goto err_out_regions; + + // Enable hotplug event interrupts. + for (i = 0; i < probe_ent->n_ports; i++) { + u8 intr_mask; + + outb(NV_INT_STATUS_HOTPLUG, probe_ent->port[i].scr_addr + + NV_INT_STATUS); + + intr_mask = inb(probe_ent->port[i].scr_addr + NV_INT_ENABLE); + intr_mask |= NV_INT_ENABLE_HOTPLUG; + outb(intr_mask, probe_ent->port[i].scr_addr + NV_INT_ENABLE); + } + + kfree(probe_ent); + + return 0; + +err_out_regions: + pci_release_regions(pdev); + +err_out: + pci_disable_device(pdev); + return rc; +} + +static int __init nv_init(void) +{ + return pci_module_init(&nv_pci_driver); +} + +static void __exit nv_exit(void) +{ + pci_unregister_driver(&nv_pci_driver); +} + +module_init(nv_init); +module_exit(nv_exit); diff --git a/drivers/scsi/sata_promise.h b/drivers/scsi/sata_promise.h new file mode 100644 index 000000000..6e7e96b9e --- /dev/null +++ b/drivers/scsi/sata_promise.h @@ -0,0 +1,154 @@ +/* + * sata_promise.h - Promise SATA common definitions and inline funcs + * + * Copyright 2003-2004 Red Hat, Inc. + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference.
+ * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. + * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. + * + */ + +#ifndef __SATA_PROMISE_H__ +#define __SATA_PROMISE_H__ + +#include + +enum pdc_packet_bits { + PDC_PKT_READ = (1 << 2), + PDC_PKT_NODATA = (1 << 3), + + PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5), + PDC_PKT_CLEAR_BSY = (1 << 4), + PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4), + PDC_LAST_REG = (1 << 3), + + PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1), +}; + +static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf, + dma_addr_t sg_table, + unsigned int devno, u8 *buf) +{ + u8 dev_reg; + u32 *buf32 = (u32 *) buf; + + /* set control bits (byte 0), zero delay seq id (byte 3), + * and seq id (byte 2) + */ + switch (tf->protocol) { + case ATA_PROT_DMA: + if (!(tf->flags & ATA_TFLAG_WRITE)) + buf32[0] = cpu_to_le32(PDC_PKT_READ); + else + buf32[0] = 0; + break; + + case ATA_PROT_NODATA: + buf32[0] = cpu_to_le32(PDC_PKT_NODATA); + break; + + default: + BUG(); + break; + } + + buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */ + buf32[2] = 0; /* no next-packet */ + + if (devno == 0) + dev_reg = ATA_DEVICE_OBS; + else + dev_reg = ATA_DEVICE_OBS | ATA_DEV1; + + /* select device */ + buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE; + buf[13] = dev_reg; + + /* device control register */ + buf[14] = (1 << 5) | PDC_REG_DEVCTL; + buf[15] = tf->ctl; + + return 16; /* offset of next byte */ +} + +static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf, + unsigned int i) +{ + if (tf->flags & ATA_TFLAG_DEVICE) { + buf[i++] = (1 << 5) | ATA_REG_DEVICE; + buf[i++] = tf->device; + } + + /* and finally the command itself; also includes end-of-pkt marker */ + buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD; + buf[i++] = tf->command; + + return i; +} + +static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i) +{ + /* the "(1 << 5)" should be read "(count << 5)" */ + + /* ATA command block registers */ + buf[i++] = (1 << 5) | ATA_REG_FEATURE; + buf[i++] = tf->feature; + + buf[i++] = (1 << 5) | ATA_REG_NSECT; + buf[i++] = tf->nsect; + + buf[i++] = (1 << 5) | ATA_REG_LBAL; + buf[i++] = tf->lbal; + + buf[i++] = (1 << 5) | ATA_REG_LBAM; + buf[i++] = tf->lbam; + + buf[i++] = (1 << 5) | ATA_REG_LBAH; + buf[i++] = tf->lbah; + + return i; +} + +static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i) +{ + /* the "(2 << 5)" should be read "(count << 5)" */ + + /* ATA command block registers */ + buf[i++] = (2 << 5) | ATA_REG_FEATURE; + buf[i++] = tf->hob_feature; + buf[i++] = tf->feature; + + buf[i++] = (2 << 5) | ATA_REG_NSECT; + buf[i++] = tf->hob_nsect; + buf[i++] = tf->nsect; + + buf[i++] = (2 << 5) | ATA_REG_LBAL; + buf[i++] = tf->hob_lbal; + buf[i++] = tf->lbal; + + buf[i++] = (2 << 5) | ATA_REG_LBAM; + buf[i++] = tf->hob_lbam; + buf[i++] = tf->lbam; + + buf[i++] = (2 << 5) | ATA_REG_LBAH; + 
buf[i++] = tf->hob_lbah; + buf[i++] = tf->lbah; + + return i; +} + + +#endif /* __SATA_PROMISE_H__ */ diff --git a/drivers/serial/cpm_uart/Makefile b/drivers/serial/cpm_uart/Makefile new file mode 100644 index 000000000..e072724ea --- /dev/null +++ b/drivers/serial/cpm_uart/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the Motorola 8xx FEC ethernet controller +# + +obj-$(CONFIG_SERIAL_CPM) += cpm_uart.o + +# Select the correct platform objects. +cpm_uart-objs-$(CONFIG_CPM2) += cpm_uart_cpm2.o +cpm_uart-objs-$(CONFIG_8xx) += cpm_uart_cpm1.o + +cpm_uart-objs := cpm_uart_core.o $(cpm_uart-objs-y) diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/serial/cpm_uart/cpm_uart.h new file mode 100644 index 000000000..598704e7e --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart.h @@ -0,0 +1,92 @@ +/* + * linux/drivers/serial/cpm_uart.h + * + * Driver for CPM (SCC/SMC) serial ports + * + * Copyright (C) 2004 Freescale Semiconductor, Inc. + * + */ +#ifndef CPM_UART_H +#define CPM_UART_H + +#include + +#if defined(CONFIG_CPM2) +#include "cpm_uart_cpm2.h" +#elif defined(CONFIG_8xx) +#include "cpm_uart_cpm1.h" +#endif + +#ifndef CONFIG_SERIAL_8250 +#define SERIAL_CPM_MAJOR TTY_MAJOR +#define SERIAL_CPM_MINOR 64 +#else +#define SERIAL_CPM_MAJOR 204 +#define SERIAL_CPM_MINOR 42 +#endif + +#define IS_SMC(pinfo) (pinfo->flags & FLAG_SMC) +#define IS_DISCARDING(pinfo) (pinfo->flags & FLAG_DISCARDING) +#define FLAG_DISCARDING 0x00000004 /* when set, don't discard */ +#define FLAG_SMC 0x00000002 +#define FLAG_CONSOLE 0x00000001 + +#define UART_SMC1 0 +#define UART_SMC2 1 +#define UART_SCC1 2 +#define UART_SCC2 3 +#define UART_SCC3 4 +#define UART_SCC4 5 + +#define UART_NR 6 + +#define RX_NUM_FIFO 4 +#define RX_BUF_SIZE 32 +#define TX_NUM_FIFO 4 +#define TX_BUF_SIZE 32 + +struct uart_cpm_port { + struct uart_port port; + u16 rx_nrfifos; + u16 rx_fifosize; + u16 tx_nrfifos; + u16 tx_fifosize; + smc_t *smcp; + smc_uart_t *smcup; + scc_t *sccp; + scc_uart_t *sccup; + volatile cbd_t *rx_bd_base; + volatile cbd_t *rx_cur; + volatile cbd_t *tx_bd_base; + volatile cbd_t *tx_cur; + unsigned char *tx_buf; + unsigned char *rx_buf; + u32 flags; + void (*set_lineif)(struct uart_cpm_port *); + u8 brg; + uint dp_addr; + void *mem_addr; + dma_addr_t dma_addr; + /* helpers */ + int baud; + int bits; +}; + +extern int cpm_uart_port_map[UART_NR]; +extern int cpm_uart_nr; +extern struct uart_cpm_port cpm_uart_ports[UART_NR]; + +/* these are located in their respective files */ +void cpm_line_cr_cmd(int line, int cmd); +int cpm_uart_init_portdesc(void); +int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con); +void cpm_uart_freebuf(struct uart_cpm_port *pinfo); + +void smc1_lineif(struct uart_cpm_port *pinfo); +void smc2_lineif(struct uart_cpm_port *pinfo); +void scc1_lineif(struct uart_cpm_port *pinfo); +void scc2_lineif(struct uart_cpm_port *pinfo); +void scc3_lineif(struct uart_cpm_port *pinfo); +void scc4_lineif(struct uart_cpm_port *pinfo); + +#endif /* CPM_UART_H */ diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c new file mode 100644 index 000000000..0612e9453 --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart_core.c @@ -0,0 +1,1179 @@ +/* + * linux/drivers/serial/cpm_uart.c + * + * Driver for CPM (SCC/SMC) serial ports; core driver + * + * Based on arch/ppc/cpm2_io/uart.c by Dan Malek + * Based on ppc8xx.c by Thomas Gleixner + * Based on drivers/serial/amba.c by Russell King + * + * Maintainer: Kumar Gala (kumar.gala@freescale.com) (CPM2) + * Pantelis 
Antoniou (panto@intracom.gr) (CPM1) + * + * Copyright (C) 2004 Freescale Semiconductor, Inc. + * (C) 2004 Intracom, S.A. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if defined(CONFIG_SERIAL_CPM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +#define SUPPORT_SYSRQ +#endif + +#include +#include + +#include "cpm_uart.h" + +/***********************************************************************/ + +/* Track which ports are configured as uarts */ +int cpm_uart_port_map[UART_NR]; +/* How many ports did we config as uarts */ +int cpm_uart_nr; + +/**************************************************************/ + +static int cpm_uart_tx_pump(struct uart_port *port); +static void cpm_uart_init_smc(struct uart_cpm_port *pinfo, int bits, u16 cval); +static void cpm_uart_init_scc(struct uart_cpm_port *pinfo, int sbits, u16 sval); + +/**************************************************************/ + +/* + * Check, if transmit buffers are processed +*/ +static unsigned int cpm_uart_tx_empty(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile cbd_t *bdp = pinfo->tx_bd_base; + int ret = 0; + + while (1) { + if (bdp->cbd_sc & BD_SC_READY) + break; + + if (bdp->cbd_sc & BD_SC_WRAP) { + ret = TIOCSER_TEMT; + break; + } + bdp++; + } + + pr_debug("CPM uart[%d]:tx_empty: %d\n", port->line, ret); + + return ret; +} + +static void cpm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* Whee. Do nothing. */ +} + +static unsigned int cpm_uart_get_mctrl(struct uart_port *port) +{ + /* Whee. Do nothing. 
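There are no modem-control lines under driver control here, so carrier, DSR and CTS are reported as permanently asserted.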
*/ + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; +} + +/* + * Stop transmitter + */ +static void cpm_uart_stop_tx(struct uart_port *port, unsigned int tty_stop) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile smc_t *smcp = pinfo->smcp; + volatile scc_t *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:stop tx\n", port->line); + + if (IS_SMC(pinfo)) + smcp->smc_smcm &= ~SMCM_TX; + else + sccp->scc_sccm &= ~UART_SCCM_TX; +} + +/* + * Start transmitter + */ +static void cpm_uart_start_tx(struct uart_port *port, unsigned int tty_start) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile smc_t *smcp = pinfo->smcp; + volatile scc_t *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:start tx\n", port->line); + + /* if in the middle of discarding return */ + if (IS_DISCARDING(pinfo)) + return; + + if (IS_SMC(pinfo)) { + if (smcp->smc_smcm & SMCM_TX) + return; + } else { + if (sccp->scc_sccm & UART_SCCM_TX) + return; + } + + if (cpm_uart_tx_pump(port) != 0) { + if (IS_SMC(pinfo)) + smcp->smc_smcm |= SMCM_TX; + else + sccp->scc_sccm |= UART_SCCM_TX; + } +} + +/* + * Stop receiver + */ +static void cpm_uart_stop_rx(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile smc_t *smcp = pinfo->smcp; + volatile scc_t *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:stop rx\n", port->line); + + if (IS_SMC(pinfo)) + smcp->smc_smcm &= ~SMCM_RX; + else + sccp->scc_sccm &= ~UART_SCCM_RX; +} + +/* + * Enable Modem status interrupts + */ +static void cpm_uart_enable_ms(struct uart_port *port) +{ + pr_debug("CPM uart[%d]:enable ms\n", port->line); +} + +/* + * Generate a break. + */ +static void cpm_uart_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + int line = pinfo - cpm_uart_ports; + + pr_debug("CPM uart[%d]:break ctrl, break_state: %d\n", port->line, + break_state); + + if (break_state) + cpm_line_cr_cmd(line, CPM_CR_STOP_TX); + else + cpm_line_cr_cmd(line, CPM_CR_RESTART_TX); +} + +/* + * Transmit characters, refill buffer descriptor, if possible + */ +static void cpm_uart_int_tx(struct uart_port *port, struct pt_regs *regs) +{ + pr_debug("CPM uart[%d]:TX INT\n", port->line); + + cpm_uart_tx_pump(port); +} + +/* + * Receive characters + */ +static void cpm_uart_int_rx(struct uart_port *port, struct pt_regs *regs) +{ + int i; + unsigned char ch, *cp; + struct tty_struct *tty = port->info->tty; + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile cbd_t *bdp; + u16 status; + unsigned int flg; + + pr_debug("CPM uart[%d]:RX INT\n", port->line); + + /* Just loop through the closed BDs and copy the characters into + * the buffer. 
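+ * A descriptor whose BD_SC_EMPTY bit is clear has been filled by the CPM:
+ * its cbd_datlen bytes are copied into the tty flip buffer, any error bits
+ * are cleared, the descriptor is handed back by setting BD_SC_EMPTY again,
+ * and BD_SC_WRAP sends the scan back to rx_bd_base.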
+ */ + bdp = pinfo->rx_cur; + for (;;) { + /* get status */ + status = bdp->cbd_sc; + /* If this one is empty, return happy */ + if (status & BD_SC_EMPTY) + break; + + /* get number of characters, and check spce in flip-buffer */ + i = bdp->cbd_datlen; + + /* If we have not enough room in tty flip buffer, then we try + * later, which will be the next rx-interrupt or a timeout + */ + if ((tty->flip.count + i) >= TTY_FLIPBUF_SIZE) { + tty->flip.work.func((void *)tty); + if ((tty->flip.count + i) >= TTY_FLIPBUF_SIZE) { + printk(KERN_WARNING "TTY_DONT_FLIP set\n"); + return; + } + } + + /* get pointer */ + cp = (unsigned char *)bus_to_virt(bdp->cbd_bufaddr); + + /* loop through the buffer */ + while (i-- > 0) { + ch = *cp++; + port->icount.rx++; + flg = TTY_NORMAL; + + if (status & + (BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV)) + goto handle_error; + if (uart_handle_sysrq_char(port, ch, regs)) + continue; + + error_return: + *tty->flip.char_buf_ptr++ = ch; + *tty->flip.flag_buf_ptr++ = flg; + tty->flip.count++; + + } /* End while (i--) */ + + /* This BD is ready to be used again. Clear status. get next */ + bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV); + bdp->cbd_sc |= BD_SC_EMPTY; + + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = pinfo->rx_bd_base; + else + bdp++; + } /* End for (;;) */ + + /* Write back buffer pointer */ + pinfo->rx_cur = (volatile cbd_t *) bdp; + + /* activate BH processing */ + tty_flip_buffer_push(tty); + + return; + + /* Error processing */ + + handle_error: + /* Statistics */ + if (status & BD_SC_BR) + port->icount.brk++; + if (status & BD_SC_PR) + port->icount.parity++; + if (status & BD_SC_FR) + port->icount.frame++; + if (status & BD_SC_OV) + port->icount.overrun++; + + /* Mask out ignored conditions */ + status &= port->read_status_mask; + + /* Handle the remaining ones */ + if (status & BD_SC_BR) + flg = TTY_BREAK; + else if (status & BD_SC_PR) + flg = TTY_PARITY; + else if (status & BD_SC_FR) + flg = TTY_FRAME; + + /* overrun does not affect the current character ! */ + if (status & BD_SC_OV) { + ch = 0; + flg = TTY_OVERRUN; + /* We skip this buffer */ + /* CHECK: Is really nothing senseful there */ + /* ASSUMPTION: it contains nothing valid */ + i = 0; + } +#ifdef SUPPORT_SYSRQ + port->sysrq = 0; +#endif + goto error_return; +} + +/* + * Asynchron mode interrupt handler + */ +static irqreturn_t cpm_uart_int(int irq, void *data, struct pt_regs *regs) +{ + u8 events; + struct uart_port *port = (struct uart_port *)data; + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile smc_t *smcp = pinfo->smcp; + volatile scc_t *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:IRQ\n", port->line); + + if (IS_SMC(pinfo)) { + events = smcp->smc_smce; + if (events & SMCM_BRKE) + uart_handle_break(port); + if (events & SMCM_RX) + cpm_uart_int_rx(port, regs); + if (events & SMCM_TX) + cpm_uart_int_tx(port, regs); + smcp->smc_smce = events; + } else { + events = sccp->scc_scce; + if (events & UART_SCCM_BRKE) + uart_handle_break(port); + if (events & UART_SCCM_RX) + cpm_uart_int_rx(port, regs); + if (events & UART_SCCM_TX) + cpm_uart_int_tx(port, regs); + sccp->scc_scce = events; + } + return (events) ? IRQ_HANDLED : IRQ_NONE; +} + +static int cpm_uart_startup(struct uart_port *port) +{ + int retval; + + pr_debug("CPM uart[%d]:startup\n", port->line); + + /* Install interrupt handler. 
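The shared cpm_uart_int() handler above demultiplexes break, receive and transmit events for both SMC and SCC ports.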
*/ + retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port); + if (retval) + return retval; + + return 0; +} + +/* + * Shutdown the uart + */ +static void cpm_uart_shutdown(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + int line = pinfo - cpm_uart_ports; + + pr_debug("CPM uart[%d]:shutdown\n", port->line); + + /* free interrupt handler */ + free_irq(port->irq, port); + + /* If the port is not the console, disable Rx and Tx. */ + if (!(pinfo->flags & FLAG_CONSOLE)) { + /* Stop uarts */ + if (IS_SMC(pinfo)) { + volatile smc_t *smcp = pinfo->smcp; + smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX); + } else { + volatile scc_t *sccp = pinfo->sccp; + sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX); + } + + /* Shut them really down and reinit buffer descriptors */ + cpm_line_cr_cmd(line, CPM_CR_INIT_TRX); + } +} + +static void cpm_uart_set_termios(struct uart_port *port, + struct termios *termios, struct termios *old) +{ + int baud; + unsigned long flags; + u16 cval, scval; + int bits, sbits; + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + int line = pinfo - cpm_uart_ports; + volatile cbd_t *bdp; + + pr_debug("CPM uart[%d]:set_termios\n", port->line); + + spin_lock_irqsave(&port->lock, flags); + /* disable uart interrupts */ + if (IS_SMC(pinfo)) + pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX); + else + pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX); + pinfo->flags |= FLAG_DISCARDING; + spin_unlock_irqrestore(&port->lock, flags); + + /* if previous configuration exists wait for tx to finish */ + if (pinfo->baud != 0 && pinfo->bits != 0) { + + /* point to the last txed bd */ + bdp = pinfo->tx_cur; + if (bdp == pinfo->tx_bd_base) + bdp = pinfo->tx_bd_base + (pinfo->tx_nrfifos - 1); + else + bdp--; + + /* wait for it to be transmitted */ + while ((bdp->cbd_sc & BD_SC_READY) != 0) + schedule(); + + /* and delay for the hw fifo to drain */ + udelay((3 * 1000000 * pinfo->bits) / pinfo->baud); + } + + spin_lock_irqsave(&port->lock, flags); + + /* Send the CPM an initialize command. */ + cpm_line_cr_cmd(line, CPM_CR_STOP_TX); + + /* Stop uart */ + if (IS_SMC(pinfo)) + pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + else + pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + + /* Send the CPM an initialize command. */ + cpm_line_cr_cmd(line, CPM_CR_INIT_TRX); + + spin_unlock_irqrestore(&port->lock, flags); + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); + + /* Character length programmed into the mode register is the + * sum of: 1 start bit, number of data bits, 0 or 1 parity bit, + * 1 or 2 stop bits, minus 1. + * The value 'bits' counts this for us. 
+ */ + cval = 0; + scval = 0; + + /* byte size */ + switch (termios->c_cflag & CSIZE) { + case CS5: + bits = 5; + break; + case CS6: + bits = 6; + break; + case CS7: + bits = 7; + break; + case CS8: + bits = 8; + break; + /* Never happens, but GCC is too dumb to figure it out */ + default: + bits = 8; + break; + } + sbits = bits - 5; + + if (termios->c_cflag & CSTOPB) { + cval |= SMCMR_SL; /* Two stops */ + scval |= SCU_PSMR_SL; + bits++; + } + + if (termios->c_cflag & PARENB) { + cval |= SMCMR_PEN; + scval |= SCU_PSMR_PEN; + bits++; + if (!(termios->c_cflag & PARODD)) { + cval |= SMCMR_PM_EVEN; + scval |= (SCU_PSMR_REVP | SCU_PSMR_TEVP); + } + } + + /* + * Set up parity check flag + */ +#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) + + port->read_status_mask = (BD_SC_EMPTY | BD_SC_OV); + if (termios->c_iflag & INPCK) + port->read_status_mask |= BD_SC_FR | BD_SC_PR; + if ((termios->c_iflag & BRKINT) || (termios->c_iflag & PARMRK)) + port->read_status_mask |= BD_SC_BR; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= BD_SC_PR | BD_SC_FR; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= BD_SC_BR; + /* + * If we're ignoring parity and break indicators, ignore + * overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= BD_SC_OV; + } + /* + * !!! ignore all characters if CREAD is not set + */ + if ((termios->c_cflag & CREAD) == 0) + port->read_status_mask &= ~BD_SC_EMPTY; + + spin_lock_irqsave(&port->lock, flags); + + cpm_set_brg(pinfo->brg - 1, baud); + + /* Start bit has not been added (so don't, because we would just + * subtract it later), and we need to add one for the number of + * stop bits (there is always at least one). + */ + bits++; + + /* re-init */ + if (IS_SMC(pinfo)) + cpm_uart_init_smc(pinfo, bits, cval); + else + cpm_uart_init_scc(pinfo, sbits, scval); + + pinfo->baud = baud; + pinfo->bits = bits; + + pinfo->flags &= ~FLAG_DISCARDING; + spin_unlock_irqrestore(&port->lock, flags); + +} + +static const char *cpm_uart_type(struct uart_port *port) +{ + pr_debug("CPM uart[%d]:uart_type\n", port->line); + + return port->type == PORT_CPM ? "CPM UART" : NULL; +} + +/* + * Verify the new serial_struct (for TIOCSSERIAL). + */ +static int cpm_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + int ret = 0; + + pr_debug("CPM uart[%d]:verify_port\n", port->line); + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= NR_IRQS) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + return ret; +} + +/* + * Transmit characters, refill buffer descriptor, if possible + */ +static int cpm_uart_tx_pump(struct uart_port *port) +{ + volatile cbd_t *bdp; + unsigned char *p; + int count; + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + struct circ_buf *xmit = &port->info->xmit; + + /* Handle xon/xoff */ + if (port->x_char) { + /* Pick next descriptor and fill it with the flow-control char */ + bdp = pinfo->tx_cur; + + p = bus_to_virt(bdp->cbd_bufaddr); + *p++ = port->x_char; + bdp->cbd_datlen = 1; + bdp->cbd_sc |= BD_SC_READY; + /* Get next BD. 
*/ + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = pinfo->tx_bd_base; + else + bdp++; + pinfo->tx_cur = bdp; + + port->icount.tx++; + port->x_char = 0; + return 1; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + cpm_uart_stop_tx(port, 0); + return 0; + } + + /* Pick next descriptor and fill from buffer */ + bdp = pinfo->tx_cur; + + while (!(bdp->cbd_sc & BD_SC_READY) && (xmit->tail != xmit->head)) { + count = 0; + p = bus_to_virt(bdp->cbd_bufaddr); + while (count < pinfo->tx_fifosize) { + *p++ = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + count++; + if (xmit->head == xmit->tail) + break; + } + bdp->cbd_datlen = count; + bdp->cbd_sc |= BD_SC_READY; + /* Get next BD. */ + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = pinfo->tx_bd_base; + else + bdp++; + } + pinfo->tx_cur = bdp; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) { + cpm_uart_stop_tx(port, 0); + return 0; + } + + return 1; +} + +static void cpm_uart_init_scc(struct uart_cpm_port *pinfo, int bits, u16 scval) +{ + int line = pinfo - cpm_uart_ports; + volatile scc_t *scp; + volatile scc_uart_t *sup; + u8 *mem_addr; + volatile cbd_t *bdp; + int i; + + pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line); + + scp = pinfo->sccp; + sup = pinfo->sccup; + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. + */ + pinfo->rx_cur = pinfo->rx_bd_base; + mem_addr = pinfo->mem_addr; + for (bdp = pinfo->rx_bd_base, i = 0; i < pinfo->rx_nrfifos; i++, bdp++) { + bdp->cbd_bufaddr = virt_to_bus(mem_addr); + bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT | (i < (pinfo->rx_nrfifos - 1) ? 0 : BD_SC_WRAP); + mem_addr += pinfo->rx_fifosize; + } + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. + */ + mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize); + pinfo->tx_cur = pinfo->tx_bd_base; + for (bdp = pinfo->tx_bd_base, i = 0; i < pinfo->tx_nrfifos; i++, bdp++) { + bdp->cbd_bufaddr = virt_to_bus(mem_addr); + bdp->cbd_sc = BD_SC_INTRPT | (i < (pinfo->tx_nrfifos - 1) ? 0 : BD_SC_WRAP); + mem_addr += pinfo->tx_fifosize; + } + + /* Store address */ + pinfo->sccup->scc_genscc.scc_rbase = (unsigned char *)pinfo->rx_bd_base - DPRAM_BASE; + pinfo->sccup->scc_genscc.scc_tbase = (unsigned char *)pinfo->tx_bd_base - DPRAM_BASE; + + /* Set up the uart parameters in the + * parameter ram. + */ + + cpm_set_scc_fcr(sup); + + sup->scc_genscc.scc_mrblr = pinfo->rx_fifosize; + sup->scc_maxidl = pinfo->rx_fifosize; + sup->scc_brkcr = 1; + sup->scc_parec = 0; + sup->scc_frmec = 0; + sup->scc_nosec = 0; + sup->scc_brkec = 0; + sup->scc_uaddr1 = 0; + sup->scc_uaddr2 = 0; + sup->scc_toseq = 0; + sup->scc_char1 = 0x8000; + sup->scc_char2 = 0x8000; + sup->scc_char3 = 0x8000; + sup->scc_char4 = 0x8000; + sup->scc_char5 = 0x8000; + sup->scc_char6 = 0x8000; + sup->scc_char7 = 0x8000; + sup->scc_char8 = 0x8000; + sup->scc_rccm = 0xc0ff; + + /* Send the CPM an initialize command. + */ + cpm_line_cr_cmd(line, CPM_CR_INIT_TRX); + + /* Set UART mode, 8 bit, no parity, one stop. + * Enable receive and transmit. + */ + scp->scc_gsmrh = 0; + scp->scc_gsmrl = + (SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16); + + /* Enable rx interrupts and clear all pending events. 
*/ + scp->scc_sccm = UART_SCCM_RX; + scp->scc_scce = 0xffff; + scp->scc_dsr = 0x7e7e; + scp->scc_psmr = (bits << 12) | scval; + + scp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT); +} + +static void cpm_uart_init_smc(struct uart_cpm_port *pinfo, int bits, u16 cval) +{ + int line = pinfo - cpm_uart_ports; + volatile smc_t *sp; + volatile smc_uart_t *up; + volatile u8 *mem_addr; + volatile cbd_t *bdp; + int i; + + pr_debug("CPM uart[%d]:init_smc\n", pinfo->port.line); + + sp = pinfo->smcp; + up = pinfo->smcup; + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. + */ + mem_addr = pinfo->mem_addr; + pinfo->rx_cur = pinfo->rx_bd_base; + for (bdp = pinfo->rx_bd_base, i = 0; i < pinfo->rx_nrfifos; i++, bdp++) { + bdp->cbd_bufaddr = virt_to_bus(mem_addr); + bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT | (i < (pinfo->rx_nrfifos - 1) ? 0 : BD_SC_WRAP); + mem_addr += pinfo->rx_fifosize; + } + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. + */ + mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize); + pinfo->tx_cur = pinfo->tx_bd_base; + for (bdp = pinfo->tx_bd_base, i = 0; i < pinfo->tx_nrfifos; i++, bdp++) { + bdp->cbd_bufaddr = virt_to_bus(mem_addr); + bdp->cbd_sc = BD_SC_INTRPT | (i < (pinfo->tx_nrfifos - 1) ? 0 : BD_SC_WRAP); + mem_addr += pinfo->tx_fifosize; + } + + /* Store address */ + pinfo->smcup->smc_rbase = (u_char *)pinfo->rx_bd_base - DPRAM_BASE; + pinfo->smcup->smc_tbase = (u_char *)pinfo->tx_bd_base - DPRAM_BASE; + + /* Set up the uart parameters in the + * parameter ram. + */ + cpm_set_smc_fcr(up); + + /* Using idle charater time requires some additional tuning. */ + up->smc_mrblr = pinfo->rx_fifosize; + up->smc_maxidl = pinfo->rx_fifosize; + up->smc_brkcr = 1; + + cpm_line_cr_cmd(line, CPM_CR_INIT_TRX); + + /* Set UART mode, according to the parameters */ + sp->smc_smcmr = smcr_mk_clen(bits) | cval | SMCMR_SM_UART; + + /* Enable only rx interrupts clear all pending events. */ + sp->smc_smcm = SMCM_RX; + sp->smc_smce = 0xff; + + sp->smc_smcmr |= (SMCMR_REN | SMCMR_TEN); +} + +/* + * Initialize port. This is called from early_console stuff + * so we have to be careful here ! + */ +static int cpm_uart_request_port(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + int ret; + + pr_debug("CPM uart[%d]:request port\n", port->line); + + if (pinfo->flags & FLAG_CONSOLE) + return 0; + + /* + * Setup any port IO, connect any baud rate generators, + * etc. This is expected to be handled by board + * dependant code + */ + if (pinfo->set_lineif) + pinfo->set_lineif(pinfo); + + ret = cpm_uart_allocbuf(pinfo, 0); + if (ret) + return ret; + + return 0; +} + +static void cpm_uart_release_port(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + + if (!(pinfo->flags & FLAG_CONSOLE)) + cpm_uart_freebuf(pinfo); +} + +/* + * Configure/autoconfigure the port. 
+ */ +static void cpm_uart_config_port(struct uart_port *port, int flags) +{ + pr_debug("CPM uart[%d]:config_port\n", port->line); + + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_CPM; + cpm_uart_request_port(port); + } +} +static struct uart_ops cpm_uart_pops = { + .tx_empty = cpm_uart_tx_empty, + .set_mctrl = cpm_uart_set_mctrl, + .get_mctrl = cpm_uart_get_mctrl, + .stop_tx = cpm_uart_stop_tx, + .start_tx = cpm_uart_start_tx, + .stop_rx = cpm_uart_stop_rx, + .enable_ms = cpm_uart_enable_ms, + .break_ctl = cpm_uart_break_ctl, + .startup = cpm_uart_startup, + .shutdown = cpm_uart_shutdown, + .set_termios = cpm_uart_set_termios, + .type = cpm_uart_type, + .release_port = cpm_uart_release_port, + .request_port = cpm_uart_request_port, + .config_port = cpm_uart_config_port, + .verify_port = cpm_uart_verify_port, +}; + +struct uart_cpm_port cpm_uart_ports[UART_NR] = { + [UART_SMC1] = { + .port = { + .irq = SMC1_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .flags = FLAG_SMC, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = smc1_lineif, + }, + [UART_SMC2] = { + .port = { + .irq = SMC2_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .flags = FLAG_SMC, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = smc2_lineif, + }, + [UART_SCC1] = { + .port = { + .irq = SCC1_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = scc1_lineif, + }, + [UART_SCC2] = { + .port = { + .irq = SCC2_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = scc2_lineif, + }, + [UART_SCC3] = { + .port = { + .irq = SCC3_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = scc3_lineif, + }, + [UART_SCC4] = { + .port = { + .irq = SCC4_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = scc4_lineif, + }, +}; + +#ifdef CONFIG_SERIAL_CPM_CONSOLE +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + * + * Note that this is called with interrupts already disabled + */ +static void cpm_uart_console_write(struct console *co, const char *s, + u_int count) +{ + struct uart_cpm_port *pinfo = + &cpm_uart_ports[cpm_uart_port_map[co->index]]; + unsigned int i; + volatile cbd_t *bdp, *bdbase; + volatile unsigned char *cp; + + if (IS_DISCARDING(pinfo)) + return; + + /* Get the address of the host memory buffer. + */ + bdp = pinfo->tx_cur; + bdbase = pinfo->tx_bd_base; + + /* + * Now, do each character. This is not as bad as it looks + * since this is a holding FIFO and not a transmitting FIFO. + * We could add the complexity of filling the entire transmit + * buffer, but we would just wait longer between accesses...... + */ + for (i = 0; i < count; i++, s++) { + /* Wait for transmitter fifo to empty. + * Ready indicates output is ready, and xmt is doing + * that, not that it is ready for us to send. 
+ */ + while ((bdp->cbd_sc & BD_SC_READY) != 0) + ; + + /* Send the character out. + * If the buffer address is in the CPM DPRAM, don't + * convert it. + */ + if ((uint) (bdp->cbd_bufaddr) > (uint) CPM_ADDR) + cp = (unsigned char *) (bdp->cbd_bufaddr); + else + cp = bus_to_virt(bdp->cbd_bufaddr); + + *cp = *s; + + bdp->cbd_datlen = 1; + bdp->cbd_sc |= BD_SC_READY; + + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = bdbase; + else + bdp++; + + /* if a LF, also do CR... */ + if (*s == 10) { + while ((bdp->cbd_sc & BD_SC_READY) != 0) + ; + + if ((uint) (bdp->cbd_bufaddr) > (uint) CPM_ADDR) + cp = (unsigned char *) (bdp->cbd_bufaddr); + else + cp = bus_to_virt(bdp->cbd_bufaddr); + + *cp = 13; + bdp->cbd_datlen = 1; + bdp->cbd_sc |= BD_SC_READY; + + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = bdbase; + else + bdp++; + } + } + + /* + * Finally, wait for the transmitter & holding register to empty + * and restore the IER + */ + while ((bdp->cbd_sc & BD_SC_READY) != 0) + ; + + pinfo->tx_cur = (volatile cbd_t *) bdp; +} + +/* + * Set up the console. Be careful, this is called early! + */ +static int __init cpm_uart_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + struct uart_cpm_port *pinfo; + int baud = 38400; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + port = + (struct uart_port *)&cpm_uart_ports[cpm_uart_port_map[co->index]]; + pinfo = (struct uart_cpm_port *)port; + + pinfo->flags |= FLAG_CONSOLE; + + if (options) { + uart_parse_options(options, &baud, &parity, &bits, &flow); + } else { + bd_t *bd = (bd_t *) __res; + + if (bd->bi_baudrate) + baud = bd->bi_baudrate; + else + baud = 9600; + } + + /* + * Setup any port IO, connect any baud rate generators, + * etc. This is expected to be handled by board + * dependent code + */ + if (pinfo->set_lineif) + pinfo->set_lineif(pinfo); + + ret = cpm_uart_allocbuf(pinfo, 1); + if (ret) + return ret; + + uart_set_options(port, co, baud, parity, bits, flow); + + return 0; +} + +extern struct uart_driver cpm_reg; +static struct console cpm_scc_uart_console = { + .name = "ttyCPM", + .write = cpm_uart_console_write, + .device = uart_console_device, + .setup = cpm_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &cpm_reg, +}; + +int __init cpm_uart_console_init(void) +{ + int ret = cpm_uart_init_portdesc(); + + if (!ret) + register_console(&cpm_scc_uart_console); + return ret; +} + +console_initcall(cpm_uart_console_init); + +#define CPM_UART_CONSOLE &cpm_scc_uart_console +#else +#define CPM_UART_CONSOLE NULL +#endif + +static struct uart_driver cpm_reg = { + .owner = THIS_MODULE, + .driver_name = "ttyCPM", + .dev_name = "ttyCPM", + .major = SERIAL_CPM_MAJOR, + .minor = SERIAL_CPM_MINOR, + .cons = CPM_UART_CONSOLE, +}; + +static int __init cpm_uart_init(void) +{ + int ret, i; + + printk(KERN_INFO "Serial: CPM driver $Revision: 0.01 $\n"); + +#ifndef CONFIG_SERIAL_CPM_CONSOLE + ret = cpm_uart_init_portdesc(); + if (ret) + return ret; +#endif + + cpm_reg.nr = cpm_uart_nr; + ret = uart_register_driver(&cpm_reg); + + if (ret) + return ret; + + for (i = 0; i < cpm_uart_nr; i++) { + int con = cpm_uart_port_map[i]; + cpm_uart_ports[con].port.line = i; + cpm_uart_ports[con].port.flags = UPF_BOOT_AUTOCONF; + uart_add_one_port(&cpm_reg, &cpm_uart_ports[con].port); + } + + return ret; +} + +static void __exit cpm_uart_exit(void) +{ + int i; + + for (i = 0; i < cpm_uart_nr; i++) { + int con = cpm_uart_port_map[i]; + uart_remove_one_port(&cpm_reg, &cpm_uart_ports[con].port); + } + + uart_unregister_driver(&cpm_reg); +} + 
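+/* Illustrative usage note (an assumption for clarity, not part of the original patch): with the + * console and driver both registered under the name "ttyCPM" above, the boot console would + * typically be selected with a kernel command-line argument such as console=ttyCPM0,38400, + * where the index follows cpm_uart_port_map[] (index 0 is the first port enabled in + * cpm_uart_init_portdesc()). + */ + 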
+module_init(cpm_uart_init); +module_exit(cpm_uart_exit); + +MODULE_AUTHOR("Kumar Gala/Antoniou Pantelis"); +MODULE_DESCRIPTION("CPM SCC/SMC port driver $Revision: 0.01 $"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV(SERIAL_CPM_MAJOR, SERIAL_CPM_MINOR); diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/serial/cpm_uart/cpm_uart_cpm1.c new file mode 100644 index 000000000..7c940b5b5 --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.c @@ -0,0 +1,275 @@ +/* + * linux/drivers/serial/cpm_uart.c + * + * Driver for CPM (SCC/SMC) serial ports; CPM1 definitions + * + * Maintainer: Kumar Gala (kumar.gala@freescale.com) (CPM2) + * Pantelis Antoniou (panto@intracom.gr) (CPM1) + * + * Copyright (C) 2004 Freescale Semiconductor, Inc. + * (C) 2004 Intracom, S.A. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "cpm_uart.h" + +/**************************************************************/ + +void cpm_line_cr_cmd(int line, int cmd) +{ + ushort val; + volatile cpm8xx_t *cp = cpmp; + + switch (line) { + case UART_SMC1: + val = mk_cr_cmd(CPM_CR_CH_SMC1, cmd) | CPM_CR_FLG; + break; + case UART_SMC2: + val = mk_cr_cmd(CPM_CR_CH_SMC2, cmd) | CPM_CR_FLG; + break; + case UART_SCC1: + val = mk_cr_cmd(CPM_CR_CH_SCC1, cmd) | CPM_CR_FLG; + break; + case UART_SCC2: + val = mk_cr_cmd(CPM_CR_CH_SCC2, cmd) | CPM_CR_FLG; + break; + case UART_SCC3: + val = mk_cr_cmd(CPM_CR_CH_SCC3, cmd) | CPM_CR_FLG; + break; + case UART_SCC4: + val = mk_cr_cmd(CPM_CR_CH_SCC4, cmd) | CPM_CR_FLG; + break; + default: + return; + + } + cp->cp_cpcr = val; + while (cp->cp_cpcr & CPM_CR_FLG) ; +} + +void smc1_lineif(struct uart_cpm_port *pinfo) +{ + volatile cpm8xx_t *cp = cpmp; + + cp->cp_pbpar |= 0x000000c0; + cp->cp_pbdir &= ~0x000000c0; + cp->cp_pbodr &= ~0x000000c0; + + pinfo->brg = 1; +} + +void smc2_lineif(struct uart_cpm_port *pinfo) +{ + /* XXX SMC2: insert port configuration here */ + pinfo->brg = 2; +} + +void scc1_lineif(struct uart_cpm_port *pinfo) +{ + /* XXX SCC1: insert port configuration here */ + pinfo->brg = 1; +} + +void scc2_lineif(struct uart_cpm_port *pinfo) +{ + /* XXX SCC2: insert port configuration here */ + pinfo->brg = 2; +} + +void scc3_lineif(struct uart_cpm_port *pinfo) +{ + /* XXX SCC3: insert port configuration here */ + pinfo->brg = 3; +} + +void scc4_lineif(struct uart_cpm_port *pinfo) +{ + /* XXX SCC4: insert port configuration here */ + pinfo->brg = 4; +} + +/* + * Allocate DP-Ram and memory buffers. We need to allocate a transmit and + * receive buffer descriptors from dual port ram, and a character + * buffer area from host mem. 
If we are allocating for the console we need + * to do it from bootmem + */ +int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con) +{ + int dpmemsz, memsz; + u8 *dp_mem; + uint dp_addr; + u8 *mem_addr; + dma_addr_t dma_addr; + + pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line); + + dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos); + dp_mem = m8xx_cpm_dpalloc(dpmemsz); + if (dp_mem == NULL) { + printk(KERN_ERR + "cpm_uart_cpm1.c: could not allocate buffer descriptors\n"); + return -ENOMEM; + } + dp_addr = m8xx_cpm_dpram_offset(dp_mem); + + memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize); + if (is_con) { + mem_addr = (u8 *) m8xx_cpm_hostalloc(memsz); + dma_addr = 0; + } else + mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr, + GFP_KERNEL); + + if (mem_addr == NULL) { + m8xx_cpm_dpfree(dp_mem); + printk(KERN_ERR + "cpm_uart_cpm1.c: could not allocate coherent memory\n"); + return -ENOMEM; + } + + pinfo->dp_addr = dp_addr; + pinfo->mem_addr = mem_addr; + pinfo->dma_addr = dma_addr; + + pinfo->rx_buf = mem_addr; + pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos + * pinfo->rx_fifosize); + + pinfo->rx_bd_base = (volatile cbd_t *)dp_mem; + pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos; + + return 0; +} + +void cpm_uart_freebuf(struct uart_cpm_port *pinfo) +{ + dma_free_coherent(NULL, L1_CACHE_ALIGN(pinfo->rx_nrfifos * + pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * + pinfo->tx_fifosize), pinfo->mem_addr, + pinfo->dma_addr); + + m8xx_cpm_dpfree(m8xx_cpm_dpram_addr(pinfo->dp_addr)); +} + +/* Setup any dynamic params in the uart desc */ +int cpm_uart_init_portdesc(void) +{ + pr_debug("CPM uart[-]:init portdesc\n"); + + cpm_uart_nr = 0; +#ifdef CONFIG_SERIAL_CPM_SMC1 + cpm_uart_ports[UART_SMC1].smcp = &cpmp->cp_smc[0]; + cpm_uart_ports[UART_SMC1].smcup = + (smc_uart_t *) & cpmp->cp_dparam[PROFF_SMC1]; + cpm_uart_ports[UART_SMC1].port.mapbase = + (unsigned long)&cpmp->cp_smc[0]; + cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); + cpm_uart_ports[UART_SMC1].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + cpm_uart_ports[UART_SMC1].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1; +#endif + +#ifdef CONFIG_SERIAL_CPM_SMC2 + cpm_uart_ports[UART_SMC2].smcp = &cpmp->cp_smc[1]; + cpm_uart_ports[UART_SMC2].smcup = + (smc_uart_t *) & cpmp->cp_dparam[PROFF_SMC2]; + cpm_uart_ports[UART_SMC2].port.mapbase = + (unsigned long)&cpmp->cp_smc[1]; + cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); + cpm_uart_ports[UART_SMC2].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + cpm_uart_ports[UART_SMC2].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC1 + cpm_uart_ports[UART_SCC1].sccp = &cpmp->cp_scc[0]; + cpm_uart_ports[UART_SCC1].sccup = + (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC1]; + cpm_uart_ports[UART_SCC1].port.mapbase = + (unsigned long)&cpmp->cp_scc[0]; + cpm_uart_ports[UART_SCC1].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC1].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC1].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC2 + cpm_uart_ports[UART_SCC2].sccp = &cpmp->cp_scc[1]; + cpm_uart_ports[UART_SCC2].sccup = + (scc_uart_t *) & 
cpmp->cp_dparam[PROFF_SCC2]; + cpm_uart_ports[UART_SCC2].port.mapbase = + (unsigned long)&cpmp->cp_scc[1]; + cpm_uart_ports[UART_SCC2].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC2].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC2].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC3 + cpm_uart_ports[UART_SCC3].sccp = &cpmp->cp_scc[2]; + cpm_uart_ports[UART_SCC3].sccup = + (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC3]; + cpm_uart_ports[UART_SCC3].port.mapbase = + (unsigned long)&cpmp->cp_scc[2]; + cpm_uart_ports[UART_SCC3].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC3].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC3].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC4 + cpm_uart_ports[UART_SCC4].sccp = &cpmp->cp_scc[3]; + cpm_uart_ports[UART_SCC4].sccup = + (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC4]; + cpm_uart_ports[UART_SCC4].port.mapbase = + (unsigned long)&cpmp->cp_scc[3]; + cpm_uart_ports[UART_SCC4].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC4].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC4].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4; +#endif + return 0; +} diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/serial/cpm_uart/cpm_uart_cpm1.h new file mode 100644 index 000000000..155050b7c --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.h @@ -0,0 +1,45 @@ +/* + * linux/drivers/serial/cpm_uart_cpm1.h + * + * Driver for CPM (SCC/SMC) serial ports + * + * definitions for cpm1 + * + */ + +#ifndef CPM_UART_CPM1_H +#define CPM_UART_CPM1_H + +#include + +/* defines for IRQs */ +#define SMC1_IRQ (CPM_IRQ_OFFSET + CPMVEC_SMC1) +#define SMC2_IRQ (CPM_IRQ_OFFSET + CPMVEC_SMC2) +#define SCC1_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC1) +#define SCC2_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC2) +#define SCC3_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC3) +#define SCC4_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC4) + +/* the CPM address */ +#define CPM_ADDR IMAP_ADDR + +static inline void cpm_set_brg(int brg, int baud) +{ + m8xx_cpm_setbrg(brg, baud); +} + +static inline void cpm_set_scc_fcr(volatile scc_uart_t * sup) +{ + sup->scc_genscc.scc_rfcr = SMC_EB; + sup->scc_genscc.scc_tfcr = SMC_EB; +} + +static inline void cpm_set_smc_fcr(volatile smc_uart_t * up) +{ + up->smc_rfcr = SMC_EB; + up->smc_tfcr = SMC_EB; +} + +#define DPRAM_BASE ((unsigned char *)&cpmp->cp_dpmem[0]) + +#endif diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c new file mode 100644 index 000000000..d2566889d --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c @@ -0,0 +1,328 @@ +/* + * linux/drivers/serial/cpm_uart_cpm2.c + * + * Driver for CPM (SCC/SMC) serial ports; CPM2 definitions + * + * Maintainer: Kumar Gala (kumar.gala@freescale.com) (CPM2) + * Pantelis Antoniou (panto@intracom.gr) (CPM1) + * + * Copyright (C) 2004 Freescale Semiconductor, Inc. + * (C) 2004 Intracom, S.A. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "cpm_uart.h" + +/**************************************************************/ + +void cpm_line_cr_cmd(int line, int cmd) +{ + volatile cpm_cpm2_t *cp = cpmp; + ulong val; + + switch (line) { + case UART_SMC1: + val = mk_cr_cmd(CPM_CR_SMC1_PAGE, CPM_CR_SMC1_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + case UART_SMC2: + val = mk_cr_cmd(CPM_CR_SMC2_PAGE, CPM_CR_SMC2_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + case UART_SCC1: + val = mk_cr_cmd(CPM_CR_SCC1_PAGE, CPM_CR_SCC1_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + case UART_SCC2: + val = mk_cr_cmd(CPM_CR_SCC2_PAGE, CPM_CR_SCC2_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + case UART_SCC3: + val = mk_cr_cmd(CPM_CR_SCC3_PAGE, CPM_CR_SCC3_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + case UART_SCC4: + val = mk_cr_cmd(CPM_CR_SCC4_PAGE, CPM_CR_SCC4_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + default: + return; + + } + cp->cp_cpcr = val; + while (cp->cp_cpcr & CPM_CR_FLG) ; +} + +void smc1_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + + /* SMC1 is only on port D */ + io->iop_ppard |= 0x00c00000; + io->iop_pdird |= 0x00400000; + io->iop_pdird &= ~0x00800000; + io->iop_psord &= ~0x00c00000; + + /* Wire BRG1 to SMC1 */ + cpm2_immr->im_cpmux.cmx_smr &= 0x0f; + pinfo->brg = 1; +} + +void smc2_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + + /* SMC2 is only on port A */ + io->iop_ppara |= 0x00c00000; + io->iop_pdira |= 0x00400000; + io->iop_pdira &= ~0x00800000; + io->iop_psora &= ~0x00c00000; + + /* Wire BRG2 to SMC2 */ + cpm2_immr->im_cpmux.cmx_smr &= 0xf0; + pinfo->brg = 2; +} + +void scc1_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + + /* Use Port D for SCC1 instead of other functions. 
*/ + io->iop_ppard |= 0x00000003; + io->iop_psord &= ~0x00000001; /* Rx */ + io->iop_psord |= 0x00000002; /* Tx */ + io->iop_pdird &= ~0x00000001; /* Rx */ + io->iop_pdird |= 0x00000002; /* Tx */ + + /* Wire BRG1 to SCC1 */ + cpm2_immr->im_cpmux.cmx_scr &= ~0x00ffffff; + cpm2_immr->im_cpmux.cmx_scr |= 0x00000000; + pinfo->brg = 1; +} + +void scc2_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + io->iop_pparb |= 0x008b0000; + io->iop_pdirb |= 0x00880000; + io->iop_psorb |= 0x00880000; + io->iop_pdirb &= ~0x00030000; + io->iop_psorb &= ~0x00030000; + cpm2_immr->im_cpmux.cmx_scr &= ~0xff00ffff; + cpm2_immr->im_cpmux.cmx_scr |= 0x00090000; + pinfo->brg = 2; +} + +void scc3_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + io->iop_pparb |= 0x008b0000; + io->iop_pdirb |= 0x00880000; + io->iop_psorb |= 0x00880000; + io->iop_pdirb &= ~0x00030000; + io->iop_psorb &= ~0x00030000; + cpm2_immr->im_cpmux.cmx_scr &= ~0xffff00ff; + cpm2_immr->im_cpmux.cmx_scr |= 0x00001200; + pinfo->brg = 3; +} + +void scc4_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + + io->iop_ppard |= 0x00000600; + io->iop_psord &= ~0x00000600; /* Tx/Rx */ + io->iop_pdird &= ~0x00000200; /* Rx */ + io->iop_pdird |= 0x00000400; /* Tx */ + + cpm2_immr->im_cpmux.cmx_scr &= ~0xffffff00; + cpm2_immr->im_cpmux.cmx_scr |= 0x0000001b; + pinfo->brg = 4; +} + +/* + * Allocate DP-Ram and memory buffers. We need to allocate a transmit and + * receive buffer descriptors from dual port ram, and a character + * buffer area from host mem. If we are allocating for the console we need + * to do it from bootmem + */ +int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con) +{ + int dpmemsz, memsz; + u8 *dp_mem; + uint dp_addr; + u8 *mem_addr; + dma_addr_t dma_addr = 0; + + pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line); + + dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos); + dp_mem = cpm2_dpalloc(dpmemsz, 8); + if (dp_mem == NULL) { + printk(KERN_ERR + "cpm_uart_cpm1.c: could not allocate buffer descriptors\n"); + return -ENOMEM; + } + + dp_addr = cpm2_dpram_offset(dp_mem); + + memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize); + if (is_con) + mem_addr = alloc_bootmem(memsz); + else + mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr, + GFP_KERNEL); + + if (mem_addr == NULL) { + cpm2_dpfree(dp_mem); + printk(KERN_ERR + "cpm_uart_cpm1.c: could not allocate coherent memory\n"); + return -ENOMEM; + } + + pinfo->dp_addr = dp_addr; + pinfo->mem_addr = mem_addr; + pinfo->dma_addr = dma_addr; + + pinfo->rx_buf = mem_addr; + pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos + * pinfo->rx_fifosize); + + pinfo->rx_bd_base = (volatile cbd_t *)dp_mem; + pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos; + + return 0; +} + +void cpm_uart_freebuf(struct uart_cpm_port *pinfo) +{ + dma_free_coherent(NULL, L1_CACHE_ALIGN(pinfo->rx_nrfifos * + pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * + pinfo->tx_fifosize), pinfo->mem_addr, + pinfo->dma_addr); + + cpm2_dpfree(&pinfo->dp_addr); +} + +/* Setup any dynamic params in the uart desc */ +int cpm_uart_init_portdesc(void) +{ + pr_debug("CPM uart[-]:init portdesc\n"); + + cpm_uart_nr = 0; +#ifdef CONFIG_SERIAL_CPM_SMC1 + cpm_uart_ports[UART_SMC1].smcp = (smc_t *) & cpm2_immr->im_smc[0]; + cpm_uart_ports[UART_SMC1].smcup = + (smc_uart_t *) & 
cpm2_immr->im_dprambase[PROFF_SMC1]; + cpm_uart_ports[UART_SMC1].port.mapbase = + (unsigned long)&cpm2_immr->im_smc[0]; + cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); + cpm_uart_ports[UART_SMC1].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + cpm_uart_ports[UART_SMC1].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1; +#endif + +#ifdef CONFIG_SERIAL_CPM_SMC2 + cpm_uart_ports[UART_SMC2].smcp = (smc_t *) & cpm2_immr->im_smc[1]; + cpm_uart_ports[UART_SMC2].smcup = + (smc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SMC2]; + cpm_uart_ports[UART_SMC2].port.mapbase = + (unsigned long)&cpm2_immr->im_smc[1]; + cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); + cpm_uart_ports[UART_SMC2].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + cpm_uart_ports[UART_SMC2].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC1 + cpm_uart_ports[UART_SCC1].sccp = (scc_t *) & cpm2_immr->im_scc[0]; + cpm_uart_ports[UART_SCC1].sccup = + (scc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SCC1]; + cpm_uart_ports[UART_SCC1].port.mapbase = + (unsigned long)&cpm2_immr->im_scc[0]; + cpm_uart_ports[UART_SCC1].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC1].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC1].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC2 + cpm_uart_ports[UART_SCC2].sccp = (scc_t *) & cpm2_immr->im_scc[1]; + cpm_uart_ports[UART_SCC2].sccup = + (scc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SCC2]; + cpm_uart_ports[UART_SCC2].port.mapbase = + (unsigned long)&cpm2_immr->im_scc[1]; + cpm_uart_ports[UART_SCC2].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC2].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC2].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC3 + cpm_uart_ports[UART_SCC3].sccp = (scc_t *) & cpm2_immr->im_scc[2]; + cpm_uart_ports[UART_SCC3].sccup = + (scc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SCC3]; + cpm_uart_ports[UART_SCC3].port.mapbase = + (unsigned long)&cpm2_immr->im_scc[2]; + cpm_uart_ports[UART_SCC3].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC3].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC3].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC4 + cpm_uart_ports[UART_SCC4].sccp = (scc_t *) & cpm2_immr->im_scc[3]; + cpm_uart_ports[UART_SCC4].sccup = + (scc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SCC4]; + cpm_uart_ports[UART_SCC4].port.mapbase = + (unsigned long)&cpm2_immr->im_scc[3]; + cpm_uart_ports[UART_SCC4].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC4].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC4].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4; +#endif + + return 0; +} diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/serial/cpm_uart/cpm_uart_cpm2.h new file mode 100644 index 000000000..eb620bd98 --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.h @@ -0,0 +1,45 @@ +/* + * linux/drivers/serial/cpm_uart_cpm2.h + * + * Driver for CPM (SCC/SMC) serial ports + * 
+ * definitions for cpm2 + * + */ + +#ifndef CPM_UART_CPM2_H +#define CPM_UART_CPM2_H + +#include + +/* defines for IRQs */ +#define SMC1_IRQ SIU_INT_SMC1 +#define SMC2_IRQ SIU_INT_SMC2 +#define SCC1_IRQ SIU_INT_SCC1 +#define SCC2_IRQ SIU_INT_SCC2 +#define SCC3_IRQ SIU_INT_SCC3 +#define SCC4_IRQ SIU_INT_SCC4 + +/* the CPM address */ +#define CPM_ADDR CPM_MAP_ADDR + +static inline void cpm_set_brg(int brg, int baud) +{ + cpm2_setbrg(brg, baud); +} + +static inline void cpm_set_scc_fcr(volatile scc_uart_t * sup) +{ + sup->scc_genscc.scc_rfcr = CPMFCR_GBL | CPMFCR_EB; + sup->scc_genscc.scc_tfcr = CPMFCR_GBL | CPMFCR_EB; +} + +static inline void cpm_set_smc_fcr(volatile smc_uart_t * up) +{ + up->smc_rfcr = CPMFCR_GBL | CPMFCR_EB; + up->smc_tfcr = CPMFCR_GBL | CPMFCR_EB; +} + +#define DPRAM_BASE ((unsigned char *)&cpm2_immr->im_dprambase[0]) + +#endif diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c new file mode 100644 index 000000000..3feba05b4 --- /dev/null +++ b/drivers/serial/mpc52xx_uart.c @@ -0,0 +1,869 @@ +/* + * drivers/serial/mpc52xx_uart.c + * + * Driver for the PSCs of the Freescale MPC52xx configured as UARTs. + * + * FIXME According to the user manual, the status bits in the status register + * are only updated when the peripherals access the FIFO and not when the + * CPU accesses them. So since we use these bits to know when to stop writing + * and reading, they may not be updated in time and a race condition may + * exist. But I haven't been able to prove this and I don't care. But if + * any problem arises, it might be worth checking. The TX/RX FIFO Stats + * registers should be used in addition. + * Update: Actually, they seem updated ... At least the bits we use. + * + * + * Maintainer : Sylvain Munaut + * + * Some of the code has been inspired/copied from the 2.4 code written + * by Dale Farnsworth . + * + * Copyright (C) 2004 Sylvain Munaut + * Copyright (C) 2003 MontaVista, Software, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +/* OCP Usage : + * + * This driver uses the OCP model. To load the serial driver for one of the + * PSCs, just add this to the core_ocp table : + * + * { + * .vendor = OCP_VENDOR_FREESCALE, + * .function = OCP_FUNC_PSC_UART, + * .index = 0, + * .paddr = MPC52xx_PSC1, + * .irq = MPC52xx_PSC1_IRQ, + * .pm = OCP_CPM_NA, + * }, + * + * This is for PSC1; replace the paddr and irq according to the PSC you want to + * use. The driver sets all necessary registers to place the PSC in uart mode without + * DCD. However, the pin multiplexing isn't changed and should be set either + * by the bootloader or in the platform init code. + * The index field must be equal to the PSC index (e.g. 0 for PSC1, 1 for PSC2, + * and so on), so PSC1 is mapped to /dev/ttyS0, PSC2 to /dev/ttyS1 and so + * on. But be warned, it's an ABSOLUTE REQUIREMENT ! This is needed mainly for + * the console code : without this 1:1 mapping, at early boot time, when we are + * parsing the kernel args console=ttyS?, we wouldn't know which PSC it will be + * mapped to, because the OCP stuff is not yet initialized. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#if defined(CONFIG_SERIAL_MPC52xx_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +#define SUPPORT_SYSRQ +#endif + +#include + + + +#define ISR_PASS_LIMIT 256 /* Max number of iteration in the interrupt */ + + +static struct uart_port mpc52xx_uart_ports[MPC52xx_PSC_MAXNUM]; + /* Rem: - We use the read_status_mask as a shadow of + * psc->mpc52xx_psc_imr + * - It's important that is array is all zero on start as we + * use it to know if it's initialized or not ! If it's not sure + * it's cleared, then a memset(...,0,...) should be added to + * the console_init + */ + +#define PSC(port) ((struct mpc52xx_psc *)((port)->membase)) + + +/* Forward declaration of the interruption handling routine */ +static irqreturn_t mpc52xx_uart_int(int irq,void *dev_id,struct pt_regs *regs); + + +/* Simple macro to test if a port is console or not. This one is taken + * for serial_core.c and maybe should be moved to serial_core.h ? */ +#ifdef CONFIG_SERIAL_CORE_CONSOLE +#define uart_console(port) ((port)->cons && (port)->cons->index == (port)->line) +#else +#define uart_console(port) (0) +#endif + + +/* ======================================================================== */ +/* UART operations */ +/* ======================================================================== */ + +static unsigned int +mpc52xx_uart_tx_empty(struct uart_port *port) +{ + int status = in_be16(&PSC(port)->mpc52xx_psc_status); + return (status & MPC52xx_PSC_SR_TXEMP) ? TIOCSER_TEMT : 0; +} + +static void +mpc52xx_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* Not implemented */ +} + +static unsigned int +mpc52xx_uart_get_mctrl(struct uart_port *port) +{ + /* Not implemented */ + return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; +} + +static void +mpc52xx_uart_stop_tx(struct uart_port *port, unsigned int tty_stop) +{ + /* port->lock taken by caller */ + port->read_status_mask &= ~MPC52xx_PSC_IMR_TXRDY; + out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask); +} + +static void +mpc52xx_uart_start_tx(struct uart_port *port, unsigned int tty_start) +{ + /* port->lock taken by caller */ + port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY; + out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask); +} + +static void +mpc52xx_uart_send_xchar(struct uart_port *port, char ch) +{ + unsigned long flags; + spin_lock_irqsave(&port->lock, flags); + + port->x_char = ch; + if (ch) { + /* Make sure tx interrupts are on */ + /* Truly necessary ??? 
They should be anyway */ + port->read_status_mask |= MPC52xx_PSC_IMR_TXRDY; + out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask); + } + + spin_unlock_irqrestore(&port->lock, flags); +} + +static void +mpc52xx_uart_stop_rx(struct uart_port *port) +{ + /* port->lock taken by caller */ + port->read_status_mask &= ~MPC52xx_PSC_IMR_RXRDY; + out_be16(&PSC(port)->mpc52xx_psc_imr,port->read_status_mask); +} + +static void +mpc52xx_uart_enable_ms(struct uart_port *port) +{ + /* Not implemented */ +} + +static void +mpc52xx_uart_break_ctl(struct uart_port *port, int ctl) +{ + unsigned long flags; + spin_lock_irqsave(&port->lock, flags); + + if ( ctl == -1 ) + out_8(&PSC(port)->command,MPC52xx_PSC_START_BRK); + else + out_8(&PSC(port)->command,MPC52xx_PSC_STOP_BRK); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static int +mpc52xx_uart_startup(struct uart_port *port) +{ + struct mpc52xx_psc *psc = PSC(port); + + /* Reset/activate the port, clear and enable interrupts */ + out_8(&psc->command,MPC52xx_PSC_RST_RX); + out_8(&psc->command,MPC52xx_PSC_RST_TX); + + out_be32(&psc->sicr,0); /* UART mode DCD ignored */ + + out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); /* /16 prescaler on */ + + out_8(&psc->rfcntl, 0x00); + out_be16(&psc->rfalarm, 0x1ff); + out_8(&psc->tfcntl, 0x07); + out_be16(&psc->tfalarm, 0x80); + + port->read_status_mask |= MPC52xx_PSC_IMR_RXRDY | MPC52xx_PSC_IMR_TXRDY; + out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask); + + out_8(&psc->command,MPC52xx_PSC_TX_ENABLE); + out_8(&psc->command,MPC52xx_PSC_RX_ENABLE); + + return 0; +} + +static void +mpc52xx_uart_shutdown(struct uart_port *port) +{ + struct mpc52xx_psc *psc = PSC(port); + + /* Shut down the port, interrupt and all */ + out_8(&psc->command,MPC52xx_PSC_RST_RX); + out_8(&psc->command,MPC52xx_PSC_RST_TX); + + port->read_status_mask = 0; + out_be16(&psc->mpc52xx_psc_imr,port->read_status_mask); +} + +static void +mpc52xx_uart_set_termios(struct uart_port *port, struct termios *new, + struct termios *old) +{ + struct mpc52xx_psc *psc = PSC(port); + unsigned long flags; + unsigned char mr1, mr2; + unsigned short ctr; + unsigned int j, baud, quot; + + /* Prepare what we're gonna write */ + mr1 = 0; + + switch (new->c_cflag & CSIZE) { + case CS5: mr1 |= MPC52xx_PSC_MODE_5_BITS; + break; + case CS6: mr1 |= MPC52xx_PSC_MODE_6_BITS; + break; + case CS7: mr1 |= MPC52xx_PSC_MODE_7_BITS; + break; + case CS8: + default: mr1 |= MPC52xx_PSC_MODE_8_BITS; + } + + if (new->c_cflag & PARENB) { + mr1 |= (new->c_cflag & PARODD) ? + MPC52xx_PSC_MODE_PARODD : MPC52xx_PSC_MODE_PAREVEN; + } else + mr1 |= MPC52xx_PSC_MODE_PARNONE; + + + mr2 = 0; + + if (new->c_cflag & CSTOPB) + mr2 |= MPC52xx_PSC_MODE_TWO_STOP; + else + mr2 |= ((new->c_cflag & CSIZE) == CS5) ? + MPC52xx_PSC_MODE_ONE_STOP_5_BITS : + MPC52xx_PSC_MODE_ONE_STOP; + + + baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16); + quot = uart_get_divisor(port, baud); + ctr = quot & 0xffff; + + /* Get the lock */ + spin_lock_irqsave(&port->lock, flags); + + /* Update the per-port timeout */ + uart_update_timeout(port, new->c_cflag, baud); + + /* Do our best to flush TX & RX, so we don't loose anything */ + /* But we don't wait indefinitly ! 
*/ + j = 5000000; /* Maximum wait */ + /* FIXME Can't receive chars since set_termios might be called at early + * boot for the console, all stuff is not yet ready to receive at that + * time and that just makes the kernel oops */ + /* while (j-- && mpc52xx_uart_int_rx_chars(port)); */ + while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) && + --j) + udelay(1); + + if (!j) + printk( KERN_ERR "mpc52xx_uart.c: " + "Unable to flush RX & TX fifos in-time in set_termios." + "Some chars may have been lost.\n" ); + + /* Reset the TX & RX */ + out_8(&psc->command,MPC52xx_PSC_RST_RX); + out_8(&psc->command,MPC52xx_PSC_RST_TX); + + /* Send new mode settings */ + out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1); + out_8(&psc->mode,mr1); + out_8(&psc->mode,mr2); + out_8(&psc->ctur,ctr >> 8); + out_8(&psc->ctlr,ctr & 0xff); + + /* Reenable TX & RX */ + out_8(&psc->command,MPC52xx_PSC_TX_ENABLE); + out_8(&psc->command,MPC52xx_PSC_RX_ENABLE); + + /* We're all set, release the lock */ + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char * +mpc52xx_uart_type(struct uart_port *port) +{ + return port->type == PORT_MPC52xx ? "MPC52xx PSC" : NULL; +} + +static void +mpc52xx_uart_release_port(struct uart_port *port) +{ + if (port->flags & UPF_IOREMAP) { /* remapped by us ? */ + iounmap(port->membase); + port->membase = NULL; + } +} + +static int +mpc52xx_uart_request_port(struct uart_port *port) +{ + if (port->flags & UPF_IOREMAP) /* Need to remap ? */ + port->membase = ioremap(port->mapbase, sizeof(struct mpc52xx_psc)); + + return port->membase != NULL ? 0 : -EBUSY; +} + +static void +mpc52xx_uart_config_port(struct uart_port *port, int flags) +{ + if ( (flags & UART_CONFIG_TYPE) && + (mpc52xx_uart_request_port(port) == 0) ) + port->type = PORT_MPC52xx; +} + +static int +mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if ( ser->type != PORT_UNKNOWN && ser->type != PORT_MPC52xx ) + return -EINVAL; + + if ( (ser->irq != port->irq) || + (ser->io_type != SERIAL_IO_MEM) || + (ser->baud_base != port->uartclk) || + // FIXME Should check addresses/irq as well ? + (ser->hub6 != 0 ) ) + return -EINVAL; + + return 0; +} + + +static struct uart_ops mpc52xx_uart_ops = { + .tx_empty = mpc52xx_uart_tx_empty, + .set_mctrl = mpc52xx_uart_set_mctrl, + .get_mctrl = mpc52xx_uart_get_mctrl, + .stop_tx = mpc52xx_uart_stop_tx, + .start_tx = mpc52xx_uart_start_tx, + .send_xchar = mpc52xx_uart_send_xchar, + .stop_rx = mpc52xx_uart_stop_rx, + .enable_ms = mpc52xx_uart_enable_ms, + .break_ctl = mpc52xx_uart_break_ctl, + .startup = mpc52xx_uart_startup, + .shutdown = mpc52xx_uart_shutdown, + .set_termios = mpc52xx_uart_set_termios, +/* .pm = mpc52xx_uart_pm, Not supported yet */ +/* .set_wake = mpc52xx_uart_set_wake, Not supported yet */ + .type = mpc52xx_uart_type, + .release_port = mpc52xx_uart_release_port, + .request_port = mpc52xx_uart_request_port, + .config_port = mpc52xx_uart_config_port, + .verify_port = mpc52xx_uart_verify_port +}; + + +/* ======================================================================== */ +/* Interrupt handling */ +/* ======================================================================== */ + +static inline int +mpc52xx_uart_int_rx_chars(struct uart_port *port, struct pt_regs *regs) +{ + struct tty_struct *tty = port->info->tty; + unsigned char ch; + unsigned short status; + + /* While we can read, do so ! 
*/ + while ( (status = in_be16(&PSC(port)->mpc52xx_psc_status)) & + MPC52xx_PSC_SR_RXRDY) { + + /* If we are full, just stop reading */ + if (tty->flip.count >= TTY_FLIPBUF_SIZE) + break; + + /* Get the char */ + ch = in_8(&PSC(port)->mpc52xx_psc_buffer_8); + + /* Handle sysreq char */ +#ifdef SUPPORT_SYSRQ + if (uart_handle_sysrq_char(port, ch, regs)) { + port->sysrq = 0; + continue; + } +#endif + + /* Store it */ + *tty->flip.char_buf_ptr = ch; + *tty->flip.flag_buf_ptr = 0; + port->icount.rx++; + + if ( status & (MPC52xx_PSC_SR_PE | + MPC52xx_PSC_SR_FE | + MPC52xx_PSC_SR_RB | + MPC52xx_PSC_SR_OE) ) { + + if (status & MPC52xx_PSC_SR_RB) { + *tty->flip.flag_buf_ptr = TTY_BREAK; + uart_handle_break(port); + } else if (status & MPC52xx_PSC_SR_PE) + *tty->flip.flag_buf_ptr = TTY_PARITY; + else if (status & MPC52xx_PSC_SR_FE) + *tty->flip.flag_buf_ptr = TTY_FRAME; + if (status & MPC52xx_PSC_SR_OE) { + /* + * Overrun is special, since it's + * reported immediately, and doesn't + * affect the current character + */ + if (tty->flip.count < (TTY_FLIPBUF_SIZE-1)) { + tty->flip.flag_buf_ptr++; + tty->flip.char_buf_ptr++; + tty->flip.count++; + } + *tty->flip.flag_buf_ptr = TTY_OVERRUN; + } + + /* Clear error condition */ + out_8(&PSC(port)->command,MPC52xx_PSC_RST_ERR_STAT); + + } + + tty->flip.char_buf_ptr++; + tty->flip.flag_buf_ptr++; + tty->flip.count++; + + } + + tty_flip_buffer_push(tty); + + return in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_RXRDY; +} + +static inline int +mpc52xx_uart_int_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->info->xmit; + + /* Process out of band chars */ + if (port->x_char) { + out_8(&PSC(port)->mpc52xx_psc_buffer_8, port->x_char); + port->icount.tx++; + port->x_char = 0; + return 1; + } + + /* Nothing to do ? */ + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + mpc52xx_uart_stop_tx(port,0); + return 0; + } + + /* Send chars */ + while (in_be16(&PSC(port)->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXRDY) { + out_8(&PSC(port)->mpc52xx_psc_buffer_8, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } + + /* Wake up */ + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + /* Maybe we're done after all */ + if (uart_circ_empty(xmit)) { + mpc52xx_uart_stop_tx(port,0); + return 0; + } + + return 1; +} + +static irqreturn_t +mpc52xx_uart_int(int irq, void *dev_id, struct pt_regs *regs) +{ + struct uart_port *port = (struct uart_port *) dev_id; + unsigned long pass = ISR_PASS_LIMIT; + unsigned int keepgoing; + unsigned short status; + + if ( irq != port->irq ) { + printk( KERN_WARNING + "mpc52xx_uart_int : " \ + "Received wrong int %d. Waiting for %d\n", + irq, port->irq); + return IRQ_NONE; + } + + spin_lock(&port->lock); + + /* While we have stuff to do, we continue */ + do { + /* If we don't find anything to do, we stop */ + keepgoing = 0; + + /* Read status */ + status = in_be16(&PSC(port)->mpc52xx_psc_isr); + status &= port->read_status_mask; + + /* Do we need to receive chars ? */ + /* For this RX interrupts must be on and some chars waiting */ + if ( status & MPC52xx_PSC_IMR_RXRDY ) + keepgoing |= mpc52xx_uart_int_rx_chars(port, regs); + + /* Do we need to send chars ? 
*/ + /* For this, TX must be ready and TX interrupt enabled */ + if ( status & MPC52xx_PSC_IMR_TXRDY ) + keepgoing |= mpc52xx_uart_int_tx_chars(port); + + /* Limit the number of iterations */ + if ( !(--pass) ) + keepgoing = 0; + + } while (keepgoing); + + spin_unlock(&port->lock); + + return IRQ_HANDLED; +} + + +/* ======================================================================== */ +/* Console ( if applicable ) */ +/* ======================================================================== */ + +#ifdef CONFIG_SERIAL_MPC52xx_CONSOLE + +static void __init +mpc52xx_console_get_options(struct uart_port *port, + int *baud, int *parity, int *bits, int *flow) +{ + struct mpc52xx_psc *psc = PSC(port); + unsigned char mr1; + + /* Read the mode registers */ + out_8(&psc->command,MPC52xx_PSC_SEL_MODE_REG_1); + mr1 = in_8(&psc->mode); + + /* CT{U,L}R are write-only ! */ + *baud = __res.bi_baudrate ? + __res.bi_baudrate : CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD; + + /* Parse them */ + switch (mr1 & MPC52xx_PSC_MODE_BITS_MASK) { + case MPC52xx_PSC_MODE_5_BITS: *bits = 5; break; + case MPC52xx_PSC_MODE_6_BITS: *bits = 6; break; + case MPC52xx_PSC_MODE_7_BITS: *bits = 7; break; + case MPC52xx_PSC_MODE_8_BITS: + default: *bits = 8; + } + + if (mr1 & MPC52xx_PSC_MODE_PARNONE) + *parity = 'n'; + else + *parity = mr1 & MPC52xx_PSC_MODE_PARODD ? 'o' : 'e'; +} + +static void +mpc52xx_console_write(struct console *co, const char *s, unsigned int count) +{ + struct uart_port *port = &mpc52xx_uart_ports[co->index]; + struct mpc52xx_psc *psc = PSC(port); + unsigned int i, j; + + /* Disable interrupts */ + out_be16(&psc->mpc52xx_psc_imr, 0); + + /* Wait the TX buffer to be empty */ + j = 5000000; /* Maximum wait */ + while (!(in_be16(&psc->mpc52xx_psc_status) & MPC52xx_PSC_SR_TXEMP) && + --j) + udelay(1); + + /* Write all the chars */ + for ( i=0 ; i<count ; i++ ) { + + out_8(&psc->mpc52xx_psc_buffer_8, *s); + + /* Line return handling */ + if ( *s++ == '\n' ) + out_8(&psc->mpc52xx_psc_buffer_8, '\r'); + + /* Wait the TX buffer to be empty */ + j = 20000; /* Maximum wait */ + while (!(in_be16(&psc->mpc52xx_psc_status) & + MPC52xx_PSC_SR_TXEMP) && --j) + udelay(1); + } + + /* Restore interrupt state */ + out_be16(&psc->mpc52xx_psc_imr, port->read_status_mask); +} + +static int __init +mpc52xx_console_setup(struct console *co, char *options) +{ + struct uart_port *port = &mpc52xx_uart_ports[co->index]; + + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= MPC52xx_PSC_MAXNUM) + return -EINVAL; + + /* Basic port init. Needed since we use some uart_??? 
func before + * real init for early access */ + port->lock = SPIN_LOCK_UNLOCKED; + port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */ + port->ops = &mpc52xx_uart_ops; + port->mapbase = MPC52xx_PSCx(co->index); + + /* We ioremap ourself */ + port->membase = ioremap(port->mapbase, sizeof(struct mpc52xx_psc)); + if (port->membase == NULL) { + release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc)); + return -EBUSY; + } + + /* Setup the port parameters accoding to options */ + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + mpc52xx_console_get_options(port, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + + +extern struct uart_driver mpc52xx_uart_driver; + +static struct console mpc52xx_console = { + .name = "ttyS", + .write = mpc52xx_console_write, + .device = uart_console_device, + .setup = mpc52xx_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, /* Specified on the cmdline (e.g. console=ttyS0 ) */ + .data = &mpc52xx_uart_driver, +}; + + +static int __init +mpc52xx_console_init(void) +{ + register_console(&mpc52xx_console); + return 0; +} + +console_initcall(mpc52xx_console_init); + +#define MPC52xx_PSC_CONSOLE &mpc52xx_console +#else +#define MPC52xx_PSC_CONSOLE NULL +#endif + + +/* ======================================================================== */ +/* UART Driver */ +/* ======================================================================== */ + +static struct uart_driver mpc52xx_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "mpc52xx_psc_uart", + .dev_name = "ttyS", + .devfs_name = "ttyS", + .major = TTY_MAJOR, + .minor = 64, + .nr = MPC52xx_PSC_MAXNUM, + .cons = MPC52xx_PSC_CONSOLE, +}; + + +/* ======================================================================== */ +/* OCP Driver */ +/* ======================================================================== */ + +static int __devinit +mpc52xx_uart_probe(struct ocp_device *ocp) +{ + struct uart_port *port = NULL; + int idx, ret; + + /* Get the corresponding port struct */ + idx = ocp->def->index; + if (idx < 0 || idx >= MPC52xx_PSC_MAXNUM) + return -EINVAL; + + port = &mpc52xx_uart_ports[idx]; + + /* Init the port structure */ + port->lock = SPIN_LOCK_UNLOCKED; + port->mapbase = ocp->def->paddr; + port->irq = ocp->def->irq; + port->uartclk = __res.bi_ipbfreq / 2; /* Look at CTLR doc */ + port->fifosize = 255; /* Should be 512 ! But it can't be */ + /* stored in a unsigned char */ + port->iotype = UPIO_MEM; + port->flags = UPF_BOOT_AUTOCONF | + ( uart_console(port) ? 0 : UPF_IOREMAP ); + port->line = idx; + port->ops = &mpc52xx_uart_ops; + port->read_status_mask = 0; + + /* Requests the mem & irqs */ + /* Unlike other serial drivers, we reserve the resources here, so we + * can detect early if multiple drivers uses the same PSC. Special + * care must be taken with the console PSC + */ + ret = request_irq( + port->irq, mpc52xx_uart_int, + SA_INTERRUPT | SA_SAMPLE_RANDOM, "mpc52xx_psc_uart", port); + if (ret) + goto error; + + ret = request_mem_region(port->mapbase, sizeof(struct mpc52xx_psc), + "mpc52xx_psc_uart") != NULL ? 
0 : -EBUSY; + if (ret) + goto free_irq; + + /* Add the port to the uart sub-system */ + ret = uart_add_one_port(&mpc52xx_uart_driver, port); + if (ret) + goto release_mem; + + ocp_set_drvdata(ocp, (void*)port); + + return 0; + + +free_irq: + free_irq(port->irq, mpc52xx_uart_int); + +release_mem: + release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc)); + +error: + if (uart_console(port)) + printk( "mpc52xx_uart.c: Error during resource alloction for " + "the console port !!! Check that the console PSC is " + "not used by another OCP driver !!!\n" ); + + return ret; +} + +static void +mpc52xx_uart_remove(struct ocp_device *ocp) +{ + struct uart_port *port = (struct uart_port *) ocp_get_drvdata(ocp); + + ocp_set_drvdata(ocp, NULL); + + if (port) { + uart_remove_one_port(&mpc52xx_uart_driver, port); + release_mem_region(port->mapbase, sizeof(struct mpc52xx_psc)); + free_irq(port->irq, mpc52xx_uart_int); + } +} + +#ifdef CONFIG_PM +static int +mpc52xx_uart_suspend(struct ocp_device *ocp, u32 state) +{ + struct uart_port *port = (struct uart_port *) ocp_get_drvdata(ocp); + + uart_suspend_port(&mpc52xx_uart_driver, port); + + return 0; +} + +static int +mpc52xx_uart_resume(struct ocp_device *ocp) +{ + struct uart_port *port = (struct uart_port *) ocp_get_drvdata(ocp); + + uart_resume_port(&mpc52xx_uart_driver, port); + + return 0; +} +#endif + +static struct ocp_device_id mpc52xx_uart_ids[] __devinitdata = { + { .vendor = OCP_VENDOR_FREESCALE, .function = OCP_FUNC_PSC_UART }, + { .vendor = OCP_VENDOR_INVALID /* Terminating entry */ } +}; + +MODULE_DEVICE_TABLE(ocp, mpc52xx_uart_ids); + +static struct ocp_driver mpc52xx_uart_ocp_driver = { + .name = "mpc52xx_psc_uart", + .id_table = mpc52xx_uart_ids, + .probe = mpc52xx_uart_probe, + .remove = mpc52xx_uart_remove, +#ifdef CONFIG_PM + .suspend = mpc52xx_uart_suspend, + .resume = mpc52xx_uart_resume, +#endif +}; + + +/* ======================================================================== */ +/* Module */ +/* ======================================================================== */ + +static int __init +mpc52xx_uart_init(void) +{ + int ret; + + printk(KERN_INFO "Serial: MPC52xx PSC driver\n"); + + ret = uart_register_driver(&mpc52xx_uart_driver); + if (ret) + return ret; + + ret = ocp_register_driver(&mpc52xx_uart_ocp_driver); + + return ret; +} + +static void __exit +mpc52xx_uart_exit(void) +{ + ocp_unregister_driver(&mpc52xx_uart_ocp_driver); + uart_unregister_driver(&mpc52xx_uart_driver); +} + + +module_init(mpc52xx_uart_init); +module_exit(mpc52xx_uart_exit); + +MODULE_AUTHOR("Sylvain Munaut "); +MODULE_DESCRIPTION("Freescale MPC52xx PSC UART"); +MODULE_LICENSE("GPL"); diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/serial/serial_lh7a40x.c new file mode 100644 index 000000000..d863368e4 --- /dev/null +++ b/drivers/serial/serial_lh7a40x.c @@ -0,0 +1,708 @@ +/* drivers/serial/serial_lh7a40x.c + * + * Copyright (C) 2004 Coastal Environmental Systems + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + */ + +/* Driver for Sharp LH7A40X embedded serial ports + * + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * Based on drivers/serial/amba.c, by Deep Blue Solutions Ltd. + * + * --- + * + * This driver supports the embedded UARTs of the Sharp LH7A40X series + * CPUs. 
While similar to the 16550 and other UART chips, there is + * nothing close to register compatibility. Moreover, some of the + * modem control lines are not available, either in the chip or they + * are lacking in the board-level implementation. + * + * - Use of SIRDIS + * For simplicity, we disable the IR functions of any UART whenever + * we enable it. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#if defined(CONFIG_SERIAL_LH7A40X_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +#define SUPPORT_SYSRQ +#endif + +#include + +#include + +#define DEV_MAJOR 204 +#define DEV_MINOR 16 +#define DEV_NR 3 + +#define ISR_LOOP_LIMIT 256 + +#define UR(p,o) _UR ((p)->membase, o) +#define _UR(b,o) (*((volatile unsigned int*)(((unsigned char*) b) + (o)))) +#define BIT_CLR(p,o,m) UR(p,o) = UR(p,o) & (~(unsigned int)m) +#define BIT_SET(p,o,m) UR(p,o) = UR(p,o) | ( (unsigned int)m) + +#define UART_REG_SIZE 32 + +#define UARTEN (0x01) /* UART enable */ +#define SIRDIS (0x02) /* Serial IR disable (UART1 only) */ + +#define RxEmpty (0x10) +#define TxEmpty (0x80) +#define TxFull (0x20) +#define nRxRdy RxEmpty +#define nTxRdy TxFull +#define TxBusy (0x08) + +#define RxBreak (0x0800) +#define RxOverrunError (0x0400) +#define RxParityError (0x0200) +#define RxFramingError (0x0100) +#define RxError (RxBreak | RxOverrunError | RxParityError | RxFramingError) + +#define DCD (0x04) +#define DSR (0x02) +#define CTS (0x01) + +#define RxInt (0x01) +#define TxInt (0x02) +#define ModemInt (0x04) +#define RxTimeoutInt (0x08) + +#define MSEOI (0x10) + +#define WLEN_8 (0x60) +#define WLEN_7 (0x40) +#define WLEN_6 (0x20) +#define WLEN_5 (0x00) +#define WLEN (0x60) /* Mask for all word-length bits */ +#define STP2 (0x08) +#define PEN (0x02) /* Parity Enable */ +#define EPS (0x04) /* Even Parity Set */ +#define FEN (0x10) /* FIFO Enable */ +#define BRK (0x01) /* Send Break */ + + +struct uart_port_lh7a40x { + struct uart_port port; + unsigned int statusPrev; /* Most recently read modem status */ +}; + +static void lh7a40xuart_stop_tx (struct uart_port* port, unsigned int tty_stop) +{ + BIT_CLR (port, UART_R_INTEN, TxInt); +} + +static void lh7a40xuart_start_tx (struct uart_port* port, + unsigned int tty_start) +{ + BIT_SET (port, UART_R_INTEN, TxInt); + + /* *** FIXME: do I need to check for startup of the + transmitter? The old driver did, but AMBA + doesn't . 
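+
+   Added sketch, not from the original driver: the transmit flow as
+   written, assuming the TxInt handling in lh7a40xuart_int() below.
+
+	serial core calls start_tx()   ->  BIT_SET(port, UART_R_INTEN, TxInt)
+	UART raises TxInt when the transmitter can accept data
+	lh7a40xuart_int() sees TxInt   ->  lh7a40xuart_tx_chars() drains xmit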
*/ +} + +static void lh7a40xuart_stop_rx (struct uart_port* port) +{ + BIT_SET (port, UART_R_INTEN, RxTimeoutInt | RxInt); +} + +static void lh7a40xuart_enable_ms (struct uart_port* port) +{ + BIT_SET (port, UART_R_INTEN, ModemInt); +} + +static void +#ifdef SUPPORT_SYSRQ +lh7a40xuart_rx_chars (struct uart_port* port, struct pt_regs* regs) +#else +lh7a40xuart_rx_chars (struct uart_port* port) +#endif +{ + struct tty_struct* tty = port->info->tty; + int cbRxMax = 256; /* (Gross) limit on receive */ + unsigned int data; /* Received data and status */ + + while (!(UR (port, UART_R_STATUS) & nRxRdy) && --cbRxMax) { + if (tty->flip.count >= TTY_FLIPBUF_SIZE) { + tty->flip.work.func((void*)tty); + if (tty->flip.count >= TTY_FLIPBUF_SIZE) { + printk(KERN_WARNING "TTY_DONT_FLIP set\n"); + return; + } + } + + data = UR (port, UART_R_DATA); + + *tty->flip.char_buf_ptr = (unsigned char) data; + *tty->flip.flag_buf_ptr = TTY_NORMAL; + ++port->icount.rx; + + if (data & RxError) { /* Quick check, short-circuit */ + if (data & RxBreak) { + data &= ~(RxFramingError | RxParityError); + ++port->icount.brk; + if (uart_handle_break (port)) + continue; + } + else if (data & RxParityError) + ++port->icount.parity; + else if (data & RxFramingError) + ++port->icount.frame; + if (data & RxOverrunError) + ++port->icount.overrun; + + /* Mask by termios, leave Rx'd byte */ + data &= port->read_status_mask | 0xff; + + if (data & RxBreak) + *tty->flip.flag_buf_ptr = TTY_BREAK; + else if (data & RxParityError) + *tty->flip.flag_buf_ptr = TTY_PARITY; + else if (data & RxFramingError) + *tty->flip.flag_buf_ptr = TTY_FRAME; + } + + if (uart_handle_sysrq_char (port, (unsigned char) data, regs)) + continue; + + if ((data & port->ignore_status_mask) == 0) { + ++tty->flip.flag_buf_ptr; + ++tty->flip.char_buf_ptr; + ++tty->flip.count; + } + if ((data & RxOverrunError) + && tty->flip.count < TTY_FLIPBUF_SIZE) { + /* + * Overrun is special, since it's reported + * immediately, and doesn't affect the current + * character + */ + *tty->flip.char_buf_ptr++ = 0; + *tty->flip.flag_buf_ptr++ = TTY_OVERRUN; + ++tty->flip.count; + } + } + tty_flip_buffer_push (tty); + return; +} + +static void lh7a40xuart_tx_chars (struct uart_port* port) +{ + struct circ_buf* xmit = &port->info->xmit; + int cbTxMax = port->fifosize; + + if (port->x_char) { + UR (port, UART_R_DATA) = port->x_char; + ++port->icount.tx; + port->x_char = 0; + return; + } + if (uart_circ_empty (xmit) || uart_tx_stopped (port)) { + lh7a40xuart_stop_tx (port, 0); + return; + } + + /* Unlike the AMBA UART, the lh7a40x UART does not guarantee + that at least half of the FIFO is empty. Instead, we check + status for every character. Using the AMBA method causes + the transmitter to drop characters. 
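+
+   Added illustration, not from the original driver, assuming the UR()
+   and nTxRdy/TxFull definitions at the top of this file: an AMBA-style
+   driver writes a burst of characters without re-checking the FIFO,
+   while the loop below re-reads UART_R_STATUS after every byte and
+   stops as soon as nTxRdy (TxFull) is set, roughly (budget and next
+   are placeholder names):
+
+	while (!(UR(port, UART_R_STATUS) & nTxRdy) && budget--)
+		UR(port, UART_R_DATA) = *next++;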
*/ + + do { + UR (port, UART_R_DATA) = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + ++port->icount.tx; + if (uart_circ_empty(xmit)) + break; + } while (!(UR (port, UART_R_STATUS) & nTxRdy) + && cbTxMax--); + + if (uart_circ_chars_pending (xmit) < WAKEUP_CHARS) + uart_write_wakeup (port); + + if (uart_circ_empty (xmit)) + lh7a40xuart_stop_tx (port, 0); +} + +static void lh7a40xuart_modem_status (struct uart_port* port) +{ + unsigned int status = UR (port, UART_R_STATUS); + unsigned int delta + = status ^ ((struct uart_port_lh7a40x*) port)->statusPrev; + + BIT_SET (port, UART_R_RAWISR, MSEOI); /* Clear modem status intr */ + + if (!delta) /* Only happens if we missed 2 transitions */ + return; + + ((struct uart_port_lh7a40x*) port)->statusPrev = status; + + if (delta & DCD) + uart_handle_dcd_change (port, status & DCD); + + if (delta & DSR) + ++port->icount.dsr; + + if (delta & CTS) + uart_handle_cts_change (port, status & CTS); + + wake_up_interruptible (&port->info->delta_msr_wait); +} + +static irqreturn_t lh7a40xuart_int (int irq, void* dev_id, + struct pt_regs* regs) +{ + struct uart_port* port = dev_id; + unsigned int cLoopLimit = ISR_LOOP_LIMIT; + unsigned int isr = UR (port, UART_R_ISR); + + + do { + if (isr & (RxInt | RxTimeoutInt)) +#ifdef SUPPORT_SYSRQ + lh7a40xuart_rx_chars(port, regs); +#else + lh7a40xuart_rx_chars(port); +#endif + if (isr & ModemInt) + lh7a40xuart_modem_status (port); + if (isr & TxInt) + lh7a40xuart_tx_chars (port); + + if (--cLoopLimit == 0) + break; + + isr = UR (port, UART_R_ISR); + } while (isr & (RxInt | TxInt | RxTimeoutInt)); + + return IRQ_HANDLED; +} + +static unsigned int lh7a40xuart_tx_empty (struct uart_port* port) +{ + return (UR (port, UART_R_STATUS) & TxEmpty) ? TIOCSER_TEMT : 0; +} + +static unsigned int lh7a40xuart_get_mctrl (struct uart_port* port) +{ + unsigned int result = 0; + unsigned int status = UR (port, UART_R_STATUS); + + if (status & DCD) + result |= TIOCM_CAR; + if (status & DSR) + result |= TIOCM_DSR; + if (status & CTS) + result |= TIOCM_CTS; + + return result; +} + +static void lh7a40xuart_set_mctrl (struct uart_port* port, unsigned int mctrl) +{ + /* None of the ports supports DTR. UART1 supports RTS through GPIO. */ + /* Note, kernel appears to be setting DTR and RTS on console. */ + + /* *** FIXME: this deserves more work. There's some work in + tracing all of the IO pins. */ +#if 0 + if( port->mapbase == UART1_PHYS) { + gpioRegs_t *gpio = (gpioRegs_t *)IO_ADDRESS(GPIO_PHYS); + + if (mctrl & TIOCM_RTS) + gpio->pbdr &= ~GPIOB_UART1_RTS; + else + gpio->pbdr |= GPIOB_UART1_RTS; + } +#endif +} + +static void lh7a40xuart_break_ctl (struct uart_port* port, int break_state) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + if (break_state == -1) + BIT_SET (port, UART_R_FCON, BRK); /* Assert break */ + else + BIT_CLR (port, UART_R_FCON, BRK); /* Deassert break */ + spin_unlock_irqrestore(&port->lock, flags); +} + +static int lh7a40xuart_startup (struct uart_port* port) +{ + int retval; + + retval = request_irq (port->irq, lh7a40xuart_int, 0, + "serial_lh7a40x", port); + if (retval) + return retval; + + /* Initial modem control-line settings */ + ((struct uart_port_lh7a40x*) port)->statusPrev + = UR (port, UART_R_STATUS); + + /* There is presently no configuration option to enable IR. + Thus, we always disable it. 
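+
+   Added note for reference: per the BIT_SET()/UR() macros at the top
+   of this file, the first call below expands to a read-modify-write
+   of the memory-mapped control register,
+
+	UR(port, UART_R_CON) = UR(port, UART_R_CON) | (UARTEN | SIRDIS);
+
+   i.e. *(volatile unsigned int *)(port->membase + UART_R_CON) |= 0x03,
+   which enables the UART and disables its IR mode.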
*/ + + BIT_SET (port, UART_R_CON, UARTEN | SIRDIS); + BIT_SET (port, UART_R_INTEN, RxTimeoutInt | RxInt); + + return 0; +} + +static void lh7a40xuart_shutdown (struct uart_port* port) +{ + free_irq (port->irq, port); + BIT_CLR (port, UART_R_FCON, BRK | FEN); + BIT_CLR (port, UART_R_CON, UARTEN); +} + +static void lh7a40xuart_set_termios (struct uart_port* port, + struct termios* termios, + struct termios* old) +{ + unsigned int con; + unsigned int inten; + unsigned int fcon; + unsigned long flags; + unsigned int baud; + unsigned int quot; + + baud = uart_get_baud_rate (port, termios, old, 8, port->uartclk/16); + quot = uart_get_divisor (port, baud); /* -1 performed elsewhere */ + + switch (termios->c_cflag & CSIZE) { + case CS5: + fcon = WLEN_5; + break; + case CS6: + fcon = WLEN_6; + break; + case CS7: + fcon = WLEN_7; + break; + case CS8: + default: + fcon = WLEN_8; + break; + } + if (termios->c_cflag & CSTOPB) + fcon |= STP2; + if (termios->c_cflag & PARENB) { + fcon |= PEN; + if (!(termios->c_cflag & PARODD)) + fcon |= EPS; + } + if (port->fifosize > 1) + fcon |= FEN; + + spin_lock_irqsave (&port->lock, flags); + + uart_update_timeout (port, termios->c_cflag, baud); + + port->read_status_mask = RxOverrunError; + if (termios->c_iflag & INPCK) + port->read_status_mask |= RxFramingError | RxParityError; + if (termios->c_iflag & (BRKINT | PARMRK)) + port->read_status_mask |= RxBreak; + + /* Figure mask for status we ignore */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= RxFramingError | RxParityError; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= RxBreak; + /* Ignore overrun when ignorning parity */ + /* *** FIXME: is this in the right place? */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= RxOverrunError; + } + + /* Ignore all receive errors when receive disabled */ + if ((termios->c_cflag & CREAD) == 0) + port->ignore_status_mask |= RxError; + + con = UR (port, UART_R_CON); + inten = (UR (port, UART_R_INTEN) & ~ModemInt); + + if (UART_ENABLE_MS (port, termios->c_cflag)) + inten |= ModemInt; + + BIT_CLR (port, UART_R_CON, UARTEN); /* Disable UART */ + UR (port, UART_R_INTEN) = 0; /* Disable interrupts */ + UR (port, UART_R_BRCON) = quot - 1; /* Set baud rate divisor */ + UR (port, UART_R_FCON) = fcon; /* Set FIFO and frame ctrl */ + UR (port, UART_R_INTEN) = inten; /* Enable interrupts */ + UR (port, UART_R_CON) = con; /* Restore UART mode */ + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char* lh7a40xuart_type (struct uart_port* port) +{ + return port->type == PORT_LH7A40X ? "LH7A40X" : NULL; +} + +static void lh7a40xuart_release_port (struct uart_port* port) +{ + release_mem_region (port->mapbase, UART_REG_SIZE); +} + +static int lh7a40xuart_request_port (struct uart_port* port) +{ + return request_mem_region (port->mapbase, UART_REG_SIZE, + "serial_lh7a40x") != NULL + ? 0 : -EBUSY; +} + +static void lh7a40xuart_config_port (struct uart_port* port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_LH7A40X; + lh7a40xuart_request_port (port); + } +} + +static int lh7a40xuart_verify_port (struct uart_port* port, + struct serial_struct* ser) +{ + int ret = 0; + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_LH7A40X) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= NR_IRQS) + ret = -EINVAL; + if (ser->baud_base < 9600) /* *** FIXME: is this true? 
*/ + ret = -EINVAL; + return ret; +} + +static struct uart_ops lh7a40x_uart_ops = { + .tx_empty = lh7a40xuart_tx_empty, + .set_mctrl = lh7a40xuart_set_mctrl, + .get_mctrl = lh7a40xuart_get_mctrl, + .stop_tx = lh7a40xuart_stop_tx, + .start_tx = lh7a40xuart_start_tx, + .stop_rx = lh7a40xuart_stop_rx, + .enable_ms = lh7a40xuart_enable_ms, + .break_ctl = lh7a40xuart_break_ctl, + .startup = lh7a40xuart_startup, + .shutdown = lh7a40xuart_shutdown, + .set_termios = lh7a40xuart_set_termios, + .type = lh7a40xuart_type, + .release_port = lh7a40xuart_release_port, + .request_port = lh7a40xuart_request_port, + .config_port = lh7a40xuart_config_port, + .verify_port = lh7a40xuart_verify_port, +}; + +static struct uart_port_lh7a40x lh7a40x_ports[DEV_NR] = { + { + .port = { + .membase = (void*) io_p2v (UART1_PHYS), + .mapbase = UART1_PHYS, + .iotype = SERIAL_IO_MEM, + .irq = IRQ_UART1INTR, + .uartclk = 14745600/2, + .fifosize = 16, + .ops = &lh7a40x_uart_ops, + .flags = ASYNC_BOOT_AUTOCONF, + .line = 0, + }, + }, + { + .port = { + .membase = (void*) io_p2v (UART2_PHYS), + .mapbase = UART2_PHYS, + .iotype = SERIAL_IO_MEM, + .irq = IRQ_UART2INTR, + .uartclk = 14745600/2, + .fifosize = 16, + .ops = &lh7a40x_uart_ops, + .flags = ASYNC_BOOT_AUTOCONF, + .line = 1, + }, + }, + { + .port = { + .membase = (void*) io_p2v (UART3_PHYS), + .mapbase = UART3_PHYS, + .iotype = SERIAL_IO_MEM, + .irq = IRQ_UART3INTR, + .uartclk = 14745600/2, + .fifosize = 16, + .ops = &lh7a40x_uart_ops, + .flags = ASYNC_BOOT_AUTOCONF, + .line = 2, + }, + }, +}; + +#ifndef CONFIG_SERIAL_LH7A40X_CONSOLE +# define LH7A40X_CONSOLE NULL +#else +# define LH7A40X_CONSOLE &lh7a40x_console + + +static void lh7a40xuart_console_write (struct console* co, + const char* s, + unsigned int count) +{ + struct uart_port* port = &lh7a40x_ports[co->index].port; + unsigned int con = UR (port, UART_R_CON); + unsigned int inten = UR (port, UART_R_INTEN); + + + UR (port, UART_R_INTEN) = 0; /* Disable all interrupts */ + BIT_SET (port, UART_R_CON, UARTEN | SIRDIS); /* Enable UART */ + + for (; count-- > 0; ++s) { + while (UR (port, UART_R_STATUS) & nTxRdy) + ; + UR (port, UART_R_DATA) = *s; + if (*s == '\n') { + while ((UR (port, UART_R_STATUS) & TxBusy)) + ; + UR (port, UART_R_DATA) = '\r'; + } + } + + /* Wait until all characters are sent */ + while (UR (port, UART_R_STATUS) & TxBusy) + ; + + /* Restore control and interrupt mask */ + UR (port, UART_R_CON) = con; + UR (port, UART_R_INTEN) = inten; +} + +static void __init lh7a40xuart_console_get_options (struct uart_port* port, + int* baud, + int* parity, + int* bits) +{ + if (UR (port, UART_R_CON) & UARTEN) { + unsigned int fcon = UR (port, UART_R_FCON); + unsigned int quot = UR (port, UART_R_BRCON) + 1; + + switch (fcon & (PEN | EPS)) { + default: *parity = 'n'; break; + case PEN: *parity = 'o'; break; + case PEN | EPS: *parity = 'e'; break; + } + + switch (fcon & WLEN) { + default: + case WLEN_8: *bits = 8; break; + case WLEN_7: *bits = 7; break; + case WLEN_6: *bits = 6; break; + case WLEN_5: *bits = 5; break; + } + + *baud = port->uartclk/(16*quot); + } +} + +static int __init lh7a40xuart_console_setup (struct console* co, char* options) +{ + struct uart_port* port; + int baud = 38400; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index >= DEV_NR) /* Bounds check on device number */ + co->index = 0; + port = &lh7a40x_ports[co->index].port; + + if (options) + uart_parse_options (options, &baud, &parity, &bits, &flow); + else + lh7a40xuart_console_get_options (port, &baud, &parity, 
&bits); + + return uart_set_options (port, co, baud, parity, bits, flow); +} + +extern struct uart_driver lh7a40x_reg; +static struct console lh7a40x_console = { + .name = "ttyAM", + .write = lh7a40xuart_console_write, + .device = uart_console_device, + .setup = lh7a40xuart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &lh7a40x_reg, +}; + +static int __init lh7a40xuart_console_init(void) +{ + register_console (&lh7a40x_console); + return 0; +} + +console_initcall (lh7a40xuart_console_init); + +#endif + +static struct uart_driver lh7a40x_reg = { + .owner = THIS_MODULE, + .driver_name = "ttyAM", + .dev_name = "ttyAM", + .major = DEV_MAJOR, + .minor = DEV_MINOR, + .nr = DEV_NR, + .cons = LH7A40X_CONSOLE, +}; + +static int __init lh7a40xuart_init(void) +{ + int ret; + + printk (KERN_INFO "serial: LH7A40X serial driver\n"); + + ret = uart_register_driver (&lh7a40x_reg); + + if (ret == 0) { + int i; + + for (i = 0; i < DEV_NR; i++) + uart_add_one_port (&lh7a40x_reg, + &lh7a40x_ports[i].port); + } + return ret; +} + +static void __exit lh7a40xuart_exit(void) +{ + int i; + + for (i = 0; i < DEV_NR; i++) + uart_remove_one_port (&lh7a40x_reg, &lh7a40x_ports[i].port); + + uart_unregister_driver (&lh7a40x_reg); +} + +module_init (lh7a40xuart_init); +module_exit (lh7a40xuart_exit); + +MODULE_AUTHOR ("Marc Singer"); +MODULE_DESCRIPTION ("Sharp LH7A40X serial port driver"); +MODULE_LICENSE ("GPL"); diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c new file mode 100644 index 000000000..98b96c546 --- /dev/null +++ b/drivers/serial/sn_console.c @@ -0,0 +1,1194 @@ +/* + * C-Brick Serial Port (and console) driver for SGI Altix machines. + * + * This driver is NOT suitable for talking to the l1-controller for + * anything other than 'console activities' --- please use the l1 + * driver for that. + * + * + * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + * Further, this software is distributed without any warranty that it is + * free of the rightful claim of any third person regarding infringement + * or the like. Any license provided herein, whether implied or + * otherwise, applies only to this software file. Patent licenses, if + * any, provided herein do not apply to combinations of this program with + * other software, or any other product whatsoever. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
+ * + * Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane, + * Mountain View, CA 94043, or: + * + * http://www.sgi.com + * + * For further information regarding this notice, see: + * + * http://oss.sgi.com/projects/GenInfo/NoticeExplan + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for mdelay */ +#include +#include + +#include +#include +#include + +/* number of characters we can transmit to the SAL console at a time */ +#define SN_SAL_MAX_CHARS 120 + +/* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to + * avoid losing chars, (always has to be a power of 2) */ +#define SN_SAL_BUFFER_SIZE (64 * (1 << 10)) + +#define SN_SAL_UART_FIFO_DEPTH 16 +#define SN_SAL_UART_FIFO_SPEED_CPS 9600/10 + +/* sn_transmit_chars() calling args */ +#define TRANSMIT_BUFFERED 0 +#define TRANSMIT_RAW 1 + +/* To use dynamic numbers only and not use the assigned major and minor, + * define the following.. */ +/* #define USE_DYNAMIC_MINOR 1 */ /* use dynamic minor number */ +#define USE_DYNAMIC_MINOR 0 /* Don't rely on misc_register dynamic minor */ + +/* Device name we're using */ +#define DEVICE_NAME "ttySG" +#define DEVICE_NAME_DYNAMIC "ttySG0" /* need full name for misc_register */ +/* The major/minor we are using, ignored for USE_DYNAMIC_MINOR */ +#define DEVICE_MAJOR 204 +#define DEVICE_MINOR 40 + +/* + * Port definition - this kinda drives it all + */ +struct sn_cons_port { + struct timer_list sc_timer; + struct uart_port sc_port; + struct sn_sal_ops { + int (*sal_puts_raw) (const char *s, int len); + int (*sal_puts) (const char *s, int len); + int (*sal_getc) (void); + int (*sal_input_pending) (void); + void (*sal_wakeup_transmit) (struct sn_cons_port *, int); + } *sc_ops; + unsigned long sc_interrupt_timeout; + int sc_is_asynch; +}; + +static struct sn_cons_port sal_console_port; + +/* Only used if USE_DYNAMIC_MINOR is set to 1 */ +static struct miscdevice misc; /* used with misc_register for dynamic */ + +extern u64 master_node_bedrock_address; +extern void early_sn_setup(void); + +#undef DEBUG +#ifdef DEBUG +static int sn_debug_printf(const char *fmt, ...); +#define DPRINTF(x...) sn_debug_printf(x) +#else +#define DPRINTF(x...) do { } while (0) +#endif + +/* Prototypes */ +static int snt_hw_puts_raw(const char *, int); +static int snt_hw_puts_buffered(const char *, int); +static int snt_poll_getc(void); +static int snt_poll_input_pending(void); +static int snt_sim_puts(const char *, int); +static int snt_sim_getc(void); +static int snt_sim_input_pending(void); +static int snt_intr_getc(void); +static int snt_intr_input_pending(void); +static void sn_transmit_chars(struct sn_cons_port *, int); + +/* A table for polling: + */ +static struct sn_sal_ops poll_ops = { + .sal_puts_raw = snt_hw_puts_raw, + .sal_puts = snt_hw_puts_raw, + .sal_getc = snt_poll_getc, + .sal_input_pending = snt_poll_input_pending +}; + +/* A table for the simulator */ +static struct sn_sal_ops sim_ops = { + .sal_puts_raw = snt_sim_puts, + .sal_puts = snt_sim_puts, + .sal_getc = snt_sim_getc, + .sal_input_pending = snt_sim_input_pending +}; + +/* A table for interrupts enabled */ +static struct sn_sal_ops intr_ops = { + .sal_puts_raw = snt_hw_puts_raw, + .sal_puts = snt_hw_puts_buffered, + .sal_getc = snt_intr_getc, + .sal_input_pending = snt_intr_input_pending, + .sal_wakeup_transmit = sn_transmit_chars +}; + +/* the console does output in two distinctly different ways: + * synchronous (raw) and asynchronous (buffered). 
initally, early_printk + * does synchronous output. any data written goes directly to the SAL + * to be output (incidentally, it is internally buffered by the SAL) + * after interrupts and timers are initialized and available for use, + * the console init code switches to asynchronous output. this is + * also the earliest opportunity to begin polling for console input. + * after console initialization, console output and tty (serial port) + * output is buffered and sent to the SAL asynchronously (either by + * timer callback or by UART interrupt) */ + + +/* routines for running the console in polling mode */ + +/** + * snt_poll_getc - Get a character from the console in polling mode + * + */ +static int +snt_poll_getc(void) +{ + int ch; + + ia64_sn_console_getc(&ch); + return ch; +} + +/** + * snt_poll_input_pending - Check if any input is waiting - polling mode. + * + */ +static int +snt_poll_input_pending(void) +{ + int status, input; + + status = ia64_sn_console_check(&input); + return !status && input; +} + +/* routines for running the console on the simulator */ + +/** + * snt_sim_puts - send to the console, used in simulator mode + * @str: String to send + * @count: length of string + * + */ +static int +snt_sim_puts(const char *str, int count) +{ + int counter = count; + +#ifdef FLAG_DIRECT_CONSOLE_WRITES + /* This is an easy way to pre-pend the output to know whether the output + * was done via sal or directly */ + writeb('[', master_node_bedrock_address + (UART_TX << 3)); + writeb('+', master_node_bedrock_address + (UART_TX << 3)); + writeb(']', master_node_bedrock_address + (UART_TX << 3)); + writeb(' ', master_node_bedrock_address + (UART_TX << 3)); +#endif /* FLAG_DIRECT_CONSOLE_WRITES */ + while (counter > 0) { + writeb(*str, master_node_bedrock_address + (UART_TX << 3)); + counter--; + str++; + } + return count; +} + +/** + * snt_sim_getc - Get character from console in simulator mode + * + */ +static int +snt_sim_getc(void) +{ + return readb(master_node_bedrock_address + (UART_RX << 3)); +} + +/** + * snt_sim_input_pending - Check if there is input pending in simulator mode + * + */ +static int +snt_sim_input_pending(void) +{ + return readb(master_node_bedrock_address + + (UART_LSR << 3)) & UART_LSR_DR; +} + +/* routines for an interrupt driven console (normal) */ + +/** + * snt_intr_getc - Get a character from the console, interrupt mode + * + */ +static int +snt_intr_getc(void) +{ + return ia64_sn_console_readc(); +} + +/** + * snt_intr_input_pending - Check if input is pending, interrupt mode + * + */ +static int +snt_intr_input_pending(void) +{ + return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV; +} + +/* these functions are polled and interrupt */ + +/** + * snt_hw_puts_raw - Send raw string to the console, polled or interrupt mode + * @s: String + * @len: Length + * + */ +static int +snt_hw_puts_raw(const char *s, int len) +{ + /* this will call the PROM and not return until this is done */ + return ia64_sn_console_putb(s, len); +} + +/** + * snt_hw_puts_buffered - Send string to console, polled or interrupt mode + * @s: String + * @len: Length + * + */ +static int +snt_hw_puts_buffered(const char *s, int len) +{ + /* queue data to the PROM */ + return ia64_sn_console_xmit_chars((char *)s, len); +} + +/* uart interface structs + * These functions are associated with the uart_port that the serial core + * infrastructure calls. + * + * Note: Due to how the console works, many routines are no-ops. + */ + +/** + * snp_type - What type of console are we? 
+ * @port: Port to operate with (we ignore since we only have one port) + * + */ +static const char * +snp_type(struct uart_port *port) +{ + return ("SGI SN L1"); +} + +/** + * snp_tx_empty - Is the transmitter empty? We pretend we're always empty + * @port: Port to operate on (we ignore since we only have one port) + * + */ +static unsigned int +snp_tx_empty(struct uart_port *port) +{ + return 1; +} + +/** + * snp_stop_tx - stop the transmitter - no-op for us + * @port: Port to operat eon - we ignore - no-op function + * @tty_stop: Set to 1 if called via uart_stop + * + */ +static void +snp_stop_tx(struct uart_port *port, unsigned int tty_stop) +{ +} + +/** + * snp_release_port - Free i/o and resources for port - no-op for us + * @port: Port to operate on - we ignore - no-op function + * + */ +static void +snp_release_port(struct uart_port *port) +{ +} + +/** + * snp_enable_ms - Force modem status interrupts on - no-op for us + * @port: Port to operate on - we ignore - no-op function + * + */ +static void +snp_enable_ms(struct uart_port *port) +{ +} + +/** + * snp_shutdown - shut down the port - free irq and disable - no-op for us + * @port: Port to shut down - we ignore + * + */ +static void +snp_shutdown(struct uart_port *port) +{ +} + +/** + * snp_set_mctrl - set control lines (dtr, rts, etc) - no-op for our console + * @port: Port to operate on - we ignore + * @mctrl: Lines to set/unset - we ignore + * + */ +static void +snp_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +/** + * snp_get_mctrl - get contorl line info, we just return a static value + * @port: port to operate on - we only have one port so we ignore this + * + */ +static unsigned int +snp_get_mctrl(struct uart_port *port) +{ + return TIOCM_CAR | TIOCM_RNG | TIOCM_DSR | TIOCM_CTS; +} + +/** + * snp_stop_rx - Stop the receiver - we ignor ethis + * @port: Port to operate on - we ignore + * + */ +static void +snp_stop_rx(struct uart_port *port) +{ +} + +/** + * snp_start_tx - Start transmitter + * @port: Port to operate on + * @tty_stop: Set to 1 if called via uart_start + * + */ +static void +snp_start_tx(struct uart_port *port, unsigned int tty_stop) +{ + if (sal_console_port.sc_ops->sal_wakeup_transmit) + sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port, TRANSMIT_BUFFERED); + +} + +/** + * snp_break_ctl - handle breaks - ignored by us + * @port: Port to operate on + * @break_state: Break state + * + */ +static void +snp_break_ctl(struct uart_port *port, int break_state) +{ +} + +/** + * snp_startup - Start up the serial port - always return 0 (We're always on) + * @port: Port to operate on + * + */ +static int +snp_startup(struct uart_port *port) +{ + return 0; +} + +/** + * snp_set_termios - set termios stuff - we ignore these + * @port: port to operate on + * @termios: New settings + * @termios: Old + * + */ +static void +snp_set_termios(struct uart_port *port, struct termios *termios, + struct termios *old) +{ +} + +/** + * snp_request_port - allocate resources for port - ignored by us + * @port: port to operate on + * + */ +static int +snp_request_port(struct uart_port *port) +{ + return 0; +} + +/** + * snp_config_port - allocate resources, set up - we ignore, we're always on + * @port: Port to operate on + * @flags: flags used for port setup + * + */ +static void +snp_config_port(struct uart_port *port, int flags) +{ +} + +/* Associate the uart functions above - given to serial core */ + +static struct uart_ops sn_console_ops = { + .tx_empty = snp_tx_empty, + .set_mctrl = snp_set_mctrl, 
+ .get_mctrl = snp_get_mctrl, + .stop_tx = snp_stop_tx, + .start_tx = snp_start_tx, + .stop_rx = snp_stop_rx, + .enable_ms = snp_enable_ms, + .break_ctl = snp_break_ctl, + .startup = snp_startup, + .shutdown = snp_shutdown, + .set_termios = snp_set_termios, + .pm = NULL, + .type = snp_type, + .release_port = snp_release_port, + .request_port = snp_request_port, + .config_port = snp_config_port, + .verify_port = NULL, +}; + +/* End of uart struct functions and defines */ + +#ifdef DEBUG + +/** + * sn_debug_printf - close to hardware debugging printf + * @fmt: printf format + * + * This is as "close to the metal" as we can get, used when the driver + * itself may be broken. + * + */ +static int +sn_debug_printf(const char *fmt, ...) +{ + static char printk_buf[1024]; + int printed_len; + va_list args; + + va_start(args, fmt); + printed_len = vsnprintf(printk_buf, sizeof (printk_buf), fmt, args); + + if (!sal_console_port.sc_ops) { + if (IS_RUNNING_ON_SIMULATOR()) + sal_console_port.sc_ops = &sim_ops; + else + sal_console_port.sc_ops = &poll_ops; + + early_sn_setup(); + } + sal_console_port.sc_ops->sal_puts_raw(printk_buf, printed_len); + + va_end(args); + return printed_len; +} +#endif /* DEBUG */ + +/* + * Interrupt handling routines. + */ + + +/** + * sn_receive_chars - Grab characters, pass them to tty layer + * @port: Port to operate on + * @regs: Saved registers (needed by uart_handle_sysrq_char) + * + * Note: If we're not registered with the serial core infrastructure yet, + * we don't try to send characters to it... + * + */ +static void +sn_receive_chars(struct sn_cons_port *port, struct pt_regs *regs) +{ + int ch; + struct tty_struct *tty; + + if (!port) { + printk(KERN_ERR "sn_receive_chars - port NULL so can't receieve\n"); + return; + } + + if (!port->sc_ops) { + printk(KERN_ERR "sn_receive_chars - port->sc_ops NULL so can't receieve\n"); + return; + } + + if (port->sc_port.info) { + /* The serial_core stuffs are initilized, use them */ + tty = port->sc_port.info->tty; + } + else { + /* Not registered yet - can't pass to tty layer. */ + tty = NULL; + } + + while (port->sc_ops->sal_input_pending()) { + ch = port->sc_ops->sal_getc(); + if (ch < 0) { + printk(KERN_ERR "sn_console: An error occured while " + "obtaining data from the console (0x%0x)\n", ch); + break; + } +#if defined(CONFIG_SERIAL_SGI_L1_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) + if (uart_handle_sysrq_char(&port->sc_port, ch, regs)) + continue; +#endif /* CONFIG_SERIAL_SGI_L1_CONSOLE && CONFIG_MAGIC_SYSRQ */ + + /* record the character to pass up to the tty layer */ + if (tty) { + *tty->flip.char_buf_ptr = ch; + *tty->flip.flag_buf_ptr = TTY_NORMAL; + tty->flip.char_buf_ptr++; + tty->flip.count++; + if (tty->flip.count == TTY_FLIPBUF_SIZE) + break; + } + else { + } + port->sc_port.icount.rx++; + } + + if (tty) + tty_flip_buffer_push(tty); +} + +/** + * sn_transmit_chars - grab characters from serial core, send off + * @port: Port to operate on + * @raw: Transmit raw or buffered + * + * Note: If we're early, before we're registered with serial core, the + * writes are going through sn_sal_console_write because that's how + * register_console has been set up. We currently could have asynch + * polls calling this function due to sn_sal_switch_to_asynch but we can + * ignore them until we register with the serial core stuffs. 
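+ *
+ * Worked example (added, not in the original comment), taking
+ * UART_XMIT_SIZE as 4096 for illustration and assuming the SAL accepts
+ * every byte offered: with head == 10 and tail == 4000, loops is 2;
+ * the first pass sends xmit->buf[4000..4095] (96 bytes) and tail wraps
+ * to 0, the second pass sends xmit->buf[0..9] (10 bytes), leaving the
+ * buffer empty.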
+ * + */ +static void +sn_transmit_chars(struct sn_cons_port *port, int raw) +{ + int xmit_count, tail, head, loops, ii; + int result; + char *start; + struct circ_buf *xmit; + + if (!port) + return; + + BUG_ON(!port->sc_is_asynch); + + if (port->sc_port.info) { + /* We're initilized, using serial core infrastructure */ + xmit = &port->sc_port.info->xmit; + } + else { + /* Probably sn_sal_switch_to_asynch has been run but serial core isn't + * initilized yet. Just return. Writes are going through + * sn_sal_console_write (due to register_console) at this time. + */ + return; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(&port->sc_port)) { + /* Nothing to do. */ + return; + } + + head = xmit->head; + tail = xmit->tail; + start = &xmit->buf[tail]; + + /* twice around gets the tail to the end of the buffer and + * then to the head, if needed */ + loops = (head < tail) ? 2 : 1; + + for (ii = 0; ii < loops; ii++) { + xmit_count = (head < tail) ? + (UART_XMIT_SIZE - tail) : (head - tail); + + if (xmit_count > 0) { + if (raw == TRANSMIT_RAW) + result = + port->sc_ops->sal_puts_raw(start, + xmit_count); + else + result = + port->sc_ops->sal_puts(start, xmit_count); +#ifdef DEBUG + if (!result) + DPRINTF("`"); +#endif + if (result > 0) { + xmit_count -= result; + port->sc_port.icount.tx += result; + tail += result; + tail &= UART_XMIT_SIZE - 1; + xmit->tail = tail; + start = &xmit->buf[tail]; + } + } + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&port->sc_port); + + if (uart_circ_empty(xmit)) + snp_stop_tx(&port->sc_port, 0); /* no-op for us */ +} + +/** + * sn_sal_interrupt - Handle console interrupts + * @irq: irq #, useful for debug statements + * @dev_id: our pointer to our port (sn_cons_port which contains the uart port) + * @regs: Saved registers, used by sn_receive_chars for uart_handle_sysrq_char + * + */ +static irqreturn_t +sn_sal_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct sn_cons_port *port = (struct sn_cons_port *) dev_id; + unsigned long flags; + int status = ia64_sn_console_intr_status(); + + if (!port) + return IRQ_NONE; + + spin_lock_irqsave(&port->sc_port.lock, flags); + if (status & SAL_CONSOLE_INTR_RECV) { + sn_receive_chars(port, regs); + } + if (status & SAL_CONSOLE_INTR_XMIT) { + sn_transmit_chars(port, TRANSMIT_BUFFERED); + } + spin_unlock_irqrestore(&port->sc_port.lock, flags); + return IRQ_HANDLED; +} + +/** + * sn_sal_connect_interrupt - Request interrupt, handled by sn_sal_interrupt + * @port: Our sn_cons_port (which contains the uart port) + * + * returns the console irq if interrupt is successfully registered, else 0 + * + */ +static int +sn_sal_connect_interrupt(struct sn_cons_port *port) +{ + if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt, SA_INTERRUPT, + "SAL console driver", port) >= 0) { + return SGI_UART_VECTOR; + } + + printk(KERN_INFO "sn_console: console proceeding in polled mode\n"); + return 0; +} + +/** + * sn_sal_timer_poll - this function handles polled console mode + * @data: A pointer to our sn_cons_port (which contains the uart port) + * + * data is the pointer that init_timer will store for us. This function is + * associated with init_timer to see if there is any console traffic. 
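+ *
+ * Added note on the re-arm interval used below: sn_sal_switch_to_asynch()
+ * computes sc_interrupt_timeout as
+ * HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS = HZ * 16 / 960,
+ * roughly HZ/60 jiffies, so the port is polled about 60 times a second,
+ * often enough that a 16-byte FIFO cannot overflow at 960 characters
+ * per second (9600 baud).
+ *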
+ * Obviously not used in interrupt mode + * + */ +static void +sn_sal_timer_poll(unsigned long data) +{ + struct sn_cons_port *port = (struct sn_cons_port *) data; + unsigned long flags; + + if (!port) + return; + + if (!port->sc_port.irq) { + spin_lock_irqsave(&port->sc_port.lock, flags); + sn_receive_chars(port, NULL); + sn_transmit_chars(port, TRANSMIT_RAW); + spin_unlock_irqrestore(&port->sc_port.lock, flags); + mod_timer(&port->sc_timer, + jiffies + port->sc_interrupt_timeout); + } +} + +/* + * Boot-time initialization code + */ + +/** + * sn_sal_switch_to_asynch - Switch to async mode (as opposed to synch) + * @port: Our sn_cons_port (which contains the uart port) + * + * So this is used by sn_sal_serial_console_init (early on, before we're + * registered with serial core). It's also used by sn_sal_module_init + * right after we've registered with serial core. The later only happens + * if we didn't already come through here via sn_sal_serial_console_init. + * + */ +static void __init +sn_sal_switch_to_asynch(struct sn_cons_port *port) +{ + unsigned long flags; + + if (!port) + return; + + DPRINTF("sn_console: about to switch to asynchronous console\n"); + + /* without early_printk, we may be invoked late enough to race + * with other cpus doing console IO at this point, however + * console interrupts will never be enabled */ + spin_lock_irqsave(&port->sc_port.lock, flags); + + /* early_printk invocation may have done this for us */ + if (!port->sc_ops) { + if (IS_RUNNING_ON_SIMULATOR()) + port->sc_ops = &sim_ops; + else + port->sc_ops = &poll_ops; + } + + /* we can't turn on the console interrupt (as request_irq + * calls kmalloc, which isn't set up yet), so we rely on a + * timer to poll for input and push data from the console + * buffer. + */ + init_timer(&port->sc_timer); + port->sc_timer.function = sn_sal_timer_poll; + port->sc_timer.data = (unsigned long) port; + + if (IS_RUNNING_ON_SIMULATOR()) + port->sc_interrupt_timeout = 6; + else { + /* 960cps / 16 char FIFO = 60HZ + * HZ / (SN_SAL_FIFO_SPEED_CPS / SN_SAL_FIFO_DEPTH) */ + port->sc_interrupt_timeout = + HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS; + } + mod_timer(&port->sc_timer, jiffies + port->sc_interrupt_timeout); + + port->sc_is_asynch = 1; + spin_unlock_irqrestore(&port->sc_port.lock, flags); +} + +/** + * sn_sal_switch_to_interrupts - Switch to interrupt driven mode + * @port: Our sn_cons_port (which contains the uart port) + * + * In sn_sal_module_init, after we're registered with serial core and + * the port is added, this function is called to switch us to interrupt + * mode. We were previously in asynch/polling mode (using init_timer). + * + * We attempt to switch to interrupt mode here by calling + * sn_sal_connect_interrupt. If that works out, we enable receive interrupts. 
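+ *
+ * Added note: if sn_sal_connect_interrupt() cannot register the IRQ it
+ * returns 0; in that case sc_port.irq stays 0, sc_ops is left unchanged,
+ * and the timer armed by sn_sal_switch_to_asynch() keeps re-arming
+ * itself, so the console simply continues in polled mode.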
+ */ +static void __init +sn_sal_switch_to_interrupts(struct sn_cons_port *port) +{ + int irq; + unsigned long flags; + + if (!port) + return; + + DPRINTF("sn_console: switching to interrupt driven console\n"); + + spin_lock_irqsave(&port->sc_port.lock, flags); + + irq = sn_sal_connect_interrupt(port); + + if (irq) { + port->sc_port.irq = irq; + port->sc_ops = &intr_ops; + + /* turn on receive interrupts */ + ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV); + } + spin_unlock_irqrestore(&port->sc_port.lock, flags); +} + +/* + * Kernel console definitions + */ + +#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE +static void sn_sal_console_write(struct console *, const char *, unsigned); +static int __init sn_sal_console_setup(struct console *, char *); +extern struct uart_driver sal_console_uart; +extern struct tty_driver *uart_console_device(struct console *, int *); + +static struct console sal_console = { + .name = DEVICE_NAME, + .write = sn_sal_console_write, + .device = uart_console_device, + .setup = sn_sal_console_setup, + .index = -1, /* unspecified */ + .data = &sal_console_uart, +}; + +#define SAL_CONSOLE &sal_console +#else +#define SAL_CONSOLE 0 +#endif /* CONFIG_SERIAL_SGI_L1_CONSOLE */ + +static struct uart_driver sal_console_uart = { + .owner = THIS_MODULE, + .driver_name = "sn_console", + .dev_name = DEVICE_NAME, + .major = 0, /* major/minor set at registration time per USE_DYNAMIC_MINOR */ + .minor = 0, + .nr = 1, /* one port */ + .cons = SAL_CONSOLE, +}; + +/** + * sn_sal_module_init - When the kernel loads us, get us rolling w/ serial core + * + * Before this is called, we've been printing kernel messages in a special + * early mode not making use of the serial core infrastructure. When our + * driver is loaded for real, we register the driver and port with serial + * core and try to enable interrupt driven mode. 
+ * + */ +static int __init +sn_sal_module_init(void) +{ + int retval; + + printk(KERN_INFO "sn_console: Console driver init\n"); + + if (!ia64_platform_is("sn2")) + return -ENODEV; + + if (USE_DYNAMIC_MINOR == 1) { + misc.minor = MISC_DYNAMIC_MINOR; + misc.name = DEVICE_NAME_DYNAMIC; + retval = misc_register(&misc); + if (retval != 0) { + printk("Failed to register console device using misc_register.\n"); + return -ENODEV; + } + sal_console_uart.major = MISC_MAJOR; + sal_console_uart.minor = misc.minor; + } + else { + sal_console_uart.major = DEVICE_MAJOR; + sal_console_uart.minor = DEVICE_MINOR; + } + + /* We register the driver and the port before switching to interrupts + * or async above so the proper uart structures are populated */ + + if (uart_register_driver(&sal_console_uart) < 0) { + printk("ERROR sn_sal_module_init failed uart_register_driver, line %d\n", + __LINE__); + return -ENODEV; + } + + sal_console_port.sc_port.lock = SPIN_LOCK_UNLOCKED; + + /* Setup the port struct with the minimum needed */ + sal_console_port.sc_port.membase = (char *)1; /* just needs to be non-zero */ + sal_console_port.sc_port.type = PORT_16550A; + sal_console_port.sc_port.fifosize = SN_SAL_MAX_CHARS; + sal_console_port.sc_port.ops = &sn_console_ops; + sal_console_port.sc_port.line = 0; + + if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) { + /* error - not sure what I'd do - so I'll do nothing */ + printk(KERN_ERR "%s: unable to add port\n", __FUNCTION__); + } + + /* when this driver is compiled in, the console initialization + * will have already switched us into asynchronous operation + * before we get here through the module initcalls */ + if (!sal_console_port.sc_is_asynch) { + sn_sal_switch_to_asynch(&sal_console_port); + } + + /* at this point (module_init) we can try to turn on interrupts */ + if (!IS_RUNNING_ON_SIMULATOR()) { + sn_sal_switch_to_interrupts(&sal_console_port); + } + return 0; +} + +/** + * sn_sal_module_exit - When we're unloaded, remove the driver/port + * + */ +static void __exit +sn_sal_module_exit(void) +{ + del_timer_sync(&sal_console_port.sc_timer); + uart_remove_one_port(&sal_console_uart, &sal_console_port.sc_port); + uart_unregister_driver(&sal_console_uart); + misc_deregister(&misc); +} + +module_init(sn_sal_module_init); +module_exit(sn_sal_module_exit); + +#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE + +/** + * puts_raw_fixed - sn_sal_console_write helper for adding \r's as required + * @puts_raw : puts function to do the writing + * @s: input string + * @count: length + * + * We need a \r ahead of every \n for direct writes through + * ia64_sn_console_putb (what sal_puts_raw below actually does). + * + */ + +static void puts_raw_fixed(int (*puts_raw) (const char *s, int len), const char *s, int count) +{ + const char *s1; + + /* Output '\r' before each '\n' */ + while ((s1 = memchr(s, '\n', count)) != NULL) { + puts_raw(s, s1 - s); + puts_raw("\r\n", 2); + count -= s1 + 1 - s; + s = s1 + 1; + } + puts_raw(s, count); +} + +/** + * sn_sal_console_write - Print statements before serial core available + * @console: Console to operate on - we ignore since we have just one + * @s: String to send + * @count: length + * + * This is referenced in the console struct. It is used for early + * console printing before we register with serial core and for things + * such as kdb. The console_lock must be held when we get here. + * + * This function has some code for trying to print output even if the lock + * is held. 
We try to cover the case where a lock holder could have died. + * We don't use this special case code if we're not registered with serial + * core yet. After we're registered with serial core, the only time this + * function would be used is for high level kernel output like magic sys req, + * kdb, and printk's. + */ +static void +sn_sal_console_write(struct console *co, const char *s, unsigned count) +{ + unsigned long flags = 0; + struct sn_cons_port *port = &sal_console_port; +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) + static int stole_lock = 0; +#endif + + BUG_ON(!port->sc_is_asynch); + + /* We can't look at the xmit buffer if we're not registered with serial core + * yet. So only do the fancy recovery after registering + */ + if (port->sc_port.info) { + + /* somebody really wants this output, might be an + * oops, kdb, panic, etc. make sure they get it. */ +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) + if (spin_is_locked(&port->sc_port.lock)) { + int lhead = port->sc_port.info->xmit.head; + int ltail = port->sc_port.info->xmit.tail; + int counter, got_lock = 0; + + /* + * We attempt to determine if someone has died with the + * lock. We wait ~20 secs after the head and tail ptrs + * stop moving and assume the lock holder is not functional + * and plow ahead. If the lock is freed within the time out + * period we re-get the lock and go ahead normally. We also + * remember if we have plowed ahead so that we don't have + * to wait out the time out period again - the asumption + * is that we will time out again. + */ + + for (counter = 0; counter < 150; mdelay(125), counter++) { + if (!spin_is_locked(&port->sc_port.lock) || stole_lock) { + if (!stole_lock) { + spin_lock_irqsave(&port->sc_port.lock, flags); + got_lock = 1; + } + break; + } + else { + /* still locked */ + if ((lhead != port->sc_port.info->xmit.head) || (ltail != port->sc_port.info->xmit.tail)) { + lhead = port->sc_port.info->xmit.head; + ltail = port->sc_port.info->xmit.tail; + counter = 0; + } + } + } + /* flush anything in the serial core xmit buffer, raw */ + sn_transmit_chars(port, 1); + if (got_lock) { + spin_unlock_irqrestore(&port->sc_port.lock, flags); + stole_lock = 0; + } + else { + /* fell thru */ + stole_lock = 1; + } + puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); + } + else { + stole_lock = 0; +#endif + spin_lock_irqsave(&port->sc_port.lock, flags); + sn_transmit_chars(port, 1); + spin_unlock_irqrestore(&port->sc_port.lock, flags); + + puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); + } + } + else { + /* Not yet registered with serial core - simple case */ + puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); + } +} + + +/** + * sn_sal_console_setup - Set up console for early printing + * @co: Console to work with + * @options: Options to set + * + * Altix console doesn't do anything with baud rates, etc, anyway. + * + * This isn't required since not providing the setup function in the + * console struct is ok. However, other patches like KDB plop something + * here so providing it is easier. + * + */ +static int __init +sn_sal_console_setup(struct console *co, char *options) +{ + return 0; +} + +/** + * sn_sal_console_write_early - simple early output routine + * @co - console struct + * @s - string to print + * @count - count + * + * Simple function to provide early output, before even + * sn_sal_serial_console_init is called. Referenced in the + * console struct registerd in sn_serial_console_early_setup. 
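+ *
+ * Added example of the puts_raw_fixed() call it makes: for the eight
+ * bytes "one\ntwo\n" the helper issues
+ *
+ *	puts_raw("one", 3); puts_raw("\r\n", 2);
+ *	puts_raw("two", 3); puts_raw("\r\n", 2);
+ *	puts_raw("", 0);
+ *
+ * so each '\n' reaches the L1 console preceded by '\r'.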
+ * + */ +static void __init +sn_sal_console_write_early(struct console *co, const char *s, unsigned count) +{ + puts_raw_fixed(sal_console_port.sc_ops->sal_puts_raw, s, count); +} + +/* Used for very early console printing - again, before + * sn_sal_serial_console_init is run */ +static struct console sal_console_early __initdata = { + .name = "sn_sal", + .write = sn_sal_console_write_early, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +/** + * sn_serial_console_early_setup - Sets up early console output support + * + * Register a console early on... This is for output before even + * sn_sal_serial_cosnole_init is called. This function is called from + * setup.c. This allows us to do really early polled writes. When + * sn_sal_serial_console_init is called, this console is unregistered + * and a new one registered. + */ +int __init +sn_serial_console_early_setup(void) +{ + if (!ia64_platform_is("sn2")) + return -1; + + if (IS_RUNNING_ON_SIMULATOR()) + sal_console_port.sc_ops = &sim_ops; + else + sal_console_port.sc_ops = &poll_ops; + + early_sn_setup(); /* Find SAL entry points */ + register_console(&sal_console_early); + + return 0; +} + + +/** + * sn_sal_serial_console_init - Early console output - set up for register + * + * This function is called when regular console init happens. Because we + * support even earlier console output with sn_serial_console_early_setup + * (called from setup.c directly), this function unregisters the really + * early console. + * + * Note: Even if setup.c doesn't register sal_console_early, unregistering + * it here doesn't hurt anything. + * + */ +static int __init +sn_sal_serial_console_init(void) +{ + if (ia64_platform_is("sn2")) { + sn_sal_switch_to_asynch(&sal_console_port); + DPRINTF ("sn_sal_serial_console_init : register console\n"); + register_console(&sal_console); + unregister_console(&sal_console_early); + } + return 0; +} + +console_initcall(sn_sal_serial_console_init); + +#endif /* CONFIG_SERIAL_SGI_L1_CONSOLE */ diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h new file mode 100644 index 000000000..2e4f49a0c --- /dev/null +++ b/drivers/usb/class/cdc-acm.h @@ -0,0 +1,115 @@ +/* + * + * Includes for cdc-acm.c + * + * Mainly take from usbnet's cdc-ether part + * + */ + +/* + * CMSPAR, some architectures can't have space and mark parity. + */ + +#ifndef CMSPAR +#define CMSPAR 0 +#endif + +/* + * Major and minor numbers. + */ + +#define ACM_TTY_MAJOR 166 +#define ACM_TTY_MINORS 32 + +/* + * Requests. + */ + +#define USB_RT_ACM (USB_TYPE_CLASS | USB_RECIP_INTERFACE) + +#define ACM_REQ_COMMAND 0x00 +#define ACM_REQ_RESPONSE 0x01 +#define ACM_REQ_SET_FEATURE 0x02 +#define ACM_REQ_GET_FEATURE 0x03 +#define ACM_REQ_CLEAR_FEATURE 0x04 + +#define ACM_REQ_SET_LINE 0x20 +#define ACM_REQ_GET_LINE 0x21 +#define ACM_REQ_SET_CONTROL 0x22 +#define ACM_REQ_SEND_BREAK 0x23 + +/* + * IRQs. + */ + +#define ACM_IRQ_NETWORK 0x00 +#define ACM_IRQ_LINE_STATE 0x20 + +/* + * Output control lines. + */ + +#define ACM_CTRL_DTR 0x01 +#define ACM_CTRL_RTS 0x02 + +/* + * Input control lines and line errors. + */ + +#define ACM_CTRL_DCD 0x01 +#define ACM_CTRL_DSR 0x02 +#define ACM_CTRL_BRK 0x04 +#define ACM_CTRL_RI 0x08 + +#define ACM_CTRL_FRAMING 0x10 +#define ACM_CTRL_PARITY 0x20 +#define ACM_CTRL_OVERRUN 0x40 + +/* + * Line speed and caracter encoding. + */ + +struct acm_line { + __u32 speed; + __u8 stopbits; + __u8 parity; + __u8 databits; +} __attribute__ ((packed)); + +/* + * Internal driver structures. 
+ */ + +struct acm { + struct usb_device *dev; /* the corresponding usb device */ + struct usb_interface *control; /* control interface */ + struct usb_interface *data; /* data interface */ + struct tty_struct *tty; /* the corresponding tty */ + struct urb *ctrlurb, *readurb, *writeurb; /* urbs */ + struct acm_line line; /* line coding (bits, stop, parity) */ + struct work_struct work; /* work queue entry for line discipline waking up */ + struct tasklet_struct bh; /* rx processing */ + unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ + unsigned int ctrlout; /* output control lines (DTR, RTS) */ + unsigned int writesize; /* max packet size for the output bulk endpoint */ + unsigned int used; /* someone has this acm's device open */ + unsigned int minor; /* acm minor number */ + unsigned char throttle; /* throttled by tty layer */ + unsigned char clocal; /* termios CLOCAL */ + unsigned char ready_for_write; /* write urb can be used */ +}; + +/* "Union Functional Descriptor" from CDC spec 5.2.3.X */ +struct union_desc { + u8 bLength; + u8 bDescriptorType; + u8 bDescriptorSubType; + + u8 bMasterInterface0; + u8 bSlaveInterface0; + /* ... and there could be other slave interfaces */ +} __attribute__ ((packed)); + +#define CDC_UNION_TYPE 0x06 +#define CDC_DATA_INTERFACE_TYPE 0x0a + diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c new file mode 100644 index 000000000..38c64f656 --- /dev/null +++ b/drivers/usb/core/sysfs.c @@ -0,0 +1,229 @@ +/* + * drivers/usb/core/sysfs.c + * + * (C) Copyright 2002 David Brownell + * (C) Copyright 2002 Greg Kroah-Hartman + * (C) Copyright 2002 IBM Corp. + * + * All of the sysfs file attributes for usb devices and interfaces. + * + */ + + +#include +#include + +#ifdef CONFIG_USB_DEBUG + #define DEBUG +#else + #undef DEBUG +#endif +#include + +#include "usb.h" + +/* Active configuration fields */ +#define usb_actconfig_show(field, multiplier, format_string) \ +static ssize_t show_##field (struct device *dev, char *buf) \ +{ \ + struct usb_device *udev; \ + \ + udev = to_usb_device (dev); \ + if (udev->actconfig) \ + return sprintf (buf, format_string, \ + udev->actconfig->desc.field * multiplier); \ + else \ + return 0; \ +} \ + +#define usb_actconfig_attr(field, multiplier, format_string) \ +usb_actconfig_show(field, multiplier, format_string) \ +static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); + +usb_actconfig_attr (bNumInterfaces, 1, "%2d\n") +usb_actconfig_attr (bmAttributes, 1, "%2x\n") +usb_actconfig_attr (bMaxPower, 2, "%3dmA\n") + +/* configuration value is always present, and r/w */ +usb_actconfig_show(bConfigurationValue, 1, "%u\n"); + +static ssize_t +set_bConfigurationValue (struct device *dev, const char *buf, size_t count) +{ + struct usb_device *udev = udev = to_usb_device (dev); + int config, value; + + if (sscanf (buf, "%u", &config) != 1 || config > 255) + return -EINVAL; + down(&udev->serialize); + value = usb_set_configuration (udev, config); + up(&udev->serialize); + return (value < 0) ? 
value : count; +} + +static DEVICE_ATTR(bConfigurationValue, S_IRUGO | S_IWUSR, + show_bConfigurationValue, set_bConfigurationValue); + +/* String fields */ +#define usb_string_attr(name, field) \ +static ssize_t show_##name(struct device *dev, char *buf) \ +{ \ + struct usb_device *udev; \ + int len; \ + \ + udev = to_usb_device (dev); \ + len = usb_string(udev, udev->descriptor.field, buf, PAGE_SIZE); \ + if (len < 0) \ + return 0; \ + buf[len] = '\n'; \ + buf[len+1] = 0; \ + return len+1; \ +} \ +static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL); + +usb_string_attr(product, iProduct); +usb_string_attr(manufacturer, iManufacturer); +usb_string_attr(serial, iSerialNumber); + +static ssize_t +show_speed (struct device *dev, char *buf) +{ + struct usb_device *udev; + char *speed; + + udev = to_usb_device (dev); + + switch (udev->speed) { + case USB_SPEED_LOW: + speed = "1.5"; + break; + case USB_SPEED_UNKNOWN: + case USB_SPEED_FULL: + speed = "12"; + break; + case USB_SPEED_HIGH: + speed = "480"; + break; + default: + speed = "unknown"; + } + return sprintf (buf, "%s\n", speed); +} +static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL); + +static ssize_t +show_devnum (struct device *dev, char *buf) +{ + struct usb_device *udev; + + udev = to_usb_device (dev); + return sprintf (buf, "%d\n", udev->devnum); +} +static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL); + +static ssize_t +show_version (struct device *dev, char *buf) +{ + struct usb_device *udev; + + udev = to_usb_device (dev); + return sprintf (buf, "%2x.%02x\n", udev->descriptor.bcdUSB >> 8, + udev->descriptor.bcdUSB & 0xff); +} +static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); + +static ssize_t +show_maxchild (struct device *dev, char *buf) +{ + struct usb_device *udev; + + udev = to_usb_device (dev); + return sprintf (buf, "%d\n", udev->maxchild); +} +static DEVICE_ATTR(maxchild, S_IRUGO, show_maxchild, NULL); + +/* Descriptor fields */ +#define usb_descriptor_attr(field, format_string) \ +static ssize_t \ +show_##field (struct device *dev, char *buf) \ +{ \ + struct usb_device *udev; \ + \ + udev = to_usb_device (dev); \ + return sprintf (buf, format_string, udev->descriptor.field); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); + +usb_descriptor_attr (idVendor, "%04x\n") +usb_descriptor_attr (idProduct, "%04x\n") +usb_descriptor_attr (bcdDevice, "%04x\n") +usb_descriptor_attr (bDeviceClass, "%02x\n") +usb_descriptor_attr (bDeviceSubClass, "%02x\n") +usb_descriptor_attr (bDeviceProtocol, "%02x\n") +usb_descriptor_attr (bNumConfigurations, "%d\n") + + +void usb_create_sysfs_dev_files (struct usb_device *udev) +{ + struct device *dev = &udev->dev; + + /* current configuration's attributes */ + device_create_file (dev, &dev_attr_bNumInterfaces); + device_create_file (dev, &dev_attr_bConfigurationValue); + device_create_file (dev, &dev_attr_bmAttributes); + device_create_file (dev, &dev_attr_bMaxPower); + + /* device attributes */ + device_create_file (dev, &dev_attr_idVendor); + device_create_file (dev, &dev_attr_idProduct); + device_create_file (dev, &dev_attr_bcdDevice); + device_create_file (dev, &dev_attr_bDeviceClass); + device_create_file (dev, &dev_attr_bDeviceSubClass); + device_create_file (dev, &dev_attr_bDeviceProtocol); + device_create_file (dev, &dev_attr_bNumConfigurations); + + /* speed varies depending on how you connect the device */ + device_create_file (dev, &dev_attr_speed); + // FIXME iff there are other speed configs, show how many + + if (udev->descriptor.iManufacturer) + 
device_create_file (dev, &dev_attr_manufacturer); + if (udev->descriptor.iProduct) + device_create_file (dev, &dev_attr_product); + if (udev->descriptor.iSerialNumber) + device_create_file (dev, &dev_attr_serial); + + device_create_file (dev, &dev_attr_devnum); + device_create_file (dev, &dev_attr_version); + device_create_file (dev, &dev_attr_maxchild); +} + +/* Interface fields */ +#define usb_intf_attr(field, format_string) \ +static ssize_t \ +show_##field (struct device *dev, char *buf) \ +{ \ + struct usb_interface *intf = to_usb_interface (dev); \ + \ + return sprintf (buf, format_string, intf->cur_altsetting->desc.field); \ +} \ +static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL); + +usb_intf_attr (bInterfaceNumber, "%02x\n") +usb_intf_attr (bAlternateSetting, "%2d\n") +usb_intf_attr (bNumEndpoints, "%02x\n") +usb_intf_attr (bInterfaceClass, "%02x\n") +usb_intf_attr (bInterfaceSubClass, "%02x\n") +usb_intf_attr (bInterfaceProtocol, "%02x\n") +usb_intf_attr (iInterface, "%02x\n") + +void usb_create_sysfs_intf_files (struct usb_interface *intf) +{ + device_create_file (&intf->dev, &dev_attr_bInterfaceNumber); + device_create_file (&intf->dev, &dev_attr_bAlternateSetting); + device_create_file (&intf->dev, &dev_attr_bNumEndpoints); + device_create_file (&intf->dev, &dev_attr_bInterfaceClass); + device_create_file (&intf->dev, &dev_attr_bInterfaceSubClass); + device_create_file (&intf->dev, &dev_attr_bInterfaceProtocol); + device_create_file (&intf->dev, &dev_attr_iInterface); +} diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c new file mode 100644 index 000000000..4e11a8eae --- /dev/null +++ b/drivers/usb/host/ohci-lh7a404.c @@ -0,0 +1,385 @@ +/* + * OHCI HCD (Host Controller Driver) for USB. + * + * (C) Copyright 1999 Roman Weissgaerber + * (C) Copyright 2000-2002 David Brownell + * (C) Copyright 2002 Hewlett-Packard Company + * + * Bus Glue for Sharp LH7A404 + * + * Written by Christopher Hoover + * Based on fragments of previous driver by Russell King et al. + * + * Modified for LH7A404 from ohci-sa1111.c + * by Durgesh Pattamatta + * + * This file is licenced under the GPL. + */ + +#include +#include +#include + + +extern int usb_disabled(void); + +/*-------------------------------------------------------------------------*/ + +static void lh7a404_start_hc(struct platform_device *dev) +{ + printk(KERN_DEBUG __FILE__ + ": starting LH7A404 OHCI USB Controller\n"); + + /* + * Now, carefully enable the USB clock, and take + * the USB host controller out of reset.
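+ * The udelay() below gives the freshly enabled clock time to settle + * before USBH_CMDSTATUS is written.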
+ */ + CSC_PWRCNT |= CSC_PWRCNT_USBH_EN; /* Enable clock */ + udelay(1000); + USBH_CMDSTATUS = OHCI_HCR; + + printk(KERN_DEBUG __FILE__ + ": Clock to USB host has been enabled \n"); +} + +static void lh7a404_stop_hc(struct platform_device *dev) +{ + printk(KERN_DEBUG __FILE__ + ": stopping LH7A404 OHCI USB Controller\n"); + + CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */ +} + + +/*-------------------------------------------------------------------------*/ + + +static irqreturn_t usb_hcd_lh7a404_hcim_irq (int irq, void *__hcd, + struct pt_regs * r) +{ + struct usb_hcd *hcd = __hcd; + + return usb_hcd_irq(irq, hcd, r); +} + +/*-------------------------------------------------------------------------*/ + +void usb_hcd_lh7a404_remove (struct usb_hcd *, struct platform_device *); + +/* configure so an HC device and id are always provided */ +/* always called with process context; sleeping is OK */ + + +/** + * usb_hcd_lh7a404_probe - initialize LH7A404-based HCDs + * Context: !in_interrupt() + * + * Allocates basic resources for this USB host controller, and + * then invokes the start() method for the HCD associated with it + * through the hotplug entry's driver_data. + * + */ +int usb_hcd_lh7a404_probe (const struct hc_driver *driver, + struct usb_hcd **hcd_out, + struct platform_device *dev) +{ + int retval; + struct usb_hcd *hcd = 0; + + unsigned int *addr = NULL; + + if (!request_mem_region(dev->resource[0].start, + dev->resource[0].end + - dev->resource[0].start + 1, hcd_name)) { + pr_debug("request_mem_region failed"); + return -EBUSY; + } + + + lh7a404_start_hc(dev); + + addr = ioremap(dev->resource[0].start, + dev->resource[0].end + - dev->resource[0].start + 1); + if (!addr) { + pr_debug("ioremap failed"); + retval = -ENOMEM; + goto err1; + } + + + hcd = driver->hcd_alloc (); + if (hcd == NULL){ + pr_debug ("hcd_alloc failed"); + retval = -ENOMEM; + goto err1; + } + + if(dev->resource[1].flags != IORESOURCE_IRQ){ + pr_debug ("resource[1] is not IORESOURCE_IRQ"); + retval = -ENOMEM; + goto err1; + } + + hcd->driver = (struct hc_driver *) driver; + hcd->description = driver->description; + hcd->irq = dev->resource[1].start; + hcd->regs = addr; + hcd->self.controller = &dev->dev; + + retval = hcd_buffer_create (hcd); + if (retval != 0) { + pr_debug ("pool alloc fail"); + goto err1; + } + + retval = request_irq (hcd->irq, usb_hcd_lh7a404_hcim_irq, SA_INTERRUPT, + hcd->description, hcd); + if (retval != 0) { + pr_debug("request_irq failed"); + retval = -EBUSY; + goto err2; + } + + pr_debug ("%s (LH7A404) at 0x%p, irq %d", + hcd->description, hcd->regs, hcd->irq); + + usb_bus_init (&hcd->self); + hcd->self.op = &usb_hcd_operations; + hcd->self.hcpriv = (void *) hcd; + hcd->self.bus_name = "lh7a404"; + hcd->product_desc = "LH7A404 OHCI"; + + INIT_LIST_HEAD (&hcd->dev_list); + + usb_register_bus (&hcd->self); + + if ((retval = driver->start (hcd)) < 0) + { + usb_hcd_lh7a404_remove(hcd, dev); + return retval; + } + + *hcd_out = hcd; + return 0; + + err2: + hcd_buffer_destroy (hcd); + if (hcd) + driver->hcd_free(hcd); + err1: + lh7a404_stop_hc(dev); + release_mem_region(dev->resource[0].start, + dev->resource[0].end + - dev->resource[0].start + 1); + return retval; +} + + +/* may be called without controller electrically present */ +/* may be called with controller, bus, and devices active */ + +/** + * usb_hcd_lh7a404_remove - shutdown processing for LH7A404-based HCDs + * @dev: USB Host Controller being removed + * Context: !in_interrupt() + * + * Reverses the effect of 
usb_hcd_lh7a404_probe(), first invoking + * the HCD's stop() method. It is always called from a thread + * context, normally "rmmod", "apmd", or something similar. + * + */ +void usb_hcd_lh7a404_remove (struct usb_hcd *hcd, struct platform_device *dev) +{ + void *base; + + pr_debug ("remove: %s, state %x", hcd->self.bus_name, hcd->state); + + if (in_interrupt ()) + BUG (); + + hcd->state = USB_STATE_QUIESCING; + + pr_debug ("%s: roothub graceful disconnect", hcd->self.bus_name); + usb_disconnect (&hcd->self.root_hub); + + hcd->driver->stop (hcd); + hcd->state = USB_STATE_HALT; + + free_irq (hcd->irq, hcd); + hcd_buffer_destroy (hcd); + + usb_deregister_bus (&hcd->self); + + base = hcd->regs; + hcd->driver->hcd_free (hcd); + + lh7a404_stop_hc(dev); + release_mem_region(dev->resource[0].start, + dev->resource[0].end + - dev->resource[0].start + 1); +} + +/*-------------------------------------------------------------------------*/ + +static int __devinit +ohci_lh7a404_start (struct usb_hcd *hcd) +{ + struct ohci_hcd *ohci = hcd_to_ohci (hcd); + int ret; + + ohci_dbg (ohci, "ohci_lh7a404_start, ohci:%p", ohci); + + ohci->hcca = dma_alloc_coherent (hcd->self.controller, + sizeof *ohci->hcca, &ohci->hcca_dma, 0); + if (!ohci->hcca) + return -ENOMEM; + + ohci_dbg (ohci, "ohci_lh7a404_start, ohci->hcca:%p", + ohci->hcca); + + memset (ohci->hcca, 0, sizeof (struct ohci_hcca)); + + if ((ret = ohci_mem_init (ohci)) < 0) { + ohci_stop (hcd); + return ret; + } + ohci->regs = hcd->regs; + + if (hc_reset (ohci) < 0) { + ohci_stop (hcd); + return -ENODEV; + } + + if (hc_start (ohci) < 0) { + err ("can't start %s", ohci->hcd.self.bus_name); + ohci_stop (hcd); + return -EBUSY; + } + create_debug_files (ohci); + +#ifdef DEBUG + ohci_dump (ohci, 1); +#endif /*DEBUG*/ + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static const struct hc_driver ohci_lh7a404_hc_driver = { + .description = hcd_name, + + /* + * generic hardware linkage + */ + .irq = ohci_irq, + .flags = HCD_USB11, + + /* + * basic lifecycle operations + */ + .start = ohci_lh7a404_start, +#ifdef CONFIG_PM + /* suspend: ohci_lh7a404_suspend, -- tbd */ + /* resume: ohci_lh7a404_resume, -- tbd */ +#endif /*CONFIG_PM*/ + .stop = ohci_stop, + + /* + * memory lifecycle (except per-request) + */ + .hcd_alloc = ohci_hcd_alloc, + .hcd_free = ohci_hcd_free, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ohci_urb_enqueue, + .urb_dequeue = ohci_urb_dequeue, + .endpoint_disable = ohci_endpoint_disable, + + /* + * scheduling support + */ + .get_frame_number = ohci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ohci_hub_status_data, + .hub_control = ohci_hub_control, +}; + +/*-------------------------------------------------------------------------*/ + +static int ohci_hcd_lh7a404_drv_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct usb_hcd *hcd = NULL; + int ret; + + pr_debug ("In ohci_hcd_lh7a404_drv_probe"); + + if (usb_disabled()) + return -ENODEV; + + ret = usb_hcd_lh7a404_probe(&ohci_lh7a404_hc_driver, &hcd, pdev); + + if (ret == 0) + dev_set_drvdata(dev, hcd); + + return ret; +} + +static int ohci_hcd_lh7a404_drv_remove(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct usb_hcd *hcd = dev_get_drvdata(dev); + + usb_hcd_lh7a404_remove(hcd, pdev); + dev_set_drvdata(dev, NULL); + return 0; +} + /*TBD*/ +/*static int ohci_hcd_lh7a404_drv_suspend(struct device *dev) +{ 
+ struct platform_device *pdev = to_platform_device(dev); + struct usb_hcd *hcd = dev_get_drvdata(dev); + + return 0; +} +static int ohci_hcd_lh7a404_drv_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct usb_hcd *hcd = dev_get_drvdata(dev); + + + return 0; +} +*/ + +static struct device_driver ohci_hcd_lh7a404_driver = { + .name = "lh7a404-ohci", + .bus = &platform_bus_type, + .probe = ohci_hcd_lh7a404_drv_probe, + .remove = ohci_hcd_lh7a404_drv_remove, + /*.suspend = ohci_hcd_lh7a404_drv_suspend, */ + /*.resume = ohci_hcd_lh7a404_drv_resume, */ +}; + +static int __init ohci_hcd_lh7a404_init (void) +{ + pr_debug (DRIVER_INFO " (LH7A404)"); + pr_debug ("block sizes: ed %d td %d\n", + sizeof (struct ed), sizeof (struct td)); + + return driver_register(&ohci_hcd_lh7a404_driver); +} + +static void __exit ohci_hcd_lh7a404_cleanup (void) +{ + driver_unregister(&ohci_hcd_lh7a404_driver); +} + +module_init (ohci_hcd_lh7a404_init); +module_exit (ohci_hcd_lh7a404_cleanup); diff --git a/drivers/usb/input/touchkitusb.c b/drivers/usb/input/touchkitusb.c new file mode 100644 index 000000000..4917b042e --- /dev/null +++ b/drivers/usb/input/touchkitusb.c @@ -0,0 +1,310 @@ +/****************************************************************************** + * touchkitusb.c -- Driver for eGalax TouchKit USB Touchscreens + * + * Copyright (C) 2004 by Daniel Ritz + * Copyright (C) by Todd E. Johnson (mtouchusb.c) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Based upon mtouchusb.c + * + *****************************************************************************/ + +//#define DEBUG + +#include +#include +#include +#include +#include +#include + +#if !defined(DEBUG) && defined(CONFIG_USB_DEBUG) +#define DEBUG +#endif +#include + + +#define TOUCHKIT_MIN_XC 0x0 +#define TOUCHKIT_MAX_XC 0x07ff +#define TOUCHKIT_XC_FUZZ 0x0 +#define TOUCHKIT_XC_FLAT 0x0 +#define TOUCHKIT_MIN_YC 0x0 +#define TOUCHKIT_MAX_YC 0x07ff +#define TOUCHKIT_YC_FUZZ 0x0 +#define TOUCHKIT_YC_FLAT 0x0 +#define TOUCHKIT_REPORT_DATA_SIZE 8 + +#define TOUCHKIT_DOWN 0x01 +#define TOUCHKIT_POINT_TOUCH 0x81 +#define TOUCHKIT_POINT_NOTOUCH 0x80 + +#define TOUCHKIT_GET_TOUCHED(dat) ((((dat)[0]) & TOUCHKIT_DOWN) ? 
1 : 0) +#define TOUCHKIT_GET_X(dat) (((dat)[3] << 7) | (dat)[4]) +#define TOUCHKIT_GET_Y(dat) (((dat)[1] << 7) | (dat)[2]) + +#define DRIVER_VERSION "v0.1" +#define DRIVER_AUTHOR "Daniel Ritz " +#define DRIVER_DESC "eGalax TouchKit USB HID Touchscreen Driver" + +struct touchkit_usb { + unsigned char *data; + dma_addr_t data_dma; + struct urb *irq; + struct usb_device *udev; + struct input_dev input; + int open; + char name[128]; + char phys[64]; +}; + +static struct usb_device_id touchkit_devices[] = { + {USB_DEVICE(0x3823, 0x0001)}, + {USB_DEVICE(0x0eef, 0x0001)}, + {} +}; + +static void touchkit_irq(struct urb *urb, struct pt_regs *regs) +{ + struct touchkit_usb *touchkit = urb->context; + int retval; + + switch (urb->status) { + case 0: + /* success */ + break; + case -ETIMEDOUT: + /* this urb is timing out */ + dbg("%s - urb timed out - was the device unplugged?", + __FUNCTION__); + return; + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* this urb is terminated, clean up */ + dbg("%s - urb shutting down with status: %d", + __FUNCTION__, urb->status); + return; + default: + dbg("%s - nonzero urb status received: %d", + __FUNCTION__, urb->status); + goto exit; + } + + input_regs(&touchkit->input, regs); + input_report_key(&touchkit->input, BTN_TOUCH, + TOUCHKIT_GET_TOUCHED(touchkit->data)); + input_report_abs(&touchkit->input, ABS_X, + TOUCHKIT_GET_X(touchkit->data)); + input_report_abs(&touchkit->input, ABS_Y, + TOUCHKIT_GET_Y(touchkit->data)); + input_sync(&touchkit->input); + +exit: + retval = usb_submit_urb(urb, GFP_ATOMIC); + if (retval) + err("%s - usb_submit_urb failed with result: %d", + __FUNCTION__, retval); +} + +static int touchkit_open(struct input_dev *input) +{ + struct touchkit_usb *touchkit = input->private; + + if (touchkit->open++) + return 0; + + touchkit->irq->dev = touchkit->udev; + + if (usb_submit_urb(touchkit->irq, GFP_ATOMIC)) { + touchkit->open--; + return -EIO; + } + + return 0; +} + +static void touchkit_close(struct input_dev *input) +{ + struct touchkit_usb *touchkit = input->private; + + if (!--touchkit->open) + usb_unlink_urb(touchkit->irq); +} + +static int touchkit_alloc_buffers(struct usb_device *udev, + struct touchkit_usb *touchkit) +{ + touchkit->data = usb_buffer_alloc(udev, TOUCHKIT_REPORT_DATA_SIZE, + SLAB_ATOMIC, &touchkit->data_dma); + + if (!touchkit->data) + return -1; + + return 0; +} + +static void touchkit_free_buffers(struct usb_device *udev, + struct touchkit_usb *touchkit) +{ + if (touchkit->data) + usb_buffer_free(udev, TOUCHKIT_REPORT_DATA_SIZE, + touchkit->data, touchkit->data_dma); +} + +static int touchkit_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + int ret; + struct touchkit_usb *touchkit; + struct usb_host_interface *interface; + struct usb_endpoint_descriptor *endpoint; + struct usb_device *udev = interface_to_usbdev(intf); + char path[64]; + char *buf; + + interface = intf->cur_altsetting; + endpoint = &interface->endpoint[0].desc; + + touchkit = kmalloc(sizeof(struct touchkit_usb), GFP_KERNEL); + if (!touchkit) + return -ENOMEM; + + memset(touchkit, 0, sizeof(struct touchkit_usb)); + touchkit->udev = udev; + + if (touchkit_alloc_buffers(udev, touchkit)) { + ret = -ENOMEM; + goto out_free; + } + + touchkit->input.private = touchkit; + touchkit->input.open = touchkit_open; + touchkit->input.close = touchkit_close; + + usb_make_path(udev, path, 64); + sprintf(touchkit->phys, "%s/input0", path); + + touchkit->input.name = touchkit->name; + touchkit->input.phys = touchkit->phys; + 
touchkit->input.id.bustype = BUS_USB; + touchkit->input.id.vendor = udev->descriptor.idVendor; + touchkit->input.id.product = udev->descriptor.idProduct; + touchkit->input.id.version = udev->descriptor.bcdDevice; + touchkit->input.dev = &intf->dev; + + touchkit->input.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS); + touchkit->input.absbit[0] = BIT(ABS_X) | BIT(ABS_Y); + touchkit->input.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH); + + /* Used to Scale Compensated Data */ + touchkit->input.absmin[ABS_X] = TOUCHKIT_MIN_XC; + touchkit->input.absmax[ABS_X] = TOUCHKIT_MAX_XC; + touchkit->input.absfuzz[ABS_X] = TOUCHKIT_XC_FUZZ; + touchkit->input.absflat[ABS_X] = TOUCHKIT_XC_FLAT; + touchkit->input.absmin[ABS_Y] = TOUCHKIT_MIN_YC; + touchkit->input.absmax[ABS_Y] = TOUCHKIT_MAX_YC; + touchkit->input.absfuzz[ABS_Y] = TOUCHKIT_YC_FUZZ; + touchkit->input.absflat[ABS_Y] = TOUCHKIT_YC_FLAT; + + buf = kmalloc(63, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out_free_buffers; + } + + if (udev->descriptor.iManufacturer && + usb_string(udev, udev->descriptor.iManufacturer, buf, 63) > 0) + strcat(touchkit->name, buf); + if (udev->descriptor.iProduct && + usb_string(udev, udev->descriptor.iProduct, buf, 63) > 0) + sprintf(touchkit->name, "%s %s", touchkit->name, buf); + + if (!strlen(touchkit->name)) + sprintf(touchkit->name, "USB Touchscreen %04x:%04x", + touchkit->input.id.vendor, touchkit->input.id.product); + + kfree(buf); + + touchkit->irq = usb_alloc_urb(0, GFP_KERNEL); + if (!touchkit->irq) { + dbg("%s - usb_alloc_urb failed: touchkit->irq", __FUNCTION__); + ret = -ENOMEM; + goto out_free_buffers; + } + + usb_fill_int_urb(touchkit->irq, touchkit->udev, + usb_rcvintpipe(touchkit->udev, 0x81), + touchkit->data, TOUCHKIT_REPORT_DATA_SIZE, + touchkit_irq, touchkit, endpoint->bInterval); + + input_register_device(&touchkit->input); + + printk(KERN_INFO "input: %s on %s\n", touchkit->name, path); + usb_set_intfdata(intf, touchkit); + + return 0; + +out_free_buffers: + touchkit_free_buffers(udev, touchkit); +out_free: + kfree(touchkit); + return ret; +} + +static void touchkit_disconnect(struct usb_interface *intf) +{ + struct touchkit_usb *touchkit = usb_get_intfdata(intf); + + dbg("%s - called", __FUNCTION__); + + if (!touchkit) + return; + + dbg("%s - touchkit is initialized, cleaning up", __FUNCTION__); + usb_set_intfdata(intf, NULL); + input_unregister_device(&touchkit->input); + usb_unlink_urb(touchkit->irq); + usb_free_urb(touchkit->irq); + touchkit_free_buffers(interface_to_usbdev(intf), touchkit); + kfree(touchkit); +} + +MODULE_DEVICE_TABLE(usb, touchkit_devices); + +static struct usb_driver touchkit_driver = { + .owner = THIS_MODULE, + .name = "touchkitusb", + .probe = touchkit_probe, + .disconnect = touchkit_disconnect, + .id_table = touchkit_devices, +}; + +static int __init touchkit_init(void) +{ + return usb_register(&touchkit_driver); +} + +static void __exit touchkit_cleanup(void) +{ + usb_deregister(&touchkit_driver); +} + +module_init(touchkit_init); +module_exit(touchkit_cleanup); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/media/sn9c102.h b/drivers/usb/media/sn9c102.h new file mode 100644 index 000000000..fc9cb47e6 --- /dev/null +++ b/drivers/usb/media/sn9c102.h @@ -0,0 +1,181 @@ +/*************************************************************************** + * V4L2 driver for SN9C10[12] PC Camera Controllers * + * * + * Copyright (C) 2004 by Luca Risolia * + * * + * This program is free software; you can redistribute it 
and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program; if not, write to the Free Software * + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * + ***************************************************************************/ + +#ifndef _SN9C102_H_ +#define _SN9C102_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sn9c102_sensor.h" + +/*****************************************************************************/ + +#define SN9C102_DEBUG +#define SN9C102_DEBUG_LEVEL 2 +#define SN9C102_MAX_DEVICES 64 +#define SN9C102_MAX_FRAMES 32 +#define SN9C102_URBS 2 +#define SN9C102_ISO_PACKETS 7 +#define SN9C102_ALTERNATE_SETTING 8 +#define SN9C102_CTRL_TIMEOUT 10*HZ + +/*****************************************************************************/ + +#define SN9C102_MODULE_NAME "V4L2 driver for SN9C10[12] PC Camera Controllers" +#define SN9C102_MODULE_AUTHOR "(C) 2004 Luca Risolia" +#define SN9C102_AUTHOR_EMAIL "" +#define SN9C102_MODULE_LICENSE "GPL" +#define SN9C102_MODULE_VERSION "1:1.01-beta" +#define SN9C102_MODULE_VERSION_CODE KERNEL_VERSION(1, 0, 1) + +SN9C102_ID_TABLE; +SN9C102_SENSOR_TABLE; + +enum sn9c102_frame_state { + F_UNUSED, + F_QUEUED, + F_GRABBING, + F_DONE, + F_ERROR, +}; + +struct sn9c102_frame_t { + void* bufmem; + struct v4l2_buffer buf; + enum sn9c102_frame_state state; + struct list_head frame; + unsigned long vma_use_count; +}; + +enum sn9c102_dev_state { + DEV_INITIALIZED = 0x01, + DEV_DISCONNECTED = 0x02, + DEV_MISCONFIGURED = 0x04, +}; + +enum sn9c102_io_method { + IO_NONE, + IO_READ, + IO_MMAP, +}; + +enum sn9c102_stream_state { + STREAM_OFF, + STREAM_INTERRUPT, + STREAM_ON, +}; + +struct sn9c102_sysfs_attr { + u8 reg, val, i2c_reg, i2c_val; +}; + +static DECLARE_MUTEX(sn9c102_sysfs_lock); +static DECLARE_RWSEM(sn9c102_disconnect); + +struct sn9c102_device { + struct device dev; + + struct video_device* v4ldev; + + struct sn9c102_sensor* sensor; + + struct usb_device* usbdev; + struct urb* urb[SN9C102_URBS]; + void* transfer_buffer[SN9C102_URBS]; + u8* control_buffer; + + struct sn9c102_frame_t *frame_current, frame[SN9C102_MAX_FRAMES]; + struct list_head inqueue, outqueue; + u32 frame_count, nbuffers; + + enum sn9c102_io_method io; + enum sn9c102_stream_state stream; + + struct sn9c102_sysfs_attr sysfs; + u16 reg[32]; + + enum sn9c102_dev_state state; + u8 users; + + struct semaphore dev_sem, fileop_sem; + spinlock_t queue_lock; + wait_queue_head_t open, wait_frame, wait_stream; +}; + +/*****************************************************************************/ + +void +sn9c102_attach_sensor(struct sn9c102_device* cam, + struct sn9c102_sensor* sensor) +{ + cam->sensor = sensor; + cam->sensor->dev = &cam->dev; + cam->sensor->usbdev = cam->usbdev; +} + +/*****************************************************************************/ + +#undef DBG +#undef KDBG +#ifdef SN9C102_DEBUG +# define DBG(level, fmt, args...) 
\ +{ \ + if (debug >= (level)) { \ + if ((level) == 1) \ + dev_err(&cam->dev, fmt "\n", ## args); \ + else if ((level) == 2) \ + dev_info(&cam->dev, fmt "\n", ## args); \ + else if ((level) >= 3) \ + dev_info(&cam->dev, "[%s:%d] " fmt "\n", \ + __FUNCTION__, __LINE__ , ## args); \ + } \ +} +# define KDBG(level, fmt, args...) \ +{ \ + if (debug >= (level)) { \ + if ((level) == 1 || (level) == 2) \ + pr_info("sn9c102: " fmt "\n", ## args); \ + else if ((level) == 3) \ + pr_debug("sn9c102: [%s:%d] " fmt "\n", __FUNCTION__, \ + __LINE__ , ## args); \ + } \ +} +#else +# define KDBG(level, fmt, args...) do {;} while(0); +# define DBG(level, fmt, args...) do {;} while(0); +#endif + +#undef PDBG +#define PDBG(fmt, args...) \ +dev_info(&cam->dev, "[%s:%d] " fmt "\n", __FUNCTION__, __LINE__ , ## args); + +#undef PDBGG +#define PDBGG(fmt, args...) do {;} while(0); /* placeholder */ + +#endif /* _SN9C102_H_ */ diff --git a/drivers/usb/media/sn9c102_core.c b/drivers/usb/media/sn9c102_core.c new file mode 100644 index 000000000..bcd9fbd3b --- /dev/null +++ b/drivers/usb/media/sn9c102_core.c @@ -0,0 +1,2439 @@ +/*************************************************************************** + * V4L2 driver for SN9C10[12] PC Camera Controllers * + * * + * Copyright (C) 2004 by Luca Risolia * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program; if not, write to the Free Software * + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * + ***************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sn9c102.h" + +/*****************************************************************************/ + +MODULE_DEVICE_TABLE(usb, sn9c102_id_table); + +MODULE_AUTHOR(SN9C102_MODULE_AUTHOR " " SN9C102_AUTHOR_EMAIL); +MODULE_DESCRIPTION(SN9C102_MODULE_NAME); +MODULE_VERSION(SN9C102_MODULE_VERSION); +MODULE_LICENSE(SN9C102_MODULE_LICENSE); + +static short video_nr[] = {[0 ... SN9C102_MAX_DEVICES-1] = -1}; +static unsigned int nv; +module_param_array(video_nr, short, nv, 0444); +MODULE_PARM_DESC(video_nr, + "\n<-1|n[,...]> Specify V4L2 minor mode number." + "\n -1 = use next available (default)" + "\n n = use minor number n (integer >= 0)" + "\nYou can specify up to "__MODULE_STRING(SN9C102_MAX_DEVICES) + " cameras this way." + "\nFor example:" + "\nvideo_nr=-1,2,-1 would assign minor number 2 to" + "\nthe second camera and use auto for the first" + "\none and for every other camera." 
+ "\n"); + +#ifdef SN9C102_DEBUG +static unsigned short debug = SN9C102_DEBUG_LEVEL; +module_param(debug, ushort, 0644); +MODULE_PARM_DESC(debug, + "\n Debugging information level, from 0 to 3:" + "\n0 = none (use carefully)" + "\n1 = critical errors" + "\n2 = significant informations" + "\n3 = more verbose messages" + "\nLevel 3 is useful for testing only, when only " + "one device is used." + "\nDefault value is "__MODULE_STRING(SN9C102_DEBUG_LEVEL)"." + "\n"); +#endif + +/*****************************************************************************/ + +typedef char sn9c102_sof_header_t[7]; +typedef char sn9c102_eof_header_t[4]; + +static sn9c102_sof_header_t sn9c102_sof_header[] = { + {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96, 0x00}, + {0xff, 0xff, 0x00, 0xc4, 0xc4, 0x96, 0x01}, +}; + +/* Number of random bytes that complete the SOF above headers */ +#define SN9C102_SOFLEN 5 + +static sn9c102_eof_header_t sn9c102_eof_header[] = { + {0x00, 0x00, 0x00, 0x00}, + {0x40, 0x00, 0x00, 0x00}, + {0x80, 0x00, 0x00, 0x00}, + {0xc0, 0x00, 0x00, 0x00}, +}; + +/*****************************************************************************/ + +static inline unsigned long kvirt_to_pa(unsigned long adr) +{ + unsigned long kva, ret; + + kva = (unsigned long)page_address(vmalloc_to_page((void *)adr)); + kva |= adr & (PAGE_SIZE-1); + ret = __pa(kva); + return ret; +} + + +static void* rvmalloc(size_t size) +{ + void* mem; + unsigned long adr; + + size = PAGE_ALIGN(size); + + mem = vmalloc_32((unsigned long)size); + if (!mem) + return NULL; + + memset(mem, 0, size); + + adr = (unsigned long)mem; + while (size > 0) { + SetPageReserved(vmalloc_to_page((void *)adr)); + adr += PAGE_SIZE; + size -= PAGE_SIZE; + } + + return mem; +} + + +static void rvfree(void* mem, size_t size) +{ + unsigned long adr; + + if (!mem) + return; + + size = PAGE_ALIGN(size); + + adr = (unsigned long)mem; + while (size > 0) { + ClearPageReserved(vmalloc_to_page((void *)adr)); + adr += PAGE_SIZE; + size -= PAGE_SIZE; + } + + vfree(mem); +} + + +static u32 sn9c102_request_buffers(struct sn9c102_device* cam, u32 count) +{ + struct v4l2_pix_format* p = &(cam->sensor->pix_format); + const size_t imagesize = (p->width * p->height * p->priv)/8; + void* buff = NULL; + u32 i; + + if (count > SN9C102_MAX_FRAMES) + count = SN9C102_MAX_FRAMES; + + cam->nbuffers = count; + while (cam->nbuffers > 0) { + if ((buff = rvmalloc(cam->nbuffers * imagesize))) + break; + cam->nbuffers--; + } + + for (i = 0; i < cam->nbuffers; i++) { + cam->frame[i].bufmem = buff + i*imagesize; + cam->frame[i].buf.index = i; + cam->frame[i].buf.m.offset = i*imagesize; + cam->frame[i].buf.length = imagesize; + cam->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + cam->frame[i].buf.sequence = 0; + cam->frame[i].buf.field = V4L2_FIELD_NONE; + cam->frame[i].buf.memory = V4L2_MEMORY_MMAP; + cam->frame[i].buf.flags = 0; + } + + return cam->nbuffers; +} + + +static void sn9c102_release_buffers(struct sn9c102_device* cam) +{ + if (cam->nbuffers) { + rvfree(cam->frame[0].bufmem, + cam->nbuffers * cam->frame[0].buf.length); + cam->nbuffers = 0; + } +} + + +static void sn9c102_empty_framequeues(struct sn9c102_device* cam) +{ + u32 i; + + INIT_LIST_HEAD(&cam->inqueue); + INIT_LIST_HEAD(&cam->outqueue); + + for (i = 0; i < SN9C102_MAX_FRAMES; i++) { + cam->frame[i].state = F_UNUSED; + cam->frame[i].buf.bytesused = 0; + } +} + + +static void sn9c102_queue_unusedframes(struct sn9c102_device* cam) +{ + unsigned long lock_flags; + u32 i; + + for (i = 0; i < cam->nbuffers; i++) + if 
(cam->frame[i].state == F_UNUSED) { + cam->frame[i].state = F_QUEUED; + spin_lock_irqsave(&cam->queue_lock, lock_flags); + list_add_tail(&cam->frame[i].frame, &cam->inqueue); + spin_unlock_irqrestore(&cam->queue_lock, lock_flags); + } +} + +/*****************************************************************************/ + +int sn9c102_write_reg(struct sn9c102_device* cam, u8 value, u16 index) +{ + struct usb_device* udev = cam->usbdev; + u8* buff = cam->control_buffer; + int res; + + if (index == 0x18) + value = (value & 0xcf) | (cam->reg[0x18] & 0x30); + + *buff = value; + + res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x08, 0x41, + index, 0, buff, 1, SN9C102_CTRL_TIMEOUT); + if (res < 0) { + DBG(3, "Failed to write a register (value 0x%02X, index " + "0x%02X, error %d)", value, index, res) + return -1; + } + + cam->reg[index] = value; + + return 0; +} + + +/* NOTE: reading some registers always returns 0 */ +static int sn9c102_read_reg(struct sn9c102_device* cam, u16 index) +{ + struct usb_device* udev = cam->usbdev; + u8* buff = cam->control_buffer; + int res; + + res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, 0xc1, + index, 0, buff, 1, SN9C102_CTRL_TIMEOUT); + if (res < 0) + DBG(3, "Failed to read a register (index 0x%02X, error %d)", + index, res) + + return (res >= 0) ? (int)(*buff) : -1; +} + + +int sn9c102_pread_reg(struct sn9c102_device* cam, u16 index) +{ + if (index > 0x1f) + return -EINVAL; + + return cam->reg[index]; +} + + +static int +sn9c102_i2c_wait(struct sn9c102_device* cam, struct sn9c102_sensor* sensor) +{ + int i, r; + + for (i = 1; i <= 5; i++) { + r = sn9c102_read_reg(cam, 0x08); + if (r < 0) + return -EIO; + if (r & 0x04) + return 0; + if (sensor->frequency & SN9C102_I2C_400KHZ) + udelay(5*8); + else + udelay(16*8); + } + return -EBUSY; +} + + +static int +sn9c102_i2c_detect_read_error(struct sn9c102_device* cam, + struct sn9c102_sensor* sensor) +{ + int r; + r = sn9c102_read_reg(cam, 0x08); + return (r < 0 || (r >= 0 && !(r & 0x08))) ? -EIO : 0; +} + + +static int +sn9c102_i2c_detect_write_error(struct sn9c102_device* cam, + struct sn9c102_sensor* sensor) +{ + int r; + r = sn9c102_read_reg(cam, 0x08); + return (r < 0 || (r >= 0 && (r & 0x08))) ? -EIO : 0; +} + + +int +sn9c102_i2c_try_read(struct sn9c102_device* cam, + struct sn9c102_sensor* sensor, u8 address) +{ + struct usb_device* udev = cam->usbdev; + u8* data = cam->control_buffer; + int err = 0, res; + + /* Write cycle - address */ + data[0] = ((sensor->interface == SN9C102_I2C_2WIRES) ? 0x80 : 0) | + ((sensor->frequency & SN9C102_I2C_400KHZ) ? 0x01 : 0) | 0x10; + data[1] = sensor->slave_write_id; + data[2] = address; + data[7] = 0x10; + res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x08, 0x41, + 0x08, 0, data, 8, SN9C102_CTRL_TIMEOUT); + if (res < 0) + err += res; + + err += sn9c102_i2c_wait(cam, sensor); + + /* Read cycle - 1 byte */ + data[0] = ((sensor->interface == SN9C102_I2C_2WIRES) ? 0x80 : 0) | + ((sensor->frequency & SN9C102_I2C_400KHZ) ? 
0x01 : 0) | + 0x10 | 0x02; + data[1] = sensor->slave_read_id; + data[7] = 0x10; + res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x08, 0x41, + 0x08, 0, data, 8, SN9C102_CTRL_TIMEOUT); + if (res < 0) + err += res; + + err += sn9c102_i2c_wait(cam, sensor); + + /* The read byte will be placed in data[4] */ + res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00, 0xc1, + 0x0a, 0, data, 5, SN9C102_CTRL_TIMEOUT); + if (res < 0) + err += res; + + err += sn9c102_i2c_detect_read_error(cam, sensor); + + if (err) + DBG(3, "I2C read failed for %s image sensor", sensor->name) + + PDBGG("I2C read: address 0x%02X, value: 0x%02X", address, data[4]) + + return err ? -1 : (int)data[4]; +} + + +int +sn9c102_i2c_try_raw_write(struct sn9c102_device* cam, + struct sn9c102_sensor* sensor, u8 n, u8 data0, + u8 data1, u8 data2, u8 data3, u8 data4, u8 data5) +{ + struct usb_device* udev = cam->usbdev; + u8* data = cam->control_buffer; + int err = 0, res; + + /* Write cycle. It usually is address + value */ + data[0] = ((sensor->interface == SN9C102_I2C_2WIRES) ? 0x80 : 0) | + ((sensor->frequency & SN9C102_I2C_400KHZ) ? 0x01 : 0) + | ((n - 1) << 4); + data[1] = data0; + data[2] = data1; + data[3] = data2; + data[4] = data3; + data[5] = data4; + data[6] = data5; + data[7] = 0x10; + res = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x08, 0x41, + 0x08, 0, data, 8, SN9C102_CTRL_TIMEOUT); + if (res < 0) + err += res; + + err += sn9c102_i2c_wait(cam, sensor); + err += sn9c102_i2c_detect_write_error(cam, sensor); + + if (err) + DBG(3, "I2C write failed for %s image sensor", sensor->name) + + PDBGG("I2C write: %u bytes, data0 = 0x%02X, data1 = 0x%02X, " + "data2 = 0x%02X, data3 = 0x%02X, data4 = 0x%02X, data5 = 0x%02X", + n, data0, data1, data2, data3, data4, data5) + + return err ? 
-1 : 0; +} + + +int +sn9c102_i2c_try_write(struct sn9c102_device* cam, + struct sn9c102_sensor* sensor, u8 address, u8 value) +{ + return sn9c102_i2c_try_raw_write(cam, sensor, 3, + sensor->slave_write_id, address, + value, 0, 0, 0); +} + + +int sn9c102_i2c_read(struct sn9c102_device* cam, u8 address) +{ + if (!cam->sensor) + return -1; + + return sn9c102_i2c_try_read(cam, cam->sensor, address); +} + + +int sn9c102_i2c_write(struct sn9c102_device* cam, u8 address, u8 value) +{ + if (!cam->sensor) + return -1; + + return sn9c102_i2c_try_write(cam, cam->sensor, address, value); +} + +/*****************************************************************************/ + +static void* sn9c102_find_sof_header(void* mem, size_t len) +{ + size_t soflen=sizeof(sn9c102_sof_header_t), SOFLEN=SN9C102_SOFLEN, i; + u8 j, n = sizeof(sn9c102_sof_header) / soflen; + + for (i = 0; (len >= soflen+SOFLEN) && (i <= len-soflen-SOFLEN); i++) + for (j = 0; j < n; j++) + if (!memcmp(mem + i, sn9c102_sof_header[j], soflen)) + /* Skips the header */ + return mem + i + soflen + SOFLEN; + + return NULL; +} + + +static void* sn9c102_find_eof_header(void* mem, size_t len) +{ + size_t eoflen = sizeof(sn9c102_eof_header_t), i; + unsigned j, n = sizeof(sn9c102_eof_header) / eoflen; + + for (i = 0; (len >= eoflen) && (i <= len - eoflen); i++) + for (j = 0; j < n; j++) + if (!memcmp(mem + i, sn9c102_eof_header[j], eoflen)) + return mem + i; + + return NULL; +} + + +static void sn9c102_urb_complete(struct urb *urb, struct pt_regs* regs) +{ + struct sn9c102_device* cam = urb->context; + struct sn9c102_frame_t** f; + unsigned long lock_flags; + u8 i; + int err = 0; + + if (urb->status == -ENOENT) + return; + + f = &cam->frame_current; + + if (cam->stream == STREAM_INTERRUPT) { + cam->stream = STREAM_OFF; + if ((*f)) + (*f)->state = F_QUEUED; + DBG(3, "Stream interrupted") + wake_up_interruptible(&cam->wait_stream); + } + + if ((cam->state & DEV_DISCONNECTED)||(cam->state & DEV_MISCONFIGURED)) + return; + + if (cam->stream == STREAM_OFF || list_empty(&cam->inqueue)) + goto resubmit_urb; + + if (!(*f)) + (*f) = list_entry(cam->inqueue.next, struct sn9c102_frame_t, + frame); + + for (i = 0; i < urb->number_of_packets; i++) { + unsigned int img, len, status; + void *pos, *sof, *eof; + + len = urb->iso_frame_desc[i].actual_length; + status = urb->iso_frame_desc[i].status; + pos = urb->iso_frame_desc[i].offset + urb->transfer_buffer; + + if (status) { + DBG(3, "Error in isochronous frame") + (*f)->state = F_ERROR; + continue; + } + + PDBGG("Isochronous frame: length %u, #%u i", len, i) + + /* NOTE: It is probably correct to assume that SOF and EOF + headers do not occur between two consecutive packets, + but who knows... Whatever the truth is, this assumption + doesn't introduce bugs. */ + +redo: + sof = sn9c102_find_sof_header(pos, len); + if (!sof) { + eof = sn9c102_find_eof_header(pos, len); + if ((*f)->state == F_GRABBING) { +end_of_frame: + img = len; + + if (eof) + img = (eof > pos) ?
eof - pos - 1 : 0; + + if ((*f)->buf.bytesused+img>(*f)->buf.length) { + u32 b = (*f)->buf.bytesused + img - + (*f)->buf.length; + img = (*f)->buf.length - + (*f)->buf.bytesused; + DBG(3, "Expected EOF not found: " + "video frame cut") + if (eof) + DBG(3, "Exceeded limit: +%u " + "bytes", (unsigned)(b)) + } + + memcpy((*f)->bufmem + (*f)->buf.bytesused, pos, + img); + + if ((*f)->buf.bytesused == 0) + do_gettimeofday(&(*f)->buf.timestamp); + + (*f)->buf.bytesused += img; + + if ((*f)->buf.bytesused == (*f)->buf.length) { + u32 b = (*f)->buf.bytesused; + (*f)->state = F_DONE; + (*f)->buf.sequence= ++cam->frame_count; + spin_lock_irqsave(&cam->queue_lock, + lock_flags); + list_move_tail(&(*f)->frame, + &cam->outqueue); + if (!list_empty(&cam->inqueue)) + (*f) = list_entry( + cam->inqueue.next, + struct sn9c102_frame_t, + frame ); + else + (*f) = NULL; + spin_unlock_irqrestore(&cam->queue_lock + , lock_flags); + DBG(3, "Video frame captured: " + "%lu bytes", (unsigned long)(b)) + + if (!(*f)) + goto resubmit_urb; + + } else if (eof) { + (*f)->state = F_ERROR; + DBG(3, "Not expected EOF after %lu " + "bytes of image data", + (unsigned long)((*f)->buf.bytesused)) + } + + if (sof) /* (1) */ + goto start_of_frame; + + } else if (eof) { + DBG(3, "EOF without SOF") + continue; + + } else { + PDBGG("Ignoring pointless isochronous frame") + continue; + } + + } else if ((*f)->state == F_QUEUED || (*f)->state == F_ERROR) { +start_of_frame: + (*f)->state = F_GRABBING; + (*f)->buf.bytesused = 0; + len -= (sof - pos); + pos = sof; + DBG(3, "SOF detected: new video frame") + if (len) + goto redo; + + } else if ((*f)->state == F_GRABBING) { + eof = sn9c102_find_eof_header(pos, len); + if (eof && eof < sof) + goto end_of_frame; /* (1) */ + else { + DBG(3, "SOF before expected EOF after %lu " + "bytes of image data", + (unsigned long)((*f)->buf.bytesused)) + goto start_of_frame; + } + } + } + +resubmit_urb: + urb->dev = cam->usbdev; + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0 && err != -EPERM) { + cam->state |= DEV_MISCONFIGURED; + DBG(1, "usb_submit_urb() failed") + } + + wake_up_interruptible(&cam->wait_frame); +} + + +static int sn9c102_start_transfer(struct sn9c102_device* cam) +{ + struct usb_device *udev = cam->usbdev; + struct urb* urb; + const unsigned int wMaxPacketSize[] = {0, 128, 256, 384, 512, + 680, 800, 900, 1023}; + const unsigned int psz = wMaxPacketSize[SN9C102_ALTERNATE_SETTING]; + s8 i, j; + int err = 0; + + for (i = 0; i < SN9C102_URBS; i++) { + cam->transfer_buffer[i] = kmalloc(SN9C102_ISO_PACKETS * psz, + GFP_KERNEL); + if (!cam->transfer_buffer[i]) { + err = -ENOMEM; + DBG(1, "Not enough memory") + goto free_buffers; + } + } + + for (i = 0; i < SN9C102_URBS; i++) { + urb = usb_alloc_urb(SN9C102_ISO_PACKETS, GFP_KERNEL); + cam->urb[i] = urb; + if (!urb) { + err = -ENOMEM; + DBG(1, "usb_alloc_urb() failed") + goto free_urbs; + } + urb->dev = udev; + urb->context = cam; + urb->pipe = usb_rcvisocpipe(udev, 1); + urb->transfer_flags = URB_ISO_ASAP; + urb->number_of_packets = SN9C102_ISO_PACKETS; + urb->complete = sn9c102_urb_complete; + urb->transfer_buffer = cam->transfer_buffer[i]; + urb->transfer_buffer_length = psz * SN9C102_ISO_PACKETS; + urb->interval = 1; + for (j = 0; j < SN9C102_ISO_PACKETS; j++) { + urb->iso_frame_desc[j].offset = psz * j; + urb->iso_frame_desc[j].length = psz; + } + } + + /* Enable video */ + if (!(cam->reg[0x01] & 0x04)) { + err = sn9c102_write_reg(cam, cam->reg[0x01] | 0x04, 0x01); + if (err) { + err = -EIO; + DBG(1, "I/O hardware error") + goto 
free_urbs; + } + } + + err = usb_set_interface(udev, 0, SN9C102_ALTERNATE_SETTING); + if (err) { + DBG(1, "usb_set_interface() failed") + goto free_urbs; + } + + cam->frame_current = NULL; + + for (i = 0; i < SN9C102_URBS; i++) { + err = usb_submit_urb(cam->urb[i], GFP_KERNEL); + if (err) { + for (j = i-1; j >= 0; j--) + usb_kill_urb(cam->urb[j]); + DBG(1, "usb_submit_urb() failed, error %d", err) + goto free_urbs; + } + } + + return 0; + +free_urbs: + for (i = 0; (i < SN9C102_URBS) && cam->urb[i]; i++) + usb_free_urb(cam->urb[i]); + +free_buffers: + for (i = 0; (i < SN9C102_URBS) && cam->transfer_buffer[i]; i++) + kfree(cam->transfer_buffer[i]); + + return err; +} + + +static int sn9c102_stop_transfer(struct sn9c102_device* cam) +{ + struct usb_device *udev = cam->usbdev; + s8 i; + int err = 0; + + if (cam->state & DEV_DISCONNECTED) + return 0; + + for (i = SN9C102_URBS-1; i >= 0; i--) { + usb_kill_urb(cam->urb[i]); + usb_free_urb(cam->urb[i]); + kfree(cam->transfer_buffer[i]); + } + + err = usb_set_interface(udev, 0, 0); /* 0 Mb/s */ + if (err) + DBG(3, "usb_set_interface() failed") + + return err; +} + +/*****************************************************************************/ + +static u8 sn9c102_strtou8(const char* buff, size_t len, ssize_t* count) +{ + char str[5]; + char* endp; + unsigned long val; + + if (len < 4) { + strncpy(str, buff, len); + str[len+1] = '\0'; + } else { + strncpy(str, buff, 4); + str[4] = '\0'; + } + + val = simple_strtoul(str, &endp, 0); + + *count = 0; + if (val <= 0xff) + *count = (ssize_t)(endp - str); + if ((*count) && (len == *count+1) && (buff[*count] == '\n')) + *count += 1; + + return (u8)val; +} + +/* NOTE 1: being inside one of the following methods implies that the v4l + device exists for sure (see kobjects and reference counters) + NOTE 2: buffers are PAGE_SIZE long */ + +static ssize_t sn9c102_show_reg(struct class_device* cd, char* buf) +{ + struct sn9c102_device* cam; + ssize_t count; + + if (down_interruptible(&sn9c102_sysfs_lock)) + return -ERESTARTSYS; + + cam = video_get_drvdata(to_video_device(cd)); + if (!cam) { + up(&sn9c102_sysfs_lock); + return -ENODEV; + } + + count = sprintf(buf, "%u\n", cam->sysfs.reg); + + up(&sn9c102_sysfs_lock); + + return count; +} + + +static ssize_t +sn9c102_store_reg(struct class_device* cd, const char* buf, size_t len) +{ + struct sn9c102_device* cam; + u8 index; + ssize_t count; + + if (down_interruptible(&sn9c102_sysfs_lock)) + return -ERESTARTSYS; + + cam = video_get_drvdata(to_video_device(cd)); + if (!cam) { + up(&sn9c102_sysfs_lock); + return -ENODEV; + } + + index = sn9c102_strtou8(buf, len, &count); + if (index > 0x1f || !count) { + up(&sn9c102_sysfs_lock); + return -EINVAL; + } + + cam->sysfs.reg = index; + + DBG(2, "Moved SN9C10X register index to 0x%02X", cam->sysfs.reg) + DBG(3, "Written bytes: %zd", count) + + up(&sn9c102_sysfs_lock); + + return count; +} + + +static ssize_t sn9c102_show_val(struct class_device* cd, char* buf) +{ + struct sn9c102_device* cam; + ssize_t count; + int val; + + if (down_interruptible(&sn9c102_sysfs_lock)) + return -ERESTARTSYS; + + cam = video_get_drvdata(to_video_device(cd)); + if (!cam) { + up(&sn9c102_sysfs_lock); + return -ENODEV; + } + + if ((val = sn9c102_read_reg(cam, cam->sysfs.reg)) < 0) { + up(&sn9c102_sysfs_lock); + return -EIO; + } + + count = sprintf(buf, "%d\n", val); + + DBG(3, "Read bytes: %zd", count) + + up(&sn9c102_sysfs_lock); + + return count; +} + + +static ssize_t +sn9c102_store_val(struct class_device* cd, const char* buf, size_t len) +{ 
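+ /* Write the byte parsed from the sysfs buffer to the SN9C10x register currently selected through the 'reg' attribute. */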
+ struct sn9c102_device* cam; + u8 value; + ssize_t count; + int err; + + if (down_interruptible(&sn9c102_sysfs_lock)) + return -ERESTARTSYS; + + cam = video_get_drvdata(to_video_device(cd)); + if (!cam) { + up(&sn9c102_sysfs_lock); + return -ENODEV; + } + + value = sn9c102_strtou8(buf, len, &count); + if (!count) { + up(&sn9c102_sysfs_lock); + return -EINVAL; + } + + err = sn9c102_write_reg(cam, value, cam->sysfs.reg); + if (err) { + up(&sn9c102_sysfs_lock); + return -EIO; + } + + DBG(2, "Written SN9C10X reg. 0x%02X, val. 0x%02X", + cam->sysfs.reg, value) + DBG(3, "Written bytes: %zd", count) + + up(&sn9c102_sysfs_lock); + + return count; +} + + +static ssize_t sn9c102_show_i2c_reg(struct class_device* cd, char* buf) +{ + struct sn9c102_device* cam; + ssize_t count; + + if (down_interruptible(&sn9c102_sysfs_lock)) + return -ERESTARTSYS; + + cam = video_get_drvdata(to_video_device(cd)); + if (!cam) { + up(&sn9c102_sysfs_lock); + return -ENODEV; + } + + count = sprintf(buf, "%u\n", cam->sysfs.i2c_reg); + + DBG(3, "Read bytes: %zd", count) + + up(&sn9c102_sysfs_lock); + + return count; +} + + +static ssize_t +sn9c102_store_i2c_reg(struct class_device* cd, const char* buf, size_t len) +{ + struct sn9c102_device* cam; + u8 index; + ssize_t count; + + if (down_interruptible(&sn9c102_sysfs_lock)) + return -ERESTARTSYS; + + cam = video_get_drvdata(to_video_device(cd)); + if (!cam) { + up(&sn9c102_sysfs_lock); + return -ENODEV; + } + + index = sn9c102_strtou8(buf, len, &count); + if (!count) { + up(&sn9c102_sysfs_lock); + return -EINVAL; + } + + cam->sysfs.i2c_reg = index; + + DBG(2, "Moved sensor register index to 0x%02X", cam->sysfs.i2c_reg) + DBG(3, "Written bytes: %zd", count) + + up(&sn9c102_sysfs_lock); + + return count; +} + + +static ssize_t sn9c102_show_i2c_val(struct class_device* cd, char* buf) +{ + struct sn9c102_device* cam; + ssize_t count; + int val; + + if (down_interruptible(&sn9c102_sysfs_lock)) + return -ERESTARTSYS; + + cam = video_get_drvdata(to_video_device(cd)); + if (!cam) { + up(&sn9c102_sysfs_lock); + return -ENODEV; + } + + if ((val = sn9c102_i2c_read(cam, cam->sysfs.i2c_reg)) < 0) { + up(&sn9c102_sysfs_lock); + return -EIO; + } + + count = sprintf(buf, "%d\n", val); + + DBG(3, "Read bytes: %zd", count) + + up(&sn9c102_sysfs_lock); + + return count; +} + + +static ssize_t +sn9c102_store_i2c_val(struct class_device* cd, const char* buf, size_t len) +{ + struct sn9c102_device* cam; + u8 value; + ssize_t count; + int err; + + if (down_interruptible(&sn9c102_sysfs_lock)) + return -ERESTARTSYS; + + cam = video_get_drvdata(to_video_device(cd)); + if (!cam) { + up(&sn9c102_sysfs_lock); + return -ENODEV; + } + + value = sn9c102_strtou8(buf, len, &count); + if (!count) { + up(&sn9c102_sysfs_lock); + return -EINVAL; + } + + err = sn9c102_i2c_write(cam, cam->sysfs.i2c_reg, value); + if (err) { + up(&sn9c102_sysfs_lock); + return -EIO; + } + + DBG(2, "Written sensor reg. 0x%02X, val. 
0x%02X", + cam->sysfs.i2c_reg, value) + DBG(3, "Written bytes: %zd", count) + + up(&sn9c102_sysfs_lock); + + return count; +} + + +static ssize_t +sn9c102_store_redblue(struct class_device* cd, const char* buf, size_t len) +{ + ssize_t res = 0; + u8 value; + ssize_t count; + + value = sn9c102_strtou8(buf, len, &count); + if (!count) + return -EINVAL; + + if ((res = sn9c102_store_reg(cd, "0x10", 4)) >= 0) + res = sn9c102_store_val(cd, buf, len); + + return res; +} + + +static ssize_t +sn9c102_store_green(struct class_device* cd, const char* buf, size_t len) +{ + ssize_t res = 0; + u8 value; + ssize_t count; + + value = sn9c102_strtou8(buf, len, &count); + if (!count || value > 0x0f) + return -EINVAL; + + if ((res = sn9c102_store_reg(cd, "0x11", 4)) >= 0) + res = sn9c102_store_val(cd, buf, len); + + return res; +} + + +static CLASS_DEVICE_ATTR(reg, S_IRUGO | S_IWUSR, + sn9c102_show_reg, sn9c102_store_reg); +static CLASS_DEVICE_ATTR(val, S_IRUGO | S_IWUSR, + sn9c102_show_val, sn9c102_store_val); +static CLASS_DEVICE_ATTR(i2c_reg, S_IRUGO | S_IWUSR, + sn9c102_show_i2c_reg, sn9c102_store_i2c_reg); +static CLASS_DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR, + sn9c102_show_i2c_val, sn9c102_store_i2c_val); +static CLASS_DEVICE_ATTR(redblue, S_IWUGO, NULL, sn9c102_store_redblue); +static CLASS_DEVICE_ATTR(green, S_IWUGO, NULL, sn9c102_store_green); + + +static void sn9c102_create_sysfs(struct sn9c102_device* cam) +{ + struct video_device *v4ldev = cam->v4ldev; + + video_device_create_file(v4ldev, &class_device_attr_reg); + video_device_create_file(v4ldev, &class_device_attr_val); + video_device_create_file(v4ldev, &class_device_attr_redblue); + video_device_create_file(v4ldev, &class_device_attr_green); + if (cam->sensor->slave_write_id && cam->sensor->slave_read_id) { + video_device_create_file(v4ldev, &class_device_attr_i2c_reg); + video_device_create_file(v4ldev, &class_device_attr_i2c_val); + } +} + +/*****************************************************************************/ + +static int sn9c102_set_scale(struct sn9c102_device* cam, u8 scale) +{ + u8 r = 0; + int err = 0; + + if (scale == 1) + r = cam->reg[0x18] & 0xcf; + else if (scale == 2) { + r = cam->reg[0x18] & 0xcf; + r |= 0x10; + } else if (scale == 4) + r = cam->reg[0x18] | 0x20; + + err += sn9c102_write_reg(cam, r, 0x18); + if (err) + return -EIO; + + PDBGG("Scaling factor: %u", scale) + + return 0; +} + + +static int sn9c102_set_crop(struct sn9c102_device* cam, struct v4l2_rect* rect) +{ + struct sn9c102_sensor* s = cam->sensor; + u8 h_start = (u8)(rect->left - s->cropcap.bounds.left), + v_start = (u8)(rect->top - s->cropcap.bounds.top), + h_size = (u8)(rect->width / 16), + v_size = (u8)(rect->height / 16), + ae_strx = 0x00, + ae_stry = 0x00, + ae_endx = h_size / 2, + ae_endy = v_size / 2; + int err = 0; + + /* These are a sort of stroboscopic signal for some sensors */ + err += sn9c102_write_reg(cam, h_size, 0x1a); + err += sn9c102_write_reg(cam, v_size, 0x1b); + + err += sn9c102_write_reg(cam, h_start, 0x12); + err += sn9c102_write_reg(cam, v_start, 0x13); + err += sn9c102_write_reg(cam, h_size, 0x15); + err += sn9c102_write_reg(cam, v_size, 0x16); + err += sn9c102_write_reg(cam, ae_strx, 0x1c); + err += sn9c102_write_reg(cam, ae_stry, 0x1d); + err += sn9c102_write_reg(cam, ae_endx, 0x1e); + err += sn9c102_write_reg(cam, ae_endy, 0x1f); + if (err) + return -EIO; + + PDBGG("h_start, v_start, h_size, v_size, ho_size, vo_size " + "%u %u %u %u %u %u", h_start, v_start, h_size, v_size, ho_size, + vo_size) + + return 0; +} + + +static int 
sn9c102_init(struct sn9c102_device* cam) +{ + struct sn9c102_sensor* s = cam->sensor; + struct v4l2_control ctrl; + struct v4l2_queryctrl *qctrl; + struct v4l2_rect* rect; + u8 i = 0, n = 0; + int err = 0; + + if (!(cam->state & DEV_INITIALIZED)) { + init_waitqueue_head(&cam->open); + qctrl = s->qctrl; + rect = &(s->cropcap.defrect); + } else { /* use current values */ + qctrl = s->_qctrl; + rect = &(s->_rect); + } + + err += sn9c102_set_scale(cam, rect->width / s->pix_format.width); + err += sn9c102_set_crop(cam, rect); + if (err) + return err; + + if (s->init) { + err = s->init(cam); + if (err) { + DBG(3, "Sensor initialization failed") + return err; + } + } + + if (s->set_crop) + if ((err = s->set_crop(cam, rect))) { + DBG(3, "set_crop() failed") + return err; + } + + if (s->set_ctrl) { + n = sizeof(s->qctrl) / sizeof(s->qctrl[0]); + for (i = 0; i < n; i++) + if (s->qctrl[i].id != 0 && + !(s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)) { + ctrl.id = s->qctrl[i].id; + ctrl.value = qctrl[i].default_value; + err = s->set_ctrl(cam, &ctrl); + if (err) { + DBG(3, "Set control failed") + return err; + } + } + } + + if (!(cam->state & DEV_INITIALIZED)) { + init_MUTEX(&cam->fileop_sem); + spin_lock_init(&cam->queue_lock); + init_waitqueue_head(&cam->wait_frame); + init_waitqueue_head(&cam->wait_stream); + memcpy(s->_qctrl, s->qctrl, sizeof(s->qctrl)); + memcpy(&(s->_rect), &(s->cropcap.defrect), + sizeof(struct v4l2_rect)); + cam->state |= DEV_INITIALIZED; + } + + DBG(2, "Initialization succeeded") + return 0; +} + + +static void sn9c102_release_resources(struct sn9c102_device* cam) +{ + down(&sn9c102_sysfs_lock); + + DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->minor) + video_set_drvdata(cam->v4ldev, NULL); + video_unregister_device(cam->v4ldev); + + up(&sn9c102_sysfs_lock); + + kfree(cam->control_buffer); +} + +/*****************************************************************************/ + +static int sn9c102_open(struct inode* inode, struct file* filp) +{ + struct sn9c102_device* cam; + int err = 0; + + /* This the only safe way to prevent race conditions with disconnect */ + if (!down_read_trylock(&sn9c102_disconnect)) + return -ERESTARTSYS; + + cam = video_get_drvdata(video_devdata(filp)); + + if (down_interruptible(&cam->dev_sem)) { + up_read(&sn9c102_disconnect); + return -ERESTARTSYS; + } + + if (cam->users) { + DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->minor) + if ((filp->f_flags & O_NONBLOCK) || + (filp->f_flags & O_NDELAY)) { + err = -EWOULDBLOCK; + goto out; + } + up(&cam->dev_sem); + err = wait_event_interruptible_exclusive(cam->open, + cam->state & DEV_DISCONNECTED + || !cam->users); + if (err) { + up_read(&sn9c102_disconnect); + return err; + } + if (cam->state & DEV_DISCONNECTED) { + up_read(&sn9c102_disconnect); + return -ENODEV; + } + down(&cam->dev_sem); + } + + + if (cam->state & DEV_MISCONFIGURED) { + err = sn9c102_init(cam); + if (err) { + DBG(1, "Initialization failed again. 
" + "I will retry on next open().") + goto out; + } + cam->state &= ~DEV_MISCONFIGURED; + } + + if ((err = sn9c102_start_transfer(cam))) + goto out; + + filp->private_data = cam; + cam->users++; + cam->io = IO_NONE; + cam->stream = STREAM_OFF; + cam->nbuffers = 0; + cam->frame_count = 0; + sn9c102_empty_framequeues(cam); + + DBG(3, "Video device /dev/video%d is open", cam->v4ldev->minor) + +out: + up(&cam->dev_sem); + up_read(&sn9c102_disconnect); + return err; +} + + +static int sn9c102_release(struct inode* inode, struct file* filp) +{ + struct sn9c102_device* cam = video_get_drvdata(video_devdata(filp)); + + down(&cam->dev_sem); /* prevent disconnect() to be called */ + + sn9c102_stop_transfer(cam); + + sn9c102_release_buffers(cam); + + if (cam->state & DEV_DISCONNECTED) { + sn9c102_release_resources(cam); + up(&cam->dev_sem); + kfree(cam); + return 0; + } + + cam->users--; + wake_up_interruptible_nr(&cam->open, 1); + + DBG(3, "Video device /dev/video%d closed", cam->v4ldev->minor) + + up(&cam->dev_sem); + + return 0; +} + + +static ssize_t +sn9c102_read(struct file* filp, char __user * buf, size_t count, loff_t* f_pos) +{ + struct sn9c102_device* cam = video_get_drvdata(video_devdata(filp)); + struct sn9c102_frame_t* f, * i; + unsigned long lock_flags; + int err = 0; + + if (down_interruptible(&cam->fileop_sem)) + return -ERESTARTSYS; + + if (cam->state & DEV_DISCONNECTED) { + DBG(1, "Device not present") + up(&cam->fileop_sem); + return -ENODEV; + } + + if (cam->state & DEV_MISCONFIGURED) { + DBG(1, "The camera is misconfigured. Close and open it again.") + up(&cam->fileop_sem); + return -EIO; + } + + if (cam->io == IO_MMAP) { + DBG(3, "Close and open the device again to choose " + "the read method") + up(&cam->fileop_sem); + return -EINVAL; + } + + if (cam->io == IO_NONE) { + if (!sn9c102_request_buffers(cam, 2)) { + DBG(1, "read() failed, not enough memory") + up(&cam->fileop_sem); + return -ENOMEM; + } + cam->io = IO_READ; + cam->stream = STREAM_ON; + sn9c102_queue_unusedframes(cam); + } + + if (!count) { + up(&cam->fileop_sem); + return 0; + } + + if (list_empty(&cam->outqueue)) { + if (filp->f_flags & O_NONBLOCK) { + up(&cam->fileop_sem); + return -EAGAIN; + } + err = wait_event_interruptible + ( cam->wait_frame, + (!list_empty(&cam->outqueue)) || + (cam->state & DEV_DISCONNECTED) ); + if (err) { + up(&cam->fileop_sem); + return err; + } + if (cam->state & DEV_DISCONNECTED) { + up(&cam->fileop_sem); + return -ENODEV; + } + } + + f = list_entry(cam->outqueue.prev, struct sn9c102_frame_t, frame); + + spin_lock_irqsave(&cam->queue_lock, lock_flags); + list_for_each_entry(i, &cam->outqueue, frame) + i->state = F_UNUSED; + INIT_LIST_HEAD(&cam->outqueue); + spin_unlock_irqrestore(&cam->queue_lock, lock_flags); + + sn9c102_queue_unusedframes(cam); + + if (count > f->buf.length) + count = f->buf.length; + + if (copy_to_user(buf, f->bufmem, count)) { + up(&cam->fileop_sem); + return -EFAULT; + } + *f_pos += count; + + PDBGG("Frame #%lu, bytes read: %zu", (unsigned long)f->buf.index,count) + + up(&cam->fileop_sem); + + return count; +} + + +static unsigned int sn9c102_poll(struct file *filp, poll_table *wait) +{ + struct sn9c102_device* cam = video_get_drvdata(video_devdata(filp)); + unsigned int mask = 0; + + if (down_interruptible(&cam->fileop_sem)) + return POLLERR; + + if (cam->state & DEV_DISCONNECTED) { + DBG(1, "Device not present") + goto error; + } + + if (cam->state & DEV_MISCONFIGURED) { + DBG(1, "The camera is misconfigured. 
Close and open it again.") + goto error; + } + + if (cam->io == IO_NONE) { + if (!sn9c102_request_buffers(cam, 2)) { + DBG(1, "poll() failed, not enough memory") + goto error; + } + cam->io = IO_READ; + cam->stream = STREAM_ON; + } + + if (cam->io == IO_READ) + sn9c102_queue_unusedframes(cam); + + poll_wait(filp, &cam->wait_frame, wait); + + if (!list_empty(&cam->outqueue)) + mask |= POLLIN | POLLRDNORM; + + up(&cam->fileop_sem); + + return mask; + +error: + up(&cam->fileop_sem); + return POLLERR; +} + + +static void sn9c102_vm_open(struct vm_area_struct* vma) +{ + struct sn9c102_frame_t* f = vma->vm_private_data; + f->vma_use_count++; +} + + +static void sn9c102_vm_close(struct vm_area_struct* vma) +{ + /* NOTE: buffers are not freed here */ + struct sn9c102_frame_t* f = vma->vm_private_data; + f->vma_use_count--; +} + + +static struct vm_operations_struct sn9c102_vm_ops = { + .open = sn9c102_vm_open, + .close = sn9c102_vm_close, +}; + + +static int sn9c102_mmap(struct file* filp, struct vm_area_struct *vma) +{ + struct sn9c102_device* cam = video_get_drvdata(video_devdata(filp)); + unsigned long size = vma->vm_end - vma->vm_start, + start = vma->vm_start, + pos, + page; + u32 i; + + if (down_interruptible(&cam->fileop_sem)) + return -ERESTARTSYS; + + if (cam->state & DEV_DISCONNECTED) { + DBG(1, "Device not present") + up(&cam->fileop_sem); + return -ENODEV; + } + + if (cam->state & DEV_MISCONFIGURED) { + DBG(1, "The camera is misconfigured. Close and open it again.") + up(&cam->fileop_sem); + return -EIO; + } + + if (cam->io != IO_MMAP || !(vma->vm_flags & VM_WRITE) || + size != PAGE_ALIGN(cam->frame[0].buf.length)) { + up(&cam->fileop_sem); + return -EINVAL; + } + + for (i = 0; i < cam->nbuffers; i++) { + if ((cam->frame[i].buf.m.offset>>PAGE_SHIFT) == vma->vm_pgoff) + break; + } + if (i == cam->nbuffers) { + up(&cam->fileop_sem); + return -EINVAL; + } + + pos = (unsigned long)cam->frame[i].bufmem; + while (size > 0) { /* size is page-aligned */ + page = kvirt_to_pa(pos); + if (remap_page_range(vma, start, page, PAGE_SIZE, + vma->vm_page_prot)) { + up(&cam->fileop_sem); + return -EAGAIN; + } + start += PAGE_SIZE; + pos += PAGE_SIZE; + size -= PAGE_SIZE; + } + + vma->vm_ops = &sn9c102_vm_ops; + vma->vm_flags &= ~VM_IO; /* not I/O memory */ + vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ + vma->vm_private_data = &cam->frame[i]; + + sn9c102_vm_open(vma); + + up(&cam->fileop_sem); + + return 0; +} + + +static int sn9c102_v4l2_ioctl(struct inode* inode, struct file* filp, + unsigned int cmd, void __user * arg) +{ + struct sn9c102_device* cam = video_get_drvdata(video_devdata(filp)); + + switch (cmd) { + + case VIDIOC_QUERYCAP: + { + struct v4l2_capability cap = { + .driver = "sn9c102", + .version = SN9C102_MODULE_VERSION_CODE, + .capabilities = V4L2_CAP_VIDEO_CAPTURE | + V4L2_CAP_READWRITE | + V4L2_CAP_STREAMING, + }; + + strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card)); + strlcpy(cap.bus_info, cam->dev.bus_id, sizeof(cap.bus_info)); + + if (copy_to_user(arg, &cap, sizeof(cap))) + return -EFAULT; + + return 0; + } + + case VIDIOC_ENUMINPUT: + { + struct v4l2_input i; + + if (copy_from_user(&i, arg, sizeof(i))) + return -EFAULT; + + if (i.index) + return -EINVAL; + + memset(&i, 0, sizeof(i)); + strcpy(i.name, "USB"); + + if (copy_to_user(arg, &i, sizeof(i))) + return -EFAULT; + + return 0; + } + + case VIDIOC_G_INPUT: + case VIDIOC_S_INPUT: + { + int index; + + if (copy_from_user(&index, arg, sizeof(index))) + return -EFAULT; + + if (index != 0) + return -EINVAL; + + 
return 0; + } + + case VIDIOC_QUERYCTRL: + { + struct sn9c102_sensor* s = cam->sensor; + struct v4l2_queryctrl qc; + u8 i, n; + + if (copy_from_user(&qc, arg, sizeof(qc))) + return -EFAULT; + + n = sizeof(s->qctrl) / sizeof(s->qctrl[0]); + for (i = 0; i < n; i++) + if (qc.id && qc.id == s->qctrl[i].id) { + memcpy(&qc, &(s->qctrl[i]), sizeof(qc)); + if (copy_to_user(arg, &qc, sizeof(qc))) + return -EFAULT; + return 0; + } + + return -EINVAL; + } + + case VIDIOC_G_CTRL: + { + struct sn9c102_sensor* s = cam->sensor; + struct v4l2_control ctrl; + int err = 0; + + if (!s->get_ctrl) + return -EINVAL; + + if (copy_from_user(&ctrl, arg, sizeof(ctrl))) + return -EFAULT; + + err = s->get_ctrl(cam, &ctrl); + + if (copy_to_user(arg, &ctrl, sizeof(ctrl))) + return -EFAULT; + + return err; + } + + case VIDIOC_S_CTRL: + { + struct sn9c102_sensor* s = cam->sensor; + struct v4l2_control ctrl; + u8 i, n; + int err = 0; + + if (!s->set_ctrl) + return -EINVAL; + + if (copy_from_user(&ctrl, arg, sizeof(ctrl))) + return -EFAULT; + + if ((err = s->set_ctrl(cam, &ctrl))) + return err; + + n = sizeof(s->qctrl) / sizeof(s->qctrl[0]); + for (i = 0; i < n; i++) + if (ctrl.id == s->qctrl[i].id) { + s->_qctrl[i].default_value = ctrl.value; + break; + } + + return 0; + } + + case VIDIOC_CROPCAP: + { + struct v4l2_cropcap* cc = &(cam->sensor->cropcap); + + cc->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + cc->pixelaspect.numerator = 1; + cc->pixelaspect.denominator = 1; + + if (copy_to_user(arg, cc, sizeof(*cc))) + return -EFAULT; + + return 0; + } + + case VIDIOC_G_CROP: + { + struct sn9c102_sensor* s = cam->sensor; + struct v4l2_crop crop = { + .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, + }; + + memcpy(&(crop.c), &(s->_rect), sizeof(struct v4l2_rect)); + + if (copy_to_user(arg, &crop, sizeof(crop))) + return -EFAULT; + + return 0; + } + + case VIDIOC_S_CROP: + { + struct sn9c102_sensor* s = cam->sensor; + struct v4l2_crop crop; + struct v4l2_rect* rect; + struct v4l2_rect* bounds = &(s->cropcap.bounds); + struct v4l2_pix_format* pix_format = &(s->pix_format); + u8 scale; + const enum sn9c102_stream_state stream = cam->stream; + const u32 nbuffers = cam->nbuffers; + u32 i; + int err = 0; + + if (copy_from_user(&crop, arg, sizeof(crop))) + return -EFAULT; + + rect = &(crop.c); + + if (crop.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + for (i = 0; i < cam->nbuffers; i++) + if (cam->frame[i].vma_use_count) { + DBG(3, "VIDIOC_S_CROP failed. " + "Unmap the buffers first.") + return -EINVAL; + } + + if (rect->width < 16) + rect->width = 16; + if (rect->height < 16) + rect->height = 16; + if (rect->width > bounds->width) + rect->width = bounds->width; + if (rect->height > bounds->height) + rect->height = bounds->height; + if (rect->left < bounds->left) + rect->left = bounds->left; + if (rect->top < bounds->top) + rect->top = bounds->top; + if (rect->left + rect->width > bounds->left + bounds->width) + rect->left = bounds->left+bounds->width - rect->width; + if (rect->top + rect->height > bounds->top + bounds->height) + rect->top = bounds->top+bounds->height - rect->height; + + rect->width &= ~15L; + rect->height &= ~15L; + + { /* calculate the scaling factor */ + u32 a, b; + a = rect->width * rect->height; + b = pix_format->width * pix_format->height; + scale = b ? (u8)((a / b) <= 1 ? 1 : ((a / b) == 3 ? 2 : + ((a / b) > 4 ? 
4 : (a / b)))) : 1; + } + + if (cam->stream == STREAM_ON) { + cam->stream = STREAM_INTERRUPT; + err = wait_event_interruptible + ( cam->wait_stream, + (cam->stream == STREAM_OFF) || + (cam->state & DEV_DISCONNECTED) ); + if (err) { + cam->state |= DEV_MISCONFIGURED; + DBG(1, "The camera is misconfigured. To use " + "it, close and open /dev/video%d " + "again.", cam->v4ldev->minor) + return err; + } + if (cam->state & DEV_DISCONNECTED) + return -ENODEV; + } + + if (copy_to_user(arg, &crop, sizeof(crop))) { + cam->stream = stream; + return -EFAULT; + } + + sn9c102_release_buffers(cam); + + err = sn9c102_set_crop(cam, rect); + if (s->set_crop) + err += s->set_crop(cam, rect); + err += sn9c102_set_scale(cam, scale); + + if (err) { /* atomic, no rollback in ioctl() */ + cam->state |= DEV_MISCONFIGURED; + DBG(1, "VIDIOC_S_CROP failed because of hardware " + "problems. To use the camera, close and open " + "/dev/video%d again.", cam->v4ldev->minor) + return err; + } + + s->pix_format.width = rect->width/scale; + s->pix_format.height = rect->height/scale; + memcpy(&(s->_rect), rect, sizeof(*rect)); + + if (nbuffers != sn9c102_request_buffers(cam, nbuffers)) { + cam->state |= DEV_MISCONFIGURED; + DBG(1, "VIDIOC_S_CROP failed because of not enough " + "memory. To use the camera, close and open " + "/dev/video%d again.", cam->v4ldev->minor) + return -ENOMEM; + } + + cam->stream = stream; + + return 0; + } + + case VIDIOC_ENUM_FMT: + { + struct sn9c102_sensor* s = cam->sensor; + struct v4l2_fmtdesc fmtd; + + if (copy_from_user(&fmtd, arg, sizeof(fmtd))) + return -EFAULT; + + if (fmtd.index != 0) + return -EINVAL; + + memset(&fmtd, 0, sizeof(fmtd)); + + fmtd.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + strcpy(fmtd.description, "bayer rgb"); + fmtd.pixelformat = s->pix_format.pixelformat; + + if (copy_to_user(arg, &fmtd, sizeof(fmtd))) + return -EFAULT; + + return 0; + } + + case VIDIOC_G_FMT: + { + struct v4l2_format format; + struct v4l2_pix_format* pfmt = &(cam->sensor->pix_format); + + if (copy_from_user(&format, arg, sizeof(format))) + return -EFAULT; + + if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + pfmt->bytesperline = (pfmt->width * pfmt->priv) / 8; + pfmt->sizeimage = pfmt->height * pfmt->bytesperline; + pfmt->field = V4L2_FIELD_NONE; + memcpy(&(format.fmt.pix), pfmt, sizeof(*pfmt)); + + if (copy_to_user(arg, &format, sizeof(format))) + return -EFAULT; + + return 0; + } + + case VIDIOC_TRY_FMT: + case VIDIOC_S_FMT: + { + struct sn9c102_sensor* s = cam->sensor; + struct v4l2_format format; + struct v4l2_pix_format* pix; + struct v4l2_pix_format* pfmt = &(s->pix_format); + struct v4l2_rect* bounds = &(s->cropcap.bounds); + struct v4l2_rect rect; + u8 scale; + const enum sn9c102_stream_state stream = cam->stream; + const u32 nbuffers = cam->nbuffers; + u32 i; + int err = 0; + + if (copy_from_user(&format, arg, sizeof(format))) + return -EFAULT; + + pix = &(format.fmt.pix); + + if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + + memcpy(&rect, &(s->_rect), sizeof(rect)); + + { /* calculate the scaling factor */ + u32 a, b; + a = rect.width * rect.height; + b = pix->width * pix->height; + scale = b ? (u8)((a / b) <= 1 ? 1 : ((a / b) == 3 ? 2 : + ((a / b) > 4 ? 
4 : (a / b)))) : 1; + } + + rect.width = scale * pix->width; + rect.height = scale * pix->height; + + if (rect.width < 16) + rect.width = 16; + if (rect.height < 16) + rect.height = 16; + if (rect.width > bounds->left + bounds->width - rect.left) + rect.width = bounds->left+bounds->width - rect.left; + if (rect.height > bounds->top + bounds->height - rect.top) + rect.height = bounds->top + bounds->height - rect.top; + + rect.width &= ~15L; + rect.height &= ~15L; + + pix->width = rect.width / scale; + pix->height = rect.height / scale; + + pix->pixelformat = pfmt->pixelformat; + pix->priv = pfmt->priv; /* bpp */ + pix->colorspace = pfmt->colorspace; + pix->bytesperline = (pix->width * pix->priv) / 8; + pix->sizeimage = pix->height * pix->bytesperline; + pix->field = V4L2_FIELD_NONE; + + if (cmd == VIDIOC_TRY_FMT) + return 0; + + for (i = 0; i < cam->nbuffers; i++) + if (cam->frame[i].vma_use_count) { + DBG(3, "VIDIOC_S_FMT failed. " + "Unmap the buffers first.") + return -EINVAL; + } + + if (cam->stream == STREAM_ON) { + cam->stream = STREAM_INTERRUPT; + err = wait_event_interruptible + ( cam->wait_stream, + (cam->stream == STREAM_OFF) || + (cam->state & DEV_DISCONNECTED) ); + if (err) { + cam->state |= DEV_MISCONFIGURED; + DBG(1, "The camera is misconfigured. To use " + "it, close and open /dev/video%d " + "again.", cam->v4ldev->minor) + return err; + } + if (cam->state & DEV_DISCONNECTED) + return -ENODEV; + } + + if (copy_to_user(arg, &format, sizeof(format))) { + cam->stream = stream; + return -EFAULT; + } + + sn9c102_release_buffers(cam); + + err = sn9c102_set_crop(cam, &rect); + if (s->set_crop) + err += s->set_crop(cam, &rect); + err += sn9c102_set_scale(cam, scale); + + if (err) { /* atomic, no rollback in ioctl() */ + cam->state |= DEV_MISCONFIGURED; + DBG(1, "VIDIOC_S_FMT failed because of hardware " + "problems. To use the camera, close and open " + "/dev/video%d again.", cam->v4ldev->minor) + return err; + } + + memcpy(pfmt, pix, sizeof(*pix)); + memcpy(&(s->_rect), &rect, sizeof(rect)); + + if (nbuffers != sn9c102_request_buffers(cam, nbuffers)) { + cam->state |= DEV_MISCONFIGURED; + DBG(1, "VIDIOC_S_FMT failed because of not enough " + "memory. To use the camera, close and open " + "/dev/video%d again.", cam->v4ldev->minor) + return -ENOMEM; + } + + cam->stream = stream; + + return 0; + } + + case VIDIOC_REQBUFS: + { + struct v4l2_requestbuffers rb; + u32 i; + int err; + + if (copy_from_user(&rb, arg, sizeof(rb))) + return -EFAULT; + + if (rb.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || + rb.memory != V4L2_MEMORY_MMAP) + return -EINVAL; + + if (cam->io == IO_READ) { + DBG(3, "Close and open the device again to choose " + "the mmap I/O method") + return -EINVAL; + } + + for (i = 0; i < cam->nbuffers; i++) + if (cam->frame[i].vma_use_count) { + DBG(3, "VIDIOC_REQBUFS failed. " + "Previous buffers are still mapped.") + return -EINVAL; + } + + if (cam->stream == STREAM_ON) { + cam->stream = STREAM_INTERRUPT; + err = wait_event_interruptible + ( cam->wait_stream, + (cam->stream == STREAM_OFF) || + (cam->state & DEV_DISCONNECTED) ); + if (err) { + cam->state |= DEV_MISCONFIGURED; + DBG(1, "The camera is misconfigured. 
To use " + "it, close and open /dev/video%d " + "again.", cam->v4ldev->minor) + return err; + } + if (cam->state & DEV_DISCONNECTED) + return -ENODEV; + } + + sn9c102_empty_framequeues(cam); + + sn9c102_release_buffers(cam); + if (rb.count) + rb.count = sn9c102_request_buffers(cam, rb.count); + + if (copy_to_user(arg, &rb, sizeof(rb))) { + sn9c102_release_buffers(cam); + cam->io = IO_NONE; + return -EFAULT; + } + + cam->io = rb.count ? IO_MMAP : IO_NONE; + + return 0; + } + + case VIDIOC_QUERYBUF: + { + struct v4l2_buffer b; + + if (copy_from_user(&b, arg, sizeof(b))) + return -EFAULT; + + if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || + b.index >= cam->nbuffers || cam->io != IO_MMAP) + return -EINVAL; + + memcpy(&b, &cam->frame[b.index].buf, sizeof(b)); + + if (cam->frame[b.index].vma_use_count) + b.flags |= V4L2_BUF_FLAG_MAPPED; + + if (cam->frame[b.index].state == F_DONE) + b.flags |= V4L2_BUF_FLAG_DONE; + else if (cam->frame[b.index].state != F_UNUSED) + b.flags |= V4L2_BUF_FLAG_QUEUED; + + if (copy_to_user(arg, &b, sizeof(b))) + return -EFAULT; + + return 0; + } + + case VIDIOC_QBUF: + { + struct v4l2_buffer b; + unsigned long lock_flags; + + if (copy_from_user(&b, arg, sizeof(b))) + return -EFAULT; + + if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || + b.index >= cam->nbuffers || cam->io != IO_MMAP) + return -EINVAL; + + if (cam->frame[b.index].state != F_UNUSED) + return -EINVAL; + + cam->frame[b.index].state = F_QUEUED; + + spin_lock_irqsave(&cam->queue_lock, lock_flags); + list_add_tail(&cam->frame[b.index].frame, &cam->inqueue); + spin_unlock_irqrestore(&cam->queue_lock, lock_flags); + + PDBGG("Frame #%lu queued", (unsigned long)b.index) + + return 0; + } + + case VIDIOC_DQBUF: + { + struct v4l2_buffer b; + struct sn9c102_frame_t *f; + unsigned long lock_flags; + int err = 0; + + if (copy_from_user(&b, arg, sizeof(b))) + return -EFAULT; + + if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io!= IO_MMAP) + return -EINVAL; + + if (list_empty(&cam->outqueue)) { + if (cam->stream == STREAM_OFF) + return -EINVAL; + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + err = wait_event_interruptible + ( cam->wait_frame, + (!list_empty(&cam->outqueue)) || + (cam->state & DEV_DISCONNECTED) ); + if (err) + return err; + if (cam->state & DEV_DISCONNECTED) + return -ENODEV; + } + + spin_lock_irqsave(&cam->queue_lock, lock_flags); + f = list_entry(cam->outqueue.next, struct sn9c102_frame_t, + frame); + list_del(&cam->outqueue); + spin_unlock_irqrestore(&cam->queue_lock, lock_flags); + + f->state = F_UNUSED; + + memcpy(&b, &f->buf, sizeof(b)); + if (f->vma_use_count) + b.flags |= V4L2_BUF_FLAG_MAPPED; + + if (copy_to_user(arg, &b, sizeof(b))) + return -EFAULT; + + PDBGG("Frame #%lu dequeued", (unsigned long)f->buf.index) + + return 0; + } + + case VIDIOC_STREAMON: + { + int type; + + if (copy_from_user(&type, arg, sizeof(type))) + return -EFAULT; + + if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP) + return -EINVAL; + + if (list_empty(&cam->inqueue)) + return -EINVAL; + + cam->stream = STREAM_ON; + + DBG(3, "Stream on") + + return 0; + } + + case VIDIOC_STREAMOFF: + { + int type, err; + + if (copy_from_user(&type, arg, sizeof(type))) + return -EFAULT; + + if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP) + return -EINVAL; + + if (cam->stream == STREAM_ON) { + cam->stream = STREAM_INTERRUPT; + err = wait_event_interruptible + ( cam->wait_stream, + (cam->stream == STREAM_OFF) || + (cam->state & DEV_DISCONNECTED) ); + if (err) { + cam->state |= DEV_MISCONFIGURED; + DBG(1, "The 
camera is misconfigured. To use " + "it, close and open /dev/video%d " + "again.", cam->v4ldev->minor) + return err; + } + if (cam->state & DEV_DISCONNECTED) + return -ENODEV; + } + + sn9c102_empty_framequeues(cam); + + DBG(3, "Stream off") + + return 0; + } + + case VIDIOC_G_STD: + case VIDIOC_S_STD: + case VIDIOC_QUERYSTD: + case VIDIOC_ENUMSTD: + case VIDIOC_QUERYMENU: + case VIDIOC_G_PARM: + case VIDIOC_S_PARM: + return -EINVAL; + + default: + return -EINVAL; + + } +} + + +static int sn9c102_ioctl(struct inode* inode, struct file* filp, + unsigned int cmd, unsigned long arg) +{ + struct sn9c102_device* cam = video_get_drvdata(video_devdata(filp)); + int err = 0; + + if (down_interruptible(&cam->fileop_sem)) + return -ERESTARTSYS; + + if (cam->state & DEV_DISCONNECTED) { + DBG(1, "Device not present") + up(&cam->fileop_sem); + return -ENODEV; + } + + if (cam->state & DEV_MISCONFIGURED) { + DBG(1, "The camera is misconfigured. Close and open it again.") + up(&cam->fileop_sem); + return -EIO; + } + + err = sn9c102_v4l2_ioctl(inode, filp, cmd, (void __user *)arg); + + up(&cam->fileop_sem); + + return err; +} + + +static struct file_operations sn9c102_fops = { + .owner = THIS_MODULE, + .open = sn9c102_open, + .release = sn9c102_release, + .ioctl = sn9c102_ioctl, + .read = sn9c102_read, + .poll = sn9c102_poll, + .mmap = sn9c102_mmap, + .llseek = no_llseek, +}; + +/*****************************************************************************/ + +/* It exists a single interface only. We do not need to validate anything. */ +static int +sn9c102_usb_probe(struct usb_interface* intf, const struct usb_device_id* id) +{ + struct usb_device *udev = interface_to_usbdev(intf); + struct sn9c102_device* cam; + static unsigned int dev_nr = 0; + unsigned int i, n; + int err = 0, r; + + n = sizeof(sn9c102_id_table)/sizeof(sn9c102_id_table[0]); + for (i = 0; i < n-1; i++) + if (udev->descriptor.idVendor==sn9c102_id_table[i].idVendor && + udev->descriptor.idProduct==sn9c102_id_table[i].idProduct) + break; + if (i == n-1) + return -ENODEV; + + if (!(cam = kmalloc(sizeof(struct sn9c102_device), GFP_KERNEL))) + return -ENOMEM; + memset(cam, 0, sizeof(*cam)); + + cam->usbdev = udev; + + memcpy(&cam->dev, &udev->dev, sizeof(struct device)); + + if (!(cam->control_buffer = kmalloc(8, GFP_KERNEL))) { + DBG(1, "kmalloc() failed") + err = -ENOMEM; + goto fail; + } + memset(cam->control_buffer, 0, 8); + + if (!(cam->v4ldev = video_device_alloc())) { + DBG(1, "video_device_alloc() failed") + err = -ENOMEM; + goto fail; + } + + init_MUTEX(&cam->dev_sem); + + r = sn9c102_read_reg(cam, 0x00); + if (r < 0 || r != 0x10) { + DBG(1, "Sorry, this is not a SN9C10[12] based camera " + "(vid/pid 0x%04X/0x%04X)", + sn9c102_id_table[i].idVendor,sn9c102_id_table[i].idProduct) + err = -ENODEV; + goto fail; + } + + DBG(2, "SN9C10[12] PC Camera Controller detected " + "(vid/pid 0x%04X/0x%04X)", + sn9c102_id_table[i].idVendor, sn9c102_id_table[i].idProduct) + + for (i = 0; sn9c102_sensor_table[i]; i++) { + err = sn9c102_sensor_table[i](cam); + if (!err) + break; + } + + if (!err && cam->sensor) { + DBG(2, "%s image sensor detected", cam->sensor->name) + DBG(3, "Support for %s maintained by %s", + cam->sensor->name, cam->sensor->maintainer) + } else { + DBG(1, "No supported image sensor detected") + err = -ENODEV; + goto fail; + } + + if (sn9c102_init(cam)) { + DBG(1, "Initialization failed. 
I will retry on open().") + cam->state |= DEV_MISCONFIGURED; + } + + strcpy(cam->v4ldev->name, "SN9C10[12] PC Camera"); + cam->v4ldev->owner = THIS_MODULE; + cam->v4ldev->type = VID_TYPE_CAPTURE | VID_TYPE_SCALES; + cam->v4ldev->hardware = VID_HARDWARE_SN9C102; + cam->v4ldev->fops = &sn9c102_fops; + cam->v4ldev->minor = video_nr[dev_nr]; + cam->v4ldev->release = video_device_release; + video_set_drvdata(cam->v4ldev, cam); + + down(&cam->dev_sem); + + err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER, + video_nr[dev_nr]); + if (err) { + DBG(1, "V4L2 device registration failed") + if (err == -ENFILE && video_nr[dev_nr] == -1) + DBG(1, "Free /dev/videoX node not found") + video_nr[dev_nr] = -1; + dev_nr = (dev_nr < SN9C102_MAX_DEVICES-1) ? dev_nr+1 : 0; + up(&cam->dev_sem); + goto fail; + } + + DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->minor) + + sn9c102_create_sysfs(cam); + + usb_set_intfdata(intf, cam); + + up(&cam->dev_sem); + + return 0; + +fail: + if (cam) { + kfree(cam->control_buffer); + if (cam->v4ldev) + video_device_release(cam->v4ldev); + kfree(cam); + } + return err; +} + + +static void sn9c102_usb_disconnect(struct usb_interface* intf) +{ + struct sn9c102_device* cam = usb_get_intfdata(intf); + + if (!cam) + return; + + down_write(&sn9c102_disconnect); + + down(&cam->dev_sem); + + DBG(2, "Disconnecting %s...", cam->v4ldev->name) + + wake_up_interruptible_all(&cam->open); + + if (cam->users) { + DBG(2, "Device /dev/video%d is open! Deregistration and " + "memory deallocation are deferred on close.", + cam->v4ldev->minor) + cam->state |= DEV_MISCONFIGURED; + sn9c102_stop_transfer(cam); + cam->state |= DEV_DISCONNECTED; + wake_up_interruptible(&cam->wait_frame); + wake_up_interruptible(&cam->wait_stream); + } else { + cam->state |= DEV_DISCONNECTED; + sn9c102_release_resources(cam); + } + + up(&cam->dev_sem); + + if (!cam->users) + kfree(cam); + + up_write(&sn9c102_disconnect); +} + + +static struct usb_driver sn9c102_usb_driver = { + .owner = THIS_MODULE, + .name = "sn9c102", + .id_table = sn9c102_id_table, + .probe = sn9c102_usb_probe, + .disconnect = sn9c102_usb_disconnect, +}; + +/*****************************************************************************/ + +static int __init sn9c102_module_init(void) +{ + int err = 0; + + KDBG(2, SN9C102_MODULE_NAME " v" SN9C102_MODULE_VERSION) + KDBG(3, SN9C102_MODULE_AUTHOR) + + if ((err = usb_register(&sn9c102_usb_driver))) + KDBG(1, "usb_register() failed") + + return err; +} + + +static void __exit sn9c102_module_exit(void) +{ + usb_deregister(&sn9c102_usb_driver); +} + + +module_init(sn9c102_module_init); +module_exit(sn9c102_module_exit); diff --git a/drivers/usb/media/sn9c102_pas106b.c b/drivers/usb/media/sn9c102_pas106b.c new file mode 100644 index 000000000..34bdfcf8e --- /dev/null +++ b/drivers/usb/media/sn9c102_pas106b.c @@ -0,0 +1,209 @@ +/*************************************************************************** + * Driver for PAS106B image sensor connected to the SN9C10[12] PC Camera * + * Controllers * + * * + * Copyright (C) 2004 by Luca Risolia * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. 
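[Editorial sketch, not part of the patch.] Once the probe routine above has registered the V4L2 node and created the sysfs files, frames can be grabbed with the plain read() method implemented by sn9c102_read() (two buffers are allocated on the first read). A minimal user-space sketch follows; the /dev/video0 node number and the 352x288 SBGGR8 frame size are assumptions taken from the PAS106B defaults further down, not something the driver guarantees.

/* Hypothetical sketch: query the capabilities advertised by VIDIOC_QUERYCAP
 * and grab one raw Bayer frame through the read() I/O path.
 * Build with: cc -o grab grab.c */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	unsigned char frame[352 * 288];	/* PAS106B default: 352x288, 8 bpp */
	int fd = open("/dev/video0", O_RDONLY);	/* node number is an assumption */
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver=%s card=%s\n", cap.driver, cap.card);

	/* the first read() switches the driver to IO_READ and starts the stream */
	n = read(fd, frame, sizeof(frame));
	printf("read %zd bytes of raw Bayer data\n", n);

	close(fd);
	return n > 0 ? 0 : 1;
}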
* + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program; if not, write to the Free Software * + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * + ***************************************************************************/ + +#include +#include "sn9c102_sensor.h" + + +static struct sn9c102_sensor pas106b; + + +static int pas106b_init(struct sn9c102_device* cam) +{ + int err = 0; + + err += sn9c102_write_reg(cam, 0x00, 0x10); + err += sn9c102_write_reg(cam, 0x00, 0x11); + err += sn9c102_write_reg(cam, 0x00, 0x14); + err += sn9c102_write_reg(cam, 0x20, 0x17); + err += sn9c102_write_reg(cam, 0x20, 0x19); + err += sn9c102_write_reg(cam, 0x09, 0x18); + + err += sn9c102_i2c_write(cam, 0x02, 0x0c); + err += sn9c102_i2c_write(cam, 0x03, 0x12); + err += sn9c102_i2c_write(cam, 0x04, 0x05); + err += sn9c102_i2c_write(cam, 0x05, 0x22); + err += sn9c102_i2c_write(cam, 0x06, 0xac); + err += sn9c102_i2c_write(cam, 0x07, 0x00); + err += sn9c102_i2c_write(cam, 0x08, 0x01); + err += sn9c102_i2c_write(cam, 0x0a, 0x00); + err += sn9c102_i2c_write(cam, 0x0b, 0x00); + err += sn9c102_i2c_write(cam, 0x0d, 0x00); + err += sn9c102_i2c_write(cam, 0x10, 0x06); + err += sn9c102_i2c_write(cam, 0x11, 0x06); + err += sn9c102_i2c_write(cam, 0x12, 0x00); + err += sn9c102_i2c_write(cam, 0x14, 0x02); + err += sn9c102_i2c_write(cam, 0x13, 0x01); + + msleep(400); + + return err; +} + + +static int pas106b_get_ctrl(struct sn9c102_device* cam, + struct v4l2_control* ctrl) +{ + switch (ctrl->id) { + case V4L2_CID_RED_BALANCE: + return (ctrl->value = sn9c102_i2c_read(cam, 0x0c))<0 ? -EIO:0; + case V4L2_CID_BLUE_BALANCE: + return (ctrl->value = sn9c102_i2c_read(cam, 0x09))<0 ? -EIO:0; + case V4L2_CID_BRIGHTNESS: + return (ctrl->value = sn9c102_i2c_read(cam, 0x0e))<0 ? 
-EIO:0; + default: + return -EINVAL; + } +} + + +static int pas106b_set_ctrl(struct sn9c102_device* cam, + const struct v4l2_control* ctrl) +{ + int err = 0; + + switch (ctrl->id) { + case V4L2_CID_RED_BALANCE: + err += sn9c102_i2c_write(cam, 0x0c, ctrl->value & 0x1f); + break; + case V4L2_CID_BLUE_BALANCE: + err += sn9c102_i2c_write(cam, 0x09, ctrl->value & 0x1f); + break; + case V4L2_CID_BRIGHTNESS: + err += sn9c102_i2c_write(cam, 0x0e, ctrl->value & 0x1f); + break; + default: + return -EINVAL; + } + err += sn9c102_i2c_write(cam, 0x13, 0x01); + + return err; +} + + +static int pas106b_set_crop(struct sn9c102_device* cam, + const struct v4l2_rect* rect) +{ + struct sn9c102_sensor* s = &pas106b; + int err = 0; + u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 4, + v_start = (u8)(rect->top - s->cropcap.bounds.top) + 3; + + err += sn9c102_write_reg(cam, h_start, 0x12); + err += sn9c102_write_reg(cam, v_start, 0x13); + + return err; +} + + +static struct sn9c102_sensor pas106b = { + .name = "PAS106B", + .maintainer = "Luca Risolia ", + .frequency = SN9C102_I2C_400KHZ | SN9C102_I2C_100KHZ, + .interface = SN9C102_I2C_2WIRES, + .slave_read_id = 0x40, + .slave_write_id = 0x40, + .init = &pas106b_init, + .qctrl = { + { + .id = V4L2_CID_RED_BALANCE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "red balance", + .minimum = 0x00, + .maximum = 0x1f, + .step = 0x01, + .default_value = 0x03, + .flags = 0, + }, + { + .id = V4L2_CID_BLUE_BALANCE, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "blue balance", + .minimum = 0x00, + .maximum = 0x1f, + .step = 0x01, + .default_value = 0x02, + .flags = 0, + }, + { + .id = V4L2_CID_BRIGHTNESS, + .type = V4L2_CTRL_TYPE_INTEGER, + .name = "brightness", + .minimum = 0x00, + .maximum = 0x1f, + .step = 0x01, + .default_value = 0x06, + .flags = 0, + }, + }, + .get_ctrl = &pas106b_get_ctrl, + .set_ctrl = &pas106b_set_ctrl, + .cropcap = { + .bounds = { + .left = 0, + .top = 0, + .width = 352, + .height = 288, + }, + .defrect = { + .left = 0, + .top = 0, + .width = 352, + .height = 288, + }, + }, + .set_crop = &pas106b_set_crop, + .pix_format = { + .width = 352, + .height = 288, + .pixelformat = V4L2_PIX_FMT_SBGGR8, + .priv = 8, /* we use this field as 'bits per pixel' */ + } +}; + + +int sn9c102_probe_pas106b(struct sn9c102_device* cam) +{ + int r0 = 0, r1 = 0, err = 0; + unsigned int pid = 0; + + /* Minimal initialization to enable the I2C communication + NOTE: do NOT change the values! 
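[Editorial sketch, not part of the patch.] The three controls exported by the PAS106B entry above (red balance, blue balance, brightness, each 0x00-0x1f) are reached through the standard VIDIOC_QUERYCTRL/VIDIOC_S_CTRL path handled by the core module's ioctl switch. A hedged user-space sketch, assuming the camera registered as /dev/video0:

/* Query the brightness control the sensor advertises and set it to its
 * maximum (0x1f on the PAS106B). The device node is an assumption. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_queryctrl qc;
	struct v4l2_control ctrl;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&qc, 0, sizeof(qc));
	qc.id = V4L2_CID_BRIGHTNESS;
	if (ioctl(fd, VIDIOC_QUERYCTRL, &qc) == 0)
		printf("%s: min=%d max=%d default=%d\n",
		       qc.name, qc.minimum, qc.maximum, qc.default_value);

	ctrl.id = V4L2_CID_BRIGHTNESS;
	ctrl.value = qc.maximum;	/* 0x1f on this sensor */
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) != 0)
		perror("VIDIOC_S_CTRL");

	close(fd);
	return 0;
}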
*/ + err += sn9c102_write_reg(cam, 0x01, 0x01); /* sensor power down */ + err += sn9c102_write_reg(cam, 0x00, 0x01); /* sensor power on */ + err += sn9c102_write_reg(cam, 0x28, 0x17); /* sensor clock at 48 MHz */ + if (err) + return -EIO; + + r0 = sn9c102_i2c_try_read(cam, &pas106b, 0x00); + r1 = sn9c102_i2c_try_read(cam, &pas106b, 0x01); + + if (r0 < 0 || r1 < 0) + return -EIO; + + pid = (r0 << 11) | ((r1 & 0xf0) >> 4); + if (pid != 0x007) + return -ENODEV; + + sn9c102_attach_sensor(cam, &pas106b); + + return 0; +} diff --git a/drivers/usb/media/sn9c102_sensor.h b/drivers/usb/media/sn9c102_sensor.h new file mode 100644 index 000000000..54a3499b5 --- /dev/null +++ b/drivers/usb/media/sn9c102_sensor.h @@ -0,0 +1,270 @@ +/*************************************************************************** + * API for image sensors connected to the SN9C10[12] PC Camera Controllers * + * * + * Copyright (C) 2004 by Luca Risolia * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program; if not, write to the Free Software * + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * + ***************************************************************************/ + +#ifndef _SN9C102_SENSOR_H_ +#define _SN9C102_SENSOR_H_ + +#include +#include +#include +#include +#include +#include + +struct sn9c102_device; +struct sn9c102_sensor; + +/*****************************************************************************/ + +/* OVERVIEW. + This is a small interface that allows you to add support for any CCD/CMOS + image sensors connected to the SN9C10X bridges. The entire API is documented + below. In the most general case, to support a sensor there are three steps + you have to follow: + 1) define the main "sn9c102_sensor" structure by setting the basic fields; + 2) write a probing function to be called by the core module when the USB + camera is recognized, then add both the USB ids and the name of that + function to the two corresponding tables SENSOR_TABLE and ID_TABLE (see + below); + 3) implement the methods that you want/need (and fill the rest of the main + structure accordingly). + "sn9c102_pas106b.c" is an example of all this stuff. Remember that you do + NOT need to touch the source code of the core module for the things to work + properly, unless you find bugs or flaws in it. Finally, do not forget to + read the V4L2 API for completeness. */ + +/*****************************************************************************/ + +/* Probing functions: on success, you must attach the sensor to the camera + by calling sn9c102_attach_sensor() provided below. + To enable the I2C communication, you might need to perform a really basic + initialization of the SN9C10X chip by using the write function declared + ahead. + Functions must return 0 on success, the appropriate error otherwise. 
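[Editorial sketch, not part of the patch.] As an illustration of the probing contract described here, below is how a probe routine for a hypothetical sensor might look. The sensor name, the register writes, the ID register address and the expected ID value are all invented for the example; only the calling pattern mirrors sn9c102_probe_pas106b() above.

/* Sketch only: "xy123" is a made-up sensor. Minimal bridge setup, try-read
 * an ID register through the not-yet-attached sensor, attach on a match. */
static struct sn9c102_sensor xy123;	/* definition filled in elsewhere */

int sn9c102_probe_xy123(struct sn9c102_device* cam)
{
	int id, err = 0;

	err += sn9c102_write_reg(cam, 0x01, 0x01);	/* hypothetical setup */
	err += sn9c102_write_reg(cam, 0x28, 0x17);	/* hypothetical clock */
	if (err)
		return -EIO;

	id = sn9c102_i2c_try_read(cam, &xy123, 0x00);	/* invented ID register */
	if (id < 0)
		return -EIO;
	if (id != 0x42)					/* invented ID value */
		return -ENODEV;

	sn9c102_attach_sensor(cam, &xy123);

	return 0;
}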
*/ +extern int sn9c102_probe_pas106b(struct sn9c102_device* cam); +extern int sn9c102_probe_tas5110c1b(struct sn9c102_device* cam); +extern int sn9c102_probe_tas5130d1b(struct sn9c102_device* cam); + +/* Add the above entries to this table. Be sure to add the entry in the right + place, since, on failure, the next probing routine is called according to + the order of the list below, from top to bottom */ +#define SN9C102_SENSOR_TABLE \ +static int (*sn9c102_sensor_table[])(struct sn9c102_device*) = { \ + &sn9c102_probe_pas106b, /* strong detection based on SENSOR vid/pid */\ + &sn9c102_probe_tas5110c1b, /* detection based on USB pid/vid */ \ + &sn9c102_probe_tas5130d1b, /* detection based on USB pid/vid */ \ + NULL, \ +}; + +/* Attach a probed sensor to the camera. */ +extern void +sn9c102_attach_sensor(struct sn9c102_device* cam, + struct sn9c102_sensor* sensor); + +/* Each SN9C10X camera has proper PID/VID identifiers. Add them here in case.*/ +#define SN9C102_ID_TABLE \ +static const struct usb_device_id sn9c102_id_table[] = { \ + { USB_DEVICE(0xc45, 0x6001), }, \ + { USB_DEVICE(0xc45, 0x6005), }, /* TAS5110C1B */ \ + { USB_DEVICE(0xc45, 0x6009), }, /* PAS106B */ \ + { USB_DEVICE(0xc45, 0x600d), }, /* PAS106B */ \ + { USB_DEVICE(0xc45, 0x6024), }, \ + { USB_DEVICE(0xc45, 0x6025), }, /* TAS5130D1B Maybe also TAS5110C1B */\ + { USB_DEVICE(0xc45, 0x6028), }, /* Maybe PAS202B */ \ + { USB_DEVICE(0xc45, 0x6029), }, \ + { USB_DEVICE(0xc45, 0x602a), }, /* Maybe HV7131[D|E1] */ \ + { USB_DEVICE(0xc45, 0x602c), }, /* Maybe OV7620 */ \ + { USB_DEVICE(0xc45, 0x6030), }, /* Maybe MI03 */ \ + { USB_DEVICE(0xc45, 0x8001), }, \ + { } \ +}; + +/*****************************************************************************/ + +/* Read/write routines: they always return -1 on error, 0 or the read value + otherwise. NOTE that a real read operation is not supported by the SN9C10X + chip for some of its registers. To work around this problem, a pseudo-read + call is provided instead: it returns the last successfully written value + on the register (0 if it has never been written), the usual -1 on error. */ + +/* The "try" I2C I/O versions are used when probing the sensor */ +extern int sn9c102_i2c_try_write(struct sn9c102_device*,struct sn9c102_sensor*, + u8 address, u8 value); +extern int sn9c102_i2c_try_read(struct sn9c102_device*,struct sn9c102_sensor*, + u8 address); + +/* This must be used if and only if the sensor doesn't implement the standard + I2C protocol, like the TASC sensors. There a number of good reasons why you + must use the single-byte versions of this function: do not abuse. It writes + n bytes, from data0 to datan, (registers 0x09 - 0x09+n of SN9C10X chip) */ +extern int sn9c102_i2c_try_raw_write(struct sn9c102_device* cam, + struct sn9c102_sensor* sensor, u8 n, + u8 data0, u8 data1, u8 data2, u8 data3, + u8 data4, u8 data5); + +/* To be used after the sensor struct has been attached to the camera struct */ +extern int sn9c102_i2c_write(struct sn9c102_device*, u8 address, u8 value); +extern int sn9c102_i2c_read(struct sn9c102_device*, u8 address); + +/* I/O on registers in the bridge. Could be used by the sensor methods too */ +extern int sn9c102_write_reg(struct sn9c102_device*, u8 value, u16 index); +extern int sn9c102_pread_reg(struct sn9c102_device*, u16 index); + +/* NOTE: there are no debugging functions here. 
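[Editorial sketch, not part of the patch.] Hooking a new probe routine such as the hypothetical one above into the core means one new extern declaration, one new entry in SN9C102_SENSOR_TABLE (keeping the NULL terminator last and strong-detection probes first), and, if the camera has its own product id, one new line in SN9C102_ID_TABLE. Everything below except the existing entries is invented for illustration:

/* Sketch: wiring the invented xy123 probe into the tables. */
extern int sn9c102_probe_xy123(struct sn9c102_device* cam);

#define SN9C102_SENSOR_TABLE                                              \
static int (*sn9c102_sensor_table[])(struct sn9c102_device*) = {          \
	&sn9c102_probe_pas106b,    /* strong detection, keep first */      \
	&sn9c102_probe_xy123,      /* invented sensor */                   \
	&sn9c102_probe_tas5110c1b,                                         \
	&sn9c102_probe_tas5130d1b,                                         \
	NULL,                      /* terminator must stay last */         \
};

/* and one extra id in SN9C102_ID_TABLE, e.g.: */
/*	{ USB_DEVICE(0xc45, 0x60ff), },    invented PID for the xy123 */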
To uniform the output you must + use the dev_info()/dev_warn()/dev_err() macros defined in device.h, already + included here, the argument being the struct device 'dev' of the sensor + structure. Do NOT use these macros before the sensor is attached or the + kernel will crash! However you should not need to notify the user about + common errors or other messages, since this is done by the master module. */ + +/*****************************************************************************/ + +enum sn9c102_i2c_frequency { /* sensors may support both the frequencies */ + SN9C102_I2C_100KHZ = 0x01, + SN9C102_I2C_400KHZ = 0x02, +}; + +enum sn9c102_i2c_interface { + SN9C102_I2C_2WIRES, + SN9C102_I2C_3WIRES, +}; + +struct sn9c102_sensor { + char name[32], /* sensor name */ + maintainer[64]; /* name of the mantainer */ + + /* These sensor capabilities must be provided if the SN9C10X controller + needs to communicate through the sensor serial interface by using + at least one of the i2c functions available */ + enum sn9c102_i2c_frequency frequency; + enum sn9c102_i2c_interface interface; + + /* These identifiers must be provided if the image sensor implements + the standard I2C protocol. TASC sensors don't, although they have a + serial interface: so this is a case where the "raw" I2C version + could be helpful. */ + u8 slave_read_id, slave_write_id; /* reg. 0x09 */ + + /* NOTE: Where not noted,most of the functions below are not mandatory. + Set to null if you do not implement them. If implemented, + they must return 0 on success, the proper error otherwise. */ + + int (*init)(struct sn9c102_device* cam); + /* This function is called after the sensor has been attached. + It should be used to initialize the sensor only, but may also + configure part of the SN9C10X chip if necessary. You don't need to + setup picture settings like brightness, contrast, etc.. here, if + the corrisponding controls are implemented (see below), since + they are adjusted in the core driver by calling the set_ctrl() + method after init(), where the arguments are the default values + specified in the v4l2_queryctrl list of supported controls; + Same suggestions apply for other settings, _if_ the corresponding + methods are present; if not, the initialization must configure the + sensor according to the default configuration structures below. */ + + struct v4l2_queryctrl qctrl[V4L2_CID_LASTP1-V4L2_CID_BASE]; + /* Optional list of default controls, defined as indicated in the + V4L2 API. Menu type controls are not handled by this interface. */ + + int (*get_ctrl)(struct sn9c102_device* cam, struct v4l2_control* ctrl); + int (*set_ctrl)(struct sn9c102_device* cam, + const struct v4l2_control* ctrl); + /* You must implement at least the set_ctrl method if you have defined + the list above. The returned value must follow the V4L2 + specifications for the VIDIOC_G|C_CTRL ioctls. V4L2_CID_H|VCENTER + are not supported by this driver, so do not implement them. Also, + passed values are NOT checked to see if they are out of bounds. */ + + struct v4l2_cropcap cropcap; + /* Think the image sensor as a grid of R,G,B monochromatic pixels + disposed according to a particular Bayer pattern, which describes + the complete array of pixels, from (0,0) to (xmax, ymax). We will + use this coordinate system from now on. It is assumed the sensor + chip can be programmed to capture/transmit a subsection of that + array of pixels: we will call this subsection "active window". 
+ It is not always true that the largest achievable active window can + cover the whole array of pixels. The V4L2 API defines another + area called "source rectangle", which, in turn, is a subrectangle of + the active window. The SN9C10X chip is always programmed to read the + source rectangle. + The bounds of both the active window and the source rectangle are + specified in the cropcap substructures 'bounds' and 'defrect'. + By default, the source rectangle should cover the largest possible + area. Again, it is not always true that the largest source rectangle + can cover the entire active window, although it is a rare case for + the hardware we have. The bounds of the source rectangle _must_ be + multiple of 16 and must use the same coordinate system as indicated + before; their centers shall align initially. + If necessary, the sensor chip must be initialized during init() to + set the bounds of the active sensor window; however, by default, it + usually covers the largest achievable area (maxwidth x maxheight) + of pixels, so no particular initialization is needed, if you have + defined the correct default bounds in the structures. + See the V4L2 API for further details. + NOTE: once you have defined the bounds of the active window + (struct cropcap.bounds) you must not change them.anymore. + Only 'bounds' and 'defrect' fields are mandatory, other fields + will be ignored. */ + + int (*set_crop)(struct sn9c102_device* cam, + const struct v4l2_rect* rect); + /* To be called on VIDIOC_C_SETCROP. The core module always calls a + default routine which configures the appropriate SN9C10X regs (also + scaling), but you may need to override/adjust specific stuff. + 'rect' contains width and height values that are multiple of 16: in + case you override the default function, you always have to program + the chip to match those values; on error return the corresponding + error code without rolling back. + NOTE: in case, you must program the SN9C10X chip to get rid of + blank pixels or blank lines at the _start_ of each line or + frame after each HSYNC or VSYNC, so that the image starts with + real RGB data (see regs 0x12,0x13) (having set H_SIZE and, + V_SIZE you don't have to care about blank pixels or blank + lines at the end of each line or frame). */ + + struct v4l2_pix_format pix_format; + /* What you have to define here are: initial 'width' and 'height' of + the target rectangle, the bayer 'pixelformat' and 'priv' which we'll + be used to indicate the number of bits per pixel, 8 or 9. + Nothing more. + NOTE 1: both 'width' and 'height' _must_ be either 1/1 or 1/2 or 1/4 + of cropcap.defrect.width and cropcap.defrect.height. I + suggest 1/1. + NOTE 2: as said above, you have to program the SN9C10X chip to get + rid of any blank pixels, so that the output of the sensor + matches the RGB bayer sequence (i.e. BGBGBG...GRGRGR). */ + + const struct device* dev; + /* This is the argument for dev_err(), dev_info() and dev_warn(). It + is used for debugging purposes. You must not access the struct + before the sensor is attached. */ + + const struct usb_device* usbdev; + /* Points to the usb_device struct after the sensor is attached. + Do not touch unless you know what you are doing. */ + + /* Do NOT write to the data below, it's READ ONLY. 
It is used by the + core module to store successfully updated values of the above + settings, for rollbacks..etc..in case of errors during atomic I/O */ + struct v4l2_queryctrl _qctrl[V4L2_CID_LASTP1-V4L2_CID_BASE]; + struct v4l2_rect _rect; +}; + +#endif /* _SN9C102_SENSOR_H_ */ diff --git a/drivers/usb/media/sn9c102_tas5110c1b.c b/drivers/usb/media/sn9c102_tas5110c1b.c new file mode 100644 index 000000000..d67470013 --- /dev/null +++ b/drivers/usb/media/sn9c102_tas5110c1b.c @@ -0,0 +1,98 @@ +/*************************************************************************** + * Driver for TAS5110C1B image sensor connected to the SN9C10[12] PC * + * Camera Controllers * + * * + * Copyright (C) 2004 by Luca Risolia * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program; if not, write to the Free Software * + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * + ***************************************************************************/ + +#include "sn9c102_sensor.h" + + +static struct sn9c102_sensor tas5110c1b; + + +static int tas5110c1b_init(struct sn9c102_device* cam) +{ + int err = 0; + + err += sn9c102_write_reg(cam, 0x01, 0x01); + err += sn9c102_write_reg(cam, 0x44, 0x01); + err += sn9c102_write_reg(cam, 0x00, 0x10); + err += sn9c102_write_reg(cam, 0x00, 0x11); + err += sn9c102_write_reg(cam, 0x00, 0x14); + err += sn9c102_write_reg(cam, 0x60, 0x17); + err += sn9c102_write_reg(cam, 0x06, 0x18); + err += sn9c102_write_reg(cam, 0xcb, 0x19); + + return err; +} + + +static int tas5110c1b_set_crop(struct sn9c102_device* cam, + const struct v4l2_rect* rect) +{ + struct sn9c102_sensor* s = &tas5110c1b; + int err = 0; + u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 69, + v_start = (u8)(rect->top - s->cropcap.bounds.top) + 9; + + err += sn9c102_write_reg(cam, h_start, 0x12); + err += sn9c102_write_reg(cam, v_start, 0x13); + + return err; +} + + +static struct sn9c102_sensor tas5110c1b = { + .name = "TAS5110C1B", + .maintainer = "Luca Risolia ", + .init = &tas5110c1b_init, + .cropcap = { + .bounds = { + .left = 0, + .top = 0, + .width = 352, + .height = 288, + }, + .defrect = { + .left = 0, + .top = 0, + .width = 352, + .height = 288, + }, + }, + .set_crop = &tas5110c1b_set_crop, + .pix_format = { + .width = 352, + .height = 288, + .pixelformat = V4L2_PIX_FMT_SBGGR8, + .priv = 8, + } +}; + + +int sn9c102_probe_tas5110c1b(struct sn9c102_device* cam) +{ + /* This sensor has no identifiers, so let's attach it anyway */ + sn9c102_attach_sensor(cam, &tas5110c1b); + + /* At the moment, only devices whose PID is 0x6005 have this sensor */ + if (tas5110c1b.usbdev->descriptor.idProduct != 0x6005) + return -ENODEV; + + return 0; +} diff --git a/drivers/usb/media/sn9c102_tas5130d1b.c b/drivers/usb/media/sn9c102_tas5130d1b.c new file mode 100644 index 000000000..5f1b0f9a6 --- /dev/null +++ b/drivers/usb/media/sn9c102_tas5130d1b.c @@ -0,0 +1,120 @@ 
+/*************************************************************************** + * Driver for TAS5130D1B image sensor connected to the SN9C10[12] PC * + * Camera Controllers * + * * + * Copyright (C) 2004 by Luca Risolia * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program; if not, write to the Free Software * + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * + ***************************************************************************/ + +#include "sn9c102_sensor.h" + + +static struct sn9c102_sensor tas5130d1b; + + +static int tas5130d1b_init(struct sn9c102_device* cam) +{ + int err = 0; + + err += sn9c102_write_reg(cam, 0x01, 0x01); + err += sn9c102_write_reg(cam, 0x20, 0x17); + err += sn9c102_write_reg(cam, 0x04, 0x01); + err += sn9c102_write_reg(cam, 0x01, 0x10); + err += sn9c102_write_reg(cam, 0x00, 0x11); + err += sn9c102_write_reg(cam, 0x00, 0x14); + err += sn9c102_write_reg(cam, 0x60, 0x17); + err += sn9c102_write_reg(cam, 0x07, 0x18); + err += sn9c102_write_reg(cam, 0x33, 0x19); + + err += sn9c102_i2c_try_raw_write(cam, &tas5130d1b, 4, 0x11, 0x00, 0x40, + 0x47, 0, 0); + err += sn9c102_i2c_try_raw_write(cam, &tas5130d1b, 4, 0x11, 0x02, 0x20, + 0xa9, 0, 0); + err += sn9c102_i2c_try_raw_write(cam, &tas5130d1b, 4, 0x11, 0x00, 0xc0, + 0x49, 0, 0); + err += sn9c102_i2c_try_raw_write(cam, &tas5130d1b, 4, 0x11, 0x02, 0x20, + 0x6c, 0, 0); + err += sn9c102_i2c_try_raw_write(cam, &tas5130d1b, 4, 0x11, 0x00, 0xc0, + 0x08, 0, 0); + err += sn9c102_i2c_try_raw_write(cam, &tas5130d1b, 4, 0x11, 0x00, 0x20, + 0x00, 0, 0); + + err += sn9c102_write_reg(cam, 0x63, 0x19); + + return err; +} + + +static int tas5130d1b_set_crop(struct sn9c102_device* cam, + const struct v4l2_rect* rect) +{ + struct sn9c102_sensor* s = &tas5130d1b; + int err = 0; + u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 104, + v_start = (u8)(rect->top - s->cropcap.bounds.top) + 12; + + err += sn9c102_write_reg(cam, h_start, 0x12); + err += sn9c102_write_reg(cam, v_start, 0x13); + + return err; +} + + +static struct sn9c102_sensor tas5130d1b = { + .name = "TAS5130D1B", + .maintainer = "Luca Risolia ", + .frequency = SN9C102_I2C_100KHZ, + .interface = SN9C102_I2C_3WIRES, + .init = &tas5130d1b_init, + .cropcap = { + .bounds = { + .left = 0, + .top = 0, + .width = 640, + .height = 480, + }, + .defrect = { + .left = 0, + .top = 0, + .width = 640, + .height = 480, + }, + }, + .set_crop = &tas5130d1b_set_crop, + .pix_format = { + .width = 640, + .height = 480, + .pixelformat = V4L2_PIX_FMT_SBGGR8, + .priv = 8, + } +}; + + +int sn9c102_probe_tas5130d1b(struct sn9c102_device* cam) +{ + /* This sensor has no identifiers, so let's attach it anyway */ + sn9c102_attach_sensor(cam, &tas5130d1b); + + /* At the moment, only devices whose PID is 0x6025 have this sensor */ + if (tas5130d1b.usbdev->descriptor.idProduct != 0x6025) + return -ENODEV; + + dev_info(tas5130d1b.dev, "TAS5130D1B detected, but the support for it " + "is disabled at the moment 
- needs further " + "testing -\n"); + + return -ENODEV; +} diff --git a/drivers/usb/media/w9968cf_vpp.h b/drivers/usb/media/w9968cf_vpp.h new file mode 100644 index 000000000..3f5317dc4 --- /dev/null +++ b/drivers/usb/media/w9968cf_vpp.h @@ -0,0 +1,43 @@ +/*************************************************************************** + * Interface for video post-processing functions for the W996[87]CF driver * + * for Linux. * + * * + * Copyright (C) 2002-2004 by Luca Risolia * + * * + * This program is free software; you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation; either version 2 of the License, or * + * (at your option) any later version. * + * * + * This program is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * + * GNU General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this program; if not, write to the Free Software * + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * + ***************************************************************************/ + +#ifndef _W9968CF_VPP_H_ +#define _W9968CF_VPP_H_ + +#include +#include + +struct w9968cf_vpp_t { + struct module* owner; + int (*check_headers)(const unsigned char*, const unsigned long); + int (*decode)(const char*, const unsigned long, const unsigned, + const unsigned, char*); + void (*swap_yuvbytes)(void*, unsigned long); + void (*uyvy_to_rgbx)(u8*, unsigned long, u8*, u16, u8); + void (*scale_up)(u8*, u8*, u16, u16, u16, u16, u16); + + u8 busy; /* read-only flag: module is/is not in use */ +}; + +extern int w9968cf_vppmod_register(struct w9968cf_vpp_t*); +extern int w9968cf_vppmod_deregister(struct w9968cf_vpp_t*); + +#endif /* _W9968CF_VPP_H_ */ diff --git a/drivers/usb/misc/phidgetservo.c b/drivers/usb/misc/phidgetservo.c new file mode 100644 index 000000000..9018774ae --- /dev/null +++ b/drivers/usb/misc/phidgetservo.c @@ -0,0 +1,327 @@ +/* + * USB PhidgetServo driver 1.0 + * + * Copyright (C) 2004 Sean Young + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This is a driver for the USB PhidgetServo version 2.0 and 3.0 servo + * controllers available at: http://www.phidgets.com/ + * + * Note that the driver takes input as: degrees.minutes + * -23 < degrees < 203 + * 0 < minutes < 59 + * + * CAUTION: Generally you should use 0 < degrees < 180 as anything else + * is probably beyond the range of your servo and may damage it. 
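[Editorial sketch, not part of the patch.] The degrees.minutes input format described above (-23 to 203 degrees, 0 to 59 minutes) is written to the per-servo sysfs attributes (servo0..servo3) that the probe routine further down creates on the USB interface. A hedged user-space sketch; the sysfs path depends on where the interface is bound and is an assumption here:

/* Position servo 0 at 45 degrees 30 minutes and read the value back. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

#define SERVO0 "/sys/bus/usb/devices/1-1:1.0/servo0"	/* assumed path */

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open(SERVO0, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	write(fd, "45.30", 5);		/* degrees.minutes */
	close(fd);

	fd = open(SERVO0, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("servo0 now reads: %s", buf);	/* expected "45.30\n" */
	}
	close(fd);
	return 0;
}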
+ */ + +#include +#ifdef CONFIG_USB_DEBUG +#define DEBUG 1 +#endif +#include +#include +#include +#include +#include +#include + +#define DRIVER_AUTHOR "Sean Young " +#define DRIVER_DESC "USB PhidgetServo Driver" + +#define VENDOR_ID_GLAB 0x06c2 +#define DEVICE_ID_4MOTOR_SERVO_30 0x0038 +#define DEVICE_ID_1MOTOR_SERVO_30 0x0039 + +#define VENDOR_ID_WISEGROUP 0x0925 +#define DEVICE_ID_1MOTOR_SERVO_20 0x8101 +#define DEVICE_ID_4MOTOR_SERVO_20 0x8104 + +static struct usb_device_id id_table[] = { + {USB_DEVICE(VENDOR_ID_GLAB, DEVICE_ID_4MOTOR_SERVO_30)}, + {USB_DEVICE(VENDOR_ID_GLAB, DEVICE_ID_1MOTOR_SERVO_30)}, + {USB_DEVICE(VENDOR_ID_WISEGROUP, DEVICE_ID_4MOTOR_SERVO_20)}, + {USB_DEVICE(VENDOR_ID_WISEGROUP, DEVICE_ID_1MOTOR_SERVO_20)}, + {} +}; + +MODULE_DEVICE_TABLE(usb, id_table); + +struct phidget_servo { + struct usb_device *udev; + int version; + int quad_servo; + int pulse[4]; + int degrees[4]; + int minutes[4]; +}; + +static void +change_position_v30(struct phidget_servo *servo, int servo_no, int degrees, + int minutes) +{ + int retval; + unsigned char *buffer; + + buffer = kmalloc(6, GFP_KERNEL); + if (!buffer) { + dev_err(&servo->udev->dev, "%s - out of memory\n", + __FUNCTION__); + return; + } + + /* + * pulse = 0 - 4095 + * angle = 0 - 180 degrees + * + * pulse = angle * 10.6 + 243.8 + */ + servo->pulse[servo_no] = ((degrees*60 + minutes)*106 + 2438*60)/600; + servo->degrees[servo_no]= degrees; + servo->minutes[servo_no]= minutes; + + /* + * The PhidgetServo v3.0 is controlled by sending 6 bytes, + * 4 * 12 bits for each servo. + * + * low = lower 8 bits pulse + * high = higher 4 bits pulse + * + * offset bits + * +---+-----------------+ + * | 0 | low 0 | + * +---+--------+--------+ + * | 1 | high 1 | high 0 | + * +---+--------+--------+ + * | 2 | low 1 | + * +---+-----------------+ + * | 3 | low 2 | + * +---+--------+--------+ + * | 4 | high 3 | high 2 | + * +---+--------+--------+ + * | 5 | low 3 | + * +---+-----------------+ + */ + + buffer[0] = servo->pulse[0] & 0xff; + buffer[1] = (servo->pulse[0] >> 8 & 0x0f) + | (servo->pulse[1] >> 4 & 0xf0); + buffer[2] = servo->pulse[1] & 0xff; + buffer[3] = servo->pulse[2] & 0xff; + buffer[4] = (servo->pulse[2] >> 8 & 0x0f) + | (servo->pulse[3] >> 4 & 0xf0); + buffer[5] = servo->pulse[3] & 0xff; + + dev_dbg(&servo->udev->dev, + "data: %02x %02x %02x %02x %02x %02x\n", + buffer[0], buffer[1], buffer[2], + buffer[3], buffer[4], buffer[5]); + + retval = usb_control_msg(servo->udev, + usb_sndctrlpipe(servo->udev, 0), + 0x09, 0x21, 0x0200, 0x0000, buffer, 6, 2 * HZ); + if (retval != 6) + dev_err(&servo->udev->dev, "retval = %d\n", retval); + kfree(buffer); +} + +static void +change_position_v20(struct phidget_servo *servo, int servo_no, int degrees, + int minutes) +{ + int retval; + unsigned char *buffer; + + buffer = kmalloc(2, GFP_KERNEL); + if (!buffer) { + dev_err(&servo->udev->dev, "%s - out of memory\n", + __FUNCTION__); + return; + } + + /* + * angle = 0 - 180 degrees + * pulse = angle + 23 + */ + servo->pulse[servo_no]= degrees + 23; + servo->degrees[servo_no]= degrees; + servo->minutes[servo_no]= 0; + + /* + * The PhidgetServo v2.0 is controlled by sending two bytes. The + * first byte is the servo number xor'ed with 2: + * + * servo 0 = 2 + * servo 1 = 3 + * servo 2 = 0 + * servo 3 = 1 + * + * The second byte is the position. 
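+ *
+ * Worked example (illustrative values): moving servo 1 to 45 degrees sends
+ * the two bytes 0x03 0x44, since 1 ^ 2 = 3 and 45 + 23 = 68 = 0x44.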
+ */ + + buffer[0] = servo_no ^ 2; + buffer[1] = servo->pulse[servo_no]; + + dev_dbg(&servo->udev->dev, "data: %02x %02x\n", buffer[0], buffer[1]); + + retval = usb_control_msg(servo->udev, + usb_sndctrlpipe(servo->udev, 0), + 0x09, 0x21, 0x0200, 0x0000, buffer, 2, 2 * HZ); + if (retval != 2) + dev_err(&servo->udev->dev, "retval = %d\n", retval); + kfree(buffer); +} + +#define show_set(value) \ +static ssize_t set_servo##value (struct device *dev, \ + const char *buf, size_t count) \ +{ \ + int degrees, minutes; \ + struct usb_interface *intf = to_usb_interface (dev); \ + struct phidget_servo *servo = usb_get_intfdata (intf); \ + \ + minutes = 0; \ + /* must at least convert degrees */ \ + if (sscanf (buf, "%d.%d", &degrees, &minutes) < 1) { \ + return -EINVAL; \ + } \ + \ + if (degrees < -23 || degrees > (180 + 23) || \ + minutes < 0 || minutes > 59) { \ + return -EINVAL; \ + } \ + \ + if (servo->version >= 3) \ + change_position_v30 (servo, value, degrees, minutes); \ + else \ + change_position_v20 (servo, value, degrees, minutes); \ + \ + return count; \ +} \ + \ +static ssize_t show_servo##value (struct device *dev, char *buf) \ +{ \ + struct usb_interface *intf = to_usb_interface (dev); \ + struct phidget_servo *servo = usb_get_intfdata (intf); \ + \ + return sprintf (buf, "%d.%02d\n", servo->degrees[value], \ + servo->minutes[value]); \ +} \ +static DEVICE_ATTR(servo##value, S_IWUGO | S_IRUGO, \ + show_servo##value, set_servo##value); + +show_set(0); +show_set(1); +show_set(2); +show_set(3); + +static int +servo_probe(struct usb_interface *interface, const struct usb_device_id *id) +{ + struct usb_device *udev = interface_to_usbdev(interface); + struct phidget_servo *dev = NULL; + + dev = kmalloc(sizeof (struct phidget_servo), GFP_KERNEL); + if (dev == NULL) { + dev_err(&interface->dev, "%s - out of memory\n", __FUNCTION__); + return -ENOMEM; + } + memset(dev, 0x00, sizeof (*dev)); + + dev->udev = usb_get_dev(udev); + switch (udev->descriptor.idVendor) { + case VENDOR_ID_WISEGROUP: + dev->version = 2; + break; + case VENDOR_ID_GLAB: + dev->version = 3; + break; + } + switch (udev->descriptor.idProduct) { + case DEVICE_ID_4MOTOR_SERVO_20: + case DEVICE_ID_4MOTOR_SERVO_30: + dev->quad_servo = 1; + break; + case DEVICE_ID_1MOTOR_SERVO_20: + case DEVICE_ID_1MOTOR_SERVO_30: + dev->quad_servo = 0; + break; + } + + usb_set_intfdata(interface, dev); + + device_create_file(&interface->dev, &dev_attr_servo0); + if (dev->quad_servo) { + device_create_file(&interface->dev, &dev_attr_servo1); + device_create_file(&interface->dev, &dev_attr_servo2); + device_create_file(&interface->dev, &dev_attr_servo3); + } + + dev_info(&interface->dev, "USB %d-Motor PhidgetServo v%d.0 attached\n", + dev->quad_servo ? 4 : 1, dev->version); + if (dev->version == 2) + dev_info(&interface->dev, + "WARNING: v2.0 not tested! Please report if it works.\n"); + + return 0; +} + +static void +servo_disconnect(struct usb_interface *interface) +{ + struct phidget_servo *dev; + + dev = usb_get_intfdata(interface); + usb_set_intfdata(interface, NULL); + + device_remove_file(&interface->dev, &dev_attr_servo0); + if (dev->quad_servo) { + device_remove_file(&interface->dev, &dev_attr_servo1); + device_remove_file(&interface->dev, &dev_attr_servo2); + device_remove_file(&interface->dev, &dev_attr_servo3); + } + + usb_put_dev(dev->udev); + + kfree(dev); + + dev_info(&interface->dev, "USB %d-Motor PhidgetServo v%d.0 detached\n", + dev->quad_servo ?
4 : 1, dev->version); +} + +static struct usb_driver servo_driver = { + .owner = THIS_MODULE, + .name = "phidgetservo", + .probe = servo_probe, + .disconnect = servo_disconnect, + .id_table = id_table +}; + +static int __init +phidget_servo_init(void) +{ + int retval = 0; + + retval = usb_register(&servo_driver); + if (retval) + err("usb_register failed. Error number %d", retval); + + return retval; +} + +static void __exit +phidget_servo_exit(void) +{ + usb_deregister(&servo_driver); +} + +module_init(phidget_servo_init); +module_exit(phidget_servo_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/video/asiliantfb.c b/drivers/video/asiliantfb.c new file mode 100644 index 000000000..034ec2996 --- /dev/null +++ b/drivers/video/asiliantfb.c @@ -0,0 +1,620 @@ +/* + * drivers/video/asiliantfb.c + * frame buffer driver for Asiliant 69000 chip + * Copyright (C) 2001-2003 Saito.K & Jeanne + * + * from driver/video/chipsfb.c and, + * + * drivers/video/asiliantfb.c -- frame buffer device for + * Asiliant 69030 chip (formerly Intel, formerly Chips & Technologies) + * Author: apc@agelectronics.co.uk + * Copyright (C) 2000 AG Electronics + * Note: the data sheets don't seem to be available from Asiliant. + * They are available by searching developer.intel.com, but are not otherwise + * linked to. + * + * This driver should be portable with minimal effort to the 69000 display + * chip, and to the twin-display mode of the 69030. + * Contains code from Thomas Hhenleitner (thanks) + * + * Derived from the CT65550 driver chipsfb.c: + * Copyright (C) 1998 Paul Mackerras + * ...which was derived from the Powermac "chips" driver: + * Copyright (C) 1997 Fabio Riccardi. + * And from the frame buffer device for Open Firmware-initialized devices: + * Copyright (C) 1997 Geert Uytterhoeven. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Built in clock of the 69030 */ +const unsigned Fref = 14318180; + +#define mmio_base (p->screen_base + 0x400000) + +#define mm_write_ind(num, val, ap, dp) do { \ + writeb((num), mmio_base + (ap)); writeb((val), mmio_base + (dp)); \ +} while (0) + +static void mm_write_xr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x7ac, 0x7ad); +} +#define write_xr(num, val) mm_write_xr(p, num, val) + +static void mm_write_fr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x7a0, 0x7a1); +} +#define write_fr(num, val) mm_write_fr(p, num, val) + +static void mm_write_cr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x7a8, 0x7a9); +} +#define write_cr(num, val) mm_write_cr(p, num, val) + +static void mm_write_gr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x79c, 0x79d); +} +#define write_gr(num, val) mm_write_gr(p, num, val) + +static void mm_write_sr(struct fb_info *p, u8 reg, u8 data) +{ + mm_write_ind(reg, data, 0x788, 0x789); +} +#define write_sr(num, val) mm_write_sr(p, num, val) + +static void mm_write_ar(struct fb_info *p, u8 reg, u8 data) +{ + readb(mmio_base + 0x7b4); + mm_write_ind(reg, data, 0x780, 0x780); +} +#define write_ar(num, val) mm_write_ar(p, num, val) + +/* + * Exported functions + */ +int asiliantfb_init(void); + +static int asiliantfb_pci_init(struct pci_dev *dp, const struct pci_device_id *); +static int asiliantfb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); +static int asiliantfb_set_par(struct fb_info *info); +static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *info); + +static struct fb_ops asiliantfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = asiliantfb_check_var, + .fb_set_par = asiliantfb_set_par, + .fb_setcolreg = asiliantfb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_cursor = soft_cursor, +}; + +/* Calculate the ratios for the dot clocks without using a single long long + * value */ +static void asiliant_calc_dclk2(u32 *ppixclock, u8 *dclk2_m, u8 *dclk2_n, u8 *dclk2_div) +{ + unsigned pixclock = *ppixclock; + unsigned Ftarget = 1000000 * (1000000 / pixclock); + unsigned n; + unsigned best_error = 0xffffffff; + unsigned best_m = 0xffffffff, + best_n = 0xffffffff; + unsigned ratio; + unsigned remainder; + unsigned char divisor = 0; + + /* Calculate the frequency required. This is hard enough. */ + ratio = 1000000 / pixclock; + remainder = 1000000 % pixclock; + Ftarget = 1000000 * ratio + (1000000 * remainder) / pixclock; + + while (Ftarget < 100000000) { + divisor += 0x10; + Ftarget <<= 1; + } + + ratio = Ftarget / Fref; + remainder = Ftarget % Fref; + + /* This expresses the constraint that 150kHz <= Fref/n <= 5Mhz, + * together with 3 <= n <= 257. */ + for (n = 3; n <= 257; n++) { + unsigned m = n * ratio + (n * remainder) / Fref; + + /* 3 <= m <= 257 */ + if (m >= 3 && m <= 257) { + unsigned new_error = ((Ftarget * n) - (Fref * m)) >= 0 ? 
+ ((Ftarget * n) - (Fref * m)) : ((Fref * m) - (Ftarget * n)); + if (new_error < best_error) { + best_n = n; + best_m = m; + best_error = new_error; + } + } + /* But if VLD = 4, then 4m <= 1028 */ + else if (m <= 1028) { + /* remember there are still only 8-bits of precision in m, so + * avoid over-optimistic error calculations */ + unsigned new_error = ((Ftarget * n) - (Fref * (m & ~3))) >= 0 ? + ((Ftarget * n) - (Fref * (m & ~3))) : ((Fref * (m & ~3)) - (Ftarget * n)); + if (new_error < best_error) { + best_n = n; + best_m = m; + best_error = new_error; + } + } + } + if (best_m > 257) + best_m >>= 2; /* divide m by 4, and leave VCO loop divide at 4 */ + else + divisor |= 4; /* or set VCO loop divide to 1 */ + *dclk2_m = best_m - 2; + *dclk2_n = best_n - 2; + *dclk2_div = divisor; + *ppixclock = pixclock; + return; +} + +static void asiliant_set_timing(struct fb_info *p) +{ + unsigned hd = p->var.xres / 8; + unsigned hs = (p->var.xres + p->var.right_margin) / 8; + unsigned he = (p->var.xres + p->var.right_margin + p->var.hsync_len) / 8; + unsigned ht = (p->var.left_margin + p->var.xres + p->var.right_margin + p->var.hsync_len) / 8; + unsigned vd = p->var.yres; + unsigned vs = p->var.yres + p->var.lower_margin; + unsigned ve = p->var.yres + p->var.lower_margin + p->var.vsync_len; + unsigned vt = p->var.upper_margin + p->var.yres + p->var.lower_margin + p->var.vsync_len; + unsigned wd = (p->var.xres_virtual * ((p->var.bits_per_pixel+7)/8)) / 8; + + if ((p->var.xres == 640) && (p->var.yres == 480) && (p->var.pixclock == 39722)) { + write_fr(0x01, 0x02); /* LCD */ + } else { + write_fr(0x01, 0x01); /* CRT */ + } + + write_cr(0x11, (ve - 1) & 0x0f); + write_cr(0x00, (ht - 5) & 0xff); + write_cr(0x01, hd - 1); + write_cr(0x02, hd); + write_cr(0x03, ((ht - 1) & 0x1f) | 0x80); + write_cr(0x04, hs); + write_cr(0x05, (((ht - 1) & 0x20) <<2) | (he & 0x1f)); + write_cr(0x3c, (ht - 1) & 0xc0); + write_cr(0x06, (vt - 2) & 0xff); + write_cr(0x30, (vt - 2) >> 8); + write_cr(0x07, 0x00); + write_cr(0x08, 0x00); + write_cr(0x09, 0x00); + write_cr(0x10, (vs - 1) & 0xff); + write_cr(0x32, ((vs - 1) >> 8) & 0xf); + write_cr(0x11, ((ve - 1) & 0x0f) | 0x80); + write_cr(0x12, (vd - 1) & 0xff); + write_cr(0x31, ((vd - 1) & 0xf00) >> 8); + write_cr(0x13, wd & 0xff); + write_cr(0x41, (wd & 0xf00) >> 8); + write_cr(0x15, (vs - 1) & 0xff); + write_cr(0x33, ((vs - 1) >> 8) & 0xf); + write_cr(0x38, ((ht - 5) & 0x100) >> 8); + write_cr(0x16, (vt - 1) & 0xff); + write_cr(0x18, 0x00); + + if (p->var.xres == 640) { + writeb(0xc7, mmio_base + 0x784); /* set misc output reg */ + } else { + writeb(0x07, mmio_base + 0x784); /* set misc output reg */ + } +} + +static int asiliantfb_check_var(struct fb_var_screeninfo *var, + struct fb_info *p) +{ + unsigned long Ftarget, ratio, remainder; + + ratio = 1000000 / var->pixclock; + remainder = 1000000 % var->pixclock; + Ftarget = 1000000 * ratio + (1000000 * remainder) / var->pixclock; + + /* First check the constraint that the maximum post-VCO divisor is 32, + * and the maximum Fvco is 220MHz */ + if (Ftarget > 220000000 || Ftarget < 3125000) { + printk(KERN_ERR "asiliantfb dotclock must be between 3.125 and 220MHz\n"); + return -ENXIO; + } + var->xres_virtual = var->xres; + var->yres_virtual = var->yres; + + if (var->bits_per_pixel == 24) { + var->red.offset = 16; + var->green.offset = 8; + var->blue.offset = 0; + var->red.length = var->blue.length = var->green.length = 8; + } else if (var->bits_per_pixel == 16) { + switch (var->red.offset) { + case 11: + var->green.length = 6; + 
break; + case 10: + var->green.length = 5; + break; + default: + return -EINVAL; + } + var->green.offset = 5; + var->blue.offset = 0; + var->red.length = var->blue.length = 5; + } else if (var->bits_per_pixel == 8) { + var->red.offset = var->green.offset = var->blue.offset = 0; + var->red.length = var->green.length = var->blue.length = 8; + } + return 0; +} + +static int asiliantfb_set_par(struct fb_info *p) +{ + u8 dclk2_m; /* Holds m-2 value for register */ + u8 dclk2_n; /* Holds n-2 value for register */ + u8 dclk2_div; /* Holds divisor bitmask */ + + /* Set pixclock */ + asiliant_calc_dclk2(&p->var.pixclock, &dclk2_m, &dclk2_n, &dclk2_div); + + /* Set color depth */ + if (p->var.bits_per_pixel == 24) { + write_xr(0x81, 0x16); /* 24 bit packed color mode */ + write_xr(0x82, 0x00); /* Disable palettes */ + write_xr(0x20, 0x20); /* 24 bit blitter mode */ + } else if (p->var.bits_per_pixel == 16) { + if (p->var.red.offset == 11) + write_xr(0x81, 0x15); /* 16 bit color mode */ + else + write_xr(0x81, 0x14); /* 15 bit color mode */ + write_xr(0x82, 0x00); /* Disable palettes */ + write_xr(0x20, 0x10); /* 16 bit blitter mode */ + } else if (p->var.bits_per_pixel == 8) { + write_xr(0x0a, 0x02); /* Linear */ + write_xr(0x81, 0x12); /* 8 bit color mode */ + write_xr(0x82, 0x00); /* Graphics gamma enable */ + write_xr(0x20, 0x00); /* 8 bit blitter mode */ + } + p->fix.line_length = p->var.xres * (p->var.bits_per_pixel >> 3); + p->fix.visual = (p->var.bits_per_pixel == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; + write_xr(0xc4, dclk2_m); + write_xr(0xc5, dclk2_n); + write_xr(0xc7, dclk2_div); + /* Set up the CR registers */ + asiliant_set_timing(p); + return 0; +} + +static int asiliantfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *p) +{ + if (regno > 255) + return 1; + red >>= 8; + green >>= 8; + blue >>= 8; + + /* Set hardware palete */ + writeb(regno, mmio_base + 0x790); + udelay(1); + writeb(red, mmio_base + 0x791); + writeb(green, mmio_base + 0x791); + writeb(blue, mmio_base + 0x791); + + switch(p->var.bits_per_pixel) { + case 15: + if (regno < 16) { + ((u32 *)(p->pseudo_palette))[regno] = + ((red & 0xf8) << 7) | + ((green & 0xf8) << 2) | + ((blue & 0xf8) >> 3); + } + break; + case 16: + if (regno < 16) { + ((u32 *)(p->pseudo_palette))[regno] = + ((red & 0xf8) << 8) | + ((green & 0xfc) << 3) | + ((blue & 0xf8) >> 3); + } + break; + case 24: + if (regno < 24) { + ((u32 *)(p->pseudo_palette))[regno] = + (red << 16) | + (green << 8) | + (blue); + } + break; + } + return 0; +} + +struct chips_init_reg { + unsigned char addr; + unsigned char data; +}; + +#define N_ELTS(x) (sizeof(x) / sizeof(x[0])) + +static struct chips_init_reg chips_init_sr[] = +{ + {0x00, 0x03}, /* Reset register */ + {0x01, 0x01}, /* Clocking mode */ + {0x02, 0x0f}, /* Plane mask */ + {0x04, 0x0e} /* Memory mode */ +}; + +static struct chips_init_reg chips_init_gr[] = +{ + {0x03, 0x00}, /* Data rotate */ + {0x05, 0x00}, /* Graphics mode */ + {0x06, 0x01}, /* Miscellaneous */ + {0x08, 0x00} /* Bit mask */ +}; + +static struct chips_init_reg chips_init_ar[] = +{ + {0x10, 0x01}, /* Mode control */ + {0x11, 0x00}, /* Overscan */ + {0x12, 0x0f}, /* Memory plane enable */ + {0x13, 0x00} /* Horizontal pixel panning */ +}; + +static struct chips_init_reg chips_init_cr[] = +{ + {0x0c, 0x00}, /* Start address high */ + {0x0d, 0x00}, /* Start address low */ + {0x40, 0x00}, /* Extended Start Address */ + {0x41, 0x00}, /* Extended Start Address */ + {0x14, 0x00}, /* Underline location 
*/ + {0x17, 0xe3}, /* CRT mode control */ + {0x70, 0x00} /* Interlace control */ +}; + + +static struct chips_init_reg chips_init_fr[] = +{ + {0x01, 0x02}, + {0x03, 0x08}, + {0x08, 0xcc}, + {0x0a, 0x08}, + {0x18, 0x00}, + {0x1e, 0x80}, + {0x40, 0x83}, + {0x41, 0x00}, + {0x48, 0x13}, + {0x4d, 0x60}, + {0x4e, 0x0f}, + + {0x0b, 0x01}, + + {0x21, 0x51}, + {0x22, 0x1d}, + {0x23, 0x5f}, + {0x20, 0x4f}, + {0x34, 0x00}, + {0x24, 0x51}, + {0x25, 0x00}, + {0x27, 0x0b}, + {0x26, 0x00}, + {0x37, 0x80}, + {0x33, 0x0b}, + {0x35, 0x11}, + {0x36, 0x02}, + {0x31, 0xea}, + {0x32, 0x0c}, + {0x30, 0xdf}, + {0x10, 0x0c}, + {0x11, 0xe0}, + {0x12, 0x50}, + {0x13, 0x00}, + {0x16, 0x03}, + {0x17, 0xbd}, + {0x1a, 0x00}, +}; + + +static struct chips_init_reg chips_init_xr[] = +{ + {0xce, 0x00}, /* set default memory clock */ + {0xcc, 200 }, /* MCLK ratio M */ + {0xcd, 18 }, /* MCLK ratio N */ + {0xce, 0x90}, /* MCLK divisor = 2 */ + + {0xc4, 209 }, + {0xc5, 118 }, + {0xc7, 32 }, + {0xcf, 0x06}, + {0x09, 0x01}, /* IO Control - CRT controller extensions */ + {0x0a, 0x02}, /* Frame buffer mapping */ + {0x0b, 0x01}, /* PCI burst write */ + {0x40, 0x03}, /* Memory access control */ + {0x80, 0x82}, /* Pixel pipeline configuration 0 */ + {0x81, 0x12}, /* Pixel pipeline configuration 1 */ + {0x82, 0x08}, /* Pixel pipeline configuration 2 */ + + {0xd0, 0x0f}, + {0xd1, 0x01}, +}; + +static void __init chips_hw_init(struct fb_info *p) +{ + int i; + + for (i = 0; i < N_ELTS(chips_init_xr); ++i) + write_xr(chips_init_xr[i].addr, chips_init_xr[i].data); + write_xr(0x81, 0x12); + write_xr(0x82, 0x08); + write_xr(0x20, 0x00); + for (i = 0; i < N_ELTS(chips_init_sr); ++i) + write_sr(chips_init_sr[i].addr, chips_init_sr[i].data); + for (i = 0; i < N_ELTS(chips_init_gr); ++i) + write_gr(chips_init_gr[i].addr, chips_init_gr[i].data); + for (i = 0; i < N_ELTS(chips_init_ar); ++i) + write_ar(chips_init_ar[i].addr, chips_init_ar[i].data); + /* Enable video output in attribute index register */ + writeb(0x20, mmio_base + 0x780); + for (i = 0; i < N_ELTS(chips_init_cr); ++i) + write_cr(chips_init_cr[i].addr, chips_init_cr[i].data); + for (i = 0; i < N_ELTS(chips_init_fr); ++i) + write_fr(chips_init_fr[i].addr, chips_init_fr[i].data); +} + +static struct fb_fix_screeninfo asiliantfb_fix __initdata = { + .id = "Asiliant 69000", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_PSEUDOCOLOR, + .accel = FB_ACCEL_NONE, + .line_length = 640, + .smem_len = 0x200000, /* 2MB */ +}; + +static struct fb_var_screeninfo asiliantfb_var __initdata = { + .xres = 640, + .yres = 480, + .xres_virtual = 640, + .yres_virtual = 480, + .bits_per_pixel = 8, + .red = { .length = 8 }, + .green = { .length = 8 }, + .blue = { .length = 8 }, + .height = -1, + .width = -1, + .vmode = FB_VMODE_NONINTERLACED, + .pixclock = 39722, + .left_margin = 48, + .right_margin = 16, + .upper_margin = 33, + .lower_margin = 10, + .hsync_len = 96, + .vsync_len = 2, +}; + +static void __init init_asiliant(struct fb_info *p, unsigned long addr) +{ + p->fix = asiliantfb_fix; + p->fix.smem_start = addr; + p->var = asiliantfb_var; + p->fbops = &asiliantfb_ops; + p->flags = FBINFO_FLAG_DEFAULT; + + fb_alloc_cmap(&p->cmap, 256, 0); + + if (register_framebuffer(p) < 0) { + printk(KERN_ERR "C&T 69000 framebuffer failed to register\n"); + return; + } + + printk(KERN_INFO "fb%d: Asiliant 69000 frame buffer (%dK RAM detected)\n", + p->node, p->fix.smem_len / 1024); + + writeb(0xff, mmio_base + 0x78c); + chips_hw_init(p); +} + +static int __devinit +asiliantfb_pci_init(struct pci_dev *dp, const 
struct pci_device_id *ent) +{ + unsigned long addr, size; + struct fb_info *p; + + if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) + return -ENODEV; + addr = pci_resource_start(dp, 0); + size = pci_resource_len(dp, 0); + if (addr == 0) + return -ENODEV; + if (!request_mem_region(addr, size, "asiliantfb")) + return -EBUSY; + + p = framebuffer_alloc(sizeof(u32) * 256, &dp->dev); + if (!p) { + release_mem_region(addr, size); + return -ENOMEM; + } + p->pseudo_palette = p->par; + p->par = NULL; + + p->screen_base = ioremap(addr, 0x800000); + if (p->screen_base == NULL) { + release_mem_region(addr, size); + framebuffer_release(p); + return -ENOMEM; + } + + pci_write_config_dword(dp, 4, 0x02800083); + writeb(3, addr + 0x400784); + + init_asiliant(p, addr); + + /* Clear the entire framebuffer */ + memset(p->screen_base, 0, 0x200000); + + pci_set_drvdata(dp, p); + return 0; +} + +static void __devexit asiliantfb_remove(struct pci_dev *dp) +{ + struct fb_info *p = pci_get_drvdata(dp); + + unregister_framebuffer(p); + iounmap(p->screen_base); + release_mem_region(pci_resource_start(dp, 0), pci_resource_len(dp, 0)); + pci_set_drvdata(dp, NULL); + framebuffer_release(p); +} + +static struct pci_device_id asiliantfb_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_CT, PCI_DEVICE_ID_CT_69000, PCI_ANY_ID, PCI_ANY_ID }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, asiliantfb_pci_tbl); + +static struct pci_driver asiliantfb_driver = { + .name = "asiliantfb", + .id_table = asiliantfb_pci_tbl, + .probe = asiliantfb_pci_init, + .remove = __devexit_p(asiliantfb_remove), +}; + +int __init asiliantfb_init(void) +{ + return pci_module_init(&asiliantfb_driver); +} + +static void __exit asiliantfb_exit(void) +{ + pci_unregister_driver(&asiliantfb_driver); +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c new file mode 100644 index 000000000..2afc4148b --- /dev/null +++ b/drivers/video/gbefb.c @@ -0,0 +1,1200 @@ +/* + * SGI GBE frame buffer driver + * + * Copyright (C) 1999 Silicon Graphics, Inc. - Jeffrey Newquist + * Copyright (C) 2002 Vivien Chappelier + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_X86 +#include +#endif +#ifdef CONFIG_MIPS +#include +#endif +#include +#include +#include + +#include