From 760483db2132f99fd32f6342e5751cbd30bc29b9 Mon Sep 17 00:00:00 2001 From: Planet-Lab Support Date: Wed, 15 Sep 2004 17:00:02 +0000 Subject: [PATCH] This commit was manufactured by cvs2svn to create branch 'ckrm'. --- Documentation/arm/Sharp-LH/SDRAM | 51 + Documentation/arm/VFP/release-notes.txt | 55 + Documentation/device-mapper/dm-io.txt | 75 + Documentation/device-mapper/kcopyd.txt | 47 + Documentation/device-mapper/linear.txt | 61 + Documentation/device-mapper/striped.txt | 58 + Documentation/device-mapper/zero.txt | 37 + Documentation/hpet.txt | 298 ++ Documentation/powerpc/mpc52xx.txt | 48 + arch/arm/boot/bootp/initrd.S | 6 + arch/arm/boot/bootp/kernel.S | 6 + arch/arm/boot/compressed/piggy.S | 6 + arch/arm/mach-integrator/clock.c | 138 + arch/arm/mach-integrator/clock.h | 25 + arch/arm/mach-sa1100/collie.c | 144 + arch/arm/mach-versatile/clock.c | 146 + arch/arm/mach-versatile/clock.h | 25 + arch/i386/lib/bitops.c | 70 + arch/mips/configs/ocelot_g_defconfig | 592 ++++ arch/mips/pci/fixup-jaguar.c | 42 + arch/mips/pci/fixup-ocelot-c.c | 39 + arch/mips/pci/fixup-ocelot-g.c | 35 + arch/mips/pci/fixup-tb0219.c | 64 + arch/mips/pci/ops-marvell.c | 93 + arch/mips/pci/ops-titan-ht.c | 125 + arch/mips/pci/ops-vr41xx.c | 126 + arch/mips/pmc-sierra/yosemite/dbg_io.c | 184 ++ arch/mips/pmc-sierra/yosemite/py-console.c | 130 + arch/ppc/configs/ads8272_defconfig | 583 ++++ arch/ppc/configs/lite5200_defconfig | 436 +++ arch/ppc/configs/rpx8260_defconfig | 556 ++++ arch/ppc/kernel/head_e500.S | 1329 ++++++++ arch/ppc/lib/rheap.c | 692 ++++ arch/ppc/platforms/85xx/Kconfig | 44 + arch/ppc/platforms/85xx/Makefile | 7 + arch/ppc/platforms/85xx/mpc8540_ads.c | 238 ++ arch/ppc/platforms/85xx/mpc8540_ads.h | 30 + arch/ppc/platforms/85xx/sbc8560.c | 249 ++ arch/ppc/platforms/85xx/sbc8560.h | 48 + arch/ppc/platforms/85xx/sbc85xx.c | 215 ++ arch/ppc/platforms/rpx8260.h | 74 + arch/ppc/syslib/cpm2_common.c | 205 ++ arch/ppc/syslib/m8260_pci_erratum9.c | 474 +++ arch/ppc/syslib/ppc85xx_setup.c | 341 ++ arch/ppc/syslib/ppc85xx_setup.h | 67 + arch/s390/mm/mmap.c | 83 + arch/sh/boards/renesas/systemh/Makefile | 13 + arch/sh/boards/se/7300/Makefile | 7 + arch/sh/cchips/voyagergx/Makefile | 8 + arch/sh/cchips/voyagergx/consistent.c | 126 + arch/sh/configs/rts7751r2d_defconfig | 809 +++++ arch/sh/kernel/cpu/adc.c | 36 + arch/sh64/boot/Makefile | 20 + arch/sh64/boot/compressed/Makefile | 46 + arch/sh64/boot/compressed/cache.c | 39 + arch/sh64/boot/compressed/head.S | 164 + arch/sh64/boot/compressed/install.sh | 56 + arch/sh64/configs/cayman_defconfig | 660 ++++ arch/sh64/defconfig | 668 ++++ arch/sh64/kernel/Makefile | 38 + arch/sh64/kernel/asm-offsets.c | 33 + arch/sh64/kernel/early_printk.c | 107 + arch/sh64/kernel/fpu.c | 170 + arch/sh64/kernel/init_task.c | 46 + arch/sh64/kernel/pci-dma.c | 50 + arch/sh64/kernel/pci_sh5.h | 107 + arch/sh64/kernel/semaphore.c | 140 + arch/sh64/kernel/switchto.S | 199 ++ arch/sh64/kernel/syscalls.S | 340 ++ arch/sh64/kernel/traps.c | 958 ++++++ arch/sh64/kernel/unwind.c | 326 ++ arch/sh64/lib/Makefile | 19 + arch/sh64/lib/copy_user_memcpy.S | 213 ++ arch/sh64/lib/page_clear.S | 51 + arch/sh64/lib/page_copy.S | 82 + arch/sh64/lib/panic.c | 58 + arch/sh64/mach-cayman/Makefile | 11 + arch/sh64/mach-harp/Makefile | 14 + arch/sh64/mach-romram/Makefile | 14 + arch/sh64/mach-sim/Makefile | 14 + arch/sh64/mm/Makefile | 44 + arch/sh64/mm/tlb.c | 166 + arch/sh64/mm/tlbmiss.c | 282 ++ arch/sh64/oprofile/Kconfig | 23 + arch/sh64/oprofile/Makefile | 12 + drivers/char/hpet.c | 1076 ++++++ 
drivers/char/lcd.h | 184 ++ drivers/firmware/pcdp.c | 213 ++ drivers/firmware/pcdp.h | 80 + drivers/md/dm-io.h | 77 + drivers/md/dm-raid1.c | 1278 ++++++++ drivers/md/kcopyd.c | 699 ++++ drivers/md/kcopyd.h | 42 + drivers/media/video/ovcamchip/ov6x20.c | 415 +++ drivers/media/video/ovcamchip/ov6x30.c | 374 +++ drivers/media/video/ovcamchip/ov76be.c | 303 ++ drivers/media/video/ovcamchip/ov7x10.c | 335 ++ drivers/media/video/ovcamchip/ov7x20.c | 455 +++ drivers/mtd/chips/cfi_util.c | 92 + drivers/mtd/devices/phram.c | 362 ++ drivers/mtd/maps/ichxrom.c | 407 +++ drivers/mtd/nand/diskonchip.c | 1237 +++++++ drivers/mtd/nand/nand_base.c | 2581 +++++++++++++++ drivers/mtd/nand/tx4925ndfmc.c | 442 +++ drivers/net/fec_8xx/Kconfig | 14 + drivers/net/fec_8xx/Makefile | 12 + drivers/net/fec_8xx/fec_8xx.h | 218 ++ drivers/net/gianfar.c | 1926 +++++++++++ drivers/net/gianfar.h | 537 +++ drivers/net/gianfar_ethtool.c | 484 +++ drivers/net/gianfar_phy.c | 622 ++++ drivers/net/gianfar_phy.h | 202 ++ drivers/net/via-velocity.c | 3277 +++++++++++++++++++ drivers/pcmcia/pd6729.c | 732 +++++ drivers/pcmcia/pd6729.h | 28 + drivers/s390/net/ctcdbug.c | 83 + drivers/s390/net/ctcdbug.h | 123 + drivers/scsi/3w-9xxx.c | 2153 ++++++++++++ drivers/scsi/fdomain.h | 24 + drivers/scsi/sata_nv.c | 355 ++ drivers/serial/cpm_uart/Makefile | 11 + drivers/serial/cpm_uart/cpm_uart_core.c | 1179 +++++++ drivers/serial/cpm_uart/cpm_uart_cpm1.c | 275 ++ drivers/serial/cpm_uart/cpm_uart_cpm1.h | 45 + drivers/serial/cpm_uart/cpm_uart_cpm2.c | 328 ++ drivers/serial/cpm_uart/cpm_uart_cpm2.h | 45 + drivers/usb/class/cdc-acm.h | 115 + drivers/w1/Kconfig | 31 + drivers/w1/matrox_w1.c | 246 ++ drivers/w1/w1.c | 623 ++++ drivers/w1/w1_int.c | 207 ++ drivers/w1/w1_int.h | 36 + drivers/w1/w1_io.c | 138 + drivers/w1/w1_log.h | 38 + fs/isofs/export.c | 228 ++ fs/jffs2/compr.h | 122 + fs/ntfs/collate.c | 121 + include/asm-alpha/setup.h | 6 + include/asm-arm/hardware/clock.h | 121 + include/asm-arm/vfp.h | 78 + include/asm-ia64/setup.h | 6 + include/asm-mips/gt64240.h | 1235 +++++++ include/asm-ppc/cpm2.h | 1041 ++++++ include/asm-ppc/immap_85xx.h | 126 + include/asm-ppc/immap_cpm2.h | 648 ++++ include/asm-ppc/m8260_pci.h | 186 ++ include/asm-ppc/mpc52xx_psc.h | 191 ++ include/asm-ppc/mpc85xx.h | 128 + include/asm-sh/adc.h | 12 + include/asm-sh/cpu-sh3/adc.h | 28 + include/asm-sh64/a.out.h | 37 + include/asm-sh64/atomic.h | 126 + include/asm-sh64/bugs.h | 38 + include/asm-sh64/cache.h | 141 + include/asm-sh64/cayman.h | 20 + include/asm-sh64/cpumask.h | 6 + include/asm-sh64/current.h | 28 + include/asm-sh64/delay.h | 11 + include/asm-sh64/div64.h | 6 + include/asm-sh64/dma.h | 41 + include/asm-sh64/elf.h | 101 + include/asm-sh64/errno.h | 6 + include/asm-sh64/fcntl.h | 7 + include/asm-sh64/hardware.h | 45 + include/asm-sh64/ioctl.h | 83 + include/asm-sh64/ioctls.h | 111 + include/asm-sh64/ipc.h | 6 + include/asm-sh64/ipcbuf.h | 40 + include/asm-sh64/kmap_types.h | 7 + include/asm-sh64/linkage.h | 7 + include/asm-sh64/local.h | 7 + include/asm-sh64/mc146818rtc.h | 7 + include/asm-sh64/mman.h | 6 + include/asm-sh64/mmu.h | 7 + include/asm-sh64/module.h | 12 + include/asm-sh64/msgbuf.h | 42 + include/asm-sh64/namei.h | 24 + include/asm-sh64/pci.h | 110 + include/asm-sh64/percpu.h | 6 + include/asm-sh64/posix_types.h | 131 + include/asm-sh64/registers.h | 106 + include/asm-sh64/resource.h | 6 + include/asm-sh64/scatterlist.h | 23 + include/asm-sh64/sections.h | 7 + include/asm-sh64/segment.h | 6 + include/asm-sh64/semaphore-helper.h | 101 + 
include/asm-sh64/semaphore.h | 146 + include/asm-sh64/sembuf.h | 36 + include/asm-sh64/shmbuf.h | 53 + include/asm-sh64/sigcontext.h | 30 + include/asm-sh64/siginfo.h | 6 + include/asm-sh64/smp.h | 15 + include/asm-sh64/socket.h | 6 + include/asm-sh64/sockios.h | 24 + include/asm-sh64/spinlock.h | 17 + include/asm-sh64/stat.h | 88 + include/asm-sh64/statfs.h | 6 + include/asm-sh64/string.h | 21 + include/asm-sh64/termbits.h | 6 + include/asm-sh64/termios.h | 117 + include/asm-sh64/thread_info.h | 82 + include/asm-sh64/tlb.h | 92 + include/asm-sh64/tlbflush.h | 31 + include/asm-sh64/topology.h | 6 + include/asm-sh64/types.h | 76 + include/asm-sh64/uaccess.h | 320 ++ include/asm-sh64/ucontext.h | 23 + include/asm-sh64/unaligned.h | 28 + include/asm-sparc64/cmt.h | 59 + include/asm-um/setup.h | 6 + include/asm-v850/setup.h | 6 + include/linux/crc-ccitt.h | 15 + include/linux/ds1286.h | 54 + include/linux/hpet.h | 133 + include/linux/mtd/physmap.h | 61 + include/linux/netfilter_ipv4/ipt_addrtype.h | 11 + include/linux/netfilter_ipv4/ipt_realm.h | 10 + include/mtd/inftl-user.h | 91 + include/mtd/jffs2-user.h | 35 + include/mtd/mtd-abi.h | 97 + include/mtd/nftl-user.h | 76 + net/bluetooth/hidp/Makefile | 7 + net/bluetooth/hidp/core.c | 649 ++++ net/bluetooth/hidp/hidp.h | 122 + net/core/stream.c | 41 + net/ipv4/xfrm4_output.c | 120 + net/ipv6/xfrm6_tunnel.c | 634 ++++ net/sched/sch_netem.c | 858 +++++ scripts/mod/Makefile | 16 + scripts/mod/empty.c | 1 + scripts/package/Makefile | 71 + scripts/package/builddeb | 79 + scripts/package/mkspec | 63 + 233 files changed, 49614 insertions(+) create mode 100644 Documentation/arm/Sharp-LH/SDRAM create mode 100644 Documentation/arm/VFP/release-notes.txt create mode 100644 Documentation/device-mapper/dm-io.txt create mode 100644 Documentation/device-mapper/kcopyd.txt create mode 100644 Documentation/device-mapper/linear.txt create mode 100644 Documentation/device-mapper/striped.txt create mode 100644 Documentation/device-mapper/zero.txt create mode 100644 Documentation/hpet.txt create mode 100644 Documentation/powerpc/mpc52xx.txt create mode 100644 arch/arm/boot/bootp/initrd.S create mode 100644 arch/arm/boot/bootp/kernel.S create mode 100644 arch/arm/boot/compressed/piggy.S create mode 100644 arch/arm/mach-integrator/clock.c create mode 100644 arch/arm/mach-integrator/clock.h create mode 100644 arch/arm/mach-sa1100/collie.c create mode 100644 arch/arm/mach-versatile/clock.c create mode 100644 arch/arm/mach-versatile/clock.h create mode 100644 arch/i386/lib/bitops.c create mode 100644 arch/mips/configs/ocelot_g_defconfig create mode 100644 arch/mips/pci/fixup-jaguar.c create mode 100644 arch/mips/pci/fixup-ocelot-c.c create mode 100644 arch/mips/pci/fixup-ocelot-g.c create mode 100644 arch/mips/pci/fixup-tb0219.c create mode 100644 arch/mips/pci/ops-marvell.c create mode 100644 arch/mips/pci/ops-titan-ht.c create mode 100644 arch/mips/pci/ops-vr41xx.c create mode 100644 arch/mips/pmc-sierra/yosemite/dbg_io.c create mode 100644 arch/mips/pmc-sierra/yosemite/py-console.c create mode 100644 arch/ppc/configs/ads8272_defconfig create mode 100644 arch/ppc/configs/lite5200_defconfig create mode 100644 arch/ppc/configs/rpx8260_defconfig create mode 100644 arch/ppc/kernel/head_e500.S create mode 100644 arch/ppc/lib/rheap.c create mode 100644 arch/ppc/platforms/85xx/Kconfig create mode 100644 arch/ppc/platforms/85xx/Makefile create mode 100644 arch/ppc/platforms/85xx/mpc8540_ads.c create mode 100644 arch/ppc/platforms/85xx/mpc8540_ads.h create mode 100644 
arch/ppc/platforms/85xx/sbc8560.c create mode 100644 arch/ppc/platforms/85xx/sbc8560.h create mode 100644 arch/ppc/platforms/85xx/sbc85xx.c create mode 100644 arch/ppc/platforms/rpx8260.h create mode 100644 arch/ppc/syslib/cpm2_common.c create mode 100644 arch/ppc/syslib/m8260_pci_erratum9.c create mode 100644 arch/ppc/syslib/ppc85xx_setup.c create mode 100644 arch/ppc/syslib/ppc85xx_setup.h create mode 100644 arch/s390/mm/mmap.c create mode 100644 arch/sh/boards/renesas/systemh/Makefile create mode 100644 arch/sh/boards/se/7300/Makefile create mode 100644 arch/sh/cchips/voyagergx/Makefile create mode 100644 arch/sh/cchips/voyagergx/consistent.c create mode 100644 arch/sh/configs/rts7751r2d_defconfig create mode 100644 arch/sh/kernel/cpu/adc.c create mode 100644 arch/sh64/boot/Makefile create mode 100644 arch/sh64/boot/compressed/Makefile create mode 100644 arch/sh64/boot/compressed/cache.c create mode 100644 arch/sh64/boot/compressed/head.S create mode 100644 arch/sh64/boot/compressed/install.sh create mode 100644 arch/sh64/configs/cayman_defconfig create mode 100644 arch/sh64/defconfig create mode 100644 arch/sh64/kernel/Makefile create mode 100644 arch/sh64/kernel/asm-offsets.c create mode 100644 arch/sh64/kernel/early_printk.c create mode 100644 arch/sh64/kernel/fpu.c create mode 100644 arch/sh64/kernel/init_task.c create mode 100644 arch/sh64/kernel/pci-dma.c create mode 100644 arch/sh64/kernel/pci_sh5.h create mode 100644 arch/sh64/kernel/semaphore.c create mode 100644 arch/sh64/kernel/switchto.S create mode 100644 arch/sh64/kernel/syscalls.S create mode 100644 arch/sh64/kernel/traps.c create mode 100644 arch/sh64/kernel/unwind.c create mode 100644 arch/sh64/lib/Makefile create mode 100644 arch/sh64/lib/copy_user_memcpy.S create mode 100644 arch/sh64/lib/page_clear.S create mode 100644 arch/sh64/lib/page_copy.S create mode 100644 arch/sh64/lib/panic.c create mode 100644 arch/sh64/mach-cayman/Makefile create mode 100644 arch/sh64/mach-harp/Makefile create mode 100644 arch/sh64/mach-romram/Makefile create mode 100644 arch/sh64/mach-sim/Makefile create mode 100644 arch/sh64/mm/Makefile create mode 100644 arch/sh64/mm/tlb.c create mode 100644 arch/sh64/mm/tlbmiss.c create mode 100644 arch/sh64/oprofile/Kconfig create mode 100644 arch/sh64/oprofile/Makefile create mode 100644 drivers/char/hpet.c create mode 100644 drivers/char/lcd.h create mode 100644 drivers/firmware/pcdp.c create mode 100644 drivers/firmware/pcdp.h create mode 100644 drivers/md/dm-io.h create mode 100644 drivers/md/dm-raid1.c create mode 100644 drivers/md/kcopyd.c create mode 100644 drivers/md/kcopyd.h create mode 100644 drivers/media/video/ovcamchip/ov6x20.c create mode 100644 drivers/media/video/ovcamchip/ov6x30.c create mode 100644 drivers/media/video/ovcamchip/ov76be.c create mode 100644 drivers/media/video/ovcamchip/ov7x10.c create mode 100644 drivers/media/video/ovcamchip/ov7x20.c create mode 100644 drivers/mtd/chips/cfi_util.c create mode 100644 drivers/mtd/devices/phram.c create mode 100644 drivers/mtd/maps/ichxrom.c create mode 100644 drivers/mtd/nand/diskonchip.c create mode 100644 drivers/mtd/nand/nand_base.c create mode 100644 drivers/mtd/nand/tx4925ndfmc.c create mode 100644 drivers/net/fec_8xx/Kconfig create mode 100644 drivers/net/fec_8xx/Makefile create mode 100644 drivers/net/fec_8xx/fec_8xx.h create mode 100644 drivers/net/gianfar.c create mode 100644 drivers/net/gianfar.h create mode 100644 drivers/net/gianfar_ethtool.c create mode 100644 drivers/net/gianfar_phy.c create mode 100644 
drivers/net/gianfar_phy.h create mode 100644 drivers/net/via-velocity.c create mode 100644 drivers/pcmcia/pd6729.c create mode 100644 drivers/pcmcia/pd6729.h create mode 100644 drivers/s390/net/ctcdbug.c create mode 100644 drivers/s390/net/ctcdbug.h create mode 100644 drivers/scsi/3w-9xxx.c create mode 100644 drivers/scsi/fdomain.h create mode 100644 drivers/scsi/sata_nv.c create mode 100644 drivers/serial/cpm_uart/Makefile create mode 100644 drivers/serial/cpm_uart/cpm_uart_core.c create mode 100644 drivers/serial/cpm_uart/cpm_uart_cpm1.c create mode 100644 drivers/serial/cpm_uart/cpm_uart_cpm1.h create mode 100644 drivers/serial/cpm_uart/cpm_uart_cpm2.c create mode 100644 drivers/serial/cpm_uart/cpm_uart_cpm2.h create mode 100644 drivers/usb/class/cdc-acm.h create mode 100644 drivers/w1/Kconfig create mode 100644 drivers/w1/matrox_w1.c create mode 100644 drivers/w1/w1.c create mode 100644 drivers/w1/w1_int.c create mode 100644 drivers/w1/w1_int.h create mode 100644 drivers/w1/w1_io.c create mode 100644 drivers/w1/w1_log.h create mode 100644 fs/isofs/export.c create mode 100644 fs/jffs2/compr.h create mode 100644 fs/ntfs/collate.c create mode 100644 include/asm-alpha/setup.h create mode 100644 include/asm-arm/hardware/clock.h create mode 100644 include/asm-arm/vfp.h create mode 100644 include/asm-ia64/setup.h create mode 100644 include/asm-mips/gt64240.h create mode 100644 include/asm-ppc/cpm2.h create mode 100644 include/asm-ppc/immap_85xx.h create mode 100644 include/asm-ppc/immap_cpm2.h create mode 100644 include/asm-ppc/m8260_pci.h create mode 100644 include/asm-ppc/mpc52xx_psc.h create mode 100644 include/asm-ppc/mpc85xx.h create mode 100644 include/asm-sh/adc.h create mode 100644 include/asm-sh/cpu-sh3/adc.h create mode 100644 include/asm-sh64/a.out.h create mode 100644 include/asm-sh64/atomic.h create mode 100644 include/asm-sh64/bugs.h create mode 100644 include/asm-sh64/cache.h create mode 100644 include/asm-sh64/cayman.h create mode 100644 include/asm-sh64/cpumask.h create mode 100644 include/asm-sh64/current.h create mode 100644 include/asm-sh64/delay.h create mode 100644 include/asm-sh64/div64.h create mode 100644 include/asm-sh64/dma.h create mode 100644 include/asm-sh64/elf.h create mode 100644 include/asm-sh64/errno.h create mode 100644 include/asm-sh64/fcntl.h create mode 100644 include/asm-sh64/hardware.h create mode 100644 include/asm-sh64/ioctl.h create mode 100644 include/asm-sh64/ioctls.h create mode 100644 include/asm-sh64/ipc.h create mode 100644 include/asm-sh64/ipcbuf.h create mode 100644 include/asm-sh64/kmap_types.h create mode 100644 include/asm-sh64/linkage.h create mode 100644 include/asm-sh64/local.h create mode 100644 include/asm-sh64/mc146818rtc.h create mode 100644 include/asm-sh64/mman.h create mode 100644 include/asm-sh64/mmu.h create mode 100644 include/asm-sh64/module.h create mode 100644 include/asm-sh64/msgbuf.h create mode 100644 include/asm-sh64/namei.h create mode 100644 include/asm-sh64/pci.h create mode 100644 include/asm-sh64/percpu.h create mode 100644 include/asm-sh64/posix_types.h create mode 100644 include/asm-sh64/registers.h create mode 100644 include/asm-sh64/resource.h create mode 100644 include/asm-sh64/scatterlist.h create mode 100644 include/asm-sh64/sections.h create mode 100644 include/asm-sh64/segment.h create mode 100644 include/asm-sh64/semaphore-helper.h create mode 100644 include/asm-sh64/semaphore.h create mode 100644 include/asm-sh64/sembuf.h create mode 100644 include/asm-sh64/shmbuf.h create mode 100644 
include/asm-sh64/sigcontext.h
 create mode 100644 include/asm-sh64/siginfo.h
 create mode 100644 include/asm-sh64/smp.h
 create mode 100644 include/asm-sh64/socket.h
 create mode 100644 include/asm-sh64/sockios.h
 create mode 100644 include/asm-sh64/spinlock.h
 create mode 100644 include/asm-sh64/stat.h
 create mode 100644 include/asm-sh64/statfs.h
 create mode 100644 include/asm-sh64/string.h
 create mode 100644 include/asm-sh64/termbits.h
 create mode 100644 include/asm-sh64/termios.h
 create mode 100644 include/asm-sh64/thread_info.h
 create mode 100644 include/asm-sh64/tlb.h
 create mode 100644 include/asm-sh64/tlbflush.h
 create mode 100644 include/asm-sh64/topology.h
 create mode 100644 include/asm-sh64/types.h
 create mode 100644 include/asm-sh64/uaccess.h
 create mode 100644 include/asm-sh64/ucontext.h
 create mode 100644 include/asm-sh64/unaligned.h
 create mode 100644 include/asm-sparc64/cmt.h
 create mode 100644 include/asm-um/setup.h
 create mode 100644 include/asm-v850/setup.h
 create mode 100644 include/linux/crc-ccitt.h
 create mode 100644 include/linux/ds1286.h
 create mode 100644 include/linux/hpet.h
 create mode 100644 include/linux/mtd/physmap.h
 create mode 100644 include/linux/netfilter_ipv4/ipt_addrtype.h
 create mode 100644 include/linux/netfilter_ipv4/ipt_realm.h
 create mode 100644 include/mtd/inftl-user.h
 create mode 100644 include/mtd/jffs2-user.h
 create mode 100644 include/mtd/mtd-abi.h
 create mode 100644 include/mtd/nftl-user.h
 create mode 100644 net/bluetooth/hidp/Makefile
 create mode 100644 net/bluetooth/hidp/core.c
 create mode 100644 net/bluetooth/hidp/hidp.h
 create mode 100644 net/core/stream.c
 create mode 100644 net/ipv4/xfrm4_output.c
 create mode 100644 net/ipv6/xfrm6_tunnel.c
 create mode 100644 net/sched/sch_netem.c
 create mode 100644 scripts/mod/Makefile
 create mode 100644 scripts/mod/empty.c
 create mode 100644 scripts/package/Makefile
 create mode 100644 scripts/package/builddeb
 create mode 100644 scripts/package/mkspec
diff --git a/Documentation/arm/Sharp-LH/SDRAM b/Documentation/arm/Sharp-LH/SDRAM
new file mode 100644
index 000000000..93ddc23c2
--- /dev/null
+++ b/Documentation/arm/Sharp-LH/SDRAM
@@ -0,0 +1,51 @@
+README on the SDRAM Controller for the LH7a40X
+==============================================
+
+The standard configuration for the SDRAM controller generates a sparse
+memory array. The precise layout is determined by the SDRAM chips. A
+default kernel configuration assembles the discontiguous memory
+regions into separate memory nodes via the NUMA (Non-Uniform Memory
+Architecture) facilities. In this default configuration, the kernel
+is forgiving about the precise layout. As long as it is given an
+accurate picture of available memory by the bootloader, the kernel will
+execute correctly.
+
+The SDRC supports a mode where some of the chip select lines are
+swapped in order to make SDRAM look like a synchronous ROM. Setting
+this bit means that the RAM will present as a contiguous array. Some
+programmers prefer this to the discontiguous layout. Be aware that
+there may be a penalty for this feature where some configurations of
+memory are significantly reduced; i.e. 64MiB of RAM appears as only 32
+MiB.
+
+There are a couple of configuration options to override the default
+behavior. When the SROMLL bit is set and memory appears as a
+contiguous array, there is no reason to support NUMA.
+CONFIG_LH7A40X_CONTIGMEM disables NUMA support. When physical memory
+is discontiguous, the memory tables are organized such that there are
+two banks per node with a small gap between them. This layout wastes
+some kernel memory for page tables representing non-existent memory.
+CONFIG_LH7A40X_ONE_BANK_PER_NODE optimizes the node tables such that
+there are no gaps. These options control the low level organization
+of the memory management tables in ways that may prevent the kernel
+from booting or may cause the kernel to allocate excessively large
+page tables. Be warned. Only change these options if you know what
+you are doing. The default behavior is a reasonable compromise that
+will suit all users.
+
+--
+
+A typical 32MiB system with the default configuration options will
+find physical memory managed as follows.
+
+  node 0: 0xc0000000 4MiB
+          0xc1000000 4MiB
+  node 1: 0xc4000000 4MiB
+          0xc5000000 4MiB
+  node 2: 0xc8000000 4MiB
+          0xc9000000 4MiB
+  node 3: 0xcc000000 4MiB
+          0xcd000000 4MiB
+
+Setting CONFIG_LH7A40X_ONE_BANK_PER_NODE will put each bank into a
+separate node.
diff --git a/Documentation/arm/VFP/release-notes.txt b/Documentation/arm/VFP/release-notes.txt
new file mode 100644
index 000000000..f28e0222f
--- /dev/null
+++ b/Documentation/arm/VFP/release-notes.txt
@@ -0,0 +1,55 @@
+Release notes for Linux Kernel VFP support code
+-----------------------------------------------
+
+Date:   20 May 2004
+Author: Russell King
+
+This is the first release of the Linux Kernel VFP support code. It
+provides support for the exceptions bounced from VFP hardware found
+on ARM926EJ-S.
+
+This release has been validated against the SoftFloat-2b library by
+John R. Hauser using the TestFloat-2a test suite. Details of this
+library and test suite can be found at:
+
+  http://www.cs.berkeley.edu/~jhauser/arithmetic/SoftFloat.html
+
+The operations which have been tested with this package are:
+
+ - fdiv
+ - fsub
+ - fadd
+ - fmul
+ - fcmp
+ - fcmpe
+ - fcvtd
+ - fcvts
+ - fsito
+ - ftosi
+ - fsqrt
+
+All the above pass softfloat tests with the following exceptions:
+
+- fadd/fsub shows some differences in the handling of +0 / -0 results
+  when input operands differ in signs.
+- the handling of underflow exceptions is slightly different. If a
+  result underflows before rounding, but becomes a normalised number
+  after rounding, we do not signal an underflow exception.
+
+Other operations which have been tested by basic assembly-only tests
+are:
+
+ - fcpy
+ - fabs
+ - fneg
+ - ftoui
+ - ftosiz
+ - ftouiz
+
+The combination operations have not been tested:
+
+ - fmac
+ - fnmac
+ - fmsc
+ - fnmsc
+ - fnmul
diff --git a/Documentation/device-mapper/dm-io.txt b/Documentation/device-mapper/dm-io.txt
new file mode 100644
index 000000000..3b5d9a52c
--- /dev/null
+++ b/Documentation/device-mapper/dm-io.txt
@@ -0,0 +1,75 @@
+dm-io
+=====
+
+Dm-io provides synchronous and asynchronous I/O services. There are three
+types of I/O services available, and each type has a sync and an async
+version.
+
+The user must set up an io_region structure to describe the desired location
+of the I/O. Each io_region indicates a block-device along with the starting
+sector and size of the region.
+
+   struct io_region {
+      struct block_device *bdev;
+      sector_t sector;
+      sector_t count;
+   };
+
+Dm-io can read from one io_region or write to one or more io_regions. Writes
+to multiple regions are specified by an array of io_region structures.
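
As a rough illustration only (not part of the interface description above,
and assuming the caller has already acquired a block device "bdev", built a
page_list "pl", and reserved pages with the dm_io_get() call described
below), a synchronous read of eight sectors might look like this:

   struct io_region where = {
      .bdev   = bdev,   /* assumption: an already-acquired block device */
      .sector = 0,      /* start reading at the first sector */
      .count  = 8,      /* read eight sectors */
   };
   unsigned long error_bits;
   int r;

   /* synchronous page-list service, described below; offset 0 into
    * the first page of 'pl' */
   r = dm_io_sync(1, &where, READ, pl, 0, &error_bits);
   if (r || error_bits) {
      /* with a single region, bit 0 of error_bits reports its failure */
      return -EIO;
   }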
+ +The first I/O service type takes a list of memory pages as the data buffer for +the I/O, along with an offset into the first page. + + struct page_list { + struct page_list *next; + struct page *page; + }; + + int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, + struct page_list *pl, unsigned int offset, + unsigned long *error_bits); + int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, + struct page_list *pl, unsigned int offset, + io_notify_fn fn, void *context); + +The second I/O service type takes an array of bio vectors as the data buffer +for the I/O. This service can be handy if the caller has a pre-assembled bio, +but wants to direct different portions of the bio to different devices. + + int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, + int rw, struct bio_vec *bvec, + unsigned long *error_bits); + int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, + int rw, struct bio_vec *bvec, + io_notify_fn fn, void *context); + +The third I/O service type takes a pointer to a vmalloc'd memory buffer as the +data buffer for the I/O. This service can be handy if the caller needs to do +I/O to a large region but doesn't want to allocate a large number of individual +memory pages. + + int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw, + void *data, unsigned long *error_bits); + int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw, + void *data, io_notify_fn fn, void *context); + +Callers of the asynchronous I/O services must include the name of a completion +callback routine and a pointer to some context data for the I/O. + + typedef void (*io_notify_fn)(unsigned long error, void *context); + +The "error" parameter in this callback, as well as the "*error" parameter in +all of the synchronous versions, is a bitset (instead of a simple error value). +In the case of an write-I/O to multiple regions, this bitset allows dm-io to +indicate success or failure on each individual region. + +Before using any of the dm-io services, the user should call dm_io_get() +and specify the number of pages they expect to perform I/O on concurrently. +Dm-io will attempt to resize its mempool to make sure enough pages are +always available in order to avoid unnecessary waiting while performing I/O. + +When the user is finished using the dm-io services, they should call +dm_io_put() and specify the same number of pages that were given on the +dm_io_get() call. + diff --git a/Documentation/device-mapper/kcopyd.txt b/Documentation/device-mapper/kcopyd.txt new file mode 100644 index 000000000..820382c4c --- /dev/null +++ b/Documentation/device-mapper/kcopyd.txt @@ -0,0 +1,47 @@ +kcopyd +====== + +Kcopyd provides the ability to copy a range of sectors from one block-device +to one or more other block-devices, with an asynchronous completion +notification. It is used by dm-snapshot and dm-mirror. + +Users of kcopyd must first create a client and indicate how many memory pages +to set aside for their copy jobs. This is done with a call to +kcopyd_client_create(). + + int kcopyd_client_create(unsigned int num_pages, + struct kcopyd_client **result); + +To start a copy job, the user must set up io_region structures to describe +the source and destinations of the copy. Each io_region indicates a +block-device along with the starting sector and size of the region. 
The source +of the copy is given as one io_region structure, and the destinations of the +copy are given as an array of io_region structures. + + struct io_region { + struct block_device *bdev; + sector_t sector; + sector_t count; + }; + +To start the copy, the user calls kcopyd_copy(), passing in the client +pointer, pointers to the source and destination io_regions, the name of a +completion callback routine, and a pointer to some context data for the copy. + + int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from, + unsigned int num_dests, struct io_region *dests, + unsigned int flags, kcopyd_notify_fn fn, void *context); + + typedef void (*kcopyd_notify_fn)(int read_err, unsigned int write_err, + void *context); + +When the copy completes, kcopyd will call the user's completion routine, +passing back the user's context pointer. It will also indicate if a read or +write error occurred during the copy. + +When a user is done with all their copy jobs, they should call +kcopyd_client_destroy() to delete the kcopyd client, which will release the +associated memory pages. + + void kcopyd_client_destroy(struct kcopyd_client *kc); + diff --git a/Documentation/device-mapper/linear.txt b/Documentation/device-mapper/linear.txt new file mode 100644 index 000000000..d5307d380 --- /dev/null +++ b/Documentation/device-mapper/linear.txt @@ -0,0 +1,61 @@ +dm-linear +========= + +Device-Mapper's "linear" target maps a linear range of the Device-Mapper +device onto a linear range of another device. This is the basic building +block of logical volume managers. + +Parameters: + : Full pathname to the underlying block-device, or a + "major:minor" device-number. + : Starting sector within the device. + + +Example scripts +=============== +[[ +#!/bin/sh +# Create an identity mapping for a device +echo "0 `blockdev --getsize $1` linear $1 0" | dmsetup create identity +]] + + +[[ +#!/bin/sh +# Join 2 devices together +size1=`blockdev --getsize $1` +size2=`blockdev --getsize $2` +echo "0 $size1 linear $1 0 +$size1 $size2 linear $2 0" | dmsetup create joined +]] + + +[[ +#!/usr/bin/perl -w +# Split a device into 4M chunks and then join them together in reverse order. + +my $name = "reverse"; +my $extent_size = 4 * 1024 * 2; +my $dev = $ARGV[0]; +my $table = ""; +my $count = 0; + +if (!defined($dev)) { + die("Please specify a device.\n"); +} + +my $dev_size = `blockdev --getsize $dev`; +my $extents = int($dev_size / $extent_size) - + (($dev_size % $extent_size) ? 1 : 0); + +while ($extents > 0) { + my $this_start = $count * $extent_size; + $extents--; + $count++; + my $this_offset = $extents * $extent_size; + + $table .= "$this_start $extent_size linear $dev $this_offset\n"; +} + +`echo \"$table\" | dmsetup create $name`; +]] diff --git a/Documentation/device-mapper/striped.txt b/Documentation/device-mapper/striped.txt new file mode 100644 index 000000000..f34d3236b --- /dev/null +++ b/Documentation/device-mapper/striped.txt @@ -0,0 +1,58 @@ +dm-stripe +========= + +Device-Mapper's "striped" target is used to create a striped (i.e. RAID-0) +device across one or more underlying devices. Data is written in "chunks", +with consecutive chunks rotating among the underlying devices. This can +potentially provide improved I/O throughput by utilizing several physical +devices in parallel. + +Parameters: [ ]+ + : Number of underlying devices. + : Size of each chunk of data. Must be a power-of-2 and at + least as large as the system's PAGE_SIZE. 
+ : Full pathname to the underlying block-device, or a + "major:minor" device-number. + : Starting sector within the device. + +One or more underlying devices can be specified. The striped device size must +be a multiple of the chunk size and a multiple of the number of underlying +devices. + + +Example scripts +=============== + +[[ +#!/usr/bin/perl -w +# Create a striped device across any number of underlying devices. The device +# will be called "stripe_dev" and have a chunk-size of 128k. + +my $chunk_size = 128 * 2; +my $dev_name = "stripe_dev"; +my $num_devs = @ARGV; +my @devs = @ARGV; +my ($min_dev_size, $stripe_dev_size, $i); + +if (!$num_devs) { + die("Specify at least one device\n"); +} + +$min_dev_size = `blockdev --getsize $devs[0]`; +for ($i = 1; $i < $num_devs; $i++) { + my $this_size = `blockdev --getsize $devs[$i]`; + $min_dev_size = ($min_dev_size < $this_size) ? + $min_dev_size : $this_size; +} + +$stripe_dev_size = $min_dev_size * $num_devs; +$stripe_dev_size -= $stripe_dev_size % ($chunk_size * $num_devs); + +$table = "0 $stripe_dev_size striped $num_devs $chunk_size"; +for ($i = 0; $i < $num_devs; $i++) { + $table .= " $devs[$i] 0"; +} + +`echo $table | dmsetup create $dev_name`; +]] + diff --git a/Documentation/device-mapper/zero.txt b/Documentation/device-mapper/zero.txt new file mode 100644 index 000000000..20fb38e7f --- /dev/null +++ b/Documentation/device-mapper/zero.txt @@ -0,0 +1,37 @@ +dm-zero +======= + +Device-Mapper's "zero" target provides a block-device that always returns +zero'd data on reads and silently drops writes. This is similar behavior to +/dev/zero, but as a block-device instead of a character-device. + +Dm-zero has no target-specific parameters. + +One very interesting use of dm-zero is for creating "sparse" devices in +conjunction with dm-snapshot. A sparse device reports a device-size larger +than the amount of actual storage space available for that device. A user can +write data anywhere within the sparse device and read it back like a normal +device. Reads to previously unwritten areas will return a zero'd buffer. When +enough data has been written to fill up the actual storage space, the sparse +device is deactivated. This can be very useful for testing device and +filesystem limitations. + +To create a sparse device, start by creating a dm-zero device that's the +desired size of the sparse device. For this example, we'll assume a 10TB +sparse device. + +TEN_TERABYTES=`expr 10 \* 1024 \* 1024 \* 1024 \* 2` # 10 TB in sectors +echo "0 $TEN_TERABYTES zero" | dmsetup create zero1 + +Then create a snapshot of the zero device, using any available block-device as +the COW device. The size of the COW device will determine the amount of real +space available to the sparse device. For this example, we'll assume /dev/sdb1 +is an available 10GB partition. + +echo "0 $TEN_TERABYTES snapshot /dev/mapper/zero1 /dev/sdb1 p 128" | \ + dmsetup create sparse1 + +This will create a 10TB sparse device called /dev/mapper/sparse1 that has +10GB of actual storage space available. If more than 10GB of data is written +to this device, it will start returning I/O errors. + diff --git a/Documentation/hpet.txt b/Documentation/hpet.txt new file mode 100644 index 000000000..584ebc277 --- /dev/null +++ b/Documentation/hpet.txt @@ -0,0 +1,298 @@ + High Precision Event Timer Driver for Linux + +The High Precision Event Timer (HPET) hardware is the future replacement for the 8254 and Real +Time Clock (RTC) periodic timer functionality. Each HPET can have up two 32 timers. 
It is possible +to configure the first two timers as legacy replacements for 8254 and RTC periodic. A specification +done by INTEL and Microsoft can be found at http://www.intel.com/labs/platcomp/hpet/hpetspec.htm. + +The driver supports detection of HPET driver allocation and initialization of the HPET before the +driver module_init routine is called. This enables platform code which uses timer 0 or 1 as the +main timer to intercept HPET initialization. An example of this initialization can be found in +arch/i386/kernel/time_hpet.c. + +The driver provides two APIs which are very similar to the API found in the rtc.c driver. +There is a user space API and a kernel space API. An example user space program is provided +below. + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +extern void hpet_open_close(int, const char **); +extern void hpet_info(int, const char **); +extern void hpet_poll(int, const char **); +extern void hpet_fasync(int, const char **); +extern void hpet_read(int, const char **); + +#include +#include +#include + +struct hpet_command { + char *command; + void (*func)(int argc, const char ** argv); +} hpet_command[] = { + { + "open-close", + hpet_open_close + }, + { + "info", + hpet_info + }, + { + "poll", + hpet_poll + }, + { + "fasync", + hpet_fasync + }, +}; + +int +main(int argc, const char ** argv) +{ + int i; + + argc--; + argv++; + + if (!argc) { + fprintf(stderr, "-hpet: requires command\n"); + return -1; + } + + + for (i = 0; i < (sizeof (hpet_command) / sizeof (hpet_command[0])); i++) + if (!strcmp(argv[0], hpet_command[i].command)) { + argc--; + argv++; + fprintf(stderr, "-hpet: executing %s\n", + hpet_command[i].command); + hpet_command[i].func(argc, argv); + return 0; + } + + fprintf(stderr, "do_hpet: command %s not implemented\n", argv[0]); + + return -1; +} + +void +hpet_open_close(int argc, const char **argv) +{ + int fd; + + if (argc != 1) { + fprintf(stderr, "hpet_open_close: device-name\n"); + return; + } + + fd = open(argv[0], O_RDWR); + if (fd < 0) + fprintf(stderr, "hpet_open_close: open failed\n"); + else + close(fd); + + return; +} + +void +hpet_info(int argc, const char **argv) +{ +} + +void +hpet_poll(int argc, const char **argv) +{ + unsigned long freq; + int iterations, i, fd; + struct pollfd pfd; + struct hpet_info info; + struct timeval stv, etv; + struct timezone tz; + long usec; + + if (argc != 3) { + fprintf(stderr, "hpet_poll: device-name freq iterations\n"); + return; + } + + freq = atoi(argv[1]); + iterations = atoi(argv[2]); + + fd = open(argv[0], O_RDWR); + + if (fd < 0) { + fprintf(stderr, "hpet_poll: open of %s failed\n", argv[0]); + return; + } + + if (ioctl(fd, HPET_IRQFREQ, freq) < 0) { + fprintf(stderr, "hpet_poll: HPET_IRQFREQ failed\n"); + goto out; + } + + if (ioctl(fd, HPET_INFO, &info) < 0) { + fprintf(stderr, "hpet_poll: failed to get info\n"); + goto out; + } + + fprintf(stderr, "hpet_poll: info.hi_flags 0x%lx\n", info.hi_flags); + + if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) { + fprintf(stderr, "hpet_poll: HPET_EPI failed\n"); + goto out; + } + + if (ioctl(fd, HPET_IE_ON, 0) < 0) { + fprintf(stderr, "hpet_poll, HPET_IE_ON failed\n"); + goto out; + } + + pfd.fd = fd; + pfd.events = POLLIN; + + for (i = 0; i < iterations; i++) { + pfd.revents = 0; + gettimeofday(&stv, &tz); + if (poll(&pfd, 1, -1) < 0) + fprintf(stderr, "hpet_poll: poll failed\n"); + else { + long data; + + gettimeofday(&etv, &tz); + 
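+			/*
+			 * Elapsed time across the poll() call, taken from
+			 * the two gettimeofday() samples, in microseconds.
+			 */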
usec = stv.tv_sec * 1000000 + stv.tv_usec; + usec = (etv.tv_sec * 1000000 + etv.tv_usec) - usec; + + fprintf(stderr, + "hpet_poll: expired time = 0x%lx\n", usec); + + fprintf(stderr, "hpet_poll: revents = 0x%x\n", + pfd.revents); + + if (read(fd, &data, sizeof(data)) != sizeof(data)) { + fprintf(stderr, "hpet_poll: read failed\n"); + } + else + fprintf(stderr, "hpet_poll: data 0x%lx\n", + data); + } + } + +out: + close(fd); + return; +} + +static int hpet_sigio_count; + +static void +hpet_sigio(int val) +{ + fprintf(stderr, "hpet_sigio: called\n"); + hpet_sigio_count++; +} + +void +hpet_fasync(int argc, const char **argv) +{ + unsigned long freq; + int iterations, i, fd, value; + sig_t oldsig; + struct hpet_info info; + + hpet_sigio_count = 0; + fd = -1; + + if ((oldsig = signal(SIGIO, hpet_sigio)) == SIG_ERR) { + fprintf(stderr, "hpet_fasync: failed to set signal handler\n"); + return; + } + + if (argc != 3) { + fprintf(stderr, "hpet_fasync: device-name freq iterations\n"); + goto out; + } + + fd = open(argv[0], O_RDWR); + + if (fd < 0) { + fprintf(stderr, "hpet_fasync: failed to open %s\n", argv[0]); + return; + } + + + if ((fcntl(fd, F_SETOWN, getpid()) == 1) || + ((value = fcntl(fd, F_GETFL)) == 1) || + (fcntl(fd, F_SETFL, value | O_ASYNC) == 1)) { + fprintf(stderr, "hpet_fasync: fcntl failed\n"); + goto out; + } + + freq = atoi(argv[1]); + iterations = atoi(argv[2]); + + if (ioctl(fd, HPET_IRQFREQ, freq) < 0) { + fprintf(stderr, "hpet_fasync: HPET_IRQFREQ failed\n"); + goto out; + } + + if (ioctl(fd, HPET_INFO, &info) < 0) { + fprintf(stderr, "hpet_fasync: failed to get info\n"); + goto out; + } + + fprintf(stderr, "hpet_fasync: info.hi_flags 0x%lx\n", info.hi_flags); + + if (info.hi_flags && (ioctl(fd, HPET_EPI, 0) < 0)) { + fprintf(stderr, "hpet_fasync: HPET_EPI failed\n"); + goto out; + } + + if (ioctl(fd, HPET_IE_ON, 0) < 0) { + fprintf(stderr, "hpet_fasync, HPET_IE_ON failed\n"); + goto out; + } + + for (i = 0; i < iterations; i++) { + (void) pause(); + fprintf(stderr, "hpet_fasync: count = %d\n", hpet_sigio_count); + } + +out: + signal(SIGIO, oldsig); + + if (fd >= 0) + close(fd); + + return; +} + +The kernel API has three interfaces exported from the driver: + + hpet_register(struct hpet_task *tp, int periodic) + hpet_unregister(struct hpet_task *tp) + hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg) + +The kernel module using this interface fills in the ht_func and ht_data members of the +hpet_task structure before calling hpet_register. hpet_control simply vectors to the hpet_ioctl +routine and has the same commands and respective arguments as the user API. hpet_unregister +is used to terminate usage of the HPET timer reserved by hpet_register. + + diff --git a/Documentation/powerpc/mpc52xx.txt b/Documentation/powerpc/mpc52xx.txt new file mode 100644 index 000000000..6efe0a0a5 --- /dev/null +++ b/Documentation/powerpc/mpc52xx.txt @@ -0,0 +1,48 @@ +Linux 2.6.x on MPC52xx family +----------------------------- + +For the latest info, go to http://www.246tNt.com/mpc52xx/state.txt + +To compile/use : + + - U-Boot: + # tftpboot 200000 uImage + => tftpboot 400000 pRamdisk + => bootm 200000 400000 + + - DBug: + # dn -i zImage.initrd.lite5200 + + +Some remarks : + - The port is named mpc52xxx, and config options are PPC_MPC52xx. The MGT5100 + is not supported, and I'm not sure anyone is interesting in working on it + so. I didn't took 5xxx because there's apparently a lot of 5xxx that have + nothing to do with the MPC5200. 
I also included the 'MPC' for the same
+   reason.
+ - Of course, I drew inspiration from the 2.4 port. If you think I forgot to
+   mention you/your company in the copyright of some code, I'll correct it
+   ASAP.
+ - The code wants the MBAR to be set at 0xf0000000 by the bootloader. It's
+   mapped 1:1 with the MMU. If, for whatever reason, you want to change this,
+   beware that some code depends on the 0xf0000000 address and other code
+   depends on the 1:1 mapping.
+ - Most of the code assumes that port multiplexing, frequency selection, ...
+   has already been done. IMHO this should be done as early as possible, in
+   the bootloader. If for whatever reason you can't do it there, do it in the
+   platform setup code (if U-Boot) or in the arch/ppc/boot/simple/... (if
+   DBug)
diff --git a/arch/arm/boot/bootp/initrd.S b/arch/arm/boot/bootp/initrd.S
new file mode 100644
index 000000000..d81ea1837
--- /dev/null
+++ b/arch/arm/boot/bootp/initrd.S
@@ -0,0 +1,6 @@
+	.type	initrd_start,#object
+	.globl	initrd_start
+initrd_start:
+	.incbin	INITRD
+	.globl	initrd_end
+initrd_end:
diff --git a/arch/arm/boot/bootp/kernel.S b/arch/arm/boot/bootp/kernel.S
new file mode 100644
index 000000000..b87a25c7e
--- /dev/null
+++ b/arch/arm/boot/bootp/kernel.S
@@ -0,0 +1,6 @@
+	.globl	kernel_start
+kernel_start:
+	.incbin	"arch/arm/boot/zImage"
+	.globl	kernel_end
+kernel_end:
+	.align	2
diff --git a/arch/arm/boot/compressed/piggy.S b/arch/arm/boot/compressed/piggy.S
new file mode 100644
index 000000000..54c951800
--- /dev/null
+++ b/arch/arm/boot/compressed/piggy.S
@@ -0,0 +1,6 @@
+	.section .piggydata,#alloc
+	.globl	input_data
+input_data:
+	.incbin	"arch/arm/boot/compressed/piggy.gz"
+	.globl	input_data_end
+input_data_end:
diff --git a/arch/arm/mach-integrator/clock.c b/arch/arm/mach-integrator/clock.c
new file mode 100644
index 000000000..6af3715ad
--- /dev/null
+++ b/arch/arm/mach-integrator/clock.c
@@ -0,0 +1,138 @@
+/*
+ *  linux/arch/arm/mach-integrator/clock.c
+ *
+ *  Copyright (C) 2004 ARM Limited.
+ *  Written by Deep Blue Solutions Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "clock.h" + +static LIST_HEAD(clocks); +static DECLARE_MUTEX(clocks_sem); + +struct clk *clk_get(struct device *dev, const char *id) +{ + struct clk *p, *clk = ERR_PTR(-ENOENT); + + down(&clocks_sem); + list_for_each_entry(p, &clocks, node) { + if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { + clk = p; + break; + } + } + up(&clocks_sem); + + return clk; +} +EXPORT_SYMBOL(clk_get); + +void clk_put(struct clk *clk) +{ + module_put(clk->owner); +} +EXPORT_SYMBOL(clk_put); + +int clk_enable(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_disable); + +int clk_use(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_use); + +void clk_unuse(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_unuse); + +unsigned long clk_get_rate(struct clk *clk) +{ + return clk->rate; +} +EXPORT_SYMBOL(clk_get_rate); + +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + return rate; +} +EXPORT_SYMBOL(clk_round_rate); + +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + int ret = -EIO; + if (clk->setvco) { + struct icst525_vco vco; + + vco = icst525_khz_to_vco(clk->params, rate); + clk->rate = icst525_khz(clk->params, vco); + + printk("Clock %s: setting VCO reg params: S=%d R=%d V=%d\n", + clk->name, vco.s, vco.r, vco.v); + + clk->setvco(clk, vco); + ret = 0; + } + return 0; +} +EXPORT_SYMBOL(clk_set_rate); + +/* + * These are fixed clocks. + */ +static struct clk kmi_clk = { + .name = "KMIREFCLK", + .rate = 24000000, +}; + +static struct clk uart_clk = { + .name = "UARTCLK", + .rate = 14745600, +}; + +int clk_register(struct clk *clk) +{ + down(&clocks_sem); + list_add(&clk->node, &clocks); + up(&clocks_sem); + return 0; +} +EXPORT_SYMBOL(clk_register); + +void clk_unregister(struct clk *clk) +{ + down(&clocks_sem); + list_del(&clk->node); + up(&clocks_sem); +} +EXPORT_SYMBOL(clk_unregister); + +static int __init clk_init(void) +{ + clk_register(&kmi_clk); + clk_register(&uart_clk); + return 0; +} +arch_initcall(clk_init); diff --git a/arch/arm/mach-integrator/clock.h b/arch/arm/mach-integrator/clock.h new file mode 100644 index 000000000..09e6328ce --- /dev/null +++ b/arch/arm/mach-integrator/clock.h @@ -0,0 +1,25 @@ +/* + * linux/arch/arm/mach-integrator/clock.h + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +struct module; +struct icst525_params; + +struct clk { + struct list_head node; + unsigned long rate; + struct module *owner; + const char *name; + const struct icst525_params *params; + void *data; + void (*setvco)(struct clk *, struct icst525_vco vco); +}; + +int clk_register(struct clk *clk); +void clk_unregister(struct clk *clk); diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c new file mode 100644 index 000000000..dcbb3c751 --- /dev/null +++ b/arch/arm/mach-sa1100/collie.c @@ -0,0 +1,144 @@ +/* + * linux/arch/arm/mach-sa1100/collie.c + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * This file contains all Collie-specific tweaks. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ChangeLog: + * 03-06-2004 John Lenz + * 06-04-2002 Chris Larson + * 04-16-2001 Lineo Japan,Inc. ... + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +#include "generic.h" + +static void __init scoop_init(void) +{ + +#define COLLIE_SCP_INIT_DATA(adr,dat) (((adr)<<16)|(dat)) +#define COLLIE_SCP_INIT_DATA_END ((unsigned long)-1) + static const unsigned long scp_init[] = { + COLLIE_SCP_INIT_DATA(COLLIE_SCP_MCR, 0x0140), // 00 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_MCR, 0x0100), + COLLIE_SCP_INIT_DATA(COLLIE_SCP_CDR, 0x0000), // 04 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_CPR, 0x0000), // 0C + COLLIE_SCP_INIT_DATA(COLLIE_SCP_CCR, 0x0000), // 10 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_IMR, 0x0000), // 18 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_IRM, 0x00FF), // 14 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_ISR, 0x0000), // 1C + COLLIE_SCP_INIT_DATA(COLLIE_SCP_IRM, 0x0000), + COLLIE_SCP_INIT_DATA(COLLIE_SCP_GPCR, COLLIE_SCP_IO_DIR), // 20 + COLLIE_SCP_INIT_DATA(COLLIE_SCP_GPWR, COLLIE_SCP_IO_OUT), // 24 + COLLIE_SCP_INIT_DATA_END + }; + int i; + for (i = 0; scp_init[i] != COLLIE_SCP_INIT_DATA_END; i++) { + int adr = scp_init[i] >> 16; + COLLIE_SCP_REG(adr) = scp_init[i] & 0xFFFF; + } + +} + +static struct resource locomo_resources[] = { + [0] = { + .start = 0x40000000, + .end = 0x40001fff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = IRQ_GPIO25, + .end = IRQ_GPIO25, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device locomo_device = { + .name = "locomo", + .id = 0, + .num_resources = ARRAY_SIZE(locomo_resources), + .resource = locomo_resources, +}; + +static struct platform_device *devices[] __initdata = { + &locomo_device, +}; + +static void __init collie_init(void) +{ + int ret = 0; + + /* cpu initialize */ + GAFR = ( GPIO_SSP_TXD | \ + GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SSP_CLK | GPIO_TIC_ACK | \ + GPIO_32_768kHz ); + + GPDR = ( GPIO_LDD8 | GPIO_LDD9 | GPIO_LDD10 | GPIO_LDD11 | GPIO_LDD12 | \ + GPIO_LDD13 | GPIO_LDD14 | GPIO_LDD15 | GPIO_SSP_TXD | \ + GPIO_SSP_SCLK | GPIO_SSP_SFRM | GPIO_SDLC_SCLK | \ + GPIO_SDLC_AAF | GPIO_UART_SCLK1 | GPIO_32_768kHz ); + GPLR = GPIO_GPIO18; + + // PPC pin setting + PPDR = ( PPC_LDD0 | PPC_LDD1 | PPC_LDD2 | PPC_LDD3 | PPC_LDD4 | PPC_LDD5 | \ + PPC_LDD6 | PPC_LDD7 | PPC_L_PCLK | PPC_L_LCLK | PPC_L_FCLK | PPC_L_BIAS | \ + PPC_TXD1 | PPC_TXD2 | PPC_RXD2 | PPC_TXD3 | PPC_TXD4 | PPC_SCLK | PPC_SFRM ); + + PSDR = ( PPC_RXD1 | PPC_RXD2 | PPC_RXD3 | PPC_RXD4 ); + + GAFR |= GPIO_32_768kHz; + GPDR |= GPIO_32_768kHz; + TUCR = TUCR_32_768kHz; + + scoop_init(); + + ret = platform_add_devices(devices, ARRAY_SIZE(devices)); + if (ret) { + printk(KERN_WARNING "collie: Unable to register LoCoMo device\n"); + } +} + +static struct map_desc collie_io_desc[] __initdata = { + /* virtual physical length type */ + {0xe8000000, 0x00000000, 0x02000000, MT_DEVICE}, /* 32M main flash (cs0) */ + {0xea000000, 0x08000000, 0x02000000, MT_DEVICE}, /* 32M boot flash (cs1) */ + {0xf0000000, 0x40000000, 0x01000000, MT_DEVICE}, /* 16M LOCOMO & SCOOP (cs4) */ +}; + +static void __init collie_map_io(void) +{ + sa1100_map_io(); + iotable_init(collie_io_desc, ARRAY_SIZE(collie_io_desc)); +} + +MACHINE_START(COLLIE, "Sharp-Collie") + BOOT_MEM(0xc0000000, 0x80000000, 0xf8000000) + 
MAPIO(collie_map_io) + INITIRQ(sa1100_init_irq) + INIT_MACHINE(collie_init) +MACHINE_END diff --git a/arch/arm/mach-versatile/clock.c b/arch/arm/mach-versatile/clock.c new file mode 100644 index 000000000..c1f91fad8 --- /dev/null +++ b/arch/arm/mach-versatile/clock.c @@ -0,0 +1,146 @@ +/* + * linux/arch/arm/mach-versatile/clock.c + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "clock.h" + +static LIST_HEAD(clocks); +static DECLARE_MUTEX(clocks_sem); + +struct clk *clk_get(struct device *dev, const char *id) +{ + struct clk *p, *clk = ERR_PTR(-ENOENT); + + down(&clocks_sem); + list_for_each_entry(p, &clocks, node) { + if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) { + clk = p; + break; + } + } + up(&clocks_sem); + + return clk; +} +EXPORT_SYMBOL(clk_get); + +void clk_put(struct clk *clk) +{ + module_put(clk->owner); +} +EXPORT_SYMBOL(clk_put); + +int clk_enable(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_enable); + +void clk_disable(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_disable); + +int clk_use(struct clk *clk) +{ + return 0; +} +EXPORT_SYMBOL(clk_use); + +void clk_unuse(struct clk *clk) +{ +} +EXPORT_SYMBOL(clk_unuse); + +unsigned long clk_get_rate(struct clk *clk) +{ + return clk->rate; +} +EXPORT_SYMBOL(clk_get_rate); + +long clk_round_rate(struct clk *clk, unsigned long rate) +{ + return rate; +} +EXPORT_SYMBOL(clk_round_rate); + +int clk_set_rate(struct clk *clk, unsigned long rate) +{ + int ret = -EIO; +#if 0 // Not yet + if (clk->setvco) { + struct icst525_vco vco; + + vco = icst525_khz_to_vco(clk->params, rate); + clk->rate = icst525_khz(clk->params, vco); + + printk("Clock %s: setting VCO reg params: S=%d R=%d V=%d\n", + clk->name, vco.s, vco.r, vco.v); + + clk->setvco(clk, vco); + ret = 0; + } +#endif + return 0; +} +EXPORT_SYMBOL(clk_set_rate); + +/* + * These are fixed clocks. + */ +static struct clk kmi_clk = { + .name = "KMIREFCLK", + .rate = 24000000, +}; + +static struct clk uart_clk = { + .name = "UARTCLK", + .rate = 24000000, +}; + +static struct clk mmci_clk = { + .name = "MCLK", + .rate = 33000000, +}; + +int clk_register(struct clk *clk) +{ + down(&clocks_sem); + list_add(&clk->node, &clocks); + up(&clocks_sem); + return 0; +} +EXPORT_SYMBOL(clk_register); + +void clk_unregister(struct clk *clk) +{ + down(&clocks_sem); + list_del(&clk->node); + up(&clocks_sem); +} +EXPORT_SYMBOL(clk_unregister); + +static int __init clk_init(void) +{ + clk_register(&kmi_clk); + clk_register(&uart_clk); + clk_register(&mmci_clk); + return 0; +} +arch_initcall(clk_init); diff --git a/arch/arm/mach-versatile/clock.h b/arch/arm/mach-versatile/clock.h new file mode 100644 index 000000000..12e68ecde --- /dev/null +++ b/arch/arm/mach-versatile/clock.h @@ -0,0 +1,25 @@ +/* + * linux/arch/arm/mach-versatile/clock.h + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +struct module; +struct icst525_params; + +struct clk { + struct list_head node; + unsigned long rate; + struct module *owner; + const char *name; + const struct icst525_params *params; + void *data; + void (*setvco)(struct clk *, struct icst525_vco vco); +}; + +int clk_register(struct clk *clk); +void clk_unregister(struct clk *clk); diff --git a/arch/i386/lib/bitops.c b/arch/i386/lib/bitops.c new file mode 100644 index 000000000..97db3853d --- /dev/null +++ b/arch/i386/lib/bitops.c @@ -0,0 +1,70 @@ +#include +#include + +/** + * find_next_bit - find the first set bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +int find_next_bit(const unsigned long *addr, int size, int offset) +{ + const unsigned long *p = addr + (offset >> 5); + int set = 0, bit = offset & 31, res; + + if (bit) { + /* + * Look for nonzero in the first 32 bits: + */ + __asm__("bsfl %1,%0\n\t" + "jne 1f\n\t" + "movl $32, %0\n" + "1:" + : "=r" (set) + : "r" (*p >> bit)); + if (set < (32 - bit)) + return set + offset; + set = 32 - bit; + p++; + } + /* + * No set bit yet, search remaining full words for a bit + */ + res = find_first_bit (p, size - 32 * (p - addr)); + return (offset + set + res); +} +EXPORT_SYMBOL(find_next_bit); + +/** + * find_next_zero_bit - find the first zero bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +int find_next_zero_bit(const unsigned long *addr, int size, int offset) +{ + unsigned long * p = ((unsigned long *) addr) + (offset >> 5); + int set = 0, bit = offset & 31, res; + + if (bit) { + /* + * Look for zero in the first 32 bits. 
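+	 * The shifted word is inverted below, so bsfl (which finds the
+	 * lowest set bit) actually locates the lowest clear bit.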
+ */ + __asm__("bsfl %1,%0\n\t" + "jne 1f\n\t" + "movl $32, %0\n" + "1:" + : "=r" (set) + : "r" (~(*p >> bit))); + if (set < (32 - bit)) + return set + offset; + set = 32 - bit; + p++; + } + /* + * No zero yet, search remaining full bytes for a zero + */ + res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr)); + return (offset + set + res); +} +EXPORT_SYMBOL(find_next_zero_bit); diff --git a/arch/mips/configs/ocelot_g_defconfig b/arch/mips/configs/ocelot_g_defconfig new file mode 100644 index 000000000..fa2584125 --- /dev/null +++ b/arch/mips/configs/ocelot_g_defconfig @@ -0,0 +1,592 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_MIPS=y +CONFIG_MIPS64=y +CONFIG_64BIT=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Machine selection +# +# CONFIG_MACH_JAZZ is not set +# CONFIG_MACH_VR41XX is not set +# CONFIG_MIPS_COBALT is not set +# CONFIG_MACH_DECSTATION is not set +# CONFIG_MIPS_EV64120 is not set +# CONFIG_MIPS_EV96100 is not set +# CONFIG_MIPS_IVR is not set +# CONFIG_LASAT is not set +# CONFIG_MIPS_ITE8172 is not set +# CONFIG_MIPS_ATLAS is not set +# CONFIG_MIPS_MALTA is not set +# CONFIG_MIPS_SEAD is not set +# CONFIG_MOMENCO_OCELOT is not set +CONFIG_MOMENCO_OCELOT_G=y +# CONFIG_MOMENCO_OCELOT_C is not set +# CONFIG_MOMENCO_JAGUAR_ATX is not set +# CONFIG_PMC_YOSEMITE is not set +# CONFIG_DDB5074 is not set +# CONFIG_DDB5476 is not set +# CONFIG_DDB5477 is not set +# CONFIG_NEC_OSPREY is not set +# CONFIG_SGI_IP22 is not set +# CONFIG_SGI_IP27 is not set +# CONFIG_SGI_IP32 is not set +# CONFIG_SIBYTE_SB1xxx_SOC is not set +# CONFIG_SNI_RM200_PCI is not set +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_HAVE_DEC_LOCK=y +CONFIG_DMA_NONCOHERENT=y +# CONFIG_CPU_LITTLE_ENDIAN is not set +CONFIG_IRQ_CPU=y +CONFIG_IRQ_CPU_RM7K=y +CONFIG_PCI_MARVELL=y +CONFIG_SWAP_IO_SPACE=y +# CONFIG_SYSCLK_75 is not set +# CONFIG_SYSCLK_83 is not set +CONFIG_SYSCLK_100=y +CONFIG_MIPS_L1_CACHE_SHIFT=5 +# CONFIG_FB is not set + +# +# CPU selection +# +# CONFIG_CPU_MIPS32 is not set +# CONFIG_CPU_MIPS64 is not set +# CONFIG_CPU_R3000 is not set +# CONFIG_CPU_TX39XX is not set +# CONFIG_CPU_VR41XX is not set +# CONFIG_CPU_R4300 is not set +# CONFIG_CPU_R4X00 is not set +# CONFIG_CPU_TX49XX is not set +# CONFIG_CPU_R5000 is not set +# CONFIG_CPU_R5432 is not set +# CONFIG_CPU_R6000 is not set +# CONFIG_CPU_NEVADA is not set +# CONFIG_CPU_R8000 is not set +# CONFIG_CPU_R10000 is not set +CONFIG_CPU_RM7000=y +# CONFIG_CPU_RM9000 is not set +# CONFIG_CPU_SB1 is not set +CONFIG_PAGE_SIZE_4KB=y +# CONFIG_PAGE_SIZE_8KB is not set +# CONFIG_PAGE_SIZE_16KB is not set +# CONFIG_PAGE_SIZE_64KB is not set +CONFIG_BOARD_SCACHE=y +CONFIG_RM7000_CPU_SCACHE=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_CPU_HAS_LLSC=y +CONFIG_CPU_HAS_LLDSCD=y +CONFIG_CPU_HAS_SYNC=y +# CONFIG_PREEMPT is not set +# CONFIG_DEBUG_SPINLOCK_SLEEP is not set + +# +# Bus 
options (PCI, PCMCIA, EISA, ISA, TC) +# +CONFIG_HW_HAS_PCI=y +CONFIG_PCI=y +CONFIG_PCI_LEGACY_PROC=y +CONFIG_PCI_NAMES=y +CONFIG_MMU=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +CONFIG_MIPS32_COMPAT=y +CONFIG_COMPAT=y +CONFIG_MIPS32_O32=y +CONFIG_MIPS32_N32=y +CONFIG_BINFMT_ELF32=y + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_SX8 is not set +# CONFIG_BLK_DEV_RAM is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +# CONFIG_PACKET is not set +CONFIG_NETLINK_DEV=y +CONFIG_UNIX=y +CONFIG_NET_KEY=y +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set +CONFIG_XFRM=y +# CONFIG_XFRM_USER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_ETHERTAP is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_GALILEO_64240_ETH=y +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set + +# +# Tulip family network device support +# +# CONFIG_NET_TULIP is not set +# CONFIG_HP100 is not set +# CONFIG_NET_PCI is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is 
not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=4 +# CONFIG_SERIAL_8250_EXTENDED is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +CONFIG_DEVPTS_FS_XATTR=y +CONFIG_DEVPTS_FS_SECURITY=y +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# 
CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +CONFIG_NFSD=y +# CONFIG_NFSD_V3 is not set +# CONFIG_NFSD_TCP is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_EXPORTFS=y +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Kernel hacking +# +CONFIG_CROSSCOMPILE=y +CONFIG_CMDLINE="" +# CONFIG_DEBUG_KERNEL is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC16=y +# CONFIG_CRC32 is not set +# CONFIG_LIBCRC32C is not set diff --git a/arch/mips/pci/fixup-jaguar.c b/arch/mips/pci/fixup-jaguar.c new file mode 100644 index 000000000..fa78b9b1f --- /dev/null +++ b/arch/mips/pci/fixup-jaguar.c @@ -0,0 +1,42 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Marvell MV64340 interrupt fixup code. + * + * Marvell wants an NDA for their docs so this was written without + * documentation. You've been warned. + * + * Copyright (C) 2004 Ralf Baechle + */ +#include +#include +#include + +#include +#include + +/* + * WARNING: Example of how _NOT_ to do it. + */ +int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int bus = dev->bus->number; + + if (bus == 0 && slot == 1) + return 3; /* PCI-X A */ + if (bus == 0 && slot == 2) + return 4; /* PCI-X B */ + if (bus == 1 && slot == 1) + return 5; /* PCI A */ + if (bus == 1 && slot == 2) + return 6; /* PCI B */ + +return 0; + panic("Whooops in pcibios_map_irq"); +} + +struct pci_fixup pcibios_fixups[] = { + {0} +}; diff --git a/arch/mips/pci/fixup-ocelot-c.c b/arch/mips/pci/fixup-ocelot-c.c new file mode 100644 index 000000000..0cc86eceb --- /dev/null +++ b/arch/mips/pci/fixup-ocelot-c.c @@ -0,0 +1,39 @@ +/* + * Copyright 2002 Momentum Computer Inc. + * Author: Matthew Dharm + * + * Based on work for the Linux port to the Ocelot board, which is + * Copyright 2001 MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * + * arch/mips/momentum/ocelot_g/pci.c + * Board-specific PCI routines for mv64340 controller. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include +#include +#include +#include + +int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int bus = dev->bus->number; + + if (bus == 0 && slot == 1) + return 2; /* PCI-X A */ + if (bus == 1 && slot == 1) + return 12; /* PCI-X B */ + if (bus == 1 && slot == 2) + return 4; /* PCI B */ + +return 0; + panic("Whooops in pcibios_map_irq"); +} + +struct pci_fixup pcibios_fixups[] = { + {0} +}; diff --git a/arch/mips/pci/fixup-ocelot-g.c b/arch/mips/pci/fixup-ocelot-g.c new file mode 100644 index 000000000..9a2cc8505 --- /dev/null +++ b/arch/mips/pci/fixup-ocelot-g.c @@ -0,0 +1,35 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org) + */ +#include +#include +#include +#include + +int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int bus = dev->bus->number; + + if (bus == 0 && slot == 1) /* Intel 82543 Gigabit MAC */ + return 2; /* irq_nr is 2 for INT0 */ + + if (bus == 0 && slot == 2) /* Intel 82543 Gigabit MAC */ + return 3; /* irq_nr is 3 for INT1 */ + + if (bus == 1 && slot == 3) /* Intel 21555 bridge */ + return 5; /* irq_nr is 8 for INT6 */ + + if (bus == 1 && slot == 4) /* PMC Slot */ + return 9; /* irq_nr is 9 for INT7 */ + + return -1; +} + +struct pci_fixup pcibios_fixups[] = { + {0} +}; diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c new file mode 100644 index 000000000..ca4d99fbe --- /dev/null +++ b/arch/mips/pci/fixup-tb0219.c @@ -0,0 +1,64 @@ +/* + * fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups. + * + * Copyright (C) 2003 Megasolution Inc. + * Copyright (C) 2004 Yoichi Yuasa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +#include +#include + +#include + +int __init pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) +{ + int irq = -1; + + switch (slot) { + case 12: + vr41xx_set_irq_trigger(TB0219_PCI_SLOT1_PIN, + TRIGGER_LEVEL, + SIGNAL_THROUGH); + vr41xx_set_irq_level(TB0219_PCI_SLOT1_PIN, + LEVEL_LOW); + irq = TB0219_PCI_SLOT1_IRQ; + break; + case 13: + vr41xx_set_irq_trigger(TB0219_PCI_SLOT2_PIN, + TRIGGER_LEVEL, + SIGNAL_THROUGH); + vr41xx_set_irq_level(TB0219_PCI_SLOT2_PIN, + LEVEL_LOW); + irq = TB0219_PCI_SLOT2_IRQ; + break; + case 14: + vr41xx_set_irq_trigger(TB0219_PCI_SLOT3_PIN, + TRIGGER_LEVEL, + SIGNAL_THROUGH); + vr41xx_set_irq_level(TB0219_PCI_SLOT3_PIN, + LEVEL_LOW); + irq = TB0219_PCI_SLOT3_IRQ; + break; + default: + break; + } + + return irq; +} + +struct pci_fixup pcibios_fixups[] __initdata = { + { .pass = 0, }, +}; diff --git a/arch/mips/pci/ops-marvell.c b/arch/mips/pci/ops-marvell.c new file mode 100644 index 000000000..1ac5c5919 --- /dev/null +++ b/arch/mips/pci/ops-marvell.c @@ -0,0 +1,93 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) + */ +#include +#include +#include + +#include + +static int mv_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 * val) +{ + struct mv_pci_controller *mvbc = bus->sysdata; + unsigned long address_reg, data_reg; + u32 address; + + address_reg = mvbc->config_addr; + data_reg = mvbc->config_vreg; + + /* Accessing device 31 crashes those Marvells. Since years. + Will they ever make sane controllers ... */ + if (PCI_SLOT(devfn) == 31) + return PCIBIOS_DEVICE_NOT_FOUND; + + address = (bus->number << 16) | (devfn << 8) | + (where & 0xfc) | 0x80000000; + + /* start the configuration cycle */ + MV_WRITE(address_reg, address); + + switch (size) { + case 1: + *val = MV_READ_8(data_reg + (where & 0x3)); + break; + + case 2: + *val = MV_READ_16(data_reg + (where & 0x3)); + break; + + case 4: + *val = MV_READ(data_reg); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int mv_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct mv_pci_controller *mvbc = bus->sysdata; + unsigned long address_reg, data_reg; + u32 address; + + address_reg = mvbc->config_addr; + data_reg = mvbc->config_vreg; + + /* Accessing device 31 crashes those Marvells. Since years. + Will they ever make sane controllers ... 
*/ + if (PCI_SLOT(devfn) == 31) + return PCIBIOS_DEVICE_NOT_FOUND; + + address = (bus->number << 16) | (devfn << 8) | + (where & 0xfc) | 0x80000000; + + /* start the configuration cycle */ + MV_WRITE(address_reg, address); + + switch (size) { + case 1: + MV_WRITE_8(data_reg + (where & 0x3), val); + break; + + case 2: + MV_WRITE_16(data_reg + (where & 0x3), val); + break; + + case 4: + MV_WRITE(data_reg, val); + break; + } + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops mv_pci_ops = { + .read = mv_read_config, + .write = mv_write_config +}; diff --git a/arch/mips/pci/ops-titan-ht.c b/arch/mips/pci/ops-titan-ht.c new file mode 100644 index 000000000..46c636c27 --- /dev/null +++ b/arch/mips/pci/ops-titan-ht.c @@ -0,0 +1,125 @@ +/* + * Copyright 2003 PMC-Sierra + * Author: Manish Lachwani (lachwani@pmc-sierra.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include + +#include + +static int titan_ht_config_read_dword(struct pci_bus *bus, unsigned int devfn, + int offset, u32 * val) +{ + volatile uint32_t address; + int busno; + + busno = bus->number; + + address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000; + if (busno != 0) + address |= 1; + + /* + * RM9000 HT Errata: Issue back to back HT config + * transcations. 
Issue a BIU sync before and + * after the HT cycle + */ + + *(volatile int32_t *) 0xfb0000f0 |= 0x2; + + udelay(30); + + *(volatile int32_t *) 0xfb0006f8 = address; + *(val) = *(volatile int32_t *) 0xfb0006fc; + + udelay(30); + + * (volatile int32_t *) 0xfb0000f0 |= 0x2; + + return PCIBIOS_SUCCESSFUL; +} + +static int titan_ht_config_read(struct pci_bus *bus, unsigned int devfn, + int offset, int size, u32 * val) +{ + uint32_t dword; + + titan_ht_config_read_dword(bus, devfn, offset, &dword); + + dword >>= ((offset & 3) << 3); + dword &= (0xffffffffU >> ((4 - size) << 8)); + + return PCIBIOS_SUCCESSFUL; +} + +static inline int titan_ht_config_write_dword(struct pci_bus *bus, + unsigned int devfn, int offset, u32 val) +{ + volatile uint32_t address; + int busno; + + busno = bus->number; + + address = (busno << 16) | (devfn << 8) | (offset & 0xfc) | 0x80000000; + if (busno != 0) + address |= 1; + + *(volatile int32_t *) 0xfb0000f0 |= 0x2; + + udelay(30); + + *(volatile int32_t *) 0xfb0006f8 = address; + *(volatile int32_t *) 0xfb0006fc = val; + + udelay(30); + + *(volatile int32_t *) 0xfb0000f0 |= 0x2; + + return PCIBIOS_SUCCESSFUL; +} + +static int titan_ht_config_write(struct pci_bus *bus, unsigned int devfn, + int offset, int size, u32 val) +{ + uint32_t val1, val2, mask; + + titan_ht_config_read_dword(bus, devfn, offset, &val2); + + val1 = val << ((offset & 3) << 3); + mask = ~(0xffffffffU >> ((4 - size) << 8)); + val2 &= ~(mask << ((offset & 3) << 8)); + + titan_ht_config_write_dword(bus, devfn, offset, val1 | val2); + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops titan_ht_pci_ops = { + .read = titan_ht_config_read, + .write = titan_ht_config_write, +}; diff --git a/arch/mips/pci/ops-vr41xx.c b/arch/mips/pci/ops-vr41xx.c new file mode 100644 index 000000000..44654605e --- /dev/null +++ b/arch/mips/pci/ops-vr41xx.c @@ -0,0 +1,126 @@ +/* + * ops-vr41xx.c, PCI configuration routines for the PCIU of NEC VR4100 series. + * + * Copyright (C) 2001-2003 MontaVista Software Inc. + * Author: Yoichi Yuasa + * Copyright (C) 2004 Yoichi Yuasa + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +/* + * Changes: + * MontaVista Software Inc. or + * - New creation, NEC VR4122 and VR4131 are supported. 
+ */ +#include +#include + +#include + +#define PCICONFDREG KSEG1ADDR(0x0f000c14) +#define PCICONFAREG KSEG1ADDR(0x0f000c18) + +static inline int set_pci_configuration_address(unsigned char number, + unsigned int devfn, int where) +{ + if (number == 0) { + /* + * Type 0 configuration + */ + if (PCI_SLOT(devfn) < 11 || where > 0xff) + return -EINVAL; + + writel((1U << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) | + (where & 0xfc), PCICONFAREG); + } else { + /* + * Type 1 configuration + */ + if (where > 0xff) + return -EINVAL; + + writel(((uint32_t)number << 16) | ((devfn & 0xff) << 8) | + (where & 0xfc) | 1U, PCICONFAREG); + } + + return 0; +} + +static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, + int size, uint32_t *val) +{ + uint32_t data; + + *val = 0xffffffffU; + if (set_pci_configuration_address(bus->number, devfn, where) < 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + data = readl(PCICONFDREG); + + switch (size) { + case 1: + *val = (data >> ((where & 3) << 3)) & 0xffU; + break; + case 2: + *val = (data >> ((where & 2) << 3)) & 0xffffU; + break; + case 4: + *val = data; + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + + return PCIBIOS_SUCCESSFUL; +} + +static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, + int size, uint32_t val) +{ + uint32_t data; + int shift; + + if (set_pci_configuration_address(bus->number, devfn, where) < 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + data = readl(PCICONFDREG); + + switch (size) { + case 1: + shift = (where & 3) << 3; + data &= ~(0xffU << shift); + data |= ((val & 0xffU) << shift); + break; + case 2: + shift = (where & 2) << 3; + data &= ~(0xffffU << shift); + data |= ((val & 0xffffU) << shift); + break; + case 4: + data = val; + break; + default: + return PCIBIOS_FUNC_NOT_SUPPORTED; + } + + writel(data, PCICONFDREG); + + return PCIBIOS_SUCCESSFUL; +} + +struct pci_ops vr41xx_pci_ops = { + .read = pci_config_read, + .write = pci_config_write, +}; diff --git a/arch/mips/pmc-sierra/yosemite/dbg_io.c b/arch/mips/pmc-sierra/yosemite/dbg_io.c new file mode 100644 index 000000000..1ff8d95d0 --- /dev/null +++ b/arch/mips/pmc-sierra/yosemite/dbg_io.c @@ -0,0 +1,184 @@ +/* + * Copyright 2003 PMC-Sierra + * Author: Manish Lachwani (lachwani@pmc-sierra.com) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * Support for KGDB for the Yosemite board. 
We make use of single serial + * port to be used for KGDB as well as console. The second serial port + * seems to be having a problem. Single IRQ is allocated for both the + * ports. Hence, the interrupt routing code needs to figure out whether + * the interrupt came from channel A or B. + */ + +#include + +#if defined(CONFIG_KGDB) +#include + +/* + * Baud rate, Parity, Data and Stop bit settings for the + * serial port on the Yosemite. Note that the Early printk + * patch has been added. So, we should be all set to go + */ +#define YOSEMITE_BAUD_2400 2400 +#define YOSEMITE_BAUD_4800 4800 +#define YOSEMITE_BAUD_9600 9600 +#define YOSEMITE_BAUD_19200 19200 +#define YOSEMITE_BAUD_38400 38400 +#define YOSEMITE_BAUD_57600 57600 +#define YOSEMITE_BAUD_115200 115200 + +#define YOSEMITE_PARITY_NONE 0 +#define YOSEMITE_PARITY_ODD 0x08 +#define YOSEMITE_PARITY_EVEN 0x18 +#define YOSEMITE_PARITY_MARK 0x28 +#define YOSEMITE_PARITY_SPACE 0x38 + +#define YOSEMITE_DATA_5BIT 0x0 +#define YOSEMITE_DATA_6BIT 0x1 +#define YOSEMITE_DATA_7BIT 0x2 +#define YOSEMITE_DATA_8BIT 0x3 + +#define YOSEMITE_STOP_1BIT 0x0 +#define YOSEMITE_STOP_2BIT 0x4 + +/* This is crucial */ +#define SERIAL_REG_OFS 0x1 + +#define SERIAL_RCV_BUFFER 0x0 +#define SERIAL_TRANS_HOLD 0x0 +#define SERIAL_SEND_BUFFER 0x0 +#define SERIAL_INTR_ENABLE (1 * SERIAL_REG_OFS) +#define SERIAL_INTR_ID (2 * SERIAL_REG_OFS) +#define SERIAL_DATA_FORMAT (3 * SERIAL_REG_OFS) +#define SERIAL_LINE_CONTROL (3 * SERIAL_REG_OFS) +#define SERIAL_MODEM_CONTROL (4 * SERIAL_REG_OFS) +#define SERIAL_RS232_OUTPUT (4 * SERIAL_REG_OFS) +#define SERIAL_LINE_STATUS (5 * SERIAL_REG_OFS) +#define SERIAL_MODEM_STATUS (6 * SERIAL_REG_OFS) +#define SERIAL_RS232_INPUT (6 * SERIAL_REG_OFS) +#define SERIAL_SCRATCH_PAD (7 * SERIAL_REG_OFS) + +#define SERIAL_DIVISOR_LSB (0 * SERIAL_REG_OFS) +#define SERIAL_DIVISOR_MSB (1 * SERIAL_REG_OFS) + +/* + * Functions to READ and WRITE to serial port 0 + */ +#define SERIAL_READ(ofs) (*((volatile unsigned char*) \ + (TITAN_SERIAL_BASE + ofs))) + +#define SERIAL_WRITE(ofs, val) ((*((volatile unsigned char*) \ + (TITAN_SERIAL_BASE + ofs))) = val) + +/* + * Functions to READ and WRITE to serial port 1 + */ +#define SERIAL_READ_1(ofs) (*((volatile unsigned char*) \ + (TITAN_SERIAL_BASE_1 + ofs) + +#define SERIAL_WRITE_1(ofs, val) ((*((volatile unsigned char*) \ + (TITAN_SERIAL_BASE_1 + ofs))) = val) + +/* + * Second serial port initialization + */ +void init_second_port(void) +{ + /* Disable Interrupts */ + SERIAL_WRITE_1(SERIAL_LINE_CONTROL, 0x0); + SERIAL_WRITE_1(SERIAL_INTR_ENABLE, 0x0); + + { + unsigned int divisor; + + SERIAL_WRITE_1(SERIAL_LINE_CONTROL, 0x80); + divisor = TITAN_SERIAL_BASE_BAUD / YOSEMITE_BAUD_115200; + SERIAL_WRITE_1(SERIAL_DIVISOR_LSB, divisor & 0xff); + + SERIAL_WRITE_1(SERIAL_DIVISOR_MSB, + (divisor & 0xff00) >> 8); + SERIAL_WRITE_1(SERIAL_LINE_CONTROL, 0x0); + } + + SERIAL_WRITE_1(SERIAL_DATA_FORMAT, YOSEMITE_DATA_8BIT | + YOSEMITE_PARITY_NONE | YOSEMITE_STOP_1BIT); + + /* Enable Interrupts */ + SERIAL_WRITE_1(SERIAL_INTR_ENABLE, 0xf); +} + +/* Initialize the serial port for KGDB debugging */ +void debugInit(unsigned int baud, unsigned char data, unsigned char parity, + unsigned char stop) +{ + /* Disable Interrupts */ + SERIAL_WRITE(SERIAL_LINE_CONTROL, 0x0); + SERIAL_WRITE(SERIAL_INTR_ENABLE, 0x0); + + { + unsigned int divisor; + + SERIAL_WRITE(SERIAL_LINE_CONTROL, 0x80); + + divisor = TITAN_SERIAL_BASE_BAUD / baud; + SERIAL_WRITE(SERIAL_DIVISOR_LSB, divisor & 0xff); + + SERIAL_WRITE(SERIAL_DIVISOR_MSB, (divisor & 
0xff00) >> 8); + SERIAL_WRITE(SERIAL_LINE_CONTROL, 0x0); + } + + SERIAL_WRITE(SERIAL_DATA_FORMAT, data | parity | stop); +} + +static int remoteDebugInitialized = 0; + +unsigned char getDebugChar(void) +{ + if (!remoteDebugInitialized) { + remoteDebugInitialized = 1; + debugInit(YOSEMITE_BAUD_115200, + YOSEMITE_DATA_8BIT, + YOSEMITE_PARITY_NONE, YOSEMITE_STOP_1BIT); + } + + while ((SERIAL_READ(SERIAL_LINE_STATUS) & 0x1) == 0); + return SERIAL_READ(SERIAL_RCV_BUFFER); +} + +int putDebugChar(unsigned char byte) +{ + if (!remoteDebugInitialized) { + remoteDebugInitialized = 1; + debugInit(YOSEMITE_BAUD_115200, + YOSEMITE_DATA_8BIT, + YOSEMITE_PARITY_NONE, YOSEMITE_STOP_1BIT); + } + + while ((SERIAL_READ(SERIAL_LINE_STATUS) & 0x20) == 0); + SERIAL_WRITE(SERIAL_SEND_BUFFER, byte); + + return 1; +} +#endif diff --git a/arch/mips/pmc-sierra/yosemite/py-console.c b/arch/mips/pmc-sierra/yosemite/py-console.c new file mode 100644 index 000000000..22c336f9a --- /dev/null +++ b/arch/mips/pmc-sierra/yosemite/py-console.c @@ -0,0 +1,130 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001, 2002, 2004 Ralf Baechle + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* SUPERIO uart register map */ +struct yo_uartregs { + union { + volatile u8 rbr; /* read only, DLAB == 0 */ + volatile u8 thr; /* write only, DLAB == 0 */ + volatile u8 dll; /* DLAB == 1 */ + } u1; + union { + volatile u8 ier; /* DLAB == 0 */ + volatile u8 dlm; /* DLAB == 1 */ + } u2; + union { + volatile u8 iir; /* read only */ + volatile u8 fcr; /* write only */ + } u3; + volatile u8 iu_lcr; + volatile u8 iu_mcr; + volatile u8 iu_lsr; + volatile u8 iu_msr; + volatile u8 iu_scr; +} yo_uregs_t; + +#define iu_rbr u1.rbr +#define iu_thr u1.thr +#define iu_dll u1.dll +#define iu_ier u2.ier +#define iu_dlm u2.dlm +#define iu_iir u3.iir +#define iu_fcr u3.fcr + +extern unsigned long uart_base; + +#define IO_BASE_64 0x9000000000000000ULL + +static unsigned char readb_outer_space(unsigned long phys) +{ + unsigned long long vaddr = IO_BASE_64 | phys; + unsigned char res; + unsigned int sr; + + sr = read_c0_status(); + write_c0_status((sr | ST0_KX) & ~ ST0_IE); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + + __asm__ __volatile__ ( + " .set mips3 \n" + " ld %0, (%0) \n" + " lbu %0, (%0) \n" + " .set mips0 \n" + : "=r" (res) + : "0" (&vaddr)); + + write_c0_status(sr); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + + return res; +} + +static void writeb_outer_space(unsigned long phys, unsigned char c) +{ + unsigned long long vaddr = IO_BASE_64 | phys; + unsigned long tmp; + unsigned int sr; + + sr = read_c0_status(); + write_c0_status((sr | ST0_KX) & ~ ST0_IE); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + + __asm__ __volatile__ ( + " .set mips3 \n" + " ld %0, (%1) \n" + " sb %2, (%0) \n" + " .set mips0 \n" + : "=r" (tmp) + : "r" (&vaddr), "r" (c)); + + write_c0_status(sr); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); + __asm__("sll $0, $0, 2\n"); +} + +static inline struct yo_uartregs *console_uart(void) +{ + return (struct yo_uartregs *) (uart_base + 8); +} + +void 
prom_putchar(char c) +{ + unsigned long lsr = 0xfd000008UL + offsetof(struct yo_uartregs, iu_lsr); + unsigned long thr = 0xfd000008UL + offsetof(struct yo_uartregs, iu_thr); + + while ((readb_outer_space(lsr) & 0x20) == 0); + writeb_outer_space(thr, c); +} + +char __init prom_getchar(void) +{ + return 0; +} diff --git a/arch/ppc/configs/ads8272_defconfig b/arch/ppc/configs/ads8272_defconfig new file mode 100644 index 000000000..709a18066 --- /dev/null +++ b/arch/ppc/configs/ads8272_defconfig @@ -0,0 +1,583 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_MMU=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_HAVE_DEC_LOCK=y +CONFIG_PPC=y +CONFIG_PPC32=y +CONFIG_GENERIC_NVRAM=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +# CONFIG_KALLSYMS is not set +CONFIG_FUTEX=y +# CONFIG_EPOLL is not set +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Processor +# +CONFIG_6xx=y +# CONFIG_40x is not set +# CONFIG_44x is not set +# CONFIG_POWER3 is not set +# CONFIG_POWER4 is not set +# CONFIG_8xx is not set +# CONFIG_CPU_FREQ is not set +CONFIG_EMBEDDEDBOOT=y +CONFIG_PPC_STD_MMU=y + +# +# Platform options +# +# CONFIG_PPC_MULTIPLATFORM is not set +# CONFIG_APUS is not set +# CONFIG_WILLOW is not set +# CONFIG_PCORE is not set +# CONFIG_POWERPMC250 is not set +# CONFIG_EV64260 is not set +# CONFIG_SPRUCE is not set +# CONFIG_LOPEC is not set +# CONFIG_MCPN765 is not set +# CONFIG_MVME5100 is not set +# CONFIG_PPLUS is not set +# CONFIG_PRPMC750 is not set +# CONFIG_PRPMC800 is not set +# CONFIG_SANDPOINT is not set +# CONFIG_ADIR is not set +# CONFIG_K2 is not set +# CONFIG_PAL4 is not set +# CONFIG_GEMINI is not set +# CONFIG_EST8260 is not set +# CONFIG_SBC82xx is not set +# CONFIG_SBS8260 is not set +# CONFIG_RPX6 is not set +# CONFIG_TQM8260 is not set +CONFIG_ADS8272=y +CONFIG_PQ2ADS=y +CONFIG_8260=y +CONFIG_8272=y +CONFIG_CPM2=y +# CONFIG_PC_KEYBOARD is not set +CONFIG_SERIAL_CONSOLE=y +# CONFIG_SMP is not set +# CONFIG_PREEMPT is not set +# CONFIG_HIGHMEM is not set +CONFIG_KERNEL_ELF=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +# CONFIG_CMDLINE_BOOL is not set + +# +# Bus options +# +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +# CONFIG_PCI_LEGACY_PROC is not set +# CONFIG_PCI_NAMES is not set + +# +# Advanced setup +# +# CONFIG_ADVANCED_OPTIONS is not set + +# +# Default settings for advanced configuration options are used +# +CONFIG_HIGHMEM_START=0xfe000000 +CONFIG_LOWMEM_SIZE=0x30000000 +CONFIG_KERNEL_START=0xc0000000 +CONFIG_TASK_SIZE=0x80000000 +CONFIG_BOOT_LOAD=0x00400000 + +# +# Device Drivers +# + +# +# Generic Driver Options +# + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# 
CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_CARMEL is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=32768 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# Macintosh device drivers +# + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +# CONFIG_OAKNET is not set +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set + +# +# Tulip family network device support +# +# CONFIG_NET_TULIP is not set +# CONFIG_HP100 is not set +# CONFIG_NET_PCI is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# 
CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +# CONFIG_SERIO is not set +# CONFIG_SERIO_I8042 is not set + +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +CONFIG_GEN_RTC=y +# CONFIG_GEN_RTC_X is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +# CONFIG_NFS_V3 is not set +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_LDM_PARTITION is 
not set +# CONFIG_NEC98_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set +# CONFIG_SCC_ENET is not set +CONFIG_FEC_ENET=y +# CONFIG_USE_MDIO is not set + +# +# CPM2 Options +# +CONFIG_SCC_CONSOLE=y +CONFIG_FCC1_ENET=y +# CONFIG_FCC2_ENET is not set +# CONFIG_FCC3_ENET is not set + +# +# Library routines +# +# CONFIG_CRC32 is not set +# CONFIG_LIBCRC32C is not set + +# +# Kernel hacking +# +# CONFIG_DEBUG_KERNEL is not set +# CONFIG_KGDB_CONSOLE is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set diff --git a/arch/ppc/configs/lite5200_defconfig b/arch/ppc/configs/lite5200_defconfig new file mode 100644 index 000000000..7e7a943d8 --- /dev/null +++ b/arch/ppc/configs/lite5200_defconfig @@ -0,0 +1,436 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_MMU=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_HAVE_DEC_LOCK=y +CONFIG_PPC=y +CONFIG_PPC32=y +CONFIG_GENERIC_NVRAM=y +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_ALL is not set +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +# +# Loadable module support +# +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +CONFIG_OBSOLETE_MODPARM=y +CONFIG_MODVERSIONS=y +CONFIG_KMOD=y +# +# Processor +# +CONFIG_6xx=y +# CONFIG_40x is not set +# CONFIG_44x is not set +# CONFIG_POWER3 is not set +# CONFIG_POWER4 is not set +# CONFIG_8xx is not set +# CONFIG_E500 is not set +# CONFIG_ALTIVEC is not set +# CONFIG_TAU is not set +# CONFIG_CPU_FREQ is not set +CONFIG_FSL_OCP=y +CONFIG_PPC_STD_MMU=y +# +# Platform options +# +# CONFIG_PPC_MULTIPLATFORM is not set +# CONFIG_APUS is not set +# CONFIG_WILLOW is not set +# CONFIG_PCORE is not set +# CONFIG_POWERPMC250 is not set +# CONFIG_EV64260 is not set +# CONFIG_SPRUCE is not set +# CONFIG_LOPEC is not set +# CONFIG_MCPN765 is not set +# CONFIG_MVME5100 is not set +# CONFIG_PPLUS is not set +# CONFIG_PRPMC750 is not set +# CONFIG_PRPMC800 is not set +# CONFIG_SANDPOINT is not set +# CONFIG_ADIR is not set +# CONFIG_K2 is not set +# CONFIG_PAL4 is not set +# CONFIG_GEMINI is not set +# CONFIG_EST8260 is not set +# CONFIG_SBC82xx is not set +# CONFIG_SBS8260 is not set +# CONFIG_RPX6 is not set +# CONFIG_TQM8260 is not set +# CONFIG_ADS8272 is not set +CONFIG_LITE5200=y +CONFIG_PPC_MPC52xx=y +# CONFIG_SMP is not set +# CONFIG_PREEMPT is not set +# CONFIG_HIGHMEM is not set +CONFIG_KERNEL_ELF=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttyS0 root=/dev/ram0 rw" +# +# Bus options +# +CONFIG_GENERIC_ISA_DMA=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +# CONFIG_PCI_LEGACY_PROC is not set +# CONFIG_PCI_NAMES is not set +# +# Advanced setup +# +CONFIG_ADVANCED_OPTIONS=y +CONFIG_HIGHMEM_START=0xfe000000 +# CONFIG_LOWMEM_SIZE_BOOL is not set 
+CONFIG_LOWMEM_SIZE=0x30000000 +# CONFIG_KERNEL_START_BOOL is not set +CONFIG_KERNEL_START=0xc0000000 +# CONFIG_TASK_SIZE_BOOL is not set +CONFIG_TASK_SIZE=0x80000000 +# CONFIG_BOOT_LOAD_BOOL is not set +CONFIG_BOOT_LOAD=0x00800000 +# +# Device Drivers +# +# +# Generic Driver Options +# +CONFIG_PREVENT_FIRMWARE_BUILD=y +# CONFIG_DEBUG_DRIVER is not set +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set +# +# Parallel port support +# +# CONFIG_PARPORT is not set +# +# Plug and Play support +# +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_LBD is not set +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set +# +# SCSI device support +# +# CONFIG_SCSI is not set +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set +# +# Fusion MPT device support +# +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set +# +# I2O device support +# +# CONFIG_I2O is not set +# +# Macintosh device drivers +# +# +# Networking support +# +# CONFIG_NET is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# +# ISDN subsystem +# +# +# Telephony Support +# +# CONFIG_PHONE is not set +# +# Input device support +# +CONFIG_INPUT=y +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_EVBUG=y +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +# CONFIG_SERIO_I8042 is not set +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set +# +# Input Device Drivers +# +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_MPC52xx=y +CONFIG_SERIAL_MPC52xx_CONSOLE=y +CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600 +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set +# +# I2C support +# +# CONFIG_I2C is not set +# +# Misc devices +# +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set +# +# Digital Video Broadcasting Devices +# +# +# Graphics support +# +# CONFIG_FB is not set +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +# +# Sound +# +# CONFIG_SOUND is not set +# +# USB support +# +# CONFIG_USB is not set +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set 
+# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +# +# Native Language Support +# +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +CONFIG_NLS_ISO8859_1=m +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set +# +# Library routines +# +# CONFIG_CRC16 is not set +# CONFIG_CRC32 is not set +# CONFIG_LIBCRC32C is not set +# +# Profiling support +# +# CONFIG_PROFILING is not set +# +# Kernel hacking +# +CONFIG_DEBUG_KERNEL=y +# CONFIG_DEBUG_SLAB is not set +CONFIG_MAGIC_SYSRQ=y +# CONFIG_DEBUG_SPINLOCK is not set +CONFIG_DEBUG_SPINLOCK_SLEEP=y +# CONFIG_KGDB is not set +# CONFIG_XMON is not set +# CONFIG_BDI_SWITCH is not set +CONFIG_DEBUG_INFO=y +CONFIG_SERIAL_TEXT_DEBUG=y +CONFIG_PPC_OCP=y +# +# Security options +# +# CONFIG_SECURITY is not set +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set diff --git a/arch/ppc/configs/rpx8260_defconfig b/arch/ppc/configs/rpx8260_defconfig new file mode 100644 index 000000000..a69e0b485 --- /dev/null +++ b/arch/ppc/configs/rpx8260_defconfig @@ -0,0 +1,556 @@ +# +# Automatically 
generated make config: don't edit +# +CONFIG_MMU=y +CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_HAVE_DEC_LOCK=y +CONFIG_PPC=y +CONFIG_PPC32=y +CONFIG_GENERIC_NVRAM=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +# CONFIG_SWAP is not set +CONFIG_SYSVIPC=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +# CONFIG_KALLSYMS is not set +CONFIG_FUTEX=y +# CONFIG_EPOLL is not set +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# Processor +# +CONFIG_6xx=y +# CONFIG_40x is not set +# CONFIG_44x is not set +# CONFIG_POWER3 is not set +# CONFIG_POWER4 is not set +# CONFIG_8xx is not set +# CONFIG_E500 is not set +# CONFIG_CPU_FREQ is not set +CONFIG_EMBEDDEDBOOT=y +CONFIG_PPC_STD_MMU=y + +# +# Platform options +# +# CONFIG_PPC_MULTIPLATFORM is not set +# CONFIG_APUS is not set +# CONFIG_WILLOW is not set +# CONFIG_PCORE is not set +# CONFIG_POWERPMC250 is not set +# CONFIG_EV64260 is not set +# CONFIG_SPRUCE is not set +# CONFIG_LOPEC is not set +# CONFIG_MCPN765 is not set +# CONFIG_MVME5100 is not set +# CONFIG_PPLUS is not set +# CONFIG_PRPMC750 is not set +# CONFIG_PRPMC800 is not set +# CONFIG_SANDPOINT is not set +# CONFIG_ADIR is not set +# CONFIG_K2 is not set +# CONFIG_PAL4 is not set +# CONFIG_GEMINI is not set +# CONFIG_EST8260 is not set +# CONFIG_SBC82xx is not set +# CONFIG_SBS8260 is not set +CONFIG_RPX8260=y +# CONFIG_TQM8260 is not set +# CONFIG_ADS8272 is not set +CONFIG_8260=y +CONFIG_CPM2=y +# CONFIG_PC_KEYBOARD is not set +# CONFIG_SMP is not set +# CONFIG_PREEMPT is not set +# CONFIG_HIGHMEM is not set +CONFIG_KERNEL_ELF=y +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_MISC is not set +# CONFIG_CMDLINE_BOOL is not set + +# +# Bus options +# +# CONFIG_PCI is not set +# CONFIG_PCI_DOMAINS is not set + +# +# Advanced setup +# +# CONFIG_ADVANCED_OPTIONS is not set + +# +# Default settings for advanced configuration options are used +# +CONFIG_HIGHMEM_START=0xfe000000 +CONFIG_LOWMEM_SIZE=0x30000000 +CONFIG_KERNEL_START=0xc0000000 +CONFIG_TASK_SIZE=0x80000000 +CONFIG_BOOT_LOAD=0x00400000 + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +CONFIG_BLK_DEV_INITRD=y +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# + +# +# Macintosh device drivers +# + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y 
+CONFIG_IP_MULTICAST=y +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +CONFIG_IP_PNP_BOOTP=y +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_IP_MROUTE is not set +# CONFIG_ARPD is not set +CONFIG_SYN_COOKIES=y +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +# CONFIG_OAKNET is not set + +# +# Ethernet (1000 Mbit) +# + +# +# Ethernet (10000 Mbit) +# + +# +# Token Ring devices +# + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +# CONFIG_INPUT is not set + +# +# Userland interfaces +# + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +# CONFIG_SERIO is not set +# CONFIG_SERIO_I8042 is not set + +# +# Input Device Drivers +# + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_CPM=y +CONFIG_SERIAL_CPM_CONSOLE=y +# CONFIG_SERIAL_CPM_SCC1 is not set +# CONFIG_SERIAL_CPM_SCC2 is not set +# CONFIG_SERIAL_CPM_SCC3 is not set +# CONFIG_SERIAL_CPM_SCC4 is not set +CONFIG_SERIAL_CPM_SMC1=y +# CONFIG_SERIAL_CPM_SMC2 is not set +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_NVRAM is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Dallas's 1-wire bus +# +# CONFIG_W1 is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Sound +# +# CONFIG_SOUND 
is not set + +# +# USB support +# + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_XATTR=y +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_JBD=y +# CONFIG_JBD_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +# CONFIG_MSDOS_PARTITION is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_EFI_PARTITION is not set + +# +# Native Language Support +# +# CONFIG_NLS is not set +# CONFIG_SCC_ENET is not set +CONFIG_FEC_ENET=y +# CONFIG_USE_MDIO is not set + +# +# CPM2 Options +# +# CONFIG_FCC1_ENET is not set +# CONFIG_FCC2_ENET is not set +CONFIG_FCC3_ENET=y + +# +# Library routines +# +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC32 is not set +# CONFIG_LIBCRC32C is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_DEBUG_KERNEL is not set +# CONFIG_KGDB_CONSOLE is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set diff --git a/arch/ppc/kernel/head_e500.S b/arch/ppc/kernel/head_e500.S new file mode 100644 index 000000000..8f0c590f1 --- /dev/null +++ b/arch/ppc/kernel/head_e500.S @@ -0,0 +1,1329 @@ +/* + * arch/ppc/kernel/head_e500.S + * + * Kernel execution entry point code. + * + * Copyright (c) 1995-1996 Gary Thomas + * Initial PowerPC version. + * Copyright (c) 1996 Cort Dougan + * Rewritten for PReP + * Copyright (c) 1996 Paul Mackerras + * Low-level exception handers, MMU support, and rewrite. 
+ * Copyright (c) 1997 Dan Malek + * PowerPC 8xx modifications. + * Copyright (c) 1998-1999 TiVo, Inc. + * PowerPC 403GCX modifications. + * Copyright (c) 1999 Grant Erickson + * PowerPC 403GCX/405GP modifications. + * Copyright 2000 MontaVista Software Inc. + * PPC405 modifications + * PowerPC 403GCX/405GP modifications. + * Author: MontaVista Software, Inc. + * frank_rowand@mvista.com or source@mvista.com + * debbie_chu@mvista.com + * Copyright 2002-2004 MontaVista Software, Inc. + * PowerPC 44x support, Matt Porter + * Copyright 2004 Freescale Semiconductor, Inc + * PowerPC e500 modifications, Kumar Gala + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Macros + */ + +#define SET_IVOR(vector_number, vector_label) \ + li r26,vector_label@l; \ + mtspr SPRN_IVOR##vector_number,r26; \ + sync + +/* As with the other PowerPC ports, it is expected that when code + * execution begins here, the following registers contain valid, yet + * optional, information: + * + * r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.) + * r4 - Starting address of the init RAM disk + * r5 - Ending address of the init RAM disk + * r6 - Start of kernel command line string (e.g. "mem=128") + * r7 - End of kernel command line string + * + */ + .text +_GLOBAL(_stext) +_GLOBAL(_start) + /* + * Reserve a word at a fixed location to store the address + * of abatron_pteptrs + */ + nop +/* + * Save parameters we are passed + */ + mr r31,r3 + mr r30,r4 + mr r29,r5 + mr r28,r6 + mr r27,r7 + li r24,0 /* CPU number */ + +/* We try to not make any assumptions about how the boot loader + * setup or used the TLBs. We invalidate all mappings from the + * boot loader and load a single entry in TLB1[0] to map the + * first 16M of kernel memory. Any boot info passed from the + * bootloader needs to live in this first 16M. + * + * Requirement on bootloader: + * - The page we're executing in needs to reside in TLB1 and + * have IPROT=1. If not an invalidate broadcast could + * evict the entry we're currently executing in. + * + * r3 = Index of TLB1 were executing in + * r4 = Current MSR[IS] + * r5 = Index of TLB1 temp mapping + * + * Later in mapin_ram we will correctly map lowmem, and resize TLB1[0] + * if needed + */ + +/* 1. Find the index of the entry we're executing in */ + bl invstr /* Find our address */ +invstr: mflr r6 /* Make it accessible */ + mfmsr r7 + rlwinm r4,r7,27,31,31 /* extract MSR[IS] */ + mfspr r7, SPRN_PID0 + slwi r7,r7,16 + or r7,r7,r4 + mtspr SPRN_MAS6,r7 + tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */ + mfspr r7,SPRN_MAS1 + andis. r7,r7,MAS1_VALID@h + bne match_TLB + mfspr r7,SPRN_PID1 + slwi r7,r7,16 + or r7,r7,r4 + mtspr SPRN_MAS6,r7 + tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */ + mfspr r7,SPRN_MAS1 + andis. r7,r7,MAS1_VALID@h + bne match_TLB + mfspr r7, SPRN_PID2 + slwi r7,r7,16 + or r7,r7,r4 + mtspr SPRN_MAS6,r7 + tlbsx 0,r6 /* Fall through, we had to match */ +match_TLB: + mfspr r7,SPRN_MAS0 + rlwinm r3,r7,16,28,31 /* Extract MAS0(Entry) */ + + mfspr r7,SPRN_MAS1 /* Insure IPROT set */ + oris r7,r7,MAS1_IPROT@h + mtspr SPRN_MAS1,r7 + tlbwe + +/* 2. Invalidate all entries except the entry we're executing in */ + mfspr r9,SPRN_TLB1CFG + andi. 
r9,r9,0xfff + li r6,0 /* Set Entry counter to 0 */ +1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r6,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */ + mtspr SPRN_MAS0,r7 + tlbre + mfspr r7,SPRN_MAS1 + rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */ + cmpw r3,r6 + beq skpinv /* Dont update the current execution TLB */ + mtspr SPRN_MAS1,r7 + tlbwe + isync +skpinv: addi r6,r6,1 /* Increment */ + cmpw r6,r9 /* Are we done? */ + bne 1b /* If not, repeat */ + + /* Invalidate TLB0 */ + li r6,0x04 + tlbivax 0,r6 +#ifdef CONFIG_SMP + tlbsync +#endif + /* Invalidate TLB1 */ + li r6,0x0c + tlbivax 0,r6 +#ifdef CONFIG_SMP + tlbsync +#endif + msync + +/* 3. Setup a temp mapping and jump to it */ + andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */ + addi r5, r5, 0x1 + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r3,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r7 + tlbre + + /* Just modify the entry ID and EPN for the temp mapping */ + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r5,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ + mtspr SPRN_MAS0,r7 + xori r6,r4,1 /* Setup TMP mapping in the other Address space */ + slwi r6,r6,12 + oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h + ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_4K))@l + mtspr SPRN_MAS1,r6 + mfspr r6,SPRN_MAS2 + li r7,0 /* temp EPN = 0 */ + rlwimi r7,r6,0,20,31 + mtspr SPRN_MAS2,r7 + tlbwe + + xori r6,r4,1 + slwi r6,r6,5 /* setup new context with other address space */ + bl 1f /* Find our address */ +1: mflr r9 + rlwimi r7,r9,0,20,31 + addi r7,r7,24 + mtspr SRR0,r7 + mtspr SRR1,r6 + rfi + +/* 4. Clear out PIDs & Search info */ + li r6,0 + mtspr SPRN_PID0,r6 + mtspr SPRN_PID1,r6 + mtspr SPRN_PID2,r6 + mtspr SPRN_MAS6,r6 + +/* 5. Invalidate mapping we started in */ + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r3,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */ + mtspr SPRN_MAS0,r7 + tlbre + li r6,0 + mtspr SPRN_MAS1,r6 + tlbwe + /* Invalidate TLB1 */ + li r9,0x0c + tlbivax 0,r9 +#ifdef CONFIG_SMP + tlbsync +#endif + msync + +/* 6. Setup KERNELBASE mapping in TLB1[0] */ + lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */ + mtspr SPRN_MAS0,r6 + lis r6,(MAS1_VALID|MAS1_IPROT)@h + ori r6,r6,(MAS1_TSIZE(BOOKE_PAGESZ_16M))@l + mtspr SPRN_MAS1,r6 + li r7,0 + lis r6,KERNELBASE@h + ori r6,r6,KERNELBASE@l + rlwimi r6,r7,0,20,31 + mtspr SPRN_MAS2,r6 + li r7,(MAS3_SX|MAS3_SW|MAS3_SR) + mtspr SPRN_MAS3,r7 + tlbwe + +/* 7. Jump to KERNELBASE mapping */ + li r7,0 + bl 1f /* Find our address */ +1: mflr r9 + rlwimi r6,r9,0,20,31 + addi r6,r6,24 + mtspr SRR0,r6 + mtspr SRR1,r7 + rfi /* start execution out of TLB1[0] entry */ + +/* 8. 
Clear out the temp mapping */ + lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */ + rlwimi r7,r5,16,12,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */ + mtspr SPRN_MAS0,r7 + tlbre + mtspr SPRN_MAS1,r8 + tlbwe + /* Invalidate TLB1 */ + li r9,0x0c + tlbivax 0,r9 +#ifdef CONFIG_SMP + tlbsync +#endif + msync + + /* Establish the interrupt vector offsets */ + SET_IVOR(0, CriticalInput); + SET_IVOR(1, MachineCheck); + SET_IVOR(2, DataStorage); + SET_IVOR(3, InstructionStorage); + SET_IVOR(4, ExternalInput); + SET_IVOR(5, Alignment); + SET_IVOR(6, Program); + SET_IVOR(7, FloatingPointUnavailable); + SET_IVOR(8, SystemCall); + SET_IVOR(9, AuxillaryProcessorUnavailable); + SET_IVOR(10, Decrementer); + SET_IVOR(11, FixedIntervalTimer); + SET_IVOR(12, WatchdogTimer); + SET_IVOR(13, DataTLBError); + SET_IVOR(14, InstructionTLBError); + SET_IVOR(15, Debug); + SET_IVOR(32, SPEUnavailable); + SET_IVOR(33, SPEFloatingPointData); + SET_IVOR(34, SPEFloatingPointRound); + SET_IVOR(35, PerformanceMonitor); + + /* Establish the interrupt vector base */ + lis r4,interrupt_base@h /* IVPR only uses the high 16-bits */ + mtspr SPRN_IVPR,r4 + + /* Setup the defaults for TLB entries */ + li r2,MAS4_TSIZED(BOOKE_PAGESZ_4K) + mtspr SPRN_MAS4, r2 + +#if 0 + /* Enable DOZE */ + mfspr r2,SPRN_HID0 + oris r2,r2,HID0_DOZE@h + mtspr SPRN_HID0, r2 +#endif + + /* + * This is where the main kernel code starts. + */ + + /* ptr to current */ + lis r2,init_task@h + ori r2,r2,init_task@l + + /* ptr to current thread */ + addi r4,r2,THREAD /* init task's THREAD */ + mtspr SPRG3,r4 + + /* stack */ + lis r1,init_thread_union@h + ori r1,r1,init_thread_union@l + li r0,0 + stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) + + bl early_init + + mfspr r3,SPRN_TLB1CFG + andi. r3,r3,0xfff + lis r4,num_tlbcam_entries@ha + stw r3,num_tlbcam_entries@l(r4) +/* + * Decide what sort of machine this is and initialize the MMU. + */ + mr r3,r31 + mr r4,r30 + mr r5,r29 + mr r6,r28 + mr r7,r27 + bl machine_init + bl MMU_init + + /* Setup PTE pointers for the Abatron bdiGDB */ + lis r6, swapper_pg_dir@h + ori r6, r6, swapper_pg_dir@l + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + lis r4, KERNELBASE@h + ori r4, r4, KERNELBASE@l + stw r5, 0(r4) /* Save abatron_pteptrs at a fixed location */ + stw r6, 0(r5) + + /* Let's move on */ + lis r4,start_kernel@h + ori r4,r4,start_kernel@l + lis r3,MSR_KERNEL@h + ori r3,r3,MSR_KERNEL@l + mtspr SRR0,r4 + mtspr SRR1,r3 + rfi /* change context and jump to start_kernel */ + +/* + * Interrupt vector entry code + * + * The Book E MMUs are always on so we don't need to handle + * interrupts in real mode as with previous PPC processors. In + * this case we handle interrupts in the kernel virtual address + * space. + * + * Interrupt vectors are dynamically placed relative to the + * interrupt prefix as determined by the address of interrupt_base. + * The interrupt vectors offsets are programmed using the labels + * for each interrupt vector entry. + * + * Interrupt vectors must be aligned on a 16 byte boundary. + * We align on a 32 byte cache line boundary for good measure. + */ + +#define NORMAL_EXCEPTION_PROLOG \ + mtspr SPRN_SPRG0,r10; /* save two registers to work with */\ + mtspr SPRN_SPRG1,r11; \ + mtspr SPRN_SPRG4W,r1; \ + mfcr r10; /* save CR in r10 for now */\ + mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ + andi. 
r11,r11,MSR_PR; \ + beq 1f; \ + mfspr r1,SPRG3; /* if from user, start at top of */\ + lwz r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack */\ + addi r1,r1,THREAD_SIZE; \ +1: subi r1,r1,INT_FRAME_SIZE; /* Allocate an exception frame */\ + tophys(r11,r1); \ + stw r10,_CCR(r11); /* save various registers */\ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mfspr r10,SPRG0; \ + stw r10,GPR10(r11); \ + mfspr r12,SPRG1; \ + stw r12,GPR11(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r10,SPRG4R; \ + mfspr r12,SRR0; \ + stw r10,GPR1(r11); \ + mfspr r9,SRR1; \ + stw r10,0(r11); \ + rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + +/* + * Exception prolog for critical exceptions. This is a little different + * from the normal exception prolog above since a critical exception + * can potentially occur at any point during normal exception processing. + * Thus we cannot use the same SPRG registers as the normal prolog above. + * Instead we use a couple of words of memory at low physical addresses. + * This is OK since we don't support SMP on these processors. For Book E + * processors, we also have a reserved register (SPRG2) that is only used + * in critical exceptions so we can free up a GPR to use as the base for + * indirect access to the critical exception save area. This is necessary + * since the MMU is always on and the save area is offset from KERNELBASE. + */ +#define CRITICAL_EXCEPTION_PROLOG \ + mtspr SPRG2,r8; /* SPRG2 only used in criticals */ \ + lis r8,crit_save@ha; \ + stw r10,crit_r10@l(r8); \ + stw r11,crit_r11@l(r8); \ + mfspr r10,SPRG0; \ + stw r10,crit_sprg0@l(r8); \ + mfspr r10,SPRG1; \ + stw r10,crit_sprg1@l(r8); \ + mfspr r10,SPRG4R; \ + stw r10,crit_sprg4@l(r8); \ + mfspr r10,SPRG5R; \ + stw r10,crit_sprg5@l(r8); \ + mfspr r10,SPRG7R; \ + stw r10,crit_sprg7@l(r8); \ + mfspr r10,SPRN_PID; \ + stw r10,crit_pid@l(r8); \ + mfspr r10,SRR0; \ + stw r10,crit_srr0@l(r8); \ + mfspr r10,SRR1; \ + stw r10,crit_srr1@l(r8); \ + mfspr r8,SPRG2; /* SPRG2 only used in criticals */ \ + mfcr r10; /* save CR in r10 for now */\ + mfspr r11,SPRN_CSRR1; /* check whether user or kernel */\ + andi. r11,r11,MSR_PR; \ + lis r11,critical_stack_top@h; \ + ori r11,r11,critical_stack_top@l; \ + beq 1f; \ + /* COMING FROM USER MODE */ \ + mfspr r11,SPRG3; /* if from user, start at top of */\ + lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ + addi r11,r11,THREAD_SIZE; \ +1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ + stw r10,_CCR(r11); /* save various registers */\ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ + stw r12,_DEAR(r11); /* since they may have had stuff */\ + mfspr r9,SPRN_ESR; /* in them at the point where the */\ + stw r9,_ESR(r11); /* exception was taken */\ + mfspr r12,CSRR0; \ + stw r1,GPR1(r11); \ + mfspr r9,CSRR1; \ + stw r1,0(r11); \ + tovirt(r1,r11); \ + rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + +/* + * Exception prolog for machine check exceptions. This is similar to + * the critical exception prolog, except that machine check exceptions + * have their own save area. 
For Book E processors, we also have a + * reserved register (SPRG6) that is only used in machine check exceptions + * so we can free up a GPR to use as the base for indirect access to the + * machine check exception save area. This is necessary since the MMU + * is always on and the save area is offset from KERNELBASE. + */ +#define MCHECK_EXCEPTION_PROLOG \ + mtspr SPRG6W,r8; /* SPRG6 used in machine checks */ \ + lis r8,mcheck_save@ha; \ + stw r10,mcheck_r10@l(r8); \ + stw r11,mcheck_r11@l(r8); \ + mfspr r10,SPRG0; \ + stw r10,mcheck_sprg0@l(r8); \ + mfspr r10,SPRG1; \ + stw r10,mcheck_sprg1@l(r8); \ + mfspr r10,SPRG4R; \ + stw r10,mcheck_sprg4@l(r8); \ + mfspr r10,SPRG5R; \ + stw r10,mcheck_sprg5@l(r8); \ + mfspr r10,SPRG7R; \ + stw r10,mcheck_sprg7@l(r8); \ + mfspr r10,SPRN_PID; \ + stw r10,mcheck_pid@l(r8); \ + mfspr r10,SRR0; \ + stw r10,mcheck_srr0@l(r8); \ + mfspr r10,SRR1; \ + stw r10,mcheck_srr1@l(r8); \ + mfspr r10,CSRR0; \ + stw r10,mcheck_csrr0@l(r8); \ + mfspr r10,CSRR1; \ + stw r10,mcheck_csrr1@l(r8); \ + mfspr r8,SPRG6R; /* SPRG6 used in machine checks */ \ + mfcr r10; /* save CR in r10 for now */\ + mfspr r11,SPRN_MCSRR1; /* check whether user or kernel */\ + andi. r11,r11,MSR_PR; \ + lis r11,mcheck_stack_top@h; \ + ori r11,r11,mcheck_stack_top@l; \ + beq 1f; \ + /* COMING FROM USER MODE */ \ + mfspr r11,SPRG3; /* if from user, start at top of */\ + lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ + addi r11,r11,THREAD_SIZE; \ +1: subi r11,r11,INT_FRAME_SIZE; /* Allocate an exception frame */\ + stw r10,_CCR(r11); /* save various registers */\ + stw r12,GPR12(r11); \ + stw r9,GPR9(r11); \ + mflr r10; \ + stw r10,_LINK(r11); \ + mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\ + stw r12,_DEAR(r11); /* since they may have had stuff */\ + mfspr r9,SPRN_ESR; /* in them at the point where the */\ + stw r9,_ESR(r11); /* exception was taken */\ + mfspr r12,MCSRR0; \ + stw r1,GPR1(r11); \ + mfspr r9,MCSRR1; \ + stw r1,0(r11); \ + tovirt(r1,r11); \ + rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\ + stw r0,GPR0(r11); \ + SAVE_4GPRS(3, r11); \ + SAVE_2GPRS(7, r11) + +/* + * Exception vectors. 
+ */ +#define START_EXCEPTION(label) \ + .align 5; \ +label: + +#define FINISH_EXCEPTION(func) \ + bl transfer_to_handler_full; \ + .long func; \ + .long ret_from_except_full + +#define EXCEPTION(n, label, hdlr, xfer) \ + START_EXCEPTION(label); \ + NORMAL_EXCEPTION_PROLOG; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + xfer(n, hdlr) + +#define CRITICAL_EXCEPTION(n, label, hdlr) \ + START_EXCEPTION(label); \ + CRITICAL_EXCEPTION_PROLOG; \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ + NOCOPY, transfer_to_handler_full, \ + ret_from_except_full) + +#define MCHECK_EXCEPTION(n, label, hdlr) \ + START_EXCEPTION(label); \ + MCHECK_EXCEPTION_PROLOG; \ + mfspr r5,SPRN_ESR; \ + stw r5,_ESR(r11); \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ + NOCOPY, mcheck_transfer_to_handler, \ + ret_from_mcheck_exc) + +#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret) \ + li r10,trap; \ + stw r10,TRAP(r11); \ + lis r10,msr@h; \ + ori r10,r10,msr@l; \ + copyee(r10, r9); \ + bl tfer; \ + .long hdlr; \ + .long ret + +#define COPY_EE(d, s) rlwimi d,s,0,16,16 +#define NOCOPY(d, s) + +#define EXC_XFER_STD(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \ + ret_from_except) + +#define EXC_XFER_EE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \ + ret_from_except_full) + +#define EXC_XFER_EE_LITE(n, hdlr) \ + EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \ + ret_from_except) + +interrupt_base: + /* Critical Input Interrupt */ + CRITICAL_EXCEPTION(0x0100, CriticalInput, UnknownException) + + /* Machine Check Interrupt */ + MCHECK_EXCEPTION(0x0200, MachineCheck, MachineCheckException) + + /* Data Storage Interrupt */ + START_EXCEPTION(DataStorage) + mtspr SPRG0, r10 /* Save some working registers */ + mtspr SPRG1, r11 + mtspr SPRG4W, r12 + mtspr SPRG5W, r13 + mfcr r11 + mtspr SPRG7W, r11 + + /* + * Check if it was a store fault, if not then bail + * because a user tried to access a kernel or + * read-protected page. Otherwise, get the + * offending address and handle it. + */ + mfspr r10, SPRN_ESR + andis. r10, r10, ESR_ST@h + beq 2f + + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + ori r11, r11, TASK_SIZE@l + cmplw 0, r10, r11 + bge 2f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRG3 + lwz r11,PGDIR(r11) +4: + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r11, 0(r11) /* Get L1 entry */ + rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + + /* Are _PAGE_USER & _PAGE_RW set & _PAGE_HWWRITE not? */ + andi. r13, r11, _PAGE_RW|_PAGE_USER|_PAGE_HWWRITE + cmpwi 0, r13, _PAGE_RW|_PAGE_USER + bne 2f /* Bail if not */ + + /* Update 'changed'. 
*/ + ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE + stw r11, 0(r12) /* Update Linux page table */ + + /* MAS2 not updated as the entry does exist in the tlb, this + fault taken to detect state transition (eg: COW -> DIRTY) + */ + lis r12, MAS3_RPN@h + ori r12, r12, _PAGE_HWEXEC | MAS3_RPN@l + and r11, r11, r12 + rlwimi r11, r11, 31, 27, 27 /* SX <- _PAGE_HWEXEC */ + ori r11, r11, (MAS3_UW|MAS3_SW|MAS3_UR|MAS3_SR)@l /* set static perms */ + + /* update search PID in MAS6, AS = 0 */ + mfspr r12, SPRN_PID0 + slwi r12, r12, 16 + mtspr SPRN_MAS6, r12 + + /* find the TLB index that caused the fault. It has to be here. */ + tlbsx 0, r10 + + mtspr SPRN_MAS3,r11 + tlbwe + + /* Done...restore registers and get out of here. */ + mfspr r11, SPRG7R + mtcr r11 + mfspr r13, SPRG5R + mfspr r12, SPRG4R + mfspr r11, SPRG1 + mfspr r10, SPRG0 + rfi /* Force context change */ + +2: + /* + * The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r11, SPRG7R + mtcr r11 + mfspr r13, SPRG5R + mfspr r12, SPRG4R + mfspr r11, SPRG1 + mfspr r10, SPRG0 + b data_access + + /* Instruction Storage Interrupt */ + START_EXCEPTION(InstructionStorage) + NORMAL_EXCEPTION_PROLOG + mfspr r5,SPRN_ESR /* Grab the ESR and save it */ + stw r5,_ESR(r11) + mr r4,r12 /* Pass SRR0 as arg2 */ + li r5,0 /* Pass zero as arg3 */ + EXC_XFER_EE_LITE(0x0400, handle_page_fault) + + /* External Input Interrupt */ + EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) + + /* Alignment Interrupt */ + START_EXCEPTION(Alignment) + NORMAL_EXCEPTION_PROLOG + mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */ + stw r4,_DEAR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE(0x0600, AlignmentException) + + /* Program Interrupt */ + START_EXCEPTION(Program) + NORMAL_EXCEPTION_PROLOG + mfspr r4,SPRN_ESR /* Grab the ESR and save it */ + stw r4,_ESR(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_STD(0x0700, ProgramCheckException) + + /* Floating Point Unavailable Interrupt */ + EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) + + /* System Call Interrupt */ + START_EXCEPTION(SystemCall) + NORMAL_EXCEPTION_PROLOG + EXC_XFER_EE_LITE(0x0c00, DoSyscall) + + /* Auxillary Processor Unavailable Interrupt */ + EXCEPTION(0x2900, AuxillaryProcessorUnavailable, UnknownException, EXC_XFER_EE) + + /* Decrementer Interrupt */ + START_EXCEPTION(Decrementer) + NORMAL_EXCEPTION_PROLOG + lis r0,TSR_DIS@h /* Setup the DEC interrupt mask */ + mtspr SPRN_TSR,r0 /* Clear the DEC interrupt */ + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_LITE(0x0900, timer_interrupt) + + /* Fixed Internal Timer Interrupt */ + /* TODO: Add FIT support */ + EXCEPTION(0x3100, FixedIntervalTimer, UnknownException, EXC_XFER_EE) + + /* Watchdog Timer Interrupt */ + /* TODO: Add watchdog support */ + CRITICAL_EXCEPTION(0x3200, WatchdogTimer, UnknownException) + + /* Data TLB Error Interrupt */ + START_EXCEPTION(DataTLBError) + mtspr SPRG0, r10 /* Save some working registers */ + mtspr SPRG1, r11 + mtspr SPRG4W, r12 + mtspr SPRG5W, r13 + mfcr r11 + mtspr SPRG7W, r11 + mfspr r10, SPRN_DEAR /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. 
+ */ + lis r11, TASK_SIZE@h + ori r11, r11, TASK_SIZE@l + cmplw 5, r10, r11 + blt 5, 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MAS1 /* Set TID to 0 */ + li r13,MAS1_TID@l + andc r12,r12,r13 + mtspr SPRN_MAS1,r12 + + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRG3 + lwz r11,PGDIR(r11) + +4: + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r11, 0(r11) /* Get L1 entry */ + rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + andi. r13, r11, _PAGE_PRESENT + beq 2f + + ori r11, r11, _PAGE_ACCESSED + stw r11, 0(r12) + + /* Jump to common tlb load */ + b finish_tlb_load +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r11, SPRG7R + mtcr r11 + mfspr r13, SPRG5R + mfspr r12, SPRG4R + mfspr r11, SPRG1 + mfspr r10, SPRG0 + b data_access + + /* Instruction TLB Error Interrupt */ + /* + * Nearly the same as above, except we get our + * information from different registers and bailout + * to a different point. + */ + START_EXCEPTION(InstructionTLBError) + mtspr SPRG0, r10 /* Save some working registers */ + mtspr SPRG1, r11 + mtspr SPRG4W, r12 + mtspr SPRG5W, r13 + mfcr r11 + mtspr SPRG7W, r11 + mfspr r10, SRR0 /* Get faulting address */ + + /* If we are faulting a kernel address, we have to use the + * kernel page tables. + */ + lis r11, TASK_SIZE@h + ori r11, r11, TASK_SIZE@l + cmplw 5, r10, r11 + blt 5, 3f + lis r11, swapper_pg_dir@h + ori r11, r11, swapper_pg_dir@l + + mfspr r12,SPRN_MAS1 /* Set TID to 0 */ + li r13,MAS1_TID@l + andc r12,r12,r13 + mtspr SPRN_MAS1,r12 + + b 4f + + /* Get the PGD for the current thread */ +3: + mfspr r11,SPRG3 + lwz r11,PGDIR(r11) + +4: + rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */ + lwz r11, 0(r11) /* Get L1 entry */ + rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */ + beq 2f /* Bail if no table */ + + rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */ + lwz r11, 0(r12) /* Get Linux PTE */ + andi. r13, r11, _PAGE_PRESENT + beq 2f + + ori r11, r11, _PAGE_ACCESSED + stw r11, 0(r12) + + /* Jump to common TLB load point */ + b finish_tlb_load + +2: + /* The bailout. Restore registers to pre-exception conditions + * and call the heavyweights to help us out. + */ + mfspr r11, SPRG7R + mtcr r11 + mfspr r13, SPRG5R + mfspr r12, SPRG4R + mfspr r11, SPRG1 + mfspr r10, SPRG0 + b InstructionStorage + +#ifdef CONFIG_SPE + /* SPE Unavailable */ + START_EXCEPTION(SPEUnavailable) + NORMAL_EXCEPTION_PROLOG + bne load_up_spe + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE_LITE(0x2010, KernelSPE) +#else + EXCEPTION(0x2020, SPEUnavailable, UnknownException, EXC_XFER_EE) +#endif /* CONFIG_SPE */ + + /* SPE Floating Point Data */ +#ifdef CONFIG_SPE + EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); +#else + EXCEPTION(0x2040, SPEFloatingPointData, UnknownException, EXC_XFER_EE) +#endif /* CONFIG_SPE */ + + /* SPE Floating Point Round */ + EXCEPTION(0x2050, SPEFloatingPointRound, UnknownException, EXC_XFER_EE) + + /* Performance Monitor */ + EXCEPTION(0x2060, PerformanceMonitor, UnknownException, EXC_XFER_EE) + +/* Check for a single step debug exception while in an exception + * handler before state has been saved. 
This is to catch the case + * where an instruction that we are trying to single step causes + * an exception (eg ITLB/DTLB miss) and thus the first instruction of + * the exception handler generates a single step debug exception. + * + * If we get a debug trap on the first instruction of an exception handler, + * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is + * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR). + * The exception handler was handling a non-critical interrupt, so it will + * save (and later restore) the MSR via SPRN_SRR1, which will still have + * the MSR_DE bit set. + */ + /* Debug Interrupt */ + START_EXCEPTION(Debug) + CRITICAL_EXCEPTION_PROLOG + + /* + * If this is a single step or branch-taken exception in an + * exception entry sequence, it was probably meant to apply to + * the code where the exception occurred (since exception entry + * doesn't turn off DE automatically). We simulate the effect + * of turning off DE on entry to an exception handler by turning + * off DE in the CSRR1 value and clearing the debug status. + */ + mfspr r10,SPRN_DBSR /* check single-step/branch taken */ + andis. r10,r10,(DBSR_IC|DBSR_BT)@h + beq+ 1f + andi. r0,r9,MSR_PR /* check supervisor */ + beq 2f /* branch if we need to fix it up... */ + + /* continue normal handling for a critical exception... */ +1: mfspr r4,SPRN_DBSR + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_TEMPLATE(DebugException, 0x2002, \ + (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ + NOCOPY, crit_transfer_to_handler, ret_from_crit_exc) + + /* here it looks like we got an inappropriate debug exception. */ +2: rlwinm r9,r9,0,~MSR_DE /* clear DE in the CSRR1 value */ + mtspr SPRN_DBSR,r10 /* clear the IC/BT debug intr status */ + /* restore state and get out */ + lwz r10,_CCR(r11) + lwz r0,GPR0(r11) + lwz r1,GPR1(r11) + mtcrf 0x80,r10 + mtspr CSRR0,r12 + mtspr CSRR1,r9 + lwz r9,GPR9(r11) + + mtspr SPRG2,r8; /* SPRG2 only used in criticals */ + lis r8,crit_save@ha; + lwz r10,crit_r10@l(r8) + lwz r11,crit_r11@l(r8) + mfspr r8,SPRG2 + + rfci + b . + +/* + * Local functions + */ + /* + * Data TLB exceptions will bail out to this point + * if they can't resolve the lightweight TLB fault. + */ +data_access: + NORMAL_EXCEPTION_PROLOG + mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ + stw r5,_ESR(r11) + mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ + andis. r10,r5,(ESR_ILK|ESR_DLK)@h + bne 1f + EXC_XFER_EE_LITE(0x0300, handle_page_fault) +1: + addi r3,r1,STACK_FRAME_OVERHEAD + EXC_XFER_EE_LITE(0x0300, CacheLockingException) + +/* + + * Both the instruction and data TLB miss get to this + * point to load the TLB. + * r10 - EA of fault + * r11 - TLB (info from Linux PTE) + * r12, r13 - available to use + * CR5 - results of addr < TASK_SIZE + * MAS0, MAS1 - loaded with proper value when we get here + * MAS2, MAS3 - will need additional info from Linux PTE + * Upon exit, we reload everything and RFI. + */ +finish_tlb_load: + /* + * We set execute, because we don't have the granularity to + * properly set this at the page level (Linux problem). + * Many of these bits are software only. Bits we don't set + * here we (properly should) assume have the appropriate value. + */ + + mfspr r12, SPRN_MAS2 + rlwimi r12, r11, 26, 27, 31 /* extract WIMGE from pte */ + mtspr SPRN_MAS2, r12 + + bge 5, 1f + + /* addr > TASK_SIZE */ + li r10, (MAS3_UX | MAS3_UW | MAS3_UR) + andi. r13, r11, (_PAGE_USER | _PAGE_HWWRITE | _PAGE_HWEXEC) + andi. 
r12, r11, _PAGE_USER /* Test for _PAGE_USER */ + iseleq r12, 0, r10 + and r10, r12, r13 + srwi r12, r10, 1 + or r12, r12, r10 /* Copy user perms into supervisor */ + b 2f + + /* addr <= TASK_SIZE */ +1: rlwinm r12, r11, 31, 29, 29 /* Extract _PAGE_HWWRITE into SW */ + ori r12, r12, (MAS3_SX | MAS3_SR) + +2: rlwimi r11, r12, 0, 20, 31 /* Extract RPN from PTE and merge with perms */ + mtspr SPRN_MAS3, r11 + tlbwe + + /* Done...restore registers and get out of here. */ + mfspr r11, SPRG7R + mtcr r11 + mfspr r13, SPRG5R + mfspr r12, SPRG4R + mfspr r11, SPRG1 + mfspr r10, SPRG0 + rfi /* Force context change */ + +#ifdef CONFIG_SPE +/* Note that the SPE support is closely modeled after the AltiVec + * support. Changes to one are likely to be applicable to the + * other! */ +load_up_spe: +/* + * Disable SPE for the task which had SPE previously, + * and save its SPE registers in its thread_struct. + * Enables SPE for use in the kernel on return. + * On SMP we know the SPE units are free, since we give it up every + * switch. -- Kumar + */ + mfmsr r5 + oris r5,r5,MSR_SPE@h + mtmsr r5 /* enable use of SPE now */ + isync +/* + * For SMP, we don't do lazy SPE switching because it just gets too + * horrendously complex, especially when a task switches from one CPU + * to another. Instead we call giveup_spe in switch_to. + */ +#ifndef CONFIG_SMP + lis r3,last_task_used_spe@ha + lwz r4,last_task_used_spe@l(r3) + cmpi 0,r4,0 + beq 1f + addi r4,r4,THREAD /* want THREAD of last_task_used_spe */ + SAVE_32EVR(0,r10,r4) + evxor evr10, evr10, evr10 /* clear out evr10 */ + evmwumiaa evr10, evr10, evr10 /* evr10 <- ACC = 0 * 0 + ACC */ + li r5,THREAD_ACC + evstddx evr10, r4, r5 /* save off accumulator */ + lwz r5,PT_REGS(r4) + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + lis r10,MSR_SPE@h + andc r4,r4,r10 /* disable SPE for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#endif /* CONFIG_SMP */ + /* enable use of SPE after return */ + oris r9,r9,MSR_SPE@h + mfspr r5,SPRG3 /* current task's THREAD (phys) */ + li r4,1 + li r10,THREAD_ACC + stw r4,THREAD_USED_SPE(r5) + evlddx evr4,r10,r5 + evmra evr4,evr4 + REST_32EVR(0,r10,r5) +#ifndef CONFIG_SMP + subi r4,r5,THREAD + stw r4,last_task_used_spe@l(r3) +#endif /* CONFIG_SMP */ + /* restore registers and return */ +2: REST_4GPRS(3, r11) + lwz r10,_CCR(r11) + REST_GPR(1, r11) + mtcr r10 + lwz r10,_LINK(r11) + mtlr r10 + REST_GPR(10, r11) + mtspr SRR1,r9 + mtspr SRR0,r12 + REST_GPR(9, r11) + REST_GPR(12, r11) + lwz r11,GPR11(r11) + SYNC + rfi + + + +/* + * SPE unavailable trap from kernel - print a message, but let + * the task use SPE in the kernel until it returns to user mode. + */ +KernelSPE: + lwz r3,_MSR(r1) + oris r3,r3,MSR_SPE@h + stw r3,_MSR(r1) /* enable use of SPE after return */ + lis r3,87f@h + ori r3,r3,87f@l + mr r4,r2 /* current */ + lwz r5,_NIP(r1) + bl printk + b ret_from_except +87: .string "SPE used in kernel (task=%p, pc=%x) \n" + .align 4,0 + +#endif /* CONFIG_SPE */ + +/* + * Global functions + */ + +/* + * extern void loadcam_entry(unsigned int index) + * + * Load TLBCAM[index] entry in to the L2 CAM MMU + */ +_GLOBAL(loadcam_entry) + lis r4,TLBCAM@ha + addi r4,r4,TLBCAM@l + mulli r5,r3,20 + add r3,r5,r4 + lwz r4,0(r3) + mtspr SPRN_MAS0,r4 + lwz r4,4(r3) + mtspr SPRN_MAS1,r4 + lwz r4,8(r3) + mtspr SPRN_MAS2,r4 + lwz r4,12(r3) + mtspr SPRN_MAS3,r4 + tlbwe + isync + blr + +/* + * extern void giveup_altivec(struct task_struct *prev) + * + * The e500 core does not have an AltiVec unit. 
+ */ +_GLOBAL(giveup_altivec) + blr + +#ifdef CONFIG_SPE +/* + * extern void giveup_spe(struct task_struct *prev) + * + */ +_GLOBAL(giveup_spe) + mfmsr r5 + oris r5,r5,MSR_SPE@h + SYNC + mtmsr r5 /* enable use of SPE now */ + isync + cmpi 0,r3,0 + beqlr- /* if no previous owner, done */ + addi r3,r3,THREAD /* want THREAD of task */ + lwz r5,PT_REGS(r3) + cmpi 0,r5,0 + SAVE_32EVR(0, r4, r3) + evxor evr6, evr6, evr6 /* clear out evr6 */ + evmwumiaa evr6, evr6, evr6 /* evr6 <- ACC = 0 * 0 + ACC */ + li r4,THREAD_ACC + evstddx evr6, r4, r3 /* save off accumulator */ + beq 1f + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + lis r3,MSR_SPE@h + andc r4,r4,r3 /* disable SPE for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#ifndef CONFIG_SMP + li r5,0 + lis r4,last_task_used_spe@ha + stw r5,last_task_used_spe@l(r4) +#endif /* CONFIG_SMP */ + blr +#endif /* CONFIG_SPE */ + +/* + * extern void giveup_fpu(struct task_struct *prev) + * + * The e500 core does not have an FPU. + */ +_GLOBAL(giveup_fpu) + blr + +/* + * extern void abort(void) + * + * At present, this routine just applies a system reset. + */ +_GLOBAL(abort) + li r13,0 + mtspr SPRN_DBCR0,r13 /* disable all debug events */ + mfmsr r13 + ori r13,r13,MSR_DE@l /* Enable Debug Events */ + mtmsr r13 + mfspr r13,SPRN_DBCR0 + lis r13,(DBCR0_IDM|DBCR0_RST_CHIP)@h + mtspr SPRN_DBCR0,r13 + +_GLOBAL(set_context) + +#ifdef CONFIG_BDI_SWITCH + /* Context switch the PTE pointer for the Abatron BDI2000. + * The PGDIR is the second parameter. + */ + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l + stw r4, 0x4(r5) +#endif + mtspr SPRN_PID,r3 + isync /* Force context change */ + blr + +/* + * We put a few things here that have to be page-aligned. This stuff + * goes at the beginning of the data segment, which is page-aligned. + */ + .data +_GLOBAL(sdata) +_GLOBAL(empty_zero_page) + .space 4096 +_GLOBAL(swapper_pg_dir) + .space 4096 + + .section .bss +/* Stack for handling critical exceptions from kernel mode */ +critical_stack_bottom: + .space 4096 +critical_stack_top: + .previous + +/* Stack for handling machine check exceptions from kernel mode */ +mcheck_stack_bottom: + .space 4096 +mcheck_stack_top: + .previous + +/* + * This area is used for temporarily saving registers during the + * critical and machine check exception prologs. It must always + * follow the page aligned allocations, so it starts on a page + * boundary, ensuring that all crit_save areas are in a single + * page. + */ + +/* crit_save */ +_GLOBAL(crit_save) + .space 4 +_GLOBAL(crit_r10) + .space 4 +_GLOBAL(crit_r11) + .space 4 +_GLOBAL(crit_sprg0) + .space 4 +_GLOBAL(crit_sprg1) + .space 4 +_GLOBAL(crit_sprg4) + .space 4 +_GLOBAL(crit_sprg5) + .space 4 +_GLOBAL(crit_sprg7) + .space 4 +_GLOBAL(crit_pid) + .space 4 +_GLOBAL(crit_srr0) + .space 4 +_GLOBAL(crit_srr1) + .space 4 + +/* mcheck_save */ +_GLOBAL(mcheck_save) + .space 4 +_GLOBAL(mcheck_r10) + .space 4 +_GLOBAL(mcheck_r11) + .space 4 +_GLOBAL(mcheck_sprg0) + .space 4 +_GLOBAL(mcheck_sprg1) + .space 4 +_GLOBAL(mcheck_sprg4) + .space 4 +_GLOBAL(mcheck_sprg5) + .space 4 +_GLOBAL(mcheck_sprg7) + .space 4 +_GLOBAL(mcheck_pid) + .space 4 +_GLOBAL(mcheck_srr0) + .space 4 +_GLOBAL(mcheck_srr1) + .space 4 +_GLOBAL(mcheck_csrr0) + .space 4 +_GLOBAL(mcheck_csrr1) + .space 4 + +/* + * This space gets a copy of optional info passed to us by the bootstrap + * which is used to pass parameters into the kernel like root=/dev/sda1, etc. 
+ */ +_GLOBAL(cmd_line) + .space 512 + +/* + * Room for two PTE pointers, usually the kernel and current user pointers + * to their respective root page table. + */ +abatron_pteptrs: + .space 8 + + diff --git a/arch/ppc/lib/rheap.c b/arch/ppc/lib/rheap.c new file mode 100644 index 000000000..4d938a062 --- /dev/null +++ b/arch/ppc/lib/rheap.c @@ -0,0 +1,692 @@ +/* + * arch/ppc/syslib/rheap.c + * + * A Remote Heap. Remote means that we don't touch the memory that the + * heap points to. Normal heap implementations use the memory they manage + * to place their list. We cannot do that because the memory we manage may + * have special properties, for example it is uncachable or of different + * endianess. + * + * Author: Pantelis Antoniou + * + * 2004 (c) INTRACOM S.A. Greece. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + */ +#include +#include +#include +#include + +#include + +/* + * Fixup a list_head, needed when copying lists. If the pointers fall + * between s and e, apply the delta. This assumes that + * sizeof(struct list_head *) == sizeof(unsigned long *). + */ +static inline void fixup(unsigned long s, unsigned long e, int d, + struct list_head *l) +{ + unsigned long *pp; + + pp = (unsigned long *)&l->next; + if (*pp >= s && *pp < e) + *pp += d; + + pp = (unsigned long *)&l->prev; + if (*pp >= s && *pp < e) + *pp += d; +} + +/* Grow the allocated blocks */ +static int grow(rh_info_t * info, int max_blocks) +{ + rh_block_t *block, *blk; + int i, new_blocks; + int delta; + unsigned long blks, blke; + + if (max_blocks <= info->max_blocks) + return -EINVAL; + + new_blocks = max_blocks - info->max_blocks; + + block = kmalloc(sizeof(rh_block_t) * max_blocks, GFP_KERNEL); + if (block == NULL) + return -ENOMEM; + + if (info->max_blocks > 0) { + + /* copy old block area */ + memcpy(block, info->block, + sizeof(rh_block_t) * info->max_blocks); + + delta = (char *)block - (char *)info->block; + + /* and fixup list pointers */ + blks = (unsigned long)info->block; + blke = (unsigned long)(info->block + info->max_blocks); + + for (i = 0, blk = block; i < info->max_blocks; i++, blk++) + fixup(blks, blke, delta, &blk->list); + + fixup(blks, blke, delta, &info->empty_list); + fixup(blks, blke, delta, &info->free_list); + fixup(blks, blke, delta, &info->taken_list); + + /* free the old allocated memory */ + if ((info->flags & RHIF_STATIC_BLOCK) == 0) + kfree(info->block); + } + + info->block = block; + info->empty_slots += new_blocks; + info->max_blocks = max_blocks; + info->flags &= ~RHIF_STATIC_BLOCK; + + /* add all new blocks to the free list */ + for (i = 0, blk = block + info->max_blocks; i < new_blocks; i++, blk++) + list_add(&blk->list, &info->empty_list); + + return 0; +} + +/* + * Assure at least the required amount of empty slots. If this function + * causes a grow in the block area then all pointers kept to the block + * area are invalid! + */ +static int assure_empty(rh_info_t * info, int slots) +{ + int max_blocks; + + /* This function is not meant to be used to grow uncontrollably */ + if (slots >= 4) + return -EINVAL; + + /* Enough space */ + if (info->empty_slots >= slots) + return 0; + + /* Next 16 sized block */ + max_blocks = ((info->max_blocks + slots) + 15) & ~15; + + return grow(info, max_blocks); +} + +static rh_block_t *get_slot(rh_info_t * info) +{ + rh_block_t *blk; + + /* If no more free slots, and failure to extend. 
*/ + /* XXX: You should have called assure_empty before */ + if (info->empty_slots == 0) { + printk(KERN_ERR "rh: out of slots; crash is imminent.\n"); + return NULL; + } + + /* Get empty slot to use */ + blk = list_entry(info->empty_list.next, rh_block_t, list); + list_del_init(&blk->list); + info->empty_slots--; + + /* Initialize */ + blk->start = NULL; + blk->size = 0; + blk->owner = NULL; + + return blk; +} + +static inline void release_slot(rh_info_t * info, rh_block_t * blk) +{ + list_add(&blk->list, &info->empty_list); + info->empty_slots++; +} + +static void attach_free_block(rh_info_t * info, rh_block_t * blkn) +{ + rh_block_t *blk; + rh_block_t *before; + rh_block_t *after; + rh_block_t *next; + int size; + unsigned long s, e, bs, be; + struct list_head *l; + + /* We assume that they are aligned properly */ + size = blkn->size; + s = (unsigned long)blkn->start; + e = s + size; + + /* Find the blocks immediately before and after the given one + * (if any) */ + before = NULL; + after = NULL; + next = NULL; + + list_for_each(l, &info->free_list) { + blk = list_entry(l, rh_block_t, list); + + bs = (unsigned long)blk->start; + be = bs + blk->size; + + if (next == NULL && s >= bs) + next = blk; + + if (be == s) + before = blk; + + if (e == bs) + after = blk; + + /* If both are not null, break now */ + if (before != NULL && after != NULL) + break; + } + + /* Now check if they are really adjacent */ + if (before != NULL && s != (unsigned long)before->start + before->size) + before = NULL; + + if (after != NULL && e != (unsigned long)after->start) + after = NULL; + + /* No coalescing; list insert and return */ + if (before == NULL && after == NULL) { + + if (next != NULL) + list_add(&blkn->list, &next->list); + else + list_add(&blkn->list, &info->free_list); + + return; + } + + /* We don't need it anymore */ + release_slot(info, blkn); + + /* Grow the before block */ + if (before != NULL && after == NULL) { + before->size += size; + return; + } + + /* Grow the after block backwards */ + if (before == NULL && after != NULL) { + (int8_t *) after->start -= size; + after->size += size; + return; + } + + /* Grow the before block, and release the after block */ + before->size += size + after->size; + list_del(&after->list); + release_slot(info, after); +} + +static void attach_taken_block(rh_info_t * info, rh_block_t * blkn) +{ + rh_block_t *blk; + struct list_head *l; + + /* Find the block immediately before the given one (if any) */ + list_for_each(l, &info->taken_list) { + blk = list_entry(l, rh_block_t, list); + if (blk->start > blkn->start) { + list_add_tail(&blkn->list, &blk->list); + return; + } + } + + list_add_tail(&blkn->list, &info->taken_list); +} + +/* + * Create a remote heap dynamically. Note that no memory for the blocks + * are allocated. It will upon the first allocation + */ +rh_info_t *rh_create(unsigned int alignment) +{ + rh_info_t *info; + + /* Alignment must be a power of two */ + if ((alignment & (alignment - 1)) != 0) + return NULL; + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (info == NULL) + return NULL; + + info->alignment = alignment; + + /* Initially everything as empty */ + info->block = NULL; + info->max_blocks = 0; + info->empty_slots = 0; + info->flags = 0; + + INIT_LIST_HEAD(&info->empty_list); + INIT_LIST_HEAD(&info->free_list); + INIT_LIST_HEAD(&info->taken_list); + + return info; +} + +/* + * Destroy a dynamically created remote heap. 
Deallocate only if the areas + * are not static + */ +void rh_destroy(rh_info_t * info) +{ + if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL) + kfree(info->block); + + if ((info->flags & RHIF_STATIC_INFO) == 0) + kfree(info); +} + +/* + * Initialize in place a remote heap info block. This is needed to support + * operation very early in the startup of the kernel, when it is not yet safe + * to call kmalloc. + */ +void rh_init(rh_info_t * info, unsigned int alignment, int max_blocks, + rh_block_t * block) +{ + int i; + rh_block_t *blk; + + /* Alignment must be a power of two */ + if ((alignment & (alignment - 1)) != 0) + return; + + info->alignment = alignment; + + /* Initially everything as empty */ + info->block = block; + info->max_blocks = max_blocks; + info->empty_slots = max_blocks; + info->flags = RHIF_STATIC_INFO | RHIF_STATIC_BLOCK; + + INIT_LIST_HEAD(&info->empty_list); + INIT_LIST_HEAD(&info->free_list); + INIT_LIST_HEAD(&info->taken_list); + + /* Add all new blocks to the free list */ + for (i = 0, blk = block; i < max_blocks; i++, blk++) + list_add(&blk->list, &info->empty_list); +} + +/* Attach a free memory region, coalesces regions if adjuscent */ +int rh_attach_region(rh_info_t * info, void *start, int size) +{ + rh_block_t *blk; + unsigned long s, e, m; + int r; + + /* The region must be aligned */ + s = (unsigned long)start; + e = s + size; + m = info->alignment - 1; + + /* Round start up */ + s = (s + m) & ~m; + + /* Round end down */ + e = e & ~m; + + /* Take final values */ + start = (void *)s; + size = (int)(e - s); + + /* Grow the blocks, if needed */ + r = assure_empty(info, 1); + if (r < 0) + return r; + + blk = get_slot(info); + blk->start = start; + blk->size = size; + blk->owner = NULL; + + attach_free_block(info, blk); + + return 0; +} + +/* Detatch given address range, splits free block if needed. 
*/ +void *rh_detach_region(rh_info_t * info, void *start, int size) +{ + struct list_head *l; + rh_block_t *blk, *newblk; + unsigned long s, e, m, bs, be; + + /* Validate size */ + if (size <= 0) + return NULL; + + /* The region must be aligned */ + s = (unsigned long)start; + e = s + size; + m = info->alignment - 1; + + /* Round start up */ + s = (s + m) & ~m; + + /* Round end down */ + e = e & ~m; + + if (assure_empty(info, 1) < 0) + return NULL; + + blk = NULL; + list_for_each(l, &info->free_list) { + blk = list_entry(l, rh_block_t, list); + /* The range must lie entirely inside one free block */ + bs = (unsigned long)blk->start; + be = (unsigned long)blk->start + blk->size; + if (s >= bs && e <= be) + break; + blk = NULL; + } + + if (blk == NULL) + return NULL; + + /* Perfect fit */ + if (bs == s && be == e) { + /* Delete from free list, release slot */ + list_del(&blk->list); + release_slot(info, blk); + return (void *)s; + } + + /* blk still in free list, with updated start and/or size */ + if (bs == s || be == e) { + if (bs == s) + (int8_t *) blk->start += size; + blk->size -= size; + + } else { + /* The front free fragment */ + blk->size = s - bs; + + /* the back free fragment */ + newblk = get_slot(info); + newblk->start = (void *)e; + newblk->size = be - e; + + list_add(&newblk->list, &blk->list); + } + + return (void *)s; +} + +void *rh_alloc(rh_info_t * info, int size, const char *owner) +{ + struct list_head *l; + rh_block_t *blk; + rh_block_t *newblk; + void *start; + + /* Validate size */ + if (size <= 0) + return NULL; + + /* Align to configured alignment */ + size = (size + (info->alignment - 1)) & ~(info->alignment - 1); + + if (assure_empty(info, 1) < 0) + return NULL; + + blk = NULL; + list_for_each(l, &info->free_list) { + blk = list_entry(l, rh_block_t, list); + if (size <= blk->size) + break; + blk = NULL; + } + + if (blk == NULL) + return NULL; + + /* Just fits */ + if (blk->size == size) { + /* Move from free list to taken list */ + list_del(&blk->list); + blk->owner = owner; + start = blk->start; + + attach_taken_block(info, blk); + + return start; + } + + newblk = get_slot(info); + newblk->start = blk->start; + newblk->size = size; + newblk->owner = owner; + + /* blk still in free list, with updated start, size */ + (int8_t *) blk->start += size; + blk->size -= size; + + start = newblk->start; + + attach_taken_block(info, newblk); + + return start; +} + +/* allocate at precisely the given address */ +void *rh_alloc_fixed(rh_info_t * info, void *start, int size, const char *owner) +{ + struct list_head *l; + rh_block_t *blk, *newblk1, *newblk2; + unsigned long s, e, m, bs, be; + + /* Validate size */ + if (size <= 0) + return NULL; + + /* The region must be aligned */ + s = (unsigned long)start; + e = s + size; + m = info->alignment - 1; + + /* Round start up */ + s = (s + m) & ~m; + + /* Round end down */ + e = e & ~m; + + if (assure_empty(info, 2) < 0) + return NULL; + + blk = NULL; + list_for_each(l, &info->free_list) { + blk = list_entry(l, rh_block_t, list); + /* The range must lie entirely inside one free block */ + bs = (unsigned long)blk->start; + be = (unsigned long)blk->start + blk->size; + if (s >= bs && e <= be) + break; + } + + if (blk == NULL) + return NULL; + + /* Perfect fit */ + if (bs == s && be == e) { + /* Move from free list to taken list */ + list_del(&blk->list); + blk->owner = owner; + + start = blk->start; + attach_taken_block(info, blk); + + return start; + + } + + /* blk still in free list, with updated start and/or size */ + if (bs == s 
|| be == e) { + if (bs == s) + (int8_t *) blk->start += size; + blk->size -= size; + + } else { + /* The front free fragment */ + blk->size = s - bs; + + /* The back free fragment */ + newblk2 = get_slot(info); + newblk2->start = (void *)e; + newblk2->size = be - e; + + list_add(&newblk2->list, &blk->list); + } + + newblk1 = get_slot(info); + newblk1->start = (void *)s; + newblk1->size = e - s; + newblk1->owner = owner; + + start = newblk1->start; + attach_taken_block(info, newblk1); + + return start; +} + +int rh_free(rh_info_t * info, void *start) +{ + rh_block_t *blk, *blk2; + struct list_head *l; + int size; + + /* Linear search for block */ + blk = NULL; + list_for_each(l, &info->taken_list) { + blk2 = list_entry(l, rh_block_t, list); + if (start < blk2->start) + break; + blk = blk2; + } + + if (blk == NULL || start > (blk->start + blk->size)) + return -EINVAL; + + /* Remove from taken list */ + list_del(&blk->list); + + /* Get size of freed block */ + size = blk->size; + attach_free_block(info, blk); + + return size; +} + +int rh_get_stats(rh_info_t * info, int what, int max_stats, rh_stats_t * stats) +{ + rh_block_t *blk; + struct list_head *l; + struct list_head *h; + int nr; + + switch (what) { + + case RHGS_FREE: + h = &info->free_list; + break; + + case RHGS_TAKEN: + h = &info->taken_list; + break; + + default: + return -EINVAL; + } + + /* Linear search for block */ + nr = 0; + list_for_each(l, h) { + blk = list_entry(l, rh_block_t, list); + if (stats != NULL && nr < max_stats) { + stats->start = blk->start; + stats->size = blk->size; + stats->owner = blk->owner; + stats++; + } + nr++; + } + + return nr; +} + +int rh_set_owner(rh_info_t * info, void *start, const char *owner) +{ + rh_block_t *blk, *blk2; + struct list_head *l; + int size; + + /* Linear search for block */ + blk = NULL; + list_for_each(l, &info->taken_list) { + blk2 = list_entry(l, rh_block_t, list); + if (start < blk2->start) + break; + blk = blk2; + } + + if (blk == NULL || start > (blk->start + blk->size)) + return -EINVAL; + + blk->owner = owner; + + return size; +} + +void rh_dump(rh_info_t * info) +{ + static rh_stats_t st[32]; /* XXX maximum 32 blocks */ + int maxnr; + int i, nr; + + maxnr = sizeof(st) / sizeof(st[0]); + + printk(KERN_INFO + "info @0x%p (%d slots empty / %d max)\n", + info, info->empty_slots, info->max_blocks); + + printk(KERN_INFO " Free:\n"); + nr = rh_get_stats(info, RHGS_FREE, maxnr, st); + if (nr > maxnr) + nr = maxnr; + for (i = 0; i < nr; i++) + printk(KERN_INFO + " 0x%p-0x%p (%u)\n", + st[i].start, (int8_t *) st[i].start + st[i].size, + st[i].size); + printk(KERN_INFO "\n"); + + printk(KERN_INFO " Taken:\n"); + nr = rh_get_stats(info, RHGS_TAKEN, maxnr, st); + if (nr > maxnr) + nr = maxnr; + for (i = 0; i < nr; i++) + printk(KERN_INFO + " 0x%p-0x%p (%u) %s\n", + st[i].start, (int8_t *) st[i].start + st[i].size, + st[i].size, st[i].owner != NULL ? 
st[i].owner : ""); + printk(KERN_INFO "\n"); +} + +void rh_dump_blk(rh_info_t * info, rh_block_t * blk) +{ + printk(KERN_INFO + "blk @0x%p: 0x%p-0x%p (%u)\n", + blk, blk->start, (int8_t *) blk->start + blk->size, blk->size); +} diff --git a/arch/ppc/platforms/85xx/Kconfig b/arch/ppc/platforms/85xx/Kconfig new file mode 100644 index 000000000..cfe29eb04 --- /dev/null +++ b/arch/ppc/platforms/85xx/Kconfig @@ -0,0 +1,44 @@ +config 85xx + bool + depends on E500 + default y + +config PPC_INDIRECT_PCI_BE + bool + depends on 85xx + default y + +menu "Freescale 85xx options" + depends on E500 + +choice + prompt "Machine Type" + depends on 85xx + default MPC8540_ADS + +config MPC8540_ADS + bool "MPC8540ADS" + help + This option enables support for the MPC 8540 ADS evaluation board. + +endchoice + +# It's often necessary to know the specific 85xx processor type. +# Fortunately, it is implied (so far) from the board type, so we +# don't need to ask more redundant questions. +config MPC8540 + bool + depends on MPC8540_ADS + default y + +config FSL_OCP + bool + depends on 85xx + default y + +config PPC_GEN550 + bool + depends on MPC8540 + default y + +endmenu diff --git a/arch/ppc/platforms/85xx/Makefile b/arch/ppc/platforms/85xx/Makefile new file mode 100644 index 000000000..92a88233f --- /dev/null +++ b/arch/ppc/platforms/85xx/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the PowerPC 85xx linux kernel. +# + +obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads_common.o mpc8540_ads.o + +obj-$(CONFIG_MPC8540) += mpc8540.o diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.c b/arch/ppc/platforms/85xx/mpc8540_ads.c new file mode 100644 index 000000000..aada593a6 --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc8540_ads.c @@ -0,0 +1,238 @@ +/* + * arch/ppc/platforms/85xx/mpc8540_ads.c + * + * MPC8540ADS board specific routines + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for linux/serial_core.h */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct ocp_gfar_data mpc85xx_tsec1_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC1_TX, + .interruptError = MPC85xx_IRQ_TSEC1_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC1_RX, + .interruptPHY = MPC85xx_IRQ_EXT5, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR + | GFAR_HAS_RMON + | GFAR_HAS_PHY_INTR | GFAR_HAS_COALESCE), + .phyid = 0, + .phyregidx = 0, +}; + +struct ocp_gfar_data mpc85xx_tsec2_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC2_TX, + .interruptError = MPC85xx_IRQ_TSEC2_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC2_RX, + .interruptPHY = MPC85xx_IRQ_EXT5, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR + | GFAR_HAS_RMON + | GFAR_HAS_PHY_INTR | GFAR_HAS_COALESCE), + .phyid = 1, + .phyregidx = 0, +}; + +struct ocp_gfar_data mpc85xx_fec_def = { + .interruptTransmit = MPC85xx_IRQ_FEC, + .interruptError = MPC85xx_IRQ_FEC, + .interruptReceive = MPC85xx_IRQ_FEC, + .interruptPHY = MPC85xx_IRQ_EXT5, + .flags = 0, + .phyid = 3, + .phyregidx = 0, +}; + +struct ocp_fs_i2c_data mpc85xx_i2c1_def = { + .flags = FS_I2C_SEPARATE_DFSRR, +}; + +/* ************************************************************************ + * + * Setup the architecture + * + */ +static void __init +mpc8540ads_setup_arch(void) +{ + struct ocp_def *def; + struct ocp_gfar_data *einfo; + bd_t *binfo = (bd_t *) __res; + unsigned int freq; + + /* get the core frequency */ + freq = binfo->bi_intfreq; + + if (ppc_md.progress) + ppc_md.progress("mpc8540ads_setup_arch()", 0); + + /* Set loops_per_jiffy to a half-way reasonable value, + for use until calibrate_delay gets called. 
*/ + loops_per_jiffy = freq / HZ; + +#ifdef CONFIG_PCI + /* setup PCI host bridges */ + mpc85xx_setup_hose(); +#endif + +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#endif + +#ifdef CONFIG_SERIAL_8250 + mpc85xx_early_serial_map(); +#endif + +#ifdef CONFIG_SERIAL_TEXT_DEBUG + /* Invalidate the entry we stole earlier the serial ports + * should be properly mapped */ + invalidate_tlbcam_entry(NUM_TLBCAMS - 1); +#endif + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6); + } + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6); + } + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 2); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enet2addr, 6); + } + +#ifdef CONFIG_BLK_DEV_INITRD + if (initrd_start) + ROOT_DEV = Root_RAM0; + else +#endif +#ifdef CONFIG_ROOT_NFS + ROOT_DEV = Root_NFS; +#else + ROOT_DEV = Root_HDA1; +#endif + + ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base)); +} + +/* ************************************************************************ */ +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* parse_bootinfo must always be called first */ + parse_bootinfo(find_bootinfo()); + + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *) __res, (void *) (r3 + KERNELBASE), + sizeof (bd_t)); + } +#ifdef CONFIG_SERIAL_TEXT_DEBUG + { + bd_t *binfo = (bd_t *) __res; + + /* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */ + settlbcam(NUM_TLBCAMS - 1, binfo->bi_immr_base, + binfo->bi_immr_base, MPC85xx_CCSRBAR_SIZE, _PAGE_IO, 0); + } +#endif + +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. 
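The r3..r7 values handled above and below are physical addresses supplied by the boot loader, while the kernel dereferences them through its virtual mapping, hence the recurring "+ KERNELBASE" adjustment. A minimal sketch of the idiom (illustrative only, not part of the patch; the helper name is hypothetical):

	/* Illustrative helper: translate a physical boot-parameter address
	 * into the kernel's linear mapping before dereferencing it. */
	static inline void *boot_param_ptr(unsigned long phys)
	{
		return (void *)(phys + KERNELBASE);
	}

In those terms, the command-line copy that follows NUL-terminates the string at boot_param_ptr(r7) and then strcpy()s from boot_param_ptr(r6) into cmd_line.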
*/ + + if (r6) { + *(char *) (r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *) (r6 + KERNELBASE)); + } + + /* setup the PowerPC module struct */ + ppc_md.setup_arch = mpc8540ads_setup_arch; + ppc_md.show_cpuinfo = mpc85xx_ads_show_cpuinfo; + + ppc_md.init_IRQ = mpc85xx_ads_init_IRQ; + ppc_md.get_irq = openpic_get_irq; + + ppc_md.restart = mpc85xx_restart; + ppc_md.power_off = mpc85xx_power_off; + ppc_md.halt = mpc85xx_halt; + + ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory; + + ppc_md.time_init = NULL; + ppc_md.set_rtc_time = NULL; + ppc_md.get_rtc_time = NULL; + ppc_md.calibrate_decr = mpc85xx_calibrate_decr; + +#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG) + ppc_md.progress = gen550_progress; +#endif /* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */ + + if (ppc_md.progress) + ppc_md.progress("mpc8540ads_init(): exit", 0); + + return; +} diff --git a/arch/ppc/platforms/85xx/mpc8540_ads.h b/arch/ppc/platforms/85xx/mpc8540_ads.h new file mode 100644 index 000000000..9056361f7 --- /dev/null +++ b/arch/ppc/platforms/85xx/mpc8540_ads.h @@ -0,0 +1,30 @@ +/* + * arch/ppc/platforms/85xx/mpc8540_ads.h + * + * MPC8540ADS board definitions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MACH_MPC8540ADS_H__ +#define __MACH_MPC8540ADS_H__ + +#include +#include +#include +#include +#include + +#define SERIAL_PORT_DFNS \ + STD_UART_OP(0) \ + STD_UART_OP(1) + +#endif /* __MACH_MPC8540ADS_H__ */ diff --git a/arch/ppc/platforms/85xx/sbc8560.c b/arch/ppc/platforms/85xx/sbc8560.c new file mode 100644 index 000000000..c32d662fe --- /dev/null +++ b/arch/ppc/platforms/85xx/sbc8560.c @@ -0,0 +1,249 @@ +/* + * arch/ppc/platforms/85xx/sbc8560.c + * + * Wind River SBC8560 board specific routines + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for linux/serial_core.h */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +struct ocp_gfar_data mpc85xx_tsec1_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC1_TX, + .interruptError = MPC85xx_IRQ_TSEC1_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC1_RX, + .interruptPHY = MPC85xx_IRQ_EXT6, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR | GFAR_HAS_PHY_INTR), + .phyid = 25, + .phyregidx = 0, +}; + +struct ocp_gfar_data mpc85xx_tsec2_def = { + .interruptTransmit = MPC85xx_IRQ_TSEC2_TX, + .interruptError = MPC85xx_IRQ_TSEC2_ERROR, + .interruptReceive = MPC85xx_IRQ_TSEC2_RX, + .interruptPHY = MPC85xx_IRQ_EXT7, + .flags = (GFAR_HAS_GIGABIT | GFAR_HAS_MULTI_INTR | GFAR_HAS_PHY_INTR), + .phyid = 26, + .phyregidx = 0, +}; + +struct ocp_fs_i2c_data mpc85xx_i2c1_def = { + .flags = FS_I2C_SEPARATE_DFSRR, +}; + + +#ifdef CONFIG_SERIAL_8250 +static void __init +sbc8560_early_serial_map(void) +{ + struct uart_port uart_req; + + /* Setup serial port access */ + memset(&uart_req, 0, sizeof (uart_req)); + uart_req.irq = MPC85xx_IRQ_EXT9; + uart_req.flags = STD_COM_FLAGS; + uart_req.uartclk = BASE_BAUD * 16; + uart_req.iotype = SERIAL_IO_MEM; + uart_req.mapbase = UARTA_ADDR; + uart_req.membase = ioremap(uart_req.mapbase, MPC85xx_UART0_SIZE); + +#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB) + gen550_init(0, &uart_req); +#endif + + if (early_serial_setup(&uart_req) != 0) + printk("Early serial init of port 0 failed\n"); + + /* Assume early_serial_setup() doesn't modify uart_req */ + uart_req.line = 1; + uart_req.mapbase = UARTB_ADDR; + uart_req.membase = ioremap(uart_req.mapbase, MPC85xx_UART1_SIZE); + uart_req.irq = MPC85xx_IRQ_EXT10; + +#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB) + gen550_init(1, &uart_req); +#endif + + if (early_serial_setup(&uart_req) != 0) + printk("Early serial init of port 1 failed\n"); +} +#endif + +/* ************************************************************************ + * + * Setup the architecture + * + */ +static void __init +sbc8560_setup_arch(void) +{ + struct ocp_def *def; + struct ocp_gfar_data *einfo; + bd_t *binfo = (bd_t *) __res; + unsigned int freq; + + /* get the core frequency */ + freq = binfo->bi_intfreq; + + if (ppc_md.progress) + ppc_md.progress("sbc8560_setup_arch()", 0); + + /* Set loops_per_jiffy to a half-way reasonable value, + for use until calibrate_delay gets called. 
*/ + loops_per_jiffy = freq / HZ; + +#ifdef CONFIG_PCI + /* setup PCI host bridges */ + mpc85xx_setup_hose(); +#endif +#ifdef CONFIG_DUMMY_CONSOLE + conswitchp = &dummy_con; +#endif +#ifdef CONFIG_SERIAL_8250 + sbc8560_early_serial_map(); +#endif +#ifdef CONFIG_SERIAL_TEXT_DEBUG + /* Invalidate the entry we stole earlier the serial ports + * should be properly mapped */ + invalidate_tlbcam_entry(NUM_TLBCAMS - 1); +#endif + + /* Set up MAC addresses for the Ethernet devices */ + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 0); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enetaddr, 6); + } + + def = ocp_get_one_device(OCP_VENDOR_FREESCALE, OCP_FUNC_GFAR, 1); + if (def) { + einfo = (struct ocp_gfar_data *) def->additions; + memcpy(einfo->mac_addr, binfo->bi_enet1addr, 6); + } + +#ifdef CONFIG_BLK_DEV_INITRD + if (initrd_start) + ROOT_DEV = Root_RAM0; + else +#endif +#ifdef CONFIG_ROOT_NFS + ROOT_DEV = Root_NFS; +#else + ROOT_DEV = Root_HDA1; +#endif + + ocp_for_each_device(mpc85xx_update_paddr_ocp, &(binfo->bi_immr_base)); +} + +/* ************************************************************************ */ +void __init +platform_init(unsigned long r3, unsigned long r4, unsigned long r5, + unsigned long r6, unsigned long r7) +{ + /* parse_bootinfo must always be called first */ + parse_bootinfo(find_bootinfo()); + + /* + * If we were passed in a board information, copy it into the + * residual data area. + */ + if (r3) { + memcpy((void *) __res, (void *) (r3 + KERNELBASE), + sizeof (bd_t)); + } + +#ifdef CONFIG_SERIAL_TEXT_DEBUG + /* Use the last TLB entry to map CCSRBAR to allow access to DUART regs */ + settlbcam(NUM_TLBCAMS - 1, UARTA_ADDR, + UARTA_ADDR, 0x1000, _PAGE_IO, 0); +#endif + +#if defined(CONFIG_BLK_DEV_INITRD) + /* + * If the init RAM disk has been configured in, and there's a valid + * starting address for it, set it up. + */ + if (r4) { + initrd_start = r4 + KERNELBASE; + initrd_end = r5 + KERNELBASE; + } +#endif /* CONFIG_BLK_DEV_INITRD */ + + /* Copy the kernel command line arguments to a safe place. */ + + if (r6) { + *(char *) (r7 + KERNELBASE) = 0; + strcpy(cmd_line, (char *) (r6 + KERNELBASE)); + } + + /* setup the PowerPC module struct */ + ppc_md.setup_arch = sbc8560_setup_arch; + ppc_md.show_cpuinfo = sbc8560_show_cpuinfo; + + ppc_md.init_IRQ = sbc8560_init_IRQ; + ppc_md.get_irq = openpic_get_irq; + + ppc_md.restart = mpc85xx_restart; + ppc_md.power_off = mpc85xx_power_off; + ppc_md.halt = mpc85xx_halt; + + ppc_md.find_end_of_memory = mpc85xx_find_end_of_memory; + + ppc_md.time_init = NULL; + ppc_md.set_rtc_time = NULL; + ppc_md.get_rtc_time = NULL; + ppc_md.calibrate_decr = mpc85xx_calibrate_decr; + +#if defined(CONFIG_SERIAL_8250) && defined(CONFIG_SERIAL_TEXT_DEBUG) + ppc_md.progress = gen550_progress; +#endif /* CONFIG_SERIAL_8250 && CONFIG_SERIAL_TEXT_DEBUG */ + + if (ppc_md.progress) + ppc_md.progress("sbc8560_init(): exit", 0); +} diff --git a/arch/ppc/platforms/85xx/sbc8560.h b/arch/ppc/platforms/85xx/sbc8560.h new file mode 100644 index 000000000..5c86187f8 --- /dev/null +++ b/arch/ppc/platforms/85xx/sbc8560.h @@ -0,0 +1,48 @@ +/* + * arch/ppc/platforms/85xx/sbc8560.h + * + * Wind River SBC8560 board definitions + * + * Copyright 2003 Motorola Inc. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MACH_SBC8560_H__ +#define __MACH_SBC8560_H__ + +#include +#include +#include + +#ifdef CONFIG_SERIAL_MANY_PORTS +#define RS_TABLE_SIZE 64 +#else +#define RS_TABLE_SIZE 2 +#endif + +/* Rate for the 1.8432 Mhz clock for the onboard serial chip */ +#define BASE_BAUD ( 1843200 / 16 ) + +#ifdef CONFIG_SERIAL_DETECT_IRQ +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF|ASYNC_SKIP_TEST|ASYNC_AUTO_IRQ) +#else +#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF|ASYNC_SKIP_TEST) +#endif + +#define STD_SERIAL_PORT_DFNS \ + { 0, BASE_BAUD, UARTA_ADDR, MPC85xx_IRQ_EXT9, STD_COM_FLAGS, /* ttyS0 */ \ + iomem_base: (u8 *)UARTA_ADDR, \ + io_type: SERIAL_IO_MEM }, \ + { 0, BASE_BAUD, UARTB_ADDR, MPC85xx_IRQ_EXT10, STD_COM_FLAGS, /* ttyS1 */ \ + iomem_base: (u8 *)UARTB_ADDR, \ + io_type: SERIAL_IO_MEM }, + +#define SERIAL_PORT_DFNS \ + STD_SERIAL_PORT_DFNS + +#endif /* __MACH_SBC8560_H__ */ diff --git a/arch/ppc/platforms/85xx/sbc85xx.c b/arch/ppc/platforms/85xx/sbc85xx.c new file mode 100644 index 000000000..a7a33fdff --- /dev/null +++ b/arch/ppc/platforms/85xx/sbc85xx.c @@ -0,0 +1,215 @@ +/* + * arch/ppc/platform/85xx/sbc85xx.c + * + * WindRiver PowerQUICC III SBC85xx board common routines + * + * Copyright 2002, 2003 Motorola Inc. + * Copyright 2004 Red Hat, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +unsigned char __res[sizeof (bd_t)]; + +#ifndef CONFIG_PCI +unsigned long isa_io_base = 0; +unsigned long isa_mem_base = 0; +unsigned long pci_dram_offset = 0; +#endif + +extern unsigned long total_memory; /* in mm/init */ + +/* Internal interrupts are all Level Sensitive, and Positive Polarity */ + +static u_char sbc8560_openpic_initsenses[] __initdata = { + (IRQ_POLARITY_POSITIVE), /* Internal 0: L2 Cache */ + (IRQ_POLARITY_POSITIVE), /* Internal 1: ECM */ + (IRQ_POLARITY_POSITIVE), /* Internal 2: DDR DRAM */ + (IRQ_POLARITY_POSITIVE), /* Internal 3: LBIU */ + (IRQ_POLARITY_POSITIVE), /* Internal 4: DMA 0 */ + (IRQ_POLARITY_POSITIVE), /* Internal 5: DMA 1 */ + (IRQ_POLARITY_POSITIVE), /* Internal 6: DMA 2 */ + (IRQ_POLARITY_POSITIVE), /* Internal 7: DMA 3 */ + (IRQ_POLARITY_POSITIVE), /* Internal 8: PCI/PCI-X */ + (IRQ_POLARITY_POSITIVE), /* Internal 9: RIO Inbound Port Write Error */ + (IRQ_POLARITY_POSITIVE), /* Internal 10: RIO Doorbell Inbound */ + (IRQ_POLARITY_POSITIVE), /* Internal 11: RIO Outbound Message */ + (IRQ_POLARITY_POSITIVE), /* Internal 12: RIO Inbound Message */ + (IRQ_POLARITY_POSITIVE), /* Internal 13: TSEC 0 Transmit */ + (IRQ_POLARITY_POSITIVE), /* Internal 14: TSEC 0 Receive */ + (IRQ_POLARITY_POSITIVE), /* Internal 15: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 16: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 17: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 18: TSEC 0 Receive/Transmit Error */ + (IRQ_POLARITY_POSITIVE), /* Internal 19: TSEC 1 Transmit */ + (IRQ_POLARITY_POSITIVE), /* Internal 20: TSEC 1 Receive */ + (IRQ_POLARITY_POSITIVE), /* Internal 21: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 22: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 23: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 24: TSEC 1 Receive/Transmit Error */ + (IRQ_POLARITY_POSITIVE), /* Internal 25: Fast Ethernet */ + (IRQ_POLARITY_POSITIVE), /* Internal 26: DUART */ + (IRQ_POLARITY_POSITIVE), /* Internal 27: I2C */ + (IRQ_POLARITY_POSITIVE), /* Internal 28: Performance Monitor */ + (IRQ_POLARITY_POSITIVE), /* Internal 29: Unused */ + (IRQ_POLARITY_POSITIVE), /* Internal 30: CPM */ + (IRQ_POLARITY_POSITIVE), /* Internal 31: Unused */ + 0x0, /* External 0: */ + 0x0, /* External 1: */ +#if defined(CONFIG_PCI) + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 2: PCI slot 0 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 3: PCI slot 1 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 4: PCI slot 2 */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 5: PCI slot 3 */ +#else + 0x0, /* External 2: */ + 0x0, /* External 3: */ + 0x0, /* External 4: */ + 0x0, /* External 5: */ +#endif + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 6: PHY */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE), /* External 7: PHY */ + 0x0, /* External 8: */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* External 9: PHY */ + (IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE), /* External 10: PHY */ + 0x0, /* External 11: */ +}; + +/* ************************************************************************ */ +int +sbc8560_show_cpuinfo(struct seq_file *m) +{ + uint pvid, svid, phid1; + uint memsize = total_memory; + bd_t *binfo = 
(bd_t *) __res; + unsigned int freq; + + /* get the core frequency */ + freq = binfo->bi_intfreq; + + pvid = mfspr(PVR); + svid = mfspr(SVR); + + seq_printf(m, "Vendor\t\t: Wind River\n"); + + switch (svid & 0xffff0000) { + case SVR_8540: + seq_printf(m, "Machine\t\t: hhmmm, this board isn't made yet!\n"); + break; + case SVR_8560: + seq_printf(m, "Machine\t\t: SBC8560\n"); + break; + default: + seq_printf(m, "Machine\t\t: unknown\n"); + break; + } + seq_printf(m, "bus freq\t: %u.%.6u MHz\n", freq / 1000000, + freq % 1000000); + seq_printf(m, "PVR\t\t: 0x%x\n", pvid); + seq_printf(m, "SVR\t\t: 0x%x\n", svid); + + /* Display cpu Pll setting */ + phid1 = mfspr(HID1); + seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); + + /* Display the amount of memory */ + seq_printf(m, "Memory\t\t: %d MB\n", memsize / (1024 * 1024)); + + return 0; +} + +void __init +sbc8560_init_IRQ(void) +{ + bd_t *binfo = (bd_t *) __res; + /* Determine the Physical Address of the OpenPIC regs */ + phys_addr_t OpenPIC_PAddr = + binfo->bi_immr_base + MPC85xx_OPENPIC_OFFSET; + OpenPIC_Addr = ioremap(OpenPIC_PAddr, MPC85xx_OPENPIC_SIZE); + OpenPIC_InitSenses = sbc8560_openpic_initsenses; + OpenPIC_NumInitSenses = sizeof (sbc8560_openpic_initsenses); + + /* Skip reserved space and internal sources */ + openpic_set_sources(0, 32, OpenPIC_Addr + 0x10200); + /* Map PIC IRQs 0-11 */ + openpic_set_sources(32, 12, OpenPIC_Addr + 0x10000); + + /* we let openpic interrupts starting from an offset, to + * leave space for cascading interrupts underneath. + */ + openpic_init(MPC85xx_OPENPIC_IRQ_OFFSET); + + return; +} + +/* + * interrupt routing + */ + +#ifdef CONFIG_PCI +int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, + unsigned char pin) +{ + static char pci_irq_table[][4] = + /* + * PCI IDSEL/INTPIN->INTLINE + * A B C D + */ + { + {PIRQA, PIRQB, PIRQC, PIRQD}, + {PIRQD, PIRQA, PIRQB, PIRQC}, + {PIRQC, PIRQD, PIRQA, PIRQB}, + {PIRQB, PIRQC, PIRQD, PIRQA}, + }; + + const long min_idsel = 12, max_idsel = 15, irqs_per_slot = 4; + return PCI_IRQ_TABLE_LOOKUP; +} + +int mpc85xx_exclude_device(u_char bus, u_char devfn) +{ + if (bus == 0 && PCI_SLOT(devfn) == 0) + return PCIBIOS_DEVICE_NOT_FOUND; + else + return PCIBIOS_SUCCESSFUL; +} +#endif /* CONFIG_PCI */ diff --git a/arch/ppc/platforms/rpx8260.h b/arch/ppc/platforms/rpx8260.h new file mode 100644 index 000000000..7d5cd8893 --- /dev/null +++ b/arch/ppc/platforms/rpx8260.h @@ -0,0 +1,74 @@ +/* + * A collection of structures, addresses, and values associated with + * the Embedded Planet RPX6 (or RPX Super) MPC8260 board. + * Copied from the RPX-Classic and SBS8260 stuff. + * + * Copyright (c) 2001 Dan Malek + */ +#ifdef __KERNEL__ +#ifndef __ASM_PLATFORMS_RPX8260_H__ +#define __ASM_PLATFORMS_RPX8260_H__ + +/* A Board Information structure that is given to a program when + * prom starts it up. 
+ */ +typedef struct bd_info { + unsigned int bi_memstart; /* Memory start address */ + unsigned int bi_memsize; /* Memory (end) size in bytes */ + unsigned int bi_nvsize; /* NVRAM size in bytes (can be 0) */ + unsigned int bi_intfreq; /* Internal Freq, in Hz */ + unsigned int bi_busfreq; /* Bus Freq, in MHz */ + unsigned int bi_cpmfreq; /* CPM Freq, in MHz */ + unsigned int bi_brgfreq; /* BRG Freq, in MHz */ + unsigned int bi_vco; /* VCO Out from PLL */ + unsigned int bi_baudrate; /* Default console baud rate */ + unsigned int bi_immr; /* IMMR when called from boot rom */ + unsigned char bi_enetaddr[6]; +} bd_t; + +extern bd_t m8xx_board_info; + +/* Memory map is configured by the PROM startup. + * We just map a few things we need. The CSR is actually 4 byte-wide + * registers that can be accessed as 8-, 16-, or 32-bit values. + */ +#define CPM_MAP_ADDR ((uint)0xf0000000) +#define RPX_CSR_ADDR ((uint)0xfa000000) +#define RPX_CSR_SIZE ((uint)(512 * 1024)) +#define RPX_NVRTC_ADDR ((uint)0xfa080000) +#define RPX_NVRTC_SIZE ((uint)(512 * 1024)) + +/* The RPX6 has 16, byte wide control/status registers. + * Not all are used (yet). + */ +extern volatile u_char *rpx6_csr_addr; + +/* Things of interest in the CSR. +*/ +#define BCSR0_ID_MASK ((u_char)0xf0) /* Read only */ +#define BCSR0_SWITCH_MASK ((u_char)0x0f) /* Read only */ +#define BCSR1_XCVR_SMC1 ((u_char)0x80) +#define BCSR1_XCVR_SMC2 ((u_char)0x40) +#define BCSR2_FLASH_WENABLE ((u_char)0x20) +#define BCSR2_NVRAM_ENABLE ((u_char)0x10) +#define BCSR2_ALT_IRQ2 ((u_char)0x08) +#define BCSR2_ALT_IRQ3 ((u_char)0x04) +#define BCSR2_PRST ((u_char)0x02) /* Force reset */ +#define BCSR2_ENPRST ((u_char)0x01) /* Enable POR */ +#define BCSR3_MODCLK_MASK ((u_char)0xe0) +#define BCSR3_ENCLKHDR ((u_char)0x10) +#define BCSR3_LED5 ((u_char)0x04) /* 0 == on */ +#define BCSR3_LED6 ((u_char)0x02) /* 0 == on */ +#define BCSR3_LED7 ((u_char)0x01) /* 0 == on */ +#define BCSR4_EN_PHY ((u_char)0x80) /* Enable PHY */ +#define BCSR4_EN_MII ((u_char)0x40) /* Enable PHY */ +#define BCSR4_MII_READ ((u_char)0x04) +#define BCSR4_MII_MDC ((u_char)0x02) +#define BCSR4_MII_MDIO ((u_char)0x01) +#define BCSR13_FETH_IRQMASK ((u_char)0xf0) +#define BCSR15_FETH_IRQ ((u_char)0x20) + +#define PHY_INTERRUPT SIU_INT_IRQ7 + +#endif /* __ASM_PLATFORMS_RPX8260_H__ */ +#endif /* __KERNEL__ */ diff --git a/arch/ppc/syslib/cpm2_common.c b/arch/ppc/syslib/cpm2_common.c new file mode 100644 index 000000000..5ca6d4cf3 --- /dev/null +++ b/arch/ppc/syslib/cpm2_common.c @@ -0,0 +1,205 @@ +/* + * General Purpose functions for the global management of the + * 8260 Communication Processor Module. + * Copyright (c) 1999 Dan Malek (dmalek@jlc.net) + * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com) + * 2.3.99 Updates + * + * In addition to the individual control of the communication + * channels, there are a few functions that globally affect the + * communication processor. + * + * Buffer descriptors must be allocated from the dual ported memory + * space. The allocator for that is here. When the communication + * process is reset, we reclaim the memory available. There is + * currently no deallocator for this memory. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void cpm2_dpinit(void); +cpm_cpm2_t *cpmp; /* Pointer to comm processor space */ + +/* We allocate this here because it is used almost exclusively for + * the communication processor devices. 
+ */ +cpm2_map_t *cpm2_immr; + +void +cpm2_reset(void) +{ + cpm2_immr = (cpm2_map_t *)CPM_MAP_ADDR; + + /* Reclaim the DP memory for our use. + */ + cpm2_dpinit(); + + /* Tell everyone where the comm processor resides. + */ + cpmp = &cpm2_immr->im_cpm; +} + +/* Set a baud rate generator. This needs lots of work. There are + * eight BRGs, which can be connected to the CPM channels or output + * as clocks. The BRGs are in two different block of internal + * memory mapped space. + * The baud rate clock is the system clock divided by something. + * It was set up long ago during the initial boot phase and is + * is given to us. + * Baud rate clocks are zero-based in the driver code (as that maps + * to port numbers). Documentation uses 1-based numbering. + */ +#define BRG_INT_CLK (((bd_t *)__res)->bi_brgfreq) +#define BRG_UART_CLK (BRG_INT_CLK/16) + +/* This function is used by UARTS, or anything else that uses a 16x + * oversampled clock. + */ +void +cpm2_setbrg(uint brg, uint rate) +{ + volatile uint *bp; + + /* This is good enough to get SMCs running..... + */ + if (brg < 4) { + bp = (uint *)&cpm2_immr->im_brgc1; + } + else { + bp = (uint *)&cpm2_immr->im_brgc5; + brg -= 4; + } + bp += brg; + *bp = ((BRG_UART_CLK / rate) << 1) | CPM_BRG_EN; +} + +/* This function is used to set high speed synchronous baud rate + * clocks. + */ +void +cpm2_fastbrg(uint brg, uint rate, int div16) +{ + volatile uint *bp; + + if (brg < 4) { + bp = (uint *)&cpm2_immr->im_brgc1; + } + else { + bp = (uint *)&cpm2_immr->im_brgc5; + brg -= 4; + } + bp += brg; + *bp = ((BRG_INT_CLK / rate) << 1) | CPM_BRG_EN; + if (div16) + *bp |= CPM_BRG_DIV16; +} + +/* + * dpalloc / dpfree bits. + */ +static spinlock_t cpm_dpmem_lock; +/* 16 blocks should be enough to satisfy all requests + * until the memory subsystem goes up... */ +static rh_block_t cpm_boot_dpmem_rh_block[16]; +static rh_info_t cpm_dpmem_info; + +static void cpm2_dpinit(void) +{ + void *dprambase = &((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase; + + spin_lock_init(&cpm_dpmem_lock); + + /* initialize the info header */ + rh_init(&cpm_dpmem_info, 1, + sizeof(cpm_boot_dpmem_rh_block) / + sizeof(cpm_boot_dpmem_rh_block[0]), + cpm_boot_dpmem_rh_block); + + /* Attach the usable dpmem area */ + /* XXX: This is actually crap. CPM_DATAONLY_BASE and + * CPM_DATAONLY_SIZE is only a subset of the available dpram. It + * varies with the processor and the microcode patches activated. + * But the following should be at least safe. + */ + rh_attach_region(&cpm_dpmem_info, dprambase + CPM_DATAONLY_BASE, + CPM_DATAONLY_SIZE); +} + +/* This function used to return an index into the DPRAM area. + * Now it returns the actuall physical address of that area. 
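As a usage illustration for the allocator defined below (hypothetical caller, not part of the patch), a driver needing a buffer-descriptor area in dual-ported RAM would allocate it and then derive the DPRAM-relative offset via cpm2_dpram_offset(), mentioned just below:

	/* Illustrative only: a 64-byte, 8-byte-aligned region of CPM
	 * dual-ported RAM, plus its offset form for the CPM. */
	void *bd_area = cpm2_dpalloc(64, 8);
	uint bd_offset = cpm2_dpram_offset(bd_area);
	/* program the CPM parameter RAM with bd_offset, access bd_area from the CPU */
	cpm2_dpfree(bd_area);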
+ * use cpm2_dpram_offset() to get the index + */ +void *cpm2_dpalloc(uint size, uint align) +{ + void *start; + unsigned long flags; + + spin_lock_irqsave(&cpm_dpmem_lock, flags); + cpm_dpmem_info.alignment = align; + start = rh_alloc(&cpm_dpmem_info, size, "commproc"); + spin_unlock_irqrestore(&cpm_dpmem_lock, flags); + + return start; +} +EXPORT_SYMBOL(cpm2_dpalloc); + +int cpm2_dpfree(void *addr) +{ + int ret; + unsigned long flags; + + spin_lock_irqsave(&cpm_dpmem_lock, flags); + ret = rh_free(&cpm_dpmem_info, addr); + spin_unlock_irqrestore(&cpm_dpmem_lock, flags); + + return ret; +} +EXPORT_SYMBOL(cpm2_dpfree); + +/* not sure if this is ever needed */ +void *cpm2_dpalloc_fixed(void *addr, uint size, uint align) +{ + void *start; + unsigned long flags; + + spin_lock_irqsave(&cpm_dpmem_lock, flags); + cpm_dpmem_info.alignment = align; + start = rh_alloc_fixed(&cpm_dpmem_info, addr, size, "commproc"); + spin_unlock_irqrestore(&cpm_dpmem_lock, flags); + + return start; +} +EXPORT_SYMBOL(cpm2_dpalloc_fixed); + +void cpm2_dpdump(void) +{ + rh_dump(&cpm_dpmem_info); +} +EXPORT_SYMBOL(cpm2_dpdump); + +uint cpm2_dpram_offset(void *addr) +{ + return (uint)((u_char *)addr - + ((uint)((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase)); +} +EXPORT_SYMBOL(cpm2_dpram_offset); + +void *cpm2_dpram_addr(int offset) +{ + return (void *)&((cpm2_map_t *)CPM_MAP_ADDR)->im_dprambase[offset]; +} +EXPORT_SYMBOL(cpm2_dpram_addr); diff --git a/arch/ppc/syslib/m8260_pci_erratum9.c b/arch/ppc/syslib/m8260_pci_erratum9.c new file mode 100644 index 000000000..ae76a1b52 --- /dev/null +++ b/arch/ppc/syslib/m8260_pci_erratum9.c @@ -0,0 +1,474 @@ +/* + * arch/ppc/platforms/mpc8260_pci9.c + * + * Workaround for device erratum PCI 9. + * See Motorola's "XPC826xA Family Device Errata Reference." + * The erratum applies to all 8260 family Hip4 processors. It is scheduled + * to be fixed in HiP4 Rev C. Erratum PCI 9 states that a simultaneous PCI + * inbound write transaction and PCI outbound read transaction can result in a + * bus deadlock. The suggested workaround is to use the IDMA controller to + * perform all reads from PCI configuration, memory, and I/O space. + * + * Author: andy_lowe@mvista.com + * + * 2003 (c) MontaVista Software, Inc. This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "m8260_pci.h" + +#ifdef CONFIG_8260_PCI9 +/*#include */ /* included in asm/io.h */ + +#define IDMA_XFER_BUF_SIZE 64 /* size of the IDMA transfer buffer */ + +/* define a structure for the IDMA dpram usage */ +typedef struct idma_dpram_s { + idma_t pram; /* IDMA parameter RAM */ + u_char xfer_buf[IDMA_XFER_BUF_SIZE]; /* IDMA transfer buffer */ + idma_bd_t bd; /* buffer descriptor */ +} idma_dpram_t; + +/* define offsets relative to start of IDMA dpram */ +#define IDMA_XFER_BUF_OFFSET (sizeof(idma_t)) +#define IDMA_BD_OFFSET (sizeof(idma_t) + IDMA_XFER_BUF_SIZE) + +/* define globals */ +static volatile idma_dpram_t *idma_dpram; + +/* Exactly one of CONFIG_8260_PCI9_IDMAn must be defined, + * where n is 1, 2, 3, or 4. This selects the IDMA channel used for + * the PCI9 workaround. 
+ */ +#ifdef CONFIG_8260_PCI9_IDMA1 +#define IDMA_CHAN 0 +#define PROFF_IDMA PROFF_IDMA1_BASE +#define IDMA_PAGE CPM_CR_IDMA1_PAGE +#define IDMA_SBLOCK CPM_CR_IDMA1_SBLOCK +#endif +#ifdef CONFIG_8260_PCI9_IDMA2 +#define IDMA_CHAN 1 +#define PROFF_IDMA PROFF_IDMA2_BASE +#define IDMA_PAGE CPM_CR_IDMA2_PAGE +#define IDMA_SBLOCK CPM_CR_IDMA2_SBLOCK +#endif +#ifdef CONFIG_8260_PCI9_IDMA3 +#define IDMA_CHAN 2 +#define PROFF_IDMA PROFF_IDMA3_BASE +#define IDMA_PAGE CPM_CR_IDMA3_PAGE +#define IDMA_SBLOCK CPM_CR_IDMA3_SBLOCK +#endif +#ifdef CONFIG_8260_PCI9_IDMA4 +#define IDMA_CHAN 3 +#define PROFF_IDMA PROFF_IDMA4_BASE +#define IDMA_PAGE CPM_CR_IDMA4_PAGE +#define IDMA_SBLOCK CPM_CR_IDMA4_SBLOCK +#endif + +void idma_pci9_init(void) +{ + uint dpram_offset; + volatile idma_t *pram; + volatile im_idma_t *idma_reg; + volatile cpm2_map_t *immap = cpm2_immr; + + /* allocate IDMA dpram */ + dpram_offset = cpm2_dpalloc(sizeof(idma_dpram_t), 64); + idma_dpram = + (volatile idma_dpram_t *)&immap->im_dprambase[dpram_offset]; + + /* initialize the IDMA parameter RAM */ + memset((void *)idma_dpram, 0, sizeof(idma_dpram_t)); + pram = &idma_dpram->pram; + pram->ibase = dpram_offset + IDMA_BD_OFFSET; + pram->dpr_buf = dpram_offset + IDMA_XFER_BUF_OFFSET; + pram->ss_max = 32; + pram->dts = 32; + + /* initialize the IDMA_BASE pointer to the IDMA parameter RAM */ + *((ushort *) &immap->im_dprambase[PROFF_IDMA]) = dpram_offset; + + /* initialize the IDMA registers */ + idma_reg = (volatile im_idma_t *) &immap->im_sdma.sdma_idsr1; + idma_reg[IDMA_CHAN].idmr = 0; /* mask all IDMA interrupts */ + idma_reg[IDMA_CHAN].idsr = 0xff; /* clear all event flags */ + + printk("<4>Using IDMA%d for MPC8260 device erratum PCI 9 workaround\n", + IDMA_CHAN + 1); + + return; +} + +/* Use the IDMA controller to transfer data from I/O memory to local RAM. + * The src address must be a physical address suitable for use by the DMA + * controller with no translation. The dst address must be a kernel virtual + * address. The dst address is translated to a physical address via + * virt_to_phys(). + * The sinc argument specifies whether or not the source address is incremented + * by the DMA controller. The source address is incremented if and only if sinc + * is non-zero. The destination address is always incremented since the + * destination is always host RAM. 
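Two hedged usage sketches of idma_pci9_read(), mirroring how the readl() and memcpy_fromio() replacements later in this file call it: a register read passes sinc = 0 so the source address stays fixed, while a block copy passes sinc = 1 so it advances. The wrapper names and the physical-address parameters are hypothetical, not part of the patch:

	/* Illustrative only: fixed-address register read vs. incrementing copy. */
	static u32 sketch_read_reg(unsigned long reg_pa)	/* physical address */
	{
		u32 val;
		/* source not incremented: same register for every beat */
		idma_pci9_read((u8 *)&val, (u8 *)reg_pa, sizeof(val), sizeof(val), 0);
		return val;
	}

	static void sketch_copy_block(u8 *dst, unsigned long mem_pa, int len)
	{
		/* source incremented, 32-byte transfer units, as memcpy_fromio() does */
		idma_pci9_read(dst, (u8 *)mem_pa, len, 32, 1);
	}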
+ */ +static void +idma_pci9_read(u8 *dst, u8 *src, int bytes, int unit_size, int sinc) +{ + unsigned long flags; + volatile idma_t *pram = &idma_dpram->pram; + volatile idma_bd_t *bd = &idma_dpram->bd; + volatile cpm2_map_t *immap = cpm2_immr; + + local_irq_save(flags); + + /* initialize IDMA parameter RAM for this transfer */ + if (sinc) + pram->dcm = IDMA_DCM_DMA_WRAP_64 | IDMA_DCM_SINC + | IDMA_DCM_DINC | IDMA_DCM_SD_MEM2MEM; + else + pram->dcm = IDMA_DCM_DMA_WRAP_64 | IDMA_DCM_DINC + | IDMA_DCM_SD_MEM2MEM; + pram->ibdptr = pram->ibase; + pram->sts = unit_size; + pram->istate = 0; + + /* initialize the buffer descriptor */ + bd->dst = virt_to_phys(dst); + bd->src = (uint) src; + bd->len = bytes; + bd->flags = IDMA_BD_V | IDMA_BD_W | IDMA_BD_I | IDMA_BD_L | IDMA_BD_DGBL + | IDMA_BD_DBO_BE | IDMA_BD_SBO_BE | IDMA_BD_SDTB; + + /* issue the START_IDMA command to the CP */ + while (immap->im_cpm.cp_cpcr & CPM_CR_FLG); + immap->im_cpm.cp_cpcr = mk_cr_cmd(IDMA_PAGE, IDMA_SBLOCK, 0, + CPM_CR_START_IDMA) | CPM_CR_FLG; + while (immap->im_cpm.cp_cpcr & CPM_CR_FLG); + + /* wait for transfer to complete */ + while(bd->flags & IDMA_BD_V); + + local_irq_restore(flags); + + return; +} + +/* Use the IDMA controller to transfer data from I/O memory to local RAM. + * The dst address must be a physical address suitable for use by the DMA + * controller with no translation. The src address must be a kernel virtual + * address. The src address is translated to a physical address via + * virt_to_phys(). + * The dinc argument specifies whether or not the dest address is incremented + * by the DMA controller. The source address is incremented if and only if sinc + * is non-zero. The source address is always incremented since the + * source is always host RAM. + */ +static void +idma_pci9_write(u8 *dst, u8 *src, int bytes, int unit_size, int dinc) +{ + unsigned long flags; + volatile idma_t *pram = &idma_dpram->pram; + volatile idma_bd_t *bd = &idma_dpram->bd; + volatile cpm2_map_t *immap = cpm2_immr; + + local_irq_save(flags); + + /* initialize IDMA parameter RAM for this transfer */ + if (dinc) + pram->dcm = IDMA_DCM_DMA_WRAP_64 | IDMA_DCM_SINC + | IDMA_DCM_DINC | IDMA_DCM_SD_MEM2MEM; + else + pram->dcm = IDMA_DCM_DMA_WRAP_64 | IDMA_DCM_SINC + | IDMA_DCM_SD_MEM2MEM; + pram->ibdptr = pram->ibase; + pram->sts = unit_size; + pram->istate = 0; + + /* initialize the buffer descriptor */ + bd->dst = (uint) dst; + bd->src = virt_to_phys(src); + bd->len = bytes; + bd->flags = IDMA_BD_V | IDMA_BD_W | IDMA_BD_I | IDMA_BD_L | IDMA_BD_DGBL + | IDMA_BD_DBO_BE | IDMA_BD_SBO_BE | IDMA_BD_SDTB; + + /* issue the START_IDMA command to the CP */ + while (immap->im_cpm.cp_cpcr & CPM_CR_FLG); + immap->im_cpm.cp_cpcr = mk_cr_cmd(IDMA_PAGE, IDMA_SBLOCK, 0, + CPM_CR_START_IDMA) | CPM_CR_FLG; + while (immap->im_cpm.cp_cpcr & CPM_CR_FLG); + + /* wait for transfer to complete */ + while(bd->flags & IDMA_BD_V); + + local_irq_restore(flags); + + return; +} + +/* Same as idma_pci9_read, but 16-bit little-endian byte swapping is performed + * if the unit_size is 2, and 32-bit little-endian byte swapping is performed if + * the unit_size is 4. 
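A worked illustration of that swap (values hypothetical): a dword that a little-endian PCI device holds as 0x12345678 travels over the bus as the byte sequence 78 56 34 12; the raw IDMA copy preserves byte order, so a big-endian host reads it back as 0x78563412, and swab32() restores the value the caller expects:

	u32 raw = 0x78563412;	/* bytes 78 56 34 12 as landed in host RAM */
	u32 val = swab32(raw);	/* 0x12345678, the device's original value */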
+ */ +static void +idma_pci9_read_le(u8 *dst, u8 *src, int bytes, int unit_size, int sinc) +{ + int i; + u8 *p; + + idma_pci9_read(dst, src, bytes, unit_size, sinc); + switch(unit_size) { + case 2: + for (i = 0, p = dst; i < bytes; i += 2, p += 2) + swab16s((u16 *) p); + break; + case 4: + for (i = 0, p = dst; i < bytes; i += 4, p += 4) + swab32s((u32 *) p); + break; + default: + break; + } +} +EXPORT_SYMBOL(idma_pci9_init); +EXPORT_SYMBOL(idma_pci9_read); +EXPORT_SYMBOL(idma_pci9_read_le); + +static inline int is_pci_mem(unsigned long addr) +{ + if (addr >= MPC826x_PCI_LOWER_MMIO && + addr <= MPC826x_PCI_UPPER_MMIO) + return 1; + if (addr >= MPC826x_PCI_LOWER_MEM && + addr <= MPC826x_PCI_UPPER_MEM) + return 1; + return 0; +} + +#define is_pci_mem(pa) ( (pa > 0x80000000) && (pa < 0xc0000000)) +int readb(volatile unsigned char *addr) +{ + u8 val; + unsigned long pa = iopa((unsigned long) addr); + + if (!is_pci_mem(pa)) + return in_8(addr); + + idma_pci9_read((u8 *)&val, (u8 *)pa, sizeof(val), sizeof(val), 0); + return val; +} + +int readw(volatile unsigned short *addr) +{ + u16 val; + unsigned long pa = iopa((unsigned long) addr); + + if (!is_pci_mem(pa)) + return in_le16(addr); + + idma_pci9_read((u8 *)&val, (u8 *)pa, sizeof(val), sizeof(val), 0); + return swab16(val); +} + +unsigned readl(volatile unsigned *addr) +{ + u32 val; + unsigned long pa = iopa((unsigned long) addr); + + if (!is_pci_mem(pa)) + return in_le32(addr); + + idma_pci9_read((u8 *)&val, (u8 *)pa, sizeof(val), sizeof(val), 0); + return swab32(val); +} + +int inb(unsigned port) +{ + u8 val; + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)&val, (u8 *)addr, sizeof(val), sizeof(val), 0); + return val; +} + +int inw(unsigned port) +{ + u16 val; + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)&val, (u8 *)addr, sizeof(val), sizeof(val), 0); + return swab16(val); +} + +unsigned inl(unsigned port) +{ + u32 val; + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)&val, (u8 *)addr, sizeof(val), sizeof(val), 0); + return swab32(val); +} + +void insb(unsigned port, void *buf, int ns) +{ + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)buf, (u8 *)addr, ns*sizeof(u8), sizeof(u8), 0); +} + +void insw(unsigned port, void *buf, int ns) +{ + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)buf, (u8 *)addr, ns*sizeof(u16), sizeof(u16), 0); +} + +void insl(unsigned port, void *buf, int nl) +{ + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)buf, (u8 *)addr, nl*sizeof(u32), sizeof(u32), 0); +} + +void insw_ns(unsigned port, void *buf, int ns) +{ + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)buf, (u8 *)addr, ns*sizeof(u16), sizeof(u16), 0); +} + +void insl_ns(unsigned port, void *buf, int nl) +{ + u8 *addr = (u8 *)(port + _IO_BASE); + + idma_pci9_read((u8 *)buf, (u8 *)addr, nl*sizeof(u32), sizeof(u32), 0); +} + +void *memcpy_fromio(void *dest, unsigned long src, size_t count) +{ + unsigned long pa = iopa((unsigned long) src); + + if (is_pci_mem(pa)) + idma_pci9_read((u8 *)dest, (u8 *)pa, count, 32, 1); + else + memcpy(dest, (void *)src, count); + return dest; +} + +EXPORT_SYMBOL(readb); +EXPORT_SYMBOL(readw); +EXPORT_SYMBOL(readl); +EXPORT_SYMBOL(inb); +EXPORT_SYMBOL(inw); +EXPORT_SYMBOL(inl); +EXPORT_SYMBOL(insb); +EXPORT_SYMBOL(insw); +EXPORT_SYMBOL(insl); +EXPORT_SYMBOL(insw_ns); +EXPORT_SYMBOL(insl_ns); +EXPORT_SYMBOL(memcpy_fromio); + +#endif /* ifdef CONFIG_8260_PCI9 */ + +/* Indirect PCI routines adapted from arch/ppc/kernel/indirect_pci.c. 
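For the config cycles issued by the routines that follow, a worked encoding example (target purely hypothetical): bus 0, device 0x12, function 0 (devfn = 0x90), dword at offset 0x10, cfg_type 0 gives ((0x10 | 0) << 24) | (0x90 << 16) | (0 << 8) | 0x80 = 0x10900080, which out_be32() writes to hose->cfg_addr. The byte-reversed layout relative to the classic 0x80000000-style CONFIG_ADDRESS word appears to match the bridge's little-endian register, with the low 0x80 byte supplying the enable bit.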
+ * Copyright (C) 1998 Gabriel Paubert. + */ +#ifndef CONFIG_8260_PCI9 +#define cfg_read(val, addr, type, op) *val = op((type)(addr)) +#else +#define cfg_read(val, addr, type, op) \ + idma_pci9_read_le((u8*)(val),(u8*)(addr),sizeof(*(val)),sizeof(*(val)),0) +#endif + +#define cfg_write(val, addr, type, op) op((type *)(addr), (val)) + +static int indirect_write_config(struct pci_bus *pbus, unsigned int devfn, int where, + int size, u32 value) +{ + struct pci_controller *hose = pbus->sysdata; + u8 cfg_type = 0; + if (ppc_md.pci_exclude_device) + if (ppc_md.pci_exclude_device(pbus->number, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (hose->set_cfg_type) + if (pbus->number != hose->first_busno) + cfg_type = 1; + + out_be32(hose->cfg_addr, + (((where & 0xfc) | cfg_type) << 24) | (devfn << 16) + | ((pbus->number - hose->bus_offset) << 8) | 0x80); + + switch (size) + { + case 1: + cfg_write(value, hose->cfg_data + (where & 3), u8, out_8); + break; + case 2: + cfg_write(value, hose->cfg_data + (where & 2), u16, out_le16); + break; + case 4: + cfg_write(value, hose->cfg_data + (where & 0), u32, out_le32); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static int indirect_read_config(struct pci_bus *pbus, unsigned int devfn, int where, + int size, u32 *value) +{ + struct pci_controller *hose = pbus->sysdata; + u8 cfg_type = 0; + if (ppc_md.pci_exclude_device) + if (ppc_md.pci_exclude_device(pbus->number, devfn)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (hose->set_cfg_type) + if (pbus->number != hose->first_busno) + cfg_type = 1; + + out_be32(hose->cfg_addr, + (((where & 0xfc) | cfg_type) << 24) | (devfn << 16) + | ((pbus->number - hose->bus_offset) << 8) | 0x80); + + switch (size) + { + case 1: + cfg_read(value, hose->cfg_data + (where & 3), u8 *, in_8); + break; + case 2: + cfg_read(value, hose->cfg_data + (where & 2), u16 *, in_le16); + break; + case 4: + cfg_read(value, hose->cfg_data + (where & 0), u32 *, in_le32); + break; + } + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops indirect_pci_ops = +{ + .read = indirect_read_config, + .write = indirect_write_config, +}; + +void +setup_m8260_indirect_pci(struct pci_controller* hose, u32 cfg_addr, u32 cfg_data) +{ + hose->ops = &indirect_pci_ops; + hose->cfg_addr = (unsigned int *) ioremap(cfg_addr, 4); + hose->cfg_data = (unsigned char *) ioremap(cfg_data, 4); +} diff --git a/arch/ppc/syslib/ppc85xx_setup.c b/arch/ppc/syslib/ppc85xx_setup.c new file mode 100644 index 000000000..33aa1dc93 --- /dev/null +++ b/arch/ppc/syslib/ppc85xx_setup.c @@ -0,0 +1,341 @@ +/* + * arch/ppc/syslib/ppc85xx_setup.c + * + * MPC85XX common board code + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
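In mpc85xx_calibrate_decr() below, the timebase/decrementer tick rate is the bus clock divided by 8; with a hypothetical bi_busfreq of 333 MHz and HZ = 1000, that works out to tb_ticks_per_jiffy = 333000000 / 8 / 1000 = 41625 ticks per jiffy.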
+ */ + +#include +#include +#include +#include +#include +#include +#include /* for linux/serial_core.h */ +#include + +#include +#include +#include +#include +#include +#include +#include + +/* Return the amount of memory */ +unsigned long __init +mpc85xx_find_end_of_memory(void) +{ + bd_t *binfo; + + binfo = (bd_t *) __res; + + return binfo->bi_memsize; +} + +/* The decrementer counts at the system (internal) clock freq divided by 8 */ +void __init +mpc85xx_calibrate_decr(void) +{ + bd_t *binfo = (bd_t *) __res; + unsigned int freq, divisor; + + /* get the core frequency */ + freq = binfo->bi_busfreq; + + /* The timebase is updated every 8 bus clocks, HID0[SEL_TBCLK] = 0 */ + divisor = 8; + tb_ticks_per_jiffy = freq / divisor / HZ; + tb_to_us = mulhwu_scale_factor(freq / divisor, 1000000); + + /* Set the time base to zero */ + mtspr(SPRN_TBWL, 0); + mtspr(SPRN_TBWU, 0); + + /* Clear any pending timer interrupts */ + mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); + + /* Enable decrementer interrupt */ + mtspr(SPRN_TCR, TCR_DIE); +} + +#ifdef CONFIG_SERIAL_8250 +void __init +mpc85xx_early_serial_map(void) +{ + struct uart_port serial_req; + bd_t *binfo = (bd_t *) __res; + phys_addr_t duart_paddr = binfo->bi_immr_base + MPC85xx_UART0_OFFSET; + + /* Setup serial port access */ + memset(&serial_req, 0, sizeof (serial_req)); + serial_req.uartclk = binfo->bi_busfreq; + serial_req.line = 0; + serial_req.irq = MPC85xx_IRQ_DUART; + serial_req.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST; + serial_req.iotype = SERIAL_IO_MEM; + serial_req.membase = ioremap(duart_paddr, MPC85xx_UART0_SIZE); + serial_req.mapbase = duart_paddr; + serial_req.regshift = 0; + +#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB) + gen550_init(0, &serial_req); +#endif + + if (early_serial_setup(&serial_req) != 0) + printk("Early serial init of port 0 failed\n"); + + /* Assume early_serial_setup() doesn't modify serial_req */ + duart_paddr = binfo->bi_immr_base + MPC85xx_UART1_OFFSET; + serial_req.line = 1; + serial_req.mapbase = duart_paddr; + serial_req.membase = ioremap(duart_paddr, MPC85xx_UART1_SIZE); + +#if defined(CONFIG_SERIAL_TEXT_DEBUG) || defined(CONFIG_KGDB) + gen550_init(1, &serial_req); +#endif + + if (early_serial_setup(&serial_req) != 0) + printk("Early serial init of port 1 failed\n"); +} +#endif + +void +mpc85xx_restart(char *cmd) +{ + local_irq_disable(); + abort(); +} + +void +mpc85xx_power_off(void) +{ + local_irq_disable(); + for(;;); +} + +void +mpc85xx_halt(void) +{ + local_irq_disable(); + for(;;); +} + +#ifdef CONFIG_PCI +static void __init +mpc85xx_setup_pci1(struct pci_controller *hose) +{ + volatile struct ccsr_pci *pci; + volatile struct ccsr_guts *guts; + unsigned short temps; + bd_t *binfo = (bd_t *) __res; + + pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI1_OFFSET, + MPC85xx_PCI1_SIZE); + + guts = ioremap(binfo->bi_immr_base + MPC85xx_GUTS_OFFSET, + MPC85xx_GUTS_SIZE); + + early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps); + temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; + early_write_config_word(hose, 0, 0, PCI_COMMAND, temps); + +#define PORDEVSR_PCI (0x00800000) /* PCI Mode */ + if (guts->pordevsr & PORDEVSR_PCI) { + early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80); + } else { + /* PCI-X init */ + temps = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ + | PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E; + early_write_config_word(hose, 0, 0, PCIX_COMMAND, temps); + } + + /* Disable all windows (except powar0 since its ignored) */ + pci->powar1 = 0; + 
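A reading aid for the window-attribute values programmed a few lines below (the field interpretation is inferred from the in-line comments, so treat it as illustrative rather than authoritative): the low six bits appear to encode the window size as 2^(n+1) bytes and bit 31 the enable, so 0x8004401c (n = 0x1c) corresponds to 2^29 = 512 MB, 0x80088017 (n = 0x17) to 2^24 = 16 MB, and 0xa0f5501e (n = 0x1e) to 2^31 = 2 GB, matching the "512M", "16M" and "2G" comments.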
pci->powar2 = 0; + pci->powar3 = 0; + pci->powar4 = 0; + pci->piwar1 = 0; + pci->piwar2 = 0; + pci->piwar3 = 0; + + /* Setup 512M Phys:PCI 1:1 outbound mem window @ 0x80000000 */ + pci->potar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff; + pci->potear1 = 0x00000000; + pci->powbar1 = (MPC85XX_PCI1_LOWER_MEM >> 12) & 0x000fffff; + pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */ + + /* Setup 16M outboud IO windows @ 0xe2000000 */ + pci->potar2 = 0x00000000; + pci->potear2 = 0x00000000; + pci->powbar2 = (MPC85XX_PCI1_IO_BASE >> 12) & 0x000fffff; + pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */ + + /* Setup 2G inbound Memory Window @ 0 */ + pci->pitar1 = 0x00000000; + pci->piwbar1 = 0x00000000; + pci->piwar1 = 0xa0f5501e; /* Enable, Prefetch, Local + Mem, Snoop R/W, 2G */ +} + + +extern int mpc85xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin); +extern int mpc85xx_exclude_device(u_char bus, u_char devfn); + +#if CONFIG_85xx_PCI2 +static void __init +mpc85xx_setup_pci2(struct pci_controller *hose) +{ + volatile struct ccsr_pci *pci; + unsigned short temps; + bd_t *binfo = (bd_t *) __res; + + pci = ioremap(binfo->bi_immr_base + MPC85xx_PCI2_OFFSET, + MPC85xx_PCI2_SIZE); + + early_read_config_word(hose, 0, 0, PCI_COMMAND, &temps); + temps |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; + early_write_config_word(hose, 0, 0, PCI_COMMAND, temps); + early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80); + + /* Disable all windows (except powar0 since its ignored) */ + pci->powar1 = 0; + pci->powar2 = 0; + pci->powar3 = 0; + pci->powar4 = 0; + pci->piwar1 = 0; + pci->piwar2 = 0; + pci->piwar3 = 0; + + /* Setup 512M Phys:PCI 1:1 outbound mem window @ 0xa0000000 */ + pci->potar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff; + pci->potear1 = 0x00000000; + pci->powbar1 = (MPC85XX_PCI2_LOWER_MEM >> 12) & 0x000fffff; + pci->powar1 = 0x8004401c; /* Enable, Mem R/W, 512M */ + + /* Setup 16M outboud IO windows @ 0xe3000000 */ + pci->potar2 = 0x00000000; + pci->potear2 = 0x00000000; + pci->powbar2 = (MPC85XX_PCI2_IO_BASE >> 12) & 0x000fffff; + pci->powar2 = 0x80088017; /* Enable, IO R/W, 16M */ + + /* Setup 2G inbound Memory Window @ 0 */ + pci->pitar1 = 0x00000000; + pci->piwbar1 = 0x00000000; + pci->piwar1 = 0xa0f5501e; /* Enable, Prefetch, Local + Mem, Snoop R/W, 2G */ +} +#endif /* CONFIG_85xx_PCI2 */ + +void __init +mpc85xx_setup_hose(void) +{ + struct pci_controller *hose_a; +#ifdef CONFIG_85xx_PCI2 + struct pci_controller *hose_b; +#endif + bd_t *binfo = (bd_t *) __res; + + hose_a = pcibios_alloc_controller(); + + if (!hose_a) + return; + + ppc_md.pci_swizzle = common_swizzle; + ppc_md.pci_map_irq = mpc85xx_map_irq; + + hose_a->first_busno = 0; + hose_a->bus_offset = 0; + hose_a->last_busno = 0xff; + + setup_indirect_pci(hose_a, binfo->bi_immr_base + PCI1_CFG_ADDR_OFFSET, + binfo->bi_immr_base + PCI1_CFG_DATA_OFFSET); + hose_a->set_cfg_type = 1; + + mpc85xx_setup_pci1(hose_a); + + hose_a->pci_mem_offset = MPC85XX_PCI1_MEM_OFFSET; + hose_a->mem_space.start = MPC85XX_PCI1_LOWER_MEM; + hose_a->mem_space.end = MPC85XX_PCI1_UPPER_MEM; + + hose_a->io_space.start = MPC85XX_PCI1_LOWER_IO; + hose_a->io_space.end = MPC85XX_PCI1_UPPER_IO; + hose_a->io_base_phys = MPC85XX_PCI1_IO_BASE; +#if CONFIG_85xx_PCI2 + isa_io_base = + (unsigned long) ioremap(MPC85XX_PCI1_IO_BASE, + MPC85XX_PCI1_IO_SIZE + + MPC85XX_PCI2_IO_SIZE); +#else + isa_io_base = + (unsigned long) ioremap(MPC85XX_PCI1_IO_BASE, + MPC85XX_PCI1_IO_SIZE); +#endif + hose_a->io_base_virt = (void *) 
isa_io_base; + + /* setup resources */ + pci_init_resource(&hose_a->mem_resources[0], + MPC85XX_PCI1_LOWER_MEM, + MPC85XX_PCI1_UPPER_MEM, + IORESOURCE_MEM, "PCI1 host bridge"); + + pci_init_resource(&hose_a->io_resource, + MPC85XX_PCI1_LOWER_IO, + MPC85XX_PCI1_UPPER_IO, + IORESOURCE_IO, "PCI1 host bridge"); + + ppc_md.pci_exclude_device = mpc85xx_exclude_device; + + hose_a->last_busno = pciauto_bus_scan(hose_a, hose_a->first_busno); + +#if CONFIG_85xx_PCI2 + hose_b = pcibios_alloc_controller(); + + if (!hose_b) + return; + + hose_b->bus_offset = hose_a->last_busno + 1; + hose_b->first_busno = hose_a->last_busno + 1; + hose_b->last_busno = 0xff; + + setup_indirect_pci(hose_b, binfo->bi_immr_base + PCI2_CFG_ADDR_OFFSET, + binfo->bi_immr_base + PCI2_CFG_DATA_OFFSET); + hose_b->set_cfg_type = 1; + + mpc85xx_setup_pci2(hose_b); + + hose_b->pci_mem_offset = MPC85XX_PCI2_MEM_OFFSET; + hose_b->mem_space.start = MPC85XX_PCI2_LOWER_MEM; + hose_b->mem_space.end = MPC85XX_PCI2_UPPER_MEM; + + hose_b->io_space.start = MPC85XX_PCI2_LOWER_IO; + hose_b->io_space.end = MPC85XX_PCI2_UPPER_IO; + hose_b->io_base_phys = MPC85XX_PCI2_IO_BASE; + hose_b->io_base_virt = (void *) isa_io_base + MPC85XX_PCI1_IO_SIZE; + + /* setup resources */ + pci_init_resource(&hose_b->mem_resources[0], + MPC85XX_PCI2_LOWER_MEM, + MPC85XX_PCI2_UPPER_MEM, + IORESOURCE_MEM, "PCI2 host bridge"); + + pci_init_resource(&hose_b->io_resource, + MPC85XX_PCI2_LOWER_IO, + MPC85XX_PCI2_UPPER_IO, + IORESOURCE_IO, "PCI2 host bridge"); + + hose_b->last_busno = pciauto_bus_scan(hose_b, hose_b->first_busno); +#endif + return; +} +#endif /* CONFIG_PCI */ + + diff --git a/arch/ppc/syslib/ppc85xx_setup.h b/arch/ppc/syslib/ppc85xx_setup.h new file mode 100644 index 000000000..311b8a418 --- /dev/null +++ b/arch/ppc/syslib/ppc85xx_setup.h @@ -0,0 +1,67 @@ +/* + * arch/ppc/syslib/ppc85xx_setup.h + * + * MPC85XX common board definitions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __PPC_SYSLIB_PPC85XX_SETUP_H +#define __PPC_SYSLIB_PPC85XX_SETUP_H + +#include +#include +#include +#include + +extern unsigned long mpc85xx_find_end_of_memory(void) __init; +extern void mpc85xx_calibrate_decr(void) __init; +extern void mpc85xx_early_serial_map(void) __init; +extern void mpc85xx_restart(char *cmd); +extern void mpc85xx_power_off(void); +extern void mpc85xx_halt(void); +extern void mpc85xx_setup_hose(void) __init; + +/* PCI config */ +#define PCI1_CFG_ADDR_OFFSET (0x8000) +#define PCI1_CFG_DATA_OFFSET (0x8004) + +#define PCI2_CFG_ADDR_OFFSET (0x9000) +#define PCI2_CFG_DATA_OFFSET (0x9004) + +/* Additional register for PCI-X configuration */ +#define PCIX_NEXT_CAP 0x60 +#define PCIX_CAP_ID 0x61 +#define PCIX_COMMAND 0x62 +#define PCIX_STATUS 0x64 + +/* Serial Config */ +#define MPC85XX_0_SERIAL (CCSRBAR + 0x4500) +#define MPC85XX_1_SERIAL (CCSRBAR + 0x4600) + +#ifdef CONFIG_SERIAL_MANY_PORTS +#define RS_TABLE_SIZE 64 +#else +#define RS_TABLE_SIZE 2 +#endif + +#define BASE_BAUD 0 + +#define STD_UART_OP(num) \ + { 0, BASE_BAUD, num, MPC85xx_IRQ_DUART, \ + (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST), \ + iomem_base: (u8 *)MPC85XX_##num##_SERIAL, \ + io_type: SERIAL_IO_MEM}, + +/* Offset of CPM register space */ +#define CPM_MAP_ADDR (CCSRBAR + MPC85xx_CPM_OFFSET) + +#endif /* __PPC_SYSLIB_PPC85XX_SETUP_H */ diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c new file mode 100644 index 000000000..1039196ef --- /dev/null +++ b/arch/s390/mm/mmap.c @@ -0,0 +1,83 @@ +/* + * linux/arch/s390/mm/mmap.c + * + * flexible mmap layout support + * + * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * Started by Ingo Molnar + */ + +#include +#include + +/* + * Top of mmap area (just below the process stack). + * + * Leave an at least ~128 MB hole. + */ +#define MIN_GAP (128*1024*1024) +#define MAX_GAP (TASK_SIZE/6*5) + +static inline unsigned long mmap_base(void) +{ + unsigned long gap = current->rlim[RLIMIT_STACK].rlim_cur; + + if (gap < MIN_GAP) + gap = MIN_GAP; + else if (gap > MAX_GAP) + gap = MAX_GAP; + + return TASK_SIZE - (gap & PAGE_MASK); +} + +static inline int mmap_is_legacy(void) +{ +#ifdef CONFIG_ARCH_S390X + /* + * Force standard allocation for 64 bit programs. 
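To make the selection concrete (limits hypothetical): a 31-bit task with an 8 MB stack rlimit has gap < MIN_GAP, so mmap_base() above returns TASK_SIZE - 128 MB and arch_pick_mmap_layout() below installs the top-down allocator; a task with RLIMIT_STACK set to RLIM_INFINITY (or the ADDR_COMPAT_LAYOUT personality, or a 64-bit program as noted here) is treated as legacy and keeps the bottom-up layout starting at TASK_UNMAPPED_BASE.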
+ */ + if (!test_thread_flag(TIF_31BIT)) + return 1; +#endif + return sysctl_legacy_va_layout || + (current->personality & ADDR_COMPAT_LAYOUT) || + current->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY; +} + +/* + * This function, called very early during the creation of a new + * process VM image, sets up which VM layout function to use: + */ +void arch_pick_mmap_layout(struct mm_struct *mm) +{ + /* + * Fall back to the standard layout if the personality + * bit is set, or if the expected stack growth is unlimited: + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +} diff --git a/arch/sh/boards/renesas/systemh/Makefile b/arch/sh/boards/renesas/systemh/Makefile new file mode 100644 index 000000000..2cc6a23d9 --- /dev/null +++ b/arch/sh/boards/renesas/systemh/Makefile @@ -0,0 +1,13 @@ +# +# Makefile for the SystemH specific parts of the kernel +# + +obj-y := setup.o irq.o io.o + +# XXX: This wants to be consolidated in arch/sh/drivers/pci, and more +# importantly, with the generic sh7751_pcic_init() code. For now, we'll +# just abuse the hell out of kbuild, because we can.. + +obj-$(CONFIG_PCI) += pci.o +pci-y := ../../se/7751/pci.o + diff --git a/arch/sh/boards/se/7300/Makefile b/arch/sh/boards/se/7300/Makefile new file mode 100644 index 000000000..0fbd4f478 --- /dev/null +++ b/arch/sh/boards/se/7300/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the 7300 SolutionEngine specific parts of the kernel +# + +obj-y := setup.o io.o irq.o + +obj-$(CONFIG_HEARTBEAT) += led.o diff --git a/arch/sh/cchips/voyagergx/Makefile b/arch/sh/cchips/voyagergx/Makefile new file mode 100644 index 000000000..085de72fd --- /dev/null +++ b/arch/sh/cchips/voyagergx/Makefile @@ -0,0 +1,8 @@ +# +# Makefile for VoyagerGX +# + +obj-y := irq.o setup.o + +obj-$(CONFIG_USB_OHCI_HCD) += consistent.o + diff --git a/arch/sh/cchips/voyagergx/consistent.c b/arch/sh/cchips/voyagergx/consistent.c new file mode 100644 index 000000000..95a309d14 --- /dev/null +++ b/arch/sh/cchips/voyagergx/consistent.c @@ -0,0 +1,126 @@ +/* + * arch/sh/cchips/voyagergx/consistent.c + * + * Copyright (C) 2004 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct voya_alloc_entry { + struct list_head list; + unsigned long ofs; + unsigned long len; +}; + +static spinlock_t voya_list_lock = SPIN_LOCK_UNLOCKED; +static LIST_HEAD(voya_alloc_list); + +#define OHCI_SRAM_START 0xb0000000 +#define OHCI_HCCA_SIZE 0x100 +#define OHCI_SRAM_SIZE 0x10000 + +void *voyagergx_consistent_alloc(struct device *dev, size_t size, + dma_addr_t *handle, int flag) +{ + struct list_head *list = &voya_alloc_list; + struct voya_alloc_entry *entry; + struct sh_dev *shdev = to_sh_dev(dev); + unsigned long start, end; + unsigned long flags; + + /* + * The SM501 contains an integrated 8051 with its own SRAM. + * Devices within the cchip can all hook into the 8051 SRAM. + * We presently use this for the OHCI. + * + * Everything else goes through consistent_alloc(). 
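A small worked note on the allocator that follows: the usable window is the SM501 SRAM minus the OHCI HCCA reservation (0x10000 - 0x100 = 65280 bytes), requests are rounded up to 16-byte multiples, e.g. a hypothetical 100-byte request occupies (100 + 15) & ~15 = 112 bytes, and the list walk places each allocation in the first gap large enough to hold it (first fit).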
+ */ + if (!dev || dev->bus != &sh_bus_types[SH_BUS_VIRT] || + (dev->bus == &sh_bus_types[SH_BUS_VIRT] && + shdev->dev_id != SH_DEV_ID_USB_OHCI)) + return consistent_alloc(flag, size, handle); + + start = OHCI_SRAM_START + OHCI_HCCA_SIZE; + + entry = kmalloc(sizeof(struct voya_alloc_entry), GFP_ATOMIC); + if (!entry) + return NULL; + + entry->len = (size + 15) & ~15; + + /* + * The basis for this allocator is dwmw2's malloc.. the + * Matrox allocator :-) + */ + spin_lock_irqsave(&voya_list_lock, flags); + list_for_each(list, &voya_alloc_list) { + struct voya_alloc_entry *p; + + p = list_entry(list, struct voya_alloc_entry, list); + + if (p->ofs - start >= size) + goto out; + + start = p->ofs + p->len; + } + + end = start + (OHCI_SRAM_SIZE - OHCI_HCCA_SIZE); + list = &voya_alloc_list; + + if (end - start >= size) { +out: + entry->ofs = start; + list_add_tail(&entry->list, list); + spin_unlock_irqrestore(&voya_list_lock, flags); + + *handle = start; + return (void *)start; + } + + kfree(entry); + spin_unlock_irqrestore(&voya_list_lock, flags); + + return NULL; +} + +void voyagergx_consistent_free(struct device *dev, size_t size, + void *vaddr, dma_addr_t handle) +{ + struct voya_alloc_entry *entry; + struct sh_dev *shdev = to_sh_dev(dev); + unsigned long flags; + + if (!dev || dev->bus != &sh_bus_types[SH_BUS_VIRT] || + (dev->bus == &sh_bus_types[SH_BUS_VIRT] && + shdev->dev_id != SH_DEV_ID_USB_OHCI)) { + consistent_free(vaddr, size); + return; + } + + spin_lock_irqsave(&voya_list_lock, flags); + list_for_each_entry(entry, &voya_alloc_list, list) { + if (entry->ofs != handle) + continue; + + list_del(&entry->list); + kfree(entry); + + break; + } + spin_unlock_irqrestore(&voya_list_lock, flags); +} + +EXPORT_SYMBOL(voyagergx_consistent_alloc); +EXPORT_SYMBOL(voyagergx_consistent_free); + diff --git a/arch/sh/configs/rts7751r2d_defconfig b/arch/sh/configs/rts7751r2d_defconfig new file mode 100644 index 000000000..f9e1f7c5a --- /dev/null +++ b/arch/sh/configs/rts7751r2d_defconfig @@ -0,0 +1,809 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_SUPERH=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_IKCONFIG is not set +CONFIG_EMBEDDED=y +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y + +# +# Loadable module support +# +CONFIG_MODULES=y +# CONFIG_MODULE_UNLOAD is not set +CONFIG_OBSOLETE_MODPARM=y +# CONFIG_MODVERSIONS is not set +# CONFIG_KMOD is not set + +# +# System type +# +# CONFIG_SH_SOLUTION_ENGINE is not set +# CONFIG_SH_7751_SOLUTION_ENGINE is not set +# CONFIG_SH_7751_SYSTEMH is not set +# CONFIG_SH_STB1_HARP is not set +# CONFIG_SH_STB1_OVERDRIVE is not set +# CONFIG_SH_HP620 is not set +# CONFIG_SH_HP680 is not set +# CONFIG_SH_HP690 is not set +# CONFIG_SH_CQREEK is not set +# CONFIG_SH_DMIDA is not set +# CONFIG_SH_EC3104 is not set +# CONFIG_SH_SATURN is not set +# CONFIG_SH_DREAMCAST is not set +# CONFIG_SH_CAT68701 is not set +# CONFIG_SH_BIGSUR is not set +# CONFIG_SH_SH2000 is not set +# CONFIG_SH_ADX is not set +# CONFIG_SH_MPC1211 is not set +# CONFIG_SH_SECUREEDGE5410 is not set +CONFIG_SH_RTS7751R2D=y +# CONFIG_SH_UNKNOWN is not set +# CONFIG_CPU_SH2 is not set +# CONFIG_CPU_SH3 is not set 
+CONFIG_CPU_SH4=y +# CONFIG_CPU_SUBTYPE_SH7604 is not set +# CONFIG_CPU_SUBTYPE_SH7300 is not set +# CONFIG_CPU_SUBTYPE_SH7707 is not set +# CONFIG_CPU_SUBTYPE_SH7708 is not set +# CONFIG_CPU_SUBTYPE_SH7709 is not set +# CONFIG_CPU_SUBTYPE_SH7750 is not set +CONFIG_CPU_SUBTYPE_SH7751=y +# CONFIG_CPU_SUBTYPE_SH7760 is not set +# CONFIG_CPU_SUBTYPE_ST40STB1 is not set +CONFIG_MMU=y +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="mem=64M console=ttySC0,115200 root=/dev/hda1" +CONFIG_MEMORY_START=0x0c000000 +CONFIG_MEMORY_SIZE=0x04000000 +CONFIG_MEMORY_SET=y +# CONFIG_MEMORY_OVERRIDE is not set +CONFIG_SH_RTC=y +CONFIG_ZERO_PAGE_OFFSET=0x00010000 +CONFIG_BOOT_LINK_OFFSET=0x00800000 +CONFIG_CPU_LITTLE_ENDIAN=y +# CONFIG_PREEMPT is not set +# CONFIG_UBC_WAKEUP is not set +# CONFIG_SH_WRITETHROUGH is not set +# CONFIG_SH_OCRAM is not set +# CONFIG_SH_STORE_QUEUES is not set +# CONFIG_SMP is not set +CONFIG_VOYAGERGX=y +CONFIG_RTS7751R2D_REV11=y +CONFIG_SH_PCLK_FREQ=60000000 +# CONFIG_CPU_FREQ is not set +CONFIG_SH_DMA=y +CONFIG_NR_ONCHIP_DMA_CHANNELS=8 +# CONFIG_NR_DMA_CHANNELS_BOOL is not set +# CONFIG_DMA_PAGE_OPS is not set + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +CONFIG_ISA=y +CONFIG_PCI=y +CONFIG_SH_PCIDMA_NONCOHERENT=y +CONFIG_PCI_AUTO=y +CONFIG_PCI_AUTO_UPDATE_RESOURCES=y +CONFIG_PCI_DMA=y +# CONFIG_PCI_LEGACY_PROC is not set +CONFIG_PCI_NAMES=y +CONFIG_HOTPLUG=y + +# +# PCMCIA/CardBus support +# +CONFIG_PCMCIA=m +CONFIG_YENTA=m +CONFIG_CARDBUS=y +# CONFIG_I82092 is not set +# CONFIG_I82365 is not set +# CONFIG_TCIC is not set +CONFIG_PCMCIA_PROBE=y + +# +# PCI Hotplug Support +# +CONFIG_HOTPLUG_PCI=y +# CONFIG_HOTPLUG_PCI_FAKE is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_FLAT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Generic Driver Options +# +# CONFIG_FW_LOADER is not set + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_DEV_XD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +# CONFIG_BLK_DEV_LOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_INITRD is not set +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +CONFIG_IDE=y +CONFIG_BLK_DEV_IDE=y + +# +# Please see Documentation/ide.txt for help/info on IDE drives +# +CONFIG_BLK_DEV_IDEDISK=y +# CONFIG_IDEDISK_MULTI_MODE is not set +# CONFIG_IDEDISK_STROKE is not set +CONFIG_BLK_DEV_IDECS=m +# CONFIG_BLK_DEV_IDECD is not set +# CONFIG_BLK_DEV_IDETAPE is not set +# CONFIG_BLK_DEV_IDEFLOPPY is not set +# CONFIG_IDE_TASK_IOCTL is not set +# CONFIG_IDE_TASKFILE_IO is not set + +# +# IDE chipset support/bugfixes +# +# CONFIG_BLK_DEV_IDEPCI is not set +# CONFIG_IDE_CHIPSETS is not set +# CONFIG_BLK_DEV_IDEDMA is not set +# CONFIG_IDEDMA_AUTO is not set +# CONFIG_DMA_NONPCI is not set +# CONFIG_BLK_DEV_HD is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Old CD-ROM drivers (not SCSI, not IDE) +# +# CONFIG_CD_NO_IDESCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# IEEE 1394 (FireWire) support (EXPERIMENTAL) +# +# CONFIG_IEEE1394 is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set 
+# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +# CONFIG_IP_PNP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_INET_ECN is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_DECNET is not set +# CONFIG_BRIDGE is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +CONFIG_IPV6_SCTP__=y +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +CONFIG_NETDEVICES=y + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# CONFIG_STNIC is not set +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_LANCE is not set +# CONFIG_NET_VENDOR_SMC is not set +# CONFIG_NET_VENDOR_RACAL is not set + +# +# Tulip family network device support +# +# CONFIG_NET_TULIP is not set +# CONFIG_AT1700 is not set +# CONFIG_DEPCA is not set +# CONFIG_HP100 is not set +CONFIG_NET_ISA=y +# CONFIG_E2100 is not set +# CONFIG_EWRK3 is not set +# CONFIG_EEXPRESS is not set +# CONFIG_EEXPRESS_PRO is not set +# CONFIG_HPLAN_PLUS is not set +# CONFIG_HPLAN is not set +# CONFIG_LP486E is not set +# CONFIG_ETH16I is not set +CONFIG_NE2000=m +# CONFIG_ZNET is not set +# CONFIG_SEEQ8005 is not set +CONFIG_NET_PCI=y +# CONFIG_PCNET32 is not set +# CONFIG_AMD8111_ETH is not set +# CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_AC3200 is not set +# CONFIG_APRICOT is not set +# CONFIG_B44 is not set +# CONFIG_CS89x0 is not set +# CONFIG_DGRS is not set +# CONFIG_EEPRO100 is not set +# CONFIG_E100 is not set +# CONFIG_FEALNX is not set +# CONFIG_NATSEMI is not set +# CONFIG_NE2K_PCI is not set +# CONFIG_8139CP is not set +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +# CONFIG_8139TOO_8129 is not set +# CONFIG_8139_OLD_RX_RESET is not set +# CONFIG_SIS900 is not set +# CONFIG_EPIC100 is not set +# CONFIG_SUNDANCE is not set +# CONFIG_TLAN is not set +# CONFIG_VIA_RHINE is not set +# CONFIG_NET_POCKET is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SIS190 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set + +# +# Wireless LAN (non-hamradio) +# +CONFIG_NET_RADIO=y + +# +# Obsolete Wireless cards support (pre-802.11) +# +# CONFIG_STRIP is not set +# CONFIG_ARLAN is not set +# CONFIG_WAVELAN is not set +# 
CONFIG_PCMCIA_WAVELAN is not set +# CONFIG_PCMCIA_NETWAVE is not set + +# +# Wireless 802.11 Frequency Hopping cards support +# +# CONFIG_PCMCIA_RAYCS is not set + +# +# Wireless 802.11b ISA/PCI cards support +# +# CONFIG_AIRO is not set +CONFIG_HERMES=m +# CONFIG_PLX_HERMES is not set +# CONFIG_TMD_HERMES is not set +# CONFIG_PCI_HERMES is not set + +# +# Wireless 802.11b Pcmcia/Cardbus cards support +# +CONFIG_PCMCIA_HERMES=m +# CONFIG_AIRO_CS is not set +# CONFIG_PCMCIA_ATMEL is not set +# CONFIG_PCMCIA_WL3501 is not set +CONFIG_NET_WIRELESS=y + +# +# Token Ring devices +# +# CONFIG_TR is not set +# CONFIG_RCPCI is not set +# CONFIG_SHAPER is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set + +# +# PCMCIA network device support +# +# CONFIG_NET_PCMCIA is not set + +# +# Amateur Radio support +# +# CONFIG_HAMRADIO is not set + +# +# IrDA (infrared) support +# +# CONFIG_IRDA is not set + +# +# Bluetooth support +# +# CONFIG_BT is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN_BOOL is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +# CONFIG_INPUT is not set + +# +# Userland interfaces +# + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +# CONFIG_SERIO is not set +# CONFIG_SERIO_I8042 is not set + +# +# Input Device Drivers +# + +# +# Character devices +# +# CONFIG_VT is not set +# CONFIG_SERIAL is not set +CONFIG_SH_SCI=y +CONFIG_SERIAL_CONSOLE=y +CONFIG_RTC_9701JE=y + +# +# Unix 98 PTY support +# +# CONFIG_UNIX98_PTYS is not set +CONFIG_HEARTBEAT=y +# CONFIG_PSMOUSE is not set + +# +# Watchdog Cards +# +# CONFIG_WATCHDOG is not set +# CONFIG_RTC is not set + +# +# PCMCIA character devices +# +# CONFIG_SYNCLINK_CS is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_SH_SCI is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# I2C Algorithms +# + +# +# I2C Hardware Bus support +# + +# +# I2C Hardware Sensors Chip support +# +# CONFIG_I2C_SENSOR is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +CONFIG_MINIX_FS=y +# CONFIG_ROMFS_FS is not set +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +CONFIG_FAT_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +# CONFIG_DEVFS_FS is not set +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +# CONFIG_NFS_FS is not set +# CONFIG_NFSD is not set +# CONFIG_EXPORTFS is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_INTERMEZZO_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set 
+CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y + +# +# Native Language Support +# +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +CONFIG_NLS_CODEPAGE_932=y +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +# CONFIG_FB is not set + +# +# Sound +# +CONFIG_SOUND=y + +# +# Advanced Linux Sound Architecture +# +CONFIG_SND=m +# CONFIG_SND_SEQUENCER is not set +# CONFIG_SND_OSSEMUL is not set +# CONFIG_SND_VERBOSE_PRINTK is not set +# CONFIG_SND_DEBUG is not set + +# +# Generic devices +# +# CONFIG_SND_DUMMY is not set +# CONFIG_SND_MTPAV is not set +# CONFIG_SND_SERIAL_U16550 is not set +# CONFIG_SND_MPU401 is not set + +# +# ISA devices +# +# CONFIG_SND_AD1848 is not set +# CONFIG_SND_CS4231 is not set +# CONFIG_SND_CS4232 is not set +# CONFIG_SND_CS4236 is not set +# CONFIG_SND_ES1688 is not set +# CONFIG_SND_ES18XX is not set +# CONFIG_SND_GUSCLASSIC is not set +# CONFIG_SND_GUSEXTREME is not set +# CONFIG_SND_GUSMAX is not set +# CONFIG_SND_INTERWAVE is not set +# CONFIG_SND_INTERWAVE_STB is not set +# CONFIG_SND_OPTI92X_AD1848 is not set +# CONFIG_SND_OPTI92X_CS4231 is not set +# CONFIG_SND_OPTI93X is not set +# CONFIG_SND_SB8 is not set +# CONFIG_SND_SB16 is not set +# CONFIG_SND_SBAWE is not set +# CONFIG_SND_WAVEFRONT is not set +# CONFIG_SND_CMI8330 is not set +# CONFIG_SND_OPL3SA2 is not set +# CONFIG_SND_SGALAXY is not set +# CONFIG_SND_SSCAPE is not set + +# +# PCI devices +# +# CONFIG_SND_ALI5451 is not set +# CONFIG_SND_AZT3328 is not set +# CONFIG_SND_CS46XX is not set +# CONFIG_SND_CS4281 is not set +# CONFIG_SND_EMU10K1 is not set +# CONFIG_SND_KORG1212 is not set +# CONFIG_SND_NM256 is not set +# CONFIG_SND_RME32 is not set +# CONFIG_SND_RME96 is not set +# CONFIG_SND_RME9652 is not set +# CONFIG_SND_HDSP is not set +# CONFIG_SND_TRIDENT is not set +CONFIG_SND_YMFPCI=m +# CONFIG_SND_ALS4000 is not set +# CONFIG_SND_CMIPCI is not set +# CONFIG_SND_ENS1370 is not set +# CONFIG_SND_ENS1371 is not set +# CONFIG_SND_ES1938 is not set +# CONFIG_SND_ES1968 is not set +# CONFIG_SND_MAESTRO3 is not set +# CONFIG_SND_FM801 is not set +# CONFIG_SND_ICE1712 is not set +# CONFIG_SND_ICE1724 is not set +# 
CONFIG_SND_INTEL8X0 is not set +# CONFIG_SND_SONICVIBES is not set +# CONFIG_SND_VIA82XX is not set +# CONFIG_SND_VX222 is not set + +# +# PCMCIA devices +# +# CONFIG_SND_VXPOCKET is not set +# CONFIG_SND_VXP440 is not set + +# +# Open Sound System +# +CONFIG_SOUND_PRIME=m +# CONFIG_SOUND_BT878 is not set +CONFIG_SOUND_CMPCI=m +# CONFIG_SOUND_CMPCI_FM is not set +# CONFIG_SOUND_CMPCI_MIDI is not set +# CONFIG_SOUND_CMPCI_JOYSTICK is not set +# CONFIG_SOUND_CMPCI_CM8738 is not set +# CONFIG_SOUND_EMU10K1 is not set +# CONFIG_SOUND_FUSION is not set +# CONFIG_SOUND_CS4281 is not set +# CONFIG_SOUND_ES1370 is not set +# CONFIG_SOUND_ES1371 is not set +# CONFIG_SOUND_ESSSOLO1 is not set +# CONFIG_SOUND_MAESTRO is not set +# CONFIG_SOUND_MAESTRO3 is not set +# CONFIG_SOUND_ICH is not set +# CONFIG_SOUND_SONICVIBES is not set +# CONFIG_SOUND_TRIDENT is not set +# CONFIG_SOUND_MSNDCLAS is not set +# CONFIG_SOUND_MSNDPIN is not set +# CONFIG_SOUND_VIA82CXXX is not set +# CONFIG_SOUND_OSS is not set +# CONFIG_SOUND_ALI5455 is not set +# CONFIG_SOUND_FORTE is not set +# CONFIG_SOUND_RME96XX is not set +# CONFIG_SOUND_AD1980 is not set +CONFIG_SOUND_VOYAGERGX=m + +# +# USB support +# +# CONFIG_USB is not set +# CONFIG_USB_GADGET is not set + +# +# Profiling support +# +# CONFIG_PROFILING is not set + +# +# Kernel hacking +# +# CONFIG_MAGIC_SYSRQ is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_SH_STANDARD_BIOS is not set +# CONFIG_KGDB is not set +# CONFIG_FRAME_POINTER is not set + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c new file mode 100644 index 000000000..da3d6877f --- /dev/null +++ b/arch/sh/kernel/cpu/adc.c @@ -0,0 +1,36 @@ +/* + * linux/arch/sh/kernel/adc.c -- SH3 on-chip ADC support + * + * Copyright (C) 2004 Andriy Skulysh + */ + +#include +#include +#include + + +int adc_single(unsigned int channel) +{ + int off; + unsigned char csr; + + if (channel >= 8) return -1; + + off = (channel & 0x03) << 2; + + csr = ctrl_inb(ADCSR); + csr = channel | ADCSR_ADST | ADCSR_CKS; + ctrl_outb(csr, ADCSR); + + do { + csr = ctrl_inb(ADCSR); + } while ((csr & ADCSR_ADF) == 0); + + csr &= ~(ADCSR_ADF | ADCSR_ADST); + ctrl_outb(csr, ADCSR); + + return (((ctrl_inb(ADDRAH + off) << 8) | + ctrl_inb(ADDRAL + off)) >> 6); +} + +EXPORT_SYMBOL(adc_single); diff --git a/arch/sh64/boot/Makefile b/arch/sh64/boot/Makefile new file mode 100644 index 000000000..fb71087b7 --- /dev/null +++ b/arch/sh64/boot/Makefile @@ -0,0 +1,20 @@ +# +# arch/sh64/boot/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 2002 Stuart Menefy +# + +targets := zImage +subdir- := compressed + +$(obj)/zImage: $(obj)/compressed/vmlinux FORCE + $(call if_changed,objcopy) + @echo 'Kernel: $@ is ready' + +$(obj)/compressed/vmlinux: FORCE + $(Q)$(MAKE) $(build)=$(obj)/compressed $@ + diff --git a/arch/sh64/boot/compressed/Makefile b/arch/sh64/boot/compressed/Makefile new file mode 100644 index 000000000..3f6cbf017 --- /dev/null +++ b/arch/sh64/boot/compressed/Makefile @@ -0,0 +1,46 @@ +# +# linux/arch/sh64/boot/compressed/Makefile +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. 
+# +# Copyright (C) 2002 Stuart Menefy +# Copyright (C) 2004 Paul Mundt +# +# create a compressed vmlinux image from the original vmlinux +# + +targets := vmlinux vmlinux.bin vmlinux.bin.gz \ + head.o misc.o cache.o piggy.o vmlinux.lds.o + +EXTRA_AFLAGS := -traditional + +OBJECTS := $(obj)/head.o $(obj)/misc.o $(obj)/cache.o + +# +# ZIMAGE_OFFSET is the load offset of the compression loader +# (4M for the kernel plus 64K for this loader) +# +ZIMAGE_OFFSET = $(shell printf "0x%8x" $$[$(CONFIG_MEMORY_START)+0x400000+0x10000]) + +LDFLAGS_vmlinux := -Ttext $(ZIMAGE_OFFSET) -e startup \ + -T $(obj)/../../kernel/vmlinux.lds.s \ + --no-warn-mismatch + +$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE + $(call if_changed,ld) + @: + +$(obj)/vmlinux.bin: vmlinux FORCE + $(call if_changed,objcopy) + +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) + +LDFLAGS_piggy.o := -r --format binary --oformat elf32-sh64-linux -T +OBJCOPYFLAGS += -R .empty_zero_page + +$(obj)/piggy.o: $(obj)/vmlinux.lds.s $(obj)/vmlinux.bin.gz FORCE + $(call if_changed,ld) + diff --git a/arch/sh64/boot/compressed/cache.c b/arch/sh64/boot/compressed/cache.c new file mode 100644 index 000000000..708707355 --- /dev/null +++ b/arch/sh64/boot/compressed/cache.c @@ -0,0 +1,39 @@ +/* + * arch/shmedia/boot/compressed/cache.c -- simple cache management functions + * + * Code extracted from sh-ipl+g, sh-stub.c, which has the copyright: + * + * This is originally based on an m68k software stub written by Glenn + * Engel at HP, but has changed quite a bit. + * + * Modifications for the SH by Ben Lee and Steve Chamberlain + * +**************************************************************************** + + THIS SOFTWARE IS NOT COPYRIGHTED + + HP offers the following for use in the public domain. HP makes no + warranty with regard to the software or it's performance and the + user accepts the software "AS IS" with all faults. + + HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD + TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + +****************************************************************************/ + +#define CACHE_ENABLE 0 +#define CACHE_DISABLE 1 + +int cache_control(unsigned int command) +{ + volatile unsigned int *p = (volatile unsigned int *) 0x80000000; + int i; + + for (i = 0; i < (32 * 1024); i += 32) { + (void *) *p; + p += (32 / sizeof (int)); + } + + return 0; +} diff --git a/arch/sh64/boot/compressed/head.S b/arch/sh64/boot/compressed/head.S new file mode 100644 index 000000000..82040b1a2 --- /dev/null +++ b/arch/sh64/boot/compressed/head.S @@ -0,0 +1,164 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/shmedia/boot/compressed/head.S + * + * Copied from + * arch/shmedia/kernel/head.S + * which carried the copyright: + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * Modification for compressed loader: + * Copyright (C) 2002 Stuart Menefy (stuart.menefy@st.com) + */ + +#include +#include +#include +#include + +/* + * Fixed TLB entries to identity map the beginning of RAM + */ +#define MMUIR_TEXT_H 0x0000000000000003 | CONFIG_MEMORY_START + /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */ +#define MMUIR_TEXT_L 0x000000000000009a | CONFIG_MEMORY_START + /* 512 Mb, Cacheable (Write-back), execute, Not User, Ph. Add. 
*/ + +#define MMUDR_CACHED_H 0x0000000000000003 | CONFIG_MEMORY_START + /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */ +#define MMUDR_CACHED_L 0x000000000000015a | CONFIG_MEMORY_START + /* 512 Mb, Cacheable (Write-back), read/write, Not User, Ph. Add. */ + +#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */ +#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */ + +#if 1 +#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* OCE + OCI + WB */ +#else +#define OCCR0_INIT_VAL OCCR0_OFF +#endif +#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */ + + .text + + .global startup +startup: + /* + * Prevent speculative fetch on device memory due to + * uninitialized target registers. + * This must be executed before the first branch. + */ + ptabs/u ZERO, tr0 + ptabs/u ZERO, tr1 + ptabs/u ZERO, tr2 + ptabs/u ZERO, tr3 + ptabs/u ZERO, tr4 + ptabs/u ZERO, tr5 + ptabs/u ZERO, tr6 + ptabs/u ZERO, tr7 + synci + + /* + * Set initial TLB entries for cached and uncached regions. + * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't ! + */ + /* Clear ITLBs */ + pta 1f, tr1 + movi ITLB_FIXED, r21 + movi ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22 +1: putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */ + addi r21, TLB_STEP, r21 + bne r21, r22, tr1 + + /* Clear DTLBs */ + pta 1f, tr1 + movi DTLB_FIXED, r21 + movi DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP, r22 +1: putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */ + addi r21, TLB_STEP, r21 + bne r21, r22, tr1 + + /* Map one big (512Mb) page for ITLB */ + movi ITLB_FIXED, r21 + movi MMUIR_TEXT_L, r22 /* PTEL first */ + putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */ + movi MMUIR_TEXT_H, r22 /* PTEH last */ + putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */ + + /* Map one big CACHED (512Mb) page for DTLB */ + movi DTLB_FIXED, r21 + movi MMUDR_CACHED_L, r22 /* PTEL first */ + putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */ + movi MMUDR_CACHED_H, r22 /* PTEH last */ + putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */ + + /* ICache */ + movi ICCR_BASE, r21 + movi ICCR0_INIT_VAL, r22 + movi ICCR1_INIT_VAL, r23 + putcfg r21, ICCR_REG0, r22 + putcfg r21, ICCR_REG1, r23 + synci + + /* OCache */ + movi OCCR_BASE, r21 + movi OCCR0_INIT_VAL, r22 + movi OCCR1_INIT_VAL, r23 + putcfg r21, OCCR_REG0, r22 + putcfg r21, OCCR_REG1, r23 + synco + + /* + * Enable the MMU. + * From here-on code can be non-PIC. + */ + movi SR_HARMLESS | SR_ENABLE_MMU, r22 + putcon r22, SSR + movi 1f, r22 + putcon r22, SPC + synco + rte /* And now go into the hyperspace ... */ +1: /* ... that's the next instruction ! */ + + /* Set initial stack pointer */ + movi datalabel stack_start, r0 + ld.l r0, 0, r15 + + /* + * Clear bss + */ + pt 1f, tr1 + movi datalabel __bss_start, r22 + movi datalabel _end, r23 +1: st.l r22, 0, ZERO + addi r22, 4, r22 + bne r22, r23, tr1 + + /* + * Decompress the kernel. + */ + pt decompress_kernel, tr0 + blink tr0, r18 + + /* + * Disable the MMU. + */ + movi SR_HARMLESS, r22 + putcon r22, SSR + movi 1f, r22 + putcon r22, SPC + synco + rte /* And now go into the hyperspace ... */ +1: /* ... that's the next instruction ! 
*/ + + /* Jump into the decompressed kernel */ + movi datalabel (CONFIG_MEMORY_START + 0x2000)+1, r19 + ptabs r19, tr0 + blink tr0, r18 + + /* Shouldn't return here, but just in case, loop forever */ + pt 1f, tr0 +1: blink tr0, ZERO diff --git a/arch/sh64/boot/compressed/install.sh b/arch/sh64/boot/compressed/install.sh new file mode 100644 index 000000000..90589f0fe --- /dev/null +++ b/arch/sh64/boot/compressed/install.sh @@ -0,0 +1,56 @@ +#!/bin/sh +# +# arch/sh/boot/install.sh +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 1995 by Linus Torvalds +# +# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin +# Adapted from code in arch/i386/boot/install.sh by Russell King +# Adapted from code in arch/arm/boot/install.sh by Stuart Menefy +# +# "make install" script for sh architecture +# +# Arguments: +# $1 - kernel version +# $2 - kernel image file +# $3 - kernel map file +# $4 - default install path (blank if root directory) +# + +# User may have a custom install script + +if [ -x /sbin/installkernel ]; then + exec /sbin/installkernel "$@" +fi + +if [ "$2" = "zImage" ]; then +# Compressed install + echo "Installing compressed kernel" + if [ -f $4/vmlinuz-$1 ]; then + mv $4/vmlinuz-$1 $4/vmlinuz.old + fi + + if [ -f $4/System.map-$1 ]; then + mv $4/System.map-$1 $4/System.old + fi + + cat $2 > $4/vmlinuz-$1 + cp $3 $4/System.map-$1 +else +# Normal install + echo "Installing normal kernel" + if [ -f $4/vmlinux-$1 ]; then + mv $4/vmlinux-$1 $4/vmlinux.old + fi + + if [ -f $4/System.map ]; then + mv $4/System.map $4/System.old + fi + + cat $2 > $4/vmlinux-$1 + cp $3 $4/System.map +fi diff --git a/arch/sh64/configs/cayman_defconfig b/arch/sh64/configs/cayman_defconfig new file mode 100644 index 000000000..6dd7cea5d --- /dev/null +++ b/arch/sh64/configs/cayman_defconfig @@ -0,0 +1,660 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_SUPERH=y +CONFIG_SUPERH64=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_LOG_BUF_SHIFT=14 + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +CONFIG_POSIX_MQUEUE=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# System type +# +# CONFIG_SH_GENERIC is not set +# CONFIG_SH_SIMULATOR is not set +CONFIG_SH_CAYMAN=y +# CONFIG_SH_ROMRAM is not set +# CONFIG_SH_HARP is not set + +# +# Processor type and features +# +CONFIG_CPU_SH5=y +CONFIG_CPU_SUBTYPE_SH5_101=y +# CONFIG_CPU_SUBTYPE_SH5_103 is not set +CONFIG_LITTLE_ENDIAN=y +# CONFIG_BIG_ENDIAN is not set +# CONFIG_SH64_FPU_DENORM_FLUSH is not set +CONFIG_SH64_PGTABLE_2_LEVEL=y +# CONFIG_SH64_PGTABLE_3_LEVEL is not set +CONFIG_HUGETLB_PAGE_SIZE_64K=y +# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set +# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set +CONFIG_SH64_USER_MISALIGNED_FIXUP=y + +# +# Memory options +# +CONFIG_CACHED_MEMORY_OFFSET=0x20000000 +CONFIG_MEMORY_START=0x80000000 +CONFIG_MEMORY_SIZE_IN_MB=128 + +# 
+# Cache options +# +# CONFIG_DCACHE_DISABLED is not set +CONFIG_DCACHE_WRITE_BACK=y +# CONFIG_DCACHE_WRITE_THROUGH is not set +# CONFIG_ICACHE_DISABLED is not set +CONFIG_PCIDEVICE_MEMORY_START=C0000000 +CONFIG_DEVICE_MEMORY_START=E0000000 +CONFIG_FLASH_MEMORY_START=0x00000000 +CONFIG_PCI_BLOCK_START=0x40000000 + +# +# CPU Subtype specific options +# +CONFIG_SH64_ID2815_WORKAROUND=y + +# +# Misc options +# +CONFIG_HEARTBEAT=y +CONFIG_HDSP253_LED=y +CONFIG_SH_DMA=y +CONFIG_PREEMPT=y + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +CONFIG_PCI=y +CONFIG_SH_PCIDMA_NONCOHERENT=y +CONFIG_PCI_LEGACY_PROC=y +CONFIG_PCI_NAMES=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_FLAT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_CARMEL is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_INITRD is not set +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +# CONFIG_STNIC is not set +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is 
not set +# CONFIG_NET_VENDOR_3COM is not set + +# +# Tulip family network device support +# +CONFIG_NET_TULIP=y +# CONFIG_DE2104X is not set +CONFIG_TULIP=y +# CONFIG_TULIP_MWI is not set +# CONFIG_TULIP_MMIO is not set +# CONFIG_TULIP_NAPI is not set +# CONFIG_DE4X5 is not set +# CONFIG_WINBOND_840 is not set +# CONFIG_DM9102 is not set +# CONFIG_HP100 is not set +CONFIG_NET_PCI=y +# CONFIG_PCNET32 is not set +# CONFIG_AMD8111_ETH is not set +# CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_B44 is not set +# CONFIG_FORCEDETH is not set +# CONFIG_DGRS is not set +# CONFIG_EEPRO100 is not set +# CONFIG_E100 is not set +# CONFIG_FEALNX is not set +# CONFIG_NATSEMI is not set +# CONFIG_NE2K_PCI is not set +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_SIS900 is not set +# CONFIG_EPIC100 is not set +# CONFIG_SUNDANCE is not set +# CONFIG_TLAN is not set +# CONFIG_VIA_RHINE is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# CONFIG_SH_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set 
+# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +CONFIG_FB=y +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_E1355 is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON_OLD is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +CONFIG_FB_KYRO=y +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_VIRTUAL is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_PCI_CONSOLE=y +CONFIG_FONTS=y +# CONFIG_FONT_8x8 is not set +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set + +# +# Logo configuration +# +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +# CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_LOGO_SUPERH_MONO is not set +# CONFIG_LOGO_SUPERH_VGA16 is not set +CONFIG_LOGO_SUPERH_CLUT224=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +CONFIG_MINIX_FS=y +CONFIG_ROMFS_FS=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# 
CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Kernel hacking +# +CONFIG_MAGIC_SYSRQ=y +# CONFIG_EARLY_PRINTK is not set +# CONFIG_DEBUG_KERNEL_WITH_GDB_STUB is not set +# CONFIG_SH64_PROC_TLB is not set +# CONFIG_SH64_PROC_ASIDS is not set +CONFIG_SH64_SR_WATCH=y +# CONFIG_SH_ALPHANUMERIC is not set +# CONFIG_SH_NO_BSS_INIT is not set +CONFIG_FRAME_POINTER=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set diff --git a/arch/sh64/defconfig b/arch/sh64/defconfig new file mode 100644 index 000000000..e6233e8fb --- /dev/null +++ b/arch/sh64/defconfig @@ -0,0 +1,668 @@ +# +# Automatically generated make config: don't edit +# +CONFIG_SUPERH=y +CONFIG_SUPERH64=y +CONFIG_MMU=y +CONFIG_UID16=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_LOG_BUF_SHIFT=14 + +# +# Code maturity level options +# +CONFIG_EXPERIMENTAL=y +CONFIG_CLEAN_COMPILE=y +CONFIG_STANDALONE=y +CONFIG_BROKEN_ON_SMP=y + +# +# General setup +# +CONFIG_SWAP=y +# CONFIG_SYSVIPC is not set +CONFIG_POSIX_MQUEUE=y +# CONFIG_BSD_PROCESS_ACCT is not set +CONFIG_SYSCTL=y +# CONFIG_AUDIT is not set +# CONFIG_HOTPLUG is not set +# CONFIG_IKCONFIG is not set +# CONFIG_EMBEDDED is not set +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set + +# +# Loadable module support +# +# CONFIG_MODULES is not set + +# +# System type +# +# CONFIG_SH_GENERIC is not set +# CONFIG_SH_SIMULATOR is not set +CONFIG_SH_CAYMAN=y +# CONFIG_SH_ROMRAM is not set +# CONFIG_SH_HARP is not set +CONFIG_CPU_SH5=y +CONFIG_CPU_SUBTYPE_SH5_101=y +# CONFIG_CPU_SUBTYPE_SH5_103 is not set +CONFIG_LITTLE_ENDIAN=y +# CONFIG_BIG_ENDIAN is not set +# CONFIG_SH64_FPU_DENORM_FLUSH is not set +CONFIG_SH64_PGTABLE_2_LEVEL=y +# CONFIG_SH64_PGTABLE_3_LEVEL is not set +CONFIG_HUGETLB_PAGE_SIZE_64K=y +# CONFIG_HUGETLB_PAGE_SIZE_1MB is not set +# CONFIG_HUGETLB_PAGE_SIZE_512MB is not set +CONFIG_SH64_USER_MISALIGNED_FIXUP=y + +# +# Memory options +# +CONFIG_CACHED_MEMORY_OFFSET=0x20000000 +CONFIG_MEMORY_START=0x80000000 +CONFIG_MEMORY_SIZE_IN_MB=128 + +# +# Cache options +# +# CONFIG_DCACHE_DISABLED is not set +CONFIG_DCACHE_WRITE_BACK=y +# CONFIG_DCACHE_WRITE_THROUGH is not set +# CONFIG_ICACHE_DISABLED is not set +CONFIG_PCIDEVICE_MEMORY_START=C0000000 +CONFIG_DEVICE_MEMORY_START=E0000000 +CONFIG_FLASH_MEMORY_START=0x00000000 +CONFIG_PCI_BLOCK_START=0x40000000 + +# +# CPU Subtype specific options +# +CONFIG_SH64_ID2815_WORKAROUND=y + +# +# Misc options +# +CONFIG_HEARTBEAT=y +CONFIG_HDSP253_LED=y +CONFIG_SH_DMA=y +CONFIG_PREEMPT=y + +# +# Bus options (PCI, PCMCIA, EISA, MCA, ISA) +# +CONFIG_PCI=y +CONFIG_SH_PCIDMA_NONCOHERENT=y +CONFIG_PCI_LEGACY_PROC=y +CONFIG_PCI_NAMES=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_BINFMT_FLAT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Memory Technology Devices (MTD) +# +# CONFIG_MTD is not set + +# +# Parallel port support +# +# CONFIG_PARPORT is not set + +# +# Plug and Play support +# + +# +# Block devices +# +# CONFIG_BLK_DEV_FD is not set +# CONFIG_BLK_CPQ_DA is not set +# CONFIG_BLK_CPQ_CISS_DA is not set +# CONFIG_BLK_DEV_DAC960 is 
not set +# CONFIG_BLK_DEV_UMEM is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +# CONFIG_BLK_DEV_SX8 is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_INITRD is not set +# CONFIG_LBD is not set + +# +# ATA/ATAPI/MFM/RLL support +# +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_SCSI is not set + +# +# Multi-device support (RAID and LVM) +# +# CONFIG_MD is not set + +# +# Fusion MPT device support +# + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_IEEE1394 is not set + +# +# I2O device support +# +# CONFIG_I2O is not set + +# +# Networking support +# +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +# CONFIG_NETLINK_DEV is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +# CONFIG_IP_PNP_BOOTP is not set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_IPV6 is not set +# CONFIG_NETFILTER is not set + +# +# SCTP Configuration (EXPERIMENTAL) +# +# CONFIG_IP_SCTP is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_NET_DIVERT is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_NET_FASTROUTE is not set +# CONFIG_NET_HW_FLOWCONTROL is not set + +# +# QoS and/or fair queueing +# +# CONFIG_NET_SCHED is not set +# CONFIG_NET_CLS_ROUTE is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_HAMRADIO is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set + +# +# ARCnet devices +# +# CONFIG_ARCNET is not set + +# +# Ethernet (10 or 100Mbit) +# +CONFIG_NET_ETHERNET=y +# CONFIG_MII is not set +# CONFIG_STNIC is not set +# CONFIG_HAPPYMEAL is not set +# CONFIG_SUNGEM is not set +# CONFIG_NET_VENDOR_3COM is not set + +# +# Tulip family network device support +# +CONFIG_NET_TULIP=y +# CONFIG_DE2104X is not set +CONFIG_TULIP=y +# CONFIG_TULIP_MWI is not set +# CONFIG_TULIP_MMIO is not set +# CONFIG_TULIP_NAPI is not set +# CONFIG_DE4X5 is not set +# CONFIG_WINBOND_840 is not set +# CONFIG_DM9102 is not set +# CONFIG_HP100 is not set +CONFIG_NET_PCI=y +# CONFIG_PCNET32 is not set +# CONFIG_AMD8111_ETH is not set +# CONFIG_ADAPTEC_STARFIRE is not set +# CONFIG_B44 is not set +# CONFIG_FORCEDETH is not set +# CONFIG_DGRS is not set +# CONFIG_EEPRO100 is not set +# CONFIG_E100 is not set +# CONFIG_FEALNX is not set +# CONFIG_NATSEMI is not set +# CONFIG_NE2K_PCI is not set +# CONFIG_8139CP is not set +# CONFIG_8139TOO is not set +# CONFIG_SIS900 is not set +# CONFIG_EPIC100 is not set +# CONFIG_SUNDANCE is not set +# CONFIG_TLAN is not set +# CONFIG_VIA_RHINE is not set +# CONFIG_VIA_VELOCITY is not set + +# +# Ethernet (1000 Mbit) +# +# CONFIG_ACENIC is not set +# CONFIG_DL2K is not set +# CONFIG_E1000 is not set +# CONFIG_NS83820 is not set +# 
CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +# CONFIG_R8169 is not set +# CONFIG_SK98LIN is not set +# CONFIG_TIGON3 is not set + +# +# Ethernet (10000 Mbit) +# +# CONFIG_IXGB is not set +# CONFIG_S2IO is not set + +# +# Token Ring devices +# +# CONFIG_TR is not set + +# +# Wireless LAN (non-hamradio) +# +# CONFIG_NET_RADIO is not set + +# +# Wan interfaces +# +# CONFIG_WAN is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_SHAPER is not set +# CONFIG_NETCONSOLE is not set + +# +# ISDN subsystem +# +# CONFIG_ISDN is not set + +# +# Telephony Support +# +# CONFIG_PHONE is not set + +# +# Input device support +# +CONFIG_INPUT=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_TSDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input I/O drivers +# +# CONFIG_GAMEPORT is not set +CONFIG_SOUND_GAMEPORT=y +CONFIG_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_CT82C710 is not set +# CONFIG_SERIO_PCIPS2 is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=256 +# CONFIG_QIC02_TAPE is not set + +# +# IPMI +# +# CONFIG_IPMI_HANDLER is not set + +# +# Watchdog Cards +# +CONFIG_WATCHDOG=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# CONFIG_SH_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set +# CONFIG_RTC is not set +# CONFIG_GEN_RTC is not set +# CONFIG_DTLK is not set +# CONFIG_R3964 is not set +# CONFIG_APPLICOM is not set + +# +# Ftape, the floppy tape device driver +# +# CONFIG_FTAPE is not set +# CONFIG_AGP is not set +# CONFIG_DRM is not set +# CONFIG_RAW_DRIVER is not set + +# +# I2C support +# +# CONFIG_I2C is not set + +# +# Misc devices +# + +# +# Multimedia devices +# +# CONFIG_VIDEO_DEV is not set + +# +# Digital Video Broadcasting Devices +# +# CONFIG_DVB is not set + +# +# Graphics support +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_E1355 is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON_OLD is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +CONFIG_FB_KYRO=y +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_VIRTUAL is not 
set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +# CONFIG_MDA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_PCI_CONSOLE=y +CONFIG_FONTS=y +# CONFIG_FONT_8x8 is not set +CONFIG_FONT_8x16=y +# CONFIG_FONT_6x11 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set + +# +# Logo configuration +# +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +# CONFIG_LOGO_LINUX_CLUT224 is not set +# CONFIG_LOGO_SUPERH_MONO is not set +# CONFIG_LOGO_SUPERH_VGA16 is not set +CONFIG_LOGO_SUPERH_CLUT224=y + +# +# Sound +# +# CONFIG_SOUND is not set + +# +# USB support +# +# CONFIG_USB is not set + +# +# USB Gadget Support +# +# CONFIG_USB_GADGET is not set + +# +# File systems +# +CONFIG_EXT2_FS=y +# CONFIG_EXT2_FS_XATTR is not set +# CONFIG_EXT3_FS is not set +# CONFIG_JBD is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +# CONFIG_XFS_FS is not set +CONFIG_MINIX_FS=y +CONFIG_ROMFS_FS=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_FAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_SYSFS=y +# CONFIG_DEVFS_FS is not set +# CONFIG_DEVPTS_FS_XATTR is not set +CONFIG_TMPFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_RAMFS=y + +# +# Miscellaneous filesystems +# +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_CRAMFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set + +# +# Network File Systems +# +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +# CONFIG_NFS_V4 is not set +# CONFIG_NFS_DIRECTIO is not set +# CONFIG_NFSD is not set +CONFIG_ROOT_NFS=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +# CONFIG_EXPORTFS is not set +CONFIG_SUNRPC=y +# CONFIG_RPCSEC_GSS_KRB5 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y + +# +# Native Language Support +# +# CONFIG_NLS is not set + +# +# Profiling support +# +CONFIG_PROFILING=y +# CONFIG_OPROFILE is not set + +# +# Kernel hacking +# +CONFIG_MAGIC_SYSRQ=y +# CONFIG_EARLY_PRINTK is not set +# CONFIG_DEBUG_KERNEL_WITH_GDB_STUB is not set +# CONFIG_SH64_PROC_TLB is not set +# CONFIG_SH64_PROC_ASIDS is not set +CONFIG_SH64_SR_WATCH=y +# CONFIG_SH_ALPHANUMERIC is not set +# CONFIG_SH_NO_BSS_INIT is not set +CONFIG_FRAME_POINTER=y + +# +# Security options +# +# CONFIG_SECURITY is not set + +# +# Cryptographic options +# +# CONFIG_CRYPTO is not set + +# +# Library routines +# +# CONFIG_CRC16 is not set +CONFIG_CRC32=y +# CONFIG_LIBCRC32C is not set diff --git a/arch/sh64/kernel/Makefile b/arch/sh64/kernel/Makefile new file mode 100644 index 000000000..2f8d07746 --- /dev/null +++ b/arch/sh64/kernel/Makefile @@ -0,0 +1,38 @@ +# +# This file is subject to the terms and conditions of the GNU General Public +# License. 
See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 2000, 2001 Paolo Alberelli +# Copyright (C) 2003 Paul Mundt +# +# Makefile for the Linux sh64 kernel. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +extra-y := head.o init_task.o vmlinux.lds.s + +obj-y := process.o signal.o entry.o traps.o irq.o irq_intc.o \ + ptrace.o setup.o time.o sys_sh64.o semaphore.o sh_ksyms.o \ + switchto.o syscalls.o + +obj-$(CONFIG_HEARTBEAT) += led.o +obj-$(CONFIG_SH_ALPHANUMERIC) += alphanum.o +obj-$(CONFIG_SH_DMA) += dma.o +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_KALLSYMS) += unwind.o +obj-$(CONFIG_PCI) += pci-dma.o pcibios.o + +ifeq ($(CONFIG_PCI),y) +obj-$(CONFIG_CPU_SH5) += pci_sh5.o +endif + +ifndef CONFIG_NOFPU_SUPPORT +obj-y += fpu.o +endif + +USE_STANDARD_AS_RULE := true + diff --git a/arch/sh64/kernel/asm-offsets.c b/arch/sh64/kernel/asm-offsets.c new file mode 100644 index 000000000..ca76537c1 --- /dev/null +++ b/arch/sh64/kernel/asm-offsets.c @@ -0,0 +1,33 @@ +/* + * This program is used to generate definitions needed by + * assembly language modules. + * + * We use the technique used in the OSF Mach kernel code: + * generate asm statements containing #defines, + * compile this file to assembler, and then extract the + * #defines from the assembly-language output. + */ + +#include +#include +#include +#include + +#define DEFINE(sym, val) \ + asm volatile("\n->" #sym " %0 " #val : : "i" (val)) + +#define BLANK() asm volatile("\n->" : : ) + +int main(void) +{ + /* offsets into the thread_info struct */ + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); + DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block)); + + return 0; +} diff --git a/arch/sh64/kernel/early_printk.c b/arch/sh64/kernel/early_printk.c new file mode 100644 index 000000000..3d03ca737 --- /dev/null +++ b/arch/sh64/kernel/early_printk.c @@ -0,0 +1,107 @@ +/* + * arch/sh64/kernel/early_printk.c + * + * SH-5 Early SCIF console (cloned and hacked from sh implementation) + * + * Copyright (C) 2003, 2004 Paul Mundt + * Copyright (C) 2002 M. R. Brown + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include + +extern void cpu_relax(void); + +#define SCIF_BASE_ADDR 0x01030000 +#define SCIF_ADDR_SH5 PHYS_PERIPHERAL_BLOCK+SCIF_BASE_ADDR + +/* + * Fixed virtual address where SCIF is mapped (should already be done + * in arch/sh64/kernel/head.S!). 
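A note on the asm-offsets.c generator above: the asm() string is copied verbatim into the compiler's assembly output, with the "i"-constrained operand replaced by the final constant, so the build can post-process the generated .s file and turn each "->NAME value" marker into a #define usable from assembly sources. The standalone illustration below uses a hypothetical struct and symbol name and is only a sketch of the same trick.

// offsets_demo.c: compile with "cc -S offsets_demo.c" and look at the lines
// beginning with "->" in the generated assembly; a small sed rule in the
// build would rewrite each "->NAME value ..." marker into "#define NAME value".
#include <stddef.h>

struct demo {
	long pad;
	int  flags;		// field whose offset the assembly code needs
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	// Emits a marker line like "->DEMO_FLAGS 8 offsetof(struct demo, flags)";
	// the exact spelling of the constant is compiler and target dependent.
	DEFINE(DEMO_FLAGS, offsetof(struct demo, flags));
	return 0;
}

The extracted #defines end up in an auto-generated header, which is why the struct layouts never have to be duplicated by hand in assembly.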
+ */ +#define SCIF_REG 0xfa030000 + +enum { + SCIF_SCSMR2 = SCIF_REG + 0x00, + SCIF_SCBRR2 = SCIF_REG + 0x04, + SCIF_SCSCR2 = SCIF_REG + 0x08, + SCIF_SCFTDR2 = SCIF_REG + 0x0c, + SCIF_SCFSR2 = SCIF_REG + 0x10, + SCIF_SCFRDR2 = SCIF_REG + 0x14, + SCIF_SCFCR2 = SCIF_REG + 0x18, + SCIF_SCFDR2 = SCIF_REG + 0x1c, + SCIF_SCSPTR2 = SCIF_REG + 0x20, + SCIF_SCLSR2 = SCIF_REG + 0x24, +}; + +static void sh_console_putc(int c) +{ + while (!(ctrl_inw(SCIF_SCFSR2) & 0x20)) + cpu_relax(); + + ctrl_outb(c, SCIF_SCFTDR2); + ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0x9f), SCIF_SCFSR2); + + if (c == '\n') + sh_console_putc('\r'); +} + +static void sh_console_flush(void) +{ + ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2); + + while (!(ctrl_inw(SCIF_SCFSR2) & 0x40)) + cpu_relax(); + + ctrl_outw((ctrl_inw(SCIF_SCFSR2) & 0xbf), SCIF_SCFSR2); +} + +static void sh_console_write(struct console *con, const char *s, unsigned count) +{ + while (count-- > 0) + sh_console_putc(*s++); + + sh_console_flush(); +} + +static int __init sh_console_setup(struct console *con, char *options) +{ + con->cflag = CREAD | HUPCL | CLOCAL | B19200 | CS8; + + return 0; +} + +static struct console sh_console = { + .name = "scifcon", + .write = sh_console_write, + .setup = sh_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +void __init enable_early_printk(void) +{ + ctrl_outb(0x2a, SCIF_SCBRR2); /* 19200bps */ + + ctrl_outw(0x04, SCIF_SCFCR2); /* Reset TFRST */ + ctrl_outw(0x10, SCIF_SCFCR2); /* TTRG0=1 */ + + ctrl_outw(0, SCIF_SCSPTR2); + ctrl_outw(0x60, SCIF_SCFSR2); + ctrl_outw(0, SCIF_SCLSR2); + ctrl_outw(0x30, SCIF_SCSCR2); + + register_console(&sh_console); +} + +void disable_early_printk(void) +{ + unregister_console(&sh_console); +} + diff --git a/arch/sh64/kernel/fpu.c b/arch/sh64/kernel/fpu.c new file mode 100644 index 000000000..175c88a5d --- /dev/null +++ b/arch/sh64/kernel/fpu.c @@ -0,0 +1,170 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/fpu.c + * + * Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli + * Copyright (C) 2002 STMicroelectronics Limited + * Author : Stuart Menefy + * + * Started from SH4 version: + * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka + * + */ + +#include +#include +#include +#include +#include + +/* + * Initially load the FPU with signalling NANS. This bit pattern + * has the property that no matter whether considered as single or as + * double precision, it still represents a signalling NAN. + */ +#define sNAN64 0xFFFFFFFFFFFFFFFFULL +#define sNAN32 0xFFFFFFFFUL + +static union sh_fpu_union init_fpuregs = { + .hard = { + .fp_regs = { [0 ... 
63] = sNAN32 }, + .fpscr = FPSCR_INIT + } +}; + +inline void fpsave(struct sh_fpu_hard_struct *fpregs) +{ + asm volatile("fst.p %0, (0*8), fp0\n\t" + "fst.p %0, (1*8), fp2\n\t" + "fst.p %0, (2*8), fp4\n\t" + "fst.p %0, (3*8), fp6\n\t" + "fst.p %0, (4*8), fp8\n\t" + "fst.p %0, (5*8), fp10\n\t" + "fst.p %0, (6*8), fp12\n\t" + "fst.p %0, (7*8), fp14\n\t" + "fst.p %0, (8*8), fp16\n\t" + "fst.p %0, (9*8), fp18\n\t" + "fst.p %0, (10*8), fp20\n\t" + "fst.p %0, (11*8), fp22\n\t" + "fst.p %0, (12*8), fp24\n\t" + "fst.p %0, (13*8), fp26\n\t" + "fst.p %0, (14*8), fp28\n\t" + "fst.p %0, (15*8), fp30\n\t" + "fst.p %0, (16*8), fp32\n\t" + "fst.p %0, (17*8), fp34\n\t" + "fst.p %0, (18*8), fp36\n\t" + "fst.p %0, (19*8), fp38\n\t" + "fst.p %0, (20*8), fp40\n\t" + "fst.p %0, (21*8), fp42\n\t" + "fst.p %0, (22*8), fp44\n\t" + "fst.p %0, (23*8), fp46\n\t" + "fst.p %0, (24*8), fp48\n\t" + "fst.p %0, (25*8), fp50\n\t" + "fst.p %0, (26*8), fp52\n\t" + "fst.p %0, (27*8), fp54\n\t" + "fst.p %0, (28*8), fp56\n\t" + "fst.p %0, (29*8), fp58\n\t" + "fst.p %0, (30*8), fp60\n\t" + "fst.p %0, (31*8), fp62\n\t" + + "fgetscr fr63\n\t" + "fst.s %0, (32*8), fr63\n\t" + : /* no output */ + : "r" (fpregs) + : "memory"); +} + + +static inline void +fpload(struct sh_fpu_hard_struct *fpregs) +{ + asm volatile("fld.p %0, (0*8), fp0\n\t" + "fld.p %0, (1*8), fp2\n\t" + "fld.p %0, (2*8), fp4\n\t" + "fld.p %0, (3*8), fp6\n\t" + "fld.p %0, (4*8), fp8\n\t" + "fld.p %0, (5*8), fp10\n\t" + "fld.p %0, (6*8), fp12\n\t" + "fld.p %0, (7*8), fp14\n\t" + "fld.p %0, (8*8), fp16\n\t" + "fld.p %0, (9*8), fp18\n\t" + "fld.p %0, (10*8), fp20\n\t" + "fld.p %0, (11*8), fp22\n\t" + "fld.p %0, (12*8), fp24\n\t" + "fld.p %0, (13*8), fp26\n\t" + "fld.p %0, (14*8), fp28\n\t" + "fld.p %0, (15*8), fp30\n\t" + "fld.p %0, (16*8), fp32\n\t" + "fld.p %0, (17*8), fp34\n\t" + "fld.p %0, (18*8), fp36\n\t" + "fld.p %0, (19*8), fp38\n\t" + "fld.p %0, (20*8), fp40\n\t" + "fld.p %0, (21*8), fp42\n\t" + "fld.p %0, (22*8), fp44\n\t" + "fld.p %0, (23*8), fp46\n\t" + "fld.p %0, (24*8), fp48\n\t" + "fld.p %0, (25*8), fp50\n\t" + "fld.p %0, (26*8), fp52\n\t" + "fld.p %0, (27*8), fp54\n\t" + "fld.p %0, (28*8), fp56\n\t" + "fld.p %0, (29*8), fp58\n\t" + "fld.p %0, (30*8), fp60\n\t" + + "fld.s %0, (32*8), fr63\n\t" + "fputscr fr63\n\t" + + "fld.p %0, (31*8), fp62\n\t" + : /* no output */ + : "r" (fpregs) ); +} + +void fpinit(struct sh_fpu_hard_struct *fpregs) +{ + *fpregs = init_fpuregs.hard; +} + +asmlinkage void +do_fpu_error(unsigned long ex, struct pt_regs *regs) +{ + struct task_struct *tsk = current; + + regs->pc += 4; + + tsk->thread.trap_no = 11; + tsk->thread.error_code = 0; + force_sig(SIGFPE, tsk); +} + + +asmlinkage void +do_fpu_state_restore(unsigned long ex, struct pt_regs *regs) +{ + void die(const char *str, struct pt_regs *regs, long err); + + if (! user_mode(regs)) + die("FPU used in kernel", regs, ex); + + regs->sr &= ~SR_FD; + + if (last_task_used_math == current) + return; + + grab_fpu(); + if (last_task_used_math != NULL) { + /* Other processes fpu state, save away */ + fpsave(&last_task_used_math->thread.fpu.hard); + } + last_task_used_math = current; + if (current->used_math) { + fpload(¤t->thread.fpu.hard); + } else { + /* First time FPU user. 
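Seed the state from init_fpuregs (the signalling-NaN image defined above) so the task starts from a known FPU state.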
*/ + fpload(&init_fpuregs.hard); + current->used_math = 1; + } + release_fpu(); +} + diff --git a/arch/sh64/kernel/init_task.c b/arch/sh64/kernel/init_task.c new file mode 100644 index 000000000..de2d07db1 --- /dev/null +++ b/arch/sh64/kernel/init_task.c @@ -0,0 +1,46 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/init_task.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * + */ +#include +#include +#include +#include +#include + +#include +#include + +static struct fs_struct init_fs = INIT_FS; +static struct files_struct init_files = INIT_FILES; +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); +struct mm_struct init_mm = INIT_MM(init_mm); + +struct pt_regs fake_swapper_regs; + +/* + * Initial thread structure. + * + * We need to make sure that this is THREAD_SIZE-byte aligned due + * to the way process stacks are handled. This is done by having a + * special "init_task" linker map entry.. + */ +union thread_union init_thread_union + __attribute__((__section__(".data.init_task"))) = + { INIT_THREAD_INFO(init_task) }; + +/* + * Initial task structure. + * + * All other task structs will be allocated on slabs in fork.c + */ +struct task_struct init_task = INIT_TASK(init_task); + diff --git a/arch/sh64/kernel/pci-dma.c b/arch/sh64/kernel/pci-dma.c new file mode 100644 index 000000000..a36c3d71a --- /dev/null +++ b/arch/sh64/kernel/pci-dma.c @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) + * Copyright (C) 2003 Paul Mundt (lethal@linux-sh.org) + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Dynamic DMA mapping support. + */ +#include +#include +#include +#include +#include + +void *consistent_alloc(struct pci_dev *hwdev, size_t size, + dma_addr_t *dma_handle) +{ + void *ret; + int gfp = GFP_ATOMIC; + void *vp; + + if (hwdev == NULL || hwdev->dma_mask != 0xffffffff) + gfp |= GFP_DMA; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + + /* now call our friend ioremap_nocache to give us an uncached area */ + vp = ioremap_nocache(virt_to_phys(ret), size); + + if (vp != NULL) { + memset(vp, 0, size); + *dma_handle = virt_to_bus(ret); + dma_cache_wback_inv((unsigned long)ret, size); + } + + return vp; +} + +void consistent_free(struct pci_dev *hwdev, size_t size, + void *vaddr, dma_addr_t dma_handle) +{ + void *alloc; + + alloc = bus_to_virt((unsigned long)dma_handle); + free_pages((unsigned long)alloc, get_order(size)); + + iounmap(vaddr); +} + diff --git a/arch/sh64/kernel/pci_sh5.h b/arch/sh64/kernel/pci_sh5.h new file mode 100644 index 000000000..8f21f5d2a --- /dev/null +++ b/arch/sh64/kernel/pci_sh5.h @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2001 David J. Mckay (david.mckay@st.com) + * + * May be copied or modified under the terms of the GNU General Public + * License. See linux/COPYING for more information. + * + * Defintions for the SH5 PCI hardware. 
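+ * Register offsets below are consumed through the PCISH5_ICR_REG() macro, + * which simply adds them to pcicr_virt.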
+ */ + +/* Product ID */ +#define PCISH5_PID 0x350d + +/* vendor ID */ +#define PCISH5_VID 0x1054 + +/* Configuration types */ +#define ST_TYPE0 0x00 /* Configuration cycle type 0 */ +#define ST_TYPE1 0x01 /* Configuration cycle type 1 */ + +/* VCR data */ +#define PCISH5_VCR_STATUS 0x00 +#define PCISH5_VCR_VERSION 0x08 + +/* +** ICR register offsets and bits +*/ +#define PCISH5_ICR_CR 0x100 /* PCI control register values */ +#define CR_PBAM (1<<12) +#define CR_PFCS (1<<11) +#define CR_FTO (1<<10) +#define CR_PFE (1<<9) +#define CR_TBS (1<<8) +#define CR_SPUE (1<<7) +#define CR_BMAM (1<<6) +#define CR_HOST (1<<5) +#define CR_CLKEN (1<<4) +#define CR_SOCS (1<<3) +#define CR_IOCS (1<<2) +#define CR_RSTCTL (1<<1) +#define CR_CFINT (1<<0) +#define CR_LOCK_MASK 0xa5000000 + +#define PCISH5_ICR_INT 0x114 /* Interrupt registert values */ +#define INT_MADIM (1<<2) + +#define PCISH5_ICR_LSR0 0X104 /* Local space register values */ +#define PCISH5_ICR_LSR1 0X108 /* Local space register values */ +#define PCISH5_ICR_LAR0 0x10c /* Local address register values */ +#define PCISH5_ICR_LAR1 0x110 /* Local address register values */ +#define PCISH5_ICR_INTM 0x118 /* Interrupt mask register values */ +#define PCISH5_ICR_AIR 0x11c /* Interrupt error address information register values */ +#define PCISH5_ICR_CIR 0x120 /* Interrupt error command information register values */ +#define PCISH5_ICR_AINT 0x130 /* Interrupt error arbiter interrupt register values */ +#define PCISH5_ICR_AINTM 0x134 /* Interrupt error arbiter interrupt mask register values */ +#define PCISH5_ICR_BMIR 0x138 /* Interrupt error info register of bus master values */ +#define PCISH5_ICR_PAR 0x1c0 /* Pio address register values */ +#define PCISH5_ICR_MBR 0x1c4 /* Memory space bank register values */ +#define PCISH5_ICR_IOBR 0x1c8 /* I/O space bank register values */ +#define PCISH5_ICR_PINT 0x1cc /* power management interrupt register values */ +#define PCISH5_ICR_PINTM 0x1d0 /* power management interrupt mask register values */ +#define PCISH5_ICR_MBMR 0x1d8 /* memory space bank mask register values */ +#define PCISH5_ICR_IOBMR 0x1dc /* I/O space bank mask register values */ +#define PCISH5_ICR_CSCR0 0x210 /* PCI cache snoop control register 0 */ +#define PCISH5_ICR_CSCR1 0x214 /* PCI cache snoop control register 1 */ +#define PCISH5_ICR_PDR 0x220 /* Pio data register values */ + +/* These are configs space registers */ +#define PCISH5_ICR_CSR_VID 0x000 /* Vendor id */ +#define PCISH5_ICR_CSR_DID 0x002 /* Device id */ +#define PCISH5_ICR_CSR_CMD 0x004 /* Command register */ +#define PCISH5_ICR_CSR_STATUS 0x006 /* Stautus */ +#define PCISH5_ICR_CSR_IBAR0 0x010 /* I/O base address register */ +#define PCISH5_ICR_CSR_MBAR0 0x014 /* First Memory base address register */ +#define PCISH5_ICR_CSR_MBAR1 0x018 /* Second Memory base address register */ + + + +/* Base address of registers */ +#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000) +#define SH5PCI_IO_BASE (PHYS_PCI_BLOCK + 0x00800000) +/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG) */ + +/* Register selection macro */ +#define PCISH5_ICR_REG(x) ( pcicr_virt + (PCISH5_ICR_##x)) +/* #define PCISH5_VCR_REG(x) ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */ + +/* Write I/O functions */ +#define SH5PCI_WRITE(reg,val) ctrl_outl((u32)(val),PCISH5_ICR_REG(reg)) +#define SH5PCI_WRITE_SHORT(reg,val) ctrl_outw((u16)(val),PCISH5_ICR_REG(reg)) +#define SH5PCI_WRITE_BYTE(reg,val) ctrl_outb((u8)(val),PCISH5_ICR_REG(reg)) + +/* Read I/O functions */ +#define SH5PCI_READ(reg) 
ctrl_inl(PCISH5_ICR_REG(reg)) +#define SH5PCI_READ_SHORT(reg) ctrl_inw(PCISH5_ICR_REG(reg)) +#define SH5PCI_READ_BYTE(reg) ctrl_inb(PCISH5_ICR_REG(reg)) + +/* Set PCI config bits */ +#define SET_CONFIG_BITS(bus,devfn,where) ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000) + +/* Set PCI command register */ +#define CONFIG_CMD(bus, devfn, where) SET_CONFIG_BITS(bus->number,devfn,where) + +/* Size converters */ +#define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18) +#define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18) + + diff --git a/arch/sh64/kernel/semaphore.c b/arch/sh64/kernel/semaphore.c new file mode 100644 index 000000000..72c165334 --- /dev/null +++ b/arch/sh64/kernel/semaphore.c @@ -0,0 +1,140 @@ +/* + * Just taken from alpha implementation. + * This can't work well, perhaps. + */ +/* + * Generic semaphore code. Buyer beware. Do your own + * specific changes in + */ + +#include +#include +#include +#include +#include +#include +#include + +spinlock_t semaphore_wake_lock; + +/* + * Semaphores are implemented using a two-way counter: + * The "count" variable is decremented for each process + * that tries to sleep, while the "waking" variable is + * incremented when the "up()" code goes to wake up waiting + * processes. + * + * Notably, the inline "up()" and "down()" functions can + * efficiently test if they need to do any extra work (up + * needs to do something only if count was negative before + * the increment operation. + * + * waking_non_zero() (from asm/semaphore.h) must execute + * atomically. + * + * When __up() is called, the count was negative before + * incrementing it, and we need to wake up somebody. + * + * This routine adds one to the count of processes that need to + * wake up and exit. ALL waiting processes actually wake up but + * only the one that gets to the "waking" field first will gate + * through and acquire the semaphore. The others will go back + * to sleep. + * + * Note that these functions are only called when there is + * contention on the lock, and as such all this is the + * "non-critical" part of the whole semaphore business. The + * critical part is the inline stuff in + * where we want to avoid any extra jumps and calls. + */ +void __up(struct semaphore *sem) +{ + wake_one_more(sem); + wake_up(&sem->wait); +} + +/* + * Perform the "down" function. Return zero for semaphore acquired, + * return negative for signalled out of the function. + * + * If called from __down, the return is ignored and the wait loop is + * not interruptible. This means that a task waiting on a semaphore + * using "down()" cannot be killed until someone does an "up()" on + * the semaphore. + * + * If called from __down_interruptible, the return value gets checked + * upon return. If the return value is negative then the task continues + * with the negative value in the return register (it can be tested by + * the caller). + * + * Either form may be used in conjunction with "up()". + * + */ + +#define DOWN_VAR \ + struct task_struct *tsk = current; \ + wait_queue_t wait; \ + init_waitqueue_entry(&wait, tsk); + +#define DOWN_HEAD(task_state) \ + \ + \ + tsk->state = (task_state); \ + add_wait_queue(&sem->wait, &wait); \ + \ + /* \ + * Ok, we're set up. sem->count is known to be less than zero \ + * so we must wait. \ + * \ + * We can let go the lock for purposes of waiting. \ + * We re-acquire it after awaking so as to protect \ + * all semaphore operations. 
\ + * \ + * If "up()" is called before we call waking_non_zero() then \ + * we will catch it right away. If it is called later then \ + * we will have to go through a wakeup cycle to catch it. \ + * \ + * Multiple waiters contend for the semaphore lock to see \ + * who gets to gate through and who has to wait some more. \ + */ \ + for (;;) { + +#define DOWN_TAIL(task_state) \ + tsk->state = (task_state); \ + } \ + tsk->state = TASK_RUNNING; \ + remove_wait_queue(&sem->wait, &wait); + +void __sched __down(struct semaphore * sem) +{ + DOWN_VAR + DOWN_HEAD(TASK_UNINTERRUPTIBLE) + if (waking_non_zero(sem)) + break; + schedule(); + DOWN_TAIL(TASK_UNINTERRUPTIBLE) +} + +int __sched __down_interruptible(struct semaphore * sem) +{ + int ret = 0; + DOWN_VAR + DOWN_HEAD(TASK_INTERRUPTIBLE) + + ret = waking_non_zero_interruptible(sem, tsk); + if (ret) + { + if (ret == 1) + /* ret != 0 only if we get interrupted -arca */ + ret = 0; + break; + } + schedule(); + DOWN_TAIL(TASK_INTERRUPTIBLE) + return ret; +} + +int __down_trylock(struct semaphore * sem) +{ + return waking_non_zero_trylock(sem); +} diff --git a/arch/sh64/kernel/switchto.S b/arch/sh64/kernel/switchto.S new file mode 100644 index 000000000..24ef14c83 --- /dev/null +++ b/arch/sh64/kernel/switchto.S @@ -0,0 +1,199 @@ +/* + * arch/sh64/kernel/switchto.S + * + * sh64 context switch + * + * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. +*/ + + .section .text..SHmedia32,"ax" + .little + + .balign 32 + + .type sh64_switch_to,@function + .global sh64_switch_to + .global __sh64_switch_to_end +sh64_switch_to: + +/* Incoming args + r2 - prev + r3 - &prev->thread + r4 - next + r5 - &next->thread + + Outgoing results + r2 - last (=prev) + + Want to create a full (struct pt_regs) on the stack to allow backtracing + functions to work. However, we only need to populate the callee-save + register slots in this structure; since we're a function our ancestors must + have themselves preserved all caller saved state in the stack. This saves + some wasted effort since we won't need to look at the values. + + In particular, all caller-save registers are immediately available for + scratch use. + +*/ + +#define FRAME_SIZE (76*8 + 8) + + movi FRAME_SIZE, r0 + sub.l r15, r0, r15 + ! Do normal-style register save to support backtrace + + st.l r15, 0, r18 ! save link reg + st.l r15, 4, r14 ! save fp + add.l r15, r63, r14 ! setup frame pointer + + ! hopefully this looks normal to the backtrace now. + + addi.l r15, 8, r1 ! base of pt_regs + addi.l r1, 24, r0 ! base of pt_regs.regs + addi.l r0, (63*8), r8 ! base of pt_regs.trregs + + /* Note : to be fixed? + struct pt_regs is really designed for holding the state on entry + to an exception, i.e. pc,sr,regs etc. However, for the context + switch state, some of this is not required. But the unwinder takes + struct pt_regs * as an arg so we have to build this structure + to allow unwinding switched tasks in show_state() */ + + st.q r0, ( 9*8), r9 + st.q r0, (10*8), r10 + st.q r0, (11*8), r11 + st.q r0, (12*8), r12 + st.q r0, (13*8), r13 + st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at + ! the point where the process is left in suspended animation, i.e. current + ! fp here, not the saved one. 
+ st.q r0, (16*8), r16 + + st.q r0, (24*8), r24 + st.q r0, (25*8), r25 + st.q r0, (26*8), r26 + st.q r0, (27*8), r27 + st.q r0, (28*8), r28 + st.q r0, (29*8), r29 + st.q r0, (30*8), r30 + st.q r0, (31*8), r31 + st.q r0, (32*8), r32 + st.q r0, (33*8), r33 + st.q r0, (34*8), r34 + st.q r0, (35*8), r35 + + st.q r0, (44*8), r44 + st.q r0, (45*8), r45 + st.q r0, (46*8), r46 + st.q r0, (47*8), r47 + st.q r0, (48*8), r48 + st.q r0, (49*8), r49 + st.q r0, (50*8), r50 + st.q r0, (51*8), r51 + st.q r0, (52*8), r52 + st.q r0, (53*8), r53 + st.q r0, (54*8), r54 + st.q r0, (55*8), r55 + st.q r0, (56*8), r56 + st.q r0, (57*8), r57 + st.q r0, (58*8), r58 + st.q r0, (59*8), r59 + + ! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency) + ! Use a local label to avoid creating a symbol that will confuse the ! + ! backtrace + pta .Lsave_pc, tr0 + + gettr tr5, r45 + gettr tr6, r46 + gettr tr7, r47 + st.q r8, (5*8), r45 + st.q r8, (6*8), r46 + st.q r8, (7*8), r47 + + ! Now switch context + gettr tr0, r9 + st.l r3, 0, r15 ! prev->thread.sp + st.l r3, 8, r1 ! prev->thread.kregs + st.l r3, 4, r9 ! prev->thread.pc + st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc + + ! Load PC for next task (init value or save_pc later) + ld.l r5, 4, r18 ! next->thread.pc + ! Switch stacks + ld.l r5, 0, r15 ! next->thread.sp + ptabs r18, tr0 + + ! Update current + ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct) + putcon r9, kcr0 ! current = next->thread_info + + ! go to save_pc for a reschedule, or the initial thread.pc for a new process + blink tr0, r63 + + ! Restore (when we come back to a previously saved task) +.Lsave_pc: + addi.l r15, 32, r0 ! r0 = next's regs + addi.l r0, (63*8), r8 ! r8 = next's tr_regs + + ld.q r8, (5*8), r45 + ld.q r8, (6*8), r46 + ld.q r8, (7*8), r47 + ptabs r45, tr5 + ptabs r46, tr6 + ptabs r47, tr7 + + ld.q r0, ( 9*8), r9 + ld.q r0, (10*8), r10 + ld.q r0, (11*8), r11 + ld.q r0, (12*8), r12 + ld.q r0, (13*8), r13 + ld.q r0, (14*8), r14 + ld.q r0, (16*8), r16 + + ld.q r0, (24*8), r24 + ld.q r0, (25*8), r25 + ld.q r0, (26*8), r26 + ld.q r0, (27*8), r27 + ld.q r0, (28*8), r28 + ld.q r0, (29*8), r29 + ld.q r0, (30*8), r30 + ld.q r0, (31*8), r31 + ld.q r0, (32*8), r32 + ld.q r0, (33*8), r33 + ld.q r0, (34*8), r34 + ld.q r0, (35*8), r35 + + ld.q r0, (44*8), r44 + ld.q r0, (45*8), r45 + ld.q r0, (46*8), r46 + ld.q r0, (47*8), r47 + ld.q r0, (48*8), r48 + ld.q r0, (49*8), r49 + ld.q r0, (50*8), r50 + ld.q r0, (51*8), r51 + ld.q r0, (52*8), r52 + ld.q r0, (53*8), r53 + ld.q r0, (54*8), r54 + ld.q r0, (55*8), r55 + ld.q r0, (56*8), r56 + ld.q r0, (57*8), r57 + ld.q r0, (58*8), r58 + ld.q r0, (59*8), r59 + + ! epilogue + ld.l r15, 0, r18 + ld.l r15, 4, r14 + ori r4, 0, r2 ! last = prev + ptabs r18, tr0 + movi FRAME_SIZE, r0 + add r15, r0, r15 + blink tr0, r63 +__sh64_switch_to_end: +.LFE1: + .size sh64_switch_to,.LFE1-sh64_switch_to + diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S new file mode 100644 index 000000000..819e335af --- /dev/null +++ b/arch/sh64/kernel/syscalls.S @@ -0,0 +1,340 @@ +/* + * arch/sh64/kernel/syscalls.S + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2004 Paul Mundt + * Copyright (C) 2003, 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
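+ * + * The table below is indexed directly by syscall number, so slot order is + * ABI; retired calls keep their slots as sys_ni_syscall.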
+ */ + +#include + + .section .data, "aw" + .balign 32 + +/* + * System calls jump table + */ + .globl sys_call_table +sys_call_table: + .long sys_ni_syscall /* 0 - old "setup()" system call */ + .long sys_exit + .long sys_fork + .long sys_read + .long sys_write + .long sys_open /* 5 */ + .long sys_close + .long sys_waitpid + .long sys_creat + .long sys_link + .long sys_unlink /* 10 */ + .long sys_execve + .long sys_chdir + .long sys_time + .long sys_mknod + .long sys_chmod /* 15 */ + .long sys_lchown16 + .long sys_ni_syscall /* old break syscall holder */ + .long sys_stat + .long sys_lseek + .long sys_getpid /* 20 */ + .long sys_mount + .long sys_oldumount + .long sys_setuid16 + .long sys_getuid16 + .long sys_stime /* 25 */ + .long sys_ptrace + .long sys_alarm + .long sys_fstat + .long sys_pause + .long sys_utime /* 30 */ + .long sys_ni_syscall /* old stty syscall holder */ + .long sys_ni_syscall /* old gtty syscall holder */ + .long sys_access + .long sys_nice + .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */ + .long sys_sync + .long sys_kill + .long sys_rename + .long sys_mkdir + .long sys_rmdir /* 40 */ + .long sys_dup + .long sys_pipe + .long sys_times + .long sys_ni_syscall /* old prof syscall holder */ + .long sys_brk /* 45 */ + .long sys_setgid16 + .long sys_getgid16 + .long sys_signal + .long sys_geteuid16 + .long sys_getegid16 /* 50 */ + .long sys_acct + .long sys_umount /* recycled never used phys( */ + .long sys_ni_syscall /* old lock syscall holder */ + .long sys_ioctl + .long sys_fcntl /* 55 */ + .long sys_ni_syscall /* old mpx syscall holder */ + .long sys_setpgid + .long sys_ni_syscall /* old ulimit syscall holder */ + .long sys_ni_syscall /* sys_olduname */ + .long sys_umask /* 60 */ + .long sys_chroot + .long sys_ustat + .long sys_dup2 + .long sys_getppid + .long sys_getpgrp /* 65 */ + .long sys_setsid + .long sys_sigaction + .long sys_sgetmask + .long sys_ssetmask + .long sys_setreuid16 /* 70 */ + .long sys_setregid16 + .long sys_sigsuspend + .long sys_sigpending + .long sys_sethostname + .long sys_setrlimit /* 75 */ + .long sys_old_getrlimit + .long sys_getrusage + .long sys_gettimeofday + .long sys_settimeofday + .long sys_getgroups16 /* 80 */ + .long sys_setgroups16 + .long sys_ni_syscall /* sys_oldselect */ + .long sys_symlink + .long sys_lstat + .long sys_readlink /* 85 */ + .long sys_uselib + .long sys_swapon + .long sys_reboot + .long old_readdir + .long old_mmap /* 90 */ + .long sys_munmap + .long sys_truncate + .long sys_ftruncate + .long sys_fchmod + .long sys_fchown16 /* 95 */ + .long sys_getpriority + .long sys_setpriority + .long sys_ni_syscall /* old profil syscall holder */ + .long sys_statfs + .long sys_fstatfs /* 100 */ + .long sys_ni_syscall /* ioperm */ + .long sys_socketcall /* Obsolete implementation of socket syscall */ + .long sys_syslog + .long sys_setitimer + .long sys_getitimer /* 105 */ + .long sys_newstat + .long sys_newlstat + .long sys_newfstat + .long sys_uname + .long sys_ni_syscall /* 110 */ /* iopl */ + .long sys_vhangup + .long sys_ni_syscall /* idle */ + .long sys_ni_syscall /* vm86old */ + .long sys_wait4 + .long sys_swapoff /* 115 */ + .long sys_sysinfo + .long sys_ipc /* Obsolete ipc syscall implementation */ + .long sys_fsync + .long sys_sigreturn + .long sys_clone /* 120 */ + .long sys_setdomainname + .long sys_newuname + .long sys_ni_syscall /* sys_modify_ldt */ + .long sys_adjtimex + .long sys_mprotect /* 125 */ + .long sys_sigprocmask + .long sys_ni_syscall /* old "create_module" */ + .long sys_init_module + .long 
sys_delete_module + .long sys_ni_syscall /* 130: old "get_kernel_syms" */ + .long sys_quotactl + .long sys_getpgid + .long sys_fchdir + .long sys_bdflush + .long sys_sysfs /* 135 */ + .long sys_personality + .long sys_ni_syscall /* for afs_syscall */ + .long sys_setfsuid16 + .long sys_setfsgid16 + .long sys_llseek /* 140 */ + .long sys_getdents + .long sys_select + .long sys_flock + .long sys_msync + .long sys_readv /* 145 */ + .long sys_writev + .long sys_getsid + .long sys_fdatasync + .long sys_sysctl + .long sys_mlock /* 150 */ + .long sys_munlock + .long sys_mlockall + .long sys_munlockall + .long sys_sched_setparam + .long sys_sched_getparam /* 155 */ + .long sys_sched_setscheduler + .long sys_sched_getscheduler + .long sys_sched_yield + .long sys_sched_get_priority_max + .long sys_sched_get_priority_min /* 160 */ + .long sys_sched_rr_get_interval + .long sys_nanosleep + .long sys_mremap + .long sys_setresuid16 + .long sys_getresuid16 /* 165 */ + .long sys_ni_syscall /* vm86 */ + .long sys_ni_syscall /* old "query_module" */ + .long sys_poll + .long sys_nfsservctl + .long sys_setresgid16 /* 170 */ + .long sys_getresgid16 + .long sys_prctl + .long sys_rt_sigreturn + .long sys_rt_sigaction + .long sys_rt_sigprocmask /* 175 */ + .long sys_rt_sigpending + .long sys_rt_sigtimedwait + .long sys_rt_sigqueueinfo + .long sys_rt_sigsuspend + .long sys_pread64 /* 180 */ + .long sys_pwrite64 + .long sys_chown16 + .long sys_getcwd + .long sys_capget + .long sys_capset /* 185 */ + .long sys_sigaltstack + .long sys_sendfile + .long sys_ni_syscall /* streams1 */ + .long sys_ni_syscall /* streams2 */ + .long sys_vfork /* 190 */ + .long sys_getrlimit + .long sys_mmap2 + .long sys_truncate64 + .long sys_ftruncate64 + .long sys_stat64 /* 195 */ + .long sys_lstat64 + .long sys_fstat64 + .long sys_lchown + .long sys_getuid + .long sys_getgid /* 200 */ + .long sys_geteuid + .long sys_getegid + .long sys_setreuid + .long sys_setregid + .long sys_getgroups /* 205 */ + .long sys_setgroups + .long sys_fchown + .long sys_setresuid + .long sys_getresuid + .long sys_setresgid /* 210 */ + .long sys_getresgid + .long sys_chown + .long sys_setuid + .long sys_setgid + .long sys_setfsuid /* 215 */ + .long sys_setfsgid + .long sys_pivot_root + .long sys_mincore + .long sys_madvise + /* Broken-out socket family (maintain backwards compatibility in syscall + numbering with 2.4) */ + .long sys_socket /* 220 */ + .long sys_bind + .long sys_connect + .long sys_listen + .long sys_accept + .long sys_getsockname /* 225 */ + .long sys_getpeername + .long sys_socketpair + .long sys_send + .long sys_sendto + .long sys_recv /* 230*/ + .long sys_recvfrom + .long sys_shutdown + .long sys_setsockopt + .long sys_getsockopt + .long sys_sendmsg /* 235 */ + .long sys_recvmsg + /* Broken-out IPC family (maintain backwards compatibility in syscall + numbering with 2.4) */ + .long sys_semop + .long sys_semget + .long sys_semctl + .long sys_msgsnd /* 240 */ + .long sys_msgrcv + .long sys_msgget + .long sys_msgctl + .long sys_ni_syscall /* sys_shmatcall */ + .long sys_shmdt /* 245 */ + .long sys_shmget + .long sys_shmctl + /* Rest of syscalls listed in 2.4 i386 unistd.h */ + .long sys_getdents64 + .long sys_fcntl64 + .long sys_ni_syscall /* 250 reserved for TUX */ + .long sys_ni_syscall /* Reserved for Security */ + .long sys_gettid + .long sys_readahead + .long sys_setxattr + .long sys_lsetxattr /* 255 */ + .long sys_fsetxattr + .long sys_getxattr + .long sys_lgetxattr + .long sys_fgetxattr + .long sys_listxattr /* 260 */ + .long 
sys_llistxattr + .long sys_flistxattr + .long sys_removexattr + .long sys_lremovexattr + .long sys_fremovexattr /* 265 */ + .long sys_tkill + .long sys_sendfile64 + .long sys_futex + .long sys_sched_setaffinity + .long sys_sched_getaffinity /* 270 */ + .long sys_ni_syscall + .long sys_ni_syscall + .long sys_io_setup + .long sys_io_destroy + .long sys_io_getevents /* 275 */ + .long sys_io_submit + .long sys_io_cancel + .long sys_fadvise64 + .long sys_ni_syscall + .long sys_exit_group /* 280 */ + /* Rest of new 2.6 syscalls */ + .long sys_lookup_dcookie + .long sys_epoll_create + .long sys_epoll_ctl + .long sys_epoll_wait + .long sys_remap_file_pages /* 285 */ + .long sys_set_tid_address + .long sys_timer_create + .long sys_timer_settime + .long sys_timer_gettime + .long sys_timer_getoverrun /* 290 */ + .long sys_timer_delete + .long sys_clock_settime + .long sys_clock_gettime + .long sys_clock_getres + .long sys_clock_nanosleep /* 295 */ + .long sys_statfs64 + .long sys_fstatfs64 + .long sys_tgkill + .long sys_utimes + .long sys_fadvise64_64 /* 300 */ + .long sys_ni_syscall /* Reserved for vserver */ + .long sys_ni_syscall /* Reserved for mbind */ + .long sys_ni_syscall /* get_mempolicy */ + .long sys_ni_syscall /* set_mempolicy */ + .long sys_mq_open /* 305 */ + .long sys_mq_unlink + .long sys_mq_timedsend + .long sys_mq_timedreceive + .long sys_mq_notify + .long sys_mq_getsetattr /* 310 */ + diff --git a/arch/sh64/kernel/traps.c b/arch/sh64/kernel/traps.c new file mode 100644 index 000000000..b2b2bde30 --- /dev/null +++ b/arch/sh64/kernel/traps.c @@ -0,0 +1,958 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/kernel/traps.c + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003, 2004 Paul Mundt + * Copyright (C) 2003, 2004 Richard Curnow + * + */ + +/* + * 'Traps.c' handles hardware traps and faults after we have saved some + * state in 'entry.S'. 
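+ * + * Most handlers below funnel into do_unhandled_exception(), which posts a + * signal for user-mode faults and otherwise consults the exception fixup + * tables before calling die().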
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#undef DEBUG_EXCEPTION +#ifdef DEBUG_EXCEPTION +/* implemented in ../lib/dbg.c */ +extern void show_excp_regs(char *fname, int trapnr, int signr, + struct pt_regs *regs); +#else +#define show_excp_regs(a, b, c, d) +#endif + +static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name, + unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk); + +#define DO_ERROR(trapnr, signr, str, name, tsk) \ +asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \ +{ \ + do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \ +} + +spinlock_t die_lock; + +void die(const char * str, struct pt_regs * regs, long err) +{ + console_verbose(); + spin_lock_irq(&die_lock); + printk("%s: %lx\n", str, (err & 0xffffff)); + show_regs(regs); + spin_unlock_irq(&die_lock); + do_exit(SIGSEGV); +} + +static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err) +{ + if (!user_mode(regs)) + die(str, regs, err); +} + +static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err) +{ + if (!user_mode(regs)) { + const struct exception_table_entry *fixup; + fixup = search_exception_tables(regs->pc); + if (fixup) { + regs->pc = fixup->fixup; + return; + } + die(str, regs, err); + } +} + +DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current) +DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current) + + +/* Implement misaligned load/store handling for kernel (and optionally for user + mode too). Limitation : only SHmedia mode code is handled - there is no + handling at all for misaligned accesses occurring in SHcompact code yet. */ + +static int misaligned_fixup(struct pt_regs *regs); + +asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs) +{ + if (misaligned_fixup(regs) < 0) { + do_unhandled_exception(7, SIGSEGV, "address error(load)", + "do_address_error_load", + error_code, regs, current); + } + return; +} + +asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs) +{ + if (misaligned_fixup(regs) < 0) { + do_unhandled_exception(8, SIGSEGV, "address error(store)", + "do_address_error_store", + error_code, regs, current); + } + return; +} + +#if defined(CONFIG_SH64_ID2815_WORKAROUND) + +#define OPCODE_INVALID 0 +#define OPCODE_USER_VALID 1 +#define OPCODE_PRIV_VALID 2 + +/* getcon/putcon - requires checking which control register is referenced. */ +#define OPCODE_CTRL_REG 3 + +/* Table of valid opcodes for SHmedia mode. + Form a 10-bit value by concatenating the major/minor opcodes i.e. + opcode[31:26,20:16]. The 6 MSBs of this value index into the following + array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to + LSBs==4'b0000 etc). 
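+ Each bit-pair classifies one major/minor combination as one of the OPCODE_* + states defined above (invalid, user-valid, privileged-only, or control-register).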
*/ +static unsigned long shmedia_opcode_table[64] = { + 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015, + 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000, + 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000, + 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000, + 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, + 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, + 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555, + 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000 +}; + +void do_reserved_inst(unsigned long error_code, struct pt_regs *regs) +{ + /* Workaround SH5-101 cut2 silicon defect #2815 : + in some situations, inter-mode branches from SHcompact -> SHmedia + which should take ITLBMISS or EXECPROT exceptions at the target + falsely take RESINST at the target instead. */ + + unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */ + unsigned long pc, aligned_pc; + int get_user_error; + int trapnr = 12; + int signr = SIGILL; + char *exception_name = "reserved_instruction"; + + pc = regs->pc; + if ((pc & 3) == 1) { + /* SHmedia : check for defect. This requires executable vmas + to be readable too. */ + aligned_pc = pc & ~3; + if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) { + get_user_error = -EFAULT; + } else { + get_user_error = __get_user(opcode, (unsigned long *)aligned_pc); + } + if (get_user_error >= 0) { + unsigned long index, shift; + unsigned long major, minor, combined; + unsigned long reserved_field; + reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */ + major = (opcode >> 26) & 0x3f; + minor = (opcode >> 16) & 0xf; + combined = (major << 4) | minor; + index = major; + shift = minor << 1; + if (reserved_field == 0) { + int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3; + switch (opcode_state) { + case OPCODE_INVALID: + /* Trap. */ + break; + case OPCODE_USER_VALID: + /* Restart the instruction : the branch to the instruction will now be from an RTE + not from SHcompact so the silicon defect won't be triggered. */ + return; + case OPCODE_PRIV_VALID: + if (!user_mode(regs)) { + /* Should only ever get here if a module has + SHcompact code inside it. If so, the same fix up is needed. */ + return; /* same reason */ + } + /* Otherwise, user mode trying to execute a privileged instruction - + fall through to trap. */ + break; + case OPCODE_CTRL_REG: + /* If in privileged mode, return as above. */ + if (!user_mode(regs)) return; + /* In user mode ... */ + if (combined == 0x9f) { /* GETCON */ + unsigned long regno = (opcode >> 20) & 0x3f; + if (regno >= 62) { + return; + } + /* Otherwise, reserved or privileged control register, => trap */ + } else if (combined == 0x1bf) { /* PUTCON */ + unsigned long regno = (opcode >> 4) & 0x3f; + if (regno >= 62) { + return; + } + /* Otherwise, reserved or privileged control register, => trap */ + } else { + /* Trap */ + } + break; + default: + /* Fall through to trap. */ + break; + } + } + /* fall through to normal resinst processing */ + } else { + /* Error trying to read opcode. This typically means a + real fault, not a RESINST any more. So change the + codes. 
*/ + trapnr = 87; + exception_name = "address error (exec)"; + signr = SIGSEGV; + } + } + + do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current); +} + +#else /* CONFIG_SH64_ID2815_WORKAROUND */ + +/* If the workaround isn't needed, this is just a straightforward reserved + instruction */ +DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current) + +#endif /* CONFIG_SH64_ID2815_WORKAROUND */ + + +#include + +/* Called with interrupts disabled */ +asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs) +{ + PLS(); + show_excp_regs(__FUNCTION__, -1, -1, regs); + die_if_kernel("exception", regs, ex); +} + +int do_unknown_trapa(unsigned long scId, struct pt_regs *regs) +{ + /* Syscall debug */ + printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId); + + die_if_kernel("unknown trapa", regs, scId); + + return -ENOSYS; +} + +void show_stack(struct task_struct *tsk, unsigned long *sp) +{ +#ifdef CONFIG_KALLSYMS + extern void sh64_unwind(struct pt_regs *regs); + struct pt_regs *regs; + + regs = tsk ? tsk->thread.kregs : NULL; + + sh64_unwind(regs); +#else + printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n"); +#endif +} + +void show_task(unsigned long *sp) +{ + show_stack(NULL, sp); +} + +void dump_stack(void) +{ + show_task(NULL); +} + +static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name, + unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk) +{ + show_excp_regs(fn_name, trapnr, signr, regs); + tsk->thread.error_code = error_code; + tsk->thread.trap_no = trapnr; + + if (user_mode(regs)) + force_sig(signr, tsk); + + die_if_no_fixup(str, regs, error_code); +} + +static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode) +{ + int get_user_error; + unsigned long aligned_pc; + unsigned long opcode; + + if ((pc & 3) == 1) { + /* SHmedia */ + aligned_pc = pc & ~3; + if (from_user_mode) { + if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) { + get_user_error = -EFAULT; + } else { + get_user_error = __get_user(opcode, (unsigned long *)aligned_pc); + *result_opcode = opcode; + } + return get_user_error; + } else { + /* If the fault was in the kernel, we can either read + * this directly, or if not, we fault. + */ + *result_opcode = *(unsigned long *) aligned_pc; + return 0; + } + } else if ((pc & 1) == 0) { + /* SHcompact */ + /* TODO : provide handling for this. We don't really support + user-mode SHcompact yet, and for a kernel fault, this would + have to come from a module built for SHcompact. */ + return -EFAULT; + } else { + /* misaligned */ + return -EFAULT; + } +} + +static int address_is_sign_extended(__u64 a) +{ + __u64 b; +#if (NEFF == 32) + b = (__u64)(__s64)(__s32)(a & 0xffffffffUL); + return (b == a) ? 
1 : 0; +#else +#error "Sign extend check only works for NEFF==32" +#endif +} + +static int generate_and_check_address(struct pt_regs *regs, + __u32 opcode, + int displacement_not_indexed, + int width_shift, + __u64 *address) +{ + /* return -1 for fault, 0 for OK */ + + __u64 base_address, addr; + int basereg; + + basereg = (opcode >> 20) & 0x3f; + base_address = regs->regs[basereg]; + if (displacement_not_indexed) { + __s64 displacement; + displacement = (opcode >> 10) & 0x3ff; + displacement = ((displacement << 54) >> 54); /* sign extend */ + addr = (__u64)((__s64)base_address + (displacement << width_shift)); + } else { + __u64 offset; + int offsetreg; + offsetreg = (opcode >> 10) & 0x3f; + offset = regs->regs[offsetreg]; + addr = base_address + offset; + } + + /* Check sign extended */ + if (!address_is_sign_extended(addr)) { + return -1; + } + +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + /* Check accessible. For misaligned access in the kernel, assume the + address is always accessible (and if not, just fault when the + load/store gets done.) */ + if (user_mode(regs)) { + if (addr >= TASK_SIZE) { + return -1; + } + /* Do access_ok check later - it depends on whether it's a load or a store. */ + } +#endif + + *address = addr; + return 0; +} + +/* Default value as for sh */ +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) +static int user_mode_unaligned_fixup_count = 10; +static int user_mode_unaligned_fixup_enable = 1; +#endif + +static int kernel_mode_unaligned_fixup_count = 32; + +static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result) +{ + unsigned short x; + unsigned char *p, *q; + p = (unsigned char *) (int) address; + q = (unsigned char *) &x; + q[0] = p[0]; + q[1] = p[1]; + + if (do_sign_extend) { + *result = (__u64)(__s64) *(short *) &x; + } else { + *result = (__u64) x; + } +} + +static void misaligned_kernel_word_store(__u64 address, __u64 value) +{ + unsigned short x; + unsigned char *p, *q; + p = (unsigned char *) (int) address; + q = (unsigned char *) &x; + + x = (__u16) value; + p[0] = q[0]; + p[1] = q[1]; +} + +static int misaligned_load(struct pt_regs *regs, + __u32 opcode, + int displacement_not_indexed, + int width_shift, + int do_sign_extend) +{ + /* Return -1 for a fault, 0 for OK */ + int error; + int destreg; + __u64 address; + + error = generate_and_check_address(regs, opcode, + displacement_not_indexed, width_shift, &address); + if (error < 0) { + return error; + } + + destreg = (opcode >> 4) & 0x3f; +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + if (user_mode(regs)) { + __u64 buffer; + + if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL< 0) { + return -1; /* fault */ + } + switch (width_shift) { + case 1: + if (do_sign_extend) { + regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer; + } else { + regs->regs[destreg] = (__u64) *(__u16 *) &buffer; + } + break; + case 2: + regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer; + break; + case 3: + regs->regs[destreg] = buffer; + break; + default: + printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + } else +#endif + { + /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */ + __u64 lo, hi; + + switch (width_shift) { + case 1: + misaligned_kernel_word_load(address, do_sign_extend, ®s->regs[destreg]); + break; + case 2: + asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address)); + asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address)); + regs->regs[destreg] = lo | hi; + 
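/* the ldlo.l/ldhi.l pair fetches the low and high parts of the misaligned word; OR-ing them reassembles the value */ +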
break; + case 3: + asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address)); + asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address)); + regs->regs[destreg] = lo | hi; + break; + + default: + printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + } + + return 0; + +} + +static int misaligned_store(struct pt_regs *regs, + __u32 opcode, + int displacement_not_indexed, + int width_shift) +{ + /* Return -1 for a fault, 0 for OK */ + int error; + int srcreg; + __u64 address; + + error = generate_and_check_address(regs, opcode, + displacement_not_indexed, width_shift, &address); + if (error < 0) { + return error; + } + + srcreg = (opcode >> 4) & 0x3f; +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + if (user_mode(regs)) { + __u64 buffer; + + if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<regs[srcreg]; + break; + case 2: + *(__u32 *) &buffer = (__u32) regs->regs[srcreg]; + break; + case 3: + buffer = regs->regs[srcreg]; + break; + default: + printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + + if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { + return -1; /* fault */ + } + } else +#endif + { + /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */ + __u64 val = regs->regs[srcreg]; + + switch (width_shift) { + case 1: + misaligned_kernel_word_store(address, val); + break; + case 2: + asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address)); + asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address)); + break; + case 3: + asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address)); + asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address)); + break; + + default: + printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + } + + return 0; + +} + +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) +/* Never need to fix up misaligned FPU accesses within the kernel since that's a real + error. */ +static int misaligned_fpu_load(struct pt_regs *regs, + __u32 opcode, + int displacement_not_indexed, + int width_shift, + int do_paired_load) +{ + /* Return -1 for a fault, 0 for OK */ + int error; + int destreg; + __u64 address; + + error = generate_and_check_address(regs, opcode, + displacement_not_indexed, width_shift, &address); + if (error < 0) { + return error; + } + + destreg = (opcode >> 4) & 0x3f; + if (user_mode(regs)) { + __u64 buffer; + __u32 buflo, bufhi; + + if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL< 0) { + return -1; /* fault */ + } + /* 'current' may be the current owner of the FPU state, so + context switch the registers into memory so they can be + indexed by register number. 
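+ fpsave() below spills them into current->thread.fpu.hard, which the + switch statement then patches in place.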
*/ + if (last_task_used_math == current) { + grab_fpu(); + fpsave(¤t->thread.fpu.hard); + release_fpu(); + last_task_used_math = NULL; + regs->sr |= SR_FD; + } + + buflo = *(__u32*) &buffer; + bufhi = *(1 + (__u32*) &buffer); + + switch (width_shift) { + case 2: + current->thread.fpu.hard.fp_regs[destreg] = buflo; + break; + case 3: + if (do_paired_load) { + current->thread.fpu.hard.fp_regs[destreg] = buflo; + current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; + } else { +#if defined(CONFIG_LITTLE_ENDIAN) + current->thread.fpu.hard.fp_regs[destreg] = bufhi; + current->thread.fpu.hard.fp_regs[destreg+1] = buflo; +#else + current->thread.fpu.hard.fp_regs[destreg] = buflo; + current->thread.fpu.hard.fp_regs[destreg+1] = bufhi; +#endif + } + break; + default: + printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + return 0; + } else { + die ("Misaligned FPU load inside kernel", regs, 0); + return -1; + } + + +} + +static int misaligned_fpu_store(struct pt_regs *regs, + __u32 opcode, + int displacement_not_indexed, + int width_shift, + int do_paired_load) +{ + /* Return -1 for a fault, 0 for OK */ + int error; + int srcreg; + __u64 address; + + error = generate_and_check_address(regs, opcode, + displacement_not_indexed, width_shift, &address); + if (error < 0) { + return error; + } + + srcreg = (opcode >> 4) & 0x3f; + if (user_mode(regs)) { + __u64 buffer; + /* Initialise these to NaNs. */ + __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL; + + if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<thread.fpu.hard); + release_fpu(); + last_task_used_math = NULL; + regs->sr |= SR_FD; + } + + switch (width_shift) { + case 2: + buflo = current->thread.fpu.hard.fp_regs[srcreg]; + break; + case 3: + if (do_paired_load) { + buflo = current->thread.fpu.hard.fp_regs[srcreg]; + bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; + } else { +#if defined(CONFIG_LITTLE_ENDIAN) + bufhi = current->thread.fpu.hard.fp_regs[srcreg]; + buflo = current->thread.fpu.hard.fp_regs[srcreg+1]; +#else + buflo = current->thread.fpu.hard.fp_regs[srcreg]; + bufhi = current->thread.fpu.hard.fp_regs[srcreg+1]; +#endif + } + break; + default: + printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n", + width_shift, (unsigned long) regs->pc); + break; + } + + *(__u32*) &buffer = buflo; + *(1 + (__u32*) &buffer) = bufhi; + if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { + return -1; /* fault */ + } + return 0; + } else { + die ("Misaligned FPU load inside kernel", regs, 0); + return -1; + } +} +#endif + +static int misaligned_fixup(struct pt_regs *regs) +{ + unsigned long opcode; + int error; + int major, minor; + +#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + /* Never fixup user mode misaligned accesses without this option enabled. 
*/ + return -1; +#else + if (!user_mode_unaligned_fixup_enable) return -1; +#endif + + error = read_opcode(regs->pc, &opcode, user_mode(regs)); + if (error < 0) { + return error; + } + major = (opcode >> 26) & 0x3f; + minor = (opcode >> 16) & 0xf; + +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) { + --user_mode_unaligned_fixup_count; + /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */ + printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", + current->comm, current->pid, (__u32)regs->pc, opcode); + } else +#endif + if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) { + --kernel_mode_unaligned_fixup_count; + if (in_interrupt()) { + printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n", + (__u32)regs->pc, opcode); + } else { + printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", + current->comm, current->pid, (__u32)regs->pc, opcode); + } + } + + + switch (major) { + case (0x84>>2): /* LD.W */ + error = misaligned_load(regs, opcode, 1, 1, 1); + break; + case (0xb0>>2): /* LD.UW */ + error = misaligned_load(regs, opcode, 1, 1, 0); + break; + case (0x88>>2): /* LD.L */ + error = misaligned_load(regs, opcode, 1, 2, 1); + break; + case (0x8c>>2): /* LD.Q */ + error = misaligned_load(regs, opcode, 1, 3, 0); + break; + + case (0xa4>>2): /* ST.W */ + error = misaligned_store(regs, opcode, 1, 1); + break; + case (0xa8>>2): /* ST.L */ + error = misaligned_store(regs, opcode, 1, 2); + break; + case (0xac>>2): /* ST.Q */ + error = misaligned_store(regs, opcode, 1, 3); + break; + + case (0x40>>2): /* indexed loads */ + switch (minor) { + case 0x1: /* LDX.W */ + error = misaligned_load(regs, opcode, 0, 1, 1); + break; + case 0x5: /* LDX.UW */ + error = misaligned_load(regs, opcode, 0, 1, 0); + break; + case 0x2: /* LDX.L */ + error = misaligned_load(regs, opcode, 0, 2, 1); + break; + case 0x3: /* LDX.Q */ + error = misaligned_load(regs, opcode, 0, 3, 0); + break; + default: + error = -1; + break; + } + break; + + case (0x60>>2): /* indexed stores */ + switch (minor) { + case 0x1: /* STX.W */ + error = misaligned_store(regs, opcode, 0, 1); + break; + case 0x2: /* STX.L */ + error = misaligned_store(regs, opcode, 0, 2); + break; + case 0x3: /* STX.Q */ + error = misaligned_store(regs, opcode, 0, 3); + break; + default: + error = -1; + break; + } + break; + +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + case (0x94>>2): /* FLD.S */ + error = misaligned_fpu_load(regs, opcode, 1, 2, 0); + break; + case (0x98>>2): /* FLD.P */ + error = misaligned_fpu_load(regs, opcode, 1, 3, 1); + break; + case (0x9c>>2): /* FLD.D */ + error = misaligned_fpu_load(regs, opcode, 1, 3, 0); + break; + case (0x1c>>2): /* floating indexed loads */ + switch (minor) { + case 0x8: /* FLDX.S */ + error = misaligned_fpu_load(regs, opcode, 0, 2, 0); + break; + case 0xd: /* FLDX.P */ + error = misaligned_fpu_load(regs, opcode, 0, 3, 1); + break; + case 0x9: /* FLDX.D */ + error = misaligned_fpu_load(regs, opcode, 0, 3, 0); + break; + default: + error = -1; + break; + } + break; + case (0xb4>>2): /* FLD.S */ + error = misaligned_fpu_store(regs, opcode, 1, 2, 0); + break; + case (0xb8>>2): /* FLD.P */ + error = misaligned_fpu_store(regs, opcode, 1, 3, 1); + break; + case (0xbc>>2): /* FLD.D */ + error = misaligned_fpu_store(regs, opcode, 1, 3, 0); + break; + case (0x3c>>2): /* floating indexed stores */ + switch (minor) { + 
case 0x8: /* FSTX.S */ + error = misaligned_fpu_store(regs, opcode, 0, 2, 0); + break; + case 0xd: /* FSTX.P */ + error = misaligned_fpu_store(regs, opcode, 0, 3, 1); + break; + case 0x9: /* FSTX.D */ + error = misaligned_fpu_store(regs, opcode, 0, 3, 0); + break; + default: + error = -1; + break; + } + break; +#endif + + default: + /* Fault */ + error = -1; + break; + } + + if (error < 0) { + return error; + } else { + regs->pc += 4; /* Skip the instruction that's just been emulated */ + return 0; + } + +} + +static ctl_table unaligned_table[] = { + {1, "kernel_reports", &kernel_mode_unaligned_fixup_count, + sizeof(int), 0644, NULL, &proc_dointvec}, +#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP) + {2, "user_reports", &user_mode_unaligned_fixup_count, + sizeof(int), 0644, NULL, &proc_dointvec}, + {3, "user_enable", &user_mode_unaligned_fixup_enable, + sizeof(int), 0644, NULL, &proc_dointvec}, +#endif + {0} +}; + +static ctl_table unaligned_root[] = { + {1, "unaligned_fixup", NULL, 0, 0555, unaligned_table}, + {0} +}; + +static ctl_table sh64_root[] = { + {1, "sh64", NULL, 0, 0555, unaligned_root}, + {0} +}; +static struct ctl_table_header *sysctl_header; +static int __init init_sysctl(void) +{ + sysctl_header = register_sysctl_table(sh64_root, 0); + return 0; +} + +__initcall(init_sysctl); + + +asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs) +{ + u64 peek_real_address_q(u64 addr); + u64 poke_real_address_q(u64 addr, u64 val); + unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010; + unsigned long long exp_cause; + /* It's not worth ioremapping the debug module registers for the amount + of access we make to them - just go direct to their physical + addresses. */ + exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY); + if (exp_cause & ~4) { + printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n", + (unsigned long)(exp_cause & 0xffffffff)); + } + show_state(); + /* Clear all DEBUGINT causes */ + poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0); +} + diff --git a/arch/sh64/kernel/unwind.c b/arch/sh64/kernel/unwind.c new file mode 100644 index 000000000..f934f97f9 --- /dev/null +++ b/arch/sh64/kernel/unwind.c @@ -0,0 +1,326 @@ +/* + * arch/sh64/kernel/unwind.c + * + * Copyright (C) 2004 Paul Mundt + * Copyright (C) 2004 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +static u8 regcache[63]; + +/* + * Finding the previous stack frame isn't horribly straightforward as it is + * on some other platforms. In the sh64 case, we don't have "linked" stack + * frames, so we need to do a bit of work to determine the previous frame, + * and in turn, the previous r14/r18 pair. + * + * There are generally a few cases which determine where we can find out + * the r14/r18 values. In the general case, this can be determined by poking + * around the prologue of the symbol PC is in (note that we absolutely must + * have frame pointer support as well as the kernel symbol table mapped, + * otherwise we can't even get this far). + * + * In other cases, such as the interrupt/exception path, we can poke around + * the sp/fp. + * + * Notably, this entire approach is somewhat error prone, and in the event + * that the previous frame cannot be determined, that's all we can do. 
+ * Either way, this still leaves us with a more correct backtrace then what + * we would be able to come up with by walking the stack (which is garbage + * for anything beyond the first frame). + * -- PFM. + */ +static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc, + unsigned long *pprev_fp, unsigned long *pprev_pc, + struct pt_regs *regs) +{ + const char *sym; + char *modname, namebuf[128]; + unsigned long offset, size; + unsigned long prologue = 0; + unsigned long fp_displacement = 0; + unsigned long fp_prev = 0; + unsigned long offset_r14 = 0, offset_r18 = 0; + int i, found_prologue_end = 0; + + sym = kallsyms_lookup(pc, &size, &offset, &modname, namebuf); + if (!sym) + return -EINVAL; + + prologue = pc - offset; + if (!prologue) + return -EINVAL; + + /* Validate fp, to avoid risk of dereferencing a bad pointer later. + Assume 128Mb since that's the amount of RAM on a Cayman. Modify + when there is an SH-5 board with more. */ + if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) || + (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) || + ((fp & 7) != 0)) { + return -EINVAL; + } + + /* + * Depth to walk, depth is completely arbitrary. + */ + for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) { + unsigned long op; + u8 major, minor; + u8 src, dest, disp; + + op = *(unsigned long *)prologue; + + major = (op >> 26) & 0x3f; + src = (op >> 20) & 0x3f; + minor = (op >> 16) & 0xf; + disp = (op >> 10) & 0x3f; + dest = (op >> 4) & 0x3f; + + /* + * Stack frame creation happens in a number of ways.. in the + * general case when the stack frame is less than 511 bytes, + * it's generally created by an addi or addi.l: + * + * addi/addi.l r15, -FRAME_SIZE, r15 + * + * in the event that the frame size is bigger than this, it's + * typically created using a movi/sub pair as follows: + * + * movi FRAME_SIZE, rX + * sub r15, rX, r15 + */ + + switch (major) { + case (0x00 >> 2): + switch (minor) { + case 0x8: /* add.l */ + case 0x9: /* add */ + /* Look for r15, r63, r14 */ + if (src == 15 && disp == 63 && dest == 14) + found_prologue_end = 1; + + break; + case 0xa: /* sub.l */ + case 0xb: /* sub */ + if (src != 15 || dest != 15) + continue; + + fp_displacement -= regcache[disp]; + fp_prev = fp - fp_displacement; + break; + } + break; + case (0xa8 >> 2): /* st.l */ + if (src != 15) + continue; + + switch (dest) { + case 14: + if (offset_r14 || fp_displacement == 0) + continue; + + offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); + offset_r14 *= sizeof(unsigned long); + offset_r14 += fp_displacement; + break; + case 18: + if (offset_r18 || fp_displacement == 0) + continue; + + offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); + offset_r18 *= sizeof(unsigned long); + offset_r18 += fp_displacement; + break; + } + + break; + case (0xcc >> 2): /* movi */ + if (dest >= 63) { + printk(KERN_NOTICE "%s: Invalid dest reg %d " + "specified in movi handler. Failed " + "opcode was 0x%lx: ", __FUNCTION__, + dest, op); + + continue; + } + + /* Sign extend */ + regcache[dest] = + ((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54; + break; + case (0xd0 >> 2): /* addi */ + case (0xd4 >> 2): /* addi.l */ + /* Look for r15, -FRAME_SIZE, r15 */ + if (src != 15 || dest != 15) + continue; + + /* Sign extended frame size.. 
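(the 10-bit immediate is recovered by the shift-left-54 / arithmetic-shift-right-54 pair below)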
*/ + fp_displacement += + (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); + fp_prev = fp - fp_displacement; + break; + } + + if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev) + break; + } + + if (offset_r14 == 0 || fp_prev == 0) { + if (!offset_r14) + pr_debug("Unable to find r14 offset\n"); + if (!fp_prev) + pr_debug("Unable to find previous fp\n"); + + return -EINVAL; + } + + /* For innermost leaf function, there might not be a offset_r18 */ + if (!*pprev_pc && (offset_r18 == 0)) + return -EINVAL; + + *pprev_fp = *(unsigned long *)(fp_prev + offset_r14); + + if (offset_r18) + *pprev_pc = *(unsigned long *)(fp_prev + offset_r18); + + *pprev_pc &= ~1; + + return 0; +} + +/* Don't put this on the stack since we'll want to call sh64_unwind + * when we're close to underflowing the stack anyway. */ +static struct pt_regs here_regs; + +extern const char syscall_ret; +extern const char ret_from_syscall; +extern const char ret_from_exception; +extern const char ret_from_irq; + +static void sh64_unwind_inner(struct pt_regs *regs); + +static void unwind_nested (unsigned long pc, unsigned long fp) +{ + if ((fp >= __MEMORY_START) && + ((fp & 7) == 0)) { + sh64_unwind_inner((struct pt_regs *) fp); + } +} + +static void sh64_unwind_inner(struct pt_regs *regs) +{ + unsigned long pc, fp; + int ofs = 0; + int first_pass; + + pc = regs->pc & ~1; + fp = regs->regs[14]; + + first_pass = 1; + for (;;) { + int cond; + unsigned long next_fp, next_pc; + + if (pc == ((unsigned long) &syscall_ret & ~1)) { + printk("SYSCALL\n"); + unwind_nested(pc,fp); + return; + } + + if (pc == ((unsigned long) &ret_from_syscall & ~1)) { + printk("SYSCALL (PREEMPTED)\n"); + unwind_nested(pc,fp); + return; + } + + /* In this case, the PC is discovered by lookup_prev_stack_frame but + it has 4 taken off it to look like the 'caller' */ + if (pc == ((unsigned long) &ret_from_exception & ~1)) { + printk("EXCEPTION\n"); + unwind_nested(pc,fp); + return; + } + + if (pc == ((unsigned long) &ret_from_irq & ~1)) { + printk("IRQ\n"); + unwind_nested(pc,fp); + return; + } + + cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) && + ((pc & 3) == 0) && ((fp & 7) == 0)); + + pc -= ofs; + + printk("[<%08lx>] ", pc); + print_symbol("%s\n", pc); + + if (first_pass) { + /* If the innermost frame is a leaf function, it's + * possible that r18 is never saved out to the stack. + */ + next_pc = regs->regs[18]; + } else { + next_pc = 0; + } + + if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) { + ofs = sizeof(unsigned long); + pc = next_pc & ~1; + fp = next_fp; + } else { + printk("Unable to lookup previous stack frame\n"); + break; + } + first_pass = 0; + } + + printk("\n"); + +} + +void sh64_unwind(struct pt_regs *regs) +{ + if (!regs) { + /* + * Fetch current regs if we have no other saved state to back + * trace from. 
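+		 * The inline asm below copies r14/r15/r18 and tr0-tr7 into
+		 * here_regs ("ori rN, 0, rD" is a plain register move, "gettr"
+		 * reads a target register), and the pta/blink pair records a
+		 * usable PC to start the unwind from.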
+		 */
+		regs = &here_regs;
+
+		__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
+		__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
+		__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
+
+		__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
+		__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
+		__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
+		__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
+		__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
+		__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
+		__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
+		__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
+
+		__asm__ __volatile__ (
+			"pta 0f, tr0\n\t"
+			"blink tr0, %0\n\t"
+			"0: nop"
+			: "=r" (regs->pc)
+		);
+	}
+
+	printk("\nCall Trace:\n");
+	sh64_unwind_inner(regs);
+}
+
diff --git a/arch/sh64/lib/Makefile b/arch/sh64/lib/Makefile
new file mode 100644
index 000000000..0a2dc69be
--- /dev/null
+++ b/arch/sh64/lib/Makefile
@@ -0,0 +1,19 @@
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2000, 2001 Paolo Alberelli
+# Copyright (C) 2003 Paul Mundt
+#
+# Makefile for the SH-5 specific library files..
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+
+# Panic should really be compiled as PIC
+lib-y := udelay.o c-checksum.o dbg.o io.o panic.o memcpy.o copy_user_memcpy.o \
+	page_copy.o page_clear.o
+
diff --git a/arch/sh64/lib/copy_user_memcpy.S b/arch/sh64/lib/copy_user_memcpy.S
new file mode 100644
index 000000000..5b6ca3923
--- /dev/null
+++ b/arch/sh64/lib/copy_user_memcpy.S
@@ -0,0 +1,213 @@
+!
+! Fast SH memcpy
+!
+! by Toshiyasu Morita (tm@netcom.com)
+! hacked by J"orn Rennecke (joern.rennecke@superh.com) ("o for o-umlaut)
+! SH5 code Copyright 2002 SuperH Ltd.
+!
+! Entry: ARG0: destination pointer
+!        ARG1: source pointer
+!        ARG2: byte count
+!
+! Exit:  RESULT: destination pointer
+!        any other registers in the range r0-r7: trashed
+!
+! Notes: Usually one wants to do small reads and write a longword, but
+! unfortunately it is difficult in some cases to concatenate bytes
+! into a longword on the SH, so this does a longword read and small
+! writes.
+!
+! This implementation makes two assumptions about how it is called:
+!
+! 1.: If the byte count is nonzero, the address of the last byte to be
+!     copied is unsigned greater than the address of the first byte to
+!     be copied. This could be easily swapped for a signed comparison,
+!     but the algorithm used needs some comparison.
+!
+! 2.: When there are two or three bytes in the last word of an 11-or-more
+!     bytes memory chunk to be copied, the rest of the word can be read
+!     without side effects.
+!     This could be easily changed by increasing the minimum size of
+!     a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
+!     however, this would cost a few extra cycles on average.
+!     For SHmedia, the assumption is that any quadword can be read in its
+!     entirety if at least one byte is included in the copy.
+
+/* Imported into Linux kernel by Richard Curnow.
This is used to implement the + __copy_user function in the general case, so it has to be a distinct + function from intra-kernel memcpy to allow for exception fix-ups in the + event that the user pointer is bad somewhere in the copy (e.g. due to + running off the end of the vma). + + Note, this algorithm will be slightly wasteful in the case where the source + and destination pointers are equally aligned, because the stlo/sthi pairs + could then be merged back into single stores. If there are a lot of cache + misses, this is probably offset by the stall lengths on the preloads. + +*/ + + .section .text..SHmedia32,"ax" + .little + .balign 32 + .global copy_user_memcpy + .global copy_user_memcpy_end +copy_user_memcpy: + +#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1 +#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1 +#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1 +#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1 + + ld.b r3,0,r63 + pta/l Large,tr0 + movi 25,r0 + bgeu/u r4,r0,tr0 + nsb r4,r0 + shlli r0,5,r0 + movi (L1-L0+63*32 + 1) & 0xffff,r1 + sub r1, r0, r0 +L0: ptrel r0,tr0 + add r2,r4,r5 + ptabs r18,tr1 + add r3,r4,r6 + blink tr0,r63 + +/* Rearranged to make cut2 safe */ + .balign 8 +L4_7: /* 4..7 byte memcpy cntd. */ + stlo.l r2, 0, r0 + or r6, r7, r6 + sthi.l r5, -1, r6 + stlo.l r5, -4, r6 + blink tr1,r63 + + .balign 8 +L1: /* 0 byte memcpy */ + nop + blink tr1,r63 + nop + nop + nop + nop + +L2_3: /* 2 or 3 byte memcpy cntd. */ + st.b r5,-1,r6 + blink tr1,r63 + + /* 1 byte memcpy */ + ld.b r3,0,r0 + st.b r2,0,r0 + blink tr1,r63 + +L8_15: /* 8..15 byte memcpy cntd. */ + stlo.q r2, 0, r0 + or r6, r7, r6 + sthi.q r5, -1, r6 + stlo.q r5, -8, r6 + blink tr1,r63 + + /* 2 or 3 byte memcpy */ + ld.b r3,0,r0 + ld.b r2,0,r63 + ld.b r3,1,r1 + st.b r2,0,r0 + pta/l L2_3,tr0 + ld.b r6,-1,r6 + st.b r2,1,r1 + blink tr0, r63 + + /* 4 .. 7 byte memcpy */ + LDUAL (r3, 0, r0, r1) + pta L4_7, tr0 + ldlo.l r6, -4, r7 + or r0, r1, r0 + sthi.l r2, 3, r0 + ldhi.l r6, -1, r6 + blink tr0, r63 + + /* 8 .. 15 byte memcpy */ + LDUAQ (r3, 0, r0, r1) + pta L8_15, tr0 + ldlo.q r6, -8, r7 + or r0, r1, r0 + sthi.q r2, 7, r0 + ldhi.q r6, -1, r6 + blink tr0, r63 + + /* 16 .. 24 byte memcpy */ + LDUAQ (r3, 0, r0, r1) + LDUAQ (r3, 8, r8, r9) + or r0, r1, r0 + sthi.q r2, 7, r0 + or r8, r9, r8 + sthi.q r2, 15, r8 + ldlo.q r6, -8, r7 + ldhi.q r6, -1, r6 + stlo.q r2, 8, r8 + stlo.q r2, 0, r0 + or r6, r7, r6 + sthi.q r5, -1, r6 + stlo.q r5, -8, r6 + blink tr1,r63 + +Large: + ld.b r2, 0, r63 + pta/l Loop_ua, tr1 + ori r3, -8, r7 + sub r2, r7, r22 + sub r3, r2, r6 + add r2, r4, r5 + ldlo.q r3, 0, r0 + addi r5, -16, r5 + movi 64+8, r27 ! could subtract r7 from that. 
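+	! r27 = 72: the bgtu a few instructions below sends copies shorter
+	! than this straight to Loop_ua, skipping the cache-line loop.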
+ stlo.q r2, 0, r0 + sthi.q r2, 7, r0 + ldx.q r22, r6, r0 + bgtu/l r27, r4, tr1 + + addi r5, -48, r27 + pta/l Loop_line, tr0 + addi r6, 64, r36 + addi r6, -24, r19 + addi r6, -16, r20 + addi r6, -8, r21 + +Loop_line: + ldx.q r22, r36, r63 + synco + alloco r22, 32 + synco + addi r22, 32, r22 + ldx.q r22, r19, r23 + sthi.q r22, -25, r0 + ldx.q r22, r20, r24 + ldx.q r22, r21, r25 + stlo.q r22, -32, r0 + ldx.q r22, r6, r0 + sthi.q r22, -17, r23 + sthi.q r22, -9, r24 + sthi.q r22, -1, r25 + stlo.q r22, -24, r23 + stlo.q r22, -16, r24 + stlo.q r22, -8, r25 + bgeu r27, r22, tr0 + +Loop_ua: + addi r22, 8, r22 + sthi.q r22, -1, r0 + stlo.q r22, -8, r0 + ldx.q r22, r6, r0 + bgtu/l r5, r22, tr1 + + add r3, r4, r7 + ldlo.q r7, -8, r1 + sthi.q r22, 7, r0 + ldhi.q r7, -1, r7 + ptabs r18,tr1 + stlo.q r22, 0, r0 + or r1, r7, r1 + sthi.q r5, 15, r1 + stlo.q r5, 8, r1 + blink tr1, r63 +copy_user_memcpy_end: + nop diff --git a/arch/sh64/lib/page_clear.S b/arch/sh64/lib/page_clear.S new file mode 100644 index 000000000..2aadd2c0f --- /dev/null +++ b/arch/sh64/lib/page_clear.S @@ -0,0 +1,51 @@ +/* + Copyright 2003 Richard Curnow, SuperH (UK) Ltd. + + This file is subject to the terms and conditions of the GNU General Public + License. See the file "COPYING" in the main directory of this archive + for more details. + + Tight version of memset for the case of just clearing a page. It turns out + that having the alloco's spaced out slightly due to the increment/branch + pair causes them to contend less for access to the cache. Similarly, + keeping the stores apart from the allocos causes less contention. => Do two + separate loops. Do multiple stores per loop to amortise the + increment/branch cost a little. + + Parameters: + r2 : source effective address (start of page) + + Always clears 4096 bytes. + +*/ + + .section .text..SHmedia32,"ax" + .little + + .balign 8 + .global sh64_page_clear +sh64_page_clear: + pta/l 1f, tr1 + pta/l 2f, tr2 + ptabs/l r18, tr0 + + movi 4096, r7 + add r2, r7, r7 + add r2, r63, r6 +1: + alloco r6, 0 + addi r6, 32, r6 + bgt/l r7, r6, tr1 + + add r2, r63, r6 +2: + st.q r6, 0, r63 + st.q r6, 8, r63 + st.q r6, 16, r63 + st.q r6, 24, r63 + addi r6, 32, r6 + bgt/l r7, r6, tr2 + + blink tr0, r63 + + diff --git a/arch/sh64/lib/page_copy.S b/arch/sh64/lib/page_copy.S new file mode 100644 index 000000000..804d2a00d --- /dev/null +++ b/arch/sh64/lib/page_copy.S @@ -0,0 +1,82 @@ +/* + Copyright 2003 Richard Curnow, SuperH (UK) Ltd. + + This file is subject to the terms and conditions of the GNU General Public + License. See the file "COPYING" in the main directory of this archive + for more details. + + Tight version of mempy for the case of just copying a page. + Prefetch strategy empirically optimised against RTL simulations + of SH5-101 cut2 eval chip with Cayman board DDR memory. + + Parameters: + r2 : source effective address (start of page) + r3 : destination effective address (start of page) + + Always copies 4096 bytes. + + Points to review. + * Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead. + It seems like the prefetch needs to be at at least 4 lines ahead to get + the data into the cache in time, and the allocos contend with outstanding + prefetches for the same cache set, so it's better to have the numbers + different. + */ + + .section .text..SHmedia32,"ax" + .little + + .balign 8 + .global sh64_page_copy +sh64_page_copy: + + /* Copy 4096 bytes worth of data from r2 to r3. + Do prefetches 4 lines ahead. 
+ Do alloco 2 lines ahead */ + + pta 1f, tr1 + pta 2f, tr2 + pta 3f, tr3 + ptabs r18, tr0 + + ld.q r2, 0x00, r63 + ld.q r2, 0x20, r63 + ld.q r2, 0x40, r63 + ld.q r2, 0x60, r63 + alloco r3, 0x00 + alloco r3, 0x20 + + movi 3968, r6 + add r3, r6, r6 + addi r6, 64, r7 + addi r7, 64, r8 + sub r2, r3, r60 + addi r60, 8, r61 + addi r61, 8, r62 + addi r62, 8, r23 + addi r60, 0x80, r22 + +/* Minimal code size. The extra branches inside the loop don't cost much + because they overlap with the time spent waiting for prefetches to + complete. */ +1: + bge/u r3, r6, tr2 ! skip prefetch for last 4 lines + ldx.q r3, r22, r63 ! prefetch 4 lines hence +2: + bge/u r3, r7, tr3 ! skip alloco for last 2 lines + alloco r3, 0x40 ! alloc destination line 2 lines ahead +3: + ldx.q r3, r60, r36 + ldx.q r3, r61, r37 + ldx.q r3, r62, r38 + ldx.q r3, r23, r39 + st.q r3, 0, r36 + st.q r3, 8, r37 + st.q r3, 16, r38 + st.q r3, 24, r39 + addi r3, 32, r3 + bgt/l r8, r3, tr1 + + blink tr0, r63 ! return + + diff --git a/arch/sh64/lib/panic.c b/arch/sh64/lib/panic.c new file mode 100644 index 000000000..c9eb1cb50 --- /dev/null +++ b/arch/sh64/lib/panic.c @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2003 Richard Curnow, SuperH UK Limited + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include +#include +#include + +/* THIS IS A PHYSICAL ADDRESS */ +#define HDSP2534_ADDR (0x04002100) + +#ifdef CONFIG_SH_CAYMAN + +static void poor_mans_delay(void) +{ + int i; + for (i = 0; i < 2500000; i++) { + } /* poor man's delay */ +} + +static void show_value(unsigned long x) +{ + int i; + unsigned nibble; + for (i = 0; i < 8; i++) { + nibble = ((x >> (i * 4)) & 0xf); + + ctrl_outb(nibble + ((nibble > 9) ? 55 : 48), + HDSP2534_ADDR + 0xe0 + ((7 - i) << 2)); + } +} + +#endif + +void +panic_handler(unsigned long panicPC, unsigned long panicSSR, + unsigned long panicEXPEVT) +{ +#ifdef CONFIG_SH_CAYMAN + while (1) { + /* This piece of code displays the PC on the LED display */ + show_value(panicPC); + poor_mans_delay(); + show_value(panicSSR); + poor_mans_delay(); + show_value(panicEXPEVT); + poor_mans_delay(); + } +#endif + + /* Never return from the panic handler */ + for (;;) ; + +} diff --git a/arch/sh64/mach-cayman/Makefile b/arch/sh64/mach-cayman/Makefile new file mode 100644 index 000000000..4a48b53fc --- /dev/null +++ b/arch/sh64/mach-cayman/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the Hitachi Cayman specific parts of the kernel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +obj-y := setup.o irq.o +obj-$(CONFIG_HEARTBEAT) += led.o + diff --git a/arch/sh64/mach-harp/Makefile b/arch/sh64/mach-harp/Makefile new file mode 100644 index 000000000..63f065bad --- /dev/null +++ b/arch/sh64/mach-harp/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the ST50 Harp specific parts of the kernel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). 
+# + +O_TARGET := harp.o + +obj-y := setup.o + +include $(TOPDIR)/Rules.make + diff --git a/arch/sh64/mach-romram/Makefile b/arch/sh64/mach-romram/Makefile new file mode 100644 index 000000000..02d05c05a --- /dev/null +++ b/arch/sh64/mach-romram/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the SH-5 ROM/RAM specific parts of the kernel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +O_TARGET := romram.o + +obj-y := setup.o + +include $(TOPDIR)/Rules.make + diff --git a/arch/sh64/mach-sim/Makefile b/arch/sh64/mach-sim/Makefile new file mode 100644 index 000000000..819c4078f --- /dev/null +++ b/arch/sh64/mach-sim/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the SH-5 Simulator specific parts of the kernel +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +O_TARGET := sim.o + +obj-y := setup.o + +include $(TOPDIR)/Rules.make + diff --git a/arch/sh64/mm/Makefile b/arch/sh64/mm/Makefile new file mode 100644 index 000000000..ff19378ac --- /dev/null +++ b/arch/sh64/mm/Makefile @@ -0,0 +1,44 @@ +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Copyright (C) 2000, 2001 Paolo Alberelli +# Copyright (C) 2003, 2004 Paul Mundt +# +# Makefile for the sh64-specific parts of the Linux memory manager. +# +# Note! Dependencies are done automagically by 'make dep', which also +# removes any old dependencies. DON'T put your own dependencies here +# unless it's something special (ie not a .c file). +# + +obj-y := init.o fault.o ioremap.o extable.o cache.o tlbmiss.o tlb.o + +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o + +# Special flags for tlbmiss.o. This puts restrictions on the number of +# caller-save registers that the compiler can target when building this file. +# This is required because the code is called from a context in entry.S where +# very few registers have been saved in the exception handler (for speed +# reasons). +# The caller save registers that have been saved and which can be used are +# r2,r3,r4,r5 : argument passing +# r15, r18 : SP and LINK +# tr0-4 : allow all caller-save TR's. The compiler seems to be able to make +# use of them, so it's probably beneficial to performance to save them +# and have them available for it. +# +# The resources not listed below are callee save, i.e. the compiler is free to +# use any of them and will spill them to the stack itself. + +CFLAGS_tlbmiss.o += -ffixed-r7 \ + -ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \ + -ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \ + -ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \ + -ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \ + -ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \ + -ffixed-r41 -ffixed-r42 -ffixed-r43 \ + -ffixed-r60 -ffixed-r61 -ffixed-r62 \ + -fomit-frame-pointer + diff --git a/arch/sh64/mm/tlb.c b/arch/sh64/mm/tlb.c new file mode 100644 index 000000000..d517e7d70 --- /dev/null +++ b/arch/sh64/mm/tlb.c @@ -0,0 +1,166 @@ +/* + * arch/sh64/mm/tlb.c + * + * Copyright (C) 2003 Paul Mundt + * Copyright (C) 2003 Richard Curnow + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + */ +#include +#include +#include +#include +#include + +/** + * sh64_tlb_init + * + * Perform initial setup for the DTLB and ITLB. + */ +int __init sh64_tlb_init(void) +{ + /* Assign some sane DTLB defaults */ + cpu_data->dtlb.entries = 64; + cpu_data->dtlb.step = 0x10; + + cpu_data->dtlb.first = DTLB_FIXED | cpu_data->dtlb.step; + cpu_data->dtlb.next = cpu_data->dtlb.first; + + cpu_data->dtlb.last = DTLB_FIXED | + ((cpu_data->dtlb.entries - 1) * + cpu_data->dtlb.step); + + /* And again for the ITLB */ + cpu_data->itlb.entries = 64; + cpu_data->itlb.step = 0x10; + + cpu_data->itlb.first = ITLB_FIXED | cpu_data->itlb.step; + cpu_data->itlb.next = cpu_data->itlb.first; + cpu_data->itlb.last = ITLB_FIXED | + ((cpu_data->itlb.entries - 1) * + cpu_data->itlb.step); + + return 0; +} + +/** + * sh64_next_free_dtlb_entry + * + * Find the next available DTLB entry + */ +unsigned long long sh64_next_free_dtlb_entry(void) +{ + return cpu_data->dtlb.next; +} + +/** + * sh64_get_wired_dtlb_entry + * + * Allocate a wired (locked-in) entry in the DTLB + */ +unsigned long long sh64_get_wired_dtlb_entry(void) +{ + unsigned long long entry = sh64_next_free_dtlb_entry(); + + cpu_data->dtlb.first += cpu_data->dtlb.step; + cpu_data->dtlb.next += cpu_data->dtlb.step; + + return entry; +} + +/** + * sh64_put_wired_dtlb_entry + * + * @entry: Address of TLB slot. + * + * Free a wired (locked-in) entry in the DTLB. + * + * Works like a stack, last one to allocate must be first one to free. + */ +int sh64_put_wired_dtlb_entry(unsigned long long entry) +{ + __flush_tlb_slot(entry); + + /* + * We don't do any particularly useful tracking of wired entries, + * so this approach works like a stack .. last one to be allocated + * has to be the first one to be freed. + * + * We could potentially load wired entries into a list and work on + * rebalancing the list periodically (which also entails moving the + * contents of a TLB entry) .. though I have a feeling that this is + * more trouble than it's worth. + */ + + /* + * Entry must be valid .. we don't want any ITLB addresses! + */ + if (entry <= DTLB_FIXED) + return -EINVAL; + + /* + * Next, check if we're within range to be freed. (ie, must be the + * entry beneath the first 'free' entry! + */ + if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step)) + return -EINVAL; + + /* If we are, then bring this entry back into the list */ + cpu_data->dtlb.first -= cpu_data->dtlb.step; + cpu_data->dtlb.next = entry; + + return 0; +} + +/** + * sh64_setup_tlb_slot + * + * @config_addr: Address of TLB slot. + * @eaddr: Virtual address. + * @asid: Address Space Identifier. + * @paddr: Physical address. + * + * Load up a virtual<->physical translation for @eaddr<->@paddr in the + * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry). 
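+ *
+ * The slot is written with a putcfg pair below: PTEL into config offset 1
+ * and PTEH into offset 0, both built from @eaddr, @asid and @paddr.
+ *
+ * A sketch of the intended wired-entry usage, based only on the helpers in
+ * this file (not taken from any particular caller):
+ *
+ *	entry = sh64_get_wired_dtlb_entry();
+ *	sh64_setup_tlb_slot(entry, eaddr, get_asid(), paddr);
+ *	...
+ *	sh64_teardown_tlb_slot(entry);
+ *	sh64_put_wired_dtlb_entry(entry);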
+ */ +inline void sh64_setup_tlb_slot(unsigned long long config_addr, + unsigned long eaddr, + unsigned long asid, + unsigned long paddr) +{ + unsigned long long pteh, ptel; + + /* Sign extension */ +#if (NEFF == 32) + pteh = (unsigned long long)(signed long long)(signed long) eaddr; +#else +#error "Can't sign extend more than 32 bits yet" +#endif + pteh &= PAGE_MASK; + pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID; +#if (NEFF == 32) + ptel = (unsigned long long)(signed long long)(signed long) paddr; +#else +#error "Can't sign extend more than 32 bits yet" +#endif + ptel &= PAGE_MASK; + ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE); + + asm volatile("putcfg %0, 1, %1\n\t" + "putcfg %0, 0, %2\n" + : : "r" (config_addr), "r" (ptel), "r" (pteh)); +} + +/** + * sh64_teardown_tlb_slot + * + * @config_addr: Address of TLB slot. + * + * Teardown any existing mapping in the TLB slot @config_addr. + */ +inline void sh64_teardown_tlb_slot(unsigned long long config_addr) + __attribute__ ((alias("__flush_tlb_slot"))); + diff --git a/arch/sh64/mm/tlbmiss.c b/arch/sh64/mm/tlbmiss.c new file mode 100644 index 000000000..a69f7751c --- /dev/null +++ b/arch/sh64/mm/tlbmiss.c @@ -0,0 +1,282 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * arch/sh64/mm/tlbmiss.c + * + * Original code from fault.c + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * Fast PTE->TLB refill path + * Copyright (C) 2003 Richard.Curnow@superh.com + * + * IMPORTANT NOTES : + * The do_fast_page_fault function is called from a context in entry.S where very few registers + * have been saved. In particular, the code in this file must be compiled not to use ANY + * caller-save regiseters that are not part of the restricted save set. Also, it means that + * code in this file must not make calls to functions elsewhere in the kernel, or else the + * excepting context will see corruption in its caller-save registers. Plus, the entry.S save + * area is non-reentrant, so this code has to run with SR.BL==1, i.e. no interrupts taken inside + * it and panic on any exception. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include /* required by inline asm statements */ + +/* Callable from fault.c, so not static */ +inline void __do_tlb_refill(unsigned long address, + unsigned long long is_text_not_data, pte_t *pte) +{ + unsigned long long ptel; + unsigned long long pteh=0; + struct tlb_info *tlbp; + unsigned long long next; + + /* Get PTEL first */ + ptel = pte_val(*pte); + + /* + * Set PTEH register + */ + pteh = address & MMU_VPN_MASK; + + /* Sign extend based on neff. */ +#if (NEFF == 32) + /* Faster sign extension */ + pteh = (unsigned long long)(signed long long)(signed long)pteh; +#else + /* General case */ + pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh; +#endif + + /* Set the ASID. */ + pteh |= get_asid() << PTEH_ASID_SHIFT; + pteh |= PTEH_VALID; + + /* Set PTEL register, set_pte has performed the sign extension */ + ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */ + ptel |= _PAGE_FLAGS_HARDWARE_DEFAULT; /* add default flags */ + + tlbp = is_text_not_data ? 
&(cpu_data->itlb) : &(cpu_data->dtlb); + next = tlbp->next; + __flush_tlb_slot(next); + asm volatile ("putcfg %0,1,%2\n\n\t" + "putcfg %0,0,%1\n" + : : "r" (next), "r" (pteh), "r" (ptel) ); + + next += TLB_STEP; + if (next > tlbp->last) next = tlbp->first; + tlbp->next = next; + +} + +static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long protection_flags, + unsigned long long textaccess, + unsigned long address) +{ + pgd_t *dir; + pmd_t *pmd; + static pte_t *pte; + pte_t entry; + + dir = pgd_offset_k(address); + pmd = pmd_offset(dir, address); + + if (pmd_none(*pmd)) { + return 0; + } + + if (pmd_bad(*pmd)) { + pmd_clear(pmd); + return 0; + } + + pte = pte_offset_kernel(pmd, address); + entry = *pte; + + if (pte_none(entry) || !pte_present(entry)) { + return 0; + } + + if ((pte_val(entry) & protection_flags) != protection_flags) { + return 0; + } + + __do_tlb_refill(address, textaccess, pte); + + return 1; +} + +static int handle_tlbmiss(struct mm_struct *mm, unsigned long long protection_flags, + unsigned long long textaccess, + unsigned long address) +{ + pgd_t *dir; + pmd_t *pmd; + pte_t *pte; + pte_t entry; + + /* NB. The PGD currently only contains a single entry - there is no + page table tree stored for the top half of the address space since + virtual pages in that region should never be mapped in user mode. + (In kernel mode, the only things in that region are the 512Mb super + page (locked in), and vmalloc (modules) + I/O device pages (handled + by handle_vmalloc_fault), so no PGD for the upper half is required + by kernel mode either). + + See how mm->pgd is allocated and initialised in pgd_alloc to see why + the next test is necessary. - RPC */ + if (address >= (unsigned long) TASK_SIZE) { + /* upper half - never has page table entries. */ + return 0; + } + dir = pgd_offset(mm, address); + if (pgd_none(*dir)) { + return 0; + } + if (!pgd_present(*dir)) { + return 0; + } + + pmd = pmd_offset(dir, address); + if (pmd_none(*pmd)) { + return 0; + } + if (!pmd_present(*pmd)) { + return 0; + } + pte = pte_offset_kernel(pmd, address); + entry = *pte; + if (pte_none(entry)) { + return 0; + } + if (!pte_present(entry)) { + return 0; + } + + /* If the page doesn't have sufficient protection bits set to service the + kind of fault being handled, there's not much point doing the TLB refill. + Punt the fault to the general handler. */ + if ((pte_val(entry) & protection_flags) != protection_flags) { + return 0; + } + + __do_tlb_refill(address, textaccess, pte); + + return 1; +} + +/* Put all this information into one structure so that everything is just arithmetic + relative to a single base address. This reduces the number of movi/shori pairs needed + just to load addresses of static data. */ +struct expevt_lookup { + unsigned short protection_flags[8]; + unsigned char is_text_access[8]; + unsigned char is_write_access[8]; +}; + +#define PRU (1<<9) +#define PRW (1<<8) +#define PRX (1<<7) +#define PRR (1<<6) + +#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED) +#define YOUNG (_PAGE_ACCESSED) + +/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether + the fault happened in user mode or privileged mode. */ +static struct expevt_lookup expevt_lookup_table = { + .protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW}, + .is_text_access = {1, 1, 0, 0, 0, 0, 0, 0} +}; + +/* + This routine handles page faults that can be serviced just by refilling a + TLB entry from an existing page table entry. (This case represents a very + large majority of page faults.) 
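+ The TLB write itself is done by __do_tlb_refill(): it flushes the next
+ non-wired slot of the relevant ITLB/DTLB, programs the new PTEH/PTEL pair
+ with putcfg, and advances the round-robin pointer.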
Return 1 if the fault was successfully + handled. Return 0 if the fault could not be handled. (This leads into the + general fault handling in fault.c which deals with mapping file-backed + pages, stack growth, segmentation faults, swapping etc etc) + */ +asmlinkage int do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt, + unsigned long address) +{ + struct task_struct *tsk; + struct mm_struct *mm; + unsigned long long textaccess; + unsigned long long protection_flags; + unsigned long long index; + unsigned long long expevt4; + + /* The next few lines implement a way of hashing EXPEVT into a small array index + which can be used to lookup parameters specific to the type of TLBMISS being + handled. Note: + ITLBMISS has EXPEVT==0xa40 + RTLBMISS has EXPEVT==0x040 + WTLBMISS has EXPEVT==0x060 + */ + + expevt4 = (expevt >> 4); + /* TODO : xor ssr_md into this expression too. Then we can check that PRU is set + when it needs to be. */ + index = expevt4 ^ (expevt4 >> 5); + index &= 7; + protection_flags = expevt_lookup_table.protection_flags[index]; + textaccess = expevt_lookup_table.is_text_access[index]; + +#ifdef CONFIG_SH64_PROC_TLB + ++calls_to_do_fast_page_fault; +#endif + + /* SIM + * Note this is now called with interrupts still disabled + * This is to cope with being called for a missing IO port + * address with interupts disabled. This should be fixed as + * soon as we have a better 'fast path' miss handler. + * + * Plus take care how you try and debug this stuff. + * For example, writing debug data to a port which you + * have just faulted on is not going to work. + */ + + tsk = current; + mm = tsk->mm; + + if ((address >= VMALLOC_START && address < VMALLOC_END) || + (address >= IOBASE_VADDR && address < IOBASE_END)) { + if (ssr_md) { + /* Process-contexts can never have this address range mapped */ + if (handle_vmalloc_fault(mm, protection_flags, textaccess, address)) { + return 1; + } + } + } else if (!in_interrupt() && mm) { + if (handle_tlbmiss(mm, protection_flags, textaccess, address)) { + return 1; + } + } + + return 0; +} + diff --git a/arch/sh64/oprofile/Kconfig b/arch/sh64/oprofile/Kconfig new file mode 100644 index 000000000..19d37730b --- /dev/null +++ b/arch/sh64/oprofile/Kconfig @@ -0,0 +1,23 @@ + +menu "Profiling support" + depends on EXPERIMENTAL + +config PROFILING + bool "Profiling support (EXPERIMENTAL)" + help + Say Y here to enable the extended profiling support mechanisms used + by profilers such as OProfile. + + +config OPROFILE + tristate "OProfile system profiling (EXPERIMENTAL)" + depends on PROFILING + help + OProfile is a profiling system capable of profiling the + whole system, include the kernel, kernel modules, libraries, + and applications. + + If unsure, say N. + +endmenu + diff --git a/arch/sh64/oprofile/Makefile b/arch/sh64/oprofile/Makefile new file mode 100644 index 000000000..11a451f6a --- /dev/null +++ b/arch/sh64/oprofile/Makefile @@ -0,0 +1,12 @@ +obj-$(CONFIG_OPROFILE) += oprofile.o + +DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ + oprof.o cpu_buffer.o buffer_sync.o \ + event_buffer.o oprofile_files.o \ + oprofilefs.o oprofile_stats.o \ + timer_int.o ) + +profdrvr-y := op_model_null.o + +oprofile-y := $(DRIVER_OBJS) $(profdrvr-y) + diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c new file mode 100644 index 000000000..cef6032b8 --- /dev/null +++ b/drivers/char/hpet.c @@ -0,0 +1,1076 @@ +/* + * Intel & MS High Precision Event Timer Implementation. 
+ * Contributors: + * Venki Pallipadi + * Bob Picco + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* + * The High Precision Event Timer driver. + * This driver is closely modelled after the rtc.c driver. + * http://www.intel.com/labs/platcomp/hpet/hpetspec.htm + */ +#define HPET_USER_FREQ (64) +#define HPET_DRIFT (500) + +static u32 hpet_ntimer, hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; + +/* A lock for concurrent access by app and isr hpet activity. */ +static spinlock_t hpet_lock = SPIN_LOCK_UNLOCKED; +/* A lock for concurrent intermodule access to hpet and isr hpet activity. */ +static spinlock_t hpet_task_lock = SPIN_LOCK_UNLOCKED; + +struct hpet_dev { + struct hpets *hd_hpets; + struct hpet *hd_hpet; + struct hpet_timer *hd_timer; + unsigned long hd_ireqfreq; + unsigned long hd_irqdata; + wait_queue_head_t hd_waitqueue; + struct fasync_struct *hd_async_queue; + struct hpet_task *hd_task; + unsigned int hd_flags; + unsigned int hd_irq; + unsigned int hd_hdwirq; +}; + +struct hpets { + struct hpets *hp_next; + struct hpet *hp_hpet; + unsigned long hp_period; + unsigned long hp_delta; + unsigned int hp_ntimer; + unsigned int hp_which; + struct hpet_dev hp_dev[1]; +}; + +static struct hpets *hpets; + +#define HPET_OPEN 0x0001 +#define HPET_IE 0x0002 /* interrupt enabled */ +#define HPET_PERIODIC 0x0004 + +#if BITS_PER_LONG == 64 +#define write_counter(V, MC) writeq(V, MC) +#define read_counter(MC) readq(MC) +#else +#define write_counter(V, MC) writel(V, MC) +#define read_counter(MC) readl(MC) +#endif + +#ifndef readq +static unsigned long long __inline readq(void *addr) +{ + return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL); +} +#endif + +#ifndef writeq +static void __inline writeq(unsigned long long v, void *addr) +{ + writel(v & 0xffffffff, addr); + writel(v >> 32, addr + 4); +} +#endif + +static irqreturn_t hpet_interrupt(int irq, void *data, struct pt_regs *regs) +{ + struct hpet_dev *devp; + unsigned long isr; + + devp = data; + + spin_lock(&hpet_lock); + devp->hd_irqdata++; + + /* + * For non-periodic timers, increment the accumulator. + * This has the effect of treating non-periodic like periodic. 
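+	 * The comparator is re-armed below at (main counter + hd_ireqfreq +
+	 * hp_delta), so a one-shot timer keeps firing at the requested rate.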
+ */ + if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) { + unsigned long m, t; + + t = devp->hd_ireqfreq; + m = read_counter(&devp->hd_hpet->hpet_mc); + write_counter(t + m + devp->hd_hpets->hp_delta, + &devp->hd_timer->hpet_compare); + } + + isr = (1 << (devp - devp->hd_hpets->hp_dev)); + writeq(isr, &devp->hd_hpet->hpet_isr); + spin_unlock(&hpet_lock); + + spin_lock(&hpet_task_lock); + if (devp->hd_task) + devp->hd_task->ht_func(devp->hd_task->ht_data); + spin_unlock(&hpet_task_lock); + + wake_up_interruptible(&devp->hd_waitqueue); + + kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN); + + return IRQ_HANDLED; +} + +static int hpet_open(struct inode *inode, struct file *file) +{ + struct hpet_dev *devp; + struct hpets *hpetp; + int i; + + spin_lock_irq(&hpet_lock); + + for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) + for (i = 0; i < hpetp->hp_ntimer; i++) + if (hpetp->hp_dev[i].hd_flags & HPET_OPEN + || hpetp->hp_dev[i].hd_task) + continue; + else { + devp = &hpetp->hp_dev[i]; + break; + } + + if (!devp) { + spin_unlock_irq(&hpet_lock); + return -EBUSY; + } + + file->private_data = devp; + devp->hd_irqdata = 0; + devp->hd_flags |= HPET_OPEN; + spin_unlock_irq(&hpet_lock); + + return 0; +} + +static ssize_t +hpet_read(struct file *file, char *buf, size_t count, loff_t * ppos) +{ + DECLARE_WAITQUEUE(wait, current); + unsigned long data; + ssize_t retval; + struct hpet_dev *devp; + + devp = file->private_data; + if (!devp->hd_ireqfreq) + return -EIO; + + if (count < sizeof(unsigned long)) + return -EINVAL; + + add_wait_queue(&devp->hd_waitqueue, &wait); + + do { + __set_current_state(TASK_INTERRUPTIBLE); + + spin_lock_irq(&hpet_lock); + data = devp->hd_irqdata; + devp->hd_irqdata = 0; + spin_unlock_irq(&hpet_lock); + + if (data) + break; + else if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto out; + } else if (signal_pending(current)) { + retval = -ERESTARTSYS; + goto out; + } + + schedule(); + + } while (1); + + retval = put_user(data, (unsigned long *)buf); + if (!retval) + retval = sizeof(unsigned long); + out: + current->state = TASK_RUNNING; + remove_wait_queue(&devp->hd_waitqueue, &wait); + + return retval; +} + +static unsigned int hpet_poll(struct file *file, poll_table * wait) +{ + unsigned long v; + struct hpet_dev *devp; + + devp = file->private_data; + + if (!devp->hd_ireqfreq) + return 0; + + poll_wait(file, &devp->hd_waitqueue, wait); + + spin_lock_irq(&hpet_lock); + v = devp->hd_irqdata; + spin_unlock_irq(&hpet_lock); + + if (v != 0) + return POLLIN | POLLRDNORM; + + return 0; +} + +static int hpet_mmap(struct file *file, struct vm_area_struct *vma) +{ +#ifdef CONFIG_HPET_NOMMAP + return -ENOSYS; +#else + struct hpet_dev *devp; + unsigned long addr; + + if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) + return -EINVAL; + + if (vma->vm_flags & VM_WRITE) + return -EPERM; + + devp = file->private_data; + addr = (unsigned long)devp->hd_hpet; + + if (addr & (PAGE_SIZE - 1)) + return -ENOSYS; + + vma->vm_flags |= VM_IO; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + addr = __pa(addr); + + if (remap_page_range + (vma, vma->vm_start, addr, PAGE_SIZE, vma->vm_page_prot)) { + printk(KERN_ERR "remap_page_range failed in hpet.c\n"); + return -EAGAIN; + } + + return 0; +#endif +} + +static int hpet_fasync(int fd, struct file *file, int on) +{ + struct hpet_dev *devp; + + devp = file->private_data; + + if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0) + return 0; + else + return -EIO; +} + +static 
int hpet_release(struct inode *inode, struct file *file) +{ + struct hpet_dev *devp; + struct hpet_timer *timer; + int irq = 0; + + devp = file->private_data; + timer = devp->hd_timer; + + spin_lock_irq(&hpet_lock); + + writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK), + &timer->hpet_config); + + irq = devp->hd_irq; + devp->hd_irq = 0; + + devp->hd_ireqfreq = 0; + + if (devp->hd_flags & HPET_PERIODIC + && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) { + unsigned long v; + + v = readq(&timer->hpet_config); + v ^= Tn_TYPE_CNF_MASK; + writeq(v, &timer->hpet_config); + } + + devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC); + spin_unlock_irq(&hpet_lock); + + if (irq) + free_irq(irq, devp); + + if (file->f_flags & FASYNC) + hpet_fasync(-1, file, 0); + + file->private_data = 0; + return 0; +} + +static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int); + +static int +hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct hpet_dev *devp; + + devp = file->private_data; + return hpet_ioctl_common(devp, cmd, arg, 0); +} + +static int hpet_ioctl_ieon(struct hpet_dev *devp) +{ + struct hpet_timer *timer; + struct hpet *hpet; + struct hpets *hpetp; + int irq; + unsigned long g, v, t, m; + unsigned long flags, isr; + + timer = devp->hd_timer; + hpet = devp->hd_hpet; + hpetp = devp->hd_hpets; + + v = readq(&timer->hpet_config); + spin_lock_irq(&hpet_lock); + + if (devp->hd_flags & HPET_IE) { + spin_unlock_irq(&hpet_lock); + return -EBUSY; + } + + devp->hd_flags |= HPET_IE; + spin_unlock_irq(&hpet_lock); + + t = readq(&timer->hpet_config); + irq = devp->hd_hdwirq; + + if (irq) { + char name[7]; + + sprintf(name, "hpet%d", (int)(devp - hpetp->hp_dev)); + + if (request_irq + (irq, hpet_interrupt, SA_INTERRUPT, name, (void *)devp)) { + printk(KERN_ERR "hpet: IRQ %d is not free\n", irq); + irq = 0; + } + } + + if (irq == 0) { + spin_lock_irq(&hpet_lock); + devp->hd_flags ^= HPET_IE; + spin_unlock_irq(&hpet_lock); + return -EIO; + } + + devp->hd_irq = irq; + t = devp->hd_ireqfreq; + v = readq(&timer->hpet_config); + g = v | Tn_INT_ENB_CNF_MASK; + + if (devp->hd_flags & HPET_PERIODIC) { + write_counter(t, &timer->hpet_compare); + g |= Tn_TYPE_CNF_MASK; + v |= Tn_TYPE_CNF_MASK; + writeq(v, &timer->hpet_config); + v |= Tn_VAL_SET_CNF_MASK; + writeq(v, &timer->hpet_config); + local_irq_save(flags); + m = read_counter(&hpet->hpet_mc); + write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); + } else { + local_irq_save(flags); + m = read_counter(&hpet->hpet_mc); + write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); + } + + isr = (1 << (devp - hpets->hp_dev)); + writeq(isr, &hpet->hpet_isr); + writeq(g, &timer->hpet_config); + local_irq_restore(flags); + + return 0; +} + +static inline unsigned long hpet_time_div(unsigned long dis) +{ + unsigned long long m = 1000000000000000ULL; + + do_div(m, dis); + + return (unsigned long)m; +} + +static int +hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel) +{ + struct hpet_timer *timer; + struct hpet *hpet; + struct hpets *hpetp; + int err; + unsigned long v; + + switch (cmd) { + case HPET_IE_OFF: + case HPET_INFO: + case HPET_EPI: + case HPET_DPI: + case HPET_IRQFREQ: + timer = devp->hd_timer; + hpet = devp->hd_hpet; + hpetp = devp->hd_hpets; + break; + case HPET_IE_ON: + return hpet_ioctl_ieon(devp); + default: + return -EINVAL; + } + + err = 0; + + switch (cmd) { + case HPET_IE_OFF: + if ((devp->hd_flags & HPET_IE) == 0) + break; + v = 
readq(&timer->hpet_config); + v &= ~Tn_INT_ENB_CNF_MASK; + writeq(v, &timer->hpet_config); + if (devp->hd_irq) { + free_irq(devp->hd_irq, devp); + devp->hd_irq = 0; + } + devp->hd_flags ^= HPET_IE; + break; + case HPET_INFO: + { + struct hpet_info info; + + info.hi_ireqfreq = hpet_time_div(hpetp->hp_period * + devp->hd_ireqfreq); + info.hi_flags = + readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK; + info.hi_hpet = devp->hd_hpets->hp_which; + info.hi_timer = devp - devp->hd_hpets->hp_dev; + if (copy_to_user((void *)arg, &info, sizeof(info))) + err = -EFAULT; + break; + } + case HPET_EPI: + v = readq(&timer->hpet_config); + if ((v & Tn_PER_INT_CAP_MASK) == 0) { + err = -ENXIO; + break; + } + devp->hd_flags |= HPET_PERIODIC; + break; + case HPET_DPI: + v = readq(&timer->hpet_config); + if ((v & Tn_PER_INT_CAP_MASK) == 0) { + err = -ENXIO; + break; + } + if (devp->hd_flags & HPET_PERIODIC && + readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) { + v = readq(&timer->hpet_config); + v ^= Tn_TYPE_CNF_MASK; + writeq(v, &timer->hpet_config); + } + devp->hd_flags &= ~HPET_PERIODIC; + break; + case HPET_IRQFREQ: + if (!kernel && (arg > hpet_max_freq) && + !capable(CAP_SYS_RESOURCE)) { + err = -EACCES; + break; + } + + if (arg & (arg - 1)) { + err = -EINVAL; + break; + } + + devp->hd_ireqfreq = hpet_time_div(hpetp->hp_period * arg); + } + + return err; +} + +static struct file_operations hpet_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = hpet_read, + .poll = hpet_poll, + .ioctl = hpet_ioctl, + .open = hpet_open, + .release = hpet_release, + .fasync = hpet_fasync, + .mmap = hpet_mmap, +}; + +EXPORT_SYMBOL(hpet_alloc); +EXPORT_SYMBOL(hpet_register); +EXPORT_SYMBOL(hpet_unregister); +EXPORT_SYMBOL(hpet_control); + +int hpet_register(struct hpet_task *tp, int periodic) +{ + unsigned int i; + u64 mask; + struct hpet_timer *timer; + struct hpet_dev *devp; + struct hpets *hpetp; + + switch (periodic) { + case 1: + mask = Tn_PER_INT_CAP_MASK; + break; + case 0: + mask = 0; + break; + default: + return -EINVAL; + } + + spin_lock_irq(&hpet_task_lock); + spin_lock(&hpet_lock); + + for (devp = 0, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) + for (timer = hpetp->hp_hpet->hpet_timers, i = 0; + i < hpetp->hp_ntimer; i++, timer++) { + if ((readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK) + != mask) + continue; + + devp = &hpetp->hp_dev[i]; + + if (devp->hd_flags & HPET_OPEN || devp->hd_task) { + devp = 0; + continue; + } + + tp->ht_opaque = devp; + devp->hd_task = tp; + break; + } + + spin_unlock(&hpet_lock); + spin_unlock_irq(&hpet_task_lock); + + if (tp->ht_opaque) + return 0; + else + return -EBUSY; +} + +static inline int hpet_tpcheck(struct hpet_task *tp) +{ + struct hpet_dev *devp; + struct hpets *hpetp; + + devp = tp->ht_opaque; + + if (!devp) + return -ENXIO; + + for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) + if (devp >= hpetp->hp_dev + && devp < (hpetp->hp_dev + hpetp->hp_ntimer) + && devp->hd_hpet == hpetp->hp_hpet) + return 0; + + return -ENXIO; +} + +int hpet_unregister(struct hpet_task *tp) +{ + struct hpet_dev *devp; + struct hpet_timer *timer; + int err; + + if ((err = hpet_tpcheck(tp))) + return err; + + spin_lock_irq(&hpet_task_lock); + spin_lock(&hpet_lock); + + devp = tp->ht_opaque; + if (devp->hd_task != tp) { + spin_unlock(&hpet_lock); + spin_unlock_irq(&hpet_task_lock); + return -ENXIO; + } + + timer = devp->hd_timer; + writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK), + &timer->hpet_config); + devp->hd_flags &= ~(HPET_IE | HPET_PERIODIC); + 
devp->hd_task = 0; + spin_unlock(&hpet_lock); + spin_unlock_irq(&hpet_task_lock); + + return 0; +} + +int hpet_control(struct hpet_task *tp, unsigned int cmd, unsigned long arg) +{ + struct hpet_dev *devp; + int err; + + if ((err = hpet_tpcheck(tp))) + return err; + + spin_lock_irq(&hpet_lock); + devp = tp->ht_opaque; + if (devp->hd_task != tp) { + spin_unlock_irq(&hpet_lock); + return -ENXIO; + } + spin_unlock_irq(&hpet_lock); + return hpet_ioctl_common(devp, cmd, arg, 1); +} + +#ifdef CONFIG_TIME_INTERPOLATION + +static unsigned long hpet_offset, last_wall_hpet; +static long hpet_nsecs_per_cycle, hpet_cycles_per_sec; + +static unsigned long hpet_getoffset(void) +{ + return hpet_offset + (read_counter(&hpets->hp_hpet->hpet_mc) - + last_wall_hpet) * hpet_nsecs_per_cycle; +} + +static void hpet_update(long delta) +{ + unsigned long mc; + unsigned long offset; + + mc = read_counter(&hpets->hp_hpet->hpet_mc); + offset = hpet_offset + (mc - last_wall_hpet) * hpet_nsecs_per_cycle; + + if (delta < 0 || (unsigned long)delta < offset) + hpet_offset = offset - delta; + else + hpet_offset = 0; + last_wall_hpet = mc; +} + +static void hpet_reset(void) +{ + hpet_offset = 0; + last_wall_hpet = read_counter(&hpets->hp_hpet->hpet_mc); +} + +static struct time_interpolator hpet_interpolator = { + .get_offset = hpet_getoffset, + .update = hpet_update, + .reset = hpet_reset +}; + +#endif + +static ctl_table hpet_table[] = { + { + .ctl_name = 1, + .procname = "max-user-freq", + .data = &hpet_max_freq, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, + {.ctl_name = 0} +}; + +static ctl_table hpet_root[] = { + { + .ctl_name = 1, + .procname = "hpet", + .maxlen = 0, + .mode = 0555, + .child = hpet_table, + }, + {.ctl_name = 0} +}; + +static ctl_table dev_root[] = { + { + .ctl_name = CTL_DEV, + .procname = "dev", + .maxlen = 0, + .mode = 0555, + .child = hpet_root, + }, + {.ctl_name = 0} +}; + +static struct ctl_table_header *sysctl_header; + +static void *hpet_start(struct seq_file *s, loff_t * pos) +{ + struct hpets *hpetp; + loff_t n; + + for (n = *pos, hpetp = hpets; hpetp; hpetp = hpetp->hp_next) + if (!n--) + return hpetp; + + return 0; +} + +static void *hpet_next(struct seq_file *s, void *v, loff_t * pos) +{ + struct hpets *hpetp; + + hpetp = v; + ++*pos; + return hpetp->hp_next; +} + +static void hpet_stop(struct seq_file *s, void *v) +{ + return; +} + +static int hpet_show(struct seq_file *s, void *v) +{ + struct hpets *hpetp; + struct hpet *hpet; + u64 cap, vendor, period; + + hpetp = v; + hpet = hpetp->hp_hpet; + + cap = readq(&hpet->hpet_cap); + period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >> + HPET_COUNTER_CLK_PERIOD_SHIFT; + vendor = (cap & HPET_VENDOR_ID_MASK) >> HPET_VENDOR_ID_SHIFT; + + seq_printf(s, + "HPET%d period = %d 10**-15 vendor = 0x%x number timer = %d\n", + hpetp->hp_which, (u32) period, (u32) vendor, + hpetp->hp_ntimer); + + return 0; +} + +static struct seq_operations hpet_seq_ops = { + .start = hpet_start, + .next = hpet_next, + .stop = hpet_stop, + .show = hpet_show +}; + +static int hpet_proc_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &hpet_seq_ops); +} + +static struct file_operations hpet_proc_fops = { + .open = hpet_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release +}; + +/* + * Adjustment for when arming the timer with + * initial conditions. That is, main counter + * ticks expired before interrupts are enabled. 
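+ * hpet_calibrate() below measures this by re-arming a spare timer in a
+ * tight loop over a short calibration interval and averaging how far the
+ * main counter moves per re-arm; the result is stored in hp_delta and
+ * added whenever a comparator is programmed.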
+ */ +#define TICK_CALIBRATE (1000UL) + +static unsigned long __init hpet_calibrate(struct hpets *hpetp) +{ + struct hpet_timer *timer; + unsigned long t, m, count, i, flags, start; + struct hpet_dev *devp; + int j; + struct hpet *hpet; + + for (timer = 0, j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; + j++, devp++) + if ((devp->hd_flags & HPET_OPEN) == 0) { + timer = devp->hd_timer; + break; + } + + if (!timer) + return 0; + + hpet = hpets->hp_hpet; + t = read_counter(&timer->hpet_compare); + + i = 0; + count = hpet_time_div(hpetp->hp_period * TICK_CALIBRATE); + + local_irq_save(flags); + + start = read_counter(&hpet->hpet_mc); + + do { + m = read_counter(&hpet->hpet_mc); + write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare); + } while (i++, (m - start) < count); + + local_irq_restore(flags); + + return (m - start) / i; +} + +int __init hpet_alloc(struct hpet_data *hdp) +{ + u64 cap, mcfg; + struct hpet_dev *devp; + u32 i, ntimer; + struct hpets *hpetp; + size_t siz; + struct hpet *hpet; + static struct hpets *last __initdata = (struct hpets *)0; + + /* + * hpet_alloc can be called by platform dependent code. + * if platform dependent code has allocated the hpet + * ACPI also reports hpet, then we catch it here. + */ + for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) + if (hpetp->hp_hpet == (struct hpet *)(hdp->hd_address)) + return 0; + + siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) * + sizeof(struct hpet_dev)); + + hpetp = kmalloc(siz, GFP_KERNEL); + + if (!hpetp) + return -ENOMEM; + + memset(hpetp, 0, siz); + + hpetp->hp_which = hpet_nhpet++; + hpetp->hp_hpet = (struct hpet *)hdp->hd_address; + + hpetp->hp_ntimer = hdp->hd_nirqs; + + for (i = 0; i < hdp->hd_nirqs; i++) + hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i]; + + hpet = hpetp->hp_hpet; + + cap = readq(&hpet->hpet_cap); + + ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1; + + if (hpetp->hp_ntimer != ntimer) { + printk(KERN_WARNING "hpet: number irqs doesn't agree" + " with number of timers\n"); + kfree(hpetp); + return -ENODEV; + } + + if (last) + last->hp_next = hpetp; + else + hpets = hpetp; + + last = hpetp; + + hpetp->hp_period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >> + HPET_COUNTER_CLK_PERIOD_SHIFT; + + mcfg = readq(&hpet->hpet_config); + if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) { + write_counter(0L, &hpet->hpet_mc); + mcfg |= HPET_ENABLE_CNF_MASK; + writeq(mcfg, &hpet->hpet_config); + } + + for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; + i++, hpet_ntimer++, devp++) { + unsigned long v; + struct hpet_timer *timer; + + timer = &hpet->hpet_timers[devp - hpetp->hp_dev]; + v = readq(&timer->hpet_config); + + devp->hd_hpets = hpetp; + devp->hd_hpet = hpet; + devp->hd_timer = timer; + + /* + * If the timer was reserved by platform code, + * then make timer unavailable for opens. 
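+		 * Marking it HPET_OPEN keeps both hpet_open() and
+		 * hpet_register() from handing this timer out later.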
+ */ + if (hdp->hd_state & (1 << i)) { + devp->hd_flags = HPET_OPEN; + continue; + } + + init_waitqueue_head(&devp->hd_waitqueue); + } + + hpetp->hp_delta = hpet_calibrate(hpetp); + + return 0; +} + +static acpi_status __init hpet_resources(struct acpi_resource *res, void *data) +{ + struct hpet_data *hdp; + acpi_status status; + struct acpi_resource_address64 addr; + struct hpets *hpetp; + + hdp = data; + + status = acpi_resource_to_address64(res, &addr); + + if (ACPI_SUCCESS(status)) { + unsigned long size; + + size = addr.max_address_range - addr.min_address_range + 1; + hdp->hd_address = + (unsigned long)ioremap(addr.min_address_range, size); + + for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next) + if (hpetp->hp_hpet == (struct hpet *)(hdp->hd_address)) + return -EBUSY; + } else if (res->id == ACPI_RSTYPE_EXT_IRQ) { + struct acpi_resource_ext_irq *irqp; + int i; + + irqp = &res->data.extended_irq; + + if (irqp->number_of_interrupts > 0) { + hdp->hd_nirqs = irqp->number_of_interrupts; + + for (i = 0; i < hdp->hd_nirqs; i++) +#ifdef CONFIG_IA64 + hdp->hd_irq[i] = + acpi_register_gsi(irqp->interrupts[i], + irqp->edge_level, + irqp->active_high_low); +#else + hdp->hd_irq[i] = irqp->interrupts[i]; +#endif + } + } + + return AE_OK; +} + +static int __init hpet_acpi_add(struct acpi_device *device) +{ + acpi_status result; + struct hpet_data data; + + memset(&data, 0, sizeof(data)); + + result = + acpi_walk_resources(device->handle, METHOD_NAME__CRS, + hpet_resources, &data); + + if (ACPI_FAILURE(result)) + return -ENODEV; + + if (!data.hd_address || !data.hd_nirqs) { + printk("%s: no address or irqs in _CRS\n", __FUNCTION__); + return -ENODEV; + } + + return hpet_alloc(&data); +} + +static int __init hpet_acpi_remove(struct acpi_device *device, int type) +{ + return 0; +} + +static struct acpi_driver hpet_acpi_driver __initdata = { + .name = "hpet", + .class = "", + .ids = "PNP0103", + .ops = { + .add = hpet_acpi_add, + .remove = hpet_acpi_remove, + }, +}; + +static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops }; + +static int __init hpet_init(void) +{ + struct proc_dir_entry *entry; + + (void)acpi_bus_register_driver(&hpet_acpi_driver); + + if (hpets) { + if (misc_register(&hpet_misc)) + return -ENODEV; + + entry = create_proc_entry("driver/hpet", 0, 0); + + if (entry) + entry->proc_fops = &hpet_proc_fops; + + sysctl_header = register_sysctl_table(dev_root, 0); + +#ifdef CONFIG_TIME_INTERPOLATION + { + struct hpet *hpet; + + hpet = hpets->hp_hpet; + hpet_cycles_per_sec = hpet_time_div(hpets->hp_period); + hpet_interpolator.frequency = hpet_cycles_per_sec; + hpet_interpolator.drift = hpet_cycles_per_sec * + HPET_DRIFT / 1000000; + hpet_nsecs_per_cycle = 1000000000 / hpet_cycles_per_sec; + register_time_interpolator(&hpet_interpolator); + } +#endif + return 0; + } else + return -ENODEV; +} + +static void __exit hpet_exit(void) +{ + acpi_bus_unregister_driver(&hpet_acpi_driver); + + if (hpets) { + unregister_sysctl_table(sysctl_header); + remove_proc_entry("driver/hpet", NULL); + } + + return; +} + +module_init(hpet_init); +module_exit(hpet_exit); +MODULE_AUTHOR("Bob Picco "); +MODULE_LICENSE("GPL"); diff --git a/drivers/char/lcd.h b/drivers/char/lcd.h new file mode 100644 index 000000000..8aed49850 --- /dev/null +++ b/drivers/char/lcd.h @@ -0,0 +1,184 @@ +/* + * LED, LCD and Button panel driver for Cobalt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. 
See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 1996, 1997 by Andrew Bose + * + * Linux kernel version history: + * March 2001: Ported from 2.0.34 by Liam Davies + * + */ + +// function headers + +static int dqpoll(volatile unsigned long, volatile unsigned char ); +static int timeout(volatile unsigned long); + +#define LCD_CHARS_PER_LINE 40 +#define FLASH_SIZE 524288 +#define MAX_IDLE_TIME 120 + +struct lcd_display { + unsigned long buttons; + int size1; + int size2; + unsigned char line1[LCD_CHARS_PER_LINE]; + unsigned char line2[LCD_CHARS_PER_LINE]; + unsigned char cursor_address; + unsigned char character; + unsigned char leds; + unsigned char *RomImage; +}; + + + +#define LCD_DRIVER "Cobalt LCD Driver v2.10" + +#define kLCD_IR 0x0F000000 +#define kLCD_DR 0x0F000010 +#define kGPI 0x0D000000 +#define kLED 0x0C000000 + +#define kDD_R00 0x00 +#define kDD_R01 0x27 +#define kDD_R10 0x40 +#define kDD_R11 0x67 + +#define kLCD_Addr 0x00000080 + +#define LCDTimeoutValue 0xfff + + +// Flash definitions AMD 29F040 +#define kFlashBase 0x0FC00000 + +#define kFlash_Addr1 0x5555 +#define kFlash_Addr2 0x2AAA +#define kFlash_Data1 0xAA +#define kFlash_Data2 0x55 +#define kFlash_Prog 0xA0 +#define kFlash_Erase3 0x80 +#define kFlash_Erase6 0x10 +#define kFlash_Read 0xF0 + +#define kFlash_ID 0x90 +#define kFlash_VenAddr 0x00 +#define kFlash_DevAddr 0x01 +#define kFlash_VenID 0x01 +#define kFlash_DevID 0xA4 // 29F040 +//#define kFlash_DevID 0xAD // 29F016 + + +// Macros + +#define LCDWriteData(x) outl((x << 24), kLCD_DR) +#define LCDWriteInst(x) outl((x << 24), kLCD_IR) + +#define LCDReadData (inl(kLCD_DR) >> 24) +#define LCDReadInst (inl(kLCD_IR) >> 24) + +#define GPIRead (inl(kGPI) >> 24) + +#define LEDSet(x) outb((char)x, kLED) + +#define WRITE_GAL(x,y) outl(y, 0x04000000 | (x)) +#define BusyCheck() while ((LCDReadInst & 0x80) == 0x80) + +#define WRITE_FLASH(x,y) outb((char)y, kFlashBase | (x)) +#define READ_FLASH(x) (inb(kFlashBase | (x))) + + + +/* + * Function command codes for io_ctl. 
+ */ +#define LCD_On 1 +#define LCD_Off 2 +#define LCD_Clear 3 +#define LCD_Reset 4 +#define LCD_Cursor_Left 5 +#define LCD_Cursor_Right 6 +#define LCD_Disp_Left 7 +#define LCD_Disp_Right 8 +#define LCD_Get_Cursor 9 +#define LCD_Set_Cursor 10 +#define LCD_Home 11 +#define LCD_Read 12 +#define LCD_Write 13 +#define LCD_Cursor_Off 14 +#define LCD_Cursor_On 15 +#define LCD_Get_Cursor_Pos 16 +#define LCD_Set_Cursor_Pos 17 +#define LCD_Blink_Off 18 + +#define LED_Set 40 +#define LED_Bit_Set 41 +#define LED_Bit_Clear 42 + + +// Button defs +#define BUTTON_Read 50 + +// Flash command codes +#define FLASH_Erase 60 +#define FLASH_Burn 61 +#define FLASH_Read 62 + + +// Ethernet LINK check hackaroo +#define LINK_Check 90 +#define LINK_Check_2 91 + +// Button patterns _B - single layer lcd boards + +#define BUTTON_NONE 0x3F +#define BUTTON_NONE_B 0xFE + +#define BUTTON_Left 0x3B +#define BUTTON_Left_B 0xFA + +#define BUTTON_Right 0x37 +#define BUTTON_Right_B 0xDE + +#define BUTTON_Up 0x2F +#define BUTTON_Up_B 0xF6 + +#define BUTTON_Down 0x1F +#define BUTTON_Down_B 0xEE + +#define BUTTON_Next 0x3D +#define BUTTON_Next_B 0x7E + +#define BUTTON_Enter 0x3E +#define BUTTON_Enter_B 0xBE + +#define BUTTON_Reset_B 0xFC + + +// debounce constants + +#define BUTTON_SENSE 160000 +#define BUTTON_DEBOUNCE 5000 + + +// Galileo register stuff + +#define kGal_DevBank2Cfg 0x1466DB33 +#define kGal_DevBank2PReg 0x464 +#define kGal_DevBank3Cfg 0x146FDFFB +#define kGal_DevBank3PReg 0x468 + +// Network + +#define kIPADDR 1 +#define kNETMASK 2 +#define kGATEWAY 3 +#define kDNS 4 + +#define kClassA 5 +#define kClassB 6 +#define kClassC 7 + diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c new file mode 100644 index 000000000..61457c505 --- /dev/null +++ b/drivers/firmware/pcdp.c @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2002, 2003, 2004 Hewlett-Packard Co. + * Khalid Aziz + * Alex Williamson + * Bjorn Helgaas + * + * Parse the EFI PCDP table to locate the console device. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "pcdp.h" + +static inline int +uart_irq_supported(int rev, struct pcdp_uart *uart) +{ + if (rev < 3) + return uart->pci_func & PCDP_UART_IRQ; + return uart->flags & PCDP_UART_IRQ; +} + +static inline int +uart_pci(int rev, struct pcdp_uart *uart) +{ + if (rev < 3) + return uart->pci_func & PCDP_UART_PCI; + return uart->flags & PCDP_UART_PCI; +} + +static inline int +uart_active_high_low(int rev, struct pcdp_uart *uart) +{ + if (uart_pci(rev, uart) || uart->flags & PCDP_UART_ACTIVE_LOW) + return ACPI_ACTIVE_LOW; + return ACPI_ACTIVE_HIGH; +} + +static inline int +uart_edge_level(int rev, struct pcdp_uart *uart) +{ + if (uart_pci(rev, uart)) + return ACPI_LEVEL_SENSITIVE; + if (rev < 3 || uart->flags & PCDP_UART_EDGE_SENSITIVE) + return ACPI_EDGE_SENSITIVE; + return ACPI_LEVEL_SENSITIVE; +} + +static void __init +setup_serial_console(int rev, struct pcdp_uart *uart) +{ +#ifdef CONFIG_SERIAL_8250_CONSOLE + struct uart_port port; + static char options[16]; + int mapsize = 64; + + memset(&port, 0, sizeof(port)); + port.uartclk = uart->clock_rate; + if (!port.uartclk) /* some FW doesn't supply this */ + port.uartclk = BASE_BAUD * 16; + + if (uart->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + port.mapbase = uart->addr.address; + port.membase = ioremap(port.mapbase, mapsize); + if (!port.membase) { + printk(KERN_ERR "%s: couldn't ioremap 0x%lx-0x%lx\n", + __FUNCTION__, port.mapbase, port.mapbase + mapsize); + return; + } + port.iotype = UPIO_MEM; + } else if (uart->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { + port.iobase = uart->addr.address; + port.iotype = UPIO_PORT; + } else + return; + + switch (uart->pci_prog_intfc) { + case 0x0: port.type = PORT_8250; break; + case 0x1: port.type = PORT_16450; break; + case 0x2: port.type = PORT_16550; break; + case 0x3: port.type = PORT_16650; break; + case 0x4: port.type = PORT_16750; break; + case 0x5: port.type = PORT_16850; break; + case 0x6: port.type = PORT_16C950; break; + default: port.type = PORT_UNKNOWN; break; + } + + port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; + + if (uart_irq_supported(rev, uart)) { + port.irq = acpi_register_gsi(uart->gsi, + uart_active_high_low(rev, uart), + uart_edge_level(rev, uart)); + port.flags |= UPF_AUTO_IRQ; /* some FW reported wrong GSI */ + if (uart_pci(rev, uart)) + port.flags |= UPF_SHARE_IRQ; + } + + if (early_serial_setup(&port) < 0) + return; + + snprintf(options, sizeof(options), "%lun%d", uart->baud, + uart->bits ? uart->bits : 8); + add_preferred_console("ttyS", port.line, options); + + printk(KERN_INFO "PCDP: serial console at %s 0x%lx (ttyS%d, options %s)\n", + port.iotype == UPIO_MEM ? 
"MMIO" : "I/O", + uart->addr.address, port.line, options); +#endif +} + +static void __init +setup_vga_console(struct pcdp_vga *vga) +{ +#ifdef CONFIG_VT +#ifdef CONFIG_VGA_CONSOLE + if (efi_mem_type(0xA0000) == EFI_CONVENTIONAL_MEMORY) { + printk(KERN_ERR "PCDP: VGA selected, but frame buffer is not MMIO!\n"); + return; + } + + conswitchp = &vga_con; + printk(KERN_INFO "PCDP: VGA console\n"); +#endif +#endif +} + +void __init +efi_setup_pcdp_console(char *cmdline) +{ + struct pcdp *pcdp; + struct pcdp_uart *uart; + struct pcdp_device *dev, *end; + int i, serial = 0; + + pcdp = efi.hcdp; + if (!pcdp) + return; + + printk(KERN_INFO "PCDP: v%d at 0x%p\n", pcdp->rev, pcdp); + + if (pcdp->rev < 3) { + if (strstr(cmdline, "console=ttyS0") || efi_uart_console_only()) + serial = 1; + } + + for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { + if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) { + if (uart->type == PCDP_CONSOLE_UART) { + setup_serial_console(pcdp->rev, uart); + return; + } + } + } + + end = (struct pcdp_device *) ((u8 *) pcdp + pcdp->length); + for (dev = (struct pcdp_device *) (pcdp->uart + pcdp->num_uarts); + dev < end; + dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) { + if (dev->flags & PCDP_PRIMARY_CONSOLE) { + if (dev->type == PCDP_CONSOLE_VGA) { + setup_vga_console((struct pcdp_vga *) dev); + return; + } + } + } +} + +#ifdef CONFIG_IA64_EARLY_PRINTK_UART +unsigned long +hcdp_early_uart (void) +{ + efi_system_table_t *systab; + efi_config_table_t *config_tables; + unsigned long addr = 0; + struct pcdp *pcdp = 0; + struct pcdp_uart *uart; + int i; + + systab = (efi_system_table_t *) ia64_boot_param->efi_systab; + if (!systab) + return 0; + systab = __va(systab); + + config_tables = (efi_config_table_t *) systab->tables; + if (!config_tables) + return 0; + config_tables = __va(config_tables); + + for (i = 0; i < systab->nr_tables; i++) { + if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { + pcdp = (struct pcdp *) config_tables[i].table; + break; + } + } + if (!pcdp) + return 0; + pcdp = __va(pcdp); + + for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { + if (uart->type == PCDP_CONSOLE_UART) { + addr = uart->addr.address; + break; + } + } + return addr; +} +#endif /* CONFIG_IA64_EARLY_PRINTK_UART */ diff --git a/drivers/firmware/pcdp.h b/drivers/firmware/pcdp.h new file mode 100644 index 000000000..4217c3b1c --- /dev/null +++ b/drivers/firmware/pcdp.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2002, 2004 Hewlett-Packard Co. 
+ * Khalid Aziz + * Bjorn Helgaas + * + * Definitions for PCDP-defined console devices + * + * v1.0a: http://www.dig64.org/specifications/DIG64_HCDPv10a_01.pdf + * v2.0: http://www.dig64.org/specifications/DIG64_HCDPv20_042804.pdf + */ + +#define PCDP_CONSOLE 0 +#define PCDP_DEBUG 1 +#define PCDP_CONSOLE_OUTPUT 2 +#define PCDP_CONSOLE_INPUT 3 + +#define PCDP_UART (0 << 3) +#define PCDP_VGA (1 << 3) +#define PCDP_USB (2 << 3) + +/* pcdp_uart.type and pcdp_device.type */ +#define PCDP_CONSOLE_UART (PCDP_UART | PCDP_CONSOLE) +#define PCDP_DEBUG_UART (PCDP_UART | PCDP_DEBUG) +#define PCDP_CONSOLE_VGA (PCDP_VGA | PCDP_CONSOLE_OUTPUT) +#define PCDP_CONSOLE_USB (PCDP_USB | PCDP_CONSOLE_INPUT) + +/* pcdp_uart.flags */ +#define PCDP_UART_EDGE_SENSITIVE (1 << 0) +#define PCDP_UART_ACTIVE_LOW (1 << 1) +#define PCDP_UART_PRIMARY_CONSOLE (1 << 2) +#define PCDP_UART_IRQ (1 << 6) /* in pci_func for rev < 3 */ +#define PCDP_UART_PCI (1 << 7) /* in pci_func for rev < 3 */ + +struct pcdp_uart { + u8 type; + u8 bits; + u8 parity; + u8 stop_bits; + u8 pci_seg; + u8 pci_bus; + u8 pci_dev; + u8 pci_func; + u64 baud; + struct acpi_generic_address addr; + u16 pci_dev_id; + u16 pci_vendor_id; + u32 gsi; + u32 clock_rate; + u8 pci_prog_intfc; + u8 flags; +}; + +struct pcdp_vga { + u8 count; /* address space descriptors */ +}; + +/* pcdp_device.flags */ +#define PCDP_PRIMARY_CONSOLE 1 + +struct pcdp_device { + u8 type; + u8 flags; + u16 length; + u16 efi_index; +}; + +struct pcdp { + u8 signature[4]; + u32 length; + u8 rev; /* PCDP v2.0 is rev 3 */ + u8 chksum; + u8 oemid[6]; + u8 oem_tabid[8]; + u32 oem_rev; + u8 creator_id[4]; + u32 creator_rev; + u32 num_uarts; + struct pcdp_uart uart[0]; /* actual size is num_uarts */ + /* remainder of table is pcdp_device structures */ +}; diff --git a/drivers/md/dm-io.h b/drivers/md/dm-io.h new file mode 100644 index 000000000..1a77f3265 --- /dev/null +++ b/drivers/md/dm-io.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2003 Sistina Software + * + * This file is released under the GPL. + */ + +#ifndef _DM_IO_H +#define _DM_IO_H + +#include "dm.h" + +/* FIXME make this configurable */ +#define DM_MAX_IO_REGIONS 8 + +struct io_region { + struct block_device *bdev; + sector_t sector; + sector_t count; +}; + +struct page_list { + struct page_list *next; + struct page *page; +}; + + +/* + * 'error' is a bitset, with each bit indicating whether an error + * occurred doing io to the corresponding region. + */ +typedef void (*io_notify_fn)(unsigned long error, void *context); + + +/* + * Before anyone uses the IO interface they should call + * dm_io_get(), specifying roughly how many pages they are + * expecting to perform io on concurrently. + * + * This function may block. + */ +int dm_io_get(unsigned int num_pages); +void dm_io_put(unsigned int num_pages); + +/* + * Synchronous IO. + * + * Please ensure that the rw flag in the next two functions is + * either READ or WRITE, ie. we don't take READA. Any + * regions with a zero count field will be ignored. + */ +int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw, + struct page_list *pl, unsigned int offset, + unsigned long *error_bits); + +int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw, + struct bio_vec *bvec, unsigned long *error_bits); + +int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw, + void *data, unsigned long *error_bits); + +/* + * Aynchronous IO. + * + * The 'where' array may be safely allocated on the stack since + * the function takes a copy. 
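+ *
+ * A minimal sketch of the asynchronous interface (the callback and
+ * region values below are illustrative, not part of this header):
+ *
+ *	static void my_notify(unsigned long error, void *context)
+ *	{
+ *		... each set bit in 'error' marks a failed region ...
+ *	}
+ *
+ *	struct io_region where = { bdev, 0, 8 };
+ *
+ *	dm_io_get(1);
+ *	dm_io_async_vm(1, &where, WRITE, data, my_notify, NULL);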
+ */ +int dm_io_async(unsigned int num_regions, struct io_region *where, int rw, + struct page_list *pl, unsigned int offset, + io_notify_fn fn, void *context); + +int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw, + struct bio_vec *bvec, io_notify_fn fn, void *context); + +int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw, + void *data, io_notify_fn fn, void *context); + +#endif diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c new file mode 100644 index 000000000..843e9b83d --- /dev/null +++ b/drivers/md/dm-raid1.c @@ -0,0 +1,1278 @@ +/* + * Copyright (C) 2003 Sistina Software Limited. + * + * This file is released under the GPL. + */ + +#include "dm.h" +#include "dm-bio-list.h" +#include "dm-io.h" +#include "dm-log.h" +#include "kcopyd.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct workqueue_struct *_kmirrord_wq; +static struct work_struct _kmirrord_work; + +static inline void wake(void) +{ + queue_work(_kmirrord_wq, &_kmirrord_work); +} + +/*----------------------------------------------------------------- + * Region hash + * + * The mirror splits itself up into discrete regions. Each + * region can be in one of three states: clean, dirty, + * nosync. There is no need to put clean regions in the hash. + * + * In addition to being present in the hash table a region _may_ + * be present on one of three lists. + * + * clean_regions: Regions on this list have no io pending to + * them, they are in sync, we are no longer interested in them, + * they are dull. rh_update_states() will remove them from the + * hash table. + * + * quiesced_regions: These regions have been spun down, ready + * for recovery. rh_recovery_start() will remove regions from + * this list and hand them to kmirrord, which will schedule the + * recovery io with kcopyd. + * + * recovered_regions: Regions that kcopyd has successfully + * recovered. rh_update_states() will now schedule any delayed + * io, up the recovery_count, and remove the region from the + * hash. + * + * There are 2 locks: + * A rw spin lock 'hash_lock' protects just the hash table, + * this is never held in write mode from interrupt context, + * which I believe means that we only have to disable irqs when + * doing a write lock. + * + * An ordinary spin lock 'region_lock' that protects the three + * lists in the region_hash, with the 'state', 'list' and + * 'bhs_delayed' fields of the regions. This is used from irq + * context, so all other uses will have to suspend local irqs. + *---------------------------------------------------------------*/ +struct mirror_set; +struct region_hash { + struct mirror_set *ms; + sector_t region_size; + unsigned region_shift; + + /* holds persistent region state */ + struct dirty_log *log; + + /* hash table */ + rwlock_t hash_lock; + mempool_t *region_pool; + unsigned int mask; + unsigned int nr_buckets; + struct list_head *buckets; + + spinlock_t region_lock; + struct semaphore recovery_count; + struct list_head clean_regions; + struct list_head quiesced_regions; + struct list_head recovered_regions; +}; + +enum { + RH_CLEAN, + RH_DIRTY, + RH_NOSYNC, + RH_RECOVERING +}; + +struct region { + struct region_hash *rh; /* FIXME: can we get rid of this ? 
*/ + region_t key; + int state; + + struct list_head hash_list; + struct list_head list; + + atomic_t pending; + struct bio_list delayed_bios; +}; + +/* + * Conversion fns + */ +static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio) +{ + return bio->bi_sector >> rh->region_shift; +} + +static inline sector_t region_to_sector(struct region_hash *rh, region_t region) +{ + return region << rh->region_shift; +} + +/* FIXME move this */ +static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); + +static void *region_alloc(int gfp_mask, void *pool_data) +{ + return kmalloc(sizeof(struct region), gfp_mask); +} + +static void region_free(void *element, void *pool_data) +{ + kfree(element); +} + +#define MIN_REGIONS 64 +#define MAX_RECOVERY 1 +static int rh_init(struct region_hash *rh, struct mirror_set *ms, + struct dirty_log *log, sector_t region_size, + region_t nr_regions) +{ + unsigned int nr_buckets, max_buckets; + size_t i; + + /* + * Calculate a suitable number of buckets for our hash + * table. + */ + max_buckets = nr_regions >> 6; + for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1) + ; + nr_buckets >>= 1; + + rh->ms = ms; + rh->log = log; + rh->region_size = region_size; + rh->region_shift = ffs(region_size) - 1; + rwlock_init(&rh->hash_lock); + rh->mask = nr_buckets - 1; + rh->nr_buckets = nr_buckets; + + rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets)); + if (!rh->buckets) { + DMERR("unable to allocate region hash memory"); + return -ENOMEM; + } + + for (i = 0; i < nr_buckets; i++) + INIT_LIST_HEAD(rh->buckets + i); + + spin_lock_init(&rh->region_lock); + sema_init(&rh->recovery_count, 0); + INIT_LIST_HEAD(&rh->clean_regions); + INIT_LIST_HEAD(&rh->quiesced_regions); + INIT_LIST_HEAD(&rh->recovered_regions); + + rh->region_pool = mempool_create(MIN_REGIONS, region_alloc, + region_free, NULL); + if (!rh->region_pool) { + vfree(rh->buckets); + rh->buckets = NULL; + return -ENOMEM; + } + + return 0; +} + +static void rh_exit(struct region_hash *rh) +{ + unsigned int h; + struct region *reg, *nreg; + + BUG_ON(!list_empty(&rh->quiesced_regions)); + for (h = 0; h < rh->nr_buckets; h++) { + list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) { + BUG_ON(atomic_read(®->pending)); + mempool_free(reg, rh->region_pool); + } + } + + if (rh->log) + dm_destroy_dirty_log(rh->log); + if (rh->region_pool) + mempool_destroy(rh->region_pool); + vfree(rh->buckets); +} + +#define RH_HASH_MULT 2654435387U + +static inline unsigned int rh_hash(struct region_hash *rh, region_t region) +{ + return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask; +} + +static struct region *__rh_lookup(struct region_hash *rh, region_t region) +{ + struct region *reg; + + list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list) + if (reg->key == region) + return reg; + + return NULL; +} + +static void __rh_insert(struct region_hash *rh, struct region *reg) +{ + unsigned int h = rh_hash(rh, reg->key); + list_add(®->hash_list, rh->buckets + h); +} + +static struct region *__rh_alloc(struct region_hash *rh, region_t region) +{ + struct region *reg, *nreg; + + read_unlock(&rh->hash_lock); + nreg = mempool_alloc(rh->region_pool, GFP_NOIO); + nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? 
+ RH_CLEAN : RH_NOSYNC; + nreg->rh = rh; + nreg->key = region; + + INIT_LIST_HEAD(&nreg->list); + + atomic_set(&nreg->pending, 0); + bio_list_init(&nreg->delayed_bios); + write_lock_irq(&rh->hash_lock); + + reg = __rh_lookup(rh, region); + if (reg) + /* we lost the race */ + mempool_free(nreg, rh->region_pool); + + else { + __rh_insert(rh, nreg); + if (nreg->state == RH_CLEAN) { + spin_lock_irq(&rh->region_lock); + list_add(&nreg->list, &rh->clean_regions); + spin_unlock_irq(&rh->region_lock); + } + reg = nreg; + } + write_unlock_irq(&rh->hash_lock); + read_lock(&rh->hash_lock); + + return reg; +} + +static inline struct region *__rh_find(struct region_hash *rh, region_t region) +{ + struct region *reg; + + reg = __rh_lookup(rh, region); + if (!reg) + reg = __rh_alloc(rh, region); + + return reg; +} + +static int rh_state(struct region_hash *rh, region_t region, int may_block) +{ + int r; + struct region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_lookup(rh, region); + read_unlock(&rh->hash_lock); + + if (reg) + return reg->state; + + /* + * The region wasn't in the hash, so we fall back to the + * dirty log. + */ + r = rh->log->type->in_sync(rh->log, region, may_block); + + /* + * Any error from the dirty log (eg. -EWOULDBLOCK) gets + * taken as a RH_NOSYNC + */ + return r == 1 ? RH_CLEAN : RH_NOSYNC; +} + +static inline int rh_in_sync(struct region_hash *rh, + region_t region, int may_block) +{ + int state = rh_state(rh, region, may_block); + return state == RH_CLEAN || state == RH_DIRTY; +} + +static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list) +{ + struct bio *bio; + + while ((bio = bio_list_pop(bio_list))) { + queue_bio(ms, bio, WRITE); + } +} + +static void rh_update_states(struct region_hash *rh) +{ + struct region *reg, *next; + + LIST_HEAD(clean); + LIST_HEAD(recovered); + + /* + * Quickly grab the lists. + */ + write_lock_irq(&rh->hash_lock); + spin_lock(&rh->region_lock); + if (!list_empty(&rh->clean_regions)) { + list_splice(&rh->clean_regions, &clean); + INIT_LIST_HEAD(&rh->clean_regions); + + list_for_each_entry (reg, &clean, list) { + rh->log->type->clear_region(rh->log, reg->key); + list_del(®->hash_list); + } + } + + if (!list_empty(&rh->recovered_regions)) { + list_splice(&rh->recovered_regions, &recovered); + INIT_LIST_HEAD(&rh->recovered_regions); + + list_for_each_entry (reg, &recovered, list) + list_del(®->hash_list); + } + spin_unlock(&rh->region_lock); + write_unlock_irq(&rh->hash_lock); + + /* + * All the regions on the recovered and clean lists have + * now been pulled out of the system, so no need to do + * any more locking. 
+ */ + list_for_each_entry_safe (reg, next, &recovered, list) { + rh->log->type->clear_region(rh->log, reg->key); + rh->log->type->complete_resync_work(rh->log, reg->key, 1); + dispatch_bios(rh->ms, ®->delayed_bios); + up(&rh->recovery_count); + mempool_free(reg, rh->region_pool); + } + + if (!list_empty(&recovered)) + rh->log->type->flush(rh->log); + + list_for_each_entry_safe (reg, next, &clean, list) + mempool_free(reg, rh->region_pool); +} + +static void rh_inc(struct region_hash *rh, region_t region) +{ + struct region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_find(rh, region); + if (reg->state == RH_CLEAN) { + rh->log->type->mark_region(rh->log, reg->key); + + spin_lock_irq(&rh->region_lock); + reg->state = RH_DIRTY; + list_del_init(®->list); /* take off the clean list */ + spin_unlock_irq(&rh->region_lock); + } + + atomic_inc(®->pending); + read_unlock(&rh->hash_lock); +} + +static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios) +{ + struct bio *bio; + + for (bio = bios->head; bio; bio = bio->bi_next) + rh_inc(rh, bio_to_region(rh, bio)); +} + +static void rh_dec(struct region_hash *rh, region_t region) +{ + unsigned long flags; + struct region *reg; + int should_wake = 0; + + read_lock(&rh->hash_lock); + reg = __rh_lookup(rh, region); + read_unlock(&rh->hash_lock); + + if (atomic_dec_and_test(®->pending)) { + spin_lock_irqsave(&rh->region_lock, flags); + if (reg->state == RH_RECOVERING) { + list_add_tail(®->list, &rh->quiesced_regions); + } else { + reg->state = RH_CLEAN; + list_add(®->list, &rh->clean_regions); + } + spin_unlock_irqrestore(&rh->region_lock, flags); + should_wake = 1; + } + + if (should_wake) + wake(); +} + +/* + * Starts quiescing a region in preparation for recovery. + */ +static int __rh_recovery_prepare(struct region_hash *rh) +{ + int r; + struct region *reg; + region_t region; + + /* + * Ask the dirty log what's next. + */ + r = rh->log->type->get_resync_work(rh->log, ®ion); + if (r <= 0) + return r; + + /* + * Get this region, and start it quiescing by setting the + * recovering flag. + */ + read_lock(&rh->hash_lock); + reg = __rh_find(rh, region); + read_unlock(&rh->hash_lock); + + spin_lock_irq(&rh->region_lock); + reg->state = RH_RECOVERING; + + /* Already quiesced ? */ + if (atomic_read(®->pending)) + list_del_init(®->list); + + else { + list_del_init(®->list); + list_add(®->list, &rh->quiesced_regions); + } + spin_unlock_irq(&rh->region_lock); + + return 1; +} + +static void rh_recovery_prepare(struct region_hash *rh) +{ + while (!down_trylock(&rh->recovery_count)) + if (__rh_recovery_prepare(rh) <= 0) { + up(&rh->recovery_count); + break; + } +} + +/* + * Returns any quiesced regions. 
+ */ +static struct region *rh_recovery_start(struct region_hash *rh) +{ + struct region *reg = NULL; + + spin_lock_irq(&rh->region_lock); + if (!list_empty(&rh->quiesced_regions)) { + reg = list_entry(rh->quiesced_regions.next, + struct region, list); + list_del_init(®->list); /* remove from the quiesced list */ + } + spin_unlock_irq(&rh->region_lock); + + return reg; +} + +/* FIXME: success ignored for now */ +static void rh_recovery_end(struct region *reg, int success) +{ + struct region_hash *rh = reg->rh; + + spin_lock_irq(&rh->region_lock); + list_add(®->list, ®->rh->recovered_regions); + spin_unlock_irq(&rh->region_lock); + + wake(); +} + +static void rh_flush(struct region_hash *rh) +{ + rh->log->type->flush(rh->log); +} + +static void rh_delay(struct region_hash *rh, struct bio *bio) +{ + struct region *reg; + + read_lock(&rh->hash_lock); + reg = __rh_find(rh, bio_to_region(rh, bio)); + bio_list_add(®->delayed_bios, bio); + read_unlock(&rh->hash_lock); +} + +static void rh_stop_recovery(struct region_hash *rh) +{ + int i; + + /* wait for any recovering regions */ + for (i = 0; i < MAX_RECOVERY; i++) + down(&rh->recovery_count); +} + +static void rh_start_recovery(struct region_hash *rh) +{ + int i; + + for (i = 0; i < MAX_RECOVERY; i++) + up(&rh->recovery_count); + + wake(); +} + +/*----------------------------------------------------------------- + * Mirror set structures. + *---------------------------------------------------------------*/ +struct mirror { + atomic_t error_count; + struct dm_dev *dev; + sector_t offset; +}; + +struct mirror_set { + struct dm_target *ti; + struct list_head list; + struct region_hash rh; + struct kcopyd_client *kcopyd_client; + + spinlock_t lock; /* protects the next two lists */ + struct bio_list reads; + struct bio_list writes; + + /* recovery */ + region_t nr_regions; + int in_sync; + + unsigned int nr_mirrors; + struct mirror mirror[0]; +}; + +/* + * Every mirror should look like this one. + */ +#define DEFAULT_MIRROR 0 + +/* + * This is yucky. We squirrel the mirror_set struct away inside + * bi_next for write buffers. This is safe since the bh + * doesn't get submitted to the lower levels of block layer. + */ +static struct mirror_set *bio_get_ms(struct bio *bio) +{ + return (struct mirror_set *) bio->bi_next; +} + +static void bio_set_ms(struct bio *bio, struct mirror_set *ms) +{ + bio->bi_next = (struct bio *) ms; +} + +/*----------------------------------------------------------------- + * Recovery. + * + * When a mirror is first activated we may find that some regions + * are in the no-sync state. We have to recover these by + * recopying from the default mirror to all the others. + *---------------------------------------------------------------*/ +static void recovery_complete(int read_err, unsigned int write_err, + void *context) +{ + struct region *reg = (struct region *) context; + + /* FIXME: better error handling */ + rh_recovery_end(reg, read_err || write_err); +} + +static int recover(struct mirror_set *ms, struct region *reg) +{ + int r; + unsigned int i; + struct io_region from, to[ms->nr_mirrors - 1], *dest; + struct mirror *m; + unsigned long flags = 0; + + /* fill in the source */ + m = ms->mirror + DEFAULT_MIRROR; + from.bdev = m->dev->bdev; + from.sector = m->offset + region_to_sector(reg->rh, reg->key); + if (reg->key == (ms->nr_regions - 1)) { + /* + * The final region may be smaller than + * region_size. 
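+ * For instance, with a region_size of 64 sectors and a target
+ * length of 1000 sectors the last region covers 1000 & 63 = 40
+ * sectors; if the length were an exact multiple the mask would
+ * yield 0 and the full region_size is used instead (see below).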
+ */ + from.count = ms->ti->len & (reg->rh->region_size - 1); + if (!from.count) + from.count = reg->rh->region_size; + } else + from.count = reg->rh->region_size; + + /* fill in the destinations */ + for (i = 0, dest = to; i < ms->nr_mirrors; i++) { + if (i == DEFAULT_MIRROR) + continue; + + m = ms->mirror + i; + dest->bdev = m->dev->bdev; + dest->sector = m->offset + region_to_sector(reg->rh, reg->key); + dest->count = from.count; + dest++; + } + + /* hand to kcopyd */ + set_bit(KCOPYD_IGNORE_ERROR, &flags); + r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags, + recovery_complete, reg); + + return r; +} + +static void do_recovery(struct mirror_set *ms) +{ + int r; + struct region *reg; + struct dirty_log *log = ms->rh.log; + + /* + * Start quiescing some regions. + */ + rh_recovery_prepare(&ms->rh); + + /* + * Copy any already quiesced regions. + */ + while ((reg = rh_recovery_start(&ms->rh))) { + r = recover(ms, reg); + if (r) + rh_recovery_end(reg, 0); + } + + /* + * Update the in sync flag. + */ + if (!ms->in_sync && + (log->type->get_sync_count(log) == ms->nr_regions)) { + /* the sync is complete */ + dm_table_event(ms->ti->table); + ms->in_sync = 1; + } +} + +/*----------------------------------------------------------------- + * Reads + *---------------------------------------------------------------*/ +static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector) +{ + /* FIXME: add read balancing */ + return ms->mirror + DEFAULT_MIRROR; +} + +/* + * remap a buffer to a particular mirror. + */ +static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio) +{ + bio->bi_bdev = m->dev->bdev; + bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin); +} + +static void do_reads(struct mirror_set *ms, struct bio_list *reads) +{ + region_t region; + struct bio *bio; + struct mirror *m; + + while ((bio = bio_list_pop(reads))) { + region = bio_to_region(&ms->rh, bio); + + /* + * We can only read balance if the region is in sync. + */ + if (rh_in_sync(&ms->rh, region, 0)) + m = choose_mirror(ms, bio->bi_sector); + else + m = ms->mirror + DEFAULT_MIRROR; + + map_bio(ms, m, bio); + generic_make_request(bio); + } +} + +/*----------------------------------------------------------------- + * Writes. + * + * We do different things with the write io depending on the + * state of the region that it's in: + * + * SYNC: increment pending, use kcopyd to write to *all* mirrors + * RECOVERING: delay the io until recovery completes + * NOSYNC: increment pending, just write to the default mirror + *---------------------------------------------------------------*/ +static void write_callback(unsigned long error, void *context) +{ + unsigned int i; + int uptodate = 1; + struct bio *bio = (struct bio *) context; + struct mirror_set *ms; + + ms = bio_get_ms(bio); + bio_set_ms(bio, NULL); + + /* + * NOTE: We don't decrement the pending count here, + * instead it is done by the targets endio function. + * This way we handle both writes to SYNC and NOSYNC + * regions with the same code. + */ + + if (error) { + /* + * only error the io if all mirrors failed. 
+ * FIXME: bogus + */ + uptodate = 0; + for (i = 0; i < ms->nr_mirrors; i++) + if (!test_bit(i, &error)) { + uptodate = 1; + break; + } + } + bio_endio(bio, bio->bi_size, 0); +} + +static void do_write(struct mirror_set *ms, struct bio *bio) +{ + unsigned int i; + struct io_region io[ms->nr_mirrors]; + struct mirror *m; + + for (i = 0; i < ms->nr_mirrors; i++) { + m = ms->mirror + i; + + io[i].bdev = m->dev->bdev; + io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin); + io[i].count = bio->bi_size >> 9; + } + + bio_set_ms(bio, ms); + dm_io_async_bvec(ms->nr_mirrors, io, WRITE, + bio->bi_io_vec + bio->bi_idx, + write_callback, bio); +} + +static void do_writes(struct mirror_set *ms, struct bio_list *writes) +{ + int state; + struct bio *bio; + struct bio_list sync, nosync, recover, *this_list = NULL; + + if (!writes->head) + return; + + /* + * Classify each write. + */ + bio_list_init(&sync); + bio_list_init(&nosync); + bio_list_init(&recover); + + while ((bio = bio_list_pop(writes))) { + state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1); + switch (state) { + case RH_CLEAN: + case RH_DIRTY: + this_list = &sync; + break; + + case RH_NOSYNC: + this_list = &nosync; + break; + + case RH_RECOVERING: + this_list = &recover; + break; + } + + bio_list_add(this_list, bio); + } + + /* + * Increment the pending counts for any regions that will + * be written to (writes to recover regions are going to + * be delayed). + */ + rh_inc_pending(&ms->rh, &sync); + rh_inc_pending(&ms->rh, &nosync); + rh_flush(&ms->rh); + + /* + * Dispatch io. + */ + while ((bio = bio_list_pop(&sync))) + do_write(ms, bio); + + while ((bio = bio_list_pop(&recover))) + rh_delay(&ms->rh, bio); + + while ((bio = bio_list_pop(&nosync))) { + map_bio(ms, ms->mirror + DEFAULT_MIRROR, bio); + generic_make_request(bio); + } +} + +/*----------------------------------------------------------------- + * kmirrord + *---------------------------------------------------------------*/ +static LIST_HEAD(_mirror_sets); +static DECLARE_RWSEM(_mirror_sets_lock); + +static void do_mirror(struct mirror_set *ms) +{ + struct bio_list reads, writes; + + spin_lock(&ms->lock); + reads = ms->reads; + writes = ms->writes; + bio_list_init(&ms->reads); + bio_list_init(&ms->writes); + spin_unlock(&ms->lock); + + rh_update_states(&ms->rh); + do_recovery(ms); + do_reads(ms, &reads); + do_writes(ms, &writes); +} + +static void do_work(void *ignored) +{ + struct mirror_set *ms; + + down_read(&_mirror_sets_lock); + list_for_each_entry (ms, &_mirror_sets, list) + do_mirror(ms); + up_read(&_mirror_sets_lock); +} + +/*----------------------------------------------------------------- + * Target functions + *---------------------------------------------------------------*/ +static struct mirror_set *alloc_context(unsigned int nr_mirrors, + sector_t region_size, + struct dm_target *ti, + struct dirty_log *dl) +{ + size_t len; + struct mirror_set *ms = NULL; + + if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors)) + return NULL; + + len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); + + ms = kmalloc(len, GFP_KERNEL); + if (!ms) { + ti->error = "dm-mirror: Cannot allocate mirror context"; + return NULL; + } + + memset(ms, 0, len); + spin_lock_init(&ms->lock); + + ms->ti = ti; + ms->nr_mirrors = nr_mirrors; + ms->nr_regions = dm_div_up(ti->len, region_size); + ms->in_sync = 0; + + if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { + ti->error = "dm-mirror: Error creating dirty region hash"; + kfree(ms); + return NULL; + } + + 
return ms; +} + +static void free_context(struct mirror_set *ms, struct dm_target *ti, + unsigned int m) +{ + while (m--) + dm_put_device(ti, ms->mirror[m].dev); + + rh_exit(&ms->rh); + kfree(ms); +} + +static inline int _check_region_size(struct dm_target *ti, sector_t size) +{ + return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) || + size > ti->len); +} + +static int get_mirror(struct mirror_set *ms, struct dm_target *ti, + unsigned int mirror, char **argv) +{ + sector_t offset; + + if (sscanf(argv[1], SECTOR_FORMAT, &offset) != 1) { + ti->error = "dm-mirror: Invalid offset"; + return -EINVAL; + } + + if (dm_get_device(ti, argv[0], offset, ti->len, + dm_table_get_mode(ti->table), + &ms->mirror[mirror].dev)) { + ti->error = "dm-mirror: Device lookup failure"; + return -ENXIO; + } + + ms->mirror[mirror].offset = offset; + + return 0; +} + +static int add_mirror_set(struct mirror_set *ms) +{ + down_write(&_mirror_sets_lock); + list_add_tail(&ms->list, &_mirror_sets); + up_write(&_mirror_sets_lock); + wake(); + + return 0; +} + +static void del_mirror_set(struct mirror_set *ms) +{ + down_write(&_mirror_sets_lock); + list_del(&ms->list); + up_write(&_mirror_sets_lock); +} + +/* + * Create dirty log: log_type #log_params + */ +static struct dirty_log *create_dirty_log(struct dm_target *ti, + unsigned int argc, char **argv, + unsigned int *args_used) +{ + unsigned int param_count; + struct dirty_log *dl; + + if (argc < 2) { + ti->error = "dm-mirror: Insufficient mirror log arguments"; + return NULL; + } + + if (sscanf(argv[1], "%u", ¶m_count) != 1) { + ti->error = "dm-mirror: Invalid mirror log argument count"; + return NULL; + } + + *args_used = 2 + param_count; + + if (argc < *args_used) { + ti->error = "dm-mirror: Insufficient mirror log arguments"; + return NULL; + } + + dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2); + if (!dl) { + ti->error = "dm-mirror: Error creating mirror dirty log"; + return NULL; + } + + if (!_check_region_size(ti, dl->type->get_region_size(dl))) { + ti->error = "dm-mirror: Invalid region size"; + dm_destroy_dirty_log(dl); + return NULL; + } + + return dl; +} + +/* + * Construct a mirror mapping: + * + * log_type #log_params + * #mirrors [mirror_path offset]{2,} + * + * For now, #log_params = 1, log_type = "core" + * + */ +#define DM_IO_PAGES 64 +static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + int r; + unsigned int nr_mirrors, m, args_used; + struct mirror_set *ms; + struct dirty_log *dl; + + dl = create_dirty_log(ti, argc, argv, &args_used); + if (!dl) + return -EINVAL; + + argv += args_used; + argc -= args_used; + + if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 || + nr_mirrors < 2) { + ti->error = "dm-mirror: Invalid number of mirrors"; + dm_destroy_dirty_log(dl); + return -EINVAL; + } + + argv++, argc--; + + if (argc != nr_mirrors * 2) { + ti->error = "dm-mirror: Wrong number of mirror arguments"; + dm_destroy_dirty_log(dl); + return -EINVAL; + } + + ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl); + if (!ms) { + dm_destroy_dirty_log(dl); + return -ENOMEM; + } + + /* Get the mirror parameter sets */ + for (m = 0; m < nr_mirrors; m++) { + r = get_mirror(ms, ti, m, argv); + if (r) { + free_context(ms, ti, m); + return r; + } + argv += 2; + argc -= 2; + } + + ti->private = ms; + + r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); + if (r) { + free_context(ms, ti, ms->nr_mirrors); + return r; + } + + add_mirror_set(ms); + return 0; +} + +static void mirror_dtr(struct dm_target 
*ti) +{ + struct mirror_set *ms = (struct mirror_set *) ti->private; + + del_mirror_set(ms); + kcopyd_client_destroy(ms->kcopyd_client); + free_context(ms, ti, ms->nr_mirrors); +} + +static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) +{ + int should_wake = 0; + struct bio_list *bl; + + bl = (rw == WRITE) ? &ms->writes : &ms->reads; + spin_lock(&ms->lock); + should_wake = !(bl->head); + bio_list_add(bl, bio); + spin_unlock(&ms->lock); + + if (should_wake) + wake(); +} + +/* + * Mirror mapping function + */ +static int mirror_map(struct dm_target *ti, struct bio *bio, + union map_info *map_context) +{ + int r, rw = bio_rw(bio); + struct mirror *m; + struct mirror_set *ms = ti->private; + + map_context->ll = bio->bi_sector >> ms->rh.region_shift; + + if (rw == WRITE) { + queue_bio(ms, bio, rw); + return 0; + } + + r = ms->rh.log->type->in_sync(ms->rh.log, + bio_to_region(&ms->rh, bio), 0); + if (r < 0 && r != -EWOULDBLOCK) + return r; + + if (r == -EWOULDBLOCK) /* FIXME: ugly */ + r = 0; + + /* + * We don't want to fast track a recovery just for a read + * ahead. So we just let it silently fail. + * FIXME: get rid of this. + */ + if (!r && rw == READA) + return -EIO; + + if (!r) { + /* Pass this io over to the daemon */ + queue_bio(ms, bio, rw); + return 0; + } + + m = choose_mirror(ms, bio->bi_sector); + if (!m) + return -EIO; + + map_bio(ms, m, bio); + return 1; +} + +static int mirror_end_io(struct dm_target *ti, struct bio *bio, + int error, union map_info *map_context) +{ + int rw = bio_rw(bio); + struct mirror_set *ms = (struct mirror_set *) ti->private; + region_t region = map_context->ll; + + /* + * We need to dec pending if this was a write. + */ + if (rw == WRITE) + rh_dec(&ms->rh, region); + + return 0; +} + +static void mirror_suspend(struct dm_target *ti) +{ + struct mirror_set *ms = (struct mirror_set *) ti->private; + struct dirty_log *log = ms->rh.log; + rh_stop_recovery(&ms->rh); + if (log->type->suspend && log->type->suspend(log)) + /* FIXME: need better error handling */ + DMWARN("log suspend failed"); +} + +static void mirror_resume(struct dm_target *ti) +{ + struct mirror_set *ms = (struct mirror_set *) ti->private; + struct dirty_log *log = ms->rh.log; + if (log->type->resume && log->type->resume(log)) + /* FIXME: need better error handling */ + DMWARN("log resume failed"); + rh_start_recovery(&ms->rh); +} + +static int mirror_status(struct dm_target *ti, status_type_t type, + char *result, unsigned int maxlen) +{ + char buffer[32]; + unsigned int m, sz = 0; + struct mirror_set *ms = (struct mirror_set *) ti->private; + +#define EMIT(x...) sz += ((sz >= maxlen) ? 
\ + 0 : scnprintf(result + sz, maxlen - sz, x)) + + switch (type) { + case STATUSTYPE_INFO: + EMIT("%d ", ms->nr_mirrors); + + for (m = 0; m < ms->nr_mirrors; m++) { + format_dev_t(buffer, ms->mirror[m].dev->bdev->bd_dev); + EMIT("%s ", buffer); + } + + EMIT(SECTOR_FORMAT "/" SECTOR_FORMAT, + ms->rh.log->type->get_sync_count(ms->rh.log), + ms->nr_regions); + break; + + case STATUSTYPE_TABLE: + EMIT("%s 1 " SECTOR_FORMAT " %d ", + ms->rh.log->type->name, ms->rh.region_size, + ms->nr_mirrors); + + for (m = 0; m < ms->nr_mirrors; m++) { + format_dev_t(buffer, ms->mirror[m].dev->bdev->bd_dev); + EMIT("%s " SECTOR_FORMAT " ", + buffer, ms->mirror[m].offset); + } + } + + return 0; +} + +static struct target_type mirror_target = { + .name = "mirror", + .version = {1, 0, 1}, + .module = THIS_MODULE, + .ctr = mirror_ctr, + .dtr = mirror_dtr, + .map = mirror_map, + .end_io = mirror_end_io, + .suspend = mirror_suspend, + .resume = mirror_resume, + .status = mirror_status, +}; + +static int __init dm_mirror_init(void) +{ + int r; + + r = dm_dirty_log_init(); + if (r) + return r; + + _kmirrord_wq = create_workqueue("kmirrord"); + if (!_kmirrord_wq) { + DMERR("couldn't start kmirrord"); + dm_dirty_log_exit(); + return r; + } + INIT_WORK(&_kmirrord_work, do_work, NULL); + + r = dm_register_target(&mirror_target); + if (r < 0) { + DMERR("%s: Failed to register mirror target", + mirror_target.name); + dm_dirty_log_exit(); + destroy_workqueue(_kmirrord_wq); + } + + return r; +} + +static void __exit dm_mirror_exit(void) +{ + int r; + + r = dm_unregister_target(&mirror_target); + if (r < 0) + DMERR("%s: unregister failed %d", mirror_target.name, r); + + destroy_workqueue(_kmirrord_wq); + dm_dirty_log_exit(); +} + +/* Module hooks */ +module_init(dm_mirror_init); +module_exit(dm_mirror_exit); + +MODULE_DESCRIPTION(DM_NAME " mirror target"); +MODULE_AUTHOR("Joe Thornber"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c new file mode 100644 index 000000000..40e46944e --- /dev/null +++ b/drivers/md/kcopyd.c @@ -0,0 +1,699 @@ +/* + * Copyright (C) 2002 Sistina Software (UK) Limited. + * + * This file is released under the GPL. + * + * Kcopyd provides a simple interface for copying an area of one + * block-device to one or more other block-devices, with an asynchronous + * completion notification. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kcopyd.h" + +/* FIXME: this is only needed for the DMERR macros */ +#include "dm.h" + +static struct workqueue_struct *_kcopyd_wq; +static struct work_struct _kcopyd_work; + +static inline void wake(void) +{ + queue_work(_kcopyd_wq, &_kcopyd_work); +} + +/*----------------------------------------------------------------- + * Each kcopyd client has its own little pool of preallocated + * pages for kcopyd io. 
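+ *
+ * The pool is sized by the caller of kcopyd_client_create(); for
+ * example, the dm-raid1 target in this patch reserves DM_IO_PAGES
+ * (64) pages per mirror set:
+ *
+ *	kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);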
+ *---------------------------------------------------------------*/ +struct kcopyd_client { + struct list_head list; + + spinlock_t lock; + struct page_list *pages; + unsigned int nr_pages; + unsigned int nr_free_pages; +}; + +static struct page_list *alloc_pl(void) +{ + struct page_list *pl; + + pl = kmalloc(sizeof(*pl), GFP_KERNEL); + if (!pl) + return NULL; + + pl->page = alloc_page(GFP_KERNEL); + if (!pl->page) { + kfree(pl); + return NULL; + } + + return pl; +} + +static void free_pl(struct page_list *pl) +{ + __free_page(pl->page); + kfree(pl); +} + +static int kcopyd_get_pages(struct kcopyd_client *kc, + unsigned int nr, struct page_list **pages) +{ + struct page_list *pl; + + spin_lock(&kc->lock); + if (kc->nr_free_pages < nr) { + spin_unlock(&kc->lock); + return -ENOMEM; + } + + kc->nr_free_pages -= nr; + for (*pages = pl = kc->pages; --nr; pl = pl->next) + ; + + kc->pages = pl->next; + pl->next = 0; + + spin_unlock(&kc->lock); + + return 0; +} + +static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl) +{ + struct page_list *cursor; + + spin_lock(&kc->lock); + for (cursor = pl; cursor->next; cursor = cursor->next) + kc->nr_free_pages++; + + kc->nr_free_pages++; + cursor->next = kc->pages; + kc->pages = pl; + spin_unlock(&kc->lock); +} + +/* + * These three functions resize the page pool. + */ +static void drop_pages(struct page_list *pl) +{ + struct page_list *next; + + while (pl) { + next = pl->next; + free_pl(pl); + pl = next; + } +} + +static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr) +{ + unsigned int i; + struct page_list *pl = NULL, *next; + + for (i = 0; i < nr; i++) { + next = alloc_pl(); + if (!next) { + if (pl) + drop_pages(pl); + return -ENOMEM; + } + next->next = pl; + pl = next; + } + + kcopyd_put_pages(kc, pl); + kc->nr_pages += nr; + return 0; +} + +static void client_free_pages(struct kcopyd_client *kc) +{ + BUG_ON(kc->nr_free_pages != kc->nr_pages); + drop_pages(kc->pages); + kc->pages = NULL; + kc->nr_free_pages = kc->nr_pages = 0; +} + +/*----------------------------------------------------------------- + * kcopyd_jobs need to be allocated by the *clients* of kcopyd, + * for this reason we use a mempool to prevent the client from + * ever having to do io (which could cause a deadlock). + *---------------------------------------------------------------*/ +struct kcopyd_job { + struct kcopyd_client *kc; + struct list_head list; + unsigned long flags; + + /* + * Error state of the job. + */ + int read_err; + unsigned int write_err; + + /* + * Either READ or WRITE + */ + int rw; + struct io_region source; + + /* + * The destinations for the transfer. + */ + unsigned int num_dests; + struct io_region dests[KCOPYD_MAX_REGIONS]; + + sector_t offset; + unsigned int nr_pages; + struct page_list *pages; + + /* + * Set this to ensure you are notified when the job has + * completed. 'context' is for callback to use. + */ + kcopyd_notify_fn fn; + void *context; + + /* + * These fields are only used if the job has been split + * into more manageable parts. + */ + struct semaphore lock; + atomic_t sub_jobs; + sector_t progress; +}; + +/* FIXME: this should scale with the number of pages */ +#define MIN_JOBS 512 + +static kmem_cache_t *_job_cache; +static mempool_t *_job_pool; + +/* + * We maintain three lists of jobs: + * + * i) jobs waiting for pages + * ii) jobs that have pages, and are waiting for the io to be issued. + * iii) jobs that have completed. + * + * All three of these are protected by job_lock. 
+ */ +static spinlock_t _job_lock = SPIN_LOCK_UNLOCKED; + +static LIST_HEAD(_complete_jobs); +static LIST_HEAD(_io_jobs); +static LIST_HEAD(_pages_jobs); + +static int jobs_init(void) +{ + _job_cache = kmem_cache_create("kcopyd-jobs", + sizeof(struct kcopyd_job), + __alignof__(struct kcopyd_job), + 0, NULL, NULL); + if (!_job_cache) + return -ENOMEM; + + _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab, + mempool_free_slab, _job_cache); + if (!_job_pool) { + kmem_cache_destroy(_job_cache); + return -ENOMEM; + } + + return 0; +} + +static void jobs_exit(void) +{ + BUG_ON(!list_empty(&_complete_jobs)); + BUG_ON(!list_empty(&_io_jobs)); + BUG_ON(!list_empty(&_pages_jobs)); + + mempool_destroy(_job_pool); + kmem_cache_destroy(_job_cache); + _job_pool = NULL; + _job_cache = NULL; +} + +/* + * Functions to push and pop a job onto the head of a given job + * list. + */ +static inline struct kcopyd_job *pop(struct list_head *jobs) +{ + struct kcopyd_job *job = NULL; + unsigned long flags; + + spin_lock_irqsave(&_job_lock, flags); + + if (!list_empty(jobs)) { + job = list_entry(jobs->next, struct kcopyd_job, list); + list_del(&job->list); + } + spin_unlock_irqrestore(&_job_lock, flags); + + return job; +} + +static inline void push(struct list_head *jobs, struct kcopyd_job *job) +{ + unsigned long flags; + + spin_lock_irqsave(&_job_lock, flags); + list_add_tail(&job->list, jobs); + spin_unlock_irqrestore(&_job_lock, flags); +} + +/* + * These three functions process 1 item from the corresponding + * job list. + * + * They return: + * < 0: error + * 0: success + * > 0: can't process yet. + */ +static int run_complete_job(struct kcopyd_job *job) +{ + void *context = job->context; + int read_err = job->read_err; + unsigned int write_err = job->write_err; + kcopyd_notify_fn fn = job->fn; + + kcopyd_put_pages(job->kc, job->pages); + mempool_free(job, _job_pool); + fn(read_err, write_err, context); + return 0; +} + +static void complete_io(unsigned long error, void *context) +{ + struct kcopyd_job *job = (struct kcopyd_job *) context; + + if (error) { + if (job->rw == WRITE) + job->write_err &= error; + else + job->read_err = 1; + + if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) { + push(&_complete_jobs, job); + wake(); + return; + } + } + + if (job->rw == WRITE) + push(&_complete_jobs, job); + + else { + job->rw = WRITE; + push(&_io_jobs, job); + } + + wake(); +} + +/* + * Request io on as many buffer heads as we can currently get for + * a particular job. + */ +static int run_io_job(struct kcopyd_job *job) +{ + int r; + + if (job->rw == READ) + r = dm_io_async(1, &job->source, job->rw, + job->pages, + job->offset, complete_io, job); + + else + r = dm_io_async(job->num_dests, job->dests, job->rw, + job->pages, + job->offset, complete_io, job); + + return r; +} + +static int run_pages_job(struct kcopyd_job *job) +{ + int r; + + job->nr_pages = dm_div_up(job->dests[0].count + job->offset, + PAGE_SIZE >> 9); + r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages); + if (!r) { + /* this job is ready for io */ + push(&_io_jobs, job); + return 0; + } + + if (r == -ENOMEM) + /* can't complete now */ + return 1; + + return r; +} + +/* + * Run through a list for as long as possible. Returns the count + * of successful jobs. 
+ */ +static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *)) +{ + struct kcopyd_job *job; + int r, count = 0; + + while ((job = pop(jobs))) { + + r = fn(job); + + if (r < 0) { + /* error this rogue job */ + if (job->rw == WRITE) + job->write_err = (unsigned int) -1; + else + job->read_err = 1; + push(&_complete_jobs, job); + break; + } + + if (r > 0) { + /* + * We couldn't service this job ATM, so + * push this job back onto the list. + */ + push(jobs, job); + break; + } + + count++; + } + + return count; +} + +/* + * kcopyd does this every time it's woken up. + */ +static void do_work(void *ignored) +{ + /* + * The order that these are called is *very* important. + * complete jobs can free some pages for pages jobs. + * Pages jobs when successful will jump onto the io jobs + * list. io jobs call wake when they complete and it all + * starts again. + */ + process_jobs(&_complete_jobs, run_complete_job); + process_jobs(&_pages_jobs, run_pages_job); + process_jobs(&_io_jobs, run_io_job); +} + +/* + * If we are copying a small region we just dispatch a single job + * to do the copy, otherwise the io has to be split up into many + * jobs. + */ +static void dispatch_job(struct kcopyd_job *job) +{ + push(&_pages_jobs, job); + wake(); +} + +#define SUB_JOB_SIZE 128 +static void segment_complete(int read_err, + unsigned int write_err, void *context) +{ + /* FIXME: tidy this function */ + sector_t progress = 0; + sector_t count = 0; + struct kcopyd_job *job = (struct kcopyd_job *) context; + + down(&job->lock); + + /* update the error */ + if (read_err) + job->read_err = 1; + + if (write_err) + job->write_err &= write_err; + + /* + * Only dispatch more work if there hasn't been an error. + */ + if ((!job->read_err && !job->write_err) || + test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) { + /* get the next chunk of work */ + progress = job->progress; + count = job->source.count - progress; + if (count) { + if (count > SUB_JOB_SIZE) + count = SUB_JOB_SIZE; + + job->progress += count; + } + } + up(&job->lock); + + if (count) { + int i; + struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO); + + *sub_job = *job; + sub_job->source.sector += progress; + sub_job->source.count = count; + + for (i = 0; i < job->num_dests; i++) { + sub_job->dests[i].sector += progress; + sub_job->dests[i].count = count; + } + + sub_job->fn = segment_complete; + sub_job->context = job; + dispatch_job(sub_job); + + } else if (atomic_dec_and_test(&job->sub_jobs)) { + + /* + * To avoid a race we must keep the job around + * until after the notify function has completed. + * Otherwise the client may try and stop the job + * after we've completed. + */ + job->fn(read_err, write_err, job->context); + mempool_free(job, _job_pool); + } +} + +/* + * Create some little jobs that will do the move between + * them. + */ +#define SPLIT_COUNT 8 +static void split_job(struct kcopyd_job *job) +{ + int i; + + atomic_set(&job->sub_jobs, SPLIT_COUNT); + for (i = 0; i < SPLIT_COUNT; i++) + segment_complete(0, 0u, job); +} + +int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from, + unsigned int num_dests, struct io_region *dests, + unsigned int flags, kcopyd_notify_fn fn, void *context) +{ + struct kcopyd_job *job; + + /* + * Allocate a new job. + */ + job = mempool_alloc(_job_pool, GFP_NOIO); + + /* + * set up for the read. 
+ */ + job->kc = kc; + job->flags = flags; + job->read_err = 0; + job->write_err = 0; + job->rw = READ; + + job->source = *from; + + job->num_dests = num_dests; + memcpy(&job->dests, dests, sizeof(*dests) * num_dests); + + job->offset = 0; + job->nr_pages = 0; + job->pages = NULL; + + job->fn = fn; + job->context = context; + + if (job->source.count < SUB_JOB_SIZE) + dispatch_job(job); + + else { + init_MUTEX(&job->lock); + job->progress = 0; + split_job(job); + } + + return 0; +} + +/* + * Cancels a kcopyd job, eg. someone might be deactivating a + * mirror. + */ +int kcopyd_cancel(struct kcopyd_job *job, int block) +{ + /* FIXME: finish */ + return -1; +} + +/*----------------------------------------------------------------- + * Unit setup + *---------------------------------------------------------------*/ +static DECLARE_MUTEX(_client_lock); +static LIST_HEAD(_clients); + +static int client_add(struct kcopyd_client *kc) +{ + down(&_client_lock); + list_add(&kc->list, &_clients); + up(&_client_lock); + return 0; +} + +static void client_del(struct kcopyd_client *kc) +{ + down(&_client_lock); + list_del(&kc->list); + up(&_client_lock); +} + +static DECLARE_MUTEX(kcopyd_init_lock); +static int kcopyd_clients = 0; + +static int kcopyd_init(void) +{ + int r; + + down(&kcopyd_init_lock); + + if (kcopyd_clients) { + /* Already initialized. */ + kcopyd_clients++; + up(&kcopyd_init_lock); + return 0; + } + + r = jobs_init(); + if (r) { + up(&kcopyd_init_lock); + return r; + } + + _kcopyd_wq = create_singlethread_workqueue("kcopyd"); + if (!_kcopyd_wq) { + jobs_exit(); + up(&kcopyd_init_lock); + return -ENOMEM; + } + + kcopyd_clients++; + INIT_WORK(&_kcopyd_work, do_work, NULL); + up(&kcopyd_init_lock); + return 0; +} + +static void kcopyd_exit(void) +{ + down(&kcopyd_init_lock); + kcopyd_clients--; + if (!kcopyd_clients) { + jobs_exit(); + destroy_workqueue(_kcopyd_wq); + _kcopyd_wq = NULL; + } + up(&kcopyd_init_lock); +} + +int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result) +{ + int r = 0; + struct kcopyd_client *kc; + + r = kcopyd_init(); + if (r) + return r; + + kc = kmalloc(sizeof(*kc), GFP_KERNEL); + if (!kc) { + kcopyd_exit(); + return -ENOMEM; + } + + kc->lock = SPIN_LOCK_UNLOCKED; + kc->pages = NULL; + kc->nr_pages = kc->nr_free_pages = 0; + r = client_alloc_pages(kc, nr_pages); + if (r) { + kfree(kc); + kcopyd_exit(); + return r; + } + + r = dm_io_get(nr_pages); + if (r) { + client_free_pages(kc); + kfree(kc); + kcopyd_exit(); + return r; + } + + r = client_add(kc); + if (r) { + dm_io_put(nr_pages); + client_free_pages(kc); + kfree(kc); + kcopyd_exit(); + return r; + } + + *result = kc; + return 0; +} + +void kcopyd_client_destroy(struct kcopyd_client *kc) +{ + dm_io_put(kc->nr_pages); + client_free_pages(kc); + client_del(kc); + kfree(kc); + kcopyd_exit(); +} + +EXPORT_SYMBOL(kcopyd_client_create); +EXPORT_SYMBOL(kcopyd_client_destroy); +EXPORT_SYMBOL(kcopyd_copy); +EXPORT_SYMBOL(kcopyd_cancel); diff --git a/drivers/md/kcopyd.h b/drivers/md/kcopyd.h new file mode 100644 index 000000000..4621ea055 --- /dev/null +++ b/drivers/md/kcopyd.h @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2001 Sistina Software + * + * This file is released under the GPL. + * + * Kcopyd provides a simple interface for copying an area of one + * block-device to one or more other block-devices, with an asynchronous + * completion notification. 
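+ *
+ * A minimal usage sketch (the callback, devices and sector counts
+ * are illustrative only; error handling is omitted):
+ *
+ *	static void my_done(int read_err, unsigned int write_err,
+ *			    void *context)
+ *	{
+ *		...
+ *	}
+ *
+ *	struct kcopyd_client *kc;
+ *	struct io_region from = { src_bdev, 0, 128 };
+ *	struct io_region to   = { dst_bdev, 0, 128 };
+ *
+ *	kcopyd_client_create(64, &kc);
+ *	kcopyd_copy(kc, &from, 1, &to, 0, my_done, NULL);
+ *	...
+ *	kcopyd_client_destroy(kc);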
+ */ + +#ifndef DM_KCOPYD_H +#define DM_KCOPYD_H + +#include "dm-io.h" + +/* FIXME: make this configurable */ +#define KCOPYD_MAX_REGIONS 8 + +#define KCOPYD_IGNORE_ERROR 1 + +/* + * To use kcopyd you must first create a kcopyd client object. + */ +struct kcopyd_client; +int kcopyd_client_create(unsigned int num_pages, struct kcopyd_client **result); +void kcopyd_client_destroy(struct kcopyd_client *kc); + +/* + * Submit a copy job to kcopyd. This is built on top of the + * previous three fns. + * + * read_err is a boolean, + * write_err is a bitset, with 1 bit for each destination region + */ +typedef void (*kcopyd_notify_fn)(int read_err, + unsigned int write_err, void *context); + +int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from, + unsigned int num_dests, struct io_region *dests, + unsigned int flags, kcopyd_notify_fn fn, void *context); + +#endif diff --git a/drivers/media/video/ovcamchip/ov6x20.c b/drivers/media/video/ovcamchip/ov6x20.c new file mode 100644 index 000000000..3433619ad --- /dev/null +++ b/drivers/media/video/ovcamchip/ov6x20.c @@ -0,0 +1,415 @@ +/* OmniVision OV6620/OV6120 Camera Chip Support Code + * + * Copyright (c) 1999-2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. + */ + +#define DEBUG + +#include +#include "ovcamchip_priv.h" + +/* Registers */ +#define REG_GAIN 0x00 /* gain [5:0] */ +#define REG_BLUE 0x01 /* blue gain */ +#define REG_RED 0x02 /* red gain */ +#define REG_SAT 0x03 /* saturation */ +#define REG_CNT 0x05 /* Y contrast */ +#define REG_BRT 0x06 /* Y brightness */ +#define REG_WB_BLUE 0x0C /* WB blue ratio [5:0] */ +#define REG_WB_RED 0x0D /* WB red ratio [5:0] */ +#define REG_EXP 0x10 /* exposure */ + +/* Window parameters */ +#define HWSBASE 0x38 +#define HWEBASE 0x3A +#define VWSBASE 0x05 +#define VWEBASE 0x06 + +struct ov6x20 { + int auto_brt; + int auto_exp; + int backlight; + int bandfilt; + int mirror; +}; + +/* Initial values for use with OV511/OV511+ cameras */ +static struct ovcamchip_regvals regvals_init_6x20_511[] = { + { 0x12, 0x80 }, /* reset */ + { 0x11, 0x01 }, + { 0x03, 0x60 }, + { 0x05, 0x7f }, /* For when autoadjust is off */ + { 0x07, 0xa8 }, + { 0x0c, 0x24 }, + { 0x0d, 0x24 }, + { 0x0f, 0x15 }, /* COMS */ + { 0x10, 0x75 }, /* AEC Exposure time */ + { 0x12, 0x24 }, /* Enable AGC and AWB */ + { 0x14, 0x04 }, + { 0x16, 0x03 }, + { 0x26, 0xb2 }, /* BLC enable */ + /* 0x28: 0x05 Selects RGB format if RGB on */ + { 0x28, 0x05 }, + { 0x2a, 0x04 }, /* Disable framerate adjust */ + { 0x2d, 0x99 }, + { 0x33, 0xa0 }, /* Color Processing Parameter */ + { 0x34, 0xd2 }, /* Max A/D range */ + { 0x38, 0x8b }, + { 0x39, 0x40 }, + + { 0x3c, 0x39 }, /* Enable AEC mode changing */ + { 0x3c, 0x3c }, /* Change AEC mode */ + { 0x3c, 0x24 }, /* Disable AEC mode changing */ + + { 0x3d, 0x80 }, + /* These next two registers (0x4a, 0x4b) are undocumented. 
They + * control the color balance */ + { 0x4a, 0x80 }, + { 0x4b, 0x80 }, + { 0x4d, 0xd2 }, /* This reduces noise a bit */ + { 0x4e, 0xc1 }, + { 0x4f, 0x04 }, + { 0xff, 0xff }, /* END MARKER */ +}; + +/* Initial values for use with OV518 cameras */ +static struct ovcamchip_regvals regvals_init_6x20_518[] = { + { 0x12, 0x80 }, /* Do a reset */ + { 0x03, 0xc0 }, /* Saturation */ + { 0x05, 0x8a }, /* Contrast */ + { 0x0c, 0x24 }, /* AWB blue */ + { 0x0d, 0x24 }, /* AWB red */ + { 0x0e, 0x8d }, /* Additional 2x gain */ + { 0x0f, 0x25 }, /* Black expanding level = 1.3V */ + { 0x11, 0x01 }, /* Clock div. */ + { 0x12, 0x24 }, /* Enable AGC and AWB */ + { 0x13, 0x01 }, /* (default) */ + { 0x14, 0x80 }, /* Set reserved bit 7 */ + { 0x15, 0x01 }, /* (default) */ + { 0x16, 0x03 }, /* (default) */ + { 0x17, 0x38 }, /* (default) */ + { 0x18, 0xea }, /* (default) */ + { 0x19, 0x04 }, + { 0x1a, 0x93 }, + { 0x1b, 0x00 }, /* (default) */ + { 0x1e, 0xc4 }, /* (default) */ + { 0x1f, 0x04 }, /* (default) */ + { 0x20, 0x20 }, /* Enable 1st stage aperture correction */ + { 0x21, 0x10 }, /* Y offset */ + { 0x22, 0x88 }, /* U offset */ + { 0x23, 0xc0 }, /* Set XTAL power level */ + { 0x24, 0x53 }, /* AEC bright ratio */ + { 0x25, 0x7a }, /* AEC black ratio */ + { 0x26, 0xb2 }, /* BLC enable */ + { 0x27, 0xa2 }, /* Full output range */ + { 0x28, 0x01 }, /* (default) */ + { 0x29, 0x00 }, /* (default) */ + { 0x2a, 0x84 }, /* (default) */ + { 0x2b, 0xa8 }, /* Set custom frame rate */ + { 0x2c, 0xa0 }, /* (reserved) */ + { 0x2d, 0x95 }, /* Enable banding filter */ + { 0x2e, 0x88 }, /* V offset */ + { 0x33, 0x22 }, /* Luminance gamma on */ + { 0x34, 0xc7 }, /* A/D bias */ + { 0x36, 0x12 }, /* (reserved) */ + { 0x37, 0x63 }, /* (reserved) */ + { 0x38, 0x8b }, /* Quick AEC/AEB */ + { 0x39, 0x00 }, /* (default) */ + { 0x3a, 0x0f }, /* (default) */ + { 0x3b, 0x3c }, /* (default) */ + { 0x3c, 0x5c }, /* AEC controls */ + { 0x3d, 0x80 }, /* Drop 1 (bad) frame when AEC change */ + { 0x3e, 0x80 }, /* (default) */ + { 0x3f, 0x02 }, /* (default) */ + { 0x40, 0x10 }, /* (reserved) */ + { 0x41, 0x10 }, /* (reserved) */ + { 0x42, 0x00 }, /* (reserved) */ + { 0x43, 0x7f }, /* (reserved) */ + { 0x44, 0x80 }, /* (reserved) */ + { 0x45, 0x1c }, /* (reserved) */ + { 0x46, 0x1c }, /* (reserved) */ + { 0x47, 0x80 }, /* (reserved) */ + { 0x48, 0x5f }, /* (reserved) */ + { 0x49, 0x00 }, /* (reserved) */ + { 0x4a, 0x00 }, /* Color balance (undocumented) */ + { 0x4b, 0x80 }, /* Color balance (undocumented) */ + { 0x4c, 0x58 }, /* (reserved) */ + { 0x4d, 0xd2 }, /* U *= .938, V *= .838 */ + { 0x4e, 0xa0 }, /* (default) */ + { 0x4f, 0x04 }, /* UV 3-point average */ + { 0x50, 0xff }, /* (reserved) */ + { 0x51, 0x58 }, /* (reserved) */ + { 0x52, 0xc0 }, /* (reserved) */ + { 0x53, 0x42 }, /* (reserved) */ + { 0x27, 0xa6 }, /* Enable manual offset adj. (reg 21 & 22) */ + { 0x12, 0x20 }, + { 0x12, 0x24 }, + + { 0xff, 0xff }, /* END MARKER */ +}; + +/* This initializes the OV6x20 camera chip and relevant variables. 
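+ *
+ * (A hedged illustration rather than original text: tables of this form are
+ *  walked by ov_write_regvals() one { reg, val } pair at a time until the
+ *  { 0xff, 0xff } end marker, so a minimal override table would be
+ *
+ *      static struct ovcamchip_regvals my_regvals[] = {
+ *              { 0x11, 0x01 },         // clock divider
+ *              { 0xff, 0xff },         // END MARKER
+ *      };
+ *
+ *  and would be written out with ov_write_regvals(c, my_regvals).)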
*/ +static int ov6x20_init(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x20 *s; + int rc; + + DDEBUG(4, &c->dev, "entered"); + + switch (c->adapter->id) { + case I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV511: + rc = ov_write_regvals(c, regvals_init_6x20_511); + break; + case I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518: + rc = ov_write_regvals(c, regvals_init_6x20_518); + break; + default: + dev_err(&c->dev, "ov6x20: Unsupported adapter\n"); + rc = -ENODEV; + } + + if (rc < 0) + return rc; + + ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL); + if (!s) + return -ENOMEM; + memset(s, 0, sizeof *s); + + s->auto_brt = 1; + s->auto_exp = 1; + + return rc; +} + +static int ov6x20_free(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + kfree(ov->spriv); + return 0; +} + +static int ov6x20_set_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x20 *s = ov->spriv; + int rc; + int v = ctl->value; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_write(c, REG_CNT, v >> 8); + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_write(c, REG_BRT, v >> 8); + break; + case OVCAMCHIP_CID_SAT: + rc = ov_write(c, REG_SAT, v >> 8); + break; + case OVCAMCHIP_CID_HUE: + rc = ov_write(c, REG_RED, 0xFF - (v >> 8)); + if (rc < 0) + goto out; + + rc = ov_write(c, REG_BLUE, v >> 8); + break; + case OVCAMCHIP_CID_EXP: + rc = ov_write(c, REG_EXP, v); + break; + case OVCAMCHIP_CID_FREQ: + { + int sixty = (v == 60); + + rc = ov_write(c, 0x2b, sixty?0xa8:0x28); + if (rc < 0) + goto out; + + rc = ov_write(c, 0x2a, sixty?0x84:0xa4); + break; + } + case OVCAMCHIP_CID_BANDFILT: + rc = ov_write_mask(c, 0x2d, v?0x04:0x00, 0x04); + s->bandfilt = v; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + rc = ov_write_mask(c, 0x2d, v?0x10:0x00, 0x10); + s->auto_brt = v; + break; + case OVCAMCHIP_CID_AUTOEXP: + rc = ov_write_mask(c, 0x13, v?0x01:0x00, 0x01); + s->auto_exp = v; + break; + case OVCAMCHIP_CID_BACKLIGHT: + { + rc = ov_write_mask(c, 0x4e, v?0xe0:0xc0, 0xe0); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x29, v?0x08:0x00, 0x08); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x0e, v?0x80:0x00, 0x80); + s->backlight = v; + break; + } + case OVCAMCHIP_CID_MIRROR: + rc = ov_write_mask(c, 0x12, v?0x40:0x00, 0x40); + s->mirror = v; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + +out: + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, v, rc); + return rc; +} + +static int ov6x20_get_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x20 *s = ov->spriv; + int rc = 0; + unsigned char val = 0; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_read(c, REG_CNT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_read(c, REG_BRT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_read(c, REG_SAT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_HUE: + rc = ov_read(c, REG_BLUE, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_EXP: + rc = ov_read(c, REG_EXP, &val); + ctl->value = val; + break; + case OVCAMCHIP_CID_BANDFILT: + ctl->value = s->bandfilt; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + ctl->value = s->auto_brt; + break; + case OVCAMCHIP_CID_AUTOEXP: + ctl->value = s->auto_exp; + break; + case OVCAMCHIP_CID_BACKLIGHT: + ctl->value = s->backlight; + break; + case OVCAMCHIP_CID_MIRROR: 
+ ctl->value = s->mirror; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, ctl->value, rc); + return rc; +} + +static int ov6x20_mode_init(struct i2c_client *c, struct ovcamchip_window *win) +{ + /******** QCIF-specific regs ********/ + + ov_write(c, 0x14, win->quarter?0x24:0x04); + + /******** Palette-specific regs ********/ + + /* OV518 needs 8 bit multiplexed in color mode, and 16 bit in B&W */ + if (c->adapter->id == (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518)) { + if (win->format == VIDEO_PALETTE_GREY) + ov_write_mask(c, 0x13, 0x00, 0x20); + else + ov_write_mask(c, 0x13, 0x20, 0x20); + } else { + if (win->format == VIDEO_PALETTE_GREY) + ov_write_mask(c, 0x13, 0x20, 0x20); + else + ov_write_mask(c, 0x13, 0x00, 0x20); + } + + /******** Clock programming ********/ + + /* The OV6620 needs special handling. This prevents the + * severe banding that normally occurs */ + + /* Clock down */ + ov_write(c, 0x2a, 0x04); + + ov_write(c, 0x11, win->clockdiv); + + ov_write(c, 0x2a, 0x84); + /* This next setting is critical. It seems to improve + * the gain or the contrast. The "reserved" bits seem + * to have some effect in this case. */ + ov_write(c, 0x2d, 0x85); /* FIXME: This messes up banding filter */ + + return 0; +} + +static int ov6x20_set_window(struct i2c_client *c, struct ovcamchip_window *win) +{ + int ret, hwscale, vwscale; + + ret = ov6x20_mode_init(c, win); + if (ret < 0) + return ret; + + if (win->quarter) { + hwscale = 0; + vwscale = 0; + } else { + hwscale = 1; + vwscale = 1; /* The datasheet says 0; it's wrong */ + } + + ov_write(c, 0x17, HWSBASE + (win->x >> hwscale)); + ov_write(c, 0x18, HWEBASE + ((win->x + win->width) >> hwscale)); + ov_write(c, 0x19, VWSBASE + (win->y >> vwscale)); + ov_write(c, 0x1a, VWEBASE + ((win->y + win->height) >> vwscale)); + + return 0; +} + +static int ov6x20_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + switch (cmd) { + case OVCAMCHIP_CMD_S_CTRL: + return ov6x20_set_control(c, arg); + case OVCAMCHIP_CMD_G_CTRL: + return ov6x20_get_control(c, arg); + case OVCAMCHIP_CMD_S_MODE: + return ov6x20_set_window(c, arg); + default: + DDEBUG(2, &c->dev, "command not supported: %d", cmd); + return -ENOIOCTLCMD; + } +} + +struct ovcamchip_ops ov6x20_ops = { + .init = ov6x20_init, + .free = ov6x20_free, + .command = ov6x20_command, +}; diff --git a/drivers/media/video/ovcamchip/ov6x30.c b/drivers/media/video/ovcamchip/ov6x30.c new file mode 100644 index 000000000..44a842379 --- /dev/null +++ b/drivers/media/video/ovcamchip/ov6x30.c @@ -0,0 +1,374 @@ +/* OmniVision OV6630/OV6130 Camera Chip Support Code + * + * Copyright (c) 1999-2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. 
+ */ + +#define DEBUG + +#include +#include "ovcamchip_priv.h" + +/* Registers */ +#define REG_GAIN 0x00 /* gain [5:0] */ +#define REG_BLUE 0x01 /* blue gain */ +#define REG_RED 0x02 /* red gain */ +#define REG_SAT 0x03 /* saturation [7:3] */ +#define REG_CNT 0x05 /* Y contrast [3:0] */ +#define REG_BRT 0x06 /* Y brightness */ +#define REG_SHARP 0x07 /* sharpness */ +#define REG_WB_BLUE 0x0C /* WB blue ratio [5:0] */ +#define REG_WB_RED 0x0D /* WB red ratio [5:0] */ +#define REG_EXP 0x10 /* exposure */ + +/* Window parameters */ +#define HWSBASE 0x38 +#define HWEBASE 0x3A +#define VWSBASE 0x05 +#define VWEBASE 0x06 + +struct ov6x30 { + int auto_brt; + int auto_exp; + int backlight; + int bandfilt; + int mirror; +}; + +static struct ovcamchip_regvals regvals_init_6x30[] = { + { 0x12, 0x80 }, /* reset */ + { 0x00, 0x1f }, /* Gain */ + { 0x01, 0x99 }, /* Blue gain */ + { 0x02, 0x7c }, /* Red gain */ + { 0x03, 0xc0 }, /* Saturation */ + { 0x05, 0x0a }, /* Contrast */ + { 0x06, 0x95 }, /* Brightness */ + { 0x07, 0x2d }, /* Sharpness */ + { 0x0c, 0x20 }, + { 0x0d, 0x20 }, + { 0x0e, 0x20 }, + { 0x0f, 0x05 }, + { 0x10, 0x9a }, /* "exposure check" */ + { 0x11, 0x00 }, /* Pixel clock = fastest */ + { 0x12, 0x24 }, /* Enable AGC and AWB */ + { 0x13, 0x21 }, + { 0x14, 0x80 }, + { 0x15, 0x01 }, + { 0x16, 0x03 }, + { 0x17, 0x38 }, + { 0x18, 0xea }, + { 0x19, 0x04 }, + { 0x1a, 0x93 }, + { 0x1b, 0x00 }, + { 0x1e, 0xc4 }, + { 0x1f, 0x04 }, + { 0x20, 0x20 }, + { 0x21, 0x10 }, + { 0x22, 0x88 }, + { 0x23, 0xc0 }, /* Crystal circuit power level */ + { 0x25, 0x9a }, /* Increase AEC black pixel ratio */ + { 0x26, 0xb2 }, /* BLC enable */ + { 0x27, 0xa2 }, + { 0x28, 0x00 }, + { 0x29, 0x00 }, + { 0x2a, 0x84 }, /* (keep) */ + { 0x2b, 0xa8 }, /* (keep) */ + { 0x2c, 0xa0 }, + { 0x2d, 0x95 }, /* Enable auto-brightness */ + { 0x2e, 0x88 }, + { 0x33, 0x26 }, + { 0x34, 0x03 }, + { 0x36, 0x8f }, + { 0x37, 0x80 }, + { 0x38, 0x83 }, + { 0x39, 0x80 }, + { 0x3a, 0x0f }, + { 0x3b, 0x3c }, + { 0x3c, 0x1a }, + { 0x3d, 0x80 }, + { 0x3e, 0x80 }, + { 0x3f, 0x0e }, + { 0x40, 0x00 }, /* White bal */ + { 0x41, 0x00 }, /* White bal */ + { 0x42, 0x80 }, + { 0x43, 0x3f }, /* White bal */ + { 0x44, 0x80 }, + { 0x45, 0x20 }, + { 0x46, 0x20 }, + { 0x47, 0x80 }, + { 0x48, 0x7f }, + { 0x49, 0x00 }, + { 0x4a, 0x00 }, + { 0x4b, 0x80 }, + { 0x4c, 0xd0 }, + { 0x4d, 0x10 }, /* U = 0.563u, V = 0.714v */ + { 0x4e, 0x40 }, + { 0x4f, 0x07 }, /* UV average mode, color killer: strongest */ + { 0x50, 0xff }, + { 0x54, 0x23 }, /* Max AGC gain: 18dB */ + { 0x55, 0xff }, + { 0x56, 0x12 }, + { 0x57, 0x81 }, /* (default) */ + { 0x58, 0x75 }, + { 0x59, 0x01 }, /* AGC dark current compensation: +1 */ + { 0x5a, 0x2c }, + { 0x5b, 0x0f }, /* AWB chrominance levels */ + { 0x5c, 0x10 }, + { 0x3d, 0x80 }, + { 0x27, 0xa6 }, + /* Toggle AWB off and on */ + { 0x12, 0x20 }, + { 0x12, 0x24 }, + + { 0xff, 0xff }, /* END MARKER */ +}; + +/* This initializes the OV6x30 camera chip and relevant variables. 
*/ +static int ov6x30_init(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x30 *s; + int rc; + + DDEBUG(4, &c->dev, "entered"); + + rc = ov_write_regvals(c, regvals_init_6x30); + if (rc < 0) + return rc; + + ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL); + if (!s) + return -ENOMEM; + memset(s, 0, sizeof *s); + + s->auto_brt = 1; + s->auto_exp = 1; + + return rc; +} + +static int ov6x30_free(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + kfree(ov->spriv); + return 0; +} + +static int ov6x30_set_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x30 *s = ov->spriv; + int rc; + int v = ctl->value; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_write_mask(c, REG_CNT, v >> 12, 0x0f); + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_write(c, REG_BRT, v >> 8); + break; + case OVCAMCHIP_CID_SAT: + rc = ov_write(c, REG_SAT, v >> 8); + break; + case OVCAMCHIP_CID_HUE: + rc = ov_write(c, REG_RED, 0xFF - (v >> 8)); + if (rc < 0) + goto out; + + rc = ov_write(c, REG_BLUE, v >> 8); + break; + case OVCAMCHIP_CID_EXP: + rc = ov_write(c, REG_EXP, v); + break; + case OVCAMCHIP_CID_FREQ: + { + int sixty = (v == 60); + + rc = ov_write(c, 0x2b, sixty?0xa8:0x28); + if (rc < 0) + goto out; + + rc = ov_write(c, 0x2a, sixty?0x84:0xa4); + break; + } + case OVCAMCHIP_CID_BANDFILT: + rc = ov_write_mask(c, 0x2d, v?0x04:0x00, 0x04); + s->bandfilt = v; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + rc = ov_write_mask(c, 0x2d, v?0x10:0x00, 0x10); + s->auto_brt = v; + break; + case OVCAMCHIP_CID_AUTOEXP: + rc = ov_write_mask(c, 0x28, v?0x00:0x10, 0x10); + s->auto_exp = v; + break; + case OVCAMCHIP_CID_BACKLIGHT: + { + rc = ov_write_mask(c, 0x4e, v?0x80:0x60, 0xe0); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x29, v?0x08:0x00, 0x08); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x28, v?0x02:0x00, 0x02); + s->backlight = v; + break; + } + case OVCAMCHIP_CID_MIRROR: + rc = ov_write_mask(c, 0x12, v?0x40:0x00, 0x40); + s->mirror = v; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + +out: + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, v, rc); + return rc; +} + +static int ov6x30_get_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov6x30 *s = ov->spriv; + int rc = 0; + unsigned char val = 0; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_read(c, REG_CNT, &val); + ctl->value = (val & 0x0f) << 12; + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_read(c, REG_BRT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_read(c, REG_SAT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_HUE: + rc = ov_read(c, REG_BLUE, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_EXP: + rc = ov_read(c, REG_EXP, &val); + ctl->value = val; + break; + case OVCAMCHIP_CID_BANDFILT: + ctl->value = s->bandfilt; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + ctl->value = s->auto_brt; + break; + case OVCAMCHIP_CID_AUTOEXP: + ctl->value = s->auto_exp; + break; + case OVCAMCHIP_CID_BACKLIGHT: + ctl->value = s->backlight; + break; + case OVCAMCHIP_CID_MIRROR: + ctl->value = s->mirror; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, ctl->value, rc); + return rc; +} + +static int 
ov6x30_mode_init(struct i2c_client *c, struct ovcamchip_window *win) +{ + /******** QCIF-specific regs ********/ + + ov_write_mask(c, 0x14, win->quarter?0x20:0x00, 0x20); + + /******** Palette-specific regs ********/ + + if (win->format == VIDEO_PALETTE_GREY) { + if (c->adapter->id == (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518)) { + /* Do nothing - we're already in 8-bit mode */ + } else { + ov_write_mask(c, 0x13, 0x20, 0x20); + } + } else { + /* The OV518 needs special treatment. Although both the OV518 + * and the OV6630 support a 16-bit video bus, only the 8 bit Y + * bus is actually used. The UV bus is tied to ground. + * Therefore, the OV6630 needs to be in 8-bit multiplexed + * output mode */ + + if (c->adapter->id == (I2C_ALGO_SMBUS | I2C_HW_SMBUS_OV518)) { + /* Do nothing - we want to stay in 8-bit mode */ + /* Warning: Messing with reg 0x13 breaks OV518 color */ + } else { + ov_write_mask(c, 0x13, 0x00, 0x20); + } + } + + /******** Clock programming ********/ + + ov_write(c, 0x11, win->clockdiv); + + return 0; +} + +static int ov6x30_set_window(struct i2c_client *c, struct ovcamchip_window *win) +{ + int ret, hwscale, vwscale; + + ret = ov6x30_mode_init(c, win); + if (ret < 0) + return ret; + + if (win->quarter) { + hwscale = 0; + vwscale = 0; + } else { + hwscale = 1; + vwscale = 1; /* The datasheet says 0; it's wrong */ + } + + ov_write(c, 0x17, HWSBASE + (win->x >> hwscale)); + ov_write(c, 0x18, HWEBASE + ((win->x + win->width) >> hwscale)); + ov_write(c, 0x19, VWSBASE + (win->y >> vwscale)); + ov_write(c, 0x1a, VWEBASE + ((win->y + win->height) >> vwscale)); + + return 0; +} + +static int ov6x30_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + switch (cmd) { + case OVCAMCHIP_CMD_S_CTRL: + return ov6x30_set_control(c, arg); + case OVCAMCHIP_CMD_G_CTRL: + return ov6x30_get_control(c, arg); + case OVCAMCHIP_CMD_S_MODE: + return ov6x30_set_window(c, arg); + default: + DDEBUG(2, &c->dev, "command not supported: %d", cmd); + return -ENOIOCTLCMD; + } +} + +struct ovcamchip_ops ov6x30_ops = { + .init = ov6x30_init, + .free = ov6x30_free, + .command = ov6x30_command, +}; diff --git a/drivers/media/video/ovcamchip/ov76be.c b/drivers/media/video/ovcamchip/ov76be.c new file mode 100644 index 000000000..29bbdc05e --- /dev/null +++ b/drivers/media/video/ovcamchip/ov76be.c @@ -0,0 +1,303 @@ +/* OmniVision OV76BE Camera Chip Support Code + * + * Copyright (c) 1999-2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. + */ + +#define DEBUG + +#include +#include "ovcamchip_priv.h" + +/* OV7610 registers: Since the OV76BE is undocumented, we'll settle for these + * for now. 
*/ +#define REG_GAIN 0x00 /* gain [5:0] */ +#define REG_BLUE 0x01 /* blue channel balance */ +#define REG_RED 0x02 /* red channel balance */ +#define REG_SAT 0x03 /* saturation */ +#define REG_CNT 0x05 /* Y contrast */ +#define REG_BRT 0x06 /* Y brightness */ +#define REG_BLUE_BIAS 0x0C /* blue channel bias [5:0] */ +#define REG_RED_BIAS 0x0D /* red channel bias [5:0] */ +#define REG_GAMMA_COEFF 0x0E /* gamma settings */ +#define REG_WB_RANGE 0x0F /* AEC/ALC/S-AWB settings */ +#define REG_EXP 0x10 /* manual exposure setting */ +#define REG_CLOCK 0x11 /* polarity/clock prescaler */ +#define REG_FIELD_DIVIDE 0x16 /* field interval/mode settings */ +#define REG_HWIN_START 0x17 /* horizontal window start */ +#define REG_HWIN_END 0x18 /* horizontal window end */ +#define REG_VWIN_START 0x19 /* vertical window start */ +#define REG_VWIN_END 0x1A /* vertical window end */ +#define REG_PIXEL_SHIFT 0x1B /* pixel shift */ +#define REG_YOFFSET 0x21 /* Y channel offset */ +#define REG_UOFFSET 0x22 /* U channel offset */ +#define REG_ECW 0x24 /* exposure white level for AEC */ +#define REG_ECB 0x25 /* exposure black level for AEC */ +#define REG_FRAMERATE_H 0x2A /* frame rate MSB + misc */ +#define REG_FRAMERATE_L 0x2B /* frame rate LSB */ +#define REG_ALC 0x2C /* Auto Level Control settings */ +#define REG_VOFFSET 0x2E /* V channel offset adjustment */ +#define REG_ARRAY_BIAS 0x2F /* array bias -- don't change */ +#define REG_YGAMMA 0x33 /* misc gamma settings [7:6] */ +#define REG_BIAS_ADJUST 0x34 /* misc bias settings */ + +/* Window parameters */ +#define HWSBASE 0x38 +#define HWEBASE 0x3a +#define VWSBASE 0x05 +#define VWEBASE 0x05 + +struct ov76be { + int auto_brt; + int auto_exp; + int bandfilt; + int mirror; +}; + +/* NOTE: These are the same as the 7x10 settings, but should eventually be + * optimized for the OV76BE */ +static struct ovcamchip_regvals regvals_init_76be[] = { + { 0x10, 0xff }, + { 0x16, 0x03 }, + { 0x28, 0x24 }, + { 0x2b, 0xac }, + { 0x12, 0x00 }, + { 0x38, 0x81 }, + { 0x28, 0x24 }, /* 0c */ + { 0x0f, 0x85 }, /* lg's setting */ + { 0x15, 0x01 }, + { 0x20, 0x1c }, + { 0x23, 0x2a }, + { 0x24, 0x10 }, + { 0x25, 0x8a }, + { 0x26, 0xa2 }, + { 0x27, 0xc2 }, + { 0x2a, 0x04 }, + { 0x2c, 0xfe }, + { 0x2d, 0x93 }, + { 0x30, 0x71 }, + { 0x31, 0x60 }, + { 0x32, 0x26 }, + { 0x33, 0x20 }, + { 0x34, 0x48 }, + { 0x12, 0x24 }, + { 0x11, 0x01 }, + { 0x0c, 0x24 }, + { 0x0d, 0x24 }, + { 0xff, 0xff }, /* END MARKER */ +}; + +/* This initializes the OV76be camera chip and relevant variables. 
*/ +static int ov76be_init(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov76be *s; + int rc; + + DDEBUG(4, &c->dev, "entered"); + + rc = ov_write_regvals(c, regvals_init_76be); + if (rc < 0) + return rc; + + ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL); + if (!s) + return -ENOMEM; + memset(s, 0, sizeof *s); + + s->auto_brt = 1; + s->auto_exp = 1; + + return rc; +} + +static int ov76be_free(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + kfree(ov->spriv); + return 0; +} + +static int ov76be_set_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov76be *s = ov->spriv; + int rc; + int v = ctl->value; + + switch (ctl->id) { + case OVCAMCHIP_CID_BRIGHT: + rc = ov_write(c, REG_BRT, v >> 8); + break; + case OVCAMCHIP_CID_SAT: + rc = ov_write(c, REG_SAT, v >> 8); + break; + case OVCAMCHIP_CID_EXP: + rc = ov_write(c, REG_EXP, v); + break; + case OVCAMCHIP_CID_FREQ: + { + int sixty = (v == 60); + + rc = ov_write_mask(c, 0x2a, sixty?0x00:0x80, 0x80); + if (rc < 0) + goto out; + + rc = ov_write(c, 0x2b, sixty?0x00:0xac); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x76, 0x01, 0x01); + break; + } + case OVCAMCHIP_CID_BANDFILT: + rc = ov_write_mask(c, 0x2d, v?0x04:0x00, 0x04); + s->bandfilt = v; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + rc = ov_write_mask(c, 0x2d, v?0x10:0x00, 0x10); + s->auto_brt = v; + break; + case OVCAMCHIP_CID_AUTOEXP: + rc = ov_write_mask(c, 0x13, v?0x01:0x00, 0x01); + s->auto_exp = v; + break; + case OVCAMCHIP_CID_MIRROR: + rc = ov_write_mask(c, 0x12, v?0x40:0x00, 0x40); + s->mirror = v; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + +out: + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, v, rc); + return rc; +} + +static int ov76be_get_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov76be *s = ov->spriv; + int rc = 0; + unsigned char val = 0; + + switch (ctl->id) { + case OVCAMCHIP_CID_BRIGHT: + rc = ov_read(c, REG_BRT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_read(c, REG_SAT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_EXP: + rc = ov_read(c, REG_EXP, &val); + ctl->value = val; + break; + case OVCAMCHIP_CID_BANDFILT: + ctl->value = s->bandfilt; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + ctl->value = s->auto_brt; + break; + case OVCAMCHIP_CID_AUTOEXP: + ctl->value = s->auto_exp; + break; + case OVCAMCHIP_CID_MIRROR: + ctl->value = s->mirror; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, ctl->value, rc); + return rc; +} + +static int ov76be_mode_init(struct i2c_client *c, struct ovcamchip_window *win) +{ + int qvga = win->quarter; + + /******** QVGA-specific regs ********/ + + ov_write(c, 0x14, qvga?0xa4:0x84); + + /******** Palette-specific regs ********/ + + if (win->format == VIDEO_PALETTE_GREY) { + ov_write_mask(c, 0x0e, 0x40, 0x40); + ov_write_mask(c, 0x13, 0x20, 0x20); + } else { + ov_write_mask(c, 0x0e, 0x00, 0x40); + ov_write_mask(c, 0x13, 0x00, 0x20); + } + + /******** Clock programming ********/ + + ov_write(c, 0x11, win->clockdiv); + + /******** Resolution-specific ********/ + + if (win->width == 640 && win->height == 480) + ov_write(c, 0x35, 0x9e); + else + ov_write(c, 0x35, 0x1e); + + return 0; +} + +static int 
ov76be_set_window(struct i2c_client *c, struct ovcamchip_window *win) +{ + int ret, hwscale, vwscale; + + ret = ov76be_mode_init(c, win); + if (ret < 0) + return ret; + + if (win->quarter) { + hwscale = 1; + vwscale = 0; + } else { + hwscale = 2; + vwscale = 1; + } + + ov_write(c, 0x17, HWSBASE + (win->x >> hwscale)); + ov_write(c, 0x18, HWEBASE + ((win->x + win->width) >> hwscale)); + ov_write(c, 0x19, VWSBASE + (win->y >> vwscale)); + ov_write(c, 0x1a, VWEBASE + ((win->y + win->height) >> vwscale)); + + return 0; +} + +static int ov76be_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + switch (cmd) { + case OVCAMCHIP_CMD_S_CTRL: + return ov76be_set_control(c, arg); + case OVCAMCHIP_CMD_G_CTRL: + return ov76be_get_control(c, arg); + case OVCAMCHIP_CMD_S_MODE: + return ov76be_set_window(c, arg); + default: + DDEBUG(2, &c->dev, "command not supported: %d", cmd); + return -ENOIOCTLCMD; + } +} + +struct ovcamchip_ops ov76be_ops = { + .init = ov76be_init, + .free = ov76be_free, + .command = ov76be_command, +}; diff --git a/drivers/media/video/ovcamchip/ov7x10.c b/drivers/media/video/ovcamchip/ov7x10.c new file mode 100644 index 000000000..6c383d4b1 --- /dev/null +++ b/drivers/media/video/ovcamchip/ov7x10.c @@ -0,0 +1,335 @@ +/* OmniVision OV7610/OV7110 Camera Chip Support Code + * + * Copyright (c) 1999-2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * Color fixes by by Orion Sky Lawlor (2/26/2000) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. + */ + +#define DEBUG + +#include +#include "ovcamchip_priv.h" + +/* Registers */ +#define REG_GAIN 0x00 /* gain [5:0] */ +#define REG_BLUE 0x01 /* blue channel balance */ +#define REG_RED 0x02 /* red channel balance */ +#define REG_SAT 0x03 /* saturation */ +#define REG_CNT 0x05 /* Y contrast */ +#define REG_BRT 0x06 /* Y brightness */ +#define REG_BLUE_BIAS 0x0C /* blue channel bias [5:0] */ +#define REG_RED_BIAS 0x0D /* red channel bias [5:0] */ +#define REG_GAMMA_COEFF 0x0E /* gamma settings */ +#define REG_WB_RANGE 0x0F /* AEC/ALC/S-AWB settings */ +#define REG_EXP 0x10 /* manual exposure setting */ +#define REG_CLOCK 0x11 /* polarity/clock prescaler */ +#define REG_FIELD_DIVIDE 0x16 /* field interval/mode settings */ +#define REG_HWIN_START 0x17 /* horizontal window start */ +#define REG_HWIN_END 0x18 /* horizontal window end */ +#define REG_VWIN_START 0x19 /* vertical window start */ +#define REG_VWIN_END 0x1A /* vertical window end */ +#define REG_PIXEL_SHIFT 0x1B /* pixel shift */ +#define REG_YOFFSET 0x21 /* Y channel offset */ +#define REG_UOFFSET 0x22 /* U channel offset */ +#define REG_ECW 0x24 /* exposure white level for AEC */ +#define REG_ECB 0x25 /* exposure black level for AEC */ +#define REG_FRAMERATE_H 0x2A /* frame rate MSB + misc */ +#define REG_FRAMERATE_L 0x2B /* frame rate LSB */ +#define REG_ALC 0x2C /* Auto Level Control settings */ +#define REG_VOFFSET 0x2E /* V channel offset adjustment */ +#define REG_ARRAY_BIAS 0x2F /* array bias -- don't change */ +#define REG_YGAMMA 0x33 /* misc gamma settings [7:6] */ +#define REG_BIAS_ADJUST 0x34 /* misc bias settings */ + +/* Window parameters */ +#define HWSBASE 0x38 +#define HWEBASE 0x3a +#define VWSBASE 0x05 +#define VWEBASE 0x05 + +struct ov7x10 { + int auto_brt; + int auto_exp; + int bandfilt; + 
int mirror; +}; + +/* Lawrence Glaister reports: + * + * Register 0x0f in the 7610 has the following effects: + * + * 0x85 (AEC method 1): Best overall, good contrast range + * 0x45 (AEC method 2): Very overexposed + * 0xa5 (spec sheet default): Ok, but the black level is + * shifted resulting in loss of contrast + * 0x05 (old driver setting): very overexposed, too much + * contrast + */ +static struct ovcamchip_regvals regvals_init_7x10[] = { + { 0x10, 0xff }, + { 0x16, 0x03 }, + { 0x28, 0x24 }, + { 0x2b, 0xac }, + { 0x12, 0x00 }, + { 0x38, 0x81 }, + { 0x28, 0x24 }, /* 0c */ + { 0x0f, 0x85 }, /* lg's setting */ + { 0x15, 0x01 }, + { 0x20, 0x1c }, + { 0x23, 0x2a }, + { 0x24, 0x10 }, + { 0x25, 0x8a }, + { 0x26, 0xa2 }, + { 0x27, 0xc2 }, + { 0x2a, 0x04 }, + { 0x2c, 0xfe }, + { 0x2d, 0x93 }, + { 0x30, 0x71 }, + { 0x31, 0x60 }, + { 0x32, 0x26 }, + { 0x33, 0x20 }, + { 0x34, 0x48 }, + { 0x12, 0x24 }, + { 0x11, 0x01 }, + { 0x0c, 0x24 }, + { 0x0d, 0x24 }, + { 0xff, 0xff }, /* END MARKER */ +}; + +/* This initializes the OV7x10 camera chip and relevant variables. */ +static int ov7x10_init(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x10 *s; + int rc; + + DDEBUG(4, &c->dev, "entered"); + + rc = ov_write_regvals(c, regvals_init_7x10); + if (rc < 0) + return rc; + + ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL); + if (!s) + return -ENOMEM; + memset(s, 0, sizeof *s); + + s->auto_brt = 1; + s->auto_exp = 1; + + return rc; +} + +static int ov7x10_free(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + kfree(ov->spriv); + return 0; +} + +static int ov7x10_set_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x10 *s = ov->spriv; + int rc; + int v = ctl->value; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_write(c, REG_CNT, v >> 8); + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_write(c, REG_BRT, v >> 8); + break; + case OVCAMCHIP_CID_SAT: + rc = ov_write(c, REG_SAT, v >> 8); + break; + case OVCAMCHIP_CID_HUE: + rc = ov_write(c, REG_RED, 0xFF - (v >> 8)); + if (rc < 0) + goto out; + + rc = ov_write(c, REG_BLUE, v >> 8); + break; + case OVCAMCHIP_CID_EXP: + rc = ov_write(c, REG_EXP, v); + break; + case OVCAMCHIP_CID_FREQ: + { + int sixty = (v == 60); + + rc = ov_write_mask(c, 0x2a, sixty?0x00:0x80, 0x80); + if (rc < 0) + goto out; + + rc = ov_write(c, 0x2b, sixty?0x00:0xac); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x13, 0x10, 0x10); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x13, 0x00, 0x10); + break; + } + case OVCAMCHIP_CID_BANDFILT: + rc = ov_write_mask(c, 0x2d, v?0x04:0x00, 0x04); + s->bandfilt = v; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + rc = ov_write_mask(c, 0x2d, v?0x10:0x00, 0x10); + s->auto_brt = v; + break; + case OVCAMCHIP_CID_AUTOEXP: + rc = ov_write_mask(c, 0x29, v?0x00:0x80, 0x80); + s->auto_exp = v; + break; + case OVCAMCHIP_CID_MIRROR: + rc = ov_write_mask(c, 0x12, v?0x40:0x00, 0x40); + s->mirror = v; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + +out: + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, v, rc); + return rc; +} + +static int ov7x10_get_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x10 *s = ov->spriv; + int rc = 0; + unsigned char val = 0; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_read(c, REG_CNT, &val); + ctl->value = val << 8; + break; + 
case OVCAMCHIP_CID_BRIGHT: + rc = ov_read(c, REG_BRT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_read(c, REG_SAT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_HUE: + rc = ov_read(c, REG_BLUE, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_EXP: + rc = ov_read(c, REG_EXP, &val); + ctl->value = val; + break; + case OVCAMCHIP_CID_BANDFILT: + ctl->value = s->bandfilt; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + ctl->value = s->auto_brt; + break; + case OVCAMCHIP_CID_AUTOEXP: + ctl->value = s->auto_exp; + break; + case OVCAMCHIP_CID_MIRROR: + ctl->value = s->mirror; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, ctl->value, rc); + return rc; +} + +static int ov7x10_mode_init(struct i2c_client *c, struct ovcamchip_window *win) +{ + int qvga = win->quarter; + + /******** QVGA-specific regs ********/ + + ov_write(c, 0x14, qvga?0x24:0x04); + + /******** Palette-specific regs ********/ + + if (win->format == VIDEO_PALETTE_GREY) { + ov_write_mask(c, 0x0e, 0x40, 0x40); + ov_write_mask(c, 0x13, 0x20, 0x20); + } else { + ov_write_mask(c, 0x0e, 0x00, 0x40); + ov_write_mask(c, 0x13, 0x00, 0x20); + } + + /******** Clock programming ********/ + + ov_write(c, 0x11, win->clockdiv); + + /******** Resolution-specific ********/ + + if (win->width == 640 && win->height == 480) + ov_write(c, 0x35, 0x9e); + else + ov_write(c, 0x35, 0x1e); + + return 0; +} + +static int ov7x10_set_window(struct i2c_client *c, struct ovcamchip_window *win) +{ + int ret, hwscale, vwscale; + + ret = ov7x10_mode_init(c, win); + if (ret < 0) + return ret; + + if (win->quarter) { + hwscale = 1; + vwscale = 0; + } else { + hwscale = 2; + vwscale = 1; + } + + ov_write(c, 0x17, HWSBASE + (win->x >> hwscale)); + ov_write(c, 0x18, HWEBASE + ((win->x + win->width) >> hwscale)); + ov_write(c, 0x19, VWSBASE + (win->y >> vwscale)); + ov_write(c, 0x1a, VWEBASE + ((win->y + win->height) >> vwscale)); + + return 0; +} + +static int ov7x10_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + switch (cmd) { + case OVCAMCHIP_CMD_S_CTRL: + return ov7x10_set_control(c, arg); + case OVCAMCHIP_CMD_G_CTRL: + return ov7x10_get_control(c, arg); + case OVCAMCHIP_CMD_S_MODE: + return ov7x10_set_window(c, arg); + default: + DDEBUG(2, &c->dev, "command not supported: %d", cmd); + return -ENOIOCTLCMD; + } +} + +struct ovcamchip_ops ov7x10_ops = { + .init = ov7x10_init, + .free = ov7x10_free, + .command = ov7x10_command, +}; diff --git a/drivers/media/video/ovcamchip/ov7x20.c b/drivers/media/video/ovcamchip/ov7x20.c new file mode 100644 index 000000000..3c8c48f33 --- /dev/null +++ b/drivers/media/video/ovcamchip/ov7x20.c @@ -0,0 +1,455 @@ +/* OmniVision OV7620/OV7120 Camera Chip Support Code + * + * Copyright (c) 1999-2004 Mark McClelland + * http://alpha.dyndns.org/ov511/ + * + * OV7620 fixes by Charl P. Botha + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. NO WARRANTY OF ANY KIND is expressed or implied. 
+ */ + +#define DEBUG + +#include +#include "ovcamchip_priv.h" + +/* Registers */ +#define REG_GAIN 0x00 /* gain [5:0] */ +#define REG_BLUE 0x01 /* blue gain */ +#define REG_RED 0x02 /* red gain */ +#define REG_SAT 0x03 /* saturation */ +#define REG_BRT 0x06 /* Y brightness */ +#define REG_SHARP 0x07 /* analog sharpness */ +#define REG_BLUE_BIAS 0x0C /* WB blue ratio [5:0] */ +#define REG_RED_BIAS 0x0D /* WB red ratio [5:0] */ +#define REG_EXP 0x10 /* exposure */ + +/* Default control settings. Values are in terms of V4L2 controls. */ +#define OV7120_DFL_BRIGHT 0x60 +#define OV7620_DFL_BRIGHT 0x60 +#define OV7120_DFL_SAT 0xb0 +#define OV7620_DFL_SAT 0xc0 +#define DFL_AUTO_EXP 1 +#define DFL_AUTO_GAIN 1 +#define OV7120_DFL_GAIN 0x00 +#define OV7620_DFL_GAIN 0x00 +/* NOTE: Since autoexposure is the default, these aren't programmed into the + * OV7x20 chip. They are just here because V4L2 expects a default */ +#define OV7120_DFL_EXP 0x7f +#define OV7620_DFL_EXP 0x7f + +/* Window parameters */ +#define HWSBASE 0x2F /* From 7620.SET (spec is wrong) */ +#define HWEBASE 0x2F +#define VWSBASE 0x05 +#define VWEBASE 0x05 + +struct ov7x20 { + int auto_brt; + int auto_exp; + int auto_gain; + int backlight; + int bandfilt; + int mirror; +}; + +/* Contrast look-up table */ +static unsigned char ctab[] = { + 0x01, 0x05, 0x09, 0x11, 0x15, 0x35, 0x37, 0x57, + 0x5b, 0xa5, 0xa7, 0xc7, 0xc9, 0xcf, 0xef, 0xff +}; + +/* Settings for (Black & White) OV7120 camera chip */ +static struct ovcamchip_regvals regvals_init_7120[] = { + { 0x12, 0x80 }, /* reset */ + { 0x13, 0x00 }, /* Autoadjust off */ + { 0x12, 0x20 }, /* Disable AWB */ + { 0x13, DFL_AUTO_GAIN?0x01:0x00 }, /* Autoadjust on (if desired) */ + { 0x00, OV7120_DFL_GAIN }, + { 0x01, 0x80 }, + { 0x02, 0x80 }, + { 0x03, OV7120_DFL_SAT }, + { 0x06, OV7120_DFL_BRIGHT }, + { 0x07, 0x00 }, + { 0x0c, 0x20 }, + { 0x0d, 0x20 }, + { 0x11, 0x01 }, + { 0x14, 0x84 }, + { 0x15, 0x01 }, + { 0x16, 0x03 }, + { 0x17, 0x2f }, + { 0x18, 0xcf }, + { 0x19, 0x06 }, + { 0x1a, 0xf5 }, + { 0x1b, 0x00 }, + { 0x20, 0x08 }, + { 0x21, 0x80 }, + { 0x22, 0x80 }, + { 0x23, 0x00 }, + { 0x26, 0xa0 }, + { 0x27, 0xfa }, + { 0x28, 0x20 }, /* DON'T set bit 6. 
It is for the OV7620 only */ + { 0x29, DFL_AUTO_EXP?0x00:0x80 }, + { 0x2a, 0x10 }, + { 0x2b, 0x00 }, + { 0x2c, 0x88 }, + { 0x2d, 0x95 }, + { 0x2e, 0x80 }, + { 0x2f, 0x44 }, + { 0x60, 0x20 }, + { 0x61, 0x02 }, + { 0x62, 0x5f }, + { 0x63, 0xd5 }, + { 0x64, 0x57 }, + { 0x65, 0x83 }, /* OV says "don't change this value" */ + { 0x66, 0x55 }, + { 0x67, 0x92 }, + { 0x68, 0xcf }, + { 0x69, 0x76 }, + { 0x6a, 0x22 }, + { 0x6b, 0xe2 }, + { 0x6c, 0x40 }, + { 0x6d, 0x48 }, + { 0x6e, 0x80 }, + { 0x6f, 0x0d }, + { 0x70, 0x89 }, + { 0x71, 0x00 }, + { 0x72, 0x14 }, + { 0x73, 0x54 }, + { 0x74, 0xa0 }, + { 0x75, 0x8e }, + { 0x76, 0x00 }, + { 0x77, 0xff }, + { 0x78, 0x80 }, + { 0x79, 0x80 }, + { 0x7a, 0x80 }, + { 0x7b, 0xe6 }, + { 0x7c, 0x00 }, + { 0x24, 0x3a }, + { 0x25, 0x60 }, + { 0xff, 0xff }, /* END MARKER */ +}; + +/* Settings for (color) OV7620 camera chip */ +static struct ovcamchip_regvals regvals_init_7620[] = { + { 0x12, 0x80 }, /* reset */ + { 0x00, OV7620_DFL_GAIN }, + { 0x01, 0x80 }, + { 0x02, 0x80 }, + { 0x03, OV7620_DFL_SAT }, + { 0x06, OV7620_DFL_BRIGHT }, + { 0x07, 0x00 }, + { 0x0c, 0x24 }, + { 0x0c, 0x24 }, + { 0x0d, 0x24 }, + { 0x11, 0x01 }, + { 0x12, 0x24 }, + { 0x13, DFL_AUTO_GAIN?0x01:0x00 }, + { 0x14, 0x84 }, + { 0x15, 0x01 }, + { 0x16, 0x03 }, + { 0x17, 0x2f }, + { 0x18, 0xcf }, + { 0x19, 0x06 }, + { 0x1a, 0xf5 }, + { 0x1b, 0x00 }, + { 0x20, 0x18 }, + { 0x21, 0x80 }, + { 0x22, 0x80 }, + { 0x23, 0x00 }, + { 0x26, 0xa2 }, + { 0x27, 0xea }, + { 0x28, 0x20 }, + { 0x29, DFL_AUTO_EXP?0x00:0x80 }, + { 0x2a, 0x10 }, + { 0x2b, 0x00 }, + { 0x2c, 0x88 }, + { 0x2d, 0x91 }, + { 0x2e, 0x80 }, + { 0x2f, 0x44 }, + { 0x60, 0x27 }, + { 0x61, 0x02 }, + { 0x62, 0x5f }, + { 0x63, 0xd5 }, + { 0x64, 0x57 }, + { 0x65, 0x83 }, + { 0x66, 0x55 }, + { 0x67, 0x92 }, + { 0x68, 0xcf }, + { 0x69, 0x76 }, + { 0x6a, 0x22 }, + { 0x6b, 0x00 }, + { 0x6c, 0x02 }, + { 0x6d, 0x44 }, + { 0x6e, 0x80 }, + { 0x6f, 0x1d }, + { 0x70, 0x8b }, + { 0x71, 0x00 }, + { 0x72, 0x14 }, + { 0x73, 0x54 }, + { 0x74, 0x00 }, + { 0x75, 0x8e }, + { 0x76, 0x00 }, + { 0x77, 0xff }, + { 0x78, 0x80 }, + { 0x79, 0x80 }, + { 0x7a, 0x80 }, + { 0x7b, 0xe2 }, + { 0x7c, 0x00 }, + { 0xff, 0xff }, /* END MARKER */ +}; + +/* Returns index into the specified look-up table, with 'n' elements, for which + * the value is greater than or equal to "val". If a match isn't found, (n-1) + * is returned. The entries in the table must be in ascending order. */ +static inline int ov7x20_lut_find(unsigned char lut[], int n, unsigned char val) +{ + int i = 0; + + while (lut[i] < val && i < n) + i++; + + return i; +} + +/* This initializes the OV7x20 camera chip and relevant variables. 
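+ *
+ * (Worked example for the contrast look-up above, added for illustration:
+ *  a 16-bit control value of 0x8000 becomes index 8 after the v >> 12
+ *  shift, so ctab[8] == 0x5b is written to register 0x64; reading 0x5b
+ *  back, ov7x20_lut_find(ctab, 16, 0x5b) returns 8 again and 8 << 12
+ *  reconstructs 0x8000, i.e. the round trip is quantized to 16 steps.)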
*/ +static int ov7x20_init(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x20 *s; + int rc; + + DDEBUG(4, &c->dev, "entered"); + + if (ov->mono) + rc = ov_write_regvals(c, regvals_init_7120); + else + rc = ov_write_regvals(c, regvals_init_7620); + + if (rc < 0) + return rc; + + ov->spriv = s = kmalloc(sizeof *s, GFP_KERNEL); + if (!s) + return -ENOMEM; + memset(s, 0, sizeof *s); + + s->auto_brt = 1; + s->auto_exp = DFL_AUTO_EXP; + s->auto_gain = DFL_AUTO_GAIN; + + return 0; +} + +static int ov7x20_free(struct i2c_client *c) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + + kfree(ov->spriv); + return 0; +} + +static int ov7x20_set_v4l1_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x20 *s = ov->spriv; + int rc; + int v = ctl->value; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + { + /* Use Y gamma control instead. Bit 0 enables it. */ + rc = ov_write(c, 0x64, ctab[v >> 12]); + break; + } + case OVCAMCHIP_CID_BRIGHT: + /* 7620 doesn't like manual changes when in auto mode */ + if (!s->auto_brt) + rc = ov_write(c, REG_BRT, v >> 8); + else + rc = 0; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_write(c, REG_SAT, v >> 8); + break; + case OVCAMCHIP_CID_EXP: + if (!s->auto_exp) + rc = ov_write(c, REG_EXP, v); + else + rc = -EBUSY; + break; + case OVCAMCHIP_CID_FREQ: + { + int sixty = (v == 60); + + rc = ov_write_mask(c, 0x2a, sixty?0x00:0x80, 0x80); + if (rc < 0) + goto out; + + rc = ov_write(c, 0x2b, sixty?0x00:0xac); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x76, 0x01, 0x01); + break; + } + case OVCAMCHIP_CID_BANDFILT: + rc = ov_write_mask(c, 0x2d, v?0x04:0x00, 0x04); + s->bandfilt = v; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + rc = ov_write_mask(c, 0x2d, v?0x10:0x00, 0x10); + s->auto_brt = v; + break; + case OVCAMCHIP_CID_AUTOEXP: + rc = ov_write_mask(c, 0x13, v?0x01:0x00, 0x01); + s->auto_exp = v; + break; + case OVCAMCHIP_CID_BACKLIGHT: + { + rc = ov_write_mask(c, 0x68, v?0xe0:0xc0, 0xe0); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x29, v?0x08:0x00, 0x08); + if (rc < 0) + goto out; + + rc = ov_write_mask(c, 0x28, v?0x02:0x00, 0x02); + s->backlight = v; + break; + } + case OVCAMCHIP_CID_MIRROR: + rc = ov_write_mask(c, 0x12, v?0x40:0x00, 0x40); + s->mirror = v; + break; + default: + DDEBUG(2, &c->dev, "control not supported: %d", ctl->id); + return -EPERM; + } + +out: + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, v, rc); + return rc; +} + +static int ov7x20_get_v4l1_control(struct i2c_client *c, + struct ovcamchip_control *ctl) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + struct ov7x20 *s = ov->spriv; + int rc = 0; + unsigned char val = 0; + + switch (ctl->id) { + case OVCAMCHIP_CID_CONT: + rc = ov_read(c, 0x64, &val); + ctl->value = ov7x20_lut_find(ctab, 16, val) << 12; + break; + case OVCAMCHIP_CID_BRIGHT: + rc = ov_read(c, REG_BRT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_SAT: + rc = ov_read(c, REG_SAT, &val); + ctl->value = val << 8; + break; + case OVCAMCHIP_CID_EXP: + rc = ov_read(c, REG_EXP, &val); + ctl->value = val; + break; + case OVCAMCHIP_CID_BANDFILT: + ctl->value = s->bandfilt; + break; + case OVCAMCHIP_CID_AUTOBRIGHT: + ctl->value = s->auto_brt; + break; + case OVCAMCHIP_CID_AUTOEXP: + ctl->value = s->auto_exp; + break; + case OVCAMCHIP_CID_BACKLIGHT: + ctl->value = s->backlight; + break; + case OVCAMCHIP_CID_MIRROR: + ctl->value = s->mirror; + break; + default: + DDEBUG(2, &c->dev, "control not 
supported: %d", ctl->id); + return -EPERM; + } + + DDEBUG(3, &c->dev, "id=%d, arg=%d, rc=%d", ctl->id, ctl->value, rc); + return rc; +} + +static int ov7x20_mode_init(struct i2c_client *c, struct ovcamchip_window *win) +{ + struct ovcamchip *ov = i2c_get_clientdata(c); + int qvga = win->quarter; + + /******** QVGA-specific regs ********/ + ov_write_mask(c, 0x14, qvga?0x20:0x00, 0x20); + ov_write_mask(c, 0x28, qvga?0x00:0x20, 0x20); + ov_write(c, 0x24, qvga?0x20:0x3a); + ov_write(c, 0x25, qvga?0x30:0x60); + ov_write_mask(c, 0x2d, qvga?0x40:0x00, 0x40); + if (!ov->mono) + ov_write_mask(c, 0x67, qvga?0xf0:0x90, 0xf0); + ov_write_mask(c, 0x74, qvga?0x20:0x00, 0x20); + + /******** Clock programming ********/ + + ov_write(c, 0x11, win->clockdiv); + + return 0; +} + +static int ov7x20_set_window(struct i2c_client *c, struct ovcamchip_window *win) +{ + int ret, hwscale, vwscale; + + ret = ov7x20_mode_init(c, win); + if (ret < 0) + return ret; + + if (win->quarter) { + hwscale = 1; + vwscale = 0; + } else { + hwscale = 2; + vwscale = 1; + } + + ov_write(c, 0x17, HWSBASE + (win->x >> hwscale)); + ov_write(c, 0x18, HWEBASE + ((win->x + win->width) >> hwscale)); + ov_write(c, 0x19, VWSBASE + (win->y >> vwscale)); + ov_write(c, 0x1a, VWEBASE + ((win->y + win->height) >> vwscale)); + + return 0; +} + +static int ov7x20_command(struct i2c_client *c, unsigned int cmd, void *arg) +{ + switch (cmd) { + case OVCAMCHIP_CMD_S_CTRL: + return ov7x20_set_v4l1_control(c, arg); + case OVCAMCHIP_CMD_G_CTRL: + return ov7x20_get_v4l1_control(c, arg); + case OVCAMCHIP_CMD_S_MODE: + return ov7x20_set_window(c, arg); + default: + DDEBUG(2, &c->dev, "command not supported: %d", cmd); + return -ENOIOCTLCMD; + } +} + +struct ovcamchip_ops ov7x20_ops = { + .init = ov7x20_init, + .free = ov7x20_free, + .command = ov7x20_command, +}; diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c new file mode 100644 index 000000000..d1a785628 --- /dev/null +++ b/drivers/mtd/chips/cfi_util.c @@ -0,0 +1,92 @@ +/* + * Common Flash Interface support: + * Generic utility functions not dependant on command set + * + * Copyright (C) 2002 Red Hat + * Copyright (C) 2003 STMicroelectronics Limited + * + * This code is covered by the GPL. 
+ * + * $Id: cfi_util.c,v 1.4 2004/07/14 08:38:44 dwmw2 Exp $ + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +struct cfi_extquery * +cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name) +{ + struct cfi_private *cfi = map->fldrv_priv; + __u32 base = 0; // cfi->chips[0].start; + int ofs_factor = cfi->interleave * cfi->device_type; + int i; + struct cfi_extquery *extp = NULL; + + printk(" %s Extended Query Table at 0x%4.4X\n", name, adr); + if (!adr) + goto out; + + /* Switch it into Query Mode */ + cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); + + extp = kmalloc(size, GFP_KERNEL); + if (!extp) { + printk(KERN_ERR "Failed to allocate memory\n"); + goto out; + } + + /* Read in the Extended Query Table */ + for (i=0; iMajorVersion != '1' || + (extp->MinorVersion < '0' || extp->MinorVersion > '3')) { + printk(KERN_WARNING " Unknown %s Extended Query " + "version %c.%c.\n", name, extp->MajorVersion, + extp->MinorVersion); + kfree(extp); + extp = NULL; + goto out; + } + +out: + /* Make sure it's in read mode */ + cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0xff, 0, base, map, cfi, cfi->device_type, NULL); + + return extp; +} + +EXPORT_SYMBOL(cfi_read_pri); + +void cfi_fixup(struct map_info *map, struct cfi_fixup* fixups) +{ + struct cfi_private *cfi = map->fldrv_priv; + struct cfi_fixup *f; + + for (f=fixups; f->fixup; f++) { + if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) && + ((f->id == CFI_ID_ANY) || (f->id == cfi->id))) { + f->fixup(map, f->param); + } + } +} + +EXPORT_SYMBOL(cfi_fixup); + +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c new file mode 100644 index 000000000..5f66e9bfb --- /dev/null +++ b/drivers/mtd/devices/phram.c @@ -0,0 +1,362 @@ +/** + * + * $Id: phram.c,v 1.1 2003/08/21 17:52:30 joern Exp $ + * + * Copyright (c) Jochen Schaeuble + * 07/2003 rewritten by Joern Engel + * + * DISCLAIMER: This driver makes use of Rusty's excellent module code, + * so it will not work for 2.4 without changes and it wont work for 2.4 + * as a module without major changes. Oh well! + * + * Usage: + * + * one commend line parameter per device, each in the form: + * phram=,, + * may be up to 63 characters. + * and can be octal, decimal or hexadecimal. If followed + * by "k", "M" or "G", the numbers will be interpreted as kilo, mega or + * gigabytes. + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#define ERROR(fmt, args...) printk(KERN_ERR "phram: " fmt , ## args) + +struct phram_mtd_list { + struct list_head list; + struct mtd_info *mtdinfo; +}; + +static LIST_HEAD(phram_list); + + + +int phram_erase(struct mtd_info *mtd, struct erase_info *instr) +{ + u_char *start = (u_char *)mtd->priv; + + if (instr->addr + instr->len > mtd->size) + return -EINVAL; + + memset(start + instr->addr, 0xff, instr->len); + + /* This'll catch a few races. Free the thing before returning :) + * I don't feel at all ashamed. This kind of thing is possible anyway + * with flash, but unlikely. 
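+ *
+ * (Aside, an illustrative example rather than original documentation: the
+ *  module parameter parsed further down takes one device per option in the
+ *  form  phram=<name>,<start>,<len> , where <len> accepts k/M/G suffixes,
+ *  e.g.  phram=phram0,0x80000000,64M  for 64 MiB of RAM at the 2 GiB mark.)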
+ */ + + instr->state = MTD_ERASE_DONE; + + if (instr->callback) + (*(instr->callback))(instr); + else + kfree(instr); + + return 0; +} + +int phram_point(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char **mtdbuf) +{ + u_char *start = (u_char *)mtd->priv; + + if (from + len > mtd->size) + return -EINVAL; + + *mtdbuf = start + from; + *retlen = len; + return 0; +} + +void phram_unpoint(struct mtd_info *mtd, u_char *addr, loff_t from, size_t len) +{ +} + +int phram_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + u_char *start = (u_char *)mtd->priv; + + if (from + len > mtd->size) + return -EINVAL; + + memcpy(buf, start + from, len); + + *retlen = len; + return 0; +} + +int phram_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + u_char *start = (u_char *)mtd->priv; + + if (to + len > mtd->size) + return -EINVAL; + + memcpy(start + to, buf, len); + + *retlen = len; + return 0; +} + + + +static void unregister_devices(void) +{ + struct phram_mtd_list *this; + + list_for_each_entry(this, &phram_list, list) { + del_mtd_device(this->mtdinfo); + iounmap(this->mtdinfo->priv); + kfree(this->mtdinfo); + kfree(this); + } +} + +static int register_device(char *name, unsigned long start, unsigned long len) +{ + struct phram_mtd_list *new; + int ret = -ENOMEM; + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + goto out0; + + new->mtdinfo = kmalloc(sizeof(struct mtd_info), GFP_KERNEL); + if (!new->mtdinfo) + goto out1; + + memset(new->mtdinfo, 0, sizeof(struct mtd_info)); + + ret = -EIO; + new->mtdinfo->priv = ioremap(start, len); + if (!new->mtdinfo->priv) { + ERROR("ioremap failed\n"); + goto out2; + } + + + new->mtdinfo->name = name; + new->mtdinfo->size = len; + new->mtdinfo->flags = MTD_CAP_RAM | MTD_ERASEABLE | MTD_VOLATILE; + new->mtdinfo->erase = phram_erase; + new->mtdinfo->point = phram_point; + new->mtdinfo->unpoint = phram_unpoint; + new->mtdinfo->read = phram_read; + new->mtdinfo->write = phram_write; + new->mtdinfo->owner = THIS_MODULE; + new->mtdinfo->type = MTD_RAM; + new->mtdinfo->erasesize = 0x0; + + ret = -EAGAIN; + if (add_mtd_device(new->mtdinfo)) { + ERROR("Failed to register new device\n"); + goto out3; + } + + list_add_tail(&new->list, &phram_list); + return 0; + +out3: + iounmap(new->mtdinfo->priv); +out2: + kfree(new->mtdinfo); +out1: + kfree(new); +out0: + return ret; +} + +static int ustrtoul(const char *cp, char **endp, unsigned int base) +{ + unsigned long result = simple_strtoul(cp, endp, base); + + switch (**endp) { + case 'G': + result *= 1024; + case 'M': + result *= 1024; + case 'k': + result *= 1024; + endp++; + } + return result; +} + +static int parse_num32(uint32_t *num32, const char *token) +{ + char *endp; + unsigned long n; + + n = ustrtoul(token, &endp, 0); + if (*endp) + return -EINVAL; + + *num32 = n; + return 0; +} + +static int parse_name(char **pname, const char *token) +{ + size_t len; + char *name; + + len = strlen(token) + 1; + if (len > 64) + return -ENOSPC; + + name = kmalloc(len, GFP_KERNEL); + if (!name) + return -ENOMEM; + + strcpy(name, token); + + *pname = name; + return 0; +} + +#define parse_err(fmt, args...) 
do { \ + ERROR(fmt , ## args); \ + return 0; \ +} while (0) + +static int phram_setup(const char *val, struct kernel_param *kp) +{ + char buf[64+12+12], *str = buf; + char *token[3]; + char *name; + uint32_t start; + uint32_t len; + int i, ret; + + if (strnlen(val, sizeof(str)) >= sizeof(str)) + parse_err("parameter too long\n"); + + strcpy(str, val); + + for (i=0; i<3; i++) + token[i] = strsep(&str, ","); + + if (str) + parse_err("too many arguments\n"); + + if (!token[2]) + parse_err("not enough arguments\n"); + + ret = parse_name(&name, token[0]); + if (ret == -ENOMEM) + parse_err("out of memory\n"); + if (ret == -ENOSPC) + parse_err("name too long\n"); + if (ret) + return 0; + + ret = parse_num32(&start, token[1]); + if (ret) + parse_err("illegal start address\n"); + + ret = parse_num32(&len, token[2]); + if (ret) + parse_err("illegal device length\n"); + + register_device(name, start, len); + + return 0; +} + +module_param_call(phram, phram_setup, NULL, NULL, 000); +MODULE_PARM_DESC(phram, "Memory region to map. \"map=,\""); + +/* + * Just for compatibility with slram, this is horrible and should go someday. + */ +static int __init slram_setup(const char *val, struct kernel_param *kp) +{ + char buf[256], *str = buf; + + if (!val || !val[0]) + parse_err("no arguments to \"slram=\"\n"); + + if (strnlen(val, sizeof(str)) >= sizeof(str)) + parse_err("parameter too long\n"); + + strcpy(str, val); + + while (str) { + char *token[3]; + char *name; + uint32_t start; + uint32_t len; + int i, ret; + + for (i=0; i<3; i++) { + token[i] = strsep(&str, ","); + if (token[i]) + continue; + parse_err("wrong number of arguments to \"slram=\"\n"); + } + + /* name */ + ret = parse_name(&name, token[0]); + if (ret == -ENOMEM) + parse_err("of memory\n"); + if (ret == -ENOSPC) + parse_err("too long\n"); + if (ret) + return 1; + + /* start */ + ret = parse_num32(&start, token[1]); + if (ret) + parse_err("illegal start address\n"); + + /* len */ + if (token[2][0] == '+') + ret = parse_num32(&len, token[2] + 1); + else + ret = parse_num32(&len, token[2]); + + if (ret) + parse_err("illegal device length\n"); + + if (token[2][0] != '+') { + if (len < start) + parse_err("end < start\n"); + len -= start; + } + + register_device(name, start, len); + } + return 1; +} + +module_param_call(slram, slram_setup, NULL, NULL, 000); +MODULE_PARM_DESC(slram, "List of memory regions to map. 
\"map=,\""); + + +int __init init_phram(void) +{ + printk(KERN_ERR "phram loaded\n"); + return 0; +} + +static void __exit cleanup_phram(void) +{ + unregister_devices(); +} + +module_init(init_phram); +module_exit(cleanup_phram); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jörn Engel "); +MODULE_DESCRIPTION("MTD driver for physical RAM"); diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c new file mode 100644 index 000000000..ec2612b0d --- /dev/null +++ b/drivers/mtd/maps/ichxrom.c @@ -0,0 +1,407 @@ +/* + * ichxrom.c + * + * Normal mappings of chips in physical memory + * $Id: ichxrom.c,v 1.7 2004/07/14 18:14:09 eric Exp $ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define xstr(s) str(s) +#define str(s) #s +#define MOD_NAME xstr(KBUILD_BASENAME) + +#define MTD_DEV_NAME_LENGTH 16 + +#define RESERVE_MEM_REGION 0 + + +#define MANUFACTURER_INTEL 0x0089 +#define I82802AB 0x00ad +#define I82802AC 0x00ac + +#define ICHX_FWH_REGION_START 0xFF000000UL +#define ICHX_FWH_REGION_SIZE 0x01000000UL +#define BIOS_CNTL 0x4e +#define FWH_DEC_EN1 0xE3 +#define FWH_DEC_EN2 0xF0 +#define FWH_SEL1 0xE8 +#define FWH_SEL2 0xEE + +struct ichxrom_map_info { + struct map_info map; + struct mtd_info *mtd; + unsigned long window_addr; + struct pci_dev *pdev; + struct resource window_rsrc; + struct resource rom_rsrc; + char mtd_name[MTD_DEV_NAME_LENGTH]; +}; + +static inline unsigned long addr(struct map_info *map, unsigned long ofs) +{ + unsigned long offset; + offset = ((8*1024*1024) - map->size) + ofs; + if (offset >= (4*1024*1024)) { + offset += 0x400000; + } + return map->map_priv_1 + 0x400000 + offset; +} + +static inline unsigned long dbg_addr(struct map_info *map, unsigned long addr) +{ + return addr - map->map_priv_1 + ICHX_FWH_REGION_START; +} + +static map_word ichxrom_read(struct map_info *map, unsigned long ofs) +{ + map_word val; + int i; + switch(map->bankwidth) { + case 1: val.x[0] = __raw_readb(addr(map, ofs)); break; + case 2: val.x[0] = __raw_readw(addr(map, ofs)); break; + case 4: val.x[0] = __raw_readl(addr(map, ofs)); break; +#if BITS_PER_LONG >= 64 + case 8: val.x[0] = __raw_readq(addr(map, ofs)); break; +#endif + default: val.x[0] = 0; break; + } + for(i = 1; i < map_words(map); i++) { + val.x[i] = 0; + } + return val; +} + +static void ichxrom_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) +{ + memcpy_fromio(to, addr(map, from), len); +} + +static void ichxrom_write(struct map_info *map, map_word d, unsigned long ofs) +{ + switch(map->bankwidth) { + case 1: __raw_writeb(d.x[0], addr(map,ofs)); break; + case 2: __raw_writew(d.x[0], addr(map,ofs)); break; + case 4: __raw_writel(d.x[0], addr(map,ofs)); break; +#if BITS_PER_LONG >= 64 + case 8: __raw_writeq(d.x[0], addr(map,ofs)); break; +#endif + } + mb(); +} + +static void ichxrom_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) +{ + memcpy_toio(addr(map, to), from, len); +} + +static struct ichxrom_map_info ichxrom_map = { + .map = { + .name = MOD_NAME, + .phys = NO_XIP, + .size = 0, + .bankwidth = 1, + .read = ichxrom_read, + .copy_from = ichxrom_copy_from, + .write = ichxrom_write, + .copy_to = ichxrom_copy_to, + /* Firmware hubs only use vpp when being programmed + * in a factory setting. So in-place programming + * needs to use a different method. 
+ */ + }, + /* remaining fields of structure are initialized to 0 */ +}; + +enum fwh_lock_state { + FWH_DENY_WRITE = 1, + FWH_IMMUTABLE = 2, + FWH_DENY_READ = 4, +}; + +static void ichxrom_cleanup(struct ichxrom_map_info *info) +{ + u16 word; + + /* Disable writes through the rom window */ + pci_read_config_word(info->pdev, BIOS_CNTL, &word); + pci_write_config_word(info->pdev, BIOS_CNTL, word & ~1); + + if (info->mtd) { + del_mtd_device(info->mtd); + map_destroy(info->mtd); + info->mtd = NULL; + info->map.virt = 0; + } + if (info->rom_rsrc.parent) + release_resource(&info->rom_rsrc); + if (info->window_rsrc.parent) + release_resource(&info->window_rsrc); + + if (info->window_addr) { + iounmap((void *)(info->window_addr)); + info->window_addr = 0; + } +} + + +static int ichxrom_set_lock_state(struct mtd_info *mtd, loff_t ofs, size_t len, + enum fwh_lock_state state) +{ + struct map_info *map = mtd->priv; + unsigned long start = ofs; + unsigned long end = start + len -1; + + /* FIXME do I need to guard against concurrency here? */ + /* round down to 64K boundaries */ + start = start & ~0xFFFF; + end = end & ~0xFFFF; + while (start <= end) { + unsigned long ctrl_addr; + ctrl_addr = addr(map, start) - 0x400000 + 2; + writeb(state, ctrl_addr); + start = start + 0x10000; + } + return 0; +} + +static int ichxrom_lock(struct mtd_info *mtd, loff_t ofs, size_t len) +{ + return ichxrom_set_lock_state(mtd, ofs, len, FWH_DENY_WRITE); +} + +static int ichxrom_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) +{ + return ichxrom_set_lock_state(mtd, ofs, len, 0); +} + +static int __devinit ichxrom_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + u16 word; + struct ichxrom_map_info *info = &ichxrom_map; + unsigned long map_size; + static char *probes[] = { "cfi_probe", "jedec_probe" }; + struct cfi_private *cfi; + + /* For now I just handle the ichx and I assume there + * are not a lot of resources up at the top of the address + * space. It is possible to handle other devices in the + * top 16MB but it is very painful. Also since + * you can only really attach a FWH to an ICHX there + * a number of simplifications you can make. + * + * Also you can page firmware hubs if an 8MB window isn't enough + * but don't currently handle that case either. + */ + + info->pdev = pdev; + + /* + * Try to reserve the window mem region. If this fails then + * it is likely due to the window being "reseved" by the BIOS. + */ + info->window_rsrc.name = MOD_NAME; + info->window_rsrc.start = ICHX_FWH_REGION_START; + info->window_rsrc.end = ICHX_FWH_REGION_START + ICHX_FWH_REGION_SIZE - 1; + info->window_rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + if (request_resource(&iomem_resource, &info->window_rsrc)) { + info->window_rsrc.parent = NULL; + printk(KERN_ERR MOD_NAME + " %s(): Unable to register resource" + " 0x%.08lx-0x%.08lx - kernel bug?\n", + __func__, + info->window_rsrc.start, info->window_rsrc.end); + } + + /* Enable writes through the rom window */ + pci_read_config_word(pdev, BIOS_CNTL, &word); + if (!(word & 1) && (word & (1<<1))) { + /* The BIOS will generate an error if I enable + * this device, so don't even try. + */ + printk(KERN_ERR MOD_NAME ": firmware access control, I can't enable writes\n"); + goto failed; + } + pci_write_config_word(pdev, BIOS_CNTL, word | 1); + + + /* Map the firmware hub into my address space. */ + /* Does this use too much virtual address space? 
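
Since the probe routine below installs ichxrom_lock()/ichxrom_unlock() as the MTD lock hooks for Intel 82802AB/AC firmware hubs, an MTD client can drop the write lock before programming. A minimal sketch, assuming an mtd handle obtained elsewhere and with error handling trimmed:

static int example_unlock_fwh_block(struct mtd_info *mtd, loff_t ofs)
{
        if (!mtd->unlock)
                return -EOPNOTSUPP;
        /* clears FWH_DENY_WRITE in the block lock register of each 64KiB block touched */
        return mtd->unlock(mtd, ofs & ~0xFFFFull, 0x10000);
}
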
*/ + info->window_addr = (unsigned long)ioremap( + ICHX_FWH_REGION_START, ICHX_FWH_REGION_SIZE); + if (!info->window_addr) { + printk(KERN_ERR "Failed to ioremap\n"); + goto failed; + } + + /* For now assume the firmware has setup all relevant firmware + * windows. We don't have enough information to handle this case + * intelligently. + */ + + /* FIXME select the firmware hub and enable a window to it. */ + + info->mtd = NULL; + info->map.map_priv_1 = info->window_addr; + + /* Loop through the possible bankwidths */ + for(ichxrom_map.map.bankwidth = 4; ichxrom_map.map.bankwidth; ichxrom_map.map.bankwidth >>= 1) { + map_size = ICHX_FWH_REGION_SIZE; + while(!info->mtd && (map_size > 0)) { + int i; + info->map.size = map_size; + for(i = 0; i < sizeof(probes)/sizeof(char *); i++) { + info->mtd = do_map_probe(probes[i], &ichxrom_map.map); + if (info->mtd) + break; + } + map_size -= 512*1024; + } + if (info->mtd) + break; + } + if (!info->mtd) { + goto failed; + } + cfi = ichxrom_map.map.fldrv_priv; + if ((cfi->mfr == MANUFACTURER_INTEL) && ( + (cfi->id == I82802AB) || + (cfi->id == I82802AC))) + { + /* If it is a firmware hub put in the special lock + * and unlock routines. + */ + info->mtd->lock = ichxrom_lock; + info->mtd->unlock = ichxrom_unlock; + } + if (info->mtd->size > info->map.size) { + printk(KERN_WARNING MOD_NAME " rom(%u) larger than window(%u). fixing...\n", + info->mtd->size, info->map.size); + info->mtd->size = info->map.size; + } + + info->mtd->owner = THIS_MODULE; + add_mtd_device(info->mtd); + + if (info->window_rsrc.parent) { + /* + * Registering the MTD device in iomem may not be possible + * if there is a BIOS "reserved" and BUSY range. If this + * fails then continue anyway. + */ + snprintf(info->mtd_name, MTD_DEV_NAME_LENGTH, + "mtd%d", info->mtd->index); + + info->rom_rsrc.name = info->mtd_name; + info->rom_rsrc.start = ICHX_FWH_REGION_START + + ICHX_FWH_REGION_SIZE - map_size; + info->rom_rsrc.end = ICHX_FWH_REGION_START + + ICHX_FWH_REGION_SIZE; + info->rom_rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; + if (request_resource(&info->window_rsrc, &info->rom_rsrc)) { + printk(KERN_ERR MOD_NAME + ": cannot reserve MTD resource\n"); + info->rom_rsrc.parent = NULL; + } + } + + return 0; + + failed: + ichxrom_cleanup(info); + return -ENODEV; +} + + +static void __devexit ichxrom_remove_one (struct pci_dev *pdev) +{ + struct ichxrom_map_info *info = &ichxrom_map; + u16 word; + + del_mtd_device(info->mtd); + map_destroy(info->mtd); + info->mtd = NULL; + info->map.map_priv_1 = 0; + + iounmap((void *)(info->window_addr)); + info->window_addr = 0; + + /* Disable writes through the rom window */ + pci_read_config_word(pdev, BIOS_CNTL, &word); + pci_write_config_word(pdev, BIOS_CNTL, word & ~1); + +#if RESERVE_MEM_REGION + release_mem_region(ICHX_FWH_REGION_START, ICHX_FWH_REGION_SIZE); +#endif +} + +static struct pci_device_id ichxrom_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, + PCI_ANY_ID, PCI_ANY_ID, }, + { 0, }, +}; + +MODULE_DEVICE_TABLE(pci, ichxrom_pci_tbl); + +#if 0 +static struct pci_driver ichxrom_driver = { + .name = MOD_NAME, + .id_table = ichxrom_pci_tbl, + .probe = ichxrom_init_one, + .remove = 
ichxrom_remove_one, +}; +#endif + +static struct pci_dev *mydev; +int __init init_ichxrom(void) +{ + struct pci_dev *pdev; + struct pci_device_id *id; + + pdev = NULL; + for (id = ichxrom_pci_tbl; id->vendor; id++) { + pdev = pci_find_device(id->vendor, id->device, NULL); + if (pdev) { + break; + } + } + if (pdev) { + mydev = pdev; + return ichxrom_init_one(pdev, &ichxrom_pci_tbl[0]); + } + return -ENXIO; +#if 0 + return pci_module_init(&ichxrom_driver); +#endif +} + +static void __exit cleanup_ichxrom(void) +{ + ichxrom_remove_one(mydev); +} + +module_init(init_ichxrom); +module_exit(cleanup_ichxrom); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Eric Biederman "); +MODULE_DESCRIPTION("MTD map driver for BIOS chips on the ICHX southbridge"); diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c new file mode 100644 index 000000000..677c21685 --- /dev/null +++ b/drivers/mtd/nand/diskonchip.c @@ -0,0 +1,1237 @@ +/* + * drivers/mtd/nand/diskonchip.c + * + * (C) 2003 Red Hat, Inc. + * + * Author: David Woodhouse + * + * Interface to generic NAND code for M-Systems DiskOnChip devices + * + * $Id: diskonchip.c,v 1.23 2004/07/13 00:14:35 dbrown Exp $ + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* Where to look for the devices? */ +#ifndef CONFIG_MTD_DOCPROBE_ADDRESS +#define CONFIG_MTD_DOCPROBE_ADDRESS 0 +#endif + +static unsigned long __initdata doc_locations[] = { +#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__) +#ifdef CONFIG_MTD_DOCPROBE_HIGH + 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000, + 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000, + 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000, + 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000, + 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000, +#else /* CONFIG_MTD_DOCPROBE_HIGH */ + 0xc8000, 0xca000, 0xcc000, 0xce000, + 0xd0000, 0xd2000, 0xd4000, 0xd6000, + 0xd8000, 0xda000, 0xdc000, 0xde000, + 0xe0000, 0xe2000, 0xe4000, 0xe6000, + 0xe8000, 0xea000, 0xec000, 0xee000, +#endif /* CONFIG_MTD_DOCPROBE_HIGH */ +#elif defined(__PPC__) + 0xe4000000, +#elif defined(CONFIG_MOMENCO_OCELOT) + 0x2f000000, + 0xff000000, +#elif defined(CONFIG_MOMENCO_OCELOT_G) || defined (CONFIG_MOMENCO_OCELOT_C) + 0xff000000, +##else +#warning Unknown architecture for DiskOnChip. No default probe locations defined +#endif + 0xffffffff }; + +static struct mtd_info *doclist = NULL; + +struct doc_priv { + unsigned long virtadr; + unsigned long physadr; + u_char ChipID; + u_char CDSNControl; + int chips_per_floor; /* The number of chips detected on each floor */ + int curfloor; + int curchip; + int mh0_page; + int mh1_page; + struct mtd_info *nextdoc; +}; + +/* Max number of eraseblocks to scan (from start of device) for the (I)NFTL + MediaHeader. The spec says to just keep going, I think, but that's just + silly. */ +#define MAX_MEDIAHEADER_SCAN 8 + +/* This is the syndrome computed by the HW ecc generator upon reading an empty + page, one with all 0xff for data and stored ecc code. */ +static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a }; +/* This is the ecc value computed by the HW ecc generator upon writing an empty + page, one with all 0xff for data. 
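
This syndrome is what doc200x_correct_data() further down compares against when deciding whether an apparent ECC failure is really just a freshly erased page: all 0xff data, all 0xff stored ECC, and the fixed syndrome above from the hardware generator. Condensed into a standalone sketch (example_page_is_erased is not part of the driver):

static int example_page_is_erased(const u_char *calc_syndrome,
                                  const u_char *read_ecc, const u_char *dat)
{
        int i;

        if (memcmp(calc_syndrome, empty_read_syndrome, 6))
                return 0;
        for (i = 0; i < 6; i++)
                if (read_ecc[i] != 0xff)
                        return 0;
        for (i = 0; i < 512; i++)
                if (dat[i] != 0xff)
                        return 0;
        return 1;
}
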
*/ +static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 }; + +#define INFTL_BBT_RESERVED_BLOCKS 4 + +#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil) +#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k) + +static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd); +static void doc200x_select_chip(struct mtd_info *mtd, int chip); + +static int debug=0; +MODULE_PARM(debug, "i"); + +static int try_dword=1; +MODULE_PARM(try_dword, "i"); + +static int no_ecc_failures=0; +MODULE_PARM(no_ecc_failures, "i"); + +static int no_autopart=0; +MODULE_PARM(no_autopart, "i"); + +#ifdef MTD_NAND_DISKONCHIP_BBTWRITE +static int inftl_bbt_write=1; +#else +static int inftl_bbt_write=0; +#endif +MODULE_PARM(inftl_bbt_write, "i"); + +static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS; +MODULE_PARM(doc_config_location, "l"); +MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip"); + +static void DoC_Delay(struct doc_priv *doc, unsigned short cycles) +{ + volatile char dummy; + int i; + + for (i = 0; i < cycles; i++) { + if (DoC_is_Millennium(doc)) + dummy = ReadDOC(doc->virtadr, NOP); + else + dummy = ReadDOC(doc->virtadr, DOCStatus); + } + +} +/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ +static int _DoC_WaitReady(struct doc_priv *doc) +{ + unsigned long docptr = doc->virtadr; + unsigned long timeo = jiffies + (HZ * 10); + + if(debug) printk("_DoC_WaitReady...\n"); + /* Out-of-line routine to wait for chip response */ + while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { + if (time_after(jiffies, timeo)) { + printk("_DoC_WaitReady timed out.\n"); + return -EIO; + } + udelay(1); + cond_resched(); + } + + return 0; +} + +static inline int DoC_WaitReady(struct doc_priv *doc) +{ + unsigned long docptr = doc->virtadr; + int ret = 0; + + DoC_Delay(doc, 4); + + if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) + /* Call the out-of-line routine to wait */ + ret = _DoC_WaitReady(doc); + + DoC_Delay(doc, 2); + if(debug) printk("DoC_WaitReady OK\n"); + return ret; +} + +static void doc2000_write_byte(struct mtd_info *mtd, u_char datum) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + if(debug)printk("write_byte %02x\n", datum); + WriteDOC(datum, docptr, CDSNSlowIO); + WriteDOC(datum, docptr, 2k_CDSN_IO); +} + +static u_char doc2000_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + u_char ret; + + ReadDOC(docptr, CDSNSlowIO); + DoC_Delay(doc, 2); + ret = ReadDOC(docptr, 2k_CDSN_IO); + if (debug) printk("read_byte returns %02x\n", ret); + return ret; +} + +static void doc2000_writebuf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + if (debug)printk("writebuf of %d bytes: ", len); + for (i=0; i < len; i++) { + WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i); + if (debug && i < 16) + printk("%02x ", buf[i]); + } + if (debug) printk("\n"); +} + +static void doc2000_readbuf(struct mtd_info *mtd, + u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + if (debug)printk("readbuf of %d bytes: ", len); + + for (i=0; i < len; i++) { + buf[i] = ReadDOC(docptr, 
2k_CDSN_IO + i); + } +} + +static void doc2000_readbuf_dword(struct mtd_info *mtd, + u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + if (debug) printk("readbuf_dword of %d bytes: ", len); + + if (unlikely((((unsigned long)buf)|len) & 3)) { + for (i=0; i < len; i++) { + *(uint8_t *)(&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i); + } + } else { + for (i=0; i < len; i+=4) { + *(uint32_t*)(&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i); + } + } +} + +static int doc2000_verifybuf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + for (i=0; i < len; i++) + if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO)) + return -EFAULT; + return 0; +} + +static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + uint16_t ret; + + doc200x_select_chip(mtd, nr); + doc200x_hwcontrol(mtd, NAND_CTL_SETCLE); + this->write_byte(mtd, NAND_CMD_READID); + doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE); + doc200x_hwcontrol(mtd, NAND_CTL_SETALE); + this->write_byte(mtd, 0); + doc200x_hwcontrol(mtd, NAND_CTL_CLRALE); + + ret = this->read_byte(mtd) << 8; + ret |= this->read_byte(mtd); + + if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) { + /* First chip probe. See if we get same results by 32-bit access */ + union { + uint32_t dword; + uint8_t byte[4]; + } ident; + unsigned long docptr = doc->virtadr; + + doc200x_hwcontrol(mtd, NAND_CTL_SETCLE); + doc2000_write_byte(mtd, NAND_CMD_READID); + doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE); + doc200x_hwcontrol(mtd, NAND_CTL_SETALE); + doc2000_write_byte(mtd, 0); + doc200x_hwcontrol(mtd, NAND_CTL_CLRALE); + + ident.dword = readl(docptr + DoC_2k_CDSN_IO); + if (((ident.byte[0] << 8) | ident.byte[1]) == ret) { + printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n"); + this->read_buf = &doc2000_readbuf_dword; + } + } + + return ret; +} + +static void __init doc2000_count_chips(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + uint16_t mfrid; + int i; + + /* Max 4 chips per floor on DiskOnChip 2000 */ + doc->chips_per_floor = 4; + + /* Find out what the first chip is */ + mfrid = doc200x_ident_chip(mtd, 0); + + /* Find how many chips in each floor. 
*/ + for (i = 1; i < 4; i++) { + if (doc200x_ident_chip(mtd, i) != mfrid) + break; + } + doc->chips_per_floor = i; + printk(KERN_DEBUG "Detected %d chips per floor.\n", i); +} + +static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this, int state) +{ + struct doc_priv *doc = (void *)this->priv; + + int status; + + DoC_WaitReady(doc); + this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); + DoC_WaitReady(doc); + status = (int)this->read_byte(mtd); + + return status; +} + +static void doc2001_write_byte(struct mtd_info *mtd, u_char datum) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + WriteDOC(datum, docptr, CDSNSlowIO); + WriteDOC(datum, docptr, Mil_CDSN_IO); + WriteDOC(datum, docptr, WritePipeTerm); +} + +static u_char doc2001_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + //ReadDOC(docptr, CDSNSlowIO); + /* 11.4.5 -- delay twice to allow extended length cycle */ + DoC_Delay(doc, 2); + ReadDOC(docptr, ReadPipeInit); + //return ReadDOC(docptr, Mil_CDSN_IO); + return ReadDOC(docptr, LastDataRead); +} + +static void doc2001_writebuf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + for (i=0; i < len; i++) + WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i); + /* Terminate write pipeline */ + WriteDOC(0x00, docptr, WritePipeTerm); +} + +static void doc2001_readbuf(struct mtd_info *mtd, + u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + /* Start read pipeline */ + ReadDOC(docptr, ReadPipeInit); + + for (i=0; i < len-1; i++) + buf[i] = ReadDOC(docptr, Mil_CDSN_IO); + + /* Terminate read pipeline */ + buf[i] = ReadDOC(docptr, LastDataRead); +} + +static int doc2001_verifybuf(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + + /* Start read pipeline */ + ReadDOC(docptr, ReadPipeInit); + + for (i=0; i < len-1; i++) + if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) { + ReadDOC(docptr, LastDataRead); + return i; + } + if (buf[i] != ReadDOC(docptr, LastDataRead)) + return i; + return 0; +} + +static void doc200x_select_chip(struct mtd_info *mtd, int chip) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int floor = 0; + + /* 11.4.4 -- deassert CE before changing chip */ + doc200x_hwcontrol(mtd, NAND_CTL_CLRNCE); + + if(debug)printk("select chip (%d)\n", chip); + + if (chip == -1) + return; + + floor = chip / doc->chips_per_floor; + chip -= (floor * doc->chips_per_floor); + + WriteDOC(floor, docptr, FloorSelect); + WriteDOC(chip, docptr, CDSNDeviceSelect); + + doc200x_hwcontrol(mtd, NAND_CTL_SETNCE); + + doc->curchip = chip; + doc->curfloor = floor; +} + +static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + switch(cmd) { + case NAND_CTL_SETNCE: + doc->CDSNControl |= CDSN_CTRL_CE; + break; + case NAND_CTL_CLRNCE: + doc->CDSNControl &= ~CDSN_CTRL_CE; + break; + case NAND_CTL_SETCLE: + doc->CDSNControl |= CDSN_CTRL_CLE; + 
break; + case NAND_CTL_CLRCLE: + doc->CDSNControl &= ~CDSN_CTRL_CLE; + break; + case NAND_CTL_SETALE: + doc->CDSNControl |= CDSN_CTRL_ALE; + break; + case NAND_CTL_CLRALE: + doc->CDSNControl &= ~CDSN_CTRL_ALE; + break; + case NAND_CTL_SETWP: + doc->CDSNControl |= CDSN_CTRL_WP; + break; + case NAND_CTL_CLRWP: + doc->CDSNControl &= ~CDSN_CTRL_WP; + break; + } + if (debug)printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl); + WriteDOC(doc->CDSNControl, docptr, CDSNControl); + /* 11.4.3 -- 4 NOPs after CSDNControl write */ + DoC_Delay(doc, 4); +} + +static int doc200x_dev_ready(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + /* 11.4.2 -- must NOP four times before checking FR/B# */ + DoC_Delay(doc, 4); + if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { + if(debug) + printk("not ready\n"); + return 0; + } + /* 11.4.2 -- Must NOP twice if it's ready */ + DoC_Delay(doc, 2); + if (debug)printk("was ready\n"); + return 1; +} + +static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) +{ + /* This is our last resort if we couldn't find or create a BBT. Just + pretend all blocks are good. */ + return 0; +} + +static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + + /* Prime the ECC engine */ + switch(mode) { + case NAND_ECC_READ: + WriteDOC(DOC_ECC_RESET, docptr, ECCConf); + WriteDOC(DOC_ECC_EN, docptr, ECCConf); + break; + case NAND_ECC_WRITE: + WriteDOC(DOC_ECC_RESET, docptr, ECCConf); + WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf); + break; + } +} + +/* This code is only called on write */ +static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, + unsigned char *ecc_code) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + int i; + int emptymatch = 1; + + /* flush the pipeline */ + if (DoC_is_2000(doc)) { + WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl); + WriteDOC(0, docptr, 2k_CDSN_IO); + WriteDOC(0, docptr, 2k_CDSN_IO); + WriteDOC(0, docptr, 2k_CDSN_IO); + WriteDOC(doc->CDSNControl, docptr, CDSNControl); + } else { + WriteDOC(0, docptr, NOP); + WriteDOC(0, docptr, NOP); + WriteDOC(0, docptr, NOP); + } + + for (i = 0; i < 6; i++) { + ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i); + if (ecc_code[i] != empty_write_ecc[i]) + emptymatch = 0; + } + WriteDOC(DOC_ECC_DIS, docptr, ECCConf); +#if 0 + /* If emptymatch=1, we might have an all-0xff data buffer. Check. */ + if (emptymatch) { + /* Note: this somewhat expensive test should not be triggered + often. It could be optimized away by examining the data in + the writebuf routine, and remembering the result. */ + for (i = 0; i < 512; i++) { + if (dat[i] == 0xff) continue; + emptymatch = 0; + break; + } + } + /* If emptymatch still =1, we do have an all-0xff data buffer. + Return all-0xff ecc value instead of the computed one, so + it'll look just like a freshly-erased page. 
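
The note above suggests doing the all-0xff test once in the write path and remembering the result instead of rescanning the page here. One possible shape for that optimization, purely as a sketch: last_write_empty would be a new, hypothetical field in struct doc_priv and is not present in this driver.

static void example_writebuf_track_empty(struct mtd_info *mtd,
                                         const u_char *buf, int len)
{
        struct nand_chip *this = mtd->priv;
        struct doc_priv *doc = (void *)this->priv;
        int i;

        doc->last_write_empty = 1;      /* hypothetical field, see lead-in */
        for (i = 0; i < len; i++)
                if (buf[i] != 0xff) {
                        doc->last_write_empty = 0;
                        break;
                }
        doc2000_writebuf(mtd, buf, len);        /* then do the real write (2000 variant shown) */
}
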
*/ + if (emptymatch) memset(ecc_code, 0xff, 6); +#endif + return 0; +} + +static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) +{ + int i, ret = 0; + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + unsigned long docptr = doc->virtadr; + volatile u_char dummy; + int emptymatch = 1; + + /* flush the pipeline */ + if (DoC_is_2000(doc)) { + dummy = ReadDOC(docptr, 2k_ECCStatus); + dummy = ReadDOC(docptr, 2k_ECCStatus); + dummy = ReadDOC(docptr, 2k_ECCStatus); + } else { + dummy = ReadDOC(docptr, ECCConf); + dummy = ReadDOC(docptr, ECCConf); + dummy = ReadDOC(docptr, ECCConf); + } + + /* Error occured ? */ + if (dummy & 0x80) { + for (i = 0; i < 6; i++) { + calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i); + if (calc_ecc[i] != empty_read_syndrome[i]) + emptymatch = 0; + } + /* If emptymatch=1, the read syndrome is consistent with an + all-0xff data and stored ecc block. Check the stored ecc. */ + if (emptymatch) { + for (i = 0; i < 6; i++) { + if (read_ecc[i] == 0xff) continue; + emptymatch = 0; + break; + } + } + /* If emptymatch still =1, check the data block. */ + if (emptymatch) { + /* Note: this somewhat expensive test should not be triggered + often. It could be optimized away by examining the data in + the readbuf routine, and remembering the result. */ + for (i = 0; i < 512; i++) { + if (dat[i] == 0xff) continue; + emptymatch = 0; + break; + } + } + /* If emptymatch still =1, this is almost certainly a freshly- + erased block, in which case the ECC will not come out right. + We'll suppress the error and tell the caller everything's + OK. Because it is. */ + if (!emptymatch) ret = doc_decode_ecc (dat, calc_ecc); + if (ret > 0) + printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret); + } + WriteDOC(DOC_ECC_DIS, docptr, ECCConf); + if (no_ecc_failures && (ret == -1)) { + printk(KERN_ERR "suppressing ECC failure\n"); + ret = 0; + } + return ret; +} + +//u_char mydatabuf[528]; + +static struct nand_oobinfo doc200x_oobinfo = { + .useecc = MTD_NANDECC_AUTOPLACE, + .eccbytes = 6, + .eccpos = {0, 1, 2, 3, 4, 5}, + .oobfree = { {8, 8} } +}; + +/* Find the (I)NFTL Media Header, and optionally also the mirror media header. + On sucessful return, buf will contain a copy of the media header for + further processing. id is the string to scan for, and will presumably be + either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media + header. The page #s of the found media headers are placed in mh0_page and + mh1_page in the DOC private structure. 
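
Throughout the scan below, byte offsets, pages and erase blocks are related purely through the shift fields set up by nand_scan(). As a worked example (the numbers depend on the actual chip), with 512 byte pages (page_shift == 9) and 16KiB erase blocks (phys_erase_shift == 14):

        page  = offs >> page_shift;         /* 0x8000 >> 9  == page 64 */
        block = offs >> phys_erase_shift;   /* 0x8000 >> 14 == block 2 */
        end   = MAX_MEDIAHEADER_SCAN << phys_erase_shift;   /* 8 * 16KiB == 128KiB scanned */
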
*/ +static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, + const char *id, int findmirror) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + int offs, end = (MAX_MEDIAHEADER_SCAN << this->phys_erase_shift); + int ret, retlen; + + end = min(end, mtd->size); // paranoia + for (offs = 0; offs < end; offs += mtd->erasesize) { + ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf); + if (retlen != mtd->oobblock) continue; + if (ret) { + printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", + offs); + } + if (memcmp(buf, id, 6)) continue; + printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs); + if (doc->mh0_page == -1) { + doc->mh0_page = offs >> this->page_shift; + if (!findmirror) return 1; + continue; + } + doc->mh1_page = offs >> this->page_shift; + return 2; + } + if (doc->mh0_page == -1) { + printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id); + return 0; + } + /* Only one mediaheader was found. We want buf to contain a + mediaheader on return, so we'll have to re-read the one we found. */ + offs = doc->mh0_page << this->page_shift; + ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf); + if (retlen != mtd->oobblock) { + /* Insanity. Give up. */ + printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n"); + return 0; + } + return 1; +} + +static inline int __init nftl_partscan(struct mtd_info *mtd, + struct mtd_partition *parts) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + u_char *buf = this->data_buf; + struct NFTLMediaHeader *mh = (struct NFTLMediaHeader *) buf; + const int psize = 1 << this->page_shift; + int blocks, maxblocks; + int offs, numheaders; + + if (!(numheaders=find_media_headers(mtd, buf, "ANAND", 1))) return 0; + +//#ifdef CONFIG_MTD_DEBUG_VERBOSE +// if (CONFIG_MTD_DEBUG_VERBOSE >= 2) + printk(KERN_INFO " DataOrgID = %s\n" + " NumEraseUnits = %d\n" + " FirstPhysicalEUN = %d\n" + " FormattedSize = %d\n" + " UnitSizeFactor = %d\n", + mh->DataOrgID, mh->NumEraseUnits, + mh->FirstPhysicalEUN, mh->FormattedSize, + mh->UnitSizeFactor); +//#endif + + blocks = mtd->size >> this->phys_erase_shift; + maxblocks = min(32768, mtd->erasesize - psize); + + if (mh->UnitSizeFactor == 0x00) { + /* Auto-determine UnitSizeFactor. The constraints are: + - There can be at most 32768 virtual blocks. + - There can be at most (virtual block size - page size) + virtual blocks (because MediaHeader+BBT must fit in 1). + */ + mh->UnitSizeFactor = 0xff; + while (blocks > maxblocks) { + blocks >>= 1; + maxblocks = min(32768, (maxblocks << 1) + psize); + mh->UnitSizeFactor--; + } + printk(KERN_WARNING "UnitSizeFactor=0x00 detected. Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor); + } + + /* NOTE: The lines below modify internal variables of the NAND and MTD + layers; variables with have already been configured by nand_scan. + Unfortunately, we didn't know before this point what these values + should be. Thus, this code is somewhat dependant on the exact + implementation of the NAND layer. */ + if (mh->UnitSizeFactor != 0xff) { + this->bbt_erase_shift += (0xff - mh->UnitSizeFactor); + mtd->erasesize <<= (0xff - mh->UnitSizeFactor); + printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize); + blocks = mtd->size >> this->bbt_erase_shift; + maxblocks = min(32768, mtd->erasesize - psize); + } + + if (blocks > maxblocks) { + printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size. 
Aborting.\n", mh->UnitSizeFactor); + return 0; + } + + /* Skip past the media headers. */ + offs = max(doc->mh0_page, doc->mh1_page); + offs <<= this->page_shift; + offs += mtd->erasesize; + + //parts[0].name = " DiskOnChip Boot / Media Header partition"; + //parts[0].offset = 0; + //parts[0].size = offs; + + parts[0].name = " DiskOnChip BDTL partition"; + parts[0].offset = offs; + parts[0].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift; + + offs += parts[0].size; + if (offs < mtd->size) { + parts[1].name = " DiskOnChip Remainder partition"; + parts[1].offset = offs; + parts[1].size = mtd->size - offs; + return 2; + } + return 1; +} + +/* This is a stripped-down copy of the code in inftlmount.c */ +static inline int __init inftl_partscan(struct mtd_info *mtd, + struct mtd_partition *parts) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + u_char *buf = this->data_buf; + struct INFTLMediaHeader *mh = (struct INFTLMediaHeader *) buf; + struct INFTLPartition *ip; + int numparts = 0; + int blocks; + int vshift, lastvunit = 0; + int i; + int end = mtd->size; + + if (inftl_bbt_write) + end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift); + + if (!find_media_headers(mtd, buf, "BNAND", 0)) return 0; + doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift); + + mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks); + mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions); + mh->NoOfBDTLPartitions = le32_to_cpu(mh->NoOfBDTLPartitions); + mh->BlockMultiplierBits = le32_to_cpu(mh->BlockMultiplierBits); + mh->FormatFlags = le32_to_cpu(mh->FormatFlags); + mh->PercentUsed = le32_to_cpu(mh->PercentUsed); + +//#ifdef CONFIG_MTD_DEBUG_VERBOSE +// if (CONFIG_MTD_DEBUG_VERBOSE >= 2) + printk(KERN_INFO " bootRecordID = %s\n" + " NoOfBootImageBlocks = %d\n" + " NoOfBinaryPartitions = %d\n" + " NoOfBDTLPartitions = %d\n" + " BlockMultiplerBits = %d\n" + " FormatFlgs = %d\n" + " OsakVersion = 0x%x\n" + " PercentUsed = %d\n", + mh->bootRecordID, mh->NoOfBootImageBlocks, + mh->NoOfBinaryPartitions, + mh->NoOfBDTLPartitions, + mh->BlockMultiplierBits, mh->FormatFlags, + mh->OsakVersion, mh->PercentUsed); +//#endif + + vshift = this->phys_erase_shift + mh->BlockMultiplierBits; + + blocks = mtd->size >> vshift; + if (blocks > 32768) { + printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size. Aborting.\n", mh->BlockMultiplierBits); + return 0; + } + + blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift); + if (inftl_bbt_write && (blocks > mtd->erasesize)) { + printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported. 
FIX ME!\n"); + return 0; + } + + /* Scan the partitions */ + for (i = 0; (i < 4); i++) { + ip = &(mh->Partitions[i]); + ip->virtualUnits = le32_to_cpu(ip->virtualUnits); + ip->firstUnit = le32_to_cpu(ip->firstUnit); + ip->lastUnit = le32_to_cpu(ip->lastUnit); + ip->flags = le32_to_cpu(ip->flags); + ip->spareUnits = le32_to_cpu(ip->spareUnits); + ip->Reserved0 = le32_to_cpu(ip->Reserved0); + +//#ifdef CONFIG_MTD_DEBUG_VERBOSE +// if (CONFIG_MTD_DEBUG_VERBOSE >= 2) + printk(KERN_INFO " PARTITION[%d] ->\n" + " virtualUnits = %d\n" + " firstUnit = %d\n" + " lastUnit = %d\n" + " flags = 0x%x\n" + " spareUnits = %d\n", + i, ip->virtualUnits, ip->firstUnit, + ip->lastUnit, ip->flags, + ip->spareUnits); +//#endif + +/* + if ((i == 0) && (ip->firstUnit > 0)) { + parts[0].name = " DiskOnChip IPL / Media Header partition"; + parts[0].offset = 0; + parts[0].size = mtd->erasesize * ip->firstUnit; + numparts = 1; + } +*/ + + if (ip->flags & INFTL_BINARY) + parts[numparts].name = " DiskOnChip BDK partition"; + else + parts[numparts].name = " DiskOnChip BDTL partition"; + parts[numparts].offset = ip->firstUnit << vshift; + parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift; + numparts++; + if (ip->lastUnit > lastvunit) lastvunit = ip->lastUnit; + if (ip->flags & INFTL_LAST) break; + } + lastvunit++; + if ((lastvunit << vshift) < end) { + parts[numparts].name = " DiskOnChip Remainder partition"; + parts[numparts].offset = lastvunit << vshift; + parts[numparts].size = end - parts[numparts].offset; + numparts++; + } + return numparts; +} + +static int __init nftl_scan_bbt(struct mtd_info *mtd) +{ + int ret, numparts; + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + struct mtd_partition parts[2]; + + memset((char *) parts, 0, sizeof(parts)); + /* On NFTL, we have to find the media headers before we can read the + BBTs, since they're stored in the media header eraseblocks. */ + numparts = nftl_partscan(mtd, parts); + if (!numparts) return -EIO; + this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT | + NAND_BBT_SAVECONTENT | NAND_BBT_WRITE | + NAND_BBT_VERSION; + this->bbt_td->veroffs = 7; + this->bbt_td->pages[0] = doc->mh0_page + 1; + if (doc->mh1_page != -1) { + this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT | + NAND_BBT_SAVECONTENT | NAND_BBT_WRITE | + NAND_BBT_VERSION; + this->bbt_md->veroffs = 7; + this->bbt_md->pages[0] = doc->mh1_page + 1; + } else { + this->bbt_md = NULL; + } + + /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set. + At least as nand_bbt.c is currently written. */ + if ((ret = nand_scan_bbt(mtd, NULL))) + return ret; + add_mtd_device(mtd); +#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE) + if (!no_autopart) add_mtd_partitions(mtd, parts, numparts); +#endif + return 0; +} + +static int __init inftl_scan_bbt(struct mtd_info *mtd) +{ + int ret, numparts; + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + struct mtd_partition parts[5]; + + if (this->numchips > doc->chips_per_floor) { + printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n"); + return -EIO; + } + + if (mtd->size == (8<<20)) { +#if 0 +/* This doesn't seem to work for me. I get ECC errors on every page. */ + /* The Millennium 8MiB is actually an NFTL device! 
*/ + mtd->name = "DiskOnChip Millennium 8MiB (NFTL)"; + return nftl_scan_bbt(mtd); +#endif + printk(KERN_ERR "DiskOnChip Millennium 8MiB is not supported.\n"); + return -EIO; + } + + this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | + NAND_BBT_VERSION; + if (inftl_bbt_write) + this->bbt_td->options |= NAND_BBT_WRITE; + this->bbt_td->offs = 8; + this->bbt_td->len = 8; + this->bbt_td->veroffs = 7; + this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS; + this->bbt_td->reserved_block_code = 0x01; + this->bbt_td->pattern = "MSYS_BBT"; + + this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | + NAND_BBT_VERSION; + if (inftl_bbt_write) + this->bbt_md->options |= NAND_BBT_WRITE; + this->bbt_md->offs = 8; + this->bbt_md->len = 8; + this->bbt_md->veroffs = 7; + this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS; + this->bbt_md->reserved_block_code = 0x01; + this->bbt_md->pattern = "TBB_SYSM"; + + /* It's safe to set bd=NULL below because NAND_BBT_CREATE is not set. + At least as nand_bbt.c is currently written. */ + if ((ret = nand_scan_bbt(mtd, NULL))) + return ret; + memset((char *) parts, 0, sizeof(parts)); + numparts = inftl_partscan(mtd, parts); + /* At least for now, require the INFTL Media Header. We could probably + do without it for non-INFTL use, since all it gives us is + autopartitioning, but I want to give it more thought. */ + if (!numparts) return -EIO; + add_mtd_device(mtd); +#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE) + if (!no_autopart) add_mtd_partitions(mtd, parts, numparts); +#endif + return 0; +} + +static inline int __init doc2000_init(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + + this->write_byte = doc2000_write_byte; + this->read_byte = doc2000_read_byte; + this->write_buf = doc2000_writebuf; + this->read_buf = doc2000_readbuf; + this->verify_buf = doc2000_verifybuf; + this->scan_bbt = nftl_scan_bbt; + + doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO; + doc2000_count_chips(mtd); + mtd->name = "DiskOnChip 2000 (NFTL Model)"; + return (4 * doc->chips_per_floor); +} + +static inline int __init doc2001_init(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + struct doc_priv *doc = (void *)this->priv; + + this->write_byte = doc2001_write_byte; + this->read_byte = doc2001_read_byte; + this->write_buf = doc2001_writebuf; + this->read_buf = doc2001_readbuf; + this->verify_buf = doc2001_verifybuf; + this->scan_bbt = inftl_scan_bbt; + + ReadDOC(doc->virtadr, ChipID); + ReadDOC(doc->virtadr, ChipID); + ReadDOC(doc->virtadr, ChipID); + if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) { + /* It's not a Millennium; it's one of the newer + DiskOnChip 2000 units with a similar ASIC. + Treat it like a Millennium, except that it + can have multiple chips. 
*/ + doc2000_count_chips(mtd); + mtd->name = "DiskOnChip 2000 (INFTL Model)"; + return (4 * doc->chips_per_floor); + } else { + /* Bog-standard Millennium */ + doc->chips_per_floor = 1; + mtd->name = "DiskOnChip Millennium"; + return 1; + } +} + +static inline int __init doc_probe(unsigned long physadr) +{ + unsigned char ChipID; + struct mtd_info *mtd; + struct nand_chip *nand; + struct doc_priv *doc; + unsigned long virtadr; + unsigned char save_control; + unsigned char tmp, tmpb, tmpc; + int reg, len, numchips; + int ret = 0; + + virtadr = (unsigned long)ioremap(physadr, DOC_IOREMAP_LEN); + if (!virtadr) { + printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr); + return -EIO; + } + + /* It's not possible to cleanly detect the DiskOnChip - the + * bootup procedure will put the device into reset mode, and + * it's not possible to talk to it without actually writing + * to the DOCControl register. So we store the current contents + * of the DOCControl register's location, in case we later decide + * that it's not a DiskOnChip, and want to put it back how we + * found it. + */ + save_control = ReadDOC(virtadr, DOCControl); + + /* Reset the DiskOnChip ASIC */ + WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, + virtadr, DOCControl); + WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, + virtadr, DOCControl); + + /* Enable the DiskOnChip ASIC */ + WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, + virtadr, DOCControl); + WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, + virtadr, DOCControl); + + ChipID = ReadDOC(virtadr, ChipID); + + switch(ChipID) { + case DOC_ChipID_Doc2k: + reg = DoC_2k_ECCStatus; + break; + case DOC_ChipID_DocMil: + reg = DoC_ECCConf; + break; + default: + ret = -ENODEV; + goto notfound; + } + /* Check the TOGGLE bit in the ECC register */ + tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT; + tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT; + tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT; + if ((tmp == tmpb) || (tmp != tmpc)) { + printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr); + ret = -ENODEV; + goto notfound; + } + + for (mtd = doclist; mtd; mtd = doc->nextdoc) { + nand = mtd->priv; + doc = (void *)nand->priv; + /* Use the alias resolution register to determine if this is + in fact the same DOC aliased to a new address. If writes + to one chip's alias resolution register change the value on + the other chip, they're the same chip. 
*/ + unsigned char oldval = ReadDOC(doc->virtadr, AliasResolution); + unsigned char newval = ReadDOC(virtadr, AliasResolution); + if (oldval != newval) + continue; + WriteDOC(~newval, virtadr, AliasResolution); + oldval = ReadDOC(doc->virtadr, AliasResolution); + WriteDOC(newval, virtadr, AliasResolution); // restore it + newval = ~newval; + if (oldval == newval) { + //printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr); + goto notfound; + } + } + + printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr); + + len = sizeof(struct mtd_info) + + sizeof(struct nand_chip) + + sizeof(struct doc_priv) + + (2 * sizeof(struct nand_bbt_descr)); + mtd = kmalloc(len, GFP_KERNEL); + if (!mtd) { + printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len); + ret = -ENOMEM; + goto fail; + } + memset(mtd, 0, len); + + nand = (struct nand_chip *) (mtd + 1); + doc = (struct doc_priv *) (nand + 1); + nand->bbt_td = (struct nand_bbt_descr *) (doc + 1); + nand->bbt_md = nand->bbt_td + 1; + + mtd->priv = (void *) nand; + mtd->owner = THIS_MODULE; + + nand->priv = (void *) doc; + nand->select_chip = doc200x_select_chip; + nand->hwcontrol = doc200x_hwcontrol; + nand->dev_ready = doc200x_dev_ready; + nand->waitfunc = doc200x_wait; + nand->block_bad = doc200x_block_bad; + nand->enable_hwecc = doc200x_enable_hwecc; + nand->calculate_ecc = doc200x_calculate_ecc; + nand->correct_data = doc200x_correct_data; + //nand->data_buf + nand->autooob = &doc200x_oobinfo; + nand->eccmode = NAND_ECC_HW6_512; + nand->options = NAND_USE_FLASH_BBT | NAND_HWECC_SYNDROME; + + doc->physadr = physadr; + doc->virtadr = virtadr; + doc->ChipID = ChipID; + doc->curfloor = -1; + doc->curchip = -1; + doc->mh0_page = -1; + doc->mh1_page = -1; + doc->nextdoc = doclist; + + if (ChipID == DOC_ChipID_Doc2k) + numchips = doc2000_init(mtd); + else + numchips = doc2001_init(mtd); + + if ((ret = nand_scan(mtd, numchips))) { + /* DBB note: i believe nand_release is necessary here, as + buffers may have been allocated in nand_base. Check with + Thomas. FIX ME! */ + /* nand_release will call del_mtd_device, but we haven't yet + added it. This is handled without incident by + del_mtd_device, as far as I can tell. */ + nand_release(mtd); + kfree(mtd); + goto fail; + } + + /* Success! */ + doclist = mtd; + return 0; + +notfound: + /* Put back the contents of the DOCControl register, in case it's not + actually a DiskOnChip. */ + WriteDOC(save_control, virtadr, DOCControl); +fail: + iounmap((void *)virtadr); + return ret; +} + +int __init init_nanddoc(void) +{ + int i; + + if (doc_config_location) { + printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location); + return doc_probe(doc_config_location); + } else { + for (i=0; (doc_locations[i] != 0xffffffff); i++) { + doc_probe(doc_locations[i]); + } + } + /* No banner message any more. Print a message if no DiskOnChip + found, so the user knows we at least tried. 
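
Probing can also be pinned to one address instead of walking the architecture default table, either through the define guarded at the top of this file or through the doc_config_location module parameter declared earlier. Illustrative values only (0xd8000 is one of the default low-memory probe addresses):

#define CONFIG_MTD_DOCPROBE_ADDRESS 0xd8000     /* build time, normally set via Kconfig */
/* or at load time: modprobe diskonchip doc_config_location=0xd8000 */
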
*/ + if (!doclist) { + printk(KERN_INFO "No valid DiskOnChip devices found\n"); + return -ENODEV; + } + return 0; +} + +void __exit cleanup_nanddoc(void) +{ + struct mtd_info *mtd, *nextmtd; + struct nand_chip *nand; + struct doc_priv *doc; + + for (mtd = doclist; mtd; mtd = nextmtd) { + nand = mtd->priv; + doc = (void *)nand->priv; + + nextmtd = doc->nextdoc; + nand_release(mtd); + iounmap((void *)doc->virtadr); + kfree(mtd); + } +} + +module_init(init_nanddoc); +module_exit(cleanup_nanddoc); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Woodhouse "); +MODULE_DESCRIPTION("M-Systems DiskOnChip 2000 and Millennium device driver\n"); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c new file mode 100644 index 000000000..596bc8f70 --- /dev/null +++ b/drivers/mtd/nand/nand_base.c @@ -0,0 +1,2581 @@ +/* + * drivers/mtd/nand.c + * + * Overview: + * This is the generic MTD driver for NAND flash devices. It should be + * capable of working with almost all NAND chips currently available. + * Basic support for AG-AND chips is provided. + * + * Additional technical information is available on + * http://www.linux-mtd.infradead.org/tech/nand.html + * + * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) + * 2002 Thomas Gleixner (tglx@linutronix.de) + * + * 02-08-2004 tglx: support for strange chips, which cannot auto increment + * pages on read / read_oob + * + * 03-17-2004 tglx: Check ready before auto increment check. Simon Bayes + * pointed this out, as he marked an auto increment capable chip + * as NOAUTOINCR in the board driver. + * Make reads over block boundaries work too + * + * 04-14-2004 tglx: first working version for 2k page size chips + * + * 05-19-2004 tglx: Basic support for Renesas AG-AND chips + * + * Credits: + * David Woodhouse for adding multichip support + * + * Aleph One Ltd. and Toby Churchill Ltd. for supporting the + * rework for 2K page size chips + * + * TODO: + * Enable cached programming for 2k page size chips + * Check, if mtd->ecctype should be set to MTD_ECC_HW + * if we have HW ecc support. + * The AG-AND chips have nice features for speed improvement, + * which are not supported yet. Read / program 4 pages in one go. + * + * $Id: nand_base.c,v 1.113 2004/07/14 16:31:31 gleixner Exp $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
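
Before the implementation, a rough picture of how a board driver typically sits on top of this generic layer, much as the DiskOnChip driver above does: allocate an mtd_info plus nand_chip pair, fill in the hardware hooks, let nand_scan() identify the chip, then register the result. This is a sketch, not a reference implementation; everything prefixed my_ is a placeholder and error handling is trimmed.

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <asm/io.h>

#define MY_NAND_PHYS 0xd0000000UL       /* placeholder physical address */

static struct mtd_info *my_mtd;

static void my_hwcontrol(struct mtd_info *mtd, int cmd)
{
        /* board specific: toggle CLE/ALE/nCE for the given NAND_CTL_* code */
}

static int __init my_board_nand_init(void)
{
        struct nand_chip *this;

        my_mtd = kmalloc(sizeof(*my_mtd) + sizeof(*this), GFP_KERNEL);
        if (!my_mtd)
                return -ENOMEM;
        memset(my_mtd, 0, sizeof(*my_mtd) + sizeof(*this));

        this = (struct nand_chip *)(my_mtd + 1);
        my_mtd->priv = this;

        this->IO_ADDR_R = this->IO_ADDR_W = ioremap(MY_NAND_PHYS, 0x1000);
        this->hwcontrol = my_hwcontrol;
        this->chip_delay = 25;          /* us to wait when there is no dev_ready() */
        this->eccmode = NAND_ECC_SOFT;  /* software ECC provided by this file */

        if (nand_scan(my_mtd, 1)) {     /* probe for one chip */
                kfree(my_mtd);
                return -ENXIO;
        }
        return add_mtd_device(my_mtd);
}
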
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE) +#include +#endif + +/* Define default oob placement schemes for large and small page devices */ +static struct nand_oobinfo nand_oob_8 = { + .useecc = MTD_NANDECC_AUTOPLACE, + .eccbytes = 3, + .eccpos = {0, 1, 2}, + .oobfree = { {3, 2}, {6, 2} } +}; + +static struct nand_oobinfo nand_oob_16 = { + .useecc = MTD_NANDECC_AUTOPLACE, + .eccbytes = 6, + .eccpos = {0, 1, 2, 3, 6, 7}, + .oobfree = { {8, 8} } +}; + +static struct nand_oobinfo nand_oob_64 = { + .useecc = MTD_NANDECC_AUTOPLACE, + .eccbytes = 24, + .eccpos = { + 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63}, + .oobfree = { {2, 38} } +}; + +/* This is used for padding purposes in nand_write_oob */ +static u_char ffchars[] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +}; + +/* + * NAND low-level MTD interface functions + */ +static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len); +static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len); +static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len); + +static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf); +static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len, + size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel); +static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf); +static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf); +static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len, + size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel); +static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char *buf); +static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t * retlen); +static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel); +static int nand_erase (struct mtd_info *mtd, struct erase_info *instr); +static void nand_sync (struct mtd_info *mtd); + +/* Some internal functions */ +static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, u_char *oob_buf, + struct nand_oobinfo *oobsel, int mode); +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE +static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages, + u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode); +#else +#define nand_verify_pages(...) 
(0) +#endif + +static void nand_get_chip (struct nand_chip *this, struct mtd_info *mtd, int new_state); + +/** + * nand_release_chip - [GENERIC] release chip + * @mtd: MTD device structure + * + * Deselect, release chip lock and wake up anyone waiting on the device + */ +static void nand_release_chip (struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + + /* De-select the NAND device */ + this->select_chip(mtd, -1); + /* Release the chip */ + spin_lock_bh (&this->chip_lock); + this->state = FL_READY; + wake_up (&this->wq); + spin_unlock_bh (&this->chip_lock); +} + +/** + * nand_read_byte - [DEFAULT] read one byte from the chip + * @mtd: MTD device structure + * + * Default read function for 8bit buswith + */ +static u_char nand_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + return readb(this->IO_ADDR_R); +} + +/** + * nand_write_byte - [DEFAULT] write one byte to the chip + * @mtd: MTD device structure + * @byte: pointer to data byte to write + * + * Default write function for 8it buswith + */ +static void nand_write_byte(struct mtd_info *mtd, u_char byte) +{ + struct nand_chip *this = mtd->priv; + writeb(byte, this->IO_ADDR_W); +} + +/** + * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip + * @mtd: MTD device structure + * + * Default read function for 16bit buswith with + * endianess conversion + */ +static u_char nand_read_byte16(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + return (u_char) cpu_to_le16(readw(this->IO_ADDR_R)); +} + +/** + * nand_write_byte16 - [DEFAULT] write one byte endianess aware to the chip + * @mtd: MTD device structure + * @byte: pointer to data byte to write + * + * Default write function for 16bit buswith with + * endianess conversion + */ +static void nand_write_byte16(struct mtd_info *mtd, u_char byte) +{ + struct nand_chip *this = mtd->priv; + writew(le16_to_cpu((u16) byte), this->IO_ADDR_W); +} + +/** + * nand_read_word - [DEFAULT] read one word from the chip + * @mtd: MTD device structure + * + * Default read function for 16bit buswith without + * endianess conversion + */ +static u16 nand_read_word(struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + return readw(this->IO_ADDR_R); +} + +/** + * nand_write_word - [DEFAULT] write one word to the chip + * @mtd: MTD device structure + * @word: data word to write + * + * Default write function for 16bit buswith without + * endianess conversion + */ +static void nand_write_word(struct mtd_info *mtd, u16 word) +{ + struct nand_chip *this = mtd->priv; + writew(word, this->IO_ADDR_W); +} + +/** + * nand_select_chip - [DEFAULT] control CE line + * @mtd: MTD device structure + * @chip: chipnumber to select, -1 for deselect + * + * Default select function for 1 chip devices. 
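
The default below only knows how to assert or deassert a single nCE line and BUG()s for any other chip number, so boards with several chips behind one data register install their own hook, as the DiskOnChip driver above does with doc200x_select_chip(). A hypothetical board variant, with my_cs_reg standing in for whatever routes the chip enables:

static void __iomem *my_cs_reg;         /* placeholder chip select register */

static void example_select_chip(struct mtd_info *mtd, int chip)
{
        struct nand_chip *this = mtd->priv;

        if (chip == -1) {
                this->hwcontrol(mtd, NAND_CTL_CLRNCE); /* deselect everything */
                return;
        }
        writeb(chip, my_cs_reg);        /* route nCE to the requested chip */
        this->hwcontrol(mtd, NAND_CTL_SETNCE);
}
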
+ */ +static void nand_select_chip(struct mtd_info *mtd, int chip) +{ + struct nand_chip *this = mtd->priv; + switch(chip) { + case -1: + this->hwcontrol(mtd, NAND_CTL_CLRNCE); + break; + case 0: + this->hwcontrol(mtd, NAND_CTL_SETNCE); + break; + + default: + BUG(); + } +} + +/** + * nand_write_buf - [DEFAULT] write buffer to chip + * @mtd: MTD device structure + * @buf: data buffer + * @len: number of bytes to write + * + * Default write function for 8bit buswith + */ +static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; iIO_ADDR_W); +} + +/** + * nand_read_buf - [DEFAULT] read chip data into buffer + * @mtd: MTD device structure + * @buf: buffer to store date + * @len: number of bytes to read + * + * Default read function for 8bit buswith + */ +static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; iIO_ADDR_R); +} + +/** + * nand_verify_buf - [DEFAULT] Verify chip data against buffer + * @mtd: MTD device structure + * @buf: buffer containing the data to compare + * @len: number of bytes to compare + * + * Default verify function for 8bit buswith + */ +static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + + for (i=0; iIO_ADDR_R)) + return -EFAULT; + + return 0; +} + +/** + * nand_write_buf16 - [DEFAULT] write buffer to chip + * @mtd: MTD device structure + * @buf: data buffer + * @len: number of bytes to write + * + * Default write function for 16bit buswith + */ +static void nand_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + u16 *p = (u16 *) buf; + len >>= 1; + + for (i=0; iIO_ADDR_W); + +} + +/** + * nand_read_buf16 - [DEFAULT] read chip data into buffer + * @mtd: MTD device structure + * @buf: buffer to store date + * @len: number of bytes to read + * + * Default read function for 16bit buswith + */ +static void nand_read_buf16(struct mtd_info *mtd, u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + u16 *p = (u16 *) buf; + len >>= 1; + + for (i=0; iIO_ADDR_R); +} + +/** + * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer + * @mtd: MTD device structure + * @buf: buffer containing the data to compare + * @len: number of bytes to compare + * + * Default verify function for 16bit buswith + */ +static int nand_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len) +{ + int i; + struct nand_chip *this = mtd->priv; + u16 *p = (u16 *) buf; + len >>= 1; + + for (i=0; iIO_ADDR_R)) + return -EFAULT; + + return 0; +} + +/** + * nand_block_bad - [DEFAULT] Read bad block marker from the chip + * @mtd: MTD device structure + * @ofs: offset from device start + * @getchip: 0, if the chip is already selected + * + * Check, if the block is bad. 
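
The loop bodies of the default buffer accessors above have lost text in this copy of the patch (the for conditions and the readb()/writeb() arguments are missing). Assuming they follow the usual pattern for these defaults, the 8 bit variants read as below; the 16 bit variants do the same through a u16 pointer with readw()/writew() after halving len, as the surviving text indicates.

static void example_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
        int i;
        struct nand_chip *this = mtd->priv;

        for (i = 0; i < len; i++)
                writeb(buf[i], this->IO_ADDR_W);
}

static void example_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
        int i;
        struct nand_chip *this = mtd->priv;

        for (i = 0; i < len; i++)
                buf[i] = readb(this->IO_ADDR_R);
}

static int example_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
        int i;
        struct nand_chip *this = mtd->priv;

        for (i = 0; i < len; i++)
                if (buf[i] != readb(this->IO_ADDR_R))
                        return -EFAULT;
        return 0;
}
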
+ */ +static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) +{ + int page, chipnr, res = 0; + struct nand_chip *this = mtd->priv; + u16 bad; + + if (getchip) { + page = (int)(ofs >> this->page_shift); + chipnr = (int)(ofs >> this->chip_shift); + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd, FL_READING); + + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + } else + page = (int) ofs; + + if (this->options & NAND_BUSWIDTH_16) { + this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos & 0xFE, page & this->pagemask); + bad = cpu_to_le16(this->read_word(mtd)); + if (this->badblockpos & 0x1) + bad >>= 1; + if ((bad & 0xFF) != 0xff) + res = 1; + } else { + this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos, page & this->pagemask); + if (this->read_byte(mtd) != 0xff) + res = 1; + } + + if (getchip) { + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + } + + return res; +} + +/** + * nand_default_block_markbad - [DEFAULT] mark a block bad + * @mtd: MTD device structure + * @ofs: offset from device start + * + * This is the default implementation, which can be overridden by + * a hardware specific driver. +*/ +static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ + struct nand_chip *this = mtd->priv; + u_char buf[2] = {0, 0}; + size_t retlen; + int block; + + /* Get block number */ + block = ((int) ofs) >> this->bbt_erase_shift; + this->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); + + /* Do we have a flash based bad block table ? */ + if (this->options & NAND_USE_FLASH_BBT) + return nand_update_bbt (mtd, ofs); + + /* We write two bytes, so we dont have to mess with 16 bit access */ + ofs += mtd->oobsize + (this->badblockpos & ~0x01); + return nand_write_oob (mtd, ofs , 2, &retlen, buf); +} + +/** + * nand_check_wp - [GENERIC] check if the chip is write protected + * @mtd: MTD device structure + * Check, if the device is write protected + * + * The function expects, that the device is already selected + */ +static int nand_check_wp (struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + /* Check the WP bit */ + this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1); + return (this->read_byte(mtd) & 0x80) ? 0 : 1; +} + +/** + * nand_block_checkbad - [GENERIC] Check if a block is marked bad + * @mtd: MTD device structure + * @ofs: offset from device start + * @getchip: 0, if the chip is already selected + * @allowbbt: 1, if its allowed to access the bbt area + * + * Check, if the block is bad. Either by reading the bad block table or + * calling of the scan function. + */ +static int nand_block_checkbad (struct mtd_info *mtd, loff_t ofs, int getchip, int allowbbt) +{ + struct nand_chip *this = mtd->priv; + + if (!this->bbt) + return this->block_bad(mtd, ofs, getchip); + + /* Return info from the table */ + return nand_isbad_bbt (mtd, ofs, allowbbt); +} + +/** + * nand_command - [DEFAULT] Send command to NAND device + * @mtd: MTD device structure + * @command: the command to be sent + * @column: the column address for this command, -1 if none + * @page_addr: the page address for this command, -1 if none + * + * Send command to NAND device. 
This function is used for small page + * devices (256/512 Bytes per page) + */ +static void nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr) +{ + register struct nand_chip *this = mtd->priv; + + /* Begin command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_SETCLE); + /* + * Write out the command to the device. + */ + if (command == NAND_CMD_SEQIN) { + int readcmd; + + if (column >= mtd->oobblock) { + /* OOB area */ + column -= mtd->oobblock; + readcmd = NAND_CMD_READOOB; + } else if (column < 256) { + /* First 256 bytes --> READ0 */ + readcmd = NAND_CMD_READ0; + } else { + column -= 256; + readcmd = NAND_CMD_READ1; + } + this->write_byte(mtd, readcmd); + } + this->write_byte(mtd, command); + + /* Set ALE and clear CLE to start address cycle */ + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + + if (column != -1 || page_addr != -1) { + this->hwcontrol(mtd, NAND_CTL_SETALE); + + /* Serially input address */ + if (column != -1) { + /* Adjust columns for 16 bit buswidth */ + if (this->options & NAND_BUSWIDTH_16) + column >>= 1; + this->write_byte(mtd, column); + } + if (page_addr != -1) { + this->write_byte(mtd, (unsigned char) (page_addr & 0xff)); + this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff)); + /* One more address cycle for higher density devices */ + if (this->chipsize & 0x0c000000) + this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f)); + } + /* Latch in address */ + this->hwcontrol(mtd, NAND_CTL_CLRALE); + } + + /* + * program and erase have their own busy handlers + * status and sequential in needs no delay + */ + switch (command) { + + case NAND_CMD_PAGEPROG: + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: + case NAND_CMD_SEQIN: + case NAND_CMD_STATUS: + return; + + case NAND_CMD_RESET: + if (this->dev_ready) + break; + udelay(this->chip_delay); + this->hwcontrol(mtd, NAND_CTL_SETCLE); + this->write_byte(mtd, NAND_CMD_STATUS); + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + while ( !(this->read_byte(mtd) & 0x40)); + return; + + /* This applies to read commands */ + default: + /* + * If we don't have access to the busy pin, we apply the given + * command delay + */ + if (!this->dev_ready) { + udelay (this->chip_delay); + return; + } + } + + /* Apply this short delay always to ensure that we do wait tWB in + * any case on any machine. */ + ndelay (100); + /* wait until command is processed */ + while (!this->dev_ready(mtd)); +} + +/** + * nand_command_lp - [DEFAULT] Send command to NAND large page device + * @mtd: MTD device structure + * @command: the command to be sent + * @column: the column address for this command, -1 if none + * @page_addr: the page address for this command, -1 if none + * + * Send command to NAND device. This is the version for the new large page devices + * We dont have the seperate regions as we have in the small page devices. + * We must emulate NAND_CMD_READOOB to keep the code compatible. + * + */ +static void nand_command_lp (struct mtd_info *mtd, unsigned command, int column, int page_addr) +{ + register struct nand_chip *this = mtd->priv; + + /* Emulate NAND_CMD_READOOB */ + if (command == NAND_CMD_READOOB) { + column += mtd->oobblock; + command = NAND_CMD_READ0; + } + + + /* Begin command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_SETCLE); + /* Write out the command to the device. 
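+ * Note that for these large page devices the column address sent below
+ * is two bytes wide, and a read is completed by the NAND_CMD_READSTART
+ * issued in the command switch further down.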
*/ + this->write_byte(mtd, command); + /* End command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + + if (column != -1 || page_addr != -1) { + this->hwcontrol(mtd, NAND_CTL_SETALE); + + /* Serially input address */ + if (column != -1) { + /* Adjust columns for 16 bit buswidth */ + if (this->options & NAND_BUSWIDTH_16) + column >>= 1; + this->write_byte(mtd, column & 0xff); + this->write_byte(mtd, column >> 8); + } + if (page_addr != -1) { + this->write_byte(mtd, (unsigned char) (page_addr & 0xff)); + this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff)); + /* One more address cycle for devices > 128MiB */ + if (this->chipsize > (128 << 20)) + this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0xff)); + } + /* Latch in address */ + this->hwcontrol(mtd, NAND_CTL_CLRALE); + } + + /* + * program and erase have their own busy handlers + * status and sequential in needs no delay + */ + switch (command) { + + case NAND_CMD_CACHEDPROG: + case NAND_CMD_PAGEPROG: + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: + case NAND_CMD_SEQIN: + case NAND_CMD_STATUS: + return; + + + case NAND_CMD_RESET: + if (this->dev_ready) + break; + udelay(this->chip_delay); + this->hwcontrol(mtd, NAND_CTL_SETCLE); + this->write_byte(mtd, NAND_CMD_STATUS); + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + while ( !(this->read_byte(mtd) & 0x40)); + return; + + case NAND_CMD_READ0: + /* Begin command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_SETCLE); + /* Write out the start read command */ + this->write_byte(mtd, NAND_CMD_READSTART); + /* End command latch cycle */ + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + /* Fall through into ready check */ + + /* This applies to read commands */ + default: + /* + * If we don't have access to the busy pin, we apply the given + * command delay + */ + if (!this->dev_ready) { + udelay (this->chip_delay); + return; + } + } + + /* Apply this short delay always to ensure that we do wait tWB in + * any case on any machine. */ + ndelay (100); + /* wait until command is processed */ + while (!this->dev_ready(mtd)); +} + +/** + * nand_get_chip - [GENERIC] Get chip for selected access + * @this: the nand chip descriptor + * @mtd: MTD device structure + * @new_state: the state which is requested + * + * Get the device and lock it for exclusive access + */ +static void nand_get_chip (struct nand_chip *this, struct mtd_info *mtd, int new_state) +{ + + DECLARE_WAITQUEUE (wait, current); + + /* + * Grab the lock and see if the device is available + */ +retry: + spin_lock_bh (&this->chip_lock); + + if (this->state == FL_READY) { + this->state = new_state; + spin_unlock_bh (&this->chip_lock); + return; + } + + set_current_state (TASK_UNINTERRUPTIBLE); + add_wait_queue (&this->wq, &wait); + spin_unlock_bh (&this->chip_lock); + schedule (); + remove_wait_queue (&this->wq, &wait); + goto retry; +} + +/** + * nand_wait - [DEFAULT] wait until the command is done + * @mtd: MTD device structure + * @this: NAND chip structure + * @state: state to select the max. timeout value + * + * Wait for command done. This applies to erase and program only + * Erase can take up to 400ms and program up to 20ms according to + * general NAND and SmartMedia specs + * +*/ +static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state) +{ + + unsigned long timeo = jiffies; + int status; + + if (state == FL_ERASING) + timeo += (HZ * 400) / 1000; + else + timeo += (HZ * 20) / 1000; + + /* Apply this short delay always to ensure that we do wait tWB in + * any case on any machine. 
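+ * (tWB, the WE# high to busy delay, is typically specified with a maximum
+ * of about 100ns in NAND datasheets, hence the ndelay(100) below.)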
*/ + ndelay (100); + + spin_lock_bh (&this->chip_lock); + if ((state == FL_ERASING) && (this->options & NAND_IS_AND)) + this->cmdfunc (mtd, NAND_CMD_STATUS_MULTI, -1, -1); + else + this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1); + + while (time_before(jiffies, timeo)) { + /* Check, if we were interrupted */ + if (this->state != state) { + spin_unlock_bh (&this->chip_lock); + return 0; + } + if (this->dev_ready) { + if (this->dev_ready(mtd)) + break; + } + if (this->read_byte(mtd) & NAND_STATUS_READY) + break; + + spin_unlock_bh (&this->chip_lock); + yield (); + spin_lock_bh (&this->chip_lock); + } + status = (int) this->read_byte(mtd); + spin_unlock_bh (&this->chip_lock); + + return status; +} + +/** + * nand_write_page - [GENERIC] write one page + * @mtd: MTD device structure + * @this: NAND chip structure + * @page: startpage inside the chip, must be called with (page & this->pagemask) + * @oob_buf: out of band data buffer + * @oobsel: out of band selecttion structre + * @cached: 1 = enable cached programming if supported by chip + * + * Nand_page_program function is used for write and writev ! + * This function will always program a full page of data + * If you call it with a non page aligned buffer, you're lost :) + * + * Cached programming is not supported yet. + */ +static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, + u_char *oob_buf, struct nand_oobinfo *oobsel, int cached) +{ + int i, status; + u_char ecc_code[8]; + int eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE; + int *oob_config = oobsel->eccpos; + int datidx = 0, eccidx = 0, eccsteps = this->eccsteps; + int eccbytes = 0; + + /* FIXME: Enable cached programming */ + cached = 0; + + /* Send command to begin auto page programming */ + this->cmdfunc (mtd, NAND_CMD_SEQIN, 0x00, page); + + /* Write out complete page of data, take care of eccmode */ + switch (eccmode) { + /* No ecc, write all */ + case NAND_ECC_NONE: + printk (KERN_WARNING "Writing data without ECC to NAND-FLASH is not recommended\n"); + this->write_buf(mtd, this->data_poi, mtd->oobblock); + break; + + /* Software ecc 3/256, write all */ + case NAND_ECC_SOFT: + for (; eccsteps; eccsteps--) { + this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code); + for (i = 0; i < 3; i++, eccidx++) + oob_buf[oob_config[eccidx]] = ecc_code[i]; + datidx += this->eccsize; + } + this->write_buf(mtd, this->data_poi, mtd->oobblock); + break; + + /* Hardware ecc 8 byte / 512 byte data */ + case NAND_ECC_HW8_512: + eccbytes += 2; + /* Hardware ecc 6 byte / 512 byte data */ + case NAND_ECC_HW6_512: + eccbytes += 3; + /* Hardware ecc 3 byte / 256 data */ + /* Hardware ecc 3 byte / 512 byte data */ + case NAND_ECC_HW3_256: + case NAND_ECC_HW3_512: + eccbytes += 3; + for (; eccsteps; eccsteps--) { + /* enable hardware ecc logic for write */ + this->enable_hwecc(mtd, NAND_ECC_WRITE); + this->write_buf(mtd, &this->data_poi[datidx], this->eccsize); + this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code); + for (i = 0; i < eccbytes; i++, eccidx++) + oob_buf[oob_config[eccidx]] = ecc_code[i]; + /* If the hardware ecc provides syndromes then + * the ecc code must be written immidiately after + * the data bytes (words) */ + if (this->options & NAND_HWECC_SYNDROME) + this->write_buf(mtd, ecc_code, eccbytes); + + datidx += this->eccsize; + } + break; + + default: + printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode); + BUG(); + } + + /* Write out OOB data */ + if (this->options & NAND_HWECC_SYNDROME) + this->write_buf(mtd, 
&oob_buf[oobsel->eccbytes], mtd->oobsize - oobsel->eccbytes); + else + this->write_buf(mtd, oob_buf, mtd->oobsize); + + /* Send command to actually program the data */ + this->cmdfunc (mtd, cached ? NAND_CMD_CACHEDPROG : NAND_CMD_PAGEPROG, -1, -1); + + if (!cached) { + /* call wait ready function */ + status = this->waitfunc (mtd, this, FL_WRITING); + /* See if device thinks it succeeded */ + if (status & 0x01) { + DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write, page 0x%08x, ", __FUNCTION__, page); + return -EIO; + } + } else { + /* FIXME: Implement cached programming ! */ + /* wait until cache is ready*/ + // status = this->waitfunc (mtd, this, FL_CACHEDRPG); + } + return 0; +} + +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE +/** + * nand_verify_pages - [GENERIC] verify the chip contents after a write + * @mtd: MTD device structure + * @this: NAND chip structure + * @page: startpage inside the chip, must be called with (page & this->pagemask) + * @numpages: number of pages to verify + * @oob_buf: out of band data buffer + * @oobsel: out of band selecttion structre + * @chipnr: number of the current chip + * @oobmode: 1 = full buffer verify, 0 = ecc only + * + * The NAND device assumes that it is always writing to a cleanly erased page. + * Hence, it performs its internal write verification only on bits that + * transitioned from 1 to 0. The device does NOT verify the whole page on a + * byte by byte basis. It is possible that the page was not completely erased + * or the page is becoming unusable due to wear. The read with ECC would catch + * the error later when the ECC page check fails, but we would rather catch + * it early in the page write stage. Better to write no data than invalid data. + */ +static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages, + u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode) +{ + int i, j, datidx = 0, oobofs = 0, res = -EIO; + int eccsteps = this->eccsteps; + int hweccbytes; + u_char oobdata[64]; + + hweccbytes = (this->options & NAND_HWECC_SYNDROME) ? (oobsel->eccbytes / eccsteps) : 0; + + /* Send command to read back the first page */ + this->cmdfunc (mtd, NAND_CMD_READ0, 0, page); + + for(;;) { + for (j = 0; j < eccsteps; j++) { + /* Loop through and verify the data */ + if (this->verify_buf(mtd, &this->data_poi[datidx], mtd->eccsize)) { + DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page); + goto out; + } + datidx += mtd->eccsize; + /* Have we a hw generator layout ? 
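+ * With NAND_HWECC_SYNDROME the ecc bytes were written directly behind
+ * each data chunk, so they are verified here chunk by chunk as well.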
*/ + if (!hweccbytes) + continue; + if (this->verify_buf(mtd, &this->oob_buf[oobofs], hweccbytes)) { + DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page); + goto out; + } + oobofs += hweccbytes; + } + + /* check, if we must compare all data or if we just have to + * compare the ecc bytes + */ + if (oobmode) { + if (this->verify_buf(mtd, &oob_buf[oobofs], mtd->oobsize - hweccbytes * eccsteps)) { + DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page); + goto out; + } + } else { + /* Read always, else autoincrement fails */ + this->read_buf(mtd, oobdata, mtd->oobsize - hweccbytes * eccsteps); + + if (oobsel->useecc != MTD_NANDECC_OFF && !hweccbytes) { + int ecccnt = oobsel->eccbytes; + + for (i = 0; i < ecccnt; i++) { + int idx = oobsel->eccpos[i]; + if (oobdata[idx] != oob_buf[oobofs + idx] ) { + DEBUG (MTD_DEBUG_LEVEL0, + "%s: Failed ECC write " + "verify, page 0x%08x, " "%6i bytes were succesful\n", __FUNCTION__, page, i); + goto out; + } + } + } + } + oobofs += mtd->oobsize - hweccbytes * eccsteps; + page++; + numpages--; + + /* Apply delay or wait for ready/busy pin + * Do this before the AUTOINCR check, so no problems + * arise if a chip which does auto increment + * is marked as NOAUTOINCR by the board driver. + * Do this also before returning, so the chip is + * ready for the next command. + */ + if (!this->dev_ready) + udelay (this->chip_delay); + else + while (!this->dev_ready(mtd)); + + /* All done, return happy */ + if (!numpages) + return 0; + + + /* Check, if the chip supports auto page increment */ + if (!NAND_CANAUTOINCR(this)) + this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page); + } + /* + * Terminate the read command. We come here in case of an error + * So we must issue a reset command. 
+ */ +out: + this->cmdfunc (mtd, NAND_CMD_RESET, -1, -1); + return res; +} +#endif + +/** + * nand_read - [MTD Interface] MTD compability function for nand_read_ecc + * @mtd: MTD device structure + * @from: offset to read from + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put data + * + * This function simply calls nand_read_ecc with oob buffer and oobsel = NULL +*/ +static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf) +{ + return nand_read_ecc (mtd, from, len, retlen, buf, NULL, NULL); +} + + +/** + * nand_read_ecc - [MTD Interface] Read data with ECC + * @mtd: MTD device structure + * @from: offset to read from + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put data + * @oob_buf: filesystem supplied oob data buffer + * @oobsel: oob selection structure + * + * NAND read with ECC + */ +static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len, + size_t * retlen, u_char * buf, u_char * oob_buf, struct nand_oobinfo *oobsel) +{ + int i, j, col, realpage, page, end, ecc, chipnr, sndcmd = 1; + int read = 0, oob = 0, ecc_status = 0, ecc_failed = 0; + struct nand_chip *this = mtd->priv; + u_char *data_poi, *oob_data = oob_buf; + u_char ecc_calc[32]; + u_char ecc_code[32]; + int eccmode, eccsteps; + int *oob_config, datidx; + int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1; + int eccbytes = 3; + int compareecc = 1; + int oobreadlen; + + + DEBUG (MTD_DEBUG_LEVEL3, "nand_read_ecc: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); + + /* Do not allow reads past end of device */ + if ((from + len) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: Attempt read beyond end of device\n"); + *retlen = 0; + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd ,FL_READING); + + /* use userspace supplied oobinfo, if zero */ + if (oobsel == NULL) + oobsel = &mtd->oobinfo; + + /* Autoplace of oob data ? Use the default placement scheme */ + if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) + oobsel = this->autooob; + + eccmode = oobsel->useecc ? 
this->eccmode : NAND_ECC_NONE; + oob_config = oobsel->eccpos; + + /* Select the NAND device */ + chipnr = (int)(from >> this->chip_shift); + this->select_chip(mtd, chipnr); + + /* First we calculate the starting page */ + realpage = (int) (from >> this->page_shift); + page = realpage & this->pagemask; + + /* Get raw starting column */ + col = from & (mtd->oobblock - 1); + + end = mtd->oobblock; + ecc = this->eccsize; + switch (eccmode) { + case NAND_ECC_HW6_512: /* Hardware ECC 6 byte / 512 byte data */ + eccbytes = 6; + break; + case NAND_ECC_HW8_512: /* Hardware ECC 8 byte / 512 byte data */ + eccbytes = 8; + break; + case NAND_ECC_NONE: + compareecc = 0; + break; + } + + if (this->options & NAND_HWECC_SYNDROME) + compareecc = 0; + + oobreadlen = mtd->oobsize; + if (this->options & NAND_HWECC_SYNDROME) + oobreadlen -= oobsel->eccbytes; + + /* Loop until all data read */ + while (read < len) { + + int aligned = (!col && (len - read) >= end); + /* + * If the read is not page aligned, we have to read into data buffer + * due to ecc, else we read into return buffer direct + */ + if (aligned) + data_poi = &buf[read]; + else + data_poi = this->data_buf; + + /* Check, if we have this page in the buffer + * + * FIXME: Make it work when we must provide oob data too, + * check the usage of data_buf oob field + */ + if (realpage == this->pagebuf && !oob_buf) { + /* aligned read ? */ + if (aligned) + memcpy (data_poi, this->data_buf, end); + goto readdata; + } + + /* Check, if we must send the read command */ + if (sndcmd) { + this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page); + sndcmd = 0; + } + + /* get oob area, if we have no oob buffer from fs-driver */ + if (!oob_buf || oobsel->useecc == MTD_NANDECC_AUTOPLACE) + oob_data = &this->data_buf[end]; + + eccsteps = this->eccsteps; + + switch (eccmode) { + case NAND_ECC_NONE: { /* No ECC, Read in a page */ + static unsigned long lastwhinge = 0; + if ((lastwhinge / HZ) != (jiffies / HZ)) { + printk (KERN_WARNING "Reading data from NAND FLASH without ECC is not recommended\n"); + lastwhinge = jiffies; + } + this->read_buf(mtd, data_poi, end); + break; + } + + case NAND_ECC_SOFT: /* Software ECC 3/256: Read in a page + oob data */ + this->read_buf(mtd, data_poi, end); + for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=3, datidx += ecc) + this->calculate_ecc(mtd, &data_poi[datidx], &ecc_calc[i]); + break; + + case NAND_ECC_HW3_256: /* Hardware ECC 3 byte /256 byte data */ + case NAND_ECC_HW3_512: /* Hardware ECC 3 byte /512 byte data */ + case NAND_ECC_HW6_512: /* Hardware ECC 6 byte / 512 byte data */ + case NAND_ECC_HW8_512: /* Hardware ECC 8 byte / 512 byte data */ + for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=eccbytes, datidx += ecc) { + this->enable_hwecc(mtd, NAND_ECC_READ); + this->read_buf(mtd, &data_poi[datidx], ecc); + + /* HW ecc with syndrome calculation must read the + * syndrome from flash immidiately after the data */ + if (!compareecc) { + /* Some hw ecc generators need to know when the + * syndrome is read from flash */ + this->enable_hwecc(mtd, NAND_ECC_READSYN); + this->read_buf(mtd, &oob_data[i], eccbytes); + /* We calc error correction directly, it checks the hw + * generator for an error, reads back the syndrome and + * does the error correction on the fly */ + if (this->correct_data(mtd, &data_poi[datidx], &oob_data[i], &ecc_code[i]) == -1) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: " + "Failed ECC read, page 0x%08x on chip %d\n", page, chipnr); + ecc_failed++; + } + } else { + this->calculate_ecc(mtd, &data_poi[datidx], 
&ecc_calc[i]); + } + } + break; + + default: + printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode); + BUG(); + } + + /* read oobdata */ + this->read_buf(mtd, &oob_data[mtd->oobsize - oobreadlen], oobreadlen); + + /* Skip ECC check, if not requested (ECC_NONE or HW_ECC with syndromes) */ + if (!compareecc) + goto readoob; + + /* Pick the ECC bytes out of the oob data */ + for (j = 0; j < oobsel->eccbytes; j++) + ecc_code[j] = oob_data[oob_config[j]]; + + /* correct data, if neccecary */ + for (i = 0, j = 0, datidx = 0; i < this->eccsteps; i++, datidx += ecc) { + ecc_status = this->correct_data(mtd, &data_poi[datidx], &ecc_code[j], &ecc_calc[j]); + + /* Get next chunk of ecc bytes */ + j += eccbytes; + + /* Check, if we have a fs supplied oob-buffer, + * This is the legacy mode. Used by YAFFS1 + * Should go away some day + */ + if (oob_buf && oobsel->useecc == MTD_NANDECC_PLACE) { + int *p = (int *)(&oob_data[mtd->oobsize]); + p[i] = ecc_status; + } + + if (ecc_status == -1) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: " "Failed ECC read, page 0x%08x\n", page); + ecc_failed++; + } + } + + readoob: + /* check, if we have a fs supplied oob-buffer */ + if (oob_buf) { + /* without autoplace. Legacy mode used by YAFFS1 */ + switch(oobsel->useecc) { + case MTD_NANDECC_AUTOPLACE: + /* Walk through the autoplace chunks */ + for (i = 0, j = 0; j < mtd->oobavail; i++) { + int from = oobsel->oobfree[i][0]; + int num = oobsel->oobfree[i][1]; + memcpy(&oob_buf[oob], &oob_data[from], num); + j+= num; + } + oob += mtd->oobavail; + break; + case MTD_NANDECC_PLACE: + /* YAFFS1 legacy mode */ + oob_data += this->eccsteps * sizeof (int); + default: + oob_data += mtd->oobsize; + } + } + readdata: + /* Partial page read, transfer data into fs buffer */ + if (!aligned) { + for (j = col; j < end && read < len; j++) + buf[read++] = data_poi[j]; + this->pagebuf = realpage; + } else + read += mtd->oobblock; + + /* Apply delay or wait for ready/busy pin + * Do this before the AUTOINCR check, so no problems + * arise if a chip which does auto increment + * is marked as NOAUTOINCR by the board driver. + */ + if (!this->dev_ready) + udelay (this->chip_delay); + else + while (!this->dev_ready(mtd)); + + if (read == len) + break; + + /* For subsequent reads align to page boundary. */ + col = 0; + /* Increment page address */ + realpage++; + + page = realpage & this->pagemask; + /* Check, if we cross a chip boundary */ + if (!page) { + chipnr++; + this->select_chip(mtd, -1); + this->select_chip(mtd, chipnr); + } + /* Check, if the chip supports auto page increment + * or if we have hit a block boundary. + */ + if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) + sndcmd = 1; + } + + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + /* + * Return success, if no ECC failures, else -EIO + * fs driver will take care of that, because + * retlen == desired len and result == -EIO + */ + *retlen = read; + return ecc_failed ? 
-EIO : 0; +} + +/** + * nand_read_oob - [MTD Interface] NAND read out-of-band + * @mtd: MTD device structure + * @from: offset to read from + * @len: number of bytes to read + * @retlen: pointer to variable to store the number of read bytes + * @buf: the databuffer to put data + * + * NAND read out-of-band data from the spare area + */ +static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf) +{ + int i, col, page, chipnr; + struct nand_chip *this = mtd->priv; + int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1; + + DEBUG (MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); + + /* Shift to get page */ + page = (int)(from >> this->page_shift); + chipnr = (int)(from >> this->chip_shift); + + /* Mask to get column */ + col = from & (mtd->oobsize - 1); + + /* Initialize return length value */ + *retlen = 0; + + /* Do not allow reads past end of device */ + if ((from + len) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_read_oob: Attempt read beyond end of device\n"); + *retlen = 0; + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd , FL_READING); + + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + + /* Send the read command */ + this->cmdfunc (mtd, NAND_CMD_READOOB, col, page & this->pagemask); + /* + * Read the data, if we read more than one page + * oob data, let the device transfer the data ! + */ + i = 0; + while (i < len) { + int thislen = mtd->oobsize - col; + thislen = min_t(int, thislen, len); + this->read_buf(mtd, &buf[i], thislen); + i += thislen; + + /* Apply delay or wait for ready/busy pin + * Do this before the AUTOINCR check, so no problems + * arise if a chip which does auto increment + * is marked as NOAUTOINCR by the board driver. + */ + if (!this->dev_ready) + udelay (this->chip_delay); + else + while (!this->dev_ready(mtd)); + + /* Read more ? */ + if (i < len) { + page++; + col = 0; + + /* Check, if we cross a chip boundary */ + if (!(page & this->pagemask)) { + chipnr++; + this->select_chip(mtd, -1); + this->select_chip(mtd, chipnr); + } + + /* Check, if the chip supports auto page increment + * or if we have hit a block boundary. 
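+ * In either case the READOOB command is simply reissued for the next
+ * page with column 0, see below.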
+ */ + if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) { + /* For subsequent page reads set offset to 0 */ + this->cmdfunc (mtd, NAND_CMD_READOOB, 0x0, page & this->pagemask); + } + } + } + + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + /* Return happy */ + *retlen = len; + return 0; +} + +/** + * nand_read_raw - [GENERIC] Read raw data including oob into buffer + * @mtd: MTD device structure + * @buf: temporary buffer + * @from: offset to read from + * @len: number of bytes to read + * @ooblen: number of oob data bytes to read + * + * Read raw data including oob into buffer + */ +int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_t len, size_t ooblen) +{ + struct nand_chip *this = mtd->priv; + int page = (int) (from >> this->page_shift); + int chip = (int) (from >> this->chip_shift); + int sndcmd = 1; + int cnt = 0; + int pagesize = mtd->oobblock + mtd->oobsize; + int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1; + + /* Do not allow reads past end of device */ + if ((from + len) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_read_raw: Attempt read beyond end of device\n"); + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd , FL_READING); + + this->select_chip (mtd, chip); + + /* Add requested oob length */ + len += ooblen; + + while (len) { + if (sndcmd) + this->cmdfunc (mtd, NAND_CMD_READ0, 0, page & this->pagemask); + sndcmd = 0; + + this->read_buf (mtd, &buf[cnt], pagesize); + + len -= pagesize; + cnt += pagesize; + page++; + + if (!this->dev_ready) + udelay (this->chip_delay); + else + while (!this->dev_ready(mtd)); + + /* Check, if the chip supports auto page increment */ + if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) + sndcmd = 1; + } + + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + return 0; +} + + +/** + * nand_prepare_oobbuf - [GENERIC] Prepare the out of band buffer + * @mtd: MTD device structure + * @fsbuf: buffer given by fs driver + * @oobsel: out of band selection structre + * @autoplace: 1 = place given buffer into the oob bytes + * @numpages: number of pages to prepare + * + * Return: + * 1. Filesystem buffer available and autoplacement is off, + * return filesystem buffer + * 2. No filesystem buffer or autoplace is off, return internal + * buffer + * 3. Filesystem buffer is given and autoplace selected + * put data from fs buffer into internal buffer and + * retrun internal buffer + * + * Note: The internal buffer is filled with 0xff. This must + * be done only once, when no autoplacement happens + * Autoplacement sets the buffer dirty flag, which + * forces the 0xff fill before using the buffer again. 
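+ * (Refilling with 0xff matters because a 0 bit left over from a previous
+ * caller would be programmed into the oob area, and NAND can only clear
+ * bits again by erasing the whole block.)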
+ * +*/ +static u_char * nand_prepare_oobbuf (struct mtd_info *mtd, u_char *fsbuf, struct nand_oobinfo *oobsel, + int autoplace, int numpages) +{ + struct nand_chip *this = mtd->priv; + int i, len, ofs; + + /* Zero copy fs supplied buffer */ + if (fsbuf && !autoplace) + return fsbuf; + + /* Check, if the buffer must be filled with ff again */ + if (this->oobdirty) { + memset (this->oob_buf, 0xff, + mtd->oobsize << (this->phys_erase_shift - this->page_shift)); + this->oobdirty = 0; + } + + /* If we have no autoplacement or no fs buffer use the internal one */ + if (!autoplace || !fsbuf) + return this->oob_buf; + + /* Walk through the pages and place the data */ + this->oobdirty = 1; + ofs = 0; + while (numpages--) { + for (i = 0, len = 0; len < mtd->oobavail; i++) { + int to = ofs + oobsel->oobfree[i][0]; + int num = oobsel->oobfree[i][1]; + memcpy (&this->oob_buf[to], fsbuf, num); + len += num; + fsbuf += num; + } + ofs += mtd->oobavail; + } + return this->oob_buf; +} + +#define NOTALIGNED(x) (x & (mtd->oobblock-1)) != 0 + +/** + * nand_write - [MTD Interface] compability function for nand_write_ecc + * @mtd: MTD device structure + * @to: offset to write to + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of written bytes + * @buf: the data to write + * + * This function simply calls nand_write_ecc with oob buffer and oobsel = NULL + * +*/ +static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf) +{ + return (nand_write_ecc (mtd, to, len, retlen, buf, NULL, NULL)); +} + +/** + * nand_write_ecc - [MTD Interface] NAND write with ECC + * @mtd: MTD device structure + * @to: offset to write to + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of written bytes + * @buf: the data to write + * @eccbuf: filesystem supplied oob data buffer + * @oobsel: oob selection structure + * + * NAND write with ECC + */ +static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len, + size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel) +{ + int startpage, page, ret = -EIO, oob = 0, written = 0, chipnr; + int autoplace = 0, numpages, totalpages; + struct nand_chip *this = mtd->priv; + u_char *oobbuf, *bufstart; + int ppblock = (1 << (this->phys_erase_shift - this->page_shift)); + + DEBUG (MTD_DEBUG_LEVEL3, "nand_write_ecc: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); + + /* Initialize retlen, in case of early exit */ + *retlen = 0; + + /* Do not allow write past end of device */ + if ((to + len) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: Attempt to write past end of page\n"); + return -EINVAL; + } + + /* reject writes, which are not page aligned */ + if (NOTALIGNED (to) || NOTALIGNED(len)) { + printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n"); + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd, FL_WRITING); + + /* Calculate chipnr */ + chipnr = (int)(to >> this->chip_shift); + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + + /* Check, if it is write protected */ + if (nand_check_wp(mtd)) + goto out; + + /* if oobsel is NULL, use chip defaults */ + if (oobsel == NULL) + oobsel = &mtd->oobinfo; + + /* Autoplace of oob data ? 
Use the default placement scheme */ + if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) { + oobsel = this->autooob; + autoplace = 1; + } + + /* Setup variables and oob buffer */ + totalpages = len >> this->page_shift; + page = (int) (to >> this->page_shift); + /* Invalidate the page cache, if we write to the cached page */ + if (page <= this->pagebuf && this->pagebuf < (page + totalpages)) + this->pagebuf = -1; + + /* Set it relative to chip */ + page &= this->pagemask; + startpage = page; + /* Calc number of pages we can write in one go */ + numpages = min (ppblock - (startpage & (ppblock - 1)), totalpages); + oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel, autoplace, numpages); + bufstart = (u_char *)buf; + + /* Loop until all data is written */ + while (written < len) { + + this->data_poi = (u_char*) &buf[written]; + /* Write one page. If this is the last page to write + * or the last page in this block, then use the + * real pageprogram command, else select cached programming + * if supported by the chip. + */ + ret = nand_write_page (mtd, this, page, &oobbuf[oob], oobsel, (--numpages > 0)); + if (ret) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: write_page failed %d\n", ret); + goto out; + } + /* Next oob page */ + oob += mtd->oobsize; + /* Update written bytes count */ + written += mtd->oobblock; + if (written == len) + goto cmp; + + /* Increment page address */ + page++; + + /* Have we hit a block boundary ? Then we have to verify and + * if verify is ok, we have to setup the oob buffer for + * the next pages. + */ + if (!(page & (ppblock - 1))){ + int ofs; + this->data_poi = bufstart; + ret = nand_verify_pages (mtd, this, startpage, + page - startpage, + oobbuf, oobsel, chipnr, (eccbuf != NULL)); + if (ret) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret); + goto out; + } + *retlen = written; + + ofs = autoplace ? 
mtd->oobavail : mtd->oobsize; + if (eccbuf) + eccbuf += (page - startpage) * ofs; + totalpages -= page - startpage; + numpages = min (totalpages, ppblock); + page &= this->pagemask; + startpage = page; + oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel, + autoplace, numpages); + /* Check, if we cross a chip boundary */ + if (!page) { + chipnr++; + this->select_chip(mtd, -1); + this->select_chip(mtd, chipnr); + } + } + } + /* Verify the remaining pages */ +cmp: + this->data_poi = bufstart; + ret = nand_verify_pages (mtd, this, startpage, totalpages, + oobbuf, oobsel, chipnr, (eccbuf != NULL)); + if (!ret) + *retlen = written; + else + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret); + +out: + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + return ret; +} + + +/** + * nand_write_oob - [MTD Interface] NAND write out-of-band + * @mtd: MTD device structure + * @to: offset to write to + * @len: number of bytes to write + * @retlen: pointer to variable to store the number of written bytes + * @buf: the data to write + * + * NAND write out-of-band + */ +static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf) +{ + int column, page, status, ret = -EIO, chipnr; + struct nand_chip *this = mtd->priv; + + DEBUG (MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); + + /* Shift to get page */ + page = (int) (to >> this->page_shift); + chipnr = (int) (to >> this->chip_shift); + + /* Mask to get column */ + column = to & (mtd->oobsize - 1); + + /* Initialize return length value */ + *retlen = 0; + + /* Do not allow write past end of page */ + if ((column + len) > mtd->oobsize) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: Attempt to write past end of page\n"); + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd, FL_WRITING); + + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + + /* Reset the chip. Some chips (like the Toshiba TC5832DC found + in one of my DiskOnChip 2000 test units) will clear the whole + data page too if we don't do this. I have no clue why, but + I seem to have 'fixed' it in the doc2000 driver in + August 1999. dwmw2. 
*/ + this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + + /* Check, if it is write protected */ + if (nand_check_wp(mtd)) + goto out; + + /* Invalidate the page cache, if we write to the cached page */ + if (page == this->pagebuf) + this->pagebuf = -1; + + if (NAND_MUST_PAD(this)) { + /* Write out desired data */ + this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock, page & this->pagemask); + /* prepad 0xff for partial programming */ + this->write_buf(mtd, ffchars, column); + /* write data */ + this->write_buf(mtd, buf, len); + /* postpad 0xff for partial programming */ + this->write_buf(mtd, ffchars, mtd->oobsize - (len+column)); + } else { + /* Write out desired data */ + this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock + column, page & this->pagemask); + /* write data */ + this->write_buf(mtd, buf, len); + } + /* Send command to program the OOB data */ + this->cmdfunc (mtd, NAND_CMD_PAGEPROG, -1, -1); + + status = this->waitfunc (mtd, this, FL_WRITING); + + /* See if device thinks it succeeded */ + if (status & 0x01) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write, page 0x%08x\n", page); + ret = -EIO; + goto out; + } + /* Return happy */ + *retlen = len; + +#ifdef CONFIG_MTD_NAND_VERIFY_WRITE + /* Send command to read back the data */ + this->cmdfunc (mtd, NAND_CMD_READOOB, column, page & this->pagemask); + + if (this->verify_buf(mtd, buf, len)) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write verify, page 0x%08x\n", page); + ret = -EIO; + goto out; + } +#endif + ret = 0; +out: + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + return ret; +} + + +/** + * nand_writev - [MTD Interface] compabilty function for nand_writev_ecc + * @mtd: MTD device structure + * @vecs: the iovectors to write + * @count: number of vectors + * @to: offset to write to + * @retlen: pointer to variable to store the number of written bytes + * + * NAND write with kvec. 
This just calls the ecc function + */ +static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, + loff_t to, size_t * retlen) +{ + return (nand_writev_ecc (mtd, vecs, count, to, retlen, NULL, NULL)); +} + +/** + * nand_writev_ecc - [MTD Interface] write with iovec with ecc + * @mtd: MTD device structure + * @vecs: the iovectors to write + * @count: number of vectors + * @to: offset to write to + * @retlen: pointer to variable to store the number of written bytes + * @eccbuf: filesystem supplied oob data buffer + * @oobsel: oob selection structure + * + * NAND write with iovec with ecc + */ +static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, + loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel) +{ + int i, page, len, total_len, ret = -EIO, written = 0, chipnr; + int oob, numpages, autoplace = 0, startpage; + struct nand_chip *this = mtd->priv; + int ppblock = (1 << (this->phys_erase_shift - this->page_shift)); + u_char *oobbuf, *bufstart; + + /* Preset written len for early exit */ + *retlen = 0; + + /* Calculate total length of data */ + total_len = 0; + for (i = 0; i < count; i++) + total_len += (int) vecs[i].iov_len; + + DEBUG (MTD_DEBUG_LEVEL3, + "nand_writev: to = 0x%08x, len = %i, count = %ld\n", (unsigned int) to, (unsigned int) total_len, count); + + /* Do not allow write past end of page */ + if ((to + total_len) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_writev: Attempted write past end of device\n"); + return -EINVAL; + } + + /* reject writes, which are not page aligned */ + if (NOTALIGNED (to) || NOTALIGNED(total_len)) { + printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n"); + return -EINVAL; + } + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd, FL_WRITING); + + /* Get the current chip-nr */ + chipnr = (int) (to >> this->chip_shift); + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + + /* Check, if it is write protected */ + if (nand_check_wp(mtd)) + goto out; + + /* if oobsel is NULL, use chip defaults */ + if (oobsel == NULL) + oobsel = &mtd->oobinfo; + + /* Autoplace of oob data ? Use the default placement scheme */ + if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) { + oobsel = this->autooob; + autoplace = 1; + } + + /* Setup start page */ + page = (int) (to >> this->page_shift); + /* Invalidate the page cache, if we write to the cached page */ + if (page <= this->pagebuf && this->pagebuf < ((to + total_len) >> this->page_shift)) + this->pagebuf = -1; + + startpage = page & this->pagemask; + + /* Loop until all kvec' data has been written */ + len = 0; + while (count) { + /* If the given tuple is >= pagesize then + * write it out from the iov + */ + if ((vecs->iov_len - len) >= mtd->oobblock) { + /* Calc number of pages we can write + * out of this iov in one go */ + numpages = (vecs->iov_len - len) >> this->page_shift; + /* Do not cross block boundaries */ + numpages = min (ppblock - (startpage & (ppblock - 1)), numpages); + oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages); + bufstart = (u_char *)vecs->iov_base; + bufstart += len; + this->data_poi = bufstart; + oob = 0; + for (i = 1; i <= numpages; i++) { + /* Write one page. If this is the last page to write + * then use the real pageprogram command, else select + * cached programming if supported by the chip. 
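+ * (nand_write_page currently ignores the cached flag and always issues
+ * the plain page program command, see the FIXME there.)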
+ */ + ret = nand_write_page (mtd, this, page & this->pagemask, + &oobbuf[oob], oobsel, i != numpages); + if (ret) + goto out; + this->data_poi += mtd->oobblock; + len += mtd->oobblock; + oob += mtd->oobsize; + page++; + } + /* Check, if we have to switch to the next tuple */ + if (len >= (int) vecs->iov_len) { + vecs++; + len = 0; + count--; + } + } else { + /* We must use the internal buffer, read data out of each + * tuple until we have a full page to write + */ + int cnt = 0; + while (cnt < mtd->oobblock) { + if (vecs->iov_base != NULL && vecs->iov_len) + this->data_buf[cnt++] = ((u_char *) vecs->iov_base)[len++]; + /* Check, if we have to switch to the next tuple */ + if (len >= (int) vecs->iov_len) { + vecs++; + len = 0; + count--; + } + } + this->pagebuf = page; + this->data_poi = this->data_buf; + bufstart = this->data_poi; + numpages = 1; + oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages); + ret = nand_write_page (mtd, this, page & this->pagemask, + oobbuf, oobsel, 0); + if (ret) + goto out; + page++; + } + + this->data_poi = bufstart; + ret = nand_verify_pages (mtd, this, startpage, numpages, oobbuf, oobsel, chipnr, 0); + if (ret) + goto out; + + written += mtd->oobblock * numpages; + /* All done ? */ + if (!count) + break; + + startpage = page & this->pagemask; + /* Check, if we cross a chip boundary */ + if (!startpage) { + chipnr++; + this->select_chip(mtd, -1); + this->select_chip(mtd, chipnr); + } + } + ret = 0; +out: + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + *retlen = written; + return ret; +} + +/** + * single_erease_cmd - [GENERIC] NAND standard block erase command function + * @mtd: MTD device structure + * @page: the page address of the block which will be erased + * + * Standard erase command for NAND chips + */ +static void single_erase_cmd (struct mtd_info *mtd, int page) +{ + struct nand_chip *this = mtd->priv; + /* Send commands to erase a block */ + this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page); + this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1); +} + +/** + * multi_erease_cmd - [GENERIC] AND specific block erase command function + * @mtd: MTD device structure + * @page: the page address of the block which will be erased + * + * AND multi block erase command function + * Erase 4 consecutive blocks + */ +static void multi_erase_cmd (struct mtd_info *mtd, int page) +{ + struct nand_chip *this = mtd->priv; + /* Send commands to erase a block */ + this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++); + this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++); + this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++); + this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page); + this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1); +} + +/** + * nand_erase - [MTD Interface] erase block(s) + * @mtd: MTD device structure + * @instr: erase instruction + * + * Erase one ore more blocks + */ +static int nand_erase (struct mtd_info *mtd, struct erase_info *instr) +{ + return nand_erase_nand (mtd, instr, 0); +} + +/** + * nand_erase_intern - [NAND Interface] erase block(s) + * @mtd: MTD device structure + * @instr: erase instruction + * @allowbbt: allow erasing the bbt area + * + * Erase one ore more blocks + */ +int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbbt) +{ + int page, len, status, pages_per_block, ret, chipnr; + struct nand_chip *this = mtd->priv; + + DEBUG (MTD_DEBUG_LEVEL3, + "nand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len); + + /* Start address must align on 
block boundary */ + if (instr->addr & ((1 << this->phys_erase_shift) - 1)) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n"); + return -EINVAL; + } + + /* Length must align on block boundary */ + if (instr->len & ((1 << this->phys_erase_shift) - 1)) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Length not block aligned\n"); + return -EINVAL; + } + + /* Do not allow erase past end of device */ + if ((instr->len + instr->addr) > mtd->size) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Erase past end of device\n"); + return -EINVAL; + } + + instr->fail_addr = 0xffffffff; + + /* Grab the lock and see if the device is available */ + nand_get_chip (this, mtd, FL_ERASING); + + /* Shift to get first page */ + page = (int) (instr->addr >> this->page_shift); + chipnr = (int) (instr->addr >> this->chip_shift); + + /* Calculate pages in each block */ + pages_per_block = 1 << (this->phys_erase_shift - this->page_shift); + + /* Select the NAND device */ + this->select_chip(mtd, chipnr); + + /* Check the WP bit */ + /* Check, if it is write protected */ + if (nand_check_wp(mtd)) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Device is write protected!!!\n"); + instr->state = MTD_ERASE_FAILED; + goto erase_exit; + } + + /* Loop through the pages */ + len = instr->len; + + instr->state = MTD_ERASING; + + while (len) { + /* Check if we have a bad block, we do not erase bad blocks ! */ + if (nand_block_checkbad(mtd, ((loff_t) page) << this->page_shift, 0, allowbbt)) { + printk (KERN_WARNING "nand_erase: attempt to erase a bad block at page 0x%08x\n", page); + instr->state = MTD_ERASE_FAILED; + goto erase_exit; + } + + /* Invalidate the page cache, if we erase the block which contains + the current cached page */ + if (page <= this->pagebuf && this->pagebuf < (page + pages_per_block)) + this->pagebuf = -1; + + this->erase_cmd (mtd, page & this->pagemask); + + status = this->waitfunc (mtd, this, FL_ERASING); + + /* See if block erase succeeded */ + if (status & 0x01) { + DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: " "Failed erase, page 0x%08x\n", page); + instr->state = MTD_ERASE_FAILED; + instr->fail_addr = (page << this->page_shift); + goto erase_exit; + } + + /* Increment page address and decrement length */ + len -= (1 << this->phys_erase_shift); + page += pages_per_block; + + /* Check, if we cross a chip boundary */ + if (len && !(page & this->pagemask)) { + chipnr++; + this->select_chip(mtd, -1); + this->select_chip(mtd, chipnr); + } + } + instr->state = MTD_ERASE_DONE; + +erase_exit: + + ret = instr->state == MTD_ERASE_DONE ? 
0 : -EIO; + /* Do call back function */ + if (!ret && instr->callback) + instr->callback (instr); + + /* Deselect and wake up anyone waiting on the device */ + nand_release_chip(mtd); + + /* Return more or less happy */ + return ret; +} + +/** + * nand_sync - [MTD Interface] sync + * @mtd: MTD device structure + * + * Sync is actually a wait for chip ready function + */ +static void nand_sync (struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + DECLARE_WAITQUEUE (wait, current); + + DEBUG (MTD_DEBUG_LEVEL3, "nand_sync: called\n"); + +retry: + /* Grab the spinlock */ + spin_lock_bh (&this->chip_lock); + + /* See what's going on */ + switch (this->state) { + case FL_READY: + case FL_SYNCING: + this->state = FL_SYNCING; + spin_unlock_bh (&this->chip_lock); + break; + + default: + /* Not an idle state */ + add_wait_queue (&this->wq, &wait); + spin_unlock_bh (&this->chip_lock); + schedule (); + + remove_wait_queue (&this->wq, &wait); + goto retry; + } + + /* Lock the device */ + spin_lock_bh (&this->chip_lock); + + /* Set the device to be ready again */ + if (this->state == FL_SYNCING) { + this->state = FL_READY; + wake_up (&this->wq); + } + + /* Unlock the device */ + spin_unlock_bh (&this->chip_lock); +} + + +/** + * nand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad + * @mtd: MTD device structure + * @ofs: offset relative to mtd start + */ +static int nand_block_isbad (struct mtd_info *mtd, loff_t ofs) +{ + /* Check for invalid offset */ + if (ofs > mtd->size) + return -EINVAL; + + return nand_block_checkbad (mtd, ofs, 1, 0); +} + +/** + * nand_block_markbad - [MTD Interface] Mark the block at the given offset as bad + * @mtd: MTD device structure + * @ofs: offset relative to mtd start + */ +static int nand_block_markbad (struct mtd_info *mtd, loff_t ofs) +{ + struct nand_chip *this = mtd->priv; + int ret; + + if ((ret = nand_block_isbad(mtd, ofs))) { + /* If it was bad already, return success and do nothing. */ + if (ret > 0) + return 0; + return ret; + } + + return this->block_markbad(mtd, ofs); +} + +/** + * nand_scan - [NAND Interface] Scan for the NAND device + * @mtd: MTD device structure + * @maxchips: Number of chips to scan for + * + * This fills out all the not initialized function pointers + * with the defaults. + * The flash ID is read and the mtd/chip structures are + * filled with the appropriate values. Buffers are allocated if + * they are not provided by the board driver + * + */ +int nand_scan (struct mtd_info *mtd, int maxchips) +{ + int i, j, nand_maf_id, nand_dev_id, busw; + struct nand_chip *this = mtd->priv; + + /* Get buswidth to select the correct functions*/ + busw = this->options & NAND_BUSWIDTH_16; + + /* check for proper chip_delay setup, set 20us if not */ + if (!this->chip_delay) + this->chip_delay = 20; + + /* check, if a user supplied command function given */ + if (this->cmdfunc == NULL) + this->cmdfunc = nand_command; + + /* check, if a user supplied wait function given */ + if (this->waitfunc == NULL) + this->waitfunc = nand_wait; + + if (!this->select_chip) + this->select_chip = nand_select_chip; + if (!this->write_byte) + this->write_byte = busw ? nand_write_byte16 : nand_write_byte; + if (!this->read_byte) + this->read_byte = busw ? 
nand_read_byte16 : nand_read_byte; + if (!this->write_word) + this->write_word = nand_write_word; + if (!this->read_word) + this->read_word = nand_read_word; + if (!this->block_bad) + this->block_bad = nand_block_bad; + if (!this->block_markbad) + this->block_markbad = nand_default_block_markbad; + if (!this->write_buf) + this->write_buf = busw ? nand_write_buf16 : nand_write_buf; + if (!this->read_buf) + this->read_buf = busw ? nand_read_buf16 : nand_read_buf; + if (!this->verify_buf) + this->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf; + if (!this->scan_bbt) + this->scan_bbt = nand_default_bbt; + + /* Select the device */ + this->select_chip(mtd, 0); + + /* Send the command for reading device ID */ + this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1); + + /* Read manufacturer and device IDs */ + nand_maf_id = this->read_byte(mtd); + nand_dev_id = this->read_byte(mtd); + + /* Print and store flash device information */ + for (i = 0; nand_flash_ids[i].name != NULL; i++) { + + if (nand_dev_id != nand_flash_ids[i].id) + continue; + + if (!mtd->name) mtd->name = nand_flash_ids[i].name; + this->chipsize = nand_flash_ids[i].chipsize << 20; + + /* New devices have all the information in additional id bytes */ + if (!nand_flash_ids[i].pagesize) { + int extid; + /* The 3rd id byte contains non relevant data ATM */ + extid = this->read_byte(mtd); + /* The 4th id byte is the important one */ + extid = this->read_byte(mtd); + /* Calc pagesize */ + mtd->oobblock = 1024 << (extid & 0x3); + extid >>= 2; + /* Calc oobsize */ + mtd->oobsize = (8 << (extid & 0x03)) * (mtd->oobblock / 512); + extid >>= 2; + /* Calc blocksize. Blocksize is multiples of 64KiB */ + mtd->erasesize = (64 * 1024) << (extid & 0x03); + extid >>= 2; + /* Get buswidth information */ + busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; + + } else { + /* Old devices have this data hardcoded in the + * device id table */ + mtd->erasesize = nand_flash_ids[i].erasesize; + mtd->oobblock = nand_flash_ids[i].pagesize; + mtd->oobsize = mtd->oobblock / 32; + busw = nand_flash_ids[i].options & NAND_BUSWIDTH_16; + } + + /* Check, if buswidth is correct. Hardware drivers should set + * this correct ! */ + if (busw != (this->options & NAND_BUSWIDTH_16)) { + printk (KERN_INFO "NAND device: Manufacturer ID:" + " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id, + nand_manuf_ids[i].name , mtd->name); + printk (KERN_WARNING + "NAND bus width %d instead %d bit\n", + (this->options & NAND_BUSWIDTH_16) ? 16 : 8, + busw ? 16 : 8); + this->select_chip(mtd, -1); + return 1; + } + + /* Calculate the address shift from the page size */ + this->page_shift = ffs(mtd->oobblock) - 1; + this->bbt_erase_shift = this->phys_erase_shift = ffs(mtd->erasesize) - 1; + this->chip_shift = ffs(this->chipsize) - 1; + + /* Set the bad block position */ + this->badblockpos = mtd->oobblock > 512 ? + NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS; + + /* Get chip options, preserve non chip based options */ + this->options &= ~NAND_CHIPOPTIONS_MSK; + this->options |= nand_flash_ids[i].options & NAND_CHIPOPTIONS_MSK; + /* Set this as a default. Board drivers can override it, if neccecary */ + this->options |= NAND_NO_AUTOINCR; + /* Check if this is a not a samsung device. Do not clear the options + * for chips which are not having an extended id. 
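+ * (NAND_SAMSUNG_LP_OPTIONS covers features such as cached programming;
+ * they are cleared again below for extended-id chips from other makers.)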
+ */ + if (nand_maf_id != NAND_MFR_SAMSUNG && !nand_flash_ids[i].pagesize) + this->options &= ~NAND_SAMSUNG_LP_OPTIONS; + + /* Check for AND chips with 4 page planes */ + if (this->options & NAND_4PAGE_ARRAY) + this->erase_cmd = multi_erase_cmd; + else + this->erase_cmd = single_erase_cmd; + + /* Do not replace user supplied command function ! */ + if (mtd->oobblock > 512 && this->cmdfunc == nand_command) + this->cmdfunc = nand_command_lp; + + /* Try to identify manufacturer */ + for (j = 0; nand_manuf_ids[j].id != 0x0; j++) { + if (nand_manuf_ids[j].id == nand_maf_id) + break; + } + printk (KERN_INFO "NAND device: Manufacturer ID:" + " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id, + nand_manuf_ids[j].name , nand_flash_ids[i].name); + break; + } + + if (!nand_flash_ids[i].name) { + printk (KERN_WARNING "No NAND device found!!!\n"); + this->select_chip(mtd, -1); + return 1; + } + + for (i=1; i < maxchips; i++) { + this->select_chip(mtd, i); + + /* Send the command for reading device ID */ + this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1); + + /* Read manufacturer and device IDs */ + if (nand_maf_id != this->read_byte(mtd) || + nand_dev_id != this->read_byte(mtd)) + break; + } + if (i > 1) + printk(KERN_INFO "%d NAND chips detected\n", i); + + /* Allocate buffers, if neccecary */ + if (!this->oob_buf) { + size_t len; + len = mtd->oobsize << (this->phys_erase_shift - this->page_shift); + this->oob_buf = kmalloc (len, GFP_KERNEL); + if (!this->oob_buf) { + printk (KERN_ERR "nand_scan(): Cannot allocate oob_buf\n"); + return -ENOMEM; + } + this->options |= NAND_OOBBUF_ALLOC; + } + + if (!this->data_buf) { + size_t len; + len = mtd->oobblock + mtd->oobsize; + this->data_buf = kmalloc (len, GFP_KERNEL); + if (!this->data_buf) { + if (this->options & NAND_OOBBUF_ALLOC) + kfree (this->oob_buf); + printk (KERN_ERR "nand_scan(): Cannot allocate data_buf\n"); + return -ENOMEM; + } + this->options |= NAND_DATABUF_ALLOC; + } + + /* Store the number of chips and calc total size for mtd */ + this->numchips = i; + mtd->size = i * this->chipsize; + /* Convert chipsize to number of pages per chip -1. 
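+ * For example (sizes picked only for illustration): a 32MiB chip with
+ * 512 byte pages has 65536 pages, so the pagemask becomes 0xffff.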
*/ + this->pagemask = (this->chipsize >> this->page_shift) - 1; + /* Preset the internal oob buffer */ + memset(this->oob_buf, 0xff, mtd->oobsize << (this->phys_erase_shift - this->page_shift)); + + /* If no default placement scheme is given, select an + * appropriate one */ + if (!this->autooob) { + /* Select the appropriate default oob placement scheme for + * placement agnostic filesystems */ + switch (mtd->oobsize) { + case 8: + this->autooob = &nand_oob_8; + break; + case 16: + this->autooob = &nand_oob_16; + break; + case 64: + this->autooob = &nand_oob_64; + break; + default: + printk (KERN_WARNING "No oob scheme defined for oobsize %d\n", + mtd->oobsize); + BUG(); + } + } + + /* The number of bytes available for the filesystem to place fs dependend + * oob data */ + if (this->options & NAND_BUSWIDTH_16) { + mtd->oobavail = mtd->oobsize - (this->autooob->eccbytes + 2); + if (this->autooob->eccbytes & 0x01) + mtd->oobavail--; + } else + mtd->oobavail = mtd->oobsize - (this->autooob->eccbytes + 1); + + /* + * check ECC mode, default to software + * if 3byte/512byte hardware ECC is selected and we have 256 byte pagesize + * fallback to software ECC + */ + this->eccsize = 256; /* set default eccsize */ + + switch (this->eccmode) { + + case NAND_ECC_HW3_512: + case NAND_ECC_HW6_512: + case NAND_ECC_HW8_512: + if (mtd->oobblock == 256) { + printk (KERN_WARNING "512 byte HW ECC not possible on 256 Byte pagesize, fallback to SW ECC \n"); + this->eccmode = NAND_ECC_SOFT; + this->calculate_ecc = nand_calculate_ecc; + this->correct_data = nand_correct_data; + break; + } else + this->eccsize = 512; /* set eccsize to 512 and fall through for function check */ + + case NAND_ECC_HW3_256: + if (this->calculate_ecc && this->correct_data && this->enable_hwecc) + break; + printk (KERN_WARNING "No ECC functions supplied, Hardware ECC not possible\n"); + BUG(); + + case NAND_ECC_NONE: + printk (KERN_WARNING "NAND_ECC_NONE selected by board driver. 
This is not recommended !!\n"); + this->eccmode = NAND_ECC_NONE; + break; + + case NAND_ECC_SOFT: + this->calculate_ecc = nand_calculate_ecc; + this->correct_data = nand_correct_data; + break; + + default: + printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode); + BUG(); + } + + mtd->eccsize = this->eccsize; + + /* Set the number of read / write steps for one page to ensure ECC generation */ + switch (this->eccmode) { + case NAND_ECC_HW3_512: + case NAND_ECC_HW6_512: + case NAND_ECC_HW8_512: + this->eccsteps = mtd->oobblock / 512; + break; + case NAND_ECC_HW3_256: + case NAND_ECC_SOFT: + this->eccsteps = mtd->oobblock / 256; + break; + + case NAND_ECC_NONE: + this->eccsteps = 1; + break; + } + + /* Initialize state, waitqueue and spinlock */ + this->state = FL_READY; + init_waitqueue_head (&this->wq); + spin_lock_init (&this->chip_lock); + + /* De-select the device */ + this->select_chip(mtd, -1); + + /* Invalidate the pagebuffer reference */ + this->pagebuf = -1; + + /* Fill in remaining MTD driver data */ + mtd->type = MTD_NANDFLASH; + mtd->flags = MTD_CAP_NANDFLASH | MTD_ECC; + mtd->ecctype = MTD_ECC_SW; + mtd->erase = nand_erase; + mtd->point = NULL; + mtd->unpoint = NULL; + mtd->read = nand_read; + mtd->write = nand_write; + mtd->read_ecc = nand_read_ecc; + mtd->write_ecc = nand_write_ecc; + mtd->read_oob = nand_read_oob; + mtd->write_oob = nand_write_oob; + mtd->readv = NULL; + mtd->writev = nand_writev; + mtd->writev_ecc = nand_writev_ecc; + mtd->sync = nand_sync; + mtd->lock = NULL; + mtd->unlock = NULL; + mtd->suspend = NULL; + mtd->resume = NULL; + mtd->block_isbad = nand_block_isbad; + mtd->block_markbad = nand_block_markbad; + + /* and make the autooob the default one */ + memcpy(&mtd->oobinfo, this->autooob, sizeof(mtd->oobinfo)); + + mtd->owner = THIS_MODULE; + + /* Build bad block table */ + return this->scan_bbt (mtd); +} + +/** + * nand_release - [NAND Interface] Free resources held by the NAND device + * @mtd: MTD device structure +*/ +void nand_release (struct mtd_info *mtd) +{ + struct nand_chip *this = mtd->priv; + +#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE) + /* Unregister partitions */ + del_mtd_partitions (mtd); +#endif + /* Unregister the device */ + del_mtd_device (mtd); + + /* Free bad block table memory, if allocated */ + if (this->bbt) + kfree (this->bbt); + /* Buffer allocated by nand_scan ? */ + if (this->options & NAND_OOBBUF_ALLOC) + kfree (this->oob_buf); + /* Buffer allocated by nand_scan ? */ + if (this->options & NAND_DATABUF_ALLOC) + kfree (this->data_buf); +} + +EXPORT_SYMBOL (nand_scan); +EXPORT_SYMBOL (nand_release); + +MODULE_LICENSE ("GPL"); +MODULE_AUTHOR ("Steven J. Hill , Thomas Gleixner "); +MODULE_DESCRIPTION ("Generic NAND flash driver code"); diff --git a/drivers/mtd/nand/tx4925ndfmc.c b/drivers/mtd/nand/tx4925ndfmc.c new file mode 100644 index 000000000..94a616561 --- /dev/null +++ b/drivers/mtd/nand/tx4925ndfmc.c @@ -0,0 +1,442 @@ +/* + * drivers/mtd/tx4925ndfmc.c + * + * Overview: + * This is a device driver for the NAND flash device found on the + * Toshiba RBTX4925 reference board, which is a SmartMediaCard. It supports + * 16MiB, 32MiB and 64MiB cards. + * + * Author: MontaVista Software, Inc. source@mvista.com + * + * Derived from drivers/mtd/autcpu12.c + * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) + * + * $Id: tx4925ndfmc.c,v 1.2 2004/03/27 19:55:53 gleixner Exp $ + * + * Copyright (C) 2001 Toshiba Corporation + * + * 2003 (c) MontaVista Software, Inc. 
This file is licensed under + * the terms of the GNU General Public License version 2. This program + * is licensed "as is" without any warranty of any kind, whether express + * or implied. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern struct nand_oobinfo jffs2_oobinfo; + +/* + * MTD structure for RBTX4925 board + */ +static struct mtd_info *tx4925ndfmc_mtd = NULL; + +/* + * Module stuff + */ +#if LINUX_VERSION_CODE < 0x20212 && defined(MODULE) +#define tx4925ndfmc_init init_module +#define tx4925ndfmc_cleanup cleanup_module +#endif + +/* + * Define partitions for flash devices + */ + +static struct mtd_partition partition_info16k[] = { + { .name = "RBTX4925 flash partition 1", + .offset = 0, + .size = 8 * 0x00100000 }, + { .name = "RBTX4925 flash partition 2", + .offset = 8 * 0x00100000, + .size = 8 * 0x00100000 }, +}; + +static struct mtd_partition partition_info32k[] = { + { .name = "RBTX4925 flash partition 1", + .offset = 0, + .size = 8 * 0x00100000 }, + { .name = "RBTX4925 flash partition 2", + .offset = 8 * 0x00100000, + .size = 24 * 0x00100000 }, +}; + +static struct mtd_partition partition_info64k[] = { + { .name = "User FS", + .offset = 0, + .size = 16 * 0x00100000 }, + { .name = "RBTX4925 flash partition 2", + .offset = 16 * 0x00100000, + .size = 48 * 0x00100000}, +}; + +static struct mtd_partition partition_info128k[] = { + { .name = "Skip bad section", + .offset = 0, + .size = 16 * 0x00100000 }, + { .name = "User FS", + .offset = 16 * 0x00100000, + .size = 112 * 0x00100000 }, +}; +#define NUM_PARTITIONS16K 2 +#define NUM_PARTITIONS32K 2 +#define NUM_PARTITIONS64K 2 +#define NUM_PARTITIONS128K 2 + +/* + * hardware specific access to control-lines +*/ +static void tx4925ndfmc_hwcontrol(struct mtd_info *mtd, int cmd) +{ + + switch(cmd){ + + case NAND_CTL_SETCLE: + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_CLE; + break; + case NAND_CTL_CLRCLE: + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_CLE; + break; + case NAND_CTL_SETALE: + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ALE; + break; + case NAND_CTL_CLRALE: + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ALE; + break; + case NAND_CTL_SETNCE: + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_CE; + break; + case NAND_CTL_CLRNCE: + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_CE; + break; + case NAND_CTL_SETWP: + tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_WE; + break; + case NAND_CTL_CLRWP: + tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_WE; + break; + } +} + +/* +* read device ready pin +*/ +static int tx4925ndfmc_device_ready(struct mtd_info *mtd) +{ + int ready; + ready = (tx4925_ndfmcptr->sr & TX4925_NDSFR_BUSY) ? 
0 : 1;
+	return ready;
+}
+void tx4925ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	/* reset first */
+	tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_MASK;
+	tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
+	tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_ENAB;
+}
+static void tx4925ndfmc_disable_ecc(void)
+{
+	tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
+}
+static void tx4925ndfmc_enable_read_ecc(void)
+{
+	tx4925_ndfmcptr->mcr &= ~TX4925_NDFMCR_ECC_CNTL_MASK;
+	tx4925_ndfmcptr->mcr |= TX4925_NDFMCR_ECC_CNTL_READ;
+}
+void tx4925ndfmc_readecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code){
+	int i;
+	u_char *ecc = ecc_code;
+	tx4925ndfmc_enable_read_ecc();
+	for (i = 0;i < 6;i++,ecc++)
+		*ecc = tx4925_read_nfmc(&(tx4925_ndfmcptr->dtr));
+	tx4925ndfmc_disable_ecc();
+}
+void tx4925ndfmc_device_setup(void)
+{
+
+	*(unsigned char *)0xbb005000 &= ~0x08;
+
+	/* reset NDFMC */
+	tx4925_ndfmcptr->rstr |= TX4925_NDFRSTR_RST;
+	while (tx4925_ndfmcptr->rstr & TX4925_NDFRSTR_RST);
+
+	/* setup BusSeparete, Hold Time, Strobe Pulse Width */
+	tx4925_ndfmcptr->mcr = TX4925_BSPRT ? TX4925_NDFMCR_BSPRT : 0;
+	tx4925_ndfmcptr->spr = TX4925_HOLD << 4 | TX4925_SPW;
+}
+static u_char tx4925ndfmc_nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd->priv;
+	return tx4925_read_nfmc(this->IO_ADDR_R);
+}
+
+static void tx4925ndfmc_nand_write_byte(struct mtd_info *mtd, u_char byte)
+{
+	struct nand_chip *this = mtd->priv;
+	tx4925_write_nfmc(byte, this->IO_ADDR_W);
+}
+
+static void tx4925ndfmc_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd->priv;
+
+	for (i=0; i<len; i++)
+		tx4925_write_nfmc(buf[i], this->IO_ADDR_W);
+}
+
+static void tx4925ndfmc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd->priv;
+
+	for (i=0; i<len; i++)
+		buf[i] = tx4925_read_nfmc(this->IO_ADDR_R);
+}
+
+static int tx4925ndfmc_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd->priv;
+
+	for (i=0; i<len; i++)
+		if (buf[i] != tx4925_read_nfmc(this->IO_ADDR_R))
+			return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * Send command to NAND device
+ */
+static void tx4925ndfmc_nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+	register struct nand_chip *this = mtd->priv;
+
+	/* Begin command latch cycle */
+	this->hwcontrol(mtd, NAND_CTL_SETCLE);
+	/*
+	 * Write out the command to the device.
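 * (Worked example of the SEQIN handling just below, with an assumed 512-byte
 *  page size: a write starting at column 300 first issues NAND_CMD_READ1 and
 *  sends column 300 - 256 = 44 in the address cycle, while a write into the
 *  OOB area at column 517 issues NAND_CMD_READOOB and sends column 5.)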
+ */ + if (command == NAND_CMD_SEQIN) { + int readcmd; + + if (column >= mtd->oobblock) { + /* OOB area */ + column -= mtd->oobblock; + readcmd = NAND_CMD_READOOB; + } else if (column < 256) { + /* First 256 bytes --> READ0 */ + readcmd = NAND_CMD_READ0; + } else { + column -= 256; + readcmd = NAND_CMD_READ1; + } + this->write_byte(mtd, readcmd); + } + this->write_byte(mtd, command); + + /* Set ALE and clear CLE to start address cycle */ + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + + if (column != -1 || page_addr != -1) { + this->hwcontrol(mtd, NAND_CTL_SETALE); + + /* Serially input address */ + if (column != -1) + this->write_byte(mtd, column); + if (page_addr != -1) { + this->write_byte(mtd, (unsigned char) (page_addr & 0xff)); + this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff)); + /* One more address cycle for higher density devices */ + if (mtd->size & 0x0c000000) + this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f)); + } + /* Latch in address */ + this->hwcontrol(mtd, NAND_CTL_CLRALE); + } + + /* + * program and erase have their own busy handlers + * status and sequential in needs no delay + */ + switch (command) { + + case NAND_CMD_PAGEPROG: + /* Turn off WE */ + this->hwcontrol (mtd, NAND_CTL_CLRWP); + return; + + case NAND_CMD_SEQIN: + /* Turn on WE */ + this->hwcontrol (mtd, NAND_CTL_SETWP); + return; + + case NAND_CMD_ERASE1: + case NAND_CMD_ERASE2: + case NAND_CMD_STATUS: + return; + + case NAND_CMD_RESET: + if (this->dev_ready) + break; + this->hwcontrol(mtd, NAND_CTL_SETCLE); + this->write_byte(mtd, NAND_CMD_STATUS); + this->hwcontrol(mtd, NAND_CTL_CLRCLE); + while ( !(this->read_byte(mtd) & 0x40)); + return; + + /* This applies to read commands */ + default: + /* + * If we don't have access to the busy pin, we apply the given + * command delay + */ + if (!this->dev_ready) { + udelay (this->chip_delay); + return; + } + } + + /* wait until command is processed */ + while (!this->dev_ready(mtd)); +} + +#ifdef CONFIG_MTD_CMDLINE_PARTS +extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partitio +n **pparts, char *); +#endif + +/* + * Main initialization routine + */ +extern int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); +int __init tx4925ndfmc_init (void) +{ + struct nand_chip *this; + int err = 0; + + /* Allocate memory for MTD device structure and private data */ + tx4925ndfmc_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip), + GFP_KERNEL); + if (!tx4925ndfmc_mtd) { + printk ("Unable to allocate RBTX4925 NAND MTD device structure.\n"); + err = -ENOMEM; + goto out; + } + + tx4925ndfmc_device_setup(); + + /* io is indirect via a register so don't need to ioremap address */ + + /* Get pointer to private data */ + this = (struct nand_chip *) (&tx4925ndfmc_mtd[1]); + + /* Initialize structures */ + memset((char *) tx4925ndfmc_mtd, 0, sizeof(struct mtd_info)); + memset((char *) this, 0, sizeof(struct nand_chip)); + + /* Link the private data with the MTD structure */ + tx4925ndfmc_mtd->priv = this; + + /* Set address of NAND IO lines */ + this->IO_ADDR_R = (unsigned long)&(tx4925_ndfmcptr->dtr); + this->IO_ADDR_W = (unsigned long)&(tx4925_ndfmcptr->dtr); + this->hwcontrol = tx4925ndfmc_hwcontrol; + this->enable_hwecc = tx4925ndfmc_enable_hwecc; + this->calculate_ecc = tx4925ndfmc_readecc; + this->correct_data = nand_correct_data; + this->eccmode = NAND_ECC_HW6_512; + this->dev_ready = tx4925ndfmc_device_ready; + /* 20 us command delay time */ + this->chip_delay = 20; + 
this->read_byte = tx4925ndfmc_nand_read_byte; + this->write_byte = tx4925ndfmc_nand_write_byte; + this->cmdfunc = tx4925ndfmc_nand_command; + this->write_buf = tx4925ndfmc_nand_write_buf; + this->read_buf = tx4925ndfmc_nand_read_buf; + this->verify_buf = tx4925ndfmc_nand_verify_buf; + + /* Scan to find existance of the device */ + if (nand_scan (tx4925ndfmc_mtd, 1)) { + err = -ENXIO; + goto out_ior; + } + + /* Allocate memory for internal data buffer */ + this->data_buf = kmalloc (sizeof(u_char) * (tx4925ndfmc_mtd->oobblock + tx4925ndfmc_mtd->oobsize), GFP_KERNEL); + if (!this->data_buf) { + printk ("Unable to allocate NAND data buffer for RBTX4925.\n"); + err = -ENOMEM; + goto out_ior; + } + + /* Register the partitions */ +#ifdef CONFIG_MTD_CMDLINE_PARTS + { + int mtd_parts_nb = 0; + struct mtd_partition *mtd_parts = 0; + mtd_parts_nb = parse_cmdline_partitions(tx4925ndfmc_mtd, &mtd_parts, "tx4925ndfmc"); + if (mtd_parts_nb > 0) + add_mtd_partitions(tx4925ndfmc_mtd, mtd_parts, mtd_parts_nb); + else + add_mtd_device(tx4925ndfmc_mtd); + } +#else /* ifdef CONFIG_MTD_CMDLINE_PARTS */ + switch(tx4925ndfmc_mtd->size){ + case 0x01000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info16k, NUM_PARTITIONS16K); break; + case 0x02000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info32k, NUM_PARTITIONS32K); break; + case 0x04000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info64k, NUM_PARTITIONS64K); break; + case 0x08000000: add_mtd_partitions(tx4925ndfmc_mtd, partition_info128k, NUM_PARTITIONS128K); break; + default: { + printk ("Unsupported SmartMedia device\n"); + err = -ENXIO; + goto out_buf; + } + } +#endif /* ifdef CONFIG_MTD_CMDLINE_PARTS */ + goto out; + +out_buf: + kfree (this->data_buf); +out_ior: +out: + return err; +} + +module_init(tx4925ndfmc_init); + +/* + * Clean up routine + */ +#ifdef MODULE +static void __exit tx4925ndfmc_cleanup (void) +{ + struct nand_chip *this = (struct nand_chip *) &tx4925ndfmc_mtd[1]; + + /* Unregister partitions */ + del_mtd_partitions(tx4925ndfmc_mtd); + + /* Unregister the device */ + del_mtd_device (tx4925ndfmc_mtd); + + /* Free internal data buffers */ + kfree (this->data_buf); + + /* Free the MTD device structure */ + kfree (tx4925ndfmc_mtd); +} +module_exit(tx4925ndfmc_cleanup); +#endif + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alice Hennessy "); +MODULE_DESCRIPTION("Glue layer for SmartMediaCard on Toshiba RBTX4925"); diff --git a/drivers/net/fec_8xx/Kconfig b/drivers/net/fec_8xx/Kconfig new file mode 100644 index 000000000..db36ac3ea --- /dev/null +++ b/drivers/net/fec_8xx/Kconfig @@ -0,0 +1,14 @@ +config FEC_8XX + tristate "Motorola 8xx FEC driver" + depends on NET_ETHERNET && 8xx && (NETTA || NETPHONE) + select MII + +config FEC_8XX_GENERIC_PHY + bool "Support any generic PHY" + depends on FEC_8XX + default y + +config FEC_8XX_DM9161_PHY + bool "Support DM9161 PHY" + depends on FEC_8XX + default n diff --git a/drivers/net/fec_8xx/Makefile b/drivers/net/fec_8xx/Makefile new file mode 100644 index 000000000..70c54f8c4 --- /dev/null +++ b/drivers/net/fec_8xx/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for the Motorola 8xx FEC ethernet controller +# + +obj-$(CONFIG_FEC_8XX) += fec_8xx.o + +fec_8xx-objs := fec_main.o fec_mii.o + +# the platform instantatiation objects +ifeq ($(CONFIG_NETTA),y) +fec_8xx-objs += fec_8xx-netta.o +endif diff --git a/drivers/net/fec_8xx/fec_8xx.h b/drivers/net/fec_8xx/fec_8xx.h new file mode 100644 index 000000000..5af60b0f9 --- /dev/null +++ b/drivers/net/fec_8xx/fec_8xx.h @@ -0,0 +1,218 @@ +#ifndef FEC_8XX_H 
+#define FEC_8XX_H + +#include +#include + +#include + +/* HW info */ + +/* CRC polynomium used by the FEC for the multicast group filtering */ +#define FEC_CRC_POLY 0x04C11DB7 + +#define MII_ADVERTISE_HALF (ADVERTISE_100HALF | \ + ADVERTISE_10HALF | ADVERTISE_CSMA) +#define MII_ADVERTISE_ALL (ADVERTISE_100FULL | \ + ADVERTISE_10FULL | MII_ADVERTISE_HALF) + +/* Interrupt events/masks. +*/ +#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */ +#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */ +#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */ +#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */ +#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */ +#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */ +#define FEC_ENET_RXF 0x02000000U /* Full frame received */ +#define FEC_ENET_RXB 0x01000000U /* A buffer was received */ +#define FEC_ENET_MII 0x00800000U /* MII interrupt */ +#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */ + +#define FEC_ECNTRL_PINMUX 0x00000004 +#define FEC_ECNTRL_ETHER_EN 0x00000002 +#define FEC_ECNTRL_RESET 0x00000001 + +#define FEC_RCNTRL_BC_REJ 0x00000010 +#define FEC_RCNTRL_PROM 0x00000008 +#define FEC_RCNTRL_MII_MODE 0x00000004 +#define FEC_RCNTRL_DRT 0x00000002 +#define FEC_RCNTRL_LOOP 0x00000001 + +#define FEC_TCNTRL_FDEN 0x00000004 +#define FEC_TCNTRL_HBC 0x00000002 +#define FEC_TCNTRL_GTS 0x00000001 + +/* values for MII phy_status */ + +#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */ +#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */ +#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */ +#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */ +#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */ +#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */ +#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */ + +#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */ +#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */ +#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */ +#define PHY_STAT_SPMASK 0xf000 /* mask for speed */ +#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */ +#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */ +#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */ +#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */ + +typedef struct phy_info { + unsigned int id; + const char *name; + void (*startup) (struct net_device * dev); + void (*shutdown) (struct net_device * dev); + void (*ack_int) (struct net_device * dev); +} phy_info_t; + +/* The FEC stores dest/src/type, data, and checksum for receive packets. 
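 * (Worked sizes, derived from the defines below rather than added by the
 *  patch: PKT_MAXBUF_SIZE = MAX_MTU + ETH_HLEN + CRC_LEN = 1508 + 14 + 4 =
 *  1526 bytes, and PKT_MAXBLR_SIZE rounds that up to the next multiple of 4,
 *  i.e. 1528.)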
+ */ +#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */ +#define MIN_MTU 46 /* this is data size */ +#define CRC_LEN 4 + +#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN) +#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN) + +/* Must be a multiple of 4 */ +#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE+3) & ~3) +/* This is needed so that invalidate_xxx wont invalidate too much */ +#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE) + +/* platform interface */ + +struct fec_platform_info { + int fec_no; /* FEC index */ + int use_mdio; /* use external MII */ + int phy_addr; /* the phy address */ + int fec_irq, phy_irq; /* the irq for the controller */ + int rx_ring, tx_ring; /* number of buffers on rx */ + int sys_clk; /* system clock */ + __u8 macaddr[6]; /* mac address */ + int rx_copybreak; /* limit we copy small frames */ + int use_napi; /* use NAPI */ + int napi_weight; /* NAPI weight */ +}; + +/* forward declaration */ +struct fec; + +struct fec_enet_private { + spinlock_t lock; /* during all ops except TX pckt processing */ + spinlock_t tx_lock; /* during fec_start_xmit and fec_tx */ + int fecno; + struct fec *fecp; + const struct fec_platform_info *fpi; + int rx_ring, tx_ring; + dma_addr_t ring_mem_addr; + void *ring_base; + struct sk_buff **rx_skbuff; + struct sk_buff **tx_skbuff; + cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ + cbd_t *tx_bd_base; + cbd_t *dirty_tx; /* ring entries to be free()ed. */ + cbd_t *cur_rx; + cbd_t *cur_tx; + int tx_free; + struct net_device_stats stats; + struct timer_list phy_timer_list; + const struct phy_info *phy; + unsigned int fec_phy_speed; + __u32 msg_enable; + struct mii_if_info mii_if; +}; + +/***************************************************************************/ + +void fec_restart(struct net_device *dev, int duplex, int speed); +void fec_stop(struct net_device *dev); + +/***************************************************************************/ + +int fec_mii_read(struct net_device *dev, int phy_id, int location); +void fec_mii_write(struct net_device *dev, int phy_id, int location, int value); + +int fec_mii_phy_id_detect(struct net_device *dev); +void fec_mii_startup(struct net_device *dev); +void fec_mii_shutdown(struct net_device *dev); +void fec_mii_ack_int(struct net_device *dev); + +void fec_mii_link_status_change_check(struct net_device *dev, int init_media); + +/***************************************************************************/ + +#define FEC1_NO 0x00 +#define FEC2_NO 0x01 +#define FEC3_NO 0x02 + +int fec_8xx_init_one(const struct fec_platform_info *fpi, + struct net_device **devp); +int fec_8xx_cleanup_one(struct net_device *dev); + +/***************************************************************************/ + +#define DRV_MODULE_NAME "fec_8xx" +#define PFX DRV_MODULE_NAME ": " +#define DRV_MODULE_VERSION "0.1" +#define DRV_MODULE_RELDATE "May 6, 2004" + +/***************************************************************************/ + +int fec_8xx_platform_init(void); +void fec_8xx_platform_cleanup(void); + +/***************************************************************************/ + +/* FEC access macros */ +#if defined(CONFIG_8xx) +/* for a 8xx __raw_xxx's are sufficient */ +#define __fec_out32(addr, x) __raw_writel(x, addr) +#define __fec_out16(addr, x) __raw_writew(x, addr) +#define __fec_in32(addr) __raw_readl(addr) +#define __fec_in16(addr) __raw_readw(addr) +#else +/* for others play it safe */ +#define __fec_out32(addr, x) out_be32(addr, x) +#define __fec_out16(addr, x) 
out_be16(addr, x) +#define __fec_in32(addr) in_be32(addr) +#define __fec_in16(addr) in_be16(addr) +#endif + +/* write */ +#define FW(_fecp, _reg, _v) __fec_out32(&(_fecp)->fec_ ## _reg, (_v)) + +/* read */ +#define FR(_fecp, _reg) __fec_in32(&(_fecp)->fec_ ## _reg) + +/* set bits */ +#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v)) + +/* clear bits */ +#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) + +/* buffer descriptor access macros */ + +/* write */ +#define CBDW_SC(_cbd, _sc) __fec_out16(&(_cbd)->cbd_sc, (_sc)) +#define CBDW_DATLEN(_cbd, _datlen) __fec_out16(&(_cbd)->cbd_datlen, (_datlen)) +#define CBDW_BUFADDR(_cbd, _bufaddr) __fec_out32(&(_cbd)->cbd_bufaddr, (_bufaddr)) + +/* read */ +#define CBDR_SC(_cbd) __fec_in16(&(_cbd)->cbd_sc) +#define CBDR_DATLEN(_cbd) __fec_in16(&(_cbd)->cbd_datlen) +#define CBDR_BUFADDR(_cbd) __fec_in32(&(_cbd)->cbd_bufaddr) + +/* set bits */ +#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc)) + +/* clear bits */ +#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc)) + +/***************************************************************************/ + +#endif diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c new file mode 100644 index 000000000..5005c1c94 --- /dev/null +++ b/drivers/net/gianfar.c @@ -0,0 +1,1926 @@ +/* + * drivers/net/gianfar.c + * + * Gianfar Ethernet Driver + * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Gianfar: AKA Lambda Draconis, "Dragon" + * RA 11 31 24.2 + * Dec +69 19 52 + * V 3.84 + * B-V +1.62 + * + * Theory of operation + * This driver is designed for the Triple-speed Ethernet + * controllers on the Freescale 8540/8560 integrated processors, + * as well as the Fast Ethernet Controller on the 8540. + * + * The driver is initialized through OCP. Structures which + * define the configuration needed by the board are defined in a + * board structure in arch/ppc/platforms (though I do not + * discount the possibility that other architectures could one + * day be supported. One assumption the driver currently makes + * is that the PHY is configured in such a way to advertise all + * capabilities. This is a sensible default, and on certain + * PHYs, changing this default encounters substantial errata + * issues. Future versions may remove this requirement, but for + * now, it is best for the firmware to ensure this is the case. + * + * The Gianfar Ethernet Controller uses a ring of buffer + * descriptors. The beginning is indicated by a register + * pointing to the physical address of the start of the ring. + * The end is determined by a "wrap" bit being set in the + * last descriptor of the ring. + * + * When a packet is received, the RXF bit in the + * IEVENT register is set, triggering an interrupt when the + * corresponding bit in the IMASK register is also set (if + * interrupt coalescing is active, then the interrupt may not + * happen immediately, but will wait until either a set number + * of frames or amount of time have passed.). In NAPI, the + * interrupt handler will signal there is work to be done, and + * exit. 
Without NAPI, the packet(s) will be handled + * immediately. Both methods will start at the last known empty + * descriptor, and process every subsequent descriptor until there + * are none left with data (NAPI will stop after a set number of + * packets to give time to other tasks, but will eventually + * process all the packets). The data arrives inside a + * pre-allocated skb, and so after the skb is passed up to the + * stack, a new skb must be allocated, and the address field in + * the buffer descriptor must be updated to indicate this new + * skb. + * + * When the kernel requests that a packet be transmitted, the + * driver starts where it left off last time, and points the + * descriptor at the buffer which was passed in. The driver + * then informs the DMA engine that there are packets ready to + * be transmitted. Once the controller is finished transmitting + * the packet, an interrupt may be triggered (under the same + * conditions as for reception, but depending on the TXF bit). + * The driver then cleans up the buffer. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "gianfar.h" +#include "gianfar_phy.h" +#ifdef CONFIG_NET_FASTROUTE +#include +#include +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41) +#define irqreturn_t void +#define IRQ_HANDLED +#endif + +#define TX_TIMEOUT (1*HZ) +#define SKB_ALLOC_TIMEOUT 1000000 +#undef BRIEF_GFAR_ERRORS +#define VERBOSE_GFAR_ERRORS + +#ifdef CONFIG_GFAR_NAPI +#define RECEIVE(x) netif_receive_skb(x) +#else +#define RECEIVE(x) netif_rx(x) +#endif + +#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.0, " +char gfar_driver_name[] = "Gianfar Ethernet"; +char gfar_driver_version[] = "1.0"; + +int startup_gfar(struct net_device *dev); +static int gfar_enet_open(struct net_device *dev); +static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); +static void gfar_timeout(struct net_device *dev); +static int gfar_close(struct net_device *dev); +struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp); +static struct net_device_stats *gfar_get_stats(struct net_device *dev); +static int gfar_set_mac_address(struct net_device *dev); +static int gfar_change_mtu(struct net_device *dev, int new_mtu); +static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs); +static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs); +irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs); +static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs); +static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs); +static void gfar_phy_change(void *data); +static void gfar_phy_timer(unsigned long data); +static void adjust_link(struct net_device *dev); +static void init_registers(struct net_device *dev); +static int init_phy(struct net_device *dev); +static int gfar_probe(struct ocp_device *ocpdev); +static void gfar_remove(struct ocp_device *ocpdev); +void free_skb_resources(struct gfar_private *priv); +static void gfar_set_multi(struct net_device *dev); +static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); +#ifdef CONFIG_GFAR_NAPI +static int gfar_poll(struct net_device *dev, int *budget); +#endif +#ifdef CONFIG_NET_FASTROUTE +static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst); +#endif +static inline int 
try_fastroute(struct sk_buff *skb, struct net_device *dev, int length); +#ifdef CONFIG_GFAR_NAPI +static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); +#else +static int gfar_clean_rx_ring(struct net_device *dev); +#endif +static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); + +extern struct ethtool_ops gfar_ethtool_ops; +extern void gfar_gstrings_normon(struct net_device *dev, u32 stringset, + u8 * buf); +extern void gfar_fill_stats_normon(struct net_device *dev, + struct ethtool_stats *dummy, u64 * buf); +extern int gfar_stats_count_normon(struct net_device *dev); + + +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION("Gianfar Ethernet Driver"); +MODULE_LICENSE("GPL"); + +/* Called by the ocp code to initialize device data structures + * required for bringing up the device + * returns 0 on success */ +static int gfar_probe(struct ocp_device *ocpdev) +{ + u32 tempval; + struct ocp_device *mdiodev; + struct net_device *dev = NULL; + struct gfar_private *priv = NULL; + struct ocp_gfar_data *einfo; + int idx; + int err = 0; + struct ethtool_ops *dev_ethtool_ops; + + einfo = (struct ocp_gfar_data *) ocpdev->def->additions; + + if (einfo == NULL) { + printk(KERN_ERR "gfar %d: Missing additional data!\n", + ocpdev->def->index); + + return -ENODEV; + } + + /* get a pointer to the register memory which can + * configure the PHYs. If it's different from this set, + * get the device which has those regs */ + if ((einfo->phyregidx >= 0) && (einfo->phyregidx != ocpdev->def->index)) { + mdiodev = ocp_find_device(OCP_ANY_ID, + OCP_FUNC_GFAR, einfo->phyregidx); + + /* If the device which holds the MDIO regs isn't + * up, wait for it to come up */ + if (mdiodev == NULL) + return -EAGAIN; + } else { + mdiodev = ocpdev; + } + + /* Create an ethernet device instance */ + dev = alloc_etherdev(sizeof (*priv)); + + if (dev == NULL) + return -ENOMEM; + + priv = netdev_priv(dev); + + /* Set the info in the priv to the current info */ + priv->einfo = einfo; + + /* get a pointer to the register memory */ + priv->regs = (struct gfar *) + ioremap(ocpdev->def->paddr, sizeof (struct gfar)); + + if (priv->regs == NULL) { + err = -ENOMEM; + goto regs_fail; + } + + /* Set the PHY base address */ + priv->phyregs = (struct gfar *) + ioremap(mdiodev->def->paddr, sizeof (struct gfar)); + + if (priv->phyregs == NULL) { + err = -ENOMEM; + goto phy_regs_fail; + } + + ocp_set_drvdata(ocpdev, dev); + + /* Stop the DMA engine now, in case it was running before */ + /* (The firmware could have used it, and left it running). */ + /* To do this, we write Graceful Receive Stop and Graceful */ + /* Transmit Stop, and then wait until the corresponding bits */ + /* in IEVENT indicate the stops have completed. */ + tempval = gfar_read(&priv->regs->dmactrl); + tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + tempval = gfar_read(&priv->regs->dmactrl); + tempval |= (DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC))) + cpu_relax(); + + /* Reset MAC layer */ + gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); + + tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + gfar_write(&priv->regs->maccfg1, tempval); + + /* Initialize MACCFG2. 
*/ + gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); + + /* Initialize ECNTRL */ + gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); + + /* Copy the station address into the dev structure, */ + /* and into the address registers MAC_STNADDR1,2. */ + /* Backwards, because little endian MACs are dumb. */ + /* Don't set the regs if the firmware already did */ + memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN); + + /* Set the dev->base_addr to the gfar reg region */ + dev->base_addr = (unsigned long) (priv->regs); + + SET_MODULE_OWNER(dev); + + /* Fill in the dev structure */ + dev->open = gfar_enet_open; + dev->hard_start_xmit = gfar_start_xmit; + dev->tx_timeout = gfar_timeout; + dev->watchdog_timeo = TX_TIMEOUT; +#ifdef CONFIG_GFAR_NAPI + dev->poll = gfar_poll; + dev->weight = GFAR_DEV_WEIGHT; +#endif + dev->stop = gfar_close; + dev->get_stats = gfar_get_stats; + dev->change_mtu = gfar_change_mtu; + dev->mtu = 1500; + dev->set_multicast_list = gfar_set_multi; + dev->flags |= IFF_MULTICAST; + + dev_ethtool_ops = + (struct ethtool_ops *)kmalloc(sizeof(struct ethtool_ops), + GFP_KERNEL); + + if(dev_ethtool_ops == NULL) { + err = -ENOMEM; + goto ethtool_fail; + } + + memcpy(dev_ethtool_ops, &gfar_ethtool_ops, sizeof(gfar_ethtool_ops)); + + /* If there is no RMON support in this device, we don't + * want to expose non-existant statistics */ + if((priv->einfo->flags & GFAR_HAS_RMON) == 0) { + dev_ethtool_ops->get_strings = gfar_gstrings_normon; + dev_ethtool_ops->get_stats_count = gfar_stats_count_normon; + dev_ethtool_ops->get_ethtool_stats = gfar_fill_stats_normon; + } + + if((priv->einfo->flags & GFAR_HAS_COALESCE) == 0) { + dev_ethtool_ops->set_coalesce = NULL; + dev_ethtool_ops->get_coalesce = NULL; + } + + dev->ethtool_ops = dev_ethtool_ops; + +#ifdef CONFIG_NET_FASTROUTE + dev->accept_fastpath = gfar_accept_fastpath; +#endif + + priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; +#ifdef CONFIG_GFAR_BUFSTASH + priv->rx_stash_size = STASH_LENGTH; +#endif + priv->tx_ring_size = DEFAULT_TX_RING_SIZE; + priv->rx_ring_size = DEFAULT_RX_RING_SIZE; + + /* Initially, coalescing is disabled */ + priv->txcoalescing = 0; + priv->txcount = 0; + priv->txtime = 0; + priv->rxcoalescing = 0; + priv->rxcount = 0; + priv->rxtime = 0; + + err = register_netdev(dev); + + if (err) { + printk(KERN_ERR "%s: Cannot register net device, aborting.\n", + dev->name); + goto register_fail; + } + + /* Print out the device info */ + printk(DEVICE_NAME, dev->name); + for (idx = 0; idx < 6; idx++) + printk("%2.2x%c", dev->dev_addr[idx], idx == 5 ? ' ' : ':'); + printk("\n"); + + /* Even more device info helps when determining which kernel */ + /* provided which set of benchmarks. 
Since this is global for all */ + /* devices, we only print it once */ +#ifdef CONFIG_GFAR_NAPI + printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); +#else + printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name); +#endif + printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", + dev->name, priv->rx_ring_size, priv->tx_ring_size); + + return 0; + + +register_fail: + kfree(dev_ethtool_ops); +ethtool_fail: + iounmap((void *) priv->phyregs); +phy_regs_fail: + iounmap((void *) priv->regs); +regs_fail: + free_netdev(dev); + return -ENOMEM; +} + +static void gfar_remove(struct ocp_device *ocpdev) +{ + struct net_device *dev = ocp_get_drvdata(ocpdev); + struct gfar_private *priv = netdev_priv(dev); + + ocp_set_drvdata(ocpdev, NULL); + + kfree(dev->ethtool_ops); + iounmap((void *) priv->regs); + iounmap((void *) priv->phyregs); + free_netdev(dev); +} + +/* Configure the PHY for dev. + * returns 0 if success. -1 if failure + */ +static int init_phy(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct phy_info *curphy; + + priv->link = 1; + priv->oldlink = 0; + priv->oldspeed = 0; + priv->olddplx = -1; + + /* get info for this PHY */ + curphy = get_phy_info(dev); + + if (curphy == NULL) { + printk(KERN_ERR "%s: No PHY found\n", dev->name); + return -1; + } + + priv->phyinfo = curphy; + + /* Run the commands which configure the PHY */ + phy_run_commands(dev, curphy->config); + + return 0; +} + +static void init_registers(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + /* Clear IEVENT */ + gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); + + /* Initialize IMASK */ + gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); + + /* Init hash registers to zero */ + gfar_write(&priv->regs->iaddr0, 0); + gfar_write(&priv->regs->iaddr1, 0); + gfar_write(&priv->regs->iaddr2, 0); + gfar_write(&priv->regs->iaddr3, 0); + gfar_write(&priv->regs->iaddr4, 0); + gfar_write(&priv->regs->iaddr5, 0); + gfar_write(&priv->regs->iaddr6, 0); + gfar_write(&priv->regs->iaddr7, 0); + + gfar_write(&priv->regs->gaddr0, 0); + gfar_write(&priv->regs->gaddr1, 0); + gfar_write(&priv->regs->gaddr2, 0); + gfar_write(&priv->regs->gaddr3, 0); + gfar_write(&priv->regs->gaddr4, 0); + gfar_write(&priv->regs->gaddr5, 0); + gfar_write(&priv->regs->gaddr6, 0); + gfar_write(&priv->regs->gaddr7, 0); + + /* Zero out rctrl */ + gfar_write(&priv->regs->rctrl, 0x00000000); + + /* Zero out the rmon mib registers if it has them */ + if (priv->einfo->flags & GFAR_HAS_RMON) { + memset((void *) &(priv->regs->rmon), 0, + sizeof (struct rmon_mib)); + + /* Mask off the CAM interrupts */ + gfar_write(&priv->regs->rmon.cam1, 0xffffffff); + gfar_write(&priv->regs->rmon.cam2, 0xffffffff); + } + + /* Initialize the max receive buffer length */ + gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); + +#ifdef CONFIG_GFAR_BUFSTASH + /* If we are stashing buffers, we need to set the + * extraction length to the size of the buffer */ + gfar_write(&priv->regs->attreli, priv->rx_stash_size << 16); +#endif + + /* Initialize the Minimum Frame Length Register */ + gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); + + /* Setup Attributes so that snooping is on for rx */ + gfar_write(&priv->regs->attr, ATTR_INIT_SETTINGS); + gfar_write(&priv->regs->attreli, ATTRELI_INIT_SETTINGS); + + /* Assign the TBI an address which won't conflict with the PHYs */ + gfar_write(&priv->regs->tbipa, TBIPA_VALUE); +} + +void stop_gfar(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); 
+ struct gfar *regs = priv->regs; + unsigned long flags; + u32 tempval; + + /* Lock it down */ + spin_lock_irqsave(&priv->lock, flags); + + /* Tell the kernel the link is down */ + priv->link = 0; + adjust_link(dev); + + /* Mask all interrupts */ + gfar_write(®s->imask, IMASK_INIT_CLEAR); + + /* Clear all interrupts */ + gfar_write(®s->ievent, IEVENT_INIT_CLEAR); + + /* Stop the DMA, and wait for it to stop */ + tempval = gfar_read(&priv->regs->dmactrl); + if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) + != (DMACTRL_GRS | DMACTRL_GTS)) { + tempval |= (DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + while (!(gfar_read(&priv->regs->ievent) & + (IEVENT_GRSC | IEVENT_GTSC))) + cpu_relax(); + } + + /* Disable Rx and Tx */ + tempval = gfar_read(®s->maccfg1); + tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(®s->maccfg1, tempval); + + if (priv->einfo->flags & GFAR_HAS_PHY_INTR) { + phy_run_commands(dev, priv->phyinfo->shutdown); + } + + spin_unlock_irqrestore(&priv->lock, flags); + + /* Free the IRQs */ + if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) { + free_irq(priv->einfo->interruptError, dev); + free_irq(priv->einfo->interruptTransmit, dev); + free_irq(priv->einfo->interruptReceive, dev); + } else { + free_irq(priv->einfo->interruptTransmit, dev); + } + + if (priv->einfo->flags & GFAR_HAS_PHY_INTR) { + free_irq(priv->einfo->interruptPHY, dev); + } else { + del_timer_sync(&priv->phy_info_timer); + } + + free_skb_resources(priv); + + dma_unmap_single(NULL, gfar_read(®s->tbase), + sizeof(struct txbd)*priv->tx_ring_size, + DMA_BIDIRECTIONAL); + dma_unmap_single(NULL, gfar_read(®s->rbase), + sizeof(struct rxbd)*priv->rx_ring_size, + DMA_BIDIRECTIONAL); + + /* Free the buffer descriptors */ + kfree(priv->tx_bd_base); +} + +/* If there are any tx skbs or rx skbs still around, free them. 
+ * Then free tx_skbuff and rx_skbuff */ +void free_skb_resources(struct gfar_private *priv) +{ + struct rxbd8 *rxbdp; + struct txbd8 *txbdp; + int i; + + /* Go through all the buffer descriptors and free their data buffers */ + txbdp = priv->tx_bd_base; + + for (i = 0; i < priv->tx_ring_size; i++) { + + if (priv->tx_skbuff[i]) { + dma_unmap_single(NULL, txbdp->bufPtr, + txbdp->length, + DMA_TO_DEVICE); + dev_kfree_skb_any(priv->tx_skbuff[i]); + priv->tx_skbuff[i] = NULL; + } + } + + kfree(priv->tx_skbuff); + + rxbdp = priv->rx_bd_base; + + /* rx_skbuff is not guaranteed to be allocated, so only + * free it and its contents if it is allocated */ + if(priv->rx_skbuff != NULL) { + for (i = 0; i < priv->rx_ring_size; i++) { + if (priv->rx_skbuff[i]) { + dma_unmap_single(NULL, rxbdp->bufPtr, + priv->rx_buffer_size + + RXBUF_ALIGNMENT, + DMA_FROM_DEVICE); + + dev_kfree_skb_any(priv->rx_skbuff[i]); + priv->rx_skbuff[i] = NULL; + } + + rxbdp->status = 0; + rxbdp->length = 0; + rxbdp->bufPtr = 0; + + rxbdp++; + } + + kfree(priv->rx_skbuff); + } +} + +/* Bring the controller up and running */ +int startup_gfar(struct net_device *dev) +{ + struct txbd8 *txbdp; + struct rxbd8 *rxbdp; + unsigned long addr; + int i; + struct gfar_private *priv = netdev_priv(dev); + struct gfar *regs = priv->regs; + u32 tempval; + int err = 0; + + gfar_write(®s->imask, IMASK_INIT_CLEAR); + + /* Allocate memory for the buffer descriptors */ + addr = + (unsigned int) kmalloc(sizeof (struct txbd8) * priv->tx_ring_size + + sizeof (struct rxbd8) * priv->rx_ring_size, + GFP_KERNEL); + + if (addr == 0) { + printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n", + dev->name); + return -ENOMEM; + } + + priv->tx_bd_base = (struct txbd8 *) addr; + + /* enet DMA only understands physical addresses */ + gfar_write(®s->tbase, + dma_map_single(NULL, (void *)addr, + sizeof(struct txbd8) * priv->tx_ring_size, + DMA_BIDIRECTIONAL)); + + /* Start the rx descriptor ring where the tx ring leaves off */ + addr = addr + sizeof (struct txbd8) * priv->tx_ring_size; + priv->rx_bd_base = (struct rxbd8 *) addr; + gfar_write(®s->rbase, + dma_map_single(NULL, (void *)addr, + sizeof(struct rxbd8) * priv->rx_ring_size, + DMA_BIDIRECTIONAL)); + + /* Setup the skbuff rings */ + priv->tx_skbuff = + (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * + priv->tx_ring_size, GFP_KERNEL); + + if (priv->tx_skbuff == NULL) { + printk(KERN_ERR "%s: Could not allocate tx_skbuff\n", + dev->name); + err = -ENOMEM; + goto tx_skb_fail; + } + + for (i = 0; i < priv->tx_ring_size; i++) + priv->tx_skbuff[i] = NULL; + + priv->rx_skbuff = + (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * + priv->rx_ring_size, GFP_KERNEL); + + if (priv->rx_skbuff == NULL) { + printk(KERN_ERR "%s: Could not allocate rx_skbuff\n", + dev->name); + err = -ENOMEM; + goto rx_skb_fail; + } + + for (i = 0; i < priv->rx_ring_size; i++) + priv->rx_skbuff[i] = NULL; + + /* Initialize some variables in our dev structure */ + priv->dirty_tx = priv->cur_tx = priv->tx_bd_base; + priv->cur_rx = priv->rx_bd_base; + priv->skb_curtx = priv->skb_dirtytx = 0; + priv->skb_currx = 0; + + /* Initialize Transmit Descriptor Ring */ + txbdp = priv->tx_bd_base; + for (i = 0; i < priv->tx_ring_size; i++) { + txbdp->status = 0; + txbdp->length = 0; + txbdp->bufPtr = 0; + txbdp++; + } + + /* Set the last descriptor in the ring to indicate wrap */ + txbdp--; + txbdp->status |= TXBD_WRAP; + + rxbdp = priv->rx_bd_base; + for (i = 0; i < priv->rx_ring_size; i++) { + struct sk_buff *skb = NULL; + + 
rxbdp->status = 0; + + skb = gfar_new_skb(dev, rxbdp); + + priv->rx_skbuff[i] = skb; + + rxbdp++; + } + + /* Set the last descriptor in the ring to wrap */ + rxbdp--; + rxbdp->status |= RXBD_WRAP; + + /* If the device has multiple interrupts, register for + * them. Otherwise, only register for the one */ + if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) { + /* Install our interrupt handlers for Error, + * Transmit, and Receive */ + if (request_irq(priv->einfo->interruptError, gfar_error, + SA_SHIRQ, "enet_error", dev) < 0) { + printk(KERN_ERR "%s: Can't get IRQ %d\n", + dev->name, priv->einfo->interruptError); + + err = -1; + goto err_irq_fail; + } + + if (request_irq(priv->einfo->interruptTransmit, gfar_transmit, + SA_SHIRQ, "enet_tx", dev) < 0) { + printk(KERN_ERR "%s: Can't get IRQ %d\n", + dev->name, priv->einfo->interruptTransmit); + + err = -1; + + goto tx_irq_fail; + } + + if (request_irq(priv->einfo->interruptReceive, gfar_receive, + SA_SHIRQ, "enet_rx", dev) < 0) { + printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", + dev->name, priv->einfo->interruptReceive); + + err = -1; + goto rx_irq_fail; + } + } else { + if (request_irq(priv->einfo->interruptTransmit, gfar_interrupt, + SA_SHIRQ, "gfar_interrupt", dev) < 0) { + printk(KERN_ERR "%s: Can't get IRQ %d\n", + dev->name, priv->einfo->interruptError); + + err = -1; + goto err_irq_fail; + } + } + + /* Grab the PHY interrupt */ + if (priv->einfo->flags & GFAR_HAS_PHY_INTR) { + if (request_irq(priv->einfo->interruptPHY, phy_interrupt, + SA_SHIRQ, "phy_interrupt", dev) < 0) { + printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n", + dev->name, priv->einfo->interruptPHY); + + err = -1; + + if (priv->einfo->flags & GFAR_HAS_MULTI_INTR) + goto phy_irq_fail; + else + goto tx_irq_fail; + } + } else { + init_timer(&priv->phy_info_timer); + priv->phy_info_timer.function = &gfar_phy_timer; + priv->phy_info_timer.data = (unsigned long) dev; + mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ); + } + + /* Set up the bottom half queue */ + INIT_WORK(&priv->tq, (void (*)(void *))gfar_phy_change, dev); + + /* Configure the PHY interrupt */ + phy_run_commands(dev, priv->phyinfo->startup); + + /* Tell the kernel the link is up, and determine the + * negotiated features (speed, duplex) */ + adjust_link(dev); + + if (priv->link == 0) + printk(KERN_INFO "%s: No link detected\n", dev->name); + + /* Configure the coalescing support */ + if (priv->txcoalescing) + gfar_write(®s->txic, + mk_ic_value(priv->txcount, priv->txtime)); + else + gfar_write(®s->txic, 0); + + if (priv->rxcoalescing) + gfar_write(®s->rxic, + mk_ic_value(priv->rxcount, priv->rxtime)); + else + gfar_write(®s->rxic, 0); + + init_waitqueue_head(&priv->rxcleanupq); + + /* Enable Rx and Tx in MACCFG1 */ + tempval = gfar_read(®s->maccfg1); + tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); + gfar_write(®s->maccfg1, tempval); + + /* Initialize DMACTRL to have WWR and WOP */ + tempval = gfar_read(&priv->regs->dmactrl); + tempval |= DMACTRL_INIT_SETTINGS; + gfar_write(&priv->regs->dmactrl, tempval); + + /* Clear THLT, so that the DMA starts polling now */ + gfar_write(®s->tstat, TSTAT_CLEAR_THALT); + + /* Make sure we aren't stopped */ + tempval = gfar_read(&priv->regs->dmactrl); + tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + /* Unmask the interrupts we look for */ + gfar_write(®s->imask, IMASK_DEFAULT); + + return 0; + +phy_irq_fail: + free_irq(priv->einfo->interruptReceive, dev); +rx_irq_fail: + free_irq(priv->einfo->interruptTransmit, dev); +tx_irq_fail: + 
free_irq(priv->einfo->interruptError, dev); +err_irq_fail: +rx_skb_fail: + free_skb_resources(priv); +tx_skb_fail: + kfree(priv->tx_bd_base); + return err; +} + +/* Called when something needs to use the ethernet device */ +/* Returns 0 for success. */ +static int gfar_enet_open(struct net_device *dev) +{ + int err; + + /* Initialize a bunch of registers */ + init_registers(dev); + + gfar_set_mac_address(dev); + + err = init_phy(dev); + + if (err) + return err; + + err = startup_gfar(dev); + + netif_start_queue(dev); + + return err; +} + +/* This is called by the kernel when a frame is ready for transmission. */ +/* It is pointed to by the dev->hard_start_xmit function pointer */ +static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct txbd8 *txbdp; + + /* Update transmit stats */ + priv->stats.tx_bytes += skb->len; + + /* Lock priv now */ + spin_lock_irq(&priv->lock); + + /* Point at the first free tx descriptor */ + txbdp = priv->cur_tx; + + /* Clear all but the WRAP status flags */ + txbdp->status &= TXBD_WRAP; + + /* Set buffer length and pointer */ + txbdp->length = skb->len; + txbdp->bufPtr = dma_map_single(NULL, skb->data, + skb->len, DMA_TO_DEVICE); + + /* Save the skb pointer so we can free it later */ + priv->tx_skbuff[priv->skb_curtx] = skb; + + /* Update the current skb pointer (wrapping if this was the last) */ + priv->skb_curtx = + (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size); + + /* Flag the BD as interrupt-causing */ + txbdp->status |= TXBD_INTERRUPT; + + /* Flag the BD as ready to go, last in frame, and */ + /* in need of CRC */ + txbdp->status |= (TXBD_READY | TXBD_LAST | TXBD_CRC); + + dev->trans_start = jiffies; + + /* If this was the last BD in the ring, the next one */ + /* is at the beginning of the ring */ + if (txbdp->status & TXBD_WRAP) + txbdp = priv->tx_bd_base; + else + txbdp++; + + /* If the next BD still needs to be cleaned up, then the bds + are full. We need to tell the kernel to stop sending us stuff. */ + if (txbdp == priv->dirty_tx) { + netif_stop_queue(dev); + + priv->stats.tx_fifo_errors++; + } + + /* Update the current txbd to the next one */ + priv->cur_tx = txbdp; + + /* Tell the DMA to go go go */ + gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); + + /* Unlock priv */ + spin_unlock_irq(&priv->lock); + + return 0; +} + +/* Stops the kernel queue, and halts the controller */ +static int gfar_close(struct net_device *dev) +{ + stop_gfar(dev); + + netif_stop_queue(dev); + + return 0; +} + +/* returns a net_device_stats structure pointer */ +static struct net_device_stats * gfar_get_stats(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + return &(priv->stats); +} + +/* Changes the mac address if the controller is not running. 
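 * (Illustrative example, not from the patch: with dev_addr 00:01:02:03:04:05
 *  the loop below builds tmpbuf = 05 04 03 02 01 00, so on the big-endian
 *  MPC85xx the MACSTNADDR1 register is written with 0x05040302 and the upper
 *  16 bits of MACSTNADDR2 with 0x0100.)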
*/ +int gfar_set_mac_address(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + int i; + char tmpbuf[MAC_ADDR_LEN]; + u32 tempval; + + /* Now copy it into the mac registers backwards, cuz */ + /* little endian is silly */ + for (i = 0; i < MAC_ADDR_LEN; i++) + tmpbuf[MAC_ADDR_LEN - 1 - i] = dev->dev_addr[i]; + + gfar_write(&priv->regs->macstnaddr1, *((u32 *) (tmpbuf))); + + tempval = *((u32 *) (tmpbuf + 4)); + + gfar_write(&priv->regs->macstnaddr2, tempval); + + return 0; +} + +/********************************************************************** + * gfar_accept_fastpath + * + * Used to authenticate to the kernel that a fast path entry can be + * added to device's routing table cache + * + * Input : pointer to ethernet interface network device structure and + * a pointer to the designated entry to be added to the cache. + * Output : zero upon success, negative upon failure + **********************************************************************/ +#ifdef CONFIG_NET_FASTROUTE +static int gfar_accept_fastpath(struct net_device *dev, struct dst_entry *dst) +{ + struct net_device *odev = dst->dev; + + if ((dst->ops->protocol != __constant_htons(ETH_P_IP)) + || (odev->type != ARPHRD_ETHER) + || (odev->accept_fastpath == NULL)) { + return -1; + } + + return 0; +} +#endif + +/* try_fastroute() -- Checks the fastroute cache to see if a given packet + * can be routed immediately to another device. If it can, we send it. + * If we used a fastroute, we return 1. Otherwise, we return 0. + * Returns 0 if CONFIG_NET_FASTROUTE is not on + */ +static inline int try_fastroute(struct sk_buff *skb, struct net_device *dev, int length) +{ +#ifdef CONFIG_NET_FASTROUTE + struct ethhdr *eth; + struct iphdr *iph; + unsigned int hash; + struct rtable *rt; + struct net_device *odev; + struct gfar_private *priv = netdev_priv(dev); + unsigned int CPU_ID = smp_processor_id(); + + eth = (struct ethhdr *) (skb->data); + + /* Only route ethernet IP packets */ + if (eth->h_proto == __constant_htons(ETH_P_IP)) { + iph = (struct iphdr *) (skb->data + ETH_HLEN); + + /* Generate the hash value */ + hash = ((*(u8 *) &iph->daddr) ^ (*(u8 *) & iph->saddr)) & NETDEV_FASTROUTE_HMASK; + + rt = (struct rtable *) (dev->fastpath[hash]); + if (rt != NULL + && ((*(u32 *) &iph->daddr) == (*(u32 *) &rt->key.dst)) + && ((*(u32 *) &iph->saddr) == (*(u32 *) &rt->key.src)) + && !(rt->u.dst.obsolete)) { + odev = rt->u.dst.dev; + netdev_rx_stat[CPU_ID].fastroute_hit++; + + /* Make sure the packet is: + * 1) IPv4 + * 2) without any options (header length of 5) + * 3) Not a multicast packet + * 4) going to a valid destination + * 5) Not out of time-to-live + */ + if (iph->version == 4 + && iph->ihl == 5 + && (!(eth->h_dest[0] & 0x01)) + && neigh_is_valid(rt->u.dst.neighbour) + && iph->ttl > 1) { + + /* Fast Route Path: Taken if the outgoing device is ready to transmit the packet now */ + if ((!netif_queue_stopped(odev)) + && (!spin_is_locked(odev->xmit_lock)) + && (skb->len <= (odev->mtu + ETH_HLEN + 2 + 4))) { + + skb->pkt_type = PACKET_FASTROUTE; + skb->protocol = __constant_htons(ETH_P_IP); + ip_decrease_ttl(iph); + memcpy(eth->h_source, odev->dev_addr, MAC_ADDR_LEN); + memcpy(eth->h_dest, rt->u.dst.neighbour->ha, MAC_ADDR_LEN); + skb->dev = odev; + + /* Prep the skb for the packet */ + skb_put(skb, length); + + if (odev->hard_start_xmit(skb, odev) != 0) { + panic("%s: FastRoute path corrupted", dev->name); + } + netdev_rx_stat[CPU_ID].fastroute_success++; + } + + /* Semi Fast Route Path: Mark the packet as needing 
fast routing, but let the + * stack handle getting it to the device */ + else { + skb->pkt_type = PACKET_FASTROUTE; + skb->nh.raw = skb->data + ETH_HLEN; + skb->protocol = __constant_htons(ETH_P_IP); + netdev_rx_stat[CPU_ID].fastroute_defer++; + + /* Prep the skb for the packet */ + skb_put(skb, length); + + if(RECEIVE(skb) == NET_RX_DROP) { + priv->extra_stats.kernel_dropped++; + } + } + + return 1; + } + } + } +#endif /* CONFIG_NET_FASTROUTE */ + return 0; +} + +static int gfar_change_mtu(struct net_device *dev, int new_mtu) +{ + int tempsize, tempval; + struct gfar_private *priv = netdev_priv(dev); + int oldsize = priv->rx_buffer_size; + int frame_size = new_mtu + 18; + + if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { + printk(KERN_ERR "%s: Invalid MTU setting\n", dev->name); + return -EINVAL; + } + + tempsize = + (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + + INCREMENTAL_BUFFER_SIZE; + + /* Only stop and start the controller if it isn't already + * stopped */ + if ((oldsize != tempsize) && (dev->flags & IFF_UP)) + stop_gfar(dev); + + priv->rx_buffer_size = tempsize; + + dev->mtu = new_mtu; + + gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); + gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size); + + /* If the mtu is larger than the max size for standard + * ethernet frames (ie, a jumbo frame), then set maccfg2 + * to allow huge frames, and to check the length */ + tempval = gfar_read(&priv->regs->maccfg2); + + if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) + tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); + else + tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); + + gfar_write(&priv->regs->maccfg2, tempval); + + if ((oldsize != tempsize) && (dev->flags & IFF_UP)) + startup_gfar(dev); + + return 0; +} + +/* gfar_timeout gets called when a packet has not been + * transmitted after a set amount of time. + * For now, assume that clearing out all the structures, and + * starting over will fix the problem. */ +static void gfar_timeout(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + + priv->stats.tx_errors++; + + if (dev->flags & IFF_UP) { + stop_gfar(dev); + startup_gfar(dev); + } + + if (!netif_queue_stopped(dev)) + netif_schedule(dev); +} + +/* Interrupt Handler for Transmit complete */ +static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct gfar_private *priv = netdev_priv(dev); + struct txbd8 *bdp; + + /* Clear IEVENT */ + gfar_write(&priv->regs->ievent, IEVENT_TX_MASK); + + /* Lock priv */ + spin_lock(&priv->lock); + bdp = priv->dirty_tx; + while ((bdp->status & TXBD_READY) == 0) { + /* If dirty_tx and cur_tx are the same, then either the */ + /* ring is empty or full now (it could only be full in the beginning, */ + /* obviously). If it is empty, we are done. */ + if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) + break; + + priv->stats.tx_packets++; + + /* Deferred means some collisions occurred during transmit, */ + /* but we eventually sent the packet. 
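 * (Minimal sketch, not part of the driver, of the advance-with-wrap rule the
 *  cleanup loop below applies; the type and flag value are illustrative
 *  stand-ins for the real txbd8/TXBD_WRAP definitions:
 *
 *      struct bd { unsigned short status; };
 *      #define BD_WRAP 0x2000
 *      static struct bd *next_bd(struct bd *bdp, struct bd *base)
 *      {
 *              return (bdp->status & BD_WRAP) ? base : bdp + 1;
 *      }
 *  )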
*/ + if (bdp->status & TXBD_DEF) + priv->stats.collisions++; + + /* Free the sk buffer associated with this TxBD */ + dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); + priv->tx_skbuff[priv->skb_dirtytx] = NULL; + priv->skb_dirtytx = + (priv->skb_dirtytx + + 1) & TX_RING_MOD_MASK(priv->tx_ring_size); + + /* update bdp to point at next bd in the ring (wrapping if necessary) */ + if (bdp->status & TXBD_WRAP) + bdp = priv->tx_bd_base; + else + bdp++; + + /* Move dirty_tx to be the next bd */ + priv->dirty_tx = bdp; + + /* We freed a buffer, so now we can restart transmission */ + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + } /* while ((bdp->status & TXBD_READY) == 0) */ + + /* If we are coalescing the interrupts, reset the timer */ + /* Otherwise, clear it */ + if (priv->txcoalescing) + gfar_write(&priv->regs->txic, + mk_ic_value(priv->txcount, priv->txtime)); + else + gfar_write(&priv->regs->txic, 0); + + spin_unlock(&priv->lock); + + return IRQ_HANDLED; +} + +struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) +{ + struct gfar_private *priv = netdev_priv(dev); + struct sk_buff *skb = NULL; + unsigned int timeout = SKB_ALLOC_TIMEOUT; + + /* We have to allocate the skb, so keep trying till we succeed */ + while ((!skb) && timeout--) + skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT); + + if (skb == NULL) + return NULL; + + /* We need the data buffer to be aligned properly. We will reserve + * as many bytes as needed to align the data properly + */ + skb_reserve(skb, + RXBUF_ALIGNMENT - + (((unsigned) skb->data) & (RXBUF_ALIGNMENT - 1))); + + skb->dev = dev; + + bdp->bufPtr = dma_map_single(NULL, skb->data, + priv->rx_buffer_size + RXBUF_ALIGNMENT, + DMA_FROM_DEVICE); + + bdp->length = 0; + + /* Mark the buffer empty */ + bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT); + + return skb; +} + +static inline void count_errors(unsigned short status, struct gfar_private *priv) +{ + struct net_device_stats *stats = &priv->stats; + struct gfar_extra_stats *estats = &priv->extra_stats; + + /* If the packet was truncated, none of the other errors + * matter */ + if (status & RXBD_TRUNCATED) { + stats->rx_length_errors++; + + estats->rx_trunc++; + + return; + } + /* Count the errors, if there were any */ + if (status & (RXBD_LARGE | RXBD_SHORT)) { + stats->rx_length_errors++; + + if (status & RXBD_LARGE) + estats->rx_large++; + else + estats->rx_short++; + } + if (status & RXBD_NONOCTET) { + stats->rx_frame_errors++; + estats->rx_nonoctet++; + } + if (status & RXBD_CRCERR) { + estats->rx_crcerr++; + stats->rx_crc_errors++; + } + if (status & RXBD_OVERRUN) { + estats->rx_overrun++; + stats->rx_crc_errors++; + } +} + +irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct gfar_private *priv = netdev_priv(dev); + +#ifdef CONFIG_GFAR_NAPI + u32 tempval; +#endif + + /* Clear IEVENT, so rx interrupt isn't called again + * because of this interrupt */ + gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); + + /* support NAPI */ +#ifdef CONFIG_GFAR_NAPI + if (netif_rx_schedule_prep(dev)) { + tempval = gfar_read(&priv->regs->imask); + tempval &= IMASK_RX_DISABLED; + gfar_write(&priv->regs->imask, tempval); + + __netif_rx_schedule(dev); + } else { +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n", + dev->name, gfar_read(priv->regs->ievent), + gfar_read(priv->regs->imask)); +#endif + } +#else + + spin_lock(&priv->lock); + gfar_clean_rx_ring(dev); + + /* 
If we are coalescing interrupts, update the timer */ + /* Otherwise, clear it */ + if (priv->rxcoalescing) + gfar_write(&priv->regs->rxic, + mk_ic_value(priv->rxcount, priv->rxtime)); + else + gfar_write(&priv->regs->rxic, 0); + + /* Just in case we need to wake the ring param changer */ + priv->rxclean = 1; + + spin_unlock(&priv->lock); +#endif + + return IRQ_HANDLED; +} + + +/* gfar_process_frame() -- handle one incoming packet if skb + * isn't NULL. Try the fastroute before using the stack */ +static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, + int length) +{ + struct gfar_private *priv = netdev_priv(dev); + + if (skb == NULL) { +#ifdef BRIEF_GFAR_ERRORS + printk(KERN_WARNING "%s: Missing skb!!.\n", + dev->name); +#endif + priv->stats.rx_dropped++; + priv->extra_stats.rx_skbmissing++; + } else { + if(try_fastroute(skb, dev, length) == 0) { + /* Prep the skb for the packet */ + skb_put(skb, length); + + /* Tell the skb what kind of packet this is */ + skb->protocol = eth_type_trans(skb, dev); + + /* Send the packet up the stack */ + if (RECEIVE(skb) == NET_RX_DROP) { + priv->extra_stats.kernel_dropped++; + } + } + } + + return 0; +} + +/* gfar_clean_rx_ring() -- Processes each frame in the rx ring + * until all are gone (or, in the case of NAPI, the budget/quota + * has been reached). Returns the number of frames handled + */ +#ifdef CONFIG_GFAR_NAPI +static int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) +#else +static int gfar_clean_rx_ring(struct net_device *dev) +#endif +{ + struct rxbd8 *bdp; + struct sk_buff *skb; + u16 pkt_len; + int howmany = 0; + struct gfar_private *priv = netdev_priv(dev); + + /* Get the first full descriptor */ + bdp = priv->cur_rx; + +#ifdef CONFIG_GFAR_NAPI +#define GFAR_RXDONE() ((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0)) +#else +#define GFAR_RXDONE() (bdp->status & RXBD_EMPTY) +#endif + while (!GFAR_RXDONE()) { + skb = priv->rx_skbuff[priv->skb_currx]; + + if (!(bdp->status & + (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET + | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) { + /* Increment the number of packets */ + priv->stats.rx_packets++; + howmany++; + + /* Remove the FCS from the packet length */ + pkt_len = bdp->length - 4; + + gfar_process_frame(dev, skb, pkt_len); + + priv->stats.rx_bytes += pkt_len; + + } else { + count_errors(bdp->status, priv); + + if (skb) + dev_kfree_skb_any(skb); + + priv->rx_skbuff[priv->skb_currx] = NULL; + } + + dev->last_rx = jiffies; + + /* Clear the status flags for this buffer */ + bdp->status &= ~RXBD_STATS; + + /* Add another skb for the future */ + skb = gfar_new_skb(dev, bdp); + priv->rx_skbuff[priv->skb_currx] = skb; + + /* Update to the next pointer */ + if (bdp->status & RXBD_WRAP) + bdp = priv->rx_bd_base; + else + bdp++; + + /* update to point at the next skb */ + priv->skb_currx = + (priv->skb_currx + + 1) & RX_RING_MOD_MASK(priv->rx_ring_size); + + } + + /* Update the current rxbd pointer to be the next one */ + priv->cur_rx = bdp; + + /* If no packets have arrived since the + * last one we processed, clear the IEVENT RX and + * BSY bits so that another interrupt won't be + * generated when we set IMASK */ + if (bdp->status & RXBD_EMPTY) + gfar_write(&priv->regs->ievent, IEVENT_RX_MASK); + + return howmany; +} + +#ifdef CONFIG_GFAR_NAPI +static int gfar_poll(struct net_device *dev, int *budget) +{ + int howmany; + struct gfar_private *priv = netdev_priv(dev); + int rx_work_limit = *budget; + + if (rx_work_limit > dev->quota) + rx_work_limit = dev->quota; + + 
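/* gfar_clean_rx_ring() above processes at most min(*budget, dev->quota)
+	 * frames per call.  The count it returns is charged against both the
+	 * device quota and the global budget, and only if the work limit was
+	 * not exhausted do we complete the poll, clear RHALT and re-enable
+	 * receive interrupts. */ +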
spin_lock(&priv->lock); + howmany = gfar_clean_rx_ring(dev, rx_work_limit); + + dev->quota -= howmany; + rx_work_limit -= howmany; + *budget -= howmany; + + if (rx_work_limit >= 0) { + netif_rx_complete(dev); + + /* Clear the halt bit in RSTAT */ + gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); + + gfar_write(&priv->regs->imask, IMASK_DEFAULT); + + /* If we are coalescing interrupts, update the timer */ + /* Otherwise, clear it */ + if (priv->rxcoalescing) + gfar_write(&priv->regs->rxic, + mk_ic_value(priv->rxcount, priv->rxtime)); + else + gfar_write(&priv->regs->rxic, 0); + + /* Signal to the ring size changer that it's safe to go */ + priv->rxclean = 1; + } + + spin_unlock(priv->lock); + + return (rx_work_limit < 0) ? 1 : 0; +} +#endif + +/* The interrupt handler for devices with one interrupt */ +static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct gfar_private *priv = netdev_priv(dev); + + /* Save ievent for future reference */ + u32 events = gfar_read(&priv->regs->ievent); + + /* Clear IEVENT */ + gfar_write(&priv->regs->ievent, events); + + /* Check for reception */ + if ((events & IEVENT_RXF0) || (events & IEVENT_RXB0)) + gfar_receive(irq, dev_id, regs); + + /* Check for transmit completion */ + if ((events & IEVENT_TXF) || (events & IEVENT_TXB)) + gfar_transmit(irq, dev_id, regs); + + /* Update error statistics */ + if (events & IEVENT_TXE) { + priv->stats.tx_errors++; + + if (events & IEVENT_LC) + priv->stats.tx_window_errors++; + if (events & IEVENT_CRL) + priv->stats.tx_aborted_errors++; + if (events & IEVENT_XFUN) { +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_WARNING "%s: tx underrun. dropped packet\n", + dev->name); +#endif + priv->stats.tx_dropped++; + priv->extra_stats.tx_underrun++; + + /* Reactivate the Tx Queues */ + gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); + } + } + if (events & IEVENT_BSY) { + priv->stats.rx_errors++; + priv->extra_stats.rx_bsy++; + + gfar_receive(irq, dev_id, regs); + +#ifndef CONFIG_GFAR_NAPI + /* Clear the halt bit in RSTAT */ + gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); +#endif + +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name, + gfar_read(priv->regs->rstat)); +#endif + } + if (events & IEVENT_BABR) { + priv->stats.rx_errors++; + priv->extra_stats.rx_babr++; + +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: babbling error\n", dev->name); +#endif + } + if (events & IEVENT_EBERR) { + priv->extra_stats.eberr++; +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: EBERR\n", dev->name); +#endif + } + if (events & IEVENT_RXC) { +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: control frame\n", dev->name); +#endif + } + + if (events & IEVENT_BABT) { + priv->extra_stats.tx_babt++; +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: babt error\n", dev->name); +#endif + } + + return IRQ_HANDLED; +} + +static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = (struct net_device *) dev_id; + struct gfar_private *priv = netdev_priv(dev); + + /* Run the commands which acknowledge the interrupt */ + phy_run_commands(dev, priv->phyinfo->ack_int); + + /* Schedule the bottom half */ + schedule_work(&priv->tq); + + return IRQ_HANDLED; +} + +/* Scheduled by the phy_interrupt/timer to handle PHY changes */ +static void gfar_phy_change(void *data) +{ + struct net_device *dev = (struct net_device *) data; + struct gfar_private *priv = netdev_priv(dev); + int timeout = HZ / 1000 + 
1; + + /* Delay to give the PHY a chance to change the + * register state */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(timeout); + + /* Run the commands which check the link state */ + phy_run_commands(dev, priv->phyinfo->handle_int); + + /* React to the change in state */ + adjust_link(dev); +} + +/* Called every so often on systems that don't interrupt + * the core for PHY changes */ +static void gfar_phy_timer(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct gfar_private *priv = netdev_priv(dev); + + schedule_work(&priv->tq); + + mod_timer(&priv->phy_info_timer, jiffies + 2 * HZ); +} + +/* Called every time the controller might need to be made + * aware of new link state. The PHY code conveys this + * information through variables in the priv structure, and this + * function converts those variables into the appropriate + * register values, and can bring down the device if needed. + */ +static void adjust_link(struct net_device *dev) +{ + struct gfar_private *priv = netdev_priv(dev); + struct gfar *regs = priv->regs; + u32 tempval; + + if (priv->link) { + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (priv->duplexity != priv->olddplx) { + if (!(priv->duplexity)) { + tempval = gfar_read(®s->maccfg2); + tempval &= ~(MACCFG2_FULL_DUPLEX); + gfar_write(®s->maccfg2, tempval); + + printk(KERN_INFO "%s: Half Duplex\n", + dev->name); + } else { + tempval = gfar_read(®s->maccfg2); + tempval |= MACCFG2_FULL_DUPLEX; + gfar_write(®s->maccfg2, tempval); + + printk(KERN_INFO "%s: Full Duplex\n", + dev->name); + } + + priv->olddplx = priv->duplexity; + } + + if (priv->speed != priv->oldspeed) { + switch (priv->speed) { + case 1000: + tempval = gfar_read(®s->maccfg2); + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); + gfar_write(®s->maccfg2, tempval); + break; + case 100: + case 10: + tempval = gfar_read(®s->maccfg2); + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); + gfar_write(®s->maccfg2, tempval); + break; + default: + printk(KERN_WARNING + "%s: Ack! Speed (%d) is not 10/100/1000!\n", + dev->name, priv->speed); + break; + } + + printk(KERN_INFO "%s: Speed %dBT\n", dev->name, + priv->speed); + + priv->oldspeed = priv->speed; + } + + if (!priv->oldlink) { + printk(KERN_INFO "%s: Link is up\n", dev->name); + priv->oldlink = 1; + netif_carrier_on(dev); + netif_schedule(dev); + } + } else { + if (priv->oldlink) { + printk(KERN_INFO "%s: Link is down\n", dev->name); + priv->oldlink = 0; + priv->oldspeed = 0; + priv->olddplx = -1; + netif_carrier_off(dev); + } + } + +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_INFO "%s: Link now %s; %dBT %s-duplex\n", + dev->name, priv->link ? "up" : "down", priv->speed, priv->duplexity ? "full" : "half"); +#endif +} + + +/* Update the hash table based on the current list of multicast + * addresses we subscribe to. 
Also, change the promiscuity of + * the device based on the flags (this function is called + * whenever dev->flags is changed */ +static void gfar_set_multi(struct net_device *dev) +{ + struct dev_mc_list *mc_ptr; + struct gfar_private *priv = netdev_priv(dev); + struct gfar *regs = priv->regs; + u32 tempval; + + if(dev->flags & IFF_PROMISC) { + printk(KERN_INFO "%s: Entering promiscuous mode.\n", + dev->name); + /* Set RCTRL to PROM */ + tempval = gfar_read(®s->rctrl); + tempval |= RCTRL_PROM; + gfar_write(®s->rctrl, tempval); + } else { + /* Set RCTRL to not PROM */ + tempval = gfar_read(®s->rctrl); + tempval &= ~(RCTRL_PROM); + gfar_write(®s->rctrl, tempval); + } + + if(dev->flags & IFF_ALLMULTI) { + /* Set the hash to rx all multicast frames */ + gfar_write(®s->gaddr0, 0xffffffff); + gfar_write(®s->gaddr1, 0xffffffff); + gfar_write(®s->gaddr2, 0xffffffff); + gfar_write(®s->gaddr3, 0xffffffff); + gfar_write(®s->gaddr4, 0xffffffff); + gfar_write(®s->gaddr5, 0xffffffff); + gfar_write(®s->gaddr6, 0xffffffff); + gfar_write(®s->gaddr7, 0xffffffff); + } else { + /* zero out the hash */ + gfar_write(®s->gaddr0, 0x0); + gfar_write(®s->gaddr1, 0x0); + gfar_write(®s->gaddr2, 0x0); + gfar_write(®s->gaddr3, 0x0); + gfar_write(®s->gaddr4, 0x0); + gfar_write(®s->gaddr5, 0x0); + gfar_write(®s->gaddr6, 0x0); + gfar_write(®s->gaddr7, 0x0); + + if(dev->mc_count == 0) + return; + + /* Parse the list, and set the appropriate bits */ + for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { + gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); + } + } + + return; +} + +/* Set the appropriate hash bit for the given addr */ +/* The algorithm works like so: + * 1) Take the Destination Address (ie the multicast address), and + * do a CRC on it (little endian), and reverse the bits of the + * result. + * 2) Use the 8 most significant bits as a hash into a 256-entry + * table. The table is controlled through 8 32-bit registers: + * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is + * gaddr7. This means that the 3 most significant bits in the + * hash index which gaddr register to use, and the 5 other bits + * indicate which bit (assuming an IBM numbering scheme, which + * for PowerPC (tm) is usually the case) in the register holds + * the entry. */ +static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr) +{ + u32 tempval; + struct gfar_private *priv = netdev_priv(dev); + struct gfar *regs = priv->regs; + u32 *hash = ®s->gaddr0; + u32 result = ether_crc(MAC_ADDR_LEN, addr); + u8 whichreg = ((result >> 29) & 0x7); + u8 whichbit = ((result >> 24) & 0x1f); + u32 value = (1 << (31-whichbit)); + + tempval = gfar_read(&hash[whichreg]); + tempval |= value; + gfar_write(&hash[whichreg], tempval); + + return; +} + +/* GFAR error interrupt handler */ +static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs) +{ + struct net_device *dev = dev_id; + struct gfar_private *priv = netdev_priv(dev); + + /* Save ievent for future reference */ + u32 events = gfar_read(&priv->regs->ievent); + + /* Clear IEVENT */ + gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK); + + /* Hmm... 
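dump the raw IEVENT and IMASK values below when error reporting is compiled in.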
*/ +#if defined (BRIEF_GFAR_ERRORS) || defined (VERBOSE_GFAR_ERRORS) + printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", + dev->name, events, gfar_read(priv->regs->imask)); +#endif + + /* Update the error counters */ + if (events & IEVENT_TXE) { + priv->stats.tx_errors++; + + if (events & IEVENT_LC) + priv->stats.tx_window_errors++; + if (events & IEVENT_CRL) + priv->stats.tx_aborted_errors++; + if (events & IEVENT_XFUN) { +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: underrun. packet dropped.\n", + dev->name); +#endif + priv->stats.tx_dropped++; + priv->extra_stats.tx_underrun++; + + /* Reactivate the Tx Queues */ + gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); + } +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); +#endif + } + if (events & IEVENT_BSY) { + priv->stats.rx_errors++; + priv->extra_stats.rx_bsy++; + + gfar_receive(irq, dev_id, regs); + +#ifndef CONFIG_GFAR_NAPI + /* Clear the halt bit in RSTAT */ + gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); +#endif + +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: busy error (rhalt: %x)\n", dev->name, + gfar_read(priv->regs->rstat)); +#endif + } + if (events & IEVENT_BABR) { + priv->stats.rx_errors++; + priv->extra_stats.rx_babr++; + +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: babbling error\n", dev->name); +#endif + } + if (events & IEVENT_EBERR) { + priv->extra_stats.eberr++; +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: EBERR\n", dev->name); +#endif + } + if (events & IEVENT_RXC) +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: control frame\n", dev->name); +#endif + + if (events & IEVENT_BABT) { + priv->extra_stats.tx_babt++; +#ifdef VERBOSE_GFAR_ERRORS + printk(KERN_DEBUG "%s: babt error\n", dev->name); +#endif + } + return IRQ_HANDLED; +} + +/* Structure for a device driver */ +static struct ocp_device_id gfar_ids[] = { + {.vendor = OCP_ANY_ID,.function = OCP_FUNC_GFAR}, + {.vendor = OCP_VENDOR_INVALID} +}; + +static struct ocp_driver gfar_driver = { + .name = "gianfar", + .id_table = gfar_ids, + + .probe = gfar_probe, + .remove = gfar_remove, +}; + +static int __init gfar_init(void) +{ + int rc; + + rc = ocp_register_driver(&gfar_driver); +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41) + if (rc != 0) { +#else + if (rc == 0) { +#endif + ocp_unregister_driver(&gfar_driver); + return -ENODEV; + } + + return 0; +} + +static void __exit gfar_exit(void) +{ + ocp_unregister_driver(&gfar_driver); +} + +module_init(gfar_init); +module_exit(gfar_exit); diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h new file mode 100644 index 000000000..f7af3465c --- /dev/null +++ b/drivers/net/gianfar.h @@ -0,0 +1,537 @@ +/* + * drivers/net/gianfar.h + * + * Gianfar Ethernet Driver + * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * Still left to do: + * -Add support for module parameters + */ +#ifndef __GIANFAR_H +#define __GIANFAR_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41) +#include +#else +#include +#define work_struct tq_struct +#define schedule_work schedule_task +#endif + +#include +#include +#include +#include "gianfar_phy.h" + +/* The maximum number of packets to be handled in one call of gfar_poll */ +#define GFAR_DEV_WEIGHT 64 + +/* Number of bytes to align the rx bufs to */ +#define RXBUF_ALIGNMENT 64 + +/* The number of bytes which composes a unit for the purpose of + * allocating data buffers. ie-for any given MTU, the data buffer + * will be the next highest multiple of 512 bytes. */ +#define INCREMENTAL_BUFFER_SIZE 512 + + +#define MAC_ADDR_LEN 6 + +extern char gfar_driver_name[]; +extern char gfar_driver_version[]; + +/* These need to be powers of 2 for this driver */ +#ifdef CONFIG_GFAR_NAPI +#define DEFAULT_TX_RING_SIZE 256 +#define DEFAULT_RX_RING_SIZE 256 +#else +#define DEFAULT_TX_RING_SIZE 64 +#define DEFAULT_RX_RING_SIZE 64 +#endif + +#define GFAR_RX_MAX_RING_SIZE 256 +#define GFAR_TX_MAX_RING_SIZE 256 + +#define DEFAULT_RX_BUFFER_SIZE 1536 +#define TX_RING_MOD_MASK(size) (size-1) +#define RX_RING_MOD_MASK(size) (size-1) +#define JUMBO_BUFFER_SIZE 9728 +#define JUMBO_FRAME_SIZE 9600 + +/* Latency of interface clock in nanoseconds */ +/* Interface clock latency , in this case, means the + * time described by a value of 1 in the interrupt + * coalescing registers' time fields. Since those fields + * refer to the time it takes for 64 clocks to pass, the + * latencies are as such: + * GBIT = 125MHz => 8ns/clock => 8*64 ns / tick + * 100 = 25 MHz => 40ns/clock => 40*64 ns / tick + * 10 = 2.5 MHz => 400ns/clock => 400*64 ns / tick + */ +#define GFAR_GBIT_TIME 512 +#define GFAR_100_TIME 2560 +#define GFAR_10_TIME 25600 + +#define DEFAULT_TXCOUNT 16 +#define DEFAULT_TXTIME 32768 + +#define DEFAULT_RXCOUNT 16 +#define DEFAULT_RXTIME 32768 + +#define TBIPA_VALUE 0x1f +#define MIIMCFG_INIT_VALUE 0x00000007 +#define MIIMCFG_RESET 0x80000000 +#define MIIMIND_BUSY 0x00000001 + +/* MAC register bits */ +#define MACCFG1_SOFT_RESET 0x80000000 +#define MACCFG1_RESET_RX_MC 0x00080000 +#define MACCFG1_RESET_TX_MC 0x00040000 +#define MACCFG1_RESET_RX_FUN 0x00020000 +#define MACCFG1_RESET_TX_FUN 0x00010000 +#define MACCFG1_LOOPBACK 0x00000100 +#define MACCFG1_RX_FLOW 0x00000020 +#define MACCFG1_TX_FLOW 0x00000010 +#define MACCFG1_SYNCD_RX_EN 0x00000008 +#define MACCFG1_RX_EN 0x00000004 +#define MACCFG1_SYNCD_TX_EN 0x00000002 +#define MACCFG1_TX_EN 0x00000001 + +#define MACCFG2_INIT_SETTINGS 0x00007205 +#define MACCFG2_FULL_DUPLEX 0x00000001 +#define MACCFG2_IF 0x00000300 +#define MACCFG2_MII 0x00000100 +#define MACCFG2_GMII 0x00000200 +#define MACCFG2_HUGEFRAME 0x00000020 +#define MACCFG2_LENGTHCHECK 0x00000010 + +#define ECNTRL_INIT_SETTINGS 0x00001000 +#define ECNTRL_TBI_MODE 0x00000020 + +#define MRBLR_INIT_SETTINGS DEFAULT_RX_BUFFER_SIZE + +#define MINFLR_INIT_SETTINGS 0x00000040 + +/* Init to do tx snooping for buffers and descriptors */ +#define DMACTRL_INIT_SETTINGS 0x000000c3 +#define DMACTRL_GRS 0x00000010 +#define DMACTRL_GTS 0x00000008 + +#define TSTAT_CLEAR_THALT 0x80000000 + +/* Interrupt coalescing macros */ +#define IC_ICEN 0x80000000 +#define IC_ICFT_MASK 0x1fe00000 +#define 
IC_ICFT_SHIFT 21 +#define mk_ic_icft(x) \ + (((unsigned int)x << IC_ICFT_SHIFT)&IC_ICFT_MASK) +#define IC_ICTT_MASK 0x0000ffff +#define mk_ic_ictt(x) (x&IC_ICTT_MASK) + +#define mk_ic_value(count, time) (IC_ICEN | \ + mk_ic_icft(count) | \ + mk_ic_ictt(time)) + +#define RCTRL_PROM 0x00000008 +#define RSTAT_CLEAR_RHALT 0x00800000 + +#define IEVENT_INIT_CLEAR 0xffffffff +#define IEVENT_BABR 0x80000000 +#define IEVENT_RXC 0x40000000 +#define IEVENT_BSY 0x20000000 +#define IEVENT_EBERR 0x10000000 +#define IEVENT_MSRO 0x04000000 +#define IEVENT_GTSC 0x02000000 +#define IEVENT_BABT 0x01000000 +#define IEVENT_TXC 0x00800000 +#define IEVENT_TXE 0x00400000 +#define IEVENT_TXB 0x00200000 +#define IEVENT_TXF 0x00100000 +#define IEVENT_LC 0x00040000 +#define IEVENT_CRL 0x00020000 +#define IEVENT_XFUN 0x00010000 +#define IEVENT_RXB0 0x00008000 +#define IEVENT_GRSC 0x00000100 +#define IEVENT_RXF0 0x00000080 +#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0) +#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) +#define IEVENT_ERR_MASK \ +(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ + IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ + | IEVENT_CRL | IEVENT_XFUN) + +#define IMASK_INIT_CLEAR 0x00000000 +#define IMASK_BABR 0x80000000 +#define IMASK_RXC 0x40000000 +#define IMASK_BSY 0x20000000 +#define IMASK_EBERR 0x10000000 +#define IMASK_MSRO 0x04000000 +#define IMASK_GRSC 0x02000000 +#define IMASK_BABT 0x01000000 +#define IMASK_TXC 0x00800000 +#define IMASK_TXEEN 0x00400000 +#define IMASK_TXBEN 0x00200000 +#define IMASK_TXFEN 0x00100000 +#define IMASK_LC 0x00040000 +#define IMASK_CRL 0x00020000 +#define IMASK_XFUN 0x00010000 +#define IMASK_RXB0 0x00008000 +#define IMASK_GTSC 0x00000100 +#define IMASK_RXFEN0 0x00000080 +#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY) +#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \ + IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ + IMASK_XFUN | IMASK_RXC | IMASK_BABT) + + +/* Attribute fields */ + +/* This enables rx snooping for buffers and descriptors */ +#ifdef CONFIG_GFAR_BDSTASH +#define ATTR_BDSTASH 0x00000800 +#else +#define ATTR_BDSTASH 0x00000000 +#endif + +#ifdef CONFIG_GFAR_BUFSTASH +#define ATTR_BUFSTASH 0x00004000 +#define STASH_LENGTH 64 +#else +#define ATTR_BUFSTASH 0x00000000 +#endif + +#define ATTR_SNOOPING 0x000000c0 +#define ATTR_INIT_SETTINGS (ATTR_SNOOPING \ + | ATTR_BDSTASH | ATTR_BUFSTASH) + +#define ATTRELI_INIT_SETTINGS 0x0 + + +/* TxBD status field bits */ +#define TXBD_READY 0x8000 +#define TXBD_PADCRC 0x4000 +#define TXBD_WRAP 0x2000 +#define TXBD_INTERRUPT 0x1000 +#define TXBD_LAST 0x0800 +#define TXBD_CRC 0x0400 +#define TXBD_DEF 0x0200 +#define TXBD_HUGEFRAME 0x0080 +#define TXBD_LATECOLLISION 0x0080 +#define TXBD_RETRYLIMIT 0x0040 +#define TXBD_RETRYCOUNTMASK 0x003c +#define TXBD_UNDERRUN 0x0002 + +/* RxBD status field bits */ +#define RXBD_EMPTY 0x8000 +#define RXBD_RO1 0x4000 +#define RXBD_WRAP 0x2000 +#define RXBD_INTERRUPT 0x1000 +#define RXBD_LAST 0x0800 +#define RXBD_FIRST 0x0400 +#define RXBD_MISS 0x0100 +#define RXBD_BROADCAST 0x0080 +#define RXBD_MULTICAST 0x0040 +#define RXBD_LARGE 0x0020 +#define RXBD_NONOCTET 0x0010 +#define RXBD_SHORT 0x0008 +#define RXBD_CRCERR 0x0004 +#define RXBD_OVERRUN 0x0002 +#define RXBD_TRUNCATED 0x0001 +#define RXBD_STATS 0x01ff + +struct txbd8 +{ + u16 status; /* Status Fields */ + u16 length; /* Buffer length */ + u32 bufPtr; /* Buffer Pointer */ +}; + +struct rxbd8 +{ + u16 status; /* Status Fields */ + u16 length; /* Buffer Length */ + u32 bufPtr; /* 
Buffer Pointer */ +}; + +struct rmon_mib +{ + u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */ + u32 tr127; /* 0x.684 - Transmit and Receive 65-127 byte Frame Counter */ + u32 tr255; /* 0x.688 - Transmit and Receive 128-255 byte Frame Counter */ + u32 tr511; /* 0x.68c - Transmit and Receive 256-511 byte Frame Counter */ + u32 tr1k; /* 0x.690 - Transmit and Receive 512-1023 byte Frame Counter */ + u32 trmax; /* 0x.694 - Transmit and Receive 1024-1518 byte Frame Counter */ + u32 trmgv; /* 0x.698 - Transmit and Receive 1519-1522 byte Good VLAN Frame */ + u32 rbyt; /* 0x.69c - Receive Byte Counter */ + u32 rpkt; /* 0x.6a0 - Receive Packet Counter */ + u32 rfcs; /* 0x.6a4 - Receive FCS Error Counter */ + u32 rmca; /* 0x.6a8 - Receive Multicast Packet Counter */ + u32 rbca; /* 0x.6ac - Receive Broadcast Packet Counter */ + u32 rxcf; /* 0x.6b0 - Receive Control Frame Packet Counter */ + u32 rxpf; /* 0x.6b4 - Receive Pause Frame Packet Counter */ + u32 rxuo; /* 0x.6b8 - Receive Unknown OP Code Counter */ + u32 raln; /* 0x.6bc - Receive Alignment Error Counter */ + u32 rflr; /* 0x.6c0 - Receive Frame Length Error Counter */ + u32 rcde; /* 0x.6c4 - Receive Code Error Counter */ + u32 rcse; /* 0x.6c8 - Receive Carrier Sense Error Counter */ + u32 rund; /* 0x.6cc - Receive Undersize Packet Counter */ + u32 rovr; /* 0x.6d0 - Receive Oversize Packet Counter */ + u32 rfrg; /* 0x.6d4 - Receive Fragments Counter */ + u32 rjbr; /* 0x.6d8 - Receive Jabber Counter */ + u32 rdrp; /* 0x.6dc - Receive Drop Counter */ + u32 tbyt; /* 0x.6e0 - Transmit Byte Counter Counter */ + u32 tpkt; /* 0x.6e4 - Transmit Packet Counter */ + u32 tmca; /* 0x.6e8 - Transmit Multicast Packet Counter */ + u32 tbca; /* 0x.6ec - Transmit Broadcast Packet Counter */ + u32 txpf; /* 0x.6f0 - Transmit Pause Control Frame Counter */ + u32 tdfr; /* 0x.6f4 - Transmit Deferral Packet Counter */ + u32 tedf; /* 0x.6f8 - Transmit Excessive Deferral Packet Counter */ + u32 tscl; /* 0x.6fc - Transmit Single Collision Packet Counter */ + u32 tmcl; /* 0x.700 - Transmit Multiple Collision Packet Counter */ + u32 tlcl; /* 0x.704 - Transmit Late Collision Packet Counter */ + u32 txcl; /* 0x.708 - Transmit Excessive Collision Packet Counter */ + u32 tncl; /* 0x.70c - Transmit Total Collision Counter */ + u8 res1[4]; + u32 tdrp; /* 0x.714 - Transmit Drop Frame Counter */ + u32 tjbr; /* 0x.718 - Transmit Jabber Frame Counter */ + u32 tfcs; /* 0x.71c - Transmit FCS Error Counter */ + u32 txcf; /* 0x.720 - Transmit Control Frame Counter */ + u32 tovr; /* 0x.724 - Transmit Oversize Frame Counter */ + u32 tund; /* 0x.728 - Transmit Undersize Frame Counter */ + u32 tfrg; /* 0x.72c - Transmit Fragments Frame Counter */ + u32 car1; /* 0x.730 - Carry Register One */ + u32 car2; /* 0x.734 - Carry Register Two */ + u32 cam1; /* 0x.738 - Carry Mask Register One */ + u32 cam2; /* 0x.73c - Carry Mask Register Two */ +}; + +struct gfar_extra_stats { + u64 kernel_dropped; + u64 rx_large; + u64 rx_short; + u64 rx_nonoctet; + u64 rx_crcerr; + u64 rx_overrun; + u64 rx_bsy; + u64 rx_babr; + u64 rx_trunc; + u64 eberr; + u64 tx_babt; + u64 tx_underrun; + u64 rx_skbmissing; + u64 tx_timeout; +}; + +#define GFAR_RMON_LEN ((sizeof(struct rmon_mib) - 16)/sizeof(u32)) +#define GFAR_EXTRA_STATS_LEN (sizeof(struct gfar_extra_stats)/sizeof(u64)) + +/* Number of stats in the stats structure (ignore car and cam regs)*/ +#define GFAR_STATS_LEN (GFAR_RMON_LEN + GFAR_EXTRA_STATS_LEN) + +#define GFAR_INFOSTR_LEN 32 + +struct gfar_stats { + u64 
extra[GFAR_EXTRA_STATS_LEN]; + u64 rmon[GFAR_RMON_LEN]; +}; + + +struct gfar { + u8 res1[16]; + u32 ievent; /* 0x.010 - Interrupt Event Register */ + u32 imask; /* 0x.014 - Interrupt Mask Register */ + u32 edis; /* 0x.018 - Error Disabled Register */ + u8 res2[4]; + u32 ecntrl; /* 0x.020 - Ethernet Control Register */ + u32 minflr; /* 0x.024 - Minimum Frame Length Register */ + u32 ptv; /* 0x.028 - Pause Time Value Register */ + u32 dmactrl; /* 0x.02c - DMA Control Register */ + u32 tbipa; /* 0x.030 - TBI PHY Address Register */ + u8 res3[88]; + u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */ + u8 res4[8]; + u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */ + u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */ + u8 res5[96]; + u32 tctrl; /* 0x.100 - Transmit Control Register */ + u32 tstat; /* 0x.104 - Transmit Status Register */ + u8 res6[4]; + u32 tbdlen; /* 0x.10c - Transmit Buffer Descriptor Data Length Register */ + u32 txic; /* 0x.110 - Transmit Interrupt Coalescing Configuration Register */ + u8 res7[16]; + u32 ctbptr; /* 0x.124 - Current Transmit Buffer Descriptor Pointer Register */ + u8 res8[92]; + u32 tbptr; /* 0x.184 - Transmit Buffer Descriptor Pointer Low Register */ + u8 res9[124]; + u32 tbase; /* 0x.204 - Transmit Descriptor Base Address Register */ + u8 res10[168]; + u32 ostbd; /* 0x.2b0 - Out-of-Sequence Transmit Buffer Descriptor Register */ + u32 ostbdp; /* 0x.2b4 - Out-of-Sequence Transmit Data Buffer Pointer Register */ + u8 res11[72]; + u32 rctrl; /* 0x.300 - Receive Control Register */ + u32 rstat; /* 0x.304 - Receive Status Register */ + u8 res12[4]; + u32 rbdlen; /* 0x.30c - RxBD Data Length Register */ + u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */ + u8 res13[16]; + u32 crbptr; /* 0x.324 - Current Receive Buffer Descriptor Pointer */ + u8 res14[24]; + u32 mrblr; /* 0x.340 - Maximum Receive Buffer Length Register */ + u8 res15[64]; + u32 rbptr; /* 0x.384 - Receive Buffer Descriptor Pointer */ + u8 res16[124]; + u32 rbase; /* 0x.404 - Receive Descriptor Base Address */ + u8 res17[248]; + u32 maccfg1; /* 0x.500 - MAC Configuration 1 Register */ + u32 maccfg2; /* 0x.504 - MAC Configuration 2 Register */ + u32 ipgifg; /* 0x.508 - Inter Packet Gap/Inter Frame Gap Register */ + u32 hafdup; /* 0x.50c - Half Duplex Register */ + u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */ + u8 res18[12]; + u32 miimcfg; /* 0x.520 - MII Management Configuration Register */ + u32 miimcom; /* 0x.524 - MII Management Command Register */ + u32 miimadd; /* 0x.528 - MII Management Address Register */ + u32 miimcon; /* 0x.52c - MII Management Control Register */ + u32 miimstat; /* 0x.530 - MII Management Status Register */ + u32 miimind; /* 0x.534 - MII Management Indicator Register */ + u8 res19[4]; + u32 ifstat; /* 0x.53c - Interface Status Register */ + u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */ + u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */ + u8 res20[312]; + struct rmon_mib rmon; + u8 res21[192]; + u32 iaddr0; /* 0x.800 - Indivdual address register 0 */ + u32 iaddr1; /* 0x.804 - Indivdual address register 1 */ + u32 iaddr2; /* 0x.808 - Indivdual address register 2 */ + u32 iaddr3; /* 0x.80c - Indivdual address register 3 */ + u32 iaddr4; /* 0x.810 - Indivdual address register 4 */ + u32 iaddr5; /* 0x.814 - Indivdual address register 5 */ + u32 iaddr6; /* 0x.818 - Indivdual address register 6 */ + u32 iaddr7; /* 0x.81c - Indivdual address register 7 
*/ + u8 res22[96]; + u32 gaddr0; /* 0x.880 - Global address register 0 */ + u32 gaddr1; /* 0x.884 - Global address register 1 */ + u32 gaddr2; /* 0x.888 - Global address register 2 */ + u32 gaddr3; /* 0x.88c - Global address register 3 */ + u32 gaddr4; /* 0x.890 - Global address register 4 */ + u32 gaddr5; /* 0x.894 - Global address register 5 */ + u32 gaddr6; /* 0x.898 - Global address register 6 */ + u32 gaddr7; /* 0x.89c - Global address register 7 */ + u8 res23[856]; + u32 attr; /* 0x.bf8 - Attributes Register */ + u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ + u8 res24[1024]; + +}; + +/* Struct stolen almost completely (and shamelessly) from the FCC enet source + * (Ok, that's not so true anymore, but there is a family resemblence) + * The GFAR buffer descriptors track the ring buffers. The rx_bd_base + * and tx_bd_base always point to the currently available buffer. + * The dirty_tx tracks the current buffer that is being sent by the + * controller. The cur_tx and dirty_tx are equal under both completely + * empty and completely full conditions. The empty/ready indicator in + * the buffer descriptor determines the actual condition. + */ +struct gfar_private +{ + /* pointers to arrays of skbuffs for tx and rx */ + struct sk_buff ** tx_skbuff; + struct sk_buff ** rx_skbuff; + + /* indices pointing to the next free sbk in skb arrays */ + u16 skb_curtx; + u16 skb_currx; + + /* index of the first skb which hasn't been transmitted + * yet. */ + u16 skb_dirtytx; + + /* Configuration info for the coalescing features */ + unsigned char txcoalescing; + unsigned short txcount; + unsigned short txtime; + unsigned char rxcoalescing; + unsigned short rxcount; + unsigned short rxtime; + + /* GFAR addresses */ + struct rxbd8 *rx_bd_base; /* Base addresses of Rx and Tx Buffers */ + struct txbd8 *tx_bd_base; + struct rxbd8 *cur_rx; /* Next free rx ring entry */ + struct txbd8 *cur_tx; /* Next free ring entry */ + struct txbd8 *dirty_tx; /* The Ring entry to be freed. 
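Ring indices wrap with the power-of-two masks, e.g. skb_dirtytx advances as (skb_dirtytx + 1) & TX_RING_MOD_MASK(tx_ring_size).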
*/ + struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */ + struct phy_info *phyinfo; + struct gfar *phyregs; + struct work_struct tq; + struct timer_list phy_info_timer; + struct net_device_stats stats; /* linux network statistics */ + struct gfar_extra_stats extra_stats; + spinlock_t lock; + unsigned int rx_buffer_size; + unsigned int rx_stash_size; + unsigned int tx_ring_size; + unsigned int rx_ring_size; + wait_queue_head_t rxcleanupq; + unsigned int rxclean; + int link; /* current link state */ + int oldlink; + int duplexity; /* Indicates negotiated duplex state */ + int olddplx; + int speed; /* Indicates negotiated speed */ + int oldspeed; + + /* Info structure initialized by board setup code */ + struct ocp_gfar_data *einfo; +}; + +extern inline u32 gfar_read(volatile unsigned *addr) +{ + u32 val; + val = in_be32(addr); + return val; +} + +extern inline void gfar_write(volatile unsigned *addr, u32 val) +{ + out_be32(addr, val); +} + + + +#endif /* __GIANFAR_H */ diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c new file mode 100644 index 000000000..4ccb5afd6 --- /dev/null +++ b/drivers/net/gianfar_ethtool.c @@ -0,0 +1,484 @@ +/* + * drivers/net/gianfar_ethtool.c + * + * Gianfar Ethernet Driver + * Ethtool support for Gianfar Enet + * Based on e1000 ethtool support + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This software may be used and distributed according to + * the terms of the GNU Public License, Version 2, incorporated herein + * by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gianfar.h" + +#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) + +extern int startup_gfar(struct net_device *dev); +extern void stop_gfar(struct net_device *dev); +extern void gfar_receive(int irq, void *dev_id, struct pt_regs *regs); + +void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, + u64 * buf); +void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); +int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); +int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals); +void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals); +int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals); +void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo); + +static char stat_gstrings[][ETH_GSTRING_LEN] = { + "RX Dropped by Kernel", + "RX Large Frame Errors", + "RX Short Frame Errors", + "RX Non-Octet Errors", + "RX CRC Errors", + "RX Overrun Errors", + "RX Busy Errors", + "RX Babbling Errors", + "RX Truncated Frames", + "Ethernet Bus Error", + "TX Babbling Errors", + "TX Underrun Errors", + "RX SKB Missing Errors", + "TX Timeout Errors", + "tx&rx 64B frames", + "tx&rx 65-127B frames", + "tx&rx 128-255B frames", + "tx&rx 256-511B frames", + "tx&rx 512-1023B frames", + "tx&rx 1024-1518B frames", + "tx&rx 1519-1522B Good VLAN", + "RX bytes", + "RX Packets", + "RX FCS Errors", + "Receive Multicast Packet", + "Receive Broadcast Packet", + "RX Control Frame Packets", + "RX Pause Frame Packets", + "RX Unknown OP Code", + "RX Alignment Error", + "RX Frame Length Error", + "RX Code Error", + "RX Carrier Sense Error", + "RX 
Undersize Packets", + "RX Oversize Packets", + "RX Fragmented Frames", + "RX Jabber Frames", + "RX Dropped Frames", + "TX Byte Counter", + "TX Packets", + "TX Multicast Packets", + "TX Broadcast Packets", + "TX Pause Control Frames", + "TX Deferral Packets", + "TX Excessive Deferral Packets", + "TX Single Collision Packets", + "TX Multiple Collision Packets", + "TX Late Collision Packets", + "TX Excessive Collision Packets", + "TX Total Collision", + "RESERVED", + "TX Dropped Frames", + "TX Jabber Frames", + "TX FCS Errors", + "TX Control Frames", + "TX Oversize Frames", + "TX Undersize Frames", + "TX Fragmented Frames", +}; + +/* Fill in an array of 64-bit statistics from various sources. + * This array will be appended to the end of the ethtool_stats + * structure, and returned to user space + */ +void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf) +{ + int i; + struct gfar_private *priv = (struct gfar_private *) dev->priv; + u32 *rmon = (u32 *) & priv->regs->rmon; + u64 *extra = (u64 *) & priv->extra_stats; + struct gfar_stats *stats = (struct gfar_stats *) buf; + + for (i = 0; i < GFAR_RMON_LEN; i++) { + stats->rmon[i] = (u64) (rmon[i]); + } + + for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) { + stats->extra[i] = extra[i]; + } +} + +/* Returns the number of stats (and their corresponding strings) */ +int gfar_stats_count(struct net_device *dev) +{ + return GFAR_STATS_LEN; +} + +void gfar_gstrings_normon(struct net_device *dev, u32 stringset, u8 * buf) +{ + memcpy(buf, stat_gstrings, GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN); +} + +void gfar_fill_stats_normon(struct net_device *dev, + struct ethtool_stats *dummy, u64 * buf) +{ + int i; + struct gfar_private *priv = (struct gfar_private *) dev->priv; + u64 *extra = (u64 *) & priv->extra_stats; + + for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++) { + buf[i] = extra[i]; + } +} + + +int gfar_stats_count_normon(struct net_device *dev) +{ + return GFAR_EXTRA_STATS_LEN; +} +/* Fills in the drvinfo structure with some basic info */ +void gfar_gdrvinfo(struct net_device *dev, struct + ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, gfar_driver_name, GFAR_INFOSTR_LEN); + strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN); + strncpy(drvinfo->fw_version, "N/A", GFAR_INFOSTR_LEN); + strncpy(drvinfo->bus_info, "N/A", GFAR_INFOSTR_LEN); + drvinfo->n_stats = GFAR_STATS_LEN; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = 0; + drvinfo->eedump_len = 0; +} + +/* Return the current settings in the ethtool_cmd structure */ +int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + uint gigabit_support = + priv->einfo->flags & GFAR_HAS_GIGABIT ? SUPPORTED_1000baseT_Full : 0; + uint gigabit_advert = + priv->einfo->flags & GFAR_HAS_GIGABIT ? 
ADVERTISED_1000baseT_Full: 0; + + cmd->supported = (SUPPORTED_10baseT_Half + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | gigabit_support | SUPPORTED_Autoneg); + + /* For now, we always advertise everything */ + cmd->advertising = (ADVERTISED_10baseT_Half + | ADVERTISED_100baseT_Half + | ADVERTISED_100baseT_Full + | gigabit_advert | ADVERTISED_Autoneg); + + cmd->speed = priv->speed; + cmd->duplex = priv->duplexity; + cmd->port = PORT_MII; + cmd->phy_address = priv->einfo->phyid; + cmd->transceiver = XCVR_EXTERNAL; + cmd->autoneg = AUTONEG_ENABLE; + cmd->maxtxpkt = priv->txcount; + cmd->maxrxpkt = priv->rxcount; + + return 0; +} + +/* Return the length of the register structure */ +int gfar_reglen(struct net_device *dev) +{ + return sizeof (struct gfar); +} + +/* Return a dump of the GFAR register space */ +void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) +{ + int i; + struct gfar_private *priv = (struct gfar_private *) dev->priv; + u32 *theregs = (u32 *) priv->regs; + u32 *buf = (u32 *) regbuf; + + for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++) + buf[i] = theregs[i]; +} + +/* Return the link state 1 is up, 0 is down */ +u32 gfar_get_link(struct net_device *dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + return (u32) priv->link; +} + +/* Fill in a buffer with the strings which correspond to the + * stats */ +void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf) +{ + memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN); +} + +/* Convert microseconds to ethernet clock ticks, which changes + * depending on what speed the controller is running at */ +static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs) +{ + unsigned int count; + + /* The timer is different, depending on the interface speed */ + switch (priv->speed) { + case 1000: + count = GFAR_GBIT_TIME; + break; + case 100: + count = GFAR_100_TIME; + break; + case 10: + default: + count = GFAR_10_TIME; + break; + } + + /* Make sure we return a number greater than 0 + * if usecs > 0 */ + return ((usecs * 1000 + count - 1) / count); +} + +/* Convert ethernet clock ticks to microseconds */ +static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks) +{ + unsigned int count; + + /* The timer is different, depending on the interface speed */ + switch (priv->speed) { + case 1000: + count = GFAR_GBIT_TIME; + break; + case 100: + count = GFAR_100_TIME; + break; + case 10: + default: + count = GFAR_10_TIME; + break; + } + + /* Make sure we return a number greater than 0 */ + /* if ticks is > 0 */ + return ((ticks * count) / 1000); +} + +/* Get the coalescing parameters, and put them in the cvals + * structure. 
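The values are stored as interface clock ticks and converted with the helpers above; at gigabit speed one tick is 512 ns, so, for example, 100 usecs maps to (100 * 1000 + 511) / 512 = 196 ticks.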
*/ +int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime); + cvals->rx_max_coalesced_frames = priv->rxcount; + + cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, priv->txtime); + cvals->tx_max_coalesced_frames = priv->txcount; + + cvals->use_adaptive_rx_coalesce = 0; + cvals->use_adaptive_tx_coalesce = 0; + + cvals->pkt_rate_low = 0; + cvals->rx_coalesce_usecs_low = 0; + cvals->rx_max_coalesced_frames_low = 0; + cvals->tx_coalesce_usecs_low = 0; + cvals->tx_max_coalesced_frames_low = 0; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + cvals->pkt_rate_high = 0; + cvals->rx_coalesce_usecs_high = 0; + cvals->rx_max_coalesced_frames_high = 0; + cvals->tx_coalesce_usecs_high = 0; + cvals->tx_max_coalesced_frames_high = 0; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + cvals->rate_sample_interval = 0; + + return 0; +} + +/* Change the coalescing values. + * Both cvals->*_usecs and cvals->*_frames have to be > 0 + * in order for coalescing to be active + */ +int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + /* Set up rx coalescing */ + if ((cvals->rx_coalesce_usecs == 0) || + (cvals->rx_max_coalesced_frames == 0)) + priv->rxcoalescing = 0; + else + priv->rxcoalescing = 1; + + priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs); + priv->rxcount = cvals->rx_max_coalesced_frames; + + /* Set up tx coalescing */ + if ((cvals->tx_coalesce_usecs == 0) || + (cvals->tx_max_coalesced_frames == 0)) + priv->txcoalescing = 0; + else + priv->txcoalescing = 1; + + priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs); + priv->txcount = cvals->tx_max_coalesced_frames; + + if (priv->rxcoalescing) + gfar_write(&priv->regs->rxic, + mk_ic_value(priv->rxcount, priv->rxtime)); + else + gfar_write(&priv->regs->rxic, 0); + + if (priv->txcoalescing) + gfar_write(&priv->regs->txic, + mk_ic_value(priv->txcount, priv->txtime)); + else + gfar_write(&priv->regs->txic, 0); + + return 0; +} + +/* Fills in rvals with the current ring parameters. Currently, + * rx, rx_mini, and rx_jumbo rings are the same size, as mini and + * jumbo are ignored by the driver */ +void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE; + rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + rvals->rx_pending = priv->rx_ring_size; + rvals->rx_mini_pending = priv->rx_ring_size; + rvals->rx_jumbo_pending = priv->rx_ring_size; + rvals->tx_pending = priv->tx_ring_size; +} + +/* Change the current ring parameters, stopping the controller if + * necessary so that we don't mess things up while we're in + * motion. We wait for the ring to be clean before reallocating + * the rings. 
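Concretely: the GRS and GTS bits in DMACTRL halt the DMA engine; if the interface is up, gfar_receive() drains the frames already queued and we sleep on rxcleanupq until rxclean is set before stopping the controller; finally the new ring sizes are recorded and the controller is restarted.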
*/ +int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) +{ + u32 tempval; + struct gfar_private *priv = (struct gfar_private *) dev->priv; + int err = 0; + + if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) + return -EINVAL; + + if (!is_power_of_2(rvals->rx_pending)) { + printk("%s: Ring sizes must be a power of 2\n", + dev->name); + return -EINVAL; + } + + if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE) + return -EINVAL; + + if (!is_power_of_2(rvals->tx_pending)) { + printk("%s: Ring sizes must be a power of 2\n", + dev->name); + return -EINVAL; + } + + /* Stop the controller so we don't rx any more frames */ + /* But first, make sure we clear the bits */ + tempval = gfar_read(&priv->regs->dmactrl); + tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + tempval = gfar_read(&priv->regs->dmactrl); + tempval |= (DMACTRL_GRS | DMACTRL_GTS); + gfar_write(&priv->regs->dmactrl, tempval); + + while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC))) + cpu_relax(); + + /* Note that rx is not clean right now */ + priv->rxclean = 0; + + if (dev->flags & IFF_UP) { + /* Tell the driver to process the rest of the frames */ + gfar_receive(0, (void *) dev, NULL); + + /* Now wait for it to be done */ + wait_event_interruptible(priv->rxcleanupq, priv->rxclean); + + /* Ok, all packets have been handled. Now we bring it down, + * change the ring size, and bring it up */ + + stop_gfar(dev); + } + + priv->rx_ring_size = rvals->rx_pending; + priv->tx_ring_size = rvals->tx_pending; + + if (dev->flags & IFF_UP) + err = startup_gfar(dev); + + return err; +} + +struct ethtool_ops gfar_ethtool_ops = { + .get_settings = gfar_gsettings, + .get_drvinfo = gfar_gdrvinfo, + .get_regs_len = gfar_reglen, + .get_regs = gfar_get_regs, + .get_link = gfar_get_link, + .get_coalesce = gfar_gcoalesce, + .set_coalesce = gfar_scoalesce, + .get_ringparam = gfar_gringparam, + .set_ringparam = gfar_sringparam, + .get_strings = gfar_gstrings, + .get_stats_count = gfar_stats_count, + .get_ethtool_stats = gfar_fill_stats, +}; diff --git a/drivers/net/gianfar_phy.c b/drivers/net/gianfar_phy.c new file mode 100644 index 000000000..ea02e5da9 --- /dev/null +++ b/drivers/net/gianfar_phy.c @@ -0,0 +1,622 @@ +/* + * drivers/net/gianfar_phy.c + * + * Gianfar Ethernet Driver -- PHY handling + * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "gianfar.h" +#include "gianfar_phy.h" + +/* Write value to the PHY for this device to the register at regnum, */ +/* waiting until the write is done before it returns. 
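(A write consists of loading miimadd with the PHY and register addresses, writing the value to miimcon, and polling MIIMIND_BUSY until it clears.)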
All PHY */ +/* configuration has to be done through the TSEC1 MIIM regs */ +void write_phy_reg(struct net_device *dev, u16 regnum, u16 value) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + struct gfar *regbase = priv->phyregs; + struct ocp_gfar_data *einfo = priv->einfo; + + /* Set the PHY address and the register address we want to write */ + gfar_write(®base->miimadd, ((einfo->phyid) << 8) | regnum); + + /* Write out the value we want */ + gfar_write(®base->miimcon, value); + + /* Wait for the transaction to finish */ + while (gfar_read(®base->miimind) & MIIMIND_BUSY) + cpu_relax(); +} + +/* Reads from register regnum in the PHY for device dev, */ +/* returning the value. Clears miimcom first. All PHY */ +/* configuration has to be done through the TSEC1 MIIM regs */ +u16 read_phy_reg(struct net_device *dev, u16 regnum) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + struct gfar *regbase = priv->phyregs; + struct ocp_gfar_data *einfo = priv->einfo; + u16 value; + + /* Set the PHY address and the register address we want to read */ + gfar_write(®base->miimadd, ((einfo->phyid) << 8) | regnum); + + /* Clear miimcom, and then initiate a read */ + gfar_write(®base->miimcom, 0); + gfar_write(®base->miimcom, MIIM_READ_COMMAND); + + /* Wait for the transaction to finish */ + while (gfar_read(®base->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY)) + cpu_relax(); + + /* Grab the value of the register from miimstat */ + value = gfar_read(®base->miimstat); + + return value; +} + +/* returns which value to write to the control register. */ +/* For 10/100 the value is slightly different. */ +u16 mii_cr_init(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + struct ocp_gfar_data *einfo = priv->einfo; + + if (einfo->flags & GFAR_HAS_GIGABIT) + return MIIM_CONTROL_INIT; + else + return MIIM_CR_INIT; +} + +#define BRIEF_GFAR_ERRORS +/* Wait for auto-negotiation to complete */ +u16 mii_parse_sr(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + unsigned int timeout = GFAR_AN_TIMEOUT; + + if (mii_reg & MIIM_STATUS_LINK) + priv->link = 1; + else + priv->link = 0; + + /* Only auto-negotiate if the link has just gone up */ + if (priv->link && !priv->oldlink) { + while ((!(mii_reg & MIIM_STATUS_AN_DONE)) && timeout--) + mii_reg = read_phy_reg(dev, MIIM_STATUS); + +#if defined(BRIEF_GFAR_ERRORS) + if (mii_reg & MIIM_STATUS_AN_DONE) + printk(KERN_INFO "%s: Auto-negotiation done\n", + dev->name); + else + printk(KERN_INFO "%s: Auto-negotiation timed out\n", + dev->name); +#endif + } + + return 0; +} + +/* Determine the speed and duplex which was negotiated */ +u16 mii_parse_88E1011_psr(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + unsigned int speed; + + if (priv->link) { + if (mii_reg & MIIM_88E1011_PHYSTAT_DUPLEX) + priv->duplexity = 1; + else + priv->duplexity = 0; + + speed = (mii_reg & MIIM_88E1011_PHYSTAT_SPEED); + + switch (speed) { + case MIIM_88E1011_PHYSTAT_GBIT: + priv->speed = 1000; + break; + case MIIM_88E1011_PHYSTAT_100: + priv->speed = 100; + break; + default: + priv->speed = 10; + break; + } + } else { + priv->speed = 0; + priv->duplexity = 0; + } + + return 0; +} + +u16 mii_parse_cis8201(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + unsigned int speed; + + if (priv->link) { + if (mii_reg & 
MIIM_CIS8201_AUXCONSTAT_DUPLEX) + priv->duplexity = 1; + else + priv->duplexity = 0; + + speed = mii_reg & MIIM_CIS8201_AUXCONSTAT_SPEED; + + switch (speed) { + case MIIM_CIS8201_AUXCONSTAT_GBIT: + priv->speed = 1000; + break; + case MIIM_CIS8201_AUXCONSTAT_100: + priv->speed = 100; + break; + default: + priv->speed = 10; + break; + } + } else { + priv->speed = 0; + priv->duplexity = 0; + } + + return 0; +} + +u16 mii_parse_dm9161_scsr(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + if (mii_reg & (MIIM_DM9161_SCSR_100F | MIIM_DM9161_SCSR_100H)) + priv->speed = 100; + else + priv->speed = 10; + + if (mii_reg & (MIIM_DM9161_SCSR_100F | MIIM_DM9161_SCSR_10F)) + priv->duplexity = 1; + else + priv->duplexity = 0; + + return 0; +} + +u16 dm9161_wait(u16 mii_reg, struct net_device *dev) +{ + int timeout = HZ; + int secondary = 10; + u16 temp; + + do { + + /* Davicom takes a bit to come up after a reset, + * so wait here for a bit */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(timeout); + + temp = read_phy_reg(dev, MIIM_STATUS); + + secondary--; + } while ((!(temp & MIIM_STATUS_AN_DONE)) && secondary); + + return 0; +} + +/* + * consult the BCM54xx auxilliary status register to find the link settings + */ +u16 mii_parse_bcm54xx_sr(u16 mii_reg, struct net_device * dev) +{ + struct gfar_private *priv = (struct gfar_private *) dev->priv; + + /* Link modes of the BCM5400 PHY */ + static const uint16_t link_table[8][3] = { + { 0, 0 }, /* No link */ + { 0, 10 }, /* 10BT Half Duplex */ + { 1, 10 }, /* 10BT Full Duplex */ + { 0, 100 }, /* 100BT Half Duplex */ + { 0, 100 }, /* 100BT Half Duplex */ + { 1, 100 }, /* 100BT Full Duplex*/ + { 1, 1000 }, /* 1000BT */ + { 1, 1000 }, /* 1000BT */ + }; + + uint16_t link_mode; + + link_mode = mii_reg & MIIM_BCM54xx_AUXSTATUS_LINKMODE_MASK; + link_mode >>= MIIM_BCM54xx_AUXSTATUS_LINKMODE_SHIFT; + + priv->duplexity = link_table[link_mode][0]; + priv->speed = link_table[link_mode][1]; + + return 0; +} + +static struct phy_info phy_info_M88E1011S = { + 0x01410c6, + "Marvell 88E1011S", + 4, + (const struct phy_cmd[]) { /* config */ + /* Reset and configure the PHY */ + {MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* startup */ + /* Status is read once to clear old link state */ + {MIIM_STATUS, miim_read, NULL}, + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + /* Read the status */ + {MIIM_88E1011_PHY_STATUS, miim_read, mii_parse_88E1011_psr}, + /* Clear the IEVENT register */ + {MIIM_88E1011_IEVENT, miim_read, NULL}, + /* Set up the mask */ + {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_INIT, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* ack_int */ + /* Clear the interrupt */ + {MIIM_88E1011_IEVENT, miim_read, NULL}, + /* Disable interrupts */ + {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_CLEAR, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* handle_int */ + /* Read the Status (2x to make sure link is right) */ + {MIIM_STATUS, miim_read, NULL}, + /* Check the status */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + {MIIM_88E1011_PHY_STATUS, miim_read, mii_parse_88E1011_psr}, + /* Enable Interrupts */ + {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_INIT, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* shutdown */ + {MIIM_88E1011_IEVENT, miim_read, NULL}, + {MIIM_88E1011_IMASK, MIIM_88E1011_IMASK_CLEAR, NULL}, + {miim_end,} + }, +}; + +/* Cicada 8204 */ +static struct phy_info phy_info_cis8204 = { + 0x3f11, + 
"Cicada Cis8204", + 6, + (const struct phy_cmd[]) { /* config */ + /* Override PHY config settings */ + {MIIM_CIS8201_AUX_CONSTAT, MIIM_CIS8201_AUXCONSTAT_INIT, NULL}, + /* Set up the interface mode */ + {MIIM_CIS8201_EXT_CON1, MIIM_CIS8201_EXTCON1_INIT, NULL}, + /* Configure some basic stuff */ + {MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* startup */ + /* Read the Status (2x to make sure link is right) */ + {MIIM_STATUS, miim_read, NULL}, + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + /* Read the status */ + {MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201}, + /* Clear the status register */ + {MIIM_CIS8204_ISTAT, miim_read, NULL}, + /* Enable interrupts */ + {MIIM_CIS8204_IMASK, MIIM_CIS8204_IMASK_MASK, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* ack_int */ + /* Clear the status register */ + {MIIM_CIS8204_ISTAT, miim_read, NULL}, + /* Disable interrupts */ + {MIIM_CIS8204_IMASK, 0x0, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* handle_int */ + /* Read the Status (2x to make sure link is right) */ + {MIIM_STATUS, miim_read, NULL}, + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + /* Read the status */ + {MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201}, + /* Enable interrupts */ + {MIIM_CIS8204_IMASK, MIIM_CIS8204_IMASK_MASK, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* shutdown */ + /* Clear the status register */ + {MIIM_CIS8204_ISTAT, miim_read, NULL}, + /* Disable interrupts */ + {MIIM_CIS8204_IMASK, 0x0, NULL}, + {miim_end,} + }, +}; + +/* Cicada 8201 */ +static struct phy_info phy_info_cis8201 = { + 0xfc41, + "CIS8201", + 4, + (const struct phy_cmd[]) { /* config */ + /* Override PHY config settings */ + {MIIM_CIS8201_AUX_CONSTAT, MIIM_CIS8201_AUXCONSTAT_INIT, NULL}, + /* Set up the interface mode */ + {MIIM_CIS8201_EXT_CON1, MIIM_CIS8201_EXTCON1_INIT, NULL}, + /* Configure some basic stuff */ + {MIIM_CONTROL, MIIM_CONTROL_INIT, mii_cr_init}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* startup */ + /* Read the Status (2x to make sure link is right) */ + {MIIM_STATUS, miim_read, NULL}, + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + /* Read the status */ + {MIIM_CIS8201_AUX_CONSTAT, miim_read, mii_parse_cis8201}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* ack_int */ + {miim_end,} + }, + (const struct phy_cmd[]) { /* handle_int */ + {miim_end,} + }, + (const struct phy_cmd[]) { /* shutdown */ + {miim_end,} + }, +}; + +static struct phy_info phy_info_dm9161 = { + 0x0181b88, + "Davicom DM9161E", + 4, + (const struct phy_cmd[]) { /* config */ + {MIIM_CONTROL, MIIM_DM9161_CR_STOP, NULL}, + /* Do not bypass the scrambler/descrambler */ + {MIIM_DM9161_SCR, MIIM_DM9161_SCR_INIT, NULL}, + /* Clear 10BTCSR to default */ + {MIIM_DM9161_10BTCSR, MIIM_DM9161_10BTCSR_INIT, NULL}, + /* Configure some basic stuff */ + {MIIM_CONTROL, MIIM_CR_INIT, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* startup */ + /* Restart Auto Negotiation */ + {MIIM_CONTROL, MIIM_DM9161_CR_RSTAN, NULL}, + /* Status is read once to clear old link state */ + {MIIM_STATUS, miim_read, dm9161_wait}, + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + /* Read the status */ + {MIIM_DM9161_SCSR, miim_read, mii_parse_dm9161_scsr}, + /* Clear any pending interrupts */ + {MIIM_DM9161_INTR, miim_read, NULL}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* ack_int */ + {MIIM_DM9161_INTR, miim_read, NULL}, + {miim_end,} + }, + 
(const struct phy_cmd[]) { /* handle_int */ + {MIIM_STATUS, miim_read, NULL}, + {MIIM_STATUS, miim_read, mii_parse_sr}, + {MIIM_DM9161_SCSR, miim_read, mii_parse_dm9161_scsr}, + {miim_end,} + }, + (const struct phy_cmd[]) { /* shutdown */ + {MIIM_DM9161_INTR, miim_read, NULL}, + {miim_end,} + }, +}; + +/* Broadcom BCM5421S PHY */ +static struct phy_info phy_info_bcm5421s = { + .id = 0x2060E1, + .name = "Broadcom BCM5421S", + .shift = 0, + .config = (const struct phy_cmd[]) { + /* Configure some basic stuff */ + {MIIM_CONTROL, MIIM_CR_INIT, NULL}, +#if 0 /* 5421 only */ + miim_write(MII_BCM5400_AUXCONTROL, 0x1007), + miim_set_bits(MII_BCM5400_AUXCONTROL, 0x0400), + miim_write(MII_BCM5400_AUXCONTROL, 0x0007), + miim_set_bits(MII_BCM5400_AUXCONTROL, 0x0800), + miim_write(0x17, 0x000a), + miim_set_bits(MII_RERRCOUNTER, 0x0200), +#endif +#if 0 /* enable automatic low power */ + miim_write(MII_NCONFIG, 0x9002), + miim_write(MII_NCONFIG, 0xa821), + miim_write(MII_NCONFIG, 0x941d), +#endif + {miim_end,} + }, + .startup = (const struct phy_cmd[]) { + /* Restart Auto Negotiation */ + miim_set_bits(MIIM_CONTROL, BMCR_ANENABLE | BMCR_ANRESTART), +#if 0 + /* Status is read once to clear old link state */ + {MIIM_STATUS, miim_read, dm9161_wait}, +#endif + /* Auto-negotiate */ + {MIIM_STATUS, miim_read, mii_parse_sr}, + + /* Read the link status */ + {MIIM_BCM54xx_AUXSTATUS, miim_read, mii_parse_bcm54xx_sr}, + + {miim_end,} + }, + .ack_int = (const struct phy_cmd[]) { + {miim_end,} + }, + .handle_int = (const struct phy_cmd[]) { + {MIIM_STATUS, miim_read, NULL}, + {MIIM_STATUS, miim_read, mii_parse_sr}, + {miim_end,} + }, + .shutdown = (const struct phy_cmd[]) { + {miim_end,} + }, +}; + +static struct phy_info *phy_info[] = { + &phy_info_cis8201, + &phy_info_cis8204, + &phy_info_M88E1011S, + &phy_info_dm9161, + &phy_info_bcm5421s, + NULL +}; + +/* Use the PHY ID registers to determine what type of PHY is attached + * to device dev. return a struct phy_info structure describing that PHY + */ +struct phy_info * get_phy_info(struct net_device *dev) +{ + u16 phy_reg; + u32 phy_ID; + int i; + struct phy_info *theInfo = NULL; + + /* Grab the bits from PHYIR1, and put them in the upper half */ + phy_reg = read_phy_reg(dev, MIIM_PHYIR1); + phy_ID = (phy_reg & 0xffff) << 16; + + /* Grab the bits from PHYIR2, and put them in the lower half */ + phy_reg = read_phy_reg(dev, MIIM_PHYIR2); + phy_ID |= (phy_reg & 0xffff); + + /* loop through all the known PHY types, and find one that */ + /* matches the ID we read from the PHY. */ + for (i = 0; phy_info[i]; i++) + if (phy_info[i]->id == (phy_ID >> phy_info[i]->shift)) + theInfo = phy_info[i]; + + if (theInfo == NULL) { + printk("%s: PHY id %x is not supported!\n", dev->name, phy_ID); + return NULL; + } else { + printk("%s: PHY is %s (%x)\n", dev->name, theInfo->name, + phy_ID); + } + + return theInfo; +} + +/* Take a list of struct phy_cmd, and, depending on the values, either */ +/* read or write, using a helper function if provided */ +/* It is assumed that all lists of struct phy_cmd will be terminated by */ +/* mii_end. 
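get_phy_info() above builds a 32-bit identifier from the two 16-bit PHY ID registers (PHYIR1 in the upper half, PHYIR2 in the lower half) and compares it against each phy_info entry after shifting off the revision bits. The following is a minimal user-space sketch of that matching step only; the id/shift pairs are copied from the phy_info tables in this file, but phy_entry, match_phy() and the sample register words are invented for illustration and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

struct phy_entry {
        uint32_t id;            /* identifier with revision bits already shifted off */
        unsigned int shift;     /* low bits of the raw ID to discard */
        const char *name;
};

/* id/shift pairs mirror the phy_info entries above. */
static const struct phy_entry table[] = {
        { 0x00fc41,   4, "CIS8201" },
        { 0x003f11,   6, "Cicada Cis8204" },
        { 0x01410c6,  4, "Marvell 88E1011S" },
        { 0x0181b88,  4, "Davicom DM9161E" },
        { 0, 0, NULL }
};

static const struct phy_entry *match_phy(uint16_t phyir1, uint16_t phyir2)
{
        uint32_t raw = ((uint32_t)phyir1 << 16) | phyir2;
        int i;

        for (i = 0; table[i].name != NULL; i++)
                if (table[i].id == (raw >> table[i].shift))
                        return &table[i];
        return NULL;
}

int main(void)
{
        /* 0x01410c62: a hypothetical 88E1011S whose revision nibble is 2 */
        const struct phy_entry *p = match_phy(0x0141, 0x0c62);

        printf("%s\n", p ? p->name : "unsupported PHY");
        return 0;
}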
*/ +void phy_run_commands(struct net_device *dev, const struct phy_cmd *cmd) +{ + int i; + u16 result; + struct gfar_private *priv = (struct gfar_private *) dev->priv; + struct gfar *phyregs = priv->phyregs; + + /* Reset the management interface */ + gfar_write(&phyregs->miimcfg, MIIMCFG_RESET); + + /* Setup the MII Mgmt clock speed */ + gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE); + + /* Wait until the bus is free */ + while (gfar_read(&phyregs->miimind) & MIIMIND_BUSY) + cpu_relax(); + + for (i = 0; cmd->mii_reg != miim_end; i++) { + switch (cmd->mii_data >> 16) { + case 0x0000: + /* Otherwise, it's a write */ + /* If a function was supplied, it will provide + * the value to write */ + /* Otherwise, the value was supplied in cmd->mii_data */ + if (cmd->funct != NULL) + result = (*(cmd->funct)) (0, dev); + else + result = cmd->mii_data; + + write_phy_reg(dev, cmd->mii_reg, result); + break; + + case 0x0001: + /* Read the value of the PHY reg */ + result = read_phy_reg(dev, cmd->mii_reg); + + /* If a function was supplied, we need to let it process */ + /* the result. */ + if (cmd->funct != NULL) + (*(cmd->funct)) (result, dev); + break; + + case 0x0002: + /* read the value, clear some bits and write it back */ + BUG_ON(cmd->funct); + + result = read_phy_reg(dev, cmd->mii_reg); + result &= cmd->mii_data; + write_phy_reg(dev, cmd->mii_reg, result); + break; + + case 0x0003: + /* read the value, set some bits and write it back */ + BUG_ON(cmd->funct); + + result = read_phy_reg(dev, cmd->mii_reg); + result &= cmd->mii_data; + write_phy_reg(dev, cmd->mii_reg, result); + break; + + case 0x0004: + /* read the value, flip some bits and write it back */ + BUG_ON(cmd->funct); + + result = read_phy_reg(dev, cmd->mii_reg); + result &= cmd->mii_data; + write_phy_reg(dev, cmd->mii_reg, result); + break; + + default: + printk("GIANFAR: Unknown MII command %08x\n", + cmd->mii_data); + BUG(); + } + cmd++; + } +} diff --git a/drivers/net/gianfar_phy.h b/drivers/net/gianfar_phy.h new file mode 100644 index 000000000..df4c0ec9d --- /dev/null +++ b/drivers/net/gianfar_phy.h @@ -0,0 +1,202 @@ +/* + * drivers/net/gianfar_phy.h + * + * Gianfar Ethernet Driver -- PHY handling + * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560 + * Based on 8260_io/fcc_enet.c + * + * Author: Andy Fleming + * Maintainer: Kumar Gala (kumar.gala@freescale.com) + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
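A note on phy_run_commands() just above: the upper 16 bits of cmd->mii_data select the operation (0x0000 write, 0x0001 read, 0x0002 clear bits, 0x0003 set bits, 0x0004 flip bits, matching the miim_* helpers defined further down in gianfar_phy.h), and the lower 16 bits carry the operand; miim_clear_bits stores the complemented mask, so plain AND is correct for that case. As transcribed, however, the 0x0003 and 0x0004 cases repeat the same result &= cmd->mii_data, which can only clear bits; given the comments and the macro encodings they presumably want OR and XOR. The standalone sketch below illustrates the intended bit operations and is not the driver code; apply_cmd() and the sample values are invented.

#include <stdint.h>
#include <stdio.h>

/* Apply one command word to a 16-bit register value. The operation lives
 * in bits 31..16, the operand in bits 15..0, mirroring the miim_* macros. */
static uint16_t apply_cmd(uint16_t reg, uint32_t cmd)
{
        uint16_t operand = (uint16_t)cmd;

        switch (cmd >> 16) {
        case 0x0000:    /* plain write */
                return operand;
        case 0x0002:    /* clear bits: operand already holds the inverted mask */
                return reg & operand;
        case 0x0003:    /* set bits: OR, not AND */
                return reg | operand;
        case 0x0004:    /* flip bits: XOR, not AND */
                return reg ^ operand;
        default:        /* 0x0001 (read) and anything else: leave unchanged */
                return reg;
        }
}

int main(void)
{
        uint16_t bmcr = 0x1140;         /* example control register value */

        /* miim_set_bits(reg, 0x0200) encodes as 0x00030200 */
        bmcr = apply_cmd(bmcr, 0x00030200u);
        printf("after set bits:   0x%04x\n", (unsigned)bmcr);  /* 0x1340 */

        /* miim_clear_bits(reg, 0x1000) puts ~0x1000 in the low half */
        bmcr = apply_cmd(bmcr, 0x00020000u | (0xffffu & ~0x1000u));
        printf("after clear bits: 0x%04x\n", (unsigned)bmcr);  /* 0x0340 */
        return 0;
}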
+ * + */ +#ifndef __GIANFAR_PHY_H +#define __GIANFAR_PHY_H + +/* simple datum processing commands */ +#define miim_end (0xffff0000U) +#define miim_read (0x00010000U) +#define miim_clear_bits(reg,x) { reg, (0x00020000U | ~(u32)(x)), NULL } +#define miim_set_bits(reg,x) { reg, (0x00030000U | (u32)(x)), NULL } +#define miim_flip_bits(reg,x) { reg, (0x00040000U | (u32)(x)), NULL } +#define miim_write(reg, x) { reg, (0x0000ffffU & (u32)(x)), NULL } + +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_NOTVALID 0x00000004 + +#define MIIM_CONTROL 0x00 +#define MIIM_CONTROL_RESET 0x00008000 +#define MIIM_CONTROL_INIT 0x00001140 +#define MIIM_ANEN 0x00001000 + +#define MIIM_CR 0x00 +#define MIIM_CR_RST 0x00008000 +#define MIIM_CR_INIT 0x00001000 + +#define MIIM_STATUS 0x1 +#define MIIM_STATUS_AN_DONE 0x00000020 +#define MIIM_STATUS_LINK 0x0004 + +#define MIIM_PHYIR1 0x2 +#define MIIM_PHYIR2 0x3 + +#define GFAR_AN_TIMEOUT 0x000fffff + +#define MIIM_ANLPBPA 0x5 +#define MIIM_ANLPBPA_HALF 0x00000040 +#define MIIM_ANLPBPA_FULL 0x00000020 + +#define MIIM_ANEX 0x6 +#define MIIM_ANEX_NP 0x00000004 +#define MIIM_ANEX_PRX 0x00000002 + + +/* Cicada Extended Control Register 1 */ +#define MIIM_CIS8201_EXT_CON1 0x17 +#define MIIM_CIS8201_EXTCON1_INIT 0x0000 + +/* Cicada Interrupt Mask Register */ +#define MIIM_CIS8204_IMASK 0x19 +#define MIIM_CIS8204_IMASK_IEN 0x8000 +#define MIIM_CIS8204_IMASK_SPEED 0x4000 +#define MIIM_CIS8204_IMASK_LINK 0x2000 +#define MIIM_CIS8204_IMASK_DUPLEX 0x1000 +#define MIIM_CIS8204_IMASK_MASK 0xf000 + +/* Cicada Interrupt Status Register */ +#define MIIM_CIS8204_ISTAT 0x1a +#define MIIM_CIS8204_ISTAT_STATUS 0x8000 +#define MIIM_CIS8204_ISTAT_SPEED 0x4000 +#define MIIM_CIS8204_ISTAT_LINK 0x2000 +#define MIIM_CIS8204_ISTAT_DUPLEX 0x1000 + +/* Cicada Auxiliary Control/Status Register */ +#define MIIM_CIS8201_AUX_CONSTAT 0x1c +#define MIIM_CIS8201_AUXCONSTAT_INIT 0x0004 +#define MIIM_CIS8201_AUXCONSTAT_DUPLEX 0x0020 +#define MIIM_CIS8201_AUXCONSTAT_SPEED 0x0018 +#define MIIM_CIS8201_AUXCONSTAT_GBIT 0x0010 +#define MIIM_CIS8201_AUXCONSTAT_100 0x0008 + +/* 88E1011 PHY Status Register */ +#define MIIM_88E1011_PHY_STATUS 0x11 +#define MIIM_88E1011_PHYSTAT_SPEED 0xc000 +#define MIIM_88E1011_PHYSTAT_GBIT 0x8000 +#define MIIM_88E1011_PHYSTAT_100 0x4000 +#define MIIM_88E1011_PHYSTAT_DUPLEX 0x2000 +#define MIIM_88E1011_PHYSTAT_LINK 0x0400 + +#define MIIM_88E1011_IEVENT 0x13 +#define MIIM_88E1011_IEVENT_CLEAR 0x0000 + +#define MIIM_88E1011_IMASK 0x12 +#define MIIM_88E1011_IMASK_INIT 0x6400 +#define MIIM_88E1011_IMASK_CLEAR 0x0000 + +/* DM9161 Control register values */ +#define MIIM_DM9161_CR_STOP 0x0400 +#define MIIM_DM9161_CR_RSTAN 0x1200 + +#define MIIM_DM9161_SCR 0x10 +#define MIIM_DM9161_SCR_INIT 0x0610 + +/* DM9161 Specified Configuration and Status Register */ +#define MIIM_DM9161_SCSR 0x11 +#define MIIM_DM9161_SCSR_100F 0x8000 +#define MIIM_DM9161_SCSR_100H 0x4000 +#define MIIM_DM9161_SCSR_10F 0x2000 +#define MIIM_DM9161_SCSR_10H 0x1000 + +/* DM9161 Interrupt Register */ +#define MIIM_DM9161_INTR 0x15 +#define MIIM_DM9161_INTR_PEND 0x8000 +#define MIIM_DM9161_INTR_DPLX_MASK 0x0800 +#define MIIM_DM9161_INTR_SPD_MASK 0x0400 +#define MIIM_DM9161_INTR_LINK_MASK 0x0200 +#define MIIM_DM9161_INTR_MASK 0x0100 +#define MIIM_DM9161_INTR_DPLX_CHANGE 0x0010 +#define MIIM_DM9161_INTR_SPD_CHANGE 0x0008 +#define MIIM_DM9161_INTR_LINK_CHANGE 0x0004 +#define MIIM_DM9161_INTR_INIT 0x0000 +#define MIIM_DM9161_INTR_STOP \ +(MIIM_DM9161_INTR_DPLX_MASK | MIIM_DM9161_INTR_SPD_MASK \ + | MIIM_DM9161_INTR_LINK_MASK 
| MIIM_DM9161_INTR_MASK) + +/* DM9161 10BT Configuration/Status */ +#define MIIM_DM9161_10BTCSR 0x12 +#define MIIM_DM9161_10BTCSR_INIT 0x7800 + +/* BCM54xx regs */ +#define MIIM_BCM54xx_AUXCONTROL 0x18 +#define MIIM_BCM54xx_AUXSTATUS 0x19 +#define MIIM_BCM54xx_AUXSTATUS_LINKMODE_MASK 0x0700 +#define MIIM_BCM54xx_AUXSTATUS_LINKMODE_SHIFT 8 + +#define MIIM_READ_COMMAND 0x00000001 + +/* + * struct phy_cmd: A command for reading or writing a PHY register + * + * mii_reg: The register to read or write + * + * mii_data: For writes, the value to put in the register. + * A value of -1 indicates this is a read. + * + * funct: A function pointer which is invoked for each command. + * For reads, this function will be passed the value read + * from the PHY, and process it. + * For writes, the result of this function will be written + * to the PHY register + */ +struct phy_cmd { + u32 mii_reg; + u32 mii_data; + u16 (*funct) (u16 mii_reg, struct net_device * dev); +}; + +/* struct phy_info: a structure which defines attributes for a PHY + * + * id will contain a number which represents the PHY. During + * startup, the driver will poll the PHY to find out what its + * UID--as defined by registers 2 and 3--is. The 32-bit result + * gotten from the PHY will be shifted right by "shift" bits to + * discard any bits which may change based on revision numbers + * unimportant to functionality + * + * The struct phy_cmd entries represent pointers to an arrays of + * commands which tell the driver what to do to the PHY. + */ +struct phy_info { + u32 id; + char *name; + unsigned int shift; + /* Called to configure the PHY, and modify the controller + * based on the results */ + const struct phy_cmd *config; + + /* Called when starting up the controller. Usually sets + * up the interrupt for state changes */ + const struct phy_cmd *startup; + + /* Called inside the interrupt handler to acknowledge + * the interrupt */ + const struct phy_cmd *ack_int; + + /* Called in the bottom half to handle the interrupt */ + const struct phy_cmd *handle_int; + + /* Called when bringing down the controller. Usually stops + * the interrupts from being generated */ + const struct phy_cmd *shutdown; +}; + +struct phy_info *get_phy_info(struct net_device *dev); +void phy_run_commands(struct net_device *dev, const struct phy_cmd *cmd); + +#endif /* GIANFAR_PHY_H */ diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c new file mode 100644 index 000000000..3356fd8de --- /dev/null +++ b/drivers/net/via-velocity.c @@ -0,0 +1,3277 @@ +/* + * This code is derived from the VIA reference driver (copyright message + * below) provided to Red Hat by VIA Networking Technologies, Inc. for + * addition to the Linux kernel. + * + * The code has been merged into one source file, cleaned up to follow + * Linux coding style, ported to the Linux 2.6 kernel tree and cleaned + * for 64bit hardware platforms. + * + * TODO + * Big-endian support + * rx_copybreak/alignment + * Scatter gather + * More testing + * + * The changes are (c) Copyright 2004, Red Hat Inc. + * Additional fixes and clean up: Francois Romieu + * + * This source has not been verified for use in safety critical systems. + * + * Please direct queries about the revamped driver to the linux-kernel + * list not VIA. + * + * Original code: + * + * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc. + * All rights reserved. 
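The MIIM_BCM54xx_AUXSTATUS_LINKMODE_MASK/SHIFT values above are what mii_parse_bcm54xx_sr() earlier in gianfar_phy.c relies on: bits 10:8 of the auxiliary status word index a small table of duplex/speed pairs. The self-contained sketch below reproduces that decode; the table contents follow the link_table in the driver, while the register value fed to it here is made up.

#include <stdint.h>
#include <stdio.h>

#define AUXSTATUS_LINKMODE_MASK  0x0700
#define AUXSTATUS_LINKMODE_SHIFT 8

/* {full_duplex, speed_mbps}, indexed by the 3-bit link mode field. */
static const uint16_t link_table[8][2] = {
        { 0, 0 },    /* no link     */
        { 0, 10 },   /* 10BT half   */
        { 1, 10 },   /* 10BT full   */
        { 0, 100 },  /* 100BT half  */
        { 0, 100 },  /* 100BT half  */
        { 1, 100 },  /* 100BT full  */
        { 1, 1000 }, /* 1000BT      */
        { 1, 1000 }, /* 1000BT      */
};

int main(void)
{
        uint16_t aux = 0x0600;  /* hypothetical read: link mode 6, i.e. 1000BT */
        unsigned int mode = (aux & AUXSTATUS_LINKMODE_MASK) >> AUXSTATUS_LINKMODE_SHIFT;

        printf("%u Mb/s, %s duplex\n", (unsigned)link_table[mode][1],
               link_table[mode][0] ? "full" : "half");
        return 0;
}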
+ * + * This software may be redistributed and/or modified under + * the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or + * any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * Author: Chuang Liang-Shing, AJ Jiang + * + * Date: Jan 24, 2003 + * + * MODULE_LICENSE("GPL"); + * + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "via-velocity.h" + + +static int velocity_nics = 0; +static int msglevel = MSG_LEVEL_INFO; + + +static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +static struct ethtool_ops velocity_ethtool_ops; + +/* + Define module options +*/ + +MODULE_AUTHOR("VIA Networking Technologies, Inc."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"); + +#define VELOCITY_PARAM(N,D) \ + static const int N[MAX_UNITS]=OPTION_DEFAULT;\ + MODULE_PARM(N, "1-" __MODULE_STRING(MAX_UNITS) "i");\ + MODULE_PARM_DESC(N, D); + +#define RX_DESC_MIN 64 +#define RX_DESC_MAX 255 +#define RX_DESC_DEF 64 +VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors"); + +#define TX_DESC_MIN 16 +#define TX_DESC_MAX 256 +#define TX_DESC_DEF 64 +VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors"); + +#define VLAN_ID_MIN 0 +#define VLAN_ID_MAX 4095 +#define VLAN_ID_DEF 0 +/* VID_setting[] is used for setting the VID of NIC. + 0: default VID. + 1-4094: other VIDs. +*/ +VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID"); + +#define RX_THRESH_MIN 0 +#define RX_THRESH_MAX 3 +#define RX_THRESH_DEF 0 +/* rx_thresh[] is used for controlling the receive fifo threshold. + 0: indicate the rxfifo threshold is 128 bytes. + 1: indicate the rxfifo threshold is 512 bytes. + 2: indicate the rxfifo threshold is 1024 bytes. + 3: indicate the rxfifo threshold is store & forward. +*/ +VELOCITY_PARAM(rx_thresh, "Receive fifo threshold"); + +#define DMA_LENGTH_MIN 0 +#define DMA_LENGTH_MAX 7 +#define DMA_LENGTH_DEF 0 + +/* DMA_length[] is used for controlling the DMA length + 0: 8 DWORDs + 1: 16 DWORDs + 2: 32 DWORDs + 3: 64 DWORDs + 4: 128 DWORDs + 5: 256 DWORDs + 6: SF(flush till emply) + 7: SF(flush till emply) +*/ +VELOCITY_PARAM(DMA_length, "DMA length"); + +#define TAGGING_DEF 0 +/* enable_tagging[] is used for enabling 802.1Q VID tagging. + 0: disable VID seeting(default). + 1: enable VID setting. +*/ +VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging"); + +#define IP_ALIG_DEF 0 +/* IP_byte_align[] is used for IP header DWORD byte aligned + 0: indicate the IP header won't be DWORD byte aligned.(Default) . + 1: indicate the IP header will be DWORD byte aligned. + In some enviroment, the IP header should be DWORD byte aligned, + or the packet will be droped when we receive it. (eg: IPVS) +*/ +VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned"); + +#define TX_CSUM_DEF 1 +/* txcsum_offload[] is used for setting the checksum offload ability of NIC. 
+ (We only support RX checksum offload now) + 0: disable csum_offload[checksum offload + 1: enable checksum offload. (Default) +*/ +VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload"); + +#define FLOW_CNTL_DEF 1 +#define FLOW_CNTL_MIN 1 +#define FLOW_CNTL_MAX 5 + +/* flow_control[] is used for setting the flow control ability of NIC. + 1: hardware deafult - AUTO (default). Use Hardware default value in ANAR. + 2: enable TX flow control. + 3: enable RX flow control. + 4: enable RX/TX flow control. + 5: disable +*/ +VELOCITY_PARAM(flow_control, "Enable flow control ability"); + +#define MED_LNK_DEF 0 +#define MED_LNK_MIN 0 +#define MED_LNK_MAX 4 +/* speed_duplex[] is used for setting the speed and duplex mode of NIC. + 0: indicate autonegotiation for both speed and duplex mode + 1: indicate 100Mbps half duplex mode + 2: indicate 100Mbps full duplex mode + 3: indicate 10Mbps half duplex mode + 4: indicate 10Mbps full duplex mode + + Note: + if EEPROM have been set to the force mode, this option is ignored + by driver. +*/ +VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode"); + +#define VAL_PKT_LEN_DEF 0 +/* ValPktLen[] is used for setting the checksum offload ability of NIC. + 0: Receive frame with invalid layer 2 length (Default) + 1: Drop frame with invalid layer 2 length +*/ +VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame"); + +#define WOL_OPT_DEF 0 +#define WOL_OPT_MIN 0 +#define WOL_OPT_MAX 7 +/* wol_opts[] is used for controlling wake on lan behavior. + 0: Wake up if recevied a magic packet. (Default) + 1: Wake up if link status is on/off. + 2: Wake up if recevied an arp packet. + 4: Wake up if recevied any unicast packet. + Those value can be sumed up to support more than one option. +*/ +VELOCITY_PARAM(wol_opts, "Wake On Lan options"); + +#define INT_WORKS_DEF 20 +#define INT_WORKS_MIN 10 +#define INT_WORKS_MAX 64 + +VELOCITY_PARAM(int_works, "Number of packets per interrupt services"); + +static int velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent); +static void velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info); +static int velocity_get_pci_info(struct velocity_info *, struct pci_dev *pdev); +static void velocity_print_info(struct velocity_info *vptr); +static int velocity_open(struct net_device *dev); +static int velocity_change_mtu(struct net_device *dev, int mtu); +static int velocity_xmit(struct sk_buff *skb, struct net_device *dev); +static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs); +static void velocity_set_multi(struct net_device *dev); +static struct net_device_stats *velocity_get_stats(struct net_device *dev); +static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int velocity_close(struct net_device *dev); +static int velocity_rx_srv(struct velocity_info *vptr, int status); +static int velocity_receive_frame(struct velocity_info *, int idx); +static int velocity_alloc_rx_buf(struct velocity_info *, int idx); +static void velocity_init_registers(struct velocity_info *vptr, enum velocity_init_type type); +static void velocity_free_rd_ring(struct velocity_info *vptr); +static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *); +static int velocity_soft_reset(struct velocity_info *vptr); +static void mii_init(struct velocity_info *vptr, u32 mii_status); +static u32 velocity_get_opt_media_mode(struct velocity_info *vptr); +static void velocity_print_link_status(struct 
velocity_info *vptr); +static void safe_disable_mii_autopoll(struct mac_regs * regs); +static void velocity_shutdown(struct velocity_info *vptr); +static void enable_flow_control_ability(struct velocity_info *vptr); +static void enable_mii_autopoll(struct mac_regs * regs); +static int velocity_mii_read(struct mac_regs *, u8 byIdx, u16 * pdata); +static int velocity_mii_write(struct mac_regs *, u8 byMiiAddr, u16 data); +static int velocity_set_wol(struct velocity_info *vptr); +static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context); +static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context); +static u32 mii_check_media_mode(struct mac_regs * regs); +static u32 check_connection_type(struct mac_regs * regs); +static void velocity_init_cam_filter(struct velocity_info *vptr); +static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status); + +#ifdef CONFIG_PM +static int velocity_suspend(struct pci_dev *pdev, u32 state); +static int velocity_resume(struct pci_dev *pdev); + +static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr); + +static struct notifier_block velocity_inetaddr_notifier = { + notifier_call:velocity_netdev_event, +}; + +#endif /* CONFIG_PM */ + +/* + * Internal board variants. At the moment we have only one + */ + +static struct velocity_info_tbl chip_info_table[] = { + {CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1, 0x00FFFFFFUL}, + {0, NULL} +}; + +/* + * Describe the PCI device identifiers that we support in this + * device driver. Used for hotplug autoloading. + */ + +static struct pci_device_id velocity_id_table[] __devinitdata = { + {0x1106, 0x3119, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &chip_info_table[0]}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, velocity_id_table); + +/** + * get_chip_name - identifier to name + * @id: chip identifier + * + * Given a chip identifier return a suitable description. Returns + * a pointer a static string valid while the driver is loaded. + */ + +static char __devinit *get_chip_name(enum chip_type chip_id) +{ + int i; + for (i = 0; chip_info_table[i].name != NULL; i++) + if (chip_info_table[i].chip_id == chip_id) + break; + return chip_info_table[i].name; +} + +/** + * velocity_remove1 - device unplug + * @pdev: PCI device being removed + * + * Device unload callback. Called on an unplug or on module + * unload for each active device that is present. Disconnects + * the device from the network layer and frees all the resources + */ + +static void __devexit velocity_remove1(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct velocity_info *vptr = dev->priv; + + unregister_netdev(dev); + iounmap(vptr->mac_regs); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + free_netdev(dev); +} + +/** + * velocity_set_int_opt - parser for integer options + * @opt: pointer to option value + * @val: value the user requested (or -1 for default) + * @min: lowest value allowed + * @max: highest value allowed + * @def: default value + * @name: property name + * @dev: device name + * + * Set an integer property in the module options. This function does + * all the verification and checking as well as reporting so that + * we don't duplicate code for each option. 
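velocity_set_int_opt() and velocity_set_bool_opt(), implemented below, encode one small policy for every module option: a value of -1 means use the default, an out-of-range value is reported and replaced by the default, and anything else is accepted as given. A user-space sketch of that policy follows; pick_option() and the sample numbers (the RxDescriptors limits defined earlier in this file) are used purely for illustration.

#include <stdio.h>

/* Clamp-with-default, mirroring the velocity_set_int_opt() logic below. */
static int pick_option(int requested, int min, int max, int def, const char *name)
{
        if (requested == -1)
                return def;
        if (requested < min || requested > max) {
                printf("%s: %d out of range (%d-%d), using default %d\n",
                       name, requested, min, max, def);
                return def;
        }
        return requested;
}

int main(void)
{
        printf("numrx = %d\n", pick_option(-1,  64, 255, 64, "RxDescriptors"));
        printf("numrx = %d\n", pick_option(300, 64, 255, 64, "RxDescriptors"));
        printf("numrx = %d\n", pick_option(128, 64, 255, 64, "RxDescriptors"));
        return 0;
}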
+ */ + +static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, char *devname) +{ + if (val == -1) + *opt = def; + else if (val < min || val > max) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n", + devname, name, min, max); + *opt = def; + } else { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n", + devname, name, val); + *opt = val; + } +} + +/** + * velocity_set_bool_opt - parser for boolean options + * @opt: pointer to option value + * @val: value the user requested (or -1 for default) + * @def: default value (yes/no) + * @flag: numeric value to set for true. + * @name: property name + * @dev: device name + * + * Set a boolean property in the module options. This function does + * all the verification and checking as well as reporting so that + * we don't duplicate code for each option. + */ + +static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, char *devname) +{ + (*opt) &= (~flag); + if (val == -1) + *opt |= (def ? flag : 0); + else if (val < 0 || val > 1) { + printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n", + devname, name); + *opt |= (def ? flag : 0); + } else { + printk(KERN_INFO "%s: set parameter %s to %s\n", + devname, name, val ? "TRUE" : "FALSE"); + *opt |= (val ? flag : 0); + } +} + +/** + * velocity_get_options - set options on device + * @opts: option structure for the device + * @index: index of option to use in module options array + * @devname: device name + * + * Turn the module and command options into a single structure + * for the current device + */ + +static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname) +{ + + velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname); + velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname); + velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname); + velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname); + velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN, VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting", devname); + velocity_set_bool_opt(&opts->flags, enable_tagging[index], TAGGING_DEF, VELOCITY_FLAGS_TAGGING, "enable_tagging", devname); + velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname); + velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); + velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); + velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); + velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); + velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); + velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname); + opts->numrx = (opts->numrx & 
~3); +} + +/** + * velocity_init_cam_filter - initialise CAM + * @vptr: velocity to program + * + * Initialize the content addressable memory used for filters. Load + * appropriately according to the presence of VLAN + */ + +static void velocity_init_cam_filter(struct velocity_info *vptr) +{ + struct mac_regs * regs = vptr->mac_regs; + + /* T urn on MCFG_PQEN, turn off MCFG_RTGOPT */ + WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, ®s->MCFG); + WORD_REG_BITS_ON(MCFG_VIDFR, ®s->MCFG); + + /* Disable all CAMs */ + memset(vptr->vCAMmask, 0, sizeof(u8) * 8); + memset(vptr->mCAMmask, 0, sizeof(u8) * 8); + mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); + mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + + /* Enable first VCAM */ + if (vptr->flags & VELOCITY_FLAGS_TAGGING) { + /* If Tagging option is enabled and VLAN ID is not zero, then + turn on MCFG_RTGOPT also */ + if (vptr->options.vid != 0) + WORD_REG_BITS_ON(MCFG_RTGOPT, ®s->MCFG); + + mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid), VELOCITY_VLAN_ID_CAM); + vptr->vCAMmask[0] |= 1; + mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM); + } else { + u16 temp = 0; + mac_set_cam(regs, 0, (u8 *) &temp, VELOCITY_VLAN_ID_CAM); + temp = 1; + mac_set_cam_mask(regs, (u8 *) &temp, VELOCITY_VLAN_ID_CAM); + } +} + +/** + * velocity_rx_reset - handle a receive reset + * @vptr: velocity we are resetting + * + * Reset the ownership and status for the receive ring side. + * Hand all the receive queue to the NIC. + */ + +static void velocity_rx_reset(struct velocity_info *vptr) +{ + + struct mac_regs * regs = vptr->mac_regs; + int i; + + vptr->rd_used = vptr->rd_curr = 0; + + /* + * Init state, all RD entries belong to the NIC + */ + for (i = 0; i < vptr->options.numrx; ++i) + vptr->rd_ring[i].rdesc0.owner = cpu_to_le32(OWNED_BY_NIC); + + writew(vptr->options.numrx, ®s->RBRDU); + writel(vptr->rd_pool_dma, ®s->RDBaseLo); + writew(0, ®s->RDIdx); + writew(vptr->options.numrx - 1, ®s->RDCSize); +} + +/** + * velocity_init_registers - initialise MAC registers + * @vptr: velocity to init + * @type: type of initialisation (hot or cold) + * + * Initialise the MAC on a reset or on first set up on the + * hardware. + */ + +static void velocity_init_registers(struct velocity_info *vptr, + enum velocity_init_type type) +{ + struct mac_regs * regs = vptr->mac_regs; + int i, mii_status; + + mac_wol_reset(regs); + + switch (type) { + case VELOCITY_INIT_RESET: + case VELOCITY_INIT_WOL: + + netif_stop_queue(vptr->dev); + + /* + * Reset RX to prevent RX pointer not on the 4X location + */ + velocity_rx_reset(vptr); + mac_rx_queue_run(regs); + mac_rx_queue_wake(regs); + + mii_status = velocity_get_opt_media_mode(vptr); + if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { + velocity_print_link_status(vptr); + if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) + netif_wake_queue(vptr->dev); + } + + enable_flow_control_ability(vptr); + + mac_clear_isr(regs); + writel(CR0_STOP, ®s->CR0Clr); + writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), + ®s->CR0Set); + + break; + + case VELOCITY_INIT_COLD: + default: + /* + * Do reset + */ + velocity_soft_reset(vptr); + mdelay(5); + + mac_eeprom_reload(regs); + for (i = 0; i < 6; i++) { + writeb(vptr->dev->dev_addr[i], &(regs->PAR[i])); + } + /* + * clear Pre_ACPI bit. 
+ */ + BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA)); + mac_set_rx_thresh(regs, vptr->options.rx_thresh); + mac_set_dma_length(regs, vptr->options.DMA_length); + + writeb(WOLCFG_SAM | WOLCFG_SAB, ®s->WOLCFGSet); + /* + * Bback off algorithm use original IEEE standard + */ + BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), ®s->CFGB); + + /* + * Set packet filter: Receive directed and broadcast address + */ + velocity_set_multi(vptr->dev); + + /* + * Enable MII auto-polling + */ + enable_mii_autopoll(regs); + + vptr->int_mask = INT_MASK_DEF; + + writel(cpu_to_le32(vptr->rd_pool_dma), ®s->RDBaseLo); + writew(vptr->options.numrx - 1, ®s->RDCSize); + mac_rx_queue_run(regs); + mac_rx_queue_wake(regs); + + writew(vptr->options.numtx - 1, ®s->TDCSize); + + for (i = 0; i < vptr->num_txq; i++) { + writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i])); + mac_tx_queue_run(regs, i); + } + + velocity_init_cam_filter(vptr); + + init_flow_control_register(vptr); + + writel(CR0_STOP, ®s->CR0Clr); + writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), ®s->CR0Set); + + mii_status = velocity_get_opt_media_mode(vptr); + netif_stop_queue(vptr->dev); + mac_clear_isr(regs); + + mii_init(vptr, mii_status); + + if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { + velocity_print_link_status(vptr); + if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) + netif_wake_queue(vptr->dev); + } + + enable_flow_control_ability(vptr); + mac_hw_mibs_init(regs); + mac_write_int_mask(vptr->int_mask, regs); + mac_clear_isr(regs); + + } +} + +/** + * velocity_soft_reset - soft reset + * @vptr: velocity to reset + * + * Kick off a soft reset of the velocity adapter and then poll + * until the reset sequence has completed before returning. + */ + +static int velocity_soft_reset(struct velocity_info *vptr) +{ + struct mac_regs * regs = vptr->mac_regs; + int i = 0; + + writel(CR0_SFRST, ®s->CR0Set); + + for (i = 0; i < W_MAX_TIMEOUT; i++) { + udelay(5); + if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, ®s->CR0Set)) + break; + } + + if (i == W_MAX_TIMEOUT) { + writel(CR0_FORSRST, ®s->CR0Set); + /* FIXME: PCI POSTING */ + /* delay 2ms */ + mdelay(2); + } + return 0; +} + +/** + * velocity_found1 - set up discovered velocity card + * @pdev: PCI device + * @ent: PCI device table entry that matched + * + * Configure a discovered adapter from scratch. Return a negative + * errno error code on failure paths. + */ + +static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int first = 1; + struct net_device *dev; + int i; + struct velocity_info_tbl *info = (struct velocity_info_tbl *) ent->driver_data; + struct velocity_info *vptr; + struct mac_regs * regs; + int ret = -ENOMEM; + + if (velocity_nics++ >= MAX_UNITS) { + printk(KERN_NOTICE VELOCITY_NAME ": already found %d NICs.\n", + velocity_nics); + return -ENODEV; + } + + dev = alloc_etherdev(sizeof(struct velocity_info)); + + if (dev == NULL) { + printk(KERN_ERR VELOCITY_NAME ": allocate net device failed.\n"); + goto out; + } + + /* Chain it all together */ + + SET_MODULE_OWNER(dev); + SET_NETDEV_DEV(dev, &pdev->dev); + vptr = dev->priv; + + + if (first) { + printk(KERN_INFO "%s Ver. 
%s\n", + VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION); + printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n"); + printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n"); + first = 0; + } + + velocity_init_info(pdev, vptr, info); + + vptr->dev = dev; + + dev->priv = vptr; + dev->irq = pdev->irq; + + ret = pci_enable_device(pdev); + if (ret < 0) + goto err_free_dev; + + ret = velocity_get_pci_info(vptr, pdev); + if (ret < 0) { + printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n"); + goto err_disable; + } + + ret = pci_request_regions(pdev, VELOCITY_NAME); + if (ret < 0) { + printk(KERN_ERR VELOCITY_NAME ": Failed to find PCI device.\n"); + goto err_disable; + } + + regs = ioremap(vptr->memaddr, vptr->io_size); + if (regs == NULL) { + ret = -EIO; + goto err_release_res; + } + + vptr->mac_regs = regs; + + mac_wol_reset(regs); + + dev->base_addr = vptr->ioaddr; + + for (i = 0; i < 6; i++) + dev->dev_addr[i] = readb(®s->PAR[i]); + + + velocity_get_options(&vptr->options, velocity_nics - 1, dev->name); + + /* + * Mask out the options cannot be set to the chip + */ + + vptr->options.flags &= info->flags; + + /* + * Enable the chip specified capbilities + */ + + vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL); + + vptr->wol_opts = vptr->options.wol_opts; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + + vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); + + dev->irq = pdev->irq; + dev->open = velocity_open; + dev->hard_start_xmit = velocity_xmit; + dev->stop = velocity_close; + dev->get_stats = velocity_get_stats; + dev->set_multicast_list = velocity_set_multi; + dev->do_ioctl = velocity_ioctl; + dev->ethtool_ops = &velocity_ethtool_ops; + dev->change_mtu = velocity_change_mtu; +#ifdef VELOCITY_ZERO_COPY_SUPPORT + dev->features |= NETIF_F_SG; +#endif + + if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) { + dev->features |= NETIF_F_HW_CSUM; + } + + ret = register_netdev(dev); + if (ret < 0) + goto err_iounmap; + + velocity_print_info(vptr); + pci_set_drvdata(pdev, dev); + + /* and leave the chip powered down */ + + pci_set_power_state(pdev, 3); +out: + return ret; + +err_iounmap: + iounmap(regs); +err_release_res: + pci_release_regions(pdev); +err_disable: + pci_disable_device(pdev); +err_free_dev: + free_netdev(dev); + goto out; +} + +/** + * velocity_print_info - per driver data + * @vptr: velocity + * + * Print per driver data as the kernel driver finds Velocity + * hardware + */ + +static void __devinit velocity_print_info(struct velocity_info *vptr) +{ + struct net_device *dev = vptr->dev; + + printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); + printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", + dev->name, + dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], + dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); +} + +/** + * velocity_init_info - init private data + * @pdev: PCI device + * @vptr: Velocity info + * @info: Board type + * + * Set up the initial velocity_info struct for the device that has been + * discovered. 
+ */ + +static void __devinit velocity_init_info(struct pci_dev *pdev, struct velocity_info *vptr, struct velocity_info_tbl *info) +{ + memset(vptr, 0, sizeof(struct velocity_info)); + + vptr->pdev = pdev; + vptr->chip_id = info->chip_id; + vptr->io_size = info->io_size; + vptr->num_txq = info->txqueue; + vptr->multicast_limit = MCAM_SIZE; + + spin_lock_init(&vptr->lock); + spin_lock_init(&vptr->xmit_lock); +} + +/** + * velocity_get_pci_info - retrieve PCI info for device + * @vptr: velocity device + * @pdev: PCI device it matches + * + * Retrieve the PCI configuration space data that interests us from + * the kernel PCI layer + */ + +static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev) +{ + + if(pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0) + return -EIO; + + pci_set_master(pdev); + + vptr->ioaddr = pci_resource_start(pdev, 0); + vptr->memaddr = pci_resource_start(pdev, 1); + + if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) + { + printk(KERN_ERR "%s: region #0 is not an I/O resource, aborting.\n", + pci_name(pdev)); + return -EINVAL; + } + + if((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) + { + printk(KERN_ERR "%s: region #1 is an I/O resource, aborting.\n", + pci_name(pdev)); + return -EINVAL; + } + + if(pci_resource_len(pdev, 1) < 256) + { + printk(KERN_ERR "%s: region #1 is too small.\n", + pci_name(pdev)); + return -EINVAL; + } + vptr->pdev = pdev; + + return 0; +} + +/** + * velocity_init_rings - set up DMA rings + * @vptr: Velocity to set up + * + * Allocate PCI mapped DMA rings for the receive and transmit layer + * to use. + */ + +static int velocity_init_rings(struct velocity_info *vptr) +{ + int i; + unsigned int psize; + unsigned int tsize; + dma_addr_t pool_dma; + u8 *pool; + + /* + * Allocate all RD/TD rings a single pool + */ + + psize = vptr->options.numrx * sizeof(struct rx_desc) + + vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; + + /* + * pci_alloc_consistent() fulfills the requirement for 64 bytes + * alignment + */ + pool = pci_alloc_consistent(vptr->pdev, psize, &pool_dma); + + if (pool == NULL) { + printk(KERN_ERR "%s : DMA memory allocation failed.\n", + vptr->dev->name); + return -ENOMEM; + } + + memset(pool, 0, psize); + + vptr->rd_ring = (struct rx_desc *) pool; + + vptr->rd_pool_dma = pool_dma; + + tsize = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq; + vptr->tx_bufs = pci_alloc_consistent(vptr->pdev, tsize, + &vptr->tx_bufs_dma); + + if (vptr->tx_bufs == NULL) { + printk(KERN_ERR "%s: DMA memory allocation failed.\n", + vptr->dev->name); + pci_free_consistent(vptr->pdev, psize, pool, pool_dma); + return -ENOMEM; + } + + memset(vptr->tx_bufs, 0, vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq); + + i = vptr->options.numrx * sizeof(struct rx_desc); + pool += i; + pool_dma += i; + for (i = 0; i < vptr->num_txq; i++) { + int offset = vptr->options.numtx * sizeof(struct tx_desc); + + vptr->td_pool_dma[i] = pool_dma; + vptr->td_rings[i] = (struct tx_desc *) pool; + pool += offset; + pool_dma += offset; + } + return 0; +} + +/** + * velocity_free_rings - free PCI ring pointers + * @vptr: Velocity to free from + * + * Clean up the PCI ring buffers allocated to this velocity. 
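velocity_init_rings() above allocates one consistent-DMA block sized for the RX ring plus all TX rings (pci_alloc_consistent() is relied on for the 64-byte alignment, per the comment) and then carves it up by pointer arithmetic; velocity_free_rings() recomputes the same size when releasing it. Only the size/offset arithmetic is sketched below; the descriptor sizes are invented stand-ins, since the real rx_desc/tx_desc layouts are dictated by the hardware header.

#include <stdio.h>
#include <stddef.h>

/* Invented stand-in descriptor sizes; the real ones are fixed by hardware. */
#define RX_DESC_SIZE 16
#define TX_DESC_SIZE 16

int main(void)
{
        unsigned int numrx = 64, numtx = 64, num_txq = 1;
        size_t pool_size = (size_t)numrx * RX_DESC_SIZE +
                           (size_t)numtx * TX_DESC_SIZE * num_txq;
        size_t offset = (size_t)numrx * RX_DESC_SIZE;   /* RX ring comes first */
        unsigned int q;

        printf("pool: %zu bytes, rd ring at offset 0\n", pool_size);
        for (q = 0; q < num_txq; q++) {
                printf("td ring %u at offset %zu\n", q, offset);
                offset += (size_t)numtx * TX_DESC_SIZE;
        }
        return 0;
}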
+ */ + +static void velocity_free_rings(struct velocity_info *vptr) +{ + int size; + + size = vptr->options.numrx * sizeof(struct rx_desc) + + vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; + + pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma); + + size = vptr->options.numtx * PKT_BUF_SZ * vptr->num_txq; + + pci_free_consistent(vptr->pdev, size, vptr->tx_bufs, vptr->tx_bufs_dma); +} + +/** + * velocity_init_rd_ring - set up receive ring + * @vptr: velocity to configure + * + * Allocate and set up the receive buffers for each ring slot and + * assign them to the network adapter. + */ + +static int velocity_init_rd_ring(struct velocity_info *vptr) +{ + int i, ret = -ENOMEM; + struct rx_desc *rd; + struct velocity_rd_info *rd_info; + unsigned int rsize = sizeof(struct velocity_rd_info) * + vptr->options.numrx; + + vptr->rd_info = kmalloc(rsize, GFP_KERNEL); + if(vptr->rd_info == NULL) + goto out; + memset(vptr->rd_info, 0, rsize); + + /* Init the RD ring entries */ + for (i = 0; i < vptr->options.numrx; i++) { + rd = &(vptr->rd_ring[i]); + rd_info = &(vptr->rd_info[i]); + + ret = velocity_alloc_rx_buf(vptr, i); + if (ret < 0) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR + "%s: failed to allocate RX buffer.\n", + vptr->dev->name); + velocity_free_rd_ring(vptr); + goto out; + } + rd->rdesc0.owner = OWNED_BY_NIC; + } + vptr->rd_used = vptr->rd_curr = 0; +out: + return ret; +} + +/** + * velocity_free_rd_ring - set up receive ring + * @vptr: velocity to clean up + * + * Free the receive buffers for each ring slot and any + * attached socket buffers that need to go away. + */ + +static void velocity_free_rd_ring(struct velocity_info *vptr) +{ + int i; + + if (vptr->rd_info == NULL) + return; + + for (i = 0; i < vptr->options.numrx; i++) { + struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); + + if (!rd_info->skb_dma) + continue; + pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, + PCI_DMA_FROMDEVICE); + rd_info->skb_dma = (dma_addr_t) NULL; + + dev_kfree_skb(rd_info->skb); + rd_info->skb = NULL; + } + + kfree(vptr->rd_info); + vptr->rd_info = NULL; +} + +/** + * velocity_init_td_ring - set up transmit ring + * @vptr: velocity + * + * Set up the transmit ring and chain the ring pointers together. + * Returns zero on success or a negative posix errno code for + * failure. + */ + +static int velocity_init_td_ring(struct velocity_info *vptr) +{ + int i, j; + dma_addr_t curr; + struct tx_desc *td; + struct velocity_td_info *td_info; + unsigned int tsize = sizeof(struct velocity_td_info) * + vptr->options.numtx; + + /* Init the TD ring entries */ + for (j = 0; j < vptr->num_txq; j++) { + curr = vptr->td_pool_dma[j]; + + vptr->td_infos[j] = kmalloc(tsize, GFP_KERNEL); + if(vptr->td_infos[j] == NULL) + { + while(--j >= 0) + kfree(vptr->td_infos[j]); + return -ENOMEM; + } + memset(vptr->td_infos[j], 0, tsize); + + for (i = 0; i < vptr->options.numtx; i++, curr += sizeof(struct tx_desc)) { + td = &(vptr->td_rings[j][i]); + td_info = &(vptr->td_infos[j][i]); + td_info->buf = vptr->tx_bufs + (i + j) * PKT_BUF_SZ; + td_info->buf_dma = vptr->tx_bufs_dma + (i + j) * PKT_BUF_SZ; + } + vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0; + } + return 0; +} + +/* + * FIXME: could we merge this with velocity_free_tx_buf ? 
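One observation on velocity_init_td_ring() above: each tdinfo bounce buffer is placed at (i + j) * PKT_BUF_SZ inside tx_bufs, which keeps the slices disjoint only while a single TX queue is in use (chip_info_table appears to configure exactly one); for num_txq greater than one, adjacent queues would overlap, whereas a stride of numtx per queue, i.e. (i + j * numtx), would not. The sketch below simply prints both indexings side by side; the PKT_BUF_SZ value is illustrative, not the driver's definition.

#include <stdio.h>

#define PKT_BUF_SZ 1564         /* illustrative buffer stride */

int main(void)
{
        unsigned int numtx = 4, num_txq = 2, i, j;

        for (j = 0; j < num_txq; j++)
                for (i = 0; i < numtx; i++)
                        printf("q%u entry %u: as written %6u, disjoint %6u\n",
                               j, i,
                               (i + j) * PKT_BUF_SZ,            /* overlaps across queues */
                               (i + j * numtx) * PKT_BUF_SZ);   /* one slice per entry */
        return 0;
}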
+ */ + +static void velocity_free_td_ring_entry(struct velocity_info *vptr, + int q, int n) +{ + struct velocity_td_info * td_info = &(vptr->td_infos[q][n]); + int i; + + if (td_info == NULL) + return; + + if (td_info->skb) { + for (i = 0; i < td_info->nskb_dma; i++) + { + if (td_info->skb_dma[i]) { + pci_unmap_single(vptr->pdev, td_info->skb_dma[i], + td_info->skb->len, PCI_DMA_TODEVICE); + td_info->skb_dma[i] = (dma_addr_t) NULL; + } + } + dev_kfree_skb(td_info->skb); + td_info->skb = NULL; + } +} + +/** + * velocity_free_td_ring - free td ring + * @vptr: velocity + * + * Free up the transmit ring for this particular velocity adapter. + * We free the ring contents but not the ring itself. + */ + +static void velocity_free_td_ring(struct velocity_info *vptr) +{ + int i, j; + + for (j = 0; j < vptr->num_txq; j++) { + if (vptr->td_infos[j] == NULL) + continue; + for (i = 0; i < vptr->options.numtx; i++) { + velocity_free_td_ring_entry(vptr, j, i); + + } + if (vptr->td_infos[j]) { + kfree(vptr->td_infos[j]); + vptr->td_infos[j] = NULL; + } + } +} + +/** + * velocity_rx_srv - service RX interrupt + * @vptr: velocity + * @status: adapter status (unused) + * + * Walk the receive ring of the velocity adapter and remove + * any received packets from the receive queue. Hand the ring + * slots back to the adapter for reuse. + */ + +static int velocity_rx_srv(struct velocity_info *vptr, int status) +{ + struct rx_desc *rd; + struct net_device_stats *stats = &vptr->stats; + struct mac_regs * regs = vptr->mac_regs; + int rd_curr = vptr->rd_curr; + int works = 0; + + while (1) { + + rd = &(vptr->rd_ring[rd_curr]); + + if ((vptr->rd_info[rd_curr]).skb == NULL) { + if (velocity_alloc_rx_buf(vptr, rd_curr) < 0) + break; + } + + if (works++ > 15) + break; + + if (rd->rdesc0.owner == OWNED_BY_NIC) + break; + + /* + * Don't drop CE or RL error frame although RXOK is off + * FIXME: need to handle copybreak + */ + if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) { + if (velocity_receive_frame(vptr, rd_curr) == 0) { + if (velocity_alloc_rx_buf(vptr, rd_curr) < 0) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR "%s: can not allocate rx buf\n", vptr->dev->name); + break; + } + } else { + stats->rx_dropped++; + } + } else { + if (rd->rdesc0.RSR & RSR_CRC) + stats->rx_crc_errors++; + if (rd->rdesc0.RSR & RSR_FAE) + stats->rx_frame_errors++; + + stats->rx_dropped++; + } + + rd->inten = 1; + + if (++vptr->rd_used >= 4) { + int i, rd_prev = rd_curr; + for (i = 0; i < 4; i++) { + if (--rd_prev < 0) + rd_prev = vptr->options.numrx - 1; + + rd = &(vptr->rd_ring[rd_prev]); + rd->rdesc0.owner = OWNED_BY_NIC; + } + writew(4, &(regs->RBRDU)); + vptr->rd_used -= 4; + } + + vptr->dev->last_rx = jiffies; + + rd_curr++; + if (rd_curr >= vptr->options.numrx) + rd_curr = 0; + } + vptr->rd_curr = rd_curr; + VAR_USED(stats); + return works; +} + +/** + * velocity_rx_csum - checksum process + * @rd: receive packet descriptor + * @skb: network layer packet buffer + * + * Process the status bits for the received packet and determine + * if the checksum was computed and verified by the hardware + */ + +static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb) +{ + skb->ip_summed = CHECKSUM_NONE; + + if (rd->rdesc1.CSM & CSM_IPKT) { + if (rd->rdesc1.CSM & CSM_IPOK) { + if ((rd->rdesc1.CSM & CSM_TCPKT) || + (rd->rdesc1.CSM & CSM_UDPKT)) { + if (!(rd->rdesc1.CSM & CSM_TUPOK)) { + return; + } + } + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + } +} + +/** + * 
velocity_receive_frame - received packet processor + * @vptr: velocity we are handling + * @idx: ring index + * + * A packet has arrived. We process the packet and if appropriate + * pass the frame up the network stack + */ + +static int velocity_receive_frame(struct velocity_info *vptr, int idx) +{ + struct net_device_stats *stats = &vptr->stats; + struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); + struct rx_desc *rd = &(vptr->rd_ring[idx]); + struct sk_buff *skb; + + if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { + VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name); + stats->rx_length_errors++; + return -EINVAL; + } + + if (rd->rdesc0.RSR & RSR_MAR) + vptr->stats.multicast++; + + skb = rd_info->skb; + skb->dev = vptr->dev; + + pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, + PCI_DMA_FROMDEVICE); + rd_info->skb_dma = (dma_addr_t) NULL; + rd_info->skb = NULL; + + /* FIXME - memmove ? */ + if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) { + int i; + for (i = rd->rdesc0.len + 4; i >= 0; i--) + *(skb->data + i + 2) = *(skb->data + i); + skb->data += 2; + skb->tail += 2; + } + + skb_put(skb, (rd->rdesc0.len - 4)); + skb->protocol = eth_type_trans(skb, skb->dev); + + /* + * Drop frame not meeting IEEE 802.3 + */ + + if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) { + if (rd->rdesc0.RSR & RSR_RL) { + stats->rx_length_errors++; + return -EINVAL; + } + } + + velocity_rx_csum(rd, skb); + + /* + * FIXME: need rx_copybreak handling + */ + + stats->rx_bytes += skb->len; + netif_rx(skb); + + return 0; +} + +/** + * velocity_alloc_rx_buf - allocate aligned receive buffer + * @vptr: velocity + * @idx: ring index + * + * Allocate a new full sized buffer for the reception of a frame and + * map it into PCI space for the hardware to use. The hardware + * requires *64* byte alignment of the buffer which makes life + * less fun than would be ideal. + */ + +static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) +{ + struct rx_desc *rd = &(vptr->rd_ring[idx]); + struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); + + rd_info->skb = dev_alloc_skb(vptr->rx_buf_sz + 64); + if (rd_info->skb == NULL) + return -ENOMEM; + + /* + * Do the gymnastics to get the buffer head for data at + * 64byte alignment. + */ + skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->tail & 63); + rd_info->skb->dev = vptr->dev; + rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->tail, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); + + /* + * Fill in the descriptor to match + */ + + *((u32 *) & (rd->rdesc0)) = 0; + rd->len = cpu_to_le32(vptr->rx_buf_sz); + rd->inten = 1; + rd->pa_low = cpu_to_le32(rd_info->skb_dma); + rd->pa_high = 0; + return 0; +} + +/** + * tx_srv - transmit interrupt service + * @vptr; Velocity + * @status: + * + * Scan the queues looking for transmitted packets that + * we can complete and clean up. 
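velocity_alloc_rx_buf() above reserves (tail & 63) bytes to reach the 64-byte alignment its comment asks for, but that amount only lands on a 64-byte boundary when the address is already 0 or 32 modulo 64; the usual align-up padding is (-address) & 63. The small demonstration below compares the two; pad_to_64() and the sample addresses are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Bytes to skip so that p becomes 64-byte aligned. */
static unsigned int pad_to_64(uintptr_t p)
{
        return (unsigned int)(-p & 63);
}

int main(void)
{
        uintptr_t samples[] = { 0x1000, 0x1001, 0x1020, 0x103f };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                uintptr_t p = samples[i];
                printf("%#lx: reserve %2u -> %#lx   (p & 63 would reserve %2lu -> %#lx)\n",
                       (unsigned long)p, pad_to_64(p),
                       (unsigned long)(p + pad_to_64(p)),
                       (unsigned long)(p & 63),
                       (unsigned long)(p + (p & 63)));
        }
        return 0;
}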
Update any statistics as + * neccessary/ + */ + +static int velocity_tx_srv(struct velocity_info *vptr, u32 status) +{ + struct tx_desc *td; + int qnum; + int full = 0; + int idx; + int works = 0; + struct velocity_td_info *tdinfo; + struct net_device_stats *stats = &vptr->stats; + + for (qnum = 0; qnum < vptr->num_txq; qnum++) { + for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0; + idx = (idx + 1) % vptr->options.numtx) { + + /* + * Get Tx Descriptor + */ + td = &(vptr->td_rings[qnum][idx]); + tdinfo = &(vptr->td_infos[qnum][idx]); + + if (td->tdesc0.owner == OWNED_BY_NIC) + break; + + if ((works++ > 15)) + break; + + if (td->tdesc0.TSR & TSR0_TERR) { + stats->tx_errors++; + stats->tx_dropped++; + if (td->tdesc0.TSR & TSR0_CDH) + stats->tx_heartbeat_errors++; + if (td->tdesc0.TSR & TSR0_CRS) + stats->tx_carrier_errors++; + if (td->tdesc0.TSR & TSR0_ABT) + stats->tx_aborted_errors++; + if (td->tdesc0.TSR & TSR0_OWC) + stats->tx_window_errors++; + } else { + stats->tx_packets++; + stats->tx_bytes += tdinfo->skb->len; + } + velocity_free_tx_buf(vptr, tdinfo); + vptr->td_used[qnum]--; + } + vptr->td_tail[qnum] = idx; + + if (AVAIL_TD(vptr, qnum) < 1) { + full = 1; + } + } + /* + * Look to see if we should kick the transmit network + * layer for more work. + */ + if (netif_queue_stopped(vptr->dev) && (full == 0) + && (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { + netif_wake_queue(vptr->dev); + } + return works; +} + +/** + * velocity_print_link_status - link status reporting + * @vptr: velocity to report on + * + * Turn the link status of the velocity card into a kernel log + * description of the new link state, detailing speed and duplex + * status + */ + +static void velocity_print_link_status(struct velocity_info *vptr) +{ + + if (vptr->mii_status & VELOCITY_LINK_FAIL) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name); + } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link autonegation", vptr->dev->name); + + if (vptr->mii_status & VELOCITY_SPEED_1000) + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); + else if (vptr->mii_status & VELOCITY_SPEED_100) + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps"); + else + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps"); + + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n"); + else + VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); + } else { + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); + switch (vptr->options.spd_dpx) { + case SPD_DPX_100_HALF: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n"); + break; + case SPD_DPX_100_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n"); + break; + case SPD_DPX_10_HALF: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n"); + break; + case SPD_DPX_10_FULL: + VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n"); + break; + default: + break; + } + } +} + +/** + * velocity_error - handle error from controller + * @vptr: velocity + * @status: card status + * + * Process an error report from the hardware and attempt to recover + * the card itself. 
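In velocity_error() just below, the frame-bursting branch tests vptr->mii_status | VELOCITY_DUPLEX_FULL, which is non-zero for every possible status value, so the half-duplex path can never be taken; a bitwise AND is presumably what was meant. A tiny demonstration of the difference follows; the flag value used here is illustrative, the real one lives in via-velocity.h.

#include <stdio.h>

#define VELOCITY_DUPLEX_FULL 0x00000001 /* illustrative bit position */

int main(void)
{
        unsigned int status;

        for (status = 0; status < 4; status++)
                printf("status=%u  OR-test=%d  AND-test=%d\n", status,
                       (status | VELOCITY_DUPLEX_FULL) != 0,   /* always 1 */
                       (status & VELOCITY_DUPLEX_FULL) != 0);  /* 1 only when the bit is set */
        return 0;
}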
At the moment we cannot recover from some + * theoretically impossible errors but this could be fixed using + * the pci_device_failed logic to bounce the hardware + * + */ + +static void velocity_error(struct velocity_info *vptr, int status) +{ + + if (status & ISR_TXSTLI) { + struct mac_regs * regs = vptr->mac_regs; + + printk(KERN_ERR "TD structure errror TDindex=%hx\n", readw(®s->TDIdx[0])); + BYTE_REG_BITS_ON(TXESR_TDSTR, ®s->TXESR); + writew(TRDCSR_RUN, ®s->TDCSRClr); + netif_stop_queue(vptr->dev); + + /* FIXME: port over the pci_device_failed code and use it + here */ + } + + if (status & ISR_SRCI) { + struct mac_regs * regs = vptr->mac_regs; + int linked; + + if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + vptr->mii_status = check_connection_type(regs); + + /* + * If it is a 3119, disable frame bursting in + * halfduplex mode and enable it in fullduplex + * mode + */ + if (vptr->rev_id < REV_ID_VT3216_A0) { + if (vptr->mii_status | VELOCITY_DUPLEX_FULL) + BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); + else + BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); + } + /* + * Only enable CD heart beat counter in 10HD mode + */ + if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10)) { + BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); + } else { + BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); + } + } + /* + * Get link status from PHYSR0 + */ + linked = readb(®s->PHYSR0) & PHYSR0_LINKGD; + + if (linked) { + vptr->mii_status &= ~VELOCITY_LINK_FAIL; + } else { + vptr->mii_status |= VELOCITY_LINK_FAIL; + } + + velocity_print_link_status(vptr); + enable_flow_control_ability(vptr); + + /* + * Re-enable auto-polling because SRCI will disable + * auto-polling + */ + + enable_mii_autopoll(regs); + + if (vptr->mii_status & VELOCITY_LINK_FAIL) + netif_stop_queue(vptr->dev); + else + netif_wake_queue(vptr->dev); + + }; + if (status & ISR_MIBFI) + velocity_update_hw_mibs(vptr); + if (status & ISR_LSTEI) + mac_rx_queue_wake(vptr->mac_regs); +} + +/** + * velocity_free_tx_buf - free transmit buffer + * @vptr: velocity + * @tdinfo: buffer + * + * Release an transmit buffer. If the buffer was preallocated then + * recycle it, if not then unmap the buffer. + */ + +static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo) +{ + struct sk_buff *skb = tdinfo->skb; + int i; + + /* + * Don't unmap the pre-allocated tx_bufs + */ + if (tdinfo->skb_dma && (tdinfo->skb_dma[0] != tdinfo->buf_dma)) { + + for (i = 0; i < tdinfo->nskb_dma; i++) { +#ifdef VELOCITY_ZERO_COPY_SUPPORT + pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE); +#else + pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE); +#endif + tdinfo->skb_dma[i] = 0; + } + } + dev_kfree_skb_irq(skb); + tdinfo->skb = NULL; +} + +/** + * velocity_open - interface activation callback + * @dev: network layer device to open + * + * Called when the network layer brings the interface up. Returns + * a negative posix error code on failure, or zero on success. + * + * All the ring allocation and set up is done on open for this + * adapter to minimise memory usage when inactive + */ + +static int velocity_open(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + int ret; + + vptr->rx_buf_sz = (dev->mtu <= 1504 ? 
PKT_BUF_SZ : dev->mtu + 32); + + ret = velocity_init_rings(vptr); + if (ret < 0) + goto out; + + ret = velocity_init_rd_ring(vptr); + if (ret < 0) + goto err_free_desc_rings; + + ret = velocity_init_td_ring(vptr); + if (ret < 0) + goto err_free_rd_ring; + + /* Ensure chip is running */ + pci_set_power_state(vptr->pdev, 0); + + velocity_init_registers(vptr, VELOCITY_INIT_COLD); + + ret = request_irq(vptr->pdev->irq, &velocity_intr, SA_SHIRQ, + dev->name, dev); + if (ret < 0) { + /* Power down the chip */ + pci_set_power_state(vptr->pdev, 3); + goto err_free_td_ring; + } + + mac_enable_int(vptr->mac_regs); + netif_start_queue(dev); + vptr->flags |= VELOCITY_FLAGS_OPENED; +out: + return ret; + +err_free_td_ring: + velocity_free_td_ring(vptr); +err_free_rd_ring: + velocity_free_rd_ring(vptr); +err_free_desc_rings: + velocity_free_rings(vptr); + goto out; +} + +/** + * velocity_change_mtu - MTU change callback + * @dev: network device + * @new_mtu: desired MTU + * + * Handle requests from the networking layer for MTU change on + * this interface. It gets called on a change by the network layer. + * Return zero for success or negative posix error code. + */ + +static int velocity_change_mtu(struct net_device *dev, int new_mtu) +{ + struct velocity_info *vptr = dev->priv; + unsigned long flags; + int oldmtu = dev->mtu; + int ret = 0; + + if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { + VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", + vptr->dev->name); + return -EINVAL; + } + + if (new_mtu != oldmtu) { + spin_lock_irqsave(&vptr->lock, flags); + + netif_stop_queue(dev); + velocity_shutdown(vptr); + + velocity_free_td_ring(vptr); + velocity_free_rd_ring(vptr); + + dev->mtu = new_mtu; + if (new_mtu > 8192) + vptr->rx_buf_sz = 9 * 1024; + else if (new_mtu > 4096) + vptr->rx_buf_sz = 8192; + else + vptr->rx_buf_sz = 4 * 1024; + + ret = velocity_init_rd_ring(vptr); + if (ret < 0) + goto out_unlock; + + ret = velocity_init_td_ring(vptr); + if (ret < 0) + goto out_unlock; + + velocity_init_registers(vptr, VELOCITY_INIT_COLD); + + mac_enable_int(vptr->mac_regs); + netif_start_queue(dev); +out_unlock: + spin_unlock_irqrestore(&vptr->lock, flags); + } + + return ret; +} + +/** + * velocity_shutdown - shut down the chip + * @vptr: velocity to deactivate + * + * Shuts down the internal operations of the velocity and + * disables interrupts, autopolling, transmit and receive + */ + +static void velocity_shutdown(struct velocity_info *vptr) +{ + struct mac_regs * regs = vptr->mac_regs; + mac_disable_int(regs); + writel(CR0_STOP, ®s->CR0Set); + writew(0xFFFF, ®s->TDCSRClr); + writeb(0xFF, ®s->RDCSRClr); + safe_disable_mii_autopoll(regs); + mac_clear_isr(regs); +} + +/** + * velocity_close - close adapter callback + * @dev: network device + * + * Callback from the network layer when the velocity is being + * deactivated by the network layer + */ + +static int velocity_close(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + + netif_stop_queue(dev); + velocity_shutdown(vptr); + + if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) + velocity_get_ip(vptr); + if (dev->irq != 0) + free_irq(dev->irq, dev); + + /* Power down the chip */ + pci_set_power_state(vptr->pdev, 3); + + /* Free the resources */ + velocity_free_td_ring(vptr); + velocity_free_rd_ring(vptr); + velocity_free_rings(vptr); + + vptr->flags &= (~VELOCITY_FLAGS_OPENED); + return 0; +} + +/** + * velocity_xmit - transmit packet callback + * @skb: buffer to transmit + * @dev: network device + * + * Called 
by the networ layer to request a packet is queued to + * the velocity. Returns zero on success. + */ + +static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + int qnum = 0; + struct tx_desc *td_ptr; + struct velocity_td_info *tdinfo; + unsigned long flags; + int index; + + int pktlen = skb->len; + + spin_lock_irqsave(&vptr->lock, flags); + + index = vptr->td_curr[qnum]; + td_ptr = &(vptr->td_rings[qnum][index]); + tdinfo = &(vptr->td_infos[qnum][index]); + + td_ptr->tdesc1.TCPLS = TCPLS_NORMAL; + td_ptr->tdesc1.TCR = TCR0_TIC; + td_ptr->td_buf[0].queue = 0; + + /* + * Pad short frames. + */ + if (pktlen < ETH_ZLEN) { + /* Cannot occur until ZC support */ + if(skb_linearize(skb, GFP_ATOMIC)) + return 0; + pktlen = ETH_ZLEN; + memcpy(tdinfo->buf, skb->data, skb->len); + memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len); + tdinfo->skb = skb; + tdinfo->skb_dma[0] = tdinfo->buf_dma; + td_ptr->tdesc0.pktsize = pktlen; + td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); + td_ptr->td_buf[0].pa_high = 0; + td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; + tdinfo->nskb_dma = 1; + td_ptr->tdesc1.CMDZ = 2; + } else +#ifdef VELOCITY_ZERO_COPY_SUPPORT + if (skb_shinfo(skb)->nr_frags > 0) { + int nfrags = skb_shinfo(skb)->nr_frags; + tdinfo->skb = skb; + if (nfrags > 6) { + skb_linearize(skb, GFP_ATOMIC); + memcpy(tdinfo->buf, skb->data, skb->len); + tdinfo->skb_dma[0] = tdinfo->buf_dma; + td_ptr->tdesc0.pktsize = + td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); + td_ptr->td_buf[0].pa_high = 0; + td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; + tdinfo->nskb_dma = 1; + td_ptr->tdesc1.CMDZ = 2; + } else { + int i = 0; + tdinfo->nskb_dma = 0; + tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE); + + td_ptr->tdesc0.pktsize = pktlen; + + /* FIXME: support 48bit DMA later */ + td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); + td_ptr->td_buf[i].pa_high = 0; + td_ptr->td_buf[i].bufsize = skb->len->skb->data_len; + + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + void *addr = ((void *) page_address(frag->page + frag->page_offset)); + + tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); + + td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); + td_ptr->td_buf[i + 1].pa_high = 0; + td_ptr->td_buf[i + 1].bufsize = frag->size; + } + tdinfo->nskb_dma = i - 1; + td_ptr->tdesc1.CMDZ = i; + } + + } else +#endif + { + /* + * Map the linear network buffer into PCI space and + * add it to the transmit ring. 
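For illustration only (not from the driver): velocity_xmit() above bounces frames shorter than ETH_ZLEN through a preallocated buffer and zero-pads them before handing them to the hardware. A minimal standalone sketch of that padding step, with a hypothetical pad_short_frame() helper:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60   /* minimum Ethernet frame length, excluding the FCS */

/* Copy a short frame into a bounce buffer and zero-pad it to ETH_ZLEN, as
 * velocity_xmit() does for small packets.  'bounce' must hold at least
 * ETH_ZLEN bytes; returns the length handed to the hardware. */
static size_t pad_short_frame(const uint8_t *data, size_t len, uint8_t *bounce)
{
    if (len >= ETH_ZLEN)
        return len;                 /* long enough, no bounce copy needed */
    memcpy(bounce, data, len);
    memset(bounce + len, 0, ETH_ZLEN - len);
    return ETH_ZLEN;
}

int main(void)
{
    uint8_t frame[18] = { 0xff, 0xff };   /* deliberately short frame */
    uint8_t bounce[ETH_ZLEN];

    printf("padded length = %zu\n", pad_short_frame(frame, sizeof(frame), bounce));
    return 0;
}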
+ */ + tdinfo->skb = skb; + tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); + td_ptr->tdesc0.pktsize = pktlen; + td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); + td_ptr->td_buf[0].pa_high = 0; + td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize; + tdinfo->nskb_dma = 1; + td_ptr->tdesc1.CMDZ = 2; + } + + if (vptr->flags & VELOCITY_FLAGS_TAGGING) { + td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff); + td_ptr->tdesc1.pqinf.priority = 0; + td_ptr->tdesc1.pqinf.CFI = 0; + td_ptr->tdesc1.TCR |= TCR0_VETAG; + } + + /* + * Handle hardware checksum + */ + if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) + && (skb->ip_summed == CHECKSUM_HW)) { + struct iphdr *ip = skb->nh.iph; + if (ip->protocol == IPPROTO_TCP) + td_ptr->tdesc1.TCR |= TCR0_TCPCK; + else if (ip->protocol == IPPROTO_UDP) + td_ptr->tdesc1.TCR |= (TCR0_UDPCK); + td_ptr->tdesc1.TCR |= TCR0_IPCK; + } + { + + int prev = index - 1; + + if (prev < 0) + prev = vptr->options.numtx - 1; + td_ptr->tdesc0.owner = OWNED_BY_NIC; + vptr->td_used[qnum]++; + vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; + + if (AVAIL_TD(vptr, qnum) < 1) + netif_stop_queue(dev); + + td_ptr = &(vptr->td_rings[qnum][prev]); + td_ptr->td_buf[0].queue = 1; + mac_tx_queue_wake(vptr->mac_regs, qnum); + } + dev->trans_start = jiffies; + spin_unlock_irqrestore(&vptr->lock, flags); + return 0; +} + +/** + * velocity_intr - interrupt callback + * @irq: interrupt number + * @dev_instance: interrupting device + * @pt_regs: CPU register state at interrupt + * + * Called whenever an interrupt is generated by the velocity + * adapter IRQ line. We may not be the source of the interrupt + * and need to identify initially if we are, and if not exit as + * efficiently as possible. + */ + +static int velocity_intr(int irq, void *dev_instance, struct pt_regs *regs) +{ + struct net_device *dev = dev_instance; + struct velocity_info *vptr = dev->priv; + u32 isr_status; + int max_count = 0; + + + spin_lock(&vptr->lock); + isr_status = mac_read_isr(vptr->mac_regs); + + /* Not us ? */ + if (isr_status == 0) { + spin_unlock(&vptr->lock); + return IRQ_NONE; + } + + mac_disable_int(vptr->mac_regs); + + /* + * Keep processing the ISR until we have completed + * processing and the isr_status becomes zero + */ + + while (isr_status != 0) { + mac_write_isr(vptr->mac_regs, isr_status); + if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI))) + velocity_error(vptr, isr_status); + if (isr_status & (ISR_PRXI | ISR_PPRXI)) + max_count += velocity_rx_srv(vptr, isr_status); + if (isr_status & (ISR_PTXI | ISR_PPTXI)) + max_count += velocity_tx_srv(vptr, isr_status); + isr_status = mac_read_isr(vptr->mac_regs); + if (max_count > vptr->options.int_works) + { + printk(KERN_WARNING "%s: excessive work at interrupt.\n", + dev->name); + max_count = 0; + } + } + spin_unlock(&vptr->lock); + mac_enable_int(vptr->mac_regs); + return IRQ_HANDLED; + +} + + +/** + * ether_crc - ethernet CRC function + * + * Compute an ethernet CRC hash of the data block provided. This + * is not performance optimised but is not needed in performance + * critical code paths. + * + * FIXME: could we use shared code here ? + */ + +static inline u32 ether_crc(int length, unsigned char *data) +{ + static unsigned const ethernet_polynomial = 0x04c11db7U; + + int crc = -1; + + while (--length >= 0) { + unsigned char current_octet = *data++; + int bit; + for (bit = 0; bit < 8; bit++, current_octet >>= 1) { + crc = (crc << 1) ^ ((crc < 0) ^ (current_octet & 1) ? 
ethernet_polynomial : 0); + } + } + return crc; +} + +/** + * velocity_set_multi - filter list change callback + * @dev: network device + * + * Called by the network layer when the filter lists need to change + * for a velocity adapter. Reload the CAMs with the new address + * filter ruleset. + */ + +static void velocity_set_multi(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + struct mac_regs * regs = vptr->mac_regs; + u8 rx_mode; + int i; + struct dev_mc_list *mclist; + + if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ + /* Unconditionally log net taps. */ + printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name); + writel(0xffffffff, ®s->MARCAM[0]); + writel(0xffffffff, ®s->MARCAM[4]); + rx_mode = (RCR_AM | RCR_AB | RCR_PROM); + } else if ((dev->mc_count > vptr->multicast_limit) + || (dev->flags & IFF_ALLMULTI)) { + writel(0xffffffff, ®s->MARCAM[0]); + writel(0xffffffff, ®s->MARCAM[4]); + rx_mode = (RCR_AM | RCR_AB); + } else { + int offset = MCAM_SIZE - vptr->multicast_limit; + mac_get_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + + for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) { + mac_set_cam(regs, i + offset, mclist->dmi_addr, VELOCITY_MULTICAST_CAM); + vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); + } + + mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM); + rx_mode = (RCR_AM | RCR_AB); + } + if (dev->mtu > 1500) + rx_mode |= RCR_AL; + + BYTE_REG_BITS_ON(rx_mode, ®s->RCR); + +} + +/** + * velocity_get_status - statistics callback + * @dev: network device + * + * Callback from the network layer to allow driver statistics + * to be resynchronized with hardware collected state. In the + * case of the velocity we need to pull the MIB counters from + * the hardware into the counters before letting the network + * layer display them. + */ + +static struct net_device_stats *velocity_get_stats(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + + /* If the hardware is down, don't touch MII */ + if(!netif_running(dev)) + return &vptr->stats; + + spin_lock_irq(&vptr->lock); + velocity_update_hw_mibs(vptr); + spin_unlock_irq(&vptr->lock); + + vptr->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts]; + vptr->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts]; + vptr->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors]; + +// unsigned long rx_dropped; /* no space in linux buffers */ + vptr->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions]; + /* detailed rx_errors: */ +// unsigned long rx_length_errors; +// unsigned long rx_over_errors; /* receiver ring buff overflow */ + vptr->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE]; +// unsigned long rx_frame_errors; /* recv'd frame alignment error */ +// unsigned long rx_fifo_errors; /* recv'r fifo overrun */ +// unsigned long rx_missed_errors; /* receiver missed packet */ + + /* detailed tx_errors */ +// unsigned long tx_fifo_errors; + + return &vptr->stats; +} + + +/** + * velocity_ioctl - ioctl entry point + * @dev: network device + * @rq: interface request ioctl + * @cmd: command code + * + * Called when the user issues an ioctl request to the network + * device in question. The velocity interface supports MII. 
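Illustrative aside, not part of the original patch: the bitwise ether_crc() above is the classic big-endian Ethernet CRC-32 (polynomial 0x04c11db7) that many 10/100 MACs use to hash multicast addresses. Below is an unsigned, standalone restatement of the same loop; the multicast address in main() and the use of the top six bits as a hash bin are purely illustrative (this driver programs CAM entries rather than a hash filter).

#include <stdint.h>
#include <stdio.h>

/* Same algorithm as ether_crc() in the driver, written with unsigned
 * arithmetic: feed each byte LSB-first into a left-shifting CRC-32. */
static uint32_t ether_crc_be(size_t len, const uint8_t *data)
{
    const uint32_t poly = 0x04c11db7u;
    uint32_t crc = 0xffffffffu;

    for (size_t i = 0; i < len; i++) {
        uint8_t octet = data[i];
        for (int bit = 0; bit < 8; bit++, octet >>= 1) {
            uint32_t mix = (crc >> 31) ^ (octet & 1);  /* (crc < 0) ^ data bit */
            crc = (crc << 1) ^ (mix ? poly : 0);
        }
    }
    return crc;
}

int main(void)
{
    const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    uint32_t crc = ether_crc_be(sizeof(mcast), mcast);

    printf("crc = 0x%08x, top-6-bit hash bin = %u\n", crc, crc >> 26);
    return 0;
}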
+ */ + +static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct velocity_info *vptr = dev->priv; + int ret; + + /* If we are asked for information and the device is power + saving then we need to bring the device back up to talk to it */ + + if(!netif_running(dev)) + pci_set_power_state(vptr->pdev, 0); + + switch (cmd) { + case SIOCGMIIPHY: /* Get address of MII PHY in use. */ + case SIOCGMIIREG: /* Read MII PHY register. */ + case SIOCSMIIREG: /* Write to MII PHY register. */ + ret = velocity_mii_ioctl(dev, rq, cmd); + break; + + default: + ret = -EOPNOTSUPP; + } + if(!netif_running(dev)) + pci_set_power_state(vptr->pdev, 3); + + + return ret; +} + +/* + * Definition for our device driver. The PCI layer interface + * uses this to handle all our card discover and plugging + */ + +static struct pci_driver velocity_driver = { + name:VELOCITY_NAME, + id_table:velocity_id_table, + probe:velocity_found1, + remove:velocity_remove1, +#ifdef CONFIG_PM + suspend:velocity_suspend, + resume:velocity_resume, +#endif +}; + +/** + * velocity_init_module - load time function + * + * Called when the velocity module is loaded. The PCI driver + * is registered with the PCI layer, and in turn will call + * the probe functions for each velocity adapter installed + * in the system. + */ + +static int __init velocity_init_module(void) +{ + int ret; + ret = pci_module_init(&velocity_driver); + +#ifdef CONFIG_PM + register_inetaddr_notifier(&velocity_inetaddr_notifier); +#endif + return ret; +} + +/** + * velocity_cleanup - module unload + * + * When the velocity hardware is unloaded this function is called. + * It will clean up the notifiers and the unregister the PCI + * driver interface for this hardware. This in turn cleans up + * all discovered interfaces before returning from the function + */ + +static void __exit velocity_cleanup_module(void) +{ +#ifdef CONFIG_PM + unregister_inetaddr_notifier(&velocity_inetaddr_notifier); +#endif + pci_unregister_driver(&velocity_driver); +} + +module_init(velocity_init_module); +module_exit(velocity_cleanup_module); + + +/* + * MII access , media link mode setting functions + */ + + +/** + * mii_init - set up MII + * @vptr: velocity adapter + * @mii_status: links tatus + * + * Set up the PHY for the current link state. + */ + +static void mii_init(struct velocity_info *vptr, u32 mii_status) +{ + u16 BMCR; + + switch (PHYID_GET_PHY_ID(vptr->phy_id)) { + case PHYID_CICADA_CS8201: + /* + * Reset to hardware default + */ + MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); + /* + * Turn on ECHODIS bit in NWay-forced full mode and turn it + * off it in NWay-forced half mode for NWay-forced v.s. + * legacy-forced issue. + */ + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + /* + * Turn on Link/Activity LED enable bit for CIS8201 + */ + MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs); + break; + case PHYID_VT3216_32BIT: + case PHYID_VT3216_64BIT: + /* + * Reset to hardware default + */ + MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); + /* + * Turn on ECHODIS bit in NWay-forced full mode and turn it + * off it in NWay-forced half mode for NWay-forced v.s. 
+ * legacy-forced issue + */ + if (vptr->mii_status & VELOCITY_DUPLEX_FULL) + MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + else + MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); + break; + + case PHYID_MARVELL_1000: + case PHYID_MARVELL_1000S: + /* + * Assert CRS on Transmit + */ + MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs); + /* + * Reset to hardware default + */ + MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); + break; + default: + ; + } + velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR); + if (BMCR & BMCR_ISO) { + BMCR &= ~BMCR_ISO; + velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR); + } +} + +/** + * safe_disable_mii_autopoll - autopoll off + * @regs: velocity registers + * + * Turn off the autopoll and wait for it to disable on the chip + */ + +static void safe_disable_mii_autopoll(struct mac_regs * regs) +{ + u16 ww; + + /* turn off MAUTO */ + writeb(0, ®s->MIICR); + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + udelay(1); + if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } +} + +/** + * enable_mii_autopoll - turn on autopolling + * @regs: velocity registers + * + * Enable the MII link status autopoll feature on the Velocity + * hardware. Wait for it to enable. + */ + +static void enable_mii_autopoll(struct mac_regs * regs) +{ + int ii; + + writeb(0, &(regs->MIICR)); + writeb(MIIADR_SWMPL, ®s->MIIADR); + + for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { + udelay(1); + if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } + + writeb(MIICR_MAUTO, ®s->MIICR); + + for (ii = 0; ii < W_MAX_TIMEOUT; ii++) { + udelay(1); + if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, ®s->MIISR)) + break; + } + +} + +/** + * velocity_mii_read - read MII data + * @regs: velocity registers + * @index: MII register index + * @data: buffer for received data + * + * Perform a single read of an MII 16bit register. Returns zero + * on success or -ETIMEDOUT if the PHY did not respond. + */ + +static int velocity_mii_read(struct mac_regs * regs, u8 index, u16 *data) +{ + u16 ww; + + /* + * Disable MIICR_MAUTO, so that mii addr can be set normally + */ + safe_disable_mii_autopoll(regs); + + writeb(index, ®s->MIIADR); + + BYTE_REG_BITS_ON(MIICR_RCMD, ®s->MIICR); + + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + if (!(readb(®s->MIICR) & MIICR_RCMD)) + break; + } + + *data = readw(®s->MIIDATA); + + enable_mii_autopoll(regs); + if (ww == W_MAX_TIMEOUT) + return -ETIMEDOUT; + return 0; +} + +/** + * velocity_mii_write - write MII data + * @regs: velocity registers + * @index: MII register index + * @data: 16bit data for the MII register + * + * Perform a single write to an MII 16bit register. Returns zero + * on success or -ETIMEDOUT if the PHY did not respond. 
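For illustration only (not from the driver): velocity_mii_read() and velocity_mii_write() above follow the usual bounded busy-wait pattern — kick a command bit, poll until the hardware clears it or the loop budget runs out, and report -ETIMEDOUT on expiry. A standalone sketch of that shape, with a simulated status flag standing in for the MIICR register and an illustrative loop bound:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define W_MAX_TIMEOUT 0x0fff   /* illustrative bound; the driver's value lives in its header */

/* Stand-in for a hardware busy flag: here it "clears" after a few reads. */
static int fake_busy_reads = 5;
static int read_busy_flag(void)
{
    return fake_busy_reads-- > 0;
}

/* Bounded busy-wait: poll until the busy flag clears or the loop count runs
 * out, the same shape as the MII command polling in the driver. */
static int wait_not_busy(void)
{
    for (unsigned int ww = 0; ww < W_MAX_TIMEOUT; ww++) {
        if (!read_busy_flag())
            return 0;
        usleep(1);             /* the driver uses udelay() between polls */
    }
    return -ETIMEDOUT;
}

int main(void)
{
    printf("wait_not_busy() = %d\n", wait_not_busy());
    return 0;
}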
+ */ + +static int velocity_mii_write(struct mac_regs * regs, u8 mii_addr, u16 data) +{ + u16 ww; + + /* + * Disable MIICR_MAUTO, so that mii addr can be set normally + */ + safe_disable_mii_autopoll(regs); + + /* MII reg offset */ + writeb(mii_addr, ®s->MIIADR); + /* set MII data */ + writew(data, ®s->MIIDATA); + + /* turn on MIICR_WCMD */ + BYTE_REG_BITS_ON(MIICR_WCMD, ®s->MIICR); + + /* W_MAX_TIMEOUT is the timeout period */ + for (ww = 0; ww < W_MAX_TIMEOUT; ww++) { + udelay(5); + if (!(readb(®s->MIICR) & MIICR_WCMD)) + break; + } + enable_mii_autopoll(regs); + + if (ww == W_MAX_TIMEOUT) + return -ETIMEDOUT; + return 0; +} + +/** + * velocity_get_opt_media_mode - get media selection + * @vptr: velocity adapter + * + * Get the media mode stored in EEPROM or module options and load + * mii_status accordingly. The requested link state information + * is also returned. + */ + +static u32 velocity_get_opt_media_mode(struct velocity_info *vptr) +{ + u32 status = 0; + + switch (vptr->options.spd_dpx) { + case SPD_DPX_AUTO: + status = VELOCITY_AUTONEG_ENABLE; + break; + case SPD_DPX_100_FULL: + status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL; + break; + case SPD_DPX_10_FULL: + status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL; + break; + case SPD_DPX_100_HALF: + status = VELOCITY_SPEED_100; + break; + case SPD_DPX_10_HALF: + status = VELOCITY_SPEED_10; + break; + } + vptr->mii_status = status; + return status; +} + +/** + * mii_set_auto_on - autonegotiate on + * @vptr: velocity + * + * Enable autonegotation on this interface + */ + +static void mii_set_auto_on(struct velocity_info *vptr) +{ + if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs)) + MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); + else + MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); +} + + +/* +static void mii_set_auto_off(struct velocity_info * vptr) +{ + MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); +} +*/ + +/** + * set_mii_flow_control - flow control setup + * @vptr: velocity interface + * + * Set up the flow control on this interface according to + * the supplied user/eeprom options. + */ + +static void set_mii_flow_control(struct velocity_info *vptr) +{ + /*Enable or Disable PAUSE in ANAR */ + switch (vptr->options.flow_cntl) { + case FLOW_CNTL_TX: + MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + break; + + case FLOW_CNTL_RX: + MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + break; + + case FLOW_CNTL_TX_RX: + MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + break; + + case FLOW_CNTL_DISABLE: + MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); + break; + default: + break; + } +} + +/** + * velocity_set_media_mode - set media mode + * @mii_status: old MII link state + * + * Check the media link state and configure the flow control + * PHY and also velocity hardware setup accordingly. In particular + * we need to set up CD polling and frame bursting. 
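Illustrative aside, not part of the original patch: set_mii_flow_control() above boils down to choosing which of the two IEEE 802.3 pause-advertisement bits (PAUSE and ASM_DIR in the ANAR register) to set for each flow-control option. The standalone table below mirrors that mapping; the bit positions and enum names are illustrative stand-ins, the driver uses ANAR_PAUSE/ANAR_ASMDIR from its own header.

#include <stdio.h>

/* Pause advertisement bits in the MII ANAR register; positions here are
 * illustrative stand-ins for the driver's ANAR_PAUSE / ANAR_ASMDIR. */
#define ADV_PAUSE   (1u << 10)
#define ADV_ASM_DIR (1u << 11)

enum flow_ctl { FLOW_DISABLE, FLOW_TX, FLOW_RX, FLOW_TX_RX };

/* Mirror of the switch in set_mii_flow_control(): which pause bits get
 * advertised for each requested flow-control mode. */
static unsigned int pause_adv_bits(enum flow_ctl mode)
{
    switch (mode) {
    case FLOW_TX:      return ADV_ASM_DIR;              /* asymmetric, tx only */
    case FLOW_RX:      /* fall through: rx and rx+tx advertise both bits */
    case FLOW_TX_RX:   return ADV_PAUSE | ADV_ASM_DIR;
    case FLOW_DISABLE:
    default:           return 0;
    }
}

int main(void)
{
    const char *names[] = { "disable", "tx", "rx", "tx+rx" };

    for (int m = FLOW_DISABLE; m <= FLOW_TX_RX; m++)
        printf("%-7s -> 0x%04x\n", names[m], pause_adv_bits((enum flow_ctl)m));
    return 0;
}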
+ */ + +static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) +{ + u32 curr_status; + struct mac_regs * regs = vptr->mac_regs; + + vptr->mii_status = mii_check_media_mode(vptr->mac_regs); + curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL); + + /* Set mii link status */ + set_mii_flow_control(vptr); + + /* + Check if new status is consisent with current status + if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) + || (mii_status==curr_status)) { + vptr->mii_status=mii_check_media_mode(vptr->mac_regs); + vptr->mii_status=check_connection_type(vptr->mac_regs); + VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n"); + return 0; + } + */ + + if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) { + MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); + } + + /* + * If connection type is AUTO + */ + if (mii_status & VELOCITY_AUTONEG_ENABLE) { + VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n"); + /* clear force MAC mode bit */ + BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, ®s->CHIPGCR); + /* set duplex mode of MAC according to duplex mode of MII */ + MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs); + MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); + MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); + + /* enable AUTO-NEGO mode */ + mii_set_auto_on(vptr); + } else { + u16 ANAR; + u8 CHIPGCR; + + /* + * 1. if it's 3119, disable frame bursting in halfduplex mode + * and enable it in fullduplex mode + * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR + * 3. only enable CD heart beat counter in 10HD mode + */ + + /* set force MAC mode bit */ + BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); + + CHIPGCR = readb(®s->CHIPGCR); + CHIPGCR &= ~CHIPGCR_FCGMII; + + if (mii_status & VELOCITY_DUPLEX_FULL) { + CHIPGCR |= CHIPGCR_FCFDX; + writeb(CHIPGCR, ®s->CHIPGCR); + VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n"); + if (vptr->rev_id < REV_ID_VT3216_A0) + BYTE_REG_BITS_OFF(TCR_TB2BDIS, ®s->TCR); + } else { + CHIPGCR &= ~CHIPGCR_FCFDX; + VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n"); + writeb(CHIPGCR, ®s->CHIPGCR); + if (vptr->rev_id < REV_ID_VT3216_A0) + BYTE_REG_BITS_ON(TCR_TB2BDIS, ®s->TCR); + } + + MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); + + if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) { + BYTE_REG_BITS_OFF(TESTCFG_HBDIS, ®s->TESTCFG); + } else { + BYTE_REG_BITS_ON(TESTCFG_HBDIS, ®s->TESTCFG); + } + /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */ + velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR); + ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)); + if (mii_status & VELOCITY_SPEED_100) { + if (mii_status & VELOCITY_DUPLEX_FULL) + ANAR |= ANAR_TXFD; + else + ANAR |= ANAR_TX; + } else { + if (mii_status & VELOCITY_DUPLEX_FULL) + ANAR |= ANAR_10FD; + else + ANAR |= ANAR_10; + } + velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR); + /* enable AUTO-NEGO mode */ + mii_set_auto_on(vptr); + /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */ + } + /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */ + /* vptr->mii_status=check_connection_type(vptr->mac_regs); */ + return VELOCITY_LINK_CHANGE; +} + +/** + * mii_check_media_mode - check media state + * @regs: velocity registers + * + * Check the current MII status and determine the link status + * accordingly + */ + +static u32 
mii_check_media_mode(struct mac_regs * regs) +{ + u32 status = 0; + u16 ANAR; + + if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs)) + status |= VELOCITY_LINK_FAIL; + + if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs)) + status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; + else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs)) + status |= (VELOCITY_SPEED_1000); + else { + velocity_mii_read(regs, MII_REG_ANAR, &ANAR); + if (ANAR & ANAR_TXFD) + status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL); + else if (ANAR & ANAR_TX) + status |= VELOCITY_SPEED_100; + else if (ANAR & ANAR_10FD) + status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL); + else + status |= (VELOCITY_SPEED_10); + } + + if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { + velocity_mii_read(regs, MII_REG_ANAR, &ANAR); + if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) + == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { + if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) + status |= VELOCITY_AUTONEG_ENABLE; + } + } + + return status; +} + +static u32 check_connection_type(struct mac_regs * regs) +{ + u32 status = 0; + u8 PHYSR0; + u16 ANAR; + PHYSR0 = readb(®s->PHYSR0); + + /* + if (!(PHYSR0 & PHYSR0_LINKGD)) + status|=VELOCITY_LINK_FAIL; + */ + + if (PHYSR0 & PHYSR0_FDPX) + status |= VELOCITY_DUPLEX_FULL; + + if (PHYSR0 & PHYSR0_SPDG) + status |= VELOCITY_SPEED_1000; + if (PHYSR0 & PHYSR0_SPD10) + status |= VELOCITY_SPEED_10; + else + status |= VELOCITY_SPEED_100; + + if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { + velocity_mii_read(regs, MII_REG_ANAR, &ANAR); + if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) + == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { + if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) + status |= VELOCITY_AUTONEG_ENABLE; + } + } + + return status; +} + +/** + * enable_flow_control_ability - flow control + * @vptr: veloity to configure + * + * Set up flow control according to the flow control options + * determined by the eeprom/configuration. + */ + +static void enable_flow_control_ability(struct velocity_info *vptr) +{ + + struct mac_regs * regs = vptr->mac_regs; + + switch (vptr->options.flow_cntl) { + + case FLOW_CNTL_DEFAULT: + if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, ®s->PHYSR0)) + writel(CR0_FDXRFCEN, ®s->CR0Set); + else + writel(CR0_FDXRFCEN, ®s->CR0Clr); + + if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, ®s->PHYSR0)) + writel(CR0_FDXTFCEN, ®s->CR0Set); + else + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_TX: + writel(CR0_FDXTFCEN, ®s->CR0Set); + writel(CR0_FDXRFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_RX: + writel(CR0_FDXRFCEN, ®s->CR0Set); + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + case FLOW_CNTL_TX_RX: + writel(CR0_FDXTFCEN, ®s->CR0Set); + writel(CR0_FDXRFCEN, ®s->CR0Set); + break; + + case FLOW_CNTL_DISABLE: + writel(CR0_FDXRFCEN, ®s->CR0Clr); + writel(CR0_FDXTFCEN, ®s->CR0Clr); + break; + + default: + break; + } + +} + + +/** + * velocity_ethtool_up - pre hook for ethtool + * @dev: network device + * + * Called before an ethtool operation. We need to make sure the + * chip is out of D3 state before we poke at it. + */ + +static int velocity_ethtool_up(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + if(!netif_running(dev)) + pci_set_power_state(vptr->pdev, 0); + return 0; +} + +/** + * velocity_ethtool_down - post hook for ethtool + * @dev: network device + * + * Called after an ethtool operation. 
Restore the chip back to D3 + * state if it isn't running. + */ + +static void velocity_ethtool_down(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + if(!netif_running(dev)) + pci_set_power_state(vptr->pdev, 3); +} + +static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct velocity_info *vptr = dev->priv; + struct mac_regs * regs = vptr->mac_regs; + u32 status; + status = check_connection_type(vptr->mac_regs); + + cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; + if (status & VELOCITY_SPEED_100) + cmd->speed = SPEED_100; + else + cmd->speed = SPEED_10; + cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + cmd->port = PORT_TP; + cmd->transceiver = XCVR_INTERNAL; + cmd->phy_address = readb(®s->MIIADR) & 0x1F; + + if (status & VELOCITY_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; + else + cmd->duplex = DUPLEX_HALF; + + return 0; +} + +static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct velocity_info *vptr = dev->priv; + u32 curr_status; + u32 new_status = 0; + int ret = 0; + + curr_status = check_connection_type(vptr->mac_regs); + curr_status &= (~VELOCITY_LINK_FAIL); + + new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0); + new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0); + new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0); + new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0); + + if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE))) + ret = -EINVAL; + else + velocity_set_media_mode(vptr, new_status); + + return ret; +} + +static u32 velocity_get_link(struct net_device *dev) +{ + struct velocity_info *vptr = dev->priv; + struct mac_regs * regs = vptr->mac_regs; + return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, ®s->PHYSR0) ? 
0 : 1; +} + +static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + struct velocity_info *vptr = dev->priv; + strcpy(info->driver, VELOCITY_NAME); + strcpy(info->version, VELOCITY_VERSION); + strcpy(info->bus_info, vptr->pdev->slot_name); +} + +static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct velocity_info *vptr = dev->priv; + wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP; + wol->wolopts |= WAKE_MAGIC; + /* + if (vptr->wol_opts & VELOCITY_WOL_PHY) + wol.wolopts|=WAKE_PHY; + */ + if (vptr->wol_opts & VELOCITY_WOL_UCAST) + wol->wolopts |= WAKE_UCAST; + if (vptr->wol_opts & VELOCITY_WOL_ARP) + wol->wolopts |= WAKE_ARP; + memcpy(&wol->sopass, vptr->wol_passwd, 6); +} + +static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct velocity_info *vptr = dev->priv; + + if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP))) + return -EFAULT; + vptr->wol_opts = VELOCITY_WOL_MAGIC; + + /* + if (wol.wolopts & WAKE_PHY) { + vptr->wol_opts|=VELOCITY_WOL_PHY; + vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED; + } + */ + + if (wol->wolopts & WAKE_MAGIC) { + vptr->wol_opts |= VELOCITY_WOL_MAGIC; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + if (wol->wolopts & WAKE_UCAST) { + vptr->wol_opts |= VELOCITY_WOL_UCAST; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + if (wol->wolopts & WAKE_ARP) { + vptr->wol_opts |= VELOCITY_WOL_ARP; + vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED; + } + memcpy(vptr->wol_passwd, wol->sopass, 6); + return 0; +} + +static u32 velocity_get_msglevel(struct net_device *dev) +{ + return msglevel; +} + +static void velocity_set_msglevel(struct net_device *dev, u32 value) +{ + msglevel = value; +} + +static struct ethtool_ops velocity_ethtool_ops = { + .get_settings = velocity_get_settings, + .set_settings = velocity_set_settings, + .get_drvinfo = velocity_get_drvinfo, + .get_wol = velocity_ethtool_get_wol, + .set_wol = velocity_ethtool_set_wol, + .get_msglevel = velocity_get_msglevel, + .set_msglevel = velocity_set_msglevel, + .get_link = velocity_get_link, + .begin = velocity_ethtool_up, + .complete = velocity_ethtool_down +}; + +/** + * velocity_mii_ioctl - MII ioctl handler + * @dev: network device + * @ifr: the ifreq block for the ioctl + * @cmd: the command + * + * Process MII requests made via ioctl from the network layer. 
These + * are used by tools like kudzu to interrogate the link state of the + * hardware + */ + +static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct velocity_info *vptr = dev->priv; + struct mac_regs * regs = vptr->mac_regs; + unsigned long flags; + struct mii_ioctl_data *miidata = (struct mii_ioctl_data *) &(ifr->ifr_data); + int err; + + switch (cmd) { + case SIOCGMIIPHY: + miidata->phy_id = readb(®s->MIIADR) & 0x1f; + break; + case SIOCGMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if(velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0) + return -ETIMEDOUT; + break; + case SIOCSMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + spin_lock_irqsave(&vptr->lock, flags); + err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in); + spin_unlock_irqrestore(&vptr->lock, flags); + check_connection_type(vptr->mac_regs); + if(err) + return err; + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +#ifdef CONFIG_PM + +/** + * velocity_save_context - save registers + * @vptr: velocity + * @context: buffer for stored context + * + * Retrieve the current configuration from the velocity hardware + * and stash it in the context structure, for use by the context + * restore functions. This allows us to save things we need across + * power down states + */ + +static void velocity_save_context(struct velocity_info *vptr, struct velocity_context * context) +{ + struct mac_regs * regs = vptr->mac_regs; + u16 i; + u8 *ptr = (u8 *)regs; + + for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + + for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + + for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) + *((u32 *) (context->mac_reg + i)) = readl(ptr + i); + +} + +/** + * velocity_restore_context - restore registers + * @vptr: velocity + * @context: buffer for stored context + * + * Reload the register configuration from the velocity context + * created by velocity_save_context. 
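For illustration only (not from the driver): velocity_save_context() above, and the restore routine that follows, simply walk word-aligned register ranges and mirror them into a flat buffer indexed by the same byte offsets. A standalone sketch of that bookkeeping, with an in-memory array standing in for the ioremap()ed register window and made-up range bounds:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REG_SPACE 256                 /* illustrative register window size */

static uint8_t mmio[REG_SPACE];       /* stand-in for the mapped registers */
static uint8_t ctx[REG_SPACE];        /* stand-in for velocity_context.mac_reg */

static uint32_t read32(unsigned int off)  { uint32_t v; memcpy(&v, mmio + off, 4); return v; }
static void write32(unsigned int off, uint32_t v) { memcpy(mmio + off, &v, 4); }

/* Copy a word-aligned register range into the context buffer at the same
 * byte offsets, the way velocity_save_context() walks its MAC_REG_* ranges. */
static void save_range(unsigned int start, unsigned int end)
{
    for (unsigned int off = start; off < end; off += 4) {
        uint32_t v = read32(off);
        memcpy(ctx + off, &v, 4);
    }
}

static void restore_range(unsigned int start, unsigned int end)
{
    for (unsigned int off = start; off < end; off += 4) {
        uint32_t v;
        memcpy(&v, ctx + off, 4);
        write32(off, v);
    }
}

int main(void)
{
    write32(0x10, 0xdeadbeefu);
    save_range(0x00, 0x40);
    write32(0x10, 0);                 /* pretend the chip lost its state */
    restore_range(0x00, 0x40);
    printf("restored 0x10 = 0x%08x\n", read32(0x10));
    return 0;
}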
+ */ + +static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context) +{ + struct mac_regs * regs = vptr->mac_regs; + int i; + u8 *ptr = (u8 *)regs; + + for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4) { + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + } + + /* Just skip cr0 */ + for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) { + /* Clear */ + writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4); + /* Set */ + writeb(*((u8 *) (context->mac_reg + i)), ptr + i); + } + + for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4) { + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + } + + for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4) { + writel(*((u32 *) (context->mac_reg + i)), ptr + i); + } + + for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++) { + writeb(*((u8 *) (context->mac_reg + i)), ptr + i); + } + +} + +static int velocity_suspend(struct pci_dev *pdev, u32 state) +{ + struct velocity_info *vptr = pci_get_drvdata(pdev); + unsigned long flags; + + if(!netif_running(vptr->dev)) + return 0; + + netif_device_detach(vptr->dev); + + spin_lock_irqsave(&vptr->lock, flags); + pci_save_state(pdev, vptr->pci_state); +#ifdef ETHTOOL_GWOL + if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) { + velocity_get_ip(vptr); + velocity_save_context(vptr, &vptr->context); + velocity_shutdown(vptr); + velocity_set_wol(vptr); + pci_enable_wake(pdev, 3, 1); + pci_set_power_state(pdev, 3); + } else { + velocity_save_context(vptr, &vptr->context); + velocity_shutdown(vptr); + pci_disable_device(pdev); + pci_set_power_state(pdev, state); + } +#else + pci_set_power_state(pdev, state); +#endif + spin_unlock_irqrestore(&vptr->lock, flags); + return 0; +} + +static int velocity_resume(struct pci_dev *pdev) +{ + struct velocity_info *vptr = pci_get_drvdata(pdev); + unsigned long flags; + int i; + + if(!netif_running(vptr->dev)) + return 0; + + pci_set_power_state(pdev, 0); + pci_enable_wake(pdev, 0, 0); + pci_restore_state(pdev, vptr->pci_state); + + mac_wol_reset(vptr->mac_regs); + + spin_lock_irqsave(&vptr->lock, flags); + velocity_restore_context(vptr, &vptr->context); + velocity_init_registers(vptr, VELOCITY_INIT_WOL); + mac_disable_int(vptr->mac_regs); + + velocity_tx_srv(vptr, 0); + + for (i = 0; i < vptr->num_txq; i++) { + if (vptr->td_used[i]) { + mac_tx_queue_wake(vptr->mac_regs, i); + } + } + + mac_enable_int(vptr->mac_regs); + spin_unlock_irqrestore(&vptr->lock, flags); + netif_device_attach(vptr->dev); + + return 0; +} + +static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; + struct net_device *dev; + struct velocity_info *vptr; + + if (ifa) { + dev = ifa->ifa_dev->dev; + vptr = dev->priv; + velocity_get_ip(vptr); + } + return NOTIFY_DONE; +} +#endif + +/* + * Purpose: Functions to set WOL. 
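Illustrative aside, not part of the original patch: one detail of velocity_restore_context() above worth spelling out is how it rewrites the CR1..CR0 block through SET/CLR register pairs — writing the complement of the saved byte to the CLR register and the byte itself to the SET register forces the register to exactly the saved value without needing a plain write. A standalone model of such a pair:

#include <stdint.h>
#include <stdio.h>

/* Model of a SET/CLR register pair: 1-bits written to "set" turn bits on,
 * 1-bits written to "clr" turn them off. */
struct setclr_reg {
    uint8_t value;
};

static void write_set(struct setclr_reg *r, uint8_t bits) { r->value |= bits; }
static void write_clr(struct setclr_reg *r, uint8_t bits) { r->value &= (uint8_t)~bits; }

/* Force the register to an exact saved value using only the pair, the same
 * two writes velocity_restore_context() issues per byte in the CR range. */
static void restore_exact(struct setclr_reg *r, uint8_t saved)
{
    write_clr(r, (uint8_t)~saved);   /* clear every bit that must be 0 */
    write_set(r, saved);             /* set every bit that must be 1 */
}

int main(void)
{
    struct setclr_reg r = { 0xa5 };

    restore_exact(&r, 0x3c);
    printf("restored value = 0x%02x\n", r.value);   /* prints 0x3c */
    return 0;
}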
+ */ + +const static unsigned short crc16_tab[256] = { + 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, + 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, + 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, + 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, + 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd, + 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5, + 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c, + 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974, + 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb, + 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, + 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, + 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, + 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, + 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1, + 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738, + 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70, + 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7, + 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff, + 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, + 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, + 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, + 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, + 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134, + 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c, + 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3, + 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb, + 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232, + 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, + 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, + 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, + 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, + 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78 +}; + + +static u32 mask_pattern[2][4] = { + {0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */ + {0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff} /* Magic Packet */ +}; + +/** + * ether_crc16 - compute ethernet CRC + * @len: buffer length + * @cp: buffer + * @crc16: initial CRC + * + * Compute a CRC value for a block of data. + * FIXME: can we use generic functions ? + */ + +static u16 ether_crc16(int len, u8 * cp, u16 crc16) +{ + while (len--) + crc16 = (crc16 >> 8) ^ crc16_tab[(crc16 ^ *cp++) & 0xff]; + return (crc16); +} + +/** + * bit_reverse - 16bit reverse + * @data: 16bit data t reverse + * + * Reverse the order of a 16bit value and return the reversed bits + */ + +static u16 bit_reverse(u16 data) +{ + u32 new = 0x00000000; + int ii; + + + for (ii = 0; ii < 16; ii++) { + new |= ((u32) (data & 1) << (31 - ii)); + data >>= 1; + } + + return (u16) (new >> 16); +} + +/** + * wol_calc_crc - WOL CRC + * @pattern: data pattern + * @mask_pattern: mask + * + * Compute the wake on lan crc hashes for the packet header + * we are interested in. 
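For illustration only (not from the driver): crc16_tab[] above is the byte-at-a-time table for the reflected CRC-16 with polynomial 0x8408, and bit_reverse() mirrors the final value before it reaches the pattern-CRC register. The standalone sketch below shows the bitwise step each table entry encodes and reproduces two entries of the table as a sanity check.

#include <stdint.h>
#include <stdio.h>

/* Bitwise form of the reflected CRC-16 step that crc16_tab encodes (reversed
 * polynomial 0x8408).  Each table entry is just eight of these bit steps
 * applied to the index byte with a zero starting CRC. */
static uint16_t crc16_bitwise(uint16_t crc, uint8_t byte)
{
    crc ^= byte;
    for (int bit = 0; bit < 8; bit++)
        crc = (crc & 1) ? (uint16_t)((crc >> 1) ^ 0x8408) : (uint16_t)(crc >> 1);
    return crc;
}

int main(void)
{
    /* Reproduce two of the driver's table entries:
     * crc16_tab[1] == 0x1189 and crc16_tab[2] == 0x2312. */
    printf("entry 1 = 0x%04x\n", crc16_bitwise(0, 0x01));
    printf("entry 2 = 0x%04x\n", crc16_bitwise(0, 0x02));
    return 0;
}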
+ */ + +u16 wol_calc_crc(int size, u8 * pattern, u8 *mask_pattern) +{ + u16 crc = 0xFFFF; + u8 mask; + int i, j; + + for (i = 0; i < size; i++) { + mask = mask_pattern[i]; + + /* Skip this loop if the mask equals to zero */ + if (mask == 0x00) + continue; + + for (j = 0; j < 8; j++) { + if ((mask & 0x01) == 0) { + mask >>= 1; + continue; + } + mask >>= 1; + crc = ether_crc16(1, &(pattern[i * 8 + j]), crc); + } + } + /* Finally, invert the result once to get the correct data */ + crc = ~crc; + return bit_reverse(crc); +} + +/** + * velocity_set_wol - set up for wake on lan + * @vptr: velocity to set WOL status on + * + * Set a card up for wake on lan either by unicast or by + * ARP packet. + * + * FIXME: check static buffer is safe here + */ + +static int velocity_set_wol(struct velocity_info *vptr) +{ + struct mac_regs * regs = vptr->mac_regs; + static u8 buf[256]; + int i; + + writew(0xFFFF, ®s->WOLCRClr); + writeb(WOLCFG_SAB | WOLCFG_SAM, ®s->WOLCFGSet); + writew(WOLCR_MAGIC_EN, ®s->WOLCRSet); + + /* + if (vptr->wol_opts & VELOCITY_WOL_PHY) + writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), ®s->WOLCRSet); + */ + + if (vptr->wol_opts & VELOCITY_WOL_UCAST) { + writew(WOLCR_UNICAST_EN, ®s->WOLCRSet); + } + + if (vptr->wol_opts & VELOCITY_WOL_ARP) { + struct arp_packet *arp = (struct arp_packet *) buf; + u16 crc; + memset(buf, 0, sizeof(struct arp_packet) + 7); + + for (i = 0; i < 4; i++) + writel(mask_pattern[0][i], ®s->ByteMask[0][i]); + + arp->type = htons(ETH_P_ARP); + arp->ar_op = htons(1); + + memcpy(arp->ar_tip, vptr->ip_addr, 4); + + crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf, (u8 *) & mask_pattern[0][0]); + + writew(crc, ®s->PatternCRC[0]); + writew(WOLCR_ARP_EN, ®s->WOLCRSet); + } + + BYTE_REG_BITS_ON(PWCFG_WOLTYPE, ®s->PWCFGSet); + BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, ®s->PWCFGSet); + + writew(0x0FFF, ®s->WOLSRClr); + + if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { + if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) + MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); + + MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); + } + + if (vptr->mii_status & VELOCITY_SPEED_1000) + MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); + + BYTE_REG_BITS_ON(CHIPGCR_FCMODE, ®s->CHIPGCR); + + { + u8 GCR; + GCR = readb(®s->CHIPGCR); + GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX; + writeb(GCR, ®s->CHIPGCR); + } + + BYTE_REG_BITS_OFF(ISR_PWEI, ®s->ISR); + /* Turn on SWPTAG just before entering power mode */ + BYTE_REG_BITS_ON(STICKHW_SWPTAG, ®s->STICKHW); + /* Go to bed ..... */ + BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), ®s->STICKHW); + + return 0; +} + diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c new file mode 100644 index 000000000..694e13f43 --- /dev/null +++ b/drivers/pcmcia/pd6729.c @@ -0,0 +1,732 @@ +/* + * Driver for the Cirrus PD6729 PCI-PCMCIA bridge. + * + * Based on the i82092.c driver. + * + * This software may be used and distributed according to the terms of + * the GNU General Public License, incorporated herein by reference. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "pd6729.h" +#include "i82365.h" +#include "cirrus.h" + +MODULE_LICENSE("GPL"); + +#define MAX_SOCKETS 2 + +/* simple helper functions */ +/* External clock time, in nanoseconds. 
120 ns = 8.33 MHz */ +#define to_cycles(ns) ((ns)/120) + +static spinlock_t port_lock = SPIN_LOCK_UNLOCKED; + +/* basic value read/write functions */ + +static unsigned char indirect_read(struct pd6729_socket *socket, unsigned short reg) +{ + unsigned long port; + unsigned char val; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg += socket->number * 0x40; + port = socket->io_base; + outb(reg, port); + val = inb(port + 1); + spin_unlock_irqrestore(&port_lock, flags); + + return val; +} + +static unsigned short indirect_read16(struct pd6729_socket *socket, unsigned short reg) +{ + unsigned long port; + unsigned short tmp; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg = reg + socket->number * 0x40; + port = socket->io_base; + outb(reg, port); + tmp = inb(port + 1); + reg++; + outb(reg, port); + tmp = tmp | (inb(port + 1) << 8); + spin_unlock_irqrestore(&port_lock, flags); + + return tmp; +} + +static void indirect_write(struct pd6729_socket *socket, unsigned short reg, unsigned char value) +{ + unsigned long port; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg = reg + socket->number * 0x40; + port = socket->io_base; + outb(reg, port); + outb(value, port + 1); + spin_unlock_irqrestore(&port_lock, flags); +} + +static void indirect_setbit(struct pd6729_socket *socket, unsigned short reg, unsigned char mask) +{ + unsigned long port; + unsigned char val; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg = reg + socket->number * 0x40; + port = socket->io_base; + outb(reg, port); + val = inb(port + 1); + val |= mask; + outb(reg, port); + outb(val, port + 1); + spin_unlock_irqrestore(&port_lock, flags); +} + +static void indirect_resetbit(struct pd6729_socket *socket, unsigned short reg, unsigned char mask) +{ + unsigned long port; + unsigned char val; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg = reg + socket->number * 0x40; + port = socket->io_base; + outb(reg, port); + val = inb(port + 1); + val &= ~mask; + outb(reg, port); + outb(val, port + 1); + spin_unlock_irqrestore(&port_lock, flags); +} + +static void indirect_write16(struct pd6729_socket *socket, unsigned short reg, unsigned short value) +{ + unsigned long port; + unsigned char val; + unsigned long flags; + + spin_lock_irqsave(&port_lock, flags); + reg = reg + socket->number * 0x40; + port = socket->io_base; + + outb(reg, port); + val = value & 255; + outb(val, port + 1); + + reg++; + + outb(reg, port); + val = value >> 8; + outb(val, port + 1); + spin_unlock_irqrestore(&port_lock, flags); +} + +/* Interrupt handler functionality */ + +static irqreturn_t pd6729_interrupt(int irq, void *dev, struct pt_regs *regs) +{ + struct pd6729_socket *socket = (struct pd6729_socket *)dev; + int i; + int loopcount = 0; + int handled = 0; + unsigned int events, active = 0; + + while (1) { + loopcount++; + if (loopcount > 20) { + printk(KERN_ERR "pd6729: infinite eventloop in interrupt\n"); + break; + } + + active = 0; + + for (i = 0; i < MAX_SOCKETS; i++) { + unsigned int csc; + + /* card status change register */ + csc = indirect_read(&socket[i], I365_CSC); + if (csc == 0) /* no events on this socket */ + continue; + + handled = 1; + events = 0; + + if (csc & I365_CSC_DETECT) { + events |= SS_DETECT; + dprintk("Card detected in socket %i!\n", i); + } + + if (indirect_read(&socket[i], I365_INTCTL) & I365_PC_IOCARD) { + /* For IO/CARDS, bit 0 means "read the card" */ + events |= (csc & I365_CSC_STSCHG) ? 
SS_STSCHG : 0; + } else { + /* Check for battery/ready events */ + events |= (csc & I365_CSC_BVD1) ? SS_BATDEAD : 0; + events |= (csc & I365_CSC_BVD2) ? SS_BATWARN : 0; + events |= (csc & I365_CSC_READY) ? SS_READY : 0; + } + + if (events) { + pcmcia_parse_events(&socket[i].socket, events); + } + active |= events; + } + + if (active == 0) /* no more events to handle */ + break; + } + return IRQ_RETVAL(handled); +} + +/* socket functions */ + +static void set_bridge_state(struct pd6729_socket *socket) +{ + indirect_write(socket, I365_GBLCTL, 0x00); + indirect_write(socket, I365_GENCTL, 0x00); + + indirect_setbit(socket, I365_INTCTL, 0x08); +} + +static int pd6729_get_status(struct pcmcia_socket *sock, u_int *value) +{ + struct pd6729_socket *socket = container_of(sock, struct pd6729_socket, socket); + unsigned int status; + unsigned int data; + struct pd6729_socket *t; + + /* Interface Status Register */ + status = indirect_read(socket, I365_STATUS); + *value = 0; + + if ((status & I365_CS_DETECT) == I365_CS_DETECT) { + *value |= SS_DETECT; + } + + /* IO cards have a different meaning of bits 0,1 */ + /* Also notice the inverse-logic on the bits */ + if (indirect_read(socket, I365_INTCTL) & I365_PC_IOCARD) { + /* IO card */ + if (!(status & I365_CS_STSCHG)) + *value |= SS_STSCHG; + } else { + /* non I/O card */ + if (!(status & I365_CS_BVD1)) + *value |= SS_BATDEAD; + if (!(status & I365_CS_BVD2)) + *value |= SS_BATWARN; + } + + if (status & I365_CS_WRPROT) + *value |= SS_WRPROT; /* card is write protected */ + + if (status & I365_CS_READY) + *value |= SS_READY; /* card is not busy */ + + if (status & I365_CS_POWERON) + *value |= SS_POWERON; /* power is applied to the card */ + + t = (socket->number) ? socket : socket + 1; + indirect_write(t, PD67_EXT_INDEX, PD67_EXTERN_DATA); + data = indirect_read16(t, PD67_EXT_DATA); + *value |= (data & PD67_EXD_VS1(socket->number)) ? 0 : SS_3VCARD; + + return 0; +} + + +static int pd6729_get_socket(struct pcmcia_socket *sock, socket_state_t *state) +{ + struct pd6729_socket *socket = container_of(sock, struct pd6729_socket, socket); + unsigned char reg, vcc, vpp; + + state->flags = 0; + state->Vcc = 0; + state->Vpp = 0; + state->io_irq = 0; + state->csc_mask = 0; + + /* First the power status of the socket */ + /* PCTRL - Power Control Register */ + reg = indirect_read(socket, I365_POWER); + + if (reg & I365_PWR_AUTO) + state->flags |= SS_PWR_AUTO; /* Automatic Power Switch */ + + if (reg & I365_PWR_OUT) + state->flags |= SS_OUTPUT_ENA; /* Output signals are enabled */ + + vcc = reg & I365_VCC_MASK; vpp = reg & I365_VPP1_MASK; + + if (reg & I365_VCC_5V) { + state->Vcc = (indirect_read(socket, PD67_MISC_CTL_1) & + PD67_MC1_VCC_3V) ? 
33 : 50; + + if (vpp == I365_VPP1_5V) { + if (state->Vcc == 50) + state->Vpp = 50; + else + state->Vpp = 33; + } + if (vpp == I365_VPP1_12V) + state->Vpp = 120; + } + + /* Now the IO card, RESET flags and IO interrupt */ + /* IGENC, Interrupt and General Control */ + reg = indirect_read(socket, I365_INTCTL); + + if ((reg & I365_PC_RESET) == 0) + state->flags |= SS_RESET; + if (reg & I365_PC_IOCARD) + state->flags |= SS_IOCARD; /* This is an IO card */ + + /* Set the IRQ number */ + state->io_irq = socket->socket.pci_irq; + + /* Card status change */ + /* CSCICR, Card Status Change Interrupt Configuration */ + reg = indirect_read(socket, I365_CSCINT); + + if (reg & I365_CSC_DETECT) + state->csc_mask |= SS_DETECT; /* Card detect is enabled */ + + if (state->flags & SS_IOCARD) {/* IO Cards behave different */ + if (reg & I365_CSC_STSCHG) + state->csc_mask |= SS_STSCHG; + } else { + if (reg & I365_CSC_BVD1) + state->csc_mask |= SS_BATDEAD; + if (reg & I365_CSC_BVD2) + state->csc_mask |= SS_BATWARN; + if (reg & I365_CSC_READY) + state->csc_mask |= SS_READY; + } + + return 0; +} + +static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state) +{ + struct pd6729_socket *socket = container_of(sock, struct pd6729_socket, socket); + unsigned char reg; + + /* First, set the global controller options */ + + set_bridge_state(socket); + + /* Values for the IGENC register */ + + reg = 0; + /* The reset bit has "inverse" logic */ + if (!(state->flags & SS_RESET)) + reg = reg | I365_PC_RESET; + if (state->flags & SS_IOCARD) + reg = reg | I365_PC_IOCARD; + + /* IGENC, Interrupt and General Control Register */ + indirect_write(socket, I365_INTCTL, reg); + + /* Power registers */ + + reg = I365_PWR_NORESET; /* default: disable resetdrv on resume */ + + if (state->flags & SS_PWR_AUTO) { + dprintk("Auto power\n"); + reg |= I365_PWR_AUTO; /* automatic power mngmnt */ + } + if (state->flags & SS_OUTPUT_ENA) { + dprintk("Power Enabled\n"); + reg |= I365_PWR_OUT; /* enable power */ + } + + switch (state->Vcc) { + case 0: + break; + case 33: + dprintk("setting voltage to Vcc to 3.3V on socket %i\n", + socket->number); + reg |= I365_VCC_5V; + indirect_setbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); + break; + case 50: + dprintk("setting voltage to Vcc to 5V on socket %i\n", + socket->number); + reg |= I365_VCC_5V; + indirect_resetbit(socket, PD67_MISC_CTL_1, PD67_MC1_VCC_3V); + break; + default: + dprintk("pd6729: pd6729_set_socket called with invalid VCC power value: %i\n", + state->Vcc); + return -EINVAL; + } + + switch (state->Vpp) { + case 0: + dprintk("not setting Vpp on socket %i\n", socket->number); + break; + case 33: + case 50: + dprintk("setting Vpp to Vcc for socket %i\n", socket->number); + reg |= I365_VPP1_5V; + break; + case 120: + dprintk("setting Vpp to 12.0\n"); + reg |= I365_VPP1_12V; + break; + default: + dprintk("pd6729: pd6729_set_socket called with invalid VPP power value: %i\n", + state->Vpp); + return -EINVAL; + } + + /* only write if changed */ + if (reg != indirect_read(socket, I365_POWER)) + indirect_write(socket, I365_POWER, reg); + + /* Now, specifiy that all interrupts are to be done as PCI interrupts */ + indirect_write(socket, PD67_EXT_INDEX, PD67_EXT_CTL_1); + indirect_write(socket, PD67_EXT_DATA, PD67_EC1_INV_MGMT_IRQ | PD67_EC1_INV_CARD_IRQ); + + /* Enable specific interrupt events */ + + reg = 0x00; + if (state->csc_mask & SS_DETECT) { + reg |= I365_CSC_DETECT; + } + if (state->flags & SS_IOCARD) { + if (state->csc_mask & SS_STSCHG) + reg |= I365_CSC_STSCHG; + } 
else { + if (state->csc_mask & SS_BATDEAD) + reg |= I365_CSC_BVD1; + if (state->csc_mask & SS_BATWARN) + reg |= I365_CSC_BVD2; + if (state->csc_mask & SS_READY) + reg |= I365_CSC_READY; + } + reg |= 0x30; /* management IRQ: PCI INTA# = "irq 3" */ + indirect_write(socket, I365_CSCINT, reg); + + reg = indirect_read(socket, I365_INTCTL); + reg |= 0x03; /* card IRQ: PCI INTA# = "irq 3" */ + indirect_write(socket, I365_INTCTL, reg); + + /* now clear the (probably bogus) pending stuff by doing a dummy read */ + (void)indirect_read(socket, I365_CSC); + + return 0; +} + +static int pd6729_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) +{ + struct pd6729_socket *socket = container_of(sock, struct pd6729_socket, socket); + unsigned char map, ioctl; + + map = io->map; + + /* Check error conditions */ + if (map > 1) { + dprintk("pd6729_set_io_map with invalid map"); + return -EINVAL; + } + + /* Turn off the window before changing anything */ + if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_IO(map)) + indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_IO(map)); + +/* dprintk("set_io_map: Setting range to %x - %x\n", io->start, io->stop);*/ + + /* write the new values */ + indirect_write16(socket, I365_IO(map)+I365_W_START, io->start); + indirect_write16(socket, I365_IO(map)+I365_W_STOP, io->stop); + + ioctl = indirect_read(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map); + + if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map); + if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map); + if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map); + + indirect_write(socket, I365_IOCTL, ioctl); + + /* Turn the window back on if needed */ + if (io->flags & MAP_ACTIVE) + indirect_setbit(socket, I365_ADDRWIN, I365_ENA_IO(map)); + + return 0; +} + +static int pd6729_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *mem) +{ + struct pd6729_socket *socket = container_of(sock, struct pd6729_socket, socket); + unsigned short base, i; + unsigned char map; + + map = mem->map; + if (map > 4) { + printk("pd6729_set_mem_map: invalid map"); + return -EINVAL; + } + + if ((mem->sys_start > mem->sys_stop) || (mem->speed > 1000)) { + printk("pd6729_set_mem_map: invalid address / speed"); + /* printk("invalid mem map for socket %i : %lx to %lx with a start of %x\n", + sock, mem->sys_start, mem->sys_stop, mem->card_start); */ + return -EINVAL; + } + + /* Turn off the window before changing anything */ + if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_MEM(map)) + indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_MEM(map)); + + /* write the start address */ + base = I365_MEM(map); + i = (mem->sys_start >> 12) & 0x0fff; + if (mem->flags & MAP_16BIT) + i |= I365_MEM_16BIT; + if (mem->flags & MAP_0WS) + i |= I365_MEM_0WS; + indirect_write16(socket, base + I365_W_START, i); + + /* write the stop address */ + + i= (mem->sys_stop >> 12) & 0x0fff; + switch (to_cycles(mem->speed)) { + case 0: + break; + case 1: + i |= I365_MEM_WS0; + break; + case 2: + i |= I365_MEM_WS1; + break; + default: + i |= I365_MEM_WS1 | I365_MEM_WS0; + break; + } + + indirect_write16(socket, base + I365_W_STOP, i); + + /* Take care of high byte */ + indirect_write(socket, PD67_EXT_INDEX, PD67_MEM_PAGE(map)); + indirect_write(socket, PD67_EXT_DATA, mem->sys_start >> 24); + + /* card start */ + + i = ((mem->card_start - mem->sys_start) >> 12) & 0x3fff; + if (mem->flags & MAP_WRPROT) + i |= I365_MEM_WRPROT; + if (mem->flags & MAP_ATTRIB) { +/* dprintk("requesting attribute memory for socket %i\n", + socket->number);*/ + i |= 
I365_MEM_REG; + } else { +/* dprintk("requesting normal memory for socket %i\n", + socket->number);*/ + } + indirect_write16(socket, base + I365_W_OFF, i); + + /* Enable the window if necessary */ + if (mem->flags & MAP_ACTIVE) + indirect_setbit(socket, I365_ADDRWIN, I365_ENA_MEM(map)); + + return 0; +} + +static int pd6729_suspend(struct pcmcia_socket *sock) +{ + return pd6729_set_socket(sock, &dead_socket); +} + +static int pd6729_init(struct pcmcia_socket *sock) +{ + int i; + struct resource res = { .end = 0x0fff }; + pccard_io_map io = { 0, 0, 0, 0, 1 }; + pccard_mem_map mem = { .res = &res, .sys_stop = 0x0fff }; + + pd6729_set_socket(sock, &dead_socket); + for (i = 0; i < 2; i++) { + io.map = i; + pd6729_set_io_map(sock, &io); + } + for (i = 0; i < 5; i++) { + mem.map = i; + pd6729_set_mem_map(sock, &mem); + } + + return 0; +} + + +/* the pccard structure and its functions */ +static struct pccard_operations pd6729_operations = { + .init = pd6729_init, + .suspend = pd6729_suspend, + .get_status = pd6729_get_status, + .get_socket = pd6729_get_socket, + .set_socket = pd6729_set_socket, + .set_io_map = pd6729_set_io_map, + .set_mem_map = pd6729_set_mem_map, +}; + +static int __devinit pd6729_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + int i, j, ret; + char configbyte; + struct pd6729_socket *socket; + + socket = kmalloc(sizeof(struct pd6729_socket) * MAX_SOCKETS, GFP_KERNEL); + if (!socket) + return -ENOMEM; + + memset(socket, 0, sizeof(struct pd6729_socket) * MAX_SOCKETS); + + if ((ret = pci_enable_device(dev))) + goto err_out_free_mem; + + printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge at 0x%lx on irq %d\n", + pci_resource_start(dev, 0), dev->irq); + printk(KERN_INFO "pd6729: configured as a %d socket device.\n", MAX_SOCKETS); + /* Since we have no memory BARs, some firmware may not + have had PCI_COMMAND_MEMORY enabled, yet the device needs + it.
*/ + pci_read_config_byte(dev, PCI_COMMAND, &configbyte); + if (!(configbyte & PCI_COMMAND_MEMORY)) { + printk(KERN_DEBUG "pd6729: Enabling PCI_COMMAND_MEMORY.\n"); + configbyte |= PCI_COMMAND_MEMORY; + pci_write_config_byte(dev, PCI_COMMAND, configbyte); + } + + ret = pci_request_regions(dev, "pd6729"); + if (ret) { + printk(KERN_INFO "pd6729: pci request region failed.\n"); + goto err_out_disable; + } + + for (i = 0; i < MAX_SOCKETS; i++) { + socket[i].io_base = pci_resource_start(dev, 0); + socket[i].socket.features |= SS_CAP_PCCARD; + socket[i].socket.map_size = 0x1000; + socket[i].socket.irq_mask = 0; + socket[i].socket.pci_irq = dev->irq; + socket[i].socket.owner = THIS_MODULE; + + socket[i].number = i; + + socket[i].socket.ops = &pd6729_operations; + socket[i].socket.dev.dev = &dev->dev; + socket[i].socket.driver_data = &socket[i]; + } + + pci_set_drvdata(dev, socket); + + /* Register the interrupt handler */ + if ((ret = request_irq(dev->irq, pd6729_interrupt, SA_SHIRQ, "pd6729", socket))) { + printk(KERN_ERR "pd6729: Failed to register irq %d, aborting\n", dev->irq); + goto err_out_free_res; + } + + for (i = 0; i < MAX_SOCKETS; i++) { + ret = pcmcia_register_socket(&socket[i].socket); + if (ret) { + printk(KERN_INFO "pd6729: pcmcia_register_socket failed.\n"); + for (j = 0; j < i ; j++) + pcmcia_unregister_socket(&socket[j].socket); + goto err_out_free_res2; + } + } + + return 0; + + err_out_free_res2: + free_irq(dev->irq, socket); + err_out_free_res: + pci_release_regions(dev); + err_out_disable: + pci_disable_device(dev); + + err_out_free_mem: + kfree(socket); + return ret; +} + +static void __devexit pd6729_pci_remove(struct pci_dev *dev) +{ + int i; + struct pd6729_socket *socket = pci_get_drvdata(dev); + + for (i = 0; i < MAX_SOCKETS; i++) + pcmcia_unregister_socket(&socket[i].socket); + + free_irq(dev->irq, socket); + pci_release_regions(dev); + pci_disable_device(dev); + + kfree(socket); +} + +static int pd6729_socket_suspend(struct pci_dev *dev, u32 state) +{ + return pcmcia_socket_dev_suspend(&dev->dev, state); +} + +static int pd6729_socket_resume(struct pci_dev *dev) +{ + return pcmcia_socket_dev_resume(&dev->dev); +} + +static struct pci_device_id pd6729_pci_ids[] = { + { + .vendor = PCI_VENDOR_ID_CIRRUS, + .device = PCI_DEVICE_ID_CIRRUS_6729, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, + { } +}; +MODULE_DEVICE_TABLE(pci, pd6729_pci_ids); + +static struct pci_driver pd6729_pci_drv = { + .name = "pd6729", + .id_table = pd6729_pci_ids, + .probe = pd6729_pci_probe, + .remove = __devexit_p(pd6729_pci_remove), + .suspend = pd6729_socket_suspend, + .resume = pd6729_socket_resume, +}; + +static int pd6729_module_init(void) +{ + return pci_module_init(&pd6729_pci_drv); +} + +static void pd6729_module_exit(void) +{ + pci_unregister_driver(&pd6729_pci_drv); +} + +module_init(pd6729_module_init); +module_exit(pd6729_module_exit); diff --git a/drivers/pcmcia/pd6729.h b/drivers/pcmcia/pd6729.h new file mode 100644 index 000000000..9e90520ef --- /dev/null +++ b/drivers/pcmcia/pd6729.h @@ -0,0 +1,28 @@ +#ifndef _INCLUDE_GUARD_PD6729_H_ +#define _INCLUDE_GUARD_PD6729_H_ + +/* Debugging defines */ +#ifdef NOTRACE +#define dprintk(fmt, args...) printk(fmt , ## args) +#else +#define dprintk(fmt, args...)
do {} while (0) +#endif + +/* Flags for I365_GENCTL */ +#define I365_DF_VS1 0x40 /* DF-step Voltage Sense */ +#define I365_DF_VS2 0x80 + +/* Fields in PD67_EXTERN_DATA */ +#define PD67_EXD_VS1(s) (0x01 << ((s) << 1)) +#define PD67_EXD_VS2(s) (0x02 << ((s) << 1)) + + + + +struct pd6729_socket { + int number; + unsigned long io_base; /* base io address of the socket */ + struct pcmcia_socket socket; +}; + +#endif diff --git a/drivers/s390/net/ctcdbug.c b/drivers/s390/net/ctcdbug.c new file mode 100644 index 000000000..ba004e0aa --- /dev/null +++ b/drivers/s390/net/ctcdbug.c @@ -0,0 +1,83 @@ +/* + * + * linux/drivers/s390/net/ctcdbug.c ($Revision: 1.1 $) + * + * Linux on zSeries OSA Express and HiperSockets support + * + * Copyright 2000,2003 IBM Corporation + * + * Author(s): Original Code written by + * Peter Tiedemann (ptiedem@de.ibm.com) + * + * $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include "ctcdbug.h" + +/** + * Debug Facility Stuff + */ +debug_info_t *dbf_setup = NULL; +debug_info_t *dbf_data = NULL; +debug_info_t *dbf_trace = NULL; + +DEFINE_PER_CPU(char[256], dbf_txt_buf); + +void +unregister_dbf_views(void) +{ + if (dbf_setup) + debug_unregister(dbf_setup); + if (dbf_data) + debug_unregister(dbf_data); + if (dbf_trace) + debug_unregister(dbf_trace); +} +int +register_dbf_views(void) +{ + dbf_setup = debug_register(CTC_DBF_SETUP_NAME, + CTC_DBF_SETUP_INDEX, + CTC_DBF_SETUP_NR_AREAS, + CTC_DBF_SETUP_LEN); + dbf_data = debug_register(CTC_DBF_DATA_NAME, + CTC_DBF_DATA_INDEX, + CTC_DBF_DATA_NR_AREAS, + CTC_DBF_DATA_LEN); + dbf_trace = debug_register(CTC_DBF_TRACE_NAME, + CTC_DBF_TRACE_INDEX, + CTC_DBF_TRACE_NR_AREAS, + CTC_DBF_TRACE_LEN); + + if ((dbf_setup == NULL) || (dbf_data == NULL) || + (dbf_trace == NULL)) { + unregister_dbf_views(); + return -ENOMEM; + } + debug_register_view(dbf_setup, &debug_hex_ascii_view); + debug_set_level(dbf_setup, CTC_DBF_SETUP_LEVEL); + + debug_register_view(dbf_data, &debug_hex_ascii_view); + debug_set_level(dbf_data, CTC_DBF_DATA_LEVEL); + + debug_register_view(dbf_trace, &debug_hex_ascii_view); + debug_set_level(dbf_trace, CTC_DBF_TRACE_LEVEL); + + return 0; +} + + diff --git a/drivers/s390/net/ctcdbug.h b/drivers/s390/net/ctcdbug.h new file mode 100644 index 000000000..447fd1abf --- /dev/null +++ b/drivers/s390/net/ctcdbug.h @@ -0,0 +1,123 @@ +/* + * + * linux/drivers/s390/net/ctcdbug.h ($Revision: 1.1 $) + * + * Linux on zSeries OSA Express and HiperSockets support + * + * Copyright 2000,2003 IBM Corporation + * + * Author(s): Original Code written by + * Peter Tiedemann (ptiedem@de.ibm.com) + * + * $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at 
your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + +#include +/** + * Debug Facility stuff + */ +#define CTC_DBF_SETUP_NAME "ctc_setup" +#define CTC_DBF_SETUP_LEN 16 +#define CTC_DBF_SETUP_INDEX 3 +#define CTC_DBF_SETUP_NR_AREAS 1 +#define CTC_DBF_SETUP_LEVEL 3 + +#define CTC_DBF_DATA_NAME "ctc_data" +#define CTC_DBF_DATA_LEN 128 +#define CTC_DBF_DATA_INDEX 3 +#define CTC_DBF_DATA_NR_AREAS 1 +#define CTC_DBF_DATA_LEVEL 2 + +#define CTC_DBF_TRACE_NAME "ctc_trace" +#define CTC_DBF_TRACE_LEN 16 +#define CTC_DBF_TRACE_INDEX 2 +#define CTC_DBF_TRACE_NR_AREAS 2 +#define CTC_DBF_TRACE_LEVEL 3 + +#define DBF_TEXT(name,level,text) \ + do { \ + debug_text_event(dbf_##name,level,text); \ + } while (0) + +#define DBF_HEX(name,level,addr,len) \ + do { \ + debug_event(dbf_##name,level,(void*)(addr),len); \ + } while (0) + +extern DEFINE_PER_CPU(char[256], dbf_txt_buf); +extern debug_info_t *dbf_setup; +extern debug_info_t *dbf_data; +extern debug_info_t *dbf_trace; + + +#define DBF_TEXT_(name,level,text...) \ + do { \ + char* dbf_txt_buf = get_cpu_var(dbf_txt_buf); \ + sprintf(dbf_txt_buf, text); \ + debug_text_event(dbf_##name,level,dbf_txt_buf); \ + put_cpu_var(dbf_txt_buf); \ + } while (0) + +#define DBF_SPRINTF(name,level,text...) \ + do { \ + debug_sprintf_event(dbf_trace, level, ##text ); \ + debug_sprintf_event(dbf_trace, level, text ); \ + } while (0) + + +int register_dbf_views(void); + +void unregister_dbf_views(void); + +/** + * some more debug stuff + */ + +#define HEXDUMP16(importance,header,ptr) \ +PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ + "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ + *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \ + *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \ + *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \ + *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \ + *(((char*)ptr)+12),*(((char*)ptr)+13), \ + *(((char*)ptr)+14),*(((char*)ptr)+15)); \ +PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ + "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ + *(((char*)ptr)+16),*(((char*)ptr)+17), \ + *(((char*)ptr)+18),*(((char*)ptr)+19), \ + *(((char*)ptr)+20),*(((char*)ptr)+21), \ + *(((char*)ptr)+22),*(((char*)ptr)+23), \ + *(((char*)ptr)+24),*(((char*)ptr)+25), \ + *(((char*)ptr)+26),*(((char*)ptr)+27), \ + *(((char*)ptr)+28),*(((char*)ptr)+29), \ + *(((char*)ptr)+30),*(((char*)ptr)+31)); + +static inline void +hex_dump(unsigned char *buf, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) { + if (i && !(i % 16)) + printk("\n"); + printk("%02x ", *(buf + i)); + } + printk("\n"); +} + diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c new file mode 100644 index 000000000..9f75c87c3 --- /dev/null +++ b/drivers/scsi/3w-9xxx.c @@ -0,0 +1,2153 @@ +/* + 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux. + + Written By: Adam Radford + + Copyright (C) 2004 Applied Micro Circuits Corporation. 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + NO WARRANTY + THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + solely responsible for determining the appropriateness of using and + distributing the Program and assumes all risks associated with its + exercise of rights under this Agreement, including but not limited to + the risks and costs of program errors, damage to or loss of data, + programs or equipment, and unavailability or interruption of operations. + + DISCLAIMER OF LIABILITY + NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + Bugs/Comments/Suggestions should be mailed to: + linuxraid@amcc.com + + For more information, goto: + http://www.amcc.com + + Note: This version of the driver does not contain a bundled firmware + image. + + History + ------- + 2.26.02.000 - Driver cleanup for kernel submission. + 2.26.02.001 - Replace schedule_timeout() calls with msleep(). 
+*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "3w-9xxx.h" + +/* Globals */ +static const char *twa_driver_version="2.26.02.001"; +static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; +static unsigned int twa_device_extension_count; +static int twa_major = -1; +extern struct timezone sys_tz; + +/* Module parameters */ +MODULE_AUTHOR ("AMCC"); +MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver"); +MODULE_LICENSE("GPL"); + +/* Function prototypes */ +static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header); +static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id); +static char *twa_aen_severity_lookup(unsigned char severity_code); +static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id); +static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg); +static int twa_chrdev_open(struct inode *inode, struct file *file); +static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host); +static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id); +static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id); +static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits, + u32 set_features, unsigned short current_fw_srl, + unsigned short current_fw_arch_id, + unsigned short current_fw_branch, + unsigned short current_fw_build, + unsigned short *fw_on_ctlr_srl, + unsigned short *fw_on_ctlr_arch_id, + unsigned short *fw_on_ctlr_branch, + unsigned short *fw_on_ctlr_build, + u32 *init_connect_result); +static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length); +static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds); +static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds); +static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal); +static int twa_reset_device_extension(TW_Device_Extension *tw_dev); +static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); +static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Apache *sglistarg); +static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); +static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); +static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id); + +/* Functions */ + +/* Show some statistics about the card */ +static ssize_t twa_show_stats(struct class_device *class_dev, char *buf) +{ + struct Scsi_Host *host = class_to_shost(class_dev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + unsigned long flags = 0; + ssize_t len; + + spin_lock_irqsave(tw_dev->host->host_lock, flags); + len = snprintf(buf, PAGE_SIZE, "Driver version: %s\n" + "Current commands posted: %4d\n" + "Max commands posted: %4d\n" + "Current pending commands: %4d\n" + "Max pending commands: %4d\n" + "Last sgl length: %4d\n" + "Max sgl length: %4d\n" + "Last sector count: %4d\n" + "Max sector count: %4d\n" + "SCSI Host Resets: %4d\n" + "SCSI Aborts/Timeouts: %4d\n" + "AEN's: %4d\n", + twa_driver_version, + tw_dev->posted_request_count, + tw_dev->max_posted_request_count, + 
tw_dev->pending_request_count, + tw_dev->max_pending_request_count, + tw_dev->sgl_entries, + tw_dev->max_sgl_entries, + tw_dev->sector_count, + tw_dev->max_sector_count, + tw_dev->num_resets, + tw_dev->num_aborts, + tw_dev->aen_count); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + return len; +} /* End twa_show_stats() */ + +/* This function will set a device's queue depth */ +static ssize_t twa_store_queue_depth(struct device *dev, const char *buf, size_t count) +{ + int queue_depth; + struct scsi_device *sdev = to_scsi_device(dev); + + queue_depth = simple_strtoul(buf, NULL, 0); + if (queue_depth > TW_Q_LENGTH-2) + return -EINVAL; + scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); + + return count; +} /* End twa_store_queue_depth() */ + +/* Create sysfs 'queue_depth' entry */ +static struct device_attribute twa_queue_depth_attr = { + .attr = { + .name = "queue_depth", + .mode = S_IRUSR | S_IWUSR, + }, + .store = twa_store_queue_depth +}; + +/* Device attributes initializer */ +static struct device_attribute *twa_dev_attrs[] = { + &twa_queue_depth_attr, + NULL, +}; + +/* Create sysfs 'stats' entry */ +static struct class_device_attribute twa_host_stats_attr = { + .attr = { + .name = "stats", + .mode = S_IRUGO, + }, + .show = twa_show_stats +}; + +/* Host attributes initializer */ +static struct class_device_attribute *twa_host_attrs[] = { + &twa_host_stats_attr, + NULL, +}; + +/* File operations struct for character device */ +static struct file_operations twa_fops = { + .owner = THIS_MODULE, + .ioctl = twa_chrdev_ioctl, + .open = twa_chrdev_open, + .release = NULL +}; + +/* This function will complete an aen request from the isr */ +static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) +{ + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Command_Apache_Header *header; + unsigned short aen; + int retval = 1; + + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; + tw_dev->posted_request_count--; + aen = header->status_block.error; + full_command_packet = tw_dev->command_packet_virt[request_id]; + command_packet = &full_command_packet->command.oldcommand; + + /* First check for internal completion of set param for time sync */ + if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) { + /* Keep reading the queue in case there are more aen's */ + if (twa_aen_read_queue(tw_dev, request_id)) + goto out2; + else { + retval = 0; + goto out; + } + } + + switch (aen) { + case TW_AEN_QUEUE_EMPTY: + /* Quit reading the queue if this is the last one */ + break; + case TW_AEN_SYNC_TIME_WITH_HOST: + twa_aen_sync_time(tw_dev, request_id); + retval = 0; + goto out; + default: + twa_aen_queue_event(tw_dev, header); + + /* If there are more aen's, keep reading the queue */ + if (twa_aen_read_queue(tw_dev, request_id)) + goto out2; + else { + retval = 0; + goto out; + } + } + retval = 0; +out2: + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); +out: + return retval; +} /* End twa_aen_complete() */ + +/* This function will drain the aen queue */ +static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset) +{ + int request_id = 0; + char cdb[TW_MAX_CDB_LEN]; + TW_SG_Apache sglist[1]; + int finished = 0, count = 0; + TW_Command_Full *full_command_packet; + TW_Command_Apache_Header *header; + unsigned short aen; + int first_reset = 0, queue = 0, retval = 1; + + if (no_check_reset) + first_reset =
0; + else + first_reset = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + + /* Initialize cdb */ + memset(&cdb, 0, TW_MAX_CDB_LEN); + cdb[0] = REQUEST_SENSE; /* opcode */ + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ + + /* Initialize sglist */ + memset(&sglist, 0, sizeof(TW_SG_Apache)); + sglist[0].length = TW_SECTOR_SIZE; + sglist[0].address = tw_dev->generic_buffer_phys[request_id]; + + if (sglist[0].address & TW_ALIGNMENT_9000_SGL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain"); + goto out; + } + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + do { + /* Send command to the board */ + if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense"); + goto out; + } + + /* Now poll for completion */ + if (twa_poll_response(tw_dev, request_id, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue"); + tw_dev->posted_request_count--; + goto out; + } + + tw_dev->posted_request_count--; + header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id]; + aen = header->status_block.error; + queue = 0; + count++; + + switch (aen) { + case TW_AEN_QUEUE_EMPTY: + if (first_reset != 1) + goto out; + else + finished = 1; + break; + case TW_AEN_SOFT_RESET: + if (first_reset == 0) + first_reset = 1; + else + queue = 1; + break; + case TW_AEN_SYNC_TIME_WITH_HOST: + break; + default: + queue = 1; + } + + /* Now queue an event info */ + if (queue) + twa_aen_queue_event(tw_dev, header); + } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN)); + + if (count == TW_MAX_AEN_DRAIN) + goto out; + + retval = 0; +out: + tw_dev->state[request_id] = TW_S_INITIAL; + return retval; +} /* End twa_aen_drain_queue() */ + +/* This function will queue an event */ +static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header) +{ + u32 local_time; + struct timeval time; + TW_Event *event; + unsigned short aen; + char host[16]; + + tw_dev->aen_count++; + + /* Fill out event info */ + event = tw_dev->event_queue[tw_dev->error_index]; + + /* Check for clobber */ + host[0] = '\0'; + if (tw_dev->host) { + sprintf(host, " scsi%d:", tw_dev->host->host_no); + if (event->retrieved == TW_AEN_NOT_RETRIEVED) + tw_dev->aen_clobber = 1; + } + + aen = header->status_block.error; + memset(event, 0, sizeof(TW_Event)); + + event->severity = TW_SEV_OUT(header->status_block.severity__reserved); + do_gettimeofday(&time); + local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60)); + event->time_stamp_sec = local_time; + event->aen_code = aen; + event->retrieved = TW_AEN_NOT_RETRIEVED; + event->sequence_id = tw_dev->error_sequence_id; + tw_dev->error_sequence_id++; + + header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0'; + event->parameter_len = strlen(header->err_specific_desc); + memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len); + if (event->severity != TW_AEN_SEVERITY_DEBUG) + printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n", + host, + twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)), + TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen, + twa_string_lookup(twa_aen_table, aen), + header->err_specific_desc); + else + tw_dev->aen_count--; + + if ((tw_dev->error_index + 1) == TW_Q_LENGTH) + tw_dev->event_queue_wrapped = 1; + tw_dev->error_index 
= (tw_dev->error_index + 1 ) % TW_Q_LENGTH; +} /* End twa_aen_queue_event() */ + +/* This function will read the aen queue from the isr */ +static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id) +{ + char cdb[TW_MAX_CDB_LEN]; + TW_SG_Apache sglist[1]; + TW_Command_Full *full_command_packet; + int retval = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + + /* Initialize cdb */ + memset(&cdb, 0, TW_MAX_CDB_LEN); + cdb[0] = REQUEST_SENSE; /* opcode */ + cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */ + + /* Initialize sglist */ + memset(&sglist, 0, sizeof(TW_SG_Apache)); + sglist[0].length = TW_SECTOR_SIZE; + sglist[0].address = tw_dev->generic_buffer_phys[request_id]; + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + /* Now post the command packet */ + if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue"); + goto out; + } + retval = 0; +out: + return retval; +} /* End twa_aen_read_queue() */ + +/* This function will look up an AEN severity string */ +static char *twa_aen_severity_lookup(unsigned char severity_code) +{ + char *retval = NULL; + + if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) || + (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG)) + goto out; + + retval = twa_aen_severity_table[severity_code]; +out: + return retval; +} /* End twa_aen_severity_lookup() */ + +/* This function will sync firmware time with the host time */ +static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id) +{ + u32 schedulertime; + struct timeval utc; + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Param_Apache *param; + u32 local_time; + + /* Fill out the command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + command_packet = &full_command_packet->command.oldcommand; + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM); + command_packet->request_id = request_id; + command_packet->byte8_offset.param.sgl[0].address = tw_dev->generic_buffer_phys[request_id]; + command_packet->byte8_offset.param.sgl[0].length = TW_SECTOR_SIZE; + command_packet->size = TW_COMMAND_SIZE; + command_packet->byte6_offset.parameter_count = 1; + + /* Setup the param */ + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; + memset(param, 0, TW_SECTOR_SIZE); + param->table_id = TW_TIMEKEEP_TABLE | 0x8000; /* Controller time keep table */ + param->parameter_id = 0x3; /* SchedulerTime */ + param->parameter_size_bytes = 4; + + /* Convert system time in UTC to local time seconds since last + Sunday 12:00AM */ + do_gettimeofday(&utc); + local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60)); + schedulertime = local_time - (3 * 86400); + schedulertime = schedulertime % 604800; + + memcpy(param->data, &schedulertime, sizeof(u32)); + + /* Mark internal command */ + tw_dev->srb[request_id] = NULL; + + /* Now post the command */ + twa_post_command_packet(tw_dev, request_id, 1); +} /* End twa_aen_sync_time() */ + +/* This function will allocate memory and check if it is correctly aligned */ +static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which) +{ + int i; + dma_addr_t dma_handle; + unsigned long *cpu_addr; + int retval = 1; + + cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle); 
+ if (!cpu_addr) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed"); + goto out; + } + + if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory"); + pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle); + goto out; + } + + memset(cpu_addr, 0, size*TW_Q_LENGTH); + + for (i = 0; i < TW_Q_LENGTH; i++) { + switch(which) { + case 0: + tw_dev->command_packet_phys[i] = dma_handle+(i*size); + tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size)); + break; + case 1: + tw_dev->generic_buffer_phys[i] = dma_handle+(i*size); + tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size)); + break; + } + } + retval = 0; +out: + return retval; +} /* End twa_allocate_memory() */ + +/* This function will check the status register for unexpected bits */ +static int twa_check_bits(u32 status_reg_value) +{ + int retval = 1; + + if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS) + goto out; + if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0) + goto out; + + retval = 0; +out: + return retval; +} /* End twa_check_bits() */ + +/* This function will check the srl and decide if we are compatible */ +static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed) +{ + int retval = 1; + unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0; + unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0; + u32 init_connect_result = 0; + + if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, + TW_EXTENDED_INIT_CONNECT, TW_CURRENT_FW_SRL, + TW_9000_ARCH_ID, TW_CURRENT_FW_BRANCH, + TW_CURRENT_FW_BUILD, &fw_on_ctlr_srl, + &fw_on_ctlr_arch_id, &fw_on_ctlr_branch, + &fw_on_ctlr_build, &init_connect_result)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL"); + goto out; + } + + tw_dev->working_srl = TW_CURRENT_FW_SRL; + tw_dev->working_branch = TW_CURRENT_FW_BRANCH; + tw_dev->working_build = TW_CURRENT_FW_BUILD; + + /* Try base mode compatibility */ + if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) { + if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS, + TW_EXTENDED_INIT_CONNECT, + TW_BASE_FW_SRL, TW_9000_ARCH_ID, + TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD, + &fw_on_ctlr_srl, &fw_on_ctlr_arch_id, + &fw_on_ctlr_branch, &fw_on_ctlr_build, + &init_connect_result)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL"); + goto out; + } + if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) { + if (TW_CURRENT_FW_SRL > fw_on_ctlr_srl) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware"); + } else { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver"); + } + goto out; + } + tw_dev->working_srl = TW_BASE_FW_SRL; + tw_dev->working_branch = TW_BASE_FW_BRANCH; + tw_dev->working_build = TW_BASE_FW_BUILD; + } + retval = 0; +out: + return retval; +} /* End twa_check_srl() */ + +/* This function handles ioctl for the character device */ +static int twa_chrdev_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) +{ + long timeout; + unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0; + dma_addr_t dma_handle; + int request_id = 0; + unsigned int sequence_id = 0; + unsigned char event_index, start_index; + TW_Ioctl_Driver_Command driver_command; + TW_Ioctl_Buf_Apache 
*tw_ioctl; + TW_Lock *tw_lock; + TW_Command_Full *full_command_packet; + TW_Compatibility_Info *tw_compat_info; + TW_Event *event; + struct timeval current_time; + u32 current_time_ms; + TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)]; + int retval = TW_IOCTL_ERROR_OS_EFAULT; + + /* Only let one of these through at a time */ + if (down_interruptible(&tw_dev->ioctl_sem)) { + retval = TW_IOCTL_ERROR_OS_EINTR; + goto out; + } + + /* First copy down the driver command */ + if (copy_from_user(&driver_command, (void *)arg, sizeof(TW_Ioctl_Driver_Command))) + goto out2; + + /* Check data buffer size */ + if (driver_command.buffer_length > TW_MAX_SECTORS * 512) { + retval = TW_IOCTL_ERROR_OS_EINVAL; + goto out2; + } + + /* Hardware can only do multiple of 512 byte transfers */ + data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511; + + /* Now allocate ioctl buf memory */ + cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle); + if (!cpu_addr) { + retval = TW_IOCTL_ERROR_OS_ENOMEM; + goto out2; + } + + tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr; + + /* Now copy down the entire ioctl */ + if (copy_from_user(tw_ioctl, (void *)arg, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1)) + goto out3; + + /* See which ioctl we are doing */ + switch (cmd) { + case TW_IOCTL_FIRMWARE_PASS_THROUGH: + spin_lock_irqsave(tw_dev->host->host_lock, flags); + twa_get_request_id(tw_dev, &request_id); + + /* Flag internal command */ + tw_dev->srb[request_id] = 0; + + /* Flag chrdev ioctl */ + tw_dev->chrdev_request_id = request_id; + + full_command_packet = &tw_ioctl->firmware_command; + + /* Load request id and sglist for both command types */ + twa_load_sgl(full_command_packet, request_id, dma_handle, data_buffer_length_adjusted); + + memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full)); + + /* Now post the command packet to the controller */ + twa_post_command_packet(tw_dev, request_id, 1); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + + timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ; + + /* Now wait for command to complete */ + timeout = wait_event_interruptible_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout); + + /* Check if we timed out, got a signal, or didn't get + an interrupt */ + if ((timeout <= 0) && (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE)) { + /* Now we need to reset the board */ + if (timeout == TW_IOCTL_ERROR_OS_ERESTARTSYS) { + retval = timeout; + } else { + printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n", + tw_dev->host->host_no, TW_DRIVER, 0xc, + cmd); + retval = TW_IOCTL_ERROR_OS_EIO; + } + spin_lock_irqsave(tw_dev->host->host_lock, flags); + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + tw_dev->posted_request_count--; + twa_reset_device_extension(tw_dev); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + goto out3; + } + + /* Now copy in the command packet response */ + memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full)); + + /* Now complete the io */ + spin_lock_irqsave(tw_dev->host->host_lock, flags); + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + spin_unlock_irqrestore(tw_dev->host->host_lock, flags); + break; + 
case TW_IOCTL_GET_COMPATIBILITY_INFO: + tw_ioctl->driver_command.status = 0; + /* Copy compatibility struct into ioctl data buffer */ + tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer; + strncpy(tw_compat_info->driver_version, twa_driver_version, strlen(twa_driver_version)); + tw_compat_info->working_srl = tw_dev->working_srl; + tw_compat_info->working_branch = tw_dev->working_branch; + tw_compat_info->working_build = tw_dev->working_build; + break; + case TW_IOCTL_GET_LAST_EVENT: + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } else + tw_ioctl->driver_command.status = 0; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + tw_ioctl->driver_command.status = 0; + } + event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH; + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_FIRST_EVENT: + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } else + tw_ioctl->driver_command.status = 0; + event_index = tw_dev->error_index; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + tw_ioctl->driver_command.status = 0; + event_index = 0; + } + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_NEXT_EVENT: + event = (TW_Event *)tw_ioctl->data_buffer; + sequence_id = event->sequence_id; + tw_ioctl->driver_command.status = 0; + + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } + start_index = tw_dev->error_index; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + start_index = 0; + } + event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH; + + if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) { + if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER) + tw_dev->aen_clobber = 1; + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_PREVIOUS_EVENT: + event = (TW_Event *)tw_ioctl->data_buffer; + sequence_id = event->sequence_id; + tw_ioctl->driver_command.status = 0; + + if (tw_dev->event_queue_wrapped) { + if (tw_dev->aen_clobber) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER; + tw_dev->aen_clobber = 0; + } + start_index = tw_dev->error_index; + } else { + if (!tw_dev->error_index) { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + start_index = 0; + } + event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH; + + if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) { + if (tw_ioctl->driver_command.status ==
TW_IOCTL_ERROR_STATUS_AEN_CLOBBER) + tw_dev->aen_clobber = 1; + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS; + break; + } + memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event)); + tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED; + break; + case TW_IOCTL_GET_LOCK: + tw_lock = (TW_Lock *)tw_ioctl->data_buffer; + do_gettimeofday(&current_time); + current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000); + + if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) { + tw_dev->ioctl_sem_lock = 1; + tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec; + tw_ioctl->driver_command.status = 0; + tw_lock->time_remaining_msec = tw_lock->timeout_msec; + } else { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED; + tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms; + } + break; + case TW_IOCTL_RELEASE_LOCK: + if (tw_dev->ioctl_sem_lock == 1) { + tw_dev->ioctl_sem_lock = 0; + tw_ioctl->driver_command.status = 0; + } else { + tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED; + } + break; + default: + retval = TW_IOCTL_ERROR_OS_ENOTTY; + goto out3; + } + + /* Now copy the entire response to userspace */ + if (copy_to_user((void *)arg, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0) + retval = 0; +out3: + /* Now free ioctl buf memory */ + pci_free_consistent(tw_dev->tw_pci_dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle); +out2: + up(&tw_dev->ioctl_sem); +out: + return retval; +} /* End twa_chrdev_ioctl() */ + +/* This function handles open for the character device */ +static int twa_chrdev_open(struct inode *inode, struct file *file) +{ + unsigned int minor_number; + int retval = TW_IOCTL_ERROR_OS_ENODEV; + + minor_number = iminor(inode); + if (minor_number >= twa_device_extension_count) + goto out; + retval = 0; +out: + return retval; +} /* End twa_chrdev_open() */ + +/* This function will print readable messages from status register errors */ +static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value) +{ + int retval = 1; + + /* Check for various error conditions and handle them appropriately */ + if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing"); + writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_PCI_ABORT) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing"); + writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev)); + pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT); + } + + if (status_reg_value & TW_STATUS_QUEUE_ERROR) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing"); + writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_SBUF_WRITE_ERROR) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0xf, "SBUF Write Error: clearing"); + writel(TW_CONTROL_CLEAR_SBUF_WRITE_ERROR, TW_CONTROL_REG_ADDR(tw_dev)); + } + + if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) { + if (tw_dev->reset_print == 0) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing"); + tw_dev->reset_print = 1; + } + goto out; + } + retval = 0; +out: + return retval; +} /* End twa_decode_bits() */ + +/* This function will empty the response queue */ +static int
twa_empty_response_queue(TW_Device_Extension *tw_dev) +{ + u32 status_reg_value, response_que_value; + int count = 0, retval = 1; + + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) { + response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + count++; + } + if (count == TW_MAX_RESPONSE_DRAIN) + goto out; + + retval = 0; +out: + return retval; +} /* End twa_empty_response_queue() */ + +/* This function passes sense keys from firmware to scsi layer */ +static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host) +{ + TW_Command_Full *full_command_packet; + unsigned short error; + int retval = 1; + + full_command_packet = tw_dev->command_packet_virt[request_id]; + /* Don't print error for Logical unit not supported during rollcall */ + error = full_command_packet->header.status_block.error; + if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) { + if (print_host) + printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n", + tw_dev->host->host_no, + TW_MESSAGE_SOURCE_CONTROLLER_ERROR, + full_command_packet->header.status_block.error, + twa_string_lookup(twa_error_table, + full_command_packet->header.status_block.error), + full_command_packet->header.err_specific_desc); + else + printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n", + TW_MESSAGE_SOURCE_CONTROLLER_ERROR, + full_command_packet->header.status_block.error, + twa_string_lookup(twa_error_table, + full_command_packet->header.status_block.error), + full_command_packet->header.err_specific_desc); + } + + if (copy_sense) { + memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH); + tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1); + retval = TW_ISR_DONT_RESULT; + goto out; + } + retval = 0; +out: + return retval; +} /* End twa_fill_sense() */ + +/* This function will free up device extension resources */ +static void twa_free_device_extension(TW_Device_Extension *tw_dev) +{ + if (tw_dev->command_packet_virt[0]) + pci_free_consistent(tw_dev->tw_pci_dev, + sizeof(TW_Command_Full)*TW_Q_LENGTH, + tw_dev->command_packet_virt[0], + tw_dev->command_packet_phys[0]); + + if (tw_dev->generic_buffer_virt[0]) + pci_free_consistent(tw_dev->tw_pci_dev, + TW_SECTOR_SIZE*TW_Q_LENGTH, + tw_dev->generic_buffer_virt[0], + tw_dev->generic_buffer_phys[0]); + + if (tw_dev->event_queue[0]) + kfree(tw_dev->event_queue[0]); +} /* End twa_free_device_extension() */ + +/* This function will free a request id */ +static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id) +{ + tw_dev->free_queue[tw_dev->free_tail] = request_id; + tw_dev->state[request_id] = TW_S_FINISHED; + tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH; +} /* End twa_free_request_id() */ + +/* This function will get parameter table entries from the firmware */ +static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes) +{ + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Param_Apache *param; + unsigned long param_value; + void *retval = NULL; + + /* Setup the command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); +
command_packet = &full_command_packet->command.oldcommand; + + command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM); + command_packet->size = TW_COMMAND_SIZE; + command_packet->request_id = request_id; + command_packet->byte6_offset.block_count = 1; + + /* Now setup the param */ + param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id]; + memset(param, 0, TW_SECTOR_SIZE); + param->table_id = table_id | 0x8000; + param->parameter_id = parameter_id; + param->parameter_size_bytes = parameter_size_bytes; + param_value = tw_dev->generic_buffer_phys[request_id]; + + command_packet->byte8_offset.param.sgl[0].address = param_value; + command_packet->byte8_offset.param.sgl[0].length = TW_SECTOR_SIZE; + + /* Post the command packet to the board */ + twa_post_command_packet(tw_dev, request_id, 1); + + /* Poll for completion */ + if (twa_poll_response(tw_dev, request_id, 30)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param") + else + retval = (void *)&(param->data[0]); + + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_INITIAL; + + return retval; +} /* End twa_get_param() */ + +/* This function will assign an available request id */ +static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id) +{ + *request_id = tw_dev->free_queue[tw_dev->free_head]; + tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH; + tw_dev->state[*request_id] = TW_S_STARTED; +} /* End twa_get_request_id() */ + +/* This function will send an initconnection command to controller */ +static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits, + u32 set_features, unsigned short current_fw_srl, + unsigned short current_fw_arch_id, + unsigned short current_fw_branch, + unsigned short current_fw_build, + unsigned short *fw_on_ctlr_srl, + unsigned short *fw_on_ctlr_arch_id, + unsigned short *fw_on_ctlr_branch, + unsigned short *fw_on_ctlr_build, + u32 *init_connect_result) +{ + TW_Command_Full *full_command_packet; + TW_Initconnect *tw_initconnect; + int request_id = 0, retval = 1; + + /* Initialize InitConnection command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + memset(full_command_packet, 0, sizeof(TW_Command_Full)); + full_command_packet->header.header_desc.size_header = 128; + + tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand; + tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION); + tw_initconnect->request_id = request_id; + tw_initconnect->message_credits = message_credits; + tw_initconnect->features = set_features; +#if BITS_PER_LONG > 32 + /* Turn on 64-bit sgl support */ + tw_initconnect->features |= 1; +#endif + + if (set_features & TW_EXTENDED_INIT_CONNECT) { + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED; + tw_initconnect->fw_srl = current_fw_srl; + tw_initconnect->fw_arch_id = current_fw_arch_id; + tw_initconnect->fw_branch = current_fw_branch; + tw_initconnect->fw_build = current_fw_build; + } else + tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE; + + /* Send command packet to the board */ + twa_post_command_packet(tw_dev, request_id, 1); + + /* Poll for completion */ + if (twa_poll_response(tw_dev, request_id, 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection"); + } else { + if (set_features & TW_EXTENDED_INIT_CONNECT) { + *fw_on_ctlr_srl = tw_initconnect->fw_srl; + *fw_on_ctlr_arch_id = tw_initconnect->fw_arch_id; + *fw_on_ctlr_branch = 
tw_initconnect->fw_branch; + *fw_on_ctlr_build = tw_initconnect->fw_build; + *init_connect_result = tw_initconnect->result; + } + retval = 0; + } + + tw_dev->posted_request_count--; + tw_dev->state[request_id] = TW_S_INITIAL; + + return retval; +} /* End twa_initconnection() */ + +/* This function will initialize the fields of a device extension */ +static int twa_initialize_device_extension(TW_Device_Extension *tw_dev) +{ + int i, retval = 1; + + /* Initialize command packet buffers */ + if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed"); + goto out; + } + + /* Initialize generic buffer */ + if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed"); + goto out; + } + + /* Allocate event info space */ + tw_dev->event_queue[0] = kmalloc(sizeof(TW_Event) * TW_Q_LENGTH, GFP_KERNEL); + if (!tw_dev->event_queue[0]) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed"); + goto out; + } + + memset(tw_dev->event_queue[0], 0, sizeof(TW_Event) * TW_Q_LENGTH); + + for (i = 0; i < TW_Q_LENGTH; i++) { + tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event))); + tw_dev->free_queue[i] = i; + tw_dev->state[i] = TW_S_INITIAL; + } + + tw_dev->pending_head = TW_Q_START; + tw_dev->pending_tail = TW_Q_START; + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->error_sequence_id = 1; + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + + init_MUTEX(&tw_dev->ioctl_sem); + init_waitqueue_head(&tw_dev->ioctl_wqueue); + + retval = 0; +out: + return retval; +} /* End twa_initialize_device_extension() */ + +/* This function is the interrupt service routine */ +static irqreturn_t twa_interrupt(int irq, void *dev_instance, struct pt_regs *regs) +{ + int request_id, error = 0; + u32 status_reg_value; + TW_Response_Queue response_que; + TW_Command_Full *full_command_packet; + TW_Command *command_packet; + TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance; + int handled = 0; + + /* Get the per adapter lock */ + spin_lock(tw_dev->host->host_lock); + + /* See if the interrupt matches this instance */ + if (tw_dev->tw_pci_dev->irq == (unsigned int)irq) { + + handled = 1; + + /* Read the registers */ + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + /* Check if this is our interrupt, otherwise bail */ + if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT)) + goto twa_interrupt_bail; + + /* Check controller for errors */ + if (twa_check_bits(status_reg_value)) { + if (twa_decode_bits(tw_dev, status_reg_value)) { + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + } + + /* Handle host interrupt */ + if (status_reg_value & TW_STATUS_HOST_INTERRUPT) + TW_CLEAR_HOST_INTERRUPT(tw_dev); + + /* Handle attention interrupt */ + if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) { + TW_CLEAR_ATTENTION_INTERRUPT(tw_dev); + if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) { + twa_get_request_id(tw_dev, &request_id); + + error = twa_aen_read_queue(tw_dev, request_id); + if (error) { + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags); + } + } + } + + /* Handle command interrupt */ + if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) { + TW_MASK_COMMAND_INTERRUPT(tw_dev); + /* Drain as many pending commands as we can */ + 
while (tw_dev->pending_request_count > 0) { + request_id = tw_dev->pending_queue[tw_dev->pending_head]; + if (tw_dev->state[request_id] != TW_S_PENDING) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending"); + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + if (twa_post_command_packet(tw_dev, request_id, 1)==0) { + tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH; + tw_dev->pending_request_count--; + } else { + /* If we get here, we will continue re-posting on the next command interrupt */ + break; + } + } + } + + /* Handle response interrupt */ + if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) { + + /* Drain the response queue from the board */ + while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) { + /* Complete the response */ + response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + request_id = TW_RESID_OUT(response_que.response_id); + full_command_packet = tw_dev->command_packet_virt[request_id]; + error = 0; + command_packet = &full_command_packet->command.oldcommand; + /* Check for command packet errors */ + if (full_command_packet->command.newcommand.status != 0) { + if (tw_dev->srb[request_id] != 0) { + error = twa_fill_sense(tw_dev, request_id, 1, 1); + } else { + /* Skip ioctl error prints */ + if (request_id != tw_dev->chrdev_request_id) { + error = twa_fill_sense(tw_dev, request_id, 0, 1); + } + } + } + + /* Check for correct state */ + if (tw_dev->state[request_id] != TW_S_POSTED) { + if (tw_dev->srb[request_id] != 0) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted"); + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + } + + /* Check for internal command completion */ + if (tw_dev->srb[request_id] == 0) { + if (request_id != tw_dev->chrdev_request_id) { + if (twa_aen_complete(tw_dev, request_id)) + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt"); + } else { + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + wake_up(&tw_dev->ioctl_wqueue); + } + } else { + twa_scsiop_execute_scsi_complete(tw_dev, request_id); + /* If no error command was a success */ + if (error == 0) { + tw_dev->srb[request_id]->result = (DID_OK << 16); + } + + /* If error, command failed */ + if (error == 1) { + /* Ask for a host reset */ + tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1); + } + + /* Now complete the io */ + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + tw_dev->posted_request_count--; + tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); + twa_unmap_scsi_data(tw_dev, request_id); + } + + /* Check for valid status after each drain */ + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + if (twa_check_bits(status_reg_value)) { + if (twa_decode_bits(tw_dev, status_reg_value)) { + TW_CLEAR_ALL_INTERRUPTS(tw_dev); + goto twa_interrupt_bail; + } + } + } + } + } +twa_interrupt_bail: + spin_unlock(tw_dev->host->host_lock); + return IRQ_RETVAL(handled); +} /* End twa_interrupt() */ + +/* This function will load the request id and various sgls for ioctls */ +static void twa_load_sgl(TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length) +{ + TW_Command *oldcommand; + TW_Command_Apache *newcommand; + TW_SG_Entry *sgl; + + if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { + newcommand = &full_command_packet->command.newcommand; + newcommand->request_id 
= request_id; + newcommand->sg_list[0].address = dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1; + newcommand->sg_list[0].length = length; + } else { + oldcommand = &full_command_packet->command.oldcommand; + oldcommand->request_id = request_id; + + if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) { + /* Load the sg list */ + sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset)); + sgl->address = dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1; + sgl->length = length; + } + } +} /* End twa_load_sgl() */ + +/* This function will perform a pci-dma mapping for a scatter gather list */ +static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) +{ + int use_sg; + struct scsi_cmnd *cmd = tw_dev->srb[request_id]; + struct pci_dev *pdev = tw_dev->tw_pci_dev; + int retval = 0; + + if (cmd->use_sg == 0) + goto out; + + use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, DMA_BIDIRECTIONAL); + + if (use_sg == 0) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list"); + goto out; + } + + cmd->SCp.phase = TW_PHASE_SGLIST; + cmd->SCp.have_data_in = use_sg; + retval = use_sg; +out: + return retval; +} /* End twa_map_scsi_sg_data() */ + +/* This function will perform a pci-dma map for a single buffer */ +static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int request_id) +{ + dma_addr_t mapping; + struct scsi_cmnd *cmd = tw_dev->srb[request_id]; + struct pci_dev *pdev = tw_dev->tw_pci_dev; + int retval = 0; + + if (cmd->request_bufflen == 0) { + retval = 0; + goto out; + } + + mapping = pci_map_single(pdev, cmd->request_buffer, cmd->request_bufflen, DMA_BIDIRECTIONAL); + + if (mapping == 0) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Failed to map page"); + goto out; + } + + cmd->SCp.phase = TW_PHASE_SINGLE; + cmd->SCp.have_data_in = mapping; + retval = mapping; +out: + return retval; +} /* End twa_map_scsi_single_data() */ + +/* This function will poll for a response interrupt of a request */ +static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) +{ + int retval = 1, found = 0, response_request_id; + TW_Response_Queue response_queue; + TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id]; + + if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) { + response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev)); + response_request_id = TW_RESID_OUT(response_queue.response_id); + if (request_id != response_request_id) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response"); + goto out; + } + if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) { + if (full_command_packet->command.newcommand.status != 0) { + /* bad response */ + twa_fill_sense(tw_dev, request_id, 0, 0); + goto out; + } + found = 1; + } else { + if (full_command_packet->command.oldcommand.status != 0) { + /* bad response */ + twa_fill_sense(tw_dev, request_id, 0, 0); + goto out; + } + found = 1; + } + } + + if (found) + retval = 0; +out: + return retval; +} /* End twa_poll_response() */ + +/* This function will poll the status register for a flag */ +static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds) +{ + u32 status_reg_value; + unsigned long before; + int retval = 1; + + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + before = jiffies; + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + while 
((status_reg_value & flag) != flag) { + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + if (time_after(jiffies, before + HZ * seconds)) + goto out; + + msleep(50); + } + retval = 0; +out: + return retval; +} /* End twa_poll_status() */ + +/* This function will poll the status register for disappearance of a flag */ +static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds) +{ + u32 status_reg_value; + unsigned long before; + int retval = 1; + + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + before = jiffies; + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + while ((status_reg_value & flag) != 0) { + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + if (time_after(jiffies, before + HZ * seconds)) + goto out; + + msleep(50); + } + retval = 0; +out: + return retval; +} /* End twa_poll_status_gone() */ + +/* This function will attempt to post a command packet to the board */ +static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal) +{ + u32 status_reg_value; + unsigned long command_que_value; + int retval = 1; + + command_que_value = tw_dev->command_packet_phys[request_id]; + status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev)); + + if (twa_check_bits(status_reg_value)) + twa_decode_bits(tw_dev, status_reg_value); + + if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) { + + /* Only pend internal driver commands */ + if (!internal) { + retval = SCSI_MLQUEUE_HOST_BUSY; + goto out; + } + + /* Couldn't post the command packet, so we do it later */ + if (tw_dev->state[request_id] != TW_S_PENDING) { + tw_dev->state[request_id] = TW_S_PENDING; + tw_dev->pending_request_count++; + if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) { + tw_dev->max_pending_request_count = tw_dev->pending_request_count; + } + tw_dev->pending_queue[tw_dev->pending_tail] = request_id; + tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH; + } + TW_UNMASK_COMMAND_INTERRUPT(tw_dev); + goto out; + } else { + /* We successfully posted the command packet */ +#if BITS_PER_LONG > 32 + writeq(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); +#else + writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev)); +#endif + tw_dev->state[request_id] = TW_S_POSTED; + tw_dev->posted_request_count++; + if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) { + tw_dev->max_posted_request_count = tw_dev->posted_request_count; + } + } + retval = 0; +out: + return retval; +} /* End twa_post_command_packet() */ + +/* This function will reset a device extension */ +static int twa_reset_device_extension(TW_Device_Extension *tw_dev) +{ + int i = 0; + int retval = 1; + + /* Abort all requests that are in progress */ + for (i = 0; i < TW_Q_LENGTH; i++) { + if ((tw_dev->state[i] != TW_S_FINISHED) && + (tw_dev->state[i] != TW_S_INITIAL) && + (tw_dev->state[i] != TW_S_COMPLETED)) { + if (tw_dev->srb[i]) { + tw_dev->srb[i]->result = (DID_RESET << 16); + tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); + twa_unmap_scsi_data(tw_dev, i); + } + } + } + + /* Reset queues and counts */ + for (i = 0; i < TW_Q_LENGTH; i++) { + tw_dev->free_queue[i] = i; + tw_dev->state[i] = 
TW_S_INITIAL; + } + tw_dev->free_head = TW_Q_START; + tw_dev->free_tail = TW_Q_START; + tw_dev->posted_request_count = 0; + tw_dev->pending_request_count = 0; + tw_dev->pending_head = TW_Q_START; + tw_dev->pending_tail = TW_Q_START; + tw_dev->reset_print = 0; + tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE; + tw_dev->flags = 0; + + TW_DISABLE_INTERRUPTS(tw_dev); + + if (twa_reset_sequence(tw_dev, 1)) + goto out; + + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); + + retval = 0; +out: + return retval; +} /* End twa_reset_device_extension() */ + +/* This function will reset a controller */ +static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset) +{ + int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset; + + while (tries < TW_MAX_RESET_TRIES) { + if (do_soft_reset) + TW_SOFT_RESET(tw_dev); + + /* Make sure controller is in a good state */ + if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 30)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + /* Empty response queue */ + if (twa_empty_response_queue(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + flashed = 0; + + /* Check for compatibility/flash */ + if (twa_check_srl(tw_dev, &flashed)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } else { + if (flashed) { + tries++; + continue; + } + } + + /* Drain the AEN queue */ + if (twa_aen_drain_queue(tw_dev, soft_reset)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence"); + do_soft_reset = 1; + tries++; + continue; + } + + /* If we got here, controller is in a good state */ + retval = 0; + goto out; + } +out: + return retval; +} /* End twa_reset_sequence() */ + +/* This funciton returns unit geometry in cylinders/heads/sectors */ +static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) +{ + int heads, sectors, cylinders; + TW_Device_Extension *tw_dev; + + tw_dev = (TW_Device_Extension *)sdev->host->hostdata; + + if (capacity >= 0x200000) { + heads = 255; + sectors = 63; + cylinders = sector_div(capacity, heads * sectors); + } else { + heads = 64; + sectors = 32; + cylinders = sector_div(capacity, heads * sectors); + } + + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return 0; +} /* End twa_scsi_biosparam() */ + +/* This is the new scsi eh abort function */ +static int twa_scsi_eh_abort(struct scsi_cmnd *SCpnt) +{ + int i; + TW_Device_Extension *tw_dev = NULL; + int retval = FAILED; + + tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + spin_unlock_irq(tw_dev->host->host_lock); + + tw_dev->num_aborts++; + + /* If we find any IO's in process, we have to reset the card */ + for (i = 0; i < TW_Q_LENGTH; i++) { + if ((tw_dev->state[i] != TW_S_FINISHED) && (tw_dev->state[i] != TW_S_INITIAL)) { + printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Unit #%d: Command (0x%x) timed out, resetting card.\n", + tw_dev->host->host_no, TW_DRIVER, 0x2c, + SCpnt->device->id, SCpnt->cmnd[0]); + if (twa_reset_device_extension(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2a, "Controller reset failed during scsi abort"); + goto out; + } + break; + } + } + retval = SUCCESS; 
+out: + spin_lock_irq(tw_dev->host->host_lock); + return retval; +} /* End twa_scsi_eh_abort() */ + +/* This is the new scsi eh reset function */ +static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt) +{ + TW_Device_Extension *tw_dev = NULL; + int retval = FAILED; + + tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + spin_unlock_irq(tw_dev->host->host_lock); + + tw_dev->num_resets++; + + printk(KERN_WARNING "3w-9xxx: scsi%d: SCSI host reset started.\n", tw_dev->host->host_no); + + /* Now reset the card and some of the device extension data */ + if (twa_reset_device_extension(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset"); + goto out; + } + printk(KERN_WARNING "3w-9xxx: scsi%d: SCSI host reset succeeded.\n", tw_dev->host->host_no); + retval = SUCCESS; +out: + spin_lock_irq(tw_dev->host->host_lock); + return retval; +} /* End twa_scsi_eh_reset() */ + +/* This is the main scsi queue function to handle scsi opcodes */ +static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +{ + int request_id, retval; + TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; + + /* Save done function into scsi_cmnd struct */ + SCpnt->scsi_done = done; + + /* Get a free request id */ + twa_get_request_id(tw_dev, &request_id); + + /* Save the scsi command for use by the ISR */ + tw_dev->srb[request_id] = SCpnt; + + /* Initialize phase to zero */ + SCpnt->SCp.phase = TW_PHASE_INITIAL; + + retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); + switch (retval) { + case SCSI_MLQUEUE_HOST_BUSY: + twa_free_request_id(tw_dev, request_id); + break; + case 1: + tw_dev->state[request_id] = TW_S_COMPLETED; + twa_free_request_id(tw_dev, request_id); + SCpnt->result = (DID_ERROR << 16); + done(SCpnt); + } + + return retval; +} /* End twa_scsi_queue() */ + +/* This function hands scsi cdb's to the firmware */ +static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Apache *sglistarg) +{ + TW_Command_Full *full_command_packet; + TW_Command_Apache *command_packet; + u32 num_sectors = 0x0; + int i, sg_count; + struct scsi_cmnd *srb = NULL; + struct scatterlist *sglist = NULL; + u32 buffaddr = 0x0; + int retval = 1; + + if (tw_dev->srb[request_id]) { + if (tw_dev->srb[request_id]->request_buffer) { + sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer; + } + srb = tw_dev->srb[request_id]; + } + + /* Initialize command packet */ + full_command_packet = tw_dev->command_packet_virt[request_id]; + full_command_packet->header.header_desc.size_header = 128; + full_command_packet->header.status_block.error = 0; + full_command_packet->header.status_block.severity__reserved = 0; + + command_packet = &full_command_packet->command.newcommand; + command_packet->status = 0; + command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI); + + /* We forced 16 byte cdb use earlier */ + if (!cdb) + memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN); + else + memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN); + + if (srb) + command_packet->unit = srb->device->id; + else + command_packet->unit = 0; + + command_packet->request_id = request_id; + command_packet->sgl_offset = 16; + + if (!sglistarg) { + /* Map sglist from scsi layer to cmd packet */ + if (tw_dev->srb[request_id]->use_sg == 0) { + if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH) { + command_packet->sg_list[0].address = 
tw_dev->generic_buffer_phys[request_id]; + command_packet->sg_list[0].length = TW_MIN_SGL_LENGTH; + } else { + buffaddr = twa_map_scsi_single_data(tw_dev, request_id); + if (buffaddr == 0) + goto out; + + command_packet->sg_list[0].address = buffaddr; + command_packet->sg_list[0].length = tw_dev->srb[request_id]->request_bufflen; + } + command_packet->sgl_entries = 1; + + if (command_packet->sg_list[0].address & TW_ALIGNMENT_9000_SGL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2d, "Found unaligned address during execute scsi"); + goto out; + } + } + + if (tw_dev->srb[request_id]->use_sg > 0) { + sg_count = twa_map_scsi_sg_data(tw_dev, request_id); + if (sg_count == 0) + goto out; + + for (i = 0; i < sg_count; i++) { + command_packet->sg_list[i].address = sg_dma_address(&sglist[i]); + command_packet->sg_list[i].length = sg_dma_len(&sglist[i]); + if (command_packet->sg_list[i].address & TW_ALIGNMENT_9000_SGL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi"); + goto out; + } + } + command_packet->sgl_entries = tw_dev->srb[request_id]->use_sg; + } + } else { + /* Internal cdb post */ + for (i = 0; i < use_sg; i++) { + command_packet->sg_list[i].address = sglistarg[i].address; + command_packet->sg_list[i].length = sglistarg[i].length; + if (command_packet->sg_list[i].address & TW_ALIGNMENT_9000_SGL) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post"); + goto out; + } + } + command_packet->sgl_entries = use_sg; + } + + if (srb) { + if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6) + num_sectors = (u32)srb->cmnd[4]; + + if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) + num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8); + } + + /* Update sector statistic */ + tw_dev->sector_count = num_sectors; + if (tw_dev->sector_count > tw_dev->max_sector_count) + tw_dev->max_sector_count = tw_dev->sector_count; + + /* Update SG statistics */ + if (srb) { + tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg; + if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) + tw_dev->max_sgl_entries = tw_dev->sgl_entries; + } + + /* Now post the command to the board */ + if (srb) { + retval = twa_post_command_packet(tw_dev, request_id, 0); + } else { + twa_post_command_packet(tw_dev, request_id, 1); + retval = 0; + } +out: + return retval; +} /* End twa_scsiop_execute_scsi() */ + +/* This function completes an execute scsi operation */ +static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id) +{ + /* Copy the response if too small */ + if ((tw_dev->srb[request_id]->request_buffer) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) { + memcpy(tw_dev->srb[request_id]->request_buffer, + tw_dev->generic_buffer_virt[request_id], + tw_dev->srb[request_id]->request_bufflen); + } +} /* End twa_scsiop_execute_scsi_complete() */ + +/* This function tells the controller to shut down */ +static void __twa_shutdown(TW_Device_Extension *tw_dev) +{ + /* Disable interrupts */ + TW_DISABLE_INTERRUPTS(tw_dev); + + printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no); + + /* Tell the card we are shutting down */ + if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed"); + } else { + printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n"); + } + + /* Clear all interrupts just before exit */ + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); +} /* End __twa_shutdown() */ 
+ +/* Wrapper for __twa_shutdown */ +static void twa_shutdown(struct device *dev) +{ + struct Scsi_Host *host = pci_get_drvdata(to_pci_dev(dev)); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + __twa_shutdown(tw_dev); +} /* End twa_shutdown() */ + +/* This function will look up a string */ +static char *twa_string_lookup(twa_message_type *table, unsigned int code) +{ + int index; + + for (index = 0; ((code != table[index].code) && + (table[index].text != (char *)0)); index++); + return(table[index].text); +} /* End twa_string_lookup() */ + +/* This function will perform a pci-dma unmap */ +static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) +{ + struct scsi_cmnd *cmd = tw_dev->srb[request_id]; + struct pci_dev *pdev = tw_dev->tw_pci_dev; + + switch(cmd->SCp.phase) { + case TW_PHASE_SINGLE: + pci_unmap_single(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL); + break; + case TW_PHASE_SGLIST: + pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL); + break; + } +} /* End twa_unmap_scsi_data() */ + +/* scsi_host_template initializer */ +static struct scsi_host_template driver_template = { + .module = THIS_MODULE, + .name = "3ware 9000 Storage Controller", + .queuecommand = twa_scsi_queue, + .eh_abort_handler = twa_scsi_eh_abort, + .eh_host_reset_handler = twa_scsi_eh_reset, + .bios_param = twa_scsi_biosparam, + .can_queue = TW_Q_LENGTH-2, + .this_id = -1, + .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH, + .max_sectors = TW_MAX_SECTORS, + .cmd_per_lun = TW_MAX_CMDS_PER_LUN, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = twa_host_attrs, + .sdev_attrs = twa_dev_attrs, + .emulated = 1 +}; + +/* This function will probe and initialize a card */ +static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) +{ + struct Scsi_Host *host = NULL; + TW_Device_Extension *tw_dev; + u32 mem_addr; + int retval = -ENODEV; + + retval = pci_enable_device(pdev); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device"); + goto out_disable_device; + } + + pci_set_master(pdev); + + retval = pci_set_dma_mask(pdev, TW_DMA_MASK); + if (retval) { + TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask"); + goto out_disable_device; + } + + host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension)); + if (!host) { + TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension"); + retval = -ENOMEM; + goto out_disable_device; + } + tw_dev = (TW_Device_Extension *)host->hostdata; + + memset(tw_dev, 0, sizeof(TW_Device_Extension)); + + /* Save values to device extension */ + tw_dev->host = host; + tw_dev->tw_pci_dev = pdev; + + if (twa_initialize_device_extension(tw_dev)) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension"); + goto out_free_device_extension; + } + + /* Request IO regions */ + retval = pci_request_regions(pdev, "3w-9xxx"); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region"); + goto out_free_device_extension; + } + + mem_addr = pci_resource_start(pdev, 1); + + /* Save base address */ + tw_dev->base_addr = ioremap(mem_addr, PAGE_SIZE); + if (!tw_dev->base_addr) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap"); + goto out_release_mem_region; + } + + /* Disable interrupts on the card */ + TW_DISABLE_INTERRUPTS(tw_dev); + + /* Initialize the card */ + if (twa_reset_sequence(tw_dev, 0)) + goto out_release_mem_region; + + /* Set host specific parameters 
*/ + host->max_id = TW_MAX_UNITS; + host->max_cmd_len = TW_MAX_CDB_LEN; + + /* Luns and channels aren't supported by adapter */ + host->max_lun = 0; + host->max_channel = 0; + + /* Register the card with the kernel SCSI layer */ + retval = scsi_add_host(host, &pdev->dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed"); + goto out_release_mem_region; + } + + pci_set_drvdata(pdev, host); + + printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%x, IRQ: %d.\n", + host->host_no, mem_addr, pdev->irq); + printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n", + host->host_no, + (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE, + TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH), + (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE, + TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH), + *(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE, + TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)); + + /* Now setup the interrupt handler */ + retval = request_irq(pdev->irq, twa_interrupt, SA_SHIRQ, "3w-9xxx", tw_dev); + if (retval) { + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ"); + goto out_remove_host; + } + + twa_device_extension_list[twa_device_extension_count] = tw_dev; + twa_device_extension_count++; + + /* Re-enable interrupts on the card */ + TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev); + + /* Finally, scan the host */ + scsi_scan_host(host); + + if (twa_major == -1) { + if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0) + TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device"); + } + return 0; + +out_remove_host: + scsi_remove_host(host); +out_release_mem_region: + pci_release_regions(pdev); +out_free_device_extension: + twa_free_device_extension(tw_dev); + scsi_host_put(host); +out_disable_device: + pci_disable_device(pdev); + + return retval; +} /* End twa_probe() */ + +/* This function is called to remove a device */ +static void twa_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *host = pci_get_drvdata(pdev); + TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; + + scsi_remove_host(tw_dev->host); + + __twa_shutdown(tw_dev); + + /* Free up the IRQ */ + free_irq(tw_dev->tw_pci_dev->irq, tw_dev); + + /* Free up the mem region */ + pci_release_regions(pdev); + + /* Free up device extension resources */ + twa_free_device_extension(tw_dev); + + /* Unregister character device */ + if (twa_major >= 0) { + unregister_chrdev(twa_major, "twa"); + twa_major = -1; + } + + scsi_host_put(tw_dev->host); + pci_disable_device(pdev); + twa_device_extension_count--; +} /* End twa_remove() */ + +/* PCI Devices supported by this driver */ +static struct pci_device_id twa_pci_tbl[] __devinitdata = { + { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + { } +}; +MODULE_DEVICE_TABLE(pci, twa_pci_tbl); + +/* pci_driver initializer */ +static struct pci_driver twa_driver = { + .name = "3w-9xxx", + .id_table = twa_pci_tbl, + .probe = twa_probe, + .remove = twa_remove, + .driver = { + .shutdown = twa_shutdown + } +}; + +/* This function is called on driver initialization */ +static int __init twa_init(void) +{ + printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", twa_driver_version); + + return pci_module_init(&twa_driver); +} /* End twa_init() */ + +/* This function is called on driver exit */ +static void __exit twa_exit(void) +{ + pci_unregister_driver(&twa_driver); +} /* End twa_exit() */ + +module_init(twa_init); 
+module_exit(twa_exit); + diff --git a/drivers/scsi/fdomain.h b/drivers/scsi/fdomain.h new file mode 100644 index 000000000..47021d9d4 --- /dev/null +++ b/drivers/scsi/fdomain.h @@ -0,0 +1,24 @@ +/* + * fdomain.c -- Future Domain TMC-16x0 SCSI driver + * Author: Rickard E. Faith, faith@cs.unc.edu + * Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org) + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +extern struct scsi_host_template fdomain_driver_template; +extern int fdomain_setup(char *str); +extern struct Scsi_Host *__fdomain_16x0_detect(struct scsi_host_template *tpnt ); +extern int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt); diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c new file mode 100644 index 000000000..bf1932e49 --- /dev/null +++ b/drivers/scsi/sata_nv.c @@ -0,0 +1,355 @@ +/* + * sata_nv.c - NVIDIA nForce SATA + * + * Copyright 2004 NVIDIA Corp. All rights reserved. + * Copyright 2004 Andrew Chew + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. + * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. + * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include +#include + +#define DRV_NAME "sata_nv" +#define DRV_VERSION "0.01" + +#define NV_PORTS 2 +#define NV_PIO_MASK 0x1f +#define NV_UDMA_MASK 0x7f +#define NV_PORT0_BMDMA_REG_OFFSET 0x00 +#define NV_PORT1_BMDMA_REG_OFFSET 0x08 +#define NV_PORT0_SCR_REG_OFFSET 0x00 +#define NV_PORT1_SCR_REG_OFFSET 0x40 + +#define NV_INT_STATUS 0x10 +#define NV_INT_STATUS_PDEV_INT 0x01 +#define NV_INT_STATUS_PDEV_PM 0x02 +#define NV_INT_STATUS_PDEV_ADDED 0x04 +#define NV_INT_STATUS_PDEV_REMOVED 0x08 +#define NV_INT_STATUS_SDEV_INT 0x10 +#define NV_INT_STATUS_SDEV_PM 0x20 +#define NV_INT_STATUS_SDEV_ADDED 0x40 +#define NV_INT_STATUS_SDEV_REMOVED 0x80 +#define NV_INT_STATUS_PDEV_HOTPLUG (NV_INT_STATUS_PDEV_ADDED | \ + NV_INT_STATUS_PDEV_REMOVED) +#define NV_INT_STATUS_SDEV_HOTPLUG (NV_INT_STATUS_SDEV_ADDED | \ + NV_INT_STATUS_SDEV_REMOVED) +#define NV_INT_STATUS_HOTPLUG (NV_INT_STATUS_PDEV_HOTPLUG | \ + NV_INT_STATUS_SDEV_HOTPLUG) + +#define NV_INT_ENABLE 0x11 +#define NV_INT_ENABLE_PDEV_MASK 0x01 +#define NV_INT_ENABLE_PDEV_PM 0x02 +#define NV_INT_ENABLE_PDEV_ADDED 0x04 +#define NV_INT_ENABLE_PDEV_REMOVED 0x08 +#define NV_INT_ENABLE_SDEV_MASK 0x10 +#define NV_INT_ENABLE_SDEV_PM 0x20 +#define NV_INT_ENABLE_SDEV_ADDED 0x40 +#define NV_INT_ENABLE_SDEV_REMOVED 0x80 +#define NV_INT_ENABLE_PDEV_HOTPLUG (NV_INT_ENABLE_PDEV_ADDED | \ + NV_INT_ENABLE_PDEV_REMOVED) +#define NV_INT_ENABLE_SDEV_HOTPLUG (NV_INT_ENABLE_SDEV_ADDED | \ + NV_INT_ENABLE_SDEV_REMOVED) +#define NV_INT_ENABLE_HOTPLUG (NV_INT_ENABLE_PDEV_HOTPLUG | \ + NV_INT_ENABLE_SDEV_HOTPLUG) + +#define NV_INT_CONFIG 0x12 +#define NV_INT_CONFIG_METHD 0x01 // 0 = INT, 1 = SMI + +static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +irqreturn_t nv_interrupt (int irq, void *dev_instance, struct pt_regs *regs); +static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg); +static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static void nv_host_stop (struct ata_host_set *host_set); + +static struct pci_device_id nv_pci_tbl[] = { + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA, + PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2, + PCI_ANY_ID, PCI_ANY_ID, }, + { 0, } /* terminate list */ +}; + +static struct pci_driver nv_pci_driver = { + .name = DRV_NAME, + .id_table = nv_pci_tbl, + .probe = nv_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template nv_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = ATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .dma_boundary = ATA_DMA_BOUNDARY, + .slave_configure = ata_scsi_slave_config, + .bios_param = ata_std_bios_param, +}; + 
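+/* Port operations for the nForce SATA controller: generic libata PIO taskfile and BMDMA helpers, with the controller-specific pieces (nv_interrupt, nv_scr_read/nv_scr_write, nv_host_stop) defined below. */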
+static struct ata_port_operations nv_ops = { + .port_disable = ata_port_disable, + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + .exec_command = ata_exec_command_pio, + .check_status = ata_check_status_pio, + .phy_reset = sata_phy_reset, + .bmdma_setup = ata_bmdma_setup_pio, + .bmdma_start = ata_bmdma_start_pio, + .qc_prep = ata_qc_prep, + .qc_issue = ata_qc_issue_prot, + .eng_timeout = ata_eng_timeout, + .irq_handler = nv_interrupt, + .irq_clear = ata_bmdma_irq_clear, + .scr_read = nv_scr_read, + .scr_write = nv_scr_write, + .port_start = ata_port_start, + .port_stop = ata_port_stop, + .host_stop = nv_host_stop, +}; + +MODULE_AUTHOR("NVIDIA"); +MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, nv_pci_tbl); + +irqreturn_t nv_interrupt (int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + unsigned int i; + unsigned int handled = 0; + unsigned long flags; + u8 intr_status; + u8 intr_enable; + + spin_lock_irqsave(&host_set->lock, flags); + + for (i = 0; i < host_set->n_ports; i++) { + struct ata_port *ap; + + ap = host_set->ports[i]; + if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && (!(qc->tf.ctl & ATA_NIEN))) + handled += ata_host_intr(ap, qc); + } + + intr_status = inb(ap->ioaddr.scr_addr + NV_INT_STATUS); + intr_enable = inb(ap->ioaddr.scr_addr + NV_INT_ENABLE); + + // Clear interrupt status. + outb(0xff, ap->ioaddr.scr_addr + NV_INT_STATUS); + + if (intr_status & NV_INT_STATUS_HOTPLUG) { + if (intr_status & NV_INT_STATUS_PDEV_ADDED) { + printk(KERN_WARNING "ata%u: " + "Primary device added\n", ap->id); + } + + if (intr_status & NV_INT_STATUS_PDEV_REMOVED) { + printk(KERN_WARNING "ata%u: " + "Primary device removed\n", ap->id); + } + + if (intr_status & NV_INT_STATUS_SDEV_ADDED) { + printk(KERN_WARNING "ata%u: " + "Secondary device added\n", ap->id); + } + + if (intr_status & NV_INT_STATUS_SDEV_REMOVED) { + printk(KERN_WARNING "ata%u: " + "Secondary device removed\n", ap->id); + } + } + } + + spin_unlock_irqrestore(&host_set->lock, flags); + + return IRQ_RETVAL(handled); +} + +static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + if (sc_reg > SCR_CONTROL) + return 0xffffffffU; + + return inl(ap->ioaddr.scr_addr + (sc_reg * 4)); +} + +static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +{ + if (sc_reg > SCR_CONTROL) + return; + + outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)); +} + +static void nv_host_stop (struct ata_host_set *host_set) +{ + int i; + + for (i = 0; i < host_set->n_ports; i++) { + u8 intr_mask; + + // Disable hotplug event interrupts.
+ intr_mask = inb(host_set->ports[i]->ioaddr.scr_addr + + NV_INT_ENABLE); + intr_mask &= ~(NV_INT_ENABLE_HOTPLUG); + outb(intr_mask, host_set->ports[i]->ioaddr.scr_addr + + NV_INT_ENABLE); + } +} + +static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version = 0; + struct ata_probe_ent *probe_ent = NULL; + int i; + int rc; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + INIT_LIST_HEAD(&probe_ent->node); + + probe_ent->pdev = pdev; + probe_ent->sht = &nv_sht; + probe_ent->host_flags = ATA_FLAG_SATA | + ATA_FLAG_SATA_RESET | + ATA_FLAG_SRST | + ATA_FLAG_NO_LEGACY; + probe_ent->port_ops = &nv_ops; + probe_ent->n_ports = NV_PORTS; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->pio_mask = NV_PIO_MASK; + probe_ent->udma_mask = NV_UDMA_MASK; + + probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); + ata_std_ports(&probe_ent->port[0]); + probe_ent->port[0].altstatus_addr = + probe_ent->port[0].ctl_addr = + pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; + probe_ent->port[0].bmdma_addr = + pci_resource_start(pdev, 4) | NV_PORT0_BMDMA_REG_OFFSET; + probe_ent->port[0].scr_addr = + pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET; + + probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); + ata_std_ports(&probe_ent->port[1]); + probe_ent->port[1].altstatus_addr = + probe_ent->port[1].ctl_addr = + pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; + probe_ent->port[1].bmdma_addr = + pci_resource_start(pdev, 4) | NV_PORT1_BMDMA_REG_OFFSET; + probe_ent->port[1].scr_addr = + pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET; + + pci_set_master(pdev); + + rc = ata_device_add(probe_ent); + if (rc != NV_PORTS) + goto err_out_regions; + + // Enable hotplug event interrupts. + for (i = 0; i < probe_ent->n_ports; i++) { + u8 intr_mask; + + outb(NV_INT_STATUS_HOTPLUG, probe_ent->port[i].scr_addr + + NV_INT_STATUS); + + intr_mask = inb(probe_ent->port[i].scr_addr + NV_INT_ENABLE); + intr_mask |= NV_INT_ENABLE_HOTPLUG; + outb(intr_mask, probe_ent->port[i].scr_addr + NV_INT_ENABLE); + } + + kfree(probe_ent); + + return 0; + +err_out_regions: + pci_release_regions(pdev); + +err_out: + pci_disable_device(pdev); + return rc; +} + +static int __init nv_init(void) +{ + return pci_module_init(&nv_pci_driver); +} + +static void __exit nv_exit(void) +{ + pci_unregister_driver(&nv_pci_driver); +} + +module_init(nv_init); +module_exit(nv_exit); diff --git a/drivers/serial/cpm_uart/Makefile b/drivers/serial/cpm_uart/Makefile new file mode 100644 index 000000000..e072724ea --- /dev/null +++ b/drivers/serial/cpm_uart/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the CPM (SCC/SMC) serial port driver +# + +obj-$(CONFIG_SERIAL_CPM) += cpm_uart.o + +# Select the correct platform objects.
+cpm_uart-objs-$(CONFIG_CPM2) += cpm_uart_cpm2.o +cpm_uart-objs-$(CONFIG_8xx) += cpm_uart_cpm1.o + +cpm_uart-objs := cpm_uart_core.o $(cpm_uart-objs-y) diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c new file mode 100644 index 000000000..0612e9453 --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart_core.c @@ -0,0 +1,1179 @@ +/* + * linux/drivers/serial/cpm_uart.c + * + * Driver for CPM (SCC/SMC) serial ports; core driver + * + * Based on arch/ppc/cpm2_io/uart.c by Dan Malek + * Based on ppc8xx.c by Thomas Gleixner + * Based on drivers/serial/amba.c by Russell King + * + * Maintainer: Kumar Gala (kumar.gala@freescale.com) (CPM2) + * Pantelis Antoniou (panto@intracom.gr) (CPM1) + * + * Copyright (C) 2004 Freescale Semiconductor, Inc. + * (C) 2004 Intracom, S.A. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if defined(CONFIG_SERIAL_CPM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +#define SUPPORT_SYSRQ +#endif + +#include +#include + +#include "cpm_uart.h" + +/***********************************************************************/ + +/* Track which ports are configured as uarts */ +int cpm_uart_port_map[UART_NR]; +/* How many ports did we config as uarts */ +int cpm_uart_nr; + +/**************************************************************/ + +static int cpm_uart_tx_pump(struct uart_port *port); +static void cpm_uart_init_smc(struct uart_cpm_port *pinfo, int bits, u16 cval); +static void cpm_uart_init_scc(struct uart_cpm_port *pinfo, int sbits, u16 sval); + +/**************************************************************/ + +/* + * Check, if transmit buffers are processed +*/ +static unsigned int cpm_uart_tx_empty(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile cbd_t *bdp = pinfo->tx_bd_base; + int ret = 0; + + while (1) { + if (bdp->cbd_sc & BD_SC_READY) + break; + + if (bdp->cbd_sc & BD_SC_WRAP) { + ret = TIOCSER_TEMT; + break; + } + bdp++; + } + + pr_debug("CPM uart[%d]:tx_empty: %d\n", port->line, ret); + + return ret; +} + +static void cpm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + /* Whee. Do nothing. */ +} + +static unsigned int cpm_uart_get_mctrl(struct uart_port *port) +{ + /* Whee. Do nothing. 
*/ + return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS; +} + +/* + * Stop transmitter + */ +static void cpm_uart_stop_tx(struct uart_port *port, unsigned int tty_stop) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile smc_t *smcp = pinfo->smcp; + volatile scc_t *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:stop tx\n", port->line); + + if (IS_SMC(pinfo)) + smcp->smc_smcm &= ~SMCM_TX; + else + sccp->scc_sccm &= ~UART_SCCM_TX; +} + +/* + * Start transmitter + */ +static void cpm_uart_start_tx(struct uart_port *port, unsigned int tty_start) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile smc_t *smcp = pinfo->smcp; + volatile scc_t *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:start tx\n", port->line); + + /* if in the middle of discarding return */ + if (IS_DISCARDING(pinfo)) + return; + + if (IS_SMC(pinfo)) { + if (smcp->smc_smcm & SMCM_TX) + return; + } else { + if (sccp->scc_sccm & UART_SCCM_TX) + return; + } + + if (cpm_uart_tx_pump(port) != 0) { + if (IS_SMC(pinfo)) + smcp->smc_smcm |= SMCM_TX; + else + sccp->scc_sccm |= UART_SCCM_TX; + } +} + +/* + * Stop receiver + */ +static void cpm_uart_stop_rx(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile smc_t *smcp = pinfo->smcp; + volatile scc_t *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:stop rx\n", port->line); + + if (IS_SMC(pinfo)) + smcp->smc_smcm &= ~SMCM_RX; + else + sccp->scc_sccm &= ~UART_SCCM_RX; +} + +/* + * Enable Modem status interrupts + */ +static void cpm_uart_enable_ms(struct uart_port *port) +{ + pr_debug("CPM uart[%d]:enable ms\n", port->line); +} + +/* + * Generate a break. + */ +static void cpm_uart_break_ctl(struct uart_port *port, int break_state) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + int line = pinfo - cpm_uart_ports; + + pr_debug("CPM uart[%d]:break ctrl, break_state: %d\n", port->line, + break_state); + + if (break_state) + cpm_line_cr_cmd(line, CPM_CR_STOP_TX); + else + cpm_line_cr_cmd(line, CPM_CR_RESTART_TX); +} + +/* + * Transmit characters, refill buffer descriptor, if possible + */ +static void cpm_uart_int_tx(struct uart_port *port, struct pt_regs *regs) +{ + pr_debug("CPM uart[%d]:TX INT\n", port->line); + + cpm_uart_tx_pump(port); +} + +/* + * Receive characters + */ +static void cpm_uart_int_rx(struct uart_port *port, struct pt_regs *regs) +{ + int i; + unsigned char ch, *cp; + struct tty_struct *tty = port->info->tty; + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile cbd_t *bdp; + u16 status; + unsigned int flg; + + pr_debug("CPM uart[%d]:RX INT\n", port->line); + + /* Just loop through the closed BDs and copy the characters into + * the buffer. 
+ */ + bdp = pinfo->rx_cur; + for (;;) { + /* get status */ + status = bdp->cbd_sc; + /* If this one is empty, return happy */ + if (status & BD_SC_EMPTY) + break; + + /* get number of characters, and check spce in flip-buffer */ + i = bdp->cbd_datlen; + + /* If we have not enough room in tty flip buffer, then we try + * later, which will be the next rx-interrupt or a timeout + */ + if ((tty->flip.count + i) >= TTY_FLIPBUF_SIZE) { + tty->flip.work.func((void *)tty); + if ((tty->flip.count + i) >= TTY_FLIPBUF_SIZE) { + printk(KERN_WARNING "TTY_DONT_FLIP set\n"); + return; + } + } + + /* get pointer */ + cp = (unsigned char *)bus_to_virt(bdp->cbd_bufaddr); + + /* loop through the buffer */ + while (i-- > 0) { + ch = *cp++; + port->icount.rx++; + flg = TTY_NORMAL; + + if (status & + (BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV)) + goto handle_error; + if (uart_handle_sysrq_char(port, ch, regs)) + continue; + + error_return: + *tty->flip.char_buf_ptr++ = ch; + *tty->flip.flag_buf_ptr++ = flg; + tty->flip.count++; + + } /* End while (i--) */ + + /* This BD is ready to be used again. Clear status. get next */ + bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV); + bdp->cbd_sc |= BD_SC_EMPTY; + + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = pinfo->rx_bd_base; + else + bdp++; + } /* End for (;;) */ + + /* Write back buffer pointer */ + pinfo->rx_cur = (volatile cbd_t *) bdp; + + /* activate BH processing */ + tty_flip_buffer_push(tty); + + return; + + /* Error processing */ + + handle_error: + /* Statistics */ + if (status & BD_SC_BR) + port->icount.brk++; + if (status & BD_SC_PR) + port->icount.parity++; + if (status & BD_SC_FR) + port->icount.frame++; + if (status & BD_SC_OV) + port->icount.overrun++; + + /* Mask out ignored conditions */ + status &= port->read_status_mask; + + /* Handle the remaining ones */ + if (status & BD_SC_BR) + flg = TTY_BREAK; + else if (status & BD_SC_PR) + flg = TTY_PARITY; + else if (status & BD_SC_FR) + flg = TTY_FRAME; + + /* overrun does not affect the current character ! */ + if (status & BD_SC_OV) { + ch = 0; + flg = TTY_OVERRUN; + /* We skip this buffer */ + /* CHECK: Is really nothing senseful there */ + /* ASSUMPTION: it contains nothing valid */ + i = 0; + } +#ifdef SUPPORT_SYSRQ + port->sysrq = 0; +#endif + goto error_return; +} + +/* + * Asynchron mode interrupt handler + */ +static irqreturn_t cpm_uart_int(int irq, void *data, struct pt_regs *regs) +{ + u8 events; + struct uart_port *port = (struct uart_port *)data; + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + volatile smc_t *smcp = pinfo->smcp; + volatile scc_t *sccp = pinfo->sccp; + + pr_debug("CPM uart[%d]:IRQ\n", port->line); + + if (IS_SMC(pinfo)) { + events = smcp->smc_smce; + if (events & SMCM_BRKE) + uart_handle_break(port); + if (events & SMCM_RX) + cpm_uart_int_rx(port, regs); + if (events & SMCM_TX) + cpm_uart_int_tx(port, regs); + smcp->smc_smce = events; + } else { + events = sccp->scc_scce; + if (events & UART_SCCM_BRKE) + uart_handle_break(port); + if (events & UART_SCCM_RX) + cpm_uart_int_rx(port, regs); + if (events & UART_SCCM_TX) + cpm_uart_int_tx(port, regs); + sccp->scc_scce = events; + } + return (events) ? IRQ_HANDLED : IRQ_NONE; +} + +static int cpm_uart_startup(struct uart_port *port) +{ + int retval; + + pr_debug("CPM uart[%d]:startup\n", port->line); + + /* Install interrupt handler. 
*/ + retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port); + if (retval) + return retval; + + return 0; +} + +/* + * Shutdown the uart + */ +static void cpm_uart_shutdown(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + int line = pinfo - cpm_uart_ports; + + pr_debug("CPM uart[%d]:shutdown\n", port->line); + + /* free interrupt handler */ + free_irq(port->irq, port); + + /* If the port is not the console, disable Rx and Tx. */ + if (!(pinfo->flags & FLAG_CONSOLE)) { + /* Stop uarts */ + if (IS_SMC(pinfo)) { + volatile smc_t *smcp = pinfo->smcp; + smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX); + } else { + volatile scc_t *sccp = pinfo->sccp; + sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX); + } + + /* Shut them really down and reinit buffer descriptors */ + cpm_line_cr_cmd(line, CPM_CR_INIT_TRX); + } +} + +static void cpm_uart_set_termios(struct uart_port *port, + struct termios *termios, struct termios *old) +{ + int baud; + unsigned long flags; + u16 cval, scval; + int bits, sbits; + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + int line = pinfo - cpm_uart_ports; + volatile cbd_t *bdp; + + pr_debug("CPM uart[%d]:set_termios\n", port->line); + + spin_lock_irqsave(&port->lock, flags); + /* disable uart interrupts */ + if (IS_SMC(pinfo)) + pinfo->smcp->smc_smcm &= ~(SMCM_RX | SMCM_TX); + else + pinfo->sccp->scc_sccm &= ~(UART_SCCM_TX | UART_SCCM_RX); + pinfo->flags |= FLAG_DISCARDING; + spin_unlock_irqrestore(&port->lock, flags); + + /* if previous configuration exists wait for tx to finish */ + if (pinfo->baud != 0 && pinfo->bits != 0) { + + /* point to the last txed bd */ + bdp = pinfo->tx_cur; + if (bdp == pinfo->tx_bd_base) + bdp = pinfo->tx_bd_base + (pinfo->tx_nrfifos - 1); + else + bdp--; + + /* wait for it to be transmitted */ + while ((bdp->cbd_sc & BD_SC_READY) != 0) + schedule(); + + /* and delay for the hw fifo to drain */ + udelay((3 * 1000000 * pinfo->bits) / pinfo->baud); + } + + spin_lock_irqsave(&port->lock, flags); + + /* Send the CPM an initialize command. */ + cpm_line_cr_cmd(line, CPM_CR_STOP_TX); + + /* Stop uart */ + if (IS_SMC(pinfo)) + pinfo->smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + else + pinfo->sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + + /* Send the CPM an initialize command. */ + cpm_line_cr_cmd(line, CPM_CR_INIT_TRX); + + spin_unlock_irqrestore(&port->lock, flags); + + baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); + + /* Character length programmed into the mode register is the + * sum of: 1 start bit, number of data bits, 0 or 1 parity bit, + * 1 or 2 stop bits, minus 1. + * The value 'bits' counts this for us. 
+ */ + cval = 0; + scval = 0; + + /* byte size */ + switch (termios->c_cflag & CSIZE) { + case CS5: + bits = 5; + break; + case CS6: + bits = 6; + break; + case CS7: + bits = 7; + break; + case CS8: + bits = 8; + break; + /* Never happens, but GCC is too dumb to figure it out */ + default: + bits = 8; + break; + } + sbits = bits - 5; + + if (termios->c_cflag & CSTOPB) { + cval |= SMCMR_SL; /* Two stops */ + scval |= SCU_PSMR_SL; + bits++; + } + + if (termios->c_cflag & PARENB) { + cval |= SMCMR_PEN; + scval |= SCU_PSMR_PEN; + bits++; + if (!(termios->c_cflag & PARODD)) { + cval |= SMCMR_PM_EVEN; + scval |= (SCU_PSMR_REVP | SCU_PSMR_TEVP); + } + } + + /* + * Set up parity check flag + */ +#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) + + port->read_status_mask = (BD_SC_EMPTY | BD_SC_OV); + if (termios->c_iflag & INPCK) + port->read_status_mask |= BD_SC_FR | BD_SC_PR; + if ((termios->c_iflag & BRKINT) || (termios->c_iflag & PARMRK)) + port->read_status_mask |= BD_SC_BR; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= BD_SC_PR | BD_SC_FR; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= BD_SC_BR; + /* + * If we're ignore parity and break indicators, ignore + * overruns too. (For real raw support). + */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= BD_SC_OV; + } + /* + * !!! ignore all characters if CREAD is not set + */ + if ((termios->c_cflag & CREAD) == 0) + port->read_status_mask &= ~BD_SC_EMPTY; + + spin_lock_irqsave(&port->lock, flags); + + cpm_set_brg(pinfo->brg - 1, baud); + + /* Start bit has not been added (so don't, because we would just + * subtract it later), and we need to add one for the number of + * stops bits (there is always at least one). + */ + bits++; + + /* re-init */ + if (IS_SMC(pinfo)) + cpm_uart_init_smc(pinfo, bits, cval); + else + cpm_uart_init_scc(pinfo, sbits, scval); + + pinfo->baud = baud; + pinfo->bits = bits; + + pinfo->flags &= ~FLAG_DISCARDING; + spin_unlock_irqrestore(&port->lock, flags); + +} + +static const char *cpm_uart_type(struct uart_port *port) +{ + pr_debug("CPM uart[%d]:uart_type\n", port->line); + + return port->type == PORT_CPM ? "CPM UART" : NULL; +} + +/* + * verify the new serial_struct (for TIOCSSERIAL). + */ +static int cpm_uart_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + int ret = 0; + + pr_debug("CPM uart[%d]:verify_port\n", port->line); + + if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= NR_IRQS) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + return ret; +} + +/* + * Transmit characters, refill buffer descriptor, if possible + */ +static int cpm_uart_tx_pump(struct uart_port *port) +{ + volatile cbd_t *bdp; + unsigned char *p; + int count; + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + struct circ_buf *xmit = &port->info->xmit; + + /* Handle xon/xoff */ + if (port->x_char) { + /* Pick next descriptor and fill from buffer */ + bdp = pinfo->tx_cur; + + p = bus_to_virt(bdp->cbd_bufaddr); + *p++ = xmit->buf[xmit->tail]; + bdp->cbd_datlen = 1; + bdp->cbd_sc |= BD_SC_READY; + /* Get next BD. 
*/ + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = pinfo->tx_bd_base; + else + bdp++; + pinfo->tx_cur = bdp; + + port->icount.tx++; + port->x_char = 0; + return 1; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + cpm_uart_stop_tx(port, 0); + return 0; + } + + /* Pick next descriptor and fill from buffer */ + bdp = pinfo->tx_cur; + + while (!(bdp->cbd_sc & BD_SC_READY) && (xmit->tail != xmit->head)) { + count = 0; + p = bus_to_virt(bdp->cbd_bufaddr); + while (count < pinfo->tx_fifosize) { + *p++ = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + count++; + if (xmit->head == xmit->tail) + break; + } + bdp->cbd_datlen = count; + bdp->cbd_sc |= BD_SC_READY; + /* Get next BD. */ + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = pinfo->tx_bd_base; + else + bdp++; + } + pinfo->tx_cur = bdp; + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) { + cpm_uart_stop_tx(port, 0); + return 0; + } + + return 1; +} + +static void cpm_uart_init_scc(struct uart_cpm_port *pinfo, int bits, u16 scval) +{ + int line = pinfo - cpm_uart_ports; + volatile scc_t *scp; + volatile scc_uart_t *sup; + u8 *mem_addr; + volatile cbd_t *bdp; + int i; + + pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line); + + scp = pinfo->sccp; + sup = pinfo->sccup; + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. + */ + pinfo->rx_cur = pinfo->rx_bd_base; + mem_addr = pinfo->mem_addr; + for (bdp = pinfo->rx_bd_base, i = 0; i < pinfo->rx_nrfifos; i++, bdp++) { + bdp->cbd_bufaddr = virt_to_bus(mem_addr); + bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT | (i < (pinfo->rx_nrfifos - 1) ? 0 : BD_SC_WRAP); + mem_addr += pinfo->rx_fifosize; + } + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. + */ + mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize); + pinfo->tx_cur = pinfo->tx_bd_base; + for (bdp = pinfo->tx_bd_base, i = 0; i < pinfo->tx_nrfifos; i++, bdp++) { + bdp->cbd_bufaddr = virt_to_bus(mem_addr); + bdp->cbd_sc = BD_SC_INTRPT | (i < (pinfo->tx_nrfifos - 1) ? 0 : BD_SC_WRAP); + mem_addr += pinfo->tx_fifosize; + bdp++; + } + + /* Store address */ + pinfo->sccup->scc_genscc.scc_rbase = (unsigned char *)pinfo->rx_bd_base - DPRAM_BASE; + pinfo->sccup->scc_genscc.scc_tbase = (unsigned char *)pinfo->tx_bd_base - DPRAM_BASE; + + /* Set up the uart parameters in the + * parameter ram. + */ + + cpm_set_scc_fcr(sup); + + sup->scc_genscc.scc_mrblr = pinfo->rx_fifosize; + sup->scc_maxidl = pinfo->rx_fifosize; + sup->scc_brkcr = 1; + sup->scc_parec = 0; + sup->scc_frmec = 0; + sup->scc_nosec = 0; + sup->scc_brkec = 0; + sup->scc_uaddr1 = 0; + sup->scc_uaddr2 = 0; + sup->scc_toseq = 0; + sup->scc_char1 = 0x8000; + sup->scc_char2 = 0x8000; + sup->scc_char3 = 0x8000; + sup->scc_char4 = 0x8000; + sup->scc_char5 = 0x8000; + sup->scc_char6 = 0x8000; + sup->scc_char7 = 0x8000; + sup->scc_char8 = 0x8000; + sup->scc_rccm = 0xc0ff; + + /* Send the CPM an initialize command. + */ + cpm_line_cr_cmd(line, CPM_CR_INIT_TRX); + + /* Set UART mode, 8 bit, no parity, one stop. + * Enable receive and transmit. + */ + scp->scc_gsmrh = 0; + scp->scc_gsmrl = + (SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16); + + /* Enable rx interrupts and clear all pending events. 
*/ + scp->scc_sccm = UART_SCCM_RX; + scp->scc_scce = 0xffff; + scp->scc_dsr = 0x7e7e; + scp->scc_psmr = (bits << 12) | scval; + + scp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT); +} + +static void cpm_uart_init_smc(struct uart_cpm_port *pinfo, int bits, u16 cval) +{ + int line = pinfo - cpm_uart_ports; + volatile smc_t *sp; + volatile smc_uart_t *up; + volatile u8 *mem_addr; + volatile cbd_t *bdp; + int i; + + pr_debug("CPM uart[%d]:init_smc\n", pinfo->port.line); + + sp = pinfo->smcp; + up = pinfo->smcup; + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. + */ + mem_addr = pinfo->mem_addr; + pinfo->rx_cur = pinfo->rx_bd_base; + for (bdp = pinfo->rx_bd_base, i = 0; i < pinfo->rx_nrfifos; i++, bdp++) { + bdp->cbd_bufaddr = virt_to_bus(mem_addr); + bdp->cbd_sc = BD_SC_EMPTY | BD_SC_INTRPT | (i < (pinfo->rx_nrfifos - 1) ? 0 : BD_SC_WRAP); + mem_addr += pinfo->rx_fifosize; + } + + /* Set the physical address of the host memory + * buffers in the buffer descriptors, and the + * virtual address for us to work with. + */ + mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize); + pinfo->tx_cur = pinfo->tx_bd_base; + for (bdp = pinfo->tx_bd_base, i = 0; i < pinfo->tx_nrfifos; i++, bdp++) { + bdp->cbd_bufaddr = virt_to_bus(mem_addr); + bdp->cbd_sc = BD_SC_INTRPT | (i < (pinfo->tx_nrfifos - 1) ? 0 : BD_SC_WRAP); + mem_addr += pinfo->tx_fifosize; + } + + /* Store address */ + pinfo->smcup->smc_rbase = (u_char *)pinfo->rx_bd_base - DPRAM_BASE; + pinfo->smcup->smc_tbase = (u_char *)pinfo->tx_bd_base - DPRAM_BASE; + + /* Set up the uart parameters in the + * parameter ram. + */ + cpm_set_smc_fcr(up); + + /* Using idle charater time requires some additional tuning. */ + up->smc_mrblr = pinfo->rx_fifosize; + up->smc_maxidl = pinfo->rx_fifosize; + up->smc_brkcr = 1; + + cpm_line_cr_cmd(line, CPM_CR_INIT_TRX); + + /* Set UART mode, according to the parameters */ + sp->smc_smcmr = smcr_mk_clen(bits) | cval | SMCMR_SM_UART; + + /* Enable only rx interrupts clear all pending events. */ + sp->smc_smcm = SMCM_RX; + sp->smc_smce = 0xff; + + sp->smc_smcmr |= (SMCMR_REN | SMCMR_TEN); +} + +/* + * Initialize port. This is called from early_console stuff + * so we have to be careful here ! + */ +static int cpm_uart_request_port(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + int ret; + + pr_debug("CPM uart[%d]:request port\n", port->line); + + if (pinfo->flags & FLAG_CONSOLE) + return 0; + + /* + * Setup any port IO, connect any baud rate generators, + * etc. This is expected to be handled by board + * dependant code + */ + if (pinfo->set_lineif) + pinfo->set_lineif(pinfo); + + ret = cpm_uart_allocbuf(pinfo, 0); + if (ret) + return ret; + + return 0; +} + +static void cpm_uart_release_port(struct uart_port *port) +{ + struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; + + if (!(pinfo->flags & FLAG_CONSOLE)) + cpm_uart_freebuf(pinfo); +} + +/* + * Configure/autoconfigure the port. 
+ */ +static void cpm_uart_config_port(struct uart_port *port, int flags) +{ + pr_debug("CPM uart[%d]:config_port\n", port->line); + + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_CPM; + cpm_uart_request_port(port); + } +} +static struct uart_ops cpm_uart_pops = { + .tx_empty = cpm_uart_tx_empty, + .set_mctrl = cpm_uart_set_mctrl, + .get_mctrl = cpm_uart_get_mctrl, + .stop_tx = cpm_uart_stop_tx, + .start_tx = cpm_uart_start_tx, + .stop_rx = cpm_uart_stop_rx, + .enable_ms = cpm_uart_enable_ms, + .break_ctl = cpm_uart_break_ctl, + .startup = cpm_uart_startup, + .shutdown = cpm_uart_shutdown, + .set_termios = cpm_uart_set_termios, + .type = cpm_uart_type, + .release_port = cpm_uart_release_port, + .request_port = cpm_uart_request_port, + .config_port = cpm_uart_config_port, + .verify_port = cpm_uart_verify_port, +}; + +struct uart_cpm_port cpm_uart_ports[UART_NR] = { + [UART_SMC1] = { + .port = { + .irq = SMC1_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .flags = FLAG_SMC, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = smc1_lineif, + }, + [UART_SMC2] = { + .port = { + .irq = SMC2_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .flags = FLAG_SMC, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = smc2_lineif, + }, + [UART_SCC1] = { + .port = { + .irq = SCC1_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = scc1_lineif, + }, + [UART_SCC2] = { + .port = { + .irq = SCC2_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = scc2_lineif, + }, + [UART_SCC3] = { + .port = { + .irq = SCC3_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = scc3_lineif, + }, + [UART_SCC4] = { + .port = { + .irq = SCC4_IRQ, + .ops = &cpm_uart_pops, + .iotype = SERIAL_IO_MEM, + }, + .tx_nrfifos = TX_NUM_FIFO, + .tx_fifosize = TX_BUF_SIZE, + .rx_nrfifos = RX_NUM_FIFO, + .rx_fifosize = RX_BUF_SIZE, + .set_lineif = scc4_lineif, + }, +}; + +#ifdef CONFIG_SERIAL_CPM_CONSOLE +/* + * Print a string to the serial port trying not to disturb + * any possible real use of the port... + * + * Note that this is called with interrupts already disabled + */ +static void cpm_uart_console_write(struct console *co, const char *s, + u_int count) +{ + struct uart_cpm_port *pinfo = + &cpm_uart_ports[cpm_uart_port_map[co->index]]; + unsigned int i; + volatile cbd_t *bdp, *bdbase; + volatile unsigned char *cp; + + if (IS_DISCARDING(pinfo)) + return; + + /* Get the address of the host memory buffer. + */ + bdp = pinfo->tx_cur; + bdbase = pinfo->tx_bd_base; + + /* + * Now, do each character. This is not as bad as it looks + * since this is a holding FIFO and not a transmitting FIFO. + * We could add the complexity of filling the entire transmit + * buffer, but we would just wait longer between accesses...... + */ + for (i = 0; i < count; i++, s++) { + /* Wait for transmitter fifo to empty. + * Ready indicates output is ready, and xmt is doing + * that, not that it is ready for us to send. 
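+ * In other words, a set BD_SC_READY bit means the CPM still owns this
+ * descriptor, so we busy-wait until the CPM clears it after sending.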
+ */ + while ((bdp->cbd_sc & BD_SC_READY) != 0) + ; + + /* Send the character out. + * If the buffer address is in the CPM DPRAM, don't + * convert it. + */ + if ((uint) (bdp->cbd_bufaddr) > (uint) CPM_ADDR) + cp = (unsigned char *) (bdp->cbd_bufaddr); + else + cp = bus_to_virt(bdp->cbd_bufaddr); + + *cp = *s; + + bdp->cbd_datlen = 1; + bdp->cbd_sc |= BD_SC_READY; + + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = bdbase; + else + bdp++; + + /* if a LF, also do CR... */ + if (*s == 10) { + while ((bdp->cbd_sc & BD_SC_READY) != 0) + ; + + if ((uint) (bdp->cbd_bufaddr) > (uint) CPM_ADDR) + cp = (unsigned char *) (bdp->cbd_bufaddr); + else + cp = bus_to_virt(bdp->cbd_bufaddr); + + *cp = 13; + bdp->cbd_datlen = 1; + bdp->cbd_sc |= BD_SC_READY; + + if (bdp->cbd_sc & BD_SC_WRAP) + bdp = bdbase; + else + bdp++; + } + } + + /* + * Finally, Wait for transmitter & holding register to empty + * and restore the IER + */ + while ((bdp->cbd_sc & BD_SC_READY) != 0) + ; + + pinfo->tx_cur = (volatile cbd_t *) bdp; +} + +/* + * Setup console. Be careful is called early ! + */ +static int __init cpm_uart_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + struct uart_cpm_port *pinfo; + int baud = 38400; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + port = + (struct uart_port *)&cpm_uart_ports[cpm_uart_port_map[co->index]]; + pinfo = (struct uart_cpm_port *)port; + + pinfo->flags |= FLAG_CONSOLE; + + if (options) { + uart_parse_options(options, &baud, &parity, &bits, &flow); + } else { + bd_t *bd = (bd_t *) __res; + + if (bd->bi_baudrate) + baud = bd->bi_baudrate; + else + baud = 9600; + } + + /* + * Setup any port IO, connect any baud rate generators, + * etc. This is expected to be handled by board + * dependant code + */ + if (pinfo->set_lineif) + pinfo->set_lineif(pinfo); + + ret = cpm_uart_allocbuf(pinfo, 1); + if (ret) + return ret; + + uart_set_options(port, co, baud, parity, bits, flow); + + return 0; +} + +extern struct uart_driver cpm_reg; +static struct console cpm_scc_uart_console = { + .name "ttyCPM", + .write cpm_uart_console_write, + .device uart_console_device, + .setup cpm_uart_console_setup, + .flags CON_PRINTBUFFER, + .index -1, + .data = &cpm_reg, +}; + +int __init cpm_uart_console_init(void) +{ + int ret = cpm_uart_init_portdesc(); + + if (!ret) + register_console(&cpm_scc_uart_console); + return ret; +} + +console_initcall(cpm_uart_console_init); + +#define CPM_UART_CONSOLE &cpm_scc_uart_console +#else +#define CPM_UART_CONSOLE NULL +#endif + +static struct uart_driver cpm_reg = { + .owner = THIS_MODULE, + .driver_name = "ttyCPM", + .dev_name = "ttyCPM", + .major = SERIAL_CPM_MAJOR, + .minor = SERIAL_CPM_MINOR, + .cons = CPM_UART_CONSOLE, +}; + +static int __init cpm_uart_init(void) +{ + int ret, i; + + printk(KERN_INFO "Serial: CPM driver $Revision: 0.01 $\n"); + +#ifndef CONFIG_SERIAL_CPM_CONSOLE + ret = cpm_uart_init_portdesc(); + if (ret) + return ret; +#endif + + cpm_reg.nr = cpm_uart_nr; + ret = uart_register_driver(&cpm_reg); + + if (ret) + return ret; + + for (i = 0; i < cpm_uart_nr; i++) { + int con = cpm_uart_port_map[i]; + cpm_uart_ports[con].port.line = i; + cpm_uart_ports[con].port.flags = UPF_BOOT_AUTOCONF; + uart_add_one_port(&cpm_reg, &cpm_uart_ports[con].port); + } + + return ret; +} + +static void __exit cpm_uart_exit(void) +{ + int i; + + for (i = 0; i < cpm_uart_nr; i++) { + int con = cpm_uart_port_map[i]; + uart_remove_one_port(&cpm_reg, &cpm_uart_ports[con].port); + } + + uart_unregister_driver(&cpm_reg); +} + 
+module_init(cpm_uart_init); +module_exit(cpm_uart_exit); + +MODULE_AUTHOR("Kumar Gala/Antoniou Pantelis"); +MODULE_DESCRIPTION("CPM SCC/SMC port driver $Revision: 0.01 $"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_CHARDEV(SERIAL_CPM_MAJOR, SERIAL_CPM_MINOR); diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/serial/cpm_uart/cpm_uart_cpm1.c new file mode 100644 index 000000000..7c940b5b5 --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.c @@ -0,0 +1,275 @@ +/* + * linux/drivers/serial/cpm_uart.c + * + * Driver for CPM (SCC/SMC) serial ports; CPM1 definitions + * + * Maintainer: Kumar Gala (kumar.gala@freescale.com) (CPM2) + * Pantelis Antoniou (panto@intracom.gr) (CPM1) + * + * Copyright (C) 2004 Freescale Semiconductor, Inc. + * (C) 2004 Intracom, S.A. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "cpm_uart.h" + +/**************************************************************/ + +void cpm_line_cr_cmd(int line, int cmd) +{ + ushort val; + volatile cpm8xx_t *cp = cpmp; + + switch (line) { + case UART_SMC1: + val = mk_cr_cmd(CPM_CR_CH_SMC1, cmd) | CPM_CR_FLG; + break; + case UART_SMC2: + val = mk_cr_cmd(CPM_CR_CH_SMC2, cmd) | CPM_CR_FLG; + break; + case UART_SCC1: + val = mk_cr_cmd(CPM_CR_CH_SCC1, cmd) | CPM_CR_FLG; + break; + case UART_SCC2: + val = mk_cr_cmd(CPM_CR_CH_SCC2, cmd) | CPM_CR_FLG; + break; + case UART_SCC3: + val = mk_cr_cmd(CPM_CR_CH_SCC3, cmd) | CPM_CR_FLG; + break; + case UART_SCC4: + val = mk_cr_cmd(CPM_CR_CH_SCC4, cmd) | CPM_CR_FLG; + break; + default: + return; + + } + cp->cp_cpcr = val; + while (cp->cp_cpcr & CPM_CR_FLG) ; +} + +void smc1_lineif(struct uart_cpm_port *pinfo) +{ + volatile cpm8xx_t *cp = cpmp; + + cp->cp_pbpar |= 0x000000c0; + cp->cp_pbdir &= ~0x000000c0; + cp->cp_pbodr &= ~0x000000c0; + + pinfo->brg = 1; +} + +void smc2_lineif(struct uart_cpm_port *pinfo) +{ + /* XXX SMC2: insert port configuration here */ + pinfo->brg = 2; +} + +void scc1_lineif(struct uart_cpm_port *pinfo) +{ + /* XXX SCC1: insert port configuration here */ + pinfo->brg = 1; +} + +void scc2_lineif(struct uart_cpm_port *pinfo) +{ + /* XXX SCC2: insert port configuration here */ + pinfo->brg = 2; +} + +void scc3_lineif(struct uart_cpm_port *pinfo) +{ + /* XXX SCC3: insert port configuration here */ + pinfo->brg = 3; +} + +void scc4_lineif(struct uart_cpm_port *pinfo) +{ + /* XXX SCC4: insert port configuration here */ + pinfo->brg = 4; +} + +/* + * Allocate DP-Ram and memory buffers. We need to allocate a transmit and + * receive buffer descriptors from dual port ram, and a character + * buffer area from host mem. 
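+ * The descriptors are kept in dual-port RAM because that is where the
+ * communication processor looks for them; the character buffers only
+ * need to be DMA-visible host memory.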
If we are allocating for the console we need + * to do it from bootmem + */ +int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con) +{ + int dpmemsz, memsz; + u8 *dp_mem; + uint dp_addr; + u8 *mem_addr; + dma_addr_t dma_addr; + + pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line); + + dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos); + dp_mem = m8xx_cpm_dpalloc(dpmemsz); + if (dp_mem == NULL) { + printk(KERN_ERR + "cpm_uart_cpm1.c: could not allocate buffer descriptors\n"); + return -ENOMEM; + } + dp_addr = m8xx_cpm_dpram_offset(dp_mem); + + memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize); + if (is_con) { + mem_addr = (u8 *) m8xx_cpm_hostalloc(memsz); + dma_addr = 0; + } else + mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr, + GFP_KERNEL); + + if (mem_addr == NULL) { + m8xx_cpm_dpfree(dp_mem); + printk(KERN_ERR + "cpm_uart_cpm1.c: could not allocate coherent memory\n"); + return -ENOMEM; + } + + pinfo->dp_addr = dp_addr; + pinfo->mem_addr = mem_addr; + pinfo->dma_addr = dma_addr; + + pinfo->rx_buf = mem_addr; + pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos + * pinfo->rx_fifosize); + + pinfo->rx_bd_base = (volatile cbd_t *)dp_mem; + pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos; + + return 0; +} + +void cpm_uart_freebuf(struct uart_cpm_port *pinfo) +{ + dma_free_coherent(NULL, L1_CACHE_ALIGN(pinfo->rx_nrfifos * + pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * + pinfo->tx_fifosize), pinfo->mem_addr, + pinfo->dma_addr); + + m8xx_cpm_dpfree(m8xx_cpm_dpram_addr(pinfo->dp_addr)); +} + +/* Setup any dynamic params in the uart desc */ +int cpm_uart_init_portdesc(void) +{ + pr_debug("CPM uart[-]:init portdesc\n"); + + cpm_uart_nr = 0; +#ifdef CONFIG_SERIAL_CPM_SMC1 + cpm_uart_ports[UART_SMC1].smcp = &cpmp->cp_smc[0]; + cpm_uart_ports[UART_SMC1].smcup = + (smc_uart_t *) & cpmp->cp_dparam[PROFF_SMC1]; + cpm_uart_ports[UART_SMC1].port.mapbase = + (unsigned long)&cpmp->cp_smc[0]; + cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); + cpm_uart_ports[UART_SMC1].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + cpm_uart_ports[UART_SMC1].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1; +#endif + +#ifdef CONFIG_SERIAL_CPM_SMC2 + cpm_uart_ports[UART_SMC2].smcp = &cpmp->cp_smc[1]; + cpm_uart_ports[UART_SMC2].smcup = + (smc_uart_t *) & cpmp->cp_dparam[PROFF_SMC2]; + cpm_uart_ports[UART_SMC2].port.mapbase = + (unsigned long)&cpmp->cp_smc[1]; + cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); + cpm_uart_ports[UART_SMC2].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + cpm_uart_ports[UART_SMC2].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC1 + cpm_uart_ports[UART_SCC1].sccp = &cpmp->cp_scc[0]; + cpm_uart_ports[UART_SCC1].sccup = + (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC1]; + cpm_uart_ports[UART_SCC1].port.mapbase = + (unsigned long)&cpmp->cp_scc[0]; + cpm_uart_ports[UART_SCC1].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC1].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC1].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC2 + cpm_uart_ports[UART_SCC2].sccp = &cpmp->cp_scc[1]; + cpm_uart_ports[UART_SCC2].sccup = + (scc_uart_t *) & 
cpmp->cp_dparam[PROFF_SCC2]; + cpm_uart_ports[UART_SCC2].port.mapbase = + (unsigned long)&cpmp->cp_scc[1]; + cpm_uart_ports[UART_SCC2].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC2].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC2].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC3 + cpm_uart_ports[UART_SCC3].sccp = &cpmp->cp_scc[2]; + cpm_uart_ports[UART_SCC3].sccup = + (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC3]; + cpm_uart_ports[UART_SCC3].port.mapbase = + (unsigned long)&cpmp->cp_scc[2]; + cpm_uart_ports[UART_SCC3].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC3].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC3].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC4 + cpm_uart_ports[UART_SCC4].sccp = &cpmp->cp_scc[3]; + cpm_uart_ports[UART_SCC4].sccup = + (scc_uart_t *) & cpmp->cp_dparam[PROFF_SCC4]; + cpm_uart_ports[UART_SCC4].port.mapbase = + (unsigned long)&cpmp->cp_scc[3]; + cpm_uart_ports[UART_SCC4].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC4].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC4].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4; +#endif + return 0; +} diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/serial/cpm_uart/cpm_uart_cpm1.h new file mode 100644 index 000000000..155050b7c --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart_cpm1.h @@ -0,0 +1,45 @@ +/* + * linux/drivers/serial/cpm_uart_cpm1.h + * + * Driver for CPM (SCC/SMC) serial ports + * + * definitions for cpm1 + * + */ + +#ifndef CPM_UART_CPM1_H +#define CPM_UART_CPM1_H + +#include + +/* defines for IRQs */ +#define SMC1_IRQ (CPM_IRQ_OFFSET + CPMVEC_SMC1) +#define SMC2_IRQ (CPM_IRQ_OFFSET + CPMVEC_SMC2) +#define SCC1_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC1) +#define SCC2_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC2) +#define SCC3_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC3) +#define SCC4_IRQ (CPM_IRQ_OFFSET + CPMVEC_SCC4) + +/* the CPM address */ +#define CPM_ADDR IMAP_ADDR + +static inline void cpm_set_brg(int brg, int baud) +{ + m8xx_cpm_setbrg(brg, baud); +} + +static inline void cpm_set_scc_fcr(volatile scc_uart_t * sup) +{ + sup->scc_genscc.scc_rfcr = SMC_EB; + sup->scc_genscc.scc_tfcr = SMC_EB; +} + +static inline void cpm_set_smc_fcr(volatile smc_uart_t * up) +{ + up->smc_rfcr = SMC_EB; + up->smc_tfcr = SMC_EB; +} + +#define DPRAM_BASE ((unsigned char *)&cpmp->cp_dpmem[0]) + +#endif diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c new file mode 100644 index 000000000..d2566889d --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c @@ -0,0 +1,328 @@ +/* + * linux/drivers/serial/cpm_uart_cpm2.c + * + * Driver for CPM (SCC/SMC) serial ports; CPM2 definitions + * + * Maintainer: Kumar Gala (kumar.gala@freescale.com) (CPM2) + * Pantelis Antoniou (panto@intracom.gr) (CPM1) + * + * Copyright (C) 2004 Freescale Semiconductor, Inc. + * (C) 2004 Intracom, S.A. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include "cpm_uart.h" + +/**************************************************************/ + +void cpm_line_cr_cmd(int line, int cmd) +{ + volatile cpm_cpm2_t *cp = cpmp; + ulong val; + + switch (line) { + case UART_SMC1: + val = mk_cr_cmd(CPM_CR_SMC1_PAGE, CPM_CR_SMC1_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + case UART_SMC2: + val = mk_cr_cmd(CPM_CR_SMC2_PAGE, CPM_CR_SMC2_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + case UART_SCC1: + val = mk_cr_cmd(CPM_CR_SCC1_PAGE, CPM_CR_SCC1_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + case UART_SCC2: + val = mk_cr_cmd(CPM_CR_SCC2_PAGE, CPM_CR_SCC2_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + case UART_SCC3: + val = mk_cr_cmd(CPM_CR_SCC3_PAGE, CPM_CR_SCC3_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + case UART_SCC4: + val = mk_cr_cmd(CPM_CR_SCC4_PAGE, CPM_CR_SCC4_SBLOCK, 0, + cmd) | CPM_CR_FLG; + break; + default: + return; + + } + cp->cp_cpcr = val; + while (cp->cp_cpcr & CPM_CR_FLG) ; +} + +void smc1_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + + /* SMC1 is only on port D */ + io->iop_ppard |= 0x00c00000; + io->iop_pdird |= 0x00400000; + io->iop_pdird &= ~0x00800000; + io->iop_psord &= ~0x00c00000; + + /* Wire BRG1 to SMC1 */ + cpm2_immr->im_cpmux.cmx_smr &= 0x0f; + pinfo->brg = 1; +} + +void smc2_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + + /* SMC2 is only on port A */ + io->iop_ppara |= 0x00c00000; + io->iop_pdira |= 0x00400000; + io->iop_pdira &= ~0x00800000; + io->iop_psora &= ~0x00c00000; + + /* Wire BRG2 to SMC2 */ + cpm2_immr->im_cpmux.cmx_smr &= 0xf0; + pinfo->brg = 2; +} + +void scc1_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + + /* Use Port D for SCC1 instead of other functions. 
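+ * PPAR hands the pins over to the peripheral, PSOR picks the pin
+ * option and PDIR sets the direction of the RxD/TxD lines below.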
*/ + io->iop_ppard |= 0x00000003; + io->iop_psord &= ~0x00000001; /* Rx */ + io->iop_psord |= 0x00000002; /* Tx */ + io->iop_pdird &= ~0x00000001; /* Rx */ + io->iop_pdird |= 0x00000002; /* Tx */ + + /* Wire BRG1 to SCC1 */ + cpm2_immr->im_cpmux.cmx_scr &= ~0x00ffffff; + cpm2_immr->im_cpmux.cmx_scr |= 0x00000000; + pinfo->brg = 1; +} + +void scc2_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + io->iop_pparb |= 0x008b0000; + io->iop_pdirb |= 0x00880000; + io->iop_psorb |= 0x00880000; + io->iop_pdirb &= ~0x00030000; + io->iop_psorb &= ~0x00030000; + cpm2_immr->im_cpmux.cmx_scr &= ~0xff00ffff; + cpm2_immr->im_cpmux.cmx_scr |= 0x00090000; + pinfo->brg = 2; +} + +void scc3_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + io->iop_pparb |= 0x008b0000; + io->iop_pdirb |= 0x00880000; + io->iop_psorb |= 0x00880000; + io->iop_pdirb &= ~0x00030000; + io->iop_psorb &= ~0x00030000; + cpm2_immr->im_cpmux.cmx_scr &= ~0xffff00ff; + cpm2_immr->im_cpmux.cmx_scr |= 0x00001200; + pinfo->brg = 3; +} + +void scc4_lineif(struct uart_cpm_port *pinfo) +{ + volatile iop_cpm2_t *io = &cpm2_immr->im_ioport; + + io->iop_ppard |= 0x00000600; + io->iop_psord &= ~0x00000600; /* Tx/Rx */ + io->iop_pdird &= ~0x00000200; /* Rx */ + io->iop_pdird |= 0x00000400; /* Tx */ + + cpm2_immr->im_cpmux.cmx_scr &= ~0xffffff00; + cpm2_immr->im_cpmux.cmx_scr |= 0x0000001b; + pinfo->brg = 4; +} + +/* + * Allocate DP-Ram and memory buffers. We need to allocate a transmit and + * receive buffer descriptors from dual port ram, and a character + * buffer area from host mem. If we are allocating for the console we need + * to do it from bootmem + */ +int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con) +{ + int dpmemsz, memsz; + u8 *dp_mem; + uint dp_addr; + u8 *mem_addr; + dma_addr_t dma_addr = 0; + + pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line); + + dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos); + dp_mem = cpm2_dpalloc(dpmemsz, 8); + if (dp_mem == NULL) { + printk(KERN_ERR + "cpm_uart_cpm1.c: could not allocate buffer descriptors\n"); + return -ENOMEM; + } + + dp_addr = cpm2_dpram_offset(dp_mem); + + memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize); + if (is_con) + mem_addr = alloc_bootmem(memsz); + else + mem_addr = dma_alloc_coherent(NULL, memsz, &dma_addr, + GFP_KERNEL); + + if (mem_addr == NULL) { + cpm2_dpfree(dp_mem); + printk(KERN_ERR + "cpm_uart_cpm1.c: could not allocate coherent memory\n"); + return -ENOMEM; + } + + pinfo->dp_addr = dp_addr; + pinfo->mem_addr = mem_addr; + pinfo->dma_addr = dma_addr; + + pinfo->rx_buf = mem_addr; + pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos + * pinfo->rx_fifosize); + + pinfo->rx_bd_base = (volatile cbd_t *)dp_mem; + pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos; + + return 0; +} + +void cpm_uart_freebuf(struct uart_cpm_port *pinfo) +{ + dma_free_coherent(NULL, L1_CACHE_ALIGN(pinfo->rx_nrfifos * + pinfo->rx_fifosize) + + L1_CACHE_ALIGN(pinfo->tx_nrfifos * + pinfo->tx_fifosize), pinfo->mem_addr, + pinfo->dma_addr); + + cpm2_dpfree(&pinfo->dp_addr); +} + +/* Setup any dynamic params in the uart desc */ +int cpm_uart_init_portdesc(void) +{ + pr_debug("CPM uart[-]:init portdesc\n"); + + cpm_uart_nr = 0; +#ifdef CONFIG_SERIAL_CPM_SMC1 + cpm_uart_ports[UART_SMC1].smcp = (smc_t *) & cpm2_immr->im_smc[0]; + cpm_uart_ports[UART_SMC1].smcup = + (smc_uart_t *) & 
cpm2_immr->im_dprambase[PROFF_SMC1]; + cpm_uart_ports[UART_SMC1].port.mapbase = + (unsigned long)&cpm2_immr->im_smc[0]; + cpm_uart_ports[UART_SMC1].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); + cpm_uart_ports[UART_SMC1].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + cpm_uart_ports[UART_SMC1].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SMC1; +#endif + +#ifdef CONFIG_SERIAL_CPM_SMC2 + cpm_uart_ports[UART_SMC2].smcp = (smc_t *) & cpm2_immr->im_smc[1]; + cpm_uart_ports[UART_SMC2].smcup = + (smc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SMC2]; + cpm_uart_ports[UART_SMC2].port.mapbase = + (unsigned long)&cpm2_immr->im_smc[1]; + cpm_uart_ports[UART_SMC2].smcp->smc_smcm |= (SMCM_RX | SMCM_TX); + cpm_uart_ports[UART_SMC2].smcp->smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); + cpm_uart_ports[UART_SMC2].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SMC2; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC1 + cpm_uart_ports[UART_SCC1].sccp = (scc_t *) & cpm2_immr->im_scc[0]; + cpm_uart_ports[UART_SCC1].sccup = + (scc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SCC1]; + cpm_uart_ports[UART_SCC1].port.mapbase = + (unsigned long)&cpm2_immr->im_scc[0]; + cpm_uart_ports[UART_SCC1].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC1].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC1].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC1; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC2 + cpm_uart_ports[UART_SCC2].sccp = (scc_t *) & cpm2_immr->im_scc[1]; + cpm_uart_ports[UART_SCC2].sccup = + (scc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SCC2]; + cpm_uart_ports[UART_SCC2].port.mapbase = + (unsigned long)&cpm2_immr->im_scc[1]; + cpm_uart_ports[UART_SCC2].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC2].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC2].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC2; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC3 + cpm_uart_ports[UART_SCC3].sccp = (scc_t *) & cpm2_immr->im_scc[2]; + cpm_uart_ports[UART_SCC3].sccup = + (scc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SCC3]; + cpm_uart_ports[UART_SCC3].port.mapbase = + (unsigned long)&cpm2_immr->im_scc[2]; + cpm_uart_ports[UART_SCC3].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC3].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC3].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC3; +#endif + +#ifdef CONFIG_SERIAL_CPM_SCC4 + cpm_uart_ports[UART_SCC4].sccp = (scc_t *) & cpm2_immr->im_scc[3]; + cpm_uart_ports[UART_SCC4].sccup = + (scc_uart_t *) & cpm2_immr->im_dprambase[PROFF_SCC4]; + cpm_uart_ports[UART_SCC4].port.mapbase = + (unsigned long)&cpm2_immr->im_scc[3]; + cpm_uart_ports[UART_SCC4].sccp->scc_sccm &= + ~(UART_SCCM_TX | UART_SCCM_RX); + cpm_uart_ports[UART_SCC4].sccp->scc_gsmrl &= + ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); + cpm_uart_ports[UART_SCC4].port.uartclk = (((bd_t *) __res)->bi_intfreq); + cpm_uart_port_map[cpm_uart_nr++] = UART_SCC4; +#endif + + return 0; +} diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/serial/cpm_uart/cpm_uart_cpm2.h new file mode 100644 index 000000000..eb620bd98 --- /dev/null +++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.h @@ -0,0 +1,45 @@ +/* + * linux/drivers/serial/cpm_uart_cpm2.h + * + * Driver for CPM (SCC/SMC) serial ports + * 
+ * definitions for cpm2 + * + */ + +#ifndef CPM_UART_CPM2_H +#define CPM_UART_CPM2_H + +#include + +/* defines for IRQs */ +#define SMC1_IRQ SIU_INT_SMC1 +#define SMC2_IRQ SIU_INT_SMC2 +#define SCC1_IRQ SIU_INT_SCC1 +#define SCC2_IRQ SIU_INT_SCC2 +#define SCC3_IRQ SIU_INT_SCC3 +#define SCC4_IRQ SIU_INT_SCC4 + +/* the CPM address */ +#define CPM_ADDR CPM_MAP_ADDR + +static inline void cpm_set_brg(int brg, int baud) +{ + cpm2_setbrg(brg, baud); +} + +static inline void cpm_set_scc_fcr(volatile scc_uart_t * sup) +{ + sup->scc_genscc.scc_rfcr = CPMFCR_GBL | CPMFCR_EB; + sup->scc_genscc.scc_tfcr = CPMFCR_GBL | CPMFCR_EB; +} + +static inline void cpm_set_smc_fcr(volatile smc_uart_t * up) +{ + up->smc_rfcr = CPMFCR_GBL | CPMFCR_EB; + up->smc_tfcr = CPMFCR_GBL | CPMFCR_EB; +} + +#define DPRAM_BASE ((unsigned char *)&cpm2_immr->im_dprambase[0]) + +#endif diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h new file mode 100644 index 000000000..2e4f49a0c --- /dev/null +++ b/drivers/usb/class/cdc-acm.h @@ -0,0 +1,115 @@ +/* + * + * Includes for cdc-acm.c + * + * Mainly take from usbnet's cdc-ether part + * + */ + +/* + * CMSPAR, some architectures can't have space and mark parity. + */ + +#ifndef CMSPAR +#define CMSPAR 0 +#endif + +/* + * Major and minor numbers. + */ + +#define ACM_TTY_MAJOR 166 +#define ACM_TTY_MINORS 32 + +/* + * Requests. + */ + +#define USB_RT_ACM (USB_TYPE_CLASS | USB_RECIP_INTERFACE) + +#define ACM_REQ_COMMAND 0x00 +#define ACM_REQ_RESPONSE 0x01 +#define ACM_REQ_SET_FEATURE 0x02 +#define ACM_REQ_GET_FEATURE 0x03 +#define ACM_REQ_CLEAR_FEATURE 0x04 + +#define ACM_REQ_SET_LINE 0x20 +#define ACM_REQ_GET_LINE 0x21 +#define ACM_REQ_SET_CONTROL 0x22 +#define ACM_REQ_SEND_BREAK 0x23 + +/* + * IRQs. + */ + +#define ACM_IRQ_NETWORK 0x00 +#define ACM_IRQ_LINE_STATE 0x20 + +/* + * Output control lines. + */ + +#define ACM_CTRL_DTR 0x01 +#define ACM_CTRL_RTS 0x02 + +/* + * Input control lines and line errors. + */ + +#define ACM_CTRL_DCD 0x01 +#define ACM_CTRL_DSR 0x02 +#define ACM_CTRL_BRK 0x04 +#define ACM_CTRL_RI 0x08 + +#define ACM_CTRL_FRAMING 0x10 +#define ACM_CTRL_PARITY 0x20 +#define ACM_CTRL_OVERRUN 0x40 + +/* + * Line speed and caracter encoding. + */ + +struct acm_line { + __u32 speed; + __u8 stopbits; + __u8 parity; + __u8 databits; +} __attribute__ ((packed)); + +/* + * Internal driver structures. + */ + +struct acm { + struct usb_device *dev; /* the corresponding usb device */ + struct usb_interface *control; /* control interface */ + struct usb_interface *data; /* data interface */ + struct tty_struct *tty; /* the corresponding tty */ + struct urb *ctrlurb, *readurb, *writeurb; /* urbs */ + struct acm_line line; /* line coding (bits, stop, parity) */ + struct work_struct work; /* work queue entry for line discipline waking up */ + struct tasklet_struct bh; /* rx processing */ + unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */ + unsigned int ctrlout; /* output control lines (DTR, RTS) */ + unsigned int writesize; /* max packet size for the output bulk endpoint */ + unsigned int used; /* someone has this acm's device open */ + unsigned int minor; /* acm minor number */ + unsigned char throttle; /* throttled by tty layer */ + unsigned char clocal; /* termios CLOCAL */ + unsigned char ready_for_write; /* write urb can be used */ +}; + +/* "Union Functional Descriptor" from CDC spec 5.2.3.X */ +struct union_desc { + u8 bLength; + u8 bDescriptorType; + u8 bDescriptorSubType; + + u8 bMasterInterface0; + u8 bSlaveInterface0; + /* ... 
and there could be other slave interfaces */ +} __attribute__ ((packed)); + +#define CDC_UNION_TYPE 0x06 +#define CDC_DATA_INTERFACE_TYPE 0x0a + diff --git a/drivers/w1/Kconfig b/drivers/w1/Kconfig new file mode 100644 index 000000000..f416b64b6 --- /dev/null +++ b/drivers/w1/Kconfig @@ -0,0 +1,31 @@ +menu "Dallas's 1-wire bus" + +config W1 + tristate "Dallas's 1-wire support" + ---help--- + Dallas's 1-wire bus is usefull to connect slow 1-pin devices + such as iButtons and thermal sensors. + + If you want W1 support, you should say Y here. + + This W1 support can also be built as a module. If so, the module + will be called wire.ko. + +config W1_MATROX + tristate "Matrox G400 transport layer for 1-wire" + depends on W1 + help + Say Y here if you want to communicate with your 1-wire devices + using Matrox's G400 GPIO pins. + + This support is also available as a module. If so, the module + will be called matrox_w1.ko. + +config W1_THERM + tristate "Thermal family implementation" + depends on W1 + help + Say Y here if you want to connect 1-wire thermal sensors to you + wire. + +endmenu diff --git a/drivers/w1/matrox_w1.c b/drivers/w1/matrox_w1.c new file mode 100644 index 000000000..55b1faf01 --- /dev/null +++ b/drivers/w1/matrox_w1.c @@ -0,0 +1,246 @@ +/* + * matrox_w1.c + * + * Copyright (c) 2004 Evgeniy Polyakov + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "w1.h" +#include "w1_int.h" +#include "w1_log.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Evgeniy Polyakov "); +MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio)."); + +static struct pci_device_id matrox_w1_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400) }, + { }, +}; +MODULE_DEVICE_TABLE(pci, matrox_w1_tbl); + +static int __devinit matrox_w1_probe(struct pci_dev *, const struct pci_device_id *); +static void __devexit matrox_w1_remove(struct pci_dev *); + +static struct pci_driver matrox_w1_pci_driver = { + .name = "matrox_w1", + .id_table = matrox_w1_tbl, + .probe = matrox_w1_probe, + .remove = __devexit_p(matrox_w1_remove), +}; + +/* + * Matrox G400 DDC registers. 
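+ * The 1-wire line is bit-banged over the DDC DATA pin of the G400
+ * GPIO block; a clock bit is defined below but only DATA is driven.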
+ */ + +#define MATROX_G400_DDC_CLK (1<<4) +#define MATROX_G400_DDC_DATA (1<<1) + +#define MATROX_BASE 0x3C00 +#define MATROX_STATUS 0x1e14 + +#define MATROX_PORT_INDEX_OFFSET 0x00 +#define MATROX_PORT_DATA_OFFSET 0x0A + +#define MATROX_GET_CONTROL 0x2A +#define MATROX_GET_DATA 0x2B +#define MATROX_CURSOR_CTL 0x06 + +struct matrox_device +{ + unsigned long base_addr; + unsigned long port_index, port_data; + u8 data_mask; + + unsigned long phys_addr, virt_addr; + unsigned long found; + + struct w1_bus_master *bus_master; +}; + +static u8 matrox_w1_read_ddc_bit(unsigned long); +static void matrox_w1_write_ddc_bit(unsigned long, u8); + +/* + * These functions read and write DDC Data bit. + * + * Using tristate pins, since i can't fin any open-drain pin in whole motherboard. + * Unfortunately we can't connect to Intel's 82801xx IO controller + * since we don't know motherboard schema, wich has pretty unused(may be not) GPIO. + * + * I've heard that PIIX also has open drain pin. + * + * Port mapping. + */ +static __inline__ u8 matrox_w1_read_reg(struct matrox_device *dev, u8 reg) +{ + u8 ret; + + writeb(reg, dev->port_index); + ret = readb(dev->port_data); + barrier(); + + return ret; +} + +static __inline__ void matrox_w1_write_reg(struct matrox_device *dev, u8 reg, u8 val) +{ + writeb(reg, dev->port_index); + writeb(val, dev->port_data); + wmb(); +} + +static void matrox_w1_write_ddc_bit(unsigned long data, u8 bit) +{ + u8 ret; + struct matrox_device *dev = (struct matrox_device *) data; + + if (bit) + bit = 0; + else + bit = dev->data_mask; + + ret = matrox_w1_read_reg(dev, MATROX_GET_CONTROL); + matrox_w1_write_reg(dev, MATROX_GET_CONTROL, ((ret & ~dev->data_mask) | bit)); + matrox_w1_write_reg(dev, MATROX_GET_DATA, 0x00); +} + +static u8 matrox_w1_read_ddc_bit(unsigned long data) +{ + u8 ret; + struct matrox_device *dev = (struct matrox_device *) data; + + ret = matrox_w1_read_reg(dev, MATROX_GET_DATA); + + return ret; +} + +static void matrox_w1_hw_init(struct matrox_device *dev) +{ + matrox_w1_write_reg(dev, MATROX_GET_DATA, 0xFF); + matrox_w1_write_reg(dev, MATROX_GET_CONTROL, 0x00); +} + +static int __devinit matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct matrox_device *dev; + int err; + + assert(pdev != NULL); + assert(ent != NULL); + + if (pdev->vendor != PCI_VENDOR_ID_MATROX || pdev->device != PCI_DEVICE_ID_MATROX_G400) + return -ENODEV; + + dev = kmalloc(sizeof(struct matrox_device) + + sizeof(struct w1_bus_master), GFP_KERNEL); + if (!dev) { + dev_err(&pdev->dev, + "%s: Failed to create new matrox_device object.\n", + __func__); + return -ENOMEM; + } + + memset(dev, 0, sizeof(struct matrox_device) + sizeof(struct w1_bus_master)); + + dev->bus_master = (struct w1_bus_master *)(dev + 1); + + /* + * True for G400, for some other we need resource 0, see drivers/video/matrox/matroxfb_base.c + */ + + dev->phys_addr = pci_resource_start(pdev, 1); + + dev->virt_addr = + (unsigned long) ioremap_nocache(dev->phys_addr, 16384); + if (!dev->virt_addr) { + dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n", + __func__, dev->phys_addr, 16384); + err = -EIO; + goto err_out_free_device; + } + + dev->base_addr = dev->virt_addr + MATROX_BASE; + dev->port_index = dev->base_addr + MATROX_PORT_INDEX_OFFSET; + dev->port_data = dev->base_addr + MATROX_PORT_DATA_OFFSET; + dev->data_mask = (MATROX_G400_DDC_DATA); + + matrox_w1_hw_init(dev); + + dev->bus_master->data = (unsigned long) dev; + dev->bus_master->read_bit = &matrox_w1_read_ddc_bit; + 
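+ /* The w1 core builds byte transfers (w1_read_8/w1_write_8) on top of
+ * these two bit callbacks, so no byte-level hooks are registered. */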
dev->bus_master->write_bit = &matrox_w1_write_ddc_bit; + + err = w1_add_master_device(dev->bus_master); + if (err) + goto err_out_free_device; + + pci_set_drvdata(pdev, dev); + + dev->found = 1; + + dev_info(&pdev->dev, "Matrox G400 GPIO transport layer for 1-wire.\n"); + + return 0; + +err_out_free_device: + kfree(dev); + + return err; +} + +static void __devexit matrox_w1_remove(struct pci_dev *pdev) +{ + struct matrox_device *dev = pci_get_drvdata(pdev); + + assert(dev != NULL); + + if (dev->found) { + w1_remove_master_device(dev->bus_master); + iounmap((void *) dev->virt_addr); + } + kfree(dev); +} + +static int __init matrox_w1_init(void) +{ + return pci_module_init(&matrox_w1_pci_driver); +} + +static void __exit matrox_w1_fini(void) +{ + pci_unregister_driver(&matrox_w1_pci_driver); +} + +module_init(matrox_w1_init); +module_exit(matrox_w1_fini); diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c new file mode 100644 index 000000000..d956f5e64 --- /dev/null +++ b/drivers/w1/w1.c @@ -0,0 +1,623 @@ +/* + * w1.c + * + * Copyright (c) 2004 Evgeniy Polyakov + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "w1.h" +#include "w1_io.h" +#include "w1_log.h" +#include "w1_int.h" +#include "w1_family.h" +#include "w1_netlink.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Evgeniy Polyakov "); +MODULE_DESCRIPTION("Driver for 1-wire Dallas network protocol."); + +static int w1_timeout = 5 * HZ; +int w1_max_slave_count = 10; + +module_param_named(timeout, w1_timeout, int, 0); +module_param_named(max_slave_count, w1_max_slave_count, int, 0); + +spinlock_t w1_mlock = SPIN_LOCK_UNLOCKED; +LIST_HEAD(w1_masters); + +static pid_t control_thread; +static int control_needs_exit; +static DECLARE_COMPLETION(w1_control_complete); +static DECLARE_WAIT_QUEUE_HEAD(w1_control_wait); + +static int w1_master_match(struct device *dev, struct device_driver *drv) +{ + return 1; +} + +static int w1_master_probe(struct device *dev) +{ + return -ENODEV; +} + +static int w1_master_remove(struct device *dev) +{ + return 0; +} + +static void w1_master_release(struct device *dev) +{ + struct w1_master *md = container_of(dev, struct w1_master, dev); + + complete(&md->dev_released); +} + +static void w1_slave_release(struct device *dev) +{ + struct w1_slave *sl = container_of(dev, struct w1_slave, dev); + + complete(&sl->dev_released); +} + +static ssize_t w1_default_read_name(struct device *dev, char *buf) +{ + return sprintf(buf, "No family registered.\n"); +} + +static ssize_t w1_default_read_bin(struct kobject *kobj, char *buf, loff_t off, + size_t count) +{ + return sprintf(buf, "No family registered.\n"); +} + +struct bus_type w1_bus_type = { + .name = "w1", + .match = w1_master_match, +}; + +struct 
device_driver w1_driver = { + .name = "w1_driver", + .bus = &w1_bus_type, + .probe = w1_master_probe, + .remove = w1_master_remove, +}; + +struct device w1_device = { + .parent = NULL, + .bus = &w1_bus_type, + .bus_id = "w1 bus master", + .driver = &w1_driver, + .release = &w1_master_release +}; + +static struct device_attribute w1_slave_attribute = { + .attr = { + .name = "name", + .mode = S_IRUGO, + .owner = THIS_MODULE + }, + .show = &w1_default_read_name, +}; + +static struct device_attribute w1_slave_attribute_val = { + .attr = { + .name = "value", + .mode = S_IRUGO, + .owner = THIS_MODULE + }, + .show = &w1_default_read_name, +}; + +static ssize_t w1_master_attribute_show(struct device *dev, char *buf) +{ + return sprintf(buf, "please fix me\n"); +#if 0 + struct w1_master *md = container_of(dev, struct w1_master, dev); + int c = PAGE_SIZE; + + if (down_interruptible(&md->mutex)) + return -EBUSY; + + c -= snprintf(buf + PAGE_SIZE - c, c, "%s\n", md->name); + c -= snprintf(buf + PAGE_SIZE - c, c, + "bus_master=0x%p, timeout=%d, max_slave_count=%d, attempts=%lu\n", + md->bus_master, w1_timeout, md->max_slave_count, + md->attempts); + c -= snprintf(buf + PAGE_SIZE - c, c, "%d slaves: ", + md->slave_count); + if (md->slave_count == 0) + c -= snprintf(buf + PAGE_SIZE - c, c, "no.\n"); + else { + struct list_head *ent, *n; + struct w1_slave *sl; + + list_for_each_safe(ent, n, &md->slist) { + sl = list_entry(ent, struct w1_slave, w1_slave_entry); + + c -= snprintf(buf + PAGE_SIZE - c, c, "%s[%p] ", + sl->name, sl); + } + c -= snprintf(buf + PAGE_SIZE - c, c, "\n"); + } + + up(&md->mutex); + + return PAGE_SIZE - c; +#endif +} + +struct device_attribute w1_master_attribute = { + .attr = { + .name = "w1_master_stats", + .mode = S_IRUGO, + .owner = THIS_MODULE, + }, + .show = &w1_master_attribute_show, +}; + +static struct bin_attribute w1_slave_bin_attribute = { + .attr = { + .name = "w1_slave", + .mode = S_IRUGO, + .owner = THIS_MODULE, + }, + .size = W1_SLAVE_DATA_SIZE, + .read = &w1_default_read_bin, +}; + +static int __w1_attach_slave_device(struct w1_slave *sl) +{ + int err; + + sl->dev.parent = &sl->master->dev; + sl->dev.driver = sl->master->driver; + sl->dev.bus = &w1_bus_type; + sl->dev.release = &w1_slave_release; + + snprintf(&sl->dev.bus_id[0], sizeof(sl->dev.bus_id), + "%x-%llx", + (unsigned int) sl->reg_num.family, + (unsigned long long) sl->reg_num.id); + snprintf (&sl->name[0], sizeof(sl->name), + "%x-%llx", + (unsigned int) sl->reg_num.family, + (unsigned long long) sl->reg_num.id); + + dev_dbg(&sl->dev, "%s: registering %s.\n", __func__, + &sl->dev.bus_id[0]); + + err = device_register(&sl->dev); + if (err < 0) { + dev_err(&sl->dev, + "Device registration [%s] failed. err=%d\n", + sl->dev.bus_id, err); + return err; + } + + w1_slave_bin_attribute.read = sl->family->fops->rbin; + w1_slave_attribute.show = sl->family->fops->rname; + w1_slave_attribute_val.show = sl->family->fops->rval; + w1_slave_attribute_val.attr.name = sl->family->fops->rvalname; + + err = device_create_file(&sl->dev, &w1_slave_attribute); + if (err < 0) { + dev_err(&sl->dev, + "sysfs file creation for [%s] failed. err=%d\n", + sl->dev.bus_id, err); + device_unregister(&sl->dev); + return err; + } + + err = device_create_file(&sl->dev, &w1_slave_attribute_val); + if (err < 0) { + dev_err(&sl->dev, + "sysfs file creation for [%s] failed. 
err=%d\n", + sl->dev.bus_id, err); + device_remove_file(&sl->dev, &w1_slave_attribute); + device_unregister(&sl->dev); + return err; + } + + err = sysfs_create_bin_file(&sl->dev.kobj, &w1_slave_bin_attribute); + if (err < 0) { + dev_err(&sl->dev, + "sysfs file creation for [%s] failed. err=%d\n", + sl->dev.bus_id, err); + device_remove_file(&sl->dev, &w1_slave_attribute); + device_remove_file(&sl->dev, &w1_slave_attribute_val); + device_unregister(&sl->dev); + return err; + } + + list_add_tail(&sl->w1_slave_entry, &sl->master->slist); + + return 0; +} + +static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) +{ + struct w1_slave *sl; + struct w1_family *f; + int err; + + sl = kmalloc(sizeof(struct w1_slave), GFP_KERNEL); + if (!sl) { + dev_err(&dev->dev, + "%s: failed to allocate new slave device.\n", + __func__); + return -ENOMEM; + } + + memset(sl, 0, sizeof(*sl)); + + sl->owner = THIS_MODULE; + sl->master = dev; + + memcpy(&sl->reg_num, rn, sizeof(sl->reg_num)); + atomic_set(&sl->refcnt, 0); + init_completion(&sl->dev_released); + + spin_lock(&w1_flock); + f = w1_family_registered(rn->family); + if (!f) { + spin_unlock(&w1_flock); + dev_info(&dev->dev, "Family %x is not registered.\n", + rn->family); + kfree(sl); + return -ENODEV; + } + __w1_family_get(f); + spin_unlock(&w1_flock); + + sl->family = f; + + + err = __w1_attach_slave_device(sl); + if (err < 0) { + dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__, + sl->name); + w1_family_put(sl->family); + kfree(sl); + return err; + } + + dev->slave_count++; + + return 0; +} + +static void w1_slave_detach(struct w1_slave *sl) +{ + dev_info(&sl->dev, "%s: detaching %s.\n", __func__, sl->name); + + while (atomic_read(&sl->refcnt)) + schedule_timeout(10); + + sysfs_remove_bin_file(&sl->dev.kobj, &w1_slave_bin_attribute); + device_remove_file(&sl->dev, &w1_slave_attribute); + device_unregister(&sl->dev); + w1_family_put(sl->family); +} + +static void w1_search(struct w1_master *dev) +{ + u64 last, rn, tmp; + int i, count = 0, slave_count; + int last_family_desc, last_zero, last_device; + int search_bit, id_bit, comp_bit, desc_bit; + struct list_head *ent; + struct w1_slave *sl; + int family_found = 0; + struct w1_netlink_msg msg; + + dev->attempts++; + + memset(&msg, 0, sizeof(msg)); + + search_bit = id_bit = comp_bit = 0; + rn = tmp = last = 0; + last_device = last_zero = last_family_desc = 0; + + desc_bit = 64; + + while (!(id_bit && comp_bit) && !last_device + && count++ < dev->max_slave_count) { + last = rn; + rn = 0; + + last_family_desc = 0; + + /* + * Reset bus and all 1-wire device state machines + * so they can respond to our requests. + * + * Return 0 - device(s) present, 1 - no devices present. + */ + if (w1_reset_bus(dev)) { + dev_info(&dev->dev, "No devices present on the wire.\n"); + break; + } + +#if 1 + memset(&msg, 0, sizeof(msg)); + + w1_write_8(dev, W1_SEARCH); + for (i = 0; i < 64; ++i) { + /* + * Read 2 bits from bus. + * All who don't sleep must send ID bit and COMPLEMENT ID bit. + * They actually are ANDed between all senders. 
+ */ + id_bit = w1_read_bit(dev); + comp_bit = w1_read_bit(dev); + + if (id_bit && comp_bit) + break; + + if (id_bit == 0 && comp_bit == 0) { + if (i == desc_bit) + search_bit = 1; + else if (i > desc_bit) + search_bit = 0; + else + search_bit = ((last >> i) & 0x1); + + if (search_bit == 0) { + last_zero = i; + if (last_zero < 9) + last_family_desc = last_zero; + } + + } + else + search_bit = id_bit; + + tmp = search_bit; + rn |= (tmp << i); + + /* + * Write 1 bit to bus + * and make all who don't have "search_bit" in "i"'th position + * in it's registration number sleep. + */ + w1_write_bit(dev, search_bit); + + } +#endif + msg.id.w1_id = rn; + msg.val = w1_calc_crc8((u8 *) & rn, 7); + w1_netlink_send(dev, &msg); + + if (desc_bit == last_zero) + last_device = 1; + + desc_bit = last_zero; + + slave_count = 0; + list_for_each(ent, &dev->slist) { + struct w1_reg_num *tmp; + + tmp = (struct w1_reg_num *) &rn; + + sl = list_entry(ent, struct w1_slave, w1_slave_entry); + + if (sl->reg_num.family == tmp->family && + sl->reg_num.id == tmp->id && + sl->reg_num.crc == tmp->crc) + break; + else if (sl->reg_num.family == tmp->family) { + family_found = 1; + break; + } + + slave_count++; + } + + if (slave_count == dev->slave_count && + msg.val && (*((__u8 *) & msg.val) == msg.id.id.crc)) { + w1_attach_slave_device(dev, (struct w1_reg_num *) &rn); + } + } +} + +int w1_control(void *data) +{ + struct w1_slave *sl; + struct w1_master *dev; + struct list_head *ent, *ment, *n, *mn; + int err, have_to_wait = 0, timeout; + + daemonize("w1_control"); + allow_signal(SIGTERM); + + while (!control_needs_exit || have_to_wait) { + have_to_wait = 0; + + timeout = w1_timeout; + do { + timeout = interruptible_sleep_on_timeout(&w1_control_wait, timeout); + if (current->flags & PF_FREEZE) + refrigerator(PF_FREEZE); + } while (!signal_pending(current) && (timeout > 0)); + + if (signal_pending(current)) + flush_signals(current); + + list_for_each_safe(ment, mn, &w1_masters) { + dev = list_entry(ment, struct w1_master, w1_master_entry); + + if (!control_needs_exit && !dev->need_exit) + continue; + /* + * Little race: we can create thread but not set the flag. + * Get a chance for external process to set flag up. 
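+ * If the master is not marked initialized yet, skip it on this pass
+ * and retry on the next wakeup of the control thread.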
+ */ + if (!dev->initialized) { + have_to_wait = 1; + continue; + } + + spin_lock(&w1_mlock); + list_del(&dev->w1_master_entry); + spin_unlock(&w1_mlock); + + if (control_needs_exit) { + dev->need_exit = 1; + + err = kill_proc(dev->kpid, SIGTERM, 1); + if (err) + dev_err(&dev->dev, + "Failed to send signal to w1 kernel thread %d.\n", + dev->kpid); + } + + wait_for_completion(&dev->dev_exited); + + list_for_each_safe(ent, n, &dev->slist) { + sl = list_entry(ent, struct w1_slave, w1_slave_entry); + + if (!sl) + dev_warn(&dev->dev, + "%s: slave entry is NULL.\n", + __func__); + else { + list_del(&sl->w1_slave_entry); + + w1_slave_detach(sl); + kfree(sl); + } + } + device_remove_file(&dev->dev, &w1_master_attribute); + atomic_dec(&dev->refcnt); + } + } + + complete_and_exit(&w1_control_complete, 0); +} + +int w1_process(void *data) +{ + struct w1_master *dev = (struct w1_master *) data; + unsigned long timeout; + + daemonize("%s", dev->name); + allow_signal(SIGTERM); + + while (!dev->need_exit) { + timeout = w1_timeout; + do { + timeout = interruptible_sleep_on_timeout(&dev->kwait, timeout); + if (current->flags & PF_FREEZE) + refrigerator(PF_FREEZE); + } while (!signal_pending(current) && (timeout > 0)); + + if (signal_pending(current)) + flush_signals(current); + + if (dev->need_exit) + break; + + if (!dev->initialized) + continue; + + if (down_interruptible(&dev->mutex)) + continue; + w1_search(dev); + up(&dev->mutex); + } + + atomic_dec(&dev->refcnt); + complete_and_exit(&dev->dev_exited, 0); + + return 0; +} + +int w1_init(void) +{ + int retval; + + printk(KERN_INFO "Driver for 1-wire Dallas network protocol.\n"); + + retval = bus_register(&w1_bus_type); + if (retval) { + printk(KERN_ERR "Failed to register bus. err=%d.\n", retval); + goto err_out_exit_init; + } + + retval = driver_register(&w1_driver); + if (retval) { + printk(KERN_ERR + "Failed to register master driver. err=%d.\n", + retval); + goto err_out_bus_unregister; + } + + control_thread = kernel_thread(&w1_control, NULL, 0); + if (control_thread < 0) { + printk(KERN_ERR "Failed to create control thread. err=%d\n", + control_thread); + retval = control_thread; + goto err_out_driver_unregister; + } + + return 0; + +err_out_driver_unregister: + driver_unregister(&w1_driver); + +err_out_bus_unregister: + bus_unregister(&w1_bus_type); + +err_out_exit_init: + return retval; +} + +void w1_fini(void) +{ + struct w1_master *dev; + struct list_head *ent, *n; + + list_for_each_safe(ent, n, &w1_masters) { + dev = list_entry(ent, struct w1_master, w1_master_entry); + __w1_remove_master_device(dev); + } + + control_needs_exit = 1; + + wait_for_completion(&w1_control_complete); + + driver_unregister(&w1_driver); + bus_unregister(&w1_bus_type); +} + +module_init(w1_init); +module_exit(w1_fini); diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c new file mode 100644 index 000000000..291a6d1a8 --- /dev/null +++ b/drivers/w1/w1_int.c @@ -0,0 +1,207 @@ +/* + * w1_int.c + * + * Copyright (c) 2004 Evgeniy Polyakov + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include + +#include "w1.h" +#include "w1_log.h" + +static u32 w1_ids = 1; + +extern struct device_driver w1_driver; +extern struct bus_type w1_bus_type; +extern struct device w1_device; +extern struct device_attribute w1_master_attribute; +extern int w1_max_slave_count; +extern struct list_head w1_masters; +extern spinlock_t w1_mlock; + +extern int w1_process(void *); + +struct w1_master * w1_alloc_dev(u32 id, int slave_count, + struct device_driver *driver, struct device *device) +{ + struct w1_master *dev; + int err; + + /* + * We are in process context(kernel thread), so can sleep. + */ + dev = kmalloc(sizeof(struct w1_master) + sizeof(struct w1_bus_master), GFP_KERNEL); + if (!dev) { + printk(KERN_ERR + "Failed to allocate %d bytes for new w1 device.\n", + sizeof(struct w1_master)); + return NULL; + } + + memset(dev, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master)); + + dev->bus_master = (struct w1_bus_master *)(dev + 1); + + dev->owner = THIS_MODULE; + dev->max_slave_count = slave_count; + dev->slave_count = 0; + dev->attempts = 0; + dev->kpid = -1; + dev->initialized = 0; + dev->id = id; + + atomic_set(&dev->refcnt, 2); + + INIT_LIST_HEAD(&dev->slist); + init_MUTEX(&dev->mutex); + + init_waitqueue_head(&dev->kwait); + init_completion(&dev->dev_released); + init_completion(&dev->dev_exited); + + memcpy(&dev->dev, device, sizeof(struct device)); + snprintf(dev->dev.bus_id, sizeof(dev->dev.bus_id), + "w1_bus_master%u", dev->id); + snprintf(dev->name, sizeof(dev->name), "w1_bus_master%u", dev->id); + + dev->driver = driver; + + dev->groups = 23; + dev->seq = 1; + dev->nls = netlink_kernel_create(NETLINK_NFLOG, NULL); + if (!dev->nls) { + printk(KERN_ERR "Failed to create new netlink socket(%u).\n", + NETLINK_NFLOG); + memset(dev, 0, sizeof(struct w1_master)); + kfree(dev); + dev = NULL; + } + + err = device_register(&dev->dev); + if (err) { + printk(KERN_ERR "Failed to register master device. err=%d\n", err); + if (dev->nls->sk_socket) + sock_release(dev->nls->sk_socket); + memset(dev, 0, sizeof(struct w1_master)); + kfree(dev); + dev = NULL; + } + + return dev; +} + +void w1_free_dev(struct w1_master *dev) +{ + device_unregister(&dev->dev); + if (dev->nls->sk_socket) + sock_release(dev->nls->sk_socket); + memset(dev, 0, sizeof(struct w1_master) + sizeof(struct w1_bus_master)); + kfree(dev); +} + +int w1_add_master_device(struct w1_bus_master *master) +{ + struct w1_master *dev; + int retval = 0; + + dev = w1_alloc_dev(w1_ids++, w1_max_slave_count, &w1_driver, &w1_device); + if (!dev) + return -ENOMEM; + + dev->kpid = kernel_thread(&w1_process, dev, 0); + if (dev->kpid < 0) { + dev_err(&dev->dev, + "Failed to create new kernel thread. 
err=%d\n", + dev->kpid); + retval = dev->kpid; + goto err_out_free_dev; + } + + retval = device_create_file(&dev->dev, &w1_master_attribute); + if (retval) + goto err_out_kill_thread; + + memcpy(dev->bus_master, master, sizeof(struct w1_bus_master)); + + dev->initialized = 1; + + spin_lock(&w1_mlock); + list_add(&dev->w1_master_entry, &w1_masters); + spin_unlock(&w1_mlock); + + return 0; + +err_out_kill_thread: + dev->need_exit = 1; + if (kill_proc(dev->kpid, SIGTERM, 1)) + dev_err(&dev->dev, + "Failed to send signal to w1 kernel thread %d.\n", + dev->kpid); + wait_for_completion(&dev->dev_exited); + +err_out_free_dev: + w1_free_dev(dev); + + return retval; +} + +void __w1_remove_master_device(struct w1_master *dev) +{ + int err; + + dev->need_exit = 1; + err = kill_proc(dev->kpid, SIGTERM, 1); + if (err) + dev_err(&dev->dev, + "%s: Failed to send signal to w1 kernel thread %d.\n", + __func__, dev->kpid); + + while (atomic_read(&dev->refcnt)) + schedule_timeout(10); + + w1_free_dev(dev); +} + +void w1_remove_master_device(struct w1_bus_master *bm) +{ + struct w1_master *dev = NULL; + struct list_head *ent, *n; + + list_for_each_safe(ent, n, &w1_masters) { + dev = list_entry(ent, struct w1_master, w1_master_entry); + if (!dev->initialized) + continue; + + if (dev->bus_master->data == bm->data) + break; + } + + if (!dev) { + printk(KERN_ERR "Device doesn't exist.\n"); + return; + } + + __w1_remove_master_device(dev); +} + +EXPORT_SYMBOL(w1_alloc_dev); +EXPORT_SYMBOL(w1_free_dev); +EXPORT_SYMBOL(w1_add_master_device); +EXPORT_SYMBOL(w1_remove_master_device); +EXPORT_SYMBOL(__w1_remove_master_device); diff --git a/drivers/w1/w1_int.h b/drivers/w1/w1_int.h new file mode 100644 index 000000000..a5aeb7615 --- /dev/null +++ b/drivers/w1/w1_int.h @@ -0,0 +1,36 @@ +/* + * w1_int.h + * + * Copyright (c) 2004 Evgeniy Polyakov + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __W1_INT_H +#define __W1_INT_H + +#include +#include + +#include "w1.h" + +struct w1_master * w1_alloc_dev(int, struct device_driver *, struct device *); +void w1_free_dev(struct w1_master *dev); +int w1_add_master_device(struct w1_bus_master *); +void w1_remove_master_device(struct w1_bus_master *); +void __w1_remove_master_device(struct w1_master *); + +#endif /* __W1_INT_H */ diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c new file mode 100644 index 000000000..75d538b74 --- /dev/null +++ b/drivers/w1/w1_io.c @@ -0,0 +1,138 @@ +/* + * w1_io.c + * + * Copyright (c) 2004 Evgeniy Polyakov + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include + +#include + +#include "w1.h" +#include "w1_log.h" +#include "w1_io.h" + +int w1_delay_parm = 1; +module_param_named(delay_coef, w1_delay_parm, int, 0); + +static u8 w1_crc8_table[] = { + 0, 94, 188, 226, 97, 63, 221, 131, 194, 156, 126, 32, 163, 253, 31, 65, + 157, 195, 33, 127, 252, 162, 64, 30, 95, 1, 227, 189, 62, 96, 130, 220, + 35, 125, 159, 193, 66, 28, 254, 160, 225, 191, 93, 3, 128, 222, 60, 98, + 190, 224, 2, 92, 223, 129, 99, 61, 124, 34, 192, 158, 29, 67, 161, 255, + 70, 24, 250, 164, 39, 121, 155, 197, 132, 218, 56, 102, 229, 187, 89, 7, + 219, 133, 103, 57, 186, 228, 6, 88, 25, 71, 165, 251, 120, 38, 196, 154, + 101, 59, 217, 135, 4, 90, 184, 230, 167, 249, 27, 69, 198, 152, 122, 36, + 248, 166, 68, 26, 153, 199, 37, 123, 58, 100, 134, 216, 91, 5, 231, 185, + 140, 210, 48, 110, 237, 179, 81, 15, 78, 16, 242, 172, 47, 113, 147, 205, + 17, 79, 173, 243, 112, 46, 204, 146, 211, 141, 111, 49, 178, 236, 14, 80, + 175, 241, 19, 77, 206, 144, 114, 44, 109, 51, 209, 143, 12, 82, 176, 238, + 50, 108, 142, 208, 83, 13, 239, 177, 240, 174, 76, 18, 145, 207, 45, 115, + 202, 148, 118, 40, 171, 245, 23, 73, 8, 86, 180, 234, 105, 55, 213, 139, + 87, 9, 235, 181, 54, 104, 138, 212, 149, 203, 41, 119, 244, 170, 72, 22, + 233, 183, 85, 11, 136, 214, 52, 106, 43, 117, 151, 201, 74, 20, 246, 168, + 116, 42, 200, 150, 21, 75, 169, 247, 182, 232, 10, 84, 215, 137, 107, 53 +}; + +void w1_delay(unsigned long tm) +{ + udelay(tm * w1_delay_parm); +} + +void w1_write_bit(struct w1_master *dev, int bit) +{ + if (bit) { + dev->bus_master->write_bit(dev->bus_master->data, 0); + w1_delay(6); + dev->bus_master->write_bit(dev->bus_master->data, 1); + w1_delay(64); + } else { + dev->bus_master->write_bit(dev->bus_master->data, 0); + w1_delay(60); + dev->bus_master->write_bit(dev->bus_master->data, 1); + w1_delay(10); + } +} + +void w1_write_8(struct w1_master *dev, u8 byte) +{ + int i; + + for (i = 0; i < 8; ++i) + w1_write_bit(dev, (byte >> i) & 0x1); +} + +u8 w1_read_bit(struct w1_master *dev) +{ + int result; + + dev->bus_master->write_bit(dev->bus_master->data, 0); + w1_delay(6); + dev->bus_master->write_bit(dev->bus_master->data, 1); + w1_delay(9); + + result = dev->bus_master->read_bit(dev->bus_master->data); + w1_delay(55); + + return result & 0x1; +} + +u8 w1_read_8(struct w1_master * dev) +{ + int i; + u8 res = 0; + + for (i = 0; i < 8; ++i) + res |= (w1_read_bit(dev) << i); + + return res; +} + +int w1_reset_bus(struct w1_master *dev) +{ + int result; + + dev->bus_master->write_bit(dev->bus_master->data, 0); + w1_delay(480); + dev->bus_master->write_bit(dev->bus_master->data, 1); + w1_delay(70); + + result = dev->bus_master->read_bit(dev->bus_master->data) & 0x1; + w1_delay(410); + + return result; +} + +u8 w1_calc_crc8(u8 * data, int len) +{ + u8 crc = 0; + + while (len--) + crc = w1_crc8_table[crc ^ *data++]; + + return crc; +} + +EXPORT_SYMBOL(w1_write_bit); +EXPORT_SYMBOL(w1_write_8); +EXPORT_SYMBOL(w1_read_bit); +EXPORT_SYMBOL(w1_read_8); +EXPORT_SYMBOL(w1_reset_bus); +EXPORT_SYMBOL(w1_calc_crc8); 
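The byte-level helpers exported above are what bus-master and slave code build their transactions from: reset the bus, check for a presence pulse, send a command byte LSB-first with w1_write_8(), clock the reply back in with w1_read_8(), and verify it with w1_calc_crc8(). A minimal sketch of that pattern follows; it is illustrative only and not part of this patch. The helper name w1_read_rom_id() is hypothetical, 0x33 is the standard 1-Wire READ ROM opcode (only safe with a single slave on the bus), and the sketch assumes the convention used here that w1_reset_bus() returns 0 when a presence pulse was detected.

/* Illustrative sketch (not part of this patch): read and check a ROM id. */
static int w1_read_rom_id(struct w1_master *dev, u8 rom[8])
{
	int i;

	/* Reset and look for a presence pulse (0 == at least one slave). */
	if (w1_reset_bus(dev))
		return -ENODEV;

	/* READ ROM command; only valid with a single slave on the bus. */
	w1_write_8(dev, 0x33);

	/* Read the eight ROM id bytes, family code first. */
	for (i = 0; i < 8; ++i)
		rom[i] = w1_read_8(dev);

	/* Byte 7 is the Dallas CRC8 of bytes 0..6. */
	if (w1_calc_crc8(rom, 7) != rom[7])
		return -EIO;

	return 0;
}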
+EXPORT_SYMBOL(w1_delay); diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h new file mode 100644 index 000000000..a6bf6f44d --- /dev/null +++ b/drivers/w1/w1_log.h @@ -0,0 +1,38 @@ +/* + * w1_log.h + * + * Copyright (c) 2004 Evgeniy Polyakov + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __W1_LOG_H +#define __W1_LOG_H + +#define DEBUG + +#ifdef W1_DEBUG +# define assert(expr) do {} while (0) +#else +# define assert(expr) \ + if(unlikely(!(expr))) { \ + printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr,__FILE__,__FUNCTION__,__LINE__); \ + } +#endif + +#endif /* __W1_LOG_H */ + diff --git a/fs/isofs/export.c b/fs/isofs/export.c new file mode 100644 index 000000000..e4252c960 --- /dev/null +++ b/fs/isofs/export.c @@ -0,0 +1,228 @@ +/* + * fs/isofs/export.c + * + * (C) 2004 Paul Serice - The new inode scheme requires switching + * from iget() to iget5_locked() which means + * the NFS export operations have to be hand + * coded because the default routines rely on + * iget(). + * + * The following files are helpful: + * + * Documentation/filesystems/Exporting + * fs/exportfs/expfs.c. + */ + +#include +#include +#include +#include +#include + +static struct dentry * +isofs_export_iget(struct super_block *sb, + unsigned long block, + unsigned long offset, + __u32 generation) +{ + struct inode *inode; + struct dentry *result; + if (block == 0) + return ERR_PTR(-ESTALE); + inode = isofs_iget(sb, block, offset); + if (inode == NULL) + return ERR_PTR(-ENOMEM); + if (is_bad_inode(inode) + || (generation && inode->i_generation != generation)) + { + iput(inode); + return ERR_PTR(-ESTALE); + } + result = d_alloc_anon(inode); + if (!result) { + iput(inode); + return ERR_PTR(-ENOMEM); + } + return result; +} + +static struct dentry * +isofs_export_get_dentry(struct super_block *sb, void *vobjp) +{ + __u32 *objp = vobjp; + unsigned long block = objp[0]; + unsigned long offset = objp[1]; + __u32 generation = objp[2]; + return isofs_export_iget(sb, block, offset, generation); +} + +/* This function is surprisingly simple. The trick is understanding + * that "child" is always a directory. So, to find its parent, you + * simply need to find its ".." entry, normalize its block and offset, + * and return the underlying inode. See the comments for + * isofs_normalize_block_and_offset(). */ +static struct dentry *isofs_export_get_parent(struct dentry *child) +{ + unsigned long parent_block = 0; + unsigned long parent_offset = 0; + struct inode *child_inode = child->d_inode; + struct iso_inode_info *e_child_inode = ISOFS_I(child_inode); + struct inode *parent_inode = NULL; + struct iso_directory_record *de = NULL; + struct buffer_head * bh = NULL; + struct dentry *rv = NULL; + + /* "child" must always be a directory. 
*/ + if (!S_ISDIR(child_inode->i_mode)) { + printk(KERN_ERR "isofs: isofs_export_get_parent(): " + "child is not a directory!\n"); + rv = ERR_PTR(-EACCES); + goto out; + } + + /* It is an invariant that the directory offset is zero. If + * it is not zero, it means the directory failed to be + * normalized for some reason. */ + if (e_child_inode->i_iget5_offset != 0) { + printk(KERN_ERR "isofs: isofs_export_get_parent(): " + "child directory not normalized!\n"); + rv = ERR_PTR(-EACCES); + goto out; + } + + /* The child inode has been normalized such that its + * i_iget5_block value points to the "." entry. Fortunately, + * the ".." entry is located in the same block. */ + parent_block = e_child_inode->i_iget5_block; + + /* Get the block in question. */ + bh = sb_bread(child_inode->i_sb, parent_block); + if (bh == NULL) { + rv = ERR_PTR(-EACCES); + goto out; + } + + /* This is the "." entry. */ + de = (struct iso_directory_record*)bh->b_data; + + /* The ".." entry is always the second entry. */ + parent_offset = (unsigned long)isonum_711(de->length); + de = (struct iso_directory_record*)(bh->b_data + parent_offset); + + /* Verify it is in fact the ".." entry. */ + if ((isonum_711(de->name_len) != 1) || (de->name[0] != 1)) { + printk(KERN_ERR "isofs: Unable to find the \"..\" " + "directory for NFS.\n"); + rv = ERR_PTR(-EACCES); + goto out; + } + + /* Normalize */ + isofs_normalize_block_and_offset(de, &parent_block, &parent_offset); + + /* Get the inode. */ + parent_inode = isofs_iget(child_inode->i_sb, + parent_block, + parent_offset); + if (parent_inode == NULL) { + rv = ERR_PTR(-EACCES); + goto out; + } + + /* Allocate the dentry. */ + rv = d_alloc_anon(parent_inode); + if (rv == NULL) { + rv = ERR_PTR(-ENOMEM); + goto out; + } + + out: + if (bh) { + brelse(bh); + } + return rv; +} + +static int +isofs_export_encode_fh(struct dentry *dentry, + __u32 *fh32, + int *max_len, + int connectable) +{ + struct inode * inode = dentry->d_inode; + struct iso_inode_info * ei = ISOFS_I(inode); + int len = *max_len; + int type = 1; + __u16 *fh16 = (__u16*)fh32; + + /* + * WARNING: max_len is 5 for NFSv2. Because of this + * limitation, we use the lower 16 bits of fh32[1] to hold the + * offset of the inode and the upper 16 bits of fh32[1] to + * hold the offset of the parent. + */ + + if (len < 3 || (connectable && len < 5)) + return 255; + + len = 3; + fh32[0] = ei->i_iget5_block; + fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */ + fh32[2] = inode->i_generation; + if (connectable && !S_ISDIR(inode->i_mode)) { + struct inode *parent; + struct iso_inode_info *eparent; + spin_lock(&dentry->d_lock); + parent = dentry->d_parent->d_inode; + eparent = ISOFS_I(parent); + fh32[3] = eparent->i_iget5_block; + fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */ + fh32[4] = parent->i_generation; + spin_unlock(&dentry->d_lock); + len = 5; + type = 2; + } + *max_len = len; + return type; +} + + +static struct dentry * +isofs_export_decode_fh(struct super_block *sb, + __u32 *fh32, + int fh_len, + int fileid_type, + int (*acceptable)(void *context, struct dentry *de), + void *context) +{ + __u16 *fh16 = (__u16*)fh32; + __u32 child[3]; /* The child is what triggered all this. */ + __u32 parent[3]; /* The parent is just along for the ride. 
*/ + + if (fh_len < 3 || fileid_type > 2) + return NULL; + + child[0] = fh32[0]; + child[1] = fh16[2]; /* fh16 [sic] */ + child[2] = fh32[2]; + + parent[0] = 0; + parent[1] = 0; + parent[2] = 0; + if (fileid_type == 2) { + if (fh_len > 2) parent[0] = fh32[3]; + parent[1] = fh16[3]; /* fh16 [sic] */ + if (fh_len > 4) parent[2] = fh32[4]; + } + + return sb->s_export_op->find_exported_dentry(sb, child, parent, + acceptable, context); +} + + +struct export_operations isofs_export_ops = { + .decode_fh = isofs_export_decode_fh, + .encode_fh = isofs_export_encode_fh, + .get_dentry = isofs_export_get_dentry, + .get_parent = isofs_export_get_parent, +}; diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h new file mode 100644 index 000000000..5e3445da3 --- /dev/null +++ b/fs/jffs2/compr.h @@ -0,0 +1,122 @@ +/* + * JFFS2 -- Journalling Flash File System, Version 2. + * + * Copyright (C) 2004 Ferenc Havasi , + * University of Szeged, Hungary + * + * For licensing information, see the file 'LICENCE' in the + * jffs2 directory. + * + * $Id: compr.h,v 1.5 2004/06/23 16:34:39 havasi Exp $ + * + */ + +#ifndef __JFFS2_COMPR_H__ +#define __JFFS2_COMPR_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nodelist.h" + +#define JFFS2_RUBINMIPS_PRIORITY 10 +#define JFFS2_DYNRUBIN_PRIORITY 20 +#define JFFS2_LZARI_PRIORITY 30 +#define JFFS2_LZO_PRIORITY 40 +#define JFFS2_RTIME_PRIORITY 50 +#define JFFS2_ZLIB_PRIORITY 60 + +#define JFFS2_RUBINMIPS_DISABLED /* RUBINs will be used only */ +#define JFFS2_DYNRUBIN_DISABLED /* for decompression */ + +#define JFFS2_COMPR_MODE_NONE 0 +#define JFFS2_COMPR_MODE_PRIORITY 1 +#define JFFS2_COMPR_MODE_SIZE 2 + +void jffs2_set_compression_mode(int mode); +int jffs2_get_compression_mode(void); + +struct jffs2_compressor { + struct list_head list; + int priority; /* used by prirority comr. mode */ + char *name; + char compr; /* JFFS2_COMPR_XXX */ + int (*compress)(unsigned char *data_in, unsigned char *cpage_out, + uint32_t *srclen, uint32_t *destlen, void *model); + int (*decompress)(unsigned char *cdata_in, unsigned char *data_out, + uint32_t cdatalen, uint32_t datalen, void *model); + int usecount; + int disabled; /* if seted the compressor won't compress */ + unsigned char *compr_buf; /* used by size compr. mode */ + uint32_t compr_buf_size; /* used by size compr. 
mode */ + uint32_t stat_compr_orig_size; + uint32_t stat_compr_new_size; + uint32_t stat_compr_blocks; + uint32_t stat_decompr_blocks; +}; + +int jffs2_register_compressor(struct jffs2_compressor *comp); +int jffs2_unregister_compressor(struct jffs2_compressor *comp); + +int jffs2_compressors_init(void); +int jffs2_compressors_exit(void); + +uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, + unsigned char *data_in, unsigned char **cpage_out, + uint32_t *datalen, uint32_t *cdatalen); + +int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, + uint16_t comprtype, unsigned char *cdata_in, + unsigned char *data_out, uint32_t cdatalen, uint32_t datalen); + +void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig); + +#ifdef CONFIG_JFFS2_PROC +int jffs2_enable_compressor_name(const char *name); +int jffs2_disable_compressor_name(const char *name); +int jffs2_set_compression_mode_name(const char *mode_name); +char *jffs2_get_compression_mode_name(void); +int jffs2_set_compressor_priority(const char *mode_name, int priority); +char *jffs2_list_compressors(void); +char *jffs2_stats(void); +#endif + +/* Compressor modules */ +/* These functions will be called by jffs2_compressors_init/exit */ + +#ifdef CONFIG_JFFS2_RUBIN +int jffs2_rubinmips_init(void); +void jffs2_rubinmips_exit(void); +int jffs2_dynrubin_init(void); +void jffs2_dynrubin_exit(void); +#endif +#ifdef CONFIG_JFFS2_RTIME +int jffs2_rtime_init(void); +void jffs2_rtime_exit(void); +#endif +#ifdef CONFIG_JFFS2_ZLIB +int jffs2_zlib_init(void); +void jffs2_zlib_exit(void); +#endif +#ifdef CONFIG_JFFS2_LZARI +int jffs2_lzari_init(void); +void jffs2_lzari_exit(void); +#endif +#ifdef CONFIG_JFFS2_LZO +int jffs2_lzo_init(void); +void jffs2_lzo_exit(void); +#endif + +/* Prototypes from proc.c */ +int jffs2_proc_init(void); +int jffs2_proc_exit(void); + +#endif /* __JFFS2_COMPR_H__ */ diff --git a/fs/ntfs/collate.c b/fs/ntfs/collate.c new file mode 100644 index 000000000..2a4a25f0e --- /dev/null +++ b/fs/ntfs/collate.c @@ -0,0 +1,121 @@ +/* + * collate.c - NTFS kernel collation handling. Part of the Linux-NTFS project. + * + * Copyright (c) 2004 Anton Altaparmakov + * + * This program/include file is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program/include file is distributed in the hope that it will be + * useful, but WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program (in the main directory of the Linux-NTFS + * distribution in the file COPYING); if not, write to the Free Software + * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include "ntfs.h" +#include "collate.h" + +static int ntfs_collate_binary(ntfs_volume *vol, + const void *data1, const int data1_len, + const void *data2, const int data2_len) +{ + int rc; + + ntfs_debug("Entering."); + rc = memcmp(data1, data2, min(data1_len, data2_len)); + if (!rc && (data1_len != data2_len)) { + if (data1_len < data2_len) + rc = -1; + else + rc = 1; + } + ntfs_debug("Done, returning %i", rc); + return rc; +} + +static int ntfs_collate_ntofs_ulong(ntfs_volume *vol, + const void *data1, const int data1_len, + const void *data2, const int data2_len) +{ + int rc; + u32 d1, d2; + + ntfs_debug("Entering."); + // FIXME: We don't really want to bug here. + BUG_ON(data1_len != data2_len); + BUG_ON(data1_len != 4); + d1 = le32_to_cpup(data1); + d2 = le32_to_cpup(data2); + if (d1 < d2) + rc = -1; + else { + if (d1 == d2) + rc = 0; + else + rc = 1; + } + ntfs_debug("Done, returning %i", rc); + return rc; +} + +typedef int (*ntfs_collate_func_t)(ntfs_volume *, const void *, const int, + const void *, const int); + +static ntfs_collate_func_t ntfs_do_collate0x0[3] = { + ntfs_collate_binary, + NULL/*ntfs_collate_file_name*/, + NULL/*ntfs_collate_unicode_string*/, +}; + +static ntfs_collate_func_t ntfs_do_collate0x1[4] = { + ntfs_collate_ntofs_ulong, + NULL/*ntfs_collate_ntofs_sid*/, + NULL/*ntfs_collate_ntofs_security_hash*/, + NULL/*ntfs_collate_ntofs_ulongs*/, +}; + +/** + * ntfs_collate - collate two data items using a specified collation rule + * @vol: ntfs volume to which the data items belong + * @cr: collation rule to use when comparing the items + * @data1: first data item to collate + * @data1_len: length in bytes of @data1 + * @data2: second data item to collate + * @data2_len: length in bytes of @data2 + * + * Collate the two data items @data1 and @data2 using the collation rule @cr + * and return -1, 0, ir 1 if @data1 is found, respectively, to collate before, + * to match, or to collate after @data2. + * + * For speed we use the collation rule @cr as an index into two tables of + * function pointers to call the appropriate collation function. + */ +int ntfs_collate(ntfs_volume *vol, COLLATION_RULES cr, + const void *data1, const int data1_len, + const void *data2, const int data2_len) { + ntfs_debug("Entering."); + /* + * FIXME: At the moment we only support COLLATION_BINARY and + * COLLATION_NTOFS_ULONG, so we BUG() for everything else for now. 
+ */ + BUG_ON(cr != COLLATION_BINARY && cr != COLLATION_NTOFS_ULONG); + cr = le32_to_cpu(cr); + BUG_ON(cr < 0); + if (cr <= 0x02) + return ntfs_do_collate0x0[cr](vol, data1, data1_len, + data2, data2_len); + BUG_ON(cr < 0x10); + cr -= 0x10; + if (likely(cr <= 3)) + return ntfs_do_collate0x1[cr](vol, data1, data1_len, + data2, data2_len); + BUG(); + return 0; +} diff --git a/include/asm-alpha/setup.h b/include/asm-alpha/setup.h new file mode 100644 index 000000000..2e023a4aa --- /dev/null +++ b/include/asm-alpha/setup.h @@ -0,0 +1,6 @@ +#ifndef __ALPHA_SETUP_H +#define __ALPHA_SETUP_H + +#define COMMAND_LINE_SIZE 256 + +#endif diff --git a/include/asm-arm/hardware/clock.h b/include/asm-arm/hardware/clock.h new file mode 100644 index 000000000..2fbf6078b --- /dev/null +++ b/include/asm-arm/hardware/clock.h @@ -0,0 +1,121 @@ +/* + * linux/include/asm-arm/hardware/clock.h + * + * Copyright (C) 2004 ARM Limited. + * Written by Deep Blue Solutions Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef ASMARM_CLOCK_H +#define ASMARM_CLOCK_H + +struct device; + +/* + * The base API. + */ + + +/* + * struct clk - an machine class defined object / cookie. + */ +struct clk; + +/** + * clk_get - lookup and obtain a reference to a clock producer. + * @dev: device for clock "consumer" + * @id: device ID + * + * Returns a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. + */ +struct clk *clk_get(struct device *dev, const char *id); + +/** + * clk_enable - inform the system when the clock source should be running. + * @clk: clock source + * + * If the clock can not be enabled/disabled, this should return success. + * + * Returns success (0) or negative errno. + */ +int clk_enable(struct clk *clk); + +/** + * clk_disable - inform the system when the clock source is no longer required. + * @clk: clock source + */ +void clk_disable(struct clk *clk); + +/** + * clk_use - increment the use count + * @clk: clock source + * + * Returns success (0) or negative errno. + */ +int clk_use(struct clk *clk); + +/** + * clk_unuse - decrement the use count + * @clk: clock source + */ +void clk_unuse(struct clk *clk); + +/** + * clk_get_rate - obtain the current clock rate for a clock source. + * This is only valid once the clock source has been enabled. + * @clk: clock source + */ +unsigned long clk_get_rate(struct clk *clk); + +/** + * clk_put - "free" the clock source + * @clk: clock source + */ +void clk_put(struct clk *clk); + + +/* + * The remaining APIs are optional for machine class support. + */ + + +/** + * clk_round_rate - adjust a rate to the exact rate a clock can provide + * @clk: clock source + * @rate: desired clock rate in kHz + * + * Returns rounded clock rate, or negative errno. + */ +long clk_round_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_rate - set the clock rate for a clock source + * @clk: clock source + * @rate: desired clock rate in kHz + * + * Returns success (0) or negative errno. + */ +int clk_set_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_parent - set the parent clock source for this clock + * @clk: clock source + * @parent: parent clock source + * + * Returns success (0) or negative errno. 
+ */ +int clk_set_parent(struct clk *clk, struct clk *parent); + +/** + * clk_get_parent - get the parent clock source for this clock + * @clk: clock source + * + * Returns struct clk corresponding to parent clock source, or + * valid IS_ERR() condition containing errno. + */ +struct clk *clk_get_parent(struct clk *clk); + +#endif diff --git a/include/asm-arm/vfp.h b/include/asm-arm/vfp.h new file mode 100644 index 000000000..14c5e0946 --- /dev/null +++ b/include/asm-arm/vfp.h @@ -0,0 +1,78 @@ +/* + * linux/include/asm-arm/vfp.h + * + * VFP register definitions. + * First, the standard VFP set. + */ + +#define FPSID cr0 +#define FPSCR cr1 +#define FPEXC cr8 + +/* FPSID bits */ +#define FPSID_IMPLEMENTER_BIT (24) +#define FPSID_IMPLEMENTER_MASK (0xff << FPSID_IMPLEMENTER_BIT) +#define FPSID_SOFTWARE (1<<23) +#define FPSID_FORMAT_BIT (21) +#define FPSID_FORMAT_MASK (0x3 << FPSID_FORMAT_BIT) +#define FPSID_NODOUBLE (1<<20) +#define FPSID_ARCH_BIT (16) +#define FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT) +#define FPSID_PART_BIT (8) +#define FPSID_PART_MASK (0xFF << FPSID_PART_BIT) +#define FPSID_VARIANT_BIT (4) +#define FPSID_VARIANT_MASK (0xF << FPSID_VARIANT_BIT) +#define FPSID_REV_BIT (0) +#define FPSID_REV_MASK (0xF << FPSID_REV_BIT) + +/* FPEXC bits */ +#define FPEXC_EXCEPTION (1<<31) +#define FPEXC_ENABLE (1<<30) + +/* FPSCR bits */ +#define FPSCR_DEFAULT_NAN (1<<25) +#define FPSCR_FLUSHTOZERO (1<<24) +#define FPSCR_ROUND_NEAREST (0<<22) +#define FPSCR_ROUND_PLUSINF (1<<22) +#define FPSCR_ROUND_MINUSINF (2<<22) +#define FPSCR_ROUND_TOZERO (3<<22) +#define FPSCR_RMODE_BIT (22) +#define FPSCR_RMODE_MASK (3 << FPSCR_RMODE_BIT) +#define FPSCR_STRIDE_BIT (20) +#define FPSCR_STRIDE_MASK (3 << FPSCR_STRIDE_BIT) +#define FPSCR_LENGTH_BIT (16) +#define FPSCR_LENGTH_MASK (7 << FPSCR_LENGTH_BIT) +#define FPSCR_IOE (1<<8) +#define FPSCR_DZE (1<<9) +#define FPSCR_OFE (1<<10) +#define FPSCR_UFE (1<<11) +#define FPSCR_IXE (1<<12) +#define FPSCR_IDE (1<<15) +#define FPSCR_IOC (1<<0) +#define FPSCR_DZC (1<<1) +#define FPSCR_OFC (1<<2) +#define FPSCR_UFC (1<<3) +#define FPSCR_IXC (1<<4) +#define FPSCR_IDC (1<<7) + +/* + * VFP9-S specific. + */ +#define FPINST cr9 +#define FPINST2 cr10 + +/* FPEXC bits */ +#define FPEXC_FPV2 (1<<28) +#define FPEXC_LENGTH_BIT (8) +#define FPEXC_LENGTH_MASK (7 << FPEXC_LENGTH_BIT) +#define FPEXC_INV (1 << 7) +#define FPEXC_UFC (1 << 3) +#define FPEXC_OFC (1 << 2) +#define FPEXC_IOC (1 << 0) + +/* Bit patterns for decoding the packaged operation descriptors */ +#define VFPOPDESC_LENGTH_BIT (9) +#define VFPOPDESC_LENGTH_MASK (0x07 << VFPOPDESC_LENGTH_BIT) +#define VFPOPDESC_UNUSED_BIT (24) +#define VFPOPDESC_UNUSED_MASK (0xFF << VFPOPDESC_UNUSED_BIT) +#define VFPOPDESC_OPDESC_MASK (~(VFPOPDESC_LENGTH_MASK | VFPOPDESC_UNUSED_MASK)) diff --git a/include/asm-ia64/setup.h b/include/asm-ia64/setup.h new file mode 100644 index 000000000..ea29b57af --- /dev/null +++ b/include/asm-ia64/setup.h @@ -0,0 +1,6 @@ +#ifndef __IA64_SETUP_H +#define __IA64_SETUP_H + +#define COMMAND_LINE_SIZE 512 + +#endif diff --git a/include/asm-mips/gt64240.h b/include/asm-mips/gt64240.h new file mode 100644 index 000000000..8f9bd341e --- /dev/null +++ b/include/asm-mips/gt64240.h @@ -0,0 +1,1235 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright - Galileo technology. 
+ * Copyright (C) 2004 by Ralf Baechle + */ +#ifndef __ASM_MIPS_MV64240_H +#define __ASM_MIPS_MV64240_H + +#include +#include + +/* + * CPU Control Registers + */ + +#define CPU_CONFIGURATION 0x000 +#define CPU_MODE 0x120 +#define CPU_READ_RESPONSE_CROSSBAR_LOW 0x170 +#define CPU_READ_RESPONSE_CROSSBAR_HIGH 0x178 + +/* + * Processor Address Space + */ + +/* Sdram's BAR'S */ +#define SCS_0_LOW_DECODE_ADDRESS 0x008 +#define SCS_0_HIGH_DECODE_ADDRESS 0x010 +#define SCS_1_LOW_DECODE_ADDRESS 0x208 +#define SCS_1_HIGH_DECODE_ADDRESS 0x210 +#define SCS_2_LOW_DECODE_ADDRESS 0x018 +#define SCS_2_HIGH_DECODE_ADDRESS 0x020 +#define SCS_3_LOW_DECODE_ADDRESS 0x218 +#define SCS_3_HIGH_DECODE_ADDRESS 0x220 +/* Devices BAR'S */ +#define CS_0_LOW_DECODE_ADDRESS 0x028 +#define CS_0_HIGH_DECODE_ADDRESS 0x030 +#define CS_1_LOW_DECODE_ADDRESS 0x228 +#define CS_1_HIGH_DECODE_ADDRESS 0x230 +#define CS_2_LOW_DECODE_ADDRESS 0x248 +#define CS_2_HIGH_DECODE_ADDRESS 0x250 +#define CS_3_LOW_DECODE_ADDRESS 0x038 +#define CS_3_HIGH_DECODE_ADDRESS 0x040 +#define BOOTCS_LOW_DECODE_ADDRESS 0x238 +#define BOOTCS_HIGH_DECODE_ADDRESS 0x240 + +#define PCI_0I_O_LOW_DECODE_ADDRESS 0x048 +#define PCI_0I_O_HIGH_DECODE_ADDRESS 0x050 +#define PCI_0MEMORY0_LOW_DECODE_ADDRESS 0x058 +#define PCI_0MEMORY0_HIGH_DECODE_ADDRESS 0x060 +#define PCI_0MEMORY1_LOW_DECODE_ADDRESS 0x080 +#define PCI_0MEMORY1_HIGH_DECODE_ADDRESS 0x088 +#define PCI_0MEMORY2_LOW_DECODE_ADDRESS 0x258 +#define PCI_0MEMORY2_HIGH_DECODE_ADDRESS 0x260 +#define PCI_0MEMORY3_LOW_DECODE_ADDRESS 0x280 +#define PCI_0MEMORY3_HIGH_DECODE_ADDRESS 0x288 + +#define PCI_1I_O_LOW_DECODE_ADDRESS 0x090 +#define PCI_1I_O_HIGH_DECODE_ADDRESS 0x098 +#define PCI_1MEMORY0_LOW_DECODE_ADDRESS 0x0a0 +#define PCI_1MEMORY0_HIGH_DECODE_ADDRESS 0x0a8 +#define PCI_1MEMORY1_LOW_DECODE_ADDRESS 0x0b0 +#define PCI_1MEMORY1_HIGH_DECODE_ADDRESS 0x0b8 +#define PCI_1MEMORY2_LOW_DECODE_ADDRESS 0x2a0 +#define PCI_1MEMORY2_HIGH_DECODE_ADDRESS 0x2a8 +#define PCI_1MEMORY3_LOW_DECODE_ADDRESS 0x2b0 +#define PCI_1MEMORY3_HIGH_DECODE_ADDRESS 0x2b8 + +#define INTERNAL_SPACE_DECODE 0x068 + +#define CPU_0_LOW_DECODE_ADDRESS 0x290 +#define CPU_0_HIGH_DECODE_ADDRESS 0x298 +#define CPU_1_LOW_DECODE_ADDRESS 0x2c0 +#define CPU_1_HIGH_DECODE_ADDRESS 0x2c8 + +#define PCI_0I_O_ADDRESS_REMAP 0x0f0 +#define PCI_0MEMORY0_ADDRESS_REMAP 0x0f8 +#define PCI_0MEMORY0_HIGH_ADDRESS_REMAP 0x320 +#define PCI_0MEMORY1_ADDRESS_REMAP 0x100 +#define PCI_0MEMORY1_HIGH_ADDRESS_REMAP 0x328 +#define PCI_0MEMORY2_ADDRESS_REMAP 0x2f8 +#define PCI_0MEMORY2_HIGH_ADDRESS_REMAP 0x330 +#define PCI_0MEMORY3_ADDRESS_REMAP 0x300 +#define PCI_0MEMORY3_HIGH_ADDRESS_REMAP 0x338 + +#define PCI_1I_O_ADDRESS_REMAP 0x108 +#define PCI_1MEMORY0_ADDRESS_REMAP 0x110 +#define PCI_1MEMORY0_HIGH_ADDRESS_REMAP 0x340 +#define PCI_1MEMORY1_ADDRESS_REMAP 0x118 +#define PCI_1MEMORY1_HIGH_ADDRESS_REMAP 0x348 +#define PCI_1MEMORY2_ADDRESS_REMAP 0x310 +#define PCI_1MEMORY2_HIGH_ADDRESS_REMAP 0x350 +#define PCI_1MEMORY3_ADDRESS_REMAP 0x318 +#define PCI_1MEMORY3_HIGH_ADDRESS_REMAP 0x358 + +/* + * CPU Sync Barrier + */ + +#define PCI_0SYNC_BARIER_VIRTUAL_REGISTER 0x0c0 +#define PCI_1SYNC_BARIER_VIRTUAL_REGISTER 0x0c8 + + +/* + * CPU Access Protect + */ + +#define CPU_LOW_PROTECT_ADDRESS_0 0X180 +#define CPU_HIGH_PROTECT_ADDRESS_0 0X188 +#define CPU_LOW_PROTECT_ADDRESS_1 0X190 +#define CPU_HIGH_PROTECT_ADDRESS_1 0X198 +#define CPU_LOW_PROTECT_ADDRESS_2 0X1a0 +#define CPU_HIGH_PROTECT_ADDRESS_2 0X1a8 +#define CPU_LOW_PROTECT_ADDRESS_3 0X1b0 +#define CPU_HIGH_PROTECT_ADDRESS_3 
0X1b8 +#define CPU_LOW_PROTECT_ADDRESS_4 0X1c0 +#define CPU_HIGH_PROTECT_ADDRESS_4 0X1c8 +#define CPU_LOW_PROTECT_ADDRESS_5 0X1d0 +#define CPU_HIGH_PROTECT_ADDRESS_5 0X1d8 +#define CPU_LOW_PROTECT_ADDRESS_6 0X1e0 +#define CPU_HIGH_PROTECT_ADDRESS_6 0X1e8 +#define CPU_LOW_PROTECT_ADDRESS_7 0X1f0 +#define CPU_HIGH_PROTECT_ADDRESS_7 0X1f8 + + +/* + * Snoop Control + */ + +#define SNOOP_BASE_ADDRESS_0 0x380 +#define SNOOP_TOP_ADDRESS_0 0x388 +#define SNOOP_BASE_ADDRESS_1 0x390 +#define SNOOP_TOP_ADDRESS_1 0x398 +#define SNOOP_BASE_ADDRESS_2 0x3a0 +#define SNOOP_TOP_ADDRESS_2 0x3a8 +#define SNOOP_BASE_ADDRESS_3 0x3b0 +#define SNOOP_TOP_ADDRESS_3 0x3b8 + +/* + * CPU Error Report + */ + +#define CPU_ERROR_ADDRESS_LOW 0x070 +#define CPU_ERROR_ADDRESS_HIGH 0x078 +#define CPU_ERROR_DATA_LOW 0x128 +#define CPU_ERROR_DATA_HIGH 0x130 +#define CPU_ERROR_PARITY 0x138 +#define CPU_ERROR_CAUSE 0x140 +#define CPU_ERROR_MASK 0x148 + +/* + * Pslave Debug + */ + +#define X_0_ADDRESS 0x360 +#define X_0_COMMAND_ID 0x368 +#define X_1_ADDRESS 0x370 +#define X_1_COMMAND_ID 0x378 +#define WRITE_DATA_LOW 0x3c0 +#define WRITE_DATA_HIGH 0x3c8 +#define WRITE_BYTE_ENABLE 0X3e0 +#define READ_DATA_LOW 0x3d0 +#define READ_DATA_HIGH 0x3d8 +#define READ_ID 0x3e8 + + +/* + * SDRAM and Device Address Space + */ + + +/* + * SDRAM Configuration + */ + +#define SDRAM_CONFIGURATION 0x448 +#define SDRAM_OPERATION_MODE 0x474 +#define SDRAM_ADDRESS_DECODE 0x47C +#define SDRAM_TIMING_PARAMETERS 0x4b4 +#define SDRAM_UMA_CONTROL 0x4a4 +#define SDRAM_CROSS_BAR_CONTROL_LOW 0x4a8 +#define SDRAM_CROSS_BAR_CONTROL_HIGH 0x4ac +#define SDRAM_CROSS_BAR_TIMEOUT 0x4b0 + + +/* + * SDRAM Parameters + */ + +#define SDRAM_BANK0PARAMETERS 0x44C +#define SDRAM_BANK1PARAMETERS 0x450 +#define SDRAM_BANK2PARAMETERS 0x454 +#define SDRAM_BANK3PARAMETERS 0x458 + + +/* + * SDRAM Error Report + */ + +#define SDRAM_ERROR_DATA_LOW 0x484 +#define SDRAM_ERROR_DATA_HIGH 0x480 +#define SDRAM_AND_DEVICE_ERROR_ADDRESS 0x490 +#define SDRAM_RECEIVED_ECC 0x488 +#define SDRAM_CALCULATED_ECC 0x48c +#define SDRAM_ECC_CONTROL 0x494 +#define SDRAM_ECC_ERROR_COUNTER 0x498 + + +/* + * SDunit Debug (for internal use) + */ + +#define X0_ADDRESS 0x500 +#define X0_COMMAND_AND_ID 0x504 +#define X0_WRITE_DATA_LOW 0x508 +#define X0_WRITE_DATA_HIGH 0x50c +#define X0_WRITE_BYTE_ENABLE 0x518 +#define X0_READ_DATA_LOW 0x510 +#define X0_READ_DATA_HIGH 0x514 +#define X0_READ_ID 0x51c +#define X1_ADDRESS 0x520 +#define X1_COMMAND_AND_ID 0x524 +#define X1_WRITE_DATA_LOW 0x528 +#define X1_WRITE_DATA_HIGH 0x52c +#define X1_WRITE_BYTE_ENABLE 0x538 +#define X1_READ_DATA_LOW 0x530 +#define X1_READ_DATA_HIGH 0x534 +#define X1_READ_ID 0x53c +#define X0_SNOOP_ADDRESS 0x540 +#define X0_SNOOP_COMMAND 0x544 +#define X1_SNOOP_ADDRESS 0x548 +#define X1_SNOOP_COMMAND 0x54c + + +/* + * Device Parameters + */ + +#define DEVICE_BANK0PARAMETERS 0x45c +#define DEVICE_BANK1PARAMETERS 0x460 +#define DEVICE_BANK2PARAMETERS 0x464 +#define DEVICE_BANK3PARAMETERS 0x468 +#define DEVICE_BOOT_BANK_PARAMETERS 0x46c +#define DEVICE_CONTROL 0x4c0 +#define DEVICE_CROSS_BAR_CONTROL_LOW 0x4c8 +#define DEVICE_CROSS_BAR_CONTROL_HIGH 0x4cc +#define DEVICE_CROSS_BAR_TIMEOUT 0x4c4 + + +/* + * Device Interrupt + */ + +#define DEVICE_INTERRUPT_CAUSE 0x4d0 +#define DEVICE_INTERRUPT_MASK 0x4d4 +#define DEVICE_ERROR_ADDRESS 0x4d8 + +/* + * DMA Record + */ + +#define CHANNEL0_DMA_BYTE_COUNT 0x800 +#define CHANNEL1_DMA_BYTE_COUNT 0x804 +#define CHANNEL2_DMA_BYTE_COUNT 0x808 +#define CHANNEL3_DMA_BYTE_COUNT 0x80C +#define 
CHANNEL4_DMA_BYTE_COUNT 0x900 +#define CHANNEL5_DMA_BYTE_COUNT 0x904 +#define CHANNEL6_DMA_BYTE_COUNT 0x908 +#define CHANNEL7_DMA_BYTE_COUNT 0x90C +#define CHANNEL0_DMA_SOURCE_ADDRESS 0x810 +#define CHANNEL1_DMA_SOURCE_ADDRESS 0x814 +#define CHANNEL2_DMA_SOURCE_ADDRESS 0x818 +#define CHANNEL3_DMA_SOURCE_ADDRESS 0x81C +#define CHANNEL4_DMA_SOURCE_ADDRESS 0x910 +#define CHANNEL5_DMA_SOURCE_ADDRESS 0x914 +#define CHANNEL6_DMA_SOURCE_ADDRESS 0x918 +#define CHANNEL7_DMA_SOURCE_ADDRESS 0x91C +#define CHANNEL0_DMA_DESTINATION_ADDRESS 0x820 +#define CHANNEL1_DMA_DESTINATION_ADDRESS 0x824 +#define CHANNEL2_DMA_DESTINATION_ADDRESS 0x828 +#define CHANNEL3_DMA_DESTINATION_ADDRESS 0x82C +#define CHANNEL4_DMA_DESTINATION_ADDRESS 0x920 +#define CHANNEL5_DMA_DESTINATION_ADDRESS 0x924 +#define CHANNEL6_DMA_DESTINATION_ADDRESS 0x928 +#define CHANNEL7_DMA_DESTINATION_ADDRESS 0x92C +#define CHANNEL0NEXT_RECORD_POINTER 0x830 +#define CHANNEL1NEXT_RECORD_POINTER 0x834 +#define CHANNEL2NEXT_RECORD_POINTER 0x838 +#define CHANNEL3NEXT_RECORD_POINTER 0x83C +#define CHANNEL4NEXT_RECORD_POINTER 0x930 +#define CHANNEL5NEXT_RECORD_POINTER 0x934 +#define CHANNEL6NEXT_RECORD_POINTER 0x938 +#define CHANNEL7NEXT_RECORD_POINTER 0x93C +#define CHANNEL0CURRENT_DESCRIPTOR_POINTER 0x870 +#define CHANNEL1CURRENT_DESCRIPTOR_POINTER 0x874 +#define CHANNEL2CURRENT_DESCRIPTOR_POINTER 0x878 +#define CHANNEL3CURRENT_DESCRIPTOR_POINTER 0x87C +#define CHANNEL4CURRENT_DESCRIPTOR_POINTER 0x970 +#define CHANNEL5CURRENT_DESCRIPTOR_POINTER 0x974 +#define CHANNEL6CURRENT_DESCRIPTOR_POINTER 0x978 +#define CHANNEL7CURRENT_DESCRIPTOR_POINTER 0x97C +#define CHANNEL0_DMA_SOURCE_HIGH_PCI_ADDRESS 0x890 +#define CHANNEL1_DMA_SOURCE_HIGH_PCI_ADDRESS 0x894 +#define CHANNEL2_DMA_SOURCE_HIGH_PCI_ADDRESS 0x898 +#define CHANNEL3_DMA_SOURCE_HIGH_PCI_ADDRESS 0x89c +#define CHANNEL4_DMA_SOURCE_HIGH_PCI_ADDRESS 0x990 +#define CHANNEL5_DMA_SOURCE_HIGH_PCI_ADDRESS 0x994 +#define CHANNEL6_DMA_SOURCE_HIGH_PCI_ADDRESS 0x998 +#define CHANNEL7_DMA_SOURCE_HIGH_PCI_ADDRESS 0x99c +#define CHANNEL0_DMA_DESTINATION_HIGH_PCI_ADDRESS 0x8a0 +#define CHANNEL1_DMA_DESTINATION_HIGH_PCI_ADDRESS 0x8a4 +#define CHANNEL2_DMA_DESTINATION_HIGH_PCI_ADDRESS 0x8a8 +#define CHANNEL3_DMA_DESTINATION_HIGH_PCI_ADDRESS 0x8ac +#define CHANNEL4_DMA_DESTINATION_HIGH_PCI_ADDRESS 0x9a0 +#define CHANNEL5_DMA_DESTINATION_HIGH_PCI_ADDRESS 0x9a4 +#define CHANNEL6_DMA_DESTINATION_HIGH_PCI_ADDRESS 0x9a8 +#define CHANNEL7_DMA_DESTINATION_HIGH_PCI_ADDRESS 0x9ac +#define CHANNEL0_DMA_NEXT_RECORD_POINTER_HIGH_PCI_ADDRESS 0x8b0 +#define CHANNEL1_DMA_NEXT_RECORD_POINTER_HIGH_PCI_ADDRESS 0x8b4 +#define CHANNEL2_DMA_NEXT_RECORD_POINTER_HIGH_PCI_ADDRESS 0x8b8 +#define CHANNEL3_DMA_NEXT_RECORD_POINTER_HIGH_PCI_ADDRESS 0x8bc +#define CHANNEL4_DMA_NEXT_RECORD_POINTER_HIGH_PCI_ADDRESS 0x9b0 +#define CHANNEL5_DMA_NEXT_RECORD_POINTER_HIGH_PCI_ADDRESS 0x9b4 +#define CHANNEL6_DMA_NEXT_RECORD_POINTER_HIGH_PCI_ADDRESS 0x9b8 +#define CHANNEL7_DMA_NEXT_RECORD_POINTER_HIGH_PCI_ADDRESS 0x9bc + +/* + * DMA Channel Control + */ + +#define CHANNEL0CONTROL 0x840 +#define CHANNEL0CONTROL_HIGH 0x880 + +#define CHANNEL1CONTROL 0x844 +#define CHANNEL1CONTROL_HIGH 0x884 + +#define CHANNEL2CONTROL 0x848 +#define CHANNEL2CONTROL_HIGH 0x888 + +#define CHANNEL3CONTROL 0x84C +#define CHANNEL3CONTROL_HIGH 0x88C + +#define CHANNEL4CONTROL 0x940 +#define CHANNEL4CONTROL_HIGH 0x980 + +#define CHANNEL5CONTROL 0x944 +#define CHANNEL5CONTROL_HIGH 0x984 + +#define CHANNEL6CONTROL 0x948 +#define CHANNEL6CONTROL_HIGH 0x988 + +#define 
CHANNEL7CONTROL 0x94C +#define CHANNEL7CONTROL_HIGH 0x98C + + +/* + * DMA Arbiter + */ + +#define ARBITER_CONTROL_0_3 0x860 +#define ARBITER_CONTROL_4_7 0x960 + + +/* + * DMA Interrupt + */ + +#define CHANELS0_3_INTERRUPT_CAUSE 0x8c0 +#define CHANELS0_3_INTERRUPT_MASK 0x8c4 +#define CHANELS0_3_ERROR_ADDRESS 0x8c8 +#define CHANELS0_3_ERROR_SELECT 0x8cc +#define CHANELS4_7_INTERRUPT_CAUSE 0x9c0 +#define CHANELS4_7_INTERRUPT_MASK 0x9c4 +#define CHANELS4_7_ERROR_ADDRESS 0x9c8 +#define CHANELS4_7_ERROR_SELECT 0x9cc + + +/* + * DMA Debug (for internal use) + */ + +#define DMA_X0_ADDRESS 0x8e0 +#define DMA_X0_COMMAND_AND_ID 0x8e4 +#define DMA_X0_WRITE_DATA_LOW 0x8e8 +#define DMA_X0_WRITE_DATA_HIGH 0x8ec +#define DMA_X0_WRITE_BYTE_ENABLE 0x8f8 +#define DMA_X0_READ_DATA_LOW 0x8f0 +#define DMA_X0_READ_DATA_HIGH 0x8f4 +#define DMA_X0_READ_ID 0x8fc +#define DMA_X1_ADDRESS 0x9e0 +#define DMA_X1_COMMAND_AND_ID 0x9e4 +#define DMA_X1_WRITE_DATA_LOW 0x9e8 +#define DMA_X1_WRITE_DATA_HIGH 0x9ec +#define DMA_X1_WRITE_BYTE_ENABLE 0x9f8 +#define DMA_X1_READ_DATA_LOW 0x9f0 +#define DMA_X1_READ_DATA_HIGH 0x9f4 +#define DMA_X1_READ_ID 0x9fc + +/* + * Timer_Counter + */ + +#define TIMER_COUNTER0 0x850 +#define TIMER_COUNTER1 0x854 +#define TIMER_COUNTER2 0x858 +#define TIMER_COUNTER3 0x85C +#define TIMER_COUNTER_0_3_CONTROL 0x864 +#define TIMER_COUNTER_0_3_INTERRUPT_CAUSE 0x868 +#define TIMER_COUNTER_0_3_INTERRUPT_MASK 0x86c +#define TIMER_COUNTER4 0x950 +#define TIMER_COUNTER5 0x954 +#define TIMER_COUNTER6 0x958 +#define TIMER_COUNTER7 0x95C +#define TIMER_COUNTER_4_7_CONTROL 0x964 +#define TIMER_COUNTER_4_7_INTERRUPT_CAUSE 0x968 +#define TIMER_COUNTER_4_7_INTERRUPT_MASK 0x96c + +/* + * PCI Slave Address Decoding + */ + +#define PCI_0SCS_0_BANK_SIZE 0xc08 +#define PCI_1SCS_0_BANK_SIZE 0xc88 +#define PCI_0SCS_1_BANK_SIZE 0xd08 +#define PCI_1SCS_1_BANK_SIZE 0xd88 +#define PCI_0SCS_2_BANK_SIZE 0xc0c +#define PCI_1SCS_2_BANK_SIZE 0xc8c +#define PCI_0SCS_3_BANK_SIZE 0xd0c +#define PCI_1SCS_3_BANK_SIZE 0xd8c +#define PCI_0CS_0_BANK_SIZE 0xc10 +#define PCI_1CS_0_BANK_SIZE 0xc90 +#define PCI_0CS_1_BANK_SIZE 0xd10 +#define PCI_1CS_1_BANK_SIZE 0xd90 +#define PCI_0CS_2_BANK_SIZE 0xd18 +#define PCI_1CS_2_BANK_SIZE 0xd98 +#define PCI_0CS_3_BANK_SIZE 0xc14 +#define PCI_1CS_3_BANK_SIZE 0xc94 +#define PCI_0CS_BOOT_BANK_SIZE 0xd14 +#define PCI_1CS_BOOT_BANK_SIZE 0xd94 +#define PCI_0P2P_MEM0_BAR_SIZE 0xd1c +#define PCI_1P2P_MEM0_BAR_SIZE 0xd9c +#define PCI_0P2P_MEM1_BAR_SIZE 0xd20 +#define PCI_1P2P_MEM1_BAR_SIZE 0xda0 +#define PCI_0P2P_I_O_BAR_SIZE 0xd24 +#define PCI_1P2P_I_O_BAR_SIZE 0xda4 +#define PCI_0CPU_BAR_SIZE 0xd28 +#define PCI_1CPU_BAR_SIZE 0xda8 +#define PCI_0DAC_SCS_0_BANK_SIZE 0xe00 +#define PCI_1DAC_SCS_0_BANK_SIZE 0xe80 +#define PCI_0DAC_SCS_1_BANK_SIZE 0xe04 +#define PCI_1DAC_SCS_1_BANK_SIZE 0xe84 +#define PCI_0DAC_SCS_2_BANK_SIZE 0xe08 +#define PCI_1DAC_SCS_2_BANK_SIZE 0xe88 +#define PCI_0DAC_SCS_3_BANK_SIZE 0xe0c +#define PCI_1DAC_SCS_3_BANK_SIZE 0xe8c +#define PCI_0DAC_CS_0_BANK_SIZE 0xe10 +#define PCI_1DAC_CS_0_BANK_SIZE 0xe90 +#define PCI_0DAC_CS_1_BANK_SIZE 0xe14 +#define PCI_1DAC_CS_1_BANK_SIZE 0xe94 +#define PCI_0DAC_CS_2_BANK_SIZE 0xe18 +#define PCI_1DAC_CS_2_BANK_SIZE 0xe98 +#define PCI_0DAC_CS_3_BANK_SIZE 0xe1c +#define PCI_1DAC_CS_3_BANK_SIZE 0xe9c +#define PCI_0DAC_BOOTCS_BANK_SIZE 0xe20 +#define PCI_1DAC_BOOTCS_BANK_SIZE 0xea0 +#define PCI_0DAC_P2P_MEM0_BAR_SIZE 0xe24 +#define PCI_1DAC_P2P_MEM0_BAR_SIZE 0xea4 +#define PCI_0DAC_P2P_MEM1_BAR_SIZE 0xe28 +#define PCI_1DAC_P2P_MEM1_BAR_SIZE 0xea8 +#define 
PCI_0DAC_CPU_BAR_SIZE 0xe2c +#define PCI_1DAC_CPU_BAR_SIZE 0xeac +#define PCI_0EXPANSION_ROM_BAR_SIZE 0xd2c +#define PCI_1EXPANSION_ROM_BAR_SIZE 0xdac +#define PCI_0BASE_ADDRESS_REGISTERS_ENABLE 0xc3c +#define PCI_1BASE_ADDRESS_REGISTERS_ENABLE 0xcbc +#define PCI_0SCS_0_BASE_ADDRESS_REMAP 0xc48 +#define PCI_1SCS_0_BASE_ADDRESS_REMAP 0xcc8 +#define PCI_0SCS_1_BASE_ADDRESS_REMAP 0xd48 +#define PCI_1SCS_1_BASE_ADDRESS_REMAP 0xdc8 +#define PCI_0SCS_2_BASE_ADDRESS_REMAP 0xc4c +#define PCI_1SCS_2_BASE_ADDRESS_REMAP 0xccc +#define PCI_0SCS_3_BASE_ADDRESS_REMAP 0xd4c +#define PCI_1SCS_3_BASE_ADDRESS_REMAP 0xdcc +#define PCI_0CS_0_BASE_ADDRESS_REMAP 0xc50 +#define PCI_1CS_0_BASE_ADDRESS_REMAP 0xcd0 +#define PCI_0CS_1_BASE_ADDRESS_REMAP 0xd50 +#define PCI_1CS_1_BASE_ADDRESS_REMAP 0xdd0 +#define PCI_0CS_2_BASE_ADDRESS_REMAP 0xd58 +#define PCI_1CS_2_BASE_ADDRESS_REMAP 0xdd8 +#define PCI_0CS_3_BASE_ADDRESS_REMAP 0xc54 +#define PCI_1CS_3_BASE_ADDRESS_REMAP 0xcd4 +#define PCI_0CS_BOOTCS_BASE_ADDRESS_REMAP 0xd54 +#define PCI_1CS_BOOTCS_BASE_ADDRESS_REMAP 0xdd4 +#define PCI_0P2P_MEM0_BASE_ADDRESS_REMAP_LOW 0xd5c +#define PCI_1P2P_MEM0_BASE_ADDRESS_REMAP_LOW 0xddc +#define PCI_0P2P_MEM0_BASE_ADDRESS_REMAP_HIGH 0xd60 +#define PCI_1P2P_MEM0_BASE_ADDRESS_REMAP_HIGH 0xde0 +#define PCI_0P2P_MEM1_BASE_ADDRESS_REMAP_LOW 0xd64 +#define PCI_1P2P_MEM1_BASE_ADDRESS_REMAP_LOW 0xde4 +#define PCI_0P2P_MEM1_BASE_ADDRESS_REMAP_HIGH 0xd68 +#define PCI_1P2P_MEM1_BASE_ADDRESS_REMAP_HIGH 0xde8 +#define PCI_0P2P_I_O_BASE_ADDRESS_REMAP 0xd6c +#define PCI_1P2P_I_O_BASE_ADDRESS_REMAP 0xdec +#define PCI_0CPU_BASE_ADDRESS_REMAP 0xd70 +#define PCI_1CPU_BASE_ADDRESS_REMAP 0xdf0 +#define PCI_0DAC_SCS_0_BASE_ADDRESS_REMAP 0xf00 +#define PCI_1DAC_SCS_0_BASE_ADDRESS_REMAP 0xff0 +#define PCI_0DAC_SCS_1_BASE_ADDRESS_REMAP 0xf04 +#define PCI_1DAC_SCS_1_BASE_ADDRESS_REMAP 0xf84 +#define PCI_0DAC_SCS_2_BASE_ADDRESS_REMAP 0xf08 +#define PCI_1DAC_SCS_2_BASE_ADDRESS_REMAP 0xf88 +#define PCI_0DAC_SCS_3_BASE_ADDRESS_REMAP 0xf0c +#define PCI_1DAC_SCS_3_BASE_ADDRESS_REMAP 0xf8c +#define PCI_0DAC_CS_0_BASE_ADDRESS_REMAP 0xf10 +#define PCI_1DAC_CS_0_BASE_ADDRESS_REMAP 0xf90 +#define PCI_0DAC_CS_1_BASE_ADDRESS_REMAP 0xf14 +#define PCI_1DAC_CS_1_BASE_ADDRESS_REMAP 0xf94 +#define PCI_0DAC_CS_2_BASE_ADDRESS_REMAP 0xf18 +#define PCI_1DAC_CS_2_BASE_ADDRESS_REMAP 0xf98 +#define PCI_0DAC_CS_3_BASE_ADDRESS_REMAP 0xf1c +#define PCI_1DAC_CS_3_BASE_ADDRESS_REMAP 0xf9c +#define PCI_0DAC_BOOTCS_BASE_ADDRESS_REMAP 0xf20 +#define PCI_1DAC_BOOTCS_BASE_ADDRESS_REMAP 0xfa0 +#define PCI_0DAC_P2P_MEM0_BASE_ADDRESS_REMAP_LOW 0xf24 +#define PCI_1DAC_P2P_MEM0_BASE_ADDRESS_REMAP_LOW 0xfa4 +#define PCI_0DAC_P2P_MEM0_BASE_ADDRESS_REMAP_HIGH 0xf28 +#define PCI_1DAC_P2P_MEM0_BASE_ADDRESS_REMAP_HIGH 0xfa8 +#define PCI_0DAC_P2P_MEM1_BASE_ADDRESS_REMAP_LOW 0xf2c +#define PCI_1DAC_P2P_MEM1_BASE_ADDRESS_REMAP_LOW 0xfac +#define PCI_0DAC_P2P_MEM1_BASE_ADDRESS_REMAP_HIGH 0xf30 +#define PCI_1DAC_P2P_MEM1_BASE_ADDRESS_REMAP_HIGH 0xfb0 +#define PCI_0DAC_CPU_BASE_ADDRESS_REMAP 0xf34 +#define PCI_1DAC_CPU_BASE_ADDRESS_REMAP 0xfb4 +#define PCI_0EXPANSION_ROM_BASE_ADDRESS_REMAP 0xf38 +#define PCI_1EXPANSION_ROM_BASE_ADDRESS_REMAP 0xfb8 +#define PCI_0ADDRESS_DECODE_CONTROL 0xd3c +#define PCI_1ADDRESS_DECODE_CONTROL 0xdbc + +/* + * PCI Control + */ + +#define PCI_0COMMAND 0xc00 +#define PCI_1COMMAND 0xc80 +#define PCI_0MODE 0xd00 +#define PCI_1MODE 0xd80 +#define PCI_0TIMEOUT_RETRY 0xc04 +#define PCI_1TIMEOUT_RETRY 0xc84 +#define PCI_0READ_BUFFER_DISCARD_TIMER 0xd04 +#define 
PCI_1READ_BUFFER_DISCARD_TIMER 0xd84 +#define MSI_0TRIGGER_TIMER 0xc38 +#define MSI_1TRIGGER_TIMER 0xcb8 +#define PCI_0ARBITER_CONTROL 0x1d00 +#define PCI_1ARBITER_CONTROL 0x1d80 +/* changing untill here */ +#define PCI_0CROSS_BAR_CONTROL_LOW 0x1d08 +#define PCI_0CROSS_BAR_CONTROL_HIGH 0x1d0c +#define PCI_0CROSS_BAR_TIMEOUT 0x1d04 +#define PCI_0READ_RESPONSE_CROSS_BAR_CONTROL_LOW 0x1d18 +#define PCI_0READ_RESPONSE_CROSS_BAR_CONTROL_HIGH 0x1d1c +#define PCI_0SYNC_BARRIER_VIRTUAL_REGISTER 0x1d10 +#define PCI_0P2P_CONFIGURATION 0x1d14 +#define PCI_0ACCESS_CONTROL_BASE_0_LOW 0x1e00 +#define PCI_0ACCESS_CONTROL_BASE_0_HIGH 0x1e04 +#define PCI_0ACCESS_CONTROL_TOP_0 0x1e08 +#define PCI_0ACCESS_CONTROL_BASE_1_LOW 0c1e10 +#define PCI_0ACCESS_CONTROL_BASE_1_HIGH 0x1e14 +#define PCI_0ACCESS_CONTROL_TOP_1 0x1e18 +#define PCI_0ACCESS_CONTROL_BASE_2_LOW 0c1e20 +#define PCI_0ACCESS_CONTROL_BASE_2_HIGH 0x1e24 +#define PCI_0ACCESS_CONTROL_TOP_2 0x1e28 +#define PCI_0ACCESS_CONTROL_BASE_3_LOW 0c1e30 +#define PCI_0ACCESS_CONTROL_BASE_3_HIGH 0x1e34 +#define PCI_0ACCESS_CONTROL_TOP_3 0x1e38 +#define PCI_0ACCESS_CONTROL_BASE_4_LOW 0c1e40 +#define PCI_0ACCESS_CONTROL_BASE_4_HIGH 0x1e44 +#define PCI_0ACCESS_CONTROL_TOP_4 0x1e48 +#define PCI_0ACCESS_CONTROL_BASE_5_LOW 0c1e50 +#define PCI_0ACCESS_CONTROL_BASE_5_HIGH 0x1e54 +#define PCI_0ACCESS_CONTROL_TOP_5 0x1e58 +#define PCI_0ACCESS_CONTROL_BASE_6_LOW 0c1e60 +#define PCI_0ACCESS_CONTROL_BASE_6_HIGH 0x1e64 +#define PCI_0ACCESS_CONTROL_TOP_6 0x1e68 +#define PCI_0ACCESS_CONTROL_BASE_7_LOW 0c1e70 +#define PCI_0ACCESS_CONTROL_BASE_7_HIGH 0x1e74 +#define PCI_0ACCESS_CONTROL_TOP_7 0x1e78 +#define PCI_1CROSS_BAR_CONTROL_LOW 0x1d88 +#define PCI_1CROSS_BAR_CONTROL_HIGH 0x1d8c +#define PCI_1CROSS_BAR_TIMEOUT 0x1d84 +#define PCI_1READ_RESPONSE_CROSS_BAR_CONTROL_LOW 0x1d98 +#define PCI_1READ_RESPONSE_CROSS_BAR_CONTROL_HIGH 0x1d9c +#define PCI_1SYNC_BARRIER_VIRTUAL_REGISTER 0x1d90 +#define PCI_1P2P_CONFIGURATION 0x1d94 +#define PCI_1ACCESS_CONTROL_BASE_0_LOW 0x1e80 +#define PCI_1ACCESS_CONTROL_BASE_0_HIGH 0x1e84 +#define PCI_1ACCESS_CONTROL_TOP_0 0x1e88 +#define PCI_1ACCESS_CONTROL_BASE_1_LOW 0c1e90 +#define PCI_1ACCESS_CONTROL_BASE_1_HIGH 0x1e94 +#define PCI_1ACCESS_CONTROL_TOP_1 0x1e98 +#define PCI_1ACCESS_CONTROL_BASE_2_LOW 0c1ea0 +#define PCI_1ACCESS_CONTROL_BASE_2_HIGH 0x1ea4 +#define PCI_1ACCESS_CONTROL_TOP_2 0x1ea8 +#define PCI_1ACCESS_CONTROL_BASE_3_LOW 0c1eb0 +#define PCI_1ACCESS_CONTROL_BASE_3_HIGH 0x1eb4 +#define PCI_1ACCESS_CONTROL_TOP_3 0x1eb8 +#define PCI_1ACCESS_CONTROL_BASE_4_LOW 0c1ec0 +#define PCI_1ACCESS_CONTROL_BASE_4_HIGH 0x1ec4 +#define PCI_1ACCESS_CONTROL_TOP_4 0x1ec8 +#define PCI_1ACCESS_CONTROL_BASE_5_LOW 0c1ed0 +#define PCI_1ACCESS_CONTROL_BASE_5_HIGH 0x1ed4 +#define PCI_1ACCESS_CONTROL_TOP_5 0x1ed8 +#define PCI_1ACCESS_CONTROL_BASE_6_LOW 0c1ee0 +#define PCI_1ACCESS_CONTROL_BASE_6_HIGH 0x1ee4 +#define PCI_1ACCESS_CONTROL_TOP_6 0x1ee8 +#define PCI_1ACCESS_CONTROL_BASE_7_LOW 0c1ef0 +#define PCI_1ACCESS_CONTROL_BASE_7_HIGH 0x1ef4 +#define PCI_1ACCESS_CONTROL_TOP_7 0x1ef8 + +/* + * PCI Snoop Control + */ + +#define PCI_0SNOOP_CONTROL_BASE_0_LOW 0x1f00 +#define PCI_0SNOOP_CONTROL_BASE_0_HIGH 0x1f04 +#define PCI_0SNOOP_CONTROL_TOP_0 0x1f08 +#define PCI_0SNOOP_CONTROL_BASE_1_0_LOW 0x1f10 +#define PCI_0SNOOP_CONTROL_BASE_1_0_HIGH 0x1f14 +#define PCI_0SNOOP_CONTROL_TOP_1 0x1f18 +#define PCI_0SNOOP_CONTROL_BASE_2_0_LOW 0x1f20 +#define PCI_0SNOOP_CONTROL_BASE_2_0_HIGH 0x1f24 +#define PCI_0SNOOP_CONTROL_TOP_2 0x1f28 +#define PCI_0SNOOP_CONTROL_BASE_3_0_LOW 0x1f30 
+#define PCI_0SNOOP_CONTROL_BASE_3_0_HIGH 0x1f34 +#define PCI_0SNOOP_CONTROL_TOP_3 0x1f38 +#define PCI_1SNOOP_CONTROL_BASE_0_LOW 0x1f80 +#define PCI_1SNOOP_CONTROL_BASE_0_HIGH 0x1f84 +#define PCI_1SNOOP_CONTROL_TOP_0 0x1f88 +#define PCI_1SNOOP_CONTROL_BASE_1_0_LOW 0x1f90 +#define PCI_1SNOOP_CONTROL_BASE_1_0_HIGH 0x1f94 +#define PCI_1SNOOP_CONTROL_TOP_1 0x1f98 +#define PCI_1SNOOP_CONTROL_BASE_2_0_LOW 0x1fa0 +#define PCI_1SNOOP_CONTROL_BASE_2_0_HIGH 0x1fa4 +#define PCI_1SNOOP_CONTROL_TOP_2 0x1fa8 +#define PCI_1SNOOP_CONTROL_BASE_3_0_LOW 0x1fb0 +#define PCI_1SNOOP_CONTROL_BASE_3_0_HIGH 0x1fb4 +#define PCI_1SNOOP_CONTROL_TOP_3 0x1fb8 + +/* + * PCI Configuration Address + */ + +#define PCI_0CONFIGURATION_ADDRESS 0xcf8 +#define PCI_0CONFIGURATION_DATA_VIRTUAL_REGISTER 0xcfc +#define PCI_1CONFIGURATION_ADDRESS 0xc78 +#define PCI_1CONFIGURATION_DATA_VIRTUAL_REGISTER 0xc7c +#define PCI_0INTERRUPT_ACKNOWLEDGE_VIRTUAL_REGISTER 0xc34 +#define PCI_1INTERRUPT_ACKNOWLEDGE_VIRTUAL_REGISTER 0xcb4 + +/* + * PCI Error Report + */ + +#define PCI_0SERR_MASK 0xc28 +#define PCI_0ERROR_ADDRESS_LOW 0x1d40 +#define PCI_0ERROR_ADDRESS_HIGH 0x1d44 +#define PCI_0ERROR_DATA_LOW 0x1d48 +#define PCI_0ERROR_DATA_HIGH 0x1d4c +#define PCI_0ERROR_COMMAND 0x1d50 +#define PCI_0ERROR_CAUSE 0x1d58 +#define PCI_0ERROR_MASK 0x1d5c + +#define PCI_1SERR_MASK 0xca8 +#define PCI_1ERROR_ADDRESS_LOW 0x1dc0 +#define PCI_1ERROR_ADDRESS_HIGH 0x1dc4 +#define PCI_1ERROR_DATA_LOW 0x1dc8 +#define PCI_1ERROR_DATA_HIGH 0x1dcc +#define PCI_1ERROR_COMMAND 0x1dd0 +#define PCI_1ERROR_CAUSE 0x1dd8 +#define PCI_1ERROR_MASK 0x1ddc + + +/* + * Lslave Debug (for internal use) + */ + +#define L_SLAVE_X0_ADDRESS 0x1d20 +#define L_SLAVE_X0_COMMAND_AND_ID 0x1d24 +#define L_SLAVE_X1_ADDRESS 0x1d28 +#define L_SLAVE_X1_COMMAND_AND_ID 0x1d2c +#define L_SLAVE_WRITE_DATA_LOW 0x1d30 +#define L_SLAVE_WRITE_DATA_HIGH 0x1d34 +#define L_SLAVE_WRITE_BYTE_ENABLE 0x1d60 +#define L_SLAVE_READ_DATA_LOW 0x1d38 +#define L_SLAVE_READ_DATA_HIGH 0x1d3c +#define L_SLAVE_READ_ID 0x1d64 + +#if 0 /* Disabled because PCI_* namespace belongs to PCI subsystem ... 
*/ + +/* + * PCI Configuration Function 0 + */ + +#define PCI_DEVICE_AND_VENDOR_ID 0x000 +#define PCI_STATUS_AND_COMMAND 0x004 +#define PCI_CLASS_CODE_AND_REVISION_ID 0x008 +#define PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE 0x00C +#define PCI_SCS_0_BASE_ADDRESS 0x010 +#define PCI_SCS_1_BASE_ADDRESS 0x014 +#define PCI_SCS_2_BASE_ADDRESS 0x018 +#define PCI_SCS_3_BASE_ADDRESS 0x01C +#define PCI_INTERNAL_REGISTERS_MEMORY_MAPPED_BASE_ADDRESS 0x020 +#define PCI_INTERNAL_REGISTERS_I_OMAPPED_BASE_ADDRESS 0x024 +#define PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID 0x02C +#define PCI_EXPANSION_ROM_BASE_ADDRESS_REGISTER 0x030 +#define PCI_CAPABILTY_LIST_POINTER 0x034 +#define PCI_INTERRUPT_PIN_AND_LINE 0x03C +#define PCI_POWER_MANAGEMENT_CAPABILITY 0x040 +#define PCI_POWER_MANAGEMENT_STATUS_AND_CONTROL 0x044 +#define PCI_VPD_ADDRESS 0x048 +#define PCI_VPD_DATA 0X04c +#define PCI_MSI_MESSAGE_CONTROL 0x050 +#define PCI_MSI_MESSAGE_ADDRESS 0x054 +#define PCI_MSI_MESSAGE_UPPER_ADDRESS 0x058 +#define PCI_MSI_MESSAGE_DATA 0x05c +#define PCI_COMPACT_PCI_HOT_SWAP_CAPABILITY 0x058 + +/* + * PCI Configuration Function 1 + */ + +#define PCI_CS_0_BASE_ADDRESS 0x110 +#define PCI_CS_1_BASE_ADDRESS 0x114 +#define PCI_CS_2_BASE_ADDRESS 0x118 +#define PCI_CS_3_BASE_ADDRESS 0x11c +#define PCI_BOOTCS_BASE_ADDRESS 0x120 + +/* + * PCI Configuration Function 2 + */ + +#define PCI_P2P_MEM0_BASE_ADDRESS 0x210 +#define PCI_P2P_MEM1_BASE_ADDRESS 0x214 +#define PCI_P2P_I_O_BASE_ADDRESS 0x218 +#define PCI_CPU_BASE_ADDRESS 0x21c + +/* + * PCI Configuration Function 4 + */ + +#define PCI_DAC_SCS_0_BASE_ADDRESS_LOW 0x410 +#define PCI_DAC_SCS_0_BASE_ADDRESS_HIGH 0x414 +#define PCI_DAC_SCS_1_BASE_ADDRESS_LOW 0x418 +#define PCI_DAC_SCS_1_BASE_ADDRESS_HIGH 0x41c +#define PCI_DAC_P2P_MEM0_BASE_ADDRESS_LOW 0x420 +#define PCI_DAC_P2P_MEM0_BASE_ADDRESS_HIGH 0x424 + + +/* + * PCI Configuration Function 5 + */ + +#define PCI_DAC_SCS_2_BASE_ADDRESS_LOW 0x510 +#define PCI_DAC_SCS_2_BASE_ADDRESS_HIGH 0x514 +#define PCI_DAC_SCS_3_BASE_ADDRESS_LOW 0x518 +#define PCI_DAC_SCS_3_BASE_ADDRESS_HIGH 0x51c +#define PCI_DAC_P2P_MEM1_BASE_ADDRESS_LOW 0x520 +#define PCI_DAC_P2P_MEM1_BASE_ADDRESS_HIGH 0x524 + + +/* + * PCI Configuration Function 6 + */ + +#define PCI_DAC_CS_0_BASE_ADDRESS_LOW 0x610 +#define PCI_DAC_CS_0_BASE_ADDRESS_HIGH 0x614 +#define PCI_DAC_CS_1_BASE_ADDRESS_LOW 0x618 +#define PCI_DAC_CS_1_BASE_ADDRESS_HIGH 0x61c +#define PCI_DAC_CS_2_BASE_ADDRESS_LOW 0x620 +#define PCI_DAC_CS_2_BASE_ADDRESS_HIGH 0x624 + +/* + * PCI Configuration Function 7 + */ + +#define PCI_DAC_CS_3_BASE_ADDRESS_LOW 0x710 +#define PCI_DAC_CS_3_BASE_ADDRESS_HIGH 0x714 +#define PCI_DAC_BOOTCS_BASE_ADDRESS_LOW 0x718 +#define PCI_DAC_BOOTCS_BASE_ADDRESS_HIGH 0x71c +#define PCI_DAC_CPU_BASE_ADDRESS_LOW 0x720 +#define PCI_DAC_CPU_BASE_ADDRESS_HIGH 0x724 +#endif + +/* + * Interrupts + */ + +#define LOW_INTERRUPT_CAUSE_REGISTER 0xc18 +#define HIGH_INTERRUPT_CAUSE_REGISTER 0xc68 +#define CPU_INTERRUPT_MASK_REGISTER_LOW 0xc1c +#define CPU_INTERRUPT_MASK_REGISTER_HIGH 0xc6c +#define CPU_SELECT_CAUSE_REGISTER 0xc70 +#define PCI_0INTERRUPT_CAUSE_MASK_REGISTER_LOW 0xc24 +#define PCI_0INTERRUPT_CAUSE_MASK_REGISTER_HIGH 0xc64 +#define PCI_0SELECT_CAUSE 0xc74 +#define PCI_1INTERRUPT_CAUSE_MASK_REGISTER_LOW 0xca4 +#define PCI_1INTERRUPT_CAUSE_MASK_REGISTER_HIGH 0xce4 +#define PCI_1SELECT_CAUSE 0xcf4 +#define CPU_INT_0_MASK 0xe60 +#define CPU_INT_1_MASK 0xe64 +#define CPU_INT_2_MASK 0xe68 +#define CPU_INT_3_MASK 0xe6c + +/* + * I20 Support registers + */ + +#define 
INBOUND_MESSAGE_REGISTER0_PCI0_SIDE 0x010 +#define INBOUND_MESSAGE_REGISTER1_PCI0_SIDE 0x014 +#define OUTBOUND_MESSAGE_REGISTER0_PCI0_SIDE 0x018 +#define OUTBOUND_MESSAGE_REGISTER1_PCI0_SIDE 0x01C +#define INBOUND_DOORBELL_REGISTER_PCI0_SIDE 0x020 +#define INBOUND_INTERRUPT_CAUSE_REGISTER_PCI0_SIDE 0x024 +#define INBOUND_INTERRUPT_MASK_REGISTER_PCI0_SIDE 0x028 +#define OUTBOUND_DOORBELL_REGISTER_PCI0_SIDE 0x02C +#define OUTBOUND_INTERRUPT_CAUSE_REGISTER_PCI0_SIDE 0x030 +#define OUTBOUND_INTERRUPT_MASK_REGISTER_PCI0_SIDE 0x034 +#define INBOUND_QUEUE_PORT_VIRTUAL_REGISTER_PCI0_SIDE 0x040 +#define OUTBOUND_QUEUE_PORT_VIRTUAL_REGISTER_PCI0_SIDE 0x044 +#define QUEUE_CONTROL_REGISTER_PCI0_SIDE 0x050 +#define QUEUE_BASE_ADDRESS_REGISTER_PCI0_SIDE 0x054 +#define INBOUND_FREE_HEAD_POINTER_REGISTER_PCI0_SIDE 0x060 +#define INBOUND_FREE_TAIL_POINTER_REGISTER_PCI0_SIDE 0x064 +#define INBOUND_POST_HEAD_POINTER_REGISTER_PCI0_SIDE 0x068 +#define INBOUND_POST_TAIL_POINTER_REGISTER_PCI0_SIDE 0x06C +#define OUTBOUND_FREE_HEAD_POINTER_REGISTER_PCI0_SIDE 0x070 +#define OUTBOUND_FREE_TAIL_POINTER_REGISTER_PCI0_SIDE 0x074 +#define OUTBOUND_POST_HEAD_POINTER_REGISTER_PCI0_SIDE 0x0F8 +#define OUTBOUND_POST_TAIL_POINTER_REGISTER_PCI0_SIDE 0x0FC + +#define INBOUND_MESSAGE_REGISTER0_PCI1_SIDE 0x090 +#define INBOUND_MESSAGE_REGISTER1_PCI1_SIDE 0x094 +#define OUTBOUND_MESSAGE_REGISTER0_PCI1_SIDE 0x098 +#define OUTBOUND_MESSAGE_REGISTER1_PCI1_SIDE 0x09C +#define INBOUND_DOORBELL_REGISTER_PCI1_SIDE 0x0A0 +#define INBOUND_INTERRUPT_CAUSE_REGISTER_PCI1_SIDE 0x0A4 +#define INBOUND_INTERRUPT_MASK_REGISTER_PCI1_SIDE 0x0A8 +#define OUTBOUND_DOORBELL_REGISTER_PCI1_SIDE 0x0AC +#define OUTBOUND_INTERRUPT_CAUSE_REGISTER_PCI1_SIDE 0x0B0 +#define OUTBOUND_INTERRUPT_MASK_REGISTER_PCI1_SIDE 0x0B4 +#define INBOUND_QUEUE_PORT_VIRTUAL_REGISTER_PCI1_SIDE 0x0C0 +#define OUTBOUND_QUEUE_PORT_VIRTUAL_REGISTER_PCI1_SIDE 0x0C4 +#define QUEUE_CONTROL_REGISTER_PCI1_SIDE 0x0D0 +#define QUEUE_BASE_ADDRESS_REGISTER_PCI1_SIDE 0x0D4 +#define INBOUND_FREE_HEAD_POINTER_REGISTER_PCI1_SIDE 0x0E0 +#define INBOUND_FREE_TAIL_POINTER_REGISTER_PCI1_SIDE 0x0E4 +#define INBOUND_POST_HEAD_POINTER_REGISTER_PCI1_SIDE 0x0E8 +#define INBOUND_POST_TAIL_POINTER_REGISTER_PCI1_SIDE 0x0EC +#define OUTBOUND_FREE_HEAD_POINTER_REGISTER_PCI1_SIDE 0x0F0 +#define OUTBOUND_FREE_TAIL_POINTER_REGISTER_PCI1_SIDE 0x0F4 +#define OUTBOUND_POST_HEAD_POINTER_REGISTER_PCI1_SIDE 0x078 +#define OUTBOUND_POST_TAIL_POINTER_REGISTER_PCI1_SIDE 0x07C + +#define INBOUND_MESSAGE_REGISTER0_CPU0_SIDE 0X1C10 +#define INBOUND_MESSAGE_REGISTER1_CPU0_SIDE 0X1C14 +#define OUTBOUND_MESSAGE_REGISTER0_CPU0_SIDE 0X1C18 +#define OUTBOUND_MESSAGE_REGISTER1_CPU0_SIDE 0X1C1C +#define INBOUND_DOORBELL_REGISTER_CPU0_SIDE 0X1C20 +#define INBOUND_INTERRUPT_CAUSE_REGISTER_CPU0_SIDE 0X1C24 +#define INBOUND_INTERRUPT_MASK_REGISTER_CPU0_SIDE 0X1C28 +#define OUTBOUND_DOORBELL_REGISTER_CPU0_SIDE 0X1C2C +#define OUTBOUND_INTERRUPT_CAUSE_REGISTER_CPU0_SIDE 0X1C30 +#define OUTBOUND_INTERRUPT_MASK_REGISTER_CPU0_SIDE 0X1C34 +#define INBOUND_QUEUE_PORT_VIRTUAL_REGISTER_CPU0_SIDE 0X1C40 +#define OUTBOUND_QUEUE_PORT_VIRTUAL_REGISTER_CPU0_SIDE 0X1C44 +#define QUEUE_CONTROL_REGISTER_CPU0_SIDE 0X1C50 +#define QUEUE_BASE_ADDRESS_REGISTER_CPU0_SIDE 0X1C54 +#define INBOUND_FREE_HEAD_POINTER_REGISTER_CPU0_SIDE 0X1C60 +#define INBOUND_FREE_TAIL_POINTER_REGISTER_CPU0_SIDE 0X1C64 +#define INBOUND_POST_HEAD_POINTER_REGISTER_CPU0_SIDE 0X1C68 +#define INBOUND_POST_TAIL_POINTER_REGISTER_CPU0_SIDE 0X1C6C +#define 
OUTBOUND_FREE_HEAD_POINTER_REGISTER_CPU0_SIDE 0X1C70 +#define OUTBOUND_FREE_TAIL_POINTER_REGISTER_CPU0_SIDE 0X1C74 +#define OUTBOUND_POST_HEAD_POINTER_REGISTER_CPU0_SIDE 0X1CF8 +#define OUTBOUND_POST_TAIL_POINTER_REGISTER_CPU0_SIDE 0X1CFC + +#define INBOUND_MESSAGE_REGISTER0_CPU1_SIDE 0X1C90 +#define INBOUND_MESSAGE_REGISTER1_CPU1_SIDE 0X1C94 +#define OUTBOUND_MESSAGE_REGISTER0_CPU1_SIDE 0X1C98 +#define OUTBOUND_MESSAGE_REGISTER1_CPU1_SIDE 0X1C9C +#define INBOUND_DOORBELL_REGISTER_CPU1_SIDE 0X1CA0 +#define INBOUND_INTERRUPT_CAUSE_REGISTER_CPU1_SIDE 0X1CA4 +#define INBOUND_INTERRUPT_MASK_REGISTER_CPU1_SIDE 0X1CA8 +#define OUTBOUND_DOORBELL_REGISTER_CPU1_SIDE 0X1CAC +#define OUTBOUND_INTERRUPT_CAUSE_REGISTER_CPU1_SIDE 0X1CB0 +#define OUTBOUND_INTERRUPT_MASK_REGISTER_CPU1_SIDE 0X1CB4 +#define INBOUND_QUEUE_PORT_VIRTUAL_REGISTER_CPU1_SIDE 0X1CC0 +#define OUTBOUND_QUEUE_PORT_VIRTUAL_REGISTER_CPU1_SIDE 0X1CC4 +#define QUEUE_CONTROL_REGISTER_CPU1_SIDE 0X1CD0 +#define QUEUE_BASE_ADDRESS_REGISTER_CPU1_SIDE 0X1CD4 +#define INBOUND_FREE_HEAD_POINTER_REGISTER_CPU1_SIDE 0X1CE0 +#define INBOUND_FREE_TAIL_POINTER_REGISTER_CPU1_SIDE 0X1CE4 +#define INBOUND_POST_HEAD_POINTER_REGISTER_CPU1_SIDE 0X1CE8 +#define INBOUND_POST_TAIL_POINTER_REGISTER_CPU1_SIDE 0X1CEC +#define OUTBOUND_FREE_HEAD_POINTER_REGISTER_CPU1_SIDE 0X1CF0 +#define OUTBOUND_FREE_TAIL_POINTER_REGISTER_CPU1_SIDE 0X1CF4 +#define OUTBOUND_POST_HEAD_POINTER_REGISTER_CPU1_SIDE 0X1C78 +#define OUTBOUND_POST_TAIL_POINTER_REGISTER_CPU1_SIDE 0X1C7C + +/* + * Communication Unit Registers + */ + +#define ETHERNET_0_ADDRESS_CONTROL_LOW +#define ETHERNET_0_ADDRESS_CONTROL_HIGH 0xf204 +#define ETHERNET_0_RECEIVE_BUFFER_PCI_HIGH_ADDRESS 0xf208 +#define ETHERNET_0_TRANSMIT_BUFFER_PCI_HIGH_ADDRESS 0xf20c +#define ETHERNET_0_RECEIVE_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf210 +#define ETHERNET_0_TRANSMIT_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf214 +#define ETHERNET_0_HASH_TABLE_PCI_HIGH_ADDRESS 0xf218 +#define ETHERNET_1_ADDRESS_CONTROL_LOW 0xf220 +#define ETHERNET_1_ADDRESS_CONTROL_HIGH 0xf224 +#define ETHERNET_1_RECEIVE_BUFFER_PCI_HIGH_ADDRESS 0xf228 +#define ETHERNET_1_TRANSMIT_BUFFER_PCI_HIGH_ADDRESS 0xf22c +#define ETHERNET_1_RECEIVE_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf230 +#define ETHERNET_1_TRANSMIT_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf234 +#define ETHERNET_1_HASH_TABLE_PCI_HIGH_ADDRESS 0xf238 +#define ETHERNET_2_ADDRESS_CONTROL_LOW 0xf240 +#define ETHERNET_2_ADDRESS_CONTROL_HIGH 0xf244 +#define ETHERNET_2_RECEIVE_BUFFER_PCI_HIGH_ADDRESS 0xf248 +#define ETHERNET_2_TRANSMIT_BUFFER_PCI_HIGH_ADDRESS 0xf24c +#define ETHERNET_2_RECEIVE_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf250 +#define ETHERNET_2_TRANSMIT_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf254 +#define ETHERNET_2_HASH_TABLE_PCI_HIGH_ADDRESS 0xf258 +#define MPSC_0_ADDRESS_CONTROL_LOW 0xf280 +#define MPSC_0_ADDRESS_CONTROL_HIGH 0xf284 +#define MPSC_0_RECEIVE_BUFFER_PCI_HIGH_ADDRESS 0xf288 +#define MPSC_0_TRANSMIT_BUFFER_PCI_HIGH_ADDRESS 0xf28c +#define MPSC_0_RECEIVE_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf290 +#define MPSC_0_TRANSMIT_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf294 +#define MPSC_1_ADDRESS_CONTROL_LOW 0xf2a0 +#define MPSC_1_ADDRESS_CONTROL_HIGH 0xf2a4 +#define MPSC_1_RECEIVE_BUFFER_PCI_HIGH_ADDRESS 0xf2a8 +#define MPSC_1_TRANSMIT_BUFFER_PCI_HIGH_ADDRESS 0xf2ac +#define MPSC_1_RECEIVE_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf2b0 +#define MPSC_1_TRANSMIT_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf2b4 +#define MPSC_2_ADDRESS_CONTROL_LOW 0xf2c0 +#define MPSC_2_ADDRESS_CONTROL_HIGH 0xf2c4 +#define MPSC_2_RECEIVE_BUFFER_PCI_HIGH_ADDRESS 0xf2c8 +#define 
MPSC_2_TRANSMIT_BUFFER_PCI_HIGH_ADDRESS 0xf2cc +#define MPSC_2_RECEIVE_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf2d0 +#define MPSC_2_TRANSMIT_DESCRIPTOR_PCI_HIGH_ADDRESS 0xf2d4 +#define SERIAL_INIT_PCI_HIGH_ADDRESS 0xf320 +#define SERIAL_INIT_LAST_DATA 0xf324 +#define SERIAL_INIT_STATUS_AND_CONTROL 0xf328 +#define COMM_UNIT_ARBITER_CONTROL 0xf300 +#define COMM_UNIT_CROSS_BAR_TIMEOUT 0xf304 +#define COMM_UNIT_INTERRUPT_CAUSE 0xf310 +#define COMM_UNIT_INTERRUPT_MASK 0xf314 +#define COMM_UNIT_ERROR_ADDRESS 0xf314 + +/* + * Cunit Debug (for internal use) + */ + +#define CUNIT_ADDRESS 0xf340 +#define CUNIT_COMMAND_AND_ID 0xf344 +#define CUNIT_WRITE_DATA_LOW 0xf348 +#define CUNIT_WRITE_DATA_HIGH 0xf34c +#define CUNIT_WRITE_BYTE_ENABLE 0xf358 +#define CUNIT_READ_DATA_LOW 0xf350 +#define CUNIT_READ_DATA_HIGH 0xf354 +#define CUNIT_READ_ID 0xf35c + +/* + * Fast Ethernet Unit Registers + */ + +/* Ethernet */ + +#define ETHERNET_PHY_ADDRESS_REGISTER 0x2000 +#define ETHERNET_SMI_REGISTER 0x2010 + +/* Ethernet 0 */ + +#define ETHERNET0_PORT_CONFIGURATION_REGISTER 0x2400 +#define ETHERNET0_PORT_CONFIGURATION_EXTEND_REGISTER 0x2408 +#define ETHERNET0_PORT_COMMAND_REGISTER 0x2410 +#define ETHERNET0_PORT_STATUS_REGISTER 0x2418 +#define ETHERNET0_SERIAL_PARAMETRS_REGISTER 0x2420 +#define ETHERNET0_HASH_TABLE_POINTER_REGISTER 0x2428 +#define ETHERNET0_FLOW_CONTROL_SOURCE_ADDRESS_LOW 0x2430 +#define ETHERNET0_FLOW_CONTROL_SOURCE_ADDRESS_HIGH 0x2438 +#define ETHERNET0_SDMA_CONFIGURATION_REGISTER 0x2440 +#define ETHERNET0_SDMA_COMMAND_REGISTER 0x2448 +#define ETHERNET0_INTERRUPT_CAUSE_REGISTER 0x2450 +#define ETHERNET0_INTERRUPT_MASK_REGISTER 0x2458 +#define ETHERNET0_FIRST_RX_DESCRIPTOR_POINTER0 0x2480 +#define ETHERNET0_FIRST_RX_DESCRIPTOR_POINTER1 0x2484 +#define ETHERNET0_FIRST_RX_DESCRIPTOR_POINTER2 0x2488 +#define ETHERNET0_FIRST_RX_DESCRIPTOR_POINTER3 0x248c +#define ETHERNET0_CURRENT_RX_DESCRIPTOR_POINTER0 0x24a0 +#define ETHERNET0_CURRENT_RX_DESCRIPTOR_POINTER1 0x24a4 +#define ETHERNET0_CURRENT_RX_DESCRIPTOR_POINTER2 0x24a8 +#define ETHERNET0_CURRENT_RX_DESCRIPTOR_POINTER3 0x24ac +#define ETHERNET0_CURRENT_TX_DESCRIPTOR_POINTER0 0x24e0 +#define ETHERNET0_CURRENT_TX_DESCRIPTOR_POINTER1 0x24e4 +#define ETHERNET0_MIB_COUNTER_BASE 0x2500 + +/* Ethernet 1 */ + +#define ETHERNET1_PORT_CONFIGURATION_REGISTER 0x2800 +#define ETHERNET1_PORT_CONFIGURATION_EXTEND_REGISTER 0x2808 +#define ETHERNET1_PORT_COMMAND_REGISTER 0x2810 +#define ETHERNET1_PORT_STATUS_REGISTER 0x2818 +#define ETHERNET1_SERIAL_PARAMETRS_REGISTER 0x2820 +#define ETHERNET1_HASH_TABLE_POINTER_REGISTER 0x2828 +#define ETHERNET1_FLOW_CONTROL_SOURCE_ADDRESS_LOW 0x2830 +#define ETHERNET1_FLOW_CONTROL_SOURCE_ADDRESS_HIGH 0x2838 +#define ETHERNET1_SDMA_CONFIGURATION_REGISTER 0x2840 +#define ETHERNET1_SDMA_COMMAND_REGISTER 0x2848 +#define ETHERNET1_INTERRUPT_CAUSE_REGISTER 0x2850 +#define ETHERNET1_INTERRUPT_MASK_REGISTER 0x2858 +#define ETHERNET1_FIRST_RX_DESCRIPTOR_POINTER0 0x2880 +#define ETHERNET1_FIRST_RX_DESCRIPTOR_POINTER1 0x2884 +#define ETHERNET1_FIRST_RX_DESCRIPTOR_POINTER2 0x2888 +#define ETHERNET1_FIRST_RX_DESCRIPTOR_POINTER3 0x288c +#define ETHERNET1_CURRENT_RX_DESCRIPTOR_POINTER0 0x28a0 +#define ETHERNET1_CURRENT_RX_DESCRIPTOR_POINTER1 0x28a4 +#define ETHERNET1_CURRENT_RX_DESCRIPTOR_POINTER2 0x28a8 +#define ETHERNET1_CURRENT_RX_DESCRIPTOR_POINTER3 0x28ac +#define ETHERNET1_CURRENT_TX_DESCRIPTOR_POINTER0 0x28e0 +#define ETHERNET1_CURRENT_TX_DESCRIPTOR_POINTER1 0x28e4 +#define ETHERNET1_MIB_COUNTER_BASE 0x2900 + +/* Ethernet 2 */ + +#define 
ETHERNET2_PORT_CONFIGURATION_REGISTER 0x2c00 +#define ETHERNET2_PORT_CONFIGURATION_EXTEND_REGISTER 0x2c08 +#define ETHERNET2_PORT_COMMAND_REGISTER 0x2c10 +#define ETHERNET2_PORT_STATUS_REGISTER 0x2c18 +#define ETHERNET2_SERIAL_PARAMETRS_REGISTER 0x2c20 +#define ETHERNET2_HASH_TABLE_POINTER_REGISTER 0x2c28 +#define ETHERNET2_FLOW_CONTROL_SOURCE_ADDRESS_LOW 0x2c30 +#define ETHERNET2_FLOW_CONTROL_SOURCE_ADDRESS_HIGH 0x2c38 +#define ETHERNET2_SDMA_CONFIGURATION_REGISTER 0x2c40 +#define ETHERNET2_SDMA_COMMAND_REGISTER 0x2c48 +#define ETHERNET2_INTERRUPT_CAUSE_REGISTER 0x2c50 +#define ETHERNET2_INTERRUPT_MASK_REGISTER 0x2c58 +#define ETHERNET2_FIRST_RX_DESCRIPTOR_POINTER0 0x2c80 +#define ETHERNET2_FIRST_RX_DESCRIPTOR_POINTER1 0x2c84 +#define ETHERNET2_FIRST_RX_DESCRIPTOR_POINTER2 0x2c88 +#define ETHERNET2_FIRST_RX_DESCRIPTOR_POINTER3 0x2c8c +#define ETHERNET2_CURRENT_RX_DESCRIPTOR_POINTER0 0x2ca0 +#define ETHERNET2_CURRENT_RX_DESCRIPTOR_POINTER1 0x2ca4 +#define ETHERNET2_CURRENT_RX_DESCRIPTOR_POINTER2 0x2ca8 +#define ETHERNET2_CURRENT_RX_DESCRIPTOR_POINTER3 0x2cac +#define ETHERNET2_CURRENT_TX_DESCRIPTOR_POINTER0 0x2ce0 +#define ETHERNET2_CURRENT_TX_DESCRIPTOR_POINTER1 0x2ce4 +#define ETHERNET2_MIB_COUNTER_BASE 0x2d00 + +/* + * SDMA Registers + */ + +#define SDMA_GROUP_CONFIGURATION_REGISTER 0xb1f0 +#define CHANNEL0_CONFIGURATION_REGISTER 0x4000 +#define CHANNEL0_COMMAND_REGISTER 0x4008 +#define CHANNEL0_RX_CMD_STATUS 0x4800 +#define CHANNEL0_RX_PACKET_AND_BUFFER_SIZES 0x4804 +#define CHANNEL0_RX_BUFFER_POINTER 0x4808 +#define CHANNEL0_RX_NEXT_POINTER 0x480c +#define CHANNEL0_CURRENT_RX_DESCRIPTOR_POINTER 0x4810 +#define CHANNEL0_TX_CMD_STATUS 0x4C00 +#define CHANNEL0_TX_PACKET_SIZE 0x4C04 +#define CHANNEL0_TX_BUFFER_POINTER 0x4C08 +#define CHANNEL0_TX_NEXT_POINTER 0x4C0c +#define CHANNEL0_CURRENT_TX_DESCRIPTOR_POINTER 0x4c10 +#define CHANNEL0_FIRST_TX_DESCRIPTOR_POINTER 0x4c14 +#define CHANNEL1_CONFIGURATION_REGISTER 0x6000 +#define CHANNEL1_COMMAND_REGISTER 0x6008 +#define CHANNEL1_RX_CMD_STATUS 0x6800 +#define CHANNEL1_RX_PACKET_AND_BUFFER_SIZES 0x6804 +#define CHANNEL1_RX_BUFFER_POINTER 0x6808 +#define CHANNEL1_RX_NEXT_POINTER 0x680c +#define CHANNEL1_CURRENT_RX_DESCRIPTOR_POINTER 0x6810 +#define CHANNEL1_TX_CMD_STATUS 0x6C00 +#define CHANNEL1_TX_PACKET_SIZE 0x6C04 +#define CHANNEL1_TX_BUFFER_POINTER 0x6C08 +#define CHANNEL1_TX_NEXT_POINTER 0x6C0c +#define CHANNEL1_CURRENT_RX_DESCRIPTOR_POINTER 0x6810 +#define CHANNEL1_CURRENT_TX_DESCRIPTOR_POINTER 0x6c10 +#define CHANNEL1_FIRST_TX_DESCRIPTOR_POINTER 0x6c14 + +/* SDMA Interrupt */ + +#define SDMA_CAUSE 0xb820 +#define SDMA_MASK 0xb8a0 + + +/* + * Baude Rate Generators Registers + */ + +/* BRG 0 */ + +#define BRG0_CONFIGURATION_REGISTER 0xb200 +#define BRG0_BAUDE_TUNING_REGISTER 0xb204 + +/* BRG 1 */ + +#define BRG1_CONFIGURATION_REGISTER 0xb208 +#define BRG1_BAUDE_TUNING_REGISTER 0xb20c + +/* BRG 2 */ + +#define BRG2_CONFIGURATION_REGISTER 0xb210 +#define BRG2_BAUDE_TUNING_REGISTER 0xb214 + +/* BRG Interrupts */ + +#define BRG_CAUSE_REGISTER 0xb834 +#define BRG_MASK_REGISTER 0xb8b4 + +/* MISC */ + +#define MAIN_ROUTING_REGISTER 0xb400 +#define RECEIVE_CLOCK_ROUTING_REGISTER 0xb404 +#define TRANSMIT_CLOCK_ROUTING_REGISTER 0xb408 +#define COMM_UNIT_ARBITER_CONFIGURATION_REGISTER 0xb40c +#define WATCHDOG_CONFIGURATION_REGISTER 0xb410 +#define WATCHDOG_VALUE_REGISTER 0xb414 + + +/* + * Flex TDM Registers + */ + +/* FTDM Port */ + +#define FLEXTDM_TRANSMIT_READ_POINTER 0xa800 +#define FLEXTDM_RECEIVE_READ_POINTER 0xa804 +#define 
FLEXTDM_CONFIGURATION_REGISTER 0xa808 +#define FLEXTDM_AUX_CHANNELA_TX_REGISTER 0xa80c +#define FLEXTDM_AUX_CHANNELA_RX_REGISTER 0xa810 +#define FLEXTDM_AUX_CHANNELB_TX_REGISTER 0xa814 +#define FLEXTDM_AUX_CHANNELB_RX_REGISTER 0xa818 + +/* FTDM Interrupts */ + +#define FTDM_CAUSE_REGISTER 0xb830 +#define FTDM_MASK_REGISTER 0xb8b0 + + +/* + * GPP Interface Registers + */ + +#define GPP_IO_CONTROL 0xf100 +#define GPP_LEVEL_CONTROL 0xf110 +#define GPP_VALUE 0xf104 +#define GPP_INTERRUPT_CAUSE 0xf108 +#define GPP_INTERRUPT_MASK 0xf10c + +#define MPP_CONTROL0 0xf000 +#define MPP_CONTROL1 0xf004 +#define MPP_CONTROL2 0xf008 +#define MPP_CONTROL3 0xf00c +#define DEBUG_PORT_MULTIPLEX 0xf014 +#define SERIAL_PORT_MULTIPLEX 0xf010 + +/* + * I2C Registers + */ + +#define I2C_SLAVE_ADDRESS 0xc000 +#define I2C_EXTENDED_SLAVE_ADDRESS 0xc040 +#define I2C_DATA 0xc004 +#define I2C_CONTROL 0xc008 +#define I2C_STATUS_BAUDE_RATE 0xc00C +#define I2C_SOFT_RESET 0xc01c + +/* + * MPSC Registers + */ + +/* + * MPSC0 + */ + +#define MPSC0_MAIN_CONFIGURATION_LOW 0x8000 +#define MPSC0_MAIN_CONFIGURATION_HIGH 0x8004 +#define MPSC0_PROTOCOL_CONFIGURATION 0x8008 +#define CHANNEL0_REGISTER1 0x800c +#define CHANNEL0_REGISTER2 0x8010 +#define CHANNEL0_REGISTER3 0x8014 +#define CHANNEL0_REGISTER4 0x8018 +#define CHANNEL0_REGISTER5 0x801c +#define CHANNEL0_REGISTER6 0x8020 +#define CHANNEL0_REGISTER7 0x8024 +#define CHANNEL0_REGISTER8 0x8028 +#define CHANNEL0_REGISTER9 0x802c +#define CHANNEL0_REGISTER10 0x8030 +#define CHANNEL0_REGISTER11 0x8034 + +/* + * MPSC1 + */ + +#define MPSC1_MAIN_CONFIGURATION_LOW 0x9000 +#define MPSC1_MAIN_CONFIGURATION_HIGH 0x9004 +#define MPSC1_PROTOCOL_CONFIGURATION 0x9008 +#define CHANNEL1_REGISTER1 0x900c +#define CHANNEL1_REGISTER2 0x9010 +#define CHANNEL1_REGISTER3 0x9014 +#define CHANNEL1_REGISTER4 0x9018 +#define CHANNEL1_REGISTER5 0x901c +#define CHANNEL1_REGISTER6 0x9020 +#define CHANNEL1_REGISTER7 0x9024 +#define CHANNEL1_REGISTER8 0x9028 +#define CHANNEL1_REGISTER9 0x902c +#define CHANNEL1_REGISTER10 0x9030 +#define CHANNEL1_REGISTER11 0x9034 + +/* + * MPSCs Interupts + */ + +#define MPSC0_CAUSE 0xb804 +#define MPSC0_MASK 0xb884 +#define MPSC1_CAUSE 0xb80c +#define MPSC1_MASK 0xb88c + +#endif /* __ASM_MIPS_MV64240_H */ diff --git a/include/asm-ppc/cpm2.h b/include/asm-ppc/cpm2.h new file mode 100644 index 000000000..dc899d3b5 --- /dev/null +++ b/include/asm-ppc/cpm2.h @@ -0,0 +1,1041 @@ +/* + * Communication Processor Module v2. + * + * This file contains structures and information for the communication + * processor channels found in the dual port RAM or parameter RAM. + * All CPM control and status is available through the CPM2 internal + * memory map. See immap_cpm2.h for details. + */ +#ifdef __KERNEL__ +#ifndef __CPM2__ +#define __CPM2__ + +#include + +/* CPM Command register. +*/ +#define CPM_CR_RST ((uint)0x80000000) +#define CPM_CR_PAGE ((uint)0x7c000000) +#define CPM_CR_SBLOCK ((uint)0x03e00000) +#define CPM_CR_FLG ((uint)0x00010000) +#define CPM_CR_MCN ((uint)0x00003fc0) +#define CPM_CR_OPCODE ((uint)0x0000000f) + +/* Device sub-block and page codes. 
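 *
 * A hedged usage sketch: the page and sub-block codes below are folded into a
 * single command word by mk_cr_cmd() (defined further down) and written to the
 * CPM command register, after which the caller spins until the CP clears
 * CPM_CR_FLG.  'cpmp' is the exported pointer declared later in this header;
 * cp_cpcr is assumed to be the command register field of the mapped internal
 * memory:
 *
 *	volatile cpm_cpm2_t *cp = cpmp;
 *
 *	cp->cp_cpcr = mk_cr_cmd(CPM_CR_SCC1_PAGE, CPM_CR_SCC1_SBLOCK, 0,
 *				CPM_CR_RESTART_TX) | CPM_CR_FLG;
 *	while (cp->cp_cpcr & CPM_CR_FLG)
 *		;
 *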
+*/ +#define CPM_CR_SCC1_SBLOCK (0x04) +#define CPM_CR_SCC2_SBLOCK (0x05) +#define CPM_CR_SCC3_SBLOCK (0x06) +#define CPM_CR_SCC4_SBLOCK (0x07) +#define CPM_CR_SMC1_SBLOCK (0x08) +#define CPM_CR_SMC2_SBLOCK (0x09) +#define CPM_CR_SPI_SBLOCK (0x0a) +#define CPM_CR_I2C_SBLOCK (0x0b) +#define CPM_CR_TIMER_SBLOCK (0x0f) +#define CPM_CR_RAND_SBLOCK (0x0e) +#define CPM_CR_FCC1_SBLOCK (0x10) +#define CPM_CR_FCC2_SBLOCK (0x11) +#define CPM_CR_FCC3_SBLOCK (0x12) +#define CPM_CR_IDMA1_SBLOCK (0x14) +#define CPM_CR_IDMA2_SBLOCK (0x15) +#define CPM_CR_IDMA3_SBLOCK (0x16) +#define CPM_CR_IDMA4_SBLOCK (0x17) +#define CPM_CR_MCC1_SBLOCK (0x1c) + +#define CPM_CR_SCC1_PAGE (0x00) +#define CPM_CR_SCC2_PAGE (0x01) +#define CPM_CR_SCC3_PAGE (0x02) +#define CPM_CR_SCC4_PAGE (0x03) +#define CPM_CR_SMC1_PAGE (0x07) +#define CPM_CR_SMC2_PAGE (0x08) +#define CPM_CR_SPI_PAGE (0x09) +#define CPM_CR_I2C_PAGE (0x0a) +#define CPM_CR_TIMER_PAGE (0x0a) +#define CPM_CR_RAND_PAGE (0x0a) +#define CPM_CR_FCC1_PAGE (0x04) +#define CPM_CR_FCC2_PAGE (0x05) +#define CPM_CR_FCC3_PAGE (0x06) +#define CPM_CR_IDMA1_PAGE (0x07) +#define CPM_CR_IDMA2_PAGE (0x08) +#define CPM_CR_IDMA3_PAGE (0x09) +#define CPM_CR_IDMA4_PAGE (0x0a) +#define CPM_CR_MCC1_PAGE (0x07) +#define CPM_CR_MCC2_PAGE (0x08) + +/* Some opcodes (there are more...later) +*/ +#define CPM_CR_INIT_TRX ((ushort)0x0000) +#define CPM_CR_INIT_RX ((ushort)0x0001) +#define CPM_CR_INIT_TX ((ushort)0x0002) +#define CPM_CR_HUNT_MODE ((ushort)0x0003) +#define CPM_CR_STOP_TX ((ushort)0x0004) +#define CPM_CR_RESTART_TX ((ushort)0x0006) +#define CPM_CR_SET_GADDR ((ushort)0x0008) +#define CPM_CR_START_IDMA ((ushort)0x0009) +#define CPM_CR_STOP_IDMA ((ushort)0x000b) + +#define mk_cr_cmd(PG, SBC, MCN, OP) \ + ((PG << 26) | (SBC << 21) | (MCN << 6) | OP) + +/* Dual Port RAM addresses. The first 16K is available for almost + * any CPM use, so we put the BDs there. The first 128 bytes are + * used for SMC1 and SMC2 parameter RAM, so we start allocating + * BDs above that. All of this must change when we start + * downloading RAM microcode. + */ +#define CPM_DATAONLY_BASE ((uint)128) +#define CPM_DP_NOSPACE ((uint)0x7fffffff) +#ifdef CONFIG_8272 +#define CPM_DATAONLY_SIZE ((uint)(8 * 1024) - CPM_DATAONLY_BASE) +#define CPM_FCC_SPECIAL_BASE ((uint)0x00009000) +#else +#define CPM_DATAONLY_SIZE ((uint)(16 * 1024) - CPM_DATAONLY_BASE) +#define CPM_FCC_SPECIAL_BASE ((uint)0x0000b000) +#endif + +/* The number of pages of host memory we allocate for CPM. This is + * done early in kernel initialization to get physically contiguous + * pages. + */ +#define NUM_CPM_HOST_PAGES 2 + + +/* Export the base address of the communication processor registers + * and dual port ram. + */ +extern cpm_cpm2_t *cpmp; /* Pointer to comm processor */ +extern void *cpm2_dpalloc(uint size, uint align); +extern int cpm2_dpfree(void *addr); +extern void *cpm2_dpalloc_fixed(void *addr, uint size, uint allign); +extern void cpm2_dpdump(void); +extern unsigned int cpm2_dpram_offset(void *addr); +extern void *cpm2_dpram_addr(int offset); +extern void cpm2_setbrg(uint brg, uint rate); +extern void cpm2_fastbrg(uint brg, uint rate, int div16); + +/* Buffer descriptors used by many of the CPM protocols. 
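 *
 * A minimal sketch of how a driver might use the allocator declared above to
 * carve a receive ring out of dual-port RAM (RX_RING_SIZE is illustrative,
 * and the failure convention of cpm2_dpalloc() is left to the allocator):
 *
 *	cbd_t *rx_ring;
 *
 *	rx_ring = (cbd_t *)cpm2_dpalloc(RX_RING_SIZE * sizeof(cbd_t), 8);
 *
 * The cbd_t layout itself follows.
 *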
+*/ +typedef struct cpm_buf_desc { + ushort cbd_sc; /* Status and Control */ + ushort cbd_datlen; /* Data length in buffer */ + uint cbd_bufaddr; /* Buffer address in host memory */ +} cbd_t; + +#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */ +#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */ +#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */ +#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */ +#define BD_SC_LAST ((ushort)0x0800) /* Last buffer in frame */ +#define BD_SC_CM ((ushort)0x0200) /* Continous mode */ +#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */ +#define BD_SC_P ((ushort)0x0100) /* xmt preamble */ +#define BD_SC_BR ((ushort)0x0020) /* Break received */ +#define BD_SC_FR ((ushort)0x0010) /* Framing error */ +#define BD_SC_PR ((ushort)0x0008) /* Parity error */ +#define BD_SC_OV ((ushort)0x0002) /* Overrun */ +#define BD_SC_CD ((ushort)0x0001) /* ?? */ + +/* Function code bits, usually generic to devices. +*/ +#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */ +#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */ +#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */ +#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */ +#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */ + +/* Parameter RAM offsets from the base. +*/ +#define PROFF_SCC1 ((uint)0x8000) +#define PROFF_SCC2 ((uint)0x8100) +#define PROFF_SCC3 ((uint)0x8200) +#define PROFF_SCC4 ((uint)0x8300) +#define PROFF_FCC1 ((uint)0x8400) +#define PROFF_FCC2 ((uint)0x8500) +#define PROFF_FCC3 ((uint)0x8600) +#define PROFF_MCC1 ((uint)0x8700) +#define PROFF_SMC1_BASE ((uint)0x87fc) +#define PROFF_IDMA1_BASE ((uint)0x87fe) +#define PROFF_MCC2 ((uint)0x8800) +#define PROFF_SMC2_BASE ((uint)0x88fc) +#define PROFF_IDMA2_BASE ((uint)0x88fe) +#define PROFF_SPI_BASE ((uint)0x89fc) +#define PROFF_IDMA3_BASE ((uint)0x89fe) +#define PROFF_TIMERS ((uint)0x8ae0) +#define PROFF_REVNUM ((uint)0x8af0) +#define PROFF_RAND ((uint)0x8af8) +#define PROFF_I2C_BASE ((uint)0x8afc) +#define PROFF_IDMA4_BASE ((uint)0x8afe) + +/* The SMCs are relocated to any of the first eight DPRAM pages. + * We will fix these at the first locations of DPRAM, until we + * get some microcode patches :-). + * The parameter ram space for the SMCs is fifty-some bytes, and + * they are required to start on a 64 byte boundary. + */ +#define PROFF_SMC1 (0) +#define PROFF_SMC2 (64) + + +/* Define enough so I can at least use the serial port as a UART. 
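 *
 * The BD_SC_* bits above are polled and handed back the same way by most of
 * the protocols.  A receive-side sketch, assuming 'bdp' walks a ring that
 * starts at 'rx_ring' and process_rx() is a hypothetical per-buffer handler:
 *
 *	while (!(bdp->cbd_sc & BD_SC_EMPTY)) {
 *		process_rx(bdp);
 *		bdp->cbd_sc |= BD_SC_EMPTY;
 *		bdp = (bdp->cbd_sc & BD_SC_WRAP) ? rx_ring : bdp + 1;
 *	}
 *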
+ */ +typedef struct smc_uart { + ushort smc_rbase; /* Rx Buffer descriptor base address */ + ushort smc_tbase; /* Tx Buffer descriptor base address */ + u_char smc_rfcr; /* Rx function code */ + u_char smc_tfcr; /* Tx function code */ + ushort smc_mrblr; /* Max receive buffer length */ + uint smc_rstate; /* Internal */ + uint smc_idp; /* Internal */ + ushort smc_rbptr; /* Internal */ + ushort smc_ibc; /* Internal */ + uint smc_rxtmp; /* Internal */ + uint smc_tstate; /* Internal */ + uint smc_tdp; /* Internal */ + ushort smc_tbptr; /* Internal */ + ushort smc_tbc; /* Internal */ + uint smc_txtmp; /* Internal */ + ushort smc_maxidl; /* Maximum idle characters */ + ushort smc_tmpidl; /* Temporary idle counter */ + ushort smc_brklen; /* Last received break length */ + ushort smc_brkec; /* rcv'd break condition counter */ + ushort smc_brkcr; /* xmt break count register */ + ushort smc_rmask; /* Temporary bit mask */ + uint smc_stmp; /* SDMA Temp */ +} smc_uart_t; + +/* SMC uart mode register (Internal memory map). +*/ +#define SMCMR_REN ((ushort)0x0001) +#define SMCMR_TEN ((ushort)0x0002) +#define SMCMR_DM ((ushort)0x000c) +#define SMCMR_SM_GCI ((ushort)0x0000) +#define SMCMR_SM_UART ((ushort)0x0020) +#define SMCMR_SM_TRANS ((ushort)0x0030) +#define SMCMR_SM_MASK ((ushort)0x0030) +#define SMCMR_PM_EVEN ((ushort)0x0100) /* Even parity, else odd */ +#define SMCMR_REVD SMCMR_PM_EVEN +#define SMCMR_PEN ((ushort)0x0200) /* Parity enable */ +#define SMCMR_BS SMCMR_PEN +#define SMCMR_SL ((ushort)0x0400) /* Two stops, else one */ +#define SMCR_CLEN_MASK ((ushort)0x7800) /* Character length */ +#define smcr_mk_clen(C) (((C) << 11) & SMCR_CLEN_MASK) + +/* SMC Event and Mask register. +*/ +#define SMCM_BRKE ((unsigned char)0x40) /* When in UART Mode */ +#define SMCM_BRK ((unsigned char)0x10) /* When in UART Mode */ +#define SMCM_TXE ((unsigned char)0x10) +#define SMCM_BSY ((unsigned char)0x04) +#define SMCM_TX ((unsigned char)0x02) +#define SMCM_RX ((unsigned char)0x01) + +/* Baud rate generators. +*/ +#define CPM_BRG_RST ((uint)0x00020000) +#define CPM_BRG_EN ((uint)0x00010000) +#define CPM_BRG_EXTC_INT ((uint)0x00000000) +#define CPM_BRG_EXTC_CLK3_9 ((uint)0x00004000) +#define CPM_BRG_EXTC_CLK5_15 ((uint)0x00008000) +#define CPM_BRG_ATB ((uint)0x00002000) +#define CPM_BRG_CD_MASK ((uint)0x00001ffe) +#define CPM_BRG_DIV16 ((uint)0x00000001) + +/* SCCs. 
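 *
 * For reference, the usual 8-bit, no parity, one stop setting is built from
 * the SMCMR bits above (the character length is start + data + parity + stop
 * bits minus one, i.e. 9 for 8-N-1); 'smcp' is assumed to point at the SMC
 * register block of the mapped internal memory:
 *
 *	smcp->smc_smcmr = smcr_mk_clen(9) | SMCMR_SM_UART;
 *	smcp->smc_smcmr |= (SMCMR_REN | SMCMR_TEN);
 *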
+*/ +#define SCC_GSMRH_IRP ((uint)0x00040000) +#define SCC_GSMRH_GDE ((uint)0x00010000) +#define SCC_GSMRH_TCRC_CCITT ((uint)0x00008000) +#define SCC_GSMRH_TCRC_BISYNC ((uint)0x00004000) +#define SCC_GSMRH_TCRC_HDLC ((uint)0x00000000) +#define SCC_GSMRH_REVD ((uint)0x00002000) +#define SCC_GSMRH_TRX ((uint)0x00001000) +#define SCC_GSMRH_TTX ((uint)0x00000800) +#define SCC_GSMRH_CDP ((uint)0x00000400) +#define SCC_GSMRH_CTSP ((uint)0x00000200) +#define SCC_GSMRH_CDS ((uint)0x00000100) +#define SCC_GSMRH_CTSS ((uint)0x00000080) +#define SCC_GSMRH_TFL ((uint)0x00000040) +#define SCC_GSMRH_RFW ((uint)0x00000020) +#define SCC_GSMRH_TXSY ((uint)0x00000010) +#define SCC_GSMRH_SYNL16 ((uint)0x0000000c) +#define SCC_GSMRH_SYNL8 ((uint)0x00000008) +#define SCC_GSMRH_SYNL4 ((uint)0x00000004) +#define SCC_GSMRH_RTSM ((uint)0x00000002) +#define SCC_GSMRH_RSYN ((uint)0x00000001) + +#define SCC_GSMRL_SIR ((uint)0x80000000) /* SCC2 only */ +#define SCC_GSMRL_EDGE_NONE ((uint)0x60000000) +#define SCC_GSMRL_EDGE_NEG ((uint)0x40000000) +#define SCC_GSMRL_EDGE_POS ((uint)0x20000000) +#define SCC_GSMRL_EDGE_BOTH ((uint)0x00000000) +#define SCC_GSMRL_TCI ((uint)0x10000000) +#define SCC_GSMRL_TSNC_3 ((uint)0x0c000000) +#define SCC_GSMRL_TSNC_4 ((uint)0x08000000) +#define SCC_GSMRL_TSNC_14 ((uint)0x04000000) +#define SCC_GSMRL_TSNC_INF ((uint)0x00000000) +#define SCC_GSMRL_RINV ((uint)0x02000000) +#define SCC_GSMRL_TINV ((uint)0x01000000) +#define SCC_GSMRL_TPL_128 ((uint)0x00c00000) +#define SCC_GSMRL_TPL_64 ((uint)0x00a00000) +#define SCC_GSMRL_TPL_48 ((uint)0x00800000) +#define SCC_GSMRL_TPL_32 ((uint)0x00600000) +#define SCC_GSMRL_TPL_16 ((uint)0x00400000) +#define SCC_GSMRL_TPL_8 ((uint)0x00200000) +#define SCC_GSMRL_TPL_NONE ((uint)0x00000000) +#define SCC_GSMRL_TPP_ALL1 ((uint)0x00180000) +#define SCC_GSMRL_TPP_01 ((uint)0x00100000) +#define SCC_GSMRL_TPP_10 ((uint)0x00080000) +#define SCC_GSMRL_TPP_ZEROS ((uint)0x00000000) +#define SCC_GSMRL_TEND ((uint)0x00040000) +#define SCC_GSMRL_TDCR_32 ((uint)0x00030000) +#define SCC_GSMRL_TDCR_16 ((uint)0x00020000) +#define SCC_GSMRL_TDCR_8 ((uint)0x00010000) +#define SCC_GSMRL_TDCR_1 ((uint)0x00000000) +#define SCC_GSMRL_RDCR_32 ((uint)0x0000c000) +#define SCC_GSMRL_RDCR_16 ((uint)0x00008000) +#define SCC_GSMRL_RDCR_8 ((uint)0x00004000) +#define SCC_GSMRL_RDCR_1 ((uint)0x00000000) +#define SCC_GSMRL_RENC_DFMAN ((uint)0x00003000) +#define SCC_GSMRL_RENC_MANCH ((uint)0x00002000) +#define SCC_GSMRL_RENC_FM0 ((uint)0x00001000) +#define SCC_GSMRL_RENC_NRZI ((uint)0x00000800) +#define SCC_GSMRL_RENC_NRZ ((uint)0x00000000) +#define SCC_GSMRL_TENC_DFMAN ((uint)0x00000600) +#define SCC_GSMRL_TENC_MANCH ((uint)0x00000400) +#define SCC_GSMRL_TENC_FM0 ((uint)0x00000200) +#define SCC_GSMRL_TENC_NRZI ((uint)0x00000100) +#define SCC_GSMRL_TENC_NRZ ((uint)0x00000000) +#define SCC_GSMRL_DIAG_LE ((uint)0x000000c0) /* Loop and echo */ +#define SCC_GSMRL_DIAG_ECHO ((uint)0x00000080) +#define SCC_GSMRL_DIAG_LOOP ((uint)0x00000040) +#define SCC_GSMRL_DIAG_NORM ((uint)0x00000000) +#define SCC_GSMRL_ENR ((uint)0x00000020) +#define SCC_GSMRL_ENT ((uint)0x00000010) +#define SCC_GSMRL_MODE_ENET ((uint)0x0000000c) +#define SCC_GSMRL_MODE_DDCMP ((uint)0x00000009) +#define SCC_GSMRL_MODE_BISYNC ((uint)0x00000008) +#define SCC_GSMRL_MODE_V14 ((uint)0x00000007) +#define SCC_GSMRL_MODE_AHDLC ((uint)0x00000006) +#define SCC_GSMRL_MODE_PROFIBUS ((uint)0x00000005) +#define SCC_GSMRL_MODE_UART ((uint)0x00000004) +#define SCC_GSMRL_MODE_SS7 ((uint)0x00000003) +#define SCC_GSMRL_MODE_ATALK 
((uint)0x00000002) +#define SCC_GSMRL_MODE_HDLC ((uint)0x00000000) + +#define SCC_TODR_TOD ((ushort)0x8000) + +/* SCC Event and Mask register. +*/ +#define SCCM_TXE ((unsigned char)0x10) +#define SCCM_BSY ((unsigned char)0x04) +#define SCCM_TX ((unsigned char)0x02) +#define SCCM_RX ((unsigned char)0x01) + +typedef struct scc_param { + ushort scc_rbase; /* Rx Buffer descriptor base address */ + ushort scc_tbase; /* Tx Buffer descriptor base address */ + u_char scc_rfcr; /* Rx function code */ + u_char scc_tfcr; /* Tx function code */ + ushort scc_mrblr; /* Max receive buffer length */ + uint scc_rstate; /* Internal */ + uint scc_idp; /* Internal */ + ushort scc_rbptr; /* Internal */ + ushort scc_ibc; /* Internal */ + uint scc_rxtmp; /* Internal */ + uint scc_tstate; /* Internal */ + uint scc_tdp; /* Internal */ + ushort scc_tbptr; /* Internal */ + ushort scc_tbc; /* Internal */ + uint scc_txtmp; /* Internal */ + uint scc_rcrc; /* Internal */ + uint scc_tcrc; /* Internal */ +} sccp_t; + +/* CPM Ethernet through SCC1. + */ +typedef struct scc_enet { + sccp_t sen_genscc; + uint sen_cpres; /* Preset CRC */ + uint sen_cmask; /* Constant mask for CRC */ + uint sen_crcec; /* CRC Error counter */ + uint sen_alec; /* alignment error counter */ + uint sen_disfc; /* discard frame counter */ + ushort sen_pads; /* Tx short frame pad character */ + ushort sen_retlim; /* Retry limit threshold */ + ushort sen_retcnt; /* Retry limit counter */ + ushort sen_maxflr; /* maximum frame length register */ + ushort sen_minflr; /* minimum frame length register */ + ushort sen_maxd1; /* maximum DMA1 length */ + ushort sen_maxd2; /* maximum DMA2 length */ + ushort sen_maxd; /* Rx max DMA */ + ushort sen_dmacnt; /* Rx DMA counter */ + ushort sen_maxb; /* Max BD byte count */ + ushort sen_gaddr1; /* Group address filter */ + ushort sen_gaddr2; + ushort sen_gaddr3; + ushort sen_gaddr4; + uint sen_tbuf0data0; /* Save area 0 - current frame */ + uint sen_tbuf0data1; /* Save area 1 - current frame */ + uint sen_tbuf0rba; /* Internal */ + uint sen_tbuf0crc; /* Internal */ + ushort sen_tbuf0bcnt; /* Internal */ + ushort sen_paddrh; /* physical address (MSB) */ + ushort sen_paddrm; + ushort sen_paddrl; /* physical address (LSB) */ + ushort sen_pper; /* persistence */ + ushort sen_rfbdptr; /* Rx first BD pointer */ + ushort sen_tfbdptr; /* Tx first BD pointer */ + ushort sen_tlbdptr; /* Tx last BD pointer */ + uint sen_tbuf1data0; /* Save area 0 - current frame */ + uint sen_tbuf1data1; /* Save area 1 - current frame */ + uint sen_tbuf1rba; /* Internal */ + uint sen_tbuf1crc; /* Internal */ + ushort sen_tbuf1bcnt; /* Internal */ + ushort sen_txlen; /* Tx Frame length counter */ + ushort sen_iaddr1; /* Individual address filter */ + ushort sen_iaddr2; + ushort sen_iaddr3; + ushort sen_iaddr4; + ushort sen_boffcnt; /* Backoff counter */ + + /* NOTE: Some versions of the manual have the following items + * incorrectly documented. Below is the proper order. + */ + ushort sen_taddrh; /* temp address (MSB) */ + ushort sen_taddrm; + ushort sen_taddrl; /* temp address (LSB) */ +} scc_enet_t; + + +/* SCC Event register as used by Ethernet. 
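 *
 * The GSMR bits above are normally written in two steps: the mode is selected
 * while the channel is disabled, and ENR/ENT are OR-ed in only after the
 * parameter RAM and BDs are ready.  A sketch for UART use, assuming 'sccp'
 * points at the SCC register block of the mapped internal memory:
 *
 *	sccp->scc_gsmrl = SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 |
 *			  SCC_GSMRL_RDCR_16;
 *	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
 *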
+*/ +#define SCCE_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */ +#define SCCE_ENET_TXE ((ushort)0x0010) /* Transmit Error */ +#define SCCE_ENET_RXF ((ushort)0x0008) /* Full frame received */ +#define SCCE_ENET_BSY ((ushort)0x0004) /* All incoming buffers full */ +#define SCCE_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */ +#define SCCE_ENET_RXB ((ushort)0x0001) /* A buffer was received */ + +/* SCC Mode Register (PSMR) as used by Ethernet. +*/ +#define SCC_PSMR_HBC ((ushort)0x8000) /* Enable heartbeat */ +#define SCC_PSMR_FC ((ushort)0x4000) /* Force collision */ +#define SCC_PSMR_RSH ((ushort)0x2000) /* Receive short frames */ +#define SCC_PSMR_IAM ((ushort)0x1000) /* Check individual hash */ +#define SCC_PSMR_ENCRC ((ushort)0x0800) /* Ethernet CRC mode */ +#define SCC_PSMR_PRO ((ushort)0x0200) /* Promiscuous mode */ +#define SCC_PSMR_BRO ((ushort)0x0100) /* Catch broadcast pkts */ +#define SCC_PSMR_SBT ((ushort)0x0080) /* Special backoff timer */ +#define SCC_PSMR_LPB ((ushort)0x0040) /* Set Loopback mode */ +#define SCC_PSMR_SIP ((ushort)0x0020) /* Sample Input Pins */ +#define SCC_PSMR_LCW ((ushort)0x0010) /* Late collision window */ +#define SCC_PSMR_NIB22 ((ushort)0x000a) /* Start frame search */ +#define SCC_PSMR_FDE ((ushort)0x0001) /* Full duplex enable */ + +/* Buffer descriptor control/status used by Ethernet receive. + * Common to SCC and FCC. + */ +#define BD_ENET_RX_EMPTY ((ushort)0x8000) +#define BD_ENET_RX_WRAP ((ushort)0x2000) +#define BD_ENET_RX_INTR ((ushort)0x1000) +#define BD_ENET_RX_LAST ((ushort)0x0800) +#define BD_ENET_RX_FIRST ((ushort)0x0400) +#define BD_ENET_RX_MISS ((ushort)0x0100) +#define BD_ENET_RX_BC ((ushort)0x0080) /* FCC Only */ +#define BD_ENET_RX_MC ((ushort)0x0040) /* FCC Only */ +#define BD_ENET_RX_LG ((ushort)0x0020) +#define BD_ENET_RX_NO ((ushort)0x0010) +#define BD_ENET_RX_SH ((ushort)0x0008) +#define BD_ENET_RX_CR ((ushort)0x0004) +#define BD_ENET_RX_OV ((ushort)0x0002) +#define BD_ENET_RX_CL ((ushort)0x0001) +#define BD_ENET_RX_STATS ((ushort)0x01ff) /* All status bits */ + +/* Buffer descriptor control/status used by Ethernet transmit. + * Common to SCC and FCC. 
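 *
 * Receive completion code typically folds the error bits above into the
 * interface statistics before recycling the BD; 'stats' is assumed to be the
 * driver's struct net_device_stats pointer:
 *
 *	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_NO | BD_ENET_RX_SH |
 *			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_CL))
 *		stats->rx_errors++;
 *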
+ */ +#define BD_ENET_TX_READY ((ushort)0x8000) +#define BD_ENET_TX_PAD ((ushort)0x4000) +#define BD_ENET_TX_WRAP ((ushort)0x2000) +#define BD_ENET_TX_INTR ((ushort)0x1000) +#define BD_ENET_TX_LAST ((ushort)0x0800) +#define BD_ENET_TX_TC ((ushort)0x0400) +#define BD_ENET_TX_DEF ((ushort)0x0200) +#define BD_ENET_TX_HB ((ushort)0x0100) +#define BD_ENET_TX_LC ((ushort)0x0080) +#define BD_ENET_TX_RL ((ushort)0x0040) +#define BD_ENET_TX_RCMASK ((ushort)0x003c) +#define BD_ENET_TX_UN ((ushort)0x0002) +#define BD_ENET_TX_CSL ((ushort)0x0001) +#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */ + +/* SCC as UART +*/ +typedef struct scc_uart { + sccp_t scc_genscc; + uint scc_res1; /* Reserved */ + uint scc_res2; /* Reserved */ + ushort scc_maxidl; /* Maximum idle chars */ + ushort scc_idlc; /* temp idle counter */ + ushort scc_brkcr; /* Break count register */ + ushort scc_parec; /* receive parity error counter */ + ushort scc_frmec; /* receive framing error counter */ + ushort scc_nosec; /* receive noise counter */ + ushort scc_brkec; /* receive break condition counter */ + ushort scc_brkln; /* last received break length */ + ushort scc_uaddr1; /* UART address character 1 */ + ushort scc_uaddr2; /* UART address character 2 */ + ushort scc_rtemp; /* Temp storage */ + ushort scc_toseq; /* Transmit out of sequence char */ + ushort scc_char1; /* control character 1 */ + ushort scc_char2; /* control character 2 */ + ushort scc_char3; /* control character 3 */ + ushort scc_char4; /* control character 4 */ + ushort scc_char5; /* control character 5 */ + ushort scc_char6; /* control character 6 */ + ushort scc_char7; /* control character 7 */ + ushort scc_char8; /* control character 8 */ + ushort scc_rccm; /* receive control character mask */ + ushort scc_rccr; /* receive control character register */ + ushort scc_rlbc; /* receive last break character */ +} scc_uart_t; + +/* SCC Event and Mask registers when it is used as a UART. +*/ +#define UART_SCCM_GLR ((ushort)0x1000) +#define UART_SCCM_GLT ((ushort)0x0800) +#define UART_SCCM_AB ((ushort)0x0200) +#define UART_SCCM_IDL ((ushort)0x0100) +#define UART_SCCM_GRA ((ushort)0x0080) +#define UART_SCCM_BRKE ((ushort)0x0040) +#define UART_SCCM_BRKS ((ushort)0x0020) +#define UART_SCCM_CCR ((ushort)0x0008) +#define UART_SCCM_BSY ((ushort)0x0004) +#define UART_SCCM_TX ((ushort)0x0002) +#define UART_SCCM_RX ((ushort)0x0001) + +/* The SCC PSMR when used as a UART. +*/ +#define SCU_PSMR_FLC ((ushort)0x8000) +#define SCU_PSMR_SL ((ushort)0x4000) +#define SCU_PSMR_CL ((ushort)0x3000) +#define SCU_PSMR_UM ((ushort)0x0c00) +#define SCU_PSMR_FRZ ((ushort)0x0200) +#define SCU_PSMR_RZS ((ushort)0x0100) +#define SCU_PSMR_SYN ((ushort)0x0080) +#define SCU_PSMR_DRT ((ushort)0x0040) +#define SCU_PSMR_PEN ((ushort)0x0010) +#define SCU_PSMR_RPM ((ushort)0x000c) +#define SCU_PSMR_REVP ((ushort)0x0008) +#define SCU_PSMR_TPM ((ushort)0x0003) +#define SCU_PSMR_TEVP ((ushort)0x0003) + +/* CPM Transparent mode SCC. + */ +typedef struct scc_trans { + sccp_t st_genscc; + uint st_cpres; /* Preset CRC */ + uint st_cmask; /* Constant mask for CRC */ +} scc_trans_t; + +#define BD_SCC_TX_LAST ((ushort)0x0800) + +/* How about some FCCs..... 
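 *
 * On the transmit side a frame is queued by filling a BD and setting the
 * control bits above in a single update; 'dma_addr' and 'skb' are assumed to
 * come from the caller's mapping of the outgoing buffer:
 *
 *	bdp->cbd_datlen = skb->len;
 *	bdp->cbd_bufaddr = dma_addr;
 *	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR |
 *			BD_ENET_TX_LAST | BD_ENET_TX_TC);
 *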
+*/ +#define FCC_GFMR_DIAG_NORM ((uint)0x00000000) +#define FCC_GFMR_DIAG_LE ((uint)0x40000000) +#define FCC_GFMR_DIAG_AE ((uint)0x80000000) +#define FCC_GFMR_DIAG_ALE ((uint)0xc0000000) +#define FCC_GFMR_TCI ((uint)0x20000000) +#define FCC_GFMR_TRX ((uint)0x10000000) +#define FCC_GFMR_TTX ((uint)0x08000000) +#define FCC_GFMR_TTX ((uint)0x08000000) +#define FCC_GFMR_CDP ((uint)0x04000000) +#define FCC_GFMR_CTSP ((uint)0x02000000) +#define FCC_GFMR_CDS ((uint)0x01000000) +#define FCC_GFMR_CTSS ((uint)0x00800000) +#define FCC_GFMR_SYNL_NONE ((uint)0x00000000) +#define FCC_GFMR_SYNL_AUTO ((uint)0x00004000) +#define FCC_GFMR_SYNL_8 ((uint)0x00008000) +#define FCC_GFMR_SYNL_16 ((uint)0x0000c000) +#define FCC_GFMR_RTSM ((uint)0x00002000) +#define FCC_GFMR_RENC_NRZ ((uint)0x00000000) +#define FCC_GFMR_RENC_NRZI ((uint)0x00000800) +#define FCC_GFMR_REVD ((uint)0x00000400) +#define FCC_GFMR_TENC_NRZ ((uint)0x00000000) +#define FCC_GFMR_TENC_NRZI ((uint)0x00000100) +#define FCC_GFMR_TCRC_16 ((uint)0x00000000) +#define FCC_GFMR_TCRC_32 ((uint)0x00000080) +#define FCC_GFMR_ENR ((uint)0x00000020) +#define FCC_GFMR_ENT ((uint)0x00000010) +#define FCC_GFMR_MODE_ENET ((uint)0x0000000c) +#define FCC_GFMR_MODE_ATM ((uint)0x0000000a) +#define FCC_GFMR_MODE_HDLC ((uint)0x00000000) + +/* Generic FCC parameter ram. +*/ +typedef struct fcc_param { + ushort fcc_riptr; /* Rx Internal temp pointer */ + ushort fcc_tiptr; /* Tx Internal temp pointer */ + ushort fcc_res1; + ushort fcc_mrblr; /* Max receive buffer length, mod 32 bytes */ + uint fcc_rstate; /* Upper byte is Func code, must be set */ + uint fcc_rbase; /* Receive BD base */ + ushort fcc_rbdstat; /* RxBD status */ + ushort fcc_rbdlen; /* RxBD down counter */ + uint fcc_rdptr; /* RxBD internal data pointer */ + uint fcc_tstate; /* Upper byte is Func code, must be set */ + uint fcc_tbase; /* Transmit BD base */ + ushort fcc_tbdstat; /* TxBD status */ + ushort fcc_tbdlen; /* TxBD down counter */ + uint fcc_tdptr; /* TxBD internal data pointer */ + uint fcc_rbptr; /* Rx BD Internal buf pointer */ + uint fcc_tbptr; /* Tx BD Internal buf pointer */ + uint fcc_rcrc; /* Rx temp CRC */ + uint fcc_res2; + uint fcc_tcrc; /* Tx temp CRC */ +} fccp_t; + + +/* Ethernet controller through FCC. 
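 *
 * As with the SCCs, an FCC is parked and released through its GFMR: the mode
 * is chosen first, and ENR/ENT are set only once the rest of the channel is
 * initialised.  'fccp' is assumed to point at the FCC register block of the
 * mapped internal memory:
 *
 *	fccp->fcc_gfmr = FCC_GFMR_MODE_ENET;
 *	fccp->fcc_gfmr |= (FCC_GFMR_ENR | FCC_GFMR_ENT);
 *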
+*/ +typedef struct fcc_enet { + fccp_t fen_genfcc; + uint fen_statbuf; /* Internal status buffer */ + uint fen_camptr; /* CAM address */ + uint fen_cmask; /* Constant mask for CRC */ + uint fen_cpres; /* Preset CRC */ + uint fen_crcec; /* CRC Error counter */ + uint fen_alec; /* alignment error counter */ + uint fen_disfc; /* discard frame counter */ + ushort fen_retlim; /* Retry limit */ + ushort fen_retcnt; /* Retry counter */ + ushort fen_pper; /* Persistence */ + ushort fen_boffcnt; /* backoff counter */ + uint fen_gaddrh; /* Group address filter, high 32-bits */ + uint fen_gaddrl; /* Group address filter, low 32-bits */ + ushort fen_tfcstat; /* out of sequence TxBD */ + ushort fen_tfclen; + uint fen_tfcptr; + ushort fen_mflr; /* Maximum frame length (1518) */ + ushort fen_paddrh; /* MAC address */ + ushort fen_paddrm; + ushort fen_paddrl; + ushort fen_ibdcount; /* Internal BD counter */ + ushort fen_ibdstart; /* Internal BD start pointer */ + ushort fen_ibdend; /* Internal BD end pointer */ + ushort fen_txlen; /* Internal Tx frame length counter */ + uint fen_ibdbase[8]; /* Internal use */ + uint fen_iaddrh; /* Individual address filter */ + uint fen_iaddrl; + ushort fen_minflr; /* Minimum frame length (64) */ + ushort fen_taddrh; /* Filter transfer MAC address */ + ushort fen_taddrm; + ushort fen_taddrl; + ushort fen_padptr; /* Pointer to pad byte buffer */ + ushort fen_cftype; /* control frame type */ + ushort fen_cfrange; /* control frame range */ + ushort fen_maxb; /* maximum BD count */ + ushort fen_maxd1; /* Max DMA1 length (1520) */ + ushort fen_maxd2; /* Max DMA2 length (1520) */ + ushort fen_maxd; /* internal max DMA count */ + ushort fen_dmacnt; /* internal DMA counter */ + uint fen_octc; /* Total octect counter */ + uint fen_colc; /* Total collision counter */ + uint fen_broc; /* Total broadcast packet counter */ + uint fen_mulc; /* Total multicast packet count */ + uint fen_uspc; /* Total packets < 64 bytes */ + uint fen_frgc; /* Total packets < 64 bytes with errors */ + uint fen_ospc; /* Total packets > 1518 */ + uint fen_jbrc; /* Total packets > 1518 with errors */ + uint fen_p64c; /* Total packets == 64 bytes */ + uint fen_p65c; /* Total packets 64 < bytes <= 127 */ + uint fen_p128c; /* Total packets 127 < bytes <= 255 */ + uint fen_p256c; /* Total packets 256 < bytes <= 511 */ + uint fen_p512c; /* Total packets 512 < bytes <= 1023 */ + uint fen_p1024c; /* Total packets 1024 < bytes <= 1518 */ + uint fen_cambuf; /* Internal CAM buffer poiner */ + ushort fen_rfthr; /* Received frames threshold */ + ushort fen_rfcnt; /* Received frames count */ +} fcc_enet_t; + +/* FCC Event/Mask register as used by Ethernet. +*/ +#define FCC_ENET_GRA ((ushort)0x0080) /* Graceful stop complete */ +#define FCC_ENET_RXC ((ushort)0x0040) /* Control Frame Received */ +#define FCC_ENET_TXC ((ushort)0x0020) /* Out of seq. Tx sent */ +#define FCC_ENET_TXE ((ushort)0x0010) /* Transmit Error */ +#define FCC_ENET_RXF ((ushort)0x0008) /* Full frame received */ +#define FCC_ENET_BSY ((ushort)0x0004) /* Busy. Rx Frame dropped */ +#define FCC_ENET_TXB ((ushort)0x0002) /* A buffer was transmitted */ +#define FCC_ENET_RXB ((ushort)0x0001) /* A buffer was received */ + +/* FCC Mode Register (FPSMR) as used by Ethernet. +*/ +#define FCC_PSMR_HBC ((uint)0x80000000) /* Enable heartbeat */ +#define FCC_PSMR_FC ((uint)0x40000000) /* Force Collision */ +#define FCC_PSMR_SBT ((uint)0x20000000) /* Stop backoff timer */ +#define FCC_PSMR_LPB ((uint)0x10000000) /* Local protect. 
1 = FDX */ +#define FCC_PSMR_LCW ((uint)0x08000000) /* Late collision select */ +#define FCC_PSMR_FDE ((uint)0x04000000) /* Full Duplex Enable */ +#define FCC_PSMR_MON ((uint)0x02000000) /* RMON Enable */ +#define FCC_PSMR_PRO ((uint)0x00400000) /* Promiscuous Enable */ +#define FCC_PSMR_FCE ((uint)0x00200000) /* Flow Control Enable */ +#define FCC_PSMR_RSH ((uint)0x00100000) /* Receive Short Frames */ +#define FCC_PSMR_CAM ((uint)0x00000400) /* CAM enable */ +#define FCC_PSMR_BRO ((uint)0x00000200) /* Broadcast pkt discard */ +#define FCC_PSMR_ENCRC ((uint)0x00000080) /* Use 32-bit CRC */ + +/* IIC parameter RAM. +*/ +typedef struct iic { + ushort iic_rbase; /* Rx Buffer descriptor base address */ + ushort iic_tbase; /* Tx Buffer descriptor base address */ + u_char iic_rfcr; /* Rx function code */ + u_char iic_tfcr; /* Tx function code */ + ushort iic_mrblr; /* Max receive buffer length */ + uint iic_rstate; /* Internal */ + uint iic_rdp; /* Internal */ + ushort iic_rbptr; /* Internal */ + ushort iic_rbc; /* Internal */ + uint iic_rxtmp; /* Internal */ + uint iic_tstate; /* Internal */ + uint iic_tdp; /* Internal */ + ushort iic_tbptr; /* Internal */ + ushort iic_tbc; /* Internal */ + uint iic_txtmp; /* Internal */ +} iic_t; + +/* SPI parameter RAM. +*/ +typedef struct spi { + ushort spi_rbase; /* Rx Buffer descriptor base address */ + ushort spi_tbase; /* Tx Buffer descriptor base address */ + u_char spi_rfcr; /* Rx function code */ + u_char spi_tfcr; /* Tx function code */ + ushort spi_mrblr; /* Max receive buffer length */ + uint spi_rstate; /* Internal */ + uint spi_rdp; /* Internal */ + ushort spi_rbptr; /* Internal */ + ushort spi_rbc; /* Internal */ + uint spi_rxtmp; /* Internal */ + uint spi_tstate; /* Internal */ + uint spi_tdp; /* Internal */ + ushort spi_tbptr; /* Internal */ + ushort spi_tbc; /* Internal */ + uint spi_txtmp; /* Internal */ + uint spi_res; /* Tx temp. */ + uint spi_res1[4]; /* SDMA temp. */ +} spi_t; + +/* SPI Mode register. 
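 *
 * The mode bits below are usually combined along these lines for a master
 * shifting 8-bit characters (the prescaler value is illustrative):
 *
 *	spmode = SPMODE_EN | SPMODE_MSTR | SPMODE_LEN(8) | SPMODE_PM(4);
 *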
+*/ +#define SPMODE_LOOP ((ushort)0x4000) /* Loopback */ +#define SPMODE_CI ((ushort)0x2000) /* Clock Invert */ +#define SPMODE_CP ((ushort)0x1000) /* Clock Phase */ +#define SPMODE_DIV16 ((ushort)0x0800) /* BRG/16 mode */ +#define SPMODE_REV ((ushort)0x0400) /* Reversed Data */ +#define SPMODE_MSTR ((ushort)0x0200) /* SPI Master */ +#define SPMODE_EN ((ushort)0x0100) /* Enable */ +#define SPMODE_LENMSK ((ushort)0x00f0) /* character length */ +#define SPMODE_PMMSK ((ushort)0x000f) /* prescale modulus */ + +#define SPMODE_LEN(x) ((((x)-1)&0xF)<<4) +#define SPMODE_PM(x) ((x) &0xF) + +#define SPI_EB ((u_char)0x10) /* big endian byte order */ + +#define BD_IIC_START ((ushort)0x0400) + +/* IDMA parameter RAM +*/ +typedef struct idma { + ushort ibase; /* IDMA buffer descriptor table base address */ + ushort dcm; /* DMA channel mode */ + ushort ibdptr; /* IDMA current buffer descriptor pointer */ + ushort dpr_buf; /* IDMA transfer buffer base address */ + ushort buf_inv; /* internal buffer inventory */ + ushort ss_max; /* steady-state maximum transfer size */ + ushort dpr_in_ptr; /* write pointer inside the internal buffer */ + ushort sts; /* source transfer size */ + ushort dpr_out_ptr; /* read pointer inside the internal buffer */ + ushort seob; /* source end of burst */ + ushort deob; /* destination end of burst */ + ushort dts; /* destination transfer size */ + ushort ret_add; /* return address when working in ERM=1 mode */ + ushort res0; /* reserved */ + uint bd_cnt; /* internal byte count */ + uint s_ptr; /* source internal data pointer */ + uint d_ptr; /* destination internal data pointer */ + uint istate; /* internal state */ + u_char res1[20]; /* pad to 64-byte length */ +} idma_t; + +/* DMA channel mode bit fields +*/ +#define IDMA_DCM_FB ((ushort)0x8000) /* fly-by mode */ +#define IDMA_DCM_LP ((ushort)0x4000) /* low priority */ +#define IDMA_DCM_TC2 ((ushort)0x0400) /* value driven on TC[2] */ +#define IDMA_DCM_DMA_WRAP_MASK ((ushort)0x01c0) /* mask for DMA wrap */ +#define IDMA_DCM_DMA_WRAP_64 ((ushort)0x0000) /* 64-byte DMA xfer buffer */ +#define IDMA_DCM_DMA_WRAP_128 ((ushort)0x0040) /* 128-byte DMA xfer buffer */ +#define IDMA_DCM_DMA_WRAP_256 ((ushort)0x0080) /* 256-byte DMA xfer buffer */ +#define IDMA_DCM_DMA_WRAP_512 ((ushort)0x00c0) /* 512-byte DMA xfer buffer */ +#define IDMA_DCM_DMA_WRAP_1024 ((ushort)0x0100) /* 1024-byte DMA xfer buffer */ +#define IDMA_DCM_DMA_WRAP_2048 ((ushort)0x0140) /* 2048-byte DMA xfer buffer */ +#define IDMA_DCM_SINC ((ushort)0x0020) /* source inc addr */ +#define IDMA_DCM_DINC ((ushort)0x0010) /* destination inc addr */ +#define IDMA_DCM_ERM ((ushort)0x0008) /* external request mode */ +#define IDMA_DCM_DT ((ushort)0x0004) /* DONE treatment */ +#define IDMA_DCM_SD_MASK ((ushort)0x0003) /* mask for SD bit field */ +#define IDMA_DCM_SD_MEM2MEM ((ushort)0x0000) /* memory-to-memory xfer */ +#define IDMA_DCM_SD_PER2MEM ((ushort)0x0002) /* peripheral-to-memory xfer */ +#define IDMA_DCM_SD_MEM2PER ((ushort)0x0001) /* memory-to-peripheral xfer */ + +/* IDMA Buffer Descriptors +*/ +typedef struct idma_bd { + uint flags; + uint len; /* data length */ + uint src; /* source data buffer pointer */ + uint dst; /* destination data buffer pointer */ +} idma_bd_t; + +/* IDMA buffer descriptor flag bit fields +*/ +#define IDMA_BD_V ((uint)0x80000000) /* valid */ +#define IDMA_BD_W ((uint)0x20000000) /* wrap */ +#define IDMA_BD_I ((uint)0x10000000) /* interrupt */ +#define IDMA_BD_L ((uint)0x08000000) /* last */ +#define IDMA_BD_CM ((uint)0x02000000) /* continuous 
mode */ +#define IDMA_BD_SDN ((uint)0x00400000) /* source done */ +#define IDMA_BD_DDN ((uint)0x00200000) /* destination done */ +#define IDMA_BD_DGBL ((uint)0x00100000) /* destination global */ +#define IDMA_BD_DBO_LE ((uint)0x00040000) /* little-end dest byte order */ +#define IDMA_BD_DBO_BE ((uint)0x00080000) /* big-end dest byte order */ +#define IDMA_BD_DDTB ((uint)0x00010000) /* destination data bus */ +#define IDMA_BD_SGBL ((uint)0x00002000) /* source global */ +#define IDMA_BD_SBO_LE ((uint)0x00000800) /* little-end src byte order */ +#define IDMA_BD_SBO_BE ((uint)0x00001000) /* big-end src byte order */ +#define IDMA_BD_SDTB ((uint)0x00000200) /* source data bus */ + +/* per-channel IDMA registers +*/ +typedef struct im_idma { + u_char idsr; /* IDMAn event status register */ + u_char res0[3]; + u_char idmr; /* IDMAn event mask register */ + u_char res1[3]; +} im_idma_t; + +/* IDMA event register bit fields +*/ +#define IDMA_EVENT_SC ((unsigned char)0x08) /* stop completed */ +#define IDMA_EVENT_OB ((unsigned char)0x04) /* out of buffers */ +#define IDMA_EVENT_EDN ((unsigned char)0x02) /* external DONE asserted */ +#define IDMA_EVENT_BC ((unsigned char)0x01) /* buffer descriptor complete */ + +/* RISC Controller Configuration Register (RCCR) bit fields +*/ +#define RCCR_TIME ((uint)0x80000000) /* timer enable */ +#define RCCR_TIMEP_MASK ((uint)0x3f000000) /* mask for timer period bit field */ +#define RCCR_DR0M ((uint)0x00800000) /* IDMA0 request mode */ +#define RCCR_DR1M ((uint)0x00400000) /* IDMA1 request mode */ +#define RCCR_DR2M ((uint)0x00000080) /* IDMA2 request mode */ +#define RCCR_DR3M ((uint)0x00000040) /* IDMA3 request mode */ +#define RCCR_DR0QP_MASK ((uint)0x00300000) /* mask for IDMA0 req priority */ +#define RCCR_DR0QP_HIGH ((uint)0x00000000) /* IDMA0 has high req priority */ +#define RCCR_DR0QP_MED ((uint)0x00100000) /* IDMA0 has medium req priority */ +#define RCCR_DR0QP_LOW ((uint)0x00200000) /* IDMA0 has low req priority */ +#define RCCR_DR1QP_MASK ((uint)0x00030000) /* mask for IDMA1 req priority */ +#define RCCR_DR1QP_HIGH ((uint)0x00000000) /* IDMA1 has high req priority */ +#define RCCR_DR1QP_MED ((uint)0x00010000) /* IDMA1 has medium req priority */ +#define RCCR_DR1QP_LOW ((uint)0x00020000) /* IDMA1 has low req priority */ +#define RCCR_DR2QP_MASK ((uint)0x00000030) /* mask for IDMA2 req priority */ +#define RCCR_DR2QP_HIGH ((uint)0x00000000) /* IDMA2 has high req priority */ +#define RCCR_DR2QP_MED ((uint)0x00000010) /* IDMA2 has medium req priority */ +#define RCCR_DR2QP_LOW ((uint)0x00000020) /* IDMA2 has low req priority */ +#define RCCR_DR3QP_MASK ((uint)0x00000003) /* mask for IDMA3 req priority */ +#define RCCR_DR3QP_HIGH ((uint)0x00000000) /* IDMA3 has high req priority */ +#define RCCR_DR3QP_MED ((uint)0x00000001) /* IDMA3 has medium req priority */ +#define RCCR_DR3QP_LOW ((uint)0x00000002) /* IDMA3 has low req priority */ +#define RCCR_EIE ((uint)0x00080000) /* external interrupt enable */ +#define RCCR_SCD ((uint)0x00040000) /* scheduler configuration */ +#define RCCR_ERAM_MASK ((uint)0x0000e000) /* mask for enable RAM microcode */ +#define RCCR_ERAM_0KB ((uint)0x00000000) /* use 0KB of dpram for microcode */ +#define RCCR_ERAM_2KB ((uint)0x00002000) /* use 2KB of dpram for microcode */ +#define RCCR_ERAM_4KB ((uint)0x00004000) /* use 4KB of dpram for microcode */ +#define RCCR_ERAM_6KB ((uint)0x00006000) /* use 6KB of dpram for microcode */ +#define RCCR_ERAM_8KB ((uint)0x00008000) /* use 8KB of dpram for microcode */ +#define RCCR_ERAM_10KB 
((uint)0x0000a000) /* use 10KB of dpram for microcode */ +#define RCCR_ERAM_12KB ((uint)0x0000c000) /* use 12KB of dpram for microcode */ +#define RCCR_EDM0 ((uint)0x00000800) /* DREQ0 edge detect mode */ +#define RCCR_EDM1 ((uint)0x00000400) /* DREQ1 edge detect mode */ +#define RCCR_EDM2 ((uint)0x00000200) /* DREQ2 edge detect mode */ +#define RCCR_EDM3 ((uint)0x00000100) /* DREQ3 edge detect mode */ +#define RCCR_DEM01 ((uint)0x00000008) /* DONE0/DONE1 edge detect mode */ +#define RCCR_DEM23 ((uint)0x00000004) /* DONE2/DONE3 edge detect mode */ + +/*----------------------------------------------------------------------- + * CMXFCR - CMX FCC Clock Route Register + */ +#define CMXFCR_FC1 0x40000000 /* FCC1 connection */ +#define CMXFCR_RF1CS_MSK 0x38000000 /* Receive FCC1 Clock Source Mask */ +#define CMXFCR_TF1CS_MSK 0x07000000 /* Transmit FCC1 Clock Source Mask */ +#define CMXFCR_FC2 0x00400000 /* FCC2 connection */ +#define CMXFCR_RF2CS_MSK 0x00380000 /* Receive FCC2 Clock Source Mask */ +#define CMXFCR_TF2CS_MSK 0x00070000 /* Transmit FCC2 Clock Source Mask */ +#define CMXFCR_FC3 0x00004000 /* FCC3 connection */ +#define CMXFCR_RF3CS_MSK 0x00003800 /* Receive FCC3 Clock Source Mask */ +#define CMXFCR_TF3CS_MSK 0x00000700 /* Transmit FCC3 Clock Source Mask */ + +#define CMXFCR_RF1CS_BRG5 0x00000000 /* Receive FCC1 Clock Source is BRG5 */ +#define CMXFCR_RF1CS_BRG6 0x08000000 /* Receive FCC1 Clock Source is BRG6 */ +#define CMXFCR_RF1CS_BRG7 0x10000000 /* Receive FCC1 Clock Source is BRG7 */ +#define CMXFCR_RF1CS_BRG8 0x18000000 /* Receive FCC1 Clock Source is BRG8 */ +#define CMXFCR_RF1CS_CLK9 0x20000000 /* Receive FCC1 Clock Source is CLK9 */ +#define CMXFCR_RF1CS_CLK10 0x28000000 /* Receive FCC1 Clock Source is CLK10 */ +#define CMXFCR_RF1CS_CLK11 0x30000000 /* Receive FCC1 Clock Source is CLK11 */ +#define CMXFCR_RF1CS_CLK12 0x38000000 /* Receive FCC1 Clock Source is CLK12 */ + +#define CMXFCR_TF1CS_BRG5 0x00000000 /* Transmit FCC1 Clock Source is BRG5 */ +#define CMXFCR_TF1CS_BRG6 0x01000000 /* Transmit FCC1 Clock Source is BRG6 */ +#define CMXFCR_TF1CS_BRG7 0x02000000 /* Transmit FCC1 Clock Source is BRG7 */ +#define CMXFCR_TF1CS_BRG8 0x03000000 /* Transmit FCC1 Clock Source is BRG8 */ +#define CMXFCR_TF1CS_CLK9 0x04000000 /* Transmit FCC1 Clock Source is CLK9 */ +#define CMXFCR_TF1CS_CLK10 0x05000000 /* Transmit FCC1 Clock Source is CLK10 */ +#define CMXFCR_TF1CS_CLK11 0x06000000 /* Transmit FCC1 Clock Source is CLK11 */ +#define CMXFCR_TF1CS_CLK12 0x07000000 /* Transmit FCC1 Clock Source is CLK12 */ + +#define CMXFCR_RF2CS_BRG5 0x00000000 /* Receive FCC2 Clock Source is BRG5 */ +#define CMXFCR_RF2CS_BRG6 0x00080000 /* Receive FCC2 Clock Source is BRG6 */ +#define CMXFCR_RF2CS_BRG7 0x00100000 /* Receive FCC2 Clock Source is BRG7 */ +#define CMXFCR_RF2CS_BRG8 0x00180000 /* Receive FCC2 Clock Source is BRG8 */ +#define CMXFCR_RF2CS_CLK13 0x00200000 /* Receive FCC2 Clock Source is CLK13 */ +#define CMXFCR_RF2CS_CLK14 0x00280000 /* Receive FCC2 Clock Source is CLK14 */ +#define CMXFCR_RF2CS_CLK15 0x00300000 /* Receive FCC2 Clock Source is CLK15 */ +#define CMXFCR_RF2CS_CLK16 0x00380000 /* Receive FCC2 Clock Source is CLK16 */ + +#define CMXFCR_TF2CS_BRG5 0x00000000 /* Transmit FCC2 Clock Source is BRG5 */ +#define CMXFCR_TF2CS_BRG6 0x00010000 /* Transmit FCC2 Clock Source is BRG6 */ +#define CMXFCR_TF2CS_BRG7 0x00020000 /* Transmit FCC2 Clock Source is BRG7 */ +#define CMXFCR_TF2CS_BRG8 0x00030000 /* Transmit FCC2 Clock Source is BRG8 */ +#define CMXFCR_TF2CS_CLK13 0x00040000 /* 
Transmit FCC2 Clock Source is CLK13 */ +#define CMXFCR_TF2CS_CLK14 0x00050000 /* Transmit FCC2 Clock Source is CLK14 */ +#define CMXFCR_TF2CS_CLK15 0x00060000 /* Transmit FCC2 Clock Source is CLK15 */ +#define CMXFCR_TF2CS_CLK16 0x00070000 /* Transmit FCC2 Clock Source is CLK16 */ + +#define CMXFCR_RF3CS_BRG5 0x00000000 /* Receive FCC3 Clock Source is BRG5 */ +#define CMXFCR_RF3CS_BRG6 0x00000800 /* Receive FCC3 Clock Source is BRG6 */ +#define CMXFCR_RF3CS_BRG7 0x00001000 /* Receive FCC3 Clock Source is BRG7 */ +#define CMXFCR_RF3CS_BRG8 0x00001800 /* Receive FCC3 Clock Source is BRG8 */ +#define CMXFCR_RF3CS_CLK13 0x00002000 /* Receive FCC3 Clock Source is CLK13 */ +#define CMXFCR_RF3CS_CLK14 0x00002800 /* Receive FCC3 Clock Source is CLK14 */ +#define CMXFCR_RF3CS_CLK15 0x00003000 /* Receive FCC3 Clock Source is CLK15 */ +#define CMXFCR_RF3CS_CLK16 0x00003800 /* Receive FCC3 Clock Source is CLK16 */ + +#define CMXFCR_TF3CS_BRG5 0x00000000 /* Transmit FCC3 Clock Source is BRG5 */ +#define CMXFCR_TF3CS_BRG6 0x00000100 /* Transmit FCC3 Clock Source is BRG6 */ +#define CMXFCR_TF3CS_BRG7 0x00000200 /* Transmit FCC3 Clock Source is BRG7 */ +#define CMXFCR_TF3CS_BRG8 0x00000300 /* Transmit FCC3 Clock Source is BRG8 */ +#define CMXFCR_TF3CS_CLK13 0x00000400 /* Transmit FCC3 Clock Source is CLK13 */ +#define CMXFCR_TF3CS_CLK14 0x00000500 /* Transmit FCC3 Clock Source is CLK14 */ +#define CMXFCR_TF3CS_CLK15 0x00000600 /* Transmit FCC3 Clock Source is CLK15 */ +#define CMXFCR_TF3CS_CLK16 0x00000700 /* Transmit FCC3 Clock Source is CLK16 */ + +/*----------------------------------------------------------------------- + * CMXSCR - CMX SCC Clock Route Register + */ +#define CMXSCR_GR1 0x80000000 /* Grant Support of SCC1 */ +#define CMXSCR_SC1 0x40000000 /* SCC1 connection */ +#define CMXSCR_RS1CS_MSK 0x38000000 /* Receive SCC1 Clock Source Mask */ +#define CMXSCR_TS1CS_MSK 0x07000000 /* Transmit SCC1 Clock Source Mask */ +#define CMXSCR_GR2 0x00800000 /* Grant Support of SCC2 */ +#define CMXSCR_SC2 0x00400000 /* SCC2 connection */ +#define CMXSCR_RS2CS_MSK 0x00380000 /* Receive SCC2 Clock Source Mask */ +#define CMXSCR_TS2CS_MSK 0x00070000 /* Transmit SCC2 Clock Source Mask */ +#define CMXSCR_GR3 0x00008000 /* Grant Support of SCC3 */ +#define CMXSCR_SC3 0x00004000 /* SCC3 connection */ +#define CMXSCR_RS3CS_MSK 0x00003800 /* Receive SCC3 Clock Source Mask */ +#define CMXSCR_TS3CS_MSK 0x00000700 /* Transmit SCC3 Clock Source Mask */ +#define CMXSCR_GR4 0x00000080 /* Grant Support of SCC4 */ +#define CMXSCR_SC4 0x00000040 /* SCC4 connection */ +#define CMXSCR_RS4CS_MSK 0x00000038 /* Receive SCC4 Clock Source Mask */ +#define CMXSCR_TS4CS_MSK 0x00000007 /* Transmit SCC4 Clock Source Mask */ + +#define CMXSCR_RS1CS_BRG1 0x00000000 /* SCC1 Rx Clock Source is BRG1 */ +#define CMXSCR_RS1CS_BRG2 0x08000000 /* SCC1 Rx Clock Source is BRG2 */ +#define CMXSCR_RS1CS_BRG3 0x10000000 /* SCC1 Rx Clock Source is BRG3 */ +#define CMXSCR_RS1CS_BRG4 0x18000000 /* SCC1 Rx Clock Source is BRG4 */ +#define CMXSCR_RS1CS_CLK11 0x20000000 /* SCC1 Rx Clock Source is CLK11 */ +#define CMXSCR_RS1CS_CLK12 0x28000000 /* SCC1 Rx Clock Source is CLK12 */ +#define CMXSCR_RS1CS_CLK3 0x30000000 /* SCC1 Rx Clock Source is CLK3 */ +#define CMXSCR_RS1CS_CLK4 0x38000000 /* SCC1 Rx Clock Source is CLK4 */ + +#define CMXSCR_TS1CS_BRG1 0x00000000 /* SCC1 Tx Clock Source is BRG1 */ +#define CMXSCR_TS1CS_BRG2 0x01000000 /* SCC1 Tx Clock Source is BRG2 */ +#define CMXSCR_TS1CS_BRG3 0x02000000 /* SCC1 Tx Clock Source is BRG3 */ +#define 
CMXSCR_TS1CS_BRG4 0x03000000 /* SCC1 Tx Clock Source is BRG4 */ +#define CMXSCR_TS1CS_CLK11 0x04000000 /* SCC1 Tx Clock Source is CLK11 */ +#define CMXSCR_TS1CS_CLK12 0x05000000 /* SCC1 Tx Clock Source is CLK12 */ +#define CMXSCR_TS1CS_CLK3 0x06000000 /* SCC1 Tx Clock Source is CLK3 */ +#define CMXSCR_TS1CS_CLK4 0x07000000 /* SCC1 Tx Clock Source is CLK4 */ + +#define CMXSCR_RS2CS_BRG1 0x00000000 /* SCC2 Rx Clock Source is BRG1 */ +#define CMXSCR_RS2CS_BRG2 0x00080000 /* SCC2 Rx Clock Source is BRG2 */ +#define CMXSCR_RS2CS_BRG3 0x00100000 /* SCC2 Rx Clock Source is BRG3 */ +#define CMXSCR_RS2CS_BRG4 0x00180000 /* SCC2 Rx Clock Source is BRG4 */ +#define CMXSCR_RS2CS_CLK11 0x00200000 /* SCC2 Rx Clock Source is CLK11 */ +#define CMXSCR_RS2CS_CLK12 0x00280000 /* SCC2 Rx Clock Source is CLK12 */ +#define CMXSCR_RS2CS_CLK3 0x00300000 /* SCC2 Rx Clock Source is CLK3 */ +#define CMXSCR_RS2CS_CLK4 0x00380000 /* SCC2 Rx Clock Source is CLK4 */ + +#define CMXSCR_TS2CS_BRG1 0x00000000 /* SCC2 Tx Clock Source is BRG1 */ +#define CMXSCR_TS2CS_BRG2 0x00010000 /* SCC2 Tx Clock Source is BRG2 */ +#define CMXSCR_TS2CS_BRG3 0x00020000 /* SCC2 Tx Clock Source is BRG3 */ +#define CMXSCR_TS2CS_BRG4 0x00030000 /* SCC2 Tx Clock Source is BRG4 */ +#define CMXSCR_TS2CS_CLK11 0x00040000 /* SCC2 Tx Clock Source is CLK11 */ +#define CMXSCR_TS2CS_CLK12 0x00050000 /* SCC2 Tx Clock Source is CLK12 */ +#define CMXSCR_TS2CS_CLK3 0x00060000 /* SCC2 Tx Clock Source is CLK3 */ +#define CMXSCR_TS2CS_CLK4 0x00070000 /* SCC2 Tx Clock Source is CLK4 */ + +#define CMXSCR_RS3CS_BRG1 0x00000000 /* SCC3 Rx Clock Source is BRG1 */ +#define CMXSCR_RS3CS_BRG2 0x00000800 /* SCC3 Rx Clock Source is BRG2 */ +#define CMXSCR_RS3CS_BRG3 0x00001000 /* SCC3 Rx Clock Source is BRG3 */ +#define CMXSCR_RS3CS_BRG4 0x00001800 /* SCC3 Rx Clock Source is BRG4 */ +#define CMXSCR_RS3CS_CLK5 0x00002000 /* SCC3 Rx Clock Source is CLK5 */ +#define CMXSCR_RS3CS_CLK6 0x00002800 /* SCC3 Rx Clock Source is CLK6 */ +#define CMXSCR_RS3CS_CLK7 0x00003000 /* SCC3 Rx Clock Source is CLK7 */ +#define CMXSCR_RS3CS_CLK8 0x00003800 /* SCC3 Rx Clock Source is CLK8 */ + +#define CMXSCR_TS3CS_BRG1 0x00000000 /* SCC3 Tx Clock Source is BRG1 */ +#define CMXSCR_TS3CS_BRG2 0x00000100 /* SCC3 Tx Clock Source is BRG2 */ +#define CMXSCR_TS3CS_BRG3 0x00000200 /* SCC3 Tx Clock Source is BRG3 */ +#define CMXSCR_TS3CS_BRG4 0x00000300 /* SCC3 Tx Clock Source is BRG4 */ +#define CMXSCR_TS3CS_CLK5 0x00000400 /* SCC3 Tx Clock Source is CLK5 */ +#define CMXSCR_TS3CS_CLK6 0x00000500 /* SCC3 Tx Clock Source is CLK6 */ +#define CMXSCR_TS3CS_CLK7 0x00000600 /* SCC3 Tx Clock Source is CLK7 */ +#define CMXSCR_TS3CS_CLK8 0x00000700 /* SCC3 Tx Clock Source is CLK8 */ + +#define CMXSCR_RS4CS_BRG1 0x00000000 /* SCC4 Rx Clock Source is BRG1 */ +#define CMXSCR_RS4CS_BRG2 0x00000008 /* SCC4 Rx Clock Source is BRG2 */ +#define CMXSCR_RS4CS_BRG3 0x00000010 /* SCC4 Rx Clock Source is BRG3 */ +#define CMXSCR_RS4CS_BRG4 0x00000018 /* SCC4 Rx Clock Source is BRG4 */ +#define CMXSCR_RS4CS_CLK5 0x00000020 /* SCC4 Rx Clock Source is CLK5 */ +#define CMXSCR_RS4CS_CLK6 0x00000028 /* SCC4 Rx Clock Source is CLK6 */ +#define CMXSCR_RS4CS_CLK7 0x00000030 /* SCC4 Rx Clock Source is CLK7 */ +#define CMXSCR_RS4CS_CLK8 0x00000038 /* SCC4 Rx Clock Source is CLK8 */ + +#define CMXSCR_TS4CS_BRG1 0x00000000 /* SCC4 Tx Clock Source is BRG1 */ +#define CMXSCR_TS4CS_BRG2 0x00000001 /* SCC4 Tx Clock Source is BRG2 */ +#define CMXSCR_TS4CS_BRG3 0x00000002 /* SCC4 Tx Clock Source is BRG3 */ +#define CMXSCR_TS4CS_BRG4 
0x00000003 /* SCC4 Tx Clock Source is BRG4 */ +#define CMXSCR_TS4CS_CLK5 0x00000004 /* SCC4 Tx Clock Source is CLK5 */ +#define CMXSCR_TS4CS_CLK6 0x00000005 /* SCC4 Tx Clock Source is CLK6 */ +#define CMXSCR_TS4CS_CLK7 0x00000006 /* SCC4 Tx Clock Source is CLK7 */ +#define CMXSCR_TS4CS_CLK8 0x00000007 /* SCC4 Tx Clock Source is CLK8 */ + +#endif /* __CPM2__ */ +#endif /* __KERNEL__ */ + + diff --git a/include/asm-ppc/immap_85xx.h b/include/asm-ppc/immap_85xx.h new file mode 100644 index 000000000..50fb5e470 --- /dev/null +++ b/include/asm-ppc/immap_85xx.h @@ -0,0 +1,126 @@ +/* + * include/asm-ppc/immap_85xx.h + * + * MPC85xx Internal Memory Map + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifdef __KERNEL__ +#ifndef __ASM_IMMAP_85XX_H__ +#define __ASM_IMMAP_85XX_H__ + +/* Eventually this should define all the IO block registers in 85xx */ + +/* PCI Registers */ +typedef struct ccsr_pci { + uint cfg_addr; /* 0x.000 - PCI Configuration Address Register */ + uint cfg_data; /* 0x.004 - PCI Configuration Data Register */ + uint int_ack; /* 0x.008 - PCI Interrupt Acknowledge Register */ + char res1[3060]; + uint potar0; /* 0x.c00 - PCI Outbound Transaction Address Register 0 */ + uint potear0; /* 0x.c04 - PCI Outbound Translation Extended Address Register 0 */ + uint powbar0; /* 0x.c08 - PCI Outbound Window Base Address Register 0 */ + char res2[4]; + uint powar0; /* 0x.c10 - PCI Outbound Window Attributes Register 0 */ + char res3[12]; + uint potar1; /* 0x.c20 - PCI Outbound Transaction Address Register 1 */ + uint potear1; /* 0x.c24 - PCI Outbound Translation Extended Address Register 1 */ + uint powbar1; /* 0x.c28 - PCI Outbound Window Base Address Register 1 */ + char res4[4]; + uint powar1; /* 0x.c30 - PCI Outbound Window Attributes Register 1 */ + char res5[12]; + uint potar2; /* 0x.c40 - PCI Outbound Transaction Address Register 2 */ + uint potear2; /* 0x.c44 - PCI Outbound Translation Extended Address Register 2 */ + uint powbar2; /* 0x.c48 - PCI Outbound Window Base Address Register 2 */ + char res6[4]; + uint powar2; /* 0x.c50 - PCI Outbound Window Attributes Register 2 */ + char res7[12]; + uint potar3; /* 0x.c60 - PCI Outbound Transaction Address Register 3 */ + uint potear3; /* 0x.c64 - PCI Outbound Translation Extended Address Register 3 */ + uint powbar3; /* 0x.c68 - PCI Outbound Window Base Address Register 3 */ + char res8[4]; + uint powar3; /* 0x.c70 - PCI Outbound Window Attributes Register 3 */ + char res9[12]; + uint potar4; /* 0x.c80 - PCI Outbound Transaction Address Register 4 */ + uint potear4; /* 0x.c84 - PCI Outbound Translation Extended Address Register 4 */ + uint powbar4; /* 0x.c88 - PCI Outbound Window Base Address Register 4 */ + char res10[4]; + uint powar4; /* 0x.c90 - PCI Outbound Window Attributes Register 4 */ + char res11[268]; + uint pitar3; /* 0x.da0 - PCI Inbound Translation Address Register 3 */ + char res12[4]; + uint piwbar3; /* 0x.da8 - PCI Inbound Window Base Address Register 3 */ + uint piwbear3; /* 0x.dac - PCI Inbound Window Base Extended Address Register 3 */ + uint piwar3; /* 0x.db0 - PCI Inbound Window Attributes Register 3 */ + char res13[12]; + uint pitar2; /* 0x.dc0 - PCI Inbound Translation Address Register 2 */ + char res14[4]; + uint 
piwbar2; /* 0x.dc8 - PCI Inbound Window Base Address Register 2 */ + uint piwbear2; /* 0x.dcc - PCI Inbound Window Base Extended Address Register 2 */ + uint piwar2; /* 0x.dd0 - PCI Inbound Window Attributes Register 2 */ + char res15[12]; + uint pitar1; /* 0x.de0 - PCI Inbound Translation Address Register 1 */ + char res16[4]; + uint piwbar1; /* 0x.de8 - PCI Inbound Window Base Address Register 1 */ + char res17[4]; + uint piwar1; /* 0x.df0 - PCI Inbound Window Attributes Register 1 */ + char res18[12]; + uint err_dr; /* 0x.e00 - PCI Error Detect Register */ + uint err_cap_dr; /* 0x.e04 - PCI Error Capture Disable Register */ + uint err_en; /* 0x.e08 - PCI Error Enable Register */ + uint err_attrib; /* 0x.e0c - PCI Error Attributes Capture Register */ + uint err_addr; /* 0x.e10 - PCI Error Address Capture Register */ + uint err_ext_addr; /* 0x.e14 - PCI Error Extended Address Capture Register */ + uint err_dl; /* 0x.e18 - PCI Error Data Low Capture Register */ + uint err_dh; /* 0x.e1c - PCI Error Data High Capture Register */ + uint gas_timr; /* 0x.e20 - PCI Gasket Timer Register */ + uint pci_timr; /* 0x.e24 - PCI Timer Register */ + char res19[472]; +} ccsr_pci_t; + +/* Global Utility Registers */ +typedef struct ccsr_guts { + uint porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ + uint porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ + uint porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ + uint pordevsr; /* 0x.000c - POR I/O Device Status Register */ + uint pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ + char res1[12]; + uint gpporcr; /* 0x.0020 - General-Purpose POR Configuration Register */ + char res2[12]; + uint gpiocr; /* 0x.0030 - GPIO Control Register */ + char res3[12]; + uint gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ + char res4[12]; + uint gpindr; /* 0x.0050 - General-Purpose Input Data Register */ + char res5[12]; + uint pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ + char res6[12]; + uint devdisr; /* 0x.0070 - Device Disable Control */ + char res7[12]; + uint powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ + char res8[12]; + uint mcpsumr; /* 0x.0090 - Machine Check Summary Register */ + char res9[12]; + uint pvr; /* 0x.00a0 - Processor Version Register */ + uint svr; /* 0x.00a4 - System Version Register */ + char res10[3416]; + uint clkocr; /* 0x.0e00 - Clock Out Select Register */ + char res11[12]; + uint ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ + char res12[12]; + uint lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ + char res13[61916]; +} ccsr_guts_t; + +#endif /* __ASM_IMMAP_85XX_H__ */ +#endif /* __KERNEL__ */ diff --git a/include/asm-ppc/immap_cpm2.h b/include/asm-ppc/immap_cpm2.h new file mode 100644 index 000000000..4d5651534 --- /dev/null +++ b/include/asm-ppc/immap_cpm2.h @@ -0,0 +1,648 @@ +/* + * CPM2 Internal Memory Map + * Copyright (c) 1999 Dan Malek (dmalek@jlc.net) + * + * The Internal Memory Map for devices with CPM2 on them. This + * is the superset of all CPM2 devices (8260, 8266, 8280, 8272, + * 8560). + */ +#ifdef __KERNEL__ +#ifndef __IMMAP_CPM2__ +#define __IMMAP_CPM2__ + +/* System configuration registers. 
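As a quick illustration of how the ccsr_guts_t layout above is meant to be used, the sketch below pulls the system version register out of the global-utilities block. The ccsr argument is a hypothetical, already-mapped virtual address of the CCSR space, and 0xe0000 is the offset that appears later in this patch as MPC85xx_GUTS_OFFSET; real code would go through the normal PPC I/O accessors rather than the plain dereference shown.

/* Sketch only: "ccsr" is a hypothetical, already-mapped CCSR base. */
static unsigned int example_read_svr(unsigned long ccsr)
{
        volatile ccsr_guts_t *guts =
                (volatile ccsr_guts_t *)(ccsr + 0xe0000 /* GUTS offset */);

        return guts->svr;       /* System Version Register, 0x.00a4 */
}
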
+*/ +typedef struct sys_82xx_conf { + u32 sc_siumcr; + u32 sc_sypcr; + u8 res1[6]; + u16 sc_swsr; + u8 res2[20]; + u32 sc_bcr; + u8 sc_ppc_acr; + u8 res3[3]; + u32 sc_ppc_alrh; + u32 sc_ppc_alrl; + u8 sc_lcl_acr; + u8 res4[3]; + u32 sc_lcl_alrh; + u32 sc_lcl_alrl; + u32 sc_tescr1; + u32 sc_tescr2; + u32 sc_ltescr1; + u32 sc_ltescr2; + u32 sc_pdtea; + u8 sc_pdtem; + u8 res5[3]; + u32 sc_ldtea; + u8 sc_ldtem; + u8 res6[163]; +} sysconf_82xx_cpm2_t; + +typedef struct sys_85xx_conf { + u32 sc_cear; + u16 sc_ceer; + u16 sc_cemr; + u8 res1[70]; + u32 sc_smaer; + u8 res2[4]; + u32 sc_smevr; + u32 sc_smctr; + u32 sc_lmaer; + u8 res3[4]; + u32 sc_lmevr; + u32 sc_lmctr; + u8 res4[144]; +} sysconf_85xx_cpm2_t; + +typedef union sys_conf { + sysconf_82xx_cpm2_t siu_82xx; + sysconf_85xx_cpm2_t siu_85xx; +} sysconf_cpm2_t; + + + +/* Memory controller registers. +*/ +typedef struct mem_ctlr { + u32 memc_br0; + u32 memc_or0; + u32 memc_br1; + u32 memc_or1; + u32 memc_br2; + u32 memc_or2; + u32 memc_br3; + u32 memc_or3; + u32 memc_br4; + u32 memc_or4; + u32 memc_br5; + u32 memc_or5; + u32 memc_br6; + u32 memc_or6; + u32 memc_br7; + u32 memc_or7; + u32 memc_br8; + u32 memc_or8; + u32 memc_br9; + u32 memc_or9; + u32 memc_br10; + u32 memc_or10; + u32 memc_br11; + u32 memc_or11; + u8 res1[8]; + u32 memc_mar; + u8 res2[4]; + u32 memc_mamr; + u32 memc_mbmr; + u32 memc_mcmr; + u8 res3[8]; + u16 memc_mptpr; + u8 res4[2]; + u32 memc_mdr; + u8 res5[4]; + u32 memc_psdmr; + u32 memc_lsdmr; + u8 memc_purt; + u8 res6[3]; + u8 memc_psrt; + u8 res7[3]; + u8 memc_lurt; + u8 res8[3]; + u8 memc_lsrt; + u8 res9[3]; + u32 memc_immr; + u32 memc_pcibr0; + u32 memc_pcibr1; + u8 res10[16]; + u32 memc_pcimsk0; + u32 memc_pcimsk1; + u8 res11[52]; +} memctl_cpm2_t; + +/* System Integration Timers. +*/ +typedef struct sys_int_timers { + u8 res1[32]; + u16 sit_tmcntsc; + u8 res2[2]; + u32 sit_tmcnt; + u8 res3[4]; + u32 sit_tmcntal; + u8 res4[16]; + u16 sit_piscr; + u8 res5[2]; + u32 sit_pitc; + u32 sit_pitr; + u8 res6[92]; + u8 res7[390]; +} sit_cpm2_t; + +#define PISCR_PIRQ_MASK ((u16)0xff00) +#define PISCR_PS ((u16)0x0080) +#define PISCR_PIE ((u16)0x0004) +#define PISCR_PTF ((u16)0x0002) +#define PISCR_PTE ((u16)0x0001) + +/* PCI Controller. 
+*/ +typedef struct pci_ctlr { + u32 pci_omisr; + u32 pci_omimr; + u8 res1[8]; + u32 pci_ifqpr; + u32 pci_ofqpr; + u8 res2[8]; + u32 pci_imr0; + u32 pci_imr1; + u32 pci_omr0; + u32 pci_omr1; + u32 pci_odr; + u8 res3[4]; + u32 pci_idr; + u8 res4[20]; + u32 pci_imisr; + u32 pci_imimr; + u8 res5[24]; + u32 pci_ifhpr; + u8 res6[4]; + u32 pci_iftpr; + u8 res7[4]; + u32 pci_iphpr; + u8 res8[4]; + u32 pci_iptpr; + u8 res9[4]; + u32 pci_ofhpr; + u8 res10[4]; + u32 pci_oftpr; + u8 res11[4]; + u32 pci_ophpr; + u8 res12[4]; + u32 pci_optpr; + u8 res13[8]; + u32 pci_mucr; + u8 res14[8]; + u32 pci_qbar; + u8 res15[12]; + u32 pci_dmamr0; + u32 pci_dmasr0; + u32 pci_dmacdar0; + u8 res16[4]; + u32 pci_dmasar0; + u8 res17[4]; + u32 pci_dmadar0; + u8 res18[4]; + u32 pci_dmabcr0; + u32 pci_dmandar0; + u8 res19[86]; + u32 pci_dmamr1; + u32 pci_dmasr1; + u32 pci_dmacdar1; + u8 res20[4]; + u32 pci_dmasar1; + u8 res21[4]; + u32 pci_dmadar1; + u8 res22[4]; + u32 pci_dmabcr1; + u32 pci_dmandar1; + u8 res23[88]; + u32 pci_dmamr2; + u32 pci_dmasr2; + u32 pci_dmacdar2; + u8 res24[4]; + u32 pci_dmasar2; + u8 res25[4]; + u32 pci_dmadar2; + u8 res26[4]; + u32 pci_dmabcr2; + u32 pci_dmandar2; + u8 res27[88]; + u32 pci_dmamr3; + u32 pci_dmasr3; + u32 pci_dmacdar3; + u8 res28[4]; + u32 pci_dmasar3; + u8 res29[4]; + u32 pci_dmadar3; + u8 res30[4]; + u32 pci_dmabcr3; + u32 pci_dmandar3; + u8 res31[344]; + u32 pci_potar0; + u8 res32[4]; + u32 pci_pobar0; + u8 res33[4]; + u32 pci_pocmr0; + u8 res34[4]; + u32 pci_potar1; + u8 res35[4]; + u32 pci_pobar1; + u8 res36[4]; + u32 pci_pocmr1; + u8 res37[4]; + u32 pci_potar2; + u8 res38[4]; + u32 pci_pobar2; + u8 res39[4]; + u32 pci_pocmr2; + u8 res40[50]; + u32 pci_ptcr; + u32 pci_gpcr; + u32 pci_gcr; + u32 pci_esr; + u32 pci_emr; + u32 pci_ecr; + u32 pci_eacr; + u8 res41[4]; + u32 pci_edcr; + u8 res42[4]; + u32 pci_eccr; + u8 res43[44]; + u32 pci_pitar1; + u8 res44[4]; + u32 pci_pibar1; + u8 res45[4]; + u32 pci_picmr1; + u8 res46[4]; + u32 pci_pitar0; + u8 res47[4]; + u32 pci_pibar0; + u8 res48[4]; + u32 pci_picmr0; + u8 res49[4]; + u32 pci_cfg_addr; + u32 pci_cfg_data; + u32 pci_int_ack; + u8 res50[756]; +} pci_cpm2_t; + +/* Interrupt Controller. +*/ +typedef struct interrupt_controller { + u16 ic_sicr; + u8 res1[2]; + u32 ic_sivec; + u32 ic_sipnrh; + u32 ic_sipnrl; + u32 ic_siprr; + u32 ic_scprrh; + u32 ic_scprrl; + u32 ic_simrh; + u32 ic_simrl; + u32 ic_siexr; + u8 res2[88]; +} intctl_cpm2_t; + +/* Clocks and Reset. +*/ +typedef struct clk_and_reset { + u32 car_sccr; + u8 res1[4]; + u32 car_scmr; + u8 res2[4]; + u32 car_rsr; + u32 car_rmr; + u8 res[104]; +} car_cpm2_t; + +/* Input/Output Port control/status registers. + * Names consistent with processor manual, although they are different + * from the original 8xx names....... 
+ */ +typedef struct io_port { + u32 iop_pdira; + u32 iop_ppara; + u32 iop_psora; + u32 iop_podra; + u32 iop_pdata; + u8 res1[12]; + u32 iop_pdirb; + u32 iop_pparb; + u32 iop_psorb; + u32 iop_podrb; + u32 iop_pdatb; + u8 res2[12]; + u32 iop_pdirc; + u32 iop_pparc; + u32 iop_psorc; + u32 iop_podrc; + u32 iop_pdatc; + u8 res3[12]; + u32 iop_pdird; + u32 iop_ppard; + u32 iop_psord; + u32 iop_podrd; + u32 iop_pdatd; + u8 res4[12]; +} iop_cpm2_t; + +/* Communication Processor Module Timers +*/ +typedef struct cpm_timers { + u8 cpmt_tgcr1; + u8 res1[3]; + u8 cpmt_tgcr2; + u8 res2[11]; + u16 cpmt_tmr1; + u16 cpmt_tmr2; + u16 cpmt_trr1; + u16 cpmt_trr2; + u16 cpmt_tcr1; + u16 cpmt_tcr2; + u16 cpmt_tcn1; + u16 cpmt_tcn2; + u16 cpmt_tmr3; + u16 cpmt_tmr4; + u16 cpmt_trr3; + u16 cpmt_trr4; + u16 cpmt_tcr3; + u16 cpmt_tcr4; + u16 cpmt_tcn3; + u16 cpmt_tcn4; + u16 cpmt_ter1; + u16 cpmt_ter2; + u16 cpmt_ter3; + u16 cpmt_ter4; + u8 res3[584]; +} cpmtimer_cpm2_t; + +/* DMA control/status registers. +*/ +typedef struct sdma_csr { + u8 res0[24]; + u8 sdma_sdsr; + u8 res1[3]; + u8 sdma_sdmr; + u8 res2[3]; + u8 sdma_idsr1; + u8 res3[3]; + u8 sdma_idmr1; + u8 res4[3]; + u8 sdma_idsr2; + u8 res5[3]; + u8 sdma_idmr2; + u8 res6[3]; + u8 sdma_idsr3; + u8 res7[3]; + u8 sdma_idmr3; + u8 res8[3]; + u8 sdma_idsr4; + u8 res9[3]; + u8 sdma_idmr4; + u8 res10[707]; +} sdma_cpm2_t; + +/* Fast controllers +*/ +typedef struct fcc { + u32 fcc_gfmr; + u32 fcc_fpsmr; + u16 fcc_ftodr; + u8 res1[2]; + u16 fcc_fdsr; + u8 res2[2]; + u16 fcc_fcce; + u8 res3[2]; + u16 fcc_fccm; + u8 res4[2]; + u8 fcc_fccs; + u8 res5[3]; + u8 fcc_ftirr_phy[4]; +} fcc_t; + +/* Fast controllers continued + */ +typedef struct fcc_c { + u32 fcc_firper; + u32 fcc_firer; + u32 fcc_firsr_hi; + u32 fcc_firsr_lo; + u8 fcc_gfemr; + u8 res1[15]; +} fcc_c_t; + +/* TC Layer + */ +typedef struct tclayer { + u16 tc_tcmode; + u16 tc_cdsmr; + u16 tc_tcer; + u16 tc_rcc; + u16 tc_tcmr; + u16 tc_fcc; + u16 tc_ccc; + u16 tc_icc; + u16 tc_tcc; + u16 tc_ecc; + u8 res1[12]; +} tclayer_t; + + +/* I2C +*/ +typedef struct i2c { + u8 i2c_i2mod; + u8 res1[3]; + u8 i2c_i2add; + u8 res2[3]; + u8 i2c_i2brg; + u8 res3[3]; + u8 i2c_i2com; + u8 res4[3]; + u8 i2c_i2cer; + u8 res5[3]; + u8 i2c_i2cmr; + u8 res6[331]; +} i2c_cpm2_t; + +typedef struct scc { /* Serial communication channels */ + u32 scc_gsmrl; + u32 scc_gsmrh; + u16 scc_psmr; + u8 res1[2]; + u16 scc_todr; + u16 scc_dsr; + u16 scc_scce; + u8 res2[2]; + u16 scc_sccm; + u8 res3; + u8 scc_sccs; + u8 res4[8]; +} scc_t; + +typedef struct smc { /* Serial management channels */ + u8 res1[2]; + u16 smc_smcmr; + u8 res2[2]; + u8 smc_smce; + u8 res3[3]; + u8 smc_smcm; + u8 res4[5]; +} smc_t; + +/* Serial Peripheral Interface. +*/ +typedef struct spi_ctrl { + u16 spi_spmode; + u8 res1[4]; + u8 spi_spie; + u8 res2[3]; + u8 spi_spim; + u8 res3[2]; + u8 spi_spcom; + u8 res4[82]; +} spictl_cpm2_t; + +/* CPM Mux. 
+*/ +typedef struct cpmux { + u8 cmx_si1cr; + u8 res1; + u8 cmx_si2cr; + u8 res2; + u32 cmx_fcr; + u32 cmx_scr; + u8 cmx_smr; + u8 res3; + u16 cmx_uar; + u8 res4[16]; +} cpmux_t; + +/* SIRAM control +*/ +typedef struct siram { + u16 si_amr; + u16 si_bmr; + u16 si_cmr; + u16 si_dmr; + u8 si_gmr; + u8 res1; + u8 si_cmdr; + u8 res2; + u8 si_str; + u8 res3; + u16 si_rsr; +} siramctl_t; + +typedef struct mcc { + u16 mcc_mcce; + u8 res1[2]; + u16 mcc_mccm; + u8 res2[2]; + u8 mcc_mccf; + u8 res3[7]; +} mcc_t; + +typedef struct comm_proc { + u32 cp_cpcr; + u32 cp_rccr; + u8 res1[14]; + u16 cp_rter; + u8 res2[2]; + u16 cp_rtmr; + u16 cp_rtscr; + u8 res3[2]; + u32 cp_rtsr; + u8 res4[12]; +} cpm_cpm2_t; + +/* USB Controller. +*/ +typedef struct usb_ctlr { + u8 usb_usmod; + u8 usb_usadr; + u8 usb_uscom; + u8 res1[1]; + u16 usb_usep1; + u16 usb_usep2; + u16 usb_usep3; + u16 usb_usep4; + u8 res2[4]; + u16 usb_usber; + u8 res3[2]; + u16 usb_usbmr; + u8 usb_usbs; + u8 res4[7]; +} usb_cpm2_t; + +/* ...and the whole thing wrapped up.... +*/ + +typedef struct immap { + /* Some references are into the unique and known dpram spaces, + * others are from the generic base. + */ +#define im_dprambase im_dpram1 + u8 im_dpram1[16*1024]; + u8 res1[16*1024]; + u8 im_dpram2[4*1024]; + u8 res2[8*1024]; + u8 im_dpram3[4*1024]; + u8 res3[16*1024]; + + sysconf_cpm2_t im_siu_conf; /* SIU Configuration */ + memctl_cpm2_t im_memctl; /* Memory Controller */ + sit_cpm2_t im_sit; /* System Integration Timers */ + pci_cpm2_t im_pci; /* PCI Controller */ + intctl_cpm2_t im_intctl; /* Interrupt Controller */ + car_cpm2_t im_clkrst; /* Clocks and reset */ + iop_cpm2_t im_ioport; /* IO Port control/status */ + cpmtimer_cpm2_t im_cpmtimer; /* CPM timers */ + sdma_cpm2_t im_sdma; /* SDMA control/status */ + + fcc_t im_fcc[3]; /* Three FCCs */ + u8 res4z[32]; + fcc_c_t im_fcc_c[3]; /* Continued FCCs */ + + u8 res4[32]; + + tclayer_t im_tclayer[8]; /* Eight TCLayers */ + u16 tc_tcgsr; + u16 tc_tcger; + + /* First set of baud rate generators. + */ + u8 res[236]; + u32 im_brgc5; + u32 im_brgc6; + u32 im_brgc7; + u32 im_brgc8; + + u8 res5[608]; + + i2c_cpm2_t im_i2c; /* I2C control/status */ + cpm_cpm2_t im_cpm; /* Communication processor */ + + /* Second set of baud rate generators. + */ + u32 im_brgc1; + u32 im_brgc2; + u32 im_brgc3; + u32 im_brgc4; + + scc_t im_scc[4]; /* Four SCCs */ + smc_t im_smc[2]; /* Couple of SMCs */ + spictl_cpm2_t im_spi; /* A SPI */ + cpmux_t im_cpmux; /* CPM clock route mux */ + siramctl_t im_siramctl1; /* First SI RAM Control */ + mcc_t im_mcc1; /* First MCC */ + siramctl_t im_siramctl2; /* Second SI RAM Control */ + mcc_t im_mcc2; /* Second MCC */ + usb_cpm2_t im_usb; /* USB Controller */ + + u8 res6[1153]; + + u16 im_si1txram[256]; + u8 res7[512]; + u16 im_si1rxram[256]; + u8 res8[512]; + u16 im_si2txram[256]; + u8 res9[512]; + u16 im_si2rxram[256]; + u8 res10[512]; + u8 res11[4096]; +} cpm2_map_t; + +extern cpm2_map_t *cpm2_immr; + +#endif /* __IMMAP_CPM2__ */ +#endif /* __KERNEL__ */ diff --git a/include/asm-ppc/m8260_pci.h b/include/asm-ppc/m8260_pci.h new file mode 100644 index 000000000..163a6b91d --- /dev/null +++ b/include/asm-ppc/m8260_pci.h @@ -0,0 +1,186 @@ +/* + * include/asm-ppc/m8260_pci.h + * + * Definitions for the MPC8250/MPC8265/MPC8266 integrated PCI host bridge. 
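Before the 8260 PCI bridge definitions start, here is a short sketch of how the cpm2_map_t above is normally reached through the exported cpm2_immr pointer, in this case to configure one parallel-port pin. EXAMPLE_PD_BIT is invented purely for illustration; real drivers use their own pin masks and board-specific setup.

#include <asm/immap_cpm2.h>

#define EXAMPLE_PD_BIT 0x00000001              /* made-up port D pin mask */

static void example_set_port_d_pin(void)
{
        volatile iop_cpm2_t *io = &cpm2_immr->im_ioport;

        io->iop_pdird |= EXAMPLE_PD_BIT;        /* make the pin an output */
        io->iop_pdatd |= EXAMPLE_PD_BIT;        /* and drive it high */
}
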
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifdef __KERNEL__ +#ifndef __M8260_PCI_H +#define __M8260_PCI_H + +#include + +/* + * Define the vendor/device ID for the MPC8265. + */ +#define PCI_DEVICE_ID_MPC8265 ((0x18C0 << 16) | PCI_VENDOR_ID_MOTOROLA) + +#define M8265_PCIBR0 0x101ac +#define M8265_PCIBR1 0x101b0 +#define M8265_PCIMSK0 0x101c4 +#define M8265_PCIMSK1 0x101c8 + +/* Bit definitions for PCIBR registers */ + +#define PCIBR_ENABLE 0x00000001 + +/* Bit definitions for PCIMSK registers */ + +#define PCIMSK_32KiB 0xFFFF8000 /* Size of window, smallest */ +#define PCIMSK_64KiB 0xFFFF0000 +#define PCIMSK_128KiB 0xFFFE0000 +#define PCIMSK_256KiB 0xFFFC0000 +#define PCIMSK_512KiB 0xFFF80000 +#define PCIMSK_1MiB 0xFFF00000 +#define PCIMSK_2MiB 0xFFE00000 +#define PCIMSK_4MiB 0xFFC00000 +#define PCIMSK_8MiB 0xFF800000 +#define PCIMSK_16MiB 0xFF000000 +#define PCIMSK_32MiB 0xFE000000 +#define PCIMSK_64MiB 0xFC000000 +#define PCIMSK_128MiB 0xF8000000 +#define PCIMSK_256MiB 0xF0000000 +#define PCIMSK_512MiB 0xE0000000 +#define PCIMSK_1GiB 0xC0000000 /* Size of window, largest */ + + +#define M826X_SCCR_PCI_MODE_EN 0x100 + + +/* + * Outbound ATU registers (3 sets). These registers control how 60x bus (local) + * addresses are translated to PCI addresses when the MPC826x is a PCI bus + * master (initiator). + */ + +#define POTAR_REG0 0x10800 /* PCI Outbound Translation Addr registers */ +#define POTAR_REG1 0x10818 +#define POTAR_REG2 0x10830 + +#define POBAR_REG0 0x10808 /* PCI Outbound Base Addr registers */ +#define POBAR_REG1 0x10820 +#define POBAR_REG2 0x10838 + +#define POCMR_REG0 0x10810 /* PCI Outbound Comparison Mask registers */ +#define POCMR_REG1 0x10828 +#define POCMR_REG2 0x10840 + +/* Bit definitions for POMCR registers */ + +#define POCMR_MASK_4KiB 0x000FFFFF +#define POCMR_MASK_8KiB 0x000FFFFE +#define POCMR_MASK_16KiB 0x000FFFFC +#define POCMR_MASK_32KiB 0x000FFFF8 +#define POCMR_MASK_64KiB 0x000FFFF0 +#define POCMR_MASK_128KiB 0x000FFFE0 +#define POCMR_MASK_256KiB 0x000FFFC0 +#define POCMR_MASK_512KiB 0x000FFF80 +#define POCMR_MASK_1MiB 0x000FFF00 +#define POCMR_MASK_2MiB 0x000FFE00 +#define POCMR_MASK_4MiB 0x000FFC00 +#define POCMR_MASK_8MiB 0x000FF800 +#define POCMR_MASK_16MiB 0x000FF000 +#define POCMR_MASK_32MiB 0x000FE000 +#define POCMR_MASK_64MiB 0x000FC000 +#define POCMR_MASK_128MiB 0x000F8000 +#define POCMR_MASK_256MiB 0x000F0000 +#define POCMR_MASK_512MiB 0x000E0000 +#define POCMR_MASK_1GiB 0x000C0000 + +#define POCMR_ENABLE 0x80000000 +#define POCMR_PCI_IO 0x40000000 +#define POCMR_PREFETCH_EN 0x20000000 + +/* Soft PCI reset */ + +#define PCI_GCR_REG 0x10880 + +/* Bit definitions for PCI_GCR registers */ + +#define PCIGCR_PCI_BUS_EN 0x1 + +#define PCI_EMR_REG 0x10888 +/* + * Inbound ATU registers (2 sets). These registers control how PCI addresses + * are translated to 60x bus (local) addresses when the MPC826x is a PCI bus target. 
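To make the outbound ATU registers above a little more concrete, the sketch below opens window 0 so that a 256 MiB region of the 60x bus maps 1:1 onto PCI memory space. The 0x80000000 base is invented, the >>12 encoding mirrors the POCMR mask values defined in this header, and the bridge registers are actually little-endian, so production code would use out_le32() rather than the plain stores shown here.

#include <asm/immap_cpm2.h>
#include <asm/m8260_pci.h>

/* Sketch with made-up addresses: forward 60x 0x80000000 to PCI 0x80000000. */
static void example_outbound_window(void)
{
        volatile pci_cpm2_t *pci = &cpm2_immr->im_pci;

        pci->pci_potar0 = 0x80000000 >> 12;     /* PCI-side base    */
        pci->pci_pobar0 = 0x80000000 >> 12;     /* 60x (local) base */
        pci->pci_pocmr0 = POCMR_ENABLE | POCMR_MASK_256MiB;
}
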
+ */ + +#define PITAR_REG1 0x108D0 +#define PIBAR_REG1 0x108D8 +#define PICMR_REG1 0x108E0 +#define PITAR_REG0 0x108E8 +#define PIBAR_REG0 0x108F0 +#define PICMR_REG0 0x108F8 + +/* Bit definitions for PCI Inbound Comparison Mask registers */ + +#define PICMR_MASK_4KiB 0x000FFFFF +#define PICMR_MASK_8KiB 0x000FFFFE +#define PICMR_MASK_16KiB 0x000FFFFC +#define PICMR_MASK_32KiB 0x000FFFF8 +#define PICMR_MASK_64KiB 0x000FFFF0 +#define PICMR_MASK_128KiB 0x000FFFE0 +#define PICMR_MASK_256KiB 0x000FFFC0 +#define PICMR_MASK_512KiB 0x000FFF80 +#define PICMR_MASK_1MiB 0x000FFF00 +#define PICMR_MASK_2MiB 0x000FFE00 +#define PICMR_MASK_4MiB 0x000FFC00 +#define PICMR_MASK_8MiB 0x000FF800 +#define PICMR_MASK_16MiB 0x000FF000 +#define PICMR_MASK_32MiB 0x000FE000 +#define PICMR_MASK_64MiB 0x000FC000 +#define PICMR_MASK_128MiB 0x000F8000 +#define PICMR_MASK_256MiB 0x000F0000 +#define PICMR_MASK_512MiB 0x000E0000 +#define PICMR_MASK_1GiB 0x000C0000 + +#define PICMR_ENABLE 0x80000000 +#define PICMR_NO_SNOOP_EN 0x40000000 +#define PICMR_PREFETCH_EN 0x20000000 + +/* PCI error Registers */ + +#define PCI_ERROR_STATUS_REG 0x10884 +#define PCI_ERROR_MASK_REG 0x10888 +#define PCI_ERROR_CONTROL_REG 0x1088C +#define PCI_ERROR_ADRS_CAPTURE_REG 0x10890 +#define PCI_ERROR_DATA_CAPTURE_REG 0x10898 +#define PCI_ERROR_CTRL_CAPTURE_REG 0x108A0 + +/* PCI error Register bit defines */ + +#define PCI_ERROR_PCI_ADDR_PAR 0x00000001 +#define PCI_ERROR_PCI_DATA_PAR_WR 0x00000002 +#define PCI_ERROR_PCI_DATA_PAR_RD 0x00000004 +#define PCI_ERROR_PCI_NO_RSP 0x00000008 +#define PCI_ERROR_PCI_TAR_ABT 0x00000010 +#define PCI_ERROR_PCI_SERR 0x00000020 +#define PCI_ERROR_PCI_PERR_RD 0x00000040 +#define PCI_ERROR_PCI_PERR_WR 0x00000080 +#define PCI_ERROR_I2O_OFQO 0x00000100 +#define PCI_ERROR_I2O_IPQO 0x00000200 +#define PCI_ERROR_IRA 0x00000400 +#define PCI_ERROR_NMI 0x00000800 +#define PCI_ERROR_I2O_DBMC 0x00001000 + +/* + * Register pair used to generate configuration cycles on the PCI bus + * and access the MPC826x's own PCI configuration registers. + */ + +#define PCI_CFG_ADDR_REG 0x10900 +#define PCI_CFG_DATA_REG 0x10904 + +/* Bus parking decides where the bus control sits when idle */ +/* If modifying memory controllers for PCI park on the core */ + +#define PPC_ACR_BUS_PARK_CORE 0x6 +#define PPC_ACR_BUS_PARK_PCI 0x3 + +#endif /* __M8260_PCI_H */ +#endif /* __KERNEL__ */ diff --git a/include/asm-ppc/mpc52xx_psc.h b/include/asm-ppc/mpc52xx_psc.h new file mode 100644 index 000000000..483102ea6 --- /dev/null +++ b/include/asm-ppc/mpc52xx_psc.h @@ -0,0 +1,191 @@ +/* + * include/asm-ppc/mpc52xx_psc.h + * + * Definitions of consts/structs to drive the Freescale MPC52xx OnChip + * PSCs. Theses are shared between multiple drivers since a PSC can be + * UART, AC97, IR, I2S, ... So this header is in asm-ppc. + * + * + * Maintainer : Sylvain Munaut + * + * Based/Extracted from some header of the 2.4 originally written by + * Dale Farnsworth + * + * Copyright (C) 2004 Sylvain Munaut + * Copyright (C) 2003 MontaVista, Software, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. 
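The PCI_CFG_ADDR_REG / PCI_CFG_DATA_REG pair above works like the usual indirect configuration mechanism: write an enable bit plus bus, devfn and register number to the address register, then move data through the data register. A rough sketch follows; immr is a hypothetical mapped IMMR base, the type-1-style encoding is assumed from the standard scheme rather than quoted from the 8260 manual, and real code would use the little-endian accessors for the data register.

/* Sketch only -- check the encoding against the 8260 manual. */
static unsigned int example_cfg_read32(char *immr, int bus, int devfn, int off)
{
        volatile unsigned int *addr =
                (volatile unsigned int *)(immr + PCI_CFG_ADDR_REG);
        volatile unsigned int *data =
                (volatile unsigned int *)(immr + PCI_CFG_DATA_REG);

        *addr = 0x80000000 | (bus << 16) | (devfn << 8) | (off & 0xfc);
        return *data;
}
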
+ */ + +#ifndef __MPC52xx_PSC_H__ +#define __MPC52xx_PSC_H__ + +#include + +/* Max number of PSCs */ +#define MPC52xx_PSC_MAXNUM 6 + +/* Programmable Serial Controller (PSC) status register bits */ +#define MPC52xx_PSC_SR_CDE 0x0080 +#define MPC52xx_PSC_SR_RXRDY 0x0100 +#define MPC52xx_PSC_SR_RXFULL 0x0200 +#define MPC52xx_PSC_SR_TXRDY 0x0400 +#define MPC52xx_PSC_SR_TXEMP 0x0800 +#define MPC52xx_PSC_SR_OE 0x1000 +#define MPC52xx_PSC_SR_PE 0x2000 +#define MPC52xx_PSC_SR_FE 0x4000 +#define MPC52xx_PSC_SR_RB 0x8000 + +/* PSC Command values */ +#define MPC52xx_PSC_RX_ENABLE 0x0001 +#define MPC52xx_PSC_RX_DISABLE 0x0002 +#define MPC52xx_PSC_TX_ENABLE 0x0004 +#define MPC52xx_PSC_TX_DISABLE 0x0008 +#define MPC52xx_PSC_SEL_MODE_REG_1 0x0010 +#define MPC52xx_PSC_RST_RX 0x0020 +#define MPC52xx_PSC_RST_TX 0x0030 +#define MPC52xx_PSC_RST_ERR_STAT 0x0040 +#define MPC52xx_PSC_RST_BRK_CHG_INT 0x0050 +#define MPC52xx_PSC_START_BRK 0x0060 +#define MPC52xx_PSC_STOP_BRK 0x0070 + +/* PSC TxRx FIFO status bits */ +#define MPC52xx_PSC_RXTX_FIFO_ERR 0x0040 +#define MPC52xx_PSC_RXTX_FIFO_UF 0x0020 +#define MPC52xx_PSC_RXTX_FIFO_OF 0x0010 +#define MPC52xx_PSC_RXTX_FIFO_FR 0x0008 +#define MPC52xx_PSC_RXTX_FIFO_FULL 0x0004 +#define MPC52xx_PSC_RXTX_FIFO_ALARM 0x0002 +#define MPC52xx_PSC_RXTX_FIFO_EMPTY 0x0001 + +/* PSC interrupt mask bits */ +#define MPC52xx_PSC_IMR_TXRDY 0x0100 +#define MPC52xx_PSC_IMR_RXRDY 0x0200 +#define MPC52xx_PSC_IMR_DB 0x0400 +#define MPC52xx_PSC_IMR_IPC 0x8000 + +/* PSC input port change bit */ +#define MPC52xx_PSC_CTS 0x01 +#define MPC52xx_PSC_DCD 0x02 +#define MPC52xx_PSC_D_CTS 0x10 +#define MPC52xx_PSC_D_DCD 0x20 + +/* PSC mode fields */ +#define MPC52xx_PSC_MODE_5_BITS 0x00 +#define MPC52xx_PSC_MODE_6_BITS 0x01 +#define MPC52xx_PSC_MODE_7_BITS 0x02 +#define MPC52xx_PSC_MODE_8_BITS 0x03 +#define MPC52xx_PSC_MODE_BITS_MASK 0x03 +#define MPC52xx_PSC_MODE_PAREVEN 0x00 +#define MPC52xx_PSC_MODE_PARODD 0x04 +#define MPC52xx_PSC_MODE_PARFORCE 0x08 +#define MPC52xx_PSC_MODE_PARNONE 0x10 +#define MPC52xx_PSC_MODE_ERR 0x20 +#define MPC52xx_PSC_MODE_FFULL 0x40 +#define MPC52xx_PSC_MODE_RXRTS 0x80 + +#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS 0x00 +#define MPC52xx_PSC_MODE_ONE_STOP 0x07 +#define MPC52xx_PSC_MODE_TWO_STOP 0x0f + +#define MPC52xx_PSC_RFNUM_MASK 0x01ff + + +/* Structure of the hardware registers */ +struct mpc52xx_psc { + volatile u8 mode; /* PSC + 0x00 */ + volatile u8 reserved0[3]; + union { /* PSC + 0x04 */ + volatile u16 status; + volatile u16 clock_select; + } sr_csr; +#define mpc52xx_psc_status sr_csr.status +#define mpc52xx_psc_clock_select sr_csr.clock_select + volatile u16 reserved1; + volatile u8 command; /* PSC + 0x08 */ +volatile u8 reserved2[3]; + union { /* PSC + 0x0c */ + volatile u8 buffer_8; + volatile u16 buffer_16; + volatile u32 buffer_32; + } buffer; +#define mpc52xx_psc_buffer_8 buffer.buffer_8 +#define mpc52xx_psc_buffer_16 buffer.buffer_16 +#define mpc52xx_psc_buffer_32 buffer.buffer_32 + union { /* PSC + 0x10 */ + volatile u8 ipcr; + volatile u8 acr; + } ipcr_acr; +#define mpc52xx_psc_ipcr ipcr_acr.ipcr +#define mpc52xx_psc_acr ipcr_acr.acr + volatile u8 reserved3[3]; + union { /* PSC + 0x14 */ + volatile u16 isr; + volatile u16 imr; + } isr_imr; +#define mpc52xx_psc_isr isr_imr.isr +#define mpc52xx_psc_imr isr_imr.imr + volatile u16 reserved4; + volatile u8 ctur; /* PSC + 0x18 */ + volatile u8 reserved5[3]; + volatile u8 ctlr; /* PSC + 0x1c */ + volatile u8 reserved6[3]; + volatile u16 ccr; /* PSC + 0x20 */ + volatile u8 reserved7[14]; + volatile u8 ivr; /* PSC 
+ 0x30 */ + volatile u8 reserved8[3]; + volatile u8 ip; /* PSC + 0x34 */ + volatile u8 reserved9[3]; + volatile u8 op1; /* PSC + 0x38 */ + volatile u8 reserved10[3]; + volatile u8 op0; /* PSC + 0x3c */ + volatile u8 reserved11[3]; + volatile u32 sicr; /* PSC + 0x40 */ + volatile u8 ircr1; /* PSC + 0x44 */ + volatile u8 reserved13[3]; + volatile u8 ircr2; /* PSC + 0x44 */ + volatile u8 reserved14[3]; + volatile u8 irsdr; /* PSC + 0x4c */ + volatile u8 reserved15[3]; + volatile u8 irmdr; /* PSC + 0x50 */ + volatile u8 reserved16[3]; + volatile u8 irfdr; /* PSC + 0x54 */ + volatile u8 reserved17[3]; + volatile u16 rfnum; /* PSC + 0x58 */ + volatile u16 reserved18; + volatile u16 tfnum; /* PSC + 0x5c */ + volatile u16 reserved19; + volatile u32 rfdata; /* PSC + 0x60 */ + volatile u16 rfstat; /* PSC + 0x64 */ + volatile u16 reserved20; + volatile u8 rfcntl; /* PSC + 0x68 */ + volatile u8 reserved21[5]; + volatile u16 rfalarm; /* PSC + 0x6e */ + volatile u16 reserved22; + volatile u16 rfrptr; /* PSC + 0x72 */ + volatile u16 reserved23; + volatile u16 rfwptr; /* PSC + 0x76 */ + volatile u16 reserved24; + volatile u16 rflrfptr; /* PSC + 0x7a */ + volatile u16 reserved25; + volatile u16 rflwfptr; /* PSC + 0x7e */ + volatile u32 tfdata; /* PSC + 0x80 */ + volatile u16 tfstat; /* PSC + 0x84 */ + volatile u16 reserved26; + volatile u8 tfcntl; /* PSC + 0x88 */ + volatile u8 reserved27[5]; + volatile u16 tfalarm; /* PSC + 0x8e */ + volatile u16 reserved28; + volatile u16 tfrptr; /* PSC + 0x92 */ + volatile u16 reserved29; + volatile u16 tfwptr; /* PSC + 0x96 */ + volatile u16 reserved30; + volatile u16 tflrfptr; /* PSC + 0x9a */ + volatile u16 reserved31; + volatile u16 tflwfptr; /* PSC + 0x9e */ +}; + + +#endif /* __MPC52xx_PSC_H__ */ diff --git a/include/asm-ppc/mpc85xx.h b/include/asm-ppc/mpc85xx.h new file mode 100644 index 000000000..276a81706 --- /dev/null +++ b/include/asm-ppc/mpc85xx.h @@ -0,0 +1,128 @@ +/* + * include/asm-ppc/mpc85xx.h + * + * MPC85xx definitions + * + * Maintainer: Kumar Gala + * + * Copyright 2004 Freescale Semiconductor, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifdef __KERNEL__ +#ifndef __ASM_MPC85xx_H__ +#define __ASM_MPC85xx_H__ + +#include +#include + +#ifdef CONFIG_85xx + +#ifdef CONFIG_MPC8540_ADS +#include +#endif + +#define _IO_BASE isa_io_base +#define _ISA_MEM_BASE isa_mem_base +#define PCI_DRAM_OFFSET pci_dram_offset + +/* + * The "residual" board information structure the boot loader passes + * into the kernel. 
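The struct mpc52xx_psc layout above is enough to sketch the classic polled transmit path a PSC console driver uses: wait for TXRDY in the status register, then write one byte through the 8-bit view of the Tx buffer. The psc argument is assumed to point at an already-mapped PSC that has been configured as a UART.

#include <asm/mpc52xx_psc.h>

/* Sketch only: "psc" is an already-mapped PSC in UART mode. */
static void example_psc_putc(struct mpc52xx_psc *psc, unsigned char c)
{
        while (!(psc->mpc52xx_psc_status & MPC52xx_PSC_SR_TXRDY))
                ;                               /* busy-wait for room */

        psc->mpc52xx_psc_buffer_8 = c;          /* 8-bit Tx buffer view */
}
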
+ */ +extern unsigned char __res[]; + +/* Internal IRQs on MPC85xx OpenPIC */ +/* Not all of these exist on all MPC85xx implementations */ + +#ifndef MPC85xx_OPENPIC_IRQ_OFFSET +#define MPC85xx_OPENPIC_IRQ_OFFSET 64 +#endif + +/* The 32 internal sources */ +#define MPC85xx_IRQ_L2CACHE ( 0 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_ECM ( 1 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_DDR ( 2 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_LBIU ( 3 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_DMA0 ( 4 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_DMA1 ( 5 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_DMA2 ( 6 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_DMA3 ( 7 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_PCI1 ( 8 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_PCI2 ( 9 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_RIO_ERROR ( 9 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_RIO_BELL (10 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_RIO_TX (11 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_RIO_RX (12 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC1_TX (13 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC1_RX (14 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC1_ERROR (18 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC2_TX (19 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC2_RX (20 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_TSEC2_ERROR (24 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_FEC (25 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_DUART (26 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_IIC1 (27 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_PERFMON (28 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_CPM (30 + MPC85xx_OPENPIC_IRQ_OFFSET) + +/* The 12 external interrupt lines */ +#define MPC85xx_IRQ_EXT0 (32 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT1 (33 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT2 (34 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT3 (35 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT4 (36 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT5 (37 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT6 (38 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT7 (39 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT8 (40 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT9 (41 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT10 (42 + MPC85xx_OPENPIC_IRQ_OFFSET) +#define MPC85xx_IRQ_EXT11 (43 + MPC85xx_OPENPIC_IRQ_OFFSET) + +/* Offset from CCSRBAR */ +#define MPC85xx_CPM_OFFSET (0x80000) +#define MPC85xx_CPM_SIZE (0x40000) +#define MPC85xx_DMA_OFFSET (0x21000) +#define MPC85xx_DMA_SIZE (0x01000) +#define MPC85xx_ENET1_OFFSET (0x24000) +#define MPC85xx_ENET1_SIZE (0x01000) +#define MPC85xx_ENET2_OFFSET (0x25000) +#define MPC85xx_ENET2_SIZE (0x01000) +#define MPC85xx_ENET3_OFFSET (0x26000) +#define MPC85xx_ENET3_SIZE (0x01000) +#define MPC85xx_GUTS_OFFSET (0xe0000) +#define MPC85xx_GUTS_SIZE (0x01000) +#define MPC85xx_IIC1_OFFSET (0x03000) +#define MPC85xx_IIC1_SIZE (0x01000) +#define MPC85xx_OPENPIC_OFFSET (0x40000) +#define MPC85xx_OPENPIC_SIZE (0x40000) +#define MPC85xx_PCI1_OFFSET (0x08000) +#define MPC85xx_PCI1_SIZE (0x01000) +#define MPC85xx_PCI2_OFFSET (0x09000) +#define MPC85xx_PCI2_SIZE (0x01000) +#define MPC85xx_PERFMON_OFFSET (0xe1000) +#define MPC85xx_PERFMON_SIZE (0x01000) +#define MPC85xx_UART0_OFFSET (0x04500) +#define MPC85xx_UART0_SIZE (0x00100) +#define MPC85xx_UART1_OFFSET 
(0x04600) +#define MPC85xx_UART1_SIZE (0x00100) + +#define MPC85xx_CCSRBAR_SIZE (1024*1024) + +/* Let modules/drivers get at CCSRBAR */ +extern phys_addr_t get_ccsrbar(void); + +#ifdef MODULE +#define CCSRBAR get_ccsrbar() +#else +#define CCSRBAR BOARD_CCSRBAR +#endif + +#endif /* CONFIG_85xx */ +#endif /* __ASM_MPC85xx_H__ */ +#endif /* __KERNEL__ */ diff --git a/include/asm-sh/adc.h b/include/asm-sh/adc.h new file mode 100644 index 000000000..64747dc61 --- /dev/null +++ b/include/asm-sh/adc.h @@ -0,0 +1,12 @@ +#ifndef __ASM_ADC_H +#define __ASM_ADC_H + +/* + * Copyright (C) 2004 Andriy Skulysh + */ + +#include + +int adc_single(unsigned int channel); + +#endif /* __ASM_ADC_H */ diff --git a/include/asm-sh/cpu-sh3/adc.h b/include/asm-sh/cpu-sh3/adc.h new file mode 100644 index 000000000..b289e3ca1 --- /dev/null +++ b/include/asm-sh/cpu-sh3/adc.h @@ -0,0 +1,28 @@ +#ifndef __ASM_CPU_SH3_ADC_H +#define __ASM_CPU_SH3_ADC_H + +/* + * Copyright (C) 2004 Andriy Skulysh + */ + + +#define ADDRAH 0xa4000080 +#define ADDRAL 0xa4000082 +#define ADDRBH 0xa4000084 +#define ADDRBL 0xa4000086 +#define ADDRCH 0xa4000088 +#define ADDRCL 0xa400008a +#define ADDRDH 0xa400008c +#define ADDRDL 0xa400008e +#define ADCSR 0xa4000090 + +#define ADCSR_ADF 0x80 +#define ADCSR_ADIE 0x40 +#define ADCSR_ADST 0x20 +#define ADCSR_MULTI 0x10 +#define ADCSR_CKS 0x08 +#define ADCSR_CH_MASK 0x07 + +#define ADCR 0xa4000092 + +#endif /* __ASM_CPU_SH3_ADC_H */ diff --git a/include/asm-sh64/a.out.h b/include/asm-sh64/a.out.h new file mode 100644 index 000000000..e1995e86b --- /dev/null +++ b/include/asm-sh64/a.out.h @@ -0,0 +1,37 @@ +#ifndef __ASM_SH64_A_OUT_H +#define __ASM_SH64_A_OUT_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/a.out.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +struct exec +{ + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ + unsigned a_text; /* length of text, in bytes */ + unsigned a_data; /* length of data, in bytes */ + unsigned a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned a_syms; /* length of symbol table data in file, in bytes */ + unsigned a_entry; /* start address */ + unsigned a_trsize; /* length of relocation info for text, in bytes */ + unsigned a_drsize; /* length of relocation info for data, in bytes */ +}; + +#define N_TRSIZE(a) ((a).a_trsize) +#define N_DRSIZE(a) ((a).a_drsize) +#define N_SYMSIZE(a) ((a).a_syms) + +#ifdef __KERNEL__ + +#define STACK_TOP TASK_SIZE + +#endif + +#endif /* __ASM_SH64_A_OUT_H */ diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h new file mode 100644 index 000000000..8c3872d3e --- /dev/null +++ b/include/asm-sh64/atomic.h @@ -0,0 +1,126 @@ +#ifndef __ASM_SH64_ATOMIC_H +#define __ASM_SH64_ATOMIC_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/atomic.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * + */ + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc.. 
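Tying the offset macros together: a driver that wants, say, the first DUART maps it relative to CCSRBAR. A minimal sketch, assuming the standard ioremap() and that the board header has provided BOARD_CCSRBAR:

#include <asm/io.h>
#include <asm/mpc85xx.h>

/* Sketch only: returns a virtual mapping of the first DUART block. */
static void *example_map_uart0(void)
{
        return ioremap(CCSRBAR + MPC85xx_UART0_OFFSET, MPC85xx_UART0_SIZE);
}
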
+ * + */ + +typedef struct { volatile int counter; } atomic_t; + +#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) + +#define atomic_read(v) ((v)->counter) +#define atomic_set(v,i) ((v)->counter = (i)) + +#include + +/* + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. + */ + +static __inline__ void atomic_add(int i, atomic_t * v) +{ + unsigned long flags; + + local_irq_save(flags); + *(long *)v += i; + local_irq_restore(flags); +} + +static __inline__ void atomic_sub(int i, atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + *(long *)v -= i; + local_irq_restore(flags); +} + +static __inline__ int atomic_add_return(int i, atomic_t * v) +{ + unsigned long temp, flags; + + local_irq_save(flags); + temp = *(long *)v; + temp += i; + *(long *)v = temp; + local_irq_restore(flags); + + return temp; +} + +#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0) + +static __inline__ int atomic_sub_return(int i, atomic_t * v) +{ + unsigned long temp, flags; + + local_irq_save(flags); + temp = *(long *)v; + temp -= i; + *(long *)v = temp; + local_irq_restore(flags); + + return temp; +} + +#define atomic_dec_return(v) atomic_sub_return(1,(v)) +#define atomic_inc_return(v) atomic_add_return(1,(v)) + +/* + * atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) + +#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) +#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) + +#define atomic_inc(v) atomic_add(1,(v)) +#define atomic_dec(v) atomic_sub(1,(v)) + +static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + *(long *)v &= ~mask; + local_irq_restore(flags); +} + +static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v) +{ + unsigned long flags; + + local_irq_save(flags); + *(long *)v |= mask; + local_irq_restore(flags); +} + +/* Atomic operations are already serializing on SH */ +#define smp_mb__before_atomic_dec() barrier() +#define smp_mb__after_atomic_dec() barrier() +#define smp_mb__before_atomic_inc() barrier() +#define smp_mb__after_atomic_inc() barrier() + +#endif /* __ASM_SH64_ATOMIC_H */ diff --git a/include/asm-sh64/bugs.h b/include/asm-sh64/bugs.h new file mode 100644 index 000000000..05554aaea --- /dev/null +++ b/include/asm-sh64/bugs.h @@ -0,0 +1,38 @@ +#ifndef __ASM_SH64_BUGS_H +#define __ASM_SH64_BUGS_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/bugs.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * + */ + +/* + * This is included by init/main.c to check for architecture-dependent bugs. + * + * Needs: + * void check_bugs(void); + */ + +/* + * I don't know of any Super-H bugs yet. 
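Since these sh64 atomics are implemented by briefly masking interrupts rather than with a load-locked/store-conditional sequence, they behave like the generic uniprocessor versions; a typical use is plain reference counting, for example:

#include <asm/atomic.h>

/* Illustrative refcount pattern built on the primitives above. */
static atomic_t example_refs = ATOMIC_INIT(1);

static void example_put(void (*release)(void))
{
        if (atomic_dec_and_test(&example_refs))
                release();      /* last reference just went away */
}
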
+ */ + +#include + +static void __init check_bugs(void) +{ + extern char *get_cpu_subtype(void); + extern unsigned long loops_per_jiffy; + + cpu_data->loops_per_jiffy = loops_per_jiffy; + + printk("CPU: %s\n", get_cpu_subtype()); +} +#endif /* __ASM_SH64_BUGS_H */ diff --git a/include/asm-sh64/cache.h b/include/asm-sh64/cache.h new file mode 100644 index 000000000..f54e85e8a --- /dev/null +++ b/include/asm-sh64/cache.h @@ -0,0 +1,141 @@ +#ifndef __ASM_SH64_CACHE_H +#define __ASM_SH64_CACHE_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/cache.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003, 2004 Paul Mundt + * + */ +#include + +#define L1_CACHE_SHIFT 5 +/* bytes per L1 cache line */ +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define L1_CACHE_ALIGN_MASK (~(L1_CACHE_BYTES - 1)) +#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES - 1)) & L1_CACHE_ALIGN_MASK) +#define L1_CACHE_SIZE_BYTES (L1_CACHE_BYTES << 10) +/* Largest L1 which this arch supports */ +#define L1_CACHE_SHIFT_MAX 5 + +#ifdef MODULE +#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES))) +#else +#define __cacheline_aligned \ + __attribute__((__aligned__(L1_CACHE_BYTES), \ + __section__(".data.cacheline_aligned"))) +#endif + +/* + * Control Registers. + */ +#define ICCR_BASE 0x01600000 /* Instruction Cache Control Register */ +#define ICCR_REG0 0 /* Register 0 offset */ +#define ICCR_REG1 1 /* Register 1 offset */ +#define ICCR0 ICCR_BASE+ICCR_REG0 +#define ICCR1 ICCR_BASE+ICCR_REG1 + +#define ICCR0_OFF 0x0 /* Set ICACHE off */ +#define ICCR0_ON 0x1 /* Set ICACHE on */ +#define ICCR0_ICI 0x2 /* Invalidate all in IC */ + +#define ICCR1_NOLOCK 0x0 /* Set No Locking */ + +#define OCCR_BASE 0x01E00000 /* Operand Cache Control Register */ +#define OCCR_REG0 0 /* Register 0 offset */ +#define OCCR_REG1 1 /* Register 1 offset */ +#define OCCR0 OCCR_BASE+OCCR_REG0 +#define OCCR1 OCCR_BASE+OCCR_REG1 + +#define OCCR0_OFF 0x0 /* Set OCACHE off */ +#define OCCR0_ON 0x1 /* Set OCACHE on */ +#define OCCR0_OCI 0x2 /* Invalidate all in OC */ +#define OCCR0_WT 0x4 /* Set OCACHE in WT Mode */ +#define OCCR0_WB 0x0 /* Set OCACHE in WB Mode */ + +#define OCCR1_NOLOCK 0x0 /* Set No Locking */ + + +/* + * SH-5 + * A bit of description here, for neff=32. + * + * |<--- tag (19 bits) --->| + * +-----------------------------+-----------------+------+----------+------+ + * | | | ways |set index |offset| + * +-----------------------------+-----------------+------+----------+------+ + * ^ 2 bits 8 bits 5 bits + * +- Bit 31 + * + * Cacheline size is based on offset: 5 bits = 32 bytes per line + * A cache line is identified by a tag + set but OCACHETAG/ICACHETAG + * have a broader space for registers. These are outlined by + * CACHE_?C_*_STEP below. + * + */ + +/* Valid and Dirty bits */ +#define SH_CACHE_VALID (1LL<<0) +#define SH_CACHE_UPDATED (1LL<<57) + +/* Cache flags */ +#define SH_CACHE_MODE_WT (1LL<<0) +#define SH_CACHE_MODE_WB (1LL<<1) + +#ifndef __ASSEMBLY__ + +/* + * Cache information structure. + * + * Defined for both I and D cache, per-processor. 
+ */ +struct cache_info { + unsigned int ways; + unsigned int sets; + unsigned int linesz; + + unsigned int way_shift; + unsigned int entry_shift; + unsigned int set_shift; + unsigned int way_step_shift; + unsigned int asid_shift; + + unsigned int way_ofs; + + unsigned int asid_mask; + unsigned int idx_mask; + unsigned int epn_mask; + + unsigned long flags; +}; + +#endif /* __ASSEMBLY__ */ + +/* Instruction cache */ +#define CACHE_IC_ADDRESS_ARRAY 0x01000000 + +/* Operand Cache */ +#define CACHE_OC_ADDRESS_ARRAY 0x01800000 + +/* These declarations relate to cache 'synonyms' in the operand cache. A + 'synonym' occurs where effective address bits overlap between those used for + indexing the cache sets and those passed to the MMU for translation. In the + case of SH5-101 & SH5-103, only bit 12 is affected for 4k pages. */ + +#define CACHE_OC_N_SYNBITS 1 /* Number of synonym bits */ +#define CACHE_OC_SYN_SHIFT 12 +/* Mask to select synonym bit(s) */ +#define CACHE_OC_SYN_MASK (((1UL< + +#endif /* __ASM_SH64_CPUMASK_H */ diff --git a/include/asm-sh64/current.h b/include/asm-sh64/current.h new file mode 100644 index 000000000..261224339 --- /dev/null +++ b/include/asm-sh64/current.h @@ -0,0 +1,28 @@ +#ifndef __ASM_SH64_CURRENT_H +#define __ASM_SH64_CURRENT_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/current.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * + */ + +#include + +struct task_struct; + +static __inline__ struct task_struct * get_current(void) +{ + return current_thread_info()->task; +} + +#define current get_current() + +#endif /* __ASM_SH64_CURRENT_H */ + diff --git a/include/asm-sh64/delay.h b/include/asm-sh64/delay.h new file mode 100644 index 000000000..6ae31301a --- /dev/null +++ b/include/asm-sh64/delay.h @@ -0,0 +1,11 @@ +#ifndef __ASM_SH64_DELAY_H +#define __ASM_SH64_DELAY_H + +extern void __delay(int loops); +extern void __udelay(unsigned long long usecs, unsigned long lpj); +extern void __ndelay(unsigned long long nsecs, unsigned long lpj); +extern void udelay(unsigned long usecs); +extern void ndelay(unsigned long nsecs); + +#endif /* __ASM_SH64_DELAY_H */ + diff --git a/include/asm-sh64/div64.h b/include/asm-sh64/div64.h new file mode 100644 index 000000000..f75869565 --- /dev/null +++ b/include/asm-sh64/div64.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_DIV64_H +#define __ASM_SH64_DIV64_H + +#include + +#endif /* __ASM_SH64_DIV64_H */ diff --git a/include/asm-sh64/dma.h b/include/asm-sh64/dma.h new file mode 100644 index 000000000..e701f3947 --- /dev/null +++ b/include/asm-sh64/dma.h @@ -0,0 +1,41 @@ +#ifndef __ASM_SH64_DMA_H +#define __ASM_SH64_DMA_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/dma.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * + */ + +#include +#include +#include + +#define MAX_DMA_CHANNELS 4 + +/* + * SH5 can DMA in any memory area. + * + * The static definition is dodgy because it should limit + * the highest DMA-able address based on the actual + * Physical memory available. This is actually performed + * at run time in defining the memory allowed to DMA_ZONE. 
+ */ +#define MAX_DMA_ADDRESS ~(NPHYS_MASK) + +#define DMA_MODE_READ 0 +#define DMA_MODE_WRITE 1 + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* __ASM_SH64_DMA_H */ diff --git a/include/asm-sh64/elf.h b/include/asm-sh64/elf.h new file mode 100644 index 000000000..bc483669d --- /dev/null +++ b/include/asm-sh64/elf.h @@ -0,0 +1,101 @@ +#ifndef __ASM_SH64_ELF_H +#define __ASM_SH64_ELF_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/elf.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +/* + * ELF register definitions.. + */ + +#include +#include +#include + +typedef unsigned long elf_greg_t; + +#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +typedef struct user_fpu_struct elf_fpregset_t; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ( (x)->e_machine == EM_SH ) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS32 +#ifdef __LITTLE_ENDIAN__ +#define ELF_DATA ELFDATA2LSB +#else +#define ELF_DATA ELFDATA2MSB +#endif +#define ELF_ARCH EM_SH + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical + use of this is to invoke "./ld.so someprog" to test out a new version of + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) + + +#define ELF_CORE_COPY_REGS(_dest,_regs) \ + memcpy((char *) &_dest, (char *) _regs, \ + sizeof(struct pt_regs)); + +/* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This could be done in user space, + but it's not easy, and we've already done it here. */ + +#define ELF_HWCAP (0) + +/* This yields a string that ld.so will use to load implementation + specific libraries for optimization. This is more specific in + intent than poking at uname or /proc/cpuinfo. + + For the moment, we have only optimizations for the Intel generations, + but that could change... 
*/ + +#define ELF_PLATFORM (NULL) + +#define ELF_PLAT_INIT(_r, load_addr) \ + do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \ + _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \ + _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \ + _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \ + _r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \ + _r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \ + _r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \ + _r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \ + _r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \ + _r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \ + _r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \ + _r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \ + _r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \ + _r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \ + _r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \ + _r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \ + _r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \ + _r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \ + _r->sr = SR_FD | SR_MMU; } while (0) + +#ifdef __KERNEL__ +#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT) +#endif + +#endif /* __ASM_SH64_ELF_H */ diff --git a/include/asm-sh64/errno.h b/include/asm-sh64/errno.h new file mode 100644 index 000000000..57b46d4bd --- /dev/null +++ b/include/asm-sh64/errno.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_ERRNO_H +#define __ASM_SH64_ERRNO_H + +#include + +#endif /* __ASM_SH64_ERRNO_H */ diff --git a/include/asm-sh64/fcntl.h b/include/asm-sh64/fcntl.h new file mode 100644 index 000000000..ffcc36c64 --- /dev/null +++ b/include/asm-sh64/fcntl.h @@ -0,0 +1,7 @@ +#ifndef __ASM_SH64_FCNTL_H +#define __ASM_SH64_FCNTL_H + +#include + +#endif /* __ASM_SH64_FCNTL_H */ + diff --git a/include/asm-sh64/hardware.h b/include/asm-sh64/hardware.h new file mode 100644 index 000000000..a2e611262 --- /dev/null +++ b/include/asm-sh64/hardware.h @@ -0,0 +1,45 @@ +#ifndef __ASM_SH64_HARDWARE_H +#define __ASM_SH64_HARDWARE_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/hardware.h + * + * Copyright (C) 2002 Stuart Menefy + * Copyright (C) 2003 Paul Mundt + * + * Defitions of the locations of registers in the physical address space. 
+ */ + +#define PHYS_PERIPHERAL_BLOCK 0x09000000 +#define PHYS_DMAC_BLOCK 0x0e000000 +#define PHYS_PCI_BLOCK 0x60000000 + +#ifndef __ASSEMBLY__ +#include +#include + +struct vcr_info { + u8 perr_flags; /* P-port Error flags */ + u8 merr_flags; /* Module Error flags */ + u16 mod_vers; /* Module Version */ + u16 mod_id; /* Module ID */ + u8 bot_mb; /* Bottom Memory block */ + u8 top_mb; /* Top Memory block */ +}; + +static inline struct vcr_info sh64_get_vcr_info(unsigned long base) +{ + unsigned long long tmp; + + tmp = sh64_in64(base); + + return *((struct vcr_info *)&tmp); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_SH64_HARDWARE_H */ diff --git a/include/asm-sh64/ioctl.h b/include/asm-sh64/ioctl.h new file mode 100644 index 000000000..c089a6fb7 --- /dev/null +++ b/include/asm-sh64/ioctl.h @@ -0,0 +1,83 @@ +#ifndef __ASM_SH64_IOCTL_H +#define __ASM_SH64_IOCTL_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/ioctl.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * linux/ioctl.h for Linux by H.H. Bergman. + * + */ + +/* ioctl command encoding: 32 bits total, command in lower 16 bits, + * size of the parameter structure in the lower 14 bits of the + * upper 16 bits. + * Encoding the size of the parameter structure in the ioctl request + * is useful for catching programs compiled with old versions + * and to avoid overwriting user space outside the user buffer area. + * The highest 2 bits are reserved for indicating the ``access mode''. + * NOTE: This limits the max parameter size to 16kB -1 ! + */ + +/* + * The following is for compatibility across the various Linux + * platforms. The i386 ioctl numbering scheme doesn't really enforce + * a type field. De facto, however, the top 8 bits of the lower 16 + * bits are indeed used as a type field, so we might just as well make + * this explicit here. Please be sure to use the decoding macros + * below from now on. + */ +#define _IOC_NRBITS 8 +#define _IOC_TYPEBITS 8 +#define _IOC_SIZEBITS 14 +#define _IOC_DIRBITS 2 + +#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) +#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) +#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1) +#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1) + +#define _IOC_NRSHIFT 0 +#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS) +#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS) +#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) + +/* + * Direction bits. + */ +#define _IOC_NONE 0U +#define _IOC_WRITE 1U +#define _IOC_READ 2U + +#define _IOC(dir,type,nr,size) \ + (((dir) << _IOC_DIRSHIFT) | \ + ((type) << _IOC_TYPESHIFT) | \ + ((nr) << _IOC_NRSHIFT) | \ + ((size) << _IOC_SIZESHIFT)) + +/* used to create numbers */ +#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) +#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) +#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) +#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) + +/* used to decode ioctl numbers.. */ +#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) +#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK) +#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK) +#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK) + +/* ...and for the drivers/sound files... 
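A quick worked example of the encoding described above, using a made-up command number: _IOW('E', 0x01, int) packs direction, type, number and size into one 32-bit value, and the decode macros recover each field.

#include <asm/ioctl.h>

/* Hypothetical command, shown only to illustrate the bit layout. */
#define EXAMPLE_SETRATE _IOW('E', 0x01, int)

/*
 * _IOC_DIR(EXAMPLE_SETRATE)  == _IOC_WRITE
 * _IOC_TYPE(EXAMPLE_SETRATE) == 'E'
 * _IOC_NR(EXAMPLE_SETRATE)   == 0x01
 * _IOC_SIZE(EXAMPLE_SETRATE) == sizeof(int)
 */
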
*/ + +#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT) +#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT) +#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT) +#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT) +#define IOCSIZE_SHIFT (_IOC_SIZESHIFT) + +#endif /* __ASM_SH64_IOCTL_H */ diff --git a/include/asm-sh64/ioctls.h b/include/asm-sh64/ioctls.h new file mode 100644 index 000000000..e5d55629f --- /dev/null +++ b/include/asm-sh64/ioctls.h @@ -0,0 +1,111 @@ +#ifndef __ASM_SH64_IOCTLS_H +#define __ASM_SH64_IOCTLS_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/ioctls.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +#include + +#define FIOCLEX _IO('f', 1) +#define FIONCLEX _IO('f', 2) +#define FIOASYNC _IOW('f', 125, int) +#define FIONBIO _IOW('f', 126, int) +#define FIONREAD _IOR('f', 127, int) +#define TIOCINQ FIONREAD +#define FIOQSIZE _IOR('f', 128, loff_t) + +#define TCGETS 0x5401 +#define TCSETS 0x5402 +#define TCSETSW 0x5403 +#define TCSETSF 0x5404 + +#define TCGETA _IOR('t', 23, struct termio) +#define TCSETA _IOW('t', 24, struct termio) +#define TCSETAW _IOW('t', 25, struct termio) +#define TCSETAF _IOW('t', 28, struct termio) + +#define TCSBRK _IO('t', 29) +#define TCXONC _IO('t', 30) +#define TCFLSH _IO('t', 31) + +#define TIOCSWINSZ _IOW('t', 103, struct winsize) +#define TIOCGWINSZ _IOR('t', 104, struct winsize) +#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ +#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ +#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ + +#define TIOCSPGRP _IOW('t', 118, int) +#define TIOCGPGRP _IOR('t', 119, int) + +#define TIOCEXCL _IO('T', 12) /* 0x540C */ +#define TIOCNXCL _IO('T', 13) /* 0x540D */ +#define TIOCSCTTY _IO('T', 14) /* 0x540E */ + +#define TIOCSTI _IOW('T', 18, char) /* 0x5412 */ +#define TIOCMGET _IOR('T', 21, unsigned int) /* 0x5415 */ +#define TIOCMBIS _IOW('T', 22, unsigned int) /* 0x5416 */ +#define TIOCMBIC _IOW('T', 23, unsigned int) /* 0x5417 */ +#define TIOCMSET _IOW('T', 24, unsigned int) /* 0x5418 */ +# define TIOCM_LE 0x001 +# define TIOCM_DTR 0x002 +# define TIOCM_RTS 0x004 +# define TIOCM_ST 0x008 +# define TIOCM_SR 0x010 +# define TIOCM_CTS 0x020 +# define TIOCM_CAR 0x040 +# define TIOCM_RNG 0x080 +# define TIOCM_DSR 0x100 +# define TIOCM_CD TIOCM_CAR +# define TIOCM_RI TIOCM_RNG + +#define TIOCGSOFTCAR _IOR('T', 25, unsigned int) /* 0x5419 */ +#define TIOCSSOFTCAR _IOW('T', 26, unsigned int) /* 0x541A */ +#define TIOCLINUX _IOW('T', 28, char) /* 0x541C */ +#define TIOCCONS _IO('T', 29) /* 0x541D */ +#define TIOCGSERIAL _IOR('T', 30, struct serial_struct) /* 0x541E */ +#define TIOCSSERIAL _IOW('T', 31, struct serial_struct) /* 0x541F */ +#define TIOCPKT _IOW('T', 32, int) /* 0x5420 */ +# define TIOCPKT_DATA 0 +# define TIOCPKT_FLUSHREAD 1 +# define TIOCPKT_FLUSHWRITE 2 +# define TIOCPKT_STOP 4 +# define TIOCPKT_START 8 +# define TIOCPKT_NOSTOP 16 +# define TIOCPKT_DOSTOP 32 + + +#define TIOCNOTTY _IO('T', 34) /* 0x5422 */ +#define TIOCSETD _IOW('T', 35, int) /* 0x5423 */ +#define TIOCGETD _IOR('T', 36, int) /* 0x5424 */ +#define TCSBRKP _IOW('T', 37, int) /* 0x5425 */ /* Needed for POSIX tcsendbreak() */ +#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* 0x5426 */ /* For debugging only */ +#define TIOCSBRK _IO('T', 39) /* 0x5427 */ /* BSD compatibility */ +#define TIOCCBRK _IO('T', 40) /* 0x5428 */ /* BSD 
compatibility */ +#define TIOCGSID _IOR('T', 41, pid_t) /* 0x5429 */ /* Return the session ID of FD */ +#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ + +#define TIOCSERCONFIG _IO('T', 83) /* 0x5453 */ +#define TIOCSERGWILD _IOR('T', 84, int) /* 0x5454 */ +#define TIOCSERSWILD _IOW('T', 85, int) /* 0x5455 */ +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define TIOCSERGSTRUCT _IOR('T', 88, struct async_struct) /* 0x5458 */ /* For debugging only */ +#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* 0x5459 */ /* Get line status register */ + /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ +# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* 0x545A */ /* Get multiport config */ +#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* 0x545B */ /* Set multiport config */ + +#define TIOCMIWAIT _IO('T', 92) /* 0x545C */ /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT _IOR('T', 93, struct async_icount) /* 0x545D */ /* read serial port inline interrupt counts */ + +#endif /* __ASM_SH64_IOCTLS_H */ diff --git a/include/asm-sh64/ipc.h b/include/asm-sh64/ipc.h new file mode 100644 index 000000000..d8d9389bd --- /dev/null +++ b/include/asm-sh64/ipc.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_IPC_H +#define __ASM_SH64_IPC_H + +#include + +#endif /* __ASM_SH64_IPC_H */ diff --git a/include/asm-sh64/ipcbuf.h b/include/asm-sh64/ipcbuf.h new file mode 100644 index 000000000..c441e3529 --- /dev/null +++ b/include/asm-sh64/ipcbuf.h @@ -0,0 +1,40 @@ +#ifndef __ASM_SH64_IPCBUF_H__ +#define __ASM_SH64_IPCBUF_H__ + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/ipcbuf.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +/* + * The ipc64_perm structure for i386 architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
+ * + * Pad space is left for: + * - 32-bit mode_t and seq + * - 2 miscellaneous 32-bit values + */ + +struct ipc64_perm +{ + __kernel_key_t key; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_uid32_t cuid; + __kernel_gid32_t cgid; + __kernel_mode_t mode; + unsigned short __pad1; + unsigned short seq; + unsigned short __pad2; + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* __ASM_SH64_IPCBUF_H__ */ diff --git a/include/asm-sh64/kmap_types.h b/include/asm-sh64/kmap_types.h new file mode 100644 index 000000000..2ae7c7587 --- /dev/null +++ b/include/asm-sh64/kmap_types.h @@ -0,0 +1,7 @@ +#ifndef __ASM_SH64_KMAP_TYPES_H +#define __ASM_SH64_KMAP_TYPES_H + +#include + +#endif /* __ASM_SH64_KMAP_TYPES_H */ + diff --git a/include/asm-sh64/linkage.h b/include/asm-sh64/linkage.h new file mode 100644 index 000000000..1dd0e84a2 --- /dev/null +++ b/include/asm-sh64/linkage.h @@ -0,0 +1,7 @@ +#ifndef __ASM_SH64_LINKAGE_H +#define __ASM_SH64_LINKAGE_H + +#include + +#endif /* __ASM_SH64_LINKAGE_H */ + diff --git a/include/asm-sh64/local.h b/include/asm-sh64/local.h new file mode 100644 index 000000000..d9bd95dd3 --- /dev/null +++ b/include/asm-sh64/local.h @@ -0,0 +1,7 @@ +#ifndef __ASM_SH64_LOCAL_H +#define __ASM_SH64_LOCAL_H + +#include + +#endif /* __ASM_SH64_LOCAL_H */ + diff --git a/include/asm-sh64/mc146818rtc.h b/include/asm-sh64/mc146818rtc.h new file mode 100644 index 000000000..6cd3aec68 --- /dev/null +++ b/include/asm-sh64/mc146818rtc.h @@ -0,0 +1,7 @@ +/* + * linux/include/asm-sh64/mc146818rtc.h + * +*/ + +/* For now, an empty place-holder to get IDE to compile. */ + diff --git a/include/asm-sh64/mman.h b/include/asm-sh64/mman.h new file mode 100644 index 000000000..a9be6d885 --- /dev/null +++ b/include/asm-sh64/mman.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_MMAN_H +#define __ASM_SH64_MMAN_H + +#include + +#endif /* __ASM_SH64_MMAN_H */ diff --git a/include/asm-sh64/mmu.h b/include/asm-sh64/mmu.h new file mode 100644 index 000000000..ccd36d266 --- /dev/null +++ b/include/asm-sh64/mmu.h @@ -0,0 +1,7 @@ +#ifndef __MMU_H +#define __MMU_H + +/* Default "unsigned long" context */ +typedef unsigned long mm_context_t; + +#endif diff --git a/include/asm-sh64/module.h b/include/asm-sh64/module.h new file mode 100644 index 000000000..bf382bccf --- /dev/null +++ b/include/asm-sh64/module.h @@ -0,0 +1,12 @@ +#ifndef __ASM_SH64_MODULE_H +#define __ASM_SH64_MODULE_H +/* + * This file contains the SH architecture specific module code. + */ + +#define module_map(x) vmalloc(x) +#define module_unmap(x) vfree(x) +#define module_arch_init(x) (0) +#define arch_init_modules(x) do { } while (0) + +#endif /* __ASM_SH64_MODULE_H */ diff --git a/include/asm-sh64/msgbuf.h b/include/asm-sh64/msgbuf.h new file mode 100644 index 000000000..cf0494ce0 --- /dev/null +++ b/include/asm-sh64/msgbuf.h @@ -0,0 +1,42 @@ +#ifndef __ASM_SH64_MSGBUF_H +#define __ASM_SH64_MSGBUF_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/msgbuf.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +/* + * The msqid64_ds structure for i386 architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. 
+ * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct msqid64_ds { + struct ipc64_perm msg_perm; + __kernel_time_t msg_stime; /* last msgsnd time */ + unsigned long __unused1; + __kernel_time_t msg_rtime; /* last msgrcv time */ + unsigned long __unused2; + __kernel_time_t msg_ctime; /* last change time */ + unsigned long __unused3; + unsigned long msg_cbytes; /* current number of bytes on queue */ + unsigned long msg_qnum; /* number of messages in queue */ + unsigned long msg_qbytes; /* max number of bytes on queue */ + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ + __kernel_pid_t msg_lrpid; /* last receive pid */ + unsigned long __unused4; + unsigned long __unused5; +}; + +#endif /* __ASM_SH64_MSGBUF_H */ diff --git a/include/asm-sh64/namei.h b/include/asm-sh64/namei.h new file mode 100644 index 000000000..99d759a80 --- /dev/null +++ b/include/asm-sh64/namei.h @@ -0,0 +1,24 @@ +#ifndef __ASM_SH64_NAMEI_H +#define __ASM_SH64_NAMEI_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/namei.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * Included from linux/fs/namei.c + * + */ + +/* This dummy routine maybe changed to something useful + * for /usr/gnemul/ emulation stuff. + * Look at asm-sparc/namei.h for details. + */ + +#define __emul_prefix() NULL + +#endif /* __ASM_SH64_NAMEI_H */ diff --git a/include/asm-sh64/pci.h b/include/asm-sh64/pci.h new file mode 100644 index 000000000..8cc14e139 --- /dev/null +++ b/include/asm-sh64/pci.h @@ -0,0 +1,110 @@ +#ifndef __ASM_SH64_PCI_H +#define __ASM_SH64_PCI_H + +#ifdef __KERNEL__ + +#include + +/* Can be used to override the logic in pci_scan_bus for skipping + already-configured bus numbers - to be used for buggy BIOSes + or architectures with incomplete PCI setup by the loader */ + +#define pcibios_assign_all_busses() 1 + +/* + * These are currently the correct values for the STM overdrive board + * We need some way of setting this on a board specific way, it will + * not be the same on other boards I think + */ +#if defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103) +#define PCIBIOS_MIN_IO 0x2000 +#define PCIBIOS_MIN_MEM 0x40000000 +#endif + +extern void pcibios_set_master(struct pci_dev *dev); + +/* + * Set penalize isa irq function + */ +static inline void pcibios_penalize_isa_irq(int irq) +{ + /* We don't do dynamic PCI IRQ allocation */ +} + +/* Dynamic DMA mapping stuff. + * SuperH has everything mapped statically like x86. + */ + +/* The PCI address space does equal the physical memory + * address space. The networking and block device layers use + * this boolean for bounce buffer decisions. + */ +#define PCI_DMA_BUS_IS_PHYS (1) + +#include +#include +#include +#include +#include + +/* pci_unmap_{single,page} being a nop depends upon the + * configuration. 
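/*
 * The __unused slot after each 32-bit __kernel_time_t above reserves the
 * 8 bytes a 64-bit time_t will eventually need.  A self-contained check of
 * that arithmetic (the field types are stand-ins for the 32-bit sh64 ABI,
 * used here only so the example builds on any host):
 */
#include <stdio.h>
#include <stdint.h>

struct demo_time_slot {
	int32_t		msg_stime;	/* stand-in for __kernel_time_t */
	uint32_t	__unused1;	/* room for the future high half */
};

int main(void)
{
	printf("padded 32-bit slot: %zu bytes\n", sizeof(struct demo_time_slot));
	printf("64-bit time_t     : %zu bytes\n", sizeof(int64_t));
	/* Both are 8, so the field can grow without changing the layout. */
	return 0;
}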
+ */ +#ifdef CONFIG_SH_PCIDMA_NONCOHERENT +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ + dma_addr_t ADDR_NAME; +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ + __u32 LEN_NAME; +#define pci_unmap_addr(PTR, ADDR_NAME) \ + ((PTR)->ADDR_NAME) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + (((PTR)->ADDR_NAME) = (VAL)) +#define pci_unmap_len(PTR, LEN_NAME) \ + ((PTR)->LEN_NAME) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ + (((PTR)->LEN_NAME) = (VAL)) +#else +#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) +#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) +#define pci_unmap_addr(PTR, ADDR_NAME) (0) +#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) +#define pci_unmap_len(PTR, LEN_NAME) (0) +#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) +#endif + +/* Not supporting more than 32-bit PCI bus addresses now, but + * must satisfy references to this function. Change if needed. + */ +#define pci_dac_dma_supported(pci_dev, mask) (0) + +/* These macros should be used after a pci_map_sg call has been done + * to get bus addresses of each of the SG entries and their lengths. + * You should only work with the number of sg entries pci_map_sg + * returns, or alternatively stop on the first sg_dma_len(sg) which + * is 0. + */ +#define sg_dma_address(sg) ((sg)->dma_address) +#define sg_dma_len(sg) ((sg)->length) + +/* Board-specific fixup routines. */ +extern void pcibios_fixup(void); +extern void pcibios_fixup_irqs(void); + +#ifdef CONFIG_PCI_AUTO +extern int pciauto_assign_resources(int busno, struct pci_channel *hose); +#endif + +static inline void pcibios_add_platform_entries(struct pci_dev *dev) +{ +} + +#endif /* __KERNEL__ */ + +/* generic pci stuff */ +#include + +/* generic DMA-mapping stuff */ +#include + +#endif /* __ASM_SH64_PCI_H */ + diff --git a/include/asm-sh64/percpu.h b/include/asm-sh64/percpu.h new file mode 100644 index 000000000..a01d16cd0 --- /dev/null +++ b/include/asm-sh64/percpu.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_PERCPU +#define __ASM_SH64_PERCPU + +#include + +#endif /* __ASM_SH64_PERCPU */ diff --git a/include/asm-sh64/posix_types.h b/include/asm-sh64/posix_types.h new file mode 100644 index 000000000..0620317a6 --- /dev/null +++ b/include/asm-sh64/posix_types.h @@ -0,0 +1,131 @@ +#ifndef __ASM_SH64_POSIX_TYPES_H +#define __ASM_SH64_POSIX_TYPES_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/posix_types.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003 Paul Mundt + * + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. 
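/*
 * Sketch of the driver-side pattern the DECLARE_PCI_UNMAP_* helpers above
 * are meant for: the bookkeeping fields exist only when
 * CONFIG_SH_PCIDMA_NONCOHERENT is set and compile away otherwise.  The
 * structure, function names and RX_BUF_SIZE are invented for illustration.
 */
#include <linux/pci.h>
#include <linux/skbuff.h>

#define RX_BUF_SIZE	1536

struct demo_rx_buf {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(len)
};

static void demo_map_rx(struct pci_dev *pdev, struct demo_rx_buf *buf)
{
	dma_addr_t dma = pci_map_single(pdev, buf->skb->data,
					RX_BUF_SIZE, PCI_DMA_FROMDEVICE);

	pci_unmap_addr_set(buf, mapping, dma);
	pci_unmap_len_set(buf, len, RX_BUF_SIZE);
}

static void demo_unmap_rx(struct pci_dev *pdev, struct demo_rx_buf *buf)
{
	pci_unmap_single(pdev, pci_unmap_addr(buf, mapping),
			 pci_unmap_len(buf, len), PCI_DMA_FROMDEVICE);
}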
+ */ + +typedef unsigned long __kernel_ino_t; +typedef unsigned short __kernel_mode_t; +typedef unsigned short __kernel_nlink_t; +typedef long __kernel_off_t; +typedef int __kernel_pid_t; +typedef unsigned short __kernel_ipc_pid_t; +typedef unsigned short __kernel_uid_t; +typedef unsigned short __kernel_gid_t; +typedef long unsigned int __kernel_size_t; +typedef int __kernel_ssize_t; +typedef int __kernel_ptrdiff_t; +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef int __kernel_timer_t; +typedef int __kernel_clockid_t; +typedef int __kernel_daddr_t; +typedef char * __kernel_caddr_t; +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +typedef unsigned int __kernel_uid32_t; +typedef unsigned int __kernel_gid32_t; + +typedef unsigned short __kernel_old_uid_t; +typedef unsigned short __kernel_old_gid_t; +typedef unsigned short __kernel_old_dev_t; + +#ifdef __GNUC__ +typedef long long __kernel_loff_t; +#endif + +typedef struct { +#if defined(__KERNEL__) || defined(__USE_ALL) + int val[2]; +#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */ + int __val[2]; +#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */ +} __kernel_fsid_t; + +#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) + +#undef __FD_SET +static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + __fdsetp->fds_bits[__tmp] |= (1UL<<__rem); +} + +#undef __FD_CLR +static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem); +} + + +#undef __FD_ISSET +static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0; +} + +/* + * This will unroll the loop for the normal constant case (8 ints, + * for a 256-bit fd_set) + */ +#undef __FD_ZERO +static __inline__ void __FD_ZERO(__kernel_fd_set *__p) +{ + unsigned long *__tmp = __p->fds_bits; + int __i; + + if (__builtin_constant_p(__FDSET_LONGS)) { + switch (__FDSET_LONGS) { + case 16: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + __tmp[ 4] = 0; __tmp[ 5] = 0; + __tmp[ 6] = 0; __tmp[ 7] = 0; + __tmp[ 8] = 0; __tmp[ 9] = 0; + __tmp[10] = 0; __tmp[11] = 0; + __tmp[12] = 0; __tmp[13] = 0; + __tmp[14] = 0; __tmp[15] = 0; + return; + + case 8: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + __tmp[ 4] = 0; __tmp[ 5] = 0; + __tmp[ 6] = 0; __tmp[ 7] = 0; + return; + + case 4: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + return; + } + } + __i = __FDSET_LONGS; + while (__i) { + __i--; + *__tmp = 0; + __tmp++; + } +} + +#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ + +#endif /* __ASM_SH64_POSIX_TYPES_H */ diff --git a/include/asm-sh64/registers.h b/include/asm-sh64/registers.h new file mode 100644 index 000000000..7eec666ac --- /dev/null +++ b/include/asm-sh64/registers.h @@ -0,0 +1,106 @@ +#ifndef __ASM_SH64_REGISTERS_H +#define __ASM_SH64_REGISTERS_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
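/*
 * Host-runnable illustration of the arithmetic behind __FD_SET/__FD_ISSET
 * above: a descriptor splits into a word index (fd / __NFDBITS) and a bit
 * position (fd % __NFDBITS).  __NFDBITS is taken to be 8 * sizeof(long),
 * as usual.
 */
#include <stdio.h>

#define DEMO_NFDBITS	(8 * sizeof(unsigned long))

int main(void)
{
	unsigned long fds_bits[16] = { 0 };
	unsigned long fd = 42;

	fds_bits[fd / DEMO_NFDBITS] |= 1UL << (fd % DEMO_NFDBITS);	/* __FD_SET */

	printf("fd %lu -> word %lu, bit %lu, set=%d\n", fd,
	       (unsigned long)(fd / DEMO_NFDBITS),
	       (unsigned long)(fd % DEMO_NFDBITS),
	       !!(fds_bits[fd / DEMO_NFDBITS] & (1UL << (fd % DEMO_NFDBITS))));
	return 0;
}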
+ * + * include/asm-sh64/registers.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2004 Richard Curnow + */ + +#ifdef __ASSEMBLY__ +/* ===================================================================== +** +** Section 1: acts on assembly sources pre-processed by GPP ( ). +** Assigns symbolic names to control & target registers. +*/ + +/* + * Define some useful aliases for control registers. + */ +#define SR cr0 +#define SSR cr1 +#define PSSR cr2 + /* cr3 UNDEFINED */ +#define INTEVT cr4 +#define EXPEVT cr5 +#define PEXPEVT cr6 +#define TRA cr7 +#define SPC cr8 +#define PSPC cr9 +#define RESVEC cr10 +#define VBR cr11 + /* cr12 UNDEFINED */ +#define TEA cr13 + /* cr14-cr15 UNDEFINED */ +#define DCR cr16 +#define KCR0 cr17 +#define KCR1 cr18 + /* cr19-cr31 UNDEFINED */ + /* cr32-cr61 RESERVED */ +#define CTC cr62 +#define USR cr63 + +/* + * ABI dependent registers (general purpose set) + */ +#define RET r2 +#define ARG1 r2 +#define ARG2 r3 +#define ARG3 r4 +#define ARG4 r5 +#define ARG5 r6 +#define ARG6 r7 +#define SP r15 +#define LINK r18 +#define ZERO r63 + +/* + * Status register defines: used only by assembly sources (and + * syntax independednt) + */ +#define SR_RESET_VAL 0x0000000050008000 +#define SR_HARMLESS 0x00000000500080f0 /* Write ignores for most */ +#define SR_ENABLE_FPU 0xffffffffffff7fff /* AND with this */ + +#if defined (CONFIG_SH64_SR_WATCH) +#define SR_ENABLE_MMU 0x0000000084000000 /* OR with this */ +#else +#define SR_ENABLE_MMU 0x0000000080000000 /* OR with this */ +#endif + +#define SR_UNBLOCK_EXC 0xffffffffefffffff /* AND with this */ +#define SR_BLOCK_EXC 0x0000000010000000 /* OR with this */ + +#else /* Not __ASSEMBLY__ syntax */ + +/* +** Stringify reg. name +*/ +#define __str(x) #x + +/* Stringify control register names for use in inline assembly */ +#define __SR __str(SR) +#define __SSR __str(SSR) +#define __PSSR __str(PSSR) +#define __INTEVT __str(INTEVT) +#define __EXPEVT __str(EXPEVT) +#define __PEXPEVT __str(PEXPEVT) +#define __TRA __str(TRA) +#define __SPC __str(SPC) +#define __PSPC __str(PSPC) +#define __RESVEC __str(RESVEC) +#define __VBR __str(VBR) +#define __TEA __str(TEA) +#define __DCR __str(DCR) +#define __KCR0 __str(KCR0) +#define __KCR1 __str(KCR1) +#define __CTC __str(CTC) +#define __USR __str(USR) + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_SH64_REGISTERS_H */ diff --git a/include/asm-sh64/resource.h b/include/asm-sh64/resource.h new file mode 100644 index 000000000..8ff93944a --- /dev/null +++ b/include/asm-sh64/resource.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_RESOURCE_H +#define __ASM_SH64_RESOURCE_H + +#include + +#endif /* __ASM_SH64_RESOURCE_H */ diff --git a/include/asm-sh64/scatterlist.h b/include/asm-sh64/scatterlist.h new file mode 100644 index 000000000..5d8fa32d2 --- /dev/null +++ b/include/asm-sh64/scatterlist.h @@ -0,0 +1,23 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
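/*
 * What the __str() stringification above buys: a control-register name
 * becomes a string fragment that pastes straight into an inline-assembly
 * template (see current_thread_info() later in this patch).  A
 * host-runnable peek at the resulting template text:
 */
#include <stdio.h>

#define __str(x) #x
#define __KCR0 __str(KCR0)

int main(void)
{
	/* Adjacent string literals merge into one template: "getcon KCR0, %0" */
	printf("%s\n", "getcon " __KCR0 ", %0");
	return 0;
}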
+ * + * include/asm-sh64/scatterlist.h + * + * Copyright (C) 2003 Paul Mundt + * + */ +#ifndef __ASM_SH64_SCATTERLIST_H +#define __ASM_SH64_SCATTERLIST_H + +struct scatterlist { + struct page * page; /* Location for highmem page, if any */ + unsigned int offset;/* for highmem, page offset */ + dma_addr_t dma_address; + unsigned int length; +}; + +#define ISA_DMA_THRESHOLD (0xffffffff) + +#endif /* !__ASM_SH64_SCATTERLIST_H */ diff --git a/include/asm-sh64/sections.h b/include/asm-sh64/sections.h new file mode 100644 index 000000000..897f36bcd --- /dev/null +++ b/include/asm-sh64/sections.h @@ -0,0 +1,7 @@ +#ifndef __ASM_SH64_SECTIONS_H +#define __ASM_SH64_SECTIONS_H + +#include + +#endif /* __ASM_SH64_SECTIONS_H */ + diff --git a/include/asm-sh64/segment.h b/include/asm-sh64/segment.h new file mode 100644 index 000000000..92ac001fc --- /dev/null +++ b/include/asm-sh64/segment.h @@ -0,0 +1,6 @@ +#ifndef _ASM_SEGMENT_H +#define _ASM_SEGMENT_H + +/* Only here because we have some old header files that expect it.. */ + +#endif /* _ASM_SEGMENT_H */ diff --git a/include/asm-sh64/semaphore-helper.h b/include/asm-sh64/semaphore-helper.h new file mode 100644 index 000000000..fcfafe263 --- /dev/null +++ b/include/asm-sh64/semaphore-helper.h @@ -0,0 +1,101 @@ +#ifndef __ASM_SH64_SEMAPHORE_HELPER_H +#define __ASM_SH64_SEMAPHORE_HELPER_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/semaphore-helper.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ +#include + +/* + * SMP- and interrupt-safe semaphores helper functions. + * + * (C) Copyright 1996 Linus Torvalds + * (C) Copyright 1999 Andrea Arcangeli + */ + +/* + * These two _must_ execute atomically wrt each other. + * + * This is trivially done with load_locked/store_cond, + * which we have. Let the rest of the losers suck eggs. + */ +static __inline__ void wake_one_more(struct semaphore * sem) +{ + atomic_inc((atomic_t *)&sem->sleepers); +} + +static __inline__ int waking_non_zero(struct semaphore *sem) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (sem->sleepers > 0) { + sem->sleepers--; + ret = 1; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +/* + * waking_non_zero_interruptible: + * 1 got the lock + * 0 go to sleep + * -EINTR interrupted + * + * We must undo the sem->count down_interruptible() increment while we are + * protected by the spinlock in order to make atomic this atomic_inc() with the + * atomic_read() in wake_one_more(), otherwise we can race. -arca + */ +static __inline__ int waking_non_zero_interruptible(struct semaphore *sem, + struct task_struct *tsk) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (sem->sleepers > 0) { + sem->sleepers--; + ret = 1; + } else if (signal_pending(tsk)) { + atomic_inc(&sem->count); + ret = -EINTR; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +/* + * waking_non_zero_trylock: + * 1 failed to lock + * 0 got the lock + * + * We must undo the sem->count down_trylock() increment while we are + * protected by the spinlock in order to make atomic this atomic_inc() with the + * atomic_read() in wake_one_more(), otherwise we can race. 
-arca + */ +static __inline__ int waking_non_zero_trylock(struct semaphore *sem) +{ + unsigned long flags; + int ret = 1; + + spin_lock_irqsave(&semaphore_wake_lock, flags); + if (sem->sleepers <= 0) + atomic_inc(&sem->count); + else { + sem->sleepers--; + ret = 0; + } + spin_unlock_irqrestore(&semaphore_wake_lock, flags); + return ret; +} + +#endif /* __ASM_SH64_SEMAPHORE_HELPER_H */ diff --git a/include/asm-sh64/semaphore.h b/include/asm-sh64/semaphore.h new file mode 100644 index 000000000..3e97ead32 --- /dev/null +++ b/include/asm-sh64/semaphore.h @@ -0,0 +1,146 @@ +#ifndef __ASM_SH64_SEMAPHORE_H +#define __ASM_SH64_SEMAPHORE_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/semaphore.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * SMP- and interrupt-safe semaphores. + * + * (C) Copyright 1996 Linus Torvalds + * + * SuperH verison by Niibe Yutaka + * (Currently no asm implementation but generic C code...) + * + */ + +#include +#include +#include +#include + +#include +#include + +struct semaphore { + atomic_t count; + int sleepers; + wait_queue_head_t wait; +#ifdef WAITQUEUE_DEBUG + long __magic; +#endif +}; + +#ifdef WAITQUEUE_DEBUG +# define __SEM_DEBUG_INIT(name) \ + , (int)&(name).__magic +#else +# define __SEM_DEBUG_INIT(name) +#endif + +#define __SEMAPHORE_INITIALIZER(name,count) \ +{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ + __SEM_DEBUG_INIT(name) } + +#define __MUTEX_INITIALIZER(name) \ + __SEMAPHORE_INITIALIZER(name,1) + +#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ + struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) + +#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) +#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0) + +static inline void sema_init (struct semaphore *sem, int val) +{ +/* + * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val); + * + * i'd rather use the more flexible initialization above, but sadly + * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well. 
+ */ + atomic_set(&sem->count, val); + sem->sleepers = 0; + init_waitqueue_head(&sem->wait); +#ifdef WAITQUEUE_DEBUG + sem->__magic = (int)&sem->__magic; +#endif +} + +static inline void init_MUTEX (struct semaphore *sem) +{ + sema_init(sem, 1); +} + +static inline void init_MUTEX_LOCKED (struct semaphore *sem) +{ + sema_init(sem, 0); +} + +#if 0 +asmlinkage void __down_failed(void /* special register calling convention */); +asmlinkage int __down_failed_interruptible(void /* params in registers */); +asmlinkage int __down_failed_trylock(void /* params in registers */); +asmlinkage void __up_wakeup(void /* special register calling convention */); +#endif + +asmlinkage void __down(struct semaphore * sem); +asmlinkage int __down_interruptible(struct semaphore * sem); +asmlinkage int __down_trylock(struct semaphore * sem); +asmlinkage void __up(struct semaphore * sem); + +extern spinlock_t semaphore_wake_lock; + +static inline void down(struct semaphore * sem) +{ +#ifdef WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + if (atomic_dec_return(&sem->count) < 0) + __down(sem); +} + +static inline int down_interruptible(struct semaphore * sem) +{ + int ret = 0; +#ifdef WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + if (atomic_dec_return(&sem->count) < 0) + ret = __down_interruptible(sem); + return ret; +} + +static inline int down_trylock(struct semaphore * sem) +{ + int ret = 0; +#ifdef WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + if (atomic_dec_return(&sem->count) < 0) + ret = __down_trylock(sem); + return ret; +} + +/* + * Note! This is subtle. We jump to wake people up only if + * the semaphore was negative (== somebody was waiting on it). + */ +static inline void up(struct semaphore * sem) +{ +#ifdef WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + if (atomic_inc_return(&sem->count) <= 0) + __up(sem); +} + +#endif /* __ASM_SH64_SEMAPHORE_H */ diff --git a/include/asm-sh64/sembuf.h b/include/asm-sh64/sembuf.h new file mode 100644 index 000000000..ec4d9f143 --- /dev/null +++ b/include/asm-sh64/sembuf.h @@ -0,0 +1,36 @@ +#ifndef __ASM_SH64_SEMBUF_H +#define __ASM_SH64_SEMBUF_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/sembuf.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +/* + * The semid64_ds structure for i386 architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct semid64_ds { + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ + __kernel_time_t sem_otime; /* last semop time */ + unsigned long __unused1; + __kernel_time_t sem_ctime; /* last change time */ + unsigned long __unused2; + unsigned long sem_nsems; /* no. of semaphores in array */ + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* __ASM_SH64_SEMBUF_H */ diff --git a/include/asm-sh64/shmbuf.h b/include/asm-sh64/shmbuf.h new file mode 100644 index 000000000..022f3494d --- /dev/null +++ b/include/asm-sh64/shmbuf.h @@ -0,0 +1,53 @@ +#ifndef __ASM_SH64_SHMBUF_H +#define __ASM_SH64_SHMBUF_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
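/*
 * Minimal sketch of how kernel code consumes the semaphore interface
 * declared above; the function and lock names are invented for the
 * illustration.
 */
#include <linux/errno.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(demo_lock);	/* counting semaphore initialised to 1 */

int demo_critical_section(void)
{
	if (down_interruptible(&demo_lock))
		return -ERESTARTSYS;	/* a signal interrupted the sleep */

	/* ... work on data protected by demo_lock ... */

	up(&demo_lock);
	return 0;
}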
+ * + * include/asm-sh64/shmbuf.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +/* + * The shmid64_ds structure for i386 architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + * + * Pad space is left for: + * - 64-bit time_t to solve y2038 problem + * - 2 miscellaneous 32-bit values + */ + +struct shmid64_ds { + struct ipc64_perm shm_perm; /* operation perms */ + size_t shm_segsz; /* size of segment (bytes) */ + __kernel_time_t shm_atime; /* last attach time */ + unsigned long __unused1; + __kernel_time_t shm_dtime; /* last detach time */ + unsigned long __unused2; + __kernel_time_t shm_ctime; /* last change time */ + unsigned long __unused3; + __kernel_pid_t shm_cpid; /* pid of creator */ + __kernel_pid_t shm_lpid; /* pid of last operator */ + unsigned long shm_nattch; /* no. of current attaches */ + unsigned long __unused4; + unsigned long __unused5; +}; + +struct shminfo64 { + unsigned long shmmax; + unsigned long shmmin; + unsigned long shmmni; + unsigned long shmseg; + unsigned long shmall; + unsigned long __unused1; + unsigned long __unused2; + unsigned long __unused3; + unsigned long __unused4; +}; + +#endif /* __ASM_SH64_SHMBUF_H */ diff --git a/include/asm-sh64/sigcontext.h b/include/asm-sh64/sigcontext.h new file mode 100644 index 000000000..6293509d8 --- /dev/null +++ b/include/asm-sh64/sigcontext.h @@ -0,0 +1,30 @@ +#ifndef __ASM_SH64_SIGCONTEXT_H +#define __ASM_SH64_SIGCONTEXT_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/sigcontext.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +struct sigcontext { + unsigned long oldmask; + + /* CPU registers */ + unsigned long long sc_regs[63]; + unsigned long long sc_tregs[8]; + unsigned long long sc_pc; + unsigned long long sc_sr; + + /* FPU registers */ + unsigned long long sc_fpregs[32]; + unsigned int sc_fpscr; + unsigned int sc_fpvalid; +}; + +#endif /* __ASM_SH64_SIGCONTEXT_H */ diff --git a/include/asm-sh64/siginfo.h b/include/asm-sh64/siginfo.h new file mode 100644 index 000000000..56ef1da53 --- /dev/null +++ b/include/asm-sh64/siginfo.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_SIGINFO_H +#define __ASM_SH64_SIGINFO_H + +#include + +#endif /* __ASM_SH64_SIGINFO_H */ diff --git a/include/asm-sh64/smp.h b/include/asm-sh64/smp.h new file mode 100644 index 000000000..4a4d0da39 --- /dev/null +++ b/include/asm-sh64/smp.h @@ -0,0 +1,15 @@ +#ifndef __ASM_SH64_SMP_H +#define __ASM_SH64_SMP_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * include/asm-sh64/smp.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +#endif /* __ASM_SH64_SMP_H */ diff --git a/include/asm-sh64/socket.h b/include/asm-sh64/socket.h new file mode 100644 index 000000000..1853f7246 --- /dev/null +++ b/include/asm-sh64/socket.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_SOCKET_H +#define __ASM_SH64_SOCKET_H + +#include + +#endif /* __ASM_SH64_SOCKET_H */ diff --git a/include/asm-sh64/sockios.h b/include/asm-sh64/sockios.h new file mode 100644 index 000000000..1ae23ae82 --- /dev/null +++ b/include/asm-sh64/sockios.h @@ -0,0 +1,24 @@ +#ifndef __ASM_SH64_SOCKIOS_H +#define __ASM_SH64_SOCKIOS_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/sockios.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +/* Socket-level I/O control calls. */ +#define FIOGETOWN _IOR('f', 123, int) +#define FIOSETOWN _IOW('f', 124, int) + +#define SIOCATMARK _IOR('s', 7, int) +#define SIOCSPGRP _IOW('s', 8, pid_t) +#define SIOCGPGRP _IOR('s', 9, pid_t) + +#define SIOCGSTAMP _IOR('s', 100, struct timeval) /* Get stamp - linux-specific */ +#endif /* __ASM_SH64_SOCKIOS_H */ diff --git a/include/asm-sh64/spinlock.h b/include/asm-sh64/spinlock.h new file mode 100644 index 000000000..296b0c9b2 --- /dev/null +++ b/include/asm-sh64/spinlock.h @@ -0,0 +1,17 @@ +#ifndef __ASM_SH64_SPINLOCK_H +#define __ASM_SH64_SPINLOCK_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/spinlock.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +#error "No SMP on SH64" + +#endif /* __ASM_SH64_SPINLOCK_H */ diff --git a/include/asm-sh64/stat.h b/include/asm-sh64/stat.h new file mode 100644 index 000000000..86f551b19 --- /dev/null +++ b/include/asm-sh64/stat.h @@ -0,0 +1,88 @@ +#ifndef __ASM_SH64_STAT_H +#define __ASM_SH64_STAT_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/stat.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +struct __old_kernel_stat { + unsigned short st_dev; + unsigned short st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; +}; + +struct stat { + unsigned short st_dev; + unsigned short __pad1; + unsigned long st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned short __pad2; + unsigned long st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + unsigned long __unused4; + unsigned long __unused5; +}; + +/* This matches struct stat64 in glibc2.1, hence the absolutely + * insane amounts of padding around dev_t's. 
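/*
 * User-space sketch for SIOCGSTAMP as declared above: once a datagram has
 * arrived, the ioctl reports the kernel's receive timestamp.  The port
 * number and buffer size are arbitrary example values.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <netinet/in.h>

int main(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in sin = { .sin_family = AF_INET,
				   .sin_port = htons(9999) };
	char buf[1500];
	struct timeval tv;

	if (s < 0)
		return 1;
	bind(s, (struct sockaddr *)&sin, sizeof(sin));
	recv(s, buf, sizeof(buf), 0);		/* blocks until a packet arrives */

	if (ioctl(s, SIOCGSTAMP, &tv) == 0)
		printf("received at %ld.%06ld\n",
		       (long)tv.tv_sec, (long)tv.tv_usec);
	close(s);
	return 0;
}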
+ */ +struct stat64 { + unsigned short st_dev; + unsigned char __pad0[10]; + + unsigned long st_ino; + unsigned int st_mode; + unsigned int st_nlink; + + unsigned long st_uid; + unsigned long st_gid; + + unsigned short st_rdev; + unsigned char __pad3[10]; + + long long st_size; + unsigned long st_blksize; + + unsigned long st_blocks; /* Number 512-byte blocks allocated. */ + unsigned long __pad4; /* future possible st_blocks high bits */ + + unsigned long st_atime; + unsigned long st_atime_nsec; + + unsigned long st_mtime; + unsigned long st_mtime_nsec; + + unsigned long st_ctime; + unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */ + + unsigned long __unused1; + unsigned long __unused2; +}; + +#endif /* __ASM_SH64_STAT_H */ diff --git a/include/asm-sh64/statfs.h b/include/asm-sh64/statfs.h new file mode 100644 index 000000000..083fd79b2 --- /dev/null +++ b/include/asm-sh64/statfs.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_STATFS_H +#define __ASM_SH64_STATFS_H + +#include + +#endif /* __ASM_SH64_STATFS_H */ diff --git a/include/asm-sh64/string.h b/include/asm-sh64/string.h new file mode 100644 index 000000000..8a7357366 --- /dev/null +++ b/include/asm-sh64/string.h @@ -0,0 +1,21 @@ +#ifndef __ASM_SH64_STRING_H +#define __ASM_SH64_STRING_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/string.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + * Empty on purpose. ARCH SH64 ASM libs are out of the current project scope. + * + */ + +#define __HAVE_ARCH_MEMCPY + +extern void *memcpy(void *dest, const void *src, size_t count); + +#endif diff --git a/include/asm-sh64/termbits.h b/include/asm-sh64/termbits.h new file mode 100644 index 000000000..86bde5ec1 --- /dev/null +++ b/include/asm-sh64/termbits.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_TERMBITS_H +#define __ASM_SH64_TERMBITS_H + +#include + +#endif /* __ASM_SH64_TERMBITS_H */ diff --git a/include/asm-sh64/termios.h b/include/asm-sh64/termios.h new file mode 100644 index 000000000..4a9c7fb41 --- /dev/null +++ b/include/asm-sh64/termios.h @@ -0,0 +1,117 @@ +#ifndef __ASM_SH64_TERMIOS_H +#define __ASM_SH64_TERMIOS_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
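/*
 * st_blocks above counts 512-byte units regardless of st_blksize; the same
 * convention is visible from user space (the path is just an example):
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;

	if (stat("/etc/hostname", &st) != 0) {
		perror("stat");
		return 1;
	}
	printf("apparent size: %lld bytes\n", (long long)st.st_size);
	printf("disk usage   : %lld bytes in %lld 512-byte blocks\n",
	       (long long)st.st_blocks * 512, (long long)st.st_blocks);
	return 0;
}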
+ * + * include/asm-sh64/termios.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +#include +#include + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +/* modem lines */ +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ + +/* line disciplines */ +#define N_TTY 0 +#define N_SLIP 1 +#define N_MOUSE 2 +#define N_PPP 3 +#define N_STRIP 4 +#define N_AX25 5 +#define N_X25 6 /* X.25 async */ +#define N_6PACK 7 +#define N_MASC 8 /* Reserved for Mobitex module */ +#define N_R3964 9 /* Reserved for Simatic R3964 module */ +#define N_PROFIBUS_FDL 10 /* Reserved for Profibus */ +#define N_IRDA 11 /* Linux IR - http://www.cs.uit.no/~dagb/irda/irda.html */ +#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */ +#define N_HDLC 13 /* synchronous HDLC */ +#define N_SYNC_PPP 14 +#define N_HCI 15 /* Bluetooth HCI UART */ + +#ifdef __KERNEL__ + +/* intr=^C quit=^\ erase=del kill=^U + eof=^D vtime=\0 vmin=\1 sxtc=\0 + start=^Q stop=^S susp=^Z eol=\0 + reprint=^R discard=^U werase=^W lnext=^V + eol2=\0 +*/ +#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ +#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \ + unsigned short __tmp; \ + get_user(__tmp,&(termio)->x); \ + *(unsigned short *) &(termios)->x = __tmp; \ +} + +#define user_termio_to_kernel_termios(termios, termio) \ +({ \ + SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \ + SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \ + copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ +}) + +/* + * Translate a "termios" structure into a "termio". Ugh. 
+ */ +#define kernel_termios_to_user_termio(termio, termios) \ +({ \ + put_user((termios)->c_iflag, &(termio)->c_iflag); \ + put_user((termios)->c_oflag, &(termio)->c_oflag); \ + put_user((termios)->c_cflag, &(termio)->c_cflag); \ + put_user((termios)->c_lflag, &(termio)->c_lflag); \ + put_user((termios)->c_line, &(termio)->c_line); \ + copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ +}) + +#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) +#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) + +#endif /* __KERNEL__ */ + +#endif /* __ASM_SH64_TERMIOS_H */ diff --git a/include/asm-sh64/thread_info.h b/include/asm-sh64/thread_info.h new file mode 100644 index 000000000..f9d2a22db --- /dev/null +++ b/include/asm-sh64/thread_info.h @@ -0,0 +1,82 @@ +#ifndef __ASM_SH64_THREAD_INFO_H +#define __ASM_SH64_THREAD_INFO_H + +/* + * SuperH 5 version + * Copyright (C) 2003 Paul Mundt + */ + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - this struct shares the supervisor stack pages + * - if the contents of this structure are changed, the assembly constants must also be changed + */ +struct thread_info { + struct task_struct *task; /* main task structure */ + struct exec_domain *exec_domain; /* execution domain */ + unsigned long flags; /* low level flags */ + /* Put the 4 32-bit fields together to make asm offsetting easier. */ + __s32 preempt_count; /* 0 => preemptable, <0 => BUG */ + __u16 cpu; + + mm_segment_t addr_limit; + struct restart_block restart_block; + + __u8 supervisor_stack[0]; +}; + +/* + * macros/functions for gaining access to the thread information structure + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .task = &tsk, \ + .exec_domain = &default_exec_domain, \ + .flags = 0, \ + .cpu = 0, \ + .preempt_count = 1, \ + .addr_limit = KERNEL_DS, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ +} + +#define init_thread_info (init_thread_union.thread_info) +#define init_stack (init_thread_union.stack) + +/* how to get the thread information struct from C */ +static inline struct thread_info *current_thread_info(void) +{ + struct thread_info *ti; + + __asm__ __volatile__ ("getcon " __KCR0 ", %0\n\t" : "=r" (ti)); + + return ti; +} + +/* thread information allocation */ +#define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,2)) +#define free_thread_info(ti) free_pages((unsigned long) (ti), 1) +#define get_thread_info(ti) get_task_struct((ti)->task) +#define put_thread_info(ti) put_task_struct((ti)->task) + +#endif /* __ASSEMBLY__ */ + +#define PREEMPT_ACTIVE 0x4000000 + +/* thread information flags */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ + +#define THREAD_SIZE 16384 + +#endif /* __KERNEL__ */ + +#endif /* __ASM_SH64_THREAD_INFO_H */ diff --git a/include/asm-sh64/tlb.h b/include/asm-sh64/tlb.h new file mode 100644 index 000000000..4979408bd --- /dev/null +++ b/include/asm-sh64/tlb.h @@ -0,0 +1,92 @@ +/* + * include/asm-sh64/tlb.h + * + * Copyright (C) 2003 Paul Mundt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
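/*
 * Decoder for the INIT_C_CC string above: each byte is the default value
 * of one c_cc[] slot, in the order listed in the comment next to it
 * (0003 is ^C for intr, 0177 is DEL for erase, vtime and vmin are plain
 * numbers, and so on).
 */
#include <stdio.h>

static const char demo_c_cc[] =
	"\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0";
static const char *demo_name[] = {
	"intr", "quit", "erase", "kill", "eof", "vtime", "vmin", "sxtc",
	"start", "stop", "susp", "eol", "reprint", "discard", "werase",
	"lnext", "eol2"
};

int main(void)
{
	int i;

	for (i = 0; i < 17; i++)
		printf("%-8s = 0%03o\n", demo_name[i],
		       (unsigned char)demo_c_cc[i]);
	return 0;
}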
+ * + */ +#ifndef __ASM_SH64_TLB_H +#define __ASM_SH64_TLB_H + +/* + * Note! These are mostly unused, we just need the xTLB_LAST_VAR_UNRESTRICTED + * for head.S! Once this limitation is gone, we can clean the rest of this up. + */ + +/* ITLB defines */ +#define ITLB_FIXED 0x00000000 /* First fixed ITLB, see head.S */ +#define ITLB_LAST_VAR_UNRESTRICTED 0x000003F0 /* Last ITLB */ + +/* DTLB defines */ +#define DTLB_FIXED 0x00800000 /* First fixed DTLB, see head.S */ +#define DTLB_LAST_VAR_UNRESTRICTED 0x008003F0 /* Last DTLB */ + +#ifndef __ASSEMBLY__ + +/** + * for_each_dtlb_entry + * + * @tlb: TLB entry + * + * Iterate over free (non-wired) DTLB entries + */ +#define for_each_dtlb_entry(tlb) \ + for (tlb = cpu_data->dtlb.first; \ + tlb <= cpu_data->dtlb.last; \ + tlb += cpu_data->dtlb.step) + +/** + * for_each_itlb_entry + * + * @tlb: TLB entry + * + * Iterate over free (non-wired) ITLB entries + */ +#define for_each_itlb_entry(tlb) \ + for (tlb = cpu_data->itlb.first; \ + tlb <= cpu_data->itlb.last; \ + tlb += cpu_data->itlb.step) + +/** + * __flush_tlb_slot + * + * @slot: Address of TLB slot. + * + * Flushes TLB slot @slot. + */ +static inline void __flush_tlb_slot(unsigned long long slot) +{ + __asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot)); +} + +/* arch/sh64/mm/tlb.c */ +extern int sh64_tlb_init(void); +extern unsigned long long sh64_next_free_dtlb_entry(void); +extern unsigned long long sh64_get_wired_dtlb_entry(void); +extern int sh64_put_wired_dtlb_entry(unsigned long long entry); + +extern void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, unsigned long asid, unsigned long paddr); +extern void sh64_teardown_tlb_slot(unsigned long long config_addr); + +#define tlb_start_vma(tlb, vma) \ + flush_cache_range(vma, vma->vm_start, vma->vm_end) + +#define tlb_end_vma(tlb, vma) \ + flush_tlb_range(vma, vma->vm_start, vma->vm_end) + +#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) + +/* + * Flush whole TLBs for MM + */ +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) + +#include + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_SH64_TLB_H */ + diff --git a/include/asm-sh64/tlbflush.h b/include/asm-sh64/tlbflush.h new file mode 100644 index 000000000..15c0719ee --- /dev/null +++ b/include/asm-sh64/tlbflush.h @@ -0,0 +1,31 @@ +#ifndef __ASM_SH64_TLBFLUSH_H +#define __ASM_SH64_TLBFLUSH_H + +#include + +/* + * TLB flushing: + * + * - flush_tlb() flushes the current mm struct TLBs + * - flush_tlb_all() flushes all processes TLBs + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - flush_tlb_range(mm, start, end) flushes a range of pages + * + */ + +extern void flush_tlb(void); +extern void flush_tlb_all(void); +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page); +extern inline void flush_tlb_pgtables(struct mm_struct *mm, + unsigned long start, unsigned long end) +{ +} + +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); + +#endif /* __ASM_SH64_TLBFLUSH_H */ + diff --git a/include/asm-sh64/topology.h b/include/asm-sh64/topology.h new file mode 100644 index 000000000..342117873 --- /dev/null +++ b/include/asm-sh64/topology.h @@ -0,0 +1,6 @@ +#ifndef __ASM_SH64_TOPOLOGY_H +#define __ASM_SH64_TOPOLOGY_H + +#include + +#endif /* __ASM_SH64_TOPOLOGY_H */ diff --git 
a/include/asm-sh64/types.h b/include/asm-sh64/types.h new file mode 100644 index 000000000..41d4d2f82 --- /dev/null +++ b/include/asm-sh64/types.h @@ -0,0 +1,76 @@ +#ifndef __ASM_SH64_TYPES_H +#define __ASM_SH64_TYPES_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/types.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +#ifndef __ASSEMBLY__ + +typedef unsigned short umode_t; + +/* + * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the + * header files exported to user space + */ + +typedef __signed__ char __s8; +typedef unsigned char __u8; + +typedef __signed__ short __s16; +typedef unsigned short __u16; + +typedef __signed__ int __s32; +typedef unsigned int __u32; + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +typedef __signed__ long long __s64; +typedef unsigned long long __u64; +#endif + +#endif /* __ASSEMBLY__ */ + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +typedef __signed__ char s8; +typedef unsigned char u8; + +typedef __signed__ short s16; +typedef unsigned short u16; + +typedef __signed__ int s32; +typedef unsigned int u32; + +typedef __signed__ long long s64; +typedef unsigned long long u64; + +/* DMA addresses come in generic and 64-bit flavours. */ + +#ifdef CONFIG_HIGHMEM64G +typedef u64 dma_addr_t; +#else +typedef u32 dma_addr_t; +#endif +typedef u64 dma64_addr_t; + +typedef unsigned int kmem_bufctl_t; + +#endif /* __ASSEMBLY__ */ + +#define BITS_PER_LONG 32 + +#endif /* __KERNEL__ */ + +#endif /* __ASM_SH64_TYPES_H */ diff --git a/include/asm-sh64/uaccess.h b/include/asm-sh64/uaccess.h new file mode 100644 index 000000000..588065c63 --- /dev/null +++ b/include/asm-sh64/uaccess.h @@ -0,0 +1,320 @@ +#ifndef __ASM_SH64_UACCESS_H +#define __ASM_SH64_UACCESS_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/uaccess.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * Copyright (C) 2003, 2004 Paul Mundt + * + * User space memory access functions + * + * Copyright (C) 1999 Niibe Yutaka + * + * Based on: + * MIPS implementation version 1.15 by + * Copyright (C) 1996, 1997, 1998 by Ralf Baechle + * and i386 version. + * + */ + +#include +#include + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +/* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with + * get_fs() == KERNEL_DS, checking is bypassed. + * + * For historical reasons (Data Segment Register?), these macros are misnamed. + */ + +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) + +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF) +#define USER_DS MAKE_MM_SEG(0x80000000) + +#define get_ds() (KERNEL_DS) +#define get_fs() (current_thread_info()->addr_limit) +#define set_fs(x) (current_thread_info()->addr_limit=(x)) + +#define segment_eq(a,b) ((a).seg == (b).seg) + +#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg)) + +/* + * Uhhuh, this needs 33-bit arithmetic. We have a carry.. + * + * sum := addr + size; carry? 
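/*
 * The comment above describes the whole test: a user access passes only if
 * addr + size stays below the current address limit (USER_DS, i.e.
 * 0x80000000 in this port).  A host-runnable restatement of that check:
 */
#include <stdio.h>

#define DEMO_USER_DS	0x80000000UL

static int demo_range_ok(unsigned long addr, unsigned long size)
{
	return (addr + size < DEMO_USER_DS) ? 0 : 1;	/* 0 means OK */
}

int main(void)
{
	printf("0x10000000 + 4096: %s\n",
	       demo_range_ok(0x10000000UL, 4096) ? "rejected" : "ok");
	printf("0x7ffffff0 + 4096: %s\n",
	       demo_range_ok(0x7ffffff0UL, 4096) ? "rejected" : "ok");
	return 0;
}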
--> flag = true; + * if (sum >= addr_limit) flag = true; + */ +#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1) + +#define access_ok(type,addr,size) (__range_ok(addr,size) == 0) +#define __access_ok(addr,size) (__range_ok(addr,size) == 0) + +extern inline int verify_area(int type, const void __user * addr, unsigned long size) +{ + return access_ok(type,addr,size) ? 0 : -EFAULT; +} + +/* + * Uh, these should become the main single-value transfer routines ... + * They automatically use the right size if we just have the right + * pointer type ... + * + * As MIPS uses the same address space for kernel and user data, we + * can just do these as direct assignments. + * + * Careful to not + * (a) re-use the arguments for side effects (sizeof is ok) + * (b) require any knowledge of processes at this stage + */ +#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr))) +#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr))) + +/* + * The "__xxx" versions do not do address space checking, useful when + * doing multiple accesses to the same area (the user has to do the + * checks by hand with "access_ok()") + */ +#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr))) + +/* + * The "xxx_ret" versions return constant specified in third argument, if + * something bad happens. These macros can be optimized for the + * case of just returning from the function xxx_ret is used. + */ + +#define put_user_ret(x,ptr,ret) ({ \ +if (put_user(x,ptr)) return ret; }) + +#define get_user_ret(x,ptr,ret) ({ \ +if (get_user(x,ptr)) return ret; }) + +#define __put_user_ret(x,ptr,ret) ({ \ +if (__put_user(x,ptr)) return ret; }) + +#define __get_user_ret(x,ptr,ret) ({ \ +if (__get_user(x,ptr)) return ret; }) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct *)(x)) + +#define __get_user_size(x,ptr,size,retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: \ + retval = __get_user_asm_b(x, ptr); \ + break; \ + case 2: \ + retval = __get_user_asm_w(x, ptr); \ + break; \ + case 4: \ + retval = __get_user_asm_l(x, ptr); \ + break; \ + case 8: \ + retval = __get_user_asm_q(x, ptr); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ +} while (0) + +#define __get_user_nocheck(x,ptr,size) \ +({ \ + long __gu_addr = (long)(ptr); \ + long __gu_err; \ + __typeof(*(ptr)) __gu_val; \ + __asm__ ("":"=r" (__gu_val)); \ + __asm__ ("":"=r" (__gu_err)); \ + __get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x,ptr,size) \ +({ \ + long __gu_addr = (long)(ptr); \ + long __gu_err = -EFAULT; \ + __typeof(*(ptr)) __gu_val; \ + __asm__ ("":"=r" (__gu_val)); \ + __asm__ ("":"=r" (__gu_err)); \ + if (__access_ok(__gu_addr, (size))) \ + __get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +extern long __get_user_asm_b(void *, long); +extern long __get_user_asm_w(void *, long); +extern long __get_user_asm_l(void *, long); +extern long __get_user_asm_q(void *, long); +extern void __get_user_unknown(void); + +#define __put_user_size(x,ptr,size,retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: \ + retval = __put_user_asm_b(x, ptr); \ + break; \ + case 2: \ + retval = __put_user_asm_w(x, ptr); \ + break; \ + case 4: \ + 
retval = __put_user_asm_l(x, ptr); \ + break; \ + case 8: \ + retval = __put_user_asm_q(x, ptr); \ + break; \ + default: \ + __put_user_unknown(); \ + } \ +} while (0) + +#define __put_user_nocheck(x,ptr,size) \ +({ \ + long __pu_err; \ + __typeof__(*(ptr)) __pu_val = (x); \ + __put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \ + __pu_err; \ +}) + +#define __put_user_check(x,ptr,size) \ +({ \ + long __pu_err = -EFAULT; \ + long __pu_addr = (long)(ptr); \ + __typeof__(*(ptr)) __pu_val = (x); \ + \ + if (__access_ok(__pu_addr, (size))) \ + __put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err);\ + __pu_err; \ +}) + +extern long __put_user_asm_b(void *, long); +extern long __put_user_asm_w(void *, long); +extern long __put_user_asm_l(void *, long); +extern long __put_user_asm_q(void *, long); +extern void __put_user_unknown(void); + + +/* Generic arbitrary sized copy. */ +/* Return the number of bytes NOT copied */ +/* XXX: should be such that: 4byte and the rest. */ +extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n); + +#define copy_to_user(to,from,n) ({ \ +void *__copy_to = (void *) (to); \ +__kernel_size_t __copy_size = (__kernel_size_t) (n); \ +__kernel_size_t __copy_res; \ +if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \ +__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \ +} else __copy_res = __copy_size; \ +__copy_res; }) + +#define copy_to_user_ret(to,from,n,retval) ({ \ +if (copy_to_user(to,from,n)) \ + return retval; \ +}) + +#define __copy_to_user(to,from,n) \ + __copy_user((void *)(to), \ + (void *)(from), n) + +#define __copy_to_user_ret(to,from,n,retval) ({ \ +if (__copy_to_user(to,from,n)) \ + return retval; \ +}) + +#define copy_from_user(to,from,n) ({ \ +void *__copy_to = (void *) (to); \ +void *__copy_from = (void *) (from); \ +__kernel_size_t __copy_size = (__kernel_size_t) (n); \ +__kernel_size_t __copy_res; \ +if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \ +__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \ +} else __copy_res = __copy_size; \ +__copy_res; }) + +#define copy_from_user_ret(to,from,n,retval) ({ \ +if (copy_from_user(to,from,n)) \ + return retval; \ +}) + +#define __copy_from_user(to,from,n) \ + __copy_user((void *)(to), \ + (void *)(from), n) + +#define __copy_from_user_ret(to,from,n,retval) ({ \ +if (__copy_from_user(to,from,n)) \ + return retval; \ +}) + +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + +/* XXX: Not sure it works well.. + should be such that: 4byte clear and the rest. */ +extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size); + +#define clear_user(addr,n) ({ \ +void * __cl_addr = (addr); \ +unsigned long __cl_size = (n); \ +if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \ +__cl_size = __clear_user(__cl_addr, __cl_size); \ +__cl_size; }) + +extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count); + +#define strncpy_from_user(dest,src,count) ({ \ +unsigned long __sfu_src = (unsigned long) (src); \ +int __sfu_count = (int) (count); \ +long __sfu_res = -EFAULT; \ +if(__access_ok(__sfu_src, __sfu_count)) { \ +__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \ +} __sfu_res; }) + +#define strlen_user(str) strnlen_user(str, ~0UL >> 1) + +/* + * Return the size of a string (including the ending 0!) 
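/*
 * Sketch of the usual driver-side pattern for the copy_{to,from}_user()
 * helpers defined above: they return the number of bytes NOT copied, so
 * any non-zero result becomes -EFAULT.  The structure and function names
 * are invented for the illustration.
 */
#include <linux/errno.h>
#include <asm/uaccess.h>

struct demo_config {
	int rate;
	int flags;
};

static int demo_get_config(void __user *arg, const struct demo_config *cfg)
{
	if (copy_to_user(arg, cfg, sizeof(*cfg)))
		return -EFAULT;
	return 0;
}

static int demo_set_config(struct demo_config *cfg, void __user *arg)
{
	if (copy_from_user(cfg, arg, sizeof(*cfg)))
		return -EFAULT;
	return 0;
}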
+ */ +extern long __strnlen_user(const char *__s, long __n); + +extern __inline__ long strnlen_user(const char *s, long n) +{ + if (!__addr_ok(s)) + return 0; + else + return __strnlen_user(s, n); +} + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + +#define ARCH_HAS_SEARCH_EXTABLE + +/* If gcc inlines memset, it will use st.q instructions. Therefore, we need + kmalloc allocations to be 8-byte aligned. Without this, the alignment + becomes BYTE_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on + sh64 at the moment). */ +#define ARCH_KMALLOC_MINALIGN 8 + +/* Returns 0 if exception not found and fixup.unit otherwise. */ +extern unsigned long search_exception_table(unsigned long addr); +extern const struct exception_table_entry *search_exception_tables (unsigned long addr); + +#endif /* __ASM_SH64_UACCESS_H */ diff --git a/include/asm-sh64/ucontext.h b/include/asm-sh64/ucontext.h new file mode 100644 index 000000000..cf77a0855 --- /dev/null +++ b/include/asm-sh64/ucontext.h @@ -0,0 +1,23 @@ +#ifndef __ASM_SH64_UCONTEXT_H +#define __ASM_SH64_UCONTEXT_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/ucontext.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + struct sigcontext uc_mcontext; + sigset_t uc_sigmask; /* mask last for extensibility */ +}; + +#endif /* __ASM_SH64_UCONTEXT_H */ diff --git a/include/asm-sh64/unaligned.h b/include/asm-sh64/unaligned.h new file mode 100644 index 000000000..ad2248708 --- /dev/null +++ b/include/asm-sh64/unaligned.h @@ -0,0 +1,28 @@ +#ifndef __ASM_SH64_UNALIGNED_H +#define __ASM_SH64_UNALIGNED_H + +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * include/asm-sh64/unaligned.h + * + * Copyright (C) 2000, 2001 Paolo Alberelli + * + */ + +#include + + +/* Use memmove here, so gcc does not insert a __builtin_memcpy. */ + +#define get_unaligned(ptr) \ + ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) + +#define put_unaligned(val, ptr) \ + ({ __typeof__(*(ptr)) __tmp = (val); \ + memmove((ptr), &__tmp, sizeof(*(ptr))); \ + (void)0; }) + +#endif /* __ASM_SH64_UNALIGNED_H */ diff --git a/include/asm-sparc64/cmt.h b/include/asm-sparc64/cmt.h new file mode 100644 index 000000000..870db5928 --- /dev/null +++ b/include/asm-sparc64/cmt.h @@ -0,0 +1,59 @@ +#ifndef _SPARC64_CMT_H +#define _SPARC64_CMT_H + +/* cmt.h: Chip Multi-Threading register definitions + * + * Copyright (C) 2004 David S. 
Miller (davem@redhat.com) + */ + +/* ASI_CORE_ID - private */ +#define LP_ID 0x0000000000000010UL +#define LP_ID_MAX 0x00000000003f0000UL +#define LP_ID_ID 0x000000000000003fUL + +/* ASI_INTR_ID - private */ +#define LP_INTR_ID 0x0000000000000000UL +#define LP_INTR_ID_ID 0x00000000000003ffUL + +/* ASI_CESR_ID - private */ +#define CESR_ID 0x0000000000000040UL +#define CESR_ID_ID 0x00000000000000ffUL + +/* ASI_CORE_AVAILABLE - shared */ +#define LP_AVAIL 0x0000000000000000UL +#define LP_AVAIL_1 0x0000000000000002UL +#define LP_AVAIL_0 0x0000000000000001UL + +/* ASI_CORE_ENABLE_STATUS - shared */ +#define LP_ENAB_STAT 0x0000000000000010UL +#define LP_ENAB_STAT_1 0x0000000000000002UL +#define LP_ENAB_STAT_0 0x0000000000000001UL + +/* ASI_CORE_ENABLE - shared */ +#define LP_ENAB 0x0000000000000020UL +#define LP_ENAB_1 0x0000000000000002UL +#define LP_ENAB_0 0x0000000000000001UL + +/* ASI_CORE_RUNNING - shared */ +#define LP_RUNNING_RW 0x0000000000000050UL +#define LP_RUNNING_W1S 0x0000000000000060UL +#define LP_RUNNING_W1C 0x0000000000000068UL +#define LP_RUNNING_1 0x0000000000000002UL +#define LP_RUNNING_0 0x0000000000000001UL + +/* ASI_CORE_RUNNING_STAT - shared */ +#define LP_RUN_STAT 0x0000000000000058UL +#define LP_RUN_STAT_1 0x0000000000000002UL +#define LP_RUN_STAT_0 0x0000000000000001UL + +/* ASI_XIR_STEERING - shared */ +#define LP_XIR_STEER 0x0000000000000030UL +#define LP_XIR_STEER_1 0x0000000000000002UL +#define LP_XIR_STEER_0 0x0000000000000001UL + +/* ASI_CMT_ERROR_STEERING - shared */ +#define CMT_ER_STEER 0x0000000000000040UL +#define CMT_ER_STEER_1 0x0000000000000002UL +#define CMT_ER_STEER_0 0x0000000000000001UL + +#endif /* _SPARC64_CMT_H */ diff --git a/include/asm-um/setup.h b/include/asm-um/setup.h new file mode 100644 index 000000000..e5787bb80 --- /dev/null +++ b/include/asm-um/setup.h @@ -0,0 +1,6 @@ +#ifndef SETUP_H_INCLUDED +#define SETUP_H_INCLUDED + +#define COMMAND_LINE_SIZE 512 + +#endif /* SETUP_H_INCLUDED */ diff --git a/include/asm-v850/setup.h b/include/asm-v850/setup.h new file mode 100644 index 000000000..c48a9b97d --- /dev/null +++ b/include/asm-v850/setup.h @@ -0,0 +1,6 @@ +#ifndef _V850_SETUP_H +#define _V850_SETUP_H + +#define COMMAND_LINE_SIZE 512 + +#endif /* __SETUP_H */ diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h new file mode 100644 index 000000000..f52696a1f --- /dev/null +++ b/include/linux/crc-ccitt.h @@ -0,0 +1,15 @@ +#ifndef _LINUX_CRC_CCITT_H +#define _LINUX_CRC_CCITT_H + +#include + +extern u16 const crc_ccitt_table[256]; + +extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len); + +static inline u16 crc_ccitt_byte(u16 crc, const u8 c) +{ + return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; +} + +#endif /* _LINUX_CRC_CCITT_H */ diff --git a/include/linux/ds1286.h b/include/linux/ds1286.h new file mode 100644 index 000000000..d8989860e --- /dev/null +++ b/include/linux/ds1286.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 1998, 1999, 2003 Ralf Baechle + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
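/*
 * Self-contained reference for the crc_ccitt() helpers above.  The table
 * is regenerated here from the reflected CCITT polynomial 0x8408 that
 * lib/crc-ccitt.c is built from; real users simply link against crc_ccitt().
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint16_t demo_table[256];

static void demo_build_table(void)
{
	int i, bit;

	for (i = 0; i < 256; i++) {
		uint16_t crc = i;

		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x8408 : 0);
		demo_table[i] = crc;
	}
}

/* Same shape as crc_ccitt_byte()/crc_ccitt() in the header. */
static uint16_t demo_crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--)
		crc = (crc >> 8) ^ demo_table[(crc ^ *buf++) & 0xff];
	return crc;
}

int main(void)
{
	const char *msg = "123456789";

	demo_build_table();
	/* With the conventional 0xffff seed this prints crc = 0x6f91. */
	printf("crc = 0x%04x\n",
	       demo_crc_ccitt(0xffff, (const uint8_t *)msg, strlen(msg)));
	return 0;
}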
+ */ +#ifndef __LINUX_DS1286_H +#define __LINUX_DS1286_H + +#include + +/********************************************************************** + * register summary + **********************************************************************/ +#define RTC_HUNDREDTH_SECOND 0 +#define RTC_SECONDS 1 +#define RTC_MINUTES 2 +#define RTC_MINUTES_ALARM 3 +#define RTC_HOURS 4 +#define RTC_HOURS_ALARM 5 +#define RTC_DAY 6 +#define RTC_DAY_ALARM 7 +#define RTC_DATE 8 +#define RTC_MONTH 9 +#define RTC_YEAR 10 +#define RTC_CMD 11 +#define RTC_WHSEC 12 +#define RTC_WSEC 13 +#define RTC_UNUSED 14 + +/* RTC_*_alarm is always true if 2 MSBs are set */ +# define RTC_ALARM_DONT_CARE 0xC0 + + +/* + * Bits in the month register + */ +#define RTC_EOSC 0x80 +#define RTC_ESQW 0x40 + +/* + * Bits in the Command register + */ +#define RTC_TDF 0x01 +#define RTC_WAF 0x02 +#define RTC_TDM 0x04 +#define RTC_WAM 0x08 +#define RTC_PU_LVL 0x10 +#define RTC_IBH_LO 0x20 +#define RTC_IPSW 0x40 +#define RTC_TE 0x80 + +#endif /* __LINUX_DS1286_H */ diff --git a/include/linux/hpet.h b/include/linux/hpet.h new file mode 100644 index 000000000..9ab04e988 --- /dev/null +++ b/include/linux/hpet.h @@ -0,0 +1,133 @@ +#ifndef __HPET__ +#define __HPET__ 1 + +/* + * Offsets into HPET Registers + */ + +struct hpet { + u64 hpet_cap; /* capabilities */ + u64 res0; /* reserved */ + u64 hpet_config; /* configuration */ + u64 res1; /* reserved */ + u64 hpet_isr; /* interrupt status reg */ + u64 res2[25]; /* reserved */ + union { /* main counter */ + u64 _hpet_mc64; + u32 _hpet_mc32; + unsigned long _hpet_mc; + } _u0; + u64 res3; /* reserved */ + struct hpet_timer { + u64 hpet_config; /* configuration/cap */ + union { /* timer compare register */ + u64 _hpet_hc64; + u32 _hpet_hc32; + unsigned long _hpet_compare; + } _u1; + u64 hpet_fsb[2]; /* FSB route */ + } hpet_timers[1]; +}; + +#define hpet_mc _u0._hpet_mc +#define hpet_compare _u1._hpet_compare + +#define HPET_MAX_TIMERS (32) + +/* + * HPET general capabilities register + */ + +#define HPET_COUNTER_CLK_PERIOD_MASK (0xffffffff00000000ULL) +#define HPET_COUNTER_CLK_PERIOD_SHIFT (32UL) +#define HPET_VENDOR_ID_MASK (0x00000000ffff0000ULL) +#define HPET_VENDOR_ID_SHIFT (16ULL) +#define HPET_LEG_RT_CAP_MASK (0x8000) +#define HPET_COUNTER_SIZE_MASK (0x2000) +#define HPET_NUM_TIM_CAP_MASK (0x1f00) +#define HPET_NUM_TIM_CAP_SHIFT (8ULL) + +/* + * HPET general configuration register + */ + +#define HPET_LEG_RT_CNF_MASK (2UL) +#define HPET_ENABLE_CNF_MASK (1UL) + +/* + * HPET interrupt status register + */ + +#define HPET_ISR_CLEAR(HPET, TIMER) \ + (HPET)->hpet_isr |= (1UL << TIMER) + +/* + * Timer configuration register + */ + +#define Tn_INT_ROUTE_CAP_MASK (0xffffffff00000000ULL) +#define Tn_INI_ROUTE_CAP_SHIFT (32UL) +#define Tn_FSB_INT_DELCAP_MASK (0x8000UL) +#define Tn_FSB_INT_DELCAP_SHIFT (15) +#define Tn_FSB_EN_CNF_MASK (0x4000UL) +#define Tn_FSB_EN_CNF_SHIFT (14) +#define Tn_INT_ROUTE_CNF_MASK (0x3e00UL) +#define Tn_INT_ROUTE_CNF_SHIFT (9) +#define Tn_32MODE_CNF_MASK (0x0100UL) +#define Tn_VAL_SET_CNF_MASK (0x0040UL) +#define Tn_SIZE_CAP_MASK (0x0020UL) +#define Tn_PER_INT_CAP_MASK (0x0010UL) +#define Tn_TYPE_CNF_MASK (0x0008UL) +#define Tn_INT_ENB_CNF_MASK (0x0004UL) +#define Tn_INT_TYPE_CNF_MASK (0x0002UL) + +/* + * Timer FSB Interrupt Route Register + */ + +#define Tn_FSB_INT_ADDR_MASK (0xffffffff00000000ULL) +#define Tn_FSB_INT_ADDR_SHIFT (32UL) +#define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL) + +struct hpet_info { + unsigned long hi_ireqfreq; /* Hz */ + unsigned long hi_flags; /* 
information */ + unsigned short hi_hpet; + unsigned short hi_timer; +}; + +#define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */ + +#define HPET_IE_ON _IO('h', 0x01) /* interrupt on */ +#define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */ +#define HPET_INFO _IOR('h', 0x03, struct hpet_info) +#define HPET_EPI _IO('h', 0x04) /* enable periodic */ +#define HPET_DPI _IO('h', 0x05) /* disable periodic */ +#define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */ + +/* + * exported interfaces + */ + +struct hpet_task { + void (*ht_func) (void *); + void *ht_data; + void *ht_opaque; +}; + +struct hpet_data { + unsigned long hd_address; + unsigned short hd_nirqs; + unsigned short hd_flags; + unsigned int hd_state; /* timer allocated */ + unsigned int hd_irq[HPET_MAX_TIMERS]; +}; + +#define HPET_DATA_PLATFORM 0x0001 /* platform call to hpet_alloc */ + +int hpet_alloc(struct hpet_data *); +int hpet_register(struct hpet_task *, int); +int hpet_unregister(struct hpet_task *); +int hpet_control(struct hpet_task *, unsigned int, unsigned long); + +#endif /* !__HPET__ */ diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h new file mode 100644 index 000000000..d522d43d4 --- /dev/null +++ b/include/linux/mtd/physmap.h @@ -0,0 +1,61 @@ +/* + * For boards with physically mapped flash and using + * drivers/mtd/maps/physmap.c mapping driver. + * + * $Id: physmap.h,v 1.2 2004/07/14 17:48:46 dwmw2 Exp $ + * + * Copyright (C) 2003 MontaVista Software Inc. + * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __LINUX_MTD_PHYSMAP__ + +#include + +#if defined(CONFIG_MTD_PHYSMAP) + +#include +#include +#include + +/* + * The map_info for physmap. Board can override size, buswidth, phys, + * (*set_vpp)(), etc in their initial setup routine. + */ +extern struct map_info physmap_map; + +/* + * Board needs to specify the exact mapping during their setup time. + */ +static inline void physmap_configure(unsigned long addr, unsigned long size, int buswidth, void (*set_vpp)(struct map_info *, int) ) +{ + physmap_map.phys = addr; + physmap_map.size = size; + physmap_map.buswidth = buswidth; + physmap_map.set_vpp = set_vpp; +} + +#if defined(CONFIG_MTD_PARTITIONS) + +/* + * Machines that wish to do flash partition may want to call this function in + * their setup routine. + * + * physmap_set_partitions(mypartitions, num_parts); + * + * Note that one can always override this hard-coded partition with + * command line partition (you need to enable CONFIG_MTD_CMDLINE_PARTS). 
+ */ +void physmap_set_partitions(struct mtd_partition *parts, int num_parts); + +#endif /* defined(CONFIG_MTD_PARTITIONS) */ +#endif /* defined(CONFIG_MTD) */ + +#endif /* __LINUX_MTD_PHYSMAP__ */ + diff --git a/include/linux/netfilter_ipv4/ipt_addrtype.h b/include/linux/netfilter_ipv4/ipt_addrtype.h new file mode 100644 index 000000000..166ed01a8 --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_addrtype.h @@ -0,0 +1,11 @@ +#ifndef _IPT_ADDRTYPE_H +#define _IPT_ADDRTYPE_H + +struct ipt_addrtype_info { + u_int16_t source; /* source-type mask */ + u_int16_t dest; /* dest-type mask */ + u_int32_t invert_source; + u_int32_t invert_dest; +}; + +#endif diff --git a/include/linux/netfilter_ipv4/ipt_realm.h b/include/linux/netfilter_ipv4/ipt_realm.h new file mode 100644 index 000000000..a4d669872 --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_realm.h @@ -0,0 +1,10 @@ +#ifndef _IPT_REALM_H +#define _IPT_REALM_H + +struct ipt_realm_info { + u_int32_t id; + u_int32_t mask; + u_int8_t invert; +}; + +#endif /* _IPT_REALM_H */ diff --git a/include/mtd/inftl-user.h b/include/mtd/inftl-user.h new file mode 100644 index 000000000..bda4f2c8f --- /dev/null +++ b/include/mtd/inftl-user.h @@ -0,0 +1,91 @@ +/* + * $Id: inftl-user.h,v 1.1 2004/05/05 15:17:00 dwmw2 Exp $ + * + * Parts of INFTL headers shared with userspace + * + */ + +#ifndef __MTD_INFTL_USER_H__ +#define __MTD_INFTL_USER_H__ + +#define OSAK_VERSION 0x5120 +#define PERCENTUSED 98 + +#define SECTORSIZE 512 + +/* Block Control Information */ + +struct inftl_bci { + uint8_t ECCsig[6]; + uint8_t Status; + uint8_t Status1; +} __attribute__((packed)); + +struct inftl_unithead1 { + uint16_t virtualUnitNo; + uint16_t prevUnitNo; + uint8_t ANAC; + uint8_t NACs; + uint8_t parityPerField; + uint8_t discarded; +} __attribute__((packed)); + +struct inftl_unithead2 { + uint8_t parityPerField; + uint8_t ANAC; + uint16_t prevUnitNo; + uint16_t virtualUnitNo; + uint8_t NACs; + uint8_t discarded; +} __attribute__((packed)); + +struct inftl_unittail { + uint8_t Reserved[4]; + uint16_t EraseMark; + uint16_t EraseMark1; +} __attribute__((packed)); + +union inftl_uci { + struct inftl_unithead1 a; + struct inftl_unithead2 b; + struct inftl_unittail c; +}; + +struct inftl_oob { + struct inftl_bci b; + union inftl_uci u; +}; + + +/* INFTL Media Header */ + +struct INFTLPartition { + __u32 virtualUnits; + __u32 firstUnit; + __u32 lastUnit; + __u32 flags; + __u32 spareUnits; + __u32 Reserved0; + __u32 Reserved1; +} __attribute__((packed)); + +struct INFTLMediaHeader { + char bootRecordID[8]; + __u32 NoOfBootImageBlocks; + __u32 NoOfBinaryPartitions; + __u32 NoOfBDTLPartitions; + __u32 BlockMultiplierBits; + __u32 FormatFlags; + __u32 OsakVersion; + __u32 PercentUsed; + struct INFTLPartition Partitions[4]; +} __attribute__((packed)); + +/* Partition flag types */ +#define INFTL_BINARY 0x20000000 +#define INFTL_BDTL 0x40000000 +#define INFTL_LAST 0x80000000 + +#endif /* __MTD_INFTL_USER_H__ */ + + diff --git a/include/mtd/jffs2-user.h b/include/mtd/jffs2-user.h new file mode 100644 index 000000000..d508ef0ae --- /dev/null +++ b/include/mtd/jffs2-user.h @@ -0,0 +1,35 @@ +/* + * $Id: jffs2-user.h,v 1.1 2004/05/05 11:57:54 dwmw2 Exp $ + * + * JFFS2 definitions for use in user space only + */ + +#ifndef __JFFS2_USER_H__ +#define __JFFS2_USER_H__ + +/* This file is blessed for inclusion by userspace */ +#include +#include +#include + +#undef cpu_to_je16 +#undef cpu_to_je32 +#undef cpu_to_jemode +#undef je16_to_cpu +#undef je32_to_cpu +#undef jemode_to_cpu + +extern int 
target_endian; + +#define t16(x) ({ uint16_t __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_16(__b); }) +#define t32(x) ({ uint32_t __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_32(__b); }) + +#define cpu_to_je16(x) ((jint16_t){t16(x)}) +#define cpu_to_je32(x) ((jint32_t){t32(x)}) +#define cpu_to_jemode(x) ((jmode_t){t32(x)}) + +#define je16_to_cpu(x) (t16((x).v16)) +#define je32_to_cpu(x) (t32((x).v32)) +#define jemode_to_cpu(x) (t32((x).m)) + +#endif /* __JFFS2_USER_H__ */ diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h new file mode 100644 index 000000000..db5fca2fa --- /dev/null +++ b/include/mtd/mtd-abi.h @@ -0,0 +1,97 @@ +/* + * $Id: mtd-abi.h,v 1.5 2004/06/22 09:29:35 gleixner Exp $ + * + * Portions of MTD ABI definition which are shared by kernel and user space + */ + +#ifndef __MTD_ABI_H__ +#define __MTD_ABI_H__ + +struct erase_info_user { + uint32_t start; + uint32_t length; +}; + +struct mtd_oob_buf { + uint32_t start; + uint32_t length; + unsigned char *ptr; +}; + +#define MTD_ABSENT 0 +#define MTD_RAM 1 +#define MTD_ROM 2 +#define MTD_NORFLASH 3 +#define MTD_NANDFLASH 4 +#define MTD_PEROM 5 +#define MTD_OTHER 14 +#define MTD_UNKNOWN 15 + +#define MTD_CLEAR_BITS 1 // Bits can be cleared (flash) +#define MTD_SET_BITS 2 // Bits can be set +#define MTD_ERASEABLE 4 // Has an erase function +#define MTD_WRITEB_WRITEABLE 8 // Direct IO is possible +#define MTD_VOLATILE 16 // Set for RAMs +#define MTD_XIP 32 // eXecute-In-Place possible +#define MTD_OOB 64 // Out-of-band data (NAND flash) +#define MTD_ECC 128 // Device capable of automatic ECC + +// Some common devices / combinations of capabilities +#define MTD_CAP_ROM 0 +#define MTD_CAP_RAM (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEB_WRITEABLE) +#define MTD_CAP_NORFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE) +#define MTD_CAP_NANDFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE|MTD_OOB) +#define MTD_WRITEABLE (MTD_CLEAR_BITS|MTD_SET_BITS) + + +// Types of automatic ECC/Checksum available +#define MTD_ECC_NONE 0 // No automatic ECC available +#define MTD_ECC_RS_DiskOnChip 1 // Automatic ECC on DiskOnChip +#define MTD_ECC_SW 2 // SW ECC for Toshiba & Samsung devices + +/* ECC byte placement */ +#define MTD_NANDECC_OFF 0 // Switch off ECC (Not recommended) +#define MTD_NANDECC_PLACE 1 // Use the given placement in the structure (YAFFS1 legacy mode) +#define MTD_NANDECC_AUTOPLACE 2 // Use the default placement scheme +#define MTD_NANDECC_PLACEONLY 3 // Use the given placement in the structure (Do not store ecc result on read) + +struct mtd_info_user { + uint8_t type; + uint32_t flags; + uint32_t size; // Total size of the MTD + uint32_t erasesize; + uint32_t oobblock; // Size of OOB blocks (e.g. 512) + uint32_t oobsize; // Amount of OOB data per block (e.g. 
16) + uint32_t ecctype; + uint32_t eccsize; +}; + +struct region_info_user { + uint32_t offset; /* At which this region starts, + * from the beginning of the MTD */ + uint32_t erasesize; /* For this region */ + uint32_t numblocks; /* Number of blocks in this region */ + uint32_t regionindex; +}; + +#define MEMGETINFO _IOR('M', 1, struct mtd_info_user) +#define MEMERASE _IOW('M', 2, struct erase_info_user) +#define MEMWRITEOOB _IOWR('M', 3, struct mtd_oob_buf) +#define MEMREADOOB _IOWR('M', 4, struct mtd_oob_buf) +#define MEMLOCK _IOW('M', 5, struct erase_info_user) +#define MEMUNLOCK _IOW('M', 6, struct erase_info_user) +#define MEMGETREGIONCOUNT _IOR('M', 7, int) +#define MEMGETREGIONINFO _IOWR('M', 8, struct region_info_user) +#define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo) +#define MEMGETOOBSEL _IOR('M', 10, struct nand_oobinfo) +#define MEMGETBADBLOCK _IOW('M', 11, loff_t) +#define MEMSETBADBLOCK _IOW('M', 12, loff_t) + +struct nand_oobinfo { + uint32_t useecc; + uint32_t eccbytes; + uint32_t oobfree[8][2]; + uint32_t eccpos[32]; +}; + +#endif /* __MTD_ABI_H__ */ diff --git a/include/mtd/nftl-user.h b/include/mtd/nftl-user.h new file mode 100644 index 000000000..924ec0459 --- /dev/null +++ b/include/mtd/nftl-user.h @@ -0,0 +1,76 @@ +/* + * $Id: nftl-user.h,v 1.1 2004/05/05 14:44:57 dwmw2 Exp $ + * + * Parts of NFTL headers shared with userspace + * + */ + +#ifndef __MTD_NFTL_USER_H__ +#define __MTD_NFTL_USER_H__ + +/* Block Control Information */ + +struct nftl_bci { + unsigned char ECCSig[6]; + uint8_t Status; + uint8_t Status1; +}__attribute__((packed)); + +/* Unit Control Information */ + +struct nftl_uci0 { + uint16_t VirtUnitNum; + uint16_t ReplUnitNum; + uint16_t SpareVirtUnitNum; + uint16_t SpareReplUnitNum; +} __attribute__((packed)); + +struct nftl_uci1 { + uint32_t WearInfo; + uint16_t EraseMark; + uint16_t EraseMark1; +} __attribute__((packed)); + +struct nftl_uci2 { + uint16_t FoldMark; + uint16_t FoldMark1; + uint32_t unused; +} __attribute__((packed)); + +union nftl_uci { + struct nftl_uci0 a; + struct nftl_uci1 b; + struct nftl_uci2 c; +}; + +struct nftl_oob { + struct nftl_bci b; + union nftl_uci u; +}; + +/* NFTL Media Header */ + +struct NFTLMediaHeader { + char DataOrgID[6]; + uint16_t NumEraseUnits; + uint16_t FirstPhysicalEUN; + uint32_t FormattedSize; + unsigned char UnitSizeFactor; +} __attribute__((packed)); + +#define MAX_ERASE_ZONES (8192 - 512) + +#define ERASE_MARK 0x3c69 +#define SECTOR_FREE 0xff +#define SECTOR_USED 0x55 +#define SECTOR_IGNORE 0x11 +#define SECTOR_DELETED 0x00 + +#define FOLD_MARK_IN_PROGRESS 0x5555 + +#define ZONE_GOOD 0xff +#define ZONE_BAD_ORIGINAL 0 +#define ZONE_BAD_MARKED 7 + + +#endif /* __MTD_NFTL_USER_H__ */ diff --git a/net/bluetooth/hidp/Makefile b/net/bluetooth/hidp/Makefile new file mode 100644 index 000000000..a9ee11569 --- /dev/null +++ b/net/bluetooth/hidp/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the Linux Bluetooth HIDP layer +# + +obj-$(CONFIG_BT_HIDP) += hidp.o + +hidp-objs := core.o sock.o diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c new file mode 100644 index 000000000..7e8e7ba7b --- /dev/null +++ b/net/bluetooth/hidp/core.c @@ -0,0 +1,649 @@ +/* + HIDP implementation for Linux Bluetooth stack (BlueZ). 
+ Copyright (C) 2003-2004 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "hidp.h" + +#ifndef CONFIG_BT_HIDP_DEBUG +#undef BT_DBG +#define BT_DBG(D...) +#endif + +#define VERSION "1.0" + +static DECLARE_RWSEM(hidp_session_sem); +static LIST_HEAD(hidp_session_list); + +static unsigned char hidp_keycode[256] = { + 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, + 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, + 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, + 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106, + 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, + 72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190, + 191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113, + 115,114, 0, 0, 0,121, 0, 89, 93,124, 92, 94, 95, 0, 0, 0, + 122,123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113, + 150,158,159,128,136,177,178,176,142,152,173,140 +}; + +static struct hidp_session *__hidp_get_session(bdaddr_t *bdaddr) +{ + struct hidp_session *session; + struct list_head *p; + + BT_DBG(""); + + list_for_each(p, &hidp_session_list) { + session = list_entry(p, struct hidp_session, list); + if (!bacmp(bdaddr, &session->bdaddr)) + return session; + } + return NULL; +} + +static void __hidp_link_session(struct hidp_session *session) +{ + __module_get(THIS_MODULE); + list_add(&session->list, &hidp_session_list); +} + +static void __hidp_unlink_session(struct hidp_session *session) +{ + list_del(&session->list); + module_put(THIS_MODULE); +} + +static void __hidp_copy_session(struct hidp_session *session, struct hidp_conninfo *ci) +{ + bacpy(&ci->bdaddr, &session->bdaddr); + + ci->flags = session->flags; + ci->state = session->state; + + ci->vendor = 0x0000; + ci->product = 0x0000; + ci->version = 0x0000; + memset(ci->name, 0, 128); + + if (session->input) { + ci->vendor = session->input->id.vendor; + ci->product = session->input->id.product; + ci->version = session->input->id.version; + if (session->input->name) + strncpy(ci->name, session->input->name, 128); + else + strncpy(ci->name, "HID Boot 
Device", 128); + } +} + +static int hidp_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value) +{ + struct hidp_session *session = dev->private; + struct sk_buff *skb; + unsigned char newleds; + + BT_DBG("session %p hid %p data %p size %d", session, device, data, size); + + if (type != EV_LED) + return -1; + + newleds = (!!test_bit(LED_KANA, dev->led) << 3) | + (!!test_bit(LED_COMPOSE, dev->led) << 3) | + (!!test_bit(LED_SCROLLL, dev->led) << 2) | + (!!test_bit(LED_CAPSL, dev->led) << 1) | + (!!test_bit(LED_NUML, dev->led)); + + if (session->leds == newleds) + return 0; + + session->leds = newleds; + + if (!(skb = alloc_skb(3, GFP_ATOMIC))) { + BT_ERR("Can't allocate memory for new frame"); + return -ENOMEM; + } + + *skb_put(skb, 1) = 0xa2; + *skb_put(skb, 1) = 0x01; + *skb_put(skb, 1) = newleds; + + skb_queue_tail(&session->intr_transmit, skb); + + hidp_schedule(session); + + return 0; +} + +static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb) +{ + struct input_dev *dev = session->input; + unsigned char *keys = session->keys; + unsigned char *udata = skb->data + 1; + signed char *sdata = skb->data + 1; + int i, size = skb->len - 1; + + switch (skb->data[0]) { + case 0x01: /* Keyboard report */ + for (i = 0; i < 8; i++) + input_report_key(dev, hidp_keycode[i + 224], (udata[0] >> i) & 1); + + for (i = 2; i < 8; i++) { + if (keys[i] > 3 && memscan(udata + 2, keys[i], 6) == udata + 8) { + if (hidp_keycode[keys[i]]) + input_report_key(dev, hidp_keycode[keys[i]], 0); + else + BT_ERR("Unknown key (scancode %#x) released.", keys[i]); + } + + if (udata[i] > 3 && memscan(keys + 2, udata[i], 6) == keys + 8) { + if (hidp_keycode[udata[i]]) + input_report_key(dev, hidp_keycode[udata[i]], 1); + else + BT_ERR("Unknown key (scancode %#x) pressed.", udata[i]); + } + } + + memcpy(keys, udata, 8); + break; + + case 0x02: /* Mouse report */ + input_report_key(dev, BTN_LEFT, sdata[0] & 0x01); + input_report_key(dev, BTN_RIGHT, sdata[0] & 0x02); + input_report_key(dev, BTN_MIDDLE, sdata[0] & 0x04); + input_report_key(dev, BTN_SIDE, sdata[0] & 0x08); + input_report_key(dev, BTN_EXTRA, sdata[0] & 0x10); + + input_report_rel(dev, REL_X, sdata[1]); + input_report_rel(dev, REL_Y, sdata[2]); + + if (size > 3) + input_report_rel(dev, REL_WHEEL, sdata[3]); + break; + } + + input_sync(dev); +} + +static void hidp_idle_timeout(unsigned long arg) +{ + struct hidp_session *session = (struct hidp_session *) arg; + + atomic_inc(&session->terminate); + hidp_schedule(session); +} + +static inline void hidp_set_timer(struct hidp_session *session) +{ + if (session->idle_to > 0) + mod_timer(&session->timer, jiffies + HZ * session->idle_to); +} + +static inline void hidp_del_timer(struct hidp_session *session) +{ + if (session->idle_to > 0) + del_timer(&session->timer); +} + +static inline void hidp_send_message(struct hidp_session *session, unsigned char hdr) +{ + struct sk_buff *skb; + + BT_DBG("session %p", session); + + if (!(skb = alloc_skb(1, GFP_ATOMIC))) { + BT_ERR("Can't allocate memory for message"); + return; + } + + *skb_put(skb, 1) = hdr; + + skb_queue_tail(&session->ctrl_transmit, skb); + + hidp_schedule(session); +} + +static inline int hidp_recv_frame(struct hidp_session *session, struct sk_buff *skb) +{ + __u8 hdr; + + BT_DBG("session %p skb %p len %d", session, skb, skb->len); + + hdr = skb->data[0]; + skb_pull(skb, 1); + + if (hdr == 0xa1) { + hidp_set_timer(session); + + if (session->input) + hidp_input_report(session, skb); + } else { + 
BT_DBG("Unsupported protocol header 0x%02x", hdr); + } + + kfree_skb(skb); + return 0; +} + +static int hidp_send_frame(struct socket *sock, unsigned char *data, int len) +{ + struct iovec iv = { data, len }; + struct msghdr msg; + + BT_DBG("sock %p data %p len %d", sock, data, len); + + if (!len) + return 0; + + memset(&msg, 0, sizeof(msg)); + msg.msg_iovlen = 1; + msg.msg_iov = &iv; + + return sock_sendmsg(sock, &msg, len); +} + +static int hidp_process_transmit(struct hidp_session *session) +{ + struct sk_buff *skb; + + BT_DBG("session %p", session); + + while ((skb = skb_dequeue(&session->ctrl_transmit))) { + if (hidp_send_frame(session->ctrl_sock, skb->data, skb->len) < 0) { + skb_queue_head(&session->ctrl_transmit, skb); + break; + } + + hidp_set_timer(session); + kfree_skb(skb); + } + + while ((skb = skb_dequeue(&session->intr_transmit))) { + if (hidp_send_frame(session->intr_sock, skb->data, skb->len) < 0) { + skb_queue_head(&session->intr_transmit, skb); + break; + } + + hidp_set_timer(session); + kfree_skb(skb); + } + + return skb_queue_len(&session->ctrl_transmit) + + skb_queue_len(&session->intr_transmit); +} + +static int hidp_session(void *arg) +{ + struct hidp_session *session = arg; + struct sock *ctrl_sk = session->ctrl_sock->sk; + struct sock *intr_sk = session->intr_sock->sk; + struct sk_buff *skb; + int vendor = 0x0000, product = 0x0000; + wait_queue_t ctrl_wait, intr_wait; + unsigned long timeo = HZ; + + BT_DBG("session %p", session); + + if (session->input) { + vendor = session->input->id.vendor; + product = session->input->id.product; + } + + daemonize("khidpd_%04x%04x", vendor, product); + set_user_nice(current, -15); + current->flags |= PF_NOFREEZE; + + set_fs(KERNEL_DS); + + init_waitqueue_entry(&ctrl_wait, current); + init_waitqueue_entry(&intr_wait, current); + add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); + add_wait_queue(intr_sk->sk_sleep, &intr_wait); + while (!atomic_read(&session->terminate)) { + set_current_state(TASK_INTERRUPTIBLE); + + if (ctrl_sk->sk_state != BT_CONNECTED || intr_sk->sk_state != BT_CONNECTED) + break; + + while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { + skb_orphan(skb); + hidp_recv_frame(session, skb); + } + + while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { + skb_orphan(skb); + hidp_recv_frame(session, skb); + } + + hidp_process_transmit(session); + + schedule(); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(intr_sk->sk_sleep, &intr_wait); + remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); + + down_write(&hidp_session_sem); + + hidp_del_timer(session); + + if (intr_sk->sk_state != BT_CONNECTED) { + init_waitqueue_entry(&ctrl_wait, current); + add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); + while (timeo && ctrl_sk->sk_state != BT_CLOSED) { + set_current_state(TASK_INTERRUPTIBLE); + timeo = schedule_timeout(timeo); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); + timeo = HZ; + } + + fput(session->ctrl_sock->file); + + init_waitqueue_entry(&intr_wait, current); + add_wait_queue(intr_sk->sk_sleep, &intr_wait); + while (timeo && intr_sk->sk_state != BT_CLOSED) { + set_current_state(TASK_INTERRUPTIBLE); + timeo = schedule_timeout(timeo); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(intr_sk->sk_sleep, &intr_wait); + + fput(session->intr_sock->file); + + __hidp_unlink_session(session); + + if (session->input) { + input_unregister_device(session->input); + kfree(session->input); + } + + up_write(&hidp_session_sem); + + kfree(session); + return 0; +} + 
+static inline void hidp_setup_input(struct hidp_session *session, struct hidp_connadd_req *req) +{ + struct input_dev *input = session->input; + int i; + + input->private = session; + + input->id.bustype = BUS_BLUETOOTH; + input->id.vendor = req->vendor; + input->id.product = req->product; + input->id.version = req->version; + + if (req->subclass & 0x40) { + set_bit(EV_KEY, input->evbit); + set_bit(EV_LED, input->evbit); + set_bit(EV_REP, input->evbit); + + set_bit(LED_NUML, input->ledbit); + set_bit(LED_CAPSL, input->ledbit); + set_bit(LED_SCROLLL, input->ledbit); + set_bit(LED_COMPOSE, input->ledbit); + set_bit(LED_KANA, input->ledbit); + + for (i = 0; i < sizeof(hidp_keycode); i++) + set_bit(hidp_keycode[i], input->keybit); + clear_bit(0, input->keybit); + } + + if (req->subclass & 0x80) { + input->evbit[0] = BIT(EV_KEY) | BIT(EV_REL); + input->keybit[LONG(BTN_MOUSE)] = BIT(BTN_LEFT) | BIT(BTN_RIGHT) | BIT(BTN_MIDDLE); + input->relbit[0] = BIT(REL_X) | BIT(REL_Y); + input->keybit[LONG(BTN_MOUSE)] |= BIT(BTN_SIDE) | BIT(BTN_EXTRA); + input->relbit[0] |= BIT(REL_WHEEL); + } + + input->event = hidp_input_event; + + input_register_device(input); +} + +int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) +{ + struct hidp_session *session, *s; + int err; + + BT_DBG(""); + + if (bacmp(&bt_sk(ctrl_sock->sk)->src, &bt_sk(intr_sock->sk)->src) || + bacmp(&bt_sk(ctrl_sock->sk)->dst, &bt_sk(intr_sock->sk)->dst)) + return -ENOTUNIQ; + + session = kmalloc(sizeof(struct hidp_session), GFP_KERNEL); + if (!session) + return -ENOMEM; + memset(session, 0, sizeof(struct hidp_session)); + + session->input = kmalloc(sizeof(struct input_dev), GFP_KERNEL); + if (!session->input) { + kfree(session); + return -ENOMEM; + } + memset(session->input, 0, sizeof(struct input_dev)); + + down_write(&hidp_session_sem); + + s = __hidp_get_session(&bt_sk(ctrl_sock->sk)->dst); + if (s && s->state == BT_CONNECTED) { + err = -EEXIST; + goto failed; + } + + bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); + + session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu); + session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu); + + BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu); + + session->ctrl_sock = ctrl_sock; + session->intr_sock = intr_sock; + session->state = BT_CONNECTED; + + init_timer(&session->timer); + + session->timer.function = hidp_idle_timeout; + session->timer.data = (unsigned long) session; + + skb_queue_head_init(&session->ctrl_transmit); + skb_queue_head_init(&session->intr_transmit); + + session->flags = req->flags & (1 << HIDP_BLUETOOTH_VENDOR_ID); + session->idle_to = req->idle_to; + + if (session->input) + hidp_setup_input(session, req); + + __hidp_link_session(session); + + hidp_set_timer(session); + + err = kernel_thread(hidp_session, session, CLONE_KERNEL); + if (err < 0) + goto unlink; + + if (session->input) { + hidp_send_message(session, 0x70); + session->flags |= (1 << HIDP_BOOT_PROTOCOL_MODE); + + session->leds = 0xff; + hidp_input_event(session->input, EV_LED, 0, 0); + } + + up_write(&hidp_session_sem); + return 0; + +unlink: + hidp_del_timer(session); + + __hidp_unlink_session(session); + + if (session->input) + input_unregister_device(session->input); + +failed: + up_write(&hidp_session_sem); + + if (session->input) + kfree(session->input); + + kfree(session); + return err; +} + +int hidp_del_connection(struct hidp_conndel_req *req) +{ + 
struct hidp_session *session; + int err = 0; + + BT_DBG(""); + + down_read(&hidp_session_sem); + + session = __hidp_get_session(&req->bdaddr); + if (session) { + if (req->flags & (1 << HIDP_VIRTUAL_CABLE_UNPLUG)) { + hidp_send_message(session, 0x15); + } else { + /* Flush the transmit queues */ + skb_queue_purge(&session->ctrl_transmit); + skb_queue_purge(&session->intr_transmit); + + /* Kill session thread */ + atomic_inc(&session->terminate); + hidp_schedule(session); + } + } else + err = -ENOENT; + + up_read(&hidp_session_sem); + return err; +} + +int hidp_get_connlist(struct hidp_connlist_req *req) +{ + struct list_head *p; + int err = 0, n = 0; + + BT_DBG(""); + + down_read(&hidp_session_sem); + + list_for_each(p, &hidp_session_list) { + struct hidp_session *session; + struct hidp_conninfo ci; + + session = list_entry(p, struct hidp_session, list); + + __hidp_copy_session(session, &ci); + + if (copy_to_user(req->ci, &ci, sizeof(ci))) { + err = -EFAULT; + break; + } + + if (++n >= req->cnum) + break; + + req->ci++; + } + req->cnum = n; + + up_read(&hidp_session_sem); + return err; +} + +int hidp_get_conninfo(struct hidp_conninfo *ci) +{ + struct hidp_session *session; + int err = 0; + + down_read(&hidp_session_sem); + + session = __hidp_get_session(&ci->bdaddr); + if (session) + __hidp_copy_session(session, ci); + else + err = -ENOENT; + + up_read(&hidp_session_sem); + return err; +} + +static int __init hidp_init(void) +{ + l2cap_load(); + + BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); + + return hidp_init_sockets(); +} + +static void __exit hidp_exit(void) +{ + hidp_cleanup_sockets(); +} + +module_init(hidp_init); +module_exit(hidp_exit); + +MODULE_AUTHOR("Marcel Holtmann "); +MODULE_DESCRIPTION("Bluetooth HIDP ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-6"); diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h new file mode 100644 index 000000000..98a742a64 --- /dev/null +++ b/net/bluetooth/hidp/hidp.h @@ -0,0 +1,122 @@ +/* + HIDP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2003-2004 Marcel Holtmann + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +#ifndef __HIDP_H +#define __HIDP_H + +#include +#include + +/* HIDP ioctl defines */ +#define HIDPCONNADD _IOW('H', 200, int) +#define HIDPCONNDEL _IOW('H', 201, int) +#define HIDPGETCONNLIST _IOR('H', 210, int) +#define HIDPGETCONNINFO _IOR('H', 211, int) + +#define HIDP_VIRTUAL_CABLE_UNPLUG 0 +#define HIDP_BOOT_PROTOCOL_MODE 1 +#define HIDP_BLUETOOTH_VENDOR_ID 9 + +struct hidp_connadd_req { + int ctrl_sock; // Connected control socket + int intr_sock; // Connteted interrupt socket + __u16 parser; + __u16 rd_size; + __u8 *rd_data; + __u8 country; + __u8 subclass; + __u16 vendor; + __u16 product; + __u16 version; + __u32 flags; + __u32 idle_to; + char name[128]; +}; + +struct hidp_conndel_req { + bdaddr_t bdaddr; + __u32 flags; +}; + +struct hidp_conninfo { + bdaddr_t bdaddr; + __u32 flags; + __u16 state; + __u16 vendor; + __u16 product; + __u16 version; + char name[128]; +}; + +struct hidp_connlist_req { + __u32 cnum; + struct hidp_conninfo __user *ci; +}; + +int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock); +int hidp_del_connection(struct hidp_conndel_req *req); +int hidp_get_connlist(struct hidp_connlist_req *req); +int hidp_get_conninfo(struct hidp_conninfo *ci); + +/* HIDP session defines */ +struct hidp_session { + struct list_head list; + + struct socket *ctrl_sock; + struct socket *intr_sock; + + bdaddr_t bdaddr; + + unsigned long state; + unsigned long flags; + unsigned long idle_to; + + uint ctrl_mtu; + uint intr_mtu; + + atomic_t terminate; + + unsigned char keys[8]; + unsigned char leds; + + struct input_dev *input; + + struct timer_list timer; + + struct sk_buff_head ctrl_transmit; + struct sk_buff_head intr_transmit; +}; + +static inline void hidp_schedule(struct hidp_session *session) +{ + struct sock *ctrl_sk = session->ctrl_sock->sk; + struct sock *intr_sk = session->intr_sock->sk; + + wake_up_interruptible(ctrl_sk->sk_sleep); + wake_up_interruptible(intr_sk->sk_sleep); +} + +/* HIDP init defines */ +extern int __init hidp_init_sockets(void); +extern void __exit hidp_cleanup_sockets(void); + +#endif /* __HIDP_H */ diff --git a/net/core/stream.c b/net/core/stream.c new file mode 100644 index 000000000..24a6f72a0 --- /dev/null +++ b/net/core/stream.c @@ -0,0 +1,41 @@ +/* + * SUCS NET3: + * + * Generic stream handling routines. These are generic for most + * protocols. Even IP. Tonight 8-). + * This is used because TCP, LLC (others too) layer all have mostly + * identical sendmsg() and recvmsg() code. + * So we (will) share it here. + * + * Authors: Arnaldo Carvalho de Melo + * (from old tcp.c code) + * Alan Cox (Borrowed comments 8-)) + */ + +#include +#include +#include +#include +#include + +/** + * sk_stream_write_space - stream socket write_space callback. + * sk - socket + * + * FIXME: write proper description + */ +void sk_stream_write_space(struct sock *sk) +{ + struct socket *sock = sk->sk_socket; + + if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { + clear_bit(SOCK_NOSPACE, &sock->flags); + + if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) + wake_up_interruptible(sk->sk_sleep); + if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) + sock_wake_async(sock, 2, POLL_OUT); + } +} + +EXPORT_SYMBOL(sk_stream_write_space); diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c new file mode 100644 index 000000000..f65ec865a --- /dev/null +++ b/net/ipv4/xfrm4_output.c @@ -0,0 +1,120 @@ +/* + * xfrm4_output.c - Common IPsec encapsulation code for IPv4. 
+ * Copyright (c) 2004 Herbert Xu + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include + +/* Add encapsulation header. + * + * In transport mode, the IP header will be moved forward to make space + * for the encapsulation header. + * + * In tunnel mode, the top IP header will be constructed per RFC 2401. + * The following fields in it shall be filled in by x->type->output: + * tot_len + * check + * + * On exit, skb->h will be set to the start of the payload to be processed + * by x->type->output and skb->nh will be set to the top IP header. + */ +static void xfrm4_encap(struct sk_buff *skb) +{ + struct dst_entry *dst = skb->dst; + struct xfrm_state *x = dst->xfrm; + struct iphdr *iph, *top_iph; + + iph = skb->nh.iph; + skb->h.ipiph = iph; + + skb->nh.raw = skb_push(skb, x->props.header_len); + top_iph = skb->nh.iph; + + if (!x->props.mode) { + skb->h.raw += iph->ihl*4; + memmove(top_iph, iph, iph->ihl*4); + return; + } + + top_iph->ihl = 5; + top_iph->version = 4; + + /* DS disclosed */ + top_iph->tos = INET_ECN_encapsulate(iph->tos, iph->tos); + if (x->props.flags & XFRM_STATE_NOECN) + IP_ECN_clear(top_iph); + + top_iph->frag_off = iph->frag_off & htons(IP_DF); + if (!top_iph->frag_off) + __ip_select_ident(top_iph, dst, 0); + + /* TTL disclosed */ + top_iph->ttl = iph->ttl; + + top_iph->saddr = x->props.saddr.a4; + top_iph->daddr = x->id.daddr.a4; + top_iph->protocol = IPPROTO_IPIP; + + memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); +} + +int xfrm4_output(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct dst_entry *dst = skb->dst; + struct xfrm_state *x = dst->xfrm; + int err; + + if (skb->ip_summed == CHECKSUM_HW) { + err = skb_checksum_help(pskb, 0); + skb = *pskb; + if (err) + goto error_nolock; + } + + spin_lock_bh(&x->lock); + err = xfrm_state_check(x, skb); + if (err) + goto error; + + if (x->props.mode) { + err = xfrm4_tunnel_check_size(skb); + if (err) + goto error; + } + + xfrm4_encap(skb); + + err = x->type->output(pskb); + skb = *pskb; + if (err) + goto error; + + x->curlft.bytes += skb->len; + x->curlft.packets++; + + spin_unlock_bh(&x->lock); + + if (!(skb->dst = dst_pop(dst))) { + err = -EHOSTUNREACH; + goto error_nolock; + } + err = NET_XMIT_BYPASS; + +out_exit: + return err; +error: + spin_unlock_bh(&x->lock); +error_nolock: + kfree_skb(skb); + goto out_exit; +} diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c new file mode 100644 index 000000000..f5a06546d --- /dev/null +++ b/net/ipv6/xfrm6_tunnel.c @@ -0,0 +1,634 @@ +/* + * Copyright (C)2003,2004 USAGI/WIDE Project + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Authors Mitsuru KANDA + * YOSHIFUJI Hideaki + * + * Based on net/ipv4/xfrm4_tunnel.c + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG +# define X6TDEBUG 3 +#else +# define X6TDEBUG 1 +#endif + +#define X6TPRINTK(fmt, args...) printk(fmt, ## args) +#define X6TNOPRINTK(fmt, args...) do { ; } while(0) + +#if X6TDEBUG >= 1 +# define X6TPRINTK1 X6TPRINTK +#else +# define X6TPRINTK1 X6TNOPRINTK +#endif + +#if X6TDEBUG >= 3 +# define X6TPRINTK3 X6TPRINTK +#else +# define X6TPRINTK3 X6TNOPRINTK +#endif + +/* + * xfrm_tunnel_spi things are for allocating unique id ("spi") + * per xfrm_address_t. + */ +struct xfrm6_tunnel_spi { + struct hlist_node list_byaddr; + struct hlist_node list_byspi; + xfrm_address_t addr; + u32 spi; + atomic_t refcnt; +#ifdef XFRM6_TUNNEL_SPI_MAGIC + u32 magic; +#endif +}; + +#ifdef CONFIG_IPV6_XFRM6_TUNNEL_DEBUG +# define XFRM6_TUNNEL_SPI_MAGIC 0xdeadbeef +#endif + +static rwlock_t xfrm6_tunnel_spi_lock = RW_LOCK_UNLOCKED; + +static u32 xfrm6_tunnel_spi; + +#define XFRM6_TUNNEL_SPI_MIN 1 +#define XFRM6_TUNNEL_SPI_MAX 0xffffffff + +static kmem_cache_t *xfrm6_tunnel_spi_kmem; + +#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 +#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256 + +static struct hlist_head xfrm6_tunnel_spi_byaddr[XFRM6_TUNNEL_SPI_BYADDR_HSIZE]; +static struct hlist_head xfrm6_tunnel_spi_byspi[XFRM6_TUNNEL_SPI_BYSPI_HSIZE]; + +#ifdef XFRM6_TUNNEL_SPI_MAGIC +static int x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi, + const char *name) +{ + if (unlikely(x6spi->magic != XFRM6_TUNNEL_SPI_MAGIC)) { + X6TPRINTK3(KERN_DEBUG "%s(): x6spi object " + "at %p has corrupted magic %08x " + "(should be %08x)\n", + name, x6spi, x6spi->magic, XFRM6_TUNNEL_SPI_MAGIC); + return -1; + } + return 0; +} +#else +static int inline x6spi_check_magic(const struct xfrm6_tunnel_spi *x6spi, + const char *name) +{ + return 0; +} +#endif + +#define X6SPI_CHECK_MAGIC(x6spi) x6spi_check_magic((x6spi), __FUNCTION__) + + +static unsigned inline xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) +{ + unsigned h; + + X6TPRINTK3(KERN_DEBUG "%s(addr=%p)\n", __FUNCTION__, addr); + + h = addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]; + h ^= h >> 16; + h ^= h >> 8; + h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1; + + X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, h); + + return h; +} + +static unsigned inline xfrm6_tunnel_spi_hash_byspi(u32 spi) +{ + return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; +} + + +static int xfrm6_tunnel_spi_init(void) +{ + int i; + + X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); + + xfrm6_tunnel_spi = 0; + xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi", + sizeof(struct xfrm6_tunnel_spi), + 0, SLAB_HWCACHE_ALIGN, + NULL, NULL); + if (!xfrm6_tunnel_spi_kmem) { + X6TPRINTK1(KERN_ERR + "%s(): failed to allocate xfrm6_tunnel_spi_kmem\n", + __FUNCTION__); + return -ENOMEM; + } + + for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) + INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]); + for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) + INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]); + return 0; +} + +static void xfrm6_tunnel_spi_fini(void) +{ + int i; + + X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); + + for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) { + if 
(!hlist_empty(&xfrm6_tunnel_spi_byaddr[i])) + goto err; + } + for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) { + if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i])) + goto err; + } + kmem_cache_destroy(xfrm6_tunnel_spi_kmem); + xfrm6_tunnel_spi_kmem = NULL; + return; +err: + X6TPRINTK1(KERN_ERR "%s(): table is not empty\n", __FUNCTION__); + return; +} + +static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) +{ + struct xfrm6_tunnel_spi *x6spi; + struct hlist_node *pos; + + X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); + + hlist_for_each_entry(x6spi, pos, + &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], + list_byaddr) { + if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { + X6SPI_CHECK_MAGIC(x6spi); + X6TPRINTK3(KERN_DEBUG "%s() = %p(%u)\n", __FUNCTION__, x6spi, x6spi->spi); + return x6spi; + } + } + + X6TPRINTK3(KERN_DEBUG "%s() = NULL(0)\n", __FUNCTION__); + return NULL; +} + +u32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr) +{ + struct xfrm6_tunnel_spi *x6spi; + u32 spi; + + X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); + + read_lock_bh(&xfrm6_tunnel_spi_lock); + x6spi = __xfrm6_tunnel_spi_lookup(saddr); + spi = x6spi ? x6spi->spi : 0; + read_unlock_bh(&xfrm6_tunnel_spi_lock); + return spi; +} + +EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup); + +static u32 __xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) +{ + u32 spi; + struct xfrm6_tunnel_spi *x6spi; + struct hlist_node *pos; + unsigned index; + + X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); + + if (xfrm6_tunnel_spi < XFRM6_TUNNEL_SPI_MIN || + xfrm6_tunnel_spi >= XFRM6_TUNNEL_SPI_MAX) + xfrm6_tunnel_spi = XFRM6_TUNNEL_SPI_MIN; + else + xfrm6_tunnel_spi++; + + for (spi = xfrm6_tunnel_spi; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) { + index = xfrm6_tunnel_spi_hash_byspi(spi); + hlist_for_each_entry(x6spi, pos, + &xfrm6_tunnel_spi_byspi[index], + list_byspi) { + if (x6spi->spi == spi) + goto try_next_1; + } + xfrm6_tunnel_spi = spi; + goto alloc_spi; +try_next_1:; + } + for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tunnel_spi; spi++) { + index = xfrm6_tunnel_spi_hash_byspi(spi); + hlist_for_each_entry(x6spi, pos, + &xfrm6_tunnel_spi_byspi[index], + list_byspi) { + if (x6spi->spi == spi) + goto try_next_2; + } + xfrm6_tunnel_spi = spi; + goto alloc_spi; +try_next_2:; + } + spi = 0; + goto out; +alloc_spi: + X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for " + "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", + __FUNCTION__, + NIP6(*(struct in6_addr *)saddr)); + x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC); + if (!x6spi) { + X6TPRINTK1(KERN_ERR "%s(): kmem_cache_alloc() failed\n", + __FUNCTION__); + goto out; + } +#ifdef XFRM6_TUNNEL_SPI_MAGIC + x6spi->magic = XFRM6_TUNNEL_SPI_MAGIC; +#endif + memcpy(&x6spi->addr, saddr, sizeof(x6spi->addr)); + x6spi->spi = spi; + atomic_set(&x6spi->refcnt, 1); + + hlist_add_head(&x6spi->list_byspi, &xfrm6_tunnel_spi_byspi[index]); + + index = xfrm6_tunnel_spi_hash_byaddr(saddr); + hlist_add_head(&x6spi->list_byaddr, &xfrm6_tunnel_spi_byaddr[index]); + X6SPI_CHECK_MAGIC(x6spi); +out: + X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi); + return spi; +} + +u32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr) +{ + struct xfrm6_tunnel_spi *x6spi; + u32 spi; + + X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); + + write_lock_bh(&xfrm6_tunnel_spi_lock); + x6spi = __xfrm6_tunnel_spi_lookup(saddr); + if (x6spi) { + atomic_inc(&x6spi->refcnt); + spi = x6spi->spi; + } else + spi = 
__xfrm6_tunnel_alloc_spi(saddr); + write_unlock_bh(&xfrm6_tunnel_spi_lock); + + X6TPRINTK3(KERN_DEBUG "%s() = %u\n", __FUNCTION__, spi); + + return spi; +} + +EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi); + +void xfrm6_tunnel_free_spi(xfrm_address_t *saddr) +{ + struct xfrm6_tunnel_spi *x6spi; + struct hlist_node *pos, *n; + + X6TPRINTK3(KERN_DEBUG "%s(saddr=%p)\n", __FUNCTION__, saddr); + + write_lock_bh(&xfrm6_tunnel_spi_lock); + + hlist_for_each_entry_safe(x6spi, pos, n, + &xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)], + list_byaddr) + { + if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) { + X6TPRINTK3(KERN_DEBUG "%s(): x6spi object " + "for %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " + "found at %p\n", + __FUNCTION__, + NIP6(*(struct in6_addr *)saddr), + x6spi); + X6SPI_CHECK_MAGIC(x6spi); + if (atomic_dec_and_test(&x6spi->refcnt)) { + hlist_del(&x6spi->list_byaddr); + hlist_del(&x6spi->list_byspi); + kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi); + break; + } + } + } + write_unlock_bh(&xfrm6_tunnel_spi_lock); +} + +EXPORT_SYMBOL(xfrm6_tunnel_free_spi); + +int xfrm6_tunnel_check_size(struct sk_buff *skb) +{ + int mtu, ret = 0; + struct dst_entry *dst = skb->dst; + + mtu = dst_pmtu(dst) - sizeof(struct ipv6hdr); + if (mtu < IPV6_MIN_MTU) + mtu = IPV6_MIN_MTU; + + if (skb->len > mtu) { + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); + ret = -EMSGSIZE; + } + + return ret; +} + +EXPORT_SYMBOL(xfrm6_tunnel_check_size); + +static int xfrm6_tunnel_output(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct dst_entry *dst = skb->dst; + struct xfrm_state *x = dst->xfrm; + struct ipv6hdr *iph, *top_iph; + int err; + + if ((err = xfrm6_tunnel_check_size(skb)) != 0) + goto error_nolock; + + iph = skb->nh.ipv6h; + + top_iph = (struct ipv6hdr *)skb_push(skb, x->props.header_len); + top_iph->version = 6; + top_iph->priority = iph->priority; + top_iph->flow_lbl[0] = iph->flow_lbl[0]; + top_iph->flow_lbl[1] = iph->flow_lbl[1]; + top_iph->flow_lbl[2] = iph->flow_lbl[2]; + top_iph->nexthdr = IPPROTO_IPV6; + top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); + top_iph->hop_limit = iph->hop_limit; + memcpy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr, sizeof(struct in6_addr)); + memcpy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr, sizeof(struct in6_addr)); + skb->nh.raw = skb->data; + skb->h.raw = skb->nh.raw + sizeof(struct ipv6hdr); + + x->curlft.bytes += skb->len; + x->curlft.packets++; + + spin_unlock_bh(&x->lock); + + if ((skb->dst = dst_pop(dst)) == NULL) { + kfree_skb(skb); + err = -EHOSTUNREACH; + goto error_nolock; + } + + return NET_XMIT_BYPASS; + +error_nolock: + kfree_skb(skb); + return err; +} + +static int xfrm6_tunnel_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struct sk_buff *skb) +{ + if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) + return -EINVAL; + + skb->mac.raw = skb->nh.raw; + skb->nh.raw = skb->data; + dst_release(skb->dst); + skb->dst = NULL; + skb->protocol = htons(ETH_P_IPV6); + skb->pkt_type = PACKET_HOST; + netif_rx(skb); + + return 0; +} + +static struct xfrm6_tunnel *xfrm6_tunnel_handler; +static DECLARE_MUTEX(xfrm6_tunnel_sem); + +int xfrm6_tunnel_register(struct xfrm6_tunnel *handler) +{ + int ret; + + down(&xfrm6_tunnel_sem); + ret = 0; + if (xfrm6_tunnel_handler != NULL) + ret = -EINVAL; + if (!ret) + xfrm6_tunnel_handler = handler; + up(&xfrm6_tunnel_sem); + + return ret; +} + +EXPORT_SYMBOL(xfrm6_tunnel_register); + +int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler) +{ + int ret; + 
+ down(&xfrm6_tunnel_sem); + ret = 0; + if (xfrm6_tunnel_handler != handler) + ret = -EINVAL; + if (!ret) + xfrm6_tunnel_handler = NULL; + up(&xfrm6_tunnel_sem); + + synchronize_net(); + + return ret; +} + +EXPORT_SYMBOL(xfrm6_tunnel_deregister); + +static int xfrm6_tunnel_rcv(struct sk_buff **pskb, unsigned int *nhoffp) +{ + struct sk_buff *skb = *pskb; + struct xfrm6_tunnel *handler = xfrm6_tunnel_handler; + struct xfrm_state *x = NULL; + struct ipv6hdr *iph = skb->nh.ipv6h; + int err = 0; + u32 spi; + + /* device-like_ip6ip6_handler() */ + if (handler) { + err = handler->handler(pskb, nhoffp); + if (!err) + goto out; + } + + spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr); + x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, + spi, + IPPROTO_IPV6, AF_INET6); + + if (!x) + goto drop; + + spin_lock(&x->lock); + + if (unlikely(x->km.state != XFRM_STATE_VALID)) + goto drop_unlock; + + err = xfrm6_tunnel_input(x, NULL, skb); + if (err) + goto drop_unlock; + + x->curlft.bytes += skb->len; + x->curlft.packets++; + spin_unlock(&x->lock); + xfrm_state_put(x); + +out: + return 0; + +drop_unlock: + spin_unlock(&x->lock); + xfrm_state_put(x); +drop: + kfree_skb(skb); + return -1; +} + +static void xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + int type, int code, int offset, __u32 info) +{ + struct xfrm6_tunnel *handler = xfrm6_tunnel_handler; + + /* call here first for device-like ip6ip6 err handling */ + if (handler) { + handler->err_handler(skb, opt, type, code, offset, info); + return; + } + + /* xfrm6_tunnel native err handling */ + switch (type) { + case ICMPV6_DEST_UNREACH: + switch (code) { + case ICMPV6_NOROUTE: + case ICMPV6_ADM_PROHIBITED: + case ICMPV6_NOT_NEIGHBOUR: + case ICMPV6_ADDR_UNREACH: + case ICMPV6_PORT_UNREACH: + default: + X6TPRINTK3(KERN_DEBUG + "xfrm6_tunnel: Destination Unreach.\n"); + break; + } + break; + case ICMPV6_PKT_TOOBIG: + X6TPRINTK3(KERN_DEBUG + "xfrm6_tunnel: Packet Too Big.\n"); + break; + case ICMPV6_TIME_EXCEED: + switch (code) { + case ICMPV6_EXC_HOPLIMIT: + X6TPRINTK3(KERN_DEBUG + "xfrm6_tunnel: Too small Hoplimit.\n"); + break; + case ICMPV6_EXC_FRAGTIME: + default: + break; + } + break; + case ICMPV6_PARAMPROB: + switch (code) { + case ICMPV6_HDR_FIELD: break; + case ICMPV6_UNK_NEXTHDR: break; + case ICMPV6_UNK_OPTION: break; + } + break; + default: + break; + } + return; +} + +static int xfrm6_tunnel_init_state(struct xfrm_state *x, void *args) +{ + if (!x->props.mode) + return -EINVAL; + + x->props.header_len = sizeof(struct ipv6hdr); + + return 0; +} + +static void xfrm6_tunnel_destroy(struct xfrm_state *x) +{ + xfrm6_tunnel_free_spi((xfrm_address_t *)&x->props.saddr); +} + +static struct xfrm_type xfrm6_tunnel_type = { + .description = "IP6IP6", + .owner = THIS_MODULE, + .proto = IPPROTO_IPV6, + .init_state = xfrm6_tunnel_init_state, + .destructor = xfrm6_tunnel_destroy, + .input = xfrm6_tunnel_input, + .output = xfrm6_tunnel_output, +}; + +static struct inet6_protocol xfrm6_tunnel_protocol = { + .handler = xfrm6_tunnel_rcv, + .err_handler = xfrm6_tunnel_err, + .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, +}; + +void __init xfrm6_tunnel_init(void) +{ + X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); + + if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0) { + X6TPRINTK1(KERN_ERR + "xfrm6_tunnel init: can't add xfrm type\n"); + return; + } + if (inet6_add_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6) < 0) { + X6TPRINTK1(KERN_ERR + "xfrm6_tunnel init(): can't add protocol\n"); + 
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); + return; + } + if (xfrm6_tunnel_spi_init() < 0) { + X6TPRINTK1(KERN_ERR + "xfrm6_tunnel init: failed to initialize spi\n"); + inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6); + xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6); + return; + } +} + +void __exit xfrm6_tunnel_fini(void) +{ + X6TPRINTK3(KERN_DEBUG "%s()\n", __FUNCTION__); + + xfrm6_tunnel_spi_fini(); + if (inet6_del_protocol(&xfrm6_tunnel_protocol, IPPROTO_IPV6) < 0) + X6TPRINTK1(KERN_ERR + "xfrm6_tunnel close: can't remove protocol\n"); + if (xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6) < 0) + X6TPRINTK1(KERN_ERR + "xfrm6_tunnel close: can't remove xfrm type\n"); +} diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c new file mode 100644 index 000000000..70a3e8ec8 --- /dev/null +++ b/net/sched/sch_netem.c @@ -0,0 +1,858 @@ +/* + * net/sched/sch_netem.c Network emulator + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Authors: Stephen Hemminger + * Catalin(ux aka Dino) BOIE + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* Network emulator + * + * This scheduler can alters spacing and order + * Similar to NISTnet and BSD Dummynet. + */ + +struct netem_sched_data { + struct Qdisc *qdisc; + struct sk_buff_head delayed; + struct timer_list timer; + + u32 latency; + u32 loss; + u32 limit; + u32 counter; + u32 gap; + u32 jitter; +}; + +/* Time stamp put into socket buffer control block */ +struct netem_skb_cb { + psched_time_t time_to_send; +}; + +/* This is the distribution table for the normal distribution produced + * with NISTnet tools. + * The entries represent a scaled inverse of the cumulative distribution + * function. 
+ */ +#define TABLESIZE 2048 +#define TABLEFACTOR 8192 + +static const short disttable[TABLESIZE] = { + -31473, -26739, -25226, -24269, + -23560, -22993, -22518, -22109, + -21749, -21426, -21133, -20865, + -20618, -20389, -20174, -19972, + -19782, -19601, -19430, -19267, + -19112, -18962, -18819, -18681, + -18549, -18421, -18298, -18178, + -18062, -17950, -17841, -17735, + -17632, -17532, -17434, -17339, + -17245, -17155, -17066, -16979, + -16894, -16811, -16729, -16649, + -16571, -16494, -16419, -16345, + -16272, -16201, -16130, -16061, + -15993, -15926, -15861, -15796, + -15732, -15669, -15607, -15546, + -15486, -15426, -15368, -15310, + -15253, -15196, -15140, -15086, + -15031, -14977, -14925, -14872, + -14821, -14769, -14719, -14669, + -14619, -14570, -14522, -14473, + -14426, -14379, -14332, -14286, + -14241, -14196, -14150, -14106, + -14062, -14019, -13976, -13933, + -13890, -13848, -13807, -13765, + -13724, -13684, -13643, -13604, + -13564, -13525, -13486, -13447, + -13408, -13370, -13332, -13295, + -13258, -13221, -13184, -13147, + -13111, -13075, -13040, -13004, + -12969, -12934, -12899, -12865, + -12830, -12796, -12762, -12729, + -12695, -12662, -12629, -12596, + -12564, -12531, -12499, -12467, + -12435, -12404, -12372, -12341, + -12310, -12279, -12248, -12218, + -12187, -12157, -12127, -12097, + -12067, -12038, -12008, -11979, + -11950, -11921, -11892, -11863, + -11835, -11806, -11778, -11750, + -11722, -11694, -11666, -11639, + -11611, -11584, -11557, -11530, + -11503, -11476, -11450, -11423, + -11396, -11370, -11344, -11318, + -11292, -11266, -11240, -11214, + -11189, -11164, -11138, -11113, + -11088, -11063, -11038, -11013, + -10988, -10964, -10939, -10915, + -10891, -10866, -10843, -10818, + -10794, -10770, -10747, -10723, + -10700, -10676, -10652, -10630, + -10606, -10583, -10560, -10537, + -10514, -10491, -10469, -10446, + -10424, -10401, -10378, -10356, + -10334, -10312, -10290, -10267, + -10246, -10224, -10202, -10180, + -10158, -10137, -10115, -10094, + -10072, -10051, -10030, -10009, + -9988, -9967, -9945, -9925, + -9904, -9883, -9862, -9842, + -9821, -9800, -9780, -9760, + -9739, -9719, -9699, -9678, + -9658, -9638, -9618, -9599, + -9578, -9559, -9539, -9519, + -9499, -9480, -9461, -9441, + -9422, -9402, -9383, -9363, + -9344, -9325, -9306, -9287, + -9268, -9249, -9230, -9211, + -9192, -9173, -9155, -9136, + -9117, -9098, -9080, -9062, + -9043, -9025, -9006, -8988, + -8970, -8951, -8933, -8915, + -8897, -8879, -8861, -8843, + -8825, -8807, -8789, -8772, + -8754, -8736, -8718, -8701, + -8683, -8665, -8648, -8630, + -8613, -8595, -8578, -8561, + -8543, -8526, -8509, -8492, + -8475, -8458, -8441, -8423, + -8407, -8390, -8373, -8356, + -8339, -8322, -8305, -8289, + -8272, -8255, -8239, -8222, + -8206, -8189, -8172, -8156, + -8140, -8123, -8107, -8090, + -8074, -8058, -8042, -8025, + -8009, -7993, -7977, -7961, + -7945, -7929, -7913, -7897, + -7881, -7865, -7849, -7833, + -7817, -7802, -7786, -7770, + -7754, -7739, -7723, -7707, + -7692, -7676, -7661, -7645, + -7630, -7614, -7599, -7583, + -7568, -7553, -7537, -7522, + -7507, -7492, -7476, -7461, + -7446, -7431, -7416, -7401, + -7385, -7370, -7356, -7340, + -7325, -7311, -7296, -7281, + -7266, -7251, -7236, -7221, + -7207, -7192, -7177, -7162, + -7148, -7133, -7118, -7104, + -7089, -7075, -7060, -7046, + -7031, -7016, -7002, -6988, + -6973, -6959, -6944, -6930, + -6916, -6901, -6887, -6873, + -6859, -6844, -6830, -6816, + -6802, -6788, -6774, -6760, + -6746, -6731, -6717, -6704, + -6690, -6675, -6661, -6647, + -6633, 
-6620, -6606, -6592, + -6578, -6564, -6550, -6537, + -6523, -6509, -6495, -6482, + -6468, -6454, -6441, -6427, + -6413, -6400, -6386, -6373, + -6359, -6346, -6332, -6318, + -6305, -6291, -6278, -6264, + -6251, -6238, -6224, -6211, + -6198, -6184, -6171, -6158, + -6144, -6131, -6118, -6105, + -6091, -6078, -6065, -6052, + -6039, -6025, -6012, -5999, + -5986, -5973, -5960, -5947, + -5934, -5921, -5908, -5895, + -5882, -5869, -5856, -5843, + -5830, -5817, -5804, -5791, + -5779, -5766, -5753, -5740, + -5727, -5714, -5702, -5689, + -5676, -5663, -5650, -5638, + -5625, -5612, -5600, -5587, + -5575, -5562, -5549, -5537, + -5524, -5512, -5499, -5486, + -5474, -5461, -5449, -5436, + -5424, -5411, -5399, -5386, + -5374, -5362, -5349, -5337, + -5324, -5312, -5299, -5287, + -5275, -5263, -5250, -5238, + -5226, -5213, -5201, -5189, + -5177, -5164, -5152, -5140, + -5128, -5115, -5103, -5091, + -5079, -5067, -5055, -5043, + -5030, -5018, -5006, -4994, + -4982, -4970, -4958, -4946, + -4934, -4922, -4910, -4898, + -4886, -4874, -4862, -4850, + -4838, -4826, -4814, -4803, + -4791, -4778, -4767, -4755, + -4743, -4731, -4719, -4708, + -4696, -4684, -4672, -4660, + -4649, -4637, -4625, -4613, + -4601, -4590, -4578, -4566, + -4554, -4543, -4531, -4520, + -4508, -4496, -4484, -4473, + -4461, -4449, -4438, -4427, + -4415, -4403, -4392, -4380, + -4368, -4357, -4345, -4334, + -4322, -4311, -4299, -4288, + -4276, -4265, -4253, -4242, + -4230, -4219, -4207, -4196, + -4184, -4173, -4162, -4150, + -4139, -4128, -4116, -4105, + -4094, -4082, -4071, -4060, + -4048, -4037, -4026, -4014, + -4003, -3992, -3980, -3969, + -3958, -3946, -3935, -3924, + -3913, -3901, -3890, -3879, + -3868, -3857, -3845, -3834, + -3823, -3812, -3801, -3790, + -3779, -3767, -3756, -3745, + -3734, -3723, -3712, -3700, + -3689, -3678, -3667, -3656, + -3645, -3634, -3623, -3612, + -3601, -3590, -3579, -3568, + -3557, -3545, -3535, -3524, + -3513, -3502, -3491, -3480, + -3469, -3458, -3447, -3436, + -3425, -3414, -3403, -3392, + -3381, -3370, -3360, -3348, + -3337, -3327, -3316, -3305, + -3294, -3283, -3272, -3262, + -3251, -3240, -3229, -3218, + -3207, -3197, -3185, -3175, + -3164, -3153, -3142, -3132, + -3121, -3110, -3099, -3088, + -3078, -3067, -3056, -3045, + -3035, -3024, -3013, -3003, + -2992, -2981, -2970, -2960, + -2949, -2938, -2928, -2917, + -2906, -2895, -2885, -2874, + -2864, -2853, -2842, -2832, + -2821, -2810, -2800, -2789, + -2778, -2768, -2757, -2747, + -2736, -2725, -2715, -2704, + -2694, -2683, -2673, -2662, + -2651, -2641, -2630, -2620, + -2609, -2599, -2588, -2578, + -2567, -2556, -2546, -2535, + -2525, -2515, -2504, -2493, + -2483, -2472, -2462, -2451, + -2441, -2431, -2420, -2410, + -2399, -2389, -2378, -2367, + -2357, -2347, -2336, -2326, + -2315, -2305, -2295, -2284, + -2274, -2263, -2253, -2243, + -2232, -2222, -2211, -2201, + -2191, -2180, -2170, -2159, + -2149, -2139, -2128, -2118, + -2107, -2097, -2087, -2076, + -2066, -2056, -2046, -2035, + -2025, -2014, -2004, -1994, + -1983, -1973, -1963, -1953, + -1942, -1932, -1921, -1911, + -1901, -1891, -1880, -1870, + -1860, -1849, -1839, -1829, + -1819, -1808, -1798, -1788, + -1778, -1767, -1757, -1747, + -1736, -1726, -1716, -1706, + -1695, -1685, -1675, -1665, + -1654, -1644, -1634, -1624, + -1613, -1603, -1593, -1583, + -1573, -1563, -1552, -1542, + -1532, -1522, -1511, -1501, + -1491, -1481, -1471, -1461, + -1450, -1440, -1430, -1420, + -1409, -1400, -1389, -1379, + -1369, -1359, -1348, -1339, + -1328, -1318, -1308, -1298, + -1288, -1278, -1267, -1257, + -1247, -1237, -1227, 
-1217, + -1207, -1196, -1186, -1176, + -1166, -1156, -1146, -1135, + -1126, -1115, -1105, -1095, + -1085, -1075, -1065, -1055, + -1044, -1034, -1024, -1014, + -1004, -994, -984, -974, + -964, -954, -944, -933, + -923, -913, -903, -893, + -883, -873, -863, -853, + -843, -833, -822, -812, + -802, -792, -782, -772, + -762, -752, -742, -732, + -722, -712, -702, -691, + -682, -671, -662, -651, + -641, -631, -621, -611, + -601, -591, -581, -571, + -561, -551, -541, -531, + -521, -511, -501, -491, + -480, -471, -460, -451, + -440, -430, -420, -410, + -400, -390, -380, -370, + -360, -350, -340, -330, + -320, -310, -300, -290, + -280, -270, -260, -250, + -240, -230, -220, -210, + -199, -190, -179, -170, + -159, -150, -139, -129, + -119, -109, -99, -89, + -79, -69, -59, -49, + -39, -29, -19, -9, + 1, 11, 21, 31, + 41, 51, 61, 71, + 81, 91, 101, 111, + 121, 131, 141, 152, + 161, 172, 181, 192, + 202, 212, 222, 232, + 242, 252, 262, 272, + 282, 292, 302, 312, + 322, 332, 342, 352, + 362, 372, 382, 392, + 402, 412, 422, 433, + 442, 453, 462, 473, + 483, 493, 503, 513, + 523, 533, 543, 553, + 563, 573, 583, 593, + 603, 613, 623, 633, + 643, 653, 664, 673, + 684, 694, 704, 714, + 724, 734, 744, 754, + 764, 774, 784, 794, + 804, 815, 825, 835, + 845, 855, 865, 875, + 885, 895, 905, 915, + 925, 936, 946, 956, + 966, 976, 986, 996, + 1006, 1016, 1026, 1037, + 1047, 1057, 1067, 1077, + 1087, 1097, 1107, 1117, + 1128, 1138, 1148, 1158, + 1168, 1178, 1188, 1198, + 1209, 1219, 1229, 1239, + 1249, 1259, 1269, 1280, + 1290, 1300, 1310, 1320, + 1330, 1341, 1351, 1361, + 1371, 1381, 1391, 1402, + 1412, 1422, 1432, 1442, + 1452, 1463, 1473, 1483, + 1493, 1503, 1513, 1524, + 1534, 1544, 1554, 1565, + 1575, 1585, 1595, 1606, + 1616, 1626, 1636, 1647, + 1656, 1667, 1677, 1687, + 1697, 1708, 1718, 1729, + 1739, 1749, 1759, 1769, + 1780, 1790, 1800, 1810, + 1821, 1831, 1841, 1851, + 1862, 1872, 1883, 1893, + 1903, 1913, 1923, 1934, + 1944, 1955, 1965, 1975, + 1985, 1996, 2006, 2016, + 2027, 2037, 2048, 2058, + 2068, 2079, 2089, 2099, + 2110, 2120, 2130, 2141, + 2151, 2161, 2172, 2182, + 2193, 2203, 2213, 2224, + 2234, 2245, 2255, 2265, + 2276, 2286, 2297, 2307, + 2318, 2328, 2338, 2349, + 2359, 2370, 2380, 2391, + 2401, 2412, 2422, 2433, + 2443, 2454, 2464, 2475, + 2485, 2496, 2506, 2517, + 2527, 2537, 2548, 2559, + 2569, 2580, 2590, 2601, + 2612, 2622, 2632, 2643, + 2654, 2664, 2675, 2685, + 2696, 2707, 2717, 2728, + 2738, 2749, 2759, 2770, + 2781, 2791, 2802, 2813, + 2823, 2834, 2845, 2855, + 2866, 2877, 2887, 2898, + 2909, 2919, 2930, 2941, + 2951, 2962, 2973, 2984, + 2994, 3005, 3015, 3027, + 3037, 3048, 3058, 3069, + 3080, 3091, 3101, 3113, + 3123, 3134, 3145, 3156, + 3166, 3177, 3188, 3199, + 3210, 3220, 3231, 3242, + 3253, 3264, 3275, 3285, + 3296, 3307, 3318, 3329, + 3340, 3351, 3362, 3373, + 3384, 3394, 3405, 3416, + 3427, 3438, 3449, 3460, + 3471, 3482, 3493, 3504, + 3515, 3526, 3537, 3548, + 3559, 3570, 3581, 3592, + 3603, 3614, 3625, 3636, + 3647, 3659, 3670, 3681, + 3692, 3703, 3714, 3725, + 3736, 3747, 3758, 3770, + 3781, 3792, 3803, 3814, + 3825, 3837, 3848, 3859, + 3870, 3881, 3893, 3904, + 3915, 3926, 3937, 3949, + 3960, 3971, 3983, 3994, + 4005, 4017, 4028, 4039, + 4051, 4062, 4073, 4085, + 4096, 4107, 4119, 4130, + 4141, 4153, 4164, 4175, + 4187, 4198, 4210, 4221, + 4233, 4244, 4256, 4267, + 4279, 4290, 4302, 4313, + 4325, 4336, 4348, 4359, + 4371, 4382, 4394, 4406, + 4417, 4429, 4440, 4452, + 4464, 4475, 4487, 4499, + 4510, 4522, 4533, 4545, + 4557, 4569, 4581, 4592, + 4604, 4616, 4627, 4639, + 4651, 
4663, 4674, 4686, + 4698, 4710, 4722, 4734, + 4746, 4758, 4769, 4781, + 4793, 4805, 4817, 4829, + 4841, 4853, 4865, 4877, + 4889, 4900, 4913, 4925, + 4936, 4949, 4961, 4973, + 4985, 4997, 5009, 5021, + 5033, 5045, 5057, 5070, + 5081, 5094, 5106, 5118, + 5130, 5143, 5155, 5167, + 5179, 5191, 5204, 5216, + 5228, 5240, 5253, 5265, + 5278, 5290, 5302, 5315, + 5327, 5340, 5352, 5364, + 5377, 5389, 5401, 5414, + 5426, 5439, 5451, 5464, + 5476, 5489, 5502, 5514, + 5527, 5539, 5552, 5564, + 5577, 5590, 5603, 5615, + 5628, 5641, 5653, 5666, + 5679, 5691, 5704, 5717, + 5730, 5743, 5756, 5768, + 5781, 5794, 5807, 5820, + 5833, 5846, 5859, 5872, + 5885, 5897, 5911, 5924, + 5937, 5950, 5963, 5976, + 5989, 6002, 6015, 6028, + 6042, 6055, 6068, 6081, + 6094, 6108, 6121, 6134, + 6147, 6160, 6174, 6187, + 6201, 6214, 6227, 6241, + 6254, 6267, 6281, 6294, + 6308, 6321, 6335, 6348, + 6362, 6375, 6389, 6403, + 6416, 6430, 6443, 6457, + 6471, 6485, 6498, 6512, + 6526, 6540, 6554, 6567, + 6581, 6595, 6609, 6623, + 6637, 6651, 6665, 6679, + 6692, 6706, 6721, 6735, + 6749, 6763, 6777, 6791, + 6805, 6819, 6833, 6848, + 6862, 6876, 6890, 6905, + 6919, 6933, 6948, 6962, + 6976, 6991, 7005, 7020, + 7034, 7049, 7064, 7078, + 7093, 7107, 7122, 7136, + 7151, 7166, 7180, 7195, + 7210, 7225, 7240, 7254, + 7269, 7284, 7299, 7314, + 7329, 7344, 7359, 7374, + 7389, 7404, 7419, 7434, + 7449, 7465, 7480, 7495, + 7510, 7526, 7541, 7556, + 7571, 7587, 7602, 7618, + 7633, 7648, 7664, 7680, + 7695, 7711, 7726, 7742, + 7758, 7773, 7789, 7805, + 7821, 7836, 7852, 7868, + 7884, 7900, 7916, 7932, + 7948, 7964, 7981, 7997, + 8013, 8029, 8045, 8061, + 8078, 8094, 8110, 8127, + 8143, 8160, 8176, 8193, + 8209, 8226, 8242, 8259, + 8276, 8292, 8309, 8326, + 8343, 8360, 8377, 8394, + 8410, 8428, 8444, 8462, + 8479, 8496, 8513, 8530, + 8548, 8565, 8582, 8600, + 8617, 8634, 8652, 8670, + 8687, 8704, 8722, 8740, + 8758, 8775, 8793, 8811, + 8829, 8847, 8865, 8883, + 8901, 8919, 8937, 8955, + 8974, 8992, 9010, 9029, + 9047, 9066, 9084, 9103, + 9121, 9140, 9159, 9177, + 9196, 9215, 9234, 9253, + 9272, 9291, 9310, 9329, + 9349, 9368, 9387, 9406, + 9426, 9445, 9465, 9484, + 9504, 9524, 9544, 9563, + 9583, 9603, 9623, 9643, + 9663, 9683, 9703, 9723, + 9744, 9764, 9785, 9805, + 9826, 9846, 9867, 9888, + 9909, 9930, 9950, 9971, + 9993, 10013, 10035, 10056, + 10077, 10099, 10120, 10142, + 10163, 10185, 10207, 10229, + 10251, 10273, 10294, 10317, + 10339, 10361, 10384, 10406, + 10428, 10451, 10474, 10496, + 10519, 10542, 10565, 10588, + 10612, 10635, 10658, 10682, + 10705, 10729, 10752, 10776, + 10800, 10824, 10848, 10872, + 10896, 10921, 10945, 10969, + 10994, 11019, 11044, 11069, + 11094, 11119, 11144, 11169, + 11195, 11221, 11246, 11272, + 11298, 11324, 11350, 11376, + 11402, 11429, 11456, 11482, + 11509, 11536, 11563, 11590, + 11618, 11645, 11673, 11701, + 11728, 11756, 11785, 11813, + 11842, 11870, 11899, 11928, + 11957, 11986, 12015, 12045, + 12074, 12104, 12134, 12164, + 12194, 12225, 12255, 12286, + 12317, 12348, 12380, 12411, + 12443, 12475, 12507, 12539, + 12571, 12604, 12637, 12670, + 12703, 12737, 12771, 12804, + 12839, 12873, 12907, 12942, + 12977, 13013, 13048, 13084, + 13120, 13156, 13192, 13229, + 13267, 13304, 13341, 13379, + 13418, 13456, 13495, 13534, + 13573, 13613, 13653, 13693, + 13734, 13775, 13817, 13858, + 13901, 13943, 13986, 14029, + 14073, 14117, 14162, 14206, + 14252, 14297, 14343, 14390, + 14437, 14485, 14533, 14582, + 14631, 14680, 14731, 14782, + 14833, 14885, 14937, 14991, + 15044, 15099, 15154, 15210, + 15266, 15324, 
15382, 15441, + 15500, 15561, 15622, 15684, + 15747, 15811, 15877, 15943, + 16010, 16078, 16148, 16218, + 16290, 16363, 16437, 16513, + 16590, 16669, 16749, 16831, + 16915, 17000, 17088, 17177, + 17268, 17362, 17458, 17556, + 17657, 17761, 17868, 17977, + 18090, 18207, 18328, 18452, + 18581, 18715, 18854, 18998, + 19149, 19307, 19472, 19645, + 19828, 20021, 20226, 20444, + 20678, 20930, 21204, 21503, + 21835, 22206, 22630, 23124, + 23721, 24478, 25529, 27316, +}; + +/* tabledist - return a pseudo-randomly distributed value with mean mu and + * std deviation sigma. Uses table lookup to approximate the desired + * distribution, and a uniformly-distributed pseudo-random source. + */ +static inline int tabledist(int mu, int sigma) +{ + int x; + int index; + int sigmamod, sigmadiv; + + if (sigma == 0) + return mu; + + index = (net_random() & (TABLESIZE-1)); + sigmamod = sigma%TABLEFACTOR; + sigmadiv = sigma/TABLEFACTOR; + x = sigmamod*disttable[index]; + + if (x >= 0) + x += TABLEFACTOR/2; + else + x -= TABLEFACTOR/2; + + x /= TABLEFACTOR; + x += sigmadiv*disttable[index]; + x += mu; + return x; +} + +/* Enqueue packets with underlying discipline (fifo) + * but mark them with current time first. + */ +static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) +{ + struct netem_sched_data *q = (struct netem_sched_data *)sch->data; + struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb; + psched_time_t now; + long delay; + + pr_debug("netem_enqueue skb=%p @%lu\n", skb, jiffies); + + /* Random packet drop 0 => none, ~0 => all */ + if (q->loss && q->loss >= net_random()) { + sch->stats.drops++; + return 0; /* lie about loss so TCP doesn't know */ + } + + + /* If doing simple delay then gap == 0 so all packets + * go into the delayed holding queue + * otherwise if doing out of order only "1 out of gap" + * packets will be delayed. + */ + if (q->counter < q->gap) { + int ret; + + ++q->counter; + ret = q->qdisc->enqueue(skb, q->qdisc); + if (ret) + sch->stats.drops++; + return ret; + } + + q->counter = 0; + + PSCHED_GET_TIME(now); + if (q->jitter) + delay = tabledist(q->latency, q->jitter); + else + delay = q->latency; + + PSCHED_TADD2(now, delay, cb->time_to_send); + + /* Always queue at tail to keep packets in order */ + __skb_queue_tail(&q->delayed, skb); + sch->q.qlen++; + sch->stats.bytes += skb->len; + sch->stats.packets++; + return 0; +} + +/* Requeue packets but don't change time stamp */ +static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch) +{ + struct netem_sched_data *q = (struct netem_sched_data *)sch->data; + int ret; + + if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) + sch->q.qlen++; + + return ret; +} + +static unsigned int netem_drop(struct Qdisc* sch) +{ + struct netem_sched_data *q = (struct netem_sched_data *)sch->data; + unsigned int len; + + if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) { + sch->q.qlen--; + sch->stats.drops++; + } + return len; +} + +/* Dequeue packet. 
+ * Move all packets that are ready to send from the delay holding + * list to the underlying qdisc, then just call dequeue + */ +static struct sk_buff *netem_dequeue(struct Qdisc *sch) +{ + struct netem_sched_data *q = (struct netem_sched_data *)sch->data; + struct sk_buff *skb; + psched_time_t now; + + PSCHED_GET_TIME(now); + while ((skb = skb_peek(&q->delayed)) != NULL) { + const struct netem_skb_cb *cb + = (const struct netem_skb_cb *)skb->cb; + long delay + = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now)); + pr_debug("netem_dequeue: delay queue %p@%lu %ld\n", + skb, jiffies, delay); + + /* if more time remaining? */ + if (delay > 0) { + mod_timer(&q->timer, jiffies + delay); + break; + } + __skb_unlink(skb, &q->delayed); + + if (q->qdisc->enqueue(skb, q->qdisc)) + sch->stats.drops++; + } + + skb = q->qdisc->dequeue(q->qdisc); + if (skb) + sch->q.qlen--; + return skb; +} + +static void netem_watchdog(unsigned long arg) +{ + struct Qdisc *sch = (struct Qdisc *)arg; + + pr_debug("netem_watchdog: fired @%lu\n", jiffies); + netif_schedule(sch->dev); +} + +static void netem_reset(struct Qdisc *sch) +{ + struct netem_sched_data *q = (struct netem_sched_data *)sch->data; + + qdisc_reset(q->qdisc); + skb_queue_purge(&q->delayed); + + sch->q.qlen = 0; + del_timer_sync(&q->timer); +} + +static int set_fifo_limit(struct Qdisc *q, int limit) +{ + struct rtattr *rta; + int ret = -ENOMEM; + + rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); + if (rta) { + rta->rta_type = RTM_NEWQDISC; + rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt)); + ((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit; + + ret = q->ops->change(q, rta); + kfree(rta); + } + return ret; +} + +static int netem_change(struct Qdisc *sch, struct rtattr *opt) +{ + struct netem_sched_data *q = (struct netem_sched_data *)sch->data; + struct tc_netem_qopt *qopt = RTA_DATA(opt); + struct Qdisc *child; + int ret; + + if (opt->rta_len < RTA_LENGTH(sizeof(*qopt))) + return -EINVAL; + + child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops); + if (!child) + return -EINVAL; + + ret = set_fifo_limit(child, qopt->limit); + if (ret) { + qdisc_destroy(child); + return ret; + } + + sch_tree_lock(sch); + if (child) { + child = xchg(&q->qdisc, child); + if (child != &noop_qdisc) + qdisc_destroy(child); + + q->latency = qopt->latency; + q->jitter = qopt->jitter; + q->limit = qopt->limit; + q->gap = qopt->gap; + q->loss = qopt->loss; + } + sch_tree_unlock(sch); + + return 0; +} + +static int netem_init(struct Qdisc *sch, struct rtattr *opt) +{ + struct netem_sched_data *q = (struct netem_sched_data *)sch->data; + + if (!opt) + return -EINVAL; + + skb_queue_head_init(&q->delayed); + q->qdisc = &noop_qdisc; + + init_timer(&q->timer); + q->timer.function = netem_watchdog; + q->timer.data = (unsigned long) sch; + q->counter = 0; + + return netem_change(sch, opt); +} + +static void netem_destroy(struct Qdisc *sch) +{ + struct netem_sched_data *q = (struct netem_sched_data *)sch->data; + + del_timer_sync(&q->timer); +} + +static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) +{ + struct netem_sched_data *q = (struct netem_sched_data *)sch->data; + unsigned char *b = skb->tail; + struct tc_netem_qopt qopt; + + qopt.latency = q->latency; + qopt.jitter = q->jitter; + qopt.limit = sch->dev->tx_queue_len; + qopt.loss = q->loss; + qopt.gap = q->gap; + + RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); + + return skb->len; + +rtattr_failure: + skb_trim(skb, b - skb->data); + return -1; +} + +static struct Qdisc_ops 
netem_qdisc_ops = { + .id = "netem", + .priv_size = sizeof(struct netem_sched_data), + .enqueue = netem_enqueue, + .dequeue = netem_dequeue, + .requeue = netem_requeue, + .drop = netem_drop, + .init = netem_init, + .reset = netem_reset, + .destroy = netem_destroy, + .change = netem_change, + .dump = netem_dump, + .owner = THIS_MODULE, +}; + + +static int __init netem_module_init(void) +{ + return register_qdisc(&netem_qdisc_ops); +} +static void __exit netem_module_exit(void) +{ + unregister_qdisc(&netem_qdisc_ops); +} +module_init(netem_module_init) +module_exit(netem_module_exit) +MODULE_LICENSE("GPL"); diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile new file mode 100644 index 000000000..f66bf5262 --- /dev/null +++ b/scripts/mod/Makefile @@ -0,0 +1,16 @@ +host-progs := modpost mk_elfconfig +always := $(host-progs) empty.o + +modpost-objs := modpost.o file2alias.o sumversion.o + +# dependencies on generated files need to be listed explicitly + +$(obj)/modpost.o $(obj)/file2alias.o $(obj)/sumversion.o: $(obj)/elfconfig.h + +quiet_cmd_elfconfig = MKELF $@ + cmd_elfconfig = $(obj)/mk_elfconfig $(ARCH) < $< > $@ + +$(obj)/elfconfig.h: $(obj)/empty.o $(obj)/mk_elfconfig FORCE + $(call if_changed,elfconfig) + +targets += elfconfig.h diff --git a/scripts/mod/empty.c b/scripts/mod/empty.c new file mode 100644 index 000000000..49839cc4f --- /dev/null +++ b/scripts/mod/empty.c @@ -0,0 +1 @@ +/* empty file to figure out endianness / word size */ diff --git a/scripts/package/Makefile b/scripts/package/Makefile new file mode 100644 index 000000000..f3df4bc95 --- /dev/null +++ b/scripts/package/Makefile @@ -0,0 +1,71 @@ +# Makefile for the different targets used to generate full packages of a kernel +# It uses the generic clean infrastructure of kbuild + +# Ignore the following files/directories during tar operation +TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS + + +# RPM target +# --------------------------------------------------------------------------- +# The rpm target generates two rpm files: +# /usr/src/packages/SRPMS/kernel-2.6.7rc2-1.src.rpm +# /usr/src/packages/RPMS/i386/kernel-2.6.7rc2-1..rpm +# The src.rpm files includes all source for the kernel being built +# The .rpm includes kernel configuration, modules etc. +# +# Process to create the rpm files +# a) clean the kernel +# b) Generate .spec file +# c) Build a tar ball, using symlink to make kernel version +# first entry in the path +# d) and pack the result to a tar.gz file +# e) generate the rpm files, based on kernel.spec +# - Use /. to avoid tar packing just the symlink + +# Do we have rpmbuild, otherwise fall back to the older rpm +RPM := $(shell if [ -x "/usr/bin/rpmbuild" ]; then echo rpmbuild; \ + else echo rpm; fi) + +# Remove hyphens since they have special meaning in RPM filenames +KERNELPATH := kernel-$(subst -,,$(KERNELRELEASE)) +MKSPEC := $(srctree)/scripts/package/mkspec +PREV := set -e; cd ..; + +.PHONY: rpm-pkg rpm + +$(objtree)/kernel.spec: $(MKSPEC) + $(CONFIG_SHELL) $(MKSPEC) > $@ + +rpm-pkg rpm: $(objtree)/kernel.spec + $(MAKE) clean + $(PREV) ln -sf $(srctree) $(KERNELPATH) + $(PREV) tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/. 
+ $(PREV) rm $(KERNELPATH) + + set -e; \ + $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version + set -e; \ + mv -f $(objtree)/.tmp_version $(objtree)/.version + + $(RPM) --target $(UTS_MACHINE) -ta ../$(KERNELPATH).tar.gz + rm ../$(KERNELPATH).tar.gz + +clean-rule += rm -f $(objtree)/kernel.spec + +# Deb target +# --------------------------------------------------------------------------- +# +.PHONY: deb-pkg +deb-pkg: + $(MAKE) + $(CONFIG_SHELL) $(srctree)/scripts/package/builddeb + +clean-rule += && rm -rf $(objtree)/debian/ + + +# Help text displayed when executing 'make help' +# --------------------------------------------------------------------------- +help: + @echo ' rpm-pkg - Build the kernel as an RPM package' + @echo ' deb-pkg - Build the kernel as a deb package' + diff --git a/scripts/package/builddeb b/scripts/package/builddeb new file mode 100644 index 000000000..968a0a77d --- /dev/null +++ b/scripts/package/builddeb @@ -0,0 +1,79 @@ +#!/bin/sh +# +# builddeb 1.2 +# Copyright 2003 Wichert Akkerman +# +# Simple script to generate a deb package for a Linux kernel. All the +# complexity of what to do with a kernel after it is installed or removed +# is left to other scripts and packages: they can install scripts in the +# /etc/kernel/{pre,post}{inst,rm}.d/ directories that will be called on +# package install and removal. + +set -e + +# Some variables and settings used throughout the script +version="$VERSION.$PATCHLEVEL.$SUBLEVEL$EXTRAVERSION" +tmpdir="$objtree/debian/tmp" + +# Setup the directory structure +rm -rf "$tmpdir" +mkdir -p "$tmpdir/DEBIAN" "$tmpdir/lib" "$tmpdir/boot" + +# Build and install the kernel +cp System.map "$tmpdir/boot/System.map-$version" +cp .config "$tmpdir/boot/config-$version" +cp $KBUILD_IMAGE "$tmpdir/boot/vmlinuz-$version" + +if grep -q '^CONFIG_MODULES=y' .config ; then + INSTALL_MOD_PATH="$tmpdir" make modules_install +fi + +# Install the maintainer scripts +for script in postinst postrm preinst prerm ; do + mkdir -p "$tmpdir/etc/kernel/$script.d" + cat <<EOF > "$tmpdir/DEBIAN/$script" +#!/bin/sh + +set -e + +test -d /etc/kernel/$script.d && run-parts --arg="$version" /etc/kernel/$script.d +exit 0 +EOF + chmod 755 "$tmpdir/DEBIAN/$script" +done + +name="Kernel Compiler <$(id -nu)@$(hostname -f)>" +# Generate a simple changelog template +cat <<EOF > debian/changelog +linux ($version) unstable; urgency=low + + * A standard release + + -- $name $(date -R) +EOF + +# Generate a control file +cat <<EOF > debian/control +Source: linux +Section: base +Priority: optional +Maintainer: $name +Standards-Version: 3.6.1 + +Package: linux-$version +Architecture: any +Description: Linux kernel, version $version + This package contains the Linux kernel, modules and corresponding other + files version $version. +EOF + +# Fix some ownership and permissions +chown -R root:root "$tmpdir" +chmod -R go-w "$tmpdir" + +# Perform the final magic +dpkg-gencontrol -isp +dpkg --build "$tmpdir" .. + +exit 0 + diff --git a/scripts/package/mkspec b/scripts/package/mkspec new file mode 100644 index 000000000..3ecfc23e0 --- /dev/null +++ b/scripts/package/mkspec @@ -0,0 +1,63 @@ +#!/bin/sh +# +# Output a simple RPM spec file that uses no fancy features requiring +# RPM v4. This is intended to work with any RPM distro.
+# +# The only gothic bit here is redefining install_post to avoid +# stripping the symbols from files in the kernel which we want +# +# Patched for non-x86 by Opencon (L) 2002 +# + +# starting to output the spec +if [ "`grep CONFIG_DRM=y .config | cut -f2 -d\=`" = "y" ]; then + PROVIDES=kernel-drm +fi + +PROVIDES="$PROVIDES kernel-$VERSION.$PATCHLEVEL.$SUBLEVEL$EXTRAVERSION" + +echo "Name: kernel" +echo "Summary: The Linux Kernel" +echo "Version: "$VERSION.$PATCHLEVEL.$SUBLEVEL$EXTRAVERSION | sed -e "s/-//g" +# we need to determine the NEXT version number so that uname and +# rpm -q will agree +echo "Release: `. $srctree/scripts/mkversion`" +echo "License: GPL" +echo "Group: System Environment/Kernel" +echo "Vendor: The Linux Community" +echo "URL: http://www.kernel.org" +echo -n "Source: kernel-$VERSION.$PATCHLEVEL.$SUBLEVEL" +echo "$EXTRAVERSION.tar.gz" | sed -e "s/-//g" +echo "BuildRoot: /var/tmp/%{name}-%{PACKAGE_VERSION}-root" +echo "Provides: $PROVIDES" +echo "%define __spec_install_post /usr/lib/rpm/brp-compress || :" +echo "%define debug_package %{nil}" +echo "" +echo "%description" +echo "The Linux Kernel, the operating system core itself" +echo "" +echo "%prep" +echo "%setup -q" +echo "" +echo "%build" +echo "make clean && make" +echo "" +echo "%install" +echo 'mkdir -p $RPM_BUILD_ROOT/boot $RPM_BUILD_ROOT/lib $RPM_BUILD_ROOT/lib/modules' + +echo 'INSTALL_MOD_PATH=$RPM_BUILD_ROOT make modules_install' +echo 'cp $KBUILD_IMAGE $RPM_BUILD_ROOT'"/boot/vmlinuz-$VERSION.$PATCHLEVEL.$SUBLEVEL$EXTRAVERSION" + +echo 'cp System.map $RPM_BUILD_ROOT'"/boot/System.map-$VERSION.$PATCHLEVEL.$SUBLEVEL$EXTRAVERSION" + +echo 'cp .config $RPM_BUILD_ROOT'"/boot/config-$VERSION.$PATCHLEVEL.$SUBLEVEL$EXTRAVERSION" +echo "" +echo "%clean" +echo '#echo -rf $RPM_BUILD_ROOT' +echo "" +echo "%files" +echo '%defattr (-, root, root)' +echo "%dir /lib/modules" +echo "/lib/modules/$VERSION.$PATCHLEVEL.$SUBLEVEL$EXTRAVERSION" +echo "/boot/*" +echo "" -- 2.43.0
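The tabledist() helper in sch_netem.c above turns a uniform random source into an approximately normal jitter value by indexing a scaled inverse-CDF table and doing purely integer, fixed-point scaling (the kernel cannot use floating point here). The standalone sketch below is not part of the patch: it mirrors that integer math with a hypothetical 16-entry table and the C library's rand() standing in for net_random(), so the approximation can be tried in userspace.

/*
 * tabledist_demo.c - standalone sketch of the inverse-CDF table lookup
 * used by sch_netem's tabledist() to shape a uniform random source into
 * an approximately normal delay jitter.
 *
 * The 16-entry table is a hypothetical, much smaller stand-in for the
 * kernel's 2048-entry disttable; rand() stands in for net_random().
 *
 * Build: cc -o tabledist_demo tabledist_demo.c -lm
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define TABLESIZE   16      /* must be a power of two for the & mask */
#define TABLEFACTOR 8192    /* fixed-point scale, same as the kernel */

/* Approximate inverse normal CDF sampled at (i + 0.5)/TABLESIZE,
 * scaled by TABLEFACTOR. */
static const short disttable[TABLESIZE] = {
	-15262, -10797, -8274, -6357, -4743, -3293, -1942, -642,
	   642,   1942,  3293,  4743,  6357,  8274, 10797, 15262,
};

/* Same integer math as the kernel helper: split sigma into a quotient
 * and remainder of TABLEFACTOR so the intermediate product stays in
 * 32-bit range, and round to nearest instead of truncating. */
static int tabledist(int mu, int sigma)
{
	int index, sigmamod, sigmadiv, x;

	if (sigma == 0)
		return mu;

	index = rand() & (TABLESIZE - 1);	/* uniform table index */
	sigmamod = sigma % TABLEFACTOR;
	sigmadiv = sigma / TABLEFACTOR;

	x = sigmamod * disttable[index];
	if (x >= 0)
		x += TABLEFACTOR / 2;		/* round to nearest */
	else
		x -= TABLEFACTOR / 2;
	x /= TABLEFACTOR;

	x += sigmadiv * disttable[index];
	return x + mu;
}

int main(void)
{
	const int mu = 100, sigma = 10, n = 100000;
	double sum = 0.0, sumsq = 0.0;
	int i;

	srand(1);
	for (i = 0; i < n; i++) {
		int v = tabledist(mu, sigma);
		sum += v;
		sumsq += (double)v * v;
	}
	printf("mean  ~ %.2f (target %d)\n", sum / n, mu);
	printf("sigma ~ %.2f (target %d, coarse 16-entry table)\n",
	       sqrt(sumsq / n - (sum / n) * (sum / n)), sigma);
	return 0;
}

Splitting sigma into TABLEFACTOR quotient and remainder keeps the scaling within 32-bit range for realistic jitter values while still rounding to the nearest integer; with the full 2048-entry table in the patch, the sampled standard deviation tracks sigma much more closely than this coarse stand-in does.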