X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-mips%2Fio.h;h=6b17eb9d79a52017f819f68073bc0fe5ba8add47;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=9ec7ced7a0f0c7d463f997a9c505e52cd440e82b;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git diff --git a/include/asm-mips/io.h b/include/asm-mips/io.h index 9ec7ced7a..6b17eb9d7 100644 --- a/include/asm-mips/io.h +++ b/include/asm-mips/io.h @@ -4,22 +4,29 @@ * for more details. * * Copyright (C) 1994, 1995 Waldorf GmbH - * Copyright (C) 1994 - 2000 Ralf Baechle + * Copyright (C) 1994 - 2000, 06 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. + * Author: Maciej W. Rozycki */ #ifndef _ASM_IO_H #define _ASM_IO_H #include +#include +#include #include #include +#include #include #include #include #include #include -#include +#include + +#include #include /* @@ -28,33 +35,18 @@ #undef CONF_SLOWDOWN_IO /* - * Sane hardware offers swapping of I/O space accesses in hardware; less - * sane hardware forces software to fiddle with this ... + * Raw operations are never swapped in software. OTOH values that raw + * operations are working on may or may not have been swapped by the bus + * hardware. An example use would be for flash memory that's used for + * execute in place. */ -#if defined(CONFIG_SWAP_IO_SPACE) && defined(__MIPSEB__) - -#define __ioswab8(x) (x) +# define __raw_ioswabb(a,x) (x) +# define __raw_ioswabw(a,x) (x) +# define __raw_ioswabl(a,x) (x) +# define __raw_ioswabq(a,x) (x) +# define ____raw_ioswabq(a,x) (x) -#ifdef CONFIG_SGI_IP22 -/* - * IP22 seems braindead enough to swap 16bits values in hardware, but - * not 32bits. Go figure... Can't tell without documentation. - */ -#define __ioswab16(x) (x) -#else -#define __ioswab16(x) swab16(x) -#endif -#define __ioswab32(x) swab32(x) -#define __ioswab64(x) swab64(x) - -#else - -#define __ioswab8(x) (x) -#define __ioswab16(x) (x) -#define __ioswab32(x) (x) -#define __ioswab64(x) (x) - -#endif +/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */ #define IO_SPACE_LIMIT 0xffff @@ -68,8 +60,20 @@ */ extern const unsigned long mips_io_port_base; -#define set_io_port_base(base) \ - do { * (unsigned long *) &mips_io_port_base = (base); } while (0) +/* + * Gcc will generate code to load the value of mips_io_port_base after each + * function call which may be fairly wasteful in some cases. So we don't + * play quite by the book. We tell gcc mips_io_port_base is a long variable + * which solves the code generation issue. Now we need to violate the + * aliasing rules a little to make initialization possible and finally we + * will need the barrier() to fight side effects of the aliasing chat. + * This trickery will eventually collapse under gcc's optimizer. Oh well. 
+ */ +static inline void set_io_port_base(unsigned long base) +{ + * (unsigned long *) &mips_io_port_base = base; + barrier(); +} /* * Thanks to James van Artsdalen for a better timing-fix than @@ -168,12 +172,14 @@ extern unsigned long isa_slot_offset; */ #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) -extern void * __ioremap(phys_t offset, phys_t size, unsigned long flags); -extern void __iounmap(void *addr); +extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags); +extern void __iounmap(volatile void __iomem *addr); -static inline void * __ioremap_mode(unsigned long offset, unsigned long size, +static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, unsigned long flags) { +#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL)) + if (cpu_has_64bit_addresses) { u64 base = UNCAC_BASE; @@ -183,10 +189,30 @@ static inline void * __ioremap_mode(unsigned long offset, unsigned long size, */ if (flags == _CACHE_UNCACHED) base = (u64) IO_BASE; - return (void *) (unsigned long) (base + offset); + return (void __iomem *) (unsigned long) (base + offset); + } else if (__builtin_constant_p(offset) && + __builtin_constant_p(size) && __builtin_constant_p(flags)) { + phys_t phys_addr, last_addr; + + phys_addr = fixup_bigphys_addr(offset, size); + + /* Don't allow wraparound or zero size. */ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr) + return NULL; + + /* + * Map uncached objects in the low 512MB of address + * space using KSEG1. + */ + if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) && + flags == _CACHE_UNCACHED) + return (void __iomem *)CKSEG1ADDR(phys_addr); } return __ioremap(offset, size, flags); + +#undef __IS_LOW512 } /* @@ -225,6 +251,24 @@ static inline void * __ioremap_mode(unsigned long offset, unsigned long size, #define ioremap_nocache(offset, size) \ __ioremap_mode((offset), (size), _CACHE_UNCACHED) +/* + * ioremap_cachable - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + * + * ioremap_nocache performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + * + * This version of ioremap ensures that the memory is marked cachable by + * the CPU. Also enables full write-combining. Useful for some + * memory-like regions on I/O busses. + */ +#define ioremap_cachable(offset, size) \ + __ioremap_mode((offset), (size), PAGE_CACHABLE_DEFAULT) + /* * These two are MIPS specific ioremap variant. 
ioremap_cacheable_cow * requests a cachable mapping, ioremap_uncached_accelerated requests a @@ -236,98 +280,271 @@ static inline void * __ioremap_mode(unsigned long offset, unsigned long size, #define ioremap_uncached_accelerated(offset, size) \ __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED) -static inline void iounmap(void *addr) +static inline void iounmap(volatile void __iomem *addr) { - if (cpu_has_64bits) +#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1) + + if (cpu_has_64bit_addresses || + (__builtin_constant_p(addr) && __IS_KSEG1(addr))) return; __iounmap(addr); + +#undef __IS_KSEG1 } -#define __raw_readb(addr) (*(volatile unsigned char *)(addr)) -#define __raw_readw(addr) (*(volatile unsigned short *)(addr)) -#define __raw_readl(addr) (*(volatile unsigned int *)(addr)) -#ifdef CONFIG_MIPS32 -#define ____raw_readq(addr) \ -({ \ - u64 __res; \ - \ - __asm__ __volatile__ ( \ - " .set mips3 # ____raw_readq \n" \ - " ld %L0, (%1) \n" \ - " dsra32 %M0, %L0, 0 \n" \ - " sll %L0, %L0, 0 \n" \ - " .set mips0 \n" \ - : "=r" (__res) \ - : "r" (addr)); \ - __res; \ -}) -#define __raw_readq(addr) \ -({ \ - unsigned long __flags; \ - u64 __res; \ - \ - local_irq_save(__flags); \ - __res = ____raw_readq(addr); \ - local_irq_restore(__flags); \ - __res; \ -}) -#endif -#ifdef CONFIG_MIPS64 -#define ____raw_readq(addr) (*(volatile unsigned long *)(addr)) -#define __raw_readq(addr) ____raw_readq(addr) -#endif +#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ + \ +static inline void pfx##write##bwlq(type val, \ + volatile void __iomem *mem) \ +{ \ + volatile type *__mem; \ + type __val; \ + \ + __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ + \ + __val = pfx##ioswab##bwlq(__mem, val); \ + \ + if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ + *__mem = __val; \ + else if (cpu_has_64bits) { \ + unsigned long __flags; \ + type __tmp; \ + \ + if (irq) \ + local_irq_save(__flags); \ + __asm__ __volatile__( \ + ".set mips3" "\t\t# __writeq""\n\t" \ + "dsll32 %L0, %L0, 0" "\n\t" \ + "dsrl32 %L0, %L0, 0" "\n\t" \ + "dsll32 %M0, %M0, 0" "\n\t" \ + "or %L0, %L0, %M0" "\n\t" \ + "sd %L0, %2" "\n\t" \ + ".set mips0" "\n" \ + : "=r" (__tmp) \ + : "0" (__val), "m" (*__mem)); \ + if (irq) \ + local_irq_restore(__flags); \ + } else \ + BUG(); \ +} \ + \ +static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ +{ \ + volatile type *__mem; \ + type __val; \ + \ + __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ + \ + if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ + __val = *__mem; \ + else if (cpu_has_64bits) { \ + unsigned long __flags; \ + \ + if (irq) \ + local_irq_save(__flags); \ + __asm__ __volatile__( \ + ".set mips3" "\t\t# __readq" "\n\t" \ + "ld %L0, %1" "\n\t" \ + "dsra32 %M0, %L0, 0" "\n\t" \ + "sll %L0, %L0, 0" "\n\t" \ + ".set mips0" "\n" \ + : "=r" (__val) \ + : "m" (*__mem)); \ + if (irq) \ + local_irq_restore(__flags); \ + } else { \ + __val = 0; \ + BUG(); \ + } \ + \ + return pfx##ioswab##bwlq(__mem, __val); \ +} -#define readb(addr) __ioswab8(__raw_readb(addr)) -#define readw(addr) __ioswab16(__raw_readw(addr)) -#define readl(addr) __ioswab32(__raw_readl(addr)) -#define readq(addr) __ioswab64(__raw_readq(addr)) -#define readb_relaxed(addr) readb(addr) -#define readw_relaxed(addr) readw(addr) -#define readl_relaxed(addr) readl(addr) -#define readq_relaxed(addr) readq(addr) - -#define __raw_writeb(b,addr) ((*(volatile unsigned char *)(addr)) = (b)) -#define 
__raw_writew(w,addr) ((*(volatile unsigned short *)(addr)) = (w)) -#define __raw_writel(l,addr) ((*(volatile unsigned int *)(addr)) = (l)) -#ifdef CONFIG_MIPS32 -#define ____raw_writeq(val,addr) \ -({ \ - u64 __tmp; \ - \ - __asm__ __volatile__ ( \ - " .set mips3 \n" \ - " dsll32 %L0, %L0, 0 # ____raw_writeq\n" \ - " dsrl32 %L0, %L0, 0 \n" \ - " dsll32 %M0, %M0, 0 \n" \ - " or %L0, %L0, %M0 \n" \ - " sd %L0, (%2) \n" \ - " .set mips0 \n" \ - : "=r" (__tmp) \ - : "0" ((unsigned long long)val), "r" (addr)); \ -}) -#define __raw_writeq(val,addr) \ -({ \ - unsigned long __flags; \ - \ - local_irq_save(__flags); \ - ____raw_writeq(val, addr); \ - local_irq_restore(__flags); \ -}) +#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \ + \ +static inline void pfx##out##bwlq##p(type val, unsigned long port) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ + \ + __val = pfx##ioswab##bwlq(__addr, val); \ + \ + /* Really, we want this to be atomic */ \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + *__addr = __val; \ + slow; \ +} \ + \ +static inline type pfx##in##bwlq##p(unsigned long port) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + __val = *__addr; \ + slow; \ + \ + return pfx##ioswab##bwlq(__addr, __val); \ +} + +#define __BUILD_MEMORY_PFX(bus, bwlq, type) \ + \ +__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1) + +#define BUILDIO_MEM(bwlq, type) \ + \ +__BUILD_MEMORY_PFX(__raw_, bwlq, type) \ +__BUILD_MEMORY_PFX(, bwlq, type) \ +__BUILD_MEMORY_PFX(__mem_, bwlq, type) \ + +BUILDIO_MEM(b, u8) +BUILDIO_MEM(w, u16) +BUILDIO_MEM(l, u32) +BUILDIO_MEM(q, u64) + +#define __BUILD_IOPORT_PFX(bus, bwlq, type) \ + __BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \ + __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO) + +#define BUILDIO_IOPORT(bwlq, type) \ + __BUILD_IOPORT_PFX(, bwlq, type) \ + __BUILD_IOPORT_PFX(__mem_, bwlq, type) + +BUILDIO_IOPORT(b, u8) +BUILDIO_IOPORT(w, u16) +BUILDIO_IOPORT(l, u32) +#ifdef CONFIG_64BIT +BUILDIO_IOPORT(q, u64) #endif -#ifdef CONFIG_MIPS64 -#define ____raw_writeq(q,addr) ((*(volatile unsigned long *)(addr)) = (q)) -#define __raw_writeq(q,addr) ____raw_writeq(q, addr) + +#define __BUILDIO(bwlq, type) \ + \ +__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0) + +__BUILDIO(q, u64) + +#define readb_relaxed readb +#define readw_relaxed readw +#define readl_relaxed readl +#define readq_relaxed readq + +/* + * Some code tests for these symbols + */ +#define readq readq +#define writeq writeq + +#define __BUILD_MEMORY_STRING(bwlq, type) \ + \ +static inline void writes##bwlq(volatile void __iomem *mem, \ + const void *addr, unsigned int count) \ +{ \ + const volatile type *__addr = addr; \ + \ + while (count--) { \ + __mem_write##bwlq(*__addr, mem); \ + __addr++; \ + } \ +} \ + \ +static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \ + unsigned int count) \ +{ \ + volatile type *__addr = addr; \ + \ + while (count--) { \ + *__addr = __mem_read##bwlq(mem); \ + __addr++; \ + } \ +} + +#define __BUILD_IOPORT_STRING(bwlq, type) \ + \ +static inline void outs##bwlq(unsigned long port, const void *addr, \ + unsigned int count) \ +{ \ + const volatile type *__addr = addr; \ + \ + while (count--) { \ + __mem_out##bwlq(*__addr, port); \ + __addr++; \ + } \ +} \ + \ +static inline void ins##bwlq(unsigned long port, void *addr, \ + unsigned int 
count) \ +{ \ + volatile type *__addr = addr; \ + \ + while (count--) { \ + *__addr = __mem_in##bwlq(port); \ + __addr++; \ + } \ +} + +#define BUILDSTRING(bwlq, type) \ + \ +__BUILD_MEMORY_STRING(bwlq, type) \ +__BUILD_IOPORT_STRING(bwlq, type) + +BUILDSTRING(b, u8) +BUILDSTRING(w, u16) +BUILDSTRING(l, u32) +#ifdef CONFIG_64BIT +BUILDSTRING(q, u64) #endif -#define writeb(b,addr) __raw_writeb(__ioswab8(b),(addr)) -#define writew(w,addr) __raw_writew(__ioswab16(w),(addr)) -#define writel(l,addr) __raw_writel(__ioswab32(l),(addr)) -#define writeq(q,addr) __raw_writeq(__ioswab64(q),(addr)) -#define memset_io(a,b,c) memset((void *)(a),(b),(c)) -#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c)) -#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c)) +/* Depends on MIPS II instruction set */ +#define mmiowb() asm volatile ("sync" ::: "memory") + +static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) +{ + memset((void __force *) addr, val, count); +} +static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) +{ + memcpy(dst, (void __force *) src, count); +} +static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) +{ + memcpy((void __force *) dst, src, count); +} + +/* + * Memory Mapped I/O + */ +#define ioread8(addr) readb(addr) +#define ioread16(addr) readw(addr) +#define ioread32(addr) readl(addr) + +#define iowrite8(b,addr) writeb(b,addr) +#define iowrite16(w,addr) writew(w,addr) +#define iowrite32(l,addr) writel(l,addr) + +#define ioread8_rep(a,b,c) readsb(a,b,c) +#define ioread16_rep(a,b,c) readsw(a,b,c) +#define ioread32_rep(a,b,c) readsl(a,b,c) + +#define iowrite8_rep(a,b,c) writesb(a,b,c) +#define iowrite16_rep(a,b,c) writesw(a,b,c) +#define iowrite32_rep(a,b,c) writesl(a,b,c) + +/* Create a virtual mapping cookie for an IO port range */ +extern void __iomem *ioport_map(unsigned long port, unsigned int nr); +extern void ioport_unmap(void __iomem *); + +/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ +struct pci_dev; +extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); +extern void pci_iounmap(struct pci_dev *dev, void __iomem *); /* * ISA space is 'always mapped' on currently supported MIPS systems, no need @@ -339,24 +556,11 @@ static inline void iounmap(void *addr) */ #define __ISA_IO_base ((char *)(isa_slot_offset)) -#define isa_readb(a) readb(__ISA_IO_base + (a)) -#define isa_readw(a) readw(__ISA_IO_base + (a)) -#define isa_readl(a) readl(__ISA_IO_base + (a)) -#define isa_readq(a) readq(__ISA_IO_base + (a)) -#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a)) -#define isa_writew(w,a) writew(w,__ISA_IO_base + (a)) -#define isa_writel(l,a) writel(l,__ISA_IO_base + (a)) -#define isa_writeq(q,a) writeq(q,__ISA_IO_base + (a)) -#define isa_memset_io(a,b,c) memset_io(__ISA_IO_base + (a),(b),(c)) -#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c)) -#define isa_memcpy_toio(a,b,c) memcpy_toio(__ISA_IO_base + (a),(b),(c)) - /* * We don't have csum_partial_copy_fromio() yet, so we cheat here and * just copy it. The net code will then do the checksum later. */ #define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len)) -#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d)) /* * check_signature - find BIOS signatures @@ -368,7 +572,7 @@ static inline void iounmap(void *addr) * address should have been obtained by ioremap. * Returns 1 on a match. 
*/ -static inline int check_signature(unsigned long io_addr, +static inline int check_signature(char __iomem *io_addr, const unsigned char *signature, int length) { int retval = 0; @@ -384,191 +588,6 @@ out: return retval; } -/* - * isa_check_signature - find BIOS signatures - * @io_addr: mmio address to check - * @signature: signature block - * @length: length of signature - * - * Perform a signature comparison with the ISA mmio address io_addr. - * Returns 1 on a match. - * - * This function is deprecated. New drivers should use ioremap and - * check_signature. - */ -#define isa_check_signature(io, s, l) check_signature(i,s,l) - -static inline void __outb(unsigned char val, unsigned long port) -{ - port = __swizzle_addr_b(port); - - *(volatile u8 *)(mips_io_port_base + port) = __ioswab8(val); -} - -static inline void __outw(unsigned short val, unsigned long port) -{ - port = __swizzle_addr_w(port); - - *(volatile u16 *)(mips_io_port_base + port) = __ioswab16(val); -} - -static inline void __outl(unsigned int val, unsigned long port) -{ - port = __swizzle_addr_l(port); - - *(volatile u32 *)(mips_io_port_base + port) = __ioswab32(val); -} - -static inline void __outb_p(unsigned char val, unsigned long port) -{ - port = __swizzle_addr_b(port); - - *(volatile u8 *)(mips_io_port_base + port) = __ioswab8(val); - SLOW_DOWN_IO; -} - -static inline void __outw_p(unsigned short val, unsigned long port) -{ - port = __swizzle_addr_w(port); - - *(volatile u16 *)(mips_io_port_base + port) = __ioswab16(val); - SLOW_DOWN_IO; -} - -static inline void __outl_p(unsigned int val, unsigned long port) -{ - port = __swizzle_addr_l(port); - - *(volatile u32 *)(mips_io_port_base + port) = __ioswab32(val); - SLOW_DOWN_IO; -} - -#define outb(val, port) __outb(val, port) -#define outw(val, port) __outw(val, port) -#define outl(val, port) __outl(val, port) -#define outb_p(val, port) __outb_p(val, port) -#define outw_p(val, port) __outw_p(val, port) -#define outl_p(val, port) __outl_p(val, port) - -static inline unsigned char __inb(unsigned long port) -{ - port = __swizzle_addr_b(port); - - return __ioswab8(*(volatile u8 *)(mips_io_port_base + port)); -} - -static inline unsigned short __inw(unsigned long port) -{ - port = __swizzle_addr_w(port); - - return __ioswab16(*(volatile u16 *)(mips_io_port_base + port)); -} - -static inline unsigned int __inl(unsigned long port) -{ - port = __swizzle_addr_l(port); - - return __ioswab32(*(volatile u32 *)(mips_io_port_base + port)); -} - -static inline unsigned char __inb_p(unsigned long port) -{ - u8 __val; - - port = __swizzle_addr_b(port); - - __val = *(volatile u8 *)(mips_io_port_base + port); - SLOW_DOWN_IO; - - return __ioswab8(__val); -} - -static inline unsigned short __inw_p(unsigned long port) -{ - u16 __val; - - port = __swizzle_addr_w(port); - - __val = *(volatile u16 *)(mips_io_port_base + port); - SLOW_DOWN_IO; - - return __ioswab16(__val); -} - -static inline unsigned int __inl_p(unsigned long port) -{ - u32 __val; - - port = __swizzle_addr_l(port); - - __val = *(volatile u32 *)(mips_io_port_base + port); - SLOW_DOWN_IO; - - return __ioswab32(__val); -} - -#define inb(port) __inb(port) -#define inw(port) __inw(port) -#define inl(port) __inl(port) -#define inb_p(port) __inb_p(port) -#define inw_p(port) __inw_p(port) -#define inl_p(port) __inl_p(port) - -static inline void __outsb(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - outb(*(u8 *)addr, port); - addr++; - } -} - -static inline void __insb(unsigned long port, void *addr, 
unsigned int count) -{ - while (count--) { - *(u8 *)addr = inb(port); - addr++; - } -} - -static inline void __outsw(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - outw(*(u16 *)addr, port); - addr += 2; - } -} - -static inline void __insw(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - *(u16 *)addr = inw(port); - addr += 2; - } -} - -static inline void __outsl(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - outl(*(u32 *)addr, port); - addr += 4; - } -} - -static inline void __insl(unsigned long port, void *addr, unsigned int count) -{ - while (count--) { - *(u32 *)addr = inl(port); - addr += 4; - } -} - -#define outsb(port, addr, count) __outsb(port, addr, count) -#define insb(port, addr, count) __insb(port, addr, count) -#define outsw(port, addr, count) __outsw(port, addr, count) -#define insw(port, addr, count) __insw(port, addr, count) -#define outsl(port, addr, count) __outsl(port, addr, count) -#define insl(port, addr, count) __insl(port, addr, count) - /* * The caches on some architectures aren't dma-coherent and have need to * handle this in software. There are three types of operations that @@ -622,4 +641,15 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size); #define csr_out32(v,a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v)) #define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST)) +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + #endif /* _ASM_IO_H */
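
The first hunk above replaces the old CONFIG_SWAP_IO_SPACE #ifdef maze with per-platform hooks: the __raw_* accessors never byte-swap in software, while the cooked readw()/writew() family goes through the ioswab*() hooks supplied by the platform's mangle-port.h, swapping only where the bus hardware does not already do it. A minimal user-space sketch of that split, using hypothetical demo_* names in place of the real hooks (not part of the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-ins for the ioswab hooks normally provided by the
 * platform's mangle-port.h.  Here the "cooked" 16-bit hook swaps because
 * the imaginary bus does not, while the raw hook never swaps.
 */
static inline uint16_t demo_swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

#define demo_ioswabw(a, x)		demo_swab16(x)	/* cooked accessors  */
#define demo___raw_ioswabw(a, x)	(x)		/* __raw_ accessors  */

int main(void)
{
	uint16_t wire = 0x3412;	/* value as seen on the device/bus */

	printf("readw()       -> %#x\n", demo_ioswabw(NULL, wire));	  /* 0x1234 */
	printf("__raw_readw() -> %#x\n", demo___raw_ioswabw(NULL, wire)); /* 0x3412 */
	return 0;
}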
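
__ioremap_mode() now short-circuits two cases before falling back to __ioremap(): on CPUs with 64-bit addresses it simply offsets into UNCAC_BASE or IO_BASE, and on 32-bit kernels a fully constant, uncached request covering only the low 512MB of physical space (the __builtin_constant_p() and __IS_LOW512() tests, with flags == _CACHE_UNCACHED) folds directly to a KSEG1 address at compile time. A user-space sketch of the address arithmetic, with the window constants written out for illustration; on 32-bit kernels CKSEG1 is 0xa0000000:

#include <stdint.h>
#include <stdio.h>

/* Illustrative copies of the kernel tests; the real ones come from the
 * header above and from asm/addrspace.h. */
#define DEMO_IS_LOW512(addr)	(!((uint64_t)(addr) & ~0x1fffffffULL))
#define DEMO_CKSEG1ADDR(pa)	(0xa0000000UL | ((uint64_t)(pa) & 0x1fffffffUL))

int main(void)
{
	uint64_t flash = 0x1fc00000;	/* boot flash: inside the window   */
	uint64_t size  = 0x400000;
	uint64_t high  = 0x40000000;	/* example device above the window */

	if (DEMO_IS_LOW512(flash) && DEMO_IS_LOW512(flash + size - 1))
		printf("ioremap_nocache(%#llx) -> KSEG1 %#lx, no __ioremap() call\n",
		       (unsigned long long)flash,
		       (unsigned long)DEMO_CKSEG1ADDR(flash));

	if (!DEMO_IS_LOW512(high))
		printf("ioremap_nocache(%#llx) -> falls back to __ioremap()\n",
		       (unsigned long long)high);
	return 0;
}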
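
The __BUILD_MEMORY_SINGLE() template above generates all of the read*/write*, __raw_* and __mem_* variants from one pattern: swizzle the address, make a single naturally sized volatile access, and run the value through the matching ioswab hook. Only the 64-bit case on a 32-bit kernel takes the inline-assembly path, with interrupts disabled when the irq argument says so. A user-space sketch of what the 32-bit readl()/writel() pair boils down to, with identity demo_* stand-ins for the platform hooks (the real ones come from mangle-port.h):

#include <stdint.h>
#include <stdio.h>

#define demo_swizzle_addr_l(a)	(a)	/* most platforms: identity	*/
#define demo_ioswabl(p, x)	(x)	/* no software byte swap here	*/

static volatile uint32_t demo_regs[4];	/* stands in for device MMIO	*/

static inline void demo_writel(uint32_t val, volatile void *mem)
{
	volatile uint32_t *p =
		(volatile uint32_t *)demo_swizzle_addr_l((uintptr_t)mem);

	*p = demo_ioswabl(p, val);	/* one 32-bit store		*/
}

static inline uint32_t demo_readl(const volatile void *mem)
{
	volatile uint32_t *p =
		(volatile uint32_t *)demo_swizzle_addr_l((uintptr_t)mem);

	return demo_ioswabl(p, *p);	/* one 32-bit load		*/
}

int main(void)
{
	demo_writel(0xdeadbeef, &demo_regs[1]);
	printf("reg1 = %#x\n", demo_readl(&demo_regs[1]));
	return 0;
}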
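
The __BUILD_MEMORY_STRING() and __BUILD_IOPORT_STRING() templates generate the reads*/writes* and ins*/outs* helpers. The point worth noting is that only the memory pointer advances; the port (or MMIO register) stays fixed, which is what FIFO-style hardware such as an IDE data register expects. A user-space sketch of the insw() pattern, with demo_inw() faking the device side (illustrative names, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static uint16_t demo_fifo = 0x1234;

/* Fake inw(): the device hands out a new datum on every read of the
 * same port. */
static uint16_t demo_inw(unsigned long port)
{
	(void)port;
	return demo_fifo++;
}

/* Same shape as the generated insw(): fixed port, advancing buffer. */
static void demo_insw(unsigned long port, void *addr, unsigned int count)
{
	volatile uint16_t *p = addr;

	while (count--)
		*p++ = demo_inw(port);
}

int main(void)
{
	uint16_t buf[4];

	demo_insw(0x1f0, buf, 4);	/* 0x1f0: e.g. the IDE data port */
	printf("%#x %#x %#x %#x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}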