* - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*/
-#ifdef SLOW_IO_BY_JUMPING
-#define __SLOW_DOWN_IO "\njmp 1f\n1:\tjmp 1f\n1:"
-#else
#define __SLOW_DOWN_IO "\noutb %%al,$0x80"
-#endif
#ifdef REALLY_SLOW_IO
#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO __SLOW_DOWN_IO
#else
#define __FULL_SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
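/*
 * Illustration (not part of this patch): with REALLY_SLOW_IO defined,
 * an outb_p() built below emits the real port write followed by four
 * dummy writes to port 0x80 (the POST diagnostic port, traditionally
 * unused), giving slow legacy ISA hardware time to settle:
 *
 *	outb %al,%dx		# the real write
 *	outb %al,$0x80
 *	outb %al,$0x80
 *	outb %al,$0x80
 *	outb %al,$0x80
 */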
/*
 * Talk about misusing macros..
 */
#define __OUT1(s,x) \
-extern inline void out##s(unsigned x value, unsigned short port) {
+static inline void out##s(unsigned x value, unsigned short port) {
#define __OUT2(s,s1,s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
#define __OUT(s,s1,x) \
__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "Nd" (port)); } \
__OUT1(s##_p,x) __OUT2(s,s1,"w") __FULL_SLOW_DOWN_IO : : "a" (value), "Nd" (port));} \
#define __IN1(s) \
-extern inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
+static inline RETURN_TYPE in##s(unsigned short port) { RETURN_TYPE _v;
#define __IN2(s,s1,s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
#define __IN(s,s1,i...) \
__IN1(s) __IN2(s,s1,"w") : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
__IN1(s##_p) __IN2(s,s1,"w") __FULL_SLOW_DOWN_IO : "=a" (_v) : "Nd" (port) ,##i ); return _v; } \
#define __INS(s) \
-extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
#define __OUTS(s) \
-extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
{
return __pa(address);
}
-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
{
return __va(address);
}
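/*
 * Usage sketch (illustration only): these helpers are valid for the
 * kernel direct mapping, e.g. memory from kmalloc(), so the round trip
 * below holds.
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long phys = virt_to_phys(buf);
 *	BUG_ON(phys_to_virt(phys) != buf);
 */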
/*
* Change "struct page" to physical address.
*/
-#ifdef CONFIG_DISCONTIGMEM
-#include <asm/mmzone.h>
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-#else
-#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
-#endif
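/*
 * Usage sketch (illustration only): the page_to_pfn() form above also
 * works with CONFIG_DISCONTIGMEM, where the old (page - mem_map)
 * arithmetic breaks; 'pg' is hypothetical.
 *
 *	struct page *pg = alloc_page(GFP_KERNEL);
 *	dma_addr_t bus = page_to_phys(pg);
 */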
-extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
+#include <asm-generic/iomap.h>
+
+extern void __iomem *__ioremap(unsigned long offset, unsigned long size, unsigned long flags);
-extern inline void * ioremap (unsigned long offset, unsigned long size)
+static inline void __iomem * ioremap (unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, 0);
}
/*
 * This one maps high address device memory and turns off caching for
 * that area. It's useful if some control registers are in such an area
 * and write combining or read caching is not desirable:
 */
-extern void * ioremap_nocache (unsigned long offset, unsigned long size);
-extern void iounmap(void *addr);
+extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
+extern void iounmap(volatile void __iomem *addr);
+
+/* Use normal IO mappings for DMI */
+#define dmi_ioremap ioremap
+#define dmi_iounmap(x,l) iounmap(x)
+#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
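/*
 * Usage sketch (illustration only): mapping a device's register BAR
 * uncached and tearing it down again; 'pdev' is hypothetical.
 *
 *	void __iomem *regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *					     pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */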
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 *
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the x86 architecture, we just read/write the
 * memory location directly.
 */
-#define readb(addr) (*(volatile unsigned char *) (addr))
-#define readw(addr) (*(volatile unsigned short *) (addr))
-#define readl(addr) (*(volatile unsigned int *) (addr))
-#define readq(addr) (*(volatile unsigned long *) (addr))
+static inline __u8 __readb(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u8 *)addr;
+}
+static inline __u16 __readw(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u16 *)addr;
+}
+static inline __u32 __readl(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u32 *)addr;
+}
+static inline __u64 __readq(const volatile void __iomem *addr)
+{
+ return *(__force volatile __u64 *)addr;
+}
+#define readb(x) __readb(x)
+#define readw(x) __readw(x)
+#define readl(x) __readl(x)
+#define readq(x) __readq(x)
#define readb_relaxed(a) readb(a)
#define readw_relaxed(a) readw(a)
#define readl_relaxed(a) readl(a)
#define __raw_readb readb
#define __raw_readw readw
#define __raw_readl readl
#define __raw_readq readq
-#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
-#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
-#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
-#define writeq(b,addr) (*(volatile unsigned long *) (addr) = (b))
+#define mmiowb()
+
+#ifdef CONFIG_UNORDERED_IO
+static inline void __writel(__u32 val, volatile void __iomem *addr)
+{
+ volatile __u32 __iomem *target = addr;
+ asm volatile("movnti %1,%0"
+ : "=m" (*target)
+ : "r" (val) : "memory");
+}
+
+static inline void __writeq(__u64 val, volatile void __iomem *addr)
+{
+ volatile __u64 __iomem *target = addr;
+ asm volatile("movnti %1,%0"
+ : "=m" (*target)
+ : "r" (val) : "memory");
+}
+#else
+static inline void __writel(__u32 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u32 *)addr = b;
+}
+static inline void __writeq(__u64 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u64 *)addr = b;
+}
+#endif
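/*
 * Note (illustration only): movnti is a non-temporal store and is
 * weakly ordered against other stores, which is why CONFIG_UNORDERED_IO
 * is opt-in. A caller needing ordering after such a writel() would add
 * a fence itself, e.g.:
 *
 *	writel(val, ring_tail);
 *	asm volatile("sfence" ::: "memory");
 */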
+static inline void __writeb(__u8 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u8 *)addr = b;
+}
+static inline void __writew(__u16 b, volatile void __iomem *addr)
+{
+ *(__force volatile __u16 *)addr = b;
+}
+#define writeq(val,addr) __writeq((val),(addr))
+#define writel(val,addr) __writel((val),(addr))
+#define writew(val,addr) __writew((val),(addr))
+#define writeb(val,addr) __writeb((val),(addr))
#define __raw_writeb writeb
#define __raw_writew writew
#define __raw_writel writel
#define __raw_writeq writeq
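/*
 * Usage sketch (illustration only): with the __iomem annotations above,
 * sparse flags direct dereferences of MMIO pointers, so drivers go
 * through the accessors; REG_STATUS and REG_CTRL are hypothetical
 * register offsets.
 *
 *	u32 status = readl(regs + REG_STATUS);
 *	writel(status | 1, regs + REG_CTRL);
 */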
-void *__memcpy_fromio(void*,unsigned long,unsigned);
-void *__memcpy_toio(unsigned long,const void*,unsigned);
+void __memcpy_fromio(void*,unsigned long,unsigned);
+void __memcpy_toio(unsigned long,const void*,unsigned);
-#define memcpy_fromio(to,from,len) \
- __memcpy_fromio((to),(unsigned long)(from),(len))
-#define memcpy_toio(to,from,len) \
- __memcpy_toio((unsigned long)(to),(from),(len))
-#define memset_io(a,b,c) memset((void *)(a),(b),(c))
+static inline void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned len)
+{
+ __memcpy_fromio(to,(unsigned long)from,len);
+}
+static inline void memcpy_toio(volatile void __iomem *to, const void *from, unsigned len)
+{
+ __memcpy_toio((unsigned long)to,from,len);
+}
+
+void memset_io(volatile void __iomem *a, int b, size_t c);
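/*
 * Usage sketch (illustration only): copying a frame out of device
 * memory into a normal kernel buffer, then clearing it; 'fifo' and
 * FIFO_SIZE are hypothetical.
 *
 *	u8 buf[FIFO_SIZE];
 *	memcpy_fromio(buf, fifo, FIFO_SIZE);
 *	memset_io(fifo, 0, FIFO_SIZE);
 */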
/*
 * ISA space is 'always mapped' on a typical x86 system, no need to
 * explicitly ioremap() it. The __ISA_IO_base pointer below can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite large):
 */
-#define __ISA_IO_base ((char *)(PAGE_OFFSET))
+#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
#define isa_readb(a) readb(__ISA_IO_base + (a))
#define isa_readw(a) readw(__ISA_IO_base + (a))
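/*
 * Usage sketch (illustration only): because ISA memory is mapped 1:1,
 * the BIOS ROM area can be read with a plain physical offset:
 *
 *	unsigned char b = isa_readb(0xF0000);
 */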
/**
 * check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the mmio address io_addr. The
 * address should have been obtained by ioremap.
 * Returns 1 on a match.
 */
-static inline int check_signature(unsigned long io_addr,
+static inline int check_signature(void __iomem *io_addr,
const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
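/*
 * Usage sketch (illustration only): the modern replacement for the
 * isa_check_signature() removed below - ioremap() the region, then call
 * check_signature(); SIG_PHYS and sig are hypothetical.
 *
 *	void __iomem *p = ioremap(SIG_PHYS, sizeof(sig));
 *	int found = p && check_signature(p, sig, sizeof(sig));
 *	if (p)
 *		iounmap(p);
 */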
-#ifndef __i386__
-/**
- * isa_check_signature - find BIOS signatures
- * @io_addr: mmio address to check
- * @signature: signature block
- * @length: length of signature
- *
- * Perform a signature comparison with the ISA mmio address io_addr.
- * Returns 1 on a match.
- *
- * This function is deprecated. New drivers should use ioremap and
- * check_signature.
- */
-
-
-static inline int isa_check_signature(unsigned long io_addr,
- const unsigned char *signature, int length)
-{
- int retval = 0;
- do {
- if (isa_readb(io_addr) != *signature)
- goto out;
- io_addr++;
- signature++;
- length--;
- } while (length);
- retval = 1;
-out:
- return retval;
-}
-#endif
-
/* Nothing to do */
#define dma_cache_inv(_start,_size) do { } while (0)
#define flush_write_buffers()
-/* Disable vmerge for now. Need to fix the block layer code
- to check for non iommu addresses first.
- When the IOMMU is force it is safe to enable. */
-extern int iommu_merge;
-#define BIO_VMERGE_BOUNDARY (iommu_merge ? 4096 : 0)
+extern int iommu_bio_merge;
+#define BIO_VMERGE_BOUNDARY iommu_bio_merge
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#define xlate_dev_mem_ptr(p) __va(p)
+
+/*
+ * Convert a virtual cached pointer to an uncached pointer
+ */
+#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */