/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H
#include <linux/config.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/cpu-features.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/byteorder.h>
#include <mangle-port.h>
/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO
/*
 * Sane hardware offers swapping of I/O space accesses in hardware; less
 * sane hardware forces software to fiddle with this ...
 */
#if defined(CONFIG_SWAP_IO_SPACE) && defined(__MIPSEB__)

#define __ioswab8(x) (x)

#ifdef CONFIG_SGI_IP22
/*
 * IP22 seems braindead enough to swap 16-bit values in hardware, but
 * not 32-bit ones.  Go figure... Can't tell without documentation.
 */
#define __ioswab16(x) (x)
#else
#define __ioswab16(x) swab16(x)
#endif
#define __ioswab32(x) swab32(x)
#define __ioswab64(x) swab64(x)

#else

#define __ioswab8(x) (x)
#define __ioswab16(x) (x)
#define __ioswab32(x) (x)
#define __ioswab64(x) (x)

#endif
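/*
 * Illustration (a sketch, not part of the original interface): with the
 * swapping branch above, readl() hands back the value a little-endian
 * device wrote.  For a raw bus value of 0x12345678,
 *
 *	__ioswab32(0x12345678) == 0x78563412
 *
 * while on the identity branch the value passes through unchanged.
 */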
#define IO_SPACE_LIMIT 0xffff
/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions. mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some
 * code assumes that this is an address that can be loaded with a single
 * lui instruction, so the lower 16 bits must be zero.  This should be
 * true on any sane architecture; generic code does not use this
 * assumption.
 */
extern const unsigned long mips_io_port_base;
#define set_io_port_base(base)	\
	do { * (unsigned long *) &mips_io_port_base = (base); } while (0)
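/*
 * Platform setup code is expected to call this once, early.  A sketch
 * (the base address here is hypothetical and board specific):
 *
 *	set_io_port_base(KSEG1ADDR(0x1fd00000));
 *
 * The write through a cast is intentional: mips_io_port_base is
 * declared const so the compiler may keep it in a register, and this
 * macro is the one sanctioned way to modify it.
 */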
/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 */
#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));
#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif
/**
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given. It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}
/**
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given. It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers. In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}
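/*
 * Example (sketch): for directly mapped memory, such as a kmalloc()ed
 * buffer, the two functions are exact inverses:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *	BUG_ON(phys_to_virt(pa) != buf);
 *
 * This does not hold for vmalloc() or ioremap() addresses.
 */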
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void * isa_bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

#define isa_page_to_bus page_to_phys
/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them on x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.  This implies the assumption that there is only
 * one of these busses.
 */
extern unsigned long isa_slot_offset;
/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
extern void * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(void *addr);
static inline void * __ioremap_mode(unsigned long offset, unsigned long size,
	unsigned long flags)
{
	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void *) (unsigned long) (base + offset);
	}

	return __ioremap(offset, size, flags);
}
/**
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
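/*
 * Typical use, as a sketch (the bus address, size and register offset
 * are hypothetical):
 *
 *	void *regs = ioremap(0x1f000000, 0x1000);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x04);
 *	iounmap(regs);
 */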
/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
/*
 * These two are MIPS specific ioremap variants.  ioremap_cacheable_cow
 * requests a cachable mapping, ioremap_uncached_accelerated requests a
 * mapping using the uncached accelerated mode which isn't supported on
 * all processors.
 */
#define ioremap_cacheable_cow(offset, size)				\
	__ioremap_mode((offset), (size), _CACHE_CACHABLE_COW)
#define ioremap_uncached_accelerated(offset, size)			\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED)
static inline void iounmap(void *addr)
{
	if (cpu_has_64bit_addresses)
		return;

	__iounmap(addr);
}
#define __raw_readb(addr) (*(volatile unsigned char *)(addr))
#define __raw_readw(addr) (*(volatile unsigned short *)(addr))
#define __raw_readl(addr) (*(volatile unsigned int *)(addr))
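/*
 * A 64-bit MMIO access on a 32-bit kernel cannot be expressed as a
 * single C load or store, so the 32-bit variant below uses an ld into
 * a 32-bit register pair: dsra32 moves the upper half into %M0 and
 * sll sign-extends the lower half in %L0.  Interrupts are disabled
 * around the access because a 32-bit kernel only preserves 32 bits of
 * each register across an interrupt.
 */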
#ifdef CONFIG_MIPS32

#define ____raw_readq(addr)						\
({									\
	u64 __res;							\
									\
	__asm__ __volatile__ (						\
		"	.set	mips3		# ____raw_readq	\n"	\
		"	ld	%L0, (%1)			\n"	\
		"	dsra32	%M0, %L0, 0			\n"	\
		"	sll	%L0, %L0, 0			\n"	\
		"	.set	mips0				\n"	\
		: "=r" (__res)						\
		: "r" (addr));						\
	__res;								\
})

#define __raw_readq(addr)						\
({									\
	unsigned long __flags;						\
	u64 __res;							\
									\
	local_irq_save(__flags);					\
	__res = ____raw_readq(addr);					\
	local_irq_restore(__flags);					\
	__res;								\
})

#else /* CONFIG_MIPS32 */

#define ____raw_readq(addr)	(*(volatile unsigned long *)(addr))
#define __raw_readq(addr)	____raw_readq(addr)

#endif /* CONFIG_MIPS32 */
#define readb(addr) __ioswab8(__raw_readb(addr))
#define readw(addr) __ioswab16(__raw_readw(addr))
#define readl(addr) __ioswab32(__raw_readl(addr))
#define readq(addr) __ioswab64(__raw_readq(addr))
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
#define readq_relaxed(addr) readq(addr)
#define __raw_writeb(b,addr) ((*(volatile unsigned char *)(addr)) = (b))
#define __raw_writew(w,addr) ((*(volatile unsigned short *)(addr)) = (w))
#define __raw_writel(l,addr) ((*(volatile unsigned int *)(addr)) = (l))
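/*
 * Mirror image of the readq case: the 64-bit store on a 32-bit kernel
 * is assembled in the %L0/%M0 register pair (clear the upper half of
 * %L0, shift %M0 up, or them together, then sd), again with interrupts
 * disabled so the register pair survives intact.
 */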
#ifdef CONFIG_MIPS32

#define ____raw_writeq(val,addr)					\
do {									\
	u64 __tmp;							\
									\
	__asm__ __volatile__ (						\
		"	.set	mips3				\n"	\
		"	dsll32	%L0, %L0, 0	# ____raw_writeq\n"	\
		"	dsrl32	%L0, %L0, 0			\n"	\
		"	dsll32	%M0, %M0, 0			\n"	\
		"	or	%L0, %L0, %M0			\n"	\
		"	sd	%L0, (%2)			\n"	\
		"	.set	mips0				\n"	\
		: "=r" (__tmp)						\
		: "0" ((unsigned long long)val), "r" (addr));		\
} while (0)

#define __raw_writeq(val,addr)						\
do {									\
	unsigned long __flags;						\
									\
	local_irq_save(__flags);					\
	____raw_writeq(val, addr);					\
	local_irq_restore(__flags);					\
} while (0)

#else /* CONFIG_MIPS32 */

#define ____raw_writeq(q,addr)	((*(volatile unsigned long *)(addr)) = (q))
#define __raw_writeq(q,addr)	____raw_writeq(q, addr)

#endif /* CONFIG_MIPS32 */
#define writeb(b,addr) __raw_writeb(__ioswab8(b),(addr))
#define writew(w,addr) __raw_writew(__ioswab16(w),(addr))
#define writel(l,addr) __raw_writel(__ioswab32(l),(addr))
#define writeq(q,addr) __raw_writeq(__ioswab64(q),(addr))
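/*
 * Example (sketch): because the byte-order fixup sits on top of the
 * raw accessors, a little-endian device register reads the same way
 * regardless of CPU endianness.  With ctrl, status and handle_ready()
 * all hypothetical, ctrl and status being ioremap()ed addresses:
 *
 *	writel(0x1, ctrl);
 *	if (readl(status) & 0x1)
 *		handle_ready();
 */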
#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))
/*
 * ISA space is 'always mapped' on currently supported MIPS systems, no need
 * to explicitly ioremap() it. The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite large):
 */
#define __ISA_IO_base ((char *)(isa_slot_offset))
#define isa_readb(a)		readb(__ISA_IO_base + (a))
#define isa_readw(a)		readw(__ISA_IO_base + (a))
#define isa_readl(a)		readl(__ISA_IO_base + (a))
#define isa_readq(a)		readq(__ISA_IO_base + (a))
#define isa_writeb(b,a)		writeb(b,__ISA_IO_base + (a))
#define isa_writew(w,a)		writew(w,__ISA_IO_base + (a))
#define isa_writel(l,a)		writel(l,__ISA_IO_base + (a))
#define isa_writeq(q,a)		writeq(q,__ISA_IO_base + (a))
#define isa_memset_io(a,b,c)	memset_io(__ISA_IO_base + (a),(b),(c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a),__ISA_IO_base + (b),(c))
#define isa_memcpy_toio(a,b,c)	memcpy_toio(__ISA_IO_base + (a),(b),(c))
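/*
 * Example (sketch): the ISA helpers take bus offsets directly, no
 * ioremap() required.  Peeking at legacy VGA text memory:
 *
 *	unsigned char c = isa_readb(0xb8000);
 */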
/*
 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it. The net code will then do the checksum later.
 */
#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))
/**
 * check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the mmio address io_addr. This
 * address should have been obtained by ioremap.
 * Returns 1 on a match.
 */
static inline int check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;

	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
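/*
 * Example (sketch; the window address, signature and adapter_found()
 * are hypothetical):
 *
 *	static const unsigned char sig[] = "XTID";
 *	void *bios = ioremap(0xc8000, 0x2000);
 *
 *	if (bios && check_signature((unsigned long)bios, sig, 4))
 *		adapter_found();
 */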
/**
 * isa_check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the ISA mmio address io_addr.
 * Returns 1 on a match.
 *
 * This function is deprecated. New drivers should use ioremap and
 * check_signature.
 */
#define isa_check_signature(io, s, l) check_signature(io,s,l)
static inline void __outb(unsigned char val, unsigned long port)
{
	port = __swizzle_addr_b(port);

	*(volatile u8 *)(mips_io_port_base + port) = __ioswab8(val);
}

static inline void __outw(unsigned short val, unsigned long port)
{
	port = __swizzle_addr_w(port);

	*(volatile u16 *)(mips_io_port_base + port) = __ioswab16(val);
}

static inline void __outl(unsigned int val, unsigned long port)
{
	port = __swizzle_addr_l(port);

	*(volatile u32 *)(mips_io_port_base + port) = __ioswab32(val);
}

static inline void __outb_p(unsigned char val, unsigned long port)
{
	port = __swizzle_addr_b(port);

	*(volatile u8 *)(mips_io_port_base + port) = __ioswab8(val);
	SLOW_DOWN_IO;
}

static inline void __outw_p(unsigned short val, unsigned long port)
{
	port = __swizzle_addr_w(port);

	*(volatile u16 *)(mips_io_port_base + port) = __ioswab16(val);
	SLOW_DOWN_IO;
}

static inline void __outl_p(unsigned int val, unsigned long port)
{
	port = __swizzle_addr_l(port);

	*(volatile u32 *)(mips_io_port_base + port) = __ioswab32(val);
	SLOW_DOWN_IO;
}
#define outb(val, port)		__outb(val, port)
#define outw(val, port)		__outw(val, port)
#define outl(val, port)		__outl(val, port)
#define outb_p(val, port)	__outb_p(val, port)
#define outw_p(val, port)	__outw_p(val, port)
#define outl_p(val, port)	__outl_p(val, port)
static inline unsigned char __inb(unsigned long port)
{
	port = __swizzle_addr_b(port);

	return __ioswab8(*(volatile u8 *)(mips_io_port_base + port));
}

static inline unsigned short __inw(unsigned long port)
{
	port = __swizzle_addr_w(port);

	return __ioswab16(*(volatile u16 *)(mips_io_port_base + port));
}

static inline unsigned int __inl(unsigned long port)
{
	port = __swizzle_addr_l(port);

	return __ioswab32(*(volatile u32 *)(mips_io_port_base + port));
}
static inline unsigned char __inb_p(unsigned long port)
{
	u8 __val;

	port = __swizzle_addr_b(port);

	__val = *(volatile u8 *)(mips_io_port_base + port);
	SLOW_DOWN_IO;

	return __ioswab8(__val);
}

static inline unsigned short __inw_p(unsigned long port)
{
	u16 __val;

	port = __swizzle_addr_w(port);

	__val = *(volatile u16 *)(mips_io_port_base + port);
	SLOW_DOWN_IO;

	return __ioswab16(__val);
}

static inline unsigned int __inl_p(unsigned long port)
{
	u32 __val;

	port = __swizzle_addr_l(port);

	__val = *(volatile u32 *)(mips_io_port_base + port);
	SLOW_DOWN_IO;

	return __ioswab32(__val);
}
#define inb(port)	__inb(port)
#define inw(port)	__inw(port)
#define inl(port)	__inl(port)
#define inb_p(port)	__inb_p(port)
#define inw_p(port)	__inw_p(port)
#define inl_p(port)	__inl_p(port)
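/*
 * Example (sketch): an 8-bit index/data register pair in I/O port
 * space, here the VGA sequencer at 0x3c4/0x3c5:
 *
 *	u8 val;
 *
 *	outb(0x0a, 0x3c4);
 *	val = inb(0x3c5);
 *
 * On MIPS these expand to ordinary loads and stores relative to
 * mips_io_port_base, not special instructions.
 */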
static inline void __outsb(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		outb(*(u8 *)addr, port);
		addr++;
	}
}

static inline void __insb(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		*(u8 *)addr = inb(port);
		addr++;
	}
}

static inline void __outsw(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		outw(*(u16 *)addr, port);
		addr += 2;
	}
}

static inline void __insw(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		*(u16 *)addr = inw(port);
		addr += 2;
	}
}

static inline void __outsl(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		outl(*(u32 *)addr, port);
		addr += 4;
	}
}

static inline void __insl(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		*(u32 *)addr = inl(port);
		addr += 4;
	}
}
#define outsb(port, addr, count)	__outsb(port, addr, count)
#define insb(port, addr, count)		__insb(port, addr, count)
#define outsw(port, addr, count)	__outsw(port, addr, count)
#define insw(port, addr, count)		__insw(port, addr, count)
#define outsl(port, addr, count)	__outsl(port, addr, count)
#define insl(port, addr, count)		__insl(port, addr, count)
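/*
 * Example (sketch): draining a 512-byte sector from the legacy ATA
 * data port, 16 bits at a time:
 *
 *	u16 buf[256];
 *
 *	insw(0x1f0, buf, 256);
 */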
/*
 * The caches on some architectures aren't dma-coherent and need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent
 *    by writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary,
 *    before DMA transfers from memory to outside.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    from outside to memory.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start,size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start,size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start,size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
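/*
 * Example (sketch) for the noncoherent case, before starting a
 * device-bound DMA transfer out of a kmalloc()ed buffer:
 *
 *	dma_cache_wback((unsigned long)buf, len);
 *	start_device_dma(virt_to_phys(buf), len);
 *
 * start_device_dma() is a hypothetical stand-in for the device's own
 * doorbell; portable drivers should use the dma mapping API instead.
 */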
/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v,a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)    (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
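/*
 * Example (sketch): for a device with 32-bit registers laid out on
 * 64-bit boundaries, the low word of each doubleword sits at offset
 * +4 when the CPU is big-endian; __CSR_32_ADJUST hides that, so
 *
 *	u32 v = csr_in32(base + 0x10);
 *
 * reads the same register on either endianness (base hypothetical).
 */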
#endif /* _ASM_IO_H */