 * This file contains the definitions for the emulated IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl).  You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated to
 * (a) handle it all in a way that makes gcc able to optimize it as
 * well as possible and (b) avoid writing the same thing over and over
 * again with slight variations and possibly making a mistake somewhere.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)
#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)	/* region 6 */
/*
 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
 * large machines may have multiple other I/O spaces so we can't place any a priori limit
 * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
 */
#define IO_SPACE_LIMIT		0xffffffffffffffffUL

#define MAX_IO_SPACES_BITS	4
#define MAX_IO_SPACES		(1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS		24
#define IO_SPACE_SIZE		(1UL << IO_SPACE_BITS)

#define IO_SPACE_NR(port)	((port) >> IO_SPACE_BITS)
#define IO_SPACE_BASE(space)	((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port)	((port) & (IO_SPACE_SIZE - 1))
#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
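/*
 * Worked example (added illustration, not part of the original header):
 * port 0x1234 sparse-encodes to (((0x1234 >> 2) << 12) | (0x1234 & 0xfff))
 * = (0x48d << 12) | 0x234 = 0x48d234.  Each naturally aligned group of
 * four port addresses thus gets its own 4KB page in MMIO space, while the
 * low 12 bits of the port keep their byte position within that page.
 */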
struct io_space {
        unsigned long mmio_base;	/* base in MMIO space */
        int sparse;
};

extern struct io_space io_space[];
extern unsigned int num_io_spaces;
# ifdef __KERNEL__

/*
 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
 *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
 *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
 *
 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
 * code that uses bare port numbers without the prerequisite pci_iomap().
 */
#define PIO_OFFSET		(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
#define PIO_MASK		(PIO_OFFSET - 1)
#define PIO_RESERVED		__IA64_UNCACHED_OFFSET
#define HAVE_ARCH_PIO_SIZE
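/*
 * Worked example (added illustration): with MAX_IO_SPACES_BITS == 4 and
 * IO_SPACE_BITS == 24, PIO_OFFSET is 1UL << 28 == 0x10000000.  Port 0x1f0
 * in I/O space 2 would be handed out by pci_iomap() as the cookie
 * 0x10000000 | (2 << 24) | 0x1f0 == 0x120001f0, matching the
 * 0x000000001SPPPPPP pattern described above (S == 2, PPPPPP == 0001f0).
 */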
#include <asm/hypervisor.h>
#include <asm/intrinsics.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/privop.h>
#include <asm/system.h>
#include <asm-generic/iomap.h>
/*
 * Change virtual addresses to physical addresses and vice versa.
 */
static inline unsigned long
virt_to_phys (volatile void *address)
{
        return (unsigned long) address - PAGE_OFFSET;
}

static inline void *
phys_to_virt (unsigned long address)
{
        return (void *) (address + PAGE_OFFSET);
}
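/*
 * Illustration (added; assumes the usual ia64 layout where PAGE_OFFSET is
 * the base of the identity-mapped kernel region, 0xe000000000000000):
 * the kernel virtual address 0xe000000001000000 maps to physical address
 * 0x1000000, and phys_to_virt() simply inverts the subtraction.  This only
 * holds for identity-mapped addresses, not for vmalloc()ed or
 * ioremap()ped ones.
 */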
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);
/*
 * The following macros are deprecated and scheduled for removal.
 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
 */
#ifndef CONFIG_XEN
#define bus_to_virt	phys_to_virt
#define virt_to_bus	virt_to_phys
#define page_to_bus	page_to_phys
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define page_to_pseudophys(page)	page_to_phys(page)
#else /* CONFIG_XEN */
#define bus_to_virt(bus)	\
	phys_to_virt(machine_to_phys_for_dma(bus))
#define virt_to_bus(virt)	\
	phys_to_machine_for_dma(virt_to_phys(virt))
#define page_to_bus(page)	\
	phys_to_machine_for_dma(page_to_pseudophys(page))

#define page_to_pseudophys(page) \
	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
/*
 * Drivers that use page_to_phys() for bus addresses are broken;
 * known examples include:
 * drivers/ide/cris/ide-cris.c
 * drivers/scsi/dec_esp.c
 */
#define page_to_phys(page)	(page_to_pseudophys(page))
#define bvec_to_bus(bv)		(page_to_bus((bv)->bv_page) + \
				 (unsigned long) (bv)->bv_offset)
#define bio_to_pseudophys(bio)	(page_to_pseudophys(bio_page((bio))) + \
				 (unsigned long) bio_offset((bio)))
#define bvec_to_pseudophys(bv)	(page_to_pseudophys((bv)->bv_page) + \
				 (unsigned long) (bv)->bv_offset)

#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
	(((bvec_to_bus((vec1)) + (vec1)->bv_len) == bvec_to_bus((vec2))) && \
	 ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) ==		\
	  bvec_to_pseudophys((vec2))))
#endif /* CONFIG_XEN */
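/*
 * Note (added explanation): under Xen, two biovecs are mergeable only if
 * they are contiguous in *both* address spaces -- machine (bus) space,
 * which the device actually DMAs to, and pseudo-physical space, which the
 * rest of the kernel reasons about.  Adjacent pseudo-physical pages are
 * often backed by non-adjacent machine pages, so checking either space
 * alone would allow bogus merges.
 */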
/*
 * Memory fence w/accept.  This should never be used in code that is
 * not IA-64 specific.
 */
#define __ia64_mf_a()	ia64_mfa()
/**
 * ___ia64_mmiowb - I/O write barrier
 *
 * Ensure ordering of I/O space writes.  This will make sure that writes
 * following the barrier will arrive after all previous writes.  For most
 * ia64 platforms, this is a simple 'mf.a' instruction.
 *
 * See Documentation/DocBook/deviceiobook.tmpl for more information.
 */
static inline void ___ia64_mmiowb(void)
{
        ia64_mfa();
}
static inline void*
__ia64_mk_io_addr (unsigned long port)
{
        struct io_space *space;
        unsigned long offset;

        space = &io_space[IO_SPACE_NR(port)];
        port = IO_SPACE_PORT(port);
        if (space->sparse)
                offset = IO_SPACE_SPARSE_ENCODING(port);
        else
                offset = port;

        return (void *) (space->mmio_base | offset);
}
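/*
 * Worked example (added illustration): port 0x02000010 decodes as space 2
 * (IO_SPACE_NR) with port offset 0x10 (IO_SPACE_PORT).  For a dense space
 * the returned address is io_space[2].mmio_base | 0x10; for a sparse space
 * the offset is first run through IO_SPACE_SPARSE_ENCODING(), giving
 * io_space[2].mmio_base | 0x4010.
 */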
#define __ia64_inb	___ia64_inb
#define __ia64_inw	___ia64_inw
#define __ia64_inl	___ia64_inl
#define __ia64_outb	___ia64_outb
#define __ia64_outw	___ia64_outw
#define __ia64_outl	___ia64_outl
#define __ia64_readb	___ia64_readb
#define __ia64_readw	___ia64_readw
#define __ia64_readl	___ia64_readl
#define __ia64_readq	___ia64_readq
#define __ia64_readb_relaxed	___ia64_readb
#define __ia64_readw_relaxed	___ia64_readw
#define __ia64_readl_relaxed	___ia64_readl
#define __ia64_readq_relaxed	___ia64_readq
#define __ia64_writeb	___ia64_writeb
#define __ia64_writew	___ia64_writew
#define __ia64_writel	___ia64_writel
#define __ia64_writeq	___ia64_writeq
#define __ia64_mmiowb	___ia64_mmiowb
/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncachable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them
 * around during optimization, which is why we use "volatile" pointers.
 */
static inline unsigned int
___ia64_inb (unsigned long port)
{
        volatile unsigned char *addr = __ia64_mk_io_addr(port);
        unsigned char ret;

        ret = *addr;
        __ia64_mf_a();
        return ret;
}

static inline unsigned int
___ia64_inw (unsigned long port)
{
        volatile unsigned short *addr = __ia64_mk_io_addr(port);
        unsigned short ret;

        ret = *addr;
        __ia64_mf_a();
        return ret;
}

static inline unsigned int
___ia64_inl (unsigned long port)
{
        volatile unsigned int *addr = __ia64_mk_io_addr(port);
        unsigned int ret;

        ret = *addr;
        __ia64_mf_a();
        return ret;
}
static inline void
___ia64_outb (unsigned char val, unsigned long port)
{
        volatile unsigned char *addr = __ia64_mk_io_addr(port);

        *addr = val;
        __ia64_mf_a();
}

static inline void
___ia64_outw (unsigned short val, unsigned long port)
{
        volatile unsigned short *addr = __ia64_mk_io_addr(port);

        *addr = val;
        __ia64_mf_a();
}

static inline void
___ia64_outl (unsigned int val, unsigned long port)
{
        volatile unsigned int *addr = __ia64_mk_io_addr(port);

        *addr = val;
        __ia64_mf_a();
}
static inline void
__insb (unsigned long port, void *dst, unsigned long count)
{
        unsigned char *dp = dst;

        while (count--)
                *dp++ = platform_inb(port);
}

static inline void
__insw (unsigned long port, void *dst, unsigned long count)
{
        unsigned short *dp = dst;

        while (count--)
                *dp++ = platform_inw(port);
}

static inline void
__insl (unsigned long port, void *dst, unsigned long count)
{
        unsigned int *dp = dst;

        while (count--)
                *dp++ = platform_inl(port);
}
static inline void
__outsb (unsigned long port, const void *src, unsigned long count)
{
        const unsigned char *sp = src;

        while (count--)
                platform_outb(*sp++, port);
}

static inline void
__outsw (unsigned long port, const void *src, unsigned long count)
{
        const unsigned short *sp = src;

        while (count--)
                platform_outw(*sp++, port);
}

static inline void
__outsl (unsigned long port, const void *src, unsigned long count)
{
        const unsigned int *sp = src;

        while (count--)
                platform_outl(*sp++, port);
}
/*
 * Unfortunately, some platforms are broken and do not follow the IA-64 architecture
 * specification regarding legacy I/O support.  Thus, we have to make these operations
 * platform dependent...
 */
#define __inb		platform_inb
#define __inw		platform_inw
#define __inl		platform_inl
#define __outb		platform_outb
#define __outw		platform_outw
#define __outl		platform_outl
#define __mmiowb	platform_mmiowb
#define inb(p)		__inb(p)
#define inw(p)		__inw(p)
#define inl(p)		__inl(p)
#define insb(p,d,c)	__insb(p,d,c)
#define insw(p,d,c)	__insw(p,d,c)
#define insl(p,d,c)	__insl(p,d,c)
#define outb(v,p)	__outb(v,p)
#define outw(v,p)	__outw(v,p)
#define outl(v,p)	__outl(v,p)
#define outsb(p,s,c)	__outsb(p,s,c)
#define outsw(p,s,c)	__outsw(p,s,c)
#define outsl(p,s,c)	__outsl(p,s,c)
#define mmiowb()	__mmiowb()
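/*
 * Usage sketch (added illustration; 0x1f0 is the legacy IDE data port,
 * shown only as an example):
 *
 *	u16 buf[256];
 *	insw(0x1f0, buf, 256);
 *
 * performs 256 back-to-back 16-bit reads from the same port, storing the
 * results into consecutive memory locations -- e.g. draining one 512-byte
 * sector from an IDE drive's data register.
 */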
/*
 * The addresses passed to these functions are ioremap()ped already.
 *
 * We need these to be machine vectors since some platforms don't provide
 * DMA coherence via PIO reads (PCI drivers and the spec imply that this is
 * a good idea).  Writes are ok though for all existing ia64 platforms (and
 * hopefully it'll stay that way).
 */
static inline unsigned char
___ia64_readb (const volatile void __iomem *addr)
{
        return *(volatile unsigned char __force *)addr;
}

static inline unsigned short
___ia64_readw (const volatile void __iomem *addr)
{
        return *(volatile unsigned short __force *)addr;
}

static inline unsigned int
___ia64_readl (const volatile void __iomem *addr)
{
        return *(volatile unsigned int __force *) addr;
}

static inline unsigned long
___ia64_readq (const volatile void __iomem *addr)
{
        return *(volatile unsigned long __force *) addr;
}
static inline void
__writeb (unsigned char val, volatile void __iomem *addr)
{
        *(volatile unsigned char __force *) addr = val;
}

static inline void
__writew (unsigned short val, volatile void __iomem *addr)
{
        *(volatile unsigned short __force *) addr = val;
}

static inline void
__writel (unsigned int val, volatile void __iomem *addr)
{
        *(volatile unsigned int __force *) addr = val;
}

static inline void
__writeq (unsigned long val, volatile void __iomem *addr)
{
        *(volatile unsigned long __force *) addr = val;
}
#define __readb		platform_readb
#define __readw		platform_readw
#define __readl		platform_readl
#define __readq		platform_readq
#define __readb_relaxed	platform_readb_relaxed
#define __readw_relaxed	platform_readw_relaxed
#define __readl_relaxed	platform_readl_relaxed
#define __readq_relaxed	platform_readq_relaxed

#define readb(a)	__readb((a))
#define readw(a)	__readw((a))
#define readl(a)	__readl((a))
#define readq(a)	__readq((a))
#define readb_relaxed(a)	__readb_relaxed((a))
#define readw_relaxed(a)	__readw_relaxed((a))
#define readl_relaxed(a)	__readl_relaxed((a))
#define readq_relaxed(a)	__readq_relaxed((a))
#define __raw_readb	readb
#define __raw_readw	readw
#define __raw_readl	readl
#define __raw_readq	readq
#define __raw_readb_relaxed	readb_relaxed
#define __raw_readw_relaxed	readw_relaxed
#define __raw_readl_relaxed	readl_relaxed
#define __raw_readq_relaxed	readq_relaxed
#define writeb(v,a)	__writeb((v), (a))
#define writew(v,a)	__writew((v), (a))
#define writel(v,a)	__writel((v), (a))
#define writeq(v,a)	__writeq((v), (a))
#define __raw_writeb	writeb
#define __raw_writew	writew
#define __raw_writel	writel
#define __raw_writeq	writeq
extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);

static inline void
iounmap (volatile void __iomem *addr)
{
}

/* Use normal IO mappings for DMI */
#define dmi_ioremap ioremap
#define dmi_iounmap(x,l) iounmap(x)
#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
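/*
 * Typical use (added sketch; the physical address, length, and register
 * offset are made up for illustration):
 *
 *	void __iomem *regs = ioremap(0x80000000UL, 0x1000);
 *	if (regs) {
 *		writel(1, regs + 0x10);		// hypothetical control register
 *		iounmap(regs);
 *	}
 */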
/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);
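/*
 * Usage sketch (added illustration; `regs' and the offset are made up):
 *
 *	char buf[64];
 *	memcpy_fromio(buf, regs + 0x100, sizeof(buf));
 *
 * copies 64 bytes out of a device mapping obtained from ioremap(), using
 * I/O-safe accesses instead of a plain memcpy().
 */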
#define dma_cache_inv(_start,_size)		do { } while (0)
#define dma_cache_wback(_start,_size)		do { } while (0)
#define dma_cache_wback_inv(_start,_size)	do { } while (0)

# endif /* __KERNEL__ */
/*
 * Enabling BIO_VMERGE_BOUNDARY forces us to turn off I/O MMU bypassing.  It is said that
 * BIO-level virtual merging can give up to 4% performance boost (not verified for ia64).
 * On the other hand, we know that I/O MMU bypassing gives ~8% performance improvement on
 * SPECweb-like workloads on zx1-based machines.  Thus, for now we favor I/O MMU bypassing
 * over BIO-level virtual merging.
 */
extern unsigned long ia64_max_iommu_merge_mask;
#ifdef CONFIG_XEN
#define BIO_VMERGE_BOUNDARY	0
#else
/*
 * It makes no sense at all to have this BIO_VMERGE_BOUNDARY macro here.  Should be
 * replaced by dma_merge_mask() or something of that sort.  Note: the only way
 * BIO_VMERGE_BOUNDARY is used is to mask off bits.  Effectively, our definition gets
 * expanded into:
 *
 *	addr & ((ia64_max_iommu_merge_mask + 1) - 1) == (addr & ia64_max_iommu_merge_mask)
 *
 * which is precisely what we want.
 */
#define BIO_VMERGE_BOUNDARY	(ia64_max_iommu_merge_mask + 1)
#endif
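/*
 * Added illustration: if ia64_max_iommu_merge_mask were 0xfff (a made-up
 * value), BIO_VMERGE_BOUNDARY would be 0x1000, and masking an address with
 * (0x1000 - 1) == 0xfff picks out its offset within a 4KB window -- the
 * identity spelled out in the comment above.
 */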
#endif /* _ASM_IA64_IO_H */