/*
 * arch/parisc/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001 Helge Deller <deller@gmx.de>
 */
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/mm.h>		/* PageReserved(), virt_to_page() */
#include <asm/io.h>		/* virt_to_phys() */
#include <asm/pgalloc.h>
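
/*
 * The remap_area_{pages,pmd,pte}() helpers below walk the kernel page
 * tables top-down: remap_area_pages() steps through page-directory
 * entries, remap_area_pmd() through the middle level, and
 * remap_area_pte() writes one PTE per page, mapping a contiguous
 * physical range with the requested protection flags.
 */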
static inline void remap_area_pte(pte_t *pte, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/* Clamp the range to what a single PTE page covers */
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (!pte_none(*pte)) {
			printk(KERN_ERR "remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
				_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		phys_addr += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
}
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	unsigned long size, unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	/* Clamp the range to what a single PMD page covers */
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	do {
		/* Kernel mappings live in init_mm, not in a user mm */
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
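
/*
 * USE_HPPA_IOREMAP chooses between a "real" ioremap that builds kernel
 * page-table entries (below) and the traditional PA-RISC behaviour,
 * where I/O space is accessed through its physical address directly and
 * __ioremap() only fixes up legacy EISA addresses.
 */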
#if (USE_HPPA_IOREMAP)
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
	unsigned long size, unsigned long flags)
{
	int error = 0;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;

		error = -ENOMEM;
		pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}
#endif /* USE_HPPA_IOREMAP */
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail (see the usage sketch
 * after this function).
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
#if !(USE_HPPA_IOREMAP)
	unsigned long end = phys_addr + size - 1;

	/* Support EISA addresses by aliasing them into the EISA window */
	if ((phys_addr >= 0x00080000 && end < 0x000fffff)
	    || (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
		phys_addr |= 0xfc000000;
	}
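
	/*
	 * Worked example (illustrative): a legacy VGA frame buffer at
	 * 0x000a0000 falls in the first window above, so it is aliased to
	 * 0x000a0000 | 0xfc000000 == 0xfc0a0000 and handed back directly,
	 * with no page tables built.
	 */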

	return (void *)phys_addr;

#else
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned.  Note the last_addr + 1: a
	 * range ending exactly on a page boundary must still round up to
	 * cover its final page.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vfree(addr);
		return NULL;
	}

	return (void *)(offset + (char *)addr);
#endif
}
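
/*
 * Usage sketch (not part of the original file; the device address, size
 * and accessor are made up for illustration):
 *
 *	void *regs = __ioremap(0xf8001000UL, 0x1000, 0);
 *	if (regs) {
 *		unsigned int id = readl(regs);	// first device register
 *		iounmap(regs);
 *	}
 *
 * The returned cookie already includes any sub-page offset, so callers
 * simply add register offsets to it.
 */
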
void iounmap(void *addr)
{
#if !(USE_HPPA_IOREMAP)
	/* Nothing was mapped, so there is nothing to tear down */
	return;
#else
	if (addr > high_memory)
		vfree((void *)(PAGE_MASK & (unsigned long)addr));
#endif
}