/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#define ISA_START_ADDRESS	0x0
#define ISA_END_ADDRESS		0x100000
static int direct_remap_area_pte_fn(pte_t *pte,
				    struct page *pmd_page,
				    unsigned long address,
				    void *data)
{
	mmu_update_t **v = (mmu_update_t **)data;

	BUG_ON(!pte_none(*pte));

	(*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) <<
		     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
	(*v)++;
	return 0;
}
static int __direct_remap_pfn_range(struct mm_struct *mm,
				    unsigned long address,
				    unsigned long mfn,
				    unsigned long size,
				    pgprot_t prot,
				    domid_t domid)
{
	int rc;
	unsigned long i, start_address;
	mmu_update_t *u, *v, *w;

	u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (u == NULL)
		return -ENOMEM;

	start_address = address;

	flush_cache_all();

	for (i = 0; i < size; i += PAGE_SIZE) {
		if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
			/* Flush a full batch after filling in the PTE ptrs. */
			rc = apply_to_page_range(mm, start_address,
						 address - start_address,
						 direct_remap_area_pte_fn, &w);
			if (rc)
				goto out;
			rc = -EFAULT;
			if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
				goto out;
			v = w = u;
			start_address = address;
		}

		/*
		 * Fill in the machine address: the PTE ptr is filled in later
		 * by apply_to_page_range() via direct_remap_area_pte_fn().
		 */
		v->val = pte_val_ma(pfn_pte_ma(mfn, prot));

		mfn++;
		address += PAGE_SIZE;
		v++;
	}

	if (v != u) {
		/* Fill in the PTE ptrs and flush the final, partial batch. */
		rc = apply_to_page_range(mm, start_address,
					 address - start_address,
					 direct_remap_area_pte_fn, &w);
		if (rc)
			goto out;
		rc = -EFAULT;
		if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
			goto out;
	}

	rc = 0;

 out:
	flush_tlb_all();
	free_page((unsigned long)u);
	return rc;
}
int direct_remap_pfn_range(struct vm_area_struct *vma,
			   unsigned long address,
			   unsigned long mfn,
			   unsigned long size,
			   pgprot_t prot,
			   domid_t domid)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return remap_pfn_range(vma, address, mfn, size, prot);

	if (domid == DOMID_SELF)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED;
	vma->vm_mm->context.has_foreign_mappings = 1;

	return __direct_remap_pfn_range(
		vma->vm_mm, address, mfn, size, prot, domid);
}
EXPORT_SYMBOL(direct_remap_pfn_range);
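
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * driver's mmap handler might expose another domain's machine frames to
 * user space via direct_remap_pfn_range().  The names example_mmap, mfn
 * and domid below are hypothetical placeholders.
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long mfn;	// machine frame supplied by the peer domain
 *		domid_t domid;		// domain that owns the frames
 *
 *		...
 *		return direct_remap_pfn_range(vma, vma->vm_start, mfn,
 *					      vma->vm_end - vma->vm_start,
 *					      vma->vm_page_prot, domid);
 *	}
 */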
int direct_kernel_remap_pfn_range(unsigned long address,
				  unsigned long mfn,
				  unsigned long size,
				  pgprot_t prot,
				  domid_t domid)
{
	return __direct_remap_pfn_range(
		&init_mm, address, mfn, size, prot, domid);
}
EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
static int lookup_pte_fn(
	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	uint64_t *ptep = (uint64_t *)data;

	if (ptep)
		*ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) <<
			 PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
	return 0;
}
int create_lookup_pte_addr(struct mm_struct *mm,
			   unsigned long address,
			   uint64_t *ptep)
{
	return apply_to_page_range(mm, address, PAGE_SIZE,
				   lookup_pte_fn, ptep);
}

EXPORT_SYMBOL(create_lookup_pte_addr);
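
/*
 * Usage sketch (illustrative only, not part of the original file):
 * create_lookup_pte_addr() stores, via *ptep, the machine address of the
 * PTE slot that maps @address, populating intermediate page tables as a
 * side effect of apply_to_page_range(); the caller can then hand that
 * address to the hypervisor.  The variable user_addr is hypothetical.
 *
 *	uint64_t pte_maddr;
 *
 *	if (create_lookup_pte_addr(current->mm, user_addr, &pte_maddr))
 *		return -EFAULT;
 *	// pte_maddr now identifies, in machine space, the PTE for user_addr
 */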
static int noop_fn(
	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	return 0;
}
int touch_pte_range(struct mm_struct *mm,
		    unsigned long address,
		    unsigned long size)
{
	return apply_to_page_range(mm, address, size, noop_fn, NULL);
}

EXPORT_SYMBOL(touch_pte_range);
/*
 * Does @address reside within a non-highmem page that is local to this virtual
 * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
 * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
 * why this works.
 */
static inline int is_local_lowmem(unsigned long address)
{
	extern unsigned long max_low_pfn;
	return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
}
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;
	domid_t domid = DOMID_IO;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_initial_xendomain() &&
	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) isa_bus_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (is_local_lowmem(phys_addr)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = bus_to_virt(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;

		domid = DOMID_SELF;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	flags |= _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED;
	if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
				     phys_addr>>PAGE_SHIFT,
				     size, __pgprot(flags), domid)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
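
/*
 * Worked example (illustrative only, not part of the original file) of the
 * alignment handling in __ioremap(), for a hypothetical request of
 * phys_addr = 0xfebc0004 and size = 0x10 with 4 KiB pages:
 *
 *	last_addr = 0xfebc0013
 *	offset    = 0x004			(phys_addr & ~PAGE_MASK)
 *	phys_addr = 0xfebc0000			(phys_addr & PAGE_MASK)
 *	size      = PAGE_ALIGN(0xfebc0014) - 0xfebc0000 = 0x1000
 *
 * so a single page is mapped and the caller gets back the mapping's base
 * address plus 0x004.
 */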
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (is_local_lowmem(last_addr)) {
		struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
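
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * typically maps an MMIO resource with ioremap_nocache(), accesses it
 * through the mmio helpers, and releases it with iounmap().  bar_base,
 * bar_len and the register offsets below are hypothetical.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(bar_base, bar_len);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);		// example: poke a control register
 *	status = readl(regs + 0x08);	// example: read a status register
 *	iounmap(regs);
 */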
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
		/* p->size includes the guard page, but cpa doesn't like that */
		change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
				 (p->size - PAGE_SIZE) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_initial_xendomain() &&
	    phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return isa_bus_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
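
/*
 * Usage sketch (illustrative only, not part of the original file):
 * bt_ioremap() and bt_iounmap() give early boot code a temporary,
 * fixmap-based mapping before the normal ioremap() machinery is up.
 * table_phys and table_len are hypothetical values, e.g. the location of
 * a firmware table discovered during init.
 *
 *	char *table = bt_ioremap(table_phys, table_len);
 *	if (table) {
 *		// ... parse the table while it is temporarily mapped ...
 *		bt_iounmap(table, table_len);
 *	}
 */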