#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
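+/*
+ * The legacy PCI/ISA hole below 1MB (VGA and friends).  This range is
+ * always covered by the kernel's direct mapping, so __ioremap() and
+ * iounmap() special-case it instead of building new page tables.
+ */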
+#define ISA_START_ADDRESS 0xa0000
+#define ISA_END_ADDRESS 0x100000
+
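+/*
+ * Fill in the PTEs mapping [addr, end) to the physical range starting
+ * at phys_addr.  Every slot must be unused: hitting a populated PTE
+ * means two mappings were aimed at the same virtual page, which is a bug.
+ */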
+static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
+ unsigned long end, unsigned long phys_addr, unsigned long flags)
{
- unsigned long end;
+ pte_t *pte;
unsigned long pfn;
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- if (address >= end)
- BUG();
pfn = phys_addr >> PAGE_SHIFT;
+ pte = pte_alloc_kernel(pmd, addr);
+ if (!pte)
+ return -ENOMEM;
do {
- if (!pte_none(*pte)) {
- printk("remap_area_pte: page already exists\n");
- BUG();
- }
+ BUG_ON(!pte_none(*pte));
set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
- address += PAGE_SIZE;
pfn++;
- pte++;
- } while (address && (address < end));
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ return 0;
}
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
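+/*
+ * Middle level of the walk: allocate pmds for [addr, end) and hand each
+ * pmd-sized chunk to ioremap_pte_range().
+ */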
+static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
+ unsigned long end, unsigned long phys_addr, unsigned long flags)
{
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- phys_addr -= address;
- if (address >= end)
- BUG();
+ pmd_t *pmd;
+ unsigned long next;
+
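+ /*
+ * Bias phys_addr so that "phys_addr + addr" names the physical
+ * address backing addr at every step of the walk below.
+ */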
+ phys_addr -= addr;
+ pmd = pmd_alloc(&init_mm, pud, addr);
+ if (!pmd)
+ return -ENOMEM;
do {
- pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
- if (!pte)
+ next = pmd_addr_end(addr, end);
+ if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, flags))
return -ENOMEM;
- remap_area_pte(pte, address, end - address, address + phys_addr, flags);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
+ } while (pmd++, addr = next, addr != end);
return 0;
}
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
- unsigned long size, unsigned long flags)
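+/*
+ * Upper level of the walk: allocate puds for [addr, end) and hand each
+ * pud-sized chunk to ioremap_pmd_range().  On i386 the pud level is
+ * folded into the pgd, so this loop runs exactly once per pgd entry.
+ */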
+static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
+ unsigned long end, unsigned long phys_addr, unsigned long flags)
{
- int error;
- pgd_t * dir;
- unsigned long end = address + size;
+ pud_t *pud;
+ unsigned long next;
- phys_addr -= address;
- dir = pgd_offset(&init_mm, address);
+ phys_addr -= addr;
+ pud = pud_alloc(&init_mm, pgd, addr);
+ if (!pud)
+ return -ENOMEM;
+ do {
+ next = pud_addr_end(addr, end);
+ if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, flags))
+ return -ENOMEM;
+ } while (pud++, addr = next, addr != end);
+ return 0;
+}
+
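+/*
+ * Top of the walk: build kernel page tables so that the virtual range
+ * [addr, end) maps the physical range starting at phys_addr with the
+ * given flags.  Caches are flushed before the walk and the TLB after it.
+ * Note there is no explicit init_mm.page_table_lock here any more; the
+ * page-table allocation helpers serialize population themselves.
+ */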
+static int ioremap_page_range(unsigned long addr,
+ unsigned long end, unsigned long phys_addr, unsigned long flags)
+{
+ pgd_t *pgd;
+ unsigned long next;
+ int err;
+
+ BUG_ON(addr >= end);
flush_cache_all();
- if (address >= end)
- BUG();
- spin_lock(&init_mm.page_table_lock);
+ phys_addr -= addr;
+ pgd = pgd_offset_k(addr);
do {
- pmd_t *pmd;
- pmd = pmd_alloc(&init_mm, dir, address);
- error = -ENOMEM;
- if (!pmd)
- break;
- if (remap_area_pmd(pmd, address, end - address,
- phys_addr + address, flags))
+ next = pgd_addr_end(addr, end);
+ err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
+ if (err)
break;
- error = 0;
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- spin_unlock(&init_mm.page_table_lock);
+ } while (pgd++, addr = next, addr != end);
flush_tlb_all();
- return error;
+ return err;
}
/*
* Don't remap the low PCI/ISA area, it's always mapped..
*/
- if (phys_addr >= 0xA0000 && last_addr < 0x100000)
+ if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
return (void __iomem *) phys_to_virt(phys_addr);
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
- if (phys_addr < virt_to_phys(high_memory)) {
+ if (phys_addr <= virt_to_phys(high_memory - 1)) {
char *t_addr, *t_end;
struct page *page;
/*
* Ok, go for it..
*/
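+ /*
+ * The pgprot flags are stashed in the vm_struct flags above the
+ * VM_* bits (hence the << 20) so that iounmap() can later tell
+ * whether the direct mapping's attributes need to be restored.
+ */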
- area = get_vm_area(size, VM_IOREMAP);
+ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
if (!area)
return NULL;
area->phys_addr = phys_addr;
addr = (void __iomem *) area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+ if (ioremap_page_range((unsigned long) addr,
+ (unsigned long) addr + size, phys_addr, flags)) {
vunmap((void __force *) addr);
return NULL;
}
return (void __iomem *) (offset + (char __iomem *)addr);
}
-
+EXPORT_SYMBOL(__ioremap);
/**
* ioremap_nocache - map bus memory into CPU space
/* Guaranteed to be > phys_addr, as per __ioremap() */
last_addr = phys_addr + size - 1;
- if (last_addr < virt_to_phys(high_memory)) {
+ if (last_addr < virt_to_phys(high_memory) - 1) {
struct page *ppage = virt_to_page(__va(phys_addr));
unsigned long npages;
return p;
}
+EXPORT_SYMBOL(ioremap_nocache);
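+/*
+ * Typical use, as a sketch (MMIO_BASE, MMIO_SIZE and REG_CTRL are
+ * made-up values for illustration, not part of this file):
+ *
+ *	void __iomem *regs = ioremap_nocache(MMIO_BASE, MMIO_SIZE);
+ *	if (!regs)
+ *		return -ENOMEM;
+ *	writel(1, regs + REG_CTRL);
+ *	iounmap(regs);
+ */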
+/**
+ * iounmap - Free an IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
void iounmap(volatile void __iomem *addr)
{
- struct vm_struct *p;
- if ((void __force *) addr <= high_memory)
- return;
- p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
- if (!p) {
- printk("__iounmap: bad address %p\n", addr);
+ struct vm_struct *p, *o;
+
+ if ((void __force *)addr <= high_memory)
return;
- }
- if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
+ /*
+ * __ioremap special-cases the PCI/ISA range by not instantiating a
+ * vm_area and by simply returning an address into the kernel mapping
+ * of ISA space. So handle that here.
+ */
+ if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
+ addr < phys_to_virt(ISA_END_ADDRESS))
+ return;
+
+ addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+
+ /*
+ * Use the vm area unlocked, assuming the caller ensures there isn't
+ * another iounmap for the same address in parallel.  Reuse of the
+ * virtual address is prevented by leaving it in the global lists
+ * until we're done with it.  change_page_attr() takes care of the
+ * direct mappings.
+ */
+ read_lock(&vmlist_lock);
+ for (p = vmlist; p; p = p->next) {
+ if (p->addr == addr)
+ break;
+ }
+ read_unlock(&vmlist_lock);
+
+ if (!p) {
+ printk("iounmap: bad address %p\n", addr);
+ dump_stack();
+ return;
+ }
+
+ /* Reset the direct mapping if the caching attributes were changed. Can block */
+ if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
change_page_attr(virt_to_page(__va(p->phys_addr)),
p->size >> PAGE_SHIFT,
PAGE_KERNEL);
global_flush_tlb();
}
+
+ /* Finally remove it */
+ o = remove_vm_area((void *)addr);
+ BUG_ON(p != o || o == NULL);
kfree(p);
}
+EXPORT_SYMBOL(iounmap);
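+/*
+ * bt_ioremap() is the boot-time variant: it is usable before vmalloc
+ * and the normal ioremap() machinery are up, mapping the region
+ * (outside the always-mapped ISA range) through the fixmap slots
+ * reserved for early boot.
+ */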
void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
/*
* Don't remap the low PCI/ISA area, it's always mapped..
*/
- if (phys_addr >= 0xA0000 && last_addr < 0x100000)
+ if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
return phys_to_virt(phys_addr);
/*