X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fx86_64%2Fmm%2Fioremap.c;h=ae207064201e19697160a332a3399f9a0594b2f8;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=bae79b818d85108b5cc674c28303afb92438fdb4;hpb=8e8ece46a861c84343256819eaec77e608ff9217;p=linux-2.6.git

diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index bae79b818..ae2070642 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -18,6 +18,9 @@
 #include 
 #include 
 
+#define ISA_START_ADDRESS	0xa0000
+#define ISA_END_ADDRESS		0x100000
+
 static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
 	unsigned long phys_addr, unsigned long flags)
 {
@@ -57,7 +60,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -102,7 +105,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		pud_t *pud;
 		pud = pud_alloc(&init_mm, pgd, address);
@@ -116,7 +118,6 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		pgd++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return error;
 }
@@ -130,7 +131,7 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 					unsigned long flags)
 {
 	int err = 0;
-	if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
+	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
 		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		unsigned long vaddr = (unsigned long) __va(phys_addr);
 
@@ -172,10 +173,10 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	/*
 	 * Don't remap the low PCI/ISA area, it's always mapped..
 	 */
-	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
+	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
 		return (__force void __iomem *)phys_to_virt(phys_addr);
 
-#ifndef CONFIG_DISCONTIGMEM
+#ifdef CONFIG_FLATMEM
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
@@ -211,7 +212,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
 		return NULL;
 	}
-	if (ioremap_change_attr(phys_addr, size, flags) < 0) {
+	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
 		area->flags &= 0xffffff;
 		vunmap(addr);
 		return NULL;
 	}
@@ -246,32 +247,47 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 	return __ioremap(phys_addr, size, _PAGE_PCD);
 }
 
+/**
+ * iounmap - Free a IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
 void iounmap(volatile void __iomem *addr)
 {
-	struct vm_struct *p, **pprev;
+	struct vm_struct *p, *o;
 
 	if (addr <= high_memory)
 		return;
+	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
+		addr < phys_to_virt(ISA_END_ADDRESS))
+		return;
 
-	write_lock(&vmlist_lock);
-	for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev)
-		if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr))
+	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+	/* Use the vm area unlocked, assuming the caller
+	   ensures there isn't another iounmap for the same address
+	   in parallel. Reuse of the virtual address is prevented by
+	   leaving it in the global lists until we're done with it.
+	   cpa takes care of the direct mappings. */
+	read_lock(&vmlist_lock);
+	for (p = vmlist; p; p = p->next) {
+		if (p->addr == addr)
 			break;
-	if (!p) {
-		printk("__iounmap: bad address %p\n", addr);
-		goto out_unlock;
 	}
-	*pprev = p->next;
-	unmap_vm_area(p);
-	if ((p->flags >> 20) &&
-		p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
-		/* p->size includes the guard page, but cpa doesn't like that */
-		change_page_attr_addr((unsigned long)(__va(p->phys_addr)),
-			(p->size - PAGE_SIZE) >> PAGE_SHIFT,
-			PAGE_KERNEL);
-		global_flush_tlb();
-	}
-out_unlock:
-	write_unlock(&vmlist_lock);
+	read_unlock(&vmlist_lock);
+
+	if (!p) {
+		printk("iounmap: bad address %p\n", addr);
+		dump_stack();
+		return;
+	}
+
+	/* Reset the direct mapping. Can block */
+	if (p->flags >> 20)
+		ioremap_change_attr(p->phys_addr, p->size, 0);
+
+	/* Finally remove it */
+	o = remove_vm_area((void *)addr);
+	BUG_ON(p != o || o == NULL);
 	kfree(p);
 }
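
For readers skimming the patch, the sketch below is not part of the diff above; it only shows the usual way a driver pairs these calls under the contract stated by the new iounmap() kernel-doc (exactly one unmapping per mapping, issued by the same caller). The device address, size, and function name are invented for illustration; ioremap_nocache(), readl() and iounmap() are the real kernel interfaces touched by the patch.

/*
 * Illustration only -- hypothetical driver code, not part of the patch.
 * It maps a made-up MMIO region uncached, reads one register, and unmaps
 * it exactly once.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/io.h>

#define EXAMPLE_MMIO_PHYS	0xfebf0000UL	/* hypothetical device address */
#define EXAMPLE_MMIO_SIZE	0x1000UL

static int example_read_device_id(u32 *id)
{
	void __iomem *regs;

	/* Uncached mapping; per the patch, requests that fall entirely inside
	   the ISA window (ISA_START_ADDRESS..ISA_END_ADDRESS) are satisfied
	   from the existing low mapping instead of a new vm area. */
	regs = ioremap_nocache(EXAMPLE_MMIO_PHYS, EXAMPLE_MMIO_SIZE);
	if (!regs)
		return -ENOMEM;

	*id = readl(regs);	/* register assumed to live at offset 0 */

	/* Exactly one iounmap() for the mapping obtained above. */
	iounmap(regs);
	return 0;
}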