diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 9f490c274..96fa4a999 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -6,13 +6,19 @@
  * 640k-1MB IO memory area on PC's
  *
  * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2005, 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
  */
-
 #include <linux/vmalloc.h>
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
+#include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
@@ -57,7 +63,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
 	if (address >= end)
 		BUG();
 	do {
-		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+		pte_t * pte = pte_alloc_kernel(pmd, address);
 		if (!pte)
 			return -ENOMEM;
 		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
@@ -79,11 +85,16 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
 	flush_cache_all();
 	if (address >= end)
 		BUG();
-	spin_lock(&init_mm.page_table_lock);
 	do {
+		pud_t *pud;
 		pmd_t *pmd;
-		pmd = pmd_alloc(&init_mm, dir, address);
-		error = -ENOMEM;
+
+		error = -ENOMEM;
+
+		pud = pud_alloc(&init_mm, dir, address);
+		if (!pud)
+			break;
+		pmd = pmd_alloc(&init_mm, pud, address);
 		if (!pmd)
 			break;
 		if (remap_area_pmd(pmd, address, end - address,
@@ -93,15 +104,10 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
 		dir++;
 	} while (address && (address < end));
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return error;
 }
 
-/*
- * Generic mapping function (not visible outside):
- */
-
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -111,11 +117,11 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags)
 {
-	void * addr;
 	struct vm_struct * area;
-	unsigned long offset, last_addr;
+	unsigned long offset, last_addr, addr, orig_addr;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -126,7 +132,7 @@ void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long fla
 	 * Don't remap the low PCI/ISA area, it's always mapped..
 	 */
 	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
-		return phys_to_virt(phys_addr);
+		return (void __iomem *)phys_to_virt(phys_addr);
 
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
@@ -148,16 +154,71 @@ void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long fla
 	if (!area)
 		return NULL;
 	area->phys_addr = phys_addr;
-	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
-		vunmap(addr);
-		return NULL;
+	orig_addr = addr = (unsigned long)area->addr;
+
+#ifdef CONFIG_32BIT
+	/*
+	 * First try to remap through the PMB once a valid VMA has been
+	 * established. Smaller allocations (or the rest of the size
+	 * remaining after a PMB mapping due to the size not being
+	 * perfectly aligned on a PMB size boundary) are then mapped
+	 * through the UTLB using conventional page tables.
+	 *
+	 * PMB entries are all pre-faulted.
+	 */
+	if (unlikely(size >= 0x1000000)) {
+		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+
+		if (likely(mapped)) {
+			addr		+= mapped;
+			phys_addr	+= mapped;
+			size		-= mapped;
+		}
 	}
-	return (void *) (offset + (char *)addr);
+#endif
+
+	if (likely(size))
+		if (remap_area_pages(addr, phys_addr, size, flags)) {
+			vunmap((void *)orig_addr);
+			return NULL;
+		}
+
+	return (void __iomem *)(offset + (char *)orig_addr);
 }
+EXPORT_SYMBOL(__ioremap);
 
-void p3_iounmap(void *addr)
+void __iounmap(void __iomem *addr)
 {
-	if (addr > high_memory)
-		vfree((void *)(PAGE_MASK & (unsigned long)addr));
+	unsigned long vaddr = (unsigned long __force)addr;
+	struct vm_struct *p;
+
+	if (PXSEG(vaddr) < P3SEG)
+		return;
+
+#ifdef CONFIG_32BIT
+	/*
+	 * Purge any PMB entries that may have been established for this
+	 * mapping, then proceed with conventional VMA teardown.
+	 *
+	 * XXX: Note that due to the way that remove_vm_area() does
+	 * matching of the resultant VMA, we aren't able to fast-forward
+	 * the address past the PMB space until the end of the VMA where
+	 * the page tables reside. As such, unmap_vm_area() will be
+	 * forced to linearly scan over the area until it finds the page
+	 * tables where PTEs that need to be unmapped actually reside,
+	 * which is far from optimal. Perhaps we need to use a separate
+	 * VMA for the PMB mappings?
+	 *	-- PFM.
+	 */
+	pmb_unmap(vaddr);
+#endif
+
+	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
+	if (!p) {
+		printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
+		return;
+	}
+
+	kfree(p);
 }
+EXPORT_SYMBOL(__iounmap);
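
For context (not part of the patch): a minimal sketch of how a driver would consume the reworked interface. On sh, the generic ioremap()/iounmap() helpers resolve to the __ioremap()/__iounmap() implementations above, so on a CONFIG_32BIT kernel a mapping of 16MB (0x1000000) or larger is first satisfied from pre-faulted PMB entries, with any remainder falling back to conventional page tables via remap_area_pages(). Everything prefixed MY_/my_ below is a made-up name or value for illustration only.

#include <linux/errno.h>
#include <asm/io.h>

#define MY_DEV_PHYS	0x14000000UL	/* assumed device base address */
#define MY_DEV_SIZE	0x02000000UL	/* 32MB: big enough to take the PMB path */
#define MY_DEV_CTRL	0x10		/* assumed control register offset */

static void __iomem *regs;

static int my_dev_map(void)
{
	/* Ends up in __ioremap(); sizes >= 16MB try pmb_remap() first. */
	regs = ioremap(MY_DEV_PHYS, MY_DEV_SIZE);
	if (!regs)
		return -ENOMEM;

	/* Always go through the __iomem accessors, never a raw dereference. */
	writel(1, regs + MY_DEV_CTRL);
	return 0;
}

static void my_dev_unmap(void)
{
	/* Ends up in __iounmap(): pmb_unmap() plus remove_vm_area() teardown. */
	iounmap(regs);
	regs = NULL;
}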