diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 9f490c2..96fa4a9 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -6,13 +6,19 @@
  * 640k-1MB IO memory area on PC's
  *
  * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2005, 2006 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
  */
-
 #include <linux/vmalloc.h>
+#include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
+#include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
@@ -57,7 +63,7 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
        if (address >= end)
                BUG();
        do {
-               pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
+               pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
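
The hunk above tracks an API change rather than a behavioural one: as of the 2.6.15 page-table-lock rework, pte_alloc_kernel() implicitly operates on init_mm and takes init_mm.page_table_lock itself, so the mm argument goes away here and the caller-side locking goes away in the next hunk. A minimal sketch of the new call shape, assuming that post-2.6.15 API:

    /* pte_alloc_kernel() now locks init_mm.page_table_lock internally;
     * callers pass only the pmd and the address. */
    pte_t *pte = pte_alloc_kernel(pmd, address);
    if (!pte)
            return -ENOMEM;
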
@@ -79,11 +85,16 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
        flush_cache_all();
        if (address >= end)
                BUG();
-       spin_lock(&init_mm.page_table_lock);
        do {
+               pud_t *pud;
                pmd_t *pmd;
-               pmd = pmd_alloc(&init_mm, dir, address);
+
                error = -ENOMEM;
+
+               pud = pud_alloc(&init_mm, dir, address);
+               if (!pud)
+                       break;
+               pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
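
Besides dropping the explicit page_table_lock, this hunk threads the pud level into the walk, bringing the function in line with the four-level page-table model introduced in 2.6.11. A hedged sketch of the allocation walk remap_area_pages() now performs for each PGDIR-sized chunk (dir comes from pgd_offset_k() in context elided from the hunk):

    pgd_t *dir = pgd_offset_k(address);              /* kernel pgd slot  */
    pud_t *pud = pud_alloc(&init_mm, dir, address);  /* folded on 2-level
                                                      * configurations   */
    pmd_t *pmd = pud ? pmd_alloc(&init_mm, pud, address) : NULL;
    pte_t *pte = pmd ? pte_alloc_kernel(pmd, address) : NULL;
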
@@ -93,15 +104,10 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
-       spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
 }
 
-/*
- * Generic mapping function (not visible outside):
- */
-
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -111,11 +117,11 @@ int remap_area_pages(unsigned long address, unsigned long phys_addr,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+                       unsigned long flags)
 {
-       void * addr;
        struct vm_struct * area;
-       unsigned long offset, last_addr;
+       unsigned long offset, last_addr, addr, orig_addr;
 
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
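
Two details are worth calling out here. The wraparound guard works because last_addr = phys_addr + size - 1 only ends up below phys_addr when the request wraps the top of the address space (or size is zero). And the offset conversion promised by the comment above happens in context lines elided between the hunks; a sketch of that alignment step, assuming the usual PAGE_MASK/PAGE_ALIGN helpers from <asm/page.h>:

    offset     = phys_addr & ~PAGE_MASK;  /* byte offset in the first page */
    phys_addr &= PAGE_MASK;               /* map from a page-aligned base  */
    size       = PAGE_ALIGN(last_addr + 1) - phys_addr;  /* span the request */

The offset is added back on the way out, in the final return (void __iomem *)(offset + (char *)orig_addr).
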
@@ -126,7 +132,7 @@ void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long fla
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0xA0000 && last_addr < 0x100000)
-               return phys_to_virt(phys_addr);
+               return (void __iomem *)phys_to_virt(phys_addr);
 
        /*
         * Don't allow anybody to remap normal RAM that we're using..
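
The new void __iomem * casts are for sparse's address-space checking: __iomem is an annotation with no runtime cost, marking a cookie that only the I/O accessors should dereference. Crossing back out of the annotated space takes a __force cast, as __iounmap() does below. A short sketch:

    /* Entering and leaving the __iomem address space under sparse. */
    void __iomem *cookie = (void __iomem *)phys_to_virt(0xA0000);
    unsigned long vaddr  = (unsigned long __force)cookie;
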
@@ -148,16 +154,71 @@ void * p3_ioremap(unsigned long phys_addr, unsigned long size, unsigned long fla
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
-       addr = area->addr;
-       if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
-               vunmap(addr);
-               return NULL;
+       orig_addr = addr = (unsigned long)area->addr;
+
+#ifdef CONFIG_32BIT
+       /*
+        * First try to remap through the PMB once a valid VMA has been
+        * established. Smaller allocations (or the rest of the size
+        * remaining after a PMB mapping due to the size not being
+        * perfectly aligned on a PMB size boundary) are then mapped
+        * through the UTLB using conventional page tables.
+        *
+        * PMB entries are all pre-faulted.
+        */
+       if (unlikely(size >= 0x1000000)) {
+               unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+
+               if (likely(mapped)) {
+                       addr            += mapped;
+                       phys_addr       += mapped;
+                       size            -= mapped;
+               }
        }
-       return (void *) (offset + (char *)addr);
+#endif
+
+       if (likely(size))
+               if (remap_area_pages(addr, phys_addr, size, flags)) {
+                       vunmap((void *)orig_addr);
+                       return NULL;
+               }
+
+       return (void __iomem *)(offset + (char *)orig_addr);
 }
+EXPORT_SYMBOL(__ioremap);
 
-void p3_iounmap(void *addr)
+void __iounmap(void __iomem *addr)
 {
-       if (addr > high_memory)
-               vfree((void *)(PAGE_MASK & (unsigned long)addr));
+       unsigned long vaddr = (unsigned long __force)addr;
+       struct vm_struct *p;
+
+       if (PXSEG(vaddr) < P3SEG)
+               return;
+
+#ifdef CONFIG_32BIT
+       /*
+        * Purge any PMB entries that may have been established for this
+        * mapping, then proceed with conventional VMA teardown.
+        *
+        * XXX: Note that due to the way that remove_vm_area() does
+        * matching of the resultant VMA, we aren't able to fast-forward
+        * the address past the PMB space until the end of the VMA where
+        * the page tables reside. As such, unmap_vm_area() will be
+        * forced to linearly scan over the area until it finds the page
+        * tables where PTEs that need to be unmapped actually reside,
+        * which is far from optimal. Perhaps we need to use a separate
+        * VMA for the PMB mappings?
+        *                                      -- PFM.
+        */
+       pmb_unmap(vaddr);
+#endif
+
+       p = remove_vm_area((void *)(vaddr & PAGE_MASK));
+       if (!p) {
+               printk(KERN_ERR "%s: bad address %p\n", __FUNCTION__, addr);
+               return;
+       }
+
+       kfree(p);
 }
+EXPORT_SYMBOL(__iounmap);
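
Taken together, the patch retires the old p3_ioremap()/p3_iounmap() pair in favour of exported __ioremap()/__iounmap() entry points, with requests of 16MB (0x1000000) and up pre-faulted through PMB entries under CONFIG_32BIT and any remainder falling back to conventional page tables. A hedged usage sketch through the usual wrappers, which on sh resolve to the functions defined above; the device address and register offset are illustrative only:

    /* Drivers keep calling the generic ioremap()/iounmap() wrappers. */
    void __iomem *regs = ioremap(0xa4000000UL, 0x100);
    if (regs) {
            u8 status = readb(regs + 0x04);  /* accessors take __iomem */
            iounmap(regs);
    }
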