#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-
+#include <asm/proto.h>
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
unsigned long phys_addr, unsigned long flags)
{
unsigned long end;
- address &= ~PGDIR_MASK;
+ address &= ~PUD_MASK;
end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
+ if (end > PUD_SIZE)
+ end = PUD_SIZE;
phys_addr -= address;
if (address >= end)
BUG();
do {
pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
if (!pte)
return -ENOMEM;
remap_area_pte(pte, address, end - address, address + phys_addr, flags);
address = (address + PMD_SIZE) & PMD_MASK;
pmd++;
} while (address && (address < end));
return 0;
}
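+
+/*
+ * New pud level from the 4-level page table rework: allocate a pmd for
+ * each PUD_SIZE chunk and hand it down to remap_area_pmd().
+ */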
+static inline int remap_area_pud(pud_t * pud, unsigned long address, unsigned long size,
+ unsigned long phys_addr, unsigned long flags)
+{
+ unsigned long end;
+
+ address &= ~PGDIR_MASK;
+ end = address + size;
+ if (end > PGDIR_SIZE)
+ end = PGDIR_SIZE;
+ phys_addr -= address;
+ if (address >= end)
+ BUG();
+ do {
+ pmd_t * pmd = pmd_alloc(&init_mm, pud, address);
+ if (!pmd)
+ return -ENOMEM;
+ if (remap_area_pmd(pmd, address, end - address, address + phys_addr, flags))
+ return -ENOMEM;
+ address = (address + PUD_SIZE) & PUD_MASK;
+ pud++;
+ } while (address && (address < end));
+ return 0;
+}
+
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
unsigned long size, unsigned long flags)
{
int error;
- pgd_t * dir;
+ pgd_t *pgd;
unsigned long end = address + size;
phys_addr -= address;
- dir = pgd_offset_k(address);
+ pgd = pgd_offset_k(address);
flush_cache_all();
if (address >= end)
BUG();
spin_lock(&init_mm.page_table_lock);
do {
- pmd_t *pmd;
- pmd = pmd_alloc(&init_mm, dir, address);
+ pud_t *pud;
+ pud = pud_alloc(&init_mm, pgd, address);
error = -ENOMEM;
- if (!pmd)
+ if (!pud)
break;
- if (remap_area_pmd(pmd, address, end - address,
+ if (remap_area_pud(pud, address, end - address,
phys_addr + address, flags))
break;
error = 0;
address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
+ pgd++;
} while (address && (address < end));
spin_unlock(&init_mm.page_table_lock);
flush_tlb_all();
return error;
}
/*
- * Generic mapping function (not visible outside):
+ * Fix up the linear direct mapping of the kernel to avoid cache attribute
+ * conflicts.
+ */
+static int
+ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+ unsigned long flags)
+{
+ int err = 0;
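+ /*
+ * Only physical ranges covered by the kernel direct mapping (below
+ * end_pfn_map) can alias the new mapping with conflicting cache
+ * attributes.
+ */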
+ if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
+ unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ unsigned long vaddr = (unsigned long) __va(phys_addr);
+
+ /*
+ * Must use an address here, not a struct page, because the physical
+ * address can be in a hole between nodes and thus have no memmap
+ * entry.
+ */
+ err = change_page_attr_addr(vaddr, npages, __pgprot(__PAGE_KERNEL|flags));
+ if (!err)
+ global_flush_tlb();
+ }
+ return err;
+}
+
+/*
+ * Generic mapping function
*/
/*
* Don't remap the low PCI/ISA area, it's always mapped..
*/
if (phys_addr >= 0xA0000 && last_addr < 0x100000)
return (__force void __iomem *)phys_to_virt(phys_addr);
+#ifndef CONFIG_DISCONTIGMEM
/*
* Don't allow anybody to remap normal RAM that we're using..
*/
- if (phys_addr < virt_to_phys(high_memory)) {
-#ifndef CONFIG_DISCONTIGMEM
+ if (last_addr < virt_to_phys(high_memory)) {
char *t_addr, *t_end;
struct page *page;
t_addr = __va(phys_addr);
t_end = t_addr + (size - 1);
for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
if(!PageReserved(page))
return NULL;
-#endif
}
+#endif
/*
* Mappings have to be page-aligned
*/
/*
* Ok, go for it..
*/
- area = get_vm_area(size, VM_IOREMAP);
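+ /*
+ * Stash the caching flags in the high bits of the VM area flags so
+ * iounmap() can later tell whether the direct mapping attributes
+ * have to be restored.
+ */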
+ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
if (!area)
return NULL;
area->phys_addr = phys_addr;
addr = area->addr;
if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+ remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
+ return NULL;
+ }
+ if (ioremap_change_attr(phys_addr, size, flags) < 0) {
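+ /* The attribute change failed; clear the stashed flag bits so
+ * iounmap() does not try to undo it. */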
+ area->flags &= 0xffffff;
vunmap(addr);
return NULL;
}
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
- void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
- if (!p)
- return p;
-
- if (phys_addr + size < virt_to_phys(high_memory)) {
- struct page *ppage = virt_to_page(__va(phys_addr));
- unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
- BUG_ON(phys_addr+size > (unsigned long)high_memory);
- BUG_ON(phys_addr + size < phys_addr);
-
- if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
- iounmap(p);
- p = NULL;
- }
- global_flush_tlb();
- }
-
- return p;
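+ /* Caching attributes of the direct mapping are fixed up inside
+ * __ioremap() via ioremap_change_attr(). */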
+ return __ioremap(phys_addr, size, _PAGE_PCD);
}
-void iounmap(void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
{
- struct vm_struct *p;
+ struct vm_struct *p, **pprev;
+
if (addr <= high_memory)
return;
- p = remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
+
+ write_lock(&vmlist_lock);
+ for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev)
+ if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr))
+ break;
if (!p) {
printk("__iounmap: bad address %p\n", addr);
- return;
- }
-
- if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
+ goto out_unlock;
+ }
+ *pprev = p->next;
+ unmap_vm_area(p);
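+ /*
+ * (p->flags >> 20) recovers the caching flags stashed by __ioremap();
+ * if this was an uncached mapping of RAM, flip the direct mapping
+ * back to cached.
+ */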
+ if ((p->flags >> 20) &&
+ p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
+ /* p->size includes the guard page, but cpa doesn't like that */
change_page_attr(virt_to_page(__va(p->phys_addr)),
- p->size >> PAGE_SHIFT,
+ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
PAGE_KERNEL);
global_flush_tlb();
}
+out_unlock:
+ write_unlock(&vmlist_lock);
kfree(p);
}