diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index 0cc4a612f..bae79b818 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -16,7 +16,7 @@
 #include <asm/fixmap.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
-
+#include <asm/proto.h>
 
 static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
 	unsigned long phys_addr, unsigned long flags)
@@ -49,10 +49,10 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 {
 	unsigned long end;
 
-	address &= ~PGDIR_MASK;
+	address &= ~PUD_MASK;
 	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
+	if (end > PUD_SIZE)
+		end = PUD_SIZE;
 	phys_addr -= address;
 	if (address >= end)
 		BUG();
@@ -67,31 +67,54 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned lo
 	return 0;
 }
 
+static inline int remap_area_pud(pud_t * pud, unsigned long address, unsigned long size,
+	unsigned long phys_addr, unsigned long flags)
+{
+	unsigned long end;
+
+	address &= ~PGDIR_MASK;
+	end = address + size;
+	if (end > PGDIR_SIZE)
+		end = PGDIR_SIZE;
+	phys_addr -= address;
+	if (address >= end)
+		BUG();
+	do {
+		pmd_t * pmd = pmd_alloc(&init_mm, pud, address);
+		if (!pmd)
+			return -ENOMEM;
+		remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
+		address = (address + PUD_SIZE) & PUD_MASK;
+		pud++;
+	} while (address && (address < end));
+	return 0;
+}
+
 static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 				 unsigned long size, unsigned long flags)
 {
 	int error;
-	pgd_t * dir;
+	pgd_t *pgd;
 	unsigned long end = address + size;
 
 	phys_addr -= address;
-	dir = pgd_offset_k(address);
+	pgd = pgd_offset_k(address);
 	flush_cache_all();
 	if (address >= end)
 		BUG();
 	spin_lock(&init_mm.page_table_lock);
 	do {
-		pmd_t *pmd;
-		pmd = pmd_alloc(&init_mm, dir, address);
+		pud_t *pud;
+		pud = pud_alloc(&init_mm, pgd, address);
 		error = -ENOMEM;
-		if (!pmd)
+		if (!pud)
 			break;
-		if (remap_area_pmd(pmd, address, end - address,
+		if (remap_area_pud(pud, address, end - address,
 					 phys_addr + address, flags))
 			break;
 		error = 0;
 		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
+		pgd++;
 	} while (address && (address < end));
 	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
@@ -99,7 +122,31 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 }
 
 /*
- * Generic mapping function (not visible outside):
+ * Fix up the linear direct mapping of the kernel to avoid cache attribute
+ * conflicts.
+ */
+static int
+ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+					unsigned long flags)
+{
+	int err = 0;
+	if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
+		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		unsigned long vaddr = (unsigned long) __va(phys_addr);
+
+	/*
+	 * Must use an address here and not struct page because the phys addr
+	 * can be in a hole between nodes and not have a memmap entry.
+ */ + err = change_page_attr_addr(vaddr,npages,__pgprot(__PAGE_KERNEL|flags)); + if (!err) + global_flush_tlb(); + } + return err; +} + +/* + * Generic mapping function */ /* @@ -111,7 +158,7 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr, * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ -void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) +void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) { void * addr; struct vm_struct * area; @@ -126,13 +173,13 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag * Don't remap the low PCI/ISA area, it's always mapped.. */ if (phys_addr >= 0xA0000 && last_addr < 0x100000) - return phys_to_virt(phys_addr); + return (__force void __iomem *)phys_to_virt(phys_addr); +#ifndef CONFIG_DISCONTIGMEM /* * Don't allow anybody to remap normal RAM that we're using.. */ - if (phys_addr < virt_to_phys(high_memory)) { -#ifndef CONFIG_DISCONTIGMEM + if (last_addr < virt_to_phys(high_memory)) { char *t_addr, *t_end; struct page *page; @@ -142,8 +189,8 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) if(!PageReserved(page)) return NULL; -#endif } +#endif /* * Mappings have to be page-aligned @@ -155,16 +202,21 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag /* * Ok, go for it.. */ - area = get_vm_area(size, VM_IOREMAP); + area = get_vm_area(size, VM_IOREMAP | (flags << 20)); if (!area) return NULL; area->phys_addr = phys_addr; addr = area->addr; if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { + remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr)); + return NULL; + } + if (ioremap_change_attr(phys_addr, size, flags) < 0) { + area->flags &= 0xffffff; vunmap(addr); return NULL; } - return (void *) (offset + (char *)addr); + return (__force void __iomem *) (offset + (char *)addr); } /** @@ -189,45 +241,37 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag * Must be freed with iounmap. 
*/ -void *ioremap_nocache (unsigned long phys_addr, unsigned long size) +void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size) { - void *p = __ioremap(phys_addr, size, _PAGE_PCD); - if (!p) - return p; - - if (phys_addr + size < virt_to_phys(high_memory)) { - struct page *ppage = virt_to_page(__va(phys_addr)); - unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; - - BUG_ON(phys_addr+size > (unsigned long)high_memory); - BUG_ON(phys_addr + size < phys_addr); - - if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { - iounmap(p); - p = NULL; - } - global_flush_tlb(); - } - - return p; + return __ioremap(phys_addr, size, _PAGE_PCD); } -void iounmap(void *addr) +void iounmap(volatile void __iomem *addr) { - struct vm_struct *p; + struct vm_struct *p, **pprev; + if (addr <= high_memory) return; - p = remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr)); + + write_lock(&vmlist_lock); + for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev) + if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr)) + break; if (!p) { printk("__iounmap: bad address %p\n", addr); - return; - } - - if (p->flags && p->phys_addr < virt_to_phys(high_memory)) { - change_page_attr(virt_to_page(__va(p->phys_addr)), - p->size >> PAGE_SHIFT, + goto out_unlock; + } + *pprev = p->next; + unmap_vm_area(p); + if ((p->flags >> 20) && + p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) { + /* p->size includes the guard page, but cpa doesn't like that */ + change_page_attr_addr((unsigned long)(__va(p->phys_addr)), + (p->size - PAGE_SIZE) >> PAGE_SHIFT, PAGE_KERNEL); global_flush_tlb(); } +out_unlock: + write_unlock(&vmlist_lock); kfree(p); }
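
Usage note: after this patch, __ioremap() and ioremap_nocache() return a
void __iomem * cookie and iounmap() takes a volatile void __iomem *, so
callers are expected to go through the read*/write* accessors rather than
dereferencing the pointer. Below is a minimal sketch of a caller against
this API; it is not part of the patch, and EXAMPLE_MMIO_BASE,
EXAMPLE_MMIO_SIZE, and EXAMPLE_REG_STATUS are made-up values for a
hypothetical device:

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <asm/io.h>

	#define EXAMPLE_MMIO_BASE	0xfebf0000UL	/* hypothetical bus address */
	#define EXAMPLE_MMIO_SIZE	0x1000UL
	#define EXAMPLE_REG_STATUS	0x10		/* hypothetical register offset */

	static int example_init(void)
	{
		void __iomem *regs;
		u32 status;

		/* Uncached mapping: _PAGE_PCD is applied to the new mapping, and
		 * for addresses covered by the kernel direct mapping
		 * ioremap_change_attr() rewrites that alias too, so the same
		 * physical pages never carry conflicting cache attributes. */
		regs = ioremap_nocache(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
		if (!regs)
			return -ENOMEM;

		status = readl(regs + EXAMPLE_REG_STATUS);	/* no direct dereference */
		printk(KERN_INFO "example: status %#x\n", status);

		/* iounmap() now also restores PAGE_KERNEL on the direct mapping
		 * for RAM-backed ranges before freeing the vm area. */
		iounmap(regs);
		return 0;
	}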