X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Farm%2Fmm%2Fioremap.c;h=0ac615c0f7987f81ccc7aa3a16c8ace113f65192;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=88a999df0ab3f59ddaf8fe6da4f942fca750895d;hpb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;p=linux-2.6.git

diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 88a999df0..0ac615c0f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -38,89 +38,71 @@
  */
 #define VM_ARM_SECTION_MAPPING	0x80000000
 
-static inline void
-remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
-	       unsigned long phys_addr, pgprot_t pgprot)
+static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
+			  unsigned long phys_addr, pgprot_t prot)
 {
-	unsigned long end;
+	pte_t *pte;
+
+	pte = pte_alloc_kernel(pmd, addr);
+	if (!pte)
+		return -ENOMEM;
 
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-	BUG_ON(address >= end);
 	do {
 		if (!pte_none(*pte))
 			goto bad;
 
-		set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
-		address += PAGE_SIZE;
+		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
 		phys_addr += PAGE_SIZE;
-		pte++;
-	} while (address && (address < end));
-	return;
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	return 0;
 
  bad:
-	printk("remap_area_pte: page already exists\n");
+	printk(KERN_CRIT "remap_area_pte: page already exists\n");
 	BUG();
 }
 
-static inline int
-remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
-	       unsigned long phys_addr, unsigned long flags)
+static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
+				 unsigned long end, unsigned long phys_addr,
+				 pgprot_t prot)
 {
-	unsigned long end;
-	pgprot_t pgprot;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
+	unsigned long next;
+	pmd_t *pmd;
+	int ret = 0;
 
-	phys_addr -= address;
-	BUG_ON(address >= end);
+	pmd = pmd_alloc(&init_mm, pgd, addr);
+	if (!pmd)
+		return -ENOMEM;
 
-	pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
 	do {
-		pte_t * pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
+		next = pmd_addr_end(addr, end);
+		ret = remap_area_pte(pmd, addr, next, phys_addr, prot);
+		if (ret)
+			return ret;
+		phys_addr += next - addr;
+	} while (pmd++, addr = next, addr != end);
+	return ret;
 }
 
-static int
-remap_area_pages(unsigned long start, unsigned long pfn,
-		 unsigned long size, unsigned long flags)
+static int remap_area_pages(unsigned long start, unsigned long pfn,
+			    unsigned long size, unsigned long flags)
 {
-	unsigned long address = start;
-	unsigned long end = start + size;
+	unsigned long addr = start;
+	unsigned long next, end = start + size;
 	unsigned long phys_addr = __pfn_to_phys(pfn);
+	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+				 L_PTE_DIRTY | L_PTE_WRITE | flags);
+	pgd_t *pgd;
 	int err = 0;
-	pgd_t * dir;
 
-	phys_addr -= address;
-	dir = pgd_offset(&init_mm, address);
-	BUG_ON(address >= end);
+	BUG_ON(addr >= end);
+	pgd = pgd_offset_k(addr);
 	do {
-		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
-		if (!pmd) {
-			err = -ENOMEM;
-			break;
-		}
-		if (remap_area_pmd(pmd, address, end - address,
-				   phys_addr + address, flags)) {
-			err = -ENOMEM;
+		next = pgd_addr_end(addr, end);
+		err = remap_area_pmd(pgd, addr, next, phys_addr, prot);
+		if (err)
 			break;
-		}
-
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
+		phys_addr += next - addr;
+	} while (pgd++, addr = next, addr != end);
 	return err;
 }
 
@@ -177,7 +159,7 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 			 * Free the page table, if there was one.
 			 */
 			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
-				pte_free_kernel(pmd_page_kernel(pmd));
+				pte_free_kernel(pmd_page_vaddr(pmd));
 		}
 
 		addr += PGDIR_SIZE;
@@ -310,13 +292,16 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
 
+	size = PAGE_ALIGN(size);
+
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
 
 #ifndef CONFIG_SMP
-	if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
+	if (DOMAIN_IO == 0 &&
+	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) &&
 	   !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
@@ -361,14 +346,14 @@ __ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
 }
 EXPORT_SYMBOL(__ioremap);
 
-void __iounmap(void __iomem *addr)
+void __iounmap(volatile void __iomem *addr)
 {
 #ifndef CONFIG_SMP
 	struct vm_struct **p, *tmp;
 #endif
 	unsigned int section_mapping = 0;
 
-	addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);
+	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);
 
 #ifndef CONFIG_SMP
 	/*
@@ -395,6 +380,6 @@ void __iounmap(void __iomem *addr)
 #endif
 
 	if (!section_mapping)
-		vunmap(addr);
+		vunmap((void __force *)addr);
 }
 EXPORT_SYMBOL(__iounmap);
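
Not part of the blobdiff output above: a minimal, hypothetical driver-style sketch of the mapping interface this patch reworks, assuming a made-up device window (the EXAMPLE_* constants and function names are invented for illustration). On ARM, ioremap() resolves to __ioremap()/__ioremap_pfn() shown in the diff, and iounmap() to __iounmap().

/* Hypothetical example, not from the patch: EXAMPLE_* values are made up. */
#include <linux/errno.h>
#include <linux/io.h>

#define EXAMPLE_PHYS_BASE	0x10000000UL	/* made-up device base address */
#define EXAMPLE_WIN_SIZE	0x1000		/* one page of registers */
#define EXAMPLE_CTRL_REG	0x04		/* made-up register offset */

static void __iomem *example_base;

static int example_map_regs(void)
{
	/* Builds the kernel mapping via __ioremap_pfn(); the patch page-aligns size there. */
	example_base = ioremap(EXAMPLE_PHYS_BASE, EXAMPLE_WIN_SIZE);
	if (!example_base)
		return -ENOMEM;

	/* MMIO must go through the accessors, never a plain pointer dereference. */
	writel(0x1, example_base + EXAMPLE_CTRL_REG);
	return 0;
}

static void example_unmap_regs(void)
{
	/* Tears the mapping down via __iounmap(), which vunmap()s page-table mappings. */
	if (example_base)
		iounmap(example_base);
}

The __force cast in the final hunk is what lets the __iomem cookie be handed to vunmap() without a sparse address-space warning, matching the new volatile-qualified __iounmap() prototype.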