+/*
+ * Allocate the pmd table for this pgd entry if necessary, then fill in
+ * pte mappings for each pmd-sized chunk of [addr, end) via
+ * remap_area_pte().
+ */
+static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
+				 unsigned long end, unsigned long phys_addr,
+				 pgprot_t prot)
+{
+	unsigned long next;
+	pmd_t *pmd;
+	int ret = 0;
+
+	pmd = pmd_alloc(&init_mm, pgd, addr);
+	if (!pmd)
+		return -ENOMEM;
+
+	do {
+		next = pmd_addr_end(addr, end);
+		ret = remap_area_pte(pmd, addr, next, phys_addr, prot);
+		if (ret)
+			return ret;
+		phys_addr += next - addr;
+	} while (pmd++, addr = next, addr != end);
+	return ret;
+}
+
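+/*
+ * Walk the kernel page tables for [start, start + size) and create
+ * page-sized mappings to the physical area starting at pfn, using the
+ * L_PTE_* protection bits combined with the caller-supplied flags.
+ */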
+static int remap_area_pages(unsigned long start, unsigned long pfn,
+			    unsigned long size, unsigned long flags)
+{
+	unsigned long addr = start;
+	unsigned long next, end = start + size;
+	unsigned long phys_addr = __pfn_to_phys(pfn);
+	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
+				 L_PTE_DIRTY | L_PTE_WRITE | flags);
+	pgd_t *pgd;
+	int err = 0;
+
+	BUG_ON(addr >= end);
+	pgd = pgd_offset_k(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		err = remap_area_pmd(pgd, addr, next, phys_addr, prot);
+		if (err)
+			break;
+		phys_addr += next - addr;
+	} while (pgd++, addr = next, addr != end);
+
+	return err;
+}
+
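+/*
+ * Copy the kernel (vmalloc region) pgd entries from init_mm into this
+ * mm and record the kvm_seq they were copied under; retry if the
+ * kernel mapping sequence number changes while the copy is in flight.
+ */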
+void __check_kvm_seq(struct mm_struct *mm)
+{
+	unsigned int seq;
+
+	do {
+		seq = init_mm.context.kvm_seq;
+		memcpy(pgd_offset(mm, VMALLOC_START),
+		       pgd_offset_k(VMALLOC_START),
+		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
+					pgd_index(VMALLOC_START)));
+		mm->context.kvm_seq = seq;
+	} while (seq != init_mm.context.kvm_seq);
+}
+
+#ifndef CONFIG_SMP
+/*
+ * Section support is unsafe on SMP - If you iounmap and ioremap a region,
+ * the other CPUs will not see this change until their next context switch.
+ * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
+ * which requires the new ioremap'd region to be referenced, the CPU will
+ * reference the _old_ region.
+ *
+ * Note that get_vm_area() allocates a guard 4K page, so we need to mask
+ * the size back to 1MB aligned or we will overflow in the loop below.
+ */
+static void unmap_area_sections(unsigned long virt, unsigned long size)