-
-void __check_kvm_seq(struct mm_struct *mm)
-{
-	unsigned int seq;
-
-	do {
-		seq = init_mm.context.kvm_seq;
-		memcpy(pgd_offset(mm, VMALLOC_START),
-		       pgd_offset_k(VMALLOC_START),
-		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
-					pgd_index(VMALLOC_START)));
-		mm->context.kvm_seq = seq;
-	} while (seq != init_mm.context.kvm_seq);
-}
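The loop above is a lock-free retry: the caller snapshots init_mm.context.kvm_seq, copies the kernel's vmalloc/ioremap PGD entries into its own mm, and starts over if another CPU bumped the counter while the copy was in flight. A stand-alone sketch of the same generation-counter pattern, with hypothetical names rather than kernel API, and (like the original) leaving memory ordering to the surrounding environment:

#include <string.h>

struct table {
	unsigned int seq;		/* bumped by the writer on every change    */
	unsigned long entries[16];	/* shared data the reader wants a copy of  */
};

static void sync_copy(struct table *dst, const struct table *src)
{
	unsigned int seq;

	do {
		seq = src->seq;				/* snapshot the generation   */
		memcpy(dst->entries, src->entries,	/* copy possibly-racing data */
		       sizeof(dst->entries));
		dst->seq = seq;				/* remember what we copied   */
	} while (seq != src->seq);			/* writer raced us? retry    */
}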
-
-#ifndef CONFIG_SMP
-/*
- * Section support is unsafe on SMP - If you iounmap and ioremap a region,
- * the other CPUs will not see this change until their next context switch.
- * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
- * which requires the new ioremap'd region to be referenced, the CPU will
- * reference the _old_ region.
- *
- * Note that get_vm_area() allocates a guard 4K page, so we need to mask
- * the size back to 1MB aligned or we will overflow in the loop below.
- */
-static void unmap_area_sections(unsigned long virt, unsigned long size)
-{
-	unsigned long addr = virt, end = virt + (size & ~SZ_1M);
-	pgd_t *pgd;
-
-	flush_cache_vunmap(addr, end);
-	pgd = pgd_offset_k(addr);
-	do {
-		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
-
-		pmd = *pmdp;
-		if (!pmd_none(pmd)) {
-			/*
-			 * Clear the PMD from the page table, and
-			 * increment the kvm sequence so others
-			 * notice this change.
-			 *
-			 * Note: this is still racy on SMP machines.
-			 */
-			pmd_clear(pmdp);
-			init_mm.context.kvm_seq++;
-
-			/*
-			 * Free the page table, if there was one.
-			 */
-			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
-				pte_free_kernel(pmd_page_kernel(pmd));
-		}
-
-		addr += PGDIR_SIZE;
-		pgd++;
-	} while (addr < end);
-
-	/*
-	 * Ensure that the active_mm is up to date - we want to
-	 * catch any use-after-iounmap cases.
-	 */
-	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
-		__check_kvm_seq(current->active_mm);
-
-	flush_tlb_kernel_range(virt, end);
-}
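The (pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE test above is what decides whether a second-level pte page exists and must be freed: ARM first-level descriptors carry their type in bits [1:0], where 0b01 points at a coarse second-level table and 0b10 maps a 1MB section directly. A small stand-alone illustration of that classification; the type values are written out as literals and should be taken as illustrative copies of the PMD_TYPE_* constants, not a replacement for pgtable-hwdef.h:

#include <stdio.h>

#define TYPE_MASK	0x3UL	/* descriptor type lives in bits [1:0]     */
#define TYPE_TABLE	0x1UL	/* points at a second-level page table     */
#define TYPE_SECT	0x2UL	/* maps a 1MB section directly, no table   */

static const char *classify(unsigned long pmdval)
{
	switch (pmdval & TYPE_MASK) {
	case TYPE_TABLE:
		return "page table entry: pte page must be freed";
	case TYPE_SECT:
		return "section mapping: nothing to free";
	default:
		return "fault/unused entry";
	}
}

int main(void)
{
	printf("%s\n", classify(0x87654031UL));	/* made-up table descriptor   */
	printf("%s\n", classify(0x40000c12UL));	/* made-up section descriptor */
	return 0;
}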
-
-static int
-remap_area_sections(unsigned long virt, unsigned long pfn,
-		    unsigned long size, unsigned long flags)
-{
-	unsigned long prot, addr = virt, end = virt + size;
-	pgd_t *pgd;
-
-	/*
-	 * Remove and free any PTE-based mapping, and
-	 * sync the current kernel mapping.
-	 */
-	unmap_area_sections(virt, size);
-
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
-	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
-
-	/*
-	 * ARMv6 and above need XN set to prevent speculative prefetches
-	 * hitting IO.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
-		prot |= PMD_SECT_XN;
-
-	pgd = pgd_offset_k(addr);
-	do {
-		pmd_t *pmd = pmd_offset(pgd, addr);
-
-		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
-		pfn += SZ_1M >> PAGE_SHIFT;
-		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
-		pfn += SZ_1M >> PAGE_SHIFT;
-		flush_pmd_entry(pmd);
-
-		addr += PGDIR_SIZE;
-		pgd++;
-	} while (addr < end);
-
-	return 0;
-}
-
-static int
-remap_area_supersections(unsigned long virt, unsigned long pfn,
-			 unsigned long size, unsigned long flags)
-{
-	unsigned long prot, addr = virt, end = virt + size;
-	pgd_t *pgd;
-
-	/*
-	 * Remove and free any PTE-based mapping, and
-	 * sync the current kernel mapping.
-	 */
-	unmap_area_sections(virt, size);
-
-	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
-	       PMD_DOMAIN(DOMAIN_IO) |
-	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
-
-	/*
-	 * ARMv6 and above need XN set to prevent speculative prefetches
-	 * hitting IO.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
-		prot |= PMD_SECT_XN;
-
-	pgd = pgd_offset_k(virt);
-	do {
-		unsigned long super_pmd_val, i;
-
-		super_pmd_val = __pfn_to_phys(pfn) | prot;
-		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
-
-		for (i = 0; i < 8; i++) {
-			pmd_t *pmd = pmd_offset(pgd, addr);
-
-			pmd[0] = __pmd(super_pmd_val);
-			pmd[1] = __pmd(super_pmd_val);
-			flush_pmd_entry(pmd);
-
-			addr += PGDIR_SIZE;
-			pgd++;
-		}
-
-		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
-	} while (addr < end);
-
-	return 0;
-}
-#endif
-
-
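One detail in remap_area_supersections() that is easy to miss: super_pmd_val is an unsigned long, so __pfn_to_phys(pfn) keeps only the low 32 bits of the physical address, and for a device above 4GB the top nibble would be lost. The line

	super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

recovers physical address bits [35:32] from the pfn and folds them into the supersection descriptor's extended base address field, bits [23:20]. The surrounding loop then replicates the finished descriptor into 16 consecutive first-level entries (8 iterations writing pmd[0] and pmd[1] each), which is what the hardware requires for a 16MB supersection. A small stand-alone check of the bit arithmetic, assuming 4K pages (PAGE_SHIFT == 12) and using a made-up 36-bit address:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4K pages, as on ARM */

int main(void)
{
	unsigned long long phys = 0x940000000ULL;		/* made-up PA above 4GB */
	unsigned long pfn = (unsigned long)(phys >> PAGE_SHIFT);	/* 0x940000     */

	/* What a 32-bit __pfn_to_phys() style shift would keep: PA[31:0]. */
	unsigned long base_lo = (unsigned long)(phys & 0xffffffffULL);

	/* PA[35:32] placed into descriptor bits [23:20], as in super_pmd_val. */
	unsigned long ext = ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

	/* Prints PA[31:0] = 0x40000000, ext = 0x900000. */
	printf("PA[31:0] = %#lx, PA[35:32] -> bits [23:20] = %#lx\n",
	       base_lo, ext);
	return 0;
}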