static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, taddr);
- pmd = pmd_alloc(mm, pgd, taddr);
- if (pmd)
- pte = pte_alloc_map(mm, pmd, taddr);
+ pud = pud_alloc(mm, pgd, taddr);
+ if (pud) {
+ pmd = pmd_alloc(mm, pud, taddr);
+ if (pmd)
+ pte = pte_alloc_map(mm, pmd, taddr);
+ }
return pte;
}
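The allocation path descends one level at a time, allocating each intermediate table on demand and returning NULL as soon as any level fails; the patch simply splices the new pud hop between pgd and pmd without changing that shape. Below is a minimal user-space model of the walk, for illustration only: level_t, level_alloc() and the shift values are all invented here, and the real pud_alloc()/pmd_alloc()/pte_alloc_map() additionally handle locking and freeing partially built tables on failure.

/*
 * Toy model of the pgd -> pud -> pmd -> pte allocate-on-demand walk.
 * Everything here is made up for the sketch; it is not kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512	/* entries per table, e.g. 64-bit arch, 4K pages */

typedef struct { void *slot[ENTRIES]; } level_t;

/* Allocate the next-level table below dir->slot[idx] unless it exists. */
static void *level_alloc(level_t *dir, unsigned long idx)
{
	if (!dir->slot[idx])
		dir->slot[idx] = calloc(1, sizeof(level_t));
	return dir->slot[idx];		/* NULL only if calloc failed */
}

int main(void)
{
	level_t pgd = { { NULL } };
	unsigned long addr = 0x123456789000UL;

	/* Slice the address into per-level indices (shifts invented). */
	unsigned long pgd_i = (addr >> 39) & (ENTRIES - 1);
	unsigned long pud_i = (addr >> 30) & (ENTRIES - 1);
	unsigned long pmd_i = (addr >> 21) & (ENTRIES - 1);

	/* Same shape as the patched huge_pte_alloc(): descend level by
	 * level, bail out on the first allocation failure. */
	level_t *pud = level_alloc(&pgd, pgd_i);
	level_t *pmd = pud ? level_alloc(pud, pud_i) : NULL;
	void *pte = pmd ? level_alloc(pmd, pmd_i) : NULL;

	printf("pte table for %#lx at %p\n", addr, pte);
	return 0;
}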
static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte = NULL;
pgd = pgd_offset(mm, taddr);
if (pgd_present(*pgd)) {
- pmd = pmd_offset(pgd, taddr);
- if (pmd_present(*pmd))
- pte = pte_offset_map(pmd, taddr);
+ pud = pud_offset(pgd, taddr);
+ if (pud_present(*pud)) {
+ pmd = pmd_offset(pud, taddr);
+ if (pmd_present(*pmd))
+ pte = pte_offset_map(pmd, taddr);
+ }
}
return pte;
}
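huge_pte_offset() is the read-only mirror of that walk: nothing is allocated, and a non-present entry at any level short-circuits the lookup to NULL, which is why the new pud_present() test has to nest around the pmd lookup. A sketch in the same spirit, again with invented types standing in for the kernel's *_present()/*_offset() helpers:

#include <stdio.h>

#define ENTRIES 512

typedef struct level { struct level *next[ENTRIES]; } level;

/* Walk without allocating: a hole at any level ends the lookup. */
static void *lookup(level *pgd, unsigned long i0, unsigned long i1,
		    unsigned long i2)
{
	level *pud, *pmd;

	if (!pgd->next[i0])		/* ~pgd_present() */
		return NULL;
	pud = pgd->next[i0];		/* ~pud_offset() */
	if (!pud->next[i1])		/* ~pud_present() */
		return NULL;
	pmd = pud->next[i1];		/* ~pmd_offset() */
	return pmd->next[i2];		/* ~pte_offset_map() */
}

int main(void)
{
	level pgd = { { NULL } };

	/* Empty tables: the walk short-circuits at the first level. */
	printf("lookup -> %p\n", lookup(&pgd, 1, 2, 3));
	return 0;
}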
void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev, unsigned long start, unsigned long end)
{
unsigned long first = start & HUGETLB_PGDIR_MASK;
unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
- unsigned long start_index, end_index;
struct mm_struct *mm = tlb->mm;
	if (next) {
		if (last > next->vm_start)
			last = next->vm_start;
	}
if (prev->vm_end > first)
- first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;
+ first = prev->vm_end;
break;
}
no_mmaps:
if (last < first) /* for arches with discontiguous pgd indices */
return;
- /*
- * If the PGD bits are not consecutive in the virtual address, the
- * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
- */
-
- start_index = pgd_index(htlbpage_to_page(first));
- end_index = pgd_index(htlbpage_to_page(last));
-
- if (end_index > start_index) {
- clear_page_tables(tlb, start_index, end_index - start_index);
- }
+ clear_page_range(tlb, first, last);
}
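The last hunk is an interface change rather than a new level: clear_page_tables() took a pgd index and a count, so callers had to collapse addresses with pgd_index() and, as the deleted comment notes, that silently assumed the pgd bits form one contiguous field of the virtual address. clear_page_range() takes the start and end addresses directly and leaves any translation to the arch, which is also why the first/last rounding above now stays in address units. A stand-alone sketch of that bounds arithmetic, using invented HPGDIR_* constants in place of the arch-defined HUGETLB_PGDIR_*:

#include <stdio.h>

/* Illustrative constants only; the real values are per-arch. */
#define HPGDIR_SHIFT	22
#define HPGDIR_SIZE	(1UL << HPGDIR_SHIFT)
#define HPGDIR_MASK	(~(HPGDIR_SIZE - 1))

int main(void)
{
	unsigned long start = 0x40123000UL, end = 0x40987000UL;
	unsigned long first = start & HPGDIR_MASK;	/* round down */
	unsigned long last = end + HPGDIR_SIZE - 1;	/* push past boundary */

	/* Old interface: collapse addresses to pgd indices with a shift,
	 * correct only when the index bits are contiguous in the VA. */
	printf("indices: %lu..%lu\n",
	       first >> HPGDIR_SHIFT, last >> HPGDIR_SHIFT);

	/* New interface: hand the address range straight to
	 * clear_page_range() and let it translate per-arch. */
	printf("range:   %#lx..%#lx\n", first, last);
	return 0;
}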
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)