{
pte_t entry;
- vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
+ add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
if (write_access) {
entry =
pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
ptepage = pte_page(entry);
get_page(ptepage);
set_pte(dst_pte, entry);
- vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
+ add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
addr += HPAGE_SIZE;
}
return 0;
return NULL;
}
-/*
- * Same as generic free_pgtables(), except constant PGDIR_* and pgd_offset
- * are hugetlb region specific.
- */
-void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
- unsigned long start, unsigned long end)
+void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
{
- unsigned long first = start & HUGETLB_PGDIR_MASK;
- unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
- struct mm_struct *mm = tlb->mm;
-
- if (!prev) {
- prev = mm->mmap;
- if (!prev)
- goto no_mmaps;
- if (prev->vm_end > start) {
- if (last > prev->vm_start)
- last = prev->vm_start;
- goto no_mmaps;
- }
- }
- for (;;) {
- struct vm_area_struct *next = prev->vm_next;
+ /*
+ * This is called only when is_hugepage_only_range(addr,),
+ * and it follows that is_hugepage_only_range(end,) also.
+ *
+ * The offset of these addresses from the base of the hugetlb
+ * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
+ * the standard free_pgd_range will free the right page tables.
+ *
+ * If floor and ceiling are also in the hugetlb region, they
+ * must likewise be scaled down; but if outside, left unchanged.
+ */
- if (next) {
- if (next->vm_start < start) {
- prev = next;
- continue;
- }
- if (last > next->vm_start)
- last = next->vm_start;
- }
- if (prev->vm_end > first)
- first = prev->vm_end;
- break;
- }
-no_mmaps:
- if (last < first) /* for arches with discontiguous pgd indices */
- return;
- clear_page_range(tlb, first, last);
+ addr = htlbpage_to_page(addr);
+ end = htlbpage_to_page(end);
+ if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
+ floor = htlbpage_to_page(floor);
+ if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
+ ceiling = htlbpage_to_page(ceiling);
+
+ free_pgd_range(tlb, addr, end, floor, ceiling);
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
continue;
page = pte_page(*pte);
put_page(page);
- pte_clear(pte);
+ pte_clear(mm, address, pte);
}
- vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
+ add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
flush_tlb_range(vma, start, end);
}