vserver 1.9.5.x5

diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index fbd24a0..678a0a5 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -29,13 +29,17 @@ huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
 {
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, taddr);
-       pmd = pmd_alloc(mm, pgd, taddr);
-       if (pmd)
-               pte = pte_alloc_map(mm, pmd, taddr);
+       pud = pud_alloc(mm, pgd, taddr);
+       if (pud) {
+               pmd = pmd_alloc(mm, pud, taddr);
+               if (pmd)
+                       pte = pte_alloc_map(mm, pmd, taddr);
+       }
        return pte;
 }
 
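For review context: this hunk follows the generic 4-level page-table
conversion, inserting the new pud level between pgd and pmd. On
configurations that keep only three hardware levels the pud is folded
into the pgd, so the extra step compiles away; a short sketch
paraphrasing the asm-generic nopud helpers (not verbatim kernel code):

/* When the pud level is folded, the one-entry pud lives inside the
 * pgd itself: offsetting into it is a cast and allocation is a no-op.
 * Paraphrased from include/asm-generic/pgtable-nopud.h.
 */
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd;    /* the pgd entry doubles as the pud */
}

#define pud_alloc_one(mm, address)      NULL    /* nothing to allocate */
#define pud_free(x)                     do { } while (0)
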
@@ -44,14 +48,18 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 {
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, taddr);
        if (pgd_present(*pgd)) {
-               pmd = pmd_offset(pgd, taddr);
-               if (pmd_present(*pmd))
-                       pte = pte_offset_map(pmd, taddr);
+               pud = pud_offset(pgd, taddr);
+               if (pud_present(*pud)) {
+                       pmd = pmd_offset(pud, taddr);
+                       if (pmd_present(*pmd))
+                               pte = pte_offset_map(pmd, taddr);
+               }
        }
 
        return pte;
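
The lookup side mirrors the allocation side: probe one level at a time
and return NULL at the first absent level. The same walk can also be
written flat with early returns; a minimal sketch as a standalone
helper (lookup_pte_4level is illustrative, not a kernel symbol):

static pte_t *lookup_pte_4level(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (!pgd_present(*pgd))
                return NULL;    /* no pud page at this pgd slot */
        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
                return NULL;    /* no pmd page below this pud */
        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return NULL;    /* no pte page below this pmd */
        return pte_offset_map(pmd, addr);
}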
@@ -189,7 +197,6 @@ void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
 {
        unsigned long first = start & HUGETLB_PGDIR_MASK;
        unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
-       unsigned long start_index, end_index;
        struct mm_struct *mm = tlb->mm;
 
        if (!prev) {
@@ -214,23 +221,13 @@ void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
                                last = next->vm_start;
                }
                if (prev->vm_end > first)
-                       first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;
+                       first = prev->vm_end;
                break;
        }
 no_mmaps:
        if (last < first)       /* for arches with discontiguous pgd indices */
                return;
-       /*
-        * If the PGD bits are not consecutive in the virtual address, the
-        * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
-        */
-
-       start_index = pgd_index(htlbpage_to_page(first));
-       end_index = pgd_index(htlbpage_to_page(last));
-
-       if (end_index > start_index) {
-               clear_page_tables(tlb, start_index, end_index - start_index);
-       }
+       clear_page_range(tlb, first, last);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
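
The switch from clear_page_tables() to clear_page_range() is what makes
the removed comment obsolete: the new interface takes virtual addresses
rather than pgd indices, so the pgd_index() arithmetic (which broke when
the PGD bits are not consecutive in the virtual address, as on ia64)
disappears, and first no longer needs rounding up past prev's pgd. A
minimal sketch of the shape of such an address-based walk;
clear_pud_range() is a hypothetical helper and pgd_addr_end() is an
assumed boundary-clamping macro, not the actual mm/memory.c code:

/* Free page-table pages covering [start, end) by walking virtual
 * addresses top-down, one pgd slot per iteration.
 */
static void clear_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                            unsigned long addr, unsigned long end)
{
        /* descend via pud_offset(pgd, addr), recurse over the pmds,
         * then hand the emptied levels to the mmu_gather for freeing */
}

void sketch_clear_page_range(struct mmu_gather *tlb,
                             unsigned long start, unsigned long end)
{
        unsigned long addr = start;
        pgd_t *pgd = pgd_offset(tlb->mm, addr);

        do {
                /* next pgd boundary, clamped to end */
                unsigned long next = pgd_addr_end(addr, end);

                if (!pgd_none(*pgd))
                        clear_pud_range(tlb, pgd, addr, next);
                addr = next;
                pgd++;
        } while (addr < end);
}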