vserver 2.0-rc4
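
Update the ia64 hugetlb code for this release: huge_pte_alloc() and
huge_pte_offset() now walk the four-level page tables (pgd -> pud ->
pmd -> pte), the stale commented-out mm->rss updates left beside the
vx_rsspages_add()/vx_rsspages_sub() accounting calls are removed, the
used_hugetlb shortcut in follow_huge_addr() is dropped,
hugetlb_free_pgtables() calls clear_page_range() instead of computing
pgd indices for clear_page_tables(), and the error path in
hugetlb_prefault() releases the page with page_cache_release() rather
than free_huge_page().

For context, hugetlb_prefault() runs at mmap() time on kernels of this
vintage, so the paths touched here are exercised by any ordinary
hugetlbfs mapping. Below is a minimal user-space sketch; the /mnt/huge
mount point, file name, and mapping length are assumptions, not part of
this patch:

    /* Sketch: map a file on a mounted hugetlbfs instance and touch it.
     * Assumes hugetlbfs is mounted at /mnt/huge and that MAP_LEN is a
     * multiple of the huge page size (see Hugepagesize in /proc/meminfo). */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define MAP_LEN (16UL * 1024 * 1024)    /* assumed; adjust to the huge page size */

    int main(void)
    {
            int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* mmap() of a hugetlbfs file is what reaches the hugetlb
             * prefault/fault code touched by this patch. */
            char *p = mmap(NULL, MAP_LEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED) {
                    perror("mmap");
                    close(fd);
                    return 1;
            }
            memset(p, 0, MAP_LEN);          /* touch the huge pages */
            munmap(p, MAP_LEN);
            close(fd);
            return 0;
    }
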
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index c724358..8a33e10 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -29,13 +29,17 @@ huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
 {
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, taddr);
-       pmd = pmd_alloc(mm, pgd, taddr);
-       if (pmd)
-               pte = pte_alloc_map(mm, pmd, taddr);
+       pud = pud_alloc(mm, pgd, taddr);
+       if (pud) {
+               pmd = pmd_alloc(mm, pud, taddr);
+               if (pmd)
+                       pte = pte_alloc_map(mm, pmd, taddr);
+       }
        return pte;
 }
 
@@ -44,14 +48,18 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 {
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, taddr);
        if (pgd_present(*pgd)) {
-               pmd = pmd_offset(pgd, taddr);
-               if (pmd_present(*pmd))
-                       pte = pte_offset_map(pmd, taddr);
+               pud = pud_offset(pgd, taddr);
+               if (pud_present(*pud)) {
+                       pmd = pmd_offset(pud, taddr);
+                       if (pmd_present(*pmd))
+                               pte = pte_offset_map(pmd, taddr);
+               }
        }
 
        return pte;
@@ -65,7 +73,6 @@ set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
 {
        pte_t entry;
 
-       // mm->rss += (HPAGE_SIZE / PAGE_SIZE);
        vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
        if (write_access) {
                entry =
@@ -109,7 +116,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                ptepage = pte_page(entry);
                get_page(ptepage);
                set_pte(dst_pte, entry);
-               // dst->rss += (HPAGE_SIZE / PAGE_SIZE);
                vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
@@ -160,8 +166,6 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
        struct page *page;
        pte_t *ptep;
 
-       if (! mm->used_hugetlb)
-               return ERR_PTR(-EINVAL);
        if (REGION_NUMBER(addr) != REGION_HPAGE)
                return ERR_PTR(-EINVAL);
 
@@ -191,7 +195,6 @@ void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
 {
        unsigned long first = start & HUGETLB_PGDIR_MASK;
        unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
-       unsigned long start_index, end_index;
        struct mm_struct *mm = tlb->mm;
 
        if (!prev) {
@@ -216,23 +219,13 @@ void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
                                last = next->vm_start;
                }
                if (prev->vm_end > first)
-                       first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;
+                       first = prev->vm_end;
                break;
        }
 no_mmaps:
        if (last < first)       /* for arches with discontiguous pgd indices */
                return;
-       /*
-        * If the PGD bits are not consecutive in the virtual address, the
-        * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
-        */
-
-       start_index = pgd_index(htlbpage_to_page(first));
-       end_index = pgd_index(htlbpage_to_page(last));
-
-       if (end_index > start_index) {
-               clear_page_tables(tlb, start_index, end_index - start_index);
-       }
+       clear_page_range(tlb, first, last);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
@@ -253,7 +246,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
                put_page(page);
                pte_clear(pte);
        }
-       // mm->rss -= (end - start) >> PAGE_SHIFT;
        vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
        flush_tlb_range(vma, start, end);
 }
@@ -300,7 +292,7 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
                                unlock_page(page);
                        } else {
                                hugetlb_put_quota(mapping);
-                               free_huge_page(page);
+                               page_cache_release(page);
                                goto out;
                        }
                }