X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fia64%2Fmm%2Fhugetlbpage.c;h=8a33e10c752aaf83f862ff40dc1e4ce29b2aa176;hb=8e8ece46a861c84343256819eaec77e608ff9217;hp=114e3d96c6cc7c2c7205b0fadd62c4f52be6d6fb;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 114e3d96c..8a33e10c7 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -29,13 +29,17 @@ huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, taddr);
-	pmd = pmd_alloc(mm, pgd, taddr);
-	if (pmd)
-		pte = pte_alloc_map(mm, pmd, taddr);
+	pud = pud_alloc(mm, pgd, taddr);
+	if (pud) {
+		pmd = pmd_alloc(mm, pud, taddr);
+		if (pmd)
+			pte = pte_alloc_map(mm, pmd, taddr);
+	}
 	return pte;
 }
 
@@ -44,14 +48,18 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, taddr);
 	if (pgd_present(*pgd)) {
-		pmd = pmd_offset(pgd, taddr);
-		if (pmd_present(*pmd))
-			pte = pte_offset_map(pmd, taddr);
+		pud = pud_offset(pgd, taddr);
+		if (pud_present(*pud)) {
+			pmd = pmd_offset(pud, taddr);
+			if (pmd_present(*pmd))
+				pte = pte_offset_map(pmd, taddr);
+		}
 	}
 
 	return pte;
@@ -65,7 +73,7 @@ set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	pte_t entry;
 
-	mm->rss += (HPAGE_SIZE / PAGE_SIZE);
+	vx_rsspages_add(mm, HPAGE_SIZE / PAGE_SIZE);
 	if (write_access) {
 		entry =
 		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
@@ -108,7 +116,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		ptepage = pte_page(entry);
 		get_page(ptepage);
 		set_pte(dst_pte, entry);
-		dst->rss += (HPAGE_SIZE / PAGE_SIZE);
+		vx_rsspages_add(dst, HPAGE_SIZE / PAGE_SIZE);
 		addr += HPAGE_SIZE;
 	}
 	return 0;
@@ -158,8 +166,6 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int writ
 	struct page *page;
 	pte_t *ptep;
 
-	if (! mm->used_hugetlb)
-		return ERR_PTR(-EINVAL);
 	if (REGION_NUMBER(addr) != REGION_HPAGE)
 		return ERR_PTR(-EINVAL);
 
@@ -189,7 +195,6 @@ void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
 {
 	unsigned long first = start & HUGETLB_PGDIR_MASK;
 	unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
-	unsigned long start_index, end_index;
 	struct mm_struct *mm = tlb->mm;
 
 	if (!prev) {
@@ -214,23 +219,13 @@ void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
 				last = next->vm_start;
 		}
 		if (prev->vm_end > first)
-			first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;
+			first = prev->vm_end;
 		break;
 	}
 no_mmaps:
 	if (last < first)	/* for arches with discontiguous pgd indices */
 		return;
-	/*
-	 * If the PGD bits are not consecutive in the virtual address, the
-	 * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
-	 */
-
-	start_index = pgd_index(htlbpage_to_page(first));
-	end_index = pgd_index(htlbpage_to_page(last));
-
-	if (end_index > start_index) {
-		clear_page_tables(tlb, start_index, end_index - start_index);
-	}
+	clear_page_range(tlb, first, last);
 }
 
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
@@ -251,7 +246,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsig
 		put_page(page);
 		pte_clear(pte);
 	}
-	mm->rss -= (end - start) >> PAGE_SHIFT;
+	vx_rsspages_sub(mm, (end - start) >> PAGE_SHIFT);
 	flush_tlb_range(vma, start, end);
 }
 
@@ -293,10 +288,11 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 				goto out;
 			}
 			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			unlock_page(page);
-			if (ret) {
+			if (! ret) {
+				unlock_page(page);
+			} else {
 				hugetlb_put_quota(mapping);
-				free_huge_page(page);
+				page_cache_release(page);
 				goto out;
 			}
 		}