linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 6da2759..625cbb3 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
 
-static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, addr);
        if (pgd) {
-               pmd = pmd_alloc(mm, pgd, addr);
-               if (pmd)
-                       pte = pte_alloc_map(mm, pmd, addr);
+               pud = pud_offset(pgd, addr);
+               if (pud) {
+                       pmd = pmd_alloc(mm, pud, addr);
+                       if (pmd)
+                               pte = pte_alloc_map(mm, pmd, addr);
+               }
        }
        return pte;
 }
 
-static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;
 
        pgd = pgd_offset(mm, addr);
        if (pgd) {
-               pmd = pmd_offset(pgd, addr);
-               if (pmd)
-                       pte = pte_offset_map(pmd, addr);
+               pud = pud_offset(pgd, addr);
+               if (pud) {
+                       pmd = pmd_offset(pud, addr);
+                       if (pmd)
+                               pte = pte_offset_map(pmd, addr);
+               }
        }
        return pte;
 }
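
Both helpers walk the same four-level pgd -> pud -> pmd -> pte hierarchy; the only difference is that huge_pte_alloc() populates missing levels on the way down (pmd_alloc(), pte_alloc_map()) while huge_pte_offset() is a pure lookup that returns NULL when a level is absent. A minimal sketch of how a caller might pair the two (illustrative only, not from the patch; locking omitted):

	/* Look up the huge-PTE slot for an HPAGE_SIZE-aligned address,
	 * allocating intermediate page-table levels only if the
	 * read-only walk came back empty.
	 */
	pte_t *ptep = huge_pte_offset(mm, addr);
	if (!ptep)
		ptep = huge_pte_alloc(mm, addr);
	if (!ptep)
		return VM_FAULT_OOM;	/* illustrative failure path */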
 
 #define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
 
-static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
-                        struct page *page, pte_t * page_table, int write_access)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t entry)
 {
-       unsigned long i;
-       pte_t entry;
+       int i;
 
-       mm->rss += (HPAGE_SIZE / PAGE_SIZE);
+       for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+               set_pte_at(mm, addr, ptep, entry);
+               ptep++;
+               addr += PAGE_SIZE;
+               pte_val(entry) += PAGE_SIZE;
+       }
+}
 
-       if (write_access)
-               entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
-                                                      vma->vm_page_prot)));
-       else
-               entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
-       entry = pte_mkyoung(entry);
-       mk_pte_huge(entry);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                             pte_t *ptep)
+{
+       pte_t entry;
+       int i;
 
-       for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-               set_pte(page_table, entry);
-               page_table++;
+       entry = *ptep;
 
-               pte_val(entry) += PAGE_SIZE;
+       for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+               pte_clear(mm, addr, ptep);
+               addr += PAGE_SIZE;
+               ptep++;
        }
+
+       return entry;
 }
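
sparc64 represents a huge page as a run of ordinary PTEs rather than one large TLB entry: with the default 8 KB base pages and 4 MB huge pages of this era, HUGETLB_PAGE_ORDER is 9, so set_huge_pte_at() installs 2^9 = 512 consecutive PTEs, advancing the physical address in pte_val() by PAGE_SIZE at each step, and huge_ptep_get_and_clear() clears the same run while returning the first entry. A hedged usage sketch under those assumptions ("page" and "prot" are assumed to come from the generic hugetlb layer):

	pte_t entry = mk_pte(page, prot);

	mk_pte_huge(entry);			/* tag with _PAGE_SZHUGE */
	set_huge_pte_at(mm, addr, ptep, entry);	/* fills 512 PTE slots */
	/* ... */
	entry = huge_ptep_get_and_clear(mm, addr, ptep); /* clears all 512 */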
 
 /*
@@ -89,79 +104,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
        return 0;
 }
 
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-                           struct vm_area_struct *vma)
-{
-       pte_t *src_pte, *dst_pte, entry;
-       struct page *ptepage;
-       unsigned long addr = vma->vm_start;
-       unsigned long end = vma->vm_end;
-       int i;
-
-       while (addr < end) {
-               dst_pte = huge_pte_alloc(dst, addr);
-               if (!dst_pte)
-                       goto nomem;
-               src_pte = huge_pte_offset(src, addr);
-               BUG_ON(!src_pte || pte_none(*src_pte));
-               entry = *src_pte;
-               ptepage = pte_page(entry);
-               get_page(ptepage);
-               for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                       set_pte(dst_pte, entry);
-                       pte_val(entry) += PAGE_SIZE;
-                       dst_pte++;
-               }
-               dst->rss += (HPAGE_SIZE / PAGE_SIZE);
-               addr += HPAGE_SIZE;
-       }
-       return 0;
-
-nomem:
-       return -ENOMEM;
-}
-
-int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-                       struct page **pages, struct vm_area_struct **vmas,
-                       unsigned long *position, int *length, int i)
-{
-       unsigned long vaddr = *position;
-       int remainder = *length;
-
-       WARN_ON(!is_vm_hugetlb_page(vma));
-
-       while (vaddr < vma->vm_end && remainder) {
-               if (pages) {
-                       pte_t *pte;
-                       struct page *page;
-
-                       pte = huge_pte_offset(mm, vaddr);
-
-                       /* hugetlb should be locked, and hence, prefaulted */
-                       BUG_ON(!pte || pte_none(*pte));
-
-                       page = pte_page(*pte);
-
-                       WARN_ON(!PageCompound(page));
-
-                       get_page(page);
-                       pages[i] = page;
-               }
-
-               if (vmas)
-                       vmas[i] = vma;
-
-               vaddr += PAGE_SIZE;
-               --remainder;
-               ++i;
-       }
-
-       *length = remainder;
-       *position = vaddr;
-
-       return i;
-}
-
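
The routines this patch deletes (copy_hugetlb_page_range() and follow_hugetlb_page() here, unmap_hugepage_range() and hugetlb_prefault() further down) are not lost: this rework moves them into generic code in mm/hugetlb.c, rewritten in terms of the arch hooks exported above. A hedged sketch of what the generic copy loop looks like under that assumption (heavily simplified; the real version also takes page_table_lock and maintains the RSS counters):

	int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
				    struct vm_area_struct *vma)
	{
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
			pte_t *src_pte = huge_pte_offset(src, addr);
			pte_t *dst_pte = huge_pte_alloc(dst, addr);

			if (!dst_pte)
				return -ENOMEM;
			if (src_pte && !pte_none(*src_pte)) {
				pte_t entry = *src_pte;

				get_page(pte_page(entry));
				set_huge_pte_at(dst, addr, dst_pte, entry);
			}
		}
		return 0;
	}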
 struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
 {
@@ -179,83 +121,43 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
        return NULL;
 }
 
-void unmap_hugepage_range(struct vm_area_struct *vma,
-                         unsigned long start, unsigned long end)
+static void context_reload(void *__data)
 {
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long address;
-       pte_t *pte;
-       struct page *page;
-       int i;
-
-       BUG_ON(start & (HPAGE_SIZE - 1));
-       BUG_ON(end & (HPAGE_SIZE - 1));
+       struct mm_struct *mm = __data;
 
-       for (address = start; address < end; address += HPAGE_SIZE) {
-               pte = huge_pte_offset(mm, address);
-               BUG_ON(!pte);
-               if (pte_none(*pte))
-                       continue;
-               page = pte_page(*pte);
-               put_page(page);
-               for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-                       pte_clear(pte);
-                       pte++;
-               }
-       }
-       mm->rss -= (end - start) >> PAGE_SHIFT;
-       flush_tlb_range(vma, start, end);
+       if (mm == current->mm)
+               load_secondary_context(mm);
 }
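
context_reload() is delivered to every CPU as a cross-call (see the on_each_cpu() call below). The mm == current->mm test means only CPUs presently executing in this address space rewrite their secondary context register immediately; every other CPU picks up the new page-size fields at its next context switch. For reference, a hedged reconstruction of the 2.6.16-era include/linux/smp.h prototype (later kernels dropped the retry flag):

	/* Invoked here as on_each_cpu(context_reload, mm, 0, 0):
	 * retry == 0 and wait == 0, so the caller does not spin until
	 * every CPU has finished running the function.
	 */
	int on_each_cpu(void (*func)(void *info), void *info,
			int retry, int wait);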
 
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
+void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
-       struct mm_struct *mm = current->mm;
-       unsigned long addr;
-       int ret = 0;
-
-       BUG_ON(vma->vm_start & ~HPAGE_MASK);
-       BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
-       spin_lock(&mm->page_table_lock);
-       for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
-               unsigned long idx;
-               pte_t *pte = huge_pte_alloc(mm, addr);
-               struct page *page;
-
-               if (!pte) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-               if (!pte_none(*pte))
-                       continue;
-
-               idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
-                       + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-               page = find_get_page(mapping, idx);
-               if (!page) {
-                       /* charge the fs quota first */
-                       if (hugetlb_get_quota(mapping)) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       page = alloc_huge_page();
-                       if (!page) {
-                               hugetlb_put_quota(mapping);
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-                       if (! ret) {
-                               unlock_page(page);
-                       } else {
-                               hugetlb_put_quota(mapping);
-                               free_huge_page(page);
-                               goto out;
-                       }
+       /* On UltraSPARC-III+ and later, configure the second half of
+        * the Data-TLB for huge pages.
+        */
+       if (tlb_type == cheetah_plus) {
+               unsigned long ctx;
+
+               spin_lock(&ctx_alloc_lock);
+               ctx = mm->context.sparc64_ctx_val;
+               ctx &= ~CTX_PGSZ_MASK;
+               ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+               ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
+
+               if (ctx != mm->context.sparc64_ctx_val) {
+                       /* When changing the page size fields, we
+                        * must perform a context flush so that no
+                        * stale entries match.  This flush must
+                        * occur with the original context register
+                        * settings.
+                        */
+                       do_flush_tlb_mm(mm);
+
+                       /* Reload the context register of all processors
+                        * also executing in this address space.
+                        */
+                       mm->context.sparc64_ctx_val = ctx;
+                       on_each_cpu(context_reload, mm, 0, 0);
                }
-               set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
+               spin_unlock(&ctx_alloc_lock);
        }
-out:
-       spin_unlock(&mm->page_table_lock);
-       return ret;
 }
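
On UltraSPARC-III+ ("cheetah_plus") the context register carries two page-size fields, one for each half of the Data-TLB; this hook points half 0 at the base page size and half 1 at the huge page size, flushing the old context first so no stale translations survive the change. The generic layer calls the hook once per mapping, before any huge PTEs are instantiated; a rough sketch of the 2.6.16-era call site in mm/hugetlb.c (abridged from memory; page-cache and quota handling omitted):

	int hugetlb_prefault(struct address_space *mapping,
			     struct vm_area_struct *vma)
	{
		struct mm_struct *mm = current->mm;
		unsigned long addr;

		hugetlb_prefault_arch_hook(mm);	/* configure the DTLB first */

		for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
			/* ... find or allocate each huge page and install it
			 * with set_huge_pte_at() ...
			 */
		}
		return 0;
	}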