Fedora Core 6 1.2949 + vserver 2.2.0
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 625cbb3..00677b5 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -1,10 +1,9 @@
 /*
  * SPARC64 Huge TLB page support.
  *
- * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
  */
 
-#include <linux/config.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 
+/* Slightly simplified from the non-hugepage variant because by
+ * definition we don't have to worry about any page coloring stuff
+ */
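+/* VA_EXCLUDE_{START,END} bracket the virtual address hole the sparc64
+ * MMU leaves between the low and high halves of the address space,
+ * widened by a 4GB pad on each side so that mappings never land
+ * right up against the hole.
+ */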
+#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
+#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
+
+static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+                                                       unsigned long addr,
+                                                       unsigned long len,
+                                                       unsigned long pgoff,
+                                                       unsigned long flags)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long task_size = TASK_SIZE;
+       unsigned long start_addr;
+
+       if (test_thread_flag(TIF_32BIT))
+               task_size = STACK_TOP32;
+       if (unlikely(len >= VA_EXCLUDE_START))
+               return -ENOMEM;
+
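+       /* A request no larger than the biggest hole recorded below the
+        * cached position may fit in that hole, so restart the search
+        * from the bottom; otherwise resume where the last one ended.
+        */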
+       if (len > mm->cached_hole_size) {
+               start_addr = addr = mm->free_area_cache;
+       } else {
+               start_addr = addr = TASK_UNMAPPED_BASE;
+               mm->cached_hole_size = 0;
+       }
+
+       task_size -= len;
+
+full_search:
+       addr = ALIGN(addr, HPAGE_SIZE);
+
+       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+               /* At this point:  (!vma || addr < vma->vm_end). */
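+               /* Never let a mapping straddle the VA hole; if this
+                * one would, continue searching from its far side.
+                */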
+               if (addr < VA_EXCLUDE_START &&
+                   (addr + len) >= VA_EXCLUDE_START) {
+                       addr = VA_EXCLUDE_END;
+                       vma = find_vma(mm, VA_EXCLUDE_END);
+               }
+               if (unlikely(task_size < addr)) {
+                       if (start_addr != TASK_UNMAPPED_BASE) {
+                               start_addr = addr = TASK_UNMAPPED_BASE;
+                               mm->cached_hole_size = 0;
+                               goto full_search;
+                       }
+                       return -ENOMEM;
+               }
+               if (likely(!vma || addr + len <= vma->vm_start)) {
+                       /*
+                        * Remember the place where we stopped the search:
+                        */
+                       mm->free_area_cache = addr + len;
+                       return addr;
+               }
+               if (addr + mm->cached_hole_size < vma->vm_start)
+                       mm->cached_hole_size = vma->vm_start - addr;
+
+               addr = ALIGN(vma->vm_end, HPAGE_SIZE);
+       }
+}
+
+static unsigned long
+hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                                 const unsigned long len,
+                                 const unsigned long pgoff,
+                                 const unsigned long flags)
+{
+       struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+
+       /* This should only ever run for 32-bit processes.  */
+       BUG_ON(!test_thread_flag(TIF_32BIT));
+
+       /* check if free_area_cache is useful for us */
+       if (len <= mm->cached_hole_size) {
+               mm->cached_hole_size = 0;
+               mm->free_area_cache = mm->mmap_base;
+       }
+
+       /* Either no address was requested, or the caller found the
+        * requested hint unusable; begin at the cached search position,
+        * aligned down to a hugepage boundary.
+        */
+       addr = mm->free_area_cache & HPAGE_MASK;
+
+       /* make sure it can fit in the remaining address space */
+       if (likely(addr > len)) {
+               vma = find_vma(mm, addr-len);
+               if (!vma || addr <= vma->vm_start) {
+                       /* remember the address as a hint for next time */
+                       return (mm->free_area_cache = addr-len);
+               }
+       }
+
+       if (unlikely(mm->mmap_base < len))
+               goto bottomup;
+
+       addr = (mm->mmap_base-len) & HPAGE_MASK;
+
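+       /* Walk downward from mmap_base.  The loop condition below also
+        * keeps (vma->vm_start - len) from wrapping past zero.
+        */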
+       do {
+               /*
+                * Lookup failure means no vma is above this address,
+                * else if new region fits below vma->vm_start,
+                * return with success:
+                */
+               vma = find_vma(mm, addr);
+               if (likely(!vma || addr+len <= vma->vm_start)) {
+                       /* remember the address as a hint for next time */
+                       return (mm->free_area_cache = addr);
+               }
+
+               /* remember the largest hole we saw so far */
+               if (addr + mm->cached_hole_size < vma->vm_start)
+                       mm->cached_hole_size = vma->vm_start - addr;
+
+               /* try just below the current vma->vm_start */
+               addr = (vma->vm_start-len) & HPAGE_MASK;
+       } while (likely(len < vma->vm_start));
+
+bottomup:
+       /*
+        * A failed mmap() very likely causes application failure,
+        * so fall back to the bottom-up function here. This scenario
+        * can happen with large stack limits and large mmap()
+        * allocations.
+        */
+       mm->cached_hole_size = ~0UL;
+       mm->free_area_cache = TASK_UNMAPPED_BASE;
+       addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+       /*
+        * Restore the topdown base:
+        */
+       mm->free_area_cache = mm->mmap_base;
+       mm->cached_hole_size = ~0UL;
+
+       return addr;
+}
+
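+/* Top level entry point.  An explicit hint is honored when the aligned
+ * range at that address is free; otherwise the search is handed to the
+ * bottom-up or top-down walker, matching the allocator this mm already
+ * uses for normal mappings.
+ */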
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+               unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long task_size = TASK_SIZE;
+
+       if (test_thread_flag(TIF_32BIT))
+               task_size = STACK_TOP32;
+
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (len > task_size)
+               return -ENOMEM;
+
+       if (addr) {
+               addr = ALIGN(addr, HPAGE_SIZE);
+               vma = find_vma(mm, addr);
+               if (task_size - len >= addr &&
+                   (!vma || addr + len <= vma->vm_start))
+                       return addr;
+       }
+       if (mm->get_unmapped_area == arch_get_unmapped_area)
+               return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+                               pgoff, flags);
+       else
+               return hugetlb_get_unmapped_area_topdown(file, addr, len,
+                               pgoff, flags);
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
        pgd_t *pgd;
@@ -29,14 +197,19 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
        pmd_t *pmd;
        pte_t *pte = NULL;
 
+       /* We must align the address, because our caller will run
+        * set_huge_pte_at() on whatever we return, which writes out
+        * all of the sub-ptes for the hugepage range.  So we have
+        * to give it the first such sub-pte.
+        */
+       addr &= HPAGE_MASK;
+
        pgd = pgd_offset(mm, addr);
-       if (pgd) {
-               pud = pud_offset(pgd, addr);
-               if (pud) {
-                       pmd = pmd_alloc(mm, pud, addr);
-                       if (pmd)
-                               pte = pte_alloc_map(mm, pmd, addr);
-               }
+       pud = pud_alloc(mm, pgd, addr);
+       if (pud) {
+               pmd = pmd_alloc(mm, pud, addr);
+               if (pmd)
+                       pte = pte_alloc_map(mm, pmd, addr);
        }
        return pte;
 }
@@ -48,25 +221,34 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
        pmd_t *pmd;
        pte_t *pte = NULL;
 
+       addr &= HPAGE_MASK;
+
        pgd = pgd_offset(mm, addr);
-       if (pgd) {
+       if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
-               if (pud) {
+               if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
-                       if (pmd)
+                       if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
 }
 
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
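+/* sparc64 does not implement hugepage page table sharing, so there is
+ * never anything to unshare; returning zero tells the generic hugetlb
+ * code that no entry was torn down.
+ */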
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
 {
        int i;
 
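+       /* Track how many huge PTE mappings this address space holds;
+        * the sparc64 fault path consults the count when sizing the
+        * huge page TSB.
+        */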
+       if (!pte_present(*ptep) && pte_present(entry))
+               mm->context.huge_pte_count++;
+
+       addr &= HPAGE_MASK;
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, ptep, entry);
                ptep++;
@@ -82,6 +264,10 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
        int i;
 
        entry = *ptep;
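+       /* Mirror the huge_pte_count accounting in set_huge_pte_at().  */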
+       if (pte_present(entry))
+               mm->context.huge_pte_count--;
+
+       addr &= HPAGE_MASK;
 
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                pte_clear(mm, addr, ptep);
@@ -92,18 +278,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
        return entry;
 }
 
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
-       if (len & ~HPAGE_MASK)
-               return -EINVAL;
-       if (addr & ~HPAGE_MASK)
-               return -EINVAL;
-       return 0;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
 {
@@ -131,6 +305,15 @@ static void context_reload(void *__data)
 
 void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
+       struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];
+
+       if (likely(tp->tsb != NULL))
+               return;
+
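+       /* First hugepage use in this address space: allocate a minimal
+        * huge page TSB, install it on this cpu, and sync it out to the
+        * other cpus currently running this mm.
+        */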
+       tsb_grow(mm, MM_TSB_HUGE, 0);
+       tsb_context_switch(mm);
+       smp_tsb_sync(mm);
+
        /* On UltraSPARC-III+ and later, configure the second half of
         * the Data-TLB for huge pages.
         */