vserver 1.9.3
[linux-2.6.git] / arch / ppc64 / mm / hugetlbpage.c
index 403c79b..7e1a9bf 100644 (file)
@@ -530,6 +530,108 @@ full_search:
        return -ENOMEM;
 }
 
+/*
+ * This mmap-allocator allocates new areas top-down from below the
+ * stack's low limit (the base):
+ *
+ * Because we have an exclusive hugepage region which lies within the
+ * normal user address space, we have to take special measures to make
+ * non-huge mmap()s evade the hugepage reserved regions.
+ */
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                         const unsigned long len, const unsigned long pgoff,
+                         const unsigned long flags)
+{
+       struct vm_area_struct *vma, *prev_vma;
+       struct mm_struct *mm = current->mm;
+       unsigned long base = mm->mmap_base, addr = addr0;
+       int first_time = 1;     /* permits one retry from the topdown base */
+
+       /* requested length too big for entire address space */
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       /* dont allow allocations above current base */
+       if (mm->free_area_cache > base)
+               mm->free_area_cache = base;
+
+       /* requesting a specific address */
+       if (addr) {
+               addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+               /* honour the hint only if it fits below TASK_SIZE, does
+                * not collide with an existing vma, and avoids the
+                * hugepage-only ranges */
+               if (TASK_SIZE - len >= addr &&
+                               (!vma || addr + len <= vma->vm_start)
+                               && !is_hugepage_only_range(addr,len))
+                       return addr;
+       }
+
+try_again:
+       /* make sure it can fit in the remaining address space */
+       if (mm->free_area_cache < len)
+               goto fail;
+
+       /* either no address requested or cant fit in requested address hole */
+       addr = (mm->free_area_cache - len) & PAGE_MASK;
+       do {
+hugepage_recheck:
+               /*
+                * Push the candidate down past any reserved hugepage
+                * range it overlaps.  The low-range case loops back via
+                * hugepage_recheck because the new candidate (segment
+                * base - len) may itself overlap another reserved
+                * segment.
+                */
+               if (touches_hugepage_low_range(addr, len)) {
+                       /* round down to the start of the current
+                        * (1 << SID_SHIFT)-sized segment, then back off
+                        * by len */
+                       addr = (addr & ((~0) << SID_SHIFT)) - len;
+                       goto hugepage_recheck;
+               } else if (touches_hugepage_high_range(addr, len)) {
+                       /* NOTE(review): no re-check after this step --
+                        * presumably TASK_HPAGE_BASE - len can never
+                        * land back inside the low hugepage range;
+                        * confirm against the ppc64 address-space
+                        * layout */
+                       addr = TASK_HPAGE_BASE - len;
+               }
+
+               /*
+                * Lookup failure means no vma is above this address,
+                * i.e. return with success:
+                */
+               if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+                       return addr;
+
+               /*
+                * new region fits between prev_vma->vm_end and
+                * vma->vm_start, use it:
+                */
+               if (addr+len <= vma->vm_start &&
+                               (!prev_vma || (addr >= prev_vma->vm_end)))
+                       /* remember the address as a hint for next time */
+                       return (mm->free_area_cache = addr);
+               else
+                       /* pull free_area_cache down to the first hole */
+                       if (mm->free_area_cache == vma->vm_end)
+                               mm->free_area_cache = vma->vm_start;
+
+               /* try just below the current vma->vm_start */
+               addr = vma->vm_start-len;
+       } while (len <= vma->vm_start); /* stop once no room remains below vma */
+
+fail:
+       /*
+        * if hint left us with no space for the requested
+        * mapping then try again:
+        */
+       if (first_time) {
+               mm->free_area_cache = base;
+               first_time = 0;
+               goto try_again;
+       }
+       /*
+        * A failed mmap() very likely causes application failure,
+        * so fall back to the bottom-up function here. This scenario
+        * can happen with large stack limits and large mmap()
+        * allocations.
+        */
+       mm->free_area_cache = TASK_UNMAPPED_BASE;
+       addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+       /*
+        * Restore the topdown base:
+        */
+       mm->free_area_cache = base;
+
+       return addr;
+}
+
 static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
 {
        unsigned long addr = 0;
@@ -754,7 +856,7 @@ static void flush_hash_hugepage(mm_context_t context, unsigned long ea,
        vsid = get_vsid(context.id, ea);
 
        va = (vsid << 28) | (ea & 0x0fffffff);
-       vpn = va >> LARGE_PAGE_SHIFT;
+       vpn = va >> HPAGE_SHIFT;
        hash = hpt_hash(vpn, 1);
        if (hugepte_val(pte) & _HUGEPAGE_SECONDARY)
                hash = ~hash;