#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
{
struct file *file = vma->vm_file;
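+ /* the cleanup below can sleep (fput, vm_ops->close): catch atomic callers */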
+ might_sleep();
if (file) {
struct address_space *mapping = file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
struct vm_area_struct *prev, struct rb_node **rb_link,
struct rb_node *rb_parent)
{
- vma_prio_tree_init(vma);
__vma_link_list(mm, vma, prev, rb_parent);
__vma_link_rb(mm, vma, rb_link, rb_parent);
- __vma_link_file(vma);
__anon_vma_link(vma);
}
if (mapping)
spin_lock(&mapping->i_mmap_lock);
anon_vma_lock(vma);
+
__vma_link(mm, vma, prev, rb_link, rb_parent);
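+ /* file linking is done here, under i_mmap_lock, not in __vma_link() */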
+ __vma_link_file(vma);
+
anon_vma_unlock(vma);
if (mapping)
spin_unlock(&mapping->i_mmap_lock);
- mark_mm_hugetlb(mm, vma);
mm->map_count++;
validate_mm(mm);
}
/*
- * Insert vm structure into process list sorted by address and into the
- * inode's i_mmap tree. The caller should hold mm->mmap_sem and
- * ->f_mappping->i_mmap_lock if vm_file is non-NULL.
+ * Helper for vma_adjust in the split_vma insert case:
+ * insert vm structure into list and rbtree and anon_vma,
+ * but it has already been inserted into prio_tree earlier.
*/
static void
__insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
if (__vma && __vma->vm_start < vma->vm_end)
BUG();
__vma_link(mm, vma, prev, rb_link, rb_parent);
- mark_mm_hugetlb(mm, vma);
mm->map_count++;
- validate_mm(mm);
}
-static inline void __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev)
+static inline void
+__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
+ struct vm_area_struct *prev)
{
prev->vm_next = vma->vm_next;
rb_erase(&vma->vm_rb, &mm->mm_rb);
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next = vma->vm_next;
+ struct vm_area_struct *importer = NULL;
struct address_space *mapping = NULL;
struct prio_tree_root *root = NULL;
struct file *file = vma->vm_file;
if (next && !insert) {
if (end >= next->vm_end) {
+ /*
+ * vma expands, overlapping all the next, and
+ * perhaps the one after too (mprotect case 6).
+ */
again: remove_next = 1 + (end > next->vm_end);
end = next->vm_end;
anon_vma = next->anon_vma;
- } else if (end < vma->vm_end || end > next->vm_start) {
+ } else if (end > next->vm_start) {
/*
- * vma shrinks, and !insert tells it's not
- * split_vma inserting another: so it must
- * be mprotect shifting the boundary down.
- * Or:
* vma expands, overlapping part of the next:
- * must be mprotect shifting the boundary up.
+ * mprotect case 5 shifting the boundary up.
*/
- BUG_ON(vma->vm_end != next->vm_start);
- adjust_next = end - next->vm_start;
+ adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
anon_vma = next->anon_vma;
+ importer = vma;
+ } else if (end < vma->vm_end) {
+ /*
+ * vma shrinks, and !insert tells it's not
+ * split_vma inserting another: so it must be
+ * mprotect case 4 shifting the boundary down.
+ */
+ adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
+ anon_vma = next->anon_vma;
+ importer = next;
}
}
if (!(vma->vm_flags & VM_NONLINEAR))
root = &mapping->i_mmap;
spin_lock(&mapping->i_mmap_lock);
+ if (insert) {
+ /*
+ * Put into prio_tree now, so instantiated pages
+ * are visible to arm/parisc __flush_dcache_page
+ * throughout; but we cannot insert into address
+ * space until vma start or end is updated.
+ */
+ __vma_link_file(insert);
+ }
}
/*
*/
if (vma->anon_vma)
anon_vma = vma->anon_vma;
- if (anon_vma)
+ if (anon_vma) {
spin_lock(&anon_vma->lock);
+ /*
+ * Easily overlooked: when mprotect shifts the boundary,
+ * make sure the expanding vma has anon_vma set if the
+ * shrinking vma had, to cover any anon pages imported.
+ */
+ if (importer && !importer->anon_vma) {
+ importer->anon_vma = anon_vma;
+ __anon_vma_link(importer);
+ }
+ }
if (root) {
flush_dcache_mmap_lock(mapping);
vma->vm_end = end;
vma->vm_pgoff = pgoff;
if (adjust_next) {
- next->vm_start += adjust_next;
- next->vm_pgoff += adjust_next >> PAGE_SHIFT;
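+ /* adjust_next is in pages: shift for vm_start, add directly to vm_pgoff */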
+ next->vm_start += adjust_next << PAGE_SHIFT;
+ next->vm_pgoff += adjust_next;
}
if (root) {
* The caller must hold down_write(current->mm->mmap_sem).
*/
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
- unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long pgoff)
+unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file * file,
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long pgoff)
{
- struct mm_struct * mm = current->mm;
struct vm_area_struct * vma, * prev;
struct inode *inode;
unsigned int vm_flags;
int accountable = 1;
unsigned long charged = 0;
+ /*
+ * Does the application expect PROT_READ to imply PROT_EXEC?
+ */
+ if (unlikely((prot & PROT_READ) &&
+ (current->personality & READ_IMPLIES_EXEC)))
+ prot |= PROT_EXEC;
+
if (file) {
if (is_file_hugepages(file))
accountable = 0;
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
- addr = get_unmapped_area(file, addr, len, pgoff, flags, prot & PROT_EXEC);
+ addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
if (addr & ~PAGE_MASK)
return addr;
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
if (flags & MAP_LOCKED) {
- if (!capable(CAP_IPC_LOCK))
+ if (!can_do_mlock())
return -EPERM;
vm_flags |= VM_LOCKED;
}
/* mlock MCL_FUTURE? */
if (vm_flags & VM_LOCKED) {
- unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ unsigned long locked, lock_limit;
+ locked = mm->locked_vm << PAGE_SHIFT;
+ lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
locked += len;
- if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
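+ /* CAP_IPC_LOCK may lock beyond the RLIMIT_MEMLOCK limit */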
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
* This function "knows" that -ENOMEM has the bits set.
*/
#ifndef HAVE_ARCH_UNMAPPED_AREA
-static inline unsigned long
+unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
+ unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
addr = vma->vm_end;
}
}
-#else
-extern unsigned long
-arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long, unsigned long);
#endif
+void arch_unmap_area(struct vm_area_struct *area)
+{
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+ if (area->vm_start >= TASK_UNMAPPED_BASE &&
+ area->vm_start < area->vm_mm->free_area_cache)
+ area->vm_mm->free_area_cache = area->vm_start;
+}
+
+/*
+ * This mmap-allocator allocates new areas top-down from below the
+ * stack's low limit (the base):
+ */
unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags, unsigned long exec)
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ const unsigned long len, const unsigned long pgoff,
+ const unsigned long flags)
+{
+ struct vm_area_struct *vma, *prev_vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long base = mm->mmap_base, addr = addr0;
+ int first_time = 1;
+
+ /* requested length too big for entire address space */
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ /* don't allow allocations above the current base */
+ if (mm->free_area_cache > base)
+ mm->free_area_cache = base;
+
+ /* requesting a specific address */
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+
+try_again:
+ /* make sure it can fit in the remaining address space */
+ if (mm->free_area_cache < len)
+ goto fail;
+
+ /* either no address requested or can't fit in the requested address hole */
+ addr = (mm->free_area_cache - len) & PAGE_MASK;
+ do {
+ /*
+ * Lookup failure means no vma is above this address,
+ * i.e. return with success:
+ */
+ if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+ return addr;
+
+ /*
+ * new region fits between prev_vma->vm_end and
+ * vma->vm_start, use it:
+ */
+ if (addr+len <= vma->vm_start &&
+ (!prev_vma || (addr >= prev_vma->vm_end)))
+ /* remember the address as a hint for next time */
+ return (mm->free_area_cache = addr);
+ else
+ /* pull free_area_cache down to the first hole */
+ if (mm->free_area_cache == vma->vm_end)
+ mm->free_area_cache = vma->vm_start;
+
+ /* try just below the current vma->vm_start */
+ addr = vma->vm_start-len;
+ } while (len <= vma->vm_start);
+
+fail:
+ /*
+ * if hint left us with no space for the requested
+ * mapping then try again:
+ */
+ if (first_time) {
+ mm->free_area_cache = base;
+ first_time = 0;
+ goto try_again;
+ }
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+ /*
+ * Restore the topdown base:
+ */
+ mm->free_area_cache = base;
+
+ return addr;
+}
+
+void arch_unmap_area_topdown(struct vm_area_struct *area)
+{
+ /*
+ * Is this a new hole at the highest possible address?
+ */
+ if (area->vm_end > area->vm_mm->free_area_cache)
+ area->vm_mm->free_area_cache = area->vm_end;
+}
+
+
+unsigned long
+get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags, int exec)
{
if (flags & MAP_FIXED) {
unsigned long ret;
return file->f_op->get_unmapped_area(file, addr, len,
pgoff, flags);
- return arch_get_unmapped_area(file, addr, len, pgoff, flags, exec);
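+ /* PROT_EXEC requests go through the exec-area allocator when the mm provides one */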
+ if (exec && current->mm->get_unmapped_exec_area)
+ return current->mm->get_unmapped_exec_area(file, addr, len, pgoff, flags);
+ else
+ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
+EXPORT_SYMBOL(get_unmapped_area_prot);
+
+
+#define SHLIB_BASE 0x00111000
+
+unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
+ unsigned long len0, unsigned long pgoff, unsigned long flags)
+{
+ unsigned long addr = addr0, len = len0;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long tmp;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
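+ /* no hint given: start from a randomized base in the low shared-lib window */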
+ if (!addr && !(flags & MAP_FIXED))
+ addr = randomize_range(SHLIB_BASE, 0x01000000, len);
+
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start)) {
+ return addr;
+ }
+ }
+
+ addr = SHLIB_BASE;
+
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr) {
+ return -ENOMEM;
+ }
+ if (!vma || addr + len <= vma->vm_start) {
+ /*
+ * Must not let a PROT_EXEC mapping get into the
+ * brk area:
+ */
+ if (addr + len > mm->brk)
+ goto failed;
+
+ /*
+ * Up until the brk area we randomize addresses
+ * as much as possible:
+ */
+ if (addr >= 0x01000000) {
+ tmp = randomize_range(0x01000000, mm->brk, len);
+ vma = find_vma(mm, tmp);
+ if (TASK_SIZE - len >= tmp &&
+ (!vma || tmp + len <= vma->vm_start))
+ return tmp;
+ }
+ /*
+ * Ok, randomization didn't work out - return
+ * the result of the linear search:
+ */
+ return addr;
+ }
+ addr = vma->vm_end;
+ }
+
+failed:
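+ /* would run into the brk area: fall back to the mm's default allocator */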
+ return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
}
-EXPORT_SYMBOL(get_unmapped_area);
+
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
address &= PAGE_MASK;
grow = (address - vma->vm_end) >> PAGE_SHIFT;
- /* Overcommit.. */
- if (security_vm_enough_memory(grow) ||
- !vx_vmpages_avail(vma->vm_mm, grow)) {
+ /* Overcommit.. vx check first to avoid vm_unacct_memory() */
+ if (!vx_vmpages_avail(vma->vm_mm, grow) ||
+ security_vm_enough_memory(grow)) {
anon_vma_unlock(vma);
return -ENOMEM;
}
vm_unacct_memory(grow);
return -ENOMEM;
}
-
vma->vm_end = address;
// vma->vm_mm->total_vm += grow;
vx_vmpages_add(vma->vm_mm, grow);
address &= PAGE_MASK;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
- /* Overcommit.. */
- if (security_vm_enough_memory(grow) ||
- !vx_vmpages_avail(vma->vm_mm, grow)) {
+ /* Overcommit.. vx check first to avoid vm_unacct_memory() */
+ if (!vx_vmpages_avail(vma->vm_mm, grow) ||
+ security_vm_enough_memory(grow)) {
anon_vma_unlock(vma);
return -ENOMEM;
}
vm_unacct_memory(grow);
return -ENOMEM;
}
-
vma->vm_start = address;
vma->vm_pgoff -= grow;
// vma->vm_mm->total_vm += grow;
if (area->vm_flags & VM_LOCKED)
// area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
- /*
- * Is this a new hole at the lowest possible address?
- */
- if (area->vm_start >= TASK_UNMAPPED_BASE &&
- area->vm_start < area->vm_mm->free_area_cache)
- area->vm_mm->free_area_cache = area->vm_start;
- /*
- * Is this a new hole at the highest possible address?
- */
- if (area->vm_start > area->vm_mm->non_executable_cache)
- area->vm_mm->non_executable_cache = area->vm_start;
-
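+ /* per-mm hook (arch_unmap_area or the topdown variant) updates free_area_cache */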
+ area->vm_mm->unmap_area(area);
remove_vm_struct(area);
- if (unlikely(area->vm_flags & VM_EXEC))
- arch_remove_exec_range(mm, area->vm_end);
}
/*
/* most fields are the same, copy all, and then fixup */
*new = *vma;
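+ /* reinitialize the prio_tree links copied from the old vma */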
+ vma_prio_tree_init(new);
- if (new_below) {
- if (vma->vm_flags & VM_EXEC)
- arch_remove_exec_range(mm, new->vm_end);
+ if (new_below)
new->vm_end = addr;
- }
else {
new->vm_start = addr;
new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
- if (new_below)
+ if (new_below) {
+ unsigned long old_end = vma->vm_end;
+
vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
((addr - new->vm_start) >> PAGE_SHIFT), new);
- else
+ if (vma->vm_flags & VM_EXEC)
+ arch_remove_exec_range(mm, old_end);
+ } else
vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
return 0;
* mlock MCL_FUTURE?
*/
if (mm->def_flags & VM_LOCKED) {
- unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ unsigned long locked, lock_limit;
+ locked = mm->locked_vm << PAGE_SHIFT;
+ lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
locked += len;
- if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
- /* vserver checks ? */
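+ /* vserver: make sure the context may lock this many more pages */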
+ if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT))
+ return -ENOMEM;
}
/*
new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (new_vma) {
*new_vma = *vma;
+ vma_prio_tree_init(new_vma);
pol = mpol_copy(vma_policy(vma));
if (IS_ERR(pol)) {
kmem_cache_free(vm_area_cachep, new_vma);