#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
+#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
-#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
+#ifndef arch_mmap_check
+#define arch_mmap_check(addr, len, flags) (0)
+#endif
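+/*
+ * (fallback: architectures that need to veto particular addr/len/flags
+ * combinations supply their own arch_mmap_check(); this default
+ * accepts everything)
+ */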
+
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
-int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
atomic_t vm_committed_space = ATOMIC_INIT(0);
/* Don't let a single process grow too big:
   leave 3% of the size of this process for other processes */
allowed -= current->mm->total_vm / 32;
- if (atomic_read(&vm_committed_space) < allowed)
+ /*
+ * cast `allowed' as a signed long because vm_committed_space
+ * sometimes has a negative value
+ */
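+	/*
+	 * (without the cast a negative value would be promoted to a
+	 * huge unsigned long, and the test would wrongly fall through
+	 * to -ENOMEM despite plenty of headroom)
+	 */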
+ if (atomic_read(&vm_committed_space) < (long)allowed)
return 0;
vm_unacct_memory(pages);
return -ENOMEM;
}
-EXPORT_SYMBOL(sysctl_overcommit_memory);
-EXPORT_SYMBOL(sysctl_overcommit_ratio);
-EXPORT_SYMBOL(sysctl_max_map_count);
-EXPORT_SYMBOL(vm_committed_space);
EXPORT_SYMBOL(__vm_enough_memory);
/*
- * Remove one vm structure and free it.
+ * Unlink a file-based vm structure from its prio_tree, to hide
+ * vma from rmap and vmtruncate before freeing its page tables.
*/
-static void remove_vm_struct(struct vm_area_struct *vma)
+void unlink_file_vma(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
- might_sleep();
if (file) {
struct address_space *mapping = file->f_mapping;
spin_lock(&mapping->i_mmap_lock);
__remove_shared_vm_struct(vma, file, mapping);
spin_unlock(&mapping->i_mmap_lock);
}
+}
+
+/*
+ * Close a vm structure and free it, returning the next.
+ */
+static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+{
+ struct vm_area_struct *next = vma->vm_next;
+
+ might_sleep();
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
- if (file)
- fput(file);
- anon_vma_unlink(vma);
+ if (vma->vm_file)
+ fput(vma->vm_file);
mpol_free(vma_policy(vma));
kmem_cache_free(vm_area_cachep, vma);
+ return next;
}
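+
+/*
+ * (returning the next vma lets callers tear down a whole list with
+ * "while (vma) vma = remove_vma(vma);", as exit_mmap() does below)
+ */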
-/*
- * sys_brk() for the most part doesn't need the global kernel
- * lock, except when an application is doing something nasty
- * like trying to un-brk an area that has already been mapped
- * to a regular file. in this case, the unmapping will need
- * to invoke file system routines that need the global lock.
- */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
unsigned long rlim, retval;
__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node *rb_parent)
{
- if (vma->vm_flags & VM_EXEC)
- arch_add_exec_range(mm, vma->vm_end);
if (prev) {
vma->vm_next = prev->vm_next;
prev->vm_next = vma;
rb_erase(&vma->vm_rb, &mm->mm_rb);
if (mm->mmap_cache == vma)
mm->mmap_cache = prev;
- if (vma->vm_flags & VM_EXEC)
- arch_remove_exec_range(mm, vma->vm_end);
}
/*
* If the vma has a ->close operation then the driver probably needs to release
* per-vma resources, so we don't attempt to merge those.
*/
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags)
} else /* cases 2, 5, 7 */
vma_adjust(prev, prev->vm_start,
end, prev->vm_pgoff, NULL);
- if (prev->vm_flags & VM_EXEC)
- arch_add_exec_range(mm, prev->vm_end);
return prev;
}
}
#ifdef CONFIG_PROC_FS
-void __vm_stat_account(struct mm_struct *mm, unsigned long flags,
+void vm_stat_account(struct mm_struct *mm, unsigned long flags,
struct file *file, long pages)
{
const unsigned long stack_flags
if (!len)
return -EINVAL;
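+	/*
+	 * (let the architecture veto addr/len/flags before any further
+	 * work; the generic arch_mmap_check() stub above accepts
+	 * everything)
+	 */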
+ error = arch_mmap_check(addr, len, flags);
+ if (error)
+ return error;
+
/* Careful about overflows.. */
len = PAGE_ALIGN(len);
if (!len || len > TASK_SIZE)
/* Obtain the address to map to. we verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
- addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
+ addr = get_unmapped_area(file, addr, len, pgoff, flags);
if (addr & ~PAGE_MASK)
return addr;
if (!may_expand_vm(mm, len >> PAGE_SHIFT))
return -ENOMEM;
- /* check context space, maybe only Private writable mapping? */
- if (!vx_vmpages_avail(mm, len >> PAGE_SHIFT))
- return -ENOMEM;
-
if (accountable && (!(flags & MAP_NORESERVE) ||
sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
if (vm_flags & VM_SHARED) {
}
out:
vx_vmpages_add(mm, len >> PAGE_SHIFT);
- __vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
+ vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
vx_vmlocked_add(mm, len >> PAGE_SHIFT);
make_pages_present(addr, addr + len);
(!vma || addr + len <= vma->vm_start))
return addr;
}
- start_addr = addr = mm->free_area_cache;
+ if (len > mm->cached_hole_size) {
+ start_addr = addr = mm->free_area_cache;
+ } else {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
+ }
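+	/*
+	 * (if the largest hole seen on a previous scan would already
+	 * fit this request, rescan from the bottom and reuse it rather
+	 * than pushing the mapping ever higher)
+	 */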
full_search:
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
* some holes.
*/
if (start_addr != TASK_UNMAPPED_BASE) {
- start_addr = addr = TASK_UNMAPPED_BASE;
+ addr = TASK_UNMAPPED_BASE;
+ start_addr = addr;
+ mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
mm->free_area_cache = addr + len;
return addr;
}
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
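+		/*
+		 * (the gap [addr, vma->vm_start) was too small for this
+		 * request; remember the largest such gap so a smaller
+		 * request can restart the scan and reuse it)
+		 */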
addr = vma->vm_end;
}
}
#endif
-void arch_unmap_area(struct vm_area_struct *area)
+void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
/*
* Is this a new hole at the lowest possible address?
*/
- if (area->vm_start >= TASK_UNMAPPED_BASE &&
- area->vm_start < area->vm_mm->free_area_cache)
- area->vm_mm->free_area_cache = area->vm_start;
+ if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
+ mm->free_area_cache = addr;
+ mm->cached_hole_size = ~0UL;
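+		/*
+		 * (~0UL invalidates the hole cache: every request then
+		 * takes the restart path and re-measures the holes from
+		 * TASK_UNMAPPED_BASE)
+		 */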
+ }
}
/*
return addr;
}
+ /* check if free_area_cache is useful for us */
+ if (len <= mm->cached_hole_size) {
+ mm->cached_hole_size = 0;
+ mm->free_area_cache = mm->mmap_base;
+ }
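+	/*
+	 * (a hole at least this large was skipped on an earlier scan;
+	 * restart from mmap_base and re-measure)
+	 */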
+
/* either no address requested or can't fit in requested address hole */
addr = mm->free_area_cache;
return (mm->free_area_cache = addr-len);
}
+ if (mm->mmap_base < len)
+ goto bottomup;
+
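+	/* (mmap_base - len below would underflow for len > mmap_base) */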
addr = mm->mmap_base-len;
do {
/* remember the address as a hint for next time */
return (mm->free_area_cache = addr);
+ /* remember the largest hole we saw so far */
+ if (addr + mm->cached_hole_size < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
/* try just below the current vma->vm_start */
addr = vma->vm_start-len;
} while (len < vma->vm_start);
+bottomup:
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
- mm->free_area_cache = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = ~0UL;
+ mm->free_area_cache = TASK_UNMAPPED_BASE;
addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
/*
* Restore the topdown base:
*/
mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
return addr;
}
#endif
-void arch_unmap_area_topdown(struct vm_area_struct *area)
+void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
/*
* Is this a new hole at the highest possible address?
*/
- if (area->vm_end > area->vm_mm->free_area_cache)
- area->vm_mm->free_area_cache = area->vm_end;
+ if (addr > mm->free_area_cache)
+ mm->free_area_cache = addr;
/* dont allow allocations above current base */
- if (area->vm_mm->free_area_cache > area->vm_mm->mmap_base)
- area->vm_mm->free_area_cache = area->vm_mm->mmap_base;
+ if (mm->free_area_cache > mm->mmap_base)
+ mm->free_area_cache = mm->mmap_base;
}
-
unsigned long
-get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags, int exec)
+get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
{
unsigned long ret;
if (!(flags & MAP_FIXED)) {
unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
- if (exec && current->mm->get_unmapped_exec_area)
- get_area = current->mm->get_unmapped_exec_area;
- else
- get_area = current->mm->get_unmapped_area;
-
+ get_area = current->mm->get_unmapped_area;
if (file && file->f_op && file->f_op->get_unmapped_area)
get_area = file->f_op->get_unmapped_area;
addr = get_area(file, addr, len, pgoff, flags);
return addr;
}
-EXPORT_SYMBOL(get_unmapped_area_prot);
-
-#define SHLIB_BASE 0x00111000
-
-unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
- unsigned long len0, unsigned long pgoff, unsigned long flags)
-{
- unsigned long addr = addr0, len = len0;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long tmp;
-
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (!addr && !(flags & MAP_FIXED))
- addr = randomize_range(SHLIB_BASE, 0x01000000, len);
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start)) {
- return addr;
- }
- }
-
- addr = SHLIB_BASE;
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
-
- if (!vma || addr + len <= vma->vm_start) {
- /*
- * Must not let a PROT_EXEC mapping get into the
- * brk area:
- */
- if (addr + len > mm->brk)
- goto failed;
-
- /*
- * Up until the brk area we randomize addresses
- * as much as possible:
- */
- if (addr >= 0x01000000) {
- tmp = randomize_range(0x01000000, PAGE_ALIGN(max(mm->start_brk, 0x08000000)), len);
- vma = find_vma(mm, tmp);
- if (TASK_SIZE - len >= tmp &&
- (!vma || tmp + len <= vma->vm_start))
- return tmp;
- }
- /*
- * Ok, randomization didnt work out - return
- * the result of the linear search:
- */
- return addr;
- }
- addr = vma->vm_end;
- }
-
-failed:
- return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
-}
-
+EXPORT_SYMBOL(get_unmapped_area);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
return prev ? prev->vm_next : vma;
}
-static int over_stack_limit(unsigned long sz)
-{
- if (sz < EXEC_STACK_BIAS)
- return 0;
- return (sz - EXEC_STACK_BIAS) >
- current->signal->rlim[RLIMIT_STACK].rlim_cur;
-}
-
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
return -ENOMEM;
/* Stack limit test */
- if (over_stack_limit(size))
+ if (size > rlim[RLIMIT_STACK].rlim_cur)
return -ENOMEM;
/* mlock limit tests */
return -ENOMEM;
}
- if (!vx_vmpages_avail(vma->vm_mm, grow))
- return -ENOMEM;
-
/*
* Overcommit.. This must be the final test, as it will
* update security statistics.
vx_vmpages_add(mm, grow);
if (vma->vm_flags & VM_LOCKED)
vx_vmlocked_add(mm, grow);
- __vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
+ vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
return 0;
}
-#ifdef CONFIG_STACK_GROWSUP
+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
/*
- * vma is the first one with address > vma->vm_end. Have to extend vma.
+ * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
+ * vma is the last one with address > vma->vm_end. Have to extend vma.
*/
-int expand_stack(struct vm_area_struct * vma, unsigned long address)
+#ifndef CONFIG_IA64
+static inline
+#endif
+int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
int error;
anon_vma_unlock(vma);
return error;
}
+#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
+
+#ifdef CONFIG_STACK_GROWSUP
+int expand_stack(struct vm_area_struct *vma, unsigned long address)
+{
+ return expand_upwards(vma, address);
+}
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
}
#endif
-/* Normal function to fix up a mapping
- * This function is the default for when an area has no specific
- * function. This may be used as part of a more specific routine.
- *
- * By the time this function is called, the area struct has been
- * removed from the process mapping list.
- */
-static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
-{
- size_t len = area->vm_end - area->vm_start;
-
- vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
-
- if (area->vm_flags & VM_LOCKED)
- vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
- vm_stat_unaccount(area);
- area->vm_mm->unmap_area(area);
- remove_vm_struct(area);
-}
-
/*
- * Update the VMA and inode share lists.
- *
- * Ok - we have the memory areas we should free on the 'free' list,
+ * Ok - we have the memory areas we should free on the vma list,
* so release them, and do the vma updates.
+ *
+ * Called with the mm semaphore held.
*/
-static void unmap_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
+ /* Update high watermark before we lower total_vm */
+ update_hiwater_vm(mm);
do {
- struct vm_area_struct *next = vma->vm_next;
- unmap_vma(mm, vma);
- vma = next;
+ long nrpages = vma_pages(vma);
+
+ vx_vmpages_sub(mm, nrpages);
+ if (vma->vm_flags & VM_LOCKED)
+ vx_vmlocked_sub(mm, nrpages);
+ vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
+ vma = remove_vma(vma);
} while (vma);
validate_mm(mm);
}
/*
* Get rid of page table information in the indicated region.
*
- * Called with the page table lock held.
+ * Called with the mm semaphore held.
*/
static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long nr_accounted = 0;
lru_add_drain();
- spin_lock(&mm->page_table_lock);
tlb = tlb_gather_mmu(mm, 0);
- unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
+ update_hiwater_rss(mm);
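+	/* (sample the rss high-water mark before unmapping lowers rss) */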
+ unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
vm_unacct_memory(nr_accounted);
free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
next? next->vm_start: 0);
tlb_finish_mmu(tlb, start, end);
- spin_unlock(&mm->page_table_lock);
}
/*
{
struct vm_area_struct **insertion_point;
struct vm_area_struct *tail_vma = NULL;
+ unsigned long addr;
insertion_point = (prev ? &prev->vm_next : &mm->mmap);
do {
} while (vma && vma->vm_start < end);
*insertion_point = vma;
tail_vma->vm_next = NULL;
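+	/*
+	 * (the bottom-up allocator cares about the low edge of the new
+	 * hole, the end of the previous vma; topdown cares about the
+	 * high edge, the start of the following vma)
+	 */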
+ if (mm->unmap_area == arch_unmap_area)
+ addr = prev ? prev->vm_end : mm->mmap_base;
+ else
+ addr = vma ? vma->vm_start : mm->mmap_base;
+ mm->unmap_area(mm, addr);
mm->mmap_cache = NULL; /* Kill the cache. */
}
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
- if (new_below) {
- unsigned long old_end = vma->vm_end;
-
+ if (new_below)
vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
((addr - new->vm_start) >> PAGE_SHIFT), new);
- if (vma->vm_flags & VM_EXEC)
- arch_remove_exec_range(mm, old_end);
- } else
+ else
vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
return 0;
unmap_region(mm, vma, prev, start, end);
/* Fix up all other VM information */
- unmap_vma_list(mm, vma);
+ remove_vma_list(mm, vma);
return 0;
}
static inline void verify_mm_writelocked(struct mm_struct *mm)
{
-#ifdef CONFIG_DEBUG_KERNEL
+#ifdef CONFIG_DEBUG_VM
if (unlikely(down_read_trylock(&mm->mmap_sem))) {
WARN_ON(1);
up_read(&mm->mmap_sem);
unsigned long flags;
struct rb_node ** rb_link, * rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
+ int error;
len = PAGE_ALIGN(len);
if (!len)
if ((addr + len) > TASK_SIZE || (addr + len) < addr)
return -EINVAL;
+ flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
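+	/*
+	 * (flags is computed before the arch check so that
+	 * arch_mmap_check() sees the final VM_* bits, including
+	 * mm->def_flags)
+	 */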
+ error = arch_mmap_check(addr, len, flags);
+ if (error)
+ return error;
+
/*
* mlock MCL_FUTURE?
*/
!vx_vmpages_avail(mm, len >> PAGE_SHIFT))
return -ENOMEM;
- flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-
/* Can we just expand an old private anonymous mapping? */
if (vma_merge(mm, prev, addr, addr + len, flags,
NULL, NULL, pgoff, NULL))
unsigned long end;
lru_add_drain();
-
- spin_lock(&mm->page_table_lock);
-
flush_cache_mm(mm);
tlb = tlb_gather_mmu(mm, 1);
+ /* Don't update_hiwater_rss(mm) here, do_exit already did */
/* Use -1 here to ensure all VMAs in the mm are unmapped */
- end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+ end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
vm_unacct_memory(nr_accounted);
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
tlb_finish_mmu(tlb, 0, end);
- mm->mmap = mm->mmap_cache = NULL;
- mm->mm_rb = RB_ROOT;
- set_mm_counter(mm, rss, 0);
+ set_mm_counter(mm, file_rss, 0);
+ set_mm_counter(mm, anon_rss, 0);
vx_vmpages_sub(mm, mm->total_vm);
vx_vmlocked_sub(mm, mm->locked_vm);
- arch_flush_exec_range(mm);
-
- spin_unlock(&mm->page_table_lock);
/*
- * Walk the list again, actually closing and freeing it
- * without holding any MM locks.
+ * Walk the list again, actually closing and freeing it,
+ * with preemption enabled, without holding any MM locks.
*/
- while (vma) {
- struct vm_area_struct *next = vma->vm_next;
- remove_vm_struct(vma);
- vma = next;
- }
+ while (vma)
+ vma = remove_vma(vma);
BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
}
__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
if (__vma && __vma->vm_start < vma->vm_end)
return -ENOMEM;
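+	/*
+	 * (a vma inserted directly, bypassing mmap, must still be
+	 * charged against the overcommit accounting when it carries
+	 * VM_ACCOUNT)
+	 */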
+ if ((vma->vm_flags & VM_ACCOUNT) &&
+ (security_vm_enough_memory(vma_pages(vma)) ||
+ !vx_vmpages_avail(mm, vma_pages(vma))))
+ return -ENOMEM;
vma_link(mm, vma, prev, rb_link, rb_parent);
return 0;
}
return 0;
return 1;
}
-
-/*
- * Insert a new vma covering the given region, with the given flags and
- * protections. Pre-install the mappings to zero or more leading pages
- * in the region. Note, this does put_page on pages[0..npages-1] in all
- * cases, even on error return.
- */
-int install_special_mapping(struct mm_struct *mm,
- unsigned long addr, unsigned long len,
- unsigned long vm_flags, pgprot_t pgprot,
- struct page **pages, unsigned int npages)
-{
- struct vm_area_struct *vma;
- int err = -ENOMEM;
-
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (unlikely(vma == NULL))
- goto out;
- memset(vma, 0, sizeof(*vma));
-
- vma->vm_mm = mm;
- vma->vm_start = addr;
- vma->vm_end = addr + len;
-
- vma->vm_flags = vm_flags;
- vma->vm_page_prot = pgprot;
-
- insert_vm_struct(mm, vma);
- mm->total_vm += len >> PAGE_SHIFT;
-
- for (err = 0; npages > 0 && !err; --npages, ++pages, addr += PAGE_SIZE)
- err = install_page(mm, vma, addr, *pages, vma->vm_page_prot);
-
- out:
- for (; npages > 0; --npages, ++pages)
- put_page(*pages);
-
- return err;
-}