__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
-int sysctl_overcommit_memory = 0; /* default is heuristic overcommit */
+int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
atomic_t vm_committed_space = ATOMIC_INIT(0);
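For reference, OVERCOMMIT_GUESS, OVERCOMMIT_ALWAYS and OVERCOMMIT_NEVER come from include/linux/mman.h (values 0, 1 and 2); spelling the modes out replaces the magic 0 above and the "> 1" test further down. An illustrative dispatch only, not the kernel's actual accounting code:

	switch (sysctl_overcommit_memory) {
	case OVERCOMMIT_GUESS:		/* 0: heuristic free-memory check */
		break;
	case OVERCOMMIT_ALWAYS:		/* 1: never refuse an allocation */
		break;
	case OVERCOMMIT_NEVER:		/* 2: strict commit accounting */
		break;
	}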
flush_dcache_mmap_lock(mapping);
if (unlikely(vma->vm_flags & VM_NONLINEAR))
- list_add_tail(&vma->shared.vm_set.list,
- &mapping->i_mmap_nonlinear);
+ vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
else
vma_prio_tree_insert(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
}
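vma_nonlinear_insert() itself is not part of this hunk; judging from the two lines it replaces, it is presumably a thin wrapper around the same list operation:

	/* presumed shape, inferred from the list_add_tail() it replaces */
	static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
						struct list_head *head)
	{
		list_add_tail(&vma->shared.vm_set.list, head);
	}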
if (root) {
- if (adjust_next) {
- vma_prio_tree_init(next);
+ if (adjust_next)
vma_prio_tree_insert(next, root);
- }
- vma_prio_tree_init(vma);
vma_prio_tree_insert(vma, root);
flush_dcache_mmap_unlock(mapping);
}
return NULL;
}
+#ifdef CONFIG_PROC_FS
+void __vm_stat_account(struct mm_struct *mm, unsigned long flags,
+ struct file *file, long pages)
+{
+ const unsigned long stack_flags
+ = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+
+#ifdef CONFIG_HUGETLB_PAGE
+ if (flags & VM_HUGETLB) {
+ if (!(flags & VM_DONTCOPY))
+ mm->shared_vm += pages;
+ return;
+ }
+#endif /* CONFIG_HUGETLB_PAGE */
+
+ if (file)
+ mm->shared_vm += pages;
+ else if (flags & stack_flags)
+ mm->stack_vm += pages;
+ if (flags & VM_EXEC)
+ mm->exec_vm += pages;
+ if (flags & (VM_RESERVED|VM_IO))
+ mm->reserved_vm += pages;
+}
+#endif /* CONFIG_PROC_FS */
+
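The four counters updated above (shared_vm, stack_vm, exec_vm, reserved_vm) exist purely for /proc reporting, hence the CONFIG_PROC_FS guard. A hedged sketch of the sort of consumer this enables, assuming a seq_file-based /proc handler (illustrative, not verbatim fs/proc code):

	seq_printf(m,
		"VmShd:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n",
		mm->shared_vm << (PAGE_SHIFT - 10),
		mm->stack_vm  << (PAGE_SHIFT - 10),
		mm->exec_vm   << (PAGE_SHIFT - 10));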
/*
* The caller must hold down_write(current->mm->mmap_sem).
*/
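Concretely, a kernel caller brackets the call with the write side of the semaphore, mirroring the down_write()/up_write() pair visible in sys_munmap() further down:

	down_write(&current->mm->mmap_sem);
	addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);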
int accountable = 1;
unsigned long charged = 0;
- /*
- * Does the application expect PROT_READ to imply PROT_EXEC:
- */
- if (unlikely((prot & PROT_READ) &&
- (current->personality & READ_IMPLIES_EXEC)))
- prot |= PROT_EXEC;
-
if (file) {
if (is_file_hugepages(file))
accountable = 0;
(file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
return -EPERM;
}
+ /*
+ * Does the application expect PROT_READ to imply PROT_EXEC?
+ *
+ * (the exception is when the underlying filesystem is mounted
+ * noexec, in which case we don't add PROT_EXEC.)
+ */
+ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+ if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
+ prot |= PROT_EXEC;
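From userspace the effect of that branch is reachable through the personality(2) bit. A hypothetical demonstration program, not part of the patch:

	#include <sys/personality.h>
	#include <sys/mman.h>

	int main(void)
	{
		personality(READ_IMPLIES_EXEC);
		/* PROT_READ is silently widened to PROT_READ|PROT_EXEC here,
		 * unless the mapping is file-backed from a noexec mount. */
		void *p = mmap(NULL, 4096, PROT_READ,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		return p == MAP_FAILED;
	}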
if (!len)
return addr;
return -ENOMEM;
if (accountable && (!(flags & MAP_NORESERVE) ||
- sysctl_overcommit_memory > 1)) {
+ sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
if (vm_flags & VM_SHARED) {
/* Check memory availability in shmem_file_setup? */
vm_flags |= VM_ACCOUNT;
* f_op->mmap method. -DaveM
*/
addr = vma->vm_start;
+ pgoff = vma->vm_pgoff;
+ vm_flags = vma->vm_flags;
if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
+ file = vma->vm_file;
vma_link(mm, vma, prev, rb_link, rb_parent);
if (correct_wcount)
atomic_inc(&inode->i_writecount);
out:
// mm->total_vm += len >> PAGE_SHIFT;
vx_vmpages_add(mm, len >> PAGE_SHIFT);
+ __vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
// mm->locked_vm += len >> PAGE_SHIFT;
vx_vmlocked_add(mm, len >> PAGE_SHIFT);
pgoff, flags & MAP_NONBLOCK);
down_write(&mm->mmap_sem);
}
+ __vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
return addr;
unmap_and_free_vma:
* This mmap-allocator allocates new areas top-down from below the
* stack's low limit (the base):
*/
+#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
const unsigned long len, const unsigned long pgoff,
return addr;
}
+#endif
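For readers unfamiliar with the top-down variant: instead of searching upward from TASK_UNMAPPED_BASE, it walks gaps downward from mm->mmap_base. A much-simplified sketch of the core search (illustrative; the real function also keeps a free-area cache and falls back to the bottom-up allocator):

	struct vm_area_struct *vma;
	unsigned long addr = mm->mmap_base - len;

	do {
		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vma->vm_start)
			return addr;		/* found a large-enough gap */
		addr = vma->vm_start - len;	/* retry just below this vma */
	} while (addr >= TASK_UNMAPPED_BASE);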
void arch_unmap_area_topdown(struct vm_area_struct *area)
{
vma->vm_end = address;
// vma->vm_mm->total_vm += grow;
vx_vmpages_add(vma->vm_mm, grow);
- if (vma->vm_flags & VM_LOCKED)
+ if (vma->vm_flags & VM_LOCKED) {
// vma->vm_mm->locked_vm += grow;
vx_vmlocked_add(vma->vm_mm, grow);
+ }
+ __vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
anon_vma_unlock(vma);
return 0;
}
vma->vm_pgoff -= grow;
// vma->vm_mm->total_vm += grow;
vx_vmpages_add(vma->vm_mm, grow);
- if (vma->vm_flags & VM_LOCKED)
+ if (vma->vm_flags & VM_LOCKED) {
// vma->vm_mm->locked_vm += grow;
vx_vmlocked_add(vma->vm_mm, grow);
+ }
+ __vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
anon_vma_unlock(vma);
return 0;
}
// area->vm_mm->total_vm -= len >> PAGE_SHIFT;
vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
- if (area->vm_flags & VM_LOCKED)
+ if (area->vm_flags & VM_LOCKED) {
// area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
+ }
+ vm_stat_unaccount(area);
area->vm_mm->unmap_area(area);
remove_vm_struct(area);
}
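vm_stat_unaccount() is the teardown counterpart of __vm_stat_account(); presumably it simply feeds a negative page count back through the same helper, along these lines:

	/* presumed shape of the helper used above */
	static inline void vm_stat_unaccount(struct vm_area_struct *vma)
	{
		__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
			-((vma->vm_end - vma->vm_start) >> PAGE_SHIFT));
	}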
/* most fields are the same, copy all, and then fixup */
*new = *vma;
- vma_prio_tree_init(new);
if (new_below)
new->vm_end = addr;
if (mpnt->vm_start >= end)
return 0;
- /* Something will probably happen, so notify. */
- if (mpnt->vm_file && (mpnt->vm_flags & VM_EXEC))
- profile_exec_unmap(mm);
-
/*
* If we need to split any vma, do it now to save pain later.
*
int ret;
struct mm_struct *mm = current->mm;
+ profile_munmap(addr);
+
down_write(&mm->mmap_sem);
ret = do_munmap(mm, addr, len);
up_write(&mm->mmap_sem);
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
- profile_exit_mmap(mm);
-
lru_add_drain();
spin_lock(&mm->page_table_lock);
* and into the inode's i_mmap tree. If vm_file is non-NULL
* then i_mmap_lock is taken here.
*/
-void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
+int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
{
struct vm_area_struct * __vma, * prev;
struct rb_node ** rb_link, * rb_parent;
}
__vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent);
if (__vma && __vma->vm_start < vma->vm_end)
- BUG();
+ return -ENOMEM;
vma_link(mm, vma, prev, rb_link, rb_parent);
+ return 0;
}
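With insert_vm_struct() now returning -ENOMEM on an overlapping range instead of calling BUG(), callers are expected to check the result. A hypothetical call site:

	if (insert_vm_struct(mm, vma)) {
		kmem_cache_free(vm_area_cachep, vma);
		return -ENOMEM;
	}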
/*
new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (new_vma) {
*new_vma = *vma;
- vma_prio_tree_init(new_vma);
pol = mpol_copy(vma_policy(vma));
if (IS_ERR(pol)) {
kmem_cache_free(vm_area_cachep, new_vma);