X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fmmap.c;h=d04d60a1dd36adc38b6af6abecc65849388fb179;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=e2364f60e928706165cdb1f7cea19bbb0e645707;hpb=87fc8d1bb10cd459024a742c6a10961fefcef18f;p=linux-2.6.git diff --git a/mm/mmap.c b/mm/mmap.c index e2364f60e..d04d60a1d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -59,10 +61,98 @@ int sysctl_overcommit_ratio = 50; /* default is 50% */ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; atomic_t vm_committed_space = ATOMIC_INIT(0); +/* + * Check that a process has enough memory to allocate a new virtual + * mapping. 0 means there is enough memory for the allocation to + * succeed and -ENOMEM implies there is not. + * + * We currently support three overcommit policies, which are set via the + * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting + * + * Strict overcommit modes added 2002 Feb 26 by Alan Cox. + * Additional code 2002 Jul 20 by Robert Love. + * + * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise. + * + * Note this is a helper function intended to be used by LSMs which + * wish to use this logic. + */ +int __vm_enough_memory(long pages, int cap_sys_admin) +{ + unsigned long free, allowed; + + vm_acct_memory(pages); + + /* + * Sometimes we want to use more memory than we have + */ + if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS) + return 0; + + if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) { + unsigned long n; + + free = get_page_cache_size(); + free += nr_swap_pages; + + /* + * Any slabs which are created with the + * SLAB_RECLAIM_ACCOUNT flag claim to have contents + * which are reclaimable, under pressure. The dentry + * cache and most inode caches should fall into this + */ + free += atomic_read(&slab_reclaim_pages); + + /* + * Leave the last 3% for root + */ + if (!cap_sys_admin) + free -= free / 32; + + if (free > pages) + return 0; + + /* + * nr_free_pages() is very expensive on large systems, + * only call if we're about to fail. + */ + n = nr_free_pages(); + if (!cap_sys_admin) + n -= n / 32; + free += n; + + if (free > pages) + return 0; + vm_unacct_memory(pages); + return -ENOMEM; + } + + allowed = (totalram_pages - hugetlb_total_pages()) + * sysctl_overcommit_ratio / 100; + /* + * Leave the last 3% for root + */ + if (!cap_sys_admin) + allowed -= allowed / 32; + allowed += total_swap_pages; + + /* Don't let a single process grow too big: + leave 3% of the size of this process for other processes */ + allowed -= current->mm->total_vm / 32; + + if (atomic_read(&vm_committed_space) < allowed) + return 0; + + vm_unacct_memory(pages); + + return -ENOMEM; +} + EXPORT_SYMBOL(sysctl_overcommit_memory); EXPORT_SYMBOL(sysctl_overcommit_ratio); EXPORT_SYMBOL(sysctl_max_map_count); EXPORT_SYMBOL(vm_committed_space); +EXPORT_SYMBOL(__vm_enough_memory); /* * Requires inode->i_mapping->i_mmap_lock @@ -136,7 +226,7 @@ asmlinkage unsigned long sys_brk(unsigned long brk) } /* Check against rlimit.. 
*/ - rlim = current->rlim[RLIMIT_DATA].rlim_cur; + rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim) goto out; @@ -306,8 +396,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_file) mapping = vma->vm_file->f_mapping; - if (mapping) + if (mapping) { spin_lock(&mapping->i_mmap_lock); + vma->vm_truncate_count = mapping->truncate_count; + } anon_vma_lock(vma); __vma_link(mm, vma, prev, rb_link, rb_parent); @@ -378,6 +470,7 @@ void vma_adjust(struct vm_area_struct *vma, unsigned long start, again: remove_next = 1 + (end > next->vm_end); end = next->vm_end; anon_vma = next->anon_vma; + importer = vma; } else if (end > next->vm_start) { /* * vma expands, overlapping part of the next: @@ -403,7 +496,16 @@ again: remove_next = 1 + (end > next->vm_end); if (!(vma->vm_flags & VM_NONLINEAR)) root = &mapping->i_mmap; spin_lock(&mapping->i_mmap_lock); + if (importer && + vma->vm_truncate_count != next->vm_truncate_count) { + /* + * unmap_mapping_range might be in progress: + * ensure that the expanding vma is rescanned. + */ + importer->vm_truncate_count = 0; + } if (insert) { + insert->vm_truncate_count = vma->vm_truncate_count; /* * Put into prio_tree now, so instantiated pages * are visible to arm/parisc __flush_dcache_page @@ -744,12 +846,12 @@ void __vm_stat_account(struct mm_struct *mm, unsigned long flags, } #endif /* CONFIG_HUGETLB */ - if (file) + if (file) { mm->shared_vm += pages; - else if (flags & stack_flags) + if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) + mm->exec_vm += pages; + } else if (flags & stack_flags) mm->stack_vm += pages; - if (flags & VM_EXEC) - mm->exec_vm += pages; if (flags & (VM_RESERVED|VM_IO)) mm->reserved_vm += pages; } @@ -833,7 +935,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr, if (vm_flags & VM_LOCKED) { unsigned long locked, lock_limit; locked = mm->locked_vm << PAGE_SHIFT; - lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur; + lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; locked += len; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; @@ -905,7 +1007,7 @@ munmap_back: /* Check against address space limit. */ if ((mm->total_vm << PAGE_SHIFT) + len - > current->rlim[RLIMIT_AS].rlim_cur) + > current->signal->rlim[RLIMIT_AS].rlim_cur) return -ENOMEM; /* check context space, maybe only Private writable mapping? */ @@ -992,9 +1094,12 @@ munmap_back: * f_op->mmap method. -DaveM */ addr = vma->vm_start; + pgoff = vma->vm_pgoff; + vm_flags = vma->vm_flags; if (!file || !vma_merge(mm, prev, addr, vma->vm_end, vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) { + file = vma->vm_file; vma_link(mm, vma, prev, rb_link, rb_parent); if (correct_wcount) atomic_inc(&inode->i_writecount); @@ -1010,6 +1115,7 @@ munmap_back: out: // mm->total_vm += len >> PAGE_SHIFT; vx_vmpages_add(mm, len >> PAGE_SHIFT); + __vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); if (vm_flags & VM_LOCKED) { // mm->locked_vm += len >> PAGE_SHIFT; vx_vmlocked_add(mm, len >> PAGE_SHIFT); @@ -1021,7 +1127,8 @@ out: pgoff, flags & MAP_NONBLOCK); down_write(&mm->mmap_sem); } - __vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); + acct_update_integrals(); + update_mem_hiwater(); return addr; unmap_and_free_vma: @@ -1322,13 +1429,63 @@ out: return prev ? prev->vm_next : vma; } +/* + * Verify that the stack growth is acceptable and + * update accounting. This is shared with both the + * grow-up and grow-down cases. 
+ */ +static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, unsigned long grow) +{ + struct mm_struct *mm = vma->vm_mm; + struct rlimit *rlim = current->signal->rlim; + + /* address space limit tests */ + if (mm->total_vm + grow > rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT) + return -ENOMEM; + + /* Stack limit test */ + if (size > rlim[RLIMIT_STACK].rlim_cur) + return -ENOMEM; + + /* mlock limit tests */ + if (vma->vm_flags & VM_LOCKED) { + unsigned long locked; + unsigned long limit; + locked = mm->locked_vm + grow; + limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; + if (locked > limit && !capable(CAP_IPC_LOCK)) + return -ENOMEM; + } + + if (!vx_vmpages_avail(vma->vm_mm, grow)) + return -ENOMEM; + + /* + * Overcommit.. This must be the final test, as it will + * update security statistics. + */ + if (security_vm_enough_memory(grow)) + return -ENOMEM; + + /* Ok, everything looks good - let it rip */ + // mm->total_vm += grow; + vx_vmpages_add(mm, grow); + if (vma->vm_flags & VM_LOCKED) + // mm->locked_vm += grow; + vx_vmlocked_add(mm, grow); + __vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow); + acct_update_integrals(); + update_mem_hiwater(); + return 0; +} + #ifdef CONFIG_STACK_GROWSUP /* * vma is the first one with address > vma->vm_end. Have to extend vma. */ int expand_stack(struct vm_area_struct * vma, unsigned long address) { - unsigned long grow; + int error; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; @@ -1348,31 +1505,21 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address) */ address += 4 + PAGE_SIZE - 1; address &= PAGE_MASK; - grow = (address - vma->vm_end) >> PAGE_SHIFT; + error = 0; - /* Overcommit.. vx check first to avoid vm_unacct_memory() */ - if (!vx_vmpages_avail(vma->vm_mm, grow) || - security_vm_enough_memory(grow)) { - anon_vma_unlock(vma); - return -ENOMEM; - } - - if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur || - ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > - current->rlim[RLIMIT_AS].rlim_cur) { - anon_vma_unlock(vma); - vm_unacct_memory(grow); - return -ENOMEM; + /* Somebody else might have raced and expanded it already */ + if (address > vma->vm_end) { + unsigned long size, grow; + + size = address - vma->vm_start; + grow = (address - vma->vm_end) >> PAGE_SHIFT; + + error = acct_stack_growth(vma, size, grow); + if (!error) + vma->vm_end = address; } - vma->vm_end = address; - // vma->vm_mm->total_vm += grow; - vx_vmpages_add(vma->vm_mm, grow); - if (vma->vm_flags & VM_LOCKED) - // vma->vm_mm->locked_vm += grow; - vx_vmlocked_add(vma->vm_mm, grow); - __vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow); anon_vma_unlock(vma); - return 0; + return error; } struct vm_area_struct * @@ -1397,7 +1544,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) */ int expand_stack(struct vm_area_struct *vma, unsigned long address) { - unsigned long grow; + int error; /* * We must make sure the anon_vma is allocated @@ -1413,32 +1560,23 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address) * anon_vma lock to serialize against concurrent expand_stacks. */ address &= PAGE_MASK; - grow = (vma->vm_start - address) >> PAGE_SHIFT; + error = 0; - /* Overcommit.. 
vx check first to avoid vm_unacct_memory() */ - if (!vx_vmpages_avail(vma->vm_mm, grow) || - security_vm_enough_memory(grow)) { - anon_vma_unlock(vma); - return -ENOMEM; - } - - if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur || - ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > - current->rlim[RLIMIT_AS].rlim_cur) { - anon_vma_unlock(vma); - vm_unacct_memory(grow); - return -ENOMEM; + /* Somebody else might have raced and expanded it already */ + if (address < vma->vm_start) { + unsigned long size, grow; + + size = vma->vm_end - address; + grow = (vma->vm_start - address) >> PAGE_SHIFT; + + error = acct_stack_growth(vma, size, grow); + if (!error) { + vma->vm_start = address; + vma->vm_pgoff -= grow; + } } - vma->vm_start = address; - vma->vm_pgoff -= grow; - // vma->vm_mm->total_vm += grow; - vx_vmpages_add(vma->vm_mm, grow); - if (vma->vm_flags & VM_LOCKED) - // vma->vm_mm->locked_vm += grow; - vx_vmlocked_add(vma->vm_mm, grow); - __vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow); anon_vma_unlock(vma); - return 0; + return error; } struct vm_area_struct * @@ -1483,9 +1621,11 @@ static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev, { unsigned long first = start & PGDIR_MASK; unsigned long last = end + PGDIR_SIZE - 1; - unsigned long start_index, end_index; struct mm_struct *mm = tlb->mm; + if (last > MM_VM_SIZE(mm) || last < end) + last = MM_VM_SIZE(mm); + if (!prev) { prev = mm->mmap; if (!prev) @@ -1508,23 +1648,18 @@ static void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev, last = next->vm_start; } if (prev->vm_end > first) - first = prev->vm_end + PGDIR_SIZE - 1; + first = prev->vm_end; break; } no_mmaps: if (last < first) /* for arches with discontiguous pgd indices */ return; - /* - * If the PGD bits are not consecutive in the virtual address, the - * old method of shifting the VA >> by PGDIR_SHIFT doesn't work. - */ - start_index = pgd_index(first); - if (start_index < FIRST_USER_PGD_NR) - start_index = FIRST_USER_PGD_NR; - end_index = pgd_index(last); - if (end_index > start_index) { - clear_page_tables(tlb, start_index, end_index - start_index); - flush_tlb_pgtables(mm, first & PGDIR_MASK, last & PGDIR_MASK); + if (first < FIRST_USER_PGD_NR * PGDIR_SIZE) + first = FIRST_USER_PGD_NR * PGDIR_SIZE; + /* No point trying to free anything if we're in the same pte page */ + if ((first & PMD_MASK) < (last & PMD_MASK)) { + clear_page_range(tlb, first, last); + flush_tlb_pgtables(mm, first, last); } } @@ -1626,6 +1761,9 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, struct mempolicy *pol; struct vm_area_struct *new; + if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK)) + return -EINVAL; + if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; @@ -1687,13 +1825,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) return 0; /* we have start < mpnt->vm_end */ - if (is_vm_hugetlb_page(mpnt)) { - int ret = is_aligned_hugepage_range(start, len); - - if (ret) - return ret; - } - /* if it doesn't overlap, we have nothing.. */ end = start + len; if (mpnt->vm_start >= end) @@ -1707,16 +1838,18 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) * places tmp vma above, and higher split_vma places tmp vma below. */ if (start > mpnt->vm_start) { - if (split_vma(mm, mpnt, start, 0)) - return -ENOMEM; + int error = split_vma(mm, mpnt, start, 0); + if (error) + return error; prev = mpnt; } /* Does it split the last one? 
*/ last = find_vma(mm, end); if (last && end > last->vm_start) { - if (split_vma(mm, last, end, 1)) - return -ENOMEM; + int error = split_vma(mm, last, end, 1); + if (error) + return error; } mpnt = prev? prev->vm_next: mm->mmap; @@ -1749,6 +1882,16 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len) return ret; } +static inline void verify_mm_writelocked(struct mm_struct *mm) +{ +#ifdef CONFIG_DEBUG_KERNEL + if (unlikely(down_read_trylock(&mm->mmap_sem))) { + WARN_ON(1); + up_read(&mm->mmap_sem); + } +#endif +} + /* * this is really a simplified "do_mmap". it only handles * anonymous maps. eventually we may be able to do some @@ -1775,7 +1918,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) if (mm->def_flags & VM_LOCKED) { unsigned long locked, lock_limit; locked = mm->locked_vm << PAGE_SHIFT; - lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur; + lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; locked += len; if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; @@ -1783,6 +1926,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len) return -ENOMEM; } + /* + * mm->mmap_sem is required to protect against another thread + * changing the mappings in case we sleep. + */ + verify_mm_writelocked(mm); + /* * Clear old maps. this also does some error checking for us */ @@ -1796,7 +1945,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) /* Check against address space limits *after* clearing old maps... */ if ((mm->total_vm << PAGE_SHIFT) + len - > current->rlim[RLIMIT_AS].rlim_cur) + > current->signal->rlim[RLIMIT_AS].rlim_cur) return -ENOMEM; if (mm->map_count > sysctl_max_map_count) @@ -1838,6 +1987,8 @@ out: vx_vmlocked_add(mm, len >> PAGE_SHIFT); make_pages_present(addr, addr + len); } + acct_update_integrals(); + update_mem_hiwater(); return addr; } @@ -1861,7 +2012,8 @@ void exit_mmap(struct mm_struct *mm) ~0UL, &nr_accounted, NULL); vm_unacct_memory(nr_accounted); BUG_ON(mm->map_count); /* This is just debugging */ - clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD); + clear_page_range(tlb, FIRST_USER_PGD_NR * PGDIR_SIZE, MM_VM_SIZE(mm)); + tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm)); vma = mm->mmap; @@ -1891,7 +2043,7 @@ void exit_mmap(struct mm_struct *mm) * and into the inode's i_mmap tree. If vm_file is non-NULL * then i_mmap_lock is taken here. */ -void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) +int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) { struct vm_area_struct * __vma, * prev; struct rb_node ** rb_link, * rb_parent; @@ -1914,8 +2066,9 @@ void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma) } __vma = find_vma_prepare(mm,vma->vm_start,&prev,&rb_link,&rb_parent); if (__vma && __vma->vm_start < vma->vm_end) - BUG(); + return -ENOMEM; vma_link(mm, vma, prev, rb_link, rb_parent); + return 0; } /*
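
The patch above consolidates the overcommit check into a shared __vm_enough_memory() helper. As a rough illustration of its OVERCOMMIT_GUESS heuristic (page cache + free swap + free pages, with 3% held back for root), the following standalone userspace sketch estimates the same quantity from /proc/meminfo. It is an approximation and not part of the patch: it uses the MemFree, Cached and SwapFree fields, omits the reclaimable-slab term (which has no direct /proc equivalent in this kernel era), and treats euid 0 as a stand-in for cap_sys_admin.

/*
 * Sketch: approximate the OVERCOMMIT_GUESS estimate from userspace.
 * Build with: gcc -o guess guess.c ; run as: ./guess [request_kb]
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Return the value (in kB) of one /proc/meminfo field, 0 if missing. */
static long meminfo_kb(const char *key)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[128];
	long kb = 0;

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, key, strlen(key))) {
			sscanf(line + strlen(key), "%ld", &kb);
			break;
		}
	}
	fclose(f);
	return kb;
}

int main(int argc, char **argv)
{
	/* Requested allocation in kB; defaults to 1 GB. */
	long request = argc > 1 ? atol(argv[1]) : 1024 * 1024;

	/* Roughly: page cache + free swap + free pages. */
	long free_kb = meminfo_kb("Cached:") + meminfo_kb("SwapFree:")
		     + meminfo_kb("MemFree:");

	/* Leave the last 3% for root, mirroring the kernel's free / 32. */
	if (geteuid() != 0)
		free_kb -= free_kb / 32;

	printf("guess-mode estimate: %ld kB available, request %ld kB -> %s\n",
	       free_kb, request,
	       free_kb > request ? "would succeed" : "would fail (-ENOMEM)");
	return 0;
}

Note that the kernel checks free pages only as a last resort (nr_free_pages() is expensive on large systems), and that the strict mode (OVERCOMMIT_NEVER) uses the separate ram * overcommit_ratio / 100 + swap limit instead; the sketch covers the heuristic mode only.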