}
/* Check against rlimit.. */
- rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+ rlim = current->rlim[RLIMIT_DATA].rlim_cur;
if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
goto out;
/* Ok, looks good - let it rip. */
- if (__do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
+ if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
goto out;
set_brk:
mm->brk = brk;
}
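The RLIMIT_DATA test above compares the span from start_data to the requested break, in bytes, against the soft limit. A minimal userspace sketch of that arithmetic with hypothetical numbers (4 KiB pages, 8 MiB limit; none of these values are from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long rlim = 8UL << 20;			/* RLIMIT_DATA soft limit: 8 MiB */
		unsigned long start_data = 0x0804a000UL;	/* hypothetical start of data segment */
		unsigned long brk = 0x0884b000UL;		/* requested break: 8 MiB + 4 KiB beyond it */

		if (brk - start_data > rlim)
			printf("denied: %lu > %lu\n", brk - start_data, rlim);
		return 0;
	}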
#endif /* CONFIG_HUGETLB */
- if (file) {
+ if (file)
mm->shared_vm += pages;
- if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
- mm->exec_vm += pages;
- } else if (flags & stack_flags)
+ else if (flags & stack_flags)
mm->stack_vm += pages;
+ if (flags & VM_EXEC)
+ mm->exec_vm += pages;
if (flags & (VM_RESERVED|VM_IO))
mm->reserved_vm += pages;
}
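Net effect of the hunk above: exec_vm accounting is decoupled from file-backed mappings. Before, only a file mapping with VM_EXEC set and VM_WRITE clear bumped exec_vm; after, every VM_EXEC mapping does, anonymous or not. Assembled from the diff, the body now reads as below (stack_flags comes from the enclosing helper, which is elided here):

	if (file)
		mm->shared_vm += pages;
	else if (flags & stack_flags)
		mm->stack_vm += pages;
	if (flags & VM_EXEC)		/* now counted regardless of backing */
		mm->exec_vm += pages;
	if (flags & (VM_RESERVED|VM_IO))
		mm->reserved_vm += pages;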
* The caller must hold down_write(current->mm->mmap_sem).
*/
-unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
- unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long pgoff)
+unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file * file,
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long pgoff)
{
- struct mm_struct * mm = current->mm;
struct vm_area_struct * vma, * prev;
struct inode *inode;
unsigned int vm_flags;
if (vm_flags & VM_LOCKED) {
unsigned long locked, lock_limit;
locked = mm->locked_vm << PAGE_SHIFT;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
locked += len;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
/* Check against address space limit. */
if ((mm->total_vm << PAGE_SHIFT) + len
- > current->signal->rlim[RLIMIT_AS].rlim_cur)
+ > current->rlim[RLIMIT_AS].rlim_cur)
return -ENOMEM;
/* check context space, maybe only private writable mappings? */
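Hunk context: do_mmap_pgoff() now takes the target mm explicitly instead of reading current->mm itself, so a mapping can be set up in another process's address space. A hypothetical call site for the common self-mapping case (file and len are placeholders, not from the patch):

	down_write(&current->mm->mmap_sem);	/* required, per the comment above */
	addr = do_mmap_pgoff(current->mm, file, 0, len,
			     PROT_READ, MAP_PRIVATE, 0);
	up_write(&current->mm->mmap_sem);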
{
if (sz < EXEC_STACK_BIAS)
return 0;
- return (sz - EXEC_STACK_BIAS) > current->signal->rlim[RLIMIT_STACK].rlim_cur;
+ return (sz - EXEC_STACK_BIAS) > current->rlim[RLIMIT_STACK].rlim_cur;
}
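For reference, the stack-limit helper reads as a whole like the sketch below after this patch. The body is taken from the hunk; the name, static linkage, and return type are assumptions based on how it is called from the growth paths further down. Sizes under EXEC_STACK_BIAS are never charged against RLIMIT_STACK; only the remainder is.

	static int over_stack_limit(unsigned long sz)
	{
		/* the first EXEC_STACK_BIAS bytes are free of charge */
		if (sz < EXEC_STACK_BIAS)
			return 0;
		return (sz - EXEC_STACK_BIAS) > current->rlim[RLIMIT_STACK].rlim_cur;
	}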
#ifdef CONFIG_STACK_GROWSUP
address &= PAGE_MASK;
grow = (address - vma->vm_end) >> PAGE_SHIFT;
- if (address < vma->vm_end) {
- anon_vma_unlock(vma);
- return 0;
- }
/* Overcommit.. vx check first to avoid vm_unacct_memory() */
if (!vx_vmpages_avail(vma->vm_mm, grow) ||
security_vm_enough_memory(grow)) {
if (over_stack_limit(address - vma->vm_start) ||
((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
- current->signal->rlim[RLIMIT_AS].rlim_cur) {
- anon_vma_unlock(vma);
- vm_unacct_memory(grow);
- return -ENOMEM;
- }
- if ((vma->vm_flags & VM_LOCKED) && !capable(CAP_IPC_LOCK) &&
- ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
- current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) {
+ current->rlim[RLIMIT_AS].rlim_cur) {
anon_vma_unlock(vma);
vm_unacct_memory(grow);
return -ENOMEM;
}
vma->vm_end = address;
// vma->vm_mm->total_vm += grow;
vx_vmpages_add(vma->vm_mm, grow);
- if (vma->vm_flags & VM_LOCKED)
+ if (vma->vm_flags & VM_LOCKED) {
// vma->vm_mm->locked_vm += grow;
vx_vmlocked_add(vma->vm_mm, grow);
+ }
__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
anon_vma_unlock(vma);
return 0;
address &= PAGE_MASK;
grow = (vma->vm_start - address) >> PAGE_SHIFT;
- if (address >= vma->vm_start) {
- anon_vma_unlock(vma);
- return 0;
- }
/* Overcommit.. vx check first to avoid vm_unacct_memory() */
if (!vx_vmpages_avail(vma->vm_mm, grow) ||
security_vm_enough_memory(grow)) {
if (over_stack_limit(vma->vm_end - address) ||
((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
- current->signal->rlim[RLIMIT_AS].rlim_cur) {
- anon_vma_unlock(vma);
- vm_unacct_memory(grow);
- return -ENOMEM;
- }
- if ((vma->vm_flags & VM_LOCKED) && !capable(CAP_IPC_LOCK) &&
- ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
- current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) {
+ current->rlim[RLIMIT_AS].rlim_cur) {
anon_vma_unlock(vma);
vm_unacct_memory(grow);
return -ENOMEM;
}
vma->vm_start = address;
vma->vm_pgoff -= grow;
// vma->vm_mm->total_vm += grow;
vx_vmpages_add(vma->vm_mm, grow);
- if (vma->vm_flags & VM_LOCKED)
+ if (vma->vm_flags & VM_LOCKED) {
// vma->vm_mm->locked_vm += grow;
vx_vmlocked_add(vma->vm_mm, grow);
+ }
__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
anon_vma_unlock(vma);
return 0;
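Both growth paths now share the same shape: compute grow in whole pages, let the vx helpers check and charge the context, apply the stack and RLIMIT_AS limits in a single test, then move the VMA boundary and account the pages. A minimal userspace sketch of the page arithmetic for the downward case, with hypothetical addresses and a 4 KiB page size:

	#include <stdio.h>

	int main(void)
	{
		unsigned long vm_start = 0xbffeb000UL;	/* current bottom of the stack VMA */
		unsigned long address  = 0xbffe8000UL;	/* page-aligned fault address below it */
		unsigned long grow = (vm_start - address) >> 12;

		/* 3 pages; vm_pgoff shifts back by the same amount */
		printf("grow = %lu pages\n", grow);
		return 0;
	}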
// area->vm_mm->total_vm -= len >> PAGE_SHIFT;
vx_vmpages_sub(area->vm_mm, len >> PAGE_SHIFT);
-
- if (area->vm_flags & VM_LOCKED)
+
+ if (area->vm_flags & VM_LOCKED) {
// area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
vx_vmlocked_sub(area->vm_mm, len >> PAGE_SHIFT);
+ }
vm_stat_unaccount(area);
area->vm_mm->unmap_area(area);
remove_vm_struct(area);
* anonymous maps. eventually we may be able to do some
* brk-specific accounting here.
*/
-unsigned long __do_brk(unsigned long addr, unsigned long len)
+unsigned long do_brk(unsigned long addr, unsigned long len)
{
struct mm_struct * mm = current->mm;
struct vm_area_struct * vma, * prev;
if (mm->def_flags & VM_LOCKED) {
unsigned long locked, lock_limit;
locked = mm->locked_vm << PAGE_SHIFT;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+ lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
locked += len;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
}
/* Check against address space limits *after* clearing old maps... */
if ((mm->total_vm << PAGE_SHIFT) + len
- > current->signal->rlim[RLIMIT_AS].rlim_cur)
+ > current->rlim[RLIMIT_AS].rlim_cur)
return -ENOMEM;
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
return addr;
}
-EXPORT_SYMBOL(__do_brk);
-
-unsigned long do_brk(unsigned long addr, unsigned long len)
-{
- unsigned long ret;
-
- down_write(&current->mm->mmap_sem);
- ret = __do_brk(addr, len);
- up_write(&current->mm->mmap_sem);
- return ret;
-}
-
EXPORT_SYMBOL(do_brk);
-
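With the __do_brk/do_brk split removed, do_brk() keeps the name exported to modules but adopts the same locking contract as do_mmap_pgoff(): the caller takes mmap_sem. A hypothetical call site after this patch mirrors the deleted wrapper:

	down_write(&current->mm->mmap_sem);
	ret = do_brk(addr, len);	/* addr, len as before; ret checked by the caller */
	up_write(&current->mm->mmap_sem);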
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
vx_vmpages_sub(mm, mm->total_vm);
// mm->locked_vm = 0;
vx_vmlocked_sub(mm, mm->locked_vm);
+ arch_flush_exec_range(mm);
spin_unlock(&mm->page_table_lock);