/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995  Linus Torvalds
 *  (C) Copyright 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/vs_memory.h>	/* assumed: Linux-VServer header for vx_vmlocked_*() */


/*
 * Apply the new VM_LOCKED setting to [start, end), splitting the vma
 * first if the range covers only part of it.  Called with mmap_sem
 * held for writing.
 */
static int mlock_fixup(struct vm_area_struct *vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	int pages;
	int ret = 0;

	if (newflags == vma->vm_flags)
		goto out;

	if (start != vma->vm_start) {
		if (split_vma(mm, vma, start, 1)) {
			ret = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (split_vma(mm, vma, end, 0)) {
			ret = -EAGAIN;
			goto out;
		}
	}

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, make_pages_present below will bring it back.
	 */
	vma->vm_flags = newflags;

	/*
	 * Keep track of amount of locked VM.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		pages = -pages;
		ret = make_pages_present(start, end);
	}

	/* vma->vm_mm->locked_vm -= pages; */
	vx_vmlocked_sub(vma->vm_mm, pages);
out:
	return ret;
}

/*
 * Walk the vmas covering [start, start + len) and set or clear
 * VM_LOCKED on each.  The range must be fully mapped; a hole in it
 * yields -ENOMEM.
 */
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct *vma, *next;
	int error;

	if (on && !capable(CAP_IPC_LOCK))
		return -EPERM;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked, grow;
	unsigned long lock_limit;
	int error = -ENOMEM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	grow = len >> PAGE_SHIFT;
	if (!vx_vmlocked_avail(current->mm, grow))
		goto out;
	locked = current->mm->locked_vm + grow;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if (locked <= lock_limit)
		error = do_mlock(start, len, 1);
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

static int do_mlockall(int flags)
{
	int error;
	unsigned int def_flags;
	struct vm_area_struct *vma;

	if (!capable(CAP_IPC_LOCK))
		return -EPERM;

	def_flags = 0;
	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;

	error = 0;
	for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;
		/* Ignore errors */
		mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
	}
	return error;
}
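/*
 * sys_mlockall()/sys_munlockall() lock or unlock every vma in the
 * current address space.  MCL_CURRENT applies VM_LOCKED to existing
 * mappings; MCL_FUTURE is latched into mm->def_flags by do_mlockall()
 * above, so mappings created later are locked as well.
 */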
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	down_write(&current->mm->mmap_sem);
	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!vx_vmlocked_avail(current->mm, current->mm->total_vm))
		goto out;
	if (current->mm->total_vm <= lock_limit)
		ret = do_mlockall(flags);
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
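/*
 * Usage sketch (illustrative only, not part of this file): the four
 * syscalls above back the POSIX memory-locking API in <sys/mman.h>.
 * Locking succeeds only with CAP_IPC_LOCK or enough RLIMIT_MEMLOCK
 * headroom, matching the capable()/rlim checks above.  A minimal
 * userspace caller might look like:
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		static char buf[4096];
 *
 *		if (mlock(buf, sizeof(buf)))
 *			perror("mlock");
 *		munlock(buf, sizeof(buf));
 *
 *		if (mlockall(MCL_CURRENT | MCL_FUTURE))
 *			perror("mlockall");
 *		munlockall();
 *		return 0;
 *	}
 */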