/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/syscalls.h>
#include <linux/vs_memory.h>
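
/*
 * mlock_fixup() applies the new VM_LOCKED state to [start, end) within
 * a single vma: it splits the vma if the range covers only part of it,
 * rewrites vm_flags, adjusts the locked-page accounting, and on lock
 * faults the pages in via make_pages_present() (skipped for VM_IO
 * mappings).  Judging by the vs_memory.h include, vx_vmlocked_sub() is
 * the Linux-VServer replacement for the plain locked_vm arithmetic in
 * the commented-out line it sits next to.
 */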
static int mlock_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct * mm = vma->vm_mm;
	int pages;
	int ret = 0;

	if (newflags == vma->vm_flags)
		goto out;

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, make_pages_present below will bring it back.
	 */
	vma->vm_flags = newflags;

	/*
	 * Keep track of amount of locked VM.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		pages = -pages;
		if (!(newflags & VM_IO))
			ret = make_pages_present(start, end);
	}

	// vma->vm_mm->locked_vm -= pages;
	vx_vmlocked_sub(vma->vm_mm, pages);
out:
	if (ret == -ENOMEM)
		ret = -EAGAIN;
	return ret;
}
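
/*
 * do_mlock() walks the vma list covering [start, start + len) and
 * applies the requested lock state to each piece via mlock_fixup().
 * Any hole in the range (no vma at start, or a gap between vmas) makes
 * it fail with -ENOMEM, matching mlock(2)'s requirement that the whole
 * range be mapped.
 */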
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
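
/*
 * mlock(2) entry point.  The user-supplied range is rounded out to
 * whole pages: the offset of start within its page is added to len,
 * len is then page-aligned, and start is truncated down.  With 4 KiB
 * pages, start = 0x1234 and len = 0x100 become start = 0x1000 and
 * len = PAGE_ALIGN(0x100 + 0x234) = 0x1000, i.e. exactly the one page
 * containing the original range.  The request must fit under
 * RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK, and in this
 * patched tree also pass the per-context check that
 * vx_vmlocked_avail() presumably performs.
 */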
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked, grow;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	grow = len >> PAGE_SHIFT;
	if (!vx_vmlocked_avail(current->mm, grow))
		goto out;
	locked = current->mm->locked_vm + grow;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
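
/*
 * munlock(2) entry point: the same page rounding as sys_mlock(), then
 * do_mlock() with on == 0 clears VM_LOCKED over the range.  The pages
 * stay resident; they merely become eligible for reclaim again.
 */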
asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
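
/*
 * do_mlockall() is the back end for mlockall()/munlockall().
 * MCL_FUTURE is implemented by setting VM_LOCKED in mm->def_flags so
 * that later mappings inherit it; MCL_CURRENT walks every existing
 * vma.  Per-vma errors from mlock_fixup() are deliberately ignored,
 * so the function always returns 0 once its callers' limit checks
 * have passed.
 */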
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}
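
/*
 * mlockall(2) entry point.  After validating the flag mask, the work
 * is done only if MCL_CURRENT was not requested, the whole address
 * space fits under RLIMIT_MEMLOCK, or the caller has CAP_IPC_LOCK.
 * The vx_vmlocked_avail() test against total_vm looks like the
 * VServer-side counterpart of that rlimit check.
 */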
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!vx_vmlocked_avail(current->mm, current->mm->total_vm))
		goto out_up;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
out_up:
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}
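
/*
 * munlockall(2) entry point: do_mlockall(0) clears mm->def_flags and
 * drops VM_LOCKED from every vma, and cannot fail.
 */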
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetimes than processes (SHM_LOCK and
 * SHM_HUGETLB shm segments) get accounted against the user_struct
 * instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);
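
/*
 * user_shm_lock() charges size bytes (rounded up to whole pages) of
 * locked shm against *user, refusing when that would exceed
 * RLIMIT_MEMLOCK for a caller without CAP_IPC_LOCK.  It returns 1 on
 * success and 0 on refusal, and on success takes a uid reference that
 * the matching user_shm_unlock() drops.
 */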
int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}
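
/*
 * user_shm_unlock() undoes a successful user_shm_lock().  A typical
 * pairing, sketched for illustration (not lifted from ipc/shm.c):
 *
 *	if (!user_shm_lock(size, user))
 *		return -ENOMEM;
 *	...
 *	user_shm_unlock(size, user);
 */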
void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}