This commit was manufactured by cvs2svn to create branch
diff --git a/mm/mlock.c b/mm/mlock.c
index a9e3716..aa3f047 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -7,6 +7,7 @@
 
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/vs_memory.h>
 
 
 static int mlock_fixup(struct vm_area_struct * vma, 
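
The new <linux/vs_memory.h> include is the Linux-VServer per-context memory
accounting header; it is what supplies the vx_vmlocked_*() helpers the hunks
below switch to (hedged sketches follow each hunk).
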
@@ -49,7 +50,8 @@ static int mlock_fixup(struct vm_area_struct * vma,
                ret = make_pages_present(start, end);
        }
 
-       vma->vm_mm->locked_vm -= pages;
+       /* vma->vm_mm->locked_vm -= pages; */
+       vx_vmlocked_sub(vma->vm_mm, pages);
 out:
        return ret;
 }
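
vx_vmlocked_sub() comes from the Linux-VServer patch via the
<linux/vs_memory.h> include added above; its body is not shown in this diff.
A sketch of the idea only, assuming a hypothetical mm->mm_vx_info
back-pointer and a hypothetical per-context locked_pages counter (neither
name is confirmed by this diff):

        /* Sketch, not the actual Linux-VServer implementation: keep
         * mm->locked_vm in sync (what the replaced line did) and also
         * credit the pages back to the owning context, so locked memory
         * is accounted per guest as well as per mm.
         */
        static inline void vx_vmlocked_sub(struct mm_struct *mm,
                                           unsigned long pages)
        {
                mm->locked_vm -= pages;
                if (mm->mm_vx_info)             /* hypothetical field */
                        atomic_sub(pages, &mm->mm_vx_info->locked_pages);
        }
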
@@ -60,7 +62,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
        struct vm_area_struct * vma, * next;
        int error;
 
-       if (on && !capable(CAP_IPC_LOCK))
+       if (on && !can_do_mlock())
                return -EPERM;
        len = PAGE_ALIGN(len);
        end = start + len;
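
can_do_mlock() relaxes the old blanket CAP_IPC_LOCK requirement: an
unprivileged task may now use mlock() whenever its RLIMIT_MEMLOCK is nonzero,
with the actual amount still enforced by the callers. A sketch consistent
with this tree's use of current->rlim:

        /* Allow mlock either with CAP_IPC_LOCK or with a nonzero
         * RLIMIT_MEMLOCK; how much may be locked is checked at the
         * call sites below.
         */
        int can_do_mlock(void)
        {
                if (capable(CAP_IPC_LOCK))
                        return 1;
                if (current->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
                        return 1;
                return 0;
        }
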
@@ -103,7 +105,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
 
 asmlinkage long sys_mlock(unsigned long start, size_t len)
 {
-       unsigned long locked;
+       unsigned long locked, grow;
        unsigned long lock_limit;
        int error = -ENOMEM;
 
@@ -111,15 +113,18 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;
 
-       locked = len >> PAGE_SHIFT;
-       locked += current->mm->locked_vm;
+       grow = len >> PAGE_SHIFT;
+       if (!vx_vmlocked_avail(current->mm, grow))
+               goto out;
+       locked = current->mm->locked_vm + grow;
 
        lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;
 
        /* check against resource limits */
-       if (locked <= lock_limit)
+       if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
                error = do_mlock(start, len, 1);
+out:
        up_write(&current->mm->mmap_sem);
        return error;
 }
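
The reworked sys_mlock() proceeds in three steps: round the request out to
whole pages (PAGE_ALIGN() of the length plus the sub-page offset, then mask
start down), bail out early if the VServer context cannot absorb grow more
locked pages, and finally enforce RLIMIT_MEMLOCK, which CAP_IPC_LOCK holders
may now bypass (previously the rlimit bound even capable processes). For
example, with 4 KiB pages, start = 0x12345 and len = 0x100 give an in-page
offset of 0x345, so len becomes PAGE_ALIGN(0x445) = 0x1000, start becomes
0x12000, and grow = 1 page. The availability check itself is not shown in
this diff; a sketch only, reusing the same hypothetical fields as above:

        /* Sketch only; the real helper comes from <linux/vs_memory.h>.
         * Nonzero means the context can still lock `pages` more pages.
         */
        static inline int vx_vmlocked_avail(struct mm_struct *mm,
                                            unsigned long pages)
        {
                struct vx_info *vxi = mm->mm_vx_info;   /* hypothetical */

                if (!vxi)
                        return 1;       /* no context, nothing extra to enforce */
                return atomic_read(&vxi->locked_pages) + pages
                        <= vxi->locked_limit;           /* hypothetical fields */
        }
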
@@ -142,7 +147,7 @@ static int do_mlockall(int flags)
        unsigned int def_flags;
        struct vm_area_struct * vma;
 
-       if (!capable(CAP_IPC_LOCK))
+       if (!can_do_mlock())
                return -EPERM;
 
        def_flags = 0;
@@ -177,7 +182,9 @@ asmlinkage long sys_mlockall(int flags)
        lock_limit >>= PAGE_SHIFT;
 
        ret = -ENOMEM;
-       if (current->mm->total_vm <= lock_limit)
+       if (!vx_vmlocked_avail(current->mm, current->mm->total_vm))
+               goto out;
+       if ((current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK))
                ret = do_mlockall(flags);
 out:
        up_write(&current->mm->mmap_sem);
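
sys_mlockall() gets the same two-step treatment, checked against
current->mm->total_vm since MCL_CURRENT/MCL_FUTURE cover the whole address
space; as in sys_mlock(), CAP_IPC_LOCK bypasses the rlimit but not the
per-context check. For reference, the userspace call this path services:

        #include <sys/mman.h>
        #include <stdio.h>

        int main(void)
        {
                /* Lock all current and future mappings; fails with ENOMEM
                 * when over RLIMIT_MEMLOCK and lacking CAP_IPC_LOCK.
                 */
                if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
                        perror("mlockall");
                return 0;
        }
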
@@ -193,3 +200,36 @@ asmlinkage long sys_munlockall(void)
        up_write(&current->mm->mmap_sem);
        return ret;
 }
+
+/*
+ * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
+ * shm segments) get accounted against the user_struct instead.
+ */
+static spinlock_t shmlock_user_lock = SPIN_LOCK_UNLOCKED;
+
+int user_shm_lock(size_t size, struct user_struct *user)
+{
+       unsigned long lock_limit, locked;
+       int allowed = 0;
+
+       spin_lock(&shmlock_user_lock);
+       locked = size >> PAGE_SHIFT;
+       lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
+       lock_limit >>= PAGE_SHIFT;
+       if (locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
+               goto out;
+       get_uid(user);
+       user->locked_shm += locked;
+       allowed = 1;
+out:
+       spin_unlock(&shmlock_user_lock);
+       return allowed;
+}
+
+void user_shm_unlock(size_t size, struct user_struct *user)
+{
+       spin_lock(&shmlock_user_lock);
+       user->locked_shm -= (size >> PAGE_SHIFT);
+       spin_unlock(&shmlock_user_lock);
+       free_uid(user);
+}
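
The new user_shm_lock()/user_shm_unlock() pair charges locked shm segments
to the owning user_struct rather than to any mm, since the segments can
outlive every attaching process; get_uid() pins the user_struct for as long
as it owes locked pages, and free_uid() drops that reference on unlock. A
hypothetical caller sketch (the struct and field names are illustrative,
not taken from this diff):

        static int shm_set_locked(struct shm_seg *seg, size_t size,
                                  struct user_struct *user, int lock)
        {
                if (lock) {
                        if (!user_shm_lock(size, user))
                                return -ENOMEM; /* over limit, no CAP_IPC_LOCK */
                        seg->mlock_user = user; /* remember whom we charged */
                } else {
                        user_shm_unlock(size, seg->mlock_user);
                        seg->mlock_user = NULL;
                }
                return 0;
        }
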