fedora core 6 1.2949 + vserver 2.2.0
diff --git a/kernel/vserver/context.c b/kernel/vserver/context.c
index 7dd3deb..d38b23c 100644
--- a/kernel/vserver/context.c
+++ b/kernel/vserver/context.c
@@ -3,7 +3,7 @@
  *
  *  Virtual Server: Context Support
  *
- *  Copyright (C) 2003-2005  Herbert Pötzl
+ *  Copyright (C) 2003-2007  Herbert Pötzl
  *
  *  V0.01  context helper
  *  V0.02  vx_ctx_kill syscall command
  *  V0.09  revert to non RCU for now
  *  V0.10  and back to working RCU hash
  *  V0.11  and back to locking again
- *  V0.12  have __create claim() the vxi
+ *  V0.12  referenced context store
+ *  V0.13  separate per cpu data
+ *  V0.14  changed vcmds to vxi arg
+ *  V0.15  added context stat
+ *  V0.16  have __create claim() the vxi
  *
  */
 
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/namespace.h>
+#include <linux/mnt_namespace.h>
+#include <linux/pid_namespace.h>
 
 #include <linux/sched.h>
+#include <linux/vserver/context.h>
 #include <linux/vserver/network.h>
 #include <linux/vserver/legacy.h>
-#include <linux/vserver/limit.h>
 #include <linux/vserver/debug.h>
+#include <linux/vserver/limit.h>
 #include <linux/vserver/limit_int.h>
+#include <linux/vserver/space.h>
 
 #include <linux/vs_context.h>
 #include <linux/vs_limit.h>
 #include <asm/errno.h>
 
 #include "cvirt_init.h"
+#include "cacct_init.h"
 #include "limit_init.h"
 #include "sched_init.h"
 
 
+atomic_t vx_global_ctotal      = ATOMIC_INIT(0);
+atomic_t vx_global_cactive     = ATOMIC_INIT(0);
+
+
+/*     now inactive context structures */
+
+static struct hlist_head vx_info_inactive = HLIST_HEAD_INIT;
+
+static spinlock_t vx_info_inactive_lock = SPIN_LOCK_UNLOCKED;
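Contexts that have been unhashed but are still referenced get parked on vx_info_inactive, where the CONFIG_VSERVER_DEBUG dump further down can still reach them. A minimal sketch of the parking pattern used by __unhash_vx_info() and free_vx_info(); the helper names here are made up:

/* sketch only: how an entry moves onto and off the inactive list */
static void park_inactive(struct vx_info *vxi)
{
	unsigned long flags;

	spin_lock_irqsave(&vx_info_inactive_lock, flags);
	hlist_add_head(&vxi->vx_hlist, &vx_info_inactive);
	spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
}

static void unpark_inactive(struct vx_info *vxi)
{
	unsigned long flags;

	spin_lock_irqsave(&vx_info_inactive_lock, flags);
	hlist_del(&vxi->vx_hlist);
	spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
}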
+
+
 /*     __alloc_vx_info()
 
        * allocate an initialized vx_info struct
@@ -51,6 +70,7 @@
 static struct vx_info *__alloc_vx_info(xid_t xid)
 {
        struct vx_info *new = NULL;
+       int cpu;
 
        vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);
 
@@ -60,6 +80,11 @@ static struct vx_info *__alloc_vx_info(xid_t xid)
                return 0;
 
        memset (new, 0, sizeof(struct vx_info));
+#ifdef CONFIG_SMP
+       new->ptr_pc = alloc_percpu(struct _vx_info_pc);
+       if (!new->ptr_pc)
+               goto error;
+#endif
        new->vx_id = xid;
        INIT_HLIST_NODE(&new->vx_hlist);
        atomic_set(&new->vx_usecnt, 0);
@@ -69,8 +94,8 @@ static struct vx_info *__alloc_vx_info(xid_t xid)
        init_waitqueue_head(&new->vx_wait);
 
        /* prepare reaper */
-       get_task_struct(child_reaper);
-       new->vx_reaper = child_reaper;
+       get_task_struct(init_pid_ns.child_reaper);
+       new->vx_reaper = init_pid_ns.child_reaper;
 
        /* rest of init goes here */
        vx_info_init_limit(&new->limit);
@@ -78,9 +103,18 @@ static struct vx_info *__alloc_vx_info(xid_t xid)
        vx_info_init_cvirt(&new->cvirt);
        vx_info_init_cacct(&new->cacct);
 
+       /* per cpu data structures */
+       for_each_possible_cpu(cpu) {
+               vx_info_init_sched_pc(
+                       &vx_per_cpu(new, sched_pc, cpu), cpu);
+               vx_info_init_cvirt_pc(
+                       &vx_per_cpu(new, cvirt_pc, cpu), cpu);
+       }
+
        new->vx_flags = VXF_INIT_SET;
        new->vx_bcaps = CAP_INIT_EFF_SET;
        new->vx_ccaps = 0;
+       new->vx_cap_bset = cap_bset;
 
        new->reboot_cmd = 0;
        new->exit_code = 0;
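Each context now keeps its scheduler and cvirt state per cpu behind ptr_pc on SMP; vx_per_cpu() presumably resolves to a per_cpu_ptr() access on that allocation. A minimal sketch of the underlying kernel per-cpu pattern, using hypothetical example_ctx/example_pc types:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct example_pc {			/* hypothetical per-cpu payload */
	unsigned long ticks;
};

struct example_ctx {			/* stands in for struct vx_info */
	struct example_pc *ptr_pc;
};

static int example_init(struct example_ctx *ctx)
{
	int cpu;

	ctx->ptr_pc = alloc_percpu(struct example_pc);
	if (!ctx->ptr_pc)
		return -ENOMEM;

	/* touch every possible cpu's copy, as the vx_info init loop does */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(ctx->ptr_pc, cpu)->ticks = 0;
	return 0;
}

static void example_exit(struct example_ctx *ctx)
{
	free_percpu(ctx->ptr_pc);	/* matches the __dealloc_vx_info() side */
}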
@@ -88,7 +122,13 @@ static struct vx_info *__alloc_vx_info(xid_t xid)
        vxdprintk(VXD_CBIT(xid, 0),
                "alloc_vx_info(%d) = %p", xid, new);
        vxh_alloc_vx_info(new);
+       atomic_inc(&vx_global_ctotal);
        return new;
+#ifdef CONFIG_SMP
+error:
+       kfree(new);
+       return 0;
+#endif
 }
 
 /*     __dealloc_vx_info()
@@ -97,11 +137,12 @@ static struct vx_info *__alloc_vx_info(xid_t xid)
 
 static void __dealloc_vx_info(struct vx_info *vxi)
 {
+       int cpu;
+
        vxdprintk(VXD_CBIT(xid, 0),
                "dealloc_vx_info(%p)", vxi);
        vxh_dealloc_vx_info(vxi);
 
-       vxi->vx_hlist.next = LIST_POISON1;
        vxi->vx_id = -1;
 
        vx_info_exit_limit(&vxi->limit);
@@ -109,13 +150,25 @@ static void __dealloc_vx_info(struct vx_info *vxi)
        vx_info_exit_cvirt(&vxi->cvirt);
        vx_info_exit_cacct(&vxi->cacct);
 
+       for_each_possible_cpu(cpu) {
+               vx_info_exit_sched_pc(
+                       &vx_per_cpu(vxi, sched_pc, cpu), cpu);
+               vx_info_exit_cvirt_pc(
+                       &vx_per_cpu(vxi, cvirt_pc, cpu), cpu);
+       }
+
        vxi->vx_state |= VXS_RELEASED;
+
+#ifdef CONFIG_SMP
+       free_percpu(vxi->ptr_pc);
+#endif
        kfree(vxi);
+       atomic_dec(&vx_global_ctotal);
 }
 
 static void __shutdown_vx_info(struct vx_info *vxi)
 {
-       struct namespace *namespace;
+       struct nsproxy *nsproxy;
        struct fs_struct *fs;
 
        might_sleep();
@@ -123,11 +176,11 @@ static void __shutdown_vx_info(struct vx_info *vxi)
        vxi->vx_state |= VXS_SHUTDOWN;
        vs_state_change(vxi, VSC_SHUTDOWN);
 
-       namespace = xchg(&vxi->vx_namespace, NULL);
-       if (namespace)
-               put_namespace(namespace);
-
+       nsproxy = xchg(&vxi->vx_nsproxy, NULL);
        fs = xchg(&vxi->vx_fs, NULL);
+
+       if (nsproxy)
+               put_nsproxy(nsproxy);
        if (fs)
                put_fs_struct(fs);
 }
@@ -136,6 +189,8 @@ static void __shutdown_vx_info(struct vx_info *vxi)
 
 void free_vx_info(struct vx_info *vxi)
 {
+       unsigned long flags;
+
        /* context shutdown is mandatory */
        BUG_ON(!vx_info_state(vxi, VXS_SHUTDOWN));
 
@@ -144,9 +199,13 @@ void free_vx_info(struct vx_info *vxi)
 
        BUG_ON(vx_info_state(vxi, VXS_HASHED));
 
-       BUG_ON(vxi->vx_namespace);
+       BUG_ON(vxi->vx_nsproxy);
        BUG_ON(vxi->vx_fs);
 
+       spin_lock_irqsave(&vx_info_inactive_lock, flags);
+       hlist_del(&vxi->vx_hlist);
+       spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
+
        __dealloc_vx_info(vxi);
 }
 
@@ -155,7 +214,8 @@ void free_vx_info(struct vx_info *vxi)
 
 #define VX_HASH_SIZE   13
 
-struct hlist_head vx_info_hash[VX_HASH_SIZE];
+static struct hlist_head vx_info_hash[VX_HASH_SIZE] =
+       { [0 ... VX_HASH_SIZE-1] = HLIST_HEAD_INIT };
 
 static spinlock_t vx_info_hash_lock = SPIN_LOCK_UNLOCKED;
 
@@ -187,6 +247,7 @@ static inline void __hash_vx_info(struct vx_info *vxi)
        vxi->vx_state |= VXS_HASHED;
        head = &vx_info_hash[__hashval(vxi->vx_id)];
        hlist_add_head(&vxi->vx_hlist, head);
+       atomic_inc(&vx_global_cactive);
 }
 
 /*     __unhash_vx_info()
@@ -196,17 +257,25 @@ static inline void __hash_vx_info(struct vx_info *vxi)
 
 static inline void __unhash_vx_info(struct vx_info *vxi)
 {
+       unsigned long flags;
+
+       vxd_assert_lock(&vx_info_hash_lock);
        vxdprintk(VXD_CBIT(xid, 4),
-               "__unhash_vx_info: %p[#%d]", vxi, vxi->vx_id);
-       spin_lock(&vx_info_hash_lock);
+               "__unhash_vx_info: %p[#%d.%d.%d]", vxi, vxi->vx_id,
+               atomic_read(&vxi->vx_usecnt), atomic_read(&vxi->vx_tasks));
        vxh_unhash_vx_info(vxi);
 
        /* context must be hashed */
        BUG_ON(!vx_info_state(vxi, VXS_HASHED));
+       /* but without tasks */
+       BUG_ON(atomic_read(&vxi->vx_tasks));
 
        vxi->vx_state &= ~VXS_HASHED;
-       hlist_del(&vxi->vx_hlist);
-       spin_unlock(&vx_info_hash_lock);
+       hlist_del_init(&vxi->vx_hlist);
+       spin_lock_irqsave(&vx_info_inactive_lock, flags);
+       hlist_add_head(&vxi->vx_hlist, &vx_info_inactive);
+       spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
+       atomic_dec(&vx_global_cactive);
 }
 
 
@@ -284,12 +353,17 @@ static struct vx_info * __loc_vx_info(int id, int *err)
 
        /* dynamic context requested */
        if (id == VX_DYNAMIC_ID) {
+#ifdef CONFIG_VSERVER_DYNAMIC_IDS
                id = __vx_dynamic_id();
                if (!id) {
                        printk(KERN_ERR "no dynamic context available.\n");
                        goto out_unlock;
                }
                new->vx_id = id;
+#else
+               printk(KERN_ERR "dynamic contexts disabled.\n");
+               goto out_unlock;
+#endif
        }
        /* existing context requested */
        else if ((vxi = __lookup_vx_info(id))) {
@@ -344,6 +418,7 @@ static struct vx_info * __create_vx_info(int id)
 
        /* dynamic context requested */
        if (id == VX_DYNAMIC_ID) {
+#ifdef CONFIG_VSERVER_DYNAMIC_IDS
                id = __vx_dynamic_id();
                if (!id) {
                        printk(KERN_ERR "no dynamic context available.\n");
@@ -351,6 +426,11 @@ static struct vx_info * __create_vx_info(int id)
                        goto out_unlock;
                }
                new->vx_id = id;
+#else
+               printk(KERN_ERR "dynamic contexts disabled.\n");
+               vxi = ERR_PTR(-EINVAL);
+               goto out_unlock;
+#endif
        }
        /* static context requested */
        else if ((vxi = __lookup_vx_info(id))) {
@@ -362,6 +442,7 @@ static struct vx_info * __create_vx_info(int id)
                        vxi = ERR_PTR(-EEXIST);
                goto out_unlock;
        }
+#ifdef CONFIG_VSERVER_DYNAMIC_IDS
        /* dynamic xid creation blocker */
        else if (id >= MIN_D_CONTEXT) {
                vxdprintk(VXD_CBIT(xid, 0),
@@ -369,6 +450,7 @@ static struct vx_info * __create_vx_info(int id)
                vxi = ERR_PTR(-EINVAL);
                goto out_unlock;
        }
+#endif
 
        /* new context */
        vxdprintk(VXD_CBIT(xid, 0),
@@ -392,7 +474,9 @@ out_unlock:
 void unhash_vx_info(struct vx_info *vxi)
 {
        __shutdown_vx_info(vxi);
+       spin_lock(&vx_info_hash_lock);
        __unhash_vx_info(vxi);
+       spin_unlock(&vx_info_hash_lock);
        __wakeup_vx_info(vxi);
 }
 
@@ -443,10 +527,23 @@ struct vx_info *lookup_or_create_vx_info(int id)
 
 #ifdef CONFIG_PROC_FS
 
+/*     get_xid_list()
+
+       * get a subset of hashed xids for proc
+       * assumes size is at least one                          */
+
 int get_xid_list(int index, unsigned int *xids, int size)
 {
        int hindex, nr_xids = 0;
 
+       /* only show current and children */
+       if (!vx_check(0, VS_ADMIN|VS_WATCH)) {
+               if (index > 0)
+                       return 0;
+               xids[nr_xids] = vx_current_xid();
+               return 1;
+       }
+
        for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) {
                struct hlist_head *head = &vx_info_hash[hindex];
                struct hlist_node *pos;
@@ -473,6 +570,21 @@ out:
 }
 #endif
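For reference, a hypothetical caller of get_xid_list() in the style of the proc code it serves; the assumption is that the return value is the number of array slots actually filled, which matches the unprivileged early return above:

/* hypothetical: dump the xids visible to the current context */
static void dump_visible_xids(void)
{
	unsigned int xids[32];
	int i, nr;

	nr = get_xid_list(0, xids, 32);
	for (i = 0; i < nr; i++)
		printk(KERN_DEBUG "visible xid: %u\n", xids[i]);
}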
 
+#ifdef CONFIG_VSERVER_DEBUG
+
+void   dump_vx_info_inactive(int level)
+{
+       struct hlist_node *entry, *next;
+
+       hlist_for_each_safe(entry, next, &vx_info_inactive) {
+               struct vx_info *vxi =
+                       list_entry(entry, struct vx_info, vx_hlist);
+
+               dump_vx_info(vxi, level);
+       }
+}
+
+#endif
 
 int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
 {
@@ -480,6 +592,10 @@ int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
 
        if (!p || !vxi)
                BUG();
+
+       if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
+               return -EACCES;
+
        new_user = alloc_uid(vxi->vx_id, p->uid);
        if (!new_user)
                return -ENOMEM;
@@ -494,11 +610,11 @@ int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
        return 0;
 }
 
-void vx_mask_bcaps(struct vx_info *vxi, struct task_struct *p)
+void vx_mask_cap_bset(struct vx_info *vxi, struct task_struct *p)
 {
-       p->cap_effective &= vxi->vx_bcaps;
-       p->cap_inheritable &= vxi->vx_bcaps;
-       p->cap_permitted &= vxi->vx_bcaps;
+       p->cap_effective &= vxi->vx_cap_bset;
+       p->cap_inheritable &= vxi->vx_cap_bset;
+       p->cap_permitted &= vxi->vx_cap_bset;
 }
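vx_mask_cap_bset() clips the task's capability sets against the per-context bounding set instead of vx_bcaps. A small illustration of the effect, assuming the plain 32-bit kernel_cap_t bitmask of this kernel generation and the CAP_TO_MASK() helper from <linux/capability.h>:

static void cap_bset_example(void)
{
	/* illustration only: a bounding set without module loading */
	kernel_cap_t bset = ~CAP_TO_MASK(CAP_SYS_MODULE);
	kernel_cap_t eff  = CAP_TO_MASK(CAP_NET_ADMIN) | CAP_TO_MASK(CAP_SYS_MODULE);

	eff &= bset;		/* CAP_SYS_MODULE dropped, CAP_NET_ADMIN kept */
	(void)eff;
}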
 
 
@@ -525,12 +641,18 @@ static int vx_openfd_task(struct task_struct *tsk)
        return total;
 }
 
+
+/*     for *space compatibility */
+
+asmlinkage long sys_unshare(unsigned long);
+
 /*
  *     migrate task to new context
  *     gets vxi, puts old_vxi on change
+ *     optionally unshares namespaces (hack)
  */
 
-int vx_migrate_task(struct task_struct *p, struct vx_info *vxi)
+int vx_migrate_task(struct task_struct *p, struct vx_info *vxi, int unshare)
 {
        struct vx_info *old_vxi;
        int ret = 0;
@@ -538,14 +660,21 @@ int vx_migrate_task(struct task_struct *p, struct vx_info *vxi)
        if (!p || !vxi)
                BUG();
 
-       old_vxi = task_get_vx_info(p);
-       if (old_vxi == vxi)
-               goto out;
-
        vxdprintk(VXD_CBIT(xid, 5),
                "vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
                vxi->vx_id, atomic_read(&vxi->vx_usecnt));
 
+       if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0) &&
+               !vx_info_flags(vxi, VXF_STATE_SETUP, 0))
+               return -EACCES;
+
+       if (vx_info_state(vxi, VXS_SHUTDOWN))
+               return -EFAULT;
+
+       old_vxi = task_get_vx_info(p);
+       if (old_vxi == vxi)
+               goto out;
+
        if (!(ret = vx_migrate_user(p, vxi))) {
                int openfd;
 
@@ -555,15 +684,19 @@ int vx_migrate_task(struct task_struct *p, struct vx_info *vxi)
                if (old_vxi) {
                        atomic_dec(&old_vxi->cvirt.nr_threads);
                        atomic_dec(&old_vxi->cvirt.nr_running);
-                       atomic_dec(&old_vxi->limit.rcur[RLIMIT_NPROC]);
+                       __rlim_dec(&old_vxi->limit, RLIMIT_NPROC);
                        /* FIXME: what about the struct files here? */
-                       atomic_sub(openfd, &old_vxi->limit.rcur[VLIMIT_OPENFD]);
+                       __rlim_sub(&old_vxi->limit, VLIMIT_OPENFD, openfd);
+                       /* account for the executable */
+                       __rlim_dec(&old_vxi->limit, VLIMIT_DENTRY);
                }
                atomic_inc(&vxi->cvirt.nr_threads);
                atomic_inc(&vxi->cvirt.nr_running);
-               atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
+               __rlim_inc(&vxi->limit, RLIMIT_NPROC);
                /* FIXME: what about the struct files here? */
-               atomic_add(openfd, &vxi->limit.rcur[VLIMIT_OPENFD]);
+               __rlim_add(&vxi->limit, VLIMIT_OPENFD, openfd);
+               /* account for the executable */
+               __rlim_inc(&vxi->limit, VLIMIT_DENTRY);
 
                if (old_vxi) {
                        release_vx_info(old_vxi, p);
@@ -577,8 +710,14 @@ int vx_migrate_task(struct task_struct *p, struct vx_info *vxi)
                        "moved task %p into vxi:%p[#%d]",
                        p, vxi, vxi->vx_id);
 
-               vx_mask_bcaps(vxi, p);
+               vx_mask_cap_bset(vxi, p);
                task_unlock(p);
+
+               /* hack for *spaces to provide compatibility */
+               if (unshare) {
+                       ret = sys_unshare(CLONE_NEWUTS|CLONE_NEWIPC);
+                       vx_set_space(vxi, CLONE_NEWUTS|CLONE_NEWIPC);
+               }
        }
 out:
        put_vx_info(old_vxi);
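The unshare argument is only passed as true from vc_ctx_create() further down when no creation data was supplied (the (!data) call), so a context started that way gets private UTS and IPC namespaces, while a plain migrate leaves the caller's namespaces untouched. Roughly the userspace-visible analogue, assuming a 2.6.19+ kernel and headers that define the CLONE_NEW* flags:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* what the in-kernel hack amounts to for the context's first task */
	if (unshare(CLONE_NEWUTS | CLONE_NEWIPC) == -1) {
		perror("unshare");
		return 1;
	}
	return 0;
}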
@@ -631,6 +770,7 @@ void vx_exit_init(struct vx_info *vxi, struct task_struct *p, int code)
        vxi->vx_initpid = 0;
 }
 
+
 void vx_set_persistent(struct vx_info *vxi)
 {
        vxdprintk(VXD_CBIT(xid, 6),
@@ -673,7 +813,7 @@ void        exit_vx_info(struct task_struct *p, int code)
        }
 }
 
-void    exit_vx_info_early(struct task_struct *p, int code)
+void   exit_vx_info_early(struct task_struct *p, int code)
 {
        struct vx_info *vxi = p->vx_info;
 
@@ -681,7 +821,7 @@ void    exit_vx_info_early(struct task_struct *p, int code)
                if (vxi->vx_initpid == p->tgid)
                        vx_exit_init(vxi, p, code);
                if (vxi->vx_reaper == p)
-                       vx_set_reaper(vxi, child_reaper);
+                       vx_set_reaper(vxi, init_pid_ns.child_reaper);
        }
 }
 
@@ -700,7 +840,7 @@ int vc_task_xid(uint32_t id, void __user *data)
        if (id) {
                struct task_struct *tsk;
 
-               if (!vx_check(0, VX_ADMIN|VX_WATCH))
+               if (!vx_check(0, VS_ADMIN|VS_WATCH))
                        return -EPERM;
 
                read_lock(&tasklist_lock);
@@ -714,23 +854,25 @@ int vc_task_xid(uint32_t id, void __user *data)
 }
 
 
-int vc_vx_info(uint32_t id, void __user *data)
+int vc_vx_info(struct vx_info *vxi, void __user *data)
 {
-       struct vx_info *vxi;
        struct vcmd_vx_info_v0 vc_data;
 
-       if (!vx_check(0, VX_ADMIN))
-               return -ENOSYS;
-       if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RESOURCE))
-               return -EPERM;
-
-       vxi = lookup_vx_info(id);
-       if (!vxi)
-               return -ESRCH;
-
        vc_data.xid = vxi->vx_id;
        vc_data.initpid = vxi->vx_initpid;
-       put_vx_info(vxi);
+
+       if (copy_to_user (data, &vc_data, sizeof(vc_data)))
+               return -EFAULT;
+       return 0;
+}
+
+
+int vc_ctx_stat(struct vx_info *vxi, void __user *data)
+{
+       struct vcmd_ctx_stat_v0 vc_data;
+
+       vc_data.usecnt = atomic_read(&vxi->vx_usecnt);
+       vc_data.tasks = atomic_read(&vxi->vx_tasks);
 
        if (copy_to_user (data, &vc_data, sizeof(vc_data)))
                return -EFAULT;
@@ -746,8 +888,6 @@ int vc_ctx_create(uint32_t xid, void __user *data)
        struct vx_info *new_vxi;
        int ret;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
        if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
                return -EFAULT;
 
@@ -767,7 +907,7 @@ int vc_ctx_create(uint32_t xid, void __user *data)
        if (vs_state_change(new_vxi, VSC_STARTUP))
                goto out;
 
-       ret = vx_migrate_task(current, new_vxi);
+       ret = vx_migrate_task(current, new_vxi, (!data));
        if (ret)
                goto out;
 
@@ -784,139 +924,172 @@ out:
 }
 
 
-int vc_ctx_migrate(uint32_t id, void __user *data)
+int vc_ctx_migrate(struct vx_info *vxi, void __user *data)
 {
        struct vcmd_ctx_migrate vc_data = { .flagword = 0 };
-       struct vx_info *vxi;
+       int ret;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
        if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
                return -EFAULT;
 
-       /* dirty hack until Spectator becomes a cap */
-       if (id == 1) {
-               current->xid = 1;
-               return 0;
-       }
-
-       vxi = lookup_vx_info(id);
-       if (!vxi)
-               return -ESRCH;
-       vx_migrate_task(current, vxi);
+       ret = vx_migrate_task(current, vxi, 0);
+       if (ret)
+               return ret;
        if (vc_data.flagword & VXM_SET_INIT)
-               vx_set_init(vxi, current);
+               ret = vx_set_init(vxi, current);
+       if (ret)
+               return ret;
        if (vc_data.flagword & VXM_SET_REAPER)
-               vx_set_reaper(vxi, current);
-       put_vx_info(vxi);
-       return 0;
+               ret = vx_set_reaper(vxi, current);
+       return ret;
 }
 
 
-int vc_get_cflags(uint32_t id, void __user *data)
+int vc_get_cflags(struct vx_info *vxi, void __user *data)
 {
-       struct vx_info *vxi;
        struct vcmd_ctx_flags_v0 vc_data;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
-       vxi = lookup_vx_info(id);
-       if (!vxi)
-               return -ESRCH;
-
        vc_data.flagword = vxi->vx_flags;
 
        /* special STATE flag handling */
-       vc_data.mask = vx_mask_flags(~0UL, vxi->vx_flags, VXF_ONE_TIME);
-
-       put_vx_info(vxi);
+       vc_data.mask = vs_mask_flags(~0UL, vxi->vx_flags, VXF_ONE_TIME);
 
        if (copy_to_user (data, &vc_data, sizeof(vc_data)))
                return -EFAULT;
        return 0;
 }
 
-int vc_set_cflags(uint32_t id, void __user *data)
+int vc_set_cflags(struct vx_info *vxi, void __user *data)
 {
-       struct vx_info *vxi;
        struct vcmd_ctx_flags_v0 vc_data;
        uint64_t mask, trigger;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
        if (copy_from_user (&vc_data, data, sizeof(vc_data)))
                return -EFAULT;
 
-       vxi = lookup_vx_info(id);
-       if (!vxi)
-               return -ESRCH;
-
        /* special STATE flag handling */
-       mask = vx_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
+       mask = vs_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
        trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword);
 
        if (vxi == current->vx_info) {
                if (trigger & VXF_STATE_SETUP)
-                       vx_mask_bcaps(vxi, current);
+                       vx_mask_cap_bset(vxi, current);
                if (trigger & VXF_STATE_INIT) {
-                       vx_set_init(vxi, current);
-                       vx_set_reaper(vxi, current);
+                       int ret;
+
+                       ret = vx_set_init(vxi, current);
+                       if (ret)
+                               return ret;
+                       ret = vx_set_reaper(vxi, current);
+                       if (ret)
+                               return ret;
                }
        }
 
-       vxi->vx_flags = vx_mask_flags(vxi->vx_flags,
+       vxi->vx_flags = vs_mask_flags(vxi->vx_flags,
                vc_data.flagword, mask);
        if (trigger & VXF_PERSISTENT)
                vx_update_persistent(vxi);
 
-       put_vx_info(vxi);
        return 0;
 }
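The trigger word marks every bit that would actually change under the permitted mask, so only real transitions of VXF_STATE_SETUP, VXF_STATE_INIT and VXF_PERSISTENT take effect. A worked example with made-up values, assuming vs_mask_flags(flags, new, mask) keeps the bits outside mask and takes new inside it, i.e. (flags & ~mask) | (new & mask):

#include <stdint.h>

static void flag_mask_example(void)
{
	uint64_t flags = 0x0f;	/* current vx_flags */
	uint64_t word  = 0x33;	/* requested flagword */
	uint64_t mask  = 0xf0;	/* bits the caller may change */

	/* bits that actually flip under the mask */
	uint64_t trigger = (mask & flags) ^ (mask & word);	/* 0x30 */

	/* assumed vs_mask_flags() result: high nibble from word, low nibble kept */
	uint64_t result = (flags & ~mask) | (word & mask);	/* 0x3f */

	(void)trigger;
	(void)result;
}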
 
-int vc_get_ccaps(uint32_t id, void __user *data)
+static int do_get_caps(struct vx_info *vxi, uint64_t *bcaps, uint64_t *ccaps)
+{
+       if (bcaps)
+               *bcaps = vxi->vx_bcaps;
+       if (ccaps)
+               *ccaps = vxi->vx_ccaps;
+
+       return 0;
+}
+
+int vc_get_ccaps_v0(struct vx_info *vxi, void __user *data)
 {
-       struct vx_info *vxi;
        struct vcmd_ctx_caps_v0 vc_data;
+       int ret;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
+       ret = do_get_caps(vxi, &vc_data.bcaps, &vc_data.ccaps);
+       if (ret)
+               return ret;
+       vc_data.cmask = ~0UL;
 
-       vxi = lookup_vx_info(id);
-       if (!vxi)
-               return -ESRCH;
+       if (copy_to_user (data, &vc_data, sizeof(vc_data)))
+               return -EFAULT;
+       return 0;
+}
 
-       vc_data.bcaps = vxi->vx_bcaps;
-       vc_data.ccaps = vxi->vx_ccaps;
+int vc_get_ccaps(struct vx_info *vxi, void __user *data)
+{
+       struct vcmd_ctx_caps_v1 vc_data;
+       int ret;
+
+       ret = do_get_caps(vxi, NULL, &vc_data.ccaps);
+       if (ret)
+               return ret;
        vc_data.cmask = ~0UL;
-       put_vx_info(vxi);
 
        if (copy_to_user (data, &vc_data, sizeof(vc_data)))
                return -EFAULT;
        return 0;
 }
 
-int vc_set_ccaps(uint32_t id, void __user *data)
+static int do_set_caps(struct vx_info *vxi,
+       uint64_t bcaps, uint64_t bmask, uint64_t ccaps, uint64_t cmask)
+{
+       vxi->vx_bcaps = vs_mask_flags(vxi->vx_bcaps, bcaps, bmask);
+       vxi->vx_ccaps = vs_mask_flags(vxi->vx_ccaps, ccaps, cmask);
+
+       return 0;
+}
+
+int vc_set_ccaps_v0(struct vx_info *vxi, void __user *data)
 {
-       struct vx_info *vxi;
        struct vcmd_ctx_caps_v0 vc_data;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
        if (copy_from_user (&vc_data, data, sizeof(vc_data)))
                return -EFAULT;
 
-       vxi = lookup_vx_info(id);
-       if (!vxi)
-               return -ESRCH;
-
-       vxi->vx_bcaps &= vc_data.bcaps;
-       vxi->vx_ccaps = vx_mask_flags(vxi->vx_ccaps,
+       /* simulate old &= behaviour for bcaps */
+       return do_set_caps(vxi, 0, ~vc_data.bcaps,
                vc_data.ccaps, vc_data.cmask);
-       put_vx_info(vxi);
+}
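The (0, ~vc_data.bcaps) pair reproduces the old in-place masking shown in the removed lines, assuming vs_mask_flags(flags, new, mask) = (flags & ~mask) | (new & mask):

/*
 * vx_bcaps = vs_mask_flags(vx_bcaps, 0, ~bcaps)
 *          = (vx_bcaps & ~(~bcaps)) | (0 & ~bcaps)
 *          = vx_bcaps & bcaps
 */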
+
+int vc_set_ccaps(struct vx_info *vxi, void __user *data)
+{
+       struct vcmd_ctx_caps_v1 vc_data;
+
+       if (copy_from_user (&vc_data, data, sizeof(vc_data)))
+               return -EFAULT;
+
+       return do_set_caps(vxi, 0, 0, vc_data.ccaps, vc_data.cmask);
+}
+
+int vc_get_bcaps(struct vx_info *vxi, void __user *data)
+{
+       struct vcmd_bcaps vc_data;
+       int ret;
+
+       ret = do_get_caps(vxi, &vc_data.bcaps, NULL);
+       if (ret)
+               return ret;
+       vc_data.bmask = ~0UL;
+
+       if (copy_to_user (data, &vc_data, sizeof(vc_data)))
+               return -EFAULT;
        return 0;
 }
 
+int vc_set_bcaps(struct vx_info *vxi, void __user *data)
+{
+       struct vcmd_bcaps vc_data;
+
+       if (copy_from_user (&vc_data, data, sizeof(vc_data)))
+               return -EFAULT;
+
+       return do_set_caps(vxi, vc_data.bcaps, vc_data.bmask, 0, 0);
+}
+
 #include <linux/module.h>
 
 EXPORT_SYMBOL_GPL(free_vx_info);