/*
 *  linux/kernel/vserver/context.c
 *
 *  Virtual Server: Context Support
 *
 *  Copyright (C) 2003-2005  Herbert Pötzl
 *
 *  V0.01  context helper
 *  V0.02  vx_ctx_kill syscall command
 *  V0.03  replaced context_info calls
 *  V0.04  redesign of struct (de)alloc
 *  V0.05  rlimit basic implementation
 *  V0.06  task_xid and info commands
 *  V0.07  context flags and caps
 *  V0.08  switch to RCU based hash
 *  V0.09  revert to non RCU for now
 *  V0.10  and back to working RCU hash
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/namespace.h>

#include <linux/sched.h>
#include <linux/vserver/network.h>
#include <linux/vserver/legacy.h>
#include <linux/vserver/limit.h>
#include <linux/vserver/debug.h>
#include <linux/vs_context.h>
#include <linux/vserver/context_cmd.h>
#include <linux/ckrm_events.h>	/* needed for ckrm_cb_xid() */

#include <asm/errno.h>

#include "cvirt_init.h"
#include "limit_init.h"
#include "sched_init.h"

/*	__alloc_vx_info()

	* allocate an initialized vx_info struct
	* doesn't make it visible (hash)			*/

static struct vx_info *__alloc_vx_info(xid_t xid)
{
	struct vx_info *new = NULL;

	vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);

	/* would this benefit from a slab cache? */
	new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
	if (!new)
		return NULL;

	memset(new, 0, sizeof(struct vx_info));
	new->vx_id = xid;
	INIT_RCU_HEAD(&new->vx_rcu);
	INIT_HLIST_NODE(&new->vx_hlist);
	atomic_set(&new->vx_refcnt, 0);
	atomic_set(&new->vx_usecnt, 0);
	new->vx_parent = NULL;
	new->vx_state = 0;
	new->vx_lock = SPIN_LOCK_UNLOCKED;
	init_waitqueue_head(&new->vx_exit);

	/* rest of init goes here */
	vx_info_init_limit(&new->limit);
	vx_info_init_sched(&new->sched);
	vx_info_init_cvirt(&new->cvirt);
	vx_info_init_cacct(&new->cacct);

	new->vx_flags = VXF_STATE_SETUP | VXF_STATE_INIT;
	new->vx_bcaps = CAP_INIT_EFF_SET;
	new->vx_ccaps = 0;

	vxdprintk(VXD_CBIT(xid, 0),
		"alloc_vx_info(%d) = %p", xid, new);
	vxh_alloc_vx_info(new);
	return new;
}
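
/*
 * Lifecycle sketch (illustrative, not a prescription): a fresh
 * vx_info starts with vx_usecnt == 0 and vx_refcnt == 0; the hash
 * takes its own reference in __hash_vx_info(), and ordinary users
 * pair get_vx_info()/put_vx_info() around any access:
 *
 *	struct vx_info *vxi = locate_vx_info(id);
 *	if (vxi) {
 *		... use vxi ...
 *		put_vx_info(vxi);
 *	}
 */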

/*	__dealloc_vx_info()

	* final disposal of vx_info				*/

static void __dealloc_vx_info(struct vx_info *vxi)
{
	vxdprintk(VXD_CBIT(xid, 0),
		"dealloc_vx_info(%p)", vxi);
	vxh_dealloc_vx_info(vxi);

	vxi->vx_hlist.next = LIST_POISON1;
	vxi->vx_id = -1;

	vx_info_exit_limit(&vxi->limit);
	vx_info_exit_sched(&vxi->sched);
	vx_info_exit_cvirt(&vxi->cvirt);
	vx_info_exit_cacct(&vxi->cacct);

	BUG_ON(atomic_read(&vxi->vx_usecnt));
	BUG_ON(atomic_read(&vxi->vx_refcnt));

	BUG_ON(vx_info_state(vxi, VXS_HASHED));
	// BUG_ON(!vx_state(vxi, VXS_DEFUNCT));

	vxi->vx_state |= VXS_RELEASED;
	kfree(vxi);
}

static inline int __free_vx_info(struct vx_info *vxi)
{
	int usecnt, refcnt;

	BUG_ON(!vxi);

	usecnt = atomic_read(&vxi->vx_usecnt);
	BUG_ON(usecnt < 0);

	refcnt = atomic_read(&vxi->vx_refcnt);
	BUG_ON(refcnt < 0);

	if (!usecnt)
		__dealloc_vx_info(vxi);
	return usecnt;
}

static void __rcu_put_vx_info(struct rcu_head *head)
{
	struct vx_info *vxi = container_of(head, struct vx_info, vx_rcu);

	vxdprintk(VXD_CBIT(xid, 3),
		"__rcu_put_vx_info(%p[#%d]): %d,%d",
		vxi, vxi->vx_id,
		atomic_read(&vxi->vx_usecnt),
		atomic_read(&vxi->vx_refcnt));
	put_vx_info(vxi);
}
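
/*
 * Note: __unhash_vx_info() below hands this callback to call_rcu(),
 * so the reference the hash held is only dropped once all RCU
 * readers that might still see the stale hlist entry are done.
 */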

void __shutdown_vx_info(struct vx_info *vxi)
{
	struct namespace *namespace;
	struct fs_struct *fs;

	might_sleep();

	namespace = xchg(&vxi->vx_namespace, NULL);
	if (namespace)
		put_namespace(namespace);

	fs = xchg(&vxi->vx_fs, NULL);
	if (fs)
		put_fs_struct(fs);
}

/* exported stuff */

void free_vx_info(struct vx_info *vxi)
{
	/* context shutdown is mandatory */
	// BUG_ON(vxi->vx_state != VXS_SHUTDOWN);

	BUG_ON(vxi->vx_state & VXS_HASHED);

	BUG_ON(vxi->vx_namespace);
	BUG_ON(vxi->vx_fs);

	BUG_ON(__free_vx_info(vxi));
}

/*	hash table for vx_info hash */

#define VX_HASH_SIZE	13

struct hlist_head vx_info_hash[VX_HASH_SIZE];

static spinlock_t vx_info_hash_lock = SPIN_LOCK_UNLOCKED;

static inline unsigned int __hashval(xid_t xid)
{
	return (xid % VX_HASH_SIZE);
}
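
/*
 * Illustrative: with VX_HASH_SIZE == 13, xid 1 maps to bucket 1,
 * xid 13 to bucket 0, and xid 42 to bucket 3 (42 % 13 == 3).
 */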

/*	__hash_vx_info()

	* add the vxi to the global hash table
	* requires the hash_lock to be held			*/

static inline void __hash_vx_info(struct vx_info *vxi)
{
	struct hlist_head *head;

	vxdprintk(VXD_CBIT(xid, 4),
		"__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
	vxh_hash_vx_info(vxi);

	get_vx_info(vxi);
	vxi->vx_state |= VXS_HASHED;
	head = &vx_info_hash[__hashval(vxi->vx_id)];
	hlist_add_head_rcu(&vxi->vx_hlist, head);
}

/*	__unhash_vx_info()

	* remove the vxi from the global hash table
	* requires the hash_lock to be held			*/

static inline void __unhash_vx_info(struct vx_info *vxi)
{
	vxdprintk(VXD_CBIT(xid, 4),
		"__unhash_vx_info: %p[#%d]", vxi, vxi->vx_id);
	vxh_unhash_vx_info(vxi);

	vxi->vx_state &= ~VXS_HASHED;
	hlist_del_rcu(&vxi->vx_hlist);

	call_rcu(&vxi->vx_rcu, __rcu_put_vx_info);
}

/*	__lookup_vx_info()

	* requires the rcu_read_lock()
	* doesn't increment the vx_refcnt			*/

static inline struct vx_info *__lookup_vx_info(xid_t xid)
{
	struct hlist_head *head = &vx_info_hash[__hashval(xid)];
	struct hlist_node *pos;
	struct vx_info *vxi;

	hlist_for_each_rcu(pos, head) {
		vxi = hlist_entry(pos, struct vx_info, vx_hlist);

		if ((vxi->vx_id == xid) &&
			vx_info_state(vxi, VXS_HASHED))
			goto found;
	}
	vxi = NULL;
found:
	vxdprintk(VXD_CBIT(xid, 0),
		"__lookup_vx_info(#%u): %p[#%u]",
		xid, vxi, vxi ? vxi->vx_id : 0);
	vxh_lookup_vx_info(xid, vxi);
	return vxi;
}

/*	__vx_dynamic_id()

	* find unused dynamic xid
	* requires the rcu_read_lock()
	* requires the hash_lock to be held			*/

static inline xid_t __vx_dynamic_id(void)
{
	static xid_t seq = MAX_S_CONTEXT;
	xid_t barrier = seq;

	do {
		if (++seq > MAX_S_CONTEXT)
			seq = MIN_D_CONTEXT;
		if (!__lookup_vx_info(seq)) {
			vxdprintk(VXD_CBIT(xid, 4),
				"__vx_dynamic_id: [#%d]", seq);
			return seq;
		}
	} while (barrier != seq);
	return 0;
}
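
/*
 * Search sketch: `seq' remembers the last xid handed out; the loop
 * walks the dynamic range once, wrapping from MAX_S_CONTEXT back to
 * MIN_D_CONTEXT, and `barrier' terminates it after a full cycle so
 * an exhausted range yields 0 instead of looping forever.
 */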

/*	__loc_vx_info()

	* locate or create the requested context
	* get() it and if new hash it				*/

static struct vx_info *__loc_vx_info(int id, int *err)
{
	struct vx_info *new, *vxi = NULL;

	vxdprintk(VXD_CBIT(xid, 1), "loc_vx_info(%d)*", id);

	if (!(new = __alloc_vx_info(id))) {
		*err = -ENOMEM;
		return NULL;
	}

	/* FIXME is this required at all ? */
	rcu_read_lock();
	/* required to make dynamic xids unique */
	spin_lock(&vx_info_hash_lock);

	/* dynamic context requested */
	if (id == VX_DYNAMIC_ID) {
		id = __vx_dynamic_id();
		if (!id) {
			printk(KERN_ERR "no dynamic context available.\n");
			*err = -EAGAIN;	/* dynamic range is exhausted */
			goto out_unlock;
		}
		new->vx_id = id;
	}
	/* existing context requested */
	else if ((vxi = __lookup_vx_info(id))) {
		/* context in setup is not available */
		if (vxi->vx_flags & VXF_STATE_SETUP) {
			vxdprintk(VXD_CBIT(xid, 0),
				"loc_vx_info(%d) = %p (not available)", id, vxi);
			vxi = NULL;
			*err = -EBUSY;
		} else {
			vxdprintk(VXD_CBIT(xid, 0),
				"loc_vx_info(%d) = %p (found)", id, vxi);
			get_vx_info(vxi);
			*err = 0;
		}
		goto out_unlock;
	}

	/* new context requested */
	vxdprintk(VXD_CBIT(xid, 0),
		"loc_vx_info(%d) = %p (new)", id, new);
	__hash_vx_info(get_vx_info(new));
	vxi = new, new = NULL;
	*err = 1;

out_unlock:
	spin_unlock(&vx_info_hash_lock);
	rcu_read_unlock();
	vxh_loc_vx_info(id, vxi);
	if (new)
		__dealloc_vx_info(new);
	return vxi;
}
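
/*
 * Return convention of __loc_vx_info(), as implemented above:
 * NULL with *err < 0 on failure, an existing context with *err == 0,
 * a freshly hashed one with *err == 1; any non-NULL result carries
 * a use count the caller must drop with put_vx_info().
 */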

/*	exported stuff						*/

void unhash_vx_info(struct vx_info *vxi)
{
	__shutdown_vx_info(vxi);
	spin_lock(&vx_info_hash_lock);
	__unhash_vx_info(vxi);
	spin_unlock(&vx_info_hash_lock);
}

/*	locate_vx_info()

	* search for a vx_info and get() it
	* negative id means current				*/

struct vx_info *locate_vx_info(int id)
{
	struct vx_info *vxi;

	if (id < 0) {
		vxi = get_vx_info(current->vx_info);
	} else {
		rcu_read_lock();
		vxi = get_vx_info(__lookup_vx_info(id));
		rcu_read_unlock();
	}
	return vxi;
}
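
/*
 * Caller sketch (mirrors the vc_* handlers below): every successful
 * locate_vx_info() must be balanced by put_vx_info():
 *
 *	struct vx_info *vxi = locate_vx_info(id);
 *	if (!vxi)
 *		return -ESRCH;
 *	... inspect or modify vxi ...
 *	put_vx_info(vxi);
 */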

/*	vx_info_is_hashed()

	* verify that xid is still hashed			*/

int vx_info_is_hashed(xid_t xid)
{
	int hashed;

	rcu_read_lock();
	hashed = (__lookup_vx_info(xid) != NULL);
	rcu_read_unlock();
	return hashed;
}

#ifdef	CONFIG_VSERVER_LEGACY

#if 0
struct vx_info *alloc_vx_info(xid_t xid)
{
	return __alloc_vx_info(xid);
}
#endif

struct vx_info *locate_or_create_vx_info(int id)
{
	int err;

	return __loc_vx_info(id, &err);
}

#endif

#ifdef	CONFIG_PROC_FS

int get_xid_list(int index, unsigned int *xids, int size)
{
	int hindex, nr_xids = 0;

	rcu_read_lock();
	for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) {
		struct hlist_head *head = &vx_info_hash[hindex];
		struct hlist_node *pos;

		hlist_for_each_rcu(pos, head) {
			struct vx_info *vxi;

			if (--index > 0)
				continue;

			vxi = hlist_entry(pos, struct vx_info, vx_hlist);
			xids[nr_xids] = vxi->vx_id;
			if (++nr_xids >= size)
				goto out;
		}
	}
out:
	rcu_read_unlock();
	return nr_xids;
}
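
/*
 * Semantics sketch: an `index' greater than one skips the first
 * (index - 1) entries of the hash walk, and `size' bounds how many
 * xids are stored; since the walk is only RCU-protected, the result
 * is a best-effort snapshot, not an atomic listing.
 */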
#endif

int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
{
	struct user_struct *new_user, *old_user;

	BUG_ON(!p || !vxi);

	new_user = alloc_uid(vxi->vx_id, p->uid);
	if (!new_user)
		return -ENOMEM;

	old_user = p->user;
	if (new_user != old_user) {
		atomic_inc(&new_user->processes);
		atomic_dec(&old_user->processes);
		p->user = new_user;
	}
	free_uid(old_user);
	return 0;
}

void vx_mask_bcaps(struct task_struct *p)
{
	struct vx_info *vxi = p->vx_info;

	p->cap_effective &= vxi->vx_bcaps;
	p->cap_inheritable &= vxi->vx_bcaps;
	p->cap_permitted &= vxi->vx_bcaps;
}
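
/*
 * Illustrative: if vx_bcaps lacks CAP_SYS_ADMIN, the three AND
 * operations above strip that capability from the task, so a task
 * holding CAP_SYS_ADMIN and CAP_NET_RAW is left with CAP_NET_RAW
 * only; bits are only ever removed here, never added.
 */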

#include <linux/file.h>

static inline int vx_nofiles_task(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;
	unsigned long *obptr;
	int count, total;

	spin_lock(&files->file_lock);
	obptr = files->open_fds->fds_bits;
	count = files->max_fds / (sizeof(unsigned long) * 8);
	for (total = 0; count > 0; count--) {
		if (*obptr)
			total += hweight_long(*obptr);
		obptr++;
	}
	spin_unlock(&files->file_lock);
	return total;
}
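
/*
 * Counting sketch: open_fds is a bitmap with one bit per descriptor,
 * so each unsigned long covers BITS_PER_LONG fds and hweight_long()
 * popcounts a word at a time; with only fds 0, 1 and 2 open, word 0
 * equals 0x7 and the total comes out as 3.
 */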

#if 0

static inline int vx_openfd_task(struct task_struct *tsk)
{
	struct files_struct *files = tsk->files;
	const unsigned long *bptr;
	int count, total;

	spin_lock(&files->file_lock);
	bptr = files->open_fds->fds_bits;
	count = files->max_fds / (sizeof(unsigned long) * 8);
	for (total = 0; count > 0; count--) {
		if (*bptr)
			total += hweight_long(*bptr);
		bptr++;
	}
	spin_unlock(&files->file_lock);
	return total;
}

#endif

/*
 *	migrate task to new context
 *	gets vxi, puts old_vxi on change
 */

int vx_migrate_task(struct task_struct *p, struct vx_info *vxi)
{
	struct vx_info *old_vxi;
	int ret = 0;

	BUG_ON(!p || !vxi);

	old_vxi = task_get_vx_info(p);
	if (old_vxi == vxi)
		goto out;

	vxdprintk(VXD_CBIT(xid, 5),
		"vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
		vxi->vx_id, atomic_read(&vxi->vx_usecnt));

	if (!(ret = vx_migrate_user(p, vxi))) {
		int nofiles;

		task_lock(p);
		// openfd = vx_openfd_task(p);
		nofiles = vx_nofiles_task(p);

		if (old_vxi) {
			atomic_dec(&old_vxi->cvirt.nr_threads);
			atomic_dec(&old_vxi->cvirt.nr_running);
			atomic_dec(&old_vxi->limit.rcur[RLIMIT_NPROC]);
			/* FIXME: what about the struct files here? */
			// atomic_sub(nofiles, &old_vxi->limit.rcur[RLIMIT_NOFILE]);
			// atomic_sub(openfd, &old_vxi->limit.rcur[RLIMIT_OPENFD]);
		}
		atomic_inc(&vxi->cvirt.nr_threads);
		atomic_inc(&vxi->cvirt.nr_running);
		atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
		/* FIXME: what about the struct files here? */
		// atomic_add(nofiles, &vxi->limit.rcur[RLIMIT_NOFILE]);
		// atomic_add(openfd, &vxi->limit.rcur[RLIMIT_OPENFD]);

		vxdprintk(VXD_CBIT(xid, 5),
			"moved task %p into vxi:%p[#%d]",
			p, vxi, vxi->vx_id);

		/* should be handled in set_vx_info !! */
		if (old_vxi)
			clr_vx_info(&p->vx_info);
		set_vx_info(&p->vx_info, vxi);
		p->xid = vxi->vx_id;
		vx_mask_bcaps(p);
		task_unlock(p);

		/* obsoleted by clr/set */
		// put_vx_info(old_vxi);
	}
out:
	ckrm_cb_xid(p);

	put_vx_info(old_vxi);
	return ret;
}
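
/*
 * Reference sketch for the path above: task_get_vx_info() takes a
 * use count on old_vxi, set_vx_info() installs vxi with its own
 * reference for the task, and the final put_vx_info(old_vxi) pairs
 * with the initial get -- hence "gets vxi, puts old_vxi on change".
 */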

int vx_set_init(struct vx_info *vxi, struct task_struct *p)
{
	if (!vxi)
		return -EINVAL;
	if (vxi->vx_initpid)
		return -EPERM;

	vxdprintk(VXD_CBIT(xid, 6),
		"vx_set_init(%p[#%d],%p[#%d,%d,%d])",
		vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);

	vxi->vx_initpid = p->tgid;
	return 0;
}

/* vserver syscall commands below here */

/* task xid and vx_info functions */

#include <asm/uaccess.h>

int vc_task_xid(uint32_t id, void __user *data)
{
	xid_t xid;

	if (id) {
		struct task_struct *tsk;

		if (!vx_check(0, VX_ADMIN|VX_WATCH))
			return -EPERM;

		read_lock(&tasklist_lock);
		tsk = find_task_by_real_pid(id);
		xid = (tsk) ? tsk->xid : -ESRCH;
		read_unlock(&tasklist_lock);
	} else
		xid = vx_current_xid();
	return xid;
}

int vc_vx_info(uint32_t id, void __user *data)
{
	struct vx_info *vxi;
	struct vcmd_vx_info_v0 vc_data;

	if (!vx_check(0, VX_ADMIN))
		return -ENOSYS;
	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RESOURCE))
		return -EPERM;

	vxi = locate_vx_info(id);
	if (!vxi)
		return -ESRCH;

	vc_data.xid = vxi->vx_id;
	vc_data.initpid = vxi->vx_initpid;
	put_vx_info(vxi);

	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

/* context functions */

int vc_ctx_create(uint32_t xid, void __user *data)
{
	struct vx_info *new_vxi;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((xid >= MIN_D_CONTEXT) && (xid != VX_DYNAMIC_ID))
		return -EINVAL;

	if (xid < 1)
		return -EINVAL;

	new_vxi = __loc_vx_info(xid, &ret);
	if (!new_vxi)
		return ret;
	if (!(new_vxi->vx_flags & VXF_STATE_SETUP)) {
		ret = -EEXIST;
		goto out_put;
	}

	ret = new_vxi->vx_id;
	vx_migrate_task(current, new_vxi);
	/* if this fails, we might end up with a hashed vx_info */
out_put:
	put_vx_info(new_vxi);
	return ret;
}
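
/*
 * Resulting convention: vc_ctx_create() returns the new xid on
 * success, -EEXIST if the context had already left the setup state,
 * or the negative error propagated from __loc_vx_info().
 */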

int vc_ctx_migrate(uint32_t id, void __user *data)
{
	struct vx_info *vxi;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* dirty hack until Spectator becomes a cap */
	if (id == 1) {
		current->xid = 1;
		return 0;
	}

	vxi = locate_vx_info(id);
	if (!vxi)
		return -ESRCH;
	vx_migrate_task(current, vxi);
	put_vx_info(vxi);
	return 0;
}

int vc_get_cflags(uint32_t id, void __user *data)
{
	struct vx_info *vxi;
	struct vcmd_ctx_flags_v0 vc_data;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vxi = locate_vx_info(id);
	if (!vxi)
		return -ESRCH;

	vc_data.flagword = vxi->vx_flags;

	/* special STATE flag handling */
	vc_data.mask = vx_mask_flags(~0UL, vxi->vx_flags, VXF_ONE_TIME);

	put_vx_info(vxi);

	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

int vc_set_cflags(uint32_t id, void __user *data)
{
	struct vx_info *vxi;
	struct vcmd_ctx_flags_v0 vc_data;
	uint64_t mask, trigger;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	vxi = locate_vx_info(id);
	if (!vxi)
		return -ESRCH;

	/* special STATE flag handling */
	mask = vx_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
	trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword);

	if (trigger & VXF_STATE_SETUP)
		vx_mask_bcaps(current);
	if ((trigger & VXF_STATE_INIT) &&
		(vxi == current->vx_info))
		vx_set_init(vxi, current);

	vxi->vx_flags = vx_mask_flags(vxi->vx_flags,
		vc_data.flagword, mask);
	put_vx_info(vxi);
	return 0;
}
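
/*
 * Trigger sketch: `trigger' XORs the current and requested flag
 * words under `mask', so a bit is set exactly for those maskable
 * flags this call flips; e.g. clearing VXF_STATE_SETUP while it was
 * set makes (trigger & VXF_STATE_SETUP) nonzero.
 */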

int vc_get_ccaps(uint32_t id, void __user *data)
{
	struct vx_info *vxi;
	struct vcmd_ctx_caps_v0 vc_data;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vxi = locate_vx_info(id);
	if (!vxi)
		return -ESRCH;

	vc_data.bcaps = vxi->vx_bcaps;
	vc_data.ccaps = vxi->vx_ccaps;
	vc_data.cmask = ~0UL;
	put_vx_info(vxi);

	if (copy_to_user(data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

int vc_set_ccaps(uint32_t id, void __user *data)
{
	struct vx_info *vxi;
	struct vcmd_ctx_caps_v0 vc_data;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user(&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	vxi = locate_vx_info(id);
	if (!vxi)
		return -ESRCH;

	vxi->vx_bcaps &= vc_data.bcaps;
	vxi->vx_ccaps = vx_mask_flags(vxi->vx_ccaps,
		vc_data.ccaps, vc_data.cmask);
	put_vx_info(vxi);
	return 0;
}
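
/*
 * Note on the semantics above: vx_bcaps is only ever ANDed with the
 * caller's value, so a context's bounding capabilities can shrink
 * but never grow back; vx_ccaps, by contrast, can be set or cleared
 * under cmask in either direction.
 */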

#include <linux/module.h>

EXPORT_SYMBOL_GPL(free_vx_info);
EXPORT_SYMBOL_GPL(unhash_vx_info);