edcfebbbd6eeb9445999164a3edc074332051ab9
[linux-2.6.git] / kernel / vserver / context.c
1 /*
2  *  linux/kernel/vserver/context.c
3  *
4  *  Virtual Server: Context Support
5  *
6  *  Copyright (C) 2003-2005  Herbert Pötzl
7  *
8  *  V0.01  context helper
9  *  V0.02  vx_ctx_kill syscall command
10  *  V0.03  replaced context_info calls
11  *  V0.04  redesign of struct (de)alloc
12  *  V0.05  rlimit basic implementation
13  *  V0.06  task_xid and info commands
14  *  V0.07  context flags and caps
15  *  V0.08  switch to RCU based hash
16  *  V0.09  revert to non RCU for now
17  *  V0.10  and back to working RCU hash
18  *  V0.11  and back to locking again
19  *
20  */
21
22 #include <linux/config.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/namespace.h>
26
27 #include <linux/sched.h>
28 #include <linux/vserver/network.h>
29 #include <linux/vserver/legacy.h>
30 #include <linux/vserver/limit.h>
31 #include <linux/vserver/debug.h>
32
33 #include <linux/vs_context.h>
34 #include <linux/vserver/context_cmd.h>
35
36 #include <linux/err.h>
37 #include <asm/errno.h>
38
39 #include "cvirt_init.h"
40 #include "limit_init.h"
41 #include "sched_init.h"
42
43
44 /*      __alloc_vx_info()
45
46         * allocate an initialized vx_info struct
47         * doesn't make it visible (hash)                        */
48
49 static struct vx_info *__alloc_vx_info(xid_t xid)
50 {
51         struct vx_info *new = NULL;
52
53         vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);
54
55         /* would this benefit from a slab cache? */
56         new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
57         if (!new)
58                 return 0;
59
60         memset (new, 0, sizeof(struct vx_info));
61         new->vx_id = xid;
62         // INIT_RCU_HEAD(&new->vx_rcu);
63         INIT_HLIST_NODE(&new->vx_hlist);
64         atomic_set(&new->vx_usecnt, 0);
65         atomic_set(&new->vx_tasks, 0);
66         new->vx_parent = NULL;
67         new->vx_state = 0;
68         new->vx_lock = SPIN_LOCK_UNLOCKED;
69         init_waitqueue_head(&new->vx_exit);
70
71         /* rest of init goes here */
72         vx_info_init_limit(&new->limit);
73         vx_info_init_sched(&new->sched);
74         vx_info_init_cvirt(&new->cvirt);
75         vx_info_init_cacct(&new->cacct);
76
77
78         new->vx_flags = VXF_STATE_SETUP|VXF_STATE_INIT;
79         new->vx_bcaps = CAP_INIT_EFF_SET;
80         new->vx_ccaps = 0;
81
82         vxdprintk(VXD_CBIT(xid, 0),
83                 "alloc_vx_info(%d) = %p", xid, new);
84         vxh_alloc_vx_info(new);
85         return new;
86 }
87
88 /*      __dealloc_vx_info()
89
90         * final disposal of vx_info                             */
91
static void __dealloc_vx_info(struct vx_info *vxi)
{
	/* final teardown: caller (free_vx_info) has already verified
	 * that no users, tasks or hash references remain */
	vxdprintk(VXD_CBIT(xid, 0),
		"dealloc_vx_info(%p)", vxi);
	vxh_dealloc_vx_info(vxi);

	/* poison the list linkage and id so use-after-free is noticed */
	vxi->vx_hlist.next = LIST_POISON1;
	vxi->vx_id = -1;

	/* release per-subsystem state (same order as init) */
	vx_info_exit_limit(&vxi->limit);
	vx_info_exit_sched(&vxi->sched);
	vx_info_exit_cvirt(&vxi->cvirt);
	vx_info_exit_cacct(&vxi->cacct);

	/* debug aid only; the memory is gone right after this */
	vxi->vx_state |= VXS_RELEASED;
	kfree(vxi);
}
109
void __shutdown_vx_info(struct vx_info *vxi)
{
	struct namespace *namespace;
	struct fs_struct *fs;

	/* the put_*() calls below may sleep */
	might_sleep();

	/* xchg() atomically detaches the reference, so only one
	 * caller can perform the final put on each pointer */
	namespace = xchg(&vxi->vx_namespace, NULL);
	if (namespace)
		put_namespace(namespace);

	fs = xchg(&vxi->vx_fs, NULL);
	if (fs)
		put_fs_struct(fs);
}
125
126 /* exported stuff */
127
void free_vx_info(struct vx_info *vxi)
{
	/* context shutdown is mandatory */
	// BUG_ON(vxi->vx_state != VXS_SHUTDOWN);

	/* no users or attached tasks may remain */
	BUG_ON(atomic_read(&vxi->vx_usecnt));
	BUG_ON(atomic_read(&vxi->vx_tasks));

	/* must already be removed from the global hash */
	BUG_ON(vx_info_state(vxi, VXS_HASHED));
	// BUG_ON(!vx_state(vxi, VXS_DEFUNCT));

	/* namespace/fs must have been dropped via __shutdown_vx_info() */
	BUG_ON(vxi->vx_namespace);
	BUG_ON(vxi->vx_fs);

	__dealloc_vx_info(vxi);
}
144
145
/*	hash table for vx_info hash */

#define VX_HASH_SIZE	13	/* small prime; buckets chosen by xid % size */

struct hlist_head vx_info_hash[VX_HASH_SIZE];

/* protects vx_info_hash and the VXS_HASHED state transitions */
static spinlock_t vx_info_hash_lock = SPIN_LOCK_UNLOCKED;
153
154
155 static inline unsigned int __hashval(xid_t xid)
156 {
157         return (xid % VX_HASH_SIZE);
158 }
159
160
161
162 /*      __hash_vx_info()
163
164         * add the vxi to the global hash table
165         * requires the hash_lock to be held                     */
166
static inline void __hash_vx_info(struct vx_info *vxi)
{
	struct hlist_head *head;

	vxd_assert_lock(&vx_info_hash_lock);
	vxdprintk(VXD_CBIT(xid, 4),
		"__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
	vxh_hash_vx_info(vxi);

	/* context must not be hashed */
	BUG_ON(vxi->vx_state & VXS_HASHED);

	/* the hash table holds its own reference on vxi,
	 * dropped again in __unhash_vx_info() */
	get_vx_info(vxi);
	vxi->vx_state |= VXS_HASHED;
	head = &vx_info_hash[__hashval(vxi->vx_id)];
	hlist_add_head(&vxi->vx_hlist, head);
}
184
185 /*      __unhash_vx_info()
186
187         * remove the vxi from the global hash table
188         * requires the hash_lock to be held                     */
189
static inline void __unhash_vx_info(struct vx_info *vxi)
{
	vxd_assert_lock(&vx_info_hash_lock);
	vxdprintk(VXD_CBIT(xid, 4),
		"__unhash_vx_info: %p[#%d]", vxi, vxi->vx_id);
	vxh_unhash_vx_info(vxi);

	/* unhashing an unhashed context is silently tolerated;
	 * maybe warn on that? */
	if (!(vxi->vx_state & VXS_HASHED))
		return;

	vxi->vx_state &= ~VXS_HASHED;
	hlist_del(&vxi->vx_hlist);
	/* drop the reference the hash table held */
	put_vx_info(vxi);
}
205
206
207 /*      __lookup_vx_info()
208
209         * requires the hash_lock to be held
210         * doesn't increment the vx_refcnt                       */
211
212 static inline struct vx_info *__lookup_vx_info(xid_t xid)
213 {
214         struct hlist_head *head = &vx_info_hash[__hashval(xid)];
215         struct hlist_node *pos;
216         struct vx_info *vxi;
217
218         vxd_assert_lock(&vx_info_hash_lock);
219         hlist_for_each(pos, head) {
220                 vxi = hlist_entry(pos, struct vx_info, vx_hlist);
221
222                 if (vxi->vx_id == xid)
223                         goto found;
224         }
225         vxi = NULL;
226 found:
227         vxdprintk(VXD_CBIT(xid, 0),
228                 "__lookup_vx_info(#%u): %p[#%u]",
229                 xid, vxi, vxi?vxi->vx_id:0);
230         vxh_lookup_vx_info(xid, vxi);
231         return vxi;
232 }
233
234
235 /*      __vx_dynamic_id()
236
237         * find unused dynamic xid
238         * requires the hash_lock to be held                     */
239
static inline xid_t __vx_dynamic_id(void)
{
	/* seq persists across calls, remembering where the last
	 * search ended; barrier detects a full wrap-around */
	static xid_t seq = MAX_S_CONTEXT;
	xid_t barrier = seq;

	vxd_assert_lock(&vx_info_hash_lock);
	do {
		/* cycle through (MAX_S_CONTEXT wraps to MIN_D_CONTEXT) */
		if (++seq > MAX_S_CONTEXT)
			seq = MIN_D_CONTEXT;
		if (!__lookup_vx_info(seq)) {
			vxdprintk(VXD_CBIT(xid, 4),
				"__vx_dynamic_id: [#%d]", seq);
			return seq;
		}
	} while (barrier != seq);
	/* every dynamic xid is taken */
	return 0;
}
257
258 #ifdef  CONFIG_VSERVER_LEGACY
259
260 /*      __loc_vx_info()
261
262         * locate or create the requested context
263         * get() it and if new hash it                           */
264
265 static struct vx_info * __loc_vx_info(int id, int *err)
266 {
267         struct vx_info *new, *vxi = NULL;
268
269         vxdprintk(VXD_CBIT(xid, 1), "loc_vx_info(%d)*", id);
270
271         if (!(new = __alloc_vx_info(id))) {
272                 *err = -ENOMEM;
273                 return NULL;
274         }
275
276         /* required to make dynamic xids unique */
277         spin_lock(&vx_info_hash_lock);
278
279         /* dynamic context requested */
280         if (id == VX_DYNAMIC_ID) {
281                 id = __vx_dynamic_id();
282                 if (!id) {
283                         printk(KERN_ERR "no dynamic context available.\n");
284                         goto out_unlock;
285                 }
286                 new->vx_id = id;
287         }
288         /* existing context requested */
289         else if ((vxi = __lookup_vx_info(id))) {
290                 /* context in setup is not available */
291                 if (vxi->vx_flags & VXF_STATE_SETUP) {
292                         vxdprintk(VXD_CBIT(xid, 0),
293                                 "loc_vx_info(%d) = %p (not available)", id, vxi);
294                         vxi = NULL;
295                         *err = -EBUSY;
296                 } else {
297                         vxdprintk(VXD_CBIT(xid, 0),
298                                 "loc_vx_info(%d) = %p (found)", id, vxi);
299                         get_vx_info(vxi);
300                         *err = 0;
301                 }
302                 goto out_unlock;
303         }
304
305         /* new context requested */
306         vxdprintk(VXD_CBIT(xid, 0),
307                 "loc_vx_info(%d) = %p (new)", id, new);
308         __hash_vx_info(get_vx_info(new));
309         vxi = new, new = NULL;
310         *err = 1;
311
312 out_unlock:
313         spin_unlock(&vx_info_hash_lock);
314         vxh_loc_vx_info(id, vxi);
315         if (new)
316                 __dealloc_vx_info(new);
317         return vxi;
318 }
319
320 #endif
321
322 /*      __create_vx_info()
323
324         * create the requested context
325         * get() it and hash it                                  */
326
static struct vx_info * __create_vx_info(int id)
{
	struct vx_info *new, *vxi = NULL;

	vxdprintk(VXD_CBIT(xid, 1), "create_vx_info(%d)*", id);

	/* preallocate outside the lock; kmalloc may sleep */
	if (!(new = __alloc_vx_info(id))) {
		return ERR_PTR(-ENOMEM);
	}

	/* required to make dynamic xids unique */
	spin_lock(&vx_info_hash_lock);

	/* dynamic context requested */
	if (id == VX_DYNAMIC_ID) {
		id = __vx_dynamic_id();
		if (!id) {
			printk(KERN_ERR "no dynamic context available.\n");
			vxi = ERR_PTR(-EAGAIN);
			goto out_unlock;
		}
		new->vx_id = id;
	}
	/* existing context requested */
	else if ((vxi = __lookup_vx_info(id))) {
		vxdprintk(VXD_CBIT(xid, 0),
			"create_vx_info(%d) = %p (already there)", id, vxi);
		/* a context still in setup counts as busy,
		 * otherwise it plainly exists already */
		if (vx_info_flags(vxi, VXF_STATE_SETUP, 0))
			vxi = ERR_PTR(-EBUSY);
		else
			vxi = ERR_PTR(-EEXIST);
		goto out_unlock;
	}
	/* dynamic xid creation blocker: static creation inside
	 * the dynamic range is refused */
	else if (id >= MIN_D_CONTEXT) {
		vxdprintk(VXD_CBIT(xid, 0),
			"create_vx_info(%d) (dynamic rejected)", id);
		vxi = ERR_PTR(-EINVAL);
		goto out_unlock;
	}

	/* new context requested: hash it (the hash takes its own
	 * reference) and return it get()ed */
	vxdprintk(VXD_CBIT(xid, 0),
		"create_vx_info(%d) = %p (new)", id, new);
	__hash_vx_info(get_vx_info(new));
	vxi = new, new = NULL;

out_unlock:
	spin_unlock(&vx_info_hash_lock);
	vxh_create_vx_info(id, IS_ERR(vxi)?NULL:vxi);
	/* dispose of the preallocation on any failure path */
	if (new)
		__dealloc_vx_info(new);
	return vxi;
}
381
382
383 /*      exported stuff                                          */
384
385
void unhash_vx_info(struct vx_info *vxi)
{
	/* drop namespace/fs references first: __shutdown_vx_info()
	 * may sleep, so it must run outside the hash spinlock */
	__shutdown_vx_info(vxi);
	spin_lock(&vx_info_hash_lock);
	__unhash_vx_info(vxi);
	spin_unlock(&vx_info_hash_lock);
}
393
394
395 /*      locate_vx_info()
396
397         * search for a vx_info and get() it
398         * negative id means current                             */
399
400 struct vx_info *locate_vx_info(int id)
401 {
402         struct vx_info *vxi = NULL;
403
404         if (id < 0) {
405                 vxi = get_vx_info(current->vx_info);
406         } else if (id > 1) {
407                 spin_lock(&vx_info_hash_lock);
408                 vxi = get_vx_info(__lookup_vx_info(id));
409                 spin_unlock(&vx_info_hash_lock);
410         }
411         return vxi;
412 }
413
414 /*      xid_is_hashed()
415
416         * verify that xid is still hashed                       */
417
418 int xid_is_hashed(xid_t xid)
419 {
420         int hashed;
421
422         spin_lock(&vx_info_hash_lock);
423         hashed = (__lookup_vx_info(xid) != NULL);
424         spin_unlock(&vx_info_hash_lock);
425         return hashed;
426 }
427
428 #ifdef  CONFIG_VSERVER_LEGACY
429
struct vx_info *locate_or_create_vx_info(int id)
{
	int err;

	/* NOTE(review): err is discarded here, so callers cannot
	 * distinguish the failure reasons (-ENOMEM vs -EBUSY);
	 * they only see NULL */
	return __loc_vx_info(id, &err);
}
436
437 #endif
438
439 #ifdef  CONFIG_PROC_FS
440
/* fill xids[] with up to 'size' hashed xids, skipping entries
 * according to 'index'; returns the number of xids stored */
int get_xid_list(int index, unsigned int *xids, int size)
{
	int hindex, nr_xids = 0;

	for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) {
		struct hlist_head *head = &vx_info_hash[hindex];
		struct hlist_node *pos;

		spin_lock(&vx_info_hash_lock);
		hlist_for_each(pos, head) {
			struct vx_info *vxi;

			/* skip until the requested start position */
			if (--index > 0)
				continue;

			vxi = hlist_entry(pos, struct vx_info, vx_hlist);
			xids[nr_xids] = vxi->vx_id;
			if (++nr_xids >= size) {
				spin_unlock(&vx_info_hash_lock);
				goto out;
			}
		}
		/* keep the lock time short */
		spin_unlock(&vx_info_hash_lock);
	}
out:
	return nr_xids;
}
469 #endif
470
471 int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
472 {
473         struct user_struct *new_user, *old_user;
474
475         if (!p || !vxi)
476                 BUG();
477         new_user = alloc_uid(vxi->vx_id, p->uid);
478         if (!new_user)
479                 return -ENOMEM;
480
481         old_user = p->user;
482         if (new_user != old_user) {
483                 atomic_inc(&new_user->processes);
484                 atomic_dec(&old_user->processes);
485                 p->user = new_user;
486         }
487         free_uid(old_user);
488         return 0;
489 }
490
491 void vx_mask_bcaps(struct task_struct *p)
492 {
493         struct vx_info *vxi = p->vx_info;
494
495         p->cap_effective &= vxi->vx_bcaps;
496         p->cap_inheritable &= vxi->vx_bcaps;
497         p->cap_permitted &= vxi->vx_bcaps;
498 }
499
500
501 #include <linux/file.h>
502
503 static int vx_openfd_task(struct task_struct *tsk)
504 {
505         struct files_struct *files = tsk->files;
506         const unsigned long *bptr;
507         int count, total;
508
509         spin_lock(&files->file_lock);
510         bptr = files->open_fds->fds_bits;
511         count = files->max_fds / (sizeof(unsigned long) * 8);
512         for (total = 0; count > 0; count--) {
513                 if (*bptr)
514                         total += hweight_long(*bptr);
515                 bptr++;
516         }
517         spin_unlock(&files->file_lock);
518         return total;
519 }
520
521 /*
522  *      migrate task to new context
523  *      gets vxi, puts old_vxi on change
524  */
525
int vx_migrate_task(struct task_struct *p, struct vx_info *vxi)
{
	struct vx_info *old_vxi;
	int ret = 0;

	if (!p || !vxi)
		BUG();

	old_vxi = task_get_vx_info(p);
	/* nothing to do if already in the target context */
	if (old_vxi == vxi)
		goto out;

	vxdprintk(VXD_CBIT(xid, 5),
		"vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
		vxi->vx_id, atomic_read(&vxi->vx_usecnt));

	/* switch uid accounting first; on failure the task
	 * stays in its old context */
	if (!(ret = vx_migrate_user(p, vxi))) {
		int openfd;

		task_lock(p);
		openfd = vx_openfd_task(p);

		/* move thread/process/openfd accounting from the
		 * old context to the new one */
		if (old_vxi) {
			atomic_dec(&old_vxi->cvirt.nr_threads);
			atomic_dec(&old_vxi->cvirt.nr_running);
			atomic_dec(&old_vxi->limit.rcur[RLIMIT_NPROC]);
			/* FIXME: what about the struct files here? */
			atomic_sub(openfd, &old_vxi->limit.rcur[VLIMIT_OPENFD]);
		}
		atomic_inc(&vxi->cvirt.nr_threads);
		atomic_inc(&vxi->cvirt.nr_running);
		atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
		/* FIXME: what about the struct files here? */
		atomic_add(openfd, &vxi->limit.rcur[VLIMIT_OPENFD]);

		/* transfer the task's context reference: release and
		 * clear the old one, claim and set the new one */
		if (old_vxi) {
			release_vx_info(old_vxi, p);
			clr_vx_info(&p->vx_info);
		}
		claim_vx_info(vxi, p);
		set_vx_info(&p->vx_info, vxi);
		p->xid = vxi->vx_id;

		vxdprintk(VXD_CBIT(xid, 5),
			"moved task %p into vxi:%p[#%d]",
			p, vxi, vxi->vx_id);

		/* apply the new context's capability bound to p */
		vx_mask_bcaps(p);
		task_unlock(p);
	}
out:
	/* drop the reference taken by task_get_vx_info() */
	put_vx_info(old_vxi);
	return ret;
}
580
/* record p (by tgid) as the init process of context vxi;
 * -EINVAL without a context, -EPERM if init is already set */
int vx_set_init(struct vx_info *vxi, struct task_struct *p)
{
	if (!vxi)
		return -EINVAL;
	/* only one init per context */
	if (vxi->vx_initpid)
		return -EPERM;

	vxdprintk(VXD_CBIT(xid, 6),
		"vx_set_init(%p[#%d],%p[#%d,%d,%d])",
		vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);

	vxi->vx_initpid = p->tgid;
	return 0;
}
595
596
597 /* vserver syscall commands below here */
598
599 /* taks xid and vx_info functions */
600
601 #include <asm/uaccess.h>
602
603
/* return the xid of the task with pid 'id' (0 means current) */
int vc_task_xid(uint32_t id, void __user *data)
{
	xid_t xid;

	if (id) {
		struct task_struct *tsk;

		/* querying other tasks needs admin/watch context */
		if (!vx_check(0, VX_ADMIN|VX_WATCH))
			return -EPERM;

		read_lock(&tasklist_lock);
		tsk = find_task_by_real_pid(id);
		/* NOTE(review): -ESRCH travels through the unsigned
		 * xid_t and back to int; relies on xid_t being at
		 * least as wide as int -- confirm */
		xid = (tsk) ? tsk->xid : -ESRCH;
		read_unlock(&tasklist_lock);
	}
	else
		xid = vx_current_xid();
	return xid;
}
623
624
625 int vc_vx_info(uint32_t id, void __user *data)
626 {
627         struct vx_info *vxi;
628         struct vcmd_vx_info_v0 vc_data;
629
630         if (!vx_check(0, VX_ADMIN))
631                 return -ENOSYS;
632         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RESOURCE))
633                 return -EPERM;
634
635         vxi = locate_vx_info(id);
636         if (!vxi)
637                 return -ESRCH;
638
639         vc_data.xid = vxi->vx_id;
640         vc_data.initpid = vxi->vx_initpid;
641         put_vx_info(vxi);
642
643         if (copy_to_user (data, &vc_data, sizeof(vc_data)))
644                 return -EFAULT;
645         return 0;
646 }
647
648
649 /* context functions */
650
/* create a new context for xid (or a dynamic one), migrate the
 * caller into it, and return the new xid */
int vc_ctx_create(uint32_t xid, void __user *data)
{
	struct vx_info *new_vxi;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* valid: a static xid up to MAX_S_CONTEXT, or the
	 * special dynamic id */
	if ((xid > MAX_S_CONTEXT) && (xid != VX_DYNAMIC_ID))
		return -EINVAL;

	/* xids 0 and 1 cannot be created */
	if (xid < 2)
		return -EINVAL;

	new_vxi = __create_vx_info(xid);
	if (IS_ERR(new_vxi))
		return PTR_ERR(new_vxi);

	ret = new_vxi->vx_id;
	/* NOTE(review): the return value of vx_migrate_task()
	 * is ignored here */
	vx_migrate_task(current, new_vxi);
	/* if this fails, we might end up with a hashed vx_info */
	put_vx_info(new_vxi);
	return ret;
}
675
676
/* migrate the calling task into the context given by id */
int vc_ctx_migrate(uint32_t id, void __user *data)
{
	struct vx_info *vxi;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* dirty hack until Spectator becomes a cap */
	if (id == 1) {
		current->xid = 1;
		return 0;
	}

	vxi = locate_vx_info(id);
	if (!vxi)
		return -ESRCH;
	/* NOTE(review): a vx_migrate_task() failure is not
	 * reported back to the caller */
	vx_migrate_task(current, vxi);
	put_vx_info(vxi);
	return 0;
}
697
698
699 int vc_get_cflags(uint32_t id, void __user *data)
700 {
701         struct vx_info *vxi;
702         struct vcmd_ctx_flags_v0 vc_data;
703
704         if (!capable(CAP_SYS_ADMIN))
705                 return -EPERM;
706
707         vxi = locate_vx_info(id);
708         if (!vxi)
709                 return -ESRCH;
710
711         vc_data.flagword = vxi->vx_flags;
712
713         /* special STATE flag handling */
714         vc_data.mask = vx_mask_flags(~0UL, vxi->vx_flags, VXF_ONE_TIME);
715
716         put_vx_info(vxi);
717
718         if (copy_to_user (data, &vc_data, sizeof(vc_data)))
719                 return -EFAULT;
720         return 0;
721 }
722
/* update the context flag word according to the user-supplied
 * flagword/mask pair, with special handling for one-time flags */
int vc_set_cflags(uint32_t id, void __user *data)
{
	struct vx_info *vxi;
	struct vcmd_ctx_flags_v0 vc_data;
	uint64_t mask, trigger;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	vxi = locate_vx_info(id);
	if (!vxi)
		return -ESRCH;

	/* special STATE flag handling */
	mask = vx_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
	/* trigger = the maskable flags that actually change */
	trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword);

	/* a change of the SETUP flag re-applies the capability
	 * bound to the caller */
	if (trigger & VXF_STATE_SETUP)
		vx_mask_bcaps(current);
	/* a change of the INIT flag registers the caller as the
	 * context's init process (own context only) */
	if (trigger & VXF_STATE_INIT)
		if (vxi == current->vx_info)
			vx_set_init(vxi, current);

	vxi->vx_flags = vx_mask_flags(vxi->vx_flags,
		vc_data.flagword, mask);
	put_vx_info(vxi);
	return 0;
}
753
754 int vc_get_ccaps(uint32_t id, void __user *data)
755 {
756         struct vx_info *vxi;
757         struct vcmd_ctx_caps_v0 vc_data;
758
759         if (!capable(CAP_SYS_ADMIN))
760                 return -EPERM;
761
762         vxi = locate_vx_info(id);
763         if (!vxi)
764                 return -ESRCH;
765
766         vc_data.bcaps = vxi->vx_bcaps;
767         vc_data.ccaps = vxi->vx_ccaps;
768         vc_data.cmask = ~0UL;
769         put_vx_info(vxi);
770
771         if (copy_to_user (data, &vc_data, sizeof(vc_data)))
772                 return -EFAULT;
773         return 0;
774 }
775
776 int vc_set_ccaps(uint32_t id, void __user *data)
777 {
778         struct vx_info *vxi;
779         struct vcmd_ctx_caps_v0 vc_data;
780
781         if (!capable(CAP_SYS_ADMIN))
782                 return -EPERM;
783         if (copy_from_user (&vc_data, data, sizeof(vc_data)))
784                 return -EFAULT;
785
786         vxi = locate_vx_info(id);
787         if (!vxi)
788                 return -ESRCH;
789
790         vxi->vx_bcaps &= vc_data.bcaps;
791         vxi->vx_ccaps = vx_mask_flags(vxi->vx_ccaps,
792                 vc_data.ccaps, vc_data.cmask);
793         put_vx_info(vxi);
794         return 0;
795 }
796
#include <linux/module.h>

/* exported for GPL modules that hold vx_info references */
EXPORT_SYMBOL_GPL(free_vx_info);
800