/*
 *  linux/kernel/vserver/context.c
 *
 *  Virtual Server: Context Support
 *
 *  Copyright (C) 2003-2005  Herbert Pötzl
 *
 *  V0.01  context helper
 *  V0.02  vx_ctx_kill syscall command
 *  V0.03  replaced context_info calls
 *  V0.04  redesign of struct (de)alloc
 *  V0.05  rlimit basic implementation
 *  V0.06  task_xid and info commands
 *  V0.07  context flags and caps
 *  V0.08  switch to RCU based hash
 *  V0.09  revert to non RCU for now
 *  V0.10  and back to working RCU hash
 *  V0.11  and back to locking again
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/namespace.h>

#include <linux/sched.h>
#include <linux/vserver/network.h>
#include <linux/vserver/legacy.h>
#include <linux/vserver/limit.h>
#include <linux/vserver/debug.h>

#include <linux/vs_context.h>
#include <linux/vserver/context_cmd.h>

#include <linux/err.h>
#include <asm/errno.h>

#include "cvirt_init.h"
#include "limit_init.h"
#include "sched_init.h"


/*      __alloc_vx_info()

        * allocate an initialized vx_info struct
        * doesn't make it visible (hash)                        */

static struct vx_info *__alloc_vx_info(xid_t xid)
{
        struct vx_info *new = NULL;

        vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);

        /* would this benefit from a slab cache? */
        new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
        if (!new)
                return NULL;

        memset(new, 0, sizeof(struct vx_info));
        new->vx_id = xid;
        // INIT_RCU_HEAD(&new->vx_rcu);
        INIT_HLIST_NODE(&new->vx_hlist);
        atomic_set(&new->vx_usecnt, 0);
        atomic_set(&new->vx_tasks, 0);
        new->vx_parent = NULL;
        new->vx_state = 0;
        new->vx_lock = SPIN_LOCK_UNLOCKED;
        init_waitqueue_head(&new->vx_wait);

        /* rest of init goes here */
        vx_info_init_limit(&new->limit);
        vx_info_init_sched(&new->sched);
        vx_info_init_cvirt(&new->cvirt);
        vx_info_init_cacct(&new->cacct);

        new->vx_flags = VXF_STATE_SETUP|VXF_STATE_INIT;
        new->vx_bcaps = CAP_INIT_EFF_SET;
        new->vx_ccaps = 0;

        vxdprintk(VXD_CBIT(xid, 0),
                "alloc_vx_info(%d) = %p", xid, new);
        vxh_alloc_vx_info(new);
        return new;
}
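
/*
 * Sketch (added, illustrative only — not compiled): one way the slab
 * cache hinted at above could look.  vx_info_cachep and
 * vx_info_cache_init() are made-up names, not part of this file, and
 * a boot-time hook would have to call the init function once.
 */
#if 0
static kmem_cache_t *vx_info_cachep;

static void __init vx_info_cache_init(void)
{
        /* one dedicated cache for all vx_info allocations */
        vx_info_cachep = kmem_cache_create("vx_info",
                sizeof(struct vx_info), 0,
                SLAB_HWCACHE_ALIGN, NULL, NULL);
}

static struct vx_info *vx_info_cache_alloc(void)
{
        /* GFP_KERNEL may sleep, exactly like the kmalloc() above */
        return kmem_cache_alloc(vx_info_cachep, GFP_KERNEL);
}
#endif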

/*      __dealloc_vx_info()

        * final disposal of vx_info                             */

static void __dealloc_vx_info(struct vx_info *vxi)
{
        vxdprintk(VXD_CBIT(xid, 0),
                "dealloc_vx_info(%p)", vxi);
        vxh_dealloc_vx_info(vxi);

        vxi->vx_hlist.next = LIST_POISON1;
        vxi->vx_id = -1;

        vx_info_exit_limit(&vxi->limit);
        vx_info_exit_sched(&vxi->sched);
        vx_info_exit_cvirt(&vxi->cvirt);
        vx_info_exit_cacct(&vxi->cacct);

        vxi->vx_state |= VXS_RELEASED;
        kfree(vxi);
}

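/*      __shutdown_vx_info()

        * drop the namespace and fs references held by the context;
        * xchg() ensures each reference is put at most once, even if
        * shutdown should race (comment added)                  */
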
void __shutdown_vx_info(struct vx_info *vxi)
{
        struct namespace *namespace;
        struct fs_struct *fs;

        might_sleep();

        vxi->vx_state |= VXS_SHUTDOWN;
        vs_state_change(vxi, VSC_SHUTDOWN);

        namespace = xchg(&vxi->vx_namespace, NULL);
        if (namespace)
                put_namespace(namespace);

        fs = xchg(&vxi->vx_fs, NULL);
        if (fs)
                put_fs_struct(fs);
}

/* exported stuff */

void free_vx_info(struct vx_info *vxi)
{
        /* context shutdown is mandatory */
        // BUG_ON(vxi->vx_state != VXS_SHUTDOWN);

        BUG_ON(atomic_read(&vxi->vx_usecnt));
        BUG_ON(atomic_read(&vxi->vx_tasks));

        BUG_ON(vx_info_state(vxi, VXS_HASHED));
        // BUG_ON(!vx_state(vxi, VXS_DEFUNCT));

        BUG_ON(vxi->vx_namespace);
        BUG_ON(vxi->vx_fs);

        __dealloc_vx_info(vxi);
}


/*      hash table for vx_info hash */

#define VX_HASH_SIZE    13

struct hlist_head vx_info_hash[VX_HASH_SIZE];

static spinlock_t vx_info_hash_lock = SPIN_LOCK_UNLOCKED;


static inline unsigned int __hashval(xid_t xid)
{
        return (xid % VX_HASH_SIZE);
}
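
/* note (added): a small prime table size keeps this modulo hash cheap
 * and avoids clustering for strided xid allocation patterns          */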


/*      __hash_vx_info()

        * add the vxi to the global hash table
        * requires the hash_lock to be held                     */

static inline void __hash_vx_info(struct vx_info *vxi)
{
        struct hlist_head *head;

        vxd_assert_lock(&vx_info_hash_lock);
        vxdprintk(VXD_CBIT(xid, 4),
                "__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
        vxh_hash_vx_info(vxi);

        /* context must not be hashed */
        BUG_ON(vxi->vx_state & VXS_HASHED);

        vxi->vx_state |= VXS_HASHED;
        head = &vx_info_hash[__hashval(vxi->vx_id)];
        hlist_add_head(&vxi->vx_hlist, head);
}

/*      __unhash_vx_info()

        * remove the vxi from the global hash table
        * requires the hash_lock to be held                     */

static inline void __unhash_vx_info(struct vx_info *vxi)
{
        vxd_assert_lock(&vx_info_hash_lock);
        vxdprintk(VXD_CBIT(xid, 4),
                "__unhash_vx_info: %p[#%d]", vxi, vxi->vx_id);
        vxh_unhash_vx_info(vxi);

        /* maybe warn on that? */
        if (!(vxi->vx_state & VXS_HASHED))
                return;

        vxi->vx_state &= ~VXS_HASHED;
        hlist_del(&vxi->vx_hlist);
}


/*      __lookup_vx_info()

        * requires the hash_lock to be held
        * doesn't increment the vx_refcnt                       */

static inline struct vx_info *__lookup_vx_info(xid_t xid)
{
        struct hlist_head *head = &vx_info_hash[__hashval(xid)];
        struct hlist_node *pos;
        struct vx_info *vxi;

        vxd_assert_lock(&vx_info_hash_lock);
        hlist_for_each(pos, head) {
                vxi = hlist_entry(pos, struct vx_info, vx_hlist);

                if (vxi->vx_id == xid)
                        goto found;
        }
        vxi = NULL;
found:
        vxdprintk(VXD_CBIT(xid, 0),
                "__lookup_vx_info(#%u): %p[#%u]",
                xid, vxi, vxi ? vxi->vx_id : 0);
        vxh_lookup_vx_info(xid, vxi);
        return vxi;
}


/*      __vx_dynamic_id()

        * find unused dynamic xid
        * requires the hash_lock to be held                     */

static inline xid_t __vx_dynamic_id(void)
{
        static xid_t seq = MAX_S_CONTEXT;
        xid_t barrier = seq;

        vxd_assert_lock(&vx_info_hash_lock);
        do {
                if (++seq > MAX_S_CONTEXT)
                        seq = MIN_D_CONTEXT;
                if (!__lookup_vx_info(seq)) {
                        vxdprintk(VXD_CBIT(xid, 4),
                                "__vx_dynamic_id: [#%d]", seq);
                        return seq;
                }
        } while (barrier != seq);
        return 0;
}
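
/* note (added): `barrier' remembers where the scan started; the search
 * wraps from MAX_S_CONTEXT around to MIN_D_CONTEXT and returns 0 only
 * after a full cycle finds every dynamic xid in use                  */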

#ifdef  CONFIG_VSERVER_LEGACY

/*      __loc_vx_info()

        * locate or create the requested context
        * get() it and if new hash it                           */

static struct vx_info * __loc_vx_info(int id, int *err)
{
        struct vx_info *new, *vxi = NULL;

        vxdprintk(VXD_CBIT(xid, 1), "loc_vx_info(%d)*", id);

        if (!(new = __alloc_vx_info(id))) {
                *err = -ENOMEM;
                return NULL;
        }

        /* required to make dynamic xids unique */
        spin_lock(&vx_info_hash_lock);

        /* dynamic context requested */
        if (id == VX_DYNAMIC_ID) {
                id = __vx_dynamic_id();
                if (!id) {
                        printk(KERN_ERR "no dynamic context available.\n");
                        goto out_unlock;
                }
                new->vx_id = id;
        }
        /* existing context requested */
        else if ((vxi = __lookup_vx_info(id))) {
                /* context in setup is not available */
                if (vxi->vx_flags & VXF_STATE_SETUP) {
                        vxdprintk(VXD_CBIT(xid, 0),
                                "loc_vx_info(%d) = %p (not available)", id, vxi);
                        vxi = NULL;
                        *err = -EBUSY;
                } else {
                        vxdprintk(VXD_CBIT(xid, 0),
                                "loc_vx_info(%d) = %p (found)", id, vxi);
                        get_vx_info(vxi);
                        *err = 0;
                }
                goto out_unlock;
        }

        /* new context requested */
        vxdprintk(VXD_CBIT(xid, 0),
                "loc_vx_info(%d) = %p (new)", id, new);
        __hash_vx_info(get_vx_info(new));
        vxi = new, new = NULL;
        *err = 1;

out_unlock:
        spin_unlock(&vx_info_hash_lock);
        vxh_loc_vx_info(id, vxi);
        if (new)
                __dealloc_vx_info(new);
        return vxi;
}

#endif

/*      __create_vx_info()

        * create the requested context
        * get() it and hash it                                  */

static struct vx_info * __create_vx_info(int id)
{
        struct vx_info *new, *vxi = NULL;

        vxdprintk(VXD_CBIT(xid, 1), "create_vx_info(%d)*", id);

        if (!(new = __alloc_vx_info(id)))
                return ERR_PTR(-ENOMEM);

        /* required to make dynamic xids unique */
        spin_lock(&vx_info_hash_lock);

        /* dynamic context requested */
        if (id == VX_DYNAMIC_ID) {
                id = __vx_dynamic_id();
                if (!id) {
                        printk(KERN_ERR "no dynamic context available.\n");
                        vxi = ERR_PTR(-EAGAIN);
                        goto out_unlock;
                }
                new->vx_id = id;
        }
        /* existing context requested */
        else if ((vxi = __lookup_vx_info(id))) {
                vxdprintk(VXD_CBIT(xid, 0),
                        "create_vx_info(%d) = %p (already there)", id, vxi);
                if (vx_info_flags(vxi, VXF_STATE_SETUP, 0))
                        vxi = ERR_PTR(-EBUSY);
                else
                        vxi = ERR_PTR(-EEXIST);
                goto out_unlock;
        }
        /* dynamic xid creation blocker */
        else if (id >= MIN_D_CONTEXT) {
                vxdprintk(VXD_CBIT(xid, 0),
                        "create_vx_info(%d) (dynamic rejected)", id);
                vxi = ERR_PTR(-EINVAL);
                goto out_unlock;
        }

        /* new context requested */
        vxdprintk(VXD_CBIT(xid, 0),
                "create_vx_info(%d) = %p (new)", id, new);
        __hash_vx_info(get_vx_info(new));
        vxi = new, new = NULL;

out_unlock:
        spin_unlock(&vx_info_hash_lock);
        vxh_create_vx_info(id, IS_ERR(vxi) ? NULL : vxi);
        if (new)
                __dealloc_vx_info(new);
        return vxi;
}


/*      exported stuff                                          */


void unhash_vx_info(struct vx_info *vxi)
{
        __shutdown_vx_info(vxi);
        spin_lock(&vx_info_hash_lock);
        __unhash_vx_info(vxi);
        spin_unlock(&vx_info_hash_lock);
        __wakeup_vx_info(vxi);
}


/*      locate_vx_info()

        * search for a vx_info and get() it
        * negative id means current                             */

struct vx_info *locate_vx_info(int id)
{
        struct vx_info *vxi = NULL;

        if (id < 0) {
                vxi = get_vx_info(current->vx_info);
        } else if (id > 1) {
                spin_lock(&vx_info_hash_lock);
                vxi = get_vx_info(__lookup_vx_info(id));
                spin_unlock(&vx_info_hash_lock);
        }
        return vxi;
}
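
/*
 * Usage sketch (added, illustrative only — not compiled): every
 * successful locate_vx_info() must be paired with a put_vx_info()
 * once the caller is done with the reference.  The helper name below
 * is made up.
 */
#if 0
static int example_query_initpid(int id, pid_t *pid)
{
        struct vx_info *vxi = locate_vx_info(id);

        if (!vxi)
                return -ESRCH;
        *pid = vxi->vx_initpid;
        put_vx_info(vxi);
        return 0;
}
#endif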

/*      xid_is_hashed()

        * verify that xid is still hashed                       */

int xid_is_hashed(xid_t xid)
{
        int hashed;

        spin_lock(&vx_info_hash_lock);
        hashed = (__lookup_vx_info(xid) != NULL);
        spin_unlock(&vx_info_hash_lock);
        return hashed;
}

#ifdef  CONFIG_VSERVER_LEGACY

struct vx_info *locate_or_create_vx_info(int id)
{
        int err;

        return __loc_vx_info(id, &err);
}

#endif

#ifdef  CONFIG_PROC_FS

int get_xid_list(int index, unsigned int *xids, int size)
{
        int hindex, nr_xids = 0;

        for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) {
                struct hlist_head *head = &vx_info_hash[hindex];
                struct hlist_node *pos;

                spin_lock(&vx_info_hash_lock);
                hlist_for_each(pos, head) {
                        struct vx_info *vxi;

                        if (--index > 0)
                                continue;

                        vxi = hlist_entry(pos, struct vx_info, vx_hlist);
                        xids[nr_xids] = vxi->vx_id;
                        if (++nr_xids >= size) {
                                spin_unlock(&vx_info_hash_lock);
                                goto out;
                        }
                }
                /* keep the lock time short */
                spin_unlock(&vx_info_hash_lock);
        }
out:
        return nr_xids;
}
#endif
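
/*
 * Sketch (added, illustrative): a /proc reader fills a scratch buffer
 * with hashed xids, e.g.
 *
 *      unsigned int xids[64];
 *      int nr = get_xid_list(index, xids, 64);
 *
 * where `index' is the scan offset; the actual caller lives in the
 * proc code of this tree.
 */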

int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
{
        struct user_struct *new_user, *old_user;

        BUG_ON(!p || !vxi);

        new_user = alloc_uid(vxi->vx_id, p->uid);
        if (!new_user)
                return -ENOMEM;

        old_user = p->user;
        if (new_user != old_user) {
                atomic_inc(&new_user->processes);
                atomic_dec(&old_user->processes);
                p->user = new_user;
        }
        free_uid(old_user);
        return 0;
}

void vx_mask_bcaps(struct task_struct *p)
{
        struct vx_info *vxi = p->vx_info;

        p->cap_effective &= vxi->vx_bcaps;
        p->cap_inheritable &= vxi->vx_bcaps;
        p->cap_permitted &= vxi->vx_bcaps;
}


#include <linux/file.h>

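/*
 * count the open file descriptors of a task by popcounting the
 * open_fds bitmap one long at a time (comment added)
 */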
static int vx_openfd_task(struct task_struct *tsk)
{
        struct files_struct *files = tsk->files;
        const unsigned long *bptr;
        int count, total;

        spin_lock(&files->file_lock);
        bptr = files->open_fds->fds_bits;
        count = files->max_fds / (sizeof(unsigned long) * 8);
        for (total = 0; count > 0; count--) {
                if (*bptr)
                        total += hweight_long(*bptr);
                bptr++;
        }
        spin_unlock(&files->file_lock);
        return total;
}

/*
 *      migrate task to new context
 *      gets vxi, puts old_vxi on change
 */

int vx_migrate_task(struct task_struct *p, struct vx_info *vxi)
{
        struct vx_info *old_vxi;
        int ret = 0;

        BUG_ON(!p || !vxi);

        old_vxi = task_get_vx_info(p);
        if (old_vxi == vxi)
                goto out;

        vxdprintk(VXD_CBIT(xid, 5),
                "vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
                vxi->vx_id, atomic_read(&vxi->vx_usecnt));

        if (!(ret = vx_migrate_user(p, vxi))) {
                int openfd;

                task_lock(p);
                openfd = vx_openfd_task(p);

                if (old_vxi) {
                        atomic_dec(&old_vxi->cvirt.nr_threads);
                        atomic_dec(&old_vxi->cvirt.nr_running);
                        atomic_dec(&old_vxi->limit.rcur[RLIMIT_NPROC]);
                        /* FIXME: what about the struct files here? */
                        atomic_sub(openfd, &old_vxi->limit.rcur[VLIMIT_OPENFD]);
                }
                atomic_inc(&vxi->cvirt.nr_threads);
                atomic_inc(&vxi->cvirt.nr_running);
                atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
                /* FIXME: what about the struct files here? */
                atomic_add(openfd, &vxi->limit.rcur[VLIMIT_OPENFD]);

                if (old_vxi) {
                        release_vx_info(old_vxi, p);
                        clr_vx_info(&p->vx_info);
                }
                claim_vx_info(vxi, p);
                set_vx_info(&p->vx_info, vxi);
                p->xid = vxi->vx_id;

                vxdprintk(VXD_CBIT(xid, 5),
                        "moved task %p into vxi:%p[#%d]",
                        p, vxi, vxi->vx_id);

                vx_mask_bcaps(p);
                task_unlock(p);
        }
out:
        put_vx_info(old_vxi);
        return ret;
}

int vx_set_init(struct vx_info *vxi, struct task_struct *p)
{
        if (!vxi)
                return -EINVAL;
        if (vxi->vx_initpid)
                return -EPERM;

        vxdprintk(VXD_CBIT(xid, 6),
                "vx_set_init(%p[#%d],%p[#%d,%d,%d])",
                vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);

        vxi->vx_initpid = p->tgid;
        return 0;
}

/* vserver syscall commands below here */

/* task xid and vx_info functions */

#include <asm/uaccess.h>

int vc_task_xid(uint32_t id, void __user *data)
{
        xid_t xid;

        if (id) {
                struct task_struct *tsk;

                if (!vx_check(0, VX_ADMIN|VX_WATCH))
                        return -EPERM;

                read_lock(&tasklist_lock);
                tsk = find_task_by_real_pid(id);
                xid = (tsk) ? tsk->xid : -ESRCH;
                read_unlock(&tasklist_lock);
        } else
                xid = vx_current_xid();
        return xid;
}


int vc_vx_info(uint32_t id, void __user *data)
{
        struct vx_info *vxi;
        struct vcmd_vx_info_v0 vc_data;

        if (!vx_check(0, VX_ADMIN))
                return -ENOSYS;
        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RESOURCE))
                return -EPERM;

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;

        vc_data.xid = vxi->vx_id;
        vc_data.initpid = vxi->vx_initpid;
        put_vx_info(vxi);

        if (copy_to_user(data, &vc_data, sizeof(vc_data)))
                return -EFAULT;
        return 0;
}


/* context functions */

int vc_ctx_create(uint32_t xid, void __user *data)
{
        struct vx_info *new_vxi;
        int ret;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if ((xid > MAX_S_CONTEXT) && (xid != VX_DYNAMIC_ID))
                return -EINVAL;

        if (xid < 2)
                return -EINVAL;

        new_vxi = __create_vx_info(xid);
        if (IS_ERR(new_vxi))
                return PTR_ERR(new_vxi);

        vs_state_change(new_vxi, VSC_STARTUP);
        ret = new_vxi->vx_id;
        /* if this fails, we might end up with a hashed vx_info */
        vx_migrate_task(current, new_vxi);
        put_vx_info(new_vxi);
        return ret;
}
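
/*
 * note (added): the reference obtained from __create_vx_info() is
 * dropped by the final put_vx_info() above; after vx_migrate_task()
 * it is the task's claim on the context that keeps it alive
 */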


int vc_ctx_migrate(uint32_t id, void __user *data)
{
        struct vx_info *vxi;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* dirty hack until Spectator becomes a cap */
        if (id == 1) {
                current->xid = 1;
                return 0;
        }

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;
        vx_migrate_task(current, vxi);
        put_vx_info(vxi);
        return 0;
}


int vc_get_cflags(uint32_t id, void __user *data)
{
        struct vx_info *vxi;
        struct vcmd_ctx_flags_v0 vc_data;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;

        vc_data.flagword = vxi->vx_flags;

        /* special STATE flag handling */
        vc_data.mask = vx_mask_flags(~0UL, vxi->vx_flags, VXF_ONE_TIME);

        put_vx_info(vxi);

        if (copy_to_user(data, &vc_data, sizeof(vc_data)))
                return -EFAULT;
        return 0;
}

int vc_set_cflags(uint32_t id, void __user *data)
{
        struct vx_info *vxi;
        struct vcmd_ctx_flags_v0 vc_data;
        uint64_t mask, trigger;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;

        /* special STATE flag handling */
        mask = vx_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
        trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword);

        if (trigger & VXF_STATE_SETUP)
                vx_mask_bcaps(current);
        if (trigger & VXF_STATE_INIT)
                if (vxi == current->vx_info)
                        vx_set_init(vxi, current);

        vxi->vx_flags = vx_mask_flags(vxi->vx_flags,
                vc_data.flagword, mask);
        put_vx_info(vxi);
        return 0;
}
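
/*
 * worked example (added): assuming the usual vserver helpers
 *
 *      vx_mask_flags(v,f,m) = (v & ~m) | (f & m)
 *      vx_mask_mask(v,f,m)  = (v & ~m) | (v & f & m)
 *
 * a one-time bit like VXF_STATE_SETUP stays in `mask' only while it
 * is still set in vx_flags, so it can be cleared exactly once (firing
 * the trigger above) and never set again afterwards
 */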

int vc_get_ccaps(uint32_t id, void __user *data)
{
        struct vx_info *vxi;
        struct vcmd_ctx_caps_v0 vc_data;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;

        vc_data.bcaps = vxi->vx_bcaps;
        vc_data.ccaps = vxi->vx_ccaps;
        vc_data.cmask = ~0UL;
        put_vx_info(vxi);

        if (copy_to_user(data, &vc_data, sizeof(vc_data)))
                return -EFAULT;
        return 0;
}

int vc_set_ccaps(uint32_t id, void __user *data)
{
        struct vx_info *vxi;
        struct vcmd_ctx_caps_v0 vc_data;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;

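        /* bounding caps can only ever be reduced; context caps are
         * set or cleared under vc_data.cmask (comment added) */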
        vxi->vx_bcaps &= vc_data.bcaps;
        vxi->vx_ccaps = vx_mask_flags(vxi->vx_ccaps,
                vc_data.ccaps, vc_data.cmask);
        put_vx_info(vxi);
        return 0;
}

#include <linux/module.h>

EXPORT_SYMBOL_GPL(free_vx_info);