/*
 *  linux/kernel/vserver/context.c
 *
 *  Virtual Server: Context Support
 *
 *  Copyright (C) 2003-2005  Herbert Pötzl
 *
 *  V0.01  context helper
 *  V0.02  vx_ctx_kill syscall command
 *  V0.03  replaced context_info calls
 *  V0.04  redesign of struct (de)alloc
 *  V0.05  rlimit basic implementation
 *  V0.06  task_xid and info commands
 *  V0.07  context flags and caps
 *  V0.08  switch to RCU based hash
 *  V0.09  revert to non RCU for now
 *  V0.10  and back to working RCU hash
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/namespace.h>

#include <linux/sched.h>
#include <linux/vserver/network.h>
#include <linux/vserver/legacy.h>
#include <linux/vserver/limit.h>
#include <linux/vserver/debug.h>
#include <linux/vs_context.h>
#include <linux/vserver/context_cmd.h>

#include <asm/errno.h>

#include "cvirt_init.h"
#include "limit_init.h"
#include "sched_init.h"

/*      __alloc_vx_info()

        * allocate an initialized vx_info struct
        * doesn't make it visible (hash)                        */

static struct vx_info *__alloc_vx_info(xid_t xid)
{
        struct vx_info *new = NULL;

        vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);

        /* would this benefit from a slab cache? */
        new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
        if (!new)
                return NULL;

        memset(new, 0, sizeof(struct vx_info));
        new->vx_id = xid;
        INIT_RCU_HEAD(&new->vx_rcu);
        INIT_HLIST_NODE(&new->vx_hlist);
        atomic_set(&new->vx_refcnt, 0);
        atomic_set(&new->vx_usecnt, 0);
        new->vx_parent = NULL;
        new->vx_state = 0;
        new->vx_lock = SPIN_LOCK_UNLOCKED;
        init_waitqueue_head(&new->vx_exit);

        /* rest of init goes here */
        vx_info_init_limit(&new->limit);
        vx_info_init_sched(&new->sched);
        vx_info_init_cvirt(&new->cvirt);
        vx_info_init_cacct(&new->cacct);

        new->vx_flags = VXF_STATE_SETUP|VXF_STATE_INIT;
        new->vx_bcaps = CAP_INIT_EFF_SET;
        new->vx_ccaps = 0;

        vxdprintk(VXD_CBIT(xid, 0),
                "alloc_vx_info(%d) = %p", xid, new);
        vxh_alloc_vx_info(new);
        return new;
}

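/*      vx_usecnt and vx_refcnt both start out at zero: the use
        count tracks active users of the structure, the ref count
        tracks references to it; __free_vx_info() below disposes
        of a vx_info only once the use count is back to zero    */
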
/*      __dealloc_vx_info()

        * final disposal of vx_info                             */

static void __dealloc_vx_info(struct vx_info *vxi)
{
        vxdprintk(VXD_CBIT(xid, 0),
                "dealloc_vx_info(%p)", vxi);
        vxh_dealloc_vx_info(vxi);

        vxi->vx_hlist.next = LIST_POISON1;
        vxi->vx_id = -1;

        vx_info_exit_limit(&vxi->limit);
        vx_info_exit_sched(&vxi->sched);
        vx_info_exit_cvirt(&vxi->cvirt);
        vx_info_exit_cacct(&vxi->cacct);

        BUG_ON(atomic_read(&vxi->vx_usecnt));
        BUG_ON(atomic_read(&vxi->vx_refcnt));

        BUG_ON(vx_info_state(vxi, VXS_HASHED));
        // BUG_ON(!vx_state(vxi, VXS_DEFUNCT));

        vxi->vx_state |= VXS_RELEASED;
        kfree(vxi);
}

static inline int __free_vx_info(struct vx_info *vxi)
{
        int usecnt, refcnt;

        BUG_ON(!vxi);

        usecnt = atomic_read(&vxi->vx_usecnt);
        BUG_ON(usecnt < 0);

        refcnt = atomic_read(&vxi->vx_refcnt);
        BUG_ON(refcnt < 0);

        if (!usecnt)
                __dealloc_vx_info(vxi);
        return usecnt;
}

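/*      __free_vx_info() returns the remaining use count, so a
        caller like free_vx_info() can BUG() on a structure that
        is still in use instead of silently leaking it          */
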
static void __rcu_put_vx_info(struct rcu_head *head)
{
        struct vx_info *vxi = container_of(head, struct vx_info, vx_rcu);

        vxdprintk(VXD_CBIT(xid, 3),
                "__rcu_put_vx_info(%p[#%d]): %d,%d",
                vxi, vxi->vx_id,
                atomic_read(&vxi->vx_usecnt),
                atomic_read(&vxi->vx_refcnt));
        put_vx_info(vxi);
}

void __shutdown_vx_info(struct vx_info *vxi)
{
        struct namespace *namespace;
        struct fs_struct *fs;

        might_sleep();

        namespace = xchg(&vxi->vx_namespace, NULL);
        if (namespace)
                put_namespace(namespace);

        fs = xchg(&vxi->vx_fs, NULL);
        if (fs)
                put_fs_struct(fs);
}

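/*      the xchg() above detaches the namespace and fs pointers
        atomically, so each reference is put exactly once even
        if two paths should race on the shutdown                */
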
/* exported stuff */

void free_vx_info(struct vx_info *vxi)
{
        /* context shutdown is mandatory */
        // BUG_ON(vxi->vx_state != VXS_SHUTDOWN);

        BUG_ON(vxi->vx_state & VXS_HASHED);

        BUG_ON(vxi->vx_namespace);
        BUG_ON(vxi->vx_fs);

        BUG_ON(__free_vx_info(vxi));
}

/*      hash table for vx_info hash */

#define VX_HASH_SIZE    13

struct hlist_head vx_info_hash[VX_HASH_SIZE];

static spinlock_t vx_info_hash_lock = SPIN_LOCK_UNLOCKED;


static inline unsigned int __hashval(xid_t xid)
{
        return (xid % VX_HASH_SIZE);
}
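
/*      trivial modulo distribution over a prime number of
        buckets, e.g. xid 42 hashes to chain 42 % 13 = 3        */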

/*      __hash_vx_info()

        * add the vxi to the global hash table
        * requires the hash_lock to be held                     */

static inline void __hash_vx_info(struct vx_info *vxi)
{
        struct hlist_head *head;

        vxdprintk(VXD_CBIT(xid, 4),
                "__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
        vxh_hash_vx_info(vxi);

        get_vx_info(vxi);
        vxi->vx_state |= VXS_HASHED;
        head = &vx_info_hash[__hashval(vxi->vx_id)];
        hlist_add_head_rcu(&vxi->vx_hlist, head);
}

/*      __unhash_vx_info()

        * remove the vxi from the global hash table
        * requires the hash_lock to be held                     */

static inline void __unhash_vx_info(struct vx_info *vxi)
{
        vxdprintk(VXD_CBIT(xid, 4),
                "__unhash_vx_info: %p[#%d]", vxi, vxi->vx_id);
        vxh_unhash_vx_info(vxi);

        vxi->vx_state &= ~VXS_HASHED;
        hlist_del_rcu(&vxi->vx_hlist);

        call_rcu(&vxi->vx_rcu, __rcu_put_vx_info);
}
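
/*      readers may still be traversing the hash chain under
        rcu_read_lock(), so the reference held by the hash is
        not dropped here but deferred via call_rcu() until a
        grace period has elapsed                                */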

/*      __lookup_vx_info()

        * requires the rcu_read_lock()
        * doesn't increment the vx_refcnt                       */

static inline struct vx_info *__lookup_vx_info(xid_t xid)
{
        struct hlist_head *head = &vx_info_hash[__hashval(xid)];
        struct hlist_node *pos;
        struct vx_info *vxi;

        hlist_for_each_rcu(pos, head) {
                vxi = hlist_entry(pos, struct vx_info, vx_hlist);

                if ((vxi->vx_id == xid) &&
                        vx_info_state(vxi, VXS_HASHED))
                        goto found;
        }
        vxi = NULL;
found:
        vxdprintk(VXD_CBIT(xid, 0),
                "__lookup_vx_info(#%u): %p[#%u]",
                xid, vxi, vxi ? vxi->vx_id : 0);
        vxh_lookup_vx_info(xid, vxi);
        return vxi;
}

/*      __vx_dynamic_id()

        * find unused dynamic xid
        * requires the rcu_read_lock()
        * requires the hash_lock to be held                     */

static inline xid_t __vx_dynamic_id(void)
{
        static xid_t seq = MAX_S_CONTEXT;
        xid_t barrier = seq;

        do {
                if (++seq > MAX_S_CONTEXT)
                        seq = MIN_D_CONTEXT;
                if (!__lookup_vx_info(seq)) {
                        vxdprintk(VXD_CBIT(xid, 4),
                                "__vx_dynamic_id: [#%d]", seq);
                        return seq;
                }
        } while (barrier != seq);
        return 0;
}
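
/*      the scan starts right after the last id handed out,
        wraps around from MAX_S_CONTEXT to MIN_D_CONTEXT and
        gives up with 0 once it reaches its starting point      */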

/*      __loc_vx_info()

        * locate or create the requested context
        * get() it and if new hash it                           */

static struct vx_info *__loc_vx_info(int id, int *err)
{
        struct vx_info *new, *vxi = NULL;

        vxdprintk(VXD_CBIT(xid, 1), "loc_vx_info(%d)*", id);

        if (!(new = __alloc_vx_info(id))) {
                *err = -ENOMEM;
                return NULL;
        }

        /* FIXME is this required at all ? */
        rcu_read_lock();
        /* required to make dynamic xids unique */
        spin_lock(&vx_info_hash_lock);

        /* dynamic context requested */
        if (id == VX_DYNAMIC_ID) {
                id = __vx_dynamic_id();
                if (!id) {
                        printk(KERN_ERR "no dynamic context available.\n");
                        *err = -EAGAIN; /* don't leave *err unset */
                        goto out_unlock;
                }
                new->vx_id = id;
        }
        /* existing context requested */
        else if ((vxi = __lookup_vx_info(id))) {
                /* context in setup is not available */
                if (vxi->vx_flags & VXF_STATE_SETUP) {
                        vxdprintk(VXD_CBIT(xid, 0),
                                "loc_vx_info(%d) = %p (not available)", id, vxi);
                        vxi = NULL;
                        *err = -EBUSY;
                } else {
                        vxdprintk(VXD_CBIT(xid, 0),
                                "loc_vx_info(%d) = %p (found)", id, vxi);
                        get_vx_info(vxi);
                        *err = 0;
                }
                goto out_unlock;
        }

        /* new context requested */
        vxdprintk(VXD_CBIT(xid, 0),
                "loc_vx_info(%d) = %p (new)", id, new);
        __hash_vx_info(get_vx_info(new));
        vxi = new;
        new = NULL;
        *err = 1;

out_unlock:
        spin_unlock(&vx_info_hash_lock);
        rcu_read_unlock();
        vxh_loc_vx_info(id, vxi);
        if (new)
                __dealloc_vx_info(new);
        return vxi;
}
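
/*      __loc_vx_info() error protocol: *err is -ENOMEM on
        allocation failure, -EAGAIN if no dynamic xid is free,
        -EBUSY if the context exists but is still in setup,
        0 for an existing context and 1 for a newly created
        and hashed one                                          */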


/*      exported stuff                                          */


void unhash_vx_info(struct vx_info *vxi)
{
        __shutdown_vx_info(vxi);
        spin_lock(&vx_info_hash_lock);
        __unhash_vx_info(vxi);
        spin_unlock(&vx_info_hash_lock);
}

/*      locate_vx_info()

        * search for a vx_info and get() it
        * negative id means current                             */

struct vx_info *locate_vx_info(int id)
{
        struct vx_info *vxi;

        if (id < 0) {
                vxi = get_vx_info(current->vx_info);
        } else {
                rcu_read_lock();
                vxi = get_vx_info(__lookup_vx_info(id));
                rcu_read_unlock();
        }
        return vxi;
}

/*      vx_info_is_hashed()

        * verify that xid is still hashed                       */

int vx_info_is_hashed(xid_t xid)
{
        int hashed;

        rcu_read_lock();
        hashed = (__lookup_vx_info(xid) != NULL);
        rcu_read_unlock();
        return hashed;
}

#ifdef  CONFIG_VSERVER_LEGACY

#if 0
struct vx_info *alloc_vx_info(xid_t xid)
{
        return __alloc_vx_info(xid);
}
#endif

struct vx_info *locate_or_create_vx_info(int id)
{
        int err;

        return __loc_vx_info(id, &err);
}

#endif

#ifdef  CONFIG_PROC_FS

int get_xid_list(int index, unsigned int *xids, int size)
{
        int hindex, nr_xids = 0;

        rcu_read_lock();
        for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) {
                struct hlist_head *head = &vx_info_hash[hindex];
                struct hlist_node *pos;

                hlist_for_each_rcu(pos, head) {
                        struct vx_info *vxi;

                        if (--index > 0)
                                continue;

                        vxi = hlist_entry(pos, struct vx_info, vx_hlist);
                        xids[nr_xids] = vxi->vx_id;
                        if (++nr_xids >= size)
                                goto out;
                }
        }
out:
        rcu_read_unlock();
        return nr_xids;
}
#endif

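/*      get_xid_list() walks all hash chains, skips entries up
        to the requested start position and then stores up to
        `size' context ids in the buffer, returning how many
        it copied; used by the /proc interface above            */
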
int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
{
        struct user_struct *new_user, *old_user;

        BUG_ON(!p || !vxi);

        new_user = alloc_uid(vxi->vx_id, p->uid);
        if (!new_user)
                return -ENOMEM;

        old_user = p->user;
        if (new_user != old_user) {
                atomic_inc(&new_user->processes);
                atomic_dec(&old_user->processes);
                p->user = new_user;
        }
        free_uid(old_user);
        return 0;
}

void vx_mask_bcaps(struct task_struct *p)
{
        struct vx_info *vxi = p->vx_info;

        p->cap_effective &= vxi->vx_bcaps;
        p->cap_inheritable &= vxi->vx_bcaps;
        p->cap_permitted &= vxi->vx_bcaps;
}

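/*      vx_bcaps acts as an upper bound on task capabilities:
        migrating into a context masks the effective,
        inheritable and permitted sets with it                  */
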
#include <linux/file.h>

static inline int vx_nofiles_task(struct task_struct *tsk)
{
        struct files_struct *files = tsk->files;
        unsigned long *obptr;
        int count, total;

        spin_lock(&files->file_lock);
        obptr = files->open_fds->fds_bits;
        count = files->max_fds / (sizeof(unsigned long) * 8);
        for (total = 0; count > 0; count--) {
                if (*obptr)
                        total += hweight_long(*obptr);
                obptr++;
        }
        spin_unlock(&files->file_lock);
        return total;
}

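/*      open files are counted by popcounting (hweight_long)
        the words of the open_fds bitmap under file_lock        */
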
#if 0

static inline int vx_openfd_task(struct task_struct *tsk)
{
        struct files_struct *files = tsk->files;
        const unsigned long *bptr;
        int count, total;

        spin_lock(&files->file_lock);
        bptr = files->open_fds->fds_bits;
        count = files->max_fds / (sizeof(unsigned long) * 8);
        for (total = 0; count > 0; count--) {
                if (*bptr)
                        total += hweight_long(*bptr);
                bptr++;
        }
        spin_unlock(&files->file_lock);
        return total;
}

#endif

/*
 *      migrate task to new context
 *      gets vxi, puts old_vxi on change
 */

int vx_migrate_task(struct task_struct *p, struct vx_info *vxi)
{
        struct vx_info *old_vxi;
        int ret = 0;

        BUG_ON(!p || !vxi);

        old_vxi = task_get_vx_info(p);
        if (old_vxi == vxi)
                goto out;

        vxdprintk(VXD_CBIT(xid, 5),
                "vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
                vxi->vx_id, atomic_read(&vxi->vx_usecnt));

        if (!(ret = vx_migrate_user(p, vxi))) {
                int nofiles;

                task_lock(p);
                // openfd = vx_openfd_task(p);
                nofiles = vx_nofiles_task(p);

                if (old_vxi) {
                        atomic_dec(&old_vxi->cvirt.nr_threads);
                        atomic_dec(&old_vxi->cvirt.nr_running);
                        atomic_dec(&old_vxi->limit.rcur[RLIMIT_NPROC]);
                        /* FIXME: what about the struct files here? */
                        // atomic_sub(nofiles, &old_vxi->limit.rcur[RLIMIT_NOFILE]);
                        // atomic_sub(openfd, &old_vxi->limit.rcur[RLIMIT_OPENFD]);
                }
                atomic_inc(&vxi->cvirt.nr_threads);
                atomic_inc(&vxi->cvirt.nr_running);
                atomic_inc(&vxi->limit.rcur[RLIMIT_NPROC]);
                /* FIXME: what about the struct files here? */
                // atomic_add(nofiles, &vxi->limit.rcur[RLIMIT_NOFILE]);
                // atomic_add(openfd, &vxi->limit.rcur[RLIMIT_OPENFD]);

                vxdprintk(VXD_CBIT(xid, 5),
                        "moved task %p into vxi:%p[#%d]",
                        p, vxi, vxi->vx_id);

                /* should be handled in set_vx_info !! */
                if (old_vxi)
                        clr_vx_info(&p->vx_info);
                set_vx_info(&p->vx_info, vxi);
                p->xid = vxi->vx_id;
                vx_mask_bcaps(p);
                task_unlock(p);

                /* obsoleted by clr/set */
                // put_vx_info(old_vxi);
        }
out:
        put_vx_info(old_vxi);
        return ret;
}

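/*      ordering in vx_migrate_task(): the per-user accounting
        is switched first, then the thread/process counters are
        moved and the vx_info pointer is exchanged under
        task_lock(), and finally the capability bound applied   */
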
int vx_set_init(struct vx_info *vxi, struct task_struct *p)
{
        if (!vxi)
                return -EINVAL;
        if (vxi->vx_initpid)
                return -EPERM;

        vxdprintk(VXD_CBIT(xid, 6),
                "vx_set_init(%p[#%d],%p[#%d,%d,%d])",
                vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);

        vxi->vx_initpid = p->tgid;
        return 0;
}

/* vserver syscall commands below here */

/* task xid and vx_info functions */

#include <asm/uaccess.h>

int vc_task_xid(uint32_t id, void __user *data)
{
        xid_t xid;

        if (id) {
                struct task_struct *tsk;

                if (!vx_check(0, VX_ADMIN|VX_WATCH))
                        return -EPERM;

                read_lock(&tasklist_lock);
                tsk = find_task_by_real_pid(id);
                xid = (tsk) ? tsk->xid : -ESRCH;
                read_unlock(&tasklist_lock);
        } else
                xid = vx_current_xid();
        return xid;
}

int vc_vx_info(uint32_t id, void __user *data)
{
        struct vx_info *vxi;
        struct vcmd_vx_info_v0 vc_data;

        if (!vx_check(0, VX_ADMIN))
                return -ENOSYS;
        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RESOURCE))
                return -EPERM;

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;

        vc_data.xid = vxi->vx_id;
        vc_data.initpid = vxi->vx_initpid;
        put_vx_info(vxi);

        if (copy_to_user(data, &vc_data, sizeof(vc_data)))
                return -EFAULT;
        return 0;
}

/* context functions */

int vc_ctx_create(uint32_t xid, void __user *data)
{
        struct vx_info *new_vxi;
        int ret;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if ((xid >= MIN_D_CONTEXT) && (xid != VX_DYNAMIC_ID))
                return -EINVAL;

        if (xid < 1)
                return -EINVAL;

        new_vxi = __loc_vx_info(xid, &ret);
        if (!new_vxi)
                return ret;
        if (!(new_vxi->vx_flags & VXF_STATE_SETUP)) {
                ret = -EEXIST;
                goto out_put;
        }

        ret = new_vxi->vx_id;
        vx_migrate_task(current, new_vxi);
        /* if this fails, we might end up with a hashed vx_info */
out_put:
        put_vx_info(new_vxi);
        return ret;
}

int vc_ctx_migrate(uint32_t id, void __user *data)
{
        struct vx_info *vxi;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* dirty hack until Spectator becomes a cap */
        if (id == 1) {
                current->xid = 1;
                return 0;
        }

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;
        vx_migrate_task(current, vxi);
        put_vx_info(vxi);
        return 0;
}

int vc_get_cflags(uint32_t id, void __user *data)
{
        struct vx_info *vxi;
        struct vcmd_ctx_flags_v0 vc_data;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;

        vc_data.flagword = vxi->vx_flags;

        /* special STATE flag handling */
        vc_data.mask = vx_mask_flags(~0UL, vxi->vx_flags, VXF_ONE_TIME);

        put_vx_info(vxi);

        if (copy_to_user(data, &vc_data, sizeof(vc_data)))
                return -EFAULT;
        return 0;
}

int vc_set_cflags(uint32_t id, void __user *data)
{
        struct vx_info *vxi;
        struct vcmd_ctx_flags_v0 vc_data;
        uint64_t mask, trigger;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;

        /* special STATE flag handling */
        mask = vx_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
        trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword);

        if (trigger & VXF_STATE_SETUP)
                vx_mask_bcaps(current);
        if (trigger & VXF_STATE_INIT)
                if (vxi == current->vx_info)
                        vx_set_init(vxi, current);

        vxi->vx_flags = vx_mask_flags(vxi->vx_flags,
                vc_data.flagword, mask);
        put_vx_info(vxi);
        return 0;
}

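/*      the trigger word XORs the masked old and new flag words,
        so only bits that actually change state fire the
        one-time SETUP/INIT actions above                       */
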
int vc_get_ccaps(uint32_t id, void __user *data)
{
        struct vx_info *vxi;
        struct vcmd_ctx_caps_v0 vc_data;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;

        vc_data.bcaps = vxi->vx_bcaps;
        vc_data.ccaps = vxi->vx_ccaps;
        vc_data.cmask = ~0UL;
        put_vx_info(vxi);

        if (copy_to_user(data, &vc_data, sizeof(vc_data)))
                return -EFAULT;
        return 0;
}

int vc_set_ccaps(uint32_t id, void __user *data)
{
        struct vx_info *vxi;
        struct vcmd_ctx_caps_v0 vc_data;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (copy_from_user(&vc_data, data, sizeof(vc_data)))
                return -EFAULT;

        vxi = locate_vx_info(id);
        if (!vxi)
                return -ESRCH;

        vxi->vx_bcaps &= vc_data.bcaps;
        vxi->vx_ccaps = vx_mask_flags(vxi->vx_ccaps,
                vc_data.ccaps, vc_data.cmask);
        put_vx_info(vxi);
        return 0;
}

#include <linux/module.h>

EXPORT_SYMBOL_GPL(free_vx_info);
EXPORT_SYMBOL_GPL(unhash_vx_info);