Merge to Fedora kernel-2.6.18-1.2255_FC5-vs2.0.2.2-rc9 patched with stable patch...
[linux-2.6.git] / kernel / vserver / network.c
1 /*
2  *  linux/kernel/vserver/network.c
3  *
4  *  Virtual Server: Network Support
5  *
6  *  Copyright (C) 2003-2006  Herbert Pötzl
7  *
8  *  V0.01  broken out from vcontext V0.05
9  *  V0.02  cleaned up implementation
10  *  V0.03  added equiv nx commands
11  *  V0.04  switch to RCU based hash
12  *  V0.05  and back to locking again
13  *  V0.06  have __create claim() the nxi
14  *
15  */
16
17 #include <linux/slab.h>
18 #include <linux/vserver/network_cmd.h>
19 #include <linux/rcupdate.h>
20 #include <net/tcp.h>
21
22 #include <asm/errno.h>
23
24
25 /*      __alloc_nx_info()
26
27         * allocate an initialized nx_info struct
28         * doesn't make it visible (hash)                        */
29
30 static struct nx_info *__alloc_nx_info(nid_t nid)
31 {
32         struct nx_info *new = NULL;
33
34         vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid);
35
36         /* would this benefit from a slab cache? */
37         new = kmalloc(sizeof(struct nx_info), GFP_KERNEL);
38         if (!new)
39                 return 0;
40
41         memset (new, 0, sizeof(struct nx_info));
42         new->nx_id = nid;
43         INIT_HLIST_NODE(&new->nx_hlist);
44         atomic_set(&new->nx_usecnt, 0);
45         atomic_set(&new->nx_tasks, 0);
46         new->nx_state = 0;
47
48         new->nx_flags = NXF_INIT_SET;
49
50         /* rest of init goes here */
51
52         vxdprintk(VXD_CBIT(nid, 0),
53                 "alloc_nx_info(%d) = %p", nid, new);
54         return new;
55 }
56
57 /*      __dealloc_nx_info()
58
59         * final disposal of nx_info                             */
60
static void __dealloc_nx_info(struct nx_info *nxi)
{
	/* 'nid' here is presumably a debug-category token consumed by the
	   VXD_CBIT() macro, not a local variable — TODO confirm */
	vxdprintk(VXD_CBIT(nid, 0),
		"dealloc_nx_info(%p)", nxi);

	/* poison the hash linkage and id so stale users fault loudly */
	nxi->nx_hlist.next = LIST_POISON1;
	nxi->nx_id = -1;

	/* must not be freed while referenced or in use by tasks */
	BUG_ON(atomic_read(&nxi->nx_usecnt));
	BUG_ON(atomic_read(&nxi->nx_tasks));

	nxi->nx_state |= NXS_RELEASED;
	kfree(nxi);
}
75
/* mark the context as shut down and raise the net-down event */
static void __shutdown_nx_info(struct nx_info *nxi)
{
	nxi->nx_state |= NXS_SHUTDOWN;
	vs_net_change(nxi, VSC_NETDOWN);
}
81
82 /*      exported stuff                                          */
83
/* final release of an nx_info; only legal once the context has been
   shut down, unhashed, and holds no references or member tasks */
void free_nx_info(struct nx_info *nxi)
{
	/* context shutdown is mandatory */
	BUG_ON(nxi->nx_state != NXS_SHUTDOWN);

	/* context must not be hashed */
	/* NOTE(review): the exact-equality check above already rules
	   this out, so this BUG_ON can never fire */
	BUG_ON(nxi->nx_state & NXS_HASHED);

	BUG_ON(atomic_read(&nxi->nx_usecnt));
	BUG_ON(atomic_read(&nxi->nx_tasks));

	__dealloc_nx_info(nxi);
}
97
98
/*	hash table for nx_info hash */

#define NX_HASH_SIZE    13

/* bucket heads, indexed by __hashval(nid) */
struct hlist_head nx_info_hash[NX_HASH_SIZE];

/* protects nx_info_hash and the NXS_HASHED state transitions */
static spinlock_t nx_info_hash_lock = SPIN_LOCK_UNLOCKED;
106
107
108 static inline unsigned int __hashval(nid_t nid)
109 {
110         return (nid % NX_HASH_SIZE);
111 }
112
113
114
115 /*      __hash_nx_info()
116
117         * add the nxi to the global hash table
118         * requires the hash_lock to be held                     */
119
static inline void __hash_nx_info(struct nx_info *nxi)
{
	struct hlist_head *head;

	/* caller must hold nx_info_hash_lock */
	vxd_assert_lock(&nx_info_hash_lock);
	vxdprintk(VXD_CBIT(nid, 4),
		"__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);

	/* context must not be hashed */
	BUG_ON(nx_info_state(nxi, NXS_HASHED));

	/* mark hashed first, then insert into the bucket for nx_id */
	nxi->nx_state |= NXS_HASHED;
	head = &nx_info_hash[__hashval(nxi->nx_id)];
	hlist_add_head(&nxi->nx_hlist, head);
}
135
/*	__unhash_nx_info()

	* remove the nxi from the global hash table
	* acquires the hash_lock itself				*/
140
static inline void __unhash_nx_info(struct nx_info *nxi)
{
	vxdprintk(VXD_CBIT(nid, 4),
		"__unhash_nx_info: %p[#%d]", nxi, nxi->nx_id);

	/* unlike __hash_nx_info(), the lock is taken here, not by the caller */
	spin_lock(&nx_info_hash_lock);
	/* context must be hashed */
	BUG_ON(!nx_info_state(nxi, NXS_HASHED));

	nxi->nx_state &= ~NXS_HASHED;
	hlist_del(&nxi->nx_hlist);
	spin_unlock(&nx_info_hash_lock);
}
154
155
/*	__lookup_nx_info()

	* requires the hash_lock to be held
	* doesn't take a reference (caller must get() the result)	*/
160
161 static inline struct nx_info *__lookup_nx_info(nid_t nid)
162 {
163         struct hlist_head *head = &nx_info_hash[__hashval(nid)];
164         struct hlist_node *pos;
165         struct nx_info *nxi;
166
167         vxd_assert_lock(&nx_info_hash_lock);
168         hlist_for_each(pos, head) {
169                 nxi = hlist_entry(pos, struct nx_info, nx_hlist);
170
171                 if (nxi->nx_id == nid)
172                         goto found;
173         }
174         nxi = NULL;
175 found:
176         vxdprintk(VXD_CBIT(nid, 0),
177                 "__lookup_nx_info(#%u): %p[#%u]",
178                 nid, nxi, nxi?nxi->nx_id:0);
179         return nxi;
180 }
181
182
183 /*      __nx_dynamic_id()
184
185         * find unused dynamic nid
186         * requires the hash_lock to be held                     */
187
static inline nid_t __nx_dynamic_id(void)
{
	/* rotor: remembers where the previous search stopped; the
	   hash lock (asserted below) also serializes access to it */
	static nid_t seq = MAX_N_CONTEXT;
	nid_t barrier = seq;

	vxd_assert_lock(&nx_info_hash_lock);
	do {
		/* wrap from MAX_N_CONTEXT back to the start of the
		   dynamic range */
		if (++seq > MAX_N_CONTEXT)
			seq = MIN_D_CONTEXT;
		if (!__lookup_nx_info(seq)) {
			vxdprintk(VXD_CBIT(nid, 4),
				"__nx_dynamic_id: [#%d]", seq);
			return seq;
		}
	} while (barrier != seq);
	/* full circle: every dynamic nid is in use */
	return 0;
}
205
206 /*      __create_nx_info()
207
208         * create the requested context
209         * get(), claim() and hash it                            */
210
/* create and hash the context for 'id' (NX_DYNAMIC_ID picks one);
   on success the returned nxi is claimed, get()ed and hashed;
   on failure an ERR_PTR is returned */
static struct nx_info * __create_nx_info(int id)
{
	struct nx_info *new, *nxi = NULL;

	vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id);

	if (!(new = __alloc_nx_info(id)))
		return ERR_PTR(-ENOMEM);

	/* required to make dynamic xids unique */
	spin_lock(&nx_info_hash_lock);

	/* dynamic context requested */
	if (id == NX_DYNAMIC_ID) {
		id = __nx_dynamic_id();
		if (!id) {
			printk(KERN_ERR "no dynamic context available.\n");
			nxi = ERR_PTR(-EAGAIN);
			goto out_unlock;
		}
		new->nx_id = id;
	}
	/* static context requested */
	else if ((nxi = __lookup_nx_info(id))) {
		vxdprintk(VXD_CBIT(nid, 0),
			"create_nx_info(%d) = %p (already there)", id, nxi);
		/* -EBUSY while still being set up, -EEXIST once live */
		if (nx_info_flags(nxi, NXF_STATE_SETUP, 0))
			nxi = ERR_PTR(-EBUSY);
		else
			nxi = ERR_PTR(-EEXIST);
		goto out_unlock;
	}
	/* dynamic nid creation blocker */
	else if (id >= MIN_D_CONTEXT) {
		vxdprintk(VXD_CBIT(nid, 0),
			"create_nx_info(%d) (dynamic rejected)", id);
		nxi = ERR_PTR(-EINVAL);
		goto out_unlock;
	}

	/* new context */
	vxdprintk(VXD_CBIT(nid, 0),
		"create_nx_info(%d) = %p (new)", id, new);
	claim_nx_info(new, NULL);
	__hash_nx_info(get_nx_info(new));
	/* hand ownership to nxi; clearing 'new' skips the dealloc below */
	nxi = new, new = NULL;

out_unlock:
	spin_unlock(&nx_info_hash_lock);
	/* 'new' is still set only on the error paths */
	if (new)
		__dealloc_nx_info(new);
	return nxi;
}
264
265
266
267 /*      exported stuff                                          */
268
269
/* shut the context down, then remove it from the global hash */
void unhash_nx_info(struct nx_info *nxi)
{
	__shutdown_nx_info(nxi);
	__unhash_nx_info(nxi);
}
275
276 #ifdef  CONFIG_VSERVER_LEGACYNET
277
/* legacy interface: create a context with a dynamically chosen nid */
struct nx_info *create_nx_info(void)
{
	return __create_nx_info(NX_DYNAMIC_ID);
}
282
283 #endif
284
285 /*      lookup_nx_info()
286
287         * search for a nx_info and get() it
288         * negative id means current                             */
289
290 struct nx_info *lookup_nx_info(int id)
291 {
292         struct nx_info *nxi = NULL;
293
294         if (id < 0) {
295                 nxi = get_nx_info(current->nx_info);
296         } else if (id > 1) {
297                 spin_lock(&nx_info_hash_lock);
298                 nxi = get_nx_info(__lookup_nx_info(id));
299                 spin_unlock(&nx_info_hash_lock);
300         }
301         return nxi;
302 }
303
304 /*      nid_is_hashed()
305
306         * verify that nid is still hashed                       */
307
308 int nid_is_hashed(nid_t nid)
309 {
310         int hashed;
311
312         spin_lock(&nx_info_hash_lock);
313         hashed = (__lookup_nx_info(nid) != NULL);
314         spin_unlock(&nx_info_hash_lock);
315         return hashed;
316 }
317
318
319 #ifdef  CONFIG_PROC_FS
320
/* fill nids[] with up to 'size' hashed nids, skipping the first
   'index' entries; returns the number of nids stored */
int get_nid_list(int index, unsigned int *nids, int size)
{
	int hindex, nr_nids = 0;

	for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) {
		struct hlist_head *head = &nx_info_hash[hindex];
		struct hlist_node *pos;

		spin_lock(&nx_info_hash_lock);
		hlist_for_each(pos, head) {
			struct nx_info *nxi;

			/* skip the first 'index' entries overall */
			if (--index > 0)
				continue;

			nxi = hlist_entry(pos, struct nx_info, nx_hlist);
			nids[nr_nids] = nxi->nx_id;
			/* stop as soon as the caller's buffer is full */
			if (++nr_nids >= size) {
				spin_unlock(&nx_info_hash_lock);
				goto out;
			}
		}
		/* keep the lock time short */
		spin_unlock(&nx_info_hash_lock);
	}
out:
	return nr_nids;
}
349 #endif
350
351
352 /*
353  *      migrate task to new network
354  *      gets nxi, puts old_nxi on change
355  */
356
357 int nx_migrate_task(struct task_struct *p, struct nx_info *nxi)
358 {
359         struct nx_info *old_nxi;
360         int ret = 0;
361
362         if (!p || !nxi)
363                 BUG();
364
365         vxdprintk(VXD_CBIT(nid, 5),
366                 "nx_migrate_task(%p,%p[#%d.%d.%d])",
367                 p, nxi, nxi->nx_id,
368                 atomic_read(&nxi->nx_usecnt),
369                 atomic_read(&nxi->nx_tasks));
370
371         /* maybe disallow this completely? */
372         old_nxi = task_get_nx_info(p);
373         if (old_nxi == nxi)
374                 goto out;
375
376         task_lock(p);
377         if (old_nxi)
378                 clr_nx_info(&p->nx_info);
379         claim_nx_info(nxi, p);
380         set_nx_info(&p->nx_info, nxi);
381         p->nid = nxi->nx_id;
382         task_unlock(p);
383
384         vxdprintk(VXD_CBIT(nid, 5),
385                 "moved task %p into nxi:%p[#%d]",
386                 p, nxi, nxi->nx_id);
387
388         if (old_nxi)
389                 release_nx_info(old_nxi, p);
390         ret = 0;
391 out:
392         put_nx_info(old_nxi);
393         return ret;
394 }
395
396
397 #ifdef CONFIG_INET
398
399 #include <linux/netdevice.h>
400 #include <linux/inetdevice.h>
401
402 int ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi)
403 {
404         if (!nxi)
405                 return 1;
406         if (!ifa)
407                 return 0;
408         return addr_in_nx_info(nxi, ifa->ifa_local);
409 }
410
411 int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
412 {
413         struct in_device *in_dev;
414         struct in_ifaddr **ifap;
415         struct in_ifaddr *ifa;
416         int ret = 0;
417
418         if (!nxi)
419                 return 1;
420
421         in_dev = in_dev_get(dev);
422         if (!in_dev)
423                 goto out;
424
425         for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
426                 ifap = &ifa->ifa_next) {
427                 if (addr_in_nx_info(nxi, ifa->ifa_local)) {
428                         ret = 1;
429                         break;
430                 }
431         }
432         in_dev_put(in_dev);
433 out:
434         return ret;
435 }
436
437 /*
438  *      check if address is covered by socket
439  *
440  *      sk:     the socket to check against
441  *      addr:   the address in question (must be != 0)
442  */
443 static inline int __addr_in_socket(struct sock *sk, uint32_t addr)
444 {
445         struct nx_info *nxi = sk->sk_nx_info;
446         uint32_t saddr = inet_rcv_saddr(sk);
447
448         vxdprintk(VXD_CBIT(net, 5),
449                 "__addr_in_socket(%p,%d.%d.%d.%d) %p:%d.%d.%d.%d %p;%lx",
450                 sk, VXD_QUAD(addr), nxi, VXD_QUAD(saddr), sk->sk_socket,
451                 (sk->sk_socket?sk->sk_socket->flags:0));
452
453         if (saddr) {
454                 /* direct address match */
455                 return (saddr == addr);
456         } else if (nxi) {
457                 /* match against nx_info */
458                 return addr_in_nx_info(nxi, addr);
459         } else {
460                 /* unrestricted any socket */
461                 return 1;
462         }
463 }
464
465
466 int nx_addr_conflict(struct nx_info *nxi, uint32_t addr, struct sock *sk)
467 {
468         vxdprintk(VXD_CBIT(net, 2),
469                 "nx_addr_conflict(%p,%p) %d.%d,%d.%d",
470                 nxi, sk, VXD_QUAD(addr));
471
472         if (addr) {
473                 /* check real address */
474                 return __addr_in_socket(sk, addr);
475         } else if (nxi) {
476                 /* check against nx_info */
477                 int i, n = nxi->nbipv4;
478
479                 for (i=0; i<n; i++)
480                         if (__addr_in_socket(sk, nxi->ipv4[i]))
481                                 return 1;
482                 return 0;
483         } else {
484                 /* check against any */
485                 return 1;
486         }
487 }
488
489 #endif /* CONFIG_INET */
490
/* make a context persistent: take an extra reference and claim so
   it stays alive without any member tasks */
void nx_set_persistent(struct nx_info *nxi)
{
	vxdprintk(VXD_CBIT(nid, 6),
		"nx_set_persistent(%p[#%d])", nxi, nxi->nx_id);

	get_nx_info(nxi);
	claim_nx_info(nxi, NULL);
}
499
/* undo nx_set_persistent(): drop the claim and the reference */
void nx_clear_persistent(struct nx_info *nxi)
{
	vxdprintk(VXD_CBIT(nid, 6),
		"nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id);

	release_nx_info(nxi, NULL);
	put_nx_info(nxi);
}
508
/* sync the persistent claim/reference with the NXF_PERSISTENT flag;
   callers invoke this only when the flag actually flips (see
   vc_set_nflags), so the get/put pairs stay balanced */
void nx_update_persistent(struct nx_info *nxi)
{
	if (nx_info_flags(nxi, NXF_PERSISTENT, 0))
		nx_set_persistent(nxi);
	else
		nx_clear_persistent(nxi);
}
516
517 /* vserver syscall commands below here */
518
/* task nid and nx_info functions */
520
521 #include <asm/uaccess.h>
522
523
524 int vc_task_nid(uint32_t id, void __user *data)
525 {
526         nid_t nid;
527
528         if (id) {
529                 struct task_struct *tsk;
530
531                 if (!vx_check(0, VX_ADMIN|VX_WATCH))
532                         return -EPERM;
533
534                 read_lock(&tasklist_lock);
535                 tsk = find_task_by_real_pid(id);
536                 nid = (tsk) ? tsk->nid : -ESRCH;
537                 read_unlock(&tasklist_lock);
538         }
539         else
540                 nid = nx_current_nid();
541         return nid;
542 }
543
544
545 int vc_nx_info(uint32_t id, void __user *data)
546 {
547         struct nx_info *nxi;
548         struct vcmd_nx_info_v0 vc_data;
549
550         if (!vx_check(0, VX_ADMIN))
551                 return -ENOSYS;
552         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RESOURCE))
553                 return -EPERM;
554
555         nxi = lookup_nx_info(id);
556         if (!nxi)
557                 return -ESRCH;
558
559         vc_data.nid = nxi->nx_id;
560         put_nx_info(nxi);
561
562         if (copy_to_user (data, &vc_data, sizeof(vc_data)))
563                 return -EFAULT;
564         return 0;
565 }
566
567
568 /* network functions */
569
570 int vc_net_create(uint32_t nid, void __user *data)
571 {
572         struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET };
573         struct nx_info *new_nxi;
574         int ret;
575
576         if (!capable(CAP_SYS_ADMIN))
577                 return -EPERM;
578         if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
579                 return -EFAULT;
580
581         if ((nid > MAX_S_CONTEXT) && (nid != VX_DYNAMIC_ID))
582                 return -EINVAL;
583         if (nid < 2)
584                 return -EINVAL;
585
586         new_nxi = __create_nx_info(nid);
587         if (IS_ERR(new_nxi))
588                 return PTR_ERR(new_nxi);
589
590         /* initial flags */
591         new_nxi->nx_flags = vc_data.flagword;
592
593         ret = -ENOEXEC;
594         if (vs_net_change(new_nxi, VSC_NETUP))
595                 goto out;
596
597         ret = nx_migrate_task(current, new_nxi);
598         if (ret)
599                 goto out;
600
601         /* return context id on success */
602         ret = new_nxi->nx_id;
603
604         /* get a reference for persistent contexts */
605         if ((vc_data.flagword & NXF_PERSISTENT))
606                 nx_set_persistent(new_nxi);
607 out:
608         release_nx_info(new_nxi, NULL);
609         put_nx_info(new_nxi);
610         return ret;
611 }
612
613
614 int vc_net_migrate(uint32_t id, void __user *data)
615 {
616         struct nx_info *nxi;
617
618         if (!capable(CAP_SYS_ADMIN))
619                 return -EPERM;
620
621         nxi = lookup_nx_info(id);
622         if (!nxi)
623                 return -ESRCH;
624         nx_migrate_task(current, nxi);
625         put_nx_info(nxi);
626         return 0;
627 }
628
/* add ipv4 addresses (or the broadcast address) to context 'nid';
   returns the number of addresses added, or a negative errno */
int vc_net_add(uint32_t nid, void __user *data)
{
	struct vcmd_net_addr_v0 vc_data;
	struct nx_info *nxi;
	int index, pos, ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/* NOTE(review): if data is NULL, vc_data stays uninitialized
	   and the switches below read indeterminate values — verify
	   whether the dispatcher can actually pass NULL here */
	if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	/* validate the request before looking up the context */
	switch (vc_data.type) {
	case NXA_TYPE_IPV4:
		if ((vc_data.count < 1) || (vc_data.count > 4))
			return -EINVAL;
		break;

	default:
		break;
	}

	nxi = lookup_nx_info(nid);
	if (!nxi)
		return -ESRCH;

	switch (vc_data.type) {
	case NXA_TYPE_IPV4:
		/* append up to 'count' addresses, bounded by NB_IPV4ROOT;
		   NOTE(review): nbipv4/ipv4/mask are updated without any
		   locking — concurrent readers may see a partial update */
		index = 0;
		while ((index < vc_data.count) &&
			((pos = nxi->nbipv4) < NB_IPV4ROOT)) {
			nxi->ipv4[pos] = vc_data.ip[index];
			nxi->mask[pos] = vc_data.mask[index];
			index++;
			nxi->nbipv4++;
		}
		/* number of addresses actually added */
		ret = index;
		break;

	case NXA_TYPE_IPV4|NXA_MOD_BCAST:
		/* set the context's broadcast address */
		nxi->v4_bcast = vc_data.ip[0];
		ret = 1;
		break;

	default:
		ret = -EINVAL;
		break;
	}

	put_nx_info(nxi);
	return ret;
}
680
681 int vc_net_remove(uint32_t nid, void __user *data)
682 {
683         struct vcmd_net_addr_v0 vc_data;
684         struct nx_info *nxi;
685         int ret = 0;
686
687         if (!capable(CAP_SYS_ADMIN))
688                 return -EPERM;
689         if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
690                 return -EFAULT;
691
692         nxi = lookup_nx_info(nid);
693         if (!nxi)
694                 return -ESRCH;
695
696         switch (vc_data.type) {
697         case NXA_TYPE_ANY:
698                 nxi->nbipv4 = 0;
699                 break;
700
701         default:
702                 ret = -EINVAL;
703                 break;
704         }
705
706         put_nx_info(nxi);
707         return ret;
708 }
709
710 int vc_get_nflags(uint32_t id, void __user *data)
711 {
712         struct nx_info *nxi;
713         struct vcmd_net_flags_v0 vc_data;
714
715         if (!capable(CAP_SYS_ADMIN))
716                 return -EPERM;
717
718         nxi = lookup_nx_info(id);
719         if (!nxi)
720                 return -ESRCH;
721
722         vc_data.flagword = nxi->nx_flags;
723
724         /* special STATE flag handling */
725         vc_data.mask = vx_mask_flags(~0UL, nxi->nx_flags, NXF_ONE_TIME);
726
727         put_nx_info(nxi);
728
729         if (copy_to_user (data, &vc_data, sizeof(vc_data)))
730                 return -EFAULT;
731         return 0;
732 }
733
/* update the context's flags under the user-supplied mask */
int vc_set_nflags(uint32_t id, void __user *data)
{
	struct nx_info *nxi;
	struct vcmd_net_flags_v0 vc_data;
	uint64_t mask, trigger;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	nxi = lookup_nx_info(id);
	if (!nxi)
		return -ESRCH;

	/* special STATE flag handling */
	mask = vx_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME);
	/* trigger = masked bits that actually change value */
	trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword);

	nxi->nx_flags = vx_mask_flags(nxi->nx_flags,
		vc_data.flagword, mask);
	/* keep the persistent reference in sync when the flag flips */
	if (trigger & NXF_PERSISTENT)
		nx_update_persistent(nxi);

	put_nx_info(nxi);
	return 0;
}
761
762 int vc_get_ncaps(uint32_t id, void __user *data)
763 {
764         struct nx_info *nxi;
765         struct vcmd_net_caps_v0 vc_data;
766
767         if (!capable(CAP_SYS_ADMIN))
768                 return -EPERM;
769
770         nxi = lookup_nx_info(id);
771         if (!nxi)
772                 return -ESRCH;
773
774         vc_data.ncaps = nxi->nx_ncaps;
775         vc_data.cmask = ~0UL;
776         put_nx_info(nxi);
777
778         if (copy_to_user (data, &vc_data, sizeof(vc_data)))
779                 return -EFAULT;
780         return 0;
781 }
782
783 int vc_set_ncaps(uint32_t id, void __user *data)
784 {
785         struct nx_info *nxi;
786         struct vcmd_net_caps_v0 vc_data;
787
788         if (!capable(CAP_SYS_ADMIN))
789                 return -EPERM;
790         if (copy_from_user (&vc_data, data, sizeof(vc_data)))
791                 return -EFAULT;
792
793         nxi = lookup_nx_info(id);
794         if (!nxi)
795                 return -ESRCH;
796
797         nxi->nx_ncaps = vx_mask_flags(nxi->nx_ncaps,
798                 vc_data.ncaps, vc_data.cmask);
799         put_nx_info(nxi);
800         return 0;
801 }
802
803
#include <linux/module.h>

/* entry points exported for use by other (GPL) kernel modules */
EXPORT_SYMBOL_GPL(free_nx_info);
EXPORT_SYMBOL_GPL(unhash_nx_info);
808