IPv6 support for VServer
[linux-2.6.git] / kernel / vserver / network.c
1 /*
2  *  linux/kernel/vserver/network.c
3  *
4  *  Virtual Server: Network Support
5  *
 *  Copyright (C) 2003-2007  Herbert Pötzl
7  *
8  *  V0.01  broken out from vcontext V0.05
9  *  V0.02  cleaned up implementation
10  *  V0.03  added equiv nx commands
11  *  V0.04  switch to RCU based hash
12  *  V0.05  and back to locking again
13  *  V0.06  changed vcmds to nxi arg
14  *  V0.07  have __create claim() the nxi
15  *
16  */
17
18 #include <linux/slab.h>
19 #include <linux/rcupdate.h>
20 #include <net/tcp.h>
21
22 #include <asm/errno.h>
23 #include <linux/vserver/base.h>
24 #include <linux/vserver/network_cmd.h>
25
26
27 atomic_t nx_global_ctotal       = ATOMIC_INIT(0);
28 atomic_t nx_global_cactive      = ATOMIC_INIT(0);
29
30
31 /*      __alloc_nx_info()
32
33         * allocate an initialized nx_info struct
34         * doesn't make it visible (hash)                        */
35
36 static struct nx_info *__alloc_nx_info(nid_t nid)
37 {
38         struct nx_info *new = NULL;
39
40         vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid);
41
42         /* would this benefit from a slab cache? */
43         new = kmalloc(sizeof(struct nx_info), GFP_KERNEL);
44         if (!new)
45                 return 0;
46
47         memset (new, 0, sizeof(struct nx_info));
48         new->nx_id = nid;
49         INIT_HLIST_NODE(&new->nx_hlist);
50         atomic_set(&new->nx_usecnt, 0);
51         atomic_set(&new->nx_tasks, 0);
52         new->nx_state = 0;
53
54         new->nx_flags = NXF_INIT_SET;
55
56         /* rest of init goes here */
57
58         vxdprintk(VXD_CBIT(nid, 0),
59                 "alloc_nx_info(%d) = %p", nid, new);
60         atomic_inc(&nx_global_ctotal);
61         return new;
62 }
63
/*	__dealloc_nx_info()

	* final disposal of nx_info
	* caller must guarantee the context is unreferenced	*/

static void __dealloc_nx_info(struct nx_info *nxi)
{
	vxdprintk(VXD_CBIT(nid, 0),
		"dealloc_nx_info(%p)", nxi);

	/* poison hash linkage and id so any stale reference
	   faults loudly instead of being silently reused */
	nxi->nx_hlist.next = LIST_POISON1;
	nxi->nx_id = -1;

	/* must not be freed while still referenced or owning tasks */
	BUG_ON(atomic_read(&nxi->nx_usecnt));
	BUG_ON(atomic_read(&nxi->nx_tasks));

	nxi->nx_state |= NXS_RELEASED;
	kfree(nxi);
	atomic_dec(&nx_global_ctotal);
}
83
/* mark the context as shutting down and notify interested
   parties via the vs_net_change() hook */
static void __shutdown_nx_info(struct nx_info *nxi)
{
	nxi->nx_state |= NXS_SHUTDOWN;
	vs_net_change(nxi, VSC_NETDOWN);
}
89
/*	exported stuff						*/

/* release the final reference on a network context:
   sanity-check state/refcounts, then dispose of it */
void free_nx_info(struct nx_info *nxi)
{
	/* context shutdown is mandatory */
	BUG_ON(nxi->nx_state != NXS_SHUTDOWN);

	/* context must not be hashed */
	BUG_ON(nxi->nx_state & NXS_HASHED);

	BUG_ON(atomic_read(&nxi->nx_usecnt));
	BUG_ON(atomic_read(&nxi->nx_tasks));

	__dealloc_nx_info(nxi);
}
105
106
107 /*      hash table for nx_info hash */
108
109 #define NX_HASH_SIZE    13
110
111 struct hlist_head nx_info_hash[NX_HASH_SIZE];
112
113 static spinlock_t nx_info_hash_lock = SPIN_LOCK_UNLOCKED;
114
115
116 static inline unsigned int __hashval(nid_t nid)
117 {
118         return (nid % NX_HASH_SIZE);
119 }
120
121
122
/*	__hash_nx_info()

	* add the nxi to the global hash table
	* requires the hash_lock to be held			*/

static inline void __hash_nx_info(struct nx_info *nxi)
{
	struct hlist_head *head;

	vxd_assert_lock(&nx_info_hash_lock);
	vxdprintk(VXD_CBIT(nid, 4),
		"__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);

	/* context must not be hashed */
	BUG_ON(nx_info_state(nxi, NXS_HASHED));

	/* mark hashed before insertion so lookups never see an
	   unflagged-but-visible context */
	nxi->nx_state |= NXS_HASHED;
	head = &nx_info_hash[__hashval(nxi->nx_id)];
	hlist_add_head(&nxi->nx_hlist, head);
	atomic_inc(&nx_global_cactive);
}
144
/*	__unhash_nx_info()

	* remove the nxi from the global hash table
	* requires the hash_lock to be held			*/

static inline void __unhash_nx_info(struct nx_info *nxi)
{
	vxd_assert_lock(&nx_info_hash_lock);
	vxdprintk(VXD_CBIT(nid, 4),
		"__unhash_nx_info: %p[#%d.%d.%d]", nxi, nxi->nx_id,
		atomic_read(&nxi->nx_usecnt), atomic_read(&nxi->nx_tasks));

	/* context must be hashed */
	BUG_ON(!nx_info_state(nxi, NXS_HASHED));
	/* but without tasks */
	BUG_ON(atomic_read(&nxi->nx_tasks));

	nxi->nx_state &= ~NXS_HASHED;
	hlist_del(&nxi->nx_hlist);
	atomic_dec(&nx_global_cactive);
}
166
167
/*	__lookup_nx_info()

	* requires the hash_lock to be held
	* doesn't increment the nx_refcnt			*/

static inline struct nx_info *__lookup_nx_info(nid_t nid)
{
	struct hlist_head *head = &nx_info_hash[__hashval(nid)];
	struct hlist_node *pos;
	struct nx_info *nxi;

	vxd_assert_lock(&nx_info_hash_lock);
	/* linear scan of the bucket for a matching context id */
	hlist_for_each(pos, head) {
		nxi = hlist_entry(pos, struct nx_info, nx_hlist);

		if (nxi->nx_id == nid)
			goto found;
	}
	nxi = NULL;	/* not found */
found:
	vxdprintk(VXD_CBIT(nid, 0),
		"__lookup_nx_info(#%u): %p[#%u]",
		nid, nxi, nxi?nxi->nx_id:0);
	return nxi;
}
193
194
/*	__nx_dynamic_id()

	* find unused dynamic nid
	* requires the hash_lock to be held			*/

static inline nid_t __nx_dynamic_id(void)
{
	/* sequence counter persists across calls; the hash lock
	   serializes access, so no extra locking is needed */
	static nid_t seq = MAX_N_CONTEXT;
	nid_t barrier = seq;

	vxd_assert_lock(&nx_info_hash_lock);
	do {
		/* wrap from MAX_N_CONTEXT into the dynamic range */
		if (++seq > MAX_N_CONTEXT)
			seq = MIN_D_CONTEXT;
		if (!__lookup_nx_info(seq)) {
			vxdprintk(VXD_CBIT(nid, 4),
				"__nx_dynamic_id: [#%d]", seq);
			return seq;
		}
	} while (barrier != seq);
	/* full cycle without a free id: 0 signals exhaustion */
	return 0;
}
217
/*	__create_nx_info()

	* create the requested context
	* get(), claim() and hash it				*/

static struct nx_info * __create_nx_info(int id)
{
	struct nx_info *new, *nxi = NULL;

	vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id);

	/* allocate first, outside the lock */
	if (!(new = __alloc_nx_info(id)))
		return ERR_PTR(-ENOMEM);

	/* required to make dynamic xids unique */
	spin_lock(&nx_info_hash_lock);

	/* dynamic context requested */
	if (id == NX_DYNAMIC_ID) {
#ifdef	CONFIG_VSERVER_DYNAMIC_IDS
		id = __nx_dynamic_id();
		if (!id) {
			printk(KERN_ERR "no dynamic context available.\n");
			nxi = ERR_PTR(-EAGAIN);
			goto out_unlock;
		}
		new->nx_id = id;
#else
		printk(KERN_ERR "dynamic contexts disabled.\n");
		nxi = ERR_PTR(-EINVAL);
		goto out_unlock;
#endif
	}
	/* static context requested */
	else if ((nxi = __lookup_nx_info(id))) {
		vxdprintk(VXD_CBIT(nid, 0),
			"create_nx_info(%d) = %p (already there)", id, nxi);
		/* EBUSY while still being set up, EEXIST otherwise */
		if (nx_info_flags(nxi, NXF_STATE_SETUP, 0))
			nxi = ERR_PTR(-EBUSY);
		else
			nxi = ERR_PTR(-EEXIST);
		goto out_unlock;
	}
	/* dynamic nid creation blocker */
	else if (id >= MIN_D_CONTEXT) {
		vxdprintk(VXD_CBIT(nid, 0),
			"create_nx_info(%d) (dynamic rejected)", id);
		nxi = ERR_PTR(-EINVAL);
		goto out_unlock;
	}

	/* new context */
	vxdprintk(VXD_CBIT(nid, 0),
		"create_nx_info(%d) = %p (new)", id, new);
	/* hand the caller a claimed + referenced, hashed context */
	claim_nx_info(new, NULL);
	__hash_nx_info(get_nx_info(new));
	nxi = new, new = NULL;

out_unlock:
	spin_unlock(&nx_info_hash_lock);
	/* on any error path 'new' is still set and must be freed */
	if (new)
		__dealloc_nx_info(new);
	return nxi;
}
282
283
284
285 /*      exported stuff                                          */
286
287
/* shut the context down, then remove it from the global hash
   under the hash lock */
void unhash_nx_info(struct nx_info *nxi)
{
	__shutdown_nx_info(nxi);
	spin_lock(&nx_info_hash_lock);
	__unhash_nx_info(nxi);
	spin_unlock(&nx_info_hash_lock);
}
295
#ifdef	CONFIG_VSERVER_LEGACYNET

/* legacy entry point: always creates a dynamically-numbered context */
struct nx_info *create_nx_info(void)
{
	return __create_nx_info(NX_DYNAMIC_ID);
}

#endif
304
305 /*      lookup_nx_info()
306
307         * search for a nx_info and get() it
308         * negative id means current                             */
309
310 struct nx_info *lookup_nx_info(int id)
311 {
312         struct nx_info *nxi = NULL;
313
314         if (id < 0) {
315                 nxi = get_nx_info(current->nx_info);
316         } else if (id > 1) {
317                 spin_lock(&nx_info_hash_lock);
318                 nxi = get_nx_info(__lookup_nx_info(id));
319                 spin_unlock(&nx_info_hash_lock);
320         }
321         return nxi;
322 }
323
324 /*      nid_is_hashed()
325
326         * verify that nid is still hashed                       */
327
328 int nid_is_hashed(nid_t nid)
329 {
330         int hashed;
331
332         spin_lock(&nx_info_hash_lock);
333         hashed = (__lookup_nx_info(nid) != NULL);
334         spin_unlock(&nx_info_hash_lock);
335         return hashed;
336 }
337
338
#ifdef	CONFIG_PROC_FS

/*	get_nid_list()

	* get a subset of hashed nids for proc
	* assumes size is at least one				*/

int get_nid_list(int index, unsigned int *nids, int size)
{
	int hindex, nr_nids = 0;

	/* only show current and children */
	if (!nx_check(0, VS_ADMIN|VS_WATCH)) {
		if (index > 0)
			return 0;
		nids[nr_nids] = nx_current_nid();
		return 1;
	}

	for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) {
		struct hlist_head *head = &nx_info_hash[hindex];
		struct hlist_node *pos;

		spin_lock(&nx_info_hash_lock);
		hlist_for_each(pos, head) {
			struct nx_info *nxi;

			/* skip entries before the requested start offset
			   (index appears to be 1-based here — NOTE(review):
			   confirm against the proc caller) */
			if (--index > 0)
				continue;

			nxi = hlist_entry(pos, struct nx_info, nx_hlist);
			nids[nr_nids] = nxi->nx_id;
			/* output buffer full: stop, dropping the lock first */
			if (++nr_nids >= size) {
				spin_unlock(&nx_info_hash_lock);
				goto out;
			}
		}
		/* keep the lock time short */
		spin_unlock(&nx_info_hash_lock);
	}
out:
	return nr_nids;
}
#endif
383
384
385 /*
386  *      migrate task to new network
387  *      gets nxi, puts old_nxi on change
388  */
389
390 int nx_migrate_task(struct task_struct *p, struct nx_info *nxi)
391 {
392         struct nx_info *old_nxi;
393         int ret = 0;
394
395         if (!p || !nxi)
396                 BUG();
397
398         vxdprintk(VXD_CBIT(nid, 5),
399                 "nx_migrate_task(%p,%p[#%d.%d.%d])",
400                 p, nxi, nxi->nx_id,
401                 atomic_read(&nxi->nx_usecnt),
402                 atomic_read(&nxi->nx_tasks));
403
404         if (nx_info_flags(nxi, NXF_INFO_PRIVATE, 0) &&
405                 !nx_info_flags(nxi, NXF_STATE_SETUP, 0))
406                 return -EACCES;
407
408         if (nx_info_state(nxi, NXS_SHUTDOWN))
409                 return -EFAULT;
410
411         /* maybe disallow this completely? */
412         old_nxi = task_get_nx_info(p);
413         if (old_nxi == nxi)
414                 goto out;
415
416         task_lock(p);
417         if (old_nxi)
418                 clr_nx_info(&p->nx_info);
419         claim_nx_info(nxi, p);
420         set_nx_info(&p->nx_info, nxi);
421         p->nid = nxi->nx_id;
422         task_unlock(p);
423
424         vxdprintk(VXD_CBIT(nid, 5),
425                 "moved task %p into nxi:%p[#%d]",
426                 p, nxi, nxi->nx_id);
427
428         if (old_nxi)
429                 release_nx_info(old_nxi, p);
430         ret = 0;
431 out:
432         put_nx_info(old_nxi);
433         return ret;
434 }
435
436
437 #ifdef CONFIG_INET
438
439 #include <linux/netdevice.h>
440 #include <linux/inetdevice.h>
441
442
443 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
444
445 #include <net/addrconf.h>
446
447 int nx_addr6_conflict(struct nx_info *nxi, struct nx_info *nxi2)
448 {
449         vxdprintk(VXD_CBIT(net, 2), "nx_addr6_conflict(%u,%u)",
450             nxi ? nxi->nx_id : 0, nxi2 ? nxi2->nx_id : 0);
451
452         if (nxi && nxi2 && nxi->nbipv6 > 0 && nxi2->nbipv6 > 0) {
453                 int i = 0;
454                 for (i = 0; i < nxi->nbipv6; i++)
455                         if (addr6_in_nx_info(nxi2, &(nxi->ipv6[i])))
456                                 return 1;
457         }
458         return 0;
459 }
460
461 #endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
462
463 int ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi)
464 {
465         if (!nxi)
466                 return 1;
467         if (!ifa)
468                 return 0;
469         return addr_in_nx_info(nxi, ifa->ifa_local);
470 }
471
#ifdef CONFIG_IPV6_MODULE

/* hook table filled in by the ipv6 module on registration;
   all-NULL means "no module loaded" */
struct nx_ipv6_mod vc_net_ipv6 = {
	.dev_in_nx_info6 = NULL,
	.owner = NULL
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/* count of active readers; writers spin until it drops to zero */
static atomic_t nx_ipv6mod_lockct = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(nx_ipv6mod_lock);

/* The strategy is: modifications of vc_net_ipv6 are short, do not
   sleep and veeery rare, but read access should be free of any exclusive
   locks. (copied from socket.c)
   This should prevent any possible issues with module unloading!
 */

/* take the spinlock, then wait out any in-flight readers */
static void nx_ipv6mod_write_lock(void)
{
	spin_lock(&nx_ipv6mod_lock);
	while (atomic_read(&nx_ipv6mod_lockct) != 0) {
		spin_unlock(&nx_ipv6mod_lock);

		/* let readers finish before re-checking */
		yield();

		spin_lock(&nx_ipv6mod_lock);
	}
}

static __inline__ void nx_ipv6mod_write_unlock(void)
{
	spin_unlock(&nx_ipv6mod_lock);
}

/* announce a reader, then wait for any writer to finish */
static __inline__ void nx_ipv6mod_read_lock(void)
{
	atomic_inc(&nx_ipv6mod_lockct);
	spin_unlock_wait(&nx_ipv6mod_lock);
}

static __inline__ void nx_ipv6mod_read_unlock(void)
{
	atomic_dec(&nx_ipv6mod_lockct);
}

#else
/* UP without preemption: no synchronization needed */
#define nx_ipv6mod_write_lock() do { } while(0)
#define nx_ipv6mod_write_unlock() do { } while(0)
#define nx_ipv6mod_read_lock() do { } while(0)
#define nx_ipv6mod_read_unlock() do { } while(0)
#endif
523
/* called by the ipv6 module at load time to install its hooks */
void vc_net_register_ipv6(struct nx_ipv6_mod *modv6) {
	nx_ipv6mod_write_lock();
	memcpy(&vc_net_ipv6, modv6, sizeof(struct nx_ipv6_mod));
	nx_ipv6mod_write_unlock();
}
529
530 void vc_net_unregister_ipv6() {
531         nx_ipv6mod_write_lock();
532         memset(&vc_net_ipv6, 0, sizeof(struct nx_ipv6_mod));
533         nx_ipv6mod_write_unlock();
534 }
535
536 inline int dev_in_nx_info6(struct net_device *dev, struct nx_info *nxi) {
537         nx_ipv6mod_read_lock();
538         if (try_module_get(vc_net_ipv6.owner)) {
539                 if (vc_net_ipv6.dev_in_nx_info6)
540                         return vc_net_ipv6.dev_in_nx_info6(dev, nxi);
541                 else
542                         return 0;
543                 module_put(vc_net_ipv6.owner);
544         } else
545                 return 0;
546         nx_ipv6mod_read_unlock();
547 }
548 #endif
549
/* does the device carry at least one address visible in nxi?
   a NULL context means no restriction */
int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
{
	int ret = 0;

	if (!nxi)
		return 1;

	if (!dev)
		goto out;

	/* scan the device's IPv4 addresses against the context */
	if (nxi->nbipv4 > 0) {
		struct in_device *in_dev;
		struct in_ifaddr **ifap;
		struct in_ifaddr *ifa;

		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
					ifap = &ifa->ifa_next) {
				if (addr_in_nx_info(nxi, ifa->ifa_local)) {
					ret = 1;
					break;
				}
			}
			in_dev_put(in_dev);
		}
	}

	/* no IPv4 match: fall back to the IPv6 check (module hook
	   or built-in, depending on configuration) */
#if defined(CONFIG_IPV6_MODULE)
	if (ret == 0)
		ret = dev_in_nx_info6(dev, nxi);
#elif defined(CONFIG_IPV6)
	if (ret == 0)
		ret = ipv6_dev_in_nx_info6(dev, nxi);
#endif

out:
	return ret;
}
589
/*
 *	check if address is covered by socket
 *
 *	sk:	the socket to check against
 *	addr:	the address in question (must be != 0)
 */
static inline int __addr_in_socket(const struct sock *sk, uint32_t addr)
{
	struct nx_info *nxi = sk->sk_nx_info;
	uint32_t saddr = inet_rcv_saddr(sk);

	vxdprintk(VXD_CBIT(net, 5),
		"__addr_in_socket(%p," NIPQUAD_FMT ") %p:" NIPQUAD_FMT " %p;%lx",
		sk, NIPQUAD(addr), nxi, NIPQUAD(saddr), sk->sk_socket,
		(sk->sk_socket?sk->sk_socket->flags:0));

	if (saddr) {
		/* direct address match */
		return (saddr == addr);
	} else if (nxi) {
		/* match against nx_info */
		return addr_in_nx_info(nxi, addr);
	} else {
		/* unrestricted any socket */
		return 1;
	}
}
617
618
619 int nx_addr_conflict(struct nx_info *nxi, uint32_t addr, const struct sock *sk)
620 {
621         vxdprintk(VXD_CBIT(net, 2),
622                 "nx_addr_conflict(%p,%p) " NIPQUAD_FMT,
623                 nxi, sk, NIPQUAD(addr));
624
625         if (addr) {
626                 /* check real address */
627                 return __addr_in_socket(sk, addr);
628         } else if (nxi) {
629                 /* check against nx_info */
630                 int i, n = nxi->nbipv4;
631
632                 for (i=0; i<n; i++)
633                         if (__addr_in_socket(sk, nxi->ipv4[i]))
634                                 return 1;
635                 return 0;
636         } else {
637                 /* check against any */
638                 return 1;
639         }
640 }
641
642 #endif /* CONFIG_INET */
643
/* pin a context: take a reference and a claim so it survives
   without any attached task */
void nx_set_persistent(struct nx_info *nxi)
{
	vxdprintk(VXD_CBIT(nid, 6),
		"nx_set_persistent(%p[#%d])", nxi, nxi->nx_id);

	get_nx_info(nxi);
	claim_nx_info(nxi, NULL);
}
652
/* undo nx_set_persistent(): drop the claim and the reference */
void nx_clear_persistent(struct nx_info *nxi)
{
	vxdprintk(VXD_CBIT(nid, 6),
		"nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id);

	release_nx_info(nxi, NULL);
	put_nx_info(nxi);
}
661
/* sync the persistence pin with the NXF_PERSISTENT flag;
   callers invoke this only when the flag actually toggled */
void nx_update_persistent(struct nx_info *nxi)
{
	if (nx_info_flags(nxi, NXF_PERSISTENT, 0))
		nx_set_persistent(nxi);
	else
		nx_clear_persistent(nxi);
}
669
670 /* vserver syscall commands below here */
671
672 /* taks nid and nx_info functions */
673
674 #include <asm/uaccess.h>
675
676
677 int vc_task_nid(uint32_t id, void __user *data)
678 {
679         nid_t nid;
680
681         if (id) {
682                 struct task_struct *tsk;
683
684                 if (!nx_check(0, VS_ADMIN|VS_WATCH))
685                         return -EPERM;
686
687                 read_lock(&tasklist_lock);
688                 tsk = find_task_by_real_pid(id);
689                 nid = (tsk) ? tsk->nid : -ESRCH;
690                 read_unlock(&tasklist_lock);
691         }
692         else
693                 nid = nx_current_nid();
694         return nid;
695 }
696
697
/* vserver syscall: copy basic context info (currently just the nid)
   out to userspace */
int vc_nx_info(struct nx_info *nxi, void __user *data)
{
	struct vcmd_nx_info_v0 vc_data;

	vc_data.nid = nxi->nx_id;

	if (copy_to_user (data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}
708
709
710 /* network functions */
711
/* vserver syscall: create a network context and migrate the caller
   into it; returns the new context id on success */
int vc_net_create(uint32_t nid, void __user *data)
{
	struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET };
	struct nx_info *new_nxi;
	int ret;

	if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	/* only static ids up to MAX_S_CONTEXT or the dynamic sentinel */
	if ((nid > MAX_S_CONTEXT) && (nid != NX_DYNAMIC_ID))
		return -EINVAL;
	/* ids 0 and 1 are reserved */
	if (nid < 2)
		return -EINVAL;

	/* comes back claimed + referenced and hashed */
	new_nxi = __create_nx_info(nid);
	if (IS_ERR(new_nxi))
		return PTR_ERR(new_nxi);

	/* initial flags */
	new_nxi->nx_flags = vc_data.flagword;

	ret = -ENOEXEC;
	if (vs_net_change(new_nxi, VSC_NETUP))
		goto out;

	ret = nx_migrate_task(current, new_nxi);
	if (ret)
		goto out;

	/* return context id on success */
	ret = new_nxi->nx_id;

	/* get a reference for persistent contexts */
	if ((vc_data.flagword & NXF_PERSISTENT))
		nx_set_persistent(new_nxi);
out:
	/* drop the creation-time claim and reference; the context
	   stays alive via the migrated task (and persistence) */
	release_nx_info(new_nxi, NULL);
	put_nx_info(new_nxi);
	return ret;
}
752
753
/* vserver syscall: move the calling task into context nxi */
int vc_net_migrate(struct nx_info *nxi, void __user *data)
{
	return nx_migrate_task(current, nxi);
}
758
759 /*
760  * Lookup address/mask pair in list of v4 addresses
761  * Returns position if found, -1 if not found
762  */
763 int vc_net_find_v4(const struct nx_info *nxi, uint32_t addr, uint32_t mask)
764 {
765         int ret = nxi->nbipv4 - 1;
766         while (ret >= 0) {
767                 if (nxi->ipv4[ret] == addr && nxi->mask[ret] == mask)
768                         break;
769                 else
770                         ret--;
771         }
772         return ret;
773 }
774
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Lookup address/prefix pair list of v6 addresses
 * Returns position if found, -1 if not found
 */
int vc_net_find_v6(const struct nx_info *nxi, const struct in6_addr *addr, int prefix)
{
	int pos;

	/* scan from the end; falls through to -1 when absent */
	for (pos = nxi->nbipv6 - 1; pos >= 0; pos--)
		if (nxi->prefix6[pos] == prefix &&
		    memcmp(&(nxi->ipv6[pos]), addr, sizeof(struct in6_addr)) == 0)
			break;
	return pos;
}
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
792
/* vserver syscall: add IPv4/IPv6 addresses (or the v4 broadcast)
   to a context; duplicates are silently skipped */
int vc_net_add(struct nx_info *nxi, void __user *data)
{
	struct vcmd_net_addr_v0 vc_data;
	int index, pos, ret = 0;

	if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	/* first pass: validate the count for the given type */
	switch (vc_data.type) {
	case NXA_TYPE_IPV4:
		if ((vc_data.count < 1) || (vc_data.count > 4))
			return -EINVAL;
		break;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case NXA_TYPE_IPV6:
		/* Note: all 4 items of IP and MASK must be set, but its 1 IPv6 address  */
		if ((vc_data.count != 1))
			return -EINVAL;
		break;
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */

	default:
		break;
	}

	switch (vc_data.type) {
	case NXA_TYPE_IPV4:
		index = 0;
		ret = 0;
		/* append each new address while slots remain */
		while ((index < vc_data.count) &&
			((pos = nxi->nbipv4) < NB_IPV4ROOT)) {
			if (vc_net_find_v4(nxi, vc_data.ip[index].s_addr, vc_data.mask[index].s_addr) == -1) {
				/* Only add if address is new */
				vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIPQUAD_FMT,
					nxi->nx_id, index, NIPQUAD(vc_data.ip[index].s_addr));
				nxi->ipv4[pos] = vc_data.ip[index].s_addr;
				nxi->mask[pos] = vc_data.mask[index].s_addr;
				nxi->nbipv4++;
				ret++;
			} else
				vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIPQUAD_FMT " EXISTS",
					nxi->nx_id, index, NIPQUAD(vc_data.ip[index].s_addr));
			index++;
		}
		/* NOTE(review): this overwrites the ret++ tally above —
		   the syscall reports entries *processed*, not *added* */
		ret = index;
		break;

	case NXA_TYPE_IPV4|NXA_MOD_BCAST:
		/* set the context's IPv4 broadcast address */
		nxi->v4_bcast = vc_data.ip[0].s_addr;
		ret = 1;
		break;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case NXA_TYPE_IPV6:
		index = 0;
		ret = 0;
		/* count is validated to 1 above; ip6/prefix are scalars */
		while (nxi->nbipv6 < NB_IPV6ROOT && index < vc_data.count) {
			if (vc_net_find_v6(nxi, &vc_data.ip6, vc_data.prefix) == -1) {
				/* Only add if address is new */
				vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIP6_FMT,
					nxi->nx_id, index, NIP6(vc_data.ip6));
				nxi->ipv6[nxi->nbipv6] = vc_data.ip6;
				nxi->prefix6[nxi->nbipv6] = vc_data.prefix;
				nxi->nbipv6++;
				ret++;
			} else
				vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIP6_FMT " EXISTS",
					nxi->nx_id, index, NIP6(vc_data.ip6));
			index++;
		}
		break;
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */

	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
873
/* vserver syscall: remove addresses from a context; NXA_TYPE_ANY
   clears everything. Returns the number of entries removed. */
int vc_net_remove(struct nx_info * nxi, void __user *data)
{
	struct vcmd_net_addr_v0 vc_data;
	int index, pos, ret = 0;

	if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	switch (vc_data.type) {
	case NXA_TYPE_ANY:
		/* wholesale reset: just zero the counts */
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		ret = nxi->nbipv6;
		nxi->nbipv6 = 0;
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
		ret += nxi->nbipv4;
		nxi->nbipv4 = 0;
		break;

	case NXA_TYPE_IPV4:
		index = 0;
		ret = 0;
		while (index < vc_data.count) {
			pos = vc_net_find_v4(nxi, vc_data.ip[index].s_addr, vc_data.mask[index].s_addr);
			if (pos >= 0) {
				nxi->nbipv4--;
				ret++;
				vxdprintk(VXD_CBIT(nid, 1), "vc_net_remove(%d, data[%d]): " NIPQUAD_FMT,
						nxi->nx_id, index, NIPQUAD(vc_data.ip[index].s_addr));
			}
			/* close the gap by shifting the tail down one slot */
			while (pos >= 0 && pos < nxi->nbipv4) {
				nxi->ipv4[pos] = nxi->ipv4[pos+1];
				nxi->mask[pos] = nxi->mask[pos+1];
				pos++;
			}
			index++;
		}
		break;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case NXA_TYPE_IPV6:
		index = 0;
		ret = 0;
		while (index < vc_data.count) {
			pos = vc_net_find_v6(nxi, &(vc_data.ip6), vc_data.prefix);
			if (pos >= 0) {
				nxi->nbipv6--;
				ret++;
				vxdprintk(VXD_CBIT(nid, 1), "vc_net_remove(%d, data[%d]): " NIP6_FMT " EXISTS",
						nxi->nx_id, index, NIP6(vc_data.ip6));
			}
			/* close the gap by shifting the tail down one slot */
			while (pos >= 0 && pos < nxi->nbipv6) {
				nxi->ipv6[pos] = nxi->ipv6[pos+1];
				nxi->prefix6[pos] = nxi->prefix6[pos+1];
				pos++;
			}
			index++;
		}
		break;
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */

	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
940
/* vserver syscall: read the context's flag word; the mask tells
   userspace which bits are still settable */
int vc_get_nflags(struct nx_info *nxi, void __user *data)
{
	struct vcmd_net_flags_v0 vc_data;

	vc_data.flagword = nxi->nx_flags;

	/* special STATE flag handling */
	vc_data.mask = vs_mask_flags(~0UL, nxi->nx_flags, NXF_ONE_TIME);

	if (copy_to_user (data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}
954
/* vserver syscall: update the context's flag word under the
   caller-supplied mask; one-time flags cannot be re-set */
int vc_set_nflags(struct nx_info *nxi, void __user *data)
{
	struct vcmd_net_flags_v0 vc_data;
	uint64_t mask, trigger;

	if (copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	/* special STATE flag handling */
	mask = vs_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME);
	/* trigger holds the bits that actually change */
	trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword);

	nxi->nx_flags = vs_mask_flags(nxi->nx_flags,
		vc_data.flagword, mask);
	/* persistence pin must follow the flag when it toggles */
	if (trigger & NXF_PERSISTENT)
		nx_update_persistent(nxi);

	return 0;
}
974
/* vserver syscall: read the context's network capability set */
int vc_get_ncaps(struct nx_info *nxi, void __user *data)
{
	struct vcmd_net_caps_v0 vc_data;

	vc_data.ncaps = nxi->nx_ncaps;
	/* all capability bits are reported as maskable */
	vc_data.cmask = ~0UL;

	if (copy_to_user (data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}
986
/* vserver syscall: update the context's network capability set
   under the caller-supplied mask */
int vc_set_ncaps(struct nx_info *nxi, void __user *data)
{
	struct vcmd_net_caps_v0 vc_data;

	if (copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	nxi->nx_ncaps = vs_mask_flags(nxi->nx_ncaps,
		vc_data.ncaps, vc_data.cmask);
	return 0;
}
998
999
1000 #include <linux/module.h>
1001
1002 EXPORT_SYMBOL_GPL(free_nx_info);
1003 EXPORT_SYMBOL_GPL(unhash_nx_info);
1004
1005 #ifdef CONFIG_IPV6_MODULE
1006 EXPORT_SYMBOL_GPL(nx_addr6_conflict);
1007 EXPORT_SYMBOL_GPL(vc_net_register_ipv6);
1008 EXPORT_SYMBOL_GPL(vc_net_unregister_ipv6);
1009 #elif defined(CONFIG_IPV6)
1010 EXPORT_SYMBOL_GPL(nx_addr6_conflict);
1011 #endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */