/*
 *  linux/kernel/vserver/network.c
 *
 *  Virtual Server: Network Support
 *
 *  Copyright (C) 2003-2007  Herbert Pötzl
 *
 *  V0.01  broken out from vcontext V0.05
 *  V0.02  cleaned up implementation
 *  V0.03  added equiv nx commands
 *  V0.04  switch to RCU based hash
 *  V0.05  and back to locking again
 *  V0.06  changed vcmds to nxi arg
 *  V0.07  have __create claim() the nxi
 *
 */
#include <linux/slab.h>
#include <linux/rcupdate.h>

#include <asm/errno.h>
#include <linux/vserver/base.h>
#include <linux/vserver/network_cmd.h>

atomic_t nx_global_ctotal = ATOMIC_INIT(0);
atomic_t nx_global_cactive = ATOMIC_INIT(0);

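/* nx_global_ctotal counts every nx_info that has been allocated and not
 * yet disposed of (see __alloc_nx_info()/__dealloc_nx_info()), while
 * nx_global_cactive counts only the contexts currently in the hash. */
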
/*	__alloc_nx_info()

	* allocate an initialized nx_info struct
	* doesn't make it visible (hash)			*/

static struct nx_info *__alloc_nx_info(nid_t nid)
{
	struct nx_info *new = NULL;

	vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid);

	/* would this benefit from a slab cache? */
	new = kmalloc(sizeof(struct nx_info), GFP_KERNEL);
	if (!new)
		return NULL;

	memset(new, 0, sizeof(struct nx_info));
	new->nx_id = nid;
	INIT_HLIST_NODE(&new->nx_hlist);
	atomic_set(&new->nx_usecnt, 0);
	atomic_set(&new->nx_tasks, 0);

	new->nx_flags = NXF_INIT_SET;

	/* rest of init goes here */

	vxdprintk(VXD_CBIT(nid, 0),
		"alloc_nx_info(%d) = %p", nid, new);
	atomic_inc(&nx_global_ctotal);
	return new;
}

/*	__dealloc_nx_info()

	* final disposal of nx_info			*/

static void __dealloc_nx_info(struct nx_info *nxi)
{
	vxdprintk(VXD_CBIT(nid, 0),
		"dealloc_nx_info(%p)", nxi);

	nxi->nx_hlist.next = LIST_POISON1;

	BUG_ON(atomic_read(&nxi->nx_usecnt));
	BUG_ON(atomic_read(&nxi->nx_tasks));

	nxi->nx_state |= NXS_RELEASED;
	kfree(nxi);
	atomic_dec(&nx_global_ctotal);
}

static void __shutdown_nx_info(struct nx_info *nxi)
{
	nxi->nx_state |= NXS_SHUTDOWN;
	vs_net_change(nxi, VSC_NETDOWN);
}

void free_nx_info(struct nx_info *nxi)
{
	/* context shutdown is mandatory */
	BUG_ON(nxi->nx_state != NXS_SHUTDOWN);

	/* context must not be hashed */
	BUG_ON(nxi->nx_state & NXS_HASHED);

	BUG_ON(atomic_read(&nxi->nx_usecnt));
	BUG_ON(atomic_read(&nxi->nx_tasks));

	__dealloc_nx_info(nxi);
}

/*	hash table for nx_info hash */

#define NX_HASH_SIZE	13

struct hlist_head nx_info_hash[NX_HASH_SIZE];

static spinlock_t nx_info_hash_lock = SPIN_LOCK_UNLOCKED;


static inline unsigned int __hashval(nid_t nid)
{
	return (nid % NX_HASH_SIZE);
}

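/* example: nid 42 maps to bucket 42 % 13 = 3, so the (unique) context
 * with that nid is found by walking chain 3 only. */
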
/*	__hash_nx_info()

	* add the nxi to the global hash table
	* requires the hash_lock to be held			*/

static inline void __hash_nx_info(struct nx_info *nxi)
{
	struct hlist_head *head;

	vxd_assert_lock(&nx_info_hash_lock);
	vxdprintk(VXD_CBIT(nid, 4),
		"__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);

	/* context must not be hashed */
	BUG_ON(nx_info_state(nxi, NXS_HASHED));

	nxi->nx_state |= NXS_HASHED;
	head = &nx_info_hash[__hashval(nxi->nx_id)];
	hlist_add_head(&nxi->nx_hlist, head);
	atomic_inc(&nx_global_cactive);
}

/*	__unhash_nx_info()

	* remove the nxi from the global hash table
	* requires the hash_lock to be held			*/

static inline void __unhash_nx_info(struct nx_info *nxi)
{
	vxd_assert_lock(&nx_info_hash_lock);
	vxdprintk(VXD_CBIT(nid, 4),
		"__unhash_nx_info: %p[#%d.%d.%d]", nxi, nxi->nx_id,
		atomic_read(&nxi->nx_usecnt), atomic_read(&nxi->nx_tasks));

	/* context must be hashed */
	BUG_ON(!nx_info_state(nxi, NXS_HASHED));
	/* but without tasks */
	BUG_ON(atomic_read(&nxi->nx_tasks));

	nxi->nx_state &= ~NXS_HASHED;
	hlist_del(&nxi->nx_hlist);
	atomic_dec(&nx_global_cactive);
}

/*	__lookup_nx_info()

	* requires the hash_lock to be held
	* doesn't increment the nx_refcnt			*/

static inline struct nx_info *__lookup_nx_info(nid_t nid)
{
	struct hlist_head *head = &nx_info_hash[__hashval(nid)];
	struct hlist_node *pos;
	struct nx_info *nxi;

	vxd_assert_lock(&nx_info_hash_lock);
	hlist_for_each(pos, head) {
		nxi = hlist_entry(pos, struct nx_info, nx_hlist);

		if (nxi->nx_id == nid)
			goto found;
	}
	nxi = NULL;
found:
	vxdprintk(VXD_CBIT(nid, 0),
		"__lookup_nx_info(#%u): %p[#%u]",
		nid, nxi, nxi ? nxi->nx_id : 0);
	return nxi;
}

/*	__nx_dynamic_id()

	* find unused dynamic nid
	* requires the hash_lock to be held			*/

static inline nid_t __nx_dynamic_id(void)
{
	static nid_t seq = MAX_N_CONTEXT;
	nid_t barrier = seq;

	vxd_assert_lock(&nx_info_hash_lock);
	do {
		if (++seq > MAX_N_CONTEXT)
			seq = MIN_D_CONTEXT;
		if (!__lookup_nx_info(seq)) {
			vxdprintk(VXD_CBIT(nid, 4),
				"__nx_dynamic_id: [#%d]", seq);
			return seq;
		}
	} while (barrier != seq);
	return 0;
}

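/* 'seq' remembers the last id handed out; the search wraps around from
 * MAX_N_CONTEXT back to MIN_D_CONTEXT and gives up (returns 0) once it
 * comes back to 'barrier', i.e. when no dynamic nid is free. */
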
/*	__create_nx_info()

	* create the requested context
	* get(), claim() and hash it				*/

static struct nx_info * __create_nx_info(int id)
{
	struct nx_info *new, *nxi = NULL;

	vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id);

	if (!(new = __alloc_nx_info(id)))
		return ERR_PTR(-ENOMEM);

	/* required to make dynamic xids unique */
	spin_lock(&nx_info_hash_lock);

	/* dynamic context requested */
	if (id == NX_DYNAMIC_ID) {
#ifdef CONFIG_VSERVER_DYNAMIC_IDS
		id = __nx_dynamic_id();
		if (!id) {
			printk(KERN_ERR "no dynamic context available.\n");
			nxi = ERR_PTR(-EAGAIN);
			goto out_unlock;
		}
		new->nx_id = id;
#else
		printk(KERN_ERR "dynamic contexts disabled.\n");
		nxi = ERR_PTR(-EINVAL);
		goto out_unlock;
#endif
	}
	/* static context requested */
	else if ((nxi = __lookup_nx_info(id))) {
		vxdprintk(VXD_CBIT(nid, 0),
			"create_nx_info(%d) = %p (already there)", id, nxi);
		if (nx_info_flags(nxi, NXF_STATE_SETUP, 0))
			nxi = ERR_PTR(-EBUSY);
		else
			nxi = ERR_PTR(-EEXIST);
		goto out_unlock;
	}
	/* dynamic nid creation blocker */
	else if (id >= MIN_D_CONTEXT) {
		vxdprintk(VXD_CBIT(nid, 0),
			"create_nx_info(%d) (dynamic rejected)", id);
		nxi = ERR_PTR(-EINVAL);
		goto out_unlock;
	}

	/* new context */
	vxdprintk(VXD_CBIT(nid, 0),
		"create_nx_info(%d) = %p (new)", id, new);
	claim_nx_info(new, NULL);
	__hash_nx_info(get_nx_info(new));
	nxi = new, new = NULL;

out_unlock:
	spin_unlock(&nx_info_hash_lock);
	if (new)
		__dealloc_nx_info(new);
	return nxi;
}

void unhash_nx_info(struct nx_info *nxi)
{
	__shutdown_nx_info(nxi);
	spin_lock(&nx_info_hash_lock);
	__unhash_nx_info(nxi);
	spin_unlock(&nx_info_hash_lock);
}

#ifdef CONFIG_VSERVER_LEGACYNET

struct nx_info *create_nx_info(void)
{
	return __create_nx_info(NX_DYNAMIC_ID);
}

#endif

/*	lookup_nx_info()

	* search for a nx_info and get() it
	* negative id means current				*/

struct nx_info *lookup_nx_info(int id)
{
	struct nx_info *nxi = NULL;

	if (id < 0)
		nxi = get_nx_info(current->nx_info);
	else {
		spin_lock(&nx_info_hash_lock);
		nxi = get_nx_info(__lookup_nx_info(id));
		spin_unlock(&nx_info_hash_lock);
	}
	return nxi;
}

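/* note: the nx_info returned here carries a reference taken with
 * get_nx_info(), so callers are expected to drop it with put_nx_info()
 * once they are done with the context. */
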
/*	nid_is_hashed()

	* verify that nid is still hashed			*/

int nid_is_hashed(nid_t nid)
{
	int hashed;

	spin_lock(&nx_info_hash_lock);
	hashed = (__lookup_nx_info(nid) != NULL);
	spin_unlock(&nx_info_hash_lock);
	return hashed;
}

#ifdef CONFIG_PROC_FS

/*	get_nid_list()
	* get a subset of hashed nids for proc
	* assumes size is at least one			*/

int get_nid_list(int index, unsigned int *nids, int size)
{
	int hindex, nr_nids = 0;

	/* only show current and children */
	if (!nx_check(0, VS_ADMIN|VS_WATCH)) {
		if (index > 0)
			return 0;
		nids[nr_nids] = nx_current_nid();
		return 1;
	}

	for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) {
		struct hlist_head *head = &nx_info_hash[hindex];
		struct hlist_node *pos;

		spin_lock(&nx_info_hash_lock);
		hlist_for_each(pos, head) {
			struct nx_info *nxi;

			if (--index > 0)
				continue;

			nxi = hlist_entry(pos, struct nx_info, nx_hlist);
			nids[nr_nids] = nxi->nx_id;
			if (++nr_nids >= size) {
				spin_unlock(&nx_info_hash_lock);
				goto out;
			}
		}
		/* keep the lock time short */
		spin_unlock(&nx_info_hash_lock);
	}
out:
	return nr_nids;
}

#endif

/*
 *	migrate task to new network
 *	gets nxi, puts old_nxi on change
 */

int nx_migrate_task(struct task_struct *p, struct nx_info *nxi)
{
	struct nx_info *old_nxi;
	int ret = 0;

	if (!p || !nxi)
		BUG();

	vxdprintk(VXD_CBIT(nid, 5),
		"nx_migrate_task(%p,%p[#%d.%d.%d])",
		p, nxi, nxi->nx_id,
		atomic_read(&nxi->nx_usecnt),
		atomic_read(&nxi->nx_tasks));

	if (nx_info_flags(nxi, NXF_INFO_PRIVATE, 0) &&
		!nx_info_flags(nxi, NXF_STATE_SETUP, 0))
		return -EACCES;

	if (nx_info_state(nxi, NXS_SHUTDOWN))
		return -EFAULT;

	/* maybe disallow this completely? */
	old_nxi = task_get_nx_info(p);
	if (old_nxi == nxi)
		goto out;

	task_lock(p);
	if (old_nxi)
		clr_nx_info(&p->nx_info);
	claim_nx_info(nxi, p);
	set_nx_info(&p->nx_info, nxi);
	p->nid = nxi->nx_id;
	task_unlock(p);

	vxdprintk(VXD_CBIT(nid, 5),
		"moved task %p into nxi:%p[#%d]",
		p, nxi, nxi->nx_id);

	if (old_nxi)
		release_nx_info(old_nxi, p);
out:
	put_nx_info(old_nxi);
	return ret;
}

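/* after a successful migration the new context has been claim()ed and
 * get()ed for the task and the previous one (if any) released; the
 * temporary reference from task_get_nx_info() is dropped on both the
 * success and the early-exit paths via the out: label. */
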
#ifdef CONFIG_INET

#include <linux/netdevice.h>
#include <linux/inetdevice.h>

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/addrconf.h>

int nx_addr6_conflict(struct nx_info *nxi, struct nx_info *nxi2)
{
	vxdprintk(VXD_CBIT(net, 2), "nx_addr6_conflict(%u,%u)",
		nxi ? nxi->nx_id : 0, nxi2 ? nxi2->nx_id : 0);

	if (nxi && nxi2 && nxi->nbipv6 > 0 && nxi2->nbipv6 > 0) {
		int i;

		for (i = 0; i < nxi->nbipv6; i++)
			if (addr6_in_nx_info(nxi2, &(nxi->ipv6[i])))
				return 1;
	}
	return 0;
}

#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */

int ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi)
{
	if (!nxi)
		return 1;
	if (!ifa)
		return 0;
	return addr_in_nx_info(nxi, ifa->ifa_local);
}

#ifdef CONFIG_IPV6_MODULE

struct nx_ipv6_mod vc_net_ipv6 = {
	.dev_in_nx_info6 = NULL,
	.owner = NULL,
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
static atomic_t nx_ipv6mod_lockct = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(nx_ipv6mod_lock);

/* The strategy is: modifications of vc_net_ipv6 are short, do not
   sleep and veeery rare, but read access should be free of any exclusive
   locks. (copied from socket.c)
   This should prevent any possible issues with module unloading!
 */

static void nx_ipv6mod_write_lock(void)
{
	spin_lock(&nx_ipv6mod_lock);
	while (atomic_read(&nx_ipv6mod_lockct) != 0) {
		spin_unlock(&nx_ipv6mod_lock);
		yield();
		spin_lock(&nx_ipv6mod_lock);
	}
}

static __inline__ void nx_ipv6mod_write_unlock(void)
{
	spin_unlock(&nx_ipv6mod_lock);
}

static __inline__ void nx_ipv6mod_read_lock(void)
{
	atomic_inc(&nx_ipv6mod_lockct);
	spin_unlock_wait(&nx_ipv6mod_lock);
}

static __inline__ void nx_ipv6mod_read_unlock(void)
{
	atomic_dec(&nx_ipv6mod_lockct);
}

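/* readers only bump nx_ipv6mod_lockct and wait for a writer holding
 * nx_ipv6mod_lock to finish; a writer takes the spinlock and then spins
 * (yielding) until the reader count drains to zero, so lookups stay
 * lock-free while updates remain fully exclusive. */
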
#else
#define nx_ipv6mod_write_lock() do { } while(0)
#define nx_ipv6mod_write_unlock() do { } while(0)
#define nx_ipv6mod_read_lock() do { } while(0)
#define nx_ipv6mod_read_unlock() do { } while(0)
#endif

void vc_net_register_ipv6(struct nx_ipv6_mod *modv6) {
	nx_ipv6mod_write_lock();
	memcpy(&vc_net_ipv6, modv6, sizeof(struct nx_ipv6_mod));
	nx_ipv6mod_write_unlock();
}

void vc_net_unregister_ipv6() {
	nx_ipv6mod_write_lock();
	memset(&vc_net_ipv6, 0, sizeof(struct nx_ipv6_mod));
	nx_ipv6mod_write_unlock();
}

inline int dev_in_nx_info6(struct net_device *dev, struct nx_info *nxi) {
	int ret = 0;

	nx_ipv6mod_read_lock();
	if (try_module_get(vc_net_ipv6.owner)) {
		if (vc_net_ipv6.dev_in_nx_info6)
			ret = vc_net_ipv6.dev_in_nx_info6(dev, nxi);
		module_put(vc_net_ipv6.owner);
	}
	nx_ipv6mod_read_unlock();
	return ret;
}

#endif /* CONFIG_IPV6_MODULE */

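/* try_module_get() pins the registered IPv6 module for the duration of
 * the dev_in_nx_info6() call so the hook cannot be unloaded underneath
 * us; if no module (or no hook) is registered the check reports no match. */
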
int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
{
	int ret = 0;

	if (!nxi)
		return 1;

	if (nxi->nbipv4 > 0) {
		struct in_device *in_dev;
		struct in_ifaddr **ifap;
		struct in_ifaddr *ifa;

		in_dev = in_dev_get(dev);
		if (in_dev) {
			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
				ifap = &ifa->ifa_next) {
				if (addr_in_nx_info(nxi, ifa->ifa_local)) {
					ret = 1;
					break;
				}
			}
			in_dev_put(in_dev);
		}
	}
#if defined(CONFIG_IPV6_MODULE)
	if (!ret)
		ret = dev_in_nx_info6(dev, nxi);
#elif defined(CONFIG_IPV6)
	if (!ret)
		ret = ipv6_dev_in_nx_info6(dev, nxi);
#endif
	return ret;
}

/*
 *	check if address is covered by socket
 *
 *	sk:	the socket to check against
 *	addr:	the address in question (must be != 0)
 */
static inline int __addr_in_socket(const struct sock *sk, uint32_t addr)
{
	struct nx_info *nxi = sk->sk_nx_info;
	uint32_t saddr = inet_rcv_saddr(sk);

	vxdprintk(VXD_CBIT(net, 5),
		"__addr_in_socket(%p," NIPQUAD_FMT ") %p:" NIPQUAD_FMT " %p;%lx",
		sk, NIPQUAD(addr), nxi, NIPQUAD(saddr), sk->sk_socket,
		(sk->sk_socket?sk->sk_socket->flags:0));

	if (saddr) {
		/* direct address match */
		return (saddr == addr);
	} else if (nxi) {
		/* match against nx_info */
		return addr_in_nx_info(nxi, addr);
	} else {
		/* unrestricted any socket */
		return 1;
	}
}

int nx_addr_conflict(struct nx_info *nxi, uint32_t addr, const struct sock *sk)
{
	vxdprintk(VXD_CBIT(net, 2),
		"nx_addr_conflict(%p,%p) " NIPQUAD_FMT,
		nxi, sk, NIPQUAD(addr));

	if (addr) {
		/* check real address */
		return __addr_in_socket(sk, addr);
	} else if (nxi) {
		/* check against nx_info */
		int i, n = nxi->nbipv4;

		for (i = 0; i < n; i++)
			if (__addr_in_socket(sk, nxi->ipv4[i]))
				return 1;
		return 0;
	} else {
		/* check against any */
		return 1;
	}
}

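/* summary: a specific address is matched directly against the socket;
 * with no address but an nx_info every address of the context is tried;
 * with neither, any socket is considered a conflict. */
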
#endif /* CONFIG_INET */

void nx_set_persistent(struct nx_info *nxi)
{
	vxdprintk(VXD_CBIT(nid, 6),
		"nx_set_persistent(%p[#%d])", nxi, nxi->nx_id);

	get_nx_info(nxi);
	claim_nx_info(nxi, NULL);
}

void nx_clear_persistent(struct nx_info *nxi)
{
	vxdprintk(VXD_CBIT(nid, 6),
		"nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id);

	release_nx_info(nxi, NULL);
	put_nx_info(nxi);
}

void nx_update_persistent(struct nx_info *nxi)
{
	if (nx_info_flags(nxi, NXF_PERSISTENT, 0))
		nx_set_persistent(nxi);
	else
		nx_clear_persistent(nxi);
}

/* vserver syscall commands below here */

/* task nid and nx_info functions */

#include <asm/uaccess.h>


int vc_task_nid(uint32_t id, void __user *data)
{
	nid_t nid;

	if (id) {
		struct task_struct *tsk;

		if (!nx_check(0, VS_ADMIN|VS_WATCH))
			return -EPERM;

		read_lock(&tasklist_lock);
		tsk = find_task_by_real_pid(id);
		nid = (tsk) ? tsk->nid : -ESRCH;
		read_unlock(&tasklist_lock);
	} else
		nid = nx_current_nid();
	return nid;
}

int vc_nx_info(struct nx_info *nxi, void __user *data)
{
	struct vcmd_nx_info_v0 vc_data;

	vc_data.nid = nxi->nx_id;

	if (copy_to_user (data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

/* network functions */

int vc_net_create(uint32_t nid, void __user *data)
{
	struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET };
	struct nx_info *new_nxi;
	int ret;

	if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	if ((nid > MAX_S_CONTEXT) && (nid != NX_DYNAMIC_ID))
		return -EINVAL;
	if (nid < 2)
		return -EINVAL;

	new_nxi = __create_nx_info(nid);
	if (IS_ERR(new_nxi))
		return PTR_ERR(new_nxi);

	/* initial flags */
	new_nxi->nx_flags = vc_data.flagword;

	ret = -ENOEXEC;
	if (vs_net_change(new_nxi, VSC_NETUP))
		goto out;

	ret = nx_migrate_task(current, new_nxi);
	if (ret)
		goto out;

	/* return context id on success */
	ret = new_nxi->nx_id;

	/* get a reference for persistent contexts */
	if ((vc_data.flagword & NXF_PERSISTENT))
		nx_set_persistent(new_nxi);
out:
	release_nx_info(new_nxi, NULL);
	put_nx_info(new_nxi);
	return ret;
}

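/* note: on success the command returns the new context id (nx_id), not 0;
 * the creation references are dropped above in either case, so only the
 * task reference (plus a persistent reference, if requested) keeps the
 * new context alive. */
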
int vc_net_migrate(struct nx_info *nxi, void __user *data)
{
	return nx_migrate_task(current, nxi);
}

/*
 * Lookup address/mask pair in list of v4 addresses
 * Returns position if found, -1 if not found
 */
int vc_net_find_v4(const struct nx_info *nxi, uint32_t addr, uint32_t mask)
{
	int ret = nxi->nbipv4 - 1;

	while (ret >= 0) {
		if (nxi->ipv4[ret] == addr && nxi->mask[ret] == mask)
			break;
		ret--;
	}
	return ret;
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * Lookup address/prefix pair in list of v6 addresses
 * Returns position if found, -1 if not found
 */
int vc_net_find_v6(const struct nx_info *nxi, const struct in6_addr *addr, int prefix)
{
	int ret = nxi->nbipv6 - 1;

	while (ret >= 0) {
		if (memcmp(&(nxi->ipv6[ret]), addr, sizeof(struct in6_addr)) == 0 &&
		    nxi->prefix6[ret] == prefix)
			break;
		ret--;
	}
	return ret;
}
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */

int vc_net_add(struct nx_info *nxi, void __user *data)
{
	struct vcmd_net_addr_v0 vc_data;
	int index, pos, ret = 0;

	if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	switch (vc_data.type) {
	case NXA_TYPE_IPV4:
		if ((vc_data.count < 1) || (vc_data.count > 4))
			return -EINVAL;
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case NXA_TYPE_IPV6:
		/* Note: all 4 items of IP and MASK must be set, but it's 1 IPv6 address */
		if ((vc_data.count != 1))
			return -EINVAL;
		break;
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
	default:
		break;
	}

	switch (vc_data.type) {
	case NXA_TYPE_IPV4:
		index = 0;
		while ((index < vc_data.count) &&
			((pos = nxi->nbipv4) < NB_IPV4ROOT)) {
			if (vc_net_find_v4(nxi, vc_data.ip[index].s_addr, vc_data.mask[index].s_addr) == -1) {
				/* Only add if address is new */
				vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIPQUAD_FMT,
					nxi->nx_id, index, NIPQUAD(vc_data.ip[index].s_addr));
				nxi->ipv4[pos] = vc_data.ip[index].s_addr;
				nxi->mask[pos] = vc_data.mask[index].s_addr;
				nxi->nbipv4++;
				ret++;
			} else
				vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIPQUAD_FMT " EXISTS",
					nxi->nx_id, index, NIPQUAD(vc_data.ip[index].s_addr));
			index++;
		}
		break;

	case NXA_TYPE_IPV4|NXA_MOD_BCAST:
		nxi->v4_bcast = vc_data.ip[0].s_addr;
		ret = 1;
		break;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case NXA_TYPE_IPV6:
		index = 0;
		while (nxi->nbipv6 < NB_IPV6ROOT && index < vc_data.count) {
			if (vc_net_find_v6(nxi, &vc_data.ip6, vc_data.prefix) == -1) {
				/* Only add if address is new */
				vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIP6_FMT,
					nxi->nx_id, index, NIP6(vc_data.ip6));
				nxi->ipv6[nxi->nbipv6] = vc_data.ip6;
				nxi->prefix6[nxi->nbipv6] = vc_data.prefix;
				nxi->nbipv6++;
				ret++;
			} else
				vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIP6_FMT " EXISTS",
					nxi->nx_id, index, NIP6(vc_data.ip6));
			index++;
		}
		break;
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */

	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

int vc_net_remove(struct nx_info * nxi, void __user *data)
{
	struct vcmd_net_addr_v0 vc_data;
	int index, pos, ret = 0;

	if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	switch (vc_data.type) {
	case NXA_TYPE_ANY:
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		nxi->nbipv6 = 0;
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
		nxi->nbipv4 = 0;
		break;

	case NXA_TYPE_IPV4:
		index = 0;
		while (index < vc_data.count) {
			pos = vc_net_find_v4(nxi, vc_data.ip[index].s_addr, vc_data.mask[index].s_addr);
			if (pos == -1)
				break;

			vxdprintk(VXD_CBIT(nid, 1), "vc_net_remove(%d, data[%d]): " NIPQUAD_FMT,
				nxi->nx_id, index, NIPQUAD(vc_data.ip[index].s_addr));
			nxi->nbipv4--;
			while (pos >= 0 && pos < nxi->nbipv4) {
				nxi->ipv4[pos] = nxi->ipv4[pos+1];
				nxi->mask[pos] = nxi->mask[pos+1];
				pos++;
			}
			index++;
		}
		break;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case NXA_TYPE_IPV6:
		index = 0;
		while (index < vc_data.count) {
			pos = vc_net_find_v6(nxi, &(vc_data.ip6), vc_data.prefix);
			if (pos == -1)
				break;

			vxdprintk(VXD_CBIT(nid, 1), "vc_net_remove(%d, data[%d]): " NIP6_FMT " EXISTS",
				nxi->nx_id, index, NIP6(vc_data.ip6));
			nxi->nbipv6--;
			while (pos >= 0 && pos < nxi->nbipv6) {
				nxi->ipv6[pos] = nxi->ipv6[pos+1];
				nxi->prefix6[pos] = nxi->prefix6[pos+1];
				pos++;
			}
			index++;
		}
		break;
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */

	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

int vc_get_nflags(struct nx_info *nxi, void __user *data)
{
	struct vcmd_net_flags_v0 vc_data;

	vc_data.flagword = nxi->nx_flags;

	/* special STATE flag handling */
	vc_data.mask = vs_mask_flags(~0UL, nxi->nx_flags, NXF_ONE_TIME);

	if (copy_to_user (data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

int vc_set_nflags(struct nx_info *nxi, void __user *data)
{
	struct vcmd_net_flags_v0 vc_data;
	uint64_t mask, trigger;

	if (copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	/* special STATE flag handling */
	mask = vs_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME);
	trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword);

	nxi->nx_flags = vs_mask_flags(nxi->nx_flags,
		vc_data.flagword, mask);
	if (trigger & NXF_PERSISTENT)
		nx_update_persistent(nxi);

	return 0;
}

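/* example: if NXF_PERSISTENT is currently clear, is covered by
 * vc_data.mask and is set in vc_data.flagword, then 'trigger' has
 * NXF_PERSISTENT set and nx_update_persistent() applies the new value;
 * one-time flags are filtered out of the mask by vs_mask_mask() first. */
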
int vc_get_ncaps(struct nx_info *nxi, void __user *data)
{
	struct vcmd_net_caps_v0 vc_data;

	vc_data.ncaps = nxi->nx_ncaps;
	vc_data.cmask = ~0UL;

	if (copy_to_user (data, &vc_data, sizeof(vc_data)))
		return -EFAULT;
	return 0;
}

int vc_set_ncaps(struct nx_info *nxi, void __user *data)
{
	struct vcmd_net_caps_v0 vc_data;

	if (copy_from_user (&vc_data, data, sizeof(vc_data)))
		return -EFAULT;

	nxi->nx_ncaps = vs_mask_flags(nxi->nx_ncaps,
		vc_data.ncaps, vc_data.cmask);
	return 0;
}

#include <linux/module.h>

EXPORT_SYMBOL_GPL(free_nx_info);
EXPORT_SYMBOL_GPL(unhash_nx_info);

#ifdef CONFIG_IPV6_MODULE
EXPORT_SYMBOL_GPL(nx_addr6_conflict);
EXPORT_SYMBOL_GPL(vc_net_register_ipv6);
EXPORT_SYMBOL_GPL(vc_net_unregister_ipv6);
#elif defined(CONFIG_IPV6)
EXPORT_SYMBOL_GPL(nx_addr6_conflict);
#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */