*
* Virtual Server: Network Support
*
- * Copyright (C) 2003-2005 Herbert Pötzl
+ * Copyright (C) 2003-2007 Herbert Pötzl
*
* V0.01 broken out from vcontext V0.05
* V0.02 cleaned up implementation
* V0.03 added equiv nx commands
* V0.04 switch to RCU based hash
* V0.05 and back to locking again
+ * V0.06 changed vcmds to nxi arg
+ * V0.07 have __create claim() the nxi
*
*/
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <net/tcp.h>
-#include <linux/vserver/network_cmd.h>
#include <asm/errno.h>
+#include <linux/vserver/base.h>
+#include <linux/vserver/network_cmd.h>
+
+
+atomic_t nx_global_ctotal = ATOMIC_INIT(0);
+atomic_t nx_global_cactive = ATOMIC_INIT(0);
/* __alloc_nx_info()
vxdprintk(VXD_CBIT(nid, 0),
"alloc_nx_info(%d) = %p", nid, new);
+ atomic_inc(&nx_global_ctotal);
return new;
}
nxi->nx_state |= NXS_RELEASED;
kfree(nxi);
+ atomic_dec(&nx_global_ctotal);
}
static void __shutdown_nx_info(struct nx_info *nxi)
nxi->nx_state |= NXS_HASHED;
head = &nx_info_hash[__hashval(nxi->nx_id)];
hlist_add_head(&nxi->nx_hlist, head);
+ atomic_inc(&nx_global_cactive);
}
/* __unhash_nx_info()
{
vxd_assert_lock(&nx_info_hash_lock);
vxdprintk(VXD_CBIT(nid, 4),
- "__unhash_nx_info: %p[#%d]", nxi, nxi->nx_id);
+ "__unhash_nx_info: %p[#%d.%d.%d]", nxi, nxi->nx_id,
+ atomic_read(&nxi->nx_usecnt), atomic_read(&nxi->nx_tasks));
/* context must be hashed */
BUG_ON(!nx_info_state(nxi, NXS_HASHED));
+ /* but without tasks */
+ BUG_ON(atomic_read(&nxi->nx_tasks));
nxi->nx_state &= ~NXS_HASHED;
hlist_del(&nxi->nx_hlist);
+ atomic_dec(&nx_global_cactive);
}
/* __create_nx_info()
* create the requested context
- * get() and hash it */
+ * get(), claim() and hash it */
static struct nx_info * __create_nx_info(int id)
{
/* dynamic context requested */
if (id == NX_DYNAMIC_ID) {
+#ifdef CONFIG_VSERVER_DYNAMIC_IDS
id = __nx_dynamic_id();
if (!id) {
printk(KERN_ERR "no dynamic context available.\n");
goto out_unlock;
}
new->nx_id = id;
+#else
+ printk(KERN_ERR "dynamic contexts disabled.\n");
+ nxi = ERR_PTR(-EINVAL);
+ goto out_unlock;
+#endif
}
/* static context requested */
else if ((nxi = __lookup_nx_info(id))) {
/* new context */
vxdprintk(VXD_CBIT(nid, 0),
"create_nx_info(%d) = %p (new)", id, new);
+ claim_nx_info(new, NULL);
__hash_nx_info(get_nx_info(new));
nxi = new, new = NULL;
#ifdef CONFIG_PROC_FS
+/* get_nid_list()
+
+ * get a subset of hashed nids for proc
+ * assumes size is at least one */
+
int get_nid_list(int index, unsigned int *nids, int size)
{
int hindex, nr_nids = 0;
+ /* only show current and children */
+ if (!nx_check(0, VS_ADMIN|VS_WATCH)) {
+ if (index > 0)
+ return 0;
+ nids[nr_nids] = nx_current_nid();
+ return 1;
+ }
+
for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) {
struct hlist_head *head = &nx_info_hash[hindex];
struct hlist_node *pos;
atomic_read(&nxi->nx_usecnt),
atomic_read(&nxi->nx_tasks));
+ if (nx_info_flags(nxi, NXF_INFO_PRIVATE, 0) &&
+ !nx_info_flags(nxi, NXF_STATE_SETUP, 0))
+ return -EACCES;
+
+ if (nx_info_state(nxi, NXS_SHUTDOWN))
+ return -EFAULT;
+
/* maybe disallow this completely? */
old_nxi = task_get_nx_info(p);
if (old_nxi == nxi)
if (old_nxi)
release_nx_info(old_nxi, p);
+ ret = 0;
out:
put_nx_info(old_nxi);
return ret;
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+#include <net/addrconf.h>
+
+int nx_addr6_conflict(struct nx_info *nxi, struct nx_info *nxi2)
+{
+ vxdprintk(VXD_CBIT(net, 2), "nx_addr6_conflict(%u,%u)",
+ nxi ? nxi->nx_id : 0, nxi2 ? nxi2->nx_id : 0);
+
+ if (nxi && nxi2 && nxi->nbipv6 > 0 && nxi2->nbipv6 > 0) {
+ int i = 0;
+ for (i = 0; i < nxi->nbipv6; i++)
+ if (addr6_in_nx_info(nxi2, &(nxi->ipv6[i])))
+ return 1;
+ }
+ return 0;
+}
+
+#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+
int ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi)
{
if (!nxi)
return addr_in_nx_info(nxi, ifa->ifa_local);
}
+#ifdef CONFIG_IPV6_MODULE
+
+struct nx_ipv6_mod vc_net_ipv6 = {
+ .dev_in_nx_info6 = NULL,
+ .owner = NULL
+};
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+static atomic_t nx_ipv6mod_lockct = ATOMIC_INIT(0);
+static DEFINE_SPINLOCK(nx_ipv6mod_lock);
+
+/* The strategy is: modifications of vc_net_ipv6 are short, do not
+ sleep and veeery rare, but read access should be free of any exclusive
+ locks. (copied from socket.c)
+ This should prevent any possible issues with module unloading!
+ */
+
+/* writer side: take the spinlock, then spin (yielding) until the
+   reader count drains to zero — writers are rare and short, so the
+   busy-wait is acceptable (pattern copied from socket.c) */
+static void nx_ipv6mod_write_lock(void)
+{
+	spin_lock(&nx_ipv6mod_lock);
+	while (atomic_read(&nx_ipv6mod_lockct) != 0) {
+		spin_unlock(&nx_ipv6mod_lock);
+
+		yield();
+
+		spin_lock(&nx_ipv6mod_lock);
+	}
+}
+
+/* release the writer lock taken by nx_ipv6mod_write_lock() */
+static __inline__ void nx_ipv6mod_write_unlock(void)
+{
+	spin_unlock(&nx_ipv6mod_lock);
+}
+
+/* reader side: bump the reader count first, then wait for any
+   writer currently holding the spinlock to finish */
+static __inline__ void nx_ipv6mod_read_lock(void)
+{
+	atomic_inc(&nx_ipv6mod_lockct);
+	spin_unlock_wait(&nx_ipv6mod_lock);
+}
+
+/* drop the reader count; a spinning writer may now proceed */
+static __inline__ void nx_ipv6mod_read_unlock(void)
+{
+	atomic_dec(&nx_ipv6mod_lockct);
+}
+
+#else
+#define nx_ipv6mod_write_lock() do { } while(0)
+#define nx_ipv6mod_write_unlock() do { } while(0)
+#define nx_ipv6mod_read_lock() do { } while(0)
+#define nx_ipv6mod_read_unlock() do { } while(0)
+#endif
+
+/* called by the ipv6 module on load: publish its hook table
+   under the writer lock */
+void vc_net_register_ipv6(struct nx_ipv6_mod *modv6) {
+	nx_ipv6mod_write_lock();
+	vc_net_ipv6 = *modv6;
+	nx_ipv6mod_write_unlock();
+}
+
+/* called by the ipv6 module on unload: clear the hook table
+   under the writer lock so no further calls are dispatched */
+void vc_net_unregister_ipv6(void) {
+	nx_ipv6mod_write_lock();
+	memset(&vc_net_ipv6, 0, sizeof(struct nx_ipv6_mod));
+	nx_ipv6mod_write_unlock();
+}
+
+/* query the ipv6 module's device check, if one is registered;
+   returns 0 when no module is loaded or no hook is installed */
+inline int dev_in_nx_info6(struct net_device *dev, struct nx_info *nxi) {
+	int ret = 0;
+
+	nx_ipv6mod_read_lock();
+	if (try_module_get(vc_net_ipv6.owner)) {
+		if (vc_net_ipv6.dev_in_nx_info6)
+			ret = vc_net_ipv6.dev_in_nx_info6(dev, nxi);
+		/* NOTE(review): previous early returns skipped both
+		   module_put() and the read unlock, leaking the module
+		   refcount and wedging any later writer */
+		module_put(vc_net_ipv6.owner);
+	}
+	nx_ipv6mod_read_unlock();
+	return ret;
+}
+#endif
+
+/* dev_in_nx_info()
+
+ * check whether the device carries at least one address visible
+ * in the given context; NULL context sees every device */
+
int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
{
-	struct in_device *in_dev;
-	struct in_ifaddr **ifap;
-	struct in_ifaddr *ifa;
	int ret = 0;
	if (!nxi)
		return 1;
-	in_dev = in_dev_get(dev);
-	if (!in_dev)
+	if (!dev)
		goto out;
-	for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
-		ifap = &ifa->ifa_next) {
-		if (addr_in_nx_info(nxi, ifa->ifa_local)) {
-			ret = 1;
-			break;
+	/* walk the device's v4 addresses only when the context
+	   actually holds v4 addresses */
+	if (nxi->nbipv4 > 0) {
+		struct in_device *in_dev;
+		struct in_ifaddr **ifap;
+		struct in_ifaddr *ifa;
+
+		in_dev = in_dev_get(dev);
+		if (in_dev) {
+			for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+				ifap = &ifa->ifa_next) {
+				if (addr_in_nx_info(nxi, ifa->ifa_local)) {
+					ret = 1;
+					break;
+				}
+			}
+			in_dev_put(in_dev);
		}
	}
-	in_dev_put(in_dev);
+
+	/* fall back to the ipv6 check only when no v4 address matched */
+#if defined(CONFIG_IPV6_MODULE)
+	if (ret == 0)
+		ret = dev_in_nx_info6(dev, nxi);
+#elif defined(CONFIG_IPV6)
+	if (ret == 0)
+		ret = ipv6_dev_in_nx_info6(dev, nxi);
+#endif
+
out:
	return ret;
}
* sk: the socket to check against
* addr: the address in question (must be != 0)
*/
-static inline int __addr_in_socket(struct sock *sk, uint32_t addr)
+static inline int __addr_in_socket(const struct sock *sk, uint32_t addr)
{
struct nx_info *nxi = sk->sk_nx_info;
uint32_t saddr = inet_rcv_saddr(sk);
vxdprintk(VXD_CBIT(net, 5),
- "__addr_in_socket(%p,%d.%d.%d.%d) %p:%d.%d.%d.%d %p;%lx",
- sk, VXD_QUAD(addr), nxi, VXD_QUAD(saddr), sk->sk_socket,
+ "__addr_in_socket(%p," NIPQUAD_FMT ") %p:" NIPQUAD_FMT " %p;%lx",
+ sk, NIPQUAD(addr), nxi, NIPQUAD(saddr), sk->sk_socket,
(sk->sk_socket?sk->sk_socket->flags:0));
if (saddr) {
}
-int nx_addr_conflict(struct nx_info *nxi, uint32_t addr, struct sock *sk)
+int nx_addr_conflict(struct nx_info *nxi, uint32_t addr, const struct sock *sk)
{
vxdprintk(VXD_CBIT(net, 2),
- "nx_addr_conflict(%p,%p) %d.%d,%d.%d",
- nxi, sk, VXD_QUAD(addr));
+ "nx_addr_conflict(%p,%p) " NIPQUAD_FMT,
+ nxi, sk, NIPQUAD(addr));
if (addr) {
/* check real address */
void nx_set_persistent(struct nx_info *nxi)
{
+	vxdprintk(VXD_CBIT(nid, 6),
+		"nx_set_persistent(%p[#%d])", nxi, nxi->nx_id);
+
+	/* persistent contexts hold their own reference plus an
+	   anonymous (task-less) claim; undone in nx_clear_persistent */
	get_nx_info(nxi);
-	claim_nx_info(nxi, current);
+	claim_nx_info(nxi, NULL);
}
void nx_clear_persistent(struct nx_info *nxi)
vxdprintk(VXD_CBIT(nid, 6),
"nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id);
- release_nx_info(nxi, current);
+ release_nx_info(nxi, NULL);
put_nx_info(nxi);
}
if (id) {
struct task_struct *tsk;
- if (!vx_check(0, VX_ADMIN|VX_WATCH))
+ if (!nx_check(0, VS_ADMIN|VS_WATCH))
return -EPERM;
read_lock(&tasklist_lock);
}
-int vc_nx_info(uint32_t id, void __user *data)
+int vc_nx_info(struct nx_info *nxi, void __user *data)
{
- struct nx_info *nxi;
struct vcmd_nx_info_v0 vc_data;
- if (!vx_check(0, VX_ADMIN))
- return -ENOSYS;
- if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RESOURCE))
- return -EPERM;
-
- nxi = lookup_nx_info(id);
- if (!nxi)
- return -ESRCH;
-
vc_data.nid = nxi->nx_id;
- put_nx_info(nxi);
if (copy_to_user (data, &vc_data, sizeof(vc_data)))
return -EFAULT;
struct nx_info *new_nxi;
int ret;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
return -EFAULT;
- if ((nid > MAX_S_CONTEXT) && (nid != VX_DYNAMIC_ID))
+ if ((nid > MAX_S_CONTEXT) && (nid != NX_DYNAMIC_ID))
return -EINVAL;
if (nid < 2)
return -EINVAL;
/* initial flags */
new_nxi->nx_flags = vc_data.flagword;
- /* get a reference for persistent contexts */
- if ((vc_data.flagword & NXF_PERSISTENT))
- nx_set_persistent(new_nxi);
-
ret = -ENOEXEC;
if (vs_net_change(new_nxi, VSC_NETUP))
- goto out_unhash;
+ goto out;
+
ret = nx_migrate_task(current, new_nxi);
- if (!ret) {
- /* return context id on success */
- ret = new_nxi->nx_id;
+ if (ret)
goto out;
- }
-out_unhash:
- /* prepare for context disposal */
- new_nxi->nx_state |= NXS_SHUTDOWN;
+
+ /* return context id on success */
+ ret = new_nxi->nx_id;
+
+ /* get a reference for persistent contexts */
if ((vc_data.flagword & NXF_PERSISTENT))
- nx_clear_persistent(new_nxi);
- __unhash_nx_info(new_nxi);
+ nx_set_persistent(new_nxi);
out:
+ release_nx_info(new_nxi, NULL);
put_nx_info(new_nxi);
return ret;
}
-int vc_net_migrate(uint32_t id, void __user *data)
+int vc_net_migrate(struct nx_info *nxi, void __user *data)
{
-	struct nx_info *nxi;
+	/* lookup and permission checks now happen in the vcmd layer;
+	   data is unused for this command */
+	return nx_migrate_task(current, nxi);
+}
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+/*
+ * Lookup address/mask pair in list of v4 addresses
+ * Returns position if found, -1 if not found
+ */
+int vc_net_find_v4(const struct nx_info *nxi, uint32_t addr, uint32_t mask)
+{
+	int i;
+
+	/* scan from the end; stop on the first exact addr/mask match,
+	   fall through to -1 when the pair is not present */
+	for (i = nxi->nbipv4 - 1; i >= 0; i--)
+		if (nxi->ipv4[i] == addr && nxi->mask[i] == mask)
+			break;
+	return i;
+}
- nxi = lookup_nx_info(id);
- if (!nxi)
- return -ESRCH;
- nx_migrate_task(current, nxi);
- put_nx_info(nxi);
- return 0;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+/*
+ * Lookup address/prefix pair list of v6 addresses
+ * Returns position if found, -1 if not found
+ */
+int vc_net_find_v6(const struct nx_info *nxi, const struct in6_addr *addr, int prefix)
+{
+	int i;
+
+	/* scan from the end; stop on the first exact addr/prefix match,
+	   fall through to -1 when the pair is not present */
+	for (i = nxi->nbipv6 - 1; i >= 0; i--) {
+		if (nxi->prefix6[i] == prefix &&
+		    memcmp(&nxi->ipv6[i], addr, sizeof(struct in6_addr)) == 0)
+			break;
+	}
+	return i;
+}
+#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
-int vc_net_add(uint32_t nid, void __user *data)
+int vc_net_add(struct nx_info *nxi, void __user *data)
{
struct vcmd_net_addr_v0 vc_data;
- struct nx_info *nxi;
int index, pos, ret = 0;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
return -EFAULT;
return -EINVAL;
break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case NXA_TYPE_IPV6:
+ /* Note: all 4 items of IP and MASK must be set, but its 1 IPv6 address */
+ if ((vc_data.count != 1))
+ return -EINVAL;
+ break;
+#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+
default:
break;
}
- nxi = lookup_nx_info(nid);
- if (!nxi)
- return -ESRCH;
-
switch (vc_data.type) {
case NXA_TYPE_IPV4:
index = 0;
+ ret = 0;
while ((index < vc_data.count) &&
((pos = nxi->nbipv4) < NB_IPV4ROOT)) {
- nxi->ipv4[pos] = vc_data.ip[index];
- nxi->mask[pos] = vc_data.mask[index];
+ if (vc_net_find_v4(nxi, vc_data.ip[index].s_addr, vc_data.mask[index].s_addr) == -1) {
+ /* Only add if address is new */
+ vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIPQUAD_FMT,
+ nxi->nx_id, index, NIPQUAD(vc_data.ip[index].s_addr));
+ nxi->ipv4[pos] = vc_data.ip[index].s_addr;
+ nxi->mask[pos] = vc_data.mask[index].s_addr;
+ nxi->nbipv4++;
+ ret++;
+ } else
+ vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIPQUAD_FMT " EXISTS",
+ nxi->nx_id, index, NIPQUAD(vc_data.ip[index].s_addr));
index++;
- nxi->nbipv4++;
}
ret = index;
break;
case NXA_TYPE_IPV4|NXA_MOD_BCAST:
- nxi->v4_bcast = vc_data.ip[0];
+ nxi->v4_bcast = vc_data.ip[0].s_addr;
ret = 1;
break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case NXA_TYPE_IPV6:
+ index = 0;
+ ret = 0;
+ while (nxi->nbipv6 < NB_IPV6ROOT && index < vc_data.count) {
+ if (vc_net_find_v6(nxi, &vc_data.ip6, vc_data.prefix) == -1) {
+ /* Only add if address is new */
+ vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIP6_FMT,
+ nxi->nx_id, index, NIP6(vc_data.ip6));
+ nxi->ipv6[nxi->nbipv6] = vc_data.ip6;
+ nxi->prefix6[nxi->nbipv6] = vc_data.prefix;
+ nxi->nbipv6++;
+ ret++;
+ } else
+ vxdprintk(VXD_CBIT(nid, 1), "vc_net_add(%d, data[%d]): " NIP6_FMT " EXISTS",
+ nxi->nx_id, index, NIP6(vc_data.ip6));
+ index++;
+ }
+ break;
+#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+
default:
ret = -EINVAL;
break;
}
-
- put_nx_info(nxi);
return ret;
}
-int vc_net_remove(uint32_t nid, void __user *data)
+int vc_net_remove(struct nx_info * nxi, void __user *data)
{
struct vcmd_net_addr_v0 vc_data;
- struct nx_info *nxi;
- int ret = 0;
+ int index, pos, ret = 0;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
if (data && copy_from_user (&vc_data, data, sizeof(vc_data)))
return -EFAULT;
- nxi = lookup_nx_info(nid);
- if (!nxi)
- return -ESRCH;
-
switch (vc_data.type) {
case NXA_TYPE_ANY:
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ ret = nxi->nbipv6;
+ nxi->nbipv6 = 0;
+#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+ ret += nxi->nbipv4;
nxi->nbipv4 = 0;
break;
+ case NXA_TYPE_IPV4:
+ index = 0;
+ ret = 0;
+ while (index < vc_data.count) {
+ pos = vc_net_find_v4(nxi, vc_data.ip[index].s_addr, vc_data.mask[index].s_addr);
+ if (pos >= 0) {
+ nxi->nbipv4--;
+ ret++;
+ vxdprintk(VXD_CBIT(nid, 1), "vc_net_remove(%d, data[%d]): " NIPQUAD_FMT,
+ nxi->nx_id, index, NIPQUAD(vc_data.ip[index].s_addr));
+ }
+ while (pos >= 0 && pos < nxi->nbipv4) {
+ nxi->ipv4[pos] = nxi->ipv4[pos+1];
+ nxi->mask[pos] = nxi->mask[pos+1];
+ pos++;
+ }
+ index++;
+ }
+ break;
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case NXA_TYPE_IPV6:
+ index = 0;
+ ret = 0;
+ while (index < vc_data.count) {
+ pos = vc_net_find_v6(nxi, &(vc_data.ip6), vc_data.prefix);
+ if (pos >= 0) {
+ nxi->nbipv6--;
+ ret++;
+ vxdprintk(VXD_CBIT(nid, 1), "vc_net_remove(%d, data[%d]): " NIP6_FMT " EXISTS",
+ nxi->nx_id, index, NIP6(vc_data.ip6));
+ }
+ while (pos >= 0 && pos < nxi->nbipv6) {
+ nxi->ipv6[pos] = nxi->ipv6[pos+1];
+ nxi->prefix6[pos] = nxi->prefix6[pos+1];
+ pos++;
+ }
+ index++;
+ }
+ break;
+#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */
+
default:
ret = -EINVAL;
break;
}
-
- put_nx_info(nxi);
return ret;
}
-int vc_get_nflags(uint32_t id, void __user *data)
+int vc_get_nflags(struct nx_info *nxi, void __user *data)
{
- struct nx_info *nxi;
struct vcmd_net_flags_v0 vc_data;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- nxi = lookup_nx_info(id);
- if (!nxi)
- return -ESRCH;
-
vc_data.flagword = nxi->nx_flags;
/* special STATE flag handling */
- vc_data.mask = vx_mask_flags(~0UL, nxi->nx_flags, NXF_ONE_TIME);
-
- put_nx_info(nxi);
+ vc_data.mask = vs_mask_flags(~0UL, nxi->nx_flags, NXF_ONE_TIME);
if (copy_to_user (data, &vc_data, sizeof(vc_data)))
return -EFAULT;
return 0;
}
-int vc_set_nflags(uint32_t id, void __user *data)
+int vc_set_nflags(struct nx_info *nxi, void __user *data)
{
- struct nx_info *nxi;
struct vcmd_net_flags_v0 vc_data;
uint64_t mask, trigger;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
if (copy_from_user (&vc_data, data, sizeof(vc_data)))
return -EFAULT;
- nxi = lookup_nx_info(id);
- if (!nxi)
- return -ESRCH;
-
/* special STATE flag handling */
- mask = vx_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME);
+ mask = vs_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME);
trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword);
- nxi->nx_flags = vx_mask_flags(nxi->nx_flags,
+ nxi->nx_flags = vs_mask_flags(nxi->nx_flags,
vc_data.flagword, mask);
if (trigger & NXF_PERSISTENT)
nx_update_persistent(nxi);
- put_nx_info(nxi);
return 0;
}
-int vc_get_ncaps(uint32_t id, void __user *data)
+int vc_get_ncaps(struct nx_info *nxi, void __user *data)
{
- struct nx_info *nxi;
struct vcmd_net_caps_v0 vc_data;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- nxi = lookup_nx_info(id);
- if (!nxi)
- return -ESRCH;
-
vc_data.ncaps = nxi->nx_ncaps;
vc_data.cmask = ~0UL;
- put_nx_info(nxi);
if (copy_to_user (data, &vc_data, sizeof(vc_data)))
return -EFAULT;
return 0;
}
-int vc_set_ncaps(uint32_t id, void __user *data)
+int vc_set_ncaps(struct nx_info *nxi, void __user *data)
{
- struct nx_info *nxi;
struct vcmd_net_caps_v0 vc_data;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
if (copy_from_user (&vc_data, data, sizeof(vc_data)))
return -EFAULT;
- nxi = lookup_nx_info(id);
- if (!nxi)
- return -ESRCH;
-
- nxi->nx_ncaps = vx_mask_flags(nxi->nx_ncaps,
+ nxi->nx_ncaps = vs_mask_flags(nxi->nx_ncaps,
vc_data.ncaps, vc_data.cmask);
- put_nx_info(nxi);
return 0;
}
EXPORT_SYMBOL_GPL(free_nx_info);
EXPORT_SYMBOL_GPL(unhash_nx_info);
+#ifdef CONFIG_IPV6_MODULE
+EXPORT_SYMBOL_GPL(nx_addr6_conflict);
+EXPORT_SYMBOL_GPL(vc_net_register_ipv6);
+EXPORT_SYMBOL_GPL(vc_net_unregister_ipv6);
+#elif defined(CONFIG_IPV6)
+EXPORT_SYMBOL_GPL(nx_addr6_conflict);
+#endif /* CONFIG_IPV6 || CONFIG_IPV6_MODULE */