#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
+#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include "actions.h"
#include "flow.h"
#include "loop_counter.h"
-#include "odp-compat.h"
#include "table.h"
#include "vport-internal_dev.h"
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
-/* Datapaths. Protected on the read side by rcu_read_lock, on the write side
- * by dp_mutex.
+/**
+ * DOC: Locking:
*
- * dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
- * lock first.
+ * Writes to device state (add/remove datapath, port, set operations on vports,
+ * etc.) are protected by RTNL.
*
- * It is safe to access the datapath and vport structures with just
- * dp_mutex.
+ * Writes to other state (flow table modifications, setting miscellaneous
+ * datapath parameters such as drop frags, etc.) are protected by genl_mutex.
+ * The RTNL lock nests inside genl_mutex.
+ *
+ * Reads are protected by RCU.
+ *
+ * There are a few special cases (mostly stats) that have their own
+ * synchronization, but they nest under all of the above and don't interact
+ * with each other.
*/
+
+/* Protected by genl_mutex. */
static struct datapath __rcu *dps[256];
-static DEFINE_MUTEX(dp_mutex);
static struct vport *new_vport(const struct vport_parms *);
-/* Must be called with rcu_read_lock or dp_mutex. */
+/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
struct datapath *get_dp(int dp_idx)
{
if (dp_idx < 0 || dp_idx >= ARRAY_SIZE(dps))
return NULL;
+
return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
- lockdep_is_held(&dp_mutex));
+ lockdep_rtnl_is_held() ||
+ lockdep_genl_is_held());
}
EXPORT_SYMBOL_GPL(get_dp);
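/*
 * Illustrative read-side usage under the locking rules described in the DOC
 * comment above (a sketch only, not part of this patch; dp, dp_idx, port_no,
 * and n are hypothetical locals in the caller):
 *
 *	rcu_read_lock();
 *	dp = get_dp(dp_idx);
 *	if (dp) {
 *		for (port_no = 0; port_no < DP_MAX_PORTS; port_no++)
 *			if (get_vport_protected(dp, port_no))
 *				n++;
 *	}
 *	rcu_read_unlock();
 *
 * Writers (datapath or vport add/remove) hold RTNL instead, and flow table
 * or datapath parameter updates run under genl_mutex.
 */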
-static struct datapath *get_dp_locked(int dp_idx)
-{
- struct datapath *dp;
-
- mutex_lock(&dp_mutex);
- dp = get_dp(dp_idx);
- if (dp)
- mutex_lock(&dp->mutex);
- mutex_unlock(&dp_mutex);
- return dp;
-}
-
+/* Must be called with genl_mutex. */
static struct tbl *get_table_protected(struct datapath *dp)
{
- return rcu_dereference_protected(dp->table,
- lockdep_is_held(&dp->mutex));
+ return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
}
+/* Must be called with rcu_read_lock or RTNL lock. */
static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
{
- return rcu_dereference_protected(dp->ports[port_no],
- lockdep_is_held(&dp->mutex));
+ return rcu_dereference_rtnl(dp->ports[port_no]);
}
/* Must be called with rcu_read_lock or RTNL lock. */
+ nla_total_size(1); /* IFLA_OPERSTATE */
}
+/* Caller must hold RTNL lock. */
static int dp_fill_ifinfo(struct sk_buff *skb,
const struct vport *port,
int event, unsigned int flags)
return -EMSGSIZE;
}
+/* Caller must hold RTNL lock. */
static void dp_ifinfo_notify(int event, struct vport *port)
{
struct sk_buff *skb;
static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- int i;
-
- for (i = 0; i < DP_N_QUEUES; i++)
- skb_queue_purge(&dp->queues[i]);
tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
}
-/* Caller must hold RTNL, dp_mutex, and dp->mutex. */
-static void destroy_dp(struct datapath *dp)
-{
- struct vport *p, *n;
-
- list_for_each_entry_safe (p, n, &dp->port_list, node)
- if (p->port_no != ODPP_LOCAL)
- dp_detach_port(p);
-
- dp_sysfs_del_dp(dp);
- rcu_assign_pointer(dps[dp->dp_idx], NULL);
- dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
-
- mutex_unlock(&dp->mutex);
- call_rcu(&dp->rcu, destroy_dp_rcu);
- module_put(THIS_MODULE);
-}
-
-/* Called with RTNL lock and dp->mutex. */
+/* Called with RTNL lock and genl_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
struct vport *vport;
- vport_lock();
vport = vport_add(parms);
if (!IS_ERR(vport)) {
struct datapath *dp = parms->dp;
rcu_assign_pointer(dp->ports[parms->port_no], vport);
- list_add_rcu(&vport->node, &dp->port_list);
+ list_add(&vport->node, &dp->port_list);
dp_ifinfo_notify(RTM_NEWLINK, vport);
}
- vport_unlock();
return vport;
}
+/* Called with RTNL lock. */
int dp_detach_port(struct vport *p)
{
- int err;
-
ASSERT_RTNL();
if (p->port_no != ODPP_LOCAL)
dp_ifinfo_notify(RTM_DELLINK, p);
/* First drop references to device. */
- list_del_rcu(&p->node);
+ list_del(&p->node);
rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
/* Then destroy it. */
- vport_lock();
- err = vport_del(p);
- vport_unlock();
-
- return err;
+ return vport_del(p);
}
/* Must be called with rcu_read_lock. */
bool is_frag;
/* Extract flow from 'skb' into 'key'. */
- error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
+ error = flow_extract(skb, p->port_no, &key, &is_frag);
if (unlikely(error)) {
kfree_skb(skb);
return;
if (unlikely(!flow_node)) {
struct dp_upcall_info upcall;
- upcall.type = _ODPL_MISS_NR;
+ upcall.cmd = ODP_PACKET_CMD_MISS;
upcall.key = &key;
upcall.userdata = 0;
upcall.sample_pool = 0;
*(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
}
-/* Append each packet in 'skb' list to 'queue'. There will be only one packet
- * unless we broke up a GSO packet. */
+static struct genl_family dp_packet_genl_family;
+#define PACKET_N_MC_GROUPS 16
+
+static int packet_mc_group(struct datapath *dp, u8 cmd)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
+ return jhash_2words(dp->dp_idx, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
+}
+
+/* Send each packet in the 'skb' list to userspace for 'dp' as directed by
+ * 'upcall_info'. There will be only one packet unless we broke up a GSO
+ * packet.
+ */
static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
+ u32 group = packet_mc_group(dp, upcall_info->cmd);
struct sk_buff *nskb;
int port_no;
int err;
port_no = ODPP_LOCAL;
do {
- struct odp_packet *upcall;
+ struct odp_header *upcall;
struct sk_buff *user_skb; /* to be queued to userspace */
struct nlattr *nla;
unsigned int len;
nskb = skb->next;
skb->next = NULL;
- len = sizeof(struct odp_packet);
+ len = sizeof(struct odp_header);
len += nla_total_size(4); /* ODP_PACKET_ATTR_TYPE. */
len += nla_total_size(skb->len);
len += nla_total_size(FLOW_BUFSIZE);
if (upcall_info->actions_len)
len += nla_total_size(upcall_info->actions_len);
- user_skb = alloc_skb(len, GFP_ATOMIC);
- if (!user_skb)
+ user_skb = genlmsg_new(len, GFP_ATOMIC);
+ if (!user_skb) {
+ netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
goto err_kfree_skbs;
+ }
- upcall = (struct odp_packet *)__skb_put(user_skb, sizeof(*upcall));
+ upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
upcall->dp_idx = dp->dp_idx;
- nla_put_u32(user_skb, ODP_PACKET_ATTR_TYPE, upcall_info->type);
-
nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
flow_to_nlattrs(upcall_info->key, user_skb);
nla_nest_end(user_skb, nla);
else
skb_copy_bits(skb, 0, nla_data(nla), skb->len);
- upcall->len = user_skb->len;
- skb_queue_tail(&dp->queues[upcall_info->type], user_skb);
+ err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
+ if (err)
+ goto err_kfree_skbs;
kfree_skb(skb);
skb = nskb;
return err;
}
+/* Generic Netlink multicast groups for upcalls.
+ *
+ * We really want three unique multicast groups per datapath, but we can't even
+ * get one, because genl_register_mc_group() takes genl_lock, which is also
+ * held during Generic Netlink message processing, so trying to acquire
+ * multicast groups during ODP_DP_NEW processing deadlocks. Instead, we
+ * preallocate a few groups and share them among datapaths, hashing on the
+ * datapath index and command to pick one (see packet_mc_group()). Collisions
+ * aren't fatal--multicast listeners should check that the family is the one
+ * that they want and discard others--but it wastes time and memory to receive
+ * unwanted messages.
+ */
+static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
+
+static struct genl_family dp_packet_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = sizeof(struct odp_header),
+ .name = ODP_PACKET_FAMILY,
+ .version = 1,
+ .maxattr = ODP_PACKET_ATTR_MAX
+};
+
+static int packet_register_mc_groups(void)
+{
+ int i;
+
+ for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
+ struct genl_multicast_group *group = &packet_mc_groups[i];
+ int error;
+
+ sprintf(group->name, "packet%d", i);
+ error = genl_register_mc_group(&dp_packet_genl_family, group);
+ if (error)
+ return error;
+ }
+ return 0;
+}
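/*
 * Illustrative group selection (a sketch, not part of this patch): each
 * datapath's ODP_PACKET_CMD_MISS, ODP_PACKET_CMD_ACTION, and
 * ODP_PACKET_CMD_SAMPLE upcalls are sent to one of the "packet0".."packet15"
 * groups registered above, chosen by packet_mc_group(), e.g.:
 *
 *	miss_group   = packet_mc_group(dp, ODP_PACKET_CMD_MISS);
 *	action_group = packet_mc_group(dp, ODP_PACKET_CMD_ACTION);
 *
 * Userspace learns these values from the ODP_DP_ATTR_MCGROUPS nest in
 * datapath replies (see odp_dp_cmd_fill_info() below) and must still check
 * the family and odp_header->dp_idx of received messages, since two
 * datapaths may hash to the same group.
 */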
+
int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
{
struct dp_stats_percpu *stats;
- struct sk_buff_head *queue;
int err;
WARN_ON_ONCE(skb_shared(skb));
- BUG_ON(upcall_info->type >= DP_N_QUEUES);
-
- queue = &dp->queues[upcall_info->type];
- err = -ENOBUFS;
- if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
- goto err_kfree_skb;
forward_ip_summed(skb);
}
}
- err = queue_control_packets(dp, skb, upcall_info);
- wake_up_interruptible(&dp->waitqueue);
- return err;
+ return queue_control_packets(dp, skb, upcall_info);
err_kfree_skb:
kfree_skb(skb);
return err;
}
+/* Called with genl_mutex. */
static int flush_flows(int dp_idx)
{
struct tbl *old_table;
struct tbl *new_table;
struct datapath *dp;
- int err;
- dp = get_dp_locked(dp_idx);
- err = -ENODEV;
+ dp = get_dp(dp_idx);
if (!dp)
- goto exit;
+ return -ENODEV;
old_table = get_table_protected(dp);
new_table = tbl_create(TBL_MIN_BUCKETS);
- err = -ENOMEM;
if (!new_table)
- goto exit_unlock;
+ return -ENOMEM;
rcu_assign_pointer(dp->table, new_table);
tbl_deferred_destroy(old_table, flow_free_tbl);
- err = 0;
-
-exit_unlock:
- mutex_unlock(&dp->mutex);
-exit:
- return err;
+ return 0;
}
static int validate_actions(const struct nlattr *actions, u32 actions_len)
flow->byte_count = 0;
}
+/* Called with genl_mutex. */
static int expand_table(struct datapath *dp)
{
struct tbl *old_table = get_table_protected(dp);
return 0;
}
-static int do_execute(struct datapath *dp, const struct odp_execute *execute)
+static int odp_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
+ struct odp_header *odp_header = info->userhdr;
+ struct nlattr **a = info->attrs;
+ struct sk_buff *packet;
+ unsigned int actions_len;
+ struct nlattr *actions;
struct sw_flow_key key;
- struct sk_buff *skb;
- struct sw_flow_actions *actions;
+ struct datapath *dp;
struct ethhdr *eth;
bool is_frag;
int err;
err = -EINVAL;
- if (execute->length < ETH_HLEN || execute->length > 65535)
- goto error;
-
- actions = flow_actions_alloc(execute->actions_len);
- if (IS_ERR(actions)) {
- err = PTR_ERR(actions);
- goto error;
- }
-
- err = -EFAULT;
- if (copy_from_user(actions->actions,
- (struct nlattr __user __force *)execute->actions, execute->actions_len))
- goto error_free_actions;
+ if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_ACTIONS] ||
+ nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
+ goto exit;
- err = validate_actions(actions->actions, execute->actions_len);
+ actions = nla_data(a[ODP_PACKET_ATTR_ACTIONS]);
+ actions_len = nla_len(a[ODP_PACKET_ATTR_ACTIONS]);
+ err = validate_actions(actions, actions_len);
if (err)
- goto error_free_actions;
+ goto exit;
+ packet = skb_clone(skb, GFP_KERNEL);
err = -ENOMEM;
- skb = alloc_skb(execute->length, GFP_KERNEL);
- if (!skb)
- goto error_free_actions;
-
- err = -EFAULT;
- if (copy_from_user(skb_put(skb, execute->length),
- (const void __user __force *)execute->data,
- execute->length))
- goto error_free_skb;
+ if (!packet)
+ goto exit;
+ packet->data = nla_data(a[ODP_PACKET_ATTR_PACKET]);
+ packet->len = nla_len(a[ODP_PACKET_ATTR_PACKET]);
- skb_reset_mac_header(skb);
- eth = eth_hdr(skb);
+ skb_reset_mac_header(packet);
+ eth = eth_hdr(packet);
/* Normally, setting the skb 'protocol' field would be handled by a
* call to eth_type_trans(), but it assumes there's a sending
* device, which we may not have. */
if (ntohs(eth->h_proto) >= 1536)
- skb->protocol = eth->h_proto;
+ packet->protocol = eth->h_proto;
else
- skb->protocol = htons(ETH_P_802_2);
+ packet->protocol = htons(ETH_P_802_2);
- err = flow_extract(skb, -1, &key, &is_frag);
+ err = flow_extract(packet, -1, &key, &is_frag);
if (err)
- goto error_free_skb;
+ goto exit;
rcu_read_lock();
- err = execute_actions(dp, skb, &key, actions->actions, actions->actions_len);
+ dp = get_dp(odp_header->dp_idx);
+ err = -ENODEV;
+ if (dp)
+ err = execute_actions(dp, packet, &key, actions, actions_len);
rcu_read_unlock();
- kfree(actions);
- return err;
-
-error_free_skb:
- kfree_skb(skb);
-error_free_actions:
- kfree(actions);
-error:
+exit:
return err;
}
-static int execute_packet(const struct odp_execute __user *executep)
-{
- struct odp_execute execute;
- struct datapath *dp;
- int error;
-
- if (copy_from_user(&execute, executep, sizeof(execute)))
- return -EFAULT;
-
- dp = get_dp_locked(execute.dp_idx);
- if (!dp)
- return -ENODEV;
- error = do_execute(dp, &execute);
- mutex_unlock(&dp->mutex);
+static const struct nla_policy packet_policy[ODP_PACKET_ATTR_MAX + 1] = {
+ [ODP_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
+ [ODP_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+};
- return error;
-}
+static struct genl_ops dp_packet_genl_ops[] = {
+ { .cmd = ODP_PACKET_CMD_EXECUTE,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = packet_policy,
+ .doit = odp_packet_cmd_execute
+ }
+};
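/*
 * Expected request layout for ODP_PACKET_CMD_EXECUTE, summarized from
 * odp_packet_cmd_execute() above (a sketch, not a normative description):
 * the Generic Netlink user header is a struct odp_header carrying dp_idx,
 * followed by
 *
 *	ODP_PACKET_ATTR_PACKET   raw Ethernet frame (at least ETH_HLEN bytes)
 *	ODP_PACKET_ATTR_ACTIONS  nested actions, checked by validate_actions()
 *
 * Both attributes are required; the frame is run directly through
 * execute_actions() against the key from flow_extract(), without consulting
 * the flow table.
 */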
static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
{
}
}
-/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
+/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports.
+ * Called with RTNL lock.
+ */
int dp_min_mtu(const struct datapath *dp)
{
struct vport *p;
ASSERT_RTNL();
- list_for_each_entry_rcu (p, &dp->port_list, node) {
+ list_for_each_entry (p, &dp->port_list, node) {
int dev_mtu;
/* Skip any internal ports, since that's what we're trying to
return mtu ? mtu : ETH_DATA_LEN;
}
-/* Sets the MTU of all datapath devices to the minimum of the ports. Must
- * be called with RTNL lock. */
+/* Sets the MTU of all datapath devices to the minimum of the ports.
+ * Called with RTNL lock.
+ */
void set_internal_devs_mtu(const struct datapath *dp)
{
struct vport *p;
mtu = dp_min_mtu(dp);
- list_for_each_entry_rcu (p, &dp->port_list, node) {
+ list_for_each_entry (p, &dp->port_list, node) {
if (is_internal_vport(p))
vport_set_mtu(p, mtu);
}
}
-static int get_listen_mask(const struct file *f)
-{
- return (long)f->private_data;
-}
-
-static void set_listen_mask(struct file *f, int listen_mask)
-{
- f->private_data = (void*)(long)listen_mask;
-}
-
static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
[ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
[ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
[ODP_FLOW_ATTR_STATE] = { .type = NLA_U64 },
};
+
static int copy_flow_to_user(struct odp_flow __user *dst, struct datapath *dp,
struct sw_flow *flow, u32 total_len, u64 state)
{
int err;
sf_acts = rcu_dereference_protected(flow->sf_acts,
- lockdep_is_held(&dp->mutex));
+ lockdep_genl_is_held());
skb = alloc_skb(128 + FLOW_BUFSIZE + sf_acts->actions_len, GFP_KERNEL);
err = -ENOMEM;
if (!skb)
goto exit;
- rcu_read_lock();
odp_flow = (struct odp_flow*)__skb_put(skb, sizeof(struct odp_flow));
odp_flow->dp_idx = dp->dp_idx;
odp_flow->total_len = total_len;
goto nla_put_failure;
err = flow_to_nlattrs(&flow->key, skb);
if (err)
- goto exit_unlock;
+ goto exit_free;
nla_nest_end(skb, nla);
nla = nla_nest_start(skb, ODP_FLOW_ATTR_ACTIONS);
odp_flow->len = skb->len;
err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
- goto exit_unlock;
+ goto exit_free;
nla_put_failure:
err = -EMSGSIZE;
-exit_unlock:
- rcu_read_unlock();
+exit_free:
kfree_skb(skb);
exit:
return err;
}
+/* Called with genl_mutex. */
static struct sk_buff *copy_flow_from_user(struct odp_flow __user *uodp_flow,
struct dp_flowcmd *flowcmd)
{
if (IS_ERR(skb))
goto exit;
- dp = get_dp_locked(flowcmd.dp_idx);
+ dp = get_dp(flowcmd.dp_idx);
error = -ENODEV;
if (!dp)
- goto error_kfree_skb;
+ goto exit;
hash = flow_hash(&flowcmd.key);
table = get_table_protected(dp);
/* Bail out if we're not allowed to create a new flow. */
error = -ENOENT;
if (cmd == ODP_FLOW_SET)
- goto error_unlock_dp;
+ goto exit;
/* Expand table, if necessary, to make room. */
if (tbl_count(table) >= tbl_n_buckets(table)) {
error = expand_table(dp);
if (error)
- goto error_unlock_dp;
+ goto exit;
table = get_table_protected(dp);
}
flow = flow_alloc();
if (IS_ERR(flow)) {
error = PTR_ERR(flow);
- goto error_unlock_dp;
+ goto exit;
}
flow->key = flowcmd.key;
clear_stats(flow);
/* Update actions. */
flow = flow_cast(flow_node);
old_acts = rcu_dereference_protected(flow->sf_acts,
- lockdep_is_held(&dp->mutex));
+ lockdep_genl_is_held());
if (flowcmd.actions &&
(old_acts->actions_len != flowcmd.actions_len ||
memcmp(old_acts->actions, flowcmd.actions,
}
}
kfree_skb(skb);
- mutex_unlock(&dp->mutex);
return 0;
error_free_flow:
flow_put(flow);
-error_unlock_dp:
- mutex_unlock(&dp->mutex);
error_kfree_skb:
kfree_skb(skb);
exit:
int err;
skb = copy_flow_from_user(uodp_flow, &flowcmd);
- err = PTR_ERR(skb);
if (IS_ERR(skb))
- goto exit;
+ return PTR_ERR(skb);
- dp = get_dp_locked(flowcmd.dp_idx);
- err = -ENODEV;
+ dp = get_dp(flowcmd.dp_idx);
if (!dp)
- goto exit_kfree_skb;
+ return -ENODEV;
table = get_table_protected(dp);
flow_node = tbl_lookup(table, &flowcmd.key, flow_hash(&flowcmd.key), flow_cmp);
- err = -ENOENT;
if (!flow_node)
- goto exit_unlock_dp;
+ return -ENOENT;
if (cmd == ODP_FLOW_DEL) {
err = tbl_remove(table, flow_node);
if (err)
- goto exit_unlock_dp;
+ return err;
}
flow = flow_cast(flow_node);
if (!err && cmd == ODP_FLOW_DEL)
flow_deferred_free(flow);
-exit_unlock_dp:
- mutex_unlock(&dp->mutex);
-exit_kfree_skb:
- kfree_skb(skb);
-exit:
return err;
}
if (IS_ERR(skb))
goto exit;
- dp = get_dp_locked(flowcmd.dp_idx);
+ dp = get_dp(flowcmd.dp_idx);
err = -ENODEV;
if (!dp)
- goto exit_free;
+ goto exit_kfree_skb;
bucket = flowcmd.state >> 32;
obj = flowcmd.state;
- flow_node = tbl_next(dp->table, &bucket, &obj);
+ flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
err = -ENODEV;
if (!flow_node)
- goto exit_unlock_dp;
+ goto exit_kfree_skb;
flow = flow_cast(flow_node);
err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len,
((u64)bucket << 32) | obj);
-exit_unlock_dp:
- mutex_unlock(&dp->mutex);
-exit_free:
+exit_kfree_skb:
kfree_skb(skb);
exit:
return err;
}
static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
+#ifdef HAVE_NLA_NUL_STRING
[ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
+#endif
[ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
[ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
};
-static int copy_datapath_to_user(void __user *dst, struct datapath *dp, uint32_t total_len)
+static struct genl_family dp_datapath_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = sizeof(struct odp_header),
+ .name = ODP_DATAPATH_FAMILY,
+ .version = 1,
+ .maxattr = ODP_DP_ATTR_MAX
+};
+
+static struct genl_multicast_group dp_datapath_multicast_group = {
+ .name = ODP_DATAPATH_MCGROUP
+};
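/*
 * Datapath add/delete/modify operations below notify listeners on this group
 * via genl_notify(); a monitoring process would join ODP_DATAPATH_MCGROUP to
 * observe ODP_DP_CMD_NEW and ODP_DP_CMD_DEL events (illustrative summary,
 * not part of this patch).
 */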
+
+static int odp_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
+ u32 pid, u32 seq, u32 flags, u8 cmd)
{
- struct odp_datapath *odp_datapath;
- struct sk_buff *skb;
+ struct odp_header *odp_header;
struct nlattr *nla;
int err;
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
- err = -ENOMEM;
- if (!skb)
- goto exit;
+ odp_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
+ flags, cmd);
+ if (!odp_header)
+ goto error;
- odp_datapath = (struct odp_datapath*)__skb_put(skb, sizeof(struct odp_datapath));
- odp_datapath->dp_idx = dp->dp_idx;
- odp_datapath->total_len = total_len;
+ odp_header->dp_idx = dp->dp_idx;
rcu_read_lock();
err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
if (dp->sflow_probability)
NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);
- if (skb->len > total_len)
+ nla = nla_nest_start(skb, ODP_DP_ATTR_MCGROUPS);
+ if (!nla)
goto nla_put_failure;
+ NLA_PUT_U32(skb, ODP_PACKET_CMD_MISS, packet_mc_group(dp, ODP_PACKET_CMD_MISS));
+ NLA_PUT_U32(skb, ODP_PACKET_CMD_ACTION, packet_mc_group(dp, ODP_PACKET_CMD_ACTION));
+ NLA_PUT_U32(skb, ODP_PACKET_CMD_SAMPLE, packet_mc_group(dp, ODP_PACKET_CMD_SAMPLE));
+ nla_nest_end(skb, nla);
- odp_datapath->len = skb->len;
- err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
- goto exit_free_skb;
+ return genlmsg_end(skb, odp_header);
nla_put_failure:
- err = -EMSGSIZE;
-exit_free_skb:
- kfree_skb(skb);
-exit:
- return err;
+ genlmsg_cancel(skb, odp_header);
+error:
+ return -EMSGSIZE;
}
-static struct sk_buff *copy_datapath_from_user(struct odp_datapath __user *uodp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
+static struct sk_buff *odp_dp_cmd_build_info(struct datapath *dp, u32 pid,
+ u32 seq, u8 cmd)
{
- struct odp_datapath *odp_datapath;
struct sk_buff *skb;
- u32 len;
- int err;
-
- if (get_user(len, &uodp_datapath->len))
- return ERR_PTR(-EFAULT);
- if (len < sizeof(struct odp_datapath))
- return ERR_PTR(-EINVAL);
+ int retval;
- skb = alloc_skb(len, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return ERR_PTR(-ENOMEM);
- err = -EFAULT;
- if (copy_from_user(__skb_put(skb, len), uodp_datapath, len))
- goto error_free_skb;
-
- odp_datapath = (struct odp_datapath *)skb->data;
- err = -EINVAL;
- if (odp_datapath->len != len)
- goto error_free_skb;
-
- err = nla_parse(a, ODP_DP_ATTR_MAX,
- (struct nlattr *)(skb->data + sizeof(struct odp_datapath)),
- skb->len - sizeof(struct odp_datapath), datapath_policy);
- if (err)
- goto error_free_skb;
+ retval = odp_dp_cmd_fill_info(dp, skb, pid, seq, 0, cmd);
+ if (retval < 0) {
+ kfree_skb(skb);
+ return ERR_PTR(retval);
+ }
+ return skb;
+}
+static int odp_dp_cmd_validate(struct nlattr *a[ODP_DP_ATTR_MAX + 1])
+{
if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);
- err = -EINVAL;
if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
- goto error_free_skb;
+ return -EINVAL;
}
- err = VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
- if (err)
- goto error_free_skb;
-
- return skb;
-
-error_free_skb:
- kfree_skb(skb);
- return ERR_PTR(err);
+ return VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
}
-/* Called with dp_mutex and optionally with RTNL lock also.
- * Holds the returned datapath's mutex on return.
- */
-static struct datapath *lookup_datapath(struct odp_datapath *odp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
+/* Called with genl_mutex and optionally with the RTNL lock. */
+static struct datapath *lookup_datapath(struct odp_header *odp_header, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
- WARN_ON_ONCE(!mutex_is_locked(&dp_mutex));
-
if (!a[ODP_DP_ATTR_NAME]) {
- struct datapath *dp;
-
- dp = get_dp(odp_datapath->dp_idx);
+ struct datapath *dp = get_dp(odp_header->dp_idx);
if (!dp)
return ERR_PTR(-ENODEV);
- mutex_lock(&dp->mutex);
return dp;
} else {
- struct datapath *dp;
struct vport *vport;
int dp_idx;
- vport_lock();
+ rcu_read_lock();
vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
dp_idx = vport && vport->port_no == ODPP_LOCAL ? vport->dp->dp_idx : -1;
- vport_unlock();
+ rcu_read_unlock();
if (dp_idx < 0)
return ERR_PTR(-ENODEV);
-
- dp = get_dp(dp_idx);
- mutex_lock(&dp->mutex);
- return dp;
+ return vport->dp;
}
}
+/* Called with genl_mutex. */
static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
if (a[ODP_DP_ATTR_IPV4_FRAGS])
dp->sflow_probability = nla_get_u32(a[ODP_DP_ATTR_SAMPLING]);
}
-static int new_datapath(struct odp_datapath __user *uodp_datapath)
+static int odp_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *a[ODP_DP_ATTR_MAX + 1];
- struct odp_datapath *odp_datapath;
+ struct nlattr **a = info->attrs;
+ struct odp_header *odp_header = info->userhdr;
struct vport_parms parms;
- struct sk_buff *skb;
+ struct sk_buff *reply;
struct datapath *dp;
struct vport *vport;
int dp_idx;
int err;
- int i;
-
- skb = copy_datapath_from_user(uodp_datapath, a);
- err = PTR_ERR(skb);
- if (IS_ERR(skb))
- goto err;
- odp_datapath = (struct odp_datapath *)skb->data;
err = -EINVAL;
if (!a[ODP_DP_ATTR_NAME])
- goto err_free_skb;
+ goto err;
+
+ err = odp_dp_cmd_validate(a);
+ if (err)
+ goto err;
rtnl_lock();
- mutex_lock(&dp_mutex);
err = -ENODEV;
if (!try_module_get(THIS_MODULE))
- goto err_unlock_dp_mutex;
+ goto err_unlock_rtnl;
- dp_idx = odp_datapath->dp_idx;
+ dp_idx = odp_header->dp_idx;
if (dp_idx < 0) {
err = -EFBIG;
for (dp_idx = 0; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
if (dp == NULL)
goto err_put_module;
INIT_LIST_HEAD(&dp->port_list);
- mutex_init(&dp->mutex);
- mutex_lock(&dp->mutex);
dp->dp_idx = dp_idx;
- for (i = 0; i < DP_N_QUEUES; i++)
- skb_queue_head_init(&dp->queues[i]);
- init_waitqueue_head(&dp->waitqueue);
/* Initialize kobject for bridge. This will be added as
* /sys/class/net/<devname>/brif later, if sysfs is enabled. */
change_datapath(dp, a);
+ reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
+ err = PTR_ERR(reply);
+ if (IS_ERR(reply))
+ goto err_destroy_local_port;
+
rcu_assign_pointer(dps[dp_idx], dp);
dp_sysfs_add_dp(dp);
- mutex_unlock(&dp->mutex);
- mutex_unlock(&dp_mutex);
rtnl_unlock();
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
return 0;
err_destroy_local_port:
err_destroy_table:
tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
- mutex_unlock(&dp->mutex);
kfree(dp);
err_put_module:
module_put(THIS_MODULE);
-err_unlock_dp_mutex:
- mutex_unlock(&dp_mutex);
+err_unlock_rtnl:
rtnl_unlock();
-err_free_skb:
- kfree_skb(skb);
err:
return err;
}
-static int del_datapath(struct odp_datapath __user *uodp_datapath)
+static int odp_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *a[ODP_DP_ATTR_MAX + 1];
+ struct vport *vport, *next_vport;
+ struct sk_buff *reply;
struct datapath *dp;
- struct sk_buff *skb;
int err;
- skb = copy_datapath_from_user(uodp_datapath, a);
- err = PTR_ERR(skb);
- if (IS_ERR(skb))
+ err = odp_dp_cmd_validate(info->attrs);
+ if (err)
goto exit;
rtnl_lock();
- mutex_lock(&dp_mutex);
- dp = lookup_datapath((struct odp_datapath *)skb->data, a);
+ dp = lookup_datapath(info->userhdr, info->attrs);
err = PTR_ERR(dp);
if (IS_ERR(dp))
- goto exit_free;
+ goto exit_unlock;
+
+ reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_DEL);
+ err = PTR_ERR(reply);
+ if (IS_ERR(reply))
+ goto exit_unlock;
+
+ list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
+ if (vport->port_no != ODPP_LOCAL)
+ dp_detach_port(vport);
- destroy_dp(dp);
+ dp_sysfs_del_dp(dp);
+ rcu_assign_pointer(dps[dp->dp_idx], NULL);
+ dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
+
+ call_rcu(&dp->rcu, destroy_dp_rcu);
+ module_put(THIS_MODULE);
+
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
err = 0;
-exit_free:
- kfree_skb(skb);
- mutex_unlock(&dp_mutex);
+exit_unlock:
rtnl_unlock();
exit:
return err;
}
-static int set_datapath(struct odp_datapath __user *uodp_datapath)
+static int odp_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *a[ODP_DP_ATTR_MAX + 1];
+ struct sk_buff *reply;
struct datapath *dp;
- struct sk_buff *skb;
int err;
- skb = copy_datapath_from_user(uodp_datapath, a);
- err = PTR_ERR(skb);
- if (IS_ERR(skb))
- goto exit;
+ err = odp_dp_cmd_validate(info->attrs);
+ if (err)
+ return err;
- mutex_lock(&dp_mutex);
- dp = lookup_datapath((struct odp_datapath *)skb->data, a);
- err = PTR_ERR(dp);
+ dp = lookup_datapath(info->userhdr, info->attrs);
if (IS_ERR(dp))
- goto exit_free;
+ return PTR_ERR(dp);
- change_datapath(dp, a);
- mutex_unlock(&dp->mutex);
- err = 0;
+ change_datapath(dp, info->attrs);
-exit_free:
- kfree_skb(skb);
- mutex_unlock(&dp_mutex);
-exit:
- return err;
+ reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
+ if (IS_ERR(reply)) {
+ err = PTR_ERR(reply);
+ netlink_set_err(INIT_NET_GENL_SOCK, 0,
+ dp_datapath_multicast_group.id, err);
+ return 0;
+ }
+
+ genl_notify(reply, genl_info_net(info), info->snd_pid,
+ dp_datapath_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ return 0;
}
-static int get_datapath(struct odp_datapath __user *uodp_datapath)
+static int odp_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *a[ODP_DP_ATTR_MAX + 1];
- struct odp_datapath *odp_datapath;
+ struct sk_buff *reply;
struct datapath *dp;
- struct sk_buff *skb;
int err;
- skb = copy_datapath_from_user(uodp_datapath, a);
- err = PTR_ERR(skb);
- if (IS_ERR(skb))
- goto exit;
- odp_datapath = (struct odp_datapath *)skb->data;
-
- mutex_lock(&dp_mutex);
- dp = lookup_datapath(odp_datapath, a);
- mutex_unlock(&dp_mutex);
+ err = odp_dp_cmd_validate(info->attrs);
+ if (err)
+ return err;
- err = PTR_ERR(dp);
+ dp = lookup_datapath(info->userhdr, info->attrs);
if (IS_ERR(dp))
- goto exit_free;
+ return PTR_ERR(dp);
- err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
- mutex_unlock(&dp->mutex);
-exit_free:
- kfree_skb(skb);
-exit:
- return err;
+ reply = odp_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, ODP_DP_CMD_NEW);
+ if (IS_ERR(reply))
+ return PTR_ERR(reply);
+
+ return genlmsg_reply(reply, info);
}
-static int dump_datapath(struct odp_datapath __user *uodp_datapath)
+static int odp_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct nlattr *a[ODP_DP_ATTR_MAX + 1];
- struct odp_datapath *odp_datapath;
- struct sk_buff *skb;
u32 dp_idx;
- int err;
-
- skb = copy_datapath_from_user(uodp_datapath, a);
- err = PTR_ERR(skb);
- if (IS_ERR(skb))
- goto exit;
- odp_datapath = (struct odp_datapath *)skb->data;
- mutex_lock(&dp_mutex);
- for (dp_idx = odp_datapath->dp_idx; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
+ for (dp_idx = cb->args[0]; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
struct datapath *dp = get_dp(dp_idx);
if (!dp)
continue;
-
- mutex_lock(&dp->mutex);
- mutex_unlock(&dp_mutex);
- err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
- mutex_unlock(&dp->mutex);
- goto exit_free;
+ if (odp_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ ODP_DP_CMD_NEW) < 0)
+ break;
}
- mutex_unlock(&dp_mutex);
- err = -ENODEV;
-exit_free:
- kfree_skb(skb);
-exit:
- return err;
+ cb->args[0] = dp_idx;
+ return skb->len;
}
+static struct genl_ops dp_datapath_genl_ops[] = {
+ { .cmd = ODP_DP_CMD_NEW,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = datapath_policy,
+ .doit = odp_dp_cmd_new
+ },
+ { .cmd = ODP_DP_CMD_DEL,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = datapath_policy,
+ .doit = odp_dp_cmd_del
+ },
+ { .cmd = ODP_DP_CMD_GET,
+ .flags = 0, /* OK for unprivileged users. */
+ .policy = datapath_policy,
+ .doit = odp_dp_cmd_get,
+ .dumpit = odp_dp_cmd_dump
+ },
+ { .cmd = ODP_DP_CMD_SET,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = datapath_policy,
+ .doit = odp_dp_cmd_set,
+ },
+};
+
static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
[ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
[ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
[ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
-static int copy_vport_to_user(void __user *dst, struct vport *vport, uint32_t total_len)
+/* Called with RCU read lock. */
+static struct sk_buff *odp_vport_build_info(struct vport *vport, uint32_t total_len)
{
struct odp_vport *odp_vport;
struct sk_buff *skb;
int ifindex, iflink;
int err;
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
err = -ENOMEM;
if (!skb)
- goto exit;
+ goto err;
- rcu_read_lock();
odp_vport = (struct odp_vport*)__skb_put(skb, sizeof(struct odp_vport));
odp_vport->dp_idx = vport->dp->dp_idx;
odp_vport->total_len = total_len;
err = -EMSGSIZE;
if (skb->len > total_len)
- goto exit_unlock;
+ goto err_free;
odp_vport->len = skb->len;
- err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
- goto exit_unlock;
+ return skb;
nla_put_failure:
err = -EMSGSIZE;
-exit_unlock:
- rcu_read_unlock();
+err_free:
kfree_skb(skb);
-exit:
- return err;
+err:
+ return ERR_PTR(err);
}
static struct sk_buff *copy_vport_from_user(struct odp_vport __user *uodp_vport,
return ERR_PTR(err);
}
-
-/* Called without any locks (or with RTNL lock).
- * Returns holding vport->dp->mutex.
- */
+/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct odp_vport *odp_vport,
struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
struct vport *vport;
if (a[ODP_VPORT_ATTR_NAME]) {
- int dp_idx, port_no;
-
- retry:
- vport_lock();
vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
- if (!vport) {
- vport_unlock();
+ if (!vport)
return ERR_PTR(-ENODEV);
- }
- dp_idx = vport->dp->dp_idx;
- port_no = vport->port_no;
- vport_unlock();
-
- dp = get_dp_locked(dp_idx);
- if (!dp)
- goto retry;
-
- vport = get_vport_protected(dp, port_no);
- if (!vport ||
- strcmp(vport_get_name(vport), nla_data(a[ODP_VPORT_ATTR_NAME]))) {
- mutex_unlock(&dp->mutex);
- goto retry;
- }
-
return vport;
} else if (a[ODP_VPORT_ATTR_PORT_NO]) {
u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
if (port_no >= DP_MAX_PORTS)
return ERR_PTR(-EINVAL);
- dp = get_dp_locked(odp_vport->dp_idx);
+ dp = get_dp(odp_vport->dp_idx);
if (!dp)
return ERR_PTR(-ENODEV);
vport = get_vport_protected(dp, port_no);
- if (!vport) {
- mutex_unlock(&dp->mutex);
+ if (!vport)
return ERR_PTR(-ENOENT);
- }
return vport;
} else
return ERR_PTR(-EINVAL);
}
+/* Called with RTNL lock. */
static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
int err = 0;
struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
struct odp_vport *odp_vport;
struct vport_parms parms;
+ struct sk_buff *reply;
struct vport *vport;
struct sk_buff *skb;
struct datapath *dp;
goto exit_kfree_skb;
rtnl_lock();
-
- dp = get_dp_locked(odp_vport->dp_idx);
+ dp = get_dp(odp_vport->dp_idx);
err = -ENODEV;
if (!dp)
- goto exit_unlock_rtnl;
+ goto exit_unlock;
if (a[ODP_VPORT_ATTR_PORT_NO]) {
port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
err = -EFBIG;
if (port_no >= DP_MAX_PORTS)
- goto exit_unlock_dp;
+ goto exit_unlock;
vport = get_vport_protected(dp, port_no);
err = -EBUSY;
if (vport)
- goto exit_unlock_dp;
+ goto exit_unlock;
} else {
for (port_no = 1; ; port_no++) {
if (port_no >= DP_MAX_PORTS) {
err = -EFBIG;
- goto exit_unlock_dp;
+ goto exit_unlock;
}
vport = get_vport_protected(dp, port_no);
if (!vport)
vport = new_vport(&parms);
err = PTR_ERR(vport);
if (IS_ERR(vport))
- goto exit_unlock_dp;
+ goto exit_unlock;
set_internal_devs_mtu(dp);
dp_sysfs_add_if(vport);
err = change_vport(vport, a);
if (err) {
dp_detach_port(vport);
- goto exit_unlock_dp;
+ goto exit_unlock;
}
- err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
+ reply = odp_vport_build_info(vport, odp_vport->total_len);
+ err = PTR_ERR(reply);
+ if (IS_ERR(reply))
+ goto exit_unlock;
+
+ err = copy_to_user(uodp_vport, reply->data, reply->len) ? -EFAULT : 0;
+ kfree_skb(reply);
-exit_unlock_dp:
- mutex_unlock(&dp->mutex);
-exit_unlock_rtnl:
+exit_unlock:
rtnl_unlock();
exit_kfree_skb:
kfree_skb(skb);
if (!err)
err = change_vport(vport, a);
- mutex_unlock(&vport->dp->mutex);
exit_free:
kfree_skb(skb);
rtnl_unlock();
static int del_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
- struct datapath *dp;
struct vport *vport;
struct sk_buff *skb;
int err;
rtnl_lock();
vport = lookup_vport((struct odp_vport *)skb->data, a);
err = PTR_ERR(vport);
- if (IS_ERR(vport))
- goto exit_free;
- dp = vport->dp;
+ if (!IS_ERR(vport))
+ err = dp_detach_port(vport);
- err = -EINVAL;
- if (vport->port_no == ODPP_LOCAL)
- goto exit_free;
-
- err = dp_detach_port(vport);
- mutex_unlock(&dp->mutex);
-exit_free:
kfree_skb(skb);
rtnl_unlock();
exit:
{
struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
struct odp_vport *odp_vport;
+ struct sk_buff *reply;
struct vport *vport;
struct sk_buff *skb;
int err;
skb = copy_vport_from_user(uodp_vport, a);
err = PTR_ERR(skb);
if (IS_ERR(skb))
- goto exit;
+ goto err;
odp_vport = (struct odp_vport *)skb->data;
+ rcu_read_lock();
vport = lookup_vport(odp_vport, a);
err = PTR_ERR(vport);
if (IS_ERR(vport))
- goto exit_free;
+ goto err_unlock_rcu;
+ reply = odp_vport_build_info(vport, odp_vport->total_len);
+ rcu_read_unlock();
- err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
- mutex_unlock(&vport->dp->mutex);
-exit_free:
+ err = PTR_ERR(reply);
+ if (IS_ERR(reply))
+ goto err_kfree_skb;
+
+ err = copy_to_user(uodp_vport, reply->data, reply->len) ? -EFAULT : 0;
+ kfree_skb(reply);
kfree_skb(skb);
-exit:
+
+ return err;
+
+err_unlock_rcu:
+ rcu_read_unlock();
+err_kfree_skb:
+ kfree_skb(skb);
+err:
return err;
}
skb = copy_vport_from_user(uodp_vport, a);
err = PTR_ERR(skb);
if (IS_ERR(skb))
- goto exit;
+ goto err;
odp_vport = (struct odp_vport *)skb->data;
- dp = get_dp_locked(odp_vport->dp_idx);
+ dp = get_dp(odp_vport->dp_idx);
err = -ENODEV;
if (!dp)
- goto exit_free;
+ goto err_kfree_skb;
port_no = 0;
if (a[ODP_VPORT_ATTR_PORT_NO])
port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
+
+ rcu_read_lock();
for (; port_no < DP_MAX_PORTS; port_no++) {
- struct vport *vport = get_vport_protected(dp, port_no);
- if (vport) {
- err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
- goto exit_unlock_dp;
- }
+ struct sk_buff *skb_out;
+ struct vport *vport;
+ int retval;
+
+ vport = get_vport_protected(dp, port_no);
+ if (!vport)
+ continue;
+
+ skb_out = odp_vport_build_info(vport, odp_vport->total_len);
+ rcu_read_unlock();
+
+ err = PTR_ERR(skb_out);
+ if (IS_ERR(skb_out))
+ goto err_kfree_skb;
+
+ retval = copy_to_user(uodp_vport, skb_out->data, skb_out->len);
+ kfree_skb(skb_out);
+ kfree_skb(skb);
+
+ return retval ? -EFAULT : 0;
}
+ rcu_read_unlock();
err = -ENODEV;
-exit_unlock_dp:
- mutex_unlock(&dp->mutex);
-exit_free:
+err_kfree_skb:
kfree_skb(skb);
-exit:
+err:
return err;
}
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
unsigned long argp)
{
- int dp_idx = iminor(f->f_dentry->d_inode);
- struct datapath *dp;
- int listeners;
int err;
- /* Handle commands with special locking requirements up front. */
+ genl_lock();
switch (cmd) {
- case ODP_DP_NEW:
- err = new_datapath((struct odp_datapath __user *)argp);
- goto exit;
-
- case ODP_DP_GET:
- err = get_datapath((struct odp_datapath __user *)argp);
- goto exit;
-
- case ODP_DP_DEL:
- err = del_datapath((struct odp_datapath __user *)argp);
- goto exit;
-
- case ODP_DP_SET:
- err = set_datapath((struct odp_datapath __user *)argp);
- goto exit;
-
- case ODP_DP_DUMP:
- err = dump_datapath((struct odp_datapath __user *)argp);
- goto exit;
-
case ODP_VPORT_NEW:
err = attach_vport((struct odp_vport __user *)argp);
goto exit;
err = dump_flow((struct odp_flow __user *)argp);
goto exit;
- case ODP_EXECUTE:
- err = execute_packet((struct odp_execute __user *)argp);
- goto exit;
- }
-
- dp = get_dp_locked(dp_idx);
- err = -ENODEV;
- if (!dp)
- goto exit;
-
- switch (cmd) {
- case ODP_GET_LISTEN_MASK:
- err = put_user(get_listen_mask(f), (int __user *)argp);
- break;
-
- case ODP_SET_LISTEN_MASK:
- err = get_user(listeners, (int __user *)argp);
- if (err)
- break;
- err = -EINVAL;
- if (listeners & ~ODPL_ALL)
- break;
- err = 0;
- set_listen_mask(f, listeners);
- break;
-
default:
err = -ENOIOCTLCMD;
break;
}
- mutex_unlock(&dp->mutex);
exit:
+ genl_unlock();
return err;
}
-static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
-{
- int i;
- for (i = 0; i < DP_N_QUEUES; i++) {
- if (listeners & (1 << i) && !skb_queue_empty(&dp->queues[i]))
- return 1;
- }
- return 0;
-}
-
#ifdef CONFIG_COMPAT
-static int compat_execute(const struct compat_odp_execute __user *uexecute)
-{
- struct odp_execute execute;
- compat_uptr_t actions;
- compat_uptr_t data;
- struct datapath *dp;
- int error;
-
- if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
- __get_user(execute.dp_idx, &uexecute->dp_idx) ||
- __get_user(actions, &uexecute->actions) ||
- __get_user(execute.actions_len, &uexecute->actions_len) ||
- __get_user(data, &uexecute->data) ||
- __get_user(execute.length, &uexecute->length))
- return -EFAULT;
-
- execute.actions = (struct nlattr __force *)compat_ptr(actions);
- execute.data = (const void __force *)compat_ptr(data);
-
- dp = get_dp_locked(execute.dp_idx);
- if (!dp)
- return -ENODEV;
- error = do_execute(dp, &execute);
- mutex_unlock(&dp->mutex);
-
- return error;
-}
-
static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
switch (cmd) {
/* Ioctls that don't need any translation at all. */
return openvswitch_ioctl(f, cmd, argp);
- case ODP_DP_NEW:
- case ODP_DP_GET:
- case ODP_DP_DEL:
- case ODP_DP_SET:
- case ODP_DP_DUMP:
case ODP_VPORT_NEW:
case ODP_VPORT_DEL:
case ODP_VPORT_GET:
case ODP_FLOW_GET:
case ODP_FLOW_SET:
case ODP_FLOW_DUMP:
- case ODP_SET_LISTEN_MASK:
- case ODP_GET_LISTEN_MASK:
/* Ioctls that just need their pointer argument extended. */
return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
- case ODP_EXECUTE32:
- return compat_execute(compat_ptr(argp));
-
default:
return -ENOIOCTLCMD;
}
}
#endif
-static ssize_t openvswitch_read(struct file *f, char __user *buf,
- size_t nbytes, loff_t *ppos)
-{
- int listeners = get_listen_mask(f);
- int dp_idx = iminor(f->f_dentry->d_inode);
- struct datapath *dp = get_dp_locked(dp_idx);
- struct sk_buff *skb;
- struct iovec iov;
- int retval;
+static struct file_operations openvswitch_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = openvswitch_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = openvswitch_compat_ioctl,
+#endif
+};
- if (!dp)
- return -ENODEV;
+static int major;
- if (nbytes == 0 || !listeners)
- return 0;
+struct genl_family_and_ops {
+ struct genl_family *family;
+ struct genl_ops *ops;
+ int n_ops;
+ struct genl_multicast_group *group;
+};
- for (;;) {
- int i;
+static const struct genl_family_and_ops dp_genl_families[] = {
+ { &dp_datapath_genl_family,
+ dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
+ &dp_datapath_multicast_group },
+ { &dp_packet_genl_family,
+ dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
+ NULL },
+};
- for (i = 0; i < DP_N_QUEUES; i++) {
- if (listeners & (1 << i)) {
- skb = skb_dequeue(&dp->queues[i]);
- if (skb)
- goto success;
- }
- }
+static void dp_unregister_genl(int n_families)
+{
+ int i;
- if (f->f_flags & O_NONBLOCK) {
- retval = -EAGAIN;
- goto error;
- }
+ for (i = 0; i < n_families; i++) {
+ genl_unregister_family(dp_genl_families[i].family);
+ }
+}
- wait_event_interruptible(dp->waitqueue,
- dp_has_packet_of_interest(dp,
- listeners));
+static int dp_register_genl(void)
+{
+ int n_registered;
+ int err;
+ int i;
- if (signal_pending(current)) {
- retval = -ERESTARTSYS;
+ n_registered = 0;
+ for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
+ const struct genl_family_and_ops *f = &dp_genl_families[i];
+
+ err = genl_register_family_with_ops(f->family, f->ops,
+ f->n_ops);
+ if (err)
goto error;
+ n_registered++;
+
+ if (f->group) {
+ err = genl_register_mc_group(f->family, f->group);
+ if (err)
+ goto error;
}
}
-success:
- mutex_unlock(&dp->mutex);
-
- iov.iov_base = buf;
- iov.iov_len = min_t(size_t, skb->len, nbytes);
- retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
- if (!retval)
- retval = skb->len;
- kfree_skb(skb);
- return retval;
+ err = packet_register_mc_groups();
+ if (err)
+ goto error;
+ return 0;
error:
- mutex_unlock(&dp->mutex);
- return retval;
-}
-
-static unsigned int openvswitch_poll(struct file *file, poll_table *wait)
-{
- int dp_idx = iminor(file->f_dentry->d_inode);
- struct datapath *dp = get_dp_locked(dp_idx);
- unsigned int mask;
-
- if (dp) {
- mask = 0;
- poll_wait(file, &dp->waitqueue, wait);
- if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
- mask |= POLLIN | POLLRDNORM;
- mutex_unlock(&dp->mutex);
- } else {
- mask = POLLIN | POLLRDNORM | POLLHUP;
- }
- return mask;
+ dp_unregister_genl(n_registered);
+ return err;
}
-static struct file_operations openvswitch_fops = {
- .owner = THIS_MODULE,
- .read = openvswitch_read,
- .poll = openvswitch_poll,
- .unlocked_ioctl = openvswitch_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = openvswitch_compat_ioctl,
-#endif
-};
-
-static int major;
-
static int __init dp_init(void)
{
struct sk_buff *dummy_skb;
if (err < 0)
goto error_unreg_notifier;
+ err = dp_register_genl();
+ if (err < 0)
+ goto error_unreg_chrdev;
+
return 0;
+error_unreg_chrdev:
+ unregister_chrdev(major, "openvswitch");
error_unreg_notifier:
unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
static void dp_cleanup(void)
{
rcu_barrier();
+ dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
unregister_chrdev(major, "openvswitch");
unregister_netdevice_notifier(&dp_device_notifier);
vport_exit();