#include "datapath.h"
#include "actions.h"
#include "flow.h"
-#include "table.h"
#include "vlan.h"
+#include "tunnel.h"
#include "vport-internal_dev.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
EXPORT_SYMBOL_GPL(get_dp);
/* Must be called with genl_mutex. */
-static struct tbl *get_table_protected(struct datapath *dp)
+static struct flow_table *get_table_protected(struct datapath *dp)
{
return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
}
return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
}
+static int get_dpifindex(struct datapath *dp)
+{
+ struct vport *local;
+ int ifindex;
+
+ rcu_read_lock();
+
+ local = get_vport_protected(dp, OVSP_LOCAL);
+ if (local)
+ ifindex = vport_get_ifindex(local);
+ else
+ ifindex = 0;
+
+ rcu_read_unlock();
+
+ return ifindex;
+}
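+
+/* Note: the datapath no longer caches its ifindex at creation time (the
+ * dp->dp_ifindex assignment is dropped below); callers derive it on demand
+ * with get_dpifindex() and treat a return of 0 as "local port gone", e.g.
+ * queue_userspace_packets() maps it to -ENODEV. */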
+
static inline size_t br_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
hdr->ifi_change = 0;
NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
- NLA_PUT_U32(skb, IFLA_MASTER,
- vport_get_ifindex(get_vport_protected(dp, OVSP_LOCAL)));
+ NLA_PUT_U32(skb, IFLA_MASTER, get_dpifindex(dp));
NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
NLA_PUT_U8(skb, IFLA_OPERSTATE,
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
+ flow_tbl_destroy(dp->table);
free_percpu(dp->stats_percpu);
kobject_put(&dp->ifobj);
}
}
/* Called with RTNL lock. */
-int dp_detach_port(struct vport *p)
+void dp_detach_port(struct vport *p)
{
ASSERT_RTNL();
rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
/* Then destroy it. */
- return vport_del(p);
+ vport_del(p);
}
/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
struct datapath *dp = p->dp;
+ struct sw_flow *flow;
struct dp_stats_percpu *stats;
- int stats_counter_off;
+ u64 *stats_counter;
int error;
+ stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
OVS_CB(skb)->vport = p;
if (!OVS_CB(skb)->flow) {
struct sw_flow_key key;
- struct tbl_node *flow_node;
int key_len;
bool is_frag;
if (is_frag && dp->drop_frags) {
consume_skb(skb);
- stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
+ stats_counter = &stats->n_frags;
goto out;
}
/* Look up flow. */
- flow_node = tbl_lookup(rcu_dereference(dp->table), &key, key_len,
- flow_hash(&key, key_len), flow_cmp);
- if (unlikely(!flow_node)) {
+ flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+ if (unlikely(!flow)) {
struct dp_upcall_info upcall;
upcall.cmd = OVS_PACKET_CMD_MISS;
upcall.key = &key;
- upcall.userdata = 0;
- upcall.sample_pool = 0;
- upcall.actions = NULL;
- upcall.actions_len = 0;
+ upcall.userdata = NULL;
+ upcall.pid = p->upcall_pid;
dp_upcall(dp, skb, &upcall);
- stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+ kfree_skb(skb);
+ stats_counter = &stats->n_missed;
goto out;
}
- OVS_CB(skb)->flow = flow_cast(flow_node);
+ OVS_CB(skb)->flow = flow;
}
- stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
+ stats_counter = &stats->n_hit;
flow_used(OVS_CB(skb)->flow, skb);
execute_actions(dp, skb);
out:
/* Update datapath statistics. */
- local_bh_disable();
- stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
write_seqcount_begin(&stats->seqlock);
- (*(u64 *)((u8 *)stats + stats_counter_off))++;
+ (*stats_counter)++;
write_seqcount_end(&stats->seqlock);
-
- local_bh_enable();
}
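
/* The write_seqcount_begin()/write_seqcount_end() pair above has per-CPU
 * readers elsewhere (e.g. get_dp_stats()); a minimal reader sketch, assuming
 * this same dp_stats_percpu layout:
 *
 *	struct dp_stats_percpu *s = per_cpu_ptr(dp->stats_percpu, cpu);
 *	unsigned int seq;
 *	u64 hits;
 *
 *	do {
 *		seq = read_seqcount_begin(&s->seqlock);
 *		hits = s->n_hit;
 *	} while (read_seqcount_retry(&s->seqlock, seq));
 */
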
static void copy_and_csum_skb(struct sk_buff *skb, void *to)
.maxattr = OVS_PACKET_ATTR_MAX
};
-/* Generic Netlink multicast groups for upcalls.
- *
- * We really want three unique multicast groups per datapath, but we can't even
- * get one, because genl_register_mc_group() takes genl_lock, which is also
- * held during Generic Netlink message processing, so trying to acquire
- * multicast groups during OVS_DP_NEW processing deadlocks. Instead, we
- * preallocate a few groups and use them round-robin for datapaths. Collision
- * isn't fatal--multicast listeners should check that the family is the one
- * that they want and discard others--but it wastes time and memory to receive
- * unwanted messages.
- */
-#define PACKET_N_MC_GROUPS 16
-static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
-
-static u32 packet_mc_group(struct datapath *dp, u8 cmd)
-{
- u32 idx;
- BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
-
- idx = jhash_2words(dp->dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
- return packet_mc_groups[idx].id;
-}
-
-static int packet_register_mc_groups(void)
-{
- int i;
-
- for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
- struct genl_multicast_group *group = &packet_mc_groups[i];
- int error;
-
- sprintf(group->name, "packet%d", i);
- error = genl_register_mc_group(&dp_packet_genl_family, group);
- if (error)
- return error;
- }
- return 0;
-}
-
-int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
+int dp_upcall(struct datapath *dp, struct sk_buff *skb,
+ const struct dp_upcall_info *upcall_info)
{
+ struct sk_buff *segs = NULL;
struct dp_stats_percpu *stats;
int err;
- WARN_ON_ONCE(skb_shared(skb));
+ if (upcall_info->pid == 0) {
+ err = -ENOTCONN;
+ goto err;
+ }
forward_ip_summed(skb, true);
/* Break apart GSO packets into their component pieces. Otherwise
* userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
if (skb_is_gso(skb)) {
- struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
+ segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
- if (IS_ERR(nskb)) {
- kfree_skb(skb);
- err = PTR_ERR(nskb);
+ if (IS_ERR(segs)) {
+ err = PTR_ERR(segs);
goto err;
}
- consume_skb(skb);
- skb = nskb;
+ skb = segs;
}
err = queue_userspace_packets(dp, skb, upcall_info);
+ if (segs) {
+ struct sk_buff *next;
+ /* Free the GSO segments. */
+ do {
+ next = segs->next;
+ kfree_skb(segs);
+ } while ((segs = next) != NULL);
+ }
+
if (err)
goto err;
return 0;
err:
- local_bh_disable();
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
write_seqcount_begin(&stats->seqlock);
stats->n_lost++;
write_seqcount_end(&stats->seqlock);
- local_bh_enable();
-
return err;
}
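
/* Ownership note: dp_upcall() no longer consumes the skb it is handed; the
 * caller now frees it (see the kfree_skb() added in
 * dp_process_received_packet() above). Only the GSO segment list allocated
 * here is freed locally. */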
* packet.
*/
static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
- u32 group = packet_mc_group(dp, upcall_info->cmd);
- struct sk_buff *nskb;
- int err;
+ int dp_ifindex;
+
+ dp_ifindex = get_dpifindex(dp);
+ if (!dp_ifindex)
+ return -ENODEV;
do {
struct ovs_header *upcall;
struct sk_buff *user_skb; /* to be queued to userspace */
struct nlattr *nla;
unsigned int len;
-
- nskb = skb->next;
- skb->next = NULL;
+ int err;
err = vlan_deaccel_tag(skb);
if (unlikely(err))
- goto err_kfree_skbs;
+ return err;
if (nla_attr_size(skb->len) > USHRT_MAX)
- goto err_kfree_skbs;
+ return -EFBIG;
len = sizeof(struct ovs_header);
len += nla_total_size(skb->len);
len += nla_total_size(FLOW_BUFSIZE);
- if (upcall_info->userdata)
+ if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
len += nla_total_size(8);
- if (upcall_info->sample_pool)
- len += nla_total_size(4);
- if (upcall_info->actions_len)
- len += nla_total_size(upcall_info->actions_len);
user_skb = genlmsg_new(len, GFP_ATOMIC);
- if (!user_skb) {
- netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
- goto err_kfree_skbs;
- }
+ if (!user_skb)
+ return -ENOMEM;
- upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
- upcall->dp_ifindex = dp->dp_ifindex;
+ upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
+ 0, upcall_info->cmd);
+ upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
flow_to_nlattrs(upcall_info->key, user_skb);
nla_nest_end(user_skb, nla);
if (upcall_info->userdata)
- nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA, upcall_info->userdata);
- if (upcall_info->sample_pool)
- nla_put_u32(user_skb, OVS_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
- if (upcall_info->actions_len) {
- const struct nlattr *actions = upcall_info->actions;
- u32 actions_len = upcall_info->actions_len;
-
- nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
- memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
- nla_nest_end(user_skb, nla);
- }
+ nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
+ nla_get_u64(upcall_info->userdata));
nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
if (skb->ip_summed == CHECKSUM_PARTIAL)
copy_and_csum_skb(skb, nla_data(nla));
else
skb_copy_bits(skb, 0, nla_data(nla), skb->len);
- err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
+ err = genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
if (err)
- goto err_kfree_skbs;
+ return err;
- consume_skb(skb);
- skb = nskb;
- } while (skb);
- return 0;
+ } while ((skb = skb->next));
-err_kfree_skbs:
- kfree_skb(skb);
- while ((skb = nskb) != NULL) {
- nskb = skb->next;
- kfree_skb(skb);
- }
- return err;
+ return 0;
}
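
/* Sketch of the message unicast to upcall_info->pid for each packet, as
 * built above (names are from the OVS Netlink uAPI used in this file):
 *
 *	struct ovs_header          .dp_ifindex = get_dpifindex(dp)
 *	OVS_PACKET_ATTR_KEY        nested flow key from flow_to_nlattrs()
 *	OVS_PACKET_ATTR_USERDATA   u64, present when userdata was supplied
 *	                           (OVS_PACKET_CMD_ACTION upcalls)
 *	OVS_PACKET_ATTR_PACKET     the raw frame, checksummed if needed
 */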
/* Called with genl_mutex. */
static int flush_flows(int dp_ifindex)
{
- struct tbl *old_table;
- struct tbl *new_table;
+ struct flow_table *old_table;
+ struct flow_table *new_table;
struct datapath *dp;
dp = get_dp(dp_ifindex);
return -ENODEV;
old_table = get_table_protected(dp);
- new_table = tbl_create(TBL_MIN_BUCKETS);
+ new_table = flow_tbl_alloc(TBL_MIN_BUCKETS);
if (!new_table)
return -ENOMEM;
rcu_assign_pointer(dp->table, new_table);
- tbl_deferred_destroy(old_table, flow_free_tbl);
+ flow_tbl_deferred_destroy(old_table);
+ return 0;
+}
+
+static int validate_actions(const struct nlattr *attr, int depth);
+
+static int validate_sample(const struct nlattr *attr, int depth)
+{
+ static const struct nla_policy sample_policy[OVS_SAMPLE_ATTR_MAX + 1] =
+ {
+ [OVS_SAMPLE_ATTR_PROBABILITY] = {.type = NLA_U32 },
+ [OVS_SAMPLE_ATTR_ACTIONS] = {.type = NLA_UNSPEC },
+ };
+ struct nlattr *a[OVS_SAMPLE_ATTR_MAX + 1];
+ int error;
+
+ error = nla_parse_nested(a, OVS_SAMPLE_ATTR_MAX, attr, sample_policy);
+ if (error)
+ return error;
+
+ if (!a[OVS_SAMPLE_ATTR_PROBABILITY])
+ return -EINVAL;
+ if (!a[OVS_SAMPLE_ATTR_ACTIONS])
+ return -EINVAL;
+
+ return validate_actions(a[OVS_SAMPLE_ATTR_ACTIONS], (depth + 1));
+}
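+
+/* A well-formed OVS_ACTION_ATTR_SAMPLE attribute therefore nests as:
+ *
+ *	OVS_ACTION_ATTR_SAMPLE
+ *	    OVS_SAMPLE_ATTR_PROBABILITY	u32, mandatory
+ *	    OVS_SAMPLE_ATTR_ACTIONS	mandatory nested action list, which
+ *					may itself contain samples up to
+ *					SAMPLE_ACTION_DEPTH levels deep
+ */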
+
+static int validate_userspace(const struct nlattr *attr)
+{
+ static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =
+ {
+ [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
+ [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
+ };
+ struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
+ int error;
+
+ error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr, userspace_policy);
+ if (error)
+ return error;
+
+ if (!a[OVS_USERSPACE_ATTR_PID] || !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
+ return -EINVAL;
return 0;
}
-static int validate_actions(const struct nlattr *attr)
+static int validate_actions(const struct nlattr *attr, int depth)
{
const struct nlattr *a;
- int rem;
+ int rem, err;
+
+ if (depth >= SAMPLE_ACTION_DEPTH)
+ return -EOVERFLOW;
nla_for_each_nested(a, attr, rem) {
+ /* Expected argument lengths, (u32)-1 for variable length. */
static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
[OVS_ACTION_ATTR_OUTPUT] = 4,
- [OVS_ACTION_ATTR_USERSPACE] = 8,
+ [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
[OVS_ACTION_ATTR_PUSH_VLAN] = 2,
[OVS_ACTION_ATTR_POP_VLAN] = 0,
[OVS_ACTION_ATTR_SET_DL_SRC] = ETH_ALEN,
[OVS_ACTION_ATTR_SET_TUNNEL] = 8,
[OVS_ACTION_ATTR_SET_PRIORITY] = 4,
[OVS_ACTION_ATTR_POP_PRIORITY] = 0,
+ [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
};
int type = nla_type(a);
- if (type > OVS_ACTION_ATTR_MAX || nla_len(a) != action_lens[type])
+ if (type > OVS_ACTION_ATTR_MAX ||
+ (action_lens[type] != nla_len(a) &&
+ action_lens[type] != (u32)-1))
return -EINVAL;
switch (type) {
case OVS_ACTION_ATTR_UNSPEC:
return -EINVAL;
- case OVS_ACTION_ATTR_USERSPACE:
case OVS_ACTION_ATTR_POP_VLAN:
case OVS_ACTION_ATTR_SET_DL_SRC:
case OVS_ACTION_ATTR_SET_DL_DST:
/* No validation needed. */
break;
+ case OVS_ACTION_ATTR_USERSPACE:
+ err = validate_userspace(a);
+ if (err)
+ return err;
+ break;
+
case OVS_ACTION_ATTR_OUTPUT:
if (nla_get_u32(a) >= DP_MAX_PORTS)
return -EINVAL;
return -EINVAL;
break;
+ case OVS_ACTION_ATTR_SAMPLE:
+ err = validate_sample(a, depth);
+ if (err)
+ return err;
+ break;
+
default:
return -EOPNOTSUPP;
}
flow->byte_count = 0;
}
-/* Called with genl_mutex. */
-static int expand_table(struct datapath *dp)
-{
- struct tbl *old_table = get_table_protected(dp);
- struct tbl *new_table;
-
- new_table = tbl_expand(old_table);
- if (IS_ERR(new_table)) {
- if (PTR_ERR(new_table) != -ENOSPC)
- return PTR_ERR(new_table);
- } else {
- rcu_assign_pointer(dp->table, new_table);
- tbl_deferred_destroy(old_table, NULL);
- }
-
- return 0;
-}
-
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
struct ovs_header *ovs_header = info->userhdr;
nla_len(a[OVS_PACKET_ATTR_PACKET]) < ETH_HLEN)
goto err;
- err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS]);
+ err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], 0);
if (err)
goto err;
if (err)
goto err_flow_put;
- flow->tbl_node.hash = flow_hash(&flow->key, key_len);
+ flow->hash = flow_hash(&flow->key, key_len);
acts = flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
err = PTR_ERR(acts);
err = -ENODEV;
if (!dp)
goto err_unlock;
+
+ if (flow->key.eth.in_port < DP_MAX_PORTS)
+ OVS_CB(packet)->vport = get_vport_protected(dp,
+ flow->key.eth.in_port);
+
+ local_bh_disable();
err = execute_actions(dp, packet);
+ local_bh_enable();
rcu_read_unlock();
flow_put(flow);
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
int i;
- struct tbl *table = get_table_protected(dp);
+ struct flow_table *table = get_table_protected(dp);
- stats->n_flows = tbl_count(table);
+ stats->n_flows = flow_tbl_count(table);
stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
for_each_possible_cpu(i) {
}
}
-/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports.
- * Called with RTNL lock.
- */
-int dp_min_mtu(const struct datapath *dp)
-{
- struct vport *p;
- int mtu = 0;
-
- ASSERT_RTNL();
-
- list_for_each_entry (p, &dp->port_list, node) {
- int dev_mtu;
-
- /* Skip any internal ports, since that's what we're trying to
- * set. */
- if (is_internal_vport(p))
- continue;
-
- dev_mtu = vport_get_mtu(p);
- if (!dev_mtu)
- continue;
- if (!mtu || dev_mtu < mtu)
- mtu = dev_mtu;
- }
-
- return mtu ? mtu : ETH_DATA_LEN;
-}
-
-/* Sets the MTU of all datapath devices to the minimum of the ports
- * Called with RTNL lock.
- */
-void set_internal_devs_mtu(const struct datapath *dp)
-{
- struct vport *p;
- int mtu;
-
- ASSERT_RTNL();
-
- mtu = dp_min_mtu(dp);
-
- list_for_each_entry (p, &dp->port_list, node) {
- if (is_internal_vport(p))
- vport_set_mtu(p, mtu);
- }
-}
-
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
if (!ovs_header)
return -EMSGSIZE;
- ovs_header->dp_ifindex = dp->dp_ifindex;
+ ovs_header->dp_ifindex = get_dpifindex(dp);
nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
if (!nla)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
- struct tbl_node *flow_node;
struct sw_flow_key key;
struct sw_flow *flow;
struct sk_buff *reply;
struct datapath *dp;
- struct tbl *table;
- u32 hash;
+ struct flow_table *table;
int error;
int key_len;
/* Validate actions. */
if (a[OVS_FLOW_ATTR_ACTIONS]) {
- error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS]);
+ error = validate_actions(a[OVS_FLOW_ATTR_ACTIONS], 0);
if (error)
goto error;
} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
if (!dp)
goto error;
- hash = flow_hash(&key, key_len);
table = get_table_protected(dp);
- flow_node = tbl_lookup(table, &key, key_len, hash, flow_cmp);
- if (!flow_node) {
+ flow = flow_tbl_lookup(table, &key, key_len);
+ if (!flow) {
struct sw_flow_actions *acts;
/* Bail out if we're not allowed to create a new flow. */
goto error;
/* Expand table, if necessary, to make room. */
- if (tbl_count(table) >= tbl_n_buckets(table)) {
- error = expand_table(dp);
- if (error)
- goto error;
- table = get_table_protected(dp);
+ if (flow_tbl_need_to_expand(table)) {
+ struct flow_table *new_table;
+
+ new_table = flow_tbl_expand(table);
+ if (!IS_ERR(new_table)) {
+ rcu_assign_pointer(dp->table, new_table);
+ flow_tbl_deferred_destroy(table);
+ table = get_table_protected(dp);
+ }
}
/* Allocate flow. */
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- error = tbl_insert(table, &flow->tbl_node, hash);
- if (error)
- goto error_free_flow;
+ flow->hash = flow_hash(&key, key_len);
+ flow_tbl_insert(table, flow);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
info->snd_seq, OVS_FLOW_CMD_NEW);
goto error;
/* Update actions. */
- flow = flow_cast(flow_node);
old_acts = rcu_dereference_protected(flow->sf_acts,
lockdep_genl_is_held());
if (a[OVS_FLOW_ATTR_ACTIONS] &&
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
struct sw_flow_key key;
- struct tbl_node *flow_node;
struct sk_buff *reply;
struct sw_flow *flow;
struct datapath *dp;
- struct tbl *table;
+ struct flow_table *table;
int err;
int key_len;
return -ENODEV;
table = get_table_protected(dp);
- flow_node = tbl_lookup(table, &key, key_len, flow_hash(&key, key_len),
- flow_cmp);
- if (!flow_node)
+ flow = flow_tbl_lookup(table, &key, key_len);
+ if (!flow)
return -ENOENT;
- flow = flow_cast(flow_node);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW);
if (IS_ERR(reply))
return PTR_ERR(reply);
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
struct sw_flow_key key;
- struct tbl_node *flow_node;
struct sk_buff *reply;
struct sw_flow *flow;
struct datapath *dp;
- struct tbl *table;
+ struct flow_table *table;
int err;
int key_len;
return -ENODEV;
table = get_table_protected(dp);
- flow_node = tbl_lookup(table, &key, key_len, flow_hash(&key, key_len),
- flow_cmp);
- if (!flow_node)
+ flow = flow_tbl_lookup(table, &key, key_len);
+ if (!flow)
return -ENOENT;
- flow = flow_cast(flow_node);
reply = ovs_flow_cmd_alloc_info(flow);
if (!reply)
return -ENOMEM;
- err = tbl_remove(table, flow_node);
- if (err) {
- kfree_skb(reply);
- return err;
- }
+ flow_tbl_remove(table, flow);
err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_pid,
info->snd_seq, 0, OVS_FLOW_CMD_DEL);
return -ENODEV;
for (;;) {
- struct tbl_node *flow_node;
struct sw_flow *flow;
u32 bucket, obj;
bucket = cb->args[0];
obj = cb->args[1];
- flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
- if (!flow_node)
+ flow = flow_tbl_next(get_table_protected(dp), &bucket, &obj);
+ if (!flow)
break;
- flow = flow_cast(flow_node);
if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
OVS_FLOW_CMD_NEW) < 0)
#ifdef HAVE_NLA_NUL_STRING
[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
#endif
+ [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
[OVS_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
- [OVS_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
};
static struct genl_family dp_datapath_genl_family = {
if (!ovs_header)
goto error;
- ovs_header->dp_ifindex = dp->dp_ifindex;
+ ovs_header->dp_ifindex = get_dpifindex(dp);
rcu_read_lock();
err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
NLA_PUT_U32(skb, OVS_DP_ATTR_IPV4_FRAGS,
dp->drop_frags ? OVS_DP_FRAG_DROP : OVS_DP_FRAG_ZERO);
- if (dp->sflow_probability)
- NLA_PUT_U32(skb, OVS_DP_ATTR_SAMPLING, dp->sflow_probability);
-
- nla = nla_nest_start(skb, OVS_DP_ATTR_MCGROUPS);
- if (!nla)
- goto nla_put_failure;
- NLA_PUT_U32(skb, OVS_PACKET_CMD_MISS, packet_mc_group(dp, OVS_PACKET_CMD_MISS));
- NLA_PUT_U32(skb, OVS_PACKET_CMD_ACTION, packet_mc_group(dp, OVS_PACKET_CMD_ACTION));
- NLA_PUT_U32(skb, OVS_PACKET_CMD_SAMPLE, packet_mc_group(dp, OVS_PACKET_CMD_SAMPLE));
- nla_nest_end(skb, nla);
-
return genlmsg_end(skb, ovs_header);
nla_put_failure:
{
if (a[OVS_DP_ATTR_IPV4_FRAGS])
dp->drop_frags = nla_get_u32(a[OVS_DP_ATTR_IPV4_FRAGS]) == OVS_DP_FRAG_DROP;
- if (a[OVS_DP_ATTR_SAMPLING])
- dp->sflow_probability = nla_get_u32(a[OVS_DP_ATTR_SAMPLING]);
}
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
/* Allocate table. */
err = -ENOMEM;
- rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
+ rcu_assign_pointer(dp->table, flow_tbl_alloc(TBL_MIN_BUCKETS));
if (!dp->table)
goto err_free_dp;
+ dp->drop_frags = 0;
+ dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+ if (!dp->stats_percpu) {
+ err = -ENOMEM;
+ goto err_destroy_table;
+ }
+
+ change_datapath(dp, a);
+
/* Set up our datapath device. */
parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
parms.type = OVS_VPORT_TYPE_INTERNAL;
parms.options = NULL;
parms.dp = dp;
parms.port_no = OVSP_LOCAL;
+ if (a[OVS_DP_ATTR_UPCALL_PID])
+ parms.upcall_pid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+ else
+ parms.upcall_pid = NETLINK_CB(skb).pid;
+
vport = new_vport(&parms);
if (IS_ERR(vport)) {
err = PTR_ERR(vport);
if (err == -EBUSY)
err = -EEXIST;
- goto err_destroy_table;
- }
- dp->dp_ifindex = vport_get_ifindex(vport);
-
- dp->drop_frags = 0;
- dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
- if (!dp->stats_percpu) {
- err = -ENOMEM;
- goto err_destroy_local_port;
+ goto err_destroy_percpu;
}
- change_datapath(dp, a);
-
reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
err = PTR_ERR(reply);
if (IS_ERR(reply))
err_destroy_local_port:
dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
+err_destroy_percpu:
+ free_percpu(dp->stats_percpu);
err_destroy_table:
- tbl_destroy(get_table_protected(dp), NULL);
+ flow_tbl_destroy(get_table_protected(dp));
err_free_dp:
kfree(dp);
err_put_module:
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
#ifdef HAVE_NLA_NUL_STRING
[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
- [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
- [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
- [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
+ [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
[OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
#else
- [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct rtnl_link_stats64) },
+ [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
[OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
#endif
- [OVS_VPORT_ATTR_MTU] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
{
struct ovs_header *ovs_header;
struct nlattr *nla;
- int ifindex;
- int mtu;
int err;
ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
if (!ovs_header)
return -EMSGSIZE;
- ovs_header->dp_ifindex = vport->dp->dp_ifindex;
+ ovs_header->dp_ifindex = get_dpifindex(vport->dp);
NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport));
+ NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
- nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
+ nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats));
if (!nla)
goto nla_put_failure;
- if (vport_get_stats(vport, nla_data(nla)))
- __skb_trim(skb, skb->len - nla->nla_len);
- NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
+ vport_get_stats(vport, nla_data(nla));
- mtu = vport_get_mtu(vport);
- if (mtu)
- NLA_PUT_U32(skb, OVS_VPORT_ATTR_MTU, mtu);
+ NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
err = vport_get_options(vport, skb);
if (err == -EMSGSIZE)
goto error;
- ifindex = vport_get_ifindex(vport);
- if (ifindex > 0)
- NLA_PUT_U32(skb, OVS_VPORT_ATTR_IFINDEX, ifindex);
-
return genlmsg_end(skb, ovs_header);
nla_put_failure:
static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
int err = 0;
+
if (a[OVS_VPORT_ATTR_STATS])
- err = vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
- if (!err && a[OVS_VPORT_ATTR_ADDRESS])
+ vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
+
+ if (a[OVS_VPORT_ATTR_ADDRESS])
err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
- if (!err && a[OVS_VPORT_ATTR_MTU])
- err = vport_set_mtu(vport, nla_get_u32(a[OVS_VPORT_ATTR_MTU]));
+
return err;
}
parms.options = a[OVS_VPORT_ATTR_OPTIONS];
parms.dp = dp;
parms.port_no = port_no;
+ if (a[OVS_VPORT_ATTR_UPCALL_PID])
+ parms.upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+ else
+ parms.upcall_pid = NETLINK_CB(skb).pid;
vport = new_vport(&parms);
err = PTR_ERR(vport);
if (IS_ERR(vport))
goto exit_unlock;
- set_internal_devs_mtu(dp);
dp_sysfs_add_if(vport);
err = change_vport(vport, a);
err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
if (!err)
err = change_vport(vport, a);
+ if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
+ vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
OVS_VPORT_CMD_NEW);
if (IS_ERR(reply))
goto exit_unlock;
- err = dp_detach_port(vport);
+ dp_detach_port(vport);
genl_notify(reply, genl_info_net(info), info->snd_pid,
dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL);
}
}
- err = packet_register_mc_groups();
- if (err)
- goto error;
return 0;
error:
printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
- err = flow_init();
+ err = tnl_init();
if (err)
goto error;
+ err = flow_init();
+ if (err)
+ goto error_tnl_exit;
+
err = vport_init();
if (err)
goto error_flow_exit;
vport_exit();
error_flow_exit:
flow_exit();
+error_tnl_exit:
+ tnl_exit();
error:
return err;
}
unregister_netdevice_notifier(&dp_device_notifier);
vport_exit();
flow_exit();
+ tnl_exit();
}
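
/* Teardown mirrors dp_init() in reverse: tnl_init() now runs first on the
 * way up, so tnl_exit() runs last on the way down. */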
module_init(dp_init);