return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
}
+/* Returns the ifindex of @dp's local port (OVSP_LOCAL), or 0 if the local
+ * port is not present.  The vport lookup is done under rcu_read_lock(), so
+ * callers do not need to hold the RTNL lock.
+ *
+ * NOTE(review): callers treat 0 as "datapath has no netdev" — presumably a
+ * valid ifindex is always nonzero; confirm against vport_get_ifindex(). */
+static int get_dpifindex(struct datapath *dp)
+{
+ struct vport *local;
+ int ifindex;
+
+ rcu_read_lock();
+
+ local = get_vport_protected(dp, OVSP_LOCAL);
+ if (local)
+ ifindex = vport_get_ifindex(local);
+ else
+ ifindex = 0;
+
+ rcu_read_unlock();
+
+ return ifindex;
+}
+
static inline size_t br_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
hdr->ifi_change = 0;
NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
- NLA_PUT_U32(skb, IFLA_MASTER,
- vport_get_ifindex(get_vport_protected(dp, OVSP_LOCAL)));
+ NLA_PUT_U32(skb, IFLA_MASTER, get_dpifindex(dp));
NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
NLA_PUT_U8(skb, IFLA_OPERSTATE,
#define PACKET_N_MC_GROUPS 16
static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];
-static u32 packet_mc_group(struct datapath *dp, u8 cmd)
+/* Hashes (dp_ifindex, cmd) into one of the PACKET_N_MC_GROUPS multicast
+ * groups and returns that group's genetlink id.  Takes the ifindex rather
+ * than a struct datapath so it no longer depends on a cached dp_ifindex. */
+static u32 packet_mc_group(int dp_ifindex, u8 cmd)
{
u32 idx;
BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
-	idx = jhash_2words(dp->dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
+	idx = jhash_2words(dp_ifindex, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
return packet_mc_groups[idx].id;
}
struct dp_stats_percpu *stats;
int err;
- WARN_ON_ONCE(skb_shared(skb));
-
forward_ip_summed(skb, true);
/* Break apart GSO packets into their component pieces. Otherwise
static int queue_userspace_packets(struct datapath *dp, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
- u32 group = packet_mc_group(dp, upcall_info->cmd);
+ int dp_ifindex;
+ u32 group;
struct sk_buff *nskb;
int err;
+ dp_ifindex = get_dpifindex(dp);
+ if (!dp_ifindex) {
+ err = -ENODEV;
+ nskb = skb->next;
+ goto err_kfree_skbs;
+ }
+
+ group = packet_mc_group(dp_ifindex, upcall_info->cmd);
+
do {
struct ovs_header *upcall;
struct sk_buff *user_skb; /* to be queued to userspace */
if (unlikely(err))
goto err_kfree_skbs;
- if (nla_attr_size(skb->len) > USHRT_MAX)
+ if (nla_attr_size(skb->len) > USHRT_MAX) {
+ err = -EFBIG;
goto err_kfree_skbs;
+ }
len = sizeof(struct ovs_header);
len += nla_total_size(skb->len);
user_skb = genlmsg_new(len, GFP_ATOMIC);
if (!user_skb) {
netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
+ err = -ENOMEM;
goto err_kfree_skbs;
}
upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
- upcall->dp_ifindex = dp->dp_ifindex;
+ upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
flow_to_nlattrs(upcall_info->key, user_skb);
err = -ENODEV;
if (!dp)
goto err_unlock;
+
+ if (flow->key.eth.in_port < DP_MAX_PORTS)
+ OVS_CB(packet)->vport = get_vport_protected(dp,
+ flow->key.eth.in_port);
+
err = execute_actions(dp, packet);
rcu_read_unlock();
}
}
-/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports.
- * Called with RTNL lock.
- */
-int dp_min_mtu(const struct datapath *dp)
-{
- struct vport *p;
- int mtu = 0;
-
- ASSERT_RTNL();
-
- list_for_each_entry (p, &dp->port_list, node) {
- int dev_mtu;
-
- /* Skip any internal ports, since that's what we're trying to
- * set. */
- if (is_internal_vport(p))
- continue;
-
- dev_mtu = vport_get_mtu(p);
- if (!dev_mtu)
- continue;
- if (!mtu || dev_mtu < mtu)
- mtu = dev_mtu;
- }
-
- return mtu ? mtu : ETH_DATA_LEN;
-}
-
-/* Sets the MTU of all datapath devices to the minimum of the ports
- * Called with RTNL lock.
- */
-void set_internal_devs_mtu(const struct datapath *dp)
-{
- struct vport *p;
- int mtu;
-
- ASSERT_RTNL();
-
- mtu = dp_min_mtu(dp);
-
- list_for_each_entry (p, &dp->port_list, node) {
- if (is_internal_vport(p))
- vport_set_mtu(p, mtu);
- }
-}
-
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
[OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
[OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
if (!ovs_header)
return -EMSGSIZE;
- ovs_header->dp_ifindex = dp->dp_ifindex;
+ ovs_header->dp_ifindex = get_dpifindex(dp);
nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
if (!nla)
struct ovs_header *ovs_header;
struct nlattr *nla;
int err;
+ int dp_ifindex = get_dpifindex(dp);
ovs_header = genlmsg_put(skb, pid, seq, &dp_datapath_genl_family,
flags, cmd);
if (!ovs_header)
goto error;
- ovs_header->dp_ifindex = dp->dp_ifindex;
+ ovs_header->dp_ifindex = dp_ifindex;
rcu_read_lock();
err = nla_put_string(skb, OVS_DP_ATTR_NAME, dp_name(dp));
nla = nla_nest_start(skb, OVS_DP_ATTR_MCGROUPS);
if (!nla)
goto nla_put_failure;
- NLA_PUT_U32(skb, OVS_PACKET_CMD_MISS, packet_mc_group(dp, OVS_PACKET_CMD_MISS));
- NLA_PUT_U32(skb, OVS_PACKET_CMD_ACTION, packet_mc_group(dp, OVS_PACKET_CMD_ACTION));
- NLA_PUT_U32(skb, OVS_PACKET_CMD_SAMPLE, packet_mc_group(dp, OVS_PACKET_CMD_SAMPLE));
+ NLA_PUT_U32(skb, OVS_PACKET_CMD_MISS,
+ packet_mc_group(dp_ifindex, OVS_PACKET_CMD_MISS));
+ NLA_PUT_U32(skb, OVS_PACKET_CMD_ACTION,
+ packet_mc_group(dp_ifindex, OVS_PACKET_CMD_ACTION));
+ NLA_PUT_U32(skb, OVS_PACKET_CMD_SAMPLE,
+ packet_mc_group(dp_ifindex, OVS_PACKET_CMD_SAMPLE));
nla_nest_end(skb, nla);
return genlmsg_end(skb, ovs_header);
if (!dp->table)
goto err_free_dp;
+ dp->drop_frags = 0;
+ dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+ if (!dp->stats_percpu) {
+ err = -ENOMEM;
+ goto err_destroy_table;
+ }
+
+ change_datapath(dp, a);
+
/* Set up our datapath device. */
parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
parms.type = OVS_VPORT_TYPE_INTERNAL;
if (err == -EBUSY)
err = -EEXIST;
- goto err_destroy_table;
+ goto err_destroy_percpu;
}
- dp->dp_ifindex = vport_get_ifindex(vport);
-
- dp->drop_frags = 0;
- dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
- if (!dp->stats_percpu) {
- err = -ENOMEM;
- goto err_destroy_local_port;
- }
-
- change_datapath(dp, a);
reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
err = PTR_ERR(reply);
err_destroy_local_port:
dp_detach_port(get_vport_protected(dp, OVSP_LOCAL));
+err_destroy_percpu:
+ free_percpu(dp->stats_percpu);
err_destroy_table:
flow_tbl_destroy(get_table_protected(dp));
err_free_dp:
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
#ifdef HAVE_NLA_NUL_STRING
[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
- [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
- [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
- [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
+ [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
[OVS_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
#else
- [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct rtnl_link_stats64) },
+ [OVS_VPORT_ATTR_STATS] = { .minlen = sizeof(struct ovs_vport_stats) },
[OVS_VPORT_ATTR_ADDRESS] = { .minlen = ETH_ALEN },
#endif
- [OVS_VPORT_ATTR_MTU] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
struct ovs_header *ovs_header;
struct nlattr *nla;
int ifindex;
- int mtu;
int err;
ovs_header = genlmsg_put(skb, pid, seq, &dp_vport_genl_family,
if (!ovs_header)
return -EMSGSIZE;
- ovs_header->dp_ifindex = vport->dp->dp_ifindex;
+ ovs_header->dp_ifindex = get_dpifindex(vport->dp);
NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport));
- nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
+ nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats));
if (!nla)
goto nla_put_failure;
- if (vport_get_stats(vport, nla_data(nla)))
- __skb_trim(skb, skb->len - nla->nla_len);
- NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
+ vport_get_stats(vport, nla_data(nla));
- mtu = vport_get_mtu(vport);
- if (mtu)
- NLA_PUT_U32(skb, OVS_VPORT_ATTR_MTU, mtu);
+ NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
err = vport_get_options(vport, skb);
if (err == -EMSGSIZE)
static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
int err = 0;
+
if (a[OVS_VPORT_ATTR_STATS])
- err = vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
- if (!err && a[OVS_VPORT_ATTR_ADDRESS])
+ vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
+
+ if (a[OVS_VPORT_ATTR_ADDRESS])
err = vport_set_addr(vport, nla_data(a[OVS_VPORT_ATTR_ADDRESS]));
- if (!err && a[OVS_VPORT_ATTR_MTU])
- err = vport_set_mtu(vport, nla_get_u32(a[OVS_VPORT_ATTR_MTU]));
+
return err;
}
if (IS_ERR(vport))
goto exit_unlock;
- set_internal_devs_mtu(dp);
dp_sysfs_add_if(vport);
err = change_vport(vport, a);