+ return 0;
+}
+
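+/* Executes a set of actions, supplied by userspace, on a packet that
+ * userspace also supplies, without consulting the flow table. */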
+static int odp_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
+{
+ struct odp_header *odp_header = info->userhdr;
+ struct nlattr **a = info->attrs;
+ struct sk_buff *packet;
+ struct sw_flow_key key;
+ struct datapath *dp;
+ struct ethhdr *eth;
+ bool is_frag;
+ int len;
+ int err;
+
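+ /* A packet and a list of actions are both required, and the packet
+  * must be at least as long as an Ethernet header. */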
+ err = -EINVAL;
+ if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_ACTIONS] ||
+ nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
+ goto err;
+
+ err = validate_actions(a[ODP_PACKET_ATTR_ACTIONS]);
+ if (err)
+ goto err;
+
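+ /* Copy the packet out of its attribute into a freshly allocated skb,
+  * reserving NET_IP_ALIGN bytes of headroom so that the IP header
+  * ends up aligned. */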
+ len = nla_len(a[ODP_PACKET_ATTR_PACKET]);
+ packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
+ err = -ENOMEM;
+ if (!packet)
+ goto err;
+ skb_reserve(packet, NET_IP_ALIGN);
+
+ memcpy(__skb_put(packet, len), nla_data(a[ODP_PACKET_ATTR_PACKET]), len);
+
+ skb_reset_mac_header(packet);
+ eth = eth_hdr(packet);
+
+ /* Normally, setting the skb 'protocol' field would be handled by a
+  * call to eth_type_trans(), but it assumes there's a sending
+  * device, which we may not have.  An h_proto of 1536 (0x600) or
+  * greater is an EtherType; anything smaller is an 802.3 frame
+  * length. */
+ if (ntohs(eth->h_proto) >= 1536)
+ packet->protocol = eth->h_proto;
+ else
+ packet->protocol = htons(ETH_P_802_2);
+
+ /* Initialize OVS_CB (it came from Netlink so might not be zeroed). */
+ memset(OVS_CB(packet), 0, sizeof(struct ovs_skb_cb));
+
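+ /* Build the flow key from the packet headers; the in_port of -1
+  * indicates that the packet has no ingress port. */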
+ err = flow_extract(packet, -1, &key, &is_frag);
+ if (err)
+ goto err_kfree_skb;
+
+ rcu_read_lock();
+ dp = get_dp(odp_header->dp_ifindex);
+ err = -ENODEV;
+ if (!dp)
+ goto err_unlock;
+ err = execute_actions(dp, packet, &key,
+ nla_data(a[ODP_PACKET_ATTR_ACTIONS]),
+ nla_len(a[ODP_PACKET_ATTR_ACTIONS]));
+ rcu_read_unlock();
+ return err;
+
+err_unlock:
+ rcu_read_unlock();
+err_kfree_skb:
+ kfree_skb(packet);
+err:
+ return err;
+}
+
+static const struct nla_policy packet_policy[ODP_PACKET_ATTR_MAX + 1] = {
+ [ODP_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
+ [ODP_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+};
+
+static struct genl_ops dp_packet_genl_ops[] = {
+ { .cmd = ODP_PACKET_CMD_EXECUTE,
+ .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
+ .policy = packet_policy,
+ .doit = odp_packet_cmd_execute
+ }
+};
+
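+/* Sums the per-CPU statistics for 'dp' into 'stats'. */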
+static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
+{
+ int i;
+
+ stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
+ for_each_possible_cpu(i) {
+ const struct dp_stats_percpu *percpu_stats;
+ struct dp_stats_percpu local_stats;
+ unsigned int seqcount;
+
+ percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
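+ /* Read this CPU's counters under the seqcount so that a torn read
+  * is retried if a writer updated them concurrently. */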
+ do {
+ seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+ local_stats = *percpu_stats;
+ } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+ stats->n_frags += local_stats.n_frags;
+ stats->n_hit += local_stats.n_hit;
+ stats->n_missed += local_stats.n_missed;
+ stats->n_lost += local_stats.n_lost;
+ }
+}
+
+/* Returns the MTU for the datapath's internal (pseudo) devices: the
+ * minimum MTU of the datapath's non-internal ports, or ETH_DATA_LEN
+ * if there are none.  Called with RTNL lock.
+ */
+int dp_min_mtu(const struct datapath *dp)
+{
+ struct vport *p;
+ int mtu = 0;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(p, &dp->port_list, node) {
+ int dev_mtu;
+
+ /* Skip any internal ports, since that's what we're trying to
+ * set. */
+ if (is_internal_vport(p))
+ continue;
+
+ dev_mtu = vport_get_mtu(p);
+ if (!dev_mtu)
+ continue;
+ if (!mtu || dev_mtu < mtu)
+ mtu = dev_mtu;
+ }
+
+ return mtu ? mtu : ETH_DATA_LEN;
+}
+
+/* Sets the MTU of all of the datapath's internal devices to the
+ * minimum MTU of its non-internal ports.  Called with RTNL lock.
+ */
+void set_internal_devs_mtu(const struct datapath *dp)
+{
+ struct vport *p;
+ int mtu;
+
+ ASSERT_RTNL();
+
+ mtu = dp_min_mtu(dp);
+
+ list_for_each_entry(p, &dp->port_list, node) {
+ if (is_internal_vport(p))
+ vport_set_mtu(p, mtu);
+ }
+}
+
+static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
+ [ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+ [ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
+ [ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+};
+
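+/* Generic Netlink family used for flow commands on a datapath. */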
+static struct genl_family dp_flow_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = sizeof(struct odp_header),
+ .name = ODP_FLOW_FAMILY,
+ .version = 1,
+ .maxattr = ODP_FLOW_ATTR_MAX
+};
+
+static struct genl_multicast_group dp_flow_multicast_group = {
+ .name = ODP_FLOW_MCGROUP
+};
+
+/* Serializes 'flow' into a Netlink message within 'skb'.  Called with
+ * genl_lock. */
+static int odp_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
+ struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
+{
+ const int skb_orig_len = skb->len;
+ const struct sw_flow_actions *sf_acts;
+ struct odp_flow_stats stats;
+ struct odp_header *odp_header;
+ struct nlattr *nla;
+ unsigned long used;
+ u8 tcp_flags;
+ int err;
+
+ sf_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_genl_is_held());
+
+ odp_header = genlmsg_put(skb, pid, seq, &dp_flow_genl_family, flags, cmd);
+ if (!odp_header)
+ return -EMSGSIZE;
+
+ odp_header->dp_ifindex = dp->dp_ifindex;
+
+ nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
+ if (!nla)
+ goto nla_put_failure;
+ err = flow_to_nlattrs(&flow->key, skb);
+ if (err)
+ goto error;
+ nla_nest_end(skb, nla);
+
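+ /* Take a consistent snapshot of the flow's statistics under its
+  * lock. */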
+ spin_lock_bh(&flow->lock);
+ used = flow->used;
+ stats.n_packets = flow->packet_count;
+ stats.n_bytes = flow->byte_count;
+ tcp_flags = flow->tcp_flags;
+ spin_unlock_bh(&flow->lock);
+
+ if (used)
+ NLA_PUT_U64(skb, ODP_FLOW_ATTR_USED, flow_used_time(used));
+
+ if (stats.n_packets)
+ NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);
+
+ if (tcp_flags)
+ NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+
+ /* If ODP_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
+ * this is the first flow to be dumped into 'skb'. This is unusual for
+ * Netlink but individual action lists can be longer than
+ * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
+ * The userspace caller can always fetch the actions separately if it
+ * really wants them. (Most userspace callers in fact don't care.)
+ *
+ * This can only fail for dump operations because the skb is always
+ * properly sized for single flows.
+ */
+ err = nla_put(skb, ODP_FLOW_ATTR_ACTIONS, sf_acts->actions_len,
+ sf_acts->actions);
+ if (err < 0 && skb_orig_len)
+ goto error;
+
+ return genlmsg_end(skb, odp_header);
+
+nla_put_failure:
+ err = -EMSGSIZE;
+error:
+ genlmsg_cancel(skb, odp_header);
+ return err;
+}
+
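+/* Allocates an skb sized to hold a worst-case Netlink message
+ * describing 'flow'. */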
+static struct sk_buff *odp_flow_cmd_alloc_info(struct sw_flow *flow)
+{
+ const struct sw_flow_actions *sf_acts;
+ int len;
+
+ sf_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_genl_is_held());
+
+ len = nla_total_size(FLOW_BUFSIZE); /* ODP_FLOW_ATTR_KEY */
+ len += nla_total_size(sf_acts->actions_len); /* ODP_FLOW_ATTR_ACTIONS */
+ len += nla_total_size(sizeof(struct odp_flow_stats)); /* ODP_FLOW_ATTR_STATS */
+ len += nla_total_size(1); /* ODP_FLOW_ATTR_TCP_FLAGS */
+ len += nla_total_size(8); /* ODP_FLOW_ATTR_USED */
+ return genlmsg_new(NLMSG_ALIGN(sizeof(struct odp_header)) + len, GFP_KERNEL);
+}
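+/* Builds and returns a new Netlink message describing 'flow', or an
+ * ERR_PTR on allocation failure. */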
+
+static struct sk_buff *odp_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
+ u32 pid, u32 seq, u8 cmd)
+{
+ struct sk_buff *skb;
+ int retval;
+
+ skb = odp_flow_cmd_alloc_info(flow);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ retval = odp_flow_cmd_fill_info(flow, dp, skb, pid, seq, 0, cmd);
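+ /* odp_flow_cmd_alloc_info() sized the skb to fit a single flow, so
+  * filling it in cannot fail. */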
+ BUG_ON(retval < 0);
+ return skb;