+
+ if (ntohs(flow->dl_type) < ETH_TYPE_MIN) {
+ /* For backwards compatibility with kernels that don't support
+ * wildcarding, the following convention is used to encode the
+ * OVS_KEY_ATTR_ETHERTYPE for key and mask:
+ *
+ * key mask matches
+ * -------- -------- -------
+ * >0x5ff 0xffff Specified Ethernet II Ethertype.
+ * >0x5ff 0 Any Ethernet II or non-Ethernet II frame.
+ * <none> 0xffff Any non-Ethernet II frame (except valid
+ * 802.3 SNAP packet with valid eth_type).
+ */
+ if (is_mask) {
+ nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, OVS_BE16_MAX);
+ }
+ goto unencap;
+ }
+
+ nl_msg_put_be16(buf, OVS_KEY_ATTR_ETHERTYPE, data->dl_type);
+
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ struct ovs_key_ipv4 *ipv4_key;
+
+ ipv4_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV4,
+ sizeof *ipv4_key);
+ ipv4_key->ipv4_src = data->nw_src;
+ ipv4_key->ipv4_dst = data->nw_dst;
+ ipv4_key->ipv4_proto = data->nw_proto;
+ ipv4_key->ipv4_tos = data->nw_tos;
+ ipv4_key->ipv4_ttl = data->nw_ttl;
+ ipv4_key->ipv4_frag = is_mask ? ovs_to_odp_frag_mask(data->nw_frag)
+ : ovs_to_odp_frag(data->nw_frag);
+ } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ struct ovs_key_ipv6 *ipv6_key;
+
+ ipv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV6,
+ sizeof *ipv6_key);
+ memcpy(ipv6_key->ipv6_src, &data->ipv6_src, sizeof ipv6_key->ipv6_src);
+ memcpy(ipv6_key->ipv6_dst, &data->ipv6_dst, sizeof ipv6_key->ipv6_dst);
+ ipv6_key->ipv6_label = data->ipv6_label;
+ ipv6_key->ipv6_proto = data->nw_proto;
+ ipv6_key->ipv6_tclass = data->nw_tos;
+ ipv6_key->ipv6_hlimit = data->nw_ttl;
+ ipv6_key->ipv6_frag = is_mask ? ovs_to_odp_frag_mask(data->nw_frag)
+ : ovs_to_odp_frag(data->nw_frag);
+ } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
+ flow->dl_type == htons(ETH_TYPE_RARP)) {
+ struct ovs_key_arp *arp_key;
+
+ arp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ARP,
+ sizeof *arp_key);
+ memset(arp_key, 0, sizeof *arp_key);
+ arp_key->arp_sip = data->nw_src;
+ arp_key->arp_tip = data->nw_dst;
+ arp_key->arp_op = htons(data->nw_proto);
+ memcpy(arp_key->arp_sha, data->arp_sha, ETH_ADDR_LEN);
+ memcpy(arp_key->arp_tha, data->arp_tha, ETH_ADDR_LEN);
+ } else if (eth_type_mpls(flow->dl_type)) {
+ struct ovs_key_mpls *mpls_key;
+
+ mpls_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_MPLS,
+ sizeof *mpls_key);
+ mpls_key->mpls_lse = data->mpls_lse;
+ }
+
+ if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
+ if (flow->nw_proto == IPPROTO_TCP) {
+ struct ovs_key_tcp *tcp_key;
+
+ tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
+ sizeof *tcp_key);
+ tcp_key->tcp_src = data->tp_src;
+ tcp_key->tcp_dst = data->tp_dst;
+
+ if (data->tcp_flags) {
+ nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
+ }
+ } else if (flow->nw_proto == IPPROTO_UDP) {
+ struct ovs_key_udp *udp_key;
+
+ udp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_UDP,
+ sizeof *udp_key);
+ udp_key->udp_src = data->tp_src;
+ udp_key->udp_dst = data->tp_dst;
+ } else if (flow->nw_proto == IPPROTO_SCTP) {
+ struct ovs_key_sctp *sctp_key;
+
+ sctp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_SCTP,
+ sizeof *sctp_key);
+ sctp_key->sctp_src = data->tp_src;
+ sctp_key->sctp_dst = data->tp_dst;
+ } else if (flow->dl_type == htons(ETH_TYPE_IP)
+ && flow->nw_proto == IPPROTO_ICMP) {
+ struct ovs_key_icmp *icmp_key;
+
+ icmp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMP,
+ sizeof *icmp_key);
+ icmp_key->icmp_type = ntohs(data->tp_src);
+ icmp_key->icmp_code = ntohs(data->tp_dst);
+ } else if (flow->dl_type == htons(ETH_TYPE_IPV6)
+ && flow->nw_proto == IPPROTO_ICMPV6) {
+ struct ovs_key_icmpv6 *icmpv6_key;
+
+ icmpv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ICMPV6,
+ sizeof *icmpv6_key);
+ icmpv6_key->icmpv6_type = ntohs(data->tp_src);
+ icmpv6_key->icmpv6_code = ntohs(data->tp_dst);
+
+ if (flow->tp_dst == htons(0) &&
+ (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
+ flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) &&
+ (!is_mask || (data->tp_src == htons(0xffff) &&
+ data->tp_dst == htons(0xffff)))) {
+
+ struct ovs_key_nd *nd_key;
+
+ nd_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ND,
+ sizeof *nd_key);
+ memcpy(nd_key->nd_target, &data->nd_target,
+ sizeof nd_key->nd_target);
+ memcpy(nd_key->nd_sll, data->arp_sha, ETH_ADDR_LEN);
+ memcpy(nd_key->nd_tll, data->arp_tha, ETH_ADDR_LEN);
+ }
+ }
+ }
+
+unencap:
+ if (encap) {
+ nl_msg_end_nested(buf, encap);
+ }
+}
+
/* Appends a representation of 'flow' as OVS_KEY_ATTR_* attributes to 'buf'.
 * 'flow->in_port' is ignored (since it is likely to be an OpenFlow port
 * number rather than a datapath port number).  Instead, if 'odp_in_port'
 * is anything other than ODPP_NONE, it is included in 'buf' as the input
 * port.
 *
 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
 * capable of being expanded to allow for that much space. */
void
odp_flow_key_from_flow(struct ofpbuf *buf, const struct flow *flow,
                       odp_port_t odp_in_port)
{
    /* Passing 'flow' as both the data and the template tells the common
     * serializer that this is an exact flow key, not a mask (compare
     * odp_flow_key_from_mask()). */
    odp_flow_key_from_flow__(buf, flow, flow, odp_in_port);
}
+
/* Appends a representation of 'mask' as OVS_KEY_ATTR_* attributes to
 * 'buf'.  'flow' is used as a template to determine how to interpret
 * 'mask'.  For example, the 'dl_type' of 'mask' describes the mask, but
 * it doesn't indicate whether the other fields should be interpreted as
 * ARP, IPv4, IPv6, etc.
 *
 * 'buf' must have at least ODPUTIL_FLOW_KEY_BYTES bytes of space, or be
 * capable of being expanded to allow for that much space. */
void
odp_flow_key_from_mask(struct ofpbuf *buf, const struct flow *mask,
                       const struct flow *flow, uint32_t odp_in_port_mask)
{
    /* The in_port mask arrives as a raw uint32_t; convert it to odp_port_t
     * for the common serializer. */
    odp_flow_key_from_flow__(buf, mask, flow, u32_to_odp(odp_in_port_mask));
}
+
/* Returns a hash of the 'key_len' bytes of Netlink attributes in 'key'. */
uint32_t
odp_flow_key_hash(const struct nlattr *key, size_t key_len)
{
    /* hash_words() consumes 32-bit words; the assertion proves that Netlink
     * attribute alignment keeps the buffer a whole number of words long, so
     * the truncating division below cannot drop meaningful bytes. */
    BUILD_ASSERT_DECL(!(NLA_ALIGNTO % sizeof(uint32_t)));
    return hash_words(ALIGNED_CAST(const uint32_t *, key),
                      key_len / sizeof(uint32_t), 0);
}
+
+static void
+log_odp_key_attributes(struct vlog_rate_limit *rl, const char *title,
+ uint64_t attrs, int out_of_range_attr,
+ const struct nlattr *key, size_t key_len)
+{
+ struct ds s;
+ int i;
+
+ if (VLOG_DROP_DBG(rl)) {
+ return;
+ }
+
+ ds_init(&s);
+ for (i = 0; i < 64; i++) {
+ if (attrs & (UINT64_C(1) << i)) {
+ char namebuf[OVS_KEY_ATTR_BUFSIZE];
+
+ ds_put_format(&s, " %s",
+ ovs_key_attr_to_string(i, namebuf, sizeof namebuf));
+ }
+ }
+ if (out_of_range_attr) {
+ ds_put_format(&s, " %d (and possibly others)", out_of_range_attr);
+ }
+
+ ds_put_cstr(&s, ": ");
+ odp_flow_key_format(key, key_len, &s);
+
+ VLOG_DBG("%s:%s", title, ds_cstr(&s));
+ ds_destroy(&s);
+}
+
+static bool
+odp_to_ovs_frag(uint8_t odp_frag, struct flow *flow)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+
+ if (odp_frag > OVS_FRAG_TYPE_LATER) {
+ VLOG_ERR_RL(&rl, "invalid frag %"PRIu8" in flow key", odp_frag);
+ return false;
+ }
+
+ if (odp_frag != OVS_FRAG_TYPE_NONE) {
+ flow->nw_frag |= FLOW_NW_FRAG_ANY;
+ if (odp_frag == OVS_FRAG_TYPE_LATER) {
+ flow->nw_frag |= FLOW_NW_FRAG_LATER;
+ }
+ }
+ return true;
+}
+
/* Parses the 'key_len' bytes of Netlink attributes in 'key' into 'attrs',
 * which must have room for OVS_KEY_ATTR_MAX + 1 entries.  Sets a bit in
 * '*present_attrsp' for each in-range attribute type found.  Types greater
 * than OVS_KEY_ATTR_MAX do not fit in the bitmap, so the last such type
 * encountered is reported through '*out_of_range_attrp' (0 if none).
 *
 * Returns false (after rate-limited logging) on malformed input: a wrong
 * attribute length, a duplicate in-range attribute, or trailing bytes. */
static bool
parse_flow_nlattrs(const struct nlattr *key, size_t key_len,
                   const struct nlattr *attrs[], uint64_t *present_attrsp,
                   int *out_of_range_attrp)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
    const struct nlattr *nla;
    uint64_t present_attrs;
    size_t left;

    BUILD_ASSERT(OVS_KEY_ATTR_MAX < CHAR_BIT * sizeof present_attrs);
    present_attrs = 0;
    *out_of_range_attrp = 0;
    NL_ATTR_FOR_EACH (nla, left, key, key_len) {
        uint16_t type = nl_attr_type(nla);
        size_t len = nl_attr_get_size(nla);
        int expected_len = odp_flow_key_attr_len(type);

        /* A negative 'expected_len' marks a variable-length attribute, for
         * which any length is acceptable. */
        if (len != expected_len && expected_len >= 0) {
            char namebuf[OVS_KEY_ATTR_BUFSIZE];

            VLOG_ERR_RL(&rl, "attribute %s has length %zu but should have "
                        "length %d", ovs_key_attr_to_string(type, namebuf,
                                                            sizeof namebuf),
                        len, expected_len);
            return false;
        }

        if (type > OVS_KEY_ATTR_MAX) {
            *out_of_range_attrp = type;
        } else {
            if (present_attrs & (UINT64_C(1) << type)) {
                char namebuf[OVS_KEY_ATTR_BUFSIZE];

                VLOG_ERR_RL(&rl, "duplicate %s attribute in flow key",
                            ovs_key_attr_to_string(type,
                                                   namebuf, sizeof namebuf));
                return false;
            }

            present_attrs |= UINT64_C(1) << type;
            attrs[type] = nla;
        }
    }
    /* NL_ATTR_FOR_EACH stops early if the remaining bytes cannot hold a
     * whole attribute; anything left over is garbage. */
    if (left) {
        VLOG_ERR_RL(&rl, "trailing garbage in flow key");
        return false;
    }

    *present_attrsp = present_attrs;
    return true;
}
+
+static enum odp_key_fitness
+check_expectations(uint64_t present_attrs, int out_of_range_attr,
+ uint64_t expected_attrs,
+ const struct nlattr *key, size_t key_len)
+{
+ uint64_t missing_attrs;
+ uint64_t extra_attrs;
+
+ missing_attrs = expected_attrs & ~present_attrs;
+ if (missing_attrs) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
+ log_odp_key_attributes(&rl, "expected but not present",
+ missing_attrs, 0, key, key_len);
+ return ODP_FIT_TOO_LITTLE;
+ }
+
+ extra_attrs = present_attrs & ~expected_attrs;
+ if (extra_attrs || out_of_range_attr) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
+ log_odp_key_attributes(&rl, "present but not expected",
+ extra_attrs, out_of_range_attr, key, key_len);
+ return ODP_FIT_TOO_MUCH;
+ }
+
+ return ODP_FIT_PERFECT;
+}
+
/* Initializes 'flow->dl_type' from the OVS_KEY_ATTR_ETHERTYPE attribute in
 * 'attrs', if present, following the key/mask encoding convention described
 * in odp_flow_key_from_flow__().  'flow' is a mask when it differs from
 * 'src_flow'; 'src_flow' then supplies the flow the mask applies to.
 *
 * Sets the ETHERTYPE bit in '*expected_attrs' when the attribute was
 * consumed.  Returns false on an invalid Ethertype or a mask that is
 * inconsistent with the convention. */
static bool
parse_ethertype(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
                uint64_t present_attrs, uint64_t *expected_attrs,
                struct flow *flow, const struct flow *src_flow)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
    bool is_mask = flow != src_flow;

    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE)) {
        flow->dl_type = nl_attr_get_be16(attrs[OVS_KEY_ATTR_ETHERTYPE]);
        /* A key's Ethertype must be a real Ethernet II type. */
        if (!is_mask && ntohs(flow->dl_type) < ETH_TYPE_MIN) {
            VLOG_ERR_RL(&rl, "invalid Ethertype %"PRIu16" in flow key",
                        ntohs(flow->dl_type));
            return false;
        }
        /* A non-Ethernet II flow may only carry an exact-match Ethertype
         * mask (see the table in odp_flow_key_from_flow__()). */
        if (is_mask && ntohs(src_flow->dl_type) < ETH_TYPE_MIN &&
            flow->dl_type != htons(0xffff)) {
            return false;
        }
        *expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERTYPE;
    } else {
        if (!is_mask) {
            flow->dl_type = htons(FLOW_DL_TYPE_NONE);
        } else if (ntohs(src_flow->dl_type) < ETH_TYPE_MIN) {
            /* See comments in odp_flow_key_from_flow__(). */
            VLOG_ERR_RL(&rl, "mask expected for non-Ethernet II frame");
            return false;
        }
    }
    return true;
}
+
+static enum odp_key_fitness
+parse_l2_5_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
+ uint64_t present_attrs, int out_of_range_attr,
+ uint64_t expected_attrs, struct flow *flow,
+ const struct nlattr *key, size_t key_len,
+ const struct flow *src_flow)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ bool is_mask = src_flow != flow;
+ const void *check_start = NULL;
+ size_t check_len = 0;
+ enum ovs_key_attr expected_bit = 0xff;
+
+ if (eth_type_mpls(src_flow->dl_type)) {
+ if (!is_mask) {
+ expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
+
+ if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS))) {
+ return ODP_FIT_TOO_LITTLE;
+ }
+ flow->mpls_lse = nl_attr_get_be32(attrs[OVS_KEY_ATTR_MPLS]);
+ } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
+ flow->mpls_lse = nl_attr_get_be32(attrs[OVS_KEY_ATTR_MPLS]);
+
+ if (flow->mpls_lse != 0 && flow->dl_type != htons(0xffff)) {
+ return ODP_FIT_ERROR;
+ }
+ expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
+ }
+ goto done;
+ } else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
+ if (!is_mask) {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV4;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV4)) {
+ const struct ovs_key_ipv4 *ipv4_key;
+
+ ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
+ flow->nw_src = ipv4_key->ipv4_src;
+ flow->nw_dst = ipv4_key->ipv4_dst;
+ flow->nw_proto = ipv4_key->ipv4_proto;
+ flow->nw_tos = ipv4_key->ipv4_tos;
+ flow->nw_ttl = ipv4_key->ipv4_ttl;
+ if (is_mask) {
+ flow->nw_frag = ipv4_key->ipv4_frag;
+ check_start = ipv4_key;
+ check_len = sizeof *ipv4_key;
+ expected_bit = OVS_KEY_ATTR_IPV4;
+ } else if (!odp_to_ovs_frag(ipv4_key->ipv4_frag, flow)) {
+ return ODP_FIT_ERROR;
+ }
+ }
+ } else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ if (!is_mask) {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IPV6;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IPV6)) {
+ const struct ovs_key_ipv6 *ipv6_key;
+
+ ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
+ memcpy(&flow->ipv6_src, ipv6_key->ipv6_src, sizeof flow->ipv6_src);
+ memcpy(&flow->ipv6_dst, ipv6_key->ipv6_dst, sizeof flow->ipv6_dst);
+ flow->ipv6_label = ipv6_key->ipv6_label;
+ flow->nw_proto = ipv6_key->ipv6_proto;
+ flow->nw_tos = ipv6_key->ipv6_tclass;
+ flow->nw_ttl = ipv6_key->ipv6_hlimit;
+ if (is_mask) {
+ flow->nw_frag = ipv6_key->ipv6_frag;
+ check_start = ipv6_key;
+ check_len = sizeof *ipv6_key;
+ expected_bit = OVS_KEY_ATTR_IPV6;
+ } else if (!odp_to_ovs_frag(ipv6_key->ipv6_frag, flow)) {
+ return ODP_FIT_ERROR;
+ }
+ }
+ } else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
+ src_flow->dl_type == htons(ETH_TYPE_RARP)) {
+ if (!is_mask) {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ARP;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ARP)) {
+ const struct ovs_key_arp *arp_key;
+
+ arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
+ flow->nw_src = arp_key->arp_sip;
+ flow->nw_dst = arp_key->arp_tip;
+ if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
+ VLOG_ERR_RL(&rl, "unsupported ARP opcode %"PRIu16" in flow "
+ "key", ntohs(arp_key->arp_op));
+ return ODP_FIT_ERROR;
+ }
+ flow->nw_proto = ntohs(arp_key->arp_op);
+ memcpy(flow->arp_sha, arp_key->arp_sha, ETH_ADDR_LEN);
+ memcpy(flow->arp_tha, arp_key->arp_tha, ETH_ADDR_LEN);
+
+ if (is_mask) {
+ check_start = arp_key;
+ check_len = sizeof *arp_key;
+ expected_bit = OVS_KEY_ATTR_ARP;
+ }
+ }
+ } else {
+ goto done;
+ }
+ if (is_mask) {
+ if (!is_all_zeros(check_start, check_len) &&
+ flow->dl_type != htons(0xffff)) {
+ return ODP_FIT_ERROR;
+ } else {
+ expected_attrs |= UINT64_C(1) << expected_bit;
+ }
+ }
+
+ expected_bit = OVS_KEY_ATTR_UNSPEC;
+ if (src_flow->nw_proto == IPPROTO_TCP
+ && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
+ src_flow->dl_type == htons(ETH_TYPE_IPV6))
+ && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
+ if (!is_mask) {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
+ const struct ovs_key_tcp *tcp_key;
+
+ tcp_key = nl_attr_get(attrs[OVS_KEY_ATTR_TCP]);
+ flow->tp_src = tcp_key->tcp_src;
+ flow->tp_dst = tcp_key->tcp_dst;
+ expected_bit = OVS_KEY_ATTR_TCP;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS;
+ flow->tcp_flags = nl_attr_get_be16(attrs[OVS_KEY_ATTR_TCP_FLAGS]);
+ }
+ } else if (src_flow->nw_proto == IPPROTO_UDP
+ && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
+ src_flow->dl_type == htons(ETH_TYPE_IPV6))
+ && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
+ if (!is_mask) {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
+ const struct ovs_key_udp *udp_key;
+
+ udp_key = nl_attr_get(attrs[OVS_KEY_ATTR_UDP]);
+ flow->tp_src = udp_key->udp_src;
+ flow->tp_dst = udp_key->udp_dst;
+ expected_bit = OVS_KEY_ATTR_UDP;
+ }
+ } else if (src_flow->nw_proto == IPPROTO_SCTP
+ && (src_flow->dl_type == htons(ETH_TYPE_IP) ||
+ src_flow->dl_type == htons(ETH_TYPE_IPV6))
+ && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
+ if (!is_mask) {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
+ const struct ovs_key_sctp *sctp_key;
+
+ sctp_key = nl_attr_get(attrs[OVS_KEY_ATTR_SCTP]);
+ flow->tp_src = sctp_key->sctp_src;
+ flow->tp_dst = sctp_key->sctp_dst;
+ expected_bit = OVS_KEY_ATTR_SCTP;
+ }
+ } else if (src_flow->nw_proto == IPPROTO_ICMP
+ && src_flow->dl_type == htons(ETH_TYPE_IP)
+ && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
+ if (!is_mask) {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMP;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMP)) {
+ const struct ovs_key_icmp *icmp_key;
+
+ icmp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMP]);
+ flow->tp_src = htons(icmp_key->icmp_type);
+ flow->tp_dst = htons(icmp_key->icmp_code);
+ expected_bit = OVS_KEY_ATTR_ICMP;
+ }
+ } else if (src_flow->nw_proto == IPPROTO_ICMPV6
+ && src_flow->dl_type == htons(ETH_TYPE_IPV6)
+ && !(src_flow->nw_frag & FLOW_NW_FRAG_LATER)) {
+ if (!is_mask) {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ICMPV6;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ICMPV6)) {
+ const struct ovs_key_icmpv6 *icmpv6_key;
+
+ icmpv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_ICMPV6]);
+ flow->tp_src = htons(icmpv6_key->icmpv6_type);
+ flow->tp_dst = htons(icmpv6_key->icmpv6_code);
+ expected_bit = OVS_KEY_ATTR_ICMPV6;
+ if (src_flow->tp_dst == htons(0) &&
+ (src_flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
+ src_flow->tp_src == htons(ND_NEIGHBOR_ADVERT))) {
+ if (!is_mask) {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ND)) {
+ const struct ovs_key_nd *nd_key;
+
+ nd_key = nl_attr_get(attrs[OVS_KEY_ATTR_ND]);
+ memcpy(&flow->nd_target, nd_key->nd_target,
+ sizeof flow->nd_target);
+ memcpy(flow->arp_sha, nd_key->nd_sll, ETH_ADDR_LEN);
+ memcpy(flow->arp_tha, nd_key->nd_tll, ETH_ADDR_LEN);
+ if (is_mask) {
+ if (!is_all_zeros((const uint8_t *) nd_key,
+ sizeof *nd_key) &&
+ (flow->tp_src != htons(0xffff) ||
+ flow->tp_dst != htons(0xffff))) {
+ return ODP_FIT_ERROR;
+ } else {
+ expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ND;
+ }
+ }
+ }
+ }
+ }
+ }
+ if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
+ if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
+ return ODP_FIT_ERROR;
+ } else {
+ expected_attrs |= UINT64_C(1) << expected_bit;
+ }
+ }
+
+done:
+ return check_expectations(present_attrs, out_of_range_attr, expected_attrs,
+ key, key_len);
+}
+
/* Parse 802.1Q header then encapsulated L3 attributes.
 *
 * 'flow' != 'src_flow' means 'flow' is a mask.  The overall fitness returned
 * is the worse of the outer (VLAN) and inner (encapsulated) fitness. */
static enum odp_key_fitness
parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
                   uint64_t present_attrs, int out_of_range_attr,
                   uint64_t expected_attrs, struct flow *flow,
                   const struct nlattr *key, size_t key_len,
                   const struct flow *src_flow)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
    bool is_mask = src_flow != flow;

    /* The inner attributes, if any, are nested inside OVS_KEY_ATTR_ENCAP. */
    const struct nlattr *encap
        = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)
           ? attrs[OVS_KEY_ATTR_ENCAP] : NULL);
    enum odp_key_fitness encap_fitness;
    enum odp_key_fitness fitness;
    ovs_be16 tci;

    /* Calculate fitness of outer attributes. */
    if (!is_mask) {
        expected_attrs |= ((UINT64_C(1) << OVS_KEY_ATTR_VLAN) |
                          (UINT64_C(1) << OVS_KEY_ATTR_ENCAP));
    } else {
        if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
            expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
        }
        if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)) {
            expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_ENCAP);
        }
    }
    fitness = check_expectations(present_attrs, out_of_range_attr,
                                 expected_attrs, key, key_len);

    /* Get the VLAN TCI value. */
    if (!is_mask && !(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN))) {
        return ODP_FIT_TOO_LITTLE;
    } else {
        /* NOTE(review): for a mask, OVS_KEY_ATTR_VLAN may be absent at this
         * point, in which case attrs[OVS_KEY_ATTR_VLAN] was never written by
         * parse_flow_nlattrs() -- confirm that callers guarantee its
         * presence for masks. */
        tci = nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN]);
        if (!is_mask) {
            if (tci == htons(0)) {
                /* Corner case for a truncated 802.1Q header. */
                if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
                    return ODP_FIT_TOO_MUCH;
                }
                return fitness;
            } else if (!(tci & htons(VLAN_CFI))) {
                VLOG_ERR_RL(&rl, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
                            "but CFI bit is not set", ntohs(tci));
                return ODP_FIT_ERROR;
            }
        }
        /* Set vlan_tci.
         * Remove the TPID from dl_type since it's not the real Ethertype. */
        flow->dl_type = htons(0);
        flow->vlan_tci = tci;
    }

    if (is_mask && !(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
        return fitness;
    }
    /* Now parse the encapsulated attributes.  This re-runs the attribute
     * scan over the nested payload, resetting the expectation bitmap. */
    if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
                            attrs, &present_attrs, &out_of_range_attr)) {
        return ODP_FIT_ERROR;
    }
    expected_attrs = 0;

    if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow, src_flow)) {
        return ODP_FIT_ERROR;
    }
    encap_fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
                                      expected_attrs, flow, key, key_len,
                                      src_flow);

    /* The overall fitness is the worse of the outer and inner attributes. */
    return MAX(fitness, encap_fitness);
}
+
/* Common worker for odp_flow_key_to_flow() and odp_flow_key_to_mask().
 * Translates the 'key_len' bytes of attributes in 'key' into 'flow'.
 * 'flow' != 'src_flow' means 'flow' is a mask and 'src_flow' is the
 * previously translated flow that gives the mask its interpretation. */
static enum odp_key_fitness
odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
                       struct flow *flow, const struct flow *src_flow)
{
    const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
    uint64_t expected_attrs;
    uint64_t present_attrs;
    int out_of_range_attr;
    bool is_mask = src_flow != flow;

    memset(flow, 0, sizeof *flow);

    /* Parse attributes. */
    if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
                            &out_of_range_attr)) {
        return ODP_FIT_ERROR;
    }
    expected_attrs = 0;

    /* Metadata. */
    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
        flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
        expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PRIORITY;
    }

    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK)) {
        flow->pkt_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_SKB_MARK]);
        expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK;
    }

    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TUNNEL)) {
        enum odp_key_fitness res;

        res = odp_tun_key_from_attr(attrs[OVS_KEY_ATTR_TUNNEL], &flow->tunnel);
        if (res == ODP_FIT_ERROR) {
            return ODP_FIT_ERROR;
        } else if (res == ODP_FIT_PERFECT) {
            expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
        }
    }

    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IN_PORT)) {
        flow->in_port.odp_port
            = nl_attr_get_odp_port(attrs[OVS_KEY_ATTR_IN_PORT]);
        expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IN_PORT;
    } else if (!is_mask) {
        flow->in_port.odp_port = ODPP_NONE;
    }

    /* Ethernet header. */
    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERNET)) {
        const struct ovs_key_ethernet *eth_key;

        eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
        memcpy(flow->dl_src, eth_key->eth_src, ETH_ADDR_LEN);
        memcpy(flow->dl_dst, eth_key->eth_dst, ETH_ADDR_LEN);
        if (is_mask) {
            expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
        }
    }
    /* A key (but not a mask) always expects the Ethernet attribute. */
    if (!is_mask) {
        expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
    }

    /* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
    if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
                         src_flow)) {
        return ODP_FIT_ERROR;
    }

    /* A mask with the CFI bit set, or a key whose dl_type is the 802.1Q
     * TPID, carries an encapsulated inner key. */
    if ((is_mask && (src_flow->vlan_tci & htons(VLAN_CFI))) ||
        (!is_mask && src_flow->dl_type == htons(ETH_TYPE_VLAN))) {
        return parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
                                  expected_attrs, flow, key, key_len, src_flow);
    }
    if (is_mask) {
        /* By default the VLAN TCI is exact-matched; an explicit attribute
         * overrides that. */
        flow->vlan_tci = htons(0xffff);
        if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
            flow->vlan_tci = nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN]);
            expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
        }
    }
    return parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
                             expected_attrs, flow, key, key_len, src_flow);
}
+
/* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
 * structure in 'flow'.  Returns an ODP_FIT_* value that indicates how well
 * 'key' fits our expectations for what a flow key should contain.
 *
 * The 'in_port' will be the datapath's understanding of the port.  The
 * caller will need to translate with odp_port_to_ofp_port() if the
 * OpenFlow port is needed.
 *
 * This function doesn't take the packet itself as an argument because none of
 * the currently understood OVS_KEY_ATTR_* attributes require it.  Currently,
 * it is always possible to infer which additional attribute(s) should appear
 * by looking at the attributes for lower-level protocols, e.g. if the network
 * protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
 * know that a OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
 * must be absent. */
enum odp_key_fitness
odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
                     struct flow *flow)
{
    /* Passing 'flow' twice makes the worker treat this as a key rather than
     * a mask (see odp_flow_key_to_mask()). */
    return odp_flow_key_to_flow__(key, key_len, flow, flow);
}
+
/* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a mask
 * structure in 'mask'.  'flow' must be a previously translated flow
 * corresponding to 'mask'.  Returns an ODP_FIT_* value that indicates how well
 * 'key' fits our expectations for what a flow key should contain. */
enum odp_key_fitness
odp_flow_key_to_mask(const struct nlattr *key, size_t key_len,
                     struct flow *mask, const struct flow *flow)
{
    /* 'mask' != 'flow' signals the worker that it is parsing a mask. */
    return odp_flow_key_to_flow__(key, key_len, mask, flow);
}
+
+/* Returns 'fitness' as a string, for use in debug messages. */
+const char *
+odp_key_fitness_to_string(enum odp_key_fitness fitness)
+{
+ switch (fitness) {
+ case ODP_FIT_PERFECT:
+ return "OK";
+ case ODP_FIT_TOO_MUCH:
+ return "too_much";
+ case ODP_FIT_TOO_LITTLE:
+ return "too_little";
+ case ODP_FIT_ERROR:
+ return "error";
+ default:
+ return "<unknown>";
+ }
+}
+
+/* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
+ * Netlink PID 'pid'. If 'userdata' is nonnull, adds a userdata attribute
+ * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
+ * offset within 'odp_actions' of the start of the cookie. (If 'userdata' is
+ * null, then the return value is not meaningful.) */
+size_t
+odp_put_userspace_action(uint32_t pid,
+ const void *userdata, size_t userdata_size,
+ struct ofpbuf *odp_actions)
+{
+ size_t userdata_ofs;
+ size_t offset;
+
+ offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
+ nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
+ if (userdata) {
+ userdata_ofs = odp_actions->size + NLA_HDRLEN;
+ nl_msg_put_unspec(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
+ userdata, userdata_size);
+ } else {
+ userdata_ofs = 0;
+ }
+ nl_msg_end_nested(odp_actions, offset);
+
+ return userdata_ofs;
+}
+
/* Appends an OVS_ACTION_ATTR_SET action to 'odp_actions' that sets the
 * flow's tunnel metadata to 'tunnel'. */
void
odp_put_tunnel_action(const struct flow_tnl *tunnel,
                      struct ofpbuf *odp_actions)
{
    size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
    tun_key_to_attr(odp_actions, tunnel);
    nl_msg_end_nested(odp_actions, offset);
}
+\f
/* The commit_odp_actions() function and its helpers. */

/* Appends an OVS_ACTION_ATTR_SET action to 'odp_actions' whose single nested
 * attribute has type 'key_type' and the 'key_size' bytes at 'key' as its
 * value. */
static void
commit_set_action(struct ofpbuf *odp_actions, enum ovs_key_attr key_type,
                  const void *key, size_t key_size)
{
    size_t offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
    nl_msg_put_unspec(odp_actions, key_type, key, key_size);
    nl_msg_end_nested(odp_actions, offset);
}
+
/* Appends an OVS_ACTION_ATTR_SET action to 'odp_actions' that sets the
 * packet mark (OVS_KEY_ATTR_SKB_MARK) to 'pkt_mark'. */
void
odp_put_pkt_mark_action(const uint32_t pkt_mark,
                        struct ofpbuf *odp_actions)
{
    commit_set_action(odp_actions, OVS_KEY_ATTR_SKB_MARK, &pkt_mark,
                      sizeof(pkt_mark));
}
+
/* If any of the flow key data that ODP actions can modify are different in
 * 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
 * 'odp_actions' that change the flow tunneling information in key from
 * 'base->tunnel' into 'flow->tunnel', and then changes 'base->tunnel' in the
 * same way.  In other words, operates the same as commit_odp_actions(), but
 * only on tunneling information. */
void
commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
                         struct ofpbuf *odp_actions)
{
    /* A valid IPV4_TUNNEL must have non-zero ip_dst. */
    if (flow->tunnel.ip_dst) {
        /* No action needed when the tunnel metadata already matches. */
        if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
            return;
        }
        memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
        odp_put_tunnel_action(&base->tunnel, odp_actions);
    }
}
+
+static void
+commit_set_ether_addr_action(const struct flow *flow, struct flow *base,
+ struct ofpbuf *odp_actions,
+ struct flow_wildcards *wc)
+{
+ struct ovs_key_ethernet eth_key;
+
+ if (eth_addr_equals(base->dl_src, flow->dl_src) &&
+ eth_addr_equals(base->dl_dst, flow->dl_dst)) {
+ return;
+ }
+
+ memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
+ memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+
+ memcpy(base->dl_src, flow->dl_src, ETH_ADDR_LEN);
+ memcpy(base->dl_dst, flow->dl_dst, ETH_ADDR_LEN);
+
+ memcpy(eth_key.eth_src, base->dl_src, ETH_ADDR_LEN);
+ memcpy(eth_key.eth_dst, base->dl_dst, ETH_ADDR_LEN);
+
+ commit_set_action(odp_actions, OVS_KEY_ATTR_ETHERNET,
+ ð_key, sizeof(eth_key));
+}
+
/* If 'base->vlan_tci' differs from 'vlan_tci', appends pop- and/or push-VLAN
 * ODP actions that make the packet's 802.1Q header match 'vlan_tci', updates
 * 'base' accordingly, and unwildcards vlan_tci in 'wc'.  A TCI without
 * VLAN_CFI set means "no VLAN header". */
static void
commit_vlan_action(ovs_be16 vlan_tci, struct flow *base,
                   struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
    if (base->vlan_tci == vlan_tci) {
        return;
    }

    memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);

    /* Remove the existing VLAN header, if any. */
    if (base->vlan_tci & htons(VLAN_CFI)) {
        nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
    }

    /* Push the desired VLAN header, if any. */
    if (vlan_tci & htons(VLAN_CFI)) {
        struct ovs_action_push_vlan vlan;

        vlan.vlan_tpid = htons(ETH_TYPE_VLAN);
        vlan.vlan_tci = vlan_tci;
        nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
                          &vlan, sizeof vlan);
    }
    base->vlan_tci = vlan_tci;
}
+
/* Appends an ODP action to 'odp_actions' that transforms the MPLS portion of
 * 'base' into that of 'flow', driven by '*mpls_depth_delta':
 *
 *   -1: pop the outermost MPLS label (restoring 'flow->dl_type').
 *   +1: push a new MPLS label with flow's Ethertype and LSE.
 *    0: rewrite the existing label stack entry in place.
 *
 * Updates 'base', unwildcards mpls_lse in 'wc', and resets the delta. */
static void
commit_mpls_action(const struct flow *flow, struct flow *base,
                   struct ofpbuf *odp_actions, struct flow_wildcards *wc,
                   int *mpls_depth_delta)
{
    /* Nothing to do if the LSE matches and no push/pop is pending. */
    if (flow->mpls_lse == base->mpls_lse && !*mpls_depth_delta) {
        return;
    }

    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);

    switch (*mpls_depth_delta) {
    case -1:
        nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, flow->dl_type);
        break;
    case 1: {
        struct ovs_action_push_mpls *mpls;

        mpls = nl_msg_put_unspec_uninit(odp_actions, OVS_ACTION_ATTR_PUSH_MPLS,
                                        sizeof *mpls);
        /* Zero first: the struct may contain padding that must not leak
         * uninitialized bytes into the Netlink message. */
        memset(mpls, 0, sizeof *mpls);
        mpls->mpls_ethertype = flow->dl_type;
        mpls->mpls_lse = flow->mpls_lse;
        break;
    }
    case 0: {
        struct ovs_key_mpls mpls_key;

        mpls_key.mpls_lse = flow->mpls_lse;
        commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
                          &mpls_key, sizeof(mpls_key));
        break;
    }
    default:
        NOT_REACHED();
    }

    base->dl_type = flow->dl_type;
    base->mpls_lse = flow->mpls_lse;
    *mpls_depth_delta = 0;
}
+
/* If any settable IPv4 field differs between 'base' and 'flow', appends an
 * OVS_KEY_ATTR_IPV4 set action that rewrites them, updates 'base', and
 * unwildcards the corresponding fields in 'wc'.
 *
 * NOTE(review): nw_proto is serialized from 'base' but is not part of the
 * difference check above; presumably the IP protocol cannot be changed by a
 * set action -- confirm against the datapath's set-field semantics. */
static void
commit_set_ipv4_action(const struct flow *flow, struct flow *base,
                       struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
    struct ovs_key_ipv4 ipv4_key;

    if (base->nw_src == flow->nw_src &&
        base->nw_dst == flow->nw_dst &&
        base->nw_tos == flow->nw_tos &&
        base->nw_ttl == flow->nw_ttl &&
        base->nw_frag == flow->nw_frag) {
        return;
    }

    memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
    memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
    memset(&wc->masks.nw_tos, 0xff, sizeof wc->masks.nw_tos);
    memset(&wc->masks.nw_ttl, 0xff, sizeof wc->masks.nw_ttl);
    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    memset(&wc->masks.nw_frag, 0xff, sizeof wc->masks.nw_frag);

    ipv4_key.ipv4_src = base->nw_src = flow->nw_src;
    ipv4_key.ipv4_dst = base->nw_dst = flow->nw_dst;
    ipv4_key.ipv4_tos = base->nw_tos = flow->nw_tos;
    ipv4_key.ipv4_ttl = base->nw_ttl = flow->nw_ttl;
    ipv4_key.ipv4_proto = base->nw_proto;
    ipv4_key.ipv4_frag = ovs_to_odp_frag(base->nw_frag);

    commit_set_action(odp_actions, OVS_KEY_ATTR_IPV4,
                      &ipv4_key, sizeof(ipv4_key));
}
+
/* If any settable IPv6 field differs between 'base' and 'flow', appends an
 * OVS_KEY_ATTR_IPV6 set action that rewrites them, updates 'base', and
 * unwildcards the corresponding fields in 'wc'.
 *
 * NOTE(review): as with commit_set_ipv4_action(), nw_proto is serialized from
 * 'base' but not compared -- presumably it cannot be changed by a set
 * action; confirm. */
static void
commit_set_ipv6_action(const struct flow *flow, struct flow *base,
                       struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
    struct ovs_key_ipv6 ipv6_key;

    if (ipv6_addr_equals(&base->ipv6_src, &flow->ipv6_src) &&
        ipv6_addr_equals(&base->ipv6_dst, &flow->ipv6_dst) &&
        base->ipv6_label == flow->ipv6_label &&
        base->nw_tos == flow->nw_tos &&
        base->nw_ttl == flow->nw_ttl &&
        base->nw_frag == flow->nw_frag) {
        return;
    }

    memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
    memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
    memset(&wc->masks.ipv6_label, 0xff, sizeof wc->masks.ipv6_label);
    memset(&wc->masks.nw_tos, 0xff, sizeof wc->masks.nw_tos);
    memset(&wc->masks.nw_ttl, 0xff, sizeof wc->masks.nw_ttl);
    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
    memset(&wc->masks.nw_frag, 0xff, sizeof wc->masks.nw_frag);

    base->ipv6_src = flow->ipv6_src;
    memcpy(&ipv6_key.ipv6_src, &base->ipv6_src, sizeof(ipv6_key.ipv6_src));
    base->ipv6_dst = flow->ipv6_dst;
    memcpy(&ipv6_key.ipv6_dst, &base->ipv6_dst, sizeof(ipv6_key.ipv6_dst));

    ipv6_key.ipv6_label = base->ipv6_label = flow->ipv6_label;
    ipv6_key.ipv6_tclass = base->nw_tos = flow->nw_tos;
    ipv6_key.ipv6_hlimit = base->nw_ttl = flow->nw_ttl;
    ipv6_key.ipv6_proto = base->nw_proto;
    ipv6_key.ipv6_frag = ovs_to_odp_frag(base->nw_frag);

    commit_set_action(odp_actions, OVS_KEY_ATTR_IPV6,
                      &ipv6_key, sizeof(ipv6_key));
}
+
+/* If any ARP field of 'flow' differs from 'base', appends an
+ * OVS_KEY_ATTR_ARP set action to 'odp_actions', brings 'base' up to date,
+ * and unwildcards the fields used in 'wc'.
+ *
+ * Returns SLOW_ACTION when an action was committed (ARP rewriting is
+ * handled in the userspace slow path), otherwise 0. */
+static enum slow_path_reason
+commit_set_arp_action(const struct flow *flow, struct flow *base,
+                      struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    struct ovs_key_arp arp_key;
+
+    if (base->nw_src == flow->nw_src &&
+        base->nw_dst == flow->nw_dst &&
+        base->nw_proto == flow->nw_proto &&
+        eth_addr_equals(base->arp_sha, flow->arp_sha) &&
+        eth_addr_equals(base->arp_tha, flow->arp_tha)) {
+        return 0;
+    }
+
+    memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
+    memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
+    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+    memset(&wc->masks.arp_sha, 0xff, sizeof wc->masks.arp_sha);
+    memset(&wc->masks.arp_tha, 0xff, sizeof wc->masks.arp_tha);
+
+    base->nw_src = flow->nw_src;
+    base->nw_dst = flow->nw_dst;
+    base->nw_proto = flow->nw_proto;
+    memcpy(base->arp_sha, flow->arp_sha, ETH_ADDR_LEN);
+    memcpy(base->arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+
+    /* Zero the key before filling it in: struct ovs_key_arp may contain
+     * padding bytes, and filling it field by field would serialize
+     * uninitialized stack data into the netlink attribute.  (The
+     * OVS_KEY_ATTR_ARP serializer elsewhere in this file memsets its key
+     * for the same reason.) */
+    memset(&arp_key, 0, sizeof arp_key);
+    arp_key.arp_sip = base->nw_src;
+    arp_key.arp_tip = base->nw_dst;
+    arp_key.arp_op = htons(base->nw_proto);  /* nw_proto holds the ARP op. */
+    memcpy(arp_key.arp_sha, flow->arp_sha, ETH_ADDR_LEN);
+    memcpy(arp_key.arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+
+    commit_set_action(odp_actions, OVS_KEY_ATTR_ARP, &arp_key, sizeof arp_key);
+
+    return SLOW_ACTION;
+}
+
+/* Dispatches to the per-ethertype L3 commit helper for 'base'/'flow'.
+ * Returns a slow-path reason from the ARP helper if one applies,
+ * otherwise 0. */
+static enum slow_path_reason
+commit_set_nw_action(const struct flow *flow, struct flow *base,
+                     struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    /* A zero nw_proto means 'flow' has no L3 header to rewrite. */
+    if (!flow->nw_proto) {
+        return 0;
+    }
+
+    if (base->dl_type == htons(ETH_TYPE_IP)) {
+        commit_set_ipv4_action(flow, base, odp_actions, wc);
+    } else if (base->dl_type == htons(ETH_TYPE_IPV6)) {
+        commit_set_ipv6_action(flow, base, odp_actions, wc);
+    } else if (base->dl_type == htons(ETH_TYPE_ARP)) {
+        /* May force the slow path; see commit_set_arp_action(). */
+        return commit_set_arp_action(flow, base, odp_actions, wc);
+    }
+
+    return 0;
+}
+
+/* If the transport ports of 'flow' differ from 'base', appends the
+ * matching TCP/UDP/SCTP set action to 'odp_actions', brings 'base' up to
+ * date, and unwildcards the port fields in 'wc'. */
+static void
+commit_set_port_action(const struct flow *flow, struct flow *base,
+                       struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    /* Only IPv4/IPv6 flows with at least one port populated qualify. */
+    if (!is_ip_any(base) || (!base->tp_src && !base->tp_dst)) {
+        return;
+    }
+
+    if (base->tp_src == flow->tp_src && base->tp_dst == flow->tp_dst) {
+        return;
+    }
+
+    memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
+    memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
+
+    switch (flow->nw_proto) {
+    case IPPROTO_TCP: {
+        struct ovs_key_tcp tcp_key;
+
+        base->tp_src = flow->tp_src;
+        base->tp_dst = flow->tp_dst;
+        tcp_key.tcp_src = base->tp_src;
+        tcp_key.tcp_dst = base->tp_dst;
+        commit_set_action(odp_actions, OVS_KEY_ATTR_TCP,
+                          &tcp_key, sizeof tcp_key);
+        break;
+    }
+
+    case IPPROTO_UDP: {
+        struct ovs_key_udp udp_key;
+
+        base->tp_src = flow->tp_src;
+        base->tp_dst = flow->tp_dst;
+        udp_key.udp_src = base->tp_src;
+        udp_key.udp_dst = base->tp_dst;
+        commit_set_action(odp_actions, OVS_KEY_ATTR_UDP,
+                          &udp_key, sizeof udp_key);
+        break;
+    }
+
+    case IPPROTO_SCTP: {
+        struct ovs_key_sctp sctp_key;
+
+        base->tp_src = flow->tp_src;
+        base->tp_dst = flow->tp_dst;
+        sctp_key.sctp_src = base->tp_src;
+        sctp_key.sctp_dst = base->tp_dst;
+        commit_set_action(odp_actions, OVS_KEY_ATTR_SCTP,
+                          &sctp_key, sizeof sctp_key);
+        break;
+    }
+
+    default:
+        /* Other protocols (e.g. ICMP) have no port set action; 'base' is
+         * deliberately left untouched. */
+        break;
+    }
+}
+
+/* If 'flow' carries a different skb_priority than 'base', appends an
+ * OVS_KEY_ATTR_PRIORITY set action, updates 'base', and unwildcards the
+ * field in 'wc'. */
+static void
+commit_set_priority_action(const struct flow *flow, struct flow *base,
+                           struct ofpbuf *odp_actions,
+                           struct flow_wildcards *wc)
+{
+    if (base->skb_priority != flow->skb_priority) {
+        memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
+        base->skb_priority = flow->skb_priority;
+        commit_set_action(odp_actions, OVS_KEY_ATTR_PRIORITY,
+                          &base->skb_priority, sizeof base->skb_priority);
+    }
+}
+
+/* If 'flow' carries a different pkt_mark than 'base', appends a pkt_mark
+ * set action, updates 'base', and unwildcards the field in 'wc'. */
+static void
+commit_set_pkt_mark_action(const struct flow *flow, struct flow *base,
+                           struct ofpbuf *odp_actions,
+                           struct flow_wildcards *wc)
+{
+    if (base->pkt_mark != flow->pkt_mark) {
+        memset(&wc->masks.pkt_mark, 0xff, sizeof wc->masks.pkt_mark);
+        base->pkt_mark = flow->pkt_mark;
+        odp_put_pkt_mark_action(base->pkt_mark, odp_actions);
+    }
+}
+
+/* If any of the flow key data that ODP actions can modify are different in
+ * 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
+ * key from 'base' into 'flow', and then changes 'base' the same way. Does not
+ * commit set_tunnel actions. Users should call commit_odp_tunnel_action()
+ * in addition to this function if needed. Sets fields in 'wc' that are
+ * used as part of the action.
+ *
+ * Returns a reason to force processing the flow's packets into the userspace
+ * slow path, if there is one, otherwise 0. */
+enum slow_path_reason
+commit_odp_actions(const struct flow *flow, struct flow *base,
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc,
+ int *mpls_depth_delta)
+{
+ enum slow_path_reason slow;
+
+ commit_set_ether_addr_action(flow, base, odp_actions, wc);
+ commit_vlan_action(flow->vlan_tci, base, odp_actions, wc);
+ slow = commit_set_nw_action(flow, base, odp_actions, wc);
+ commit_set_port_action(flow, base, odp_actions, wc);
+ /* Committing MPLS actions should occur after committing nw and port
+ * actions. This is because committing MPLS actions may alter a packet so
+ * that it is no longer IP and thus nw and port actions are no longer valid.
+ */
+ commit_mpls_action(flow, base, odp_actions, wc, mpls_depth_delta);
+ commit_set_priority_action(flow, base, odp_actions, wc);
+ commit_set_pkt_mark_action(flow, base, odp_actions, wc);
+
+ return slow;