+ if (is_mask && expected_bit != OVS_KEY_ATTR_UNSPEC) {
+ if ((flow->tp_src || flow->tp_dst) && flow->nw_proto != 0xff) {
+ return ODP_FIT_ERROR;
+ } else {
+ expected_attrs |= UINT64_C(1) << expected_bit;
+ }
+ }
+
+done:
+ return check_expectations(present_attrs, out_of_range_attr, expected_attrs,
+ key, key_len);
+}
+
+/* Parses an 802.1Q header, then the encapsulated L2.5/L3 attributes, from
+ * the netlink attributes indexed by 'attrs' (previously parsed out of the
+ * 'key_len' bytes at 'key'), accumulating the result into 'flow'.
+ *
+ * 'present_attrs' is a bitmap of the attribute types present in 'attrs';
+ * 'out_of_range_attr' identifies an attribute beyond the known range, if one
+ * was seen.  When 'src_flow' differs from 'flow', 'key' is interpreted as a
+ * mask for the previously translated 'src_flow'.
+ *
+ * Returns an ODP_FIT_* value describing how well 'key' matched our
+ * expectations. */
+static enum odp_key_fitness
+parse_8021q_onward(const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1],
+                   uint64_t present_attrs, int out_of_range_attr,
+                   uint64_t expected_attrs, struct flow *flow,
+                   const struct nlattr *key, size_t key_len,
+                   const struct flow *src_flow)
+{
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+    bool is_mask = src_flow != flow;
+
+    const struct nlattr *encap
+        = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)
+           ? attrs[OVS_KEY_ATTR_ENCAP] : NULL);
+    enum odp_key_fitness encap_fitness;
+    enum odp_key_fitness fitness;
+
+    /* Calculate fitness of outer attributes. */
+    if (!is_mask) {
+        /* A flow must carry both the VLAN TCI and an encapsulated key. */
+        expected_attrs |= ((UINT64_C(1) << OVS_KEY_ATTR_VLAN) |
+                           (UINT64_C(1) << OVS_KEY_ATTR_ENCAP));
+    } else {
+        /* A mask may legitimately include either attribute, both, or
+         * neither, so expect only what is actually present. */
+        if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
+            expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
+        }
+        if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP)) {
+            expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_ENCAP);
+        }
+    }
+    fitness = check_expectations(present_attrs, out_of_range_attr,
+                                 expected_attrs, key, key_len);
+
+    /* Set vlan_tci.
+     * Remove the TPID from dl_type since it's not the real Ethertype. */
+    flow->dl_type = htons(0);
+    flow->vlan_tci = (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)
+                      ? nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN])
+                      : htons(0));
+    if (!is_mask) {
+        if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN))) {
+            /* The TCI is mandatory for a flow; without it the key told us
+             * too little. */
+            return ODP_FIT_TOO_LITTLE;
+        } else if (flow->vlan_tci == htons(0)) {
+            /* Corner case for a truncated 802.1Q header. */
+            if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
+                return ODP_FIT_TOO_MUCH;
+            }
+            return fitness;
+        } else if (!(flow->vlan_tci & htons(VLAN_CFI))) {
+            /* A nonzero TCI without CFI set is malformed. */
+            VLOG_ERR_RL(&rl, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
+                        "but CFI bit is not set", ntohs(flow->vlan_tci));
+            return ODP_FIT_ERROR;
+        }
+    } else {
+        if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ENCAP))) {
+            /* A mask without an encapsulated key has nothing more to
+             * parse. */
+            return fitness;
+        }
+    }
+
+    /* Now parse the encapsulated attributes. */
+    if (!parse_flow_nlattrs(nl_attr_get(encap), nl_attr_get_size(encap),
+                            attrs, &present_attrs, &out_of_range_attr)) {
+        return ODP_FIT_ERROR;
+    }
+    expected_attrs = 0;
+
+    if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow, src_flow)) {
+        return ODP_FIT_ERROR;
+    }
+    encap_fitness = parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
+                                      expected_attrs, flow, key, key_len,
+                                      src_flow);
+
+    /* The overall fitness is the worse of the outer and inner attributes. */
+    return MAX(fitness, encap_fitness);
+}
+
+/* Translates the 'key_len' bytes of OVS_KEY_ATTR_* netlink attributes in
+ * 'key' into 'flow'.  When 'src_flow' == 'flow', the attributes are parsed
+ * as a flow key; otherwise they are parsed as a mask for the
+ * already-translated 'src_flow'.  Returns an ODP_FIT_* fitness value
+ * (ODP_FIT_ERROR on any malformed attribute). */
+static enum odp_key_fitness
+odp_flow_key_to_flow__(const struct nlattr *key, size_t key_len,
+                       struct flow *flow, const struct flow *src_flow)
+{
+    const struct nlattr *attrs[OVS_KEY_ATTR_MAX + 1];
+    uint64_t expected_attrs;
+    uint64_t present_attrs;
+    int out_of_range_attr;
+    bool is_mask = src_flow != flow;
+
+    memset(flow, 0, sizeof *flow);
+
+    /* Parse attributes. */
+    if (!parse_flow_nlattrs(key, key_len, attrs, &present_attrs,
+                            &out_of_range_attr)) {
+        return ODP_FIT_ERROR;
+    }
+    expected_attrs = 0;
+
+    /* Metadata. */
+    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID)) {
+        flow->recirc_id = nl_attr_get_u32(attrs[OVS_KEY_ATTR_RECIRC_ID]);
+        expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_RECIRC_ID;
+    } else if (is_mask) {
+        /* Always exact match recirc_id when datapath does not specify it. */
+        flow->recirc_id = UINT32_MAX;
+    }
+
+    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_DP_HASH)) {
+        flow->dp_hash = nl_attr_get_u32(attrs[OVS_KEY_ATTR_DP_HASH]);
+        expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_DP_HASH;
+    }
+    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_PRIORITY)) {
+        flow->skb_priority = nl_attr_get_u32(attrs[OVS_KEY_ATTR_PRIORITY]);
+        expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_PRIORITY;
+    }
+
+    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK)) {
+        flow->pkt_mark = nl_attr_get_u32(attrs[OVS_KEY_ATTR_SKB_MARK]);
+        expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SKB_MARK;
+    }
+
+    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TUNNEL)) {
+        enum odp_key_fitness res;
+
+        res = odp_tun_key_from_attr(attrs[OVS_KEY_ATTR_TUNNEL], &flow->tunnel);
+        if (res == ODP_FIT_ERROR) {
+            return ODP_FIT_ERROR;
+        } else if (res == ODP_FIT_PERFECT) {
+            /* Count the tunnel as expected only when it parsed perfectly;
+             * lesser fits show up in the final fitness check instead. */
+            expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TUNNEL;
+        }
+    }
+
+    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_IN_PORT)) {
+        flow->in_port.odp_port
+            = nl_attr_get_odp_port(attrs[OVS_KEY_ATTR_IN_PORT]);
+        expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_IN_PORT;
+    } else if (!is_mask) {
+        /* A flow with no in_port attribute means "no port". */
+        flow->in_port.odp_port = ODPP_NONE;
+    }
+
+    /* Ethernet header. */
+    if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_ETHERNET)) {
+        const struct ovs_key_ethernet *eth_key;
+
+        eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
+        memcpy(flow->dl_src, eth_key->eth_src, ETH_ADDR_LEN);
+        memcpy(flow->dl_dst, eth_key->eth_dst, ETH_ADDR_LEN);
+        if (is_mask) {
+            expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
+        }
+    }
+    if (!is_mask) {
+        /* A flow always requires the Ethernet attribute; a mask expects it
+         * only when present (handled just above). */
+        expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
+    }
+
+    /* Get Ethertype or 802.1Q TPID or FLOW_DL_TYPE_NONE. */
+    if (!parse_ethertype(attrs, present_attrs, &expected_attrs, flow,
+                         src_flow)) {
+        return ODP_FIT_ERROR;
+    }
+
+    /* A set CFI bit in the mask, or an 802.1Q TPID in the flow, signals a
+     * VLAN header followed by an encapsulated key. */
+    if (is_mask
+        ? (src_flow->vlan_tci & htons(VLAN_CFI)) != 0
+        : src_flow->dl_type == htons(ETH_TYPE_VLAN)) {
+        return parse_8021q_onward(attrs, present_attrs, out_of_range_attr,
+                                  expected_attrs, flow, key, key_len, src_flow);
+    }
+    if (is_mask) {
+        /* By default, exact match on the (absent) VLAN TCI. */
+        flow->vlan_tci = htons(0xffff);
+        if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_VLAN)) {
+            flow->vlan_tci = nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN]);
+            expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_VLAN);
+        }
+    }
+    return parse_l2_5_onward(attrs, present_attrs, out_of_range_attr,
+                             expected_attrs, flow, key, key_len, src_flow);
+}
+
+/* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a flow
+ * structure in 'flow'.  Returns an ODP_FIT_* value that indicates how well
+ * 'key' fits our expectations for what a flow key should contain.
+ *
+ * The 'in_port' will be the datapath's understanding of the port.  The
+ * caller will need to translate with odp_port_to_ofp_port() if the
+ * OpenFlow port is needed.
+ *
+ * This function doesn't take the packet itself as an argument because none of
+ * the currently understood OVS_KEY_ATTR_* attributes require it.  Currently,
+ * it is always possible to infer which additional attribute(s) should appear
+ * by looking at the attributes for lower-level protocols, e.g. if the network
+ * protocol in OVS_KEY_ATTR_IPV4 or OVS_KEY_ATTR_IPV6 is IPPROTO_TCP then we
+ * know that a OVS_KEY_ATTR_TCP attribute must appear and that otherwise it
+ * must be absent. */
+enum odp_key_fitness
+odp_flow_key_to_flow(const struct nlattr *key, size_t key_len,
+                     struct flow *flow)
+{
+    /* Passing 'flow' as both destination and source marks this as a flow
+     * (not a mask) translation. */
+    enum odp_key_fitness fitness;
+
+    fitness = odp_flow_key_to_flow__(key, key_len, flow, flow);
+    return fitness;
+}
+
+/* Converts the 'key_len' bytes of OVS_KEY_ATTR_* attributes in 'key' to a mask
+ * structure in 'mask'.  'flow' must be a previously translated flow
+ * corresponding to 'mask'.  Returns an ODP_FIT_* value that indicates how well
+ * 'key' fits our expectations for what a flow key should contain. */
+enum odp_key_fitness
+odp_flow_key_to_mask(const struct nlattr *key, size_t key_len,
+                     struct flow *mask, const struct flow *flow)
+{
+    /* A distinct 'flow' marks this as a mask (not a flow) translation. */
+    enum odp_key_fitness fitness;
+
+    fitness = odp_flow_key_to_flow__(key, key_len, mask, flow);
+    return fitness;
+}
+
+/* Returns 'fitness' as a string, for use in debug messages. */
+const char *
+odp_key_fitness_to_string(enum odp_key_fitness fitness)
+{
+    /* Any value outside the known enumerators maps to "<unknown>". */
+    if (fitness == ODP_FIT_PERFECT) {
+        return "OK";
+    } else if (fitness == ODP_FIT_TOO_MUCH) {
+        return "too_much";
+    } else if (fitness == ODP_FIT_TOO_LITTLE) {
+        return "too_little";
+    } else if (fitness == ODP_FIT_ERROR) {
+        return "error";
+    } else {
+        return "<unknown>";
+    }
+}
+
+/* Appends an OVS_ACTION_ATTR_USERSPACE action to 'odp_actions' that specifies
+ * Netlink PID 'pid'.  If 'userdata' is nonnull, adds a userdata attribute
+ * whose contents are the 'userdata_size' bytes at 'userdata' and returns the
+ * offset within 'odp_actions' of the start of the cookie.  (If 'userdata' is
+ * null, then the return value is not meaningful.) */
+size_t
+odp_put_userspace_action(uint32_t pid,
+                         const void *userdata, size_t userdata_size,
+                         struct ofpbuf *odp_actions)
+{
+    size_t userdata_ofs;
+    size_t offset;
+
+    offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
+    nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
+    if (userdata) {
+        /* The userdata payload will start where the buffer currently ends,
+         * just past the netlink attribute header about to be appended. */
+        userdata_ofs = ofpbuf_size(odp_actions) + NLA_HDRLEN;
+
+        /* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
+         * module before Linux 3.10 required the userdata to be exactly 8 bytes
+         * long:
+         *
+         *   - The kernel rejected shorter userdata with -ERANGE.
+         *
+         *   - The kernel silently dropped userdata beyond the first 8 bytes.
+         *
+         * Thus, for maximum compatibility, always put at least 8 bytes.  (We
+         * separately disable features that required more than 8 bytes.) */
+        memcpy(nl_msg_put_unspec_zero(odp_actions, OVS_USERSPACE_ATTR_USERDATA,
+                                      MAX(8, userdata_size)),
+               userdata, userdata_size);
+    } else {
+        userdata_ofs = 0;
+    }
+    nl_msg_end_nested(odp_actions, offset);
+
+    return userdata_ofs;
+}
+
+/* Appends an OVS_ACTION_ATTR_SET action to 'odp_actions' carrying the tunnel
+ * metadata in 'tunnel'. */
+void
+odp_put_tunnel_action(const struct flow_tnl *tunnel,
+                      struct ofpbuf *odp_actions)
+{
+    size_t nested_ofs;
+
+    nested_ofs = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
+    tun_key_to_attr(odp_actions, tunnel);
+    nl_msg_end_nested(odp_actions, nested_ofs);
+}
+\f
+/* The commit_odp_actions() function and its helpers. */
+
+/* Appends an OVS_ACTION_ATTR_SET action to 'odp_actions' whose nested
+ * attribute has type 'key_type' and, as its value, the 'key_size' bytes at
+ * 'key'. */
+static void
+commit_set_action(struct ofpbuf *odp_actions, enum ovs_key_attr key_type,
+                  const void *key, size_t key_size)
+{
+    size_t nested_ofs;
+
+    nested_ofs = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SET);
+    nl_msg_put_unspec(odp_actions, key_type, key, key_size);
+    nl_msg_end_nested(odp_actions, nested_ofs);
+}
+
+/* Appends a set action for packet mark 'pkt_mark' to 'odp_actions'. */
+void
+odp_put_pkt_mark_action(const uint32_t pkt_mark,
+                        struct ofpbuf *odp_actions)
+{
+    commit_set_action(odp_actions, OVS_KEY_ATTR_SKB_MARK,
+                      &pkt_mark, sizeof pkt_mark);
+}
+
+/* If any of the flow key data that ODP actions can modify are different in
+ * 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
+ * 'odp_actions' that change the flow tunneling information in key from
+ * 'base->tunnel' into 'flow->tunnel', and then changes 'base->tunnel' in the
+ * same way.  In other words, operates the same as commit_odp_actions(), but
+ * only on tunneling information. */
+void
+commit_odp_tunnel_action(const struct flow *flow, struct flow *base,
+                         struct ofpbuf *odp_actions)
+{
+    /* A valid IPV4_TUNNEL must have non-zero ip_dst; without one there is
+     * no tunnel to commit. */
+    if (!flow->tunnel.ip_dst) {
+        return;
+    }
+
+    /* Nothing to emit when the tunnel metadata already matches. */
+    if (!memcmp(&base->tunnel, &flow->tunnel, sizeof base->tunnel)) {
+        return;
+    }
+
+    memcpy(&base->tunnel, &flow->tunnel, sizeof base->tunnel);
+    odp_put_tunnel_action(&base->tunnel, odp_actions);
+}
+
+/* If the Ethernet source or destination address differs between 'base' and
+ * 'flow', appends an OVS_KEY_ATTR_ETHERNET set action to 'odp_actions',
+ * updates 'base' to match 'flow', and exact-matches both addresses in
+ * 'wc'. */
+static void
+commit_set_ether_addr_action(const struct flow *flow, struct flow *base,
+                             struct ofpbuf *odp_actions,
+                             struct flow_wildcards *wc)
+{
+    struct ovs_key_ethernet eth_key;
+
+    if (eth_addr_equals(base->dl_src, flow->dl_src) &&
+        eth_addr_equals(base->dl_dst, flow->dl_dst)) {
+        return;
+    }
+
+    memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
+    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+
+    memcpy(base->dl_src, flow->dl_src, ETH_ADDR_LEN);
+    memcpy(base->dl_dst, flow->dl_dst, ETH_ADDR_LEN);
+
+    memcpy(eth_key.eth_src, base->dl_src, ETH_ADDR_LEN);
+    memcpy(eth_key.eth_dst, base->dl_dst, ETH_ADDR_LEN);
+
+    /* Fix: the argument had been mangled into the mis-encoded token
+     * "ð_key" ("&eth" garbled to U+00F0), which is not valid C; it must
+     * be the address of the local key, "&eth_key". */
+    commit_set_action(odp_actions, OVS_KEY_ATTR_ETHERNET,
+                      &eth_key, sizeof(eth_key));
+}
+
+/* Appends an OVS_ACTION_ATTR_POP_VLAN action to 'odp_actions' if 'base'
+ * currently has an 802.1Q header (CFI bit set), clearing 'base->vlan_tci'
+ * and exact-matching the TCI in 'wc'. */
+static void
+pop_vlan(struct flow *base,
+         struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
+
+    /* No CFI bit means no 802.1Q header to pop. */
+    if (!(base->vlan_tci & htons(VLAN_CFI))) {
+        return;
+    }
+
+    nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
+    base->vlan_tci = 0;
+}
+
+/* Transforms the VLAN tag in 'base' into 'vlan_tci' by appending POP_VLAN
+ * and/or PUSH_VLAN actions to 'odp_actions' as needed, updating 'base' and
+ * 'wc' along the way. */
+static void
+commit_vlan_action(ovs_be16 vlan_tci, struct flow *base,
+                   struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    struct ovs_action_push_vlan push;
+
+    /* Nothing to do when the tag already matches. */
+    if (base->vlan_tci == vlan_tci) {
+        return;
+    }
+
+    pop_vlan(base, odp_actions, wc);
+    if (vlan_tci & htons(VLAN_CFI)) {
+        push.vlan_tpid = htons(ETH_TYPE_VLAN);
+        push.vlan_tci = vlan_tci;
+        nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
+                          &push, sizeof push);
+    }
+    base->vlan_tci = vlan_tci;
+}
+
+/* Appends POP_MPLS, set, and PUSH_MPLS actions to 'odp_actions' as needed to
+ * transform the MPLS label stack in 'base' into the one in 'flow', updating
+ * 'base' and 'wc' along the way. */
+static void
+commit_mpls_action(const struct flow *flow, struct flow *base,
+                   struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    int base_n = flow_count_mpls_labels(base, wc);
+    int flow_n = flow_count_mpls_labels(flow, wc);
+    /* Number of LSEs (from the bottom of the stacks) that 'base' and 'flow'
+     * already agree on. */
+    int common_n = flow_count_common_mpls_labels(flow, flow_n, base, base_n,
+                                                 wc);
+
+    while (base_n > common_n) {
+        if (base_n - 1 == common_n && flow_n > common_n) {
+            /* If there is only one more LSE in base than there are common
+             * between base and flow; and flow has at least one more LSE than
+             * is common then the topmost LSE of base may be updated using
+             * set */
+            struct ovs_key_mpls mpls_key;
+
+            mpls_key.mpls_lse = flow->mpls_lse[flow_n - base_n];
+            commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
+                              &mpls_key, sizeof mpls_key);
+            flow_set_mpls_lse(base, 0, mpls_key.mpls_lse);
+            common_n++;
+        } else {
+            /* Otherwise, if there are more LSEs in base than are common
+             * between base and flow then pop the topmost one. */
+            ovs_be16 dl_type;
+            bool popped;
+
+            /* If all the LSEs are to be popped and this is not the outermost
+             * LSE then use ETH_TYPE_MPLS as the ethertype parameter of the
+             * POP_MPLS action instead of flow->dl_type.
+             *
+             * This is because the POP_MPLS action requires its ethertype
+             * argument to be an MPLS ethernet type but in this case
+             * flow->dl_type will be a non-MPLS ethernet type.
+             *
+             * When the final POP_MPLS action occurs it uses flow->dl_type
+             * and the resulting packet will have the desired dl_type. */
+            if ((!eth_type_mpls(flow->dl_type)) && base_n > 1) {
+                dl_type = htons(ETH_TYPE_MPLS);
+            } else {
+                dl_type = flow->dl_type;
+            }
+            nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
+            popped = flow_pop_mpls(base, base_n, flow->dl_type, wc);
+            ovs_assert(popped);
+            base_n--;
+        }
+    }
+
+    /* If, after the above popping and setting, there are more LSEs in flow
+     * than base then some LSEs need to be pushed. */
+    while (base_n < flow_n) {
+        struct ovs_action_push_mpls *mpls;
+
+        mpls = nl_msg_put_unspec_zero(odp_actions,
+                                      OVS_ACTION_ATTR_PUSH_MPLS,
+                                      sizeof *mpls);
+        mpls->mpls_ethertype = flow->dl_type;
+        /* Push outermost-last: pick LSEs from 'flow' top-down. */
+        mpls->mpls_lse = flow->mpls_lse[flow_n - base_n - 1];
+        flow_push_mpls(base, base_n, mpls->mpls_ethertype, wc);
+        flow_set_mpls_lse(base, 0, mpls->mpls_lse);
+        base_n++;
+    }
+}
+
+/* If any IPv4 field that a set action can rewrite differs between 'base' and
+ * 'flow', appends an OVS_KEY_ATTR_IPV4 set action to 'odp_actions', updates
+ * 'base' to match 'flow', and exact-matches the fields in 'wc'. */
+static void
+commit_set_ipv4_action(const struct flow *flow, struct flow *base,
+                       struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    struct ovs_key_ipv4 ipv4_key;
+
+    /* Skip the action entirely when nothing settable differs. */
+    if (base->nw_src == flow->nw_src &&
+        base->nw_dst == flow->nw_dst &&
+        base->nw_tos == flow->nw_tos &&
+        base->nw_ttl == flow->nw_ttl &&
+        base->nw_frag == flow->nw_frag) {
+        return;
+    }
+
+    memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
+    memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
+    memset(&wc->masks.nw_tos, 0xff, sizeof wc->masks.nw_tos);
+    memset(&wc->masks.nw_ttl, 0xff, sizeof wc->masks.nw_ttl);
+    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+    memset(&wc->masks.nw_frag, 0xff, sizeof wc->masks.nw_frag);
+
+    /* Bring 'base' up to date, then build the key from 'base'. */
+    base->nw_src = flow->nw_src;
+    base->nw_dst = flow->nw_dst;
+    base->nw_tos = flow->nw_tos;
+    base->nw_ttl = flow->nw_ttl;
+
+    ipv4_key.ipv4_src = base->nw_src;
+    ipv4_key.ipv4_dst = base->nw_dst;
+    ipv4_key.ipv4_tos = base->nw_tos;
+    ipv4_key.ipv4_ttl = base->nw_ttl;
+    ipv4_key.ipv4_proto = base->nw_proto;
+    ipv4_key.ipv4_frag = ovs_to_odp_frag(base->nw_frag);
+
+    commit_set_action(odp_actions, OVS_KEY_ATTR_IPV4,
+                      &ipv4_key, sizeof ipv4_key);
+}
+
+/* If any IPv6 field that a set action can rewrite differs between 'base' and
+ * 'flow', appends an OVS_KEY_ATTR_IPV6 set action to 'odp_actions', updates
+ * 'base' to match 'flow', and exact-matches the fields in 'wc'. */
+static void
+commit_set_ipv6_action(const struct flow *flow, struct flow *base,
+                       struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    struct ovs_key_ipv6 ipv6_key;
+
+    /* Skip the action entirely when nothing settable differs. */
+    if (ipv6_addr_equals(&base->ipv6_src, &flow->ipv6_src) &&
+        ipv6_addr_equals(&base->ipv6_dst, &flow->ipv6_dst) &&
+        base->ipv6_label == flow->ipv6_label &&
+        base->nw_tos == flow->nw_tos &&
+        base->nw_ttl == flow->nw_ttl &&
+        base->nw_frag == flow->nw_frag) {
+        return;
+    }
+
+    memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
+    memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
+    memset(&wc->masks.ipv6_label, 0xff, sizeof wc->masks.ipv6_label);
+    memset(&wc->masks.nw_tos, 0xff, sizeof wc->masks.nw_tos);
+    memset(&wc->masks.nw_ttl, 0xff, sizeof wc->masks.nw_ttl);
+    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+    memset(&wc->masks.nw_frag, 0xff, sizeof wc->masks.nw_frag);
+
+    /* Update 'base' first, then copy the addresses from 'base' into the
+     * key. */
+    base->ipv6_src = flow->ipv6_src;
+    memcpy(&ipv6_key.ipv6_src, &base->ipv6_src, sizeof(ipv6_key.ipv6_src));
+    base->ipv6_dst = flow->ipv6_dst;
+    memcpy(&ipv6_key.ipv6_dst, &base->ipv6_dst, sizeof(ipv6_key.ipv6_dst));
+
+    ipv6_key.ipv6_label = base->ipv6_label = flow->ipv6_label;
+    ipv6_key.ipv6_tclass = base->nw_tos = flow->nw_tos;
+    ipv6_key.ipv6_hlimit = base->nw_ttl = flow->nw_ttl;
+    ipv6_key.ipv6_proto = base->nw_proto;
+    ipv6_key.ipv6_frag = ovs_to_odp_frag(base->nw_frag);
+
+    commit_set_action(odp_actions, OVS_KEY_ATTR_IPV6,
+                      &ipv6_key, sizeof(ipv6_key));
+}
+
+/* If any ARP field that a set action can rewrite differs between 'base' and
+ * 'flow', appends an OVS_KEY_ATTR_ARP set action to 'odp_actions', updates
+ * 'base' to match 'flow', and exact-matches the fields in 'wc'.
+ *
+ * Returns SLOW_ACTION when an action was emitted (the caller uses this as a
+ * reason to force slow-path processing), otherwise 0. */
+static enum slow_path_reason
+commit_set_arp_action(const struct flow *flow, struct flow *base,
+                      struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    struct ovs_key_arp arp_key;
+
+    if (base->nw_src == flow->nw_src &&
+        base->nw_dst == flow->nw_dst &&
+        base->nw_proto == flow->nw_proto &&
+        eth_addr_equals(base->arp_sha, flow->arp_sha) &&
+        eth_addr_equals(base->arp_tha, flow->arp_tha)) {
+        return 0;
+    }
+
+    memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
+    memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
+    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+    memset(&wc->masks.arp_sha, 0xff, sizeof wc->masks.arp_sha);
+    memset(&wc->masks.arp_tha, 0xff, sizeof wc->masks.arp_tha);
+
+    base->nw_src = flow->nw_src;
+    base->nw_dst = flow->nw_dst;
+    base->nw_proto = flow->nw_proto;
+    memcpy(base->arp_sha, flow->arp_sha, ETH_ADDR_LEN);
+    memcpy(base->arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+
+    /* For ARP, nw_src/nw_dst hold the sender/target IPs and nw_proto holds
+     * the (low byte of the) ARP opcode. */
+    arp_key.arp_sip = base->nw_src;
+    arp_key.arp_tip = base->nw_dst;
+    arp_key.arp_op = htons(base->nw_proto);
+    memcpy(arp_key.arp_sha, flow->arp_sha, ETH_ADDR_LEN);
+    memcpy(arp_key.arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+
+    commit_set_action(odp_actions, OVS_KEY_ATTR_ARP, &arp_key, sizeof arp_key);
+
+    return SLOW_ACTION;
+}
+
+/* Dispatches to the per-Ethertype L3 commit helper for 'base'.  Returns a
+ * slow-path reason when the helper reports one (only the ARP helper does),
+ * otherwise 0. */
+static enum slow_path_reason
+commit_set_nw_action(const struct flow *flow, struct flow *base,
+                     struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    uint16_t dl_type;
+
+    /* Check if 'flow' really has an L3 header. */
+    if (!flow->nw_proto) {
+        return 0;
+    }
+
+    dl_type = ntohs(base->dl_type);
+    if (dl_type == ETH_TYPE_IP) {
+        commit_set_ipv4_action(flow, base, odp_actions, wc);
+    } else if (dl_type == ETH_TYPE_IPV6) {
+        commit_set_ipv6_action(flow, base, odp_actions, wc);
+    } else if (dl_type == ETH_TYPE_ARP) {
+        return commit_set_arp_action(flow, base, odp_actions, wc);
+    }
+
+    return 0;
+}
+
+/* If the L4 ports differ between 'base' and 'flow', appends a TCP, UDP, or
+ * SCTP set action (chosen by 'flow->nw_proto') to 'odp_actions', updates
+ * 'base' to match 'flow', and exact-matches the ports in 'wc'.  Protocols
+ * other than TCP/UDP/SCTP get no action. */
+static void
+commit_set_port_action(const struct flow *flow, struct flow *base,
+                       struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    /* Nothing to do unless 'base' is IPv4/IPv6 and carries L4 ports. */
+    if (!is_ip_any(base) || (!base->tp_src && !base->tp_dst)) {
+        return;
+    }
+
+    if (base->tp_src == flow->tp_src &&
+        base->tp_dst == flow->tp_dst) {
+        return;
+    }
+
+    memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
+    memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
+
+    if (flow->nw_proto == IPPROTO_TCP) {
+        struct ovs_key_tcp port_key;
+
+        port_key.tcp_src = base->tp_src = flow->tp_src;
+        port_key.tcp_dst = base->tp_dst = flow->tp_dst;
+
+        commit_set_action(odp_actions, OVS_KEY_ATTR_TCP,
+                          &port_key, sizeof(port_key));
+
+    } else if (flow->nw_proto == IPPROTO_UDP) {
+        struct ovs_key_udp port_key;
+
+        port_key.udp_src = base->tp_src = flow->tp_src;
+        port_key.udp_dst = base->tp_dst = flow->tp_dst;
+
+        commit_set_action(odp_actions, OVS_KEY_ATTR_UDP,
+                          &port_key, sizeof(port_key));
+    } else if (flow->nw_proto == IPPROTO_SCTP) {
+        struct ovs_key_sctp port_key;
+
+        port_key.sctp_src = base->tp_src = flow->tp_src;
+        port_key.sctp_dst = base->tp_dst = flow->tp_dst;
+
+        commit_set_action(odp_actions, OVS_KEY_ATTR_SCTP,
+                          &port_key, sizeof(port_key));
+    }
+}
+
+/* If 'flow->skb_priority' differs from 'base->skb_priority', appends an
+ * OVS_KEY_ATTR_PRIORITY set action to 'odp_actions', updates 'base', and
+ * exact-matches the priority in 'wc'. */
+static void
+commit_set_priority_action(const struct flow *flow, struct flow *base,
+                           struct ofpbuf *odp_actions,
+                           struct flow_wildcards *wc)
+{
+    if (flow->skb_priority == base->skb_priority) {
+        return;
+    }
+
+    memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
+    base->skb_priority = flow->skb_priority;
+    commit_set_action(odp_actions, OVS_KEY_ATTR_PRIORITY,
+                      &base->skb_priority, sizeof base->skb_priority);
+}
+
+/* If 'flow->pkt_mark' differs from 'base->pkt_mark', appends a packet-mark
+ * set action to 'odp_actions', updates 'base', and exact-matches the mark in
+ * 'wc'. */
+static void
+commit_set_pkt_mark_action(const struct flow *flow, struct flow *base,
+                           struct ofpbuf *odp_actions,
+                           struct flow_wildcards *wc)
+{
+    if (flow->pkt_mark == base->pkt_mark) {
+        return;
+    }
+
+    memset(&wc->masks.pkt_mark, 0xff, sizeof wc->masks.pkt_mark);
+    base->pkt_mark = flow->pkt_mark;
+    odp_put_pkt_mark_action(base->pkt_mark, odp_actions);
+}
+
+/* If any of the flow key data that ODP actions can modify are different in
+ * 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
+ * key from 'base' into 'flow', and then changes 'base' the same way. Does not
+ * commit set_tunnel actions. Users should call commit_odp_tunnel_action()
+ * in addition to this function if needed. Sets fields in 'wc' that are
+ * used as part of the action.
+ *
+ * Returns a reason to force processing the flow's packets into the userspace
+ * slow path, if there is one, otherwise 0. */
+enum slow_path_reason
+commit_odp_actions(const struct flow *flow, struct flow *base,
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+ enum slow_path_reason slow;
+
+ commit_set_ether_addr_action(flow, base, odp_actions, wc);
+ slow = commit_set_nw_action(flow, base, odp_actions, wc);
+ commit_set_port_action(flow, base, odp_actions, wc);
+ commit_mpls_action(flow, base, odp_actions, wc);
+ commit_vlan_action(flow->vlan_tci, base, odp_actions, wc);
+ commit_set_priority_action(flow, base, odp_actions, wc);
+ commit_set_pkt_mark_action(flow, base, odp_actions, wc);